repo_name
string
path
string
copies
string
size
string
content
string
license
string
akhilnarang/ThugLife_bullhead
sound/isa/adlib.c
2247
2964
/* * AdLib FM card driver. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/isa.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/opl3.h> #define CRD_NAME "AdLib FM" #define DEV_NAME "adlib" MODULE_DESCRIPTION(CRD_NAME); MODULE_AUTHOR("Rene Herman"); MODULE_LICENSE("GPL"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; static long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " CRD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " CRD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable " CRD_NAME " soundcard."); module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for " CRD_NAME " driver."); static int snd_adlib_match(struct device *dev, unsigned int n) { if (!enable[n]) return 0; if (port[n] == SNDRV_AUTO_PORT) { dev_err(dev, "please specify port\n"); return 0; } return 1; } static void snd_adlib_free(struct snd_card *card) { release_and_free_resource(card->private_data); } static int snd_adlib_probe(struct device *dev, unsigned int n) { struct snd_card *card; struct snd_opl3 *opl3; int error; error = snd_card_create(index[n], id[n], THIS_MODULE, 0, &card); if (error < 0) { dev_err(dev, "could not create card\n"); return error; } card->private_data = request_region(port[n], 4, CRD_NAME); if (!card->private_data) { dev_err(dev, "could not grab ports\n"); error = -EBUSY; goto out; } card->private_free = snd_adlib_free; strcpy(card->driver, DEV_NAME); strcpy(card->shortname, CRD_NAME); sprintf(card->longname, CRD_NAME " at %#lx", port[n]); error = snd_opl3_create(card, port[n], port[n] + 2, OPL3_HW_AUTO, 1, &opl3); if (error < 0) { dev_err(dev, "could not create OPL\n"); goto out; } error = snd_opl3_hwdep_new(opl3, 0, 0, 
NULL); if (error < 0) { dev_err(dev, "could not create FM\n"); goto out; } snd_card_set_dev(card, dev); error = snd_card_register(card); if (error < 0) { dev_err(dev, "could not register card\n"); goto out; } dev_set_drvdata(dev, card); return 0; out: snd_card_free(card); return error; } static int snd_adlib_remove(struct device *dev, unsigned int n) { snd_card_free(dev_get_drvdata(dev)); dev_set_drvdata(dev, NULL); return 0; } static struct isa_driver snd_adlib_driver = { .match = snd_adlib_match, .probe = snd_adlib_probe, .remove = snd_adlib_remove, .driver = { .name = DEV_NAME } }; static int __init alsa_card_adlib_init(void) { return isa_register_driver(&snd_adlib_driver, SNDRV_CARDS); } static void __exit alsa_card_adlib_exit(void) { isa_unregister_driver(&snd_adlib_driver); } module_init(alsa_card_adlib_init); module_exit(alsa_card_adlib_exit);
gpl-2.0
BOOTMGR/Kernel_JB_3.4
security/selinux/netport.c
4551
6889
/* * Network port table * * SELinux must keep a mapping of network ports to labels/SIDs. This * mapping is maintained as part of the normal policy but a fast cache is * needed to reduce the lookup overhead. * * Author: Paul Moore <paul@paul-moore.com> * * This code is heavily based on the "netif" concept originally developed by * James Morris <jmorris@redhat.com> * (see security/selinux/netif.c for more information) * */ /* * (c) Copyright Hewlett-Packard Development Company, L.P., 2008 * * This program is free software: you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/types.h> #include <linux/rcupdate.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/spinlock.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <net/ip.h> #include <net/ipv6.h> #include "netport.h" #include "objsec.h" #define SEL_NETPORT_HASH_SIZE 256 #define SEL_NETPORT_HASH_BKT_LIMIT 16 struct sel_netport_bkt { int size; struct list_head list; }; struct sel_netport { struct netport_security_struct psec; struct list_head list; struct rcu_head rcu; }; /* NOTE: we are using a combined hash table for both IPv4 and IPv6, the reason * for this is that I suspect most users will not make heavy use of both * address families at the same time so one table will usually end up wasted, * if this becomes a problem we can always add a hash table for each address * family later */ static LIST_HEAD(sel_netport_list); static DEFINE_SPINLOCK(sel_netport_lock); static struct sel_netport_bkt sel_netport_hash[SEL_NETPORT_HASH_SIZE]; /** * sel_netport_hashfn - Hashing function for the port 
table * @pnum: port number * * Description: * This is the hashing function for the port table, it returns the bucket * number for the given port. * */ static unsigned int sel_netport_hashfn(u16 pnum) { return (pnum & (SEL_NETPORT_HASH_SIZE - 1)); } /** * sel_netport_find - Search for a port record * @protocol: protocol * @port: pnum * * Description: * Search the network port table and return the matching record. If an entry * can not be found in the table return NULL. * */ static struct sel_netport *sel_netport_find(u8 protocol, u16 pnum) { unsigned int idx; struct sel_netport *port; idx = sel_netport_hashfn(pnum); list_for_each_entry_rcu(port, &sel_netport_hash[idx].list, list) if (port->psec.port == pnum && port->psec.protocol == protocol) return port; return NULL; } /** * sel_netport_insert - Insert a new port into the table * @port: the new port record * * Description: * Add a new port record to the network address hash table. * */ static void sel_netport_insert(struct sel_netport *port) { unsigned int idx; /* we need to impose a limit on the growth of the hash table so check * this bucket to make sure it is within the specified bounds */ idx = sel_netport_hashfn(port->psec.port); list_add_rcu(&port->list, &sel_netport_hash[idx].list); if (sel_netport_hash[idx].size == SEL_NETPORT_HASH_BKT_LIMIT) { struct sel_netport *tail; tail = list_entry( rcu_dereference_protected( sel_netport_hash[idx].list.prev, lockdep_is_held(&sel_netport_lock)), struct sel_netport, list); list_del_rcu(&tail->list); kfree_rcu(tail, rcu); } else sel_netport_hash[idx].size++; } /** * sel_netport_sid_slow - Lookup the SID of a network address using the policy * @protocol: protocol * @pnum: port * @sid: port SID * * Description: * This function determines the SID of a network port by quering the security * policy. The result is added to the network port table to speedup future * queries. Returns zero on success, negative values on failure. 
* */ static int sel_netport_sid_slow(u8 protocol, u16 pnum, u32 *sid) { int ret = -ENOMEM; struct sel_netport *port; struct sel_netport *new = NULL; spin_lock_bh(&sel_netport_lock); port = sel_netport_find(protocol, pnum); if (port != NULL) { *sid = port->psec.sid; spin_unlock_bh(&sel_netport_lock); return 0; } new = kzalloc(sizeof(*new), GFP_ATOMIC); if (new == NULL) goto out; ret = security_port_sid(protocol, pnum, sid); if (ret != 0) goto out; new->psec.port = pnum; new->psec.protocol = protocol; new->psec.sid = *sid; sel_netport_insert(new); out: spin_unlock_bh(&sel_netport_lock); if (unlikely(ret)) { printk(KERN_WARNING "SELinux: failure in sel_netport_sid_slow()," " unable to determine network port label\n"); kfree(new); } return ret; } /** * sel_netport_sid - Lookup the SID of a network port * @protocol: protocol * @pnum: port * @sid: port SID * * Description: * This function determines the SID of a network port using the fastest method * possible. First the port table is queried, but if an entry can't be found * then the policy is queried and the result is added to the table to speedup * future queries. Returns zero on success, negative values on failure. * */ int sel_netport_sid(u8 protocol, u16 pnum, u32 *sid) { struct sel_netport *port; rcu_read_lock(); port = sel_netport_find(protocol, pnum); if (port != NULL) { *sid = port->psec.sid; rcu_read_unlock(); return 0; } rcu_read_unlock(); return sel_netport_sid_slow(protocol, pnum, sid); } /** * sel_netport_flush - Flush the entire network port table * * Description: * Remove all entries from the network address table. 
* */ static void sel_netport_flush(void) { unsigned int idx; struct sel_netport *port, *port_tmp; spin_lock_bh(&sel_netport_lock); for (idx = 0; idx < SEL_NETPORT_HASH_SIZE; idx++) { list_for_each_entry_safe(port, port_tmp, &sel_netport_hash[idx].list, list) { list_del_rcu(&port->list); kfree_rcu(port, rcu); } sel_netport_hash[idx].size = 0; } spin_unlock_bh(&sel_netport_lock); } static int sel_netport_avc_callback(u32 event, u32 ssid, u32 tsid, u16 class, u32 perms, u32 *retained) { if (event == AVC_CALLBACK_RESET) { sel_netport_flush(); synchronize_net(); } return 0; } static __init int sel_netport_init(void) { int iter; int ret; if (!selinux_enabled) return 0; for (iter = 0; iter < SEL_NETPORT_HASH_SIZE; iter++) { INIT_LIST_HEAD(&sel_netport_hash[iter].list); sel_netport_hash[iter].size = 0; } ret = avc_add_callback(sel_netport_avc_callback, AVC_CALLBACK_RESET, SECSID_NULL, SECSID_NULL, SECCLASS_NULL, 0); if (ret != 0) panic("avc_add_callback() failed, error %d\n", ret); return ret; } __initcall(sel_netport_init);
gpl-2.0
invisiblek/android_kernel_htc_msm8960
drivers/staging/comedi/drivers/unioxx5.c
4807
17059
/***************************************************************************
 *                                                                         *
 *  comedi/drivers/unioxx5.c                                               *
 *  Driver for Fastwel UNIOxx-5 (analog and digital i/o) boards.           *
 *                                                                         *
 *  Copyright (C) 2006 Kruchinin Daniil (asgard) [asgard@etersoft.ru]      *
 *                                                                         *
 *  COMEDI - Linux Control and Measurement Device Interface                *
 *  Copyright (C) 1998,2000 David A. Schleef <ds@schleef.org>              *
 *                                                                         *
 *  This program is free software; you can redistribute it and/or modify   *
 *  it under the terms of the GNU General Public License as published by   *
 *  the Free Software Foundation; either version 2 of the License, or      *
 *  (at your option) any later version.                                    *
 *                                                                         *
 *  This program is distributed in the hope that it will be useful,        *
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of         *
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the          *
 *  GNU General Public License for more details.                           *
 *                                                                         *
 *  You should have received a copy of the GNU General Public License      *
 *  along with this program; if not, write to the Free Software            *
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.              *
 *                                                                         *
 ***************************************************************************/
/*
Driver: unioxx5
Description: Driver for Fastwel UNIOxx-5 (analog and digital i/o) boards.
Author: Kruchinin Daniil (asgard) <asgard@etersoft.ru>
Status: unknown
Updated: 2006-10-09
Devices: [Fastwel] UNIOxx-5 (unioxx5),

 This card supports digital and analog I/O. It written for g01
 subdevices only.
 channels range: 0 .. 23 dio channels
 and 0 .. 11 analog modules range
 During attaching unioxx5 module displays modules identifiers
 (see dmesg after comedi_config) in format:
 | [module_number] module_id |
*/

#include "../comedidev.h"
#include <linux/ioport.h>
#include <linux/slab.h>

#define DRIVER_NAME "unioxx5"
#define UNIOXX5_SIZE 0x10
#define UNIOXX5_SUBDEV_BASE 0xA000	/* base addr of first subdev */
#define UNIOXX5_SUBDEV_ODDS 0x400

/* modules types */
#define MODULE_DIGITAL 0
#define MODULE_OUTPUT_MASK 0x80	/* analog input/output */

/* constants for digital i/o */
#define UNIOXX5_NUM_OF_CHANS 24

/* constants for analog i/o */
#define TxBE  0x10		/* transmit buffer enable */
#define RxCA  0x20		/* 1 receive character available */
#define Rx2CA 0x40		/* 2 receive character available */
#define Rx4CA 0x80		/* 4 receive character available */

/* bytes mask errors */
#define Rx2CA_ERR_MASK 0x04	/* 2 bytes receiving error */
#define Rx4CA_ERR_MASK 0x08	/* 4 bytes receiving error */

/* channel modes */
#define ALL_2_INPUT  0		/* config all digital channels to input */
#define ALL_2_OUTPUT 1		/* config all digital channels to output */

/* 'private' structure for each subdevice */
struct unioxx5_subd_priv {
	int usp_iobase;		/* base I/O port of this subdevice */
	/* 12 modules. each can be 70L or 73L */
	unsigned char usp_module_type[12];
	/* for saving previous written value for analog modules */
	unsigned char usp_extra_data[12][4];
	unsigned char usp_prev_wr_val[3];	/* previous written value */
	unsigned char usp_prev_cn_val[3];	/* previous channel value */
};

static int unioxx5_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it);
static int unioxx5_subdev_write(struct comedi_device *dev,
				struct comedi_subdevice *subdev,
				struct comedi_insn *insn, unsigned int *data);
static int unioxx5_subdev_read(struct comedi_device *dev,
			       struct comedi_subdevice *subdev,
			       struct comedi_insn *insn, unsigned int *data);
static int unioxx5_insn_config(struct comedi_device *dev,
			       struct comedi_subdevice *subdev,
			       struct comedi_insn *insn, unsigned int *data);
static int unioxx5_detach(struct comedi_device *dev);
static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
				 int subdev_iobase, int minor);
static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp,
				   unsigned int *data, int channel, int minor);
static int __unioxx5_digital_read(struct unioxx5_subd_priv *usp,
				  unsigned int *data, int channel, int minor);
/* static void __unioxx5_digital_config(struct unioxx5_subd_priv* usp,
 * int mode); */
static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp,
				  unsigned int *data, int channel, int minor);
static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
				 unsigned int *data, int channel, int minor);
static int __unioxx5_define_chan_offset(int chan_num);
static void __unioxx5_analog_config(struct unioxx5_subd_priv *usp,
				    int channel);

static struct comedi_driver unioxx5_driver = {
	.driver_name = DRIVER_NAME,
	.module = THIS_MODULE,
	.attach = unioxx5_attach,
	.detach = unioxx5_detach
};

static int __init unioxx5_driver_init_module(void)
{
	return comedi_driver_register(&unioxx5_driver);
}

static void __exit unioxx5_driver_cleanup_module(void)
{
	comedi_driver_unregister(&unioxx5_driver);
}

module_init(unioxx5_driver_init_module);
module_exit(unioxx5_driver_cleanup_module);

/*
 * Probe the board at it->options[0]: count the 'g01' subdevices present
 * (signature bytes at offsets 0xE/0xF of each subdevice window) and
 * initialize each one.  Returns 0 on success, a negative value on failure.
 */
static int unioxx5_attach(struct comedi_device *dev,
			  struct comedi_devconfig *it)
{
	int iobase, i, n_subd;
	int id, num, ba;

	iobase = it->options[0];

	dev->board_name = DRIVER_NAME;
	dev->iobase = iobase;
	iobase += UNIOXX5_SUBDEV_BASE;

	/* defining number of subdevices and getting they types
	 * (it must be 'g01') */
	for (i = n_subd = 0, ba = iobase; i < 4; i++, ba += UNIOXX5_SUBDEV_ODDS) {
		id = inb(ba + 0xE);
		num = inb(ba + 0xF);

		if (id != 'g' || num != 1)
			continue;

		n_subd++;
	}

	/* unioxx5 can has from two to four subdevices */
	if (n_subd < 2) {
		printk(KERN_ERR
		       "your card must has at least 2 'g01' subdevices\n");
		return -1;
	}

	if (alloc_subdevices(dev, n_subd) < 0) {
		printk(KERN_ERR "out of memory\n");
		return -ENOMEM;
	}

	/* initializing each of for same subdevices */
	for (i = 0; i < n_subd; i++, iobase += UNIOXX5_SUBDEV_ODDS) {
		if (__unioxx5_subdev_init(&dev->subdevices[i], iobase,
					  dev->minor) < 0)
			return -1;
	}

	printk(KERN_INFO "attached\n");
	return 0;
}

/*
 * comedi insn_read handler: dispatches to the digital or analog reader
 * depending on the type of the module serving this channel.
 */
static int unioxx5_subdev_read(struct comedi_device *dev,
			       struct comedi_subdevice *subdev,
			       struct comedi_insn *insn, unsigned int *data)
{
	struct unioxx5_subd_priv *usp = subdev->private;
	int channel, type;

	channel = CR_CHAN(insn->chanspec);
	/* defining module type(analog or digital) */
	type = usp->usp_module_type[channel / 2];

	if (type == MODULE_DIGITAL) {
		if (!__unioxx5_digital_read(usp, data, channel, dev->minor))
			return -1;
	} else {
		if (!__unioxx5_analog_read(usp, data, channel, dev->minor))
			return -1;
	}

	return 1;
}

/*
 * comedi insn_write handler: dispatches to the digital or analog writer
 * depending on the type of the module serving this channel.
 */
static int unioxx5_subdev_write(struct comedi_device *dev,
				struct comedi_subdevice *subdev,
				struct comedi_insn *insn, unsigned int *data)
{
	struct unioxx5_subd_priv *usp = subdev->private;
	int channel, type;

	channel = CR_CHAN(insn->chanspec);
	/* defining module type(analog or digital) */
	type = usp->usp_module_type[channel / 2];

	if (type == MODULE_DIGITAL) {
		if (!__unioxx5_digital_write(usp, data, channel, dev->minor))
			return -1;
	} else {
		if (!__unioxx5_analog_write(usp, data, channel, dev->minor))
			return -1;
	}

	return 1;
}

/* for digital modules only */
static int unioxx5_insn_config(struct comedi_device *dev,
			       struct comedi_subdevice *subdev,
			       struct comedi_insn *insn, unsigned int *data)
{
	int channel_offset, flags, channel = CR_CHAN(insn->chanspec), type;
	struct unioxx5_subd_priv *usp = subdev->private;
	int mask = 1 << (channel & 0x07);

	type = usp->usp_module_type[channel / 2];

	if (type != MODULE_DIGITAL) {
		printk(KERN_ERR
		       "comedi%d: channel configuration accessible only for digital modules\n",
		       dev->minor);
		return -1;
	}

	channel_offset = __unioxx5_define_chan_offset(channel);
	if (channel_offset < 0) {
		printk(KERN_ERR
		       "comedi%d: undefined channel %d. channel range is 0 .. 23\n",
		       dev->minor, channel);
		return -1;
	}

	/* gets previously written value */
	flags = usp->usp_prev_cn_val[channel_offset - 1];

	switch (*data) {
	case COMEDI_INPUT:
		flags &= ~mask;
		break;
	case COMEDI_OUTPUT:
		flags |= mask;
		break;
	default:
		printk(KERN_ERR "comedi%d: unknown flag\n", dev->minor);
		return -1;
	}

	/*                                                        *\
	 * sets channels buffer to 1(after this we are allowed to *
	 * change channel type on input or output)                *
	\*                                                        */
	outb(1, usp->usp_iobase + 0);
	/* changes type of _one_ channel */
	outb(flags, usp->usp_iobase + channel_offset);
	/* sets channels bank to 0(allows directly input/output) */
	outb(0, usp->usp_iobase + 0);
	/* saves written value */
	usp->usp_prev_cn_val[channel_offset - 1] = flags;

	return 0;
}

/*
 * Release the I/O region and private data of every subdevice.
 * FIX: skip subdevices whose private data was never allocated (attach can
 * fail part-way through), which previously caused a NULL dereference here.
 */
static int unioxx5_detach(struct comedi_device *dev)
{
	int i;
	struct comedi_subdevice *subdev;
	struct unioxx5_subd_priv *usp;

	for (i = 0; i < dev->n_subdevices; i++) {
		subdev = &dev->subdevices[i];
		usp = subdev->private;
		if (!usp)
			continue;
		release_region(usp->usp_iobase, UNIOXX5_SIZE);
		kfree(subdev->private);
	}

	return 0;
}

/* initializing subdevice with given address */
static int __unioxx5_subdev_init(struct comedi_subdevice *subdev,
				 int subdev_iobase, int minor)
{
	struct unioxx5_subd_priv *usp;
	int i, to, ndef_flag = 0;

	if (!request_region(subdev_iobase, UNIOXX5_SIZE, DRIVER_NAME)) {
		printk(KERN_ERR "comedi%d: I/O port conflict\n", minor);
		return -EIO;
	}

	usp = kzalloc(sizeof(*usp), GFP_KERNEL);
	if (usp == NULL) {
		printk(KERN_ERR "comedi%d: error! --> out of memory!\n",
		       minor);
		/* FIX: previously the just-requested region leaked here */
		release_region(subdev_iobase, UNIOXX5_SIZE);
		return -1;
	}

	usp->usp_iobase = subdev_iobase;
	printk(KERN_INFO "comedi%d: |", minor);

	/* defining modules types */
	for (i = 0; i < 12; i++) {
		to = 10000;	/* timeout while polling the status register */

		__unioxx5_analog_config(usp, i * 2);
		/* sends channel number to card */
		outb(i + 1, subdev_iobase + 5);
		outb('H', subdev_iobase + 6);	/* requests EEPROM world */
		while (!(inb(subdev_iobase + 0) & TxBE))
			;	/* waits while writting will be allowed */
		outb(0, subdev_iobase + 6);

		/* waits while reading of two bytes will be allowed */
		while (!(inb(subdev_iobase + 0) & Rx2CA)) {
			if (--to <= 0) {
				ndef_flag = 1;
				break;
			}
		}

		if (ndef_flag) {
			/* timed out: no module present in this position */
			usp->usp_module_type[i] = 0;
			ndef_flag = 0;
		} else
			usp->usp_module_type[i] = inb(subdev_iobase + 6);

		printk(" [%d] 0x%02x |", i, usp->usp_module_type[i]);
		udelay(1);
	}

	printk("\n");

	/* initial subdevice for digital or analog i/o */
	subdev->type = COMEDI_SUBD_DIO;
	subdev->private = usp;
	subdev->subdev_flags = SDF_READABLE | SDF_WRITABLE;
	subdev->n_chan = UNIOXX5_NUM_OF_CHANS;
	subdev->maxdata = 0xFFF;
	subdev->range_table = &range_digital;
	subdev->insn_read = unioxx5_subdev_read;
	subdev->insn_write = unioxx5_subdev_write;
	/* for digital modules only!!! */
	subdev->insn_config = unioxx5_insn_config;

	printk(KERN_INFO "subdevice configured\n");

	return 0;
}

/*
 * Write one digital channel: read-modify-write the cached output byte for
 * the channel's bank and push it to the hardware.  Returns 1 on success,
 * 0 when the channel number is out of range.
 */
static int __unioxx5_digital_write(struct unioxx5_subd_priv *usp,
				   unsigned int *data, int channel, int minor)
{
	int channel_offset, val;
	int mask = 1 << (channel & 0x07);

	channel_offset = __unioxx5_define_chan_offset(channel);
	if (channel_offset < 0) {
		printk(KERN_ERR
		       "comedi%d: undefined channel %d. channel range is 0 .. 23\n",
		       minor, channel);
		return 0;
	}

	/* getting previous written value */
	val = usp->usp_prev_wr_val[channel_offset - 1];

	if (*data)
		val |= mask;
	else
		val &= ~mask;

	outb(val, usp->usp_iobase + channel_offset);
	/* saving new written value */
	usp->usp_prev_wr_val[channel_offset - 1] = val;

	return 1;
}

/* function for digital reading */
static int __unioxx5_digital_read(struct unioxx5_subd_priv *usp,
				  unsigned int *data, int channel, int minor)
{
	int channel_offset, mask = 1 << (channel & 0x07);

	channel_offset = __unioxx5_define_chan_offset(channel);
	if (channel_offset < 0) {
		printk(KERN_ERR
		       "comedi%d: undefined channel %d. channel range is 0 .. 23\n",
		       minor, channel);
		return 0;
	}

	*data = inb(usp->usp_iobase + channel_offset);
	*data &= mask;

	/* reduce channel to its bit position within the bank (0..7) so the
	 * shift below leaves a clean 0 or 1 in *data */
	if (channel_offset > 1)
		channel -= 2 << channel_offset;

	/* this operation is created for correct readed value to 0 or 1 */
	*data >>= channel;
	return 1;
}

#if 0				/* not used? */
static void __unioxx5_digital_config(struct unioxx5_subd_priv *usp, int mode)
{
	int i, mask;

	mask = (mode == ALL_2_OUTPUT) ? 0xFF : 0x00;
	printk("COMEDI: mode = %d\n", mask);

	outb(1, usp->usp_iobase + 0);

	for (i = 0; i < 3; i++)
		outb(mask, usp->usp_iobase + i);

	outb(0, usp->usp_iobase + 0);
}
#endif

/*
 * Write one analog channel: the 16-bit value is cached per module and the
 * full 4-byte block is streamed to the module with a 'W' command.
 * Returns 1 on success, 0 when the module is input-only.
 */
static int __unioxx5_analog_write(struct unioxx5_subd_priv *usp,
				  unsigned int *data, int channel, int minor)
{
	int module, i;

	module = channel / 2;	/* definig module number(0 .. 11) */
	i = (channel % 2) << 1;	/* depends on type of channel (A or B) */

	/* defining if given module can work on output */
	if (!(usp->usp_module_type[module] & MODULE_OUTPUT_MASK)) {
		printk(KERN_ERR
		       "comedi%d: module in position %d with id 0x%0x is for input only!\n",
		       minor, module, usp->usp_module_type[module]);
		return 0;
	}

	__unioxx5_analog_config(usp, channel);
	/* saving minor byte */
	usp->usp_extra_data[module][i++] = (unsigned char)(*data & 0x00FF);
	/* saving major byte */
	usp->usp_extra_data[module][i] = (unsigned char)((*data & 0xFF00) >> 8);

	/* while(!((inb(usp->usp_iobase + 0)) & TxBE)); */
	/* sending module number to card(1 .. 12) */
	outb(module + 1, usp->usp_iobase + 5);
	outb('W', usp->usp_iobase + 6);	/* sends (W)rite command to module */

	/* sending for bytes to module(one byte per cycle iteration) */
	for (i = 0; i < 4; i++) {
		while (!((inb(usp->usp_iobase + 0)) & TxBE))
			;	/* waits while writting will be allowed */
		outb(usp->usp_extra_data[module][i], usp->usp_iobase + 6);
	}

	return 1;
}

/*
 * Read one analog channel via the 'V'erify command.  Returns 1 on success,
 * 0 when the module is output-only or a 4-byte receive error is flagged.
 */
static int __unioxx5_analog_read(struct unioxx5_subd_priv *usp,
				 unsigned int *data, int channel, int minor)
{
	int module_no, read_ch;
	char control;

	module_no = channel / 2;
	read_ch = channel % 2;	/* depend on type of channel (A or B) */

	/* defining if given module can work on input */
	if (usp->usp_module_type[module_no] & MODULE_OUTPUT_MASK) {
		printk(KERN_ERR
		       "comedi%d: module in position %d with id 0x%02x is for output only",
		       minor, module_no, usp->usp_module_type[module_no]);
		return 0;
	}

	__unioxx5_analog_config(usp, channel);
	/* sends module number to card(1 .. 12) */
	outb(module_no + 1, usp->usp_iobase + 5);
	outb('V', usp->usp_iobase + 6);	/* sends to module (V)erify command */
	control = inb(usp->usp_iobase);	/* get control register byte */

	/* waits while reading four bytes will be allowed */
	while (!((control = inb(usp->usp_iobase + 0)) & Rx4CA))
		;

	/* if four bytes readding error occurs - return 0(false) */
	if ((control & Rx4CA_ERR_MASK)) {
		printk("COMEDI: 4 bytes error\n");
		return 0;
	}

	if (read_ch)
		*data = inw(usp->usp_iobase + 6);	/* channel B */
	else
		*data = inw(usp->usp_iobase + 4);	/* channel A */

	return 1;
}

/* configure channels for analog i/o (even to output, odd to input) */
static void __unioxx5_analog_config(struct unioxx5_subd_priv *usp, int channel)
{
	int chan_a, chan_b, conf, channel_offset;

	channel_offset = __unioxx5_define_chan_offset(channel);
	conf = usp->usp_prev_cn_val[channel_offset - 1];
	chan_a = chan_b = 1;

	/* setting channel A and channel B mask */
	if (channel % 2 == 0) {
		chan_a <<= channel & 0x07;
		chan_b <<= (channel + 1) & 0x07;
	} else {
		chan_a <<= (channel - 1) & 0x07;
		chan_b <<= channel & 0x07;
	}

	conf |= chan_a;		/* even channel ot output */
	conf &= ~chan_b;	/* odd channel to input */

	outb(1, usp->usp_iobase + 0);
	outb(conf, usp->usp_iobase + channel_offset);
	outb(0, usp->usp_iobase + 0);

	usp->usp_prev_cn_val[channel_offset - 1] = conf;
}

/*                                                    *\
 * this function defines if the given channel number  *
 * enters in default numeric interspace(from 0 to 23) *
 * and it returns address offset for usage needed     *
 * channel.                                           *
\*                                                    */
static int __unioxx5_define_chan_offset(int chan_num)
{
	if (chan_num < 0 || chan_num > 23)
		return -1;

	return (chan_num >> 3) + 1;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
OwnROM-Devices/OwnKernel-bacon
drivers/char/hw_random/omap-rng.c
4807
5039
/* * omap-rng.c - RNG driver for TI OMAP CPU family * * Author: Deepak Saxena <dsaxena@plexity.net> * * Copyright 2005 (c) MontaVista Software, Inc. * * Mostly based on original driver: * * Copyright (C) 2005 Nokia Corporation * Author: Juha Yrjölä <juha.yrjola@nokia.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/module.h> #include <linux/init.h> #include <linux/random.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/platform_device.h> #include <linux/hw_random.h> #include <linux/delay.h> #include <asm/io.h> #include <plat/cpu.h> #define RNG_OUT_REG 0x00 /* Output register */ #define RNG_STAT_REG 0x04 /* Status register [0] = STAT_BUSY */ #define RNG_ALARM_REG 0x24 /* Alarm register [7:0] = ALARM_COUNTER */ #define RNG_CONFIG_REG 0x28 /* Configuration register [11:6] = RESET_COUNT [5:3] = RING2_DELAY [2:0] = RING1_DELAY */ #define RNG_REV_REG 0x3c /* Revision register [7:0] = REV_NB */ #define RNG_MASK_REG 0x40 /* Mask and reset register [2] = IT_EN [1] = SOFTRESET [0] = AUTOIDLE */ #define RNG_SYSSTATUS 0x44 /* System status [0] = RESETDONE */ static void __iomem *rng_base; static struct clk *rng_ick; static struct platform_device *rng_dev; static inline u32 omap_rng_read_reg(int reg) { return __raw_readl(rng_base + reg); } static inline void omap_rng_write_reg(int reg, u32 val) { __raw_writel(val, rng_base + reg); } static int omap_rng_data_present(struct hwrng *rng, int wait) { int data, i; for (i = 0; i < 20; i++) { data = omap_rng_read_reg(RNG_STAT_REG) ? 0 : 1; if (data || !wait) break; /* RNG produces data fast enough (2+ MBit/sec, even * during "rngtest" loads, that these delays don't * seem to trigger. We *could* use the RNG IRQ, but * that'd be higher overhead ... so why bother? 
*/ udelay(10); } return data; } static int omap_rng_data_read(struct hwrng *rng, u32 *data) { *data = omap_rng_read_reg(RNG_OUT_REG); return 4; } static struct hwrng omap_rng_ops = { .name = "omap", .data_present = omap_rng_data_present, .data_read = omap_rng_data_read, }; static int __devinit omap_rng_probe(struct platform_device *pdev) { struct resource *res; int ret; /* * A bit ugly, and it will never actually happen but there can * be only one RNG and this catches any bork */ if (rng_dev) return -EBUSY; if (cpu_is_omap24xx()) { rng_ick = clk_get(&pdev->dev, "ick"); if (IS_ERR(rng_ick)) { dev_err(&pdev->dev, "Could not get rng_ick\n"); ret = PTR_ERR(rng_ick); return ret; } else clk_enable(rng_ick); } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENOENT; goto err_region; } if (!request_mem_region(res->start, resource_size(res), pdev->name)) { ret = -EBUSY; goto err_region; } dev_set_drvdata(&pdev->dev, res); rng_base = ioremap(res->start, resource_size(res)); if (!rng_base) { ret = -ENOMEM; goto err_ioremap; } ret = hwrng_register(&omap_rng_ops); if (ret) goto err_register; dev_info(&pdev->dev, "OMAP Random Number Generator ver. 
%02x\n", omap_rng_read_reg(RNG_REV_REG)); omap_rng_write_reg(RNG_MASK_REG, 0x1); rng_dev = pdev; return 0; err_register: iounmap(rng_base); rng_base = NULL; err_ioremap: release_mem_region(res->start, resource_size(res)); err_region: if (cpu_is_omap24xx()) { clk_disable(rng_ick); clk_put(rng_ick); } return ret; } static int __exit omap_rng_remove(struct platform_device *pdev) { struct resource *res = dev_get_drvdata(&pdev->dev); hwrng_unregister(&omap_rng_ops); omap_rng_write_reg(RNG_MASK_REG, 0x0); iounmap(rng_base); if (cpu_is_omap24xx()) { clk_disable(rng_ick); clk_put(rng_ick); } release_mem_region(res->start, resource_size(res)); rng_base = NULL; return 0; } #ifdef CONFIG_PM static int omap_rng_suspend(struct platform_device *pdev, pm_message_t message) { omap_rng_write_reg(RNG_MASK_REG, 0x0); return 0; } static int omap_rng_resume(struct platform_device *pdev) { omap_rng_write_reg(RNG_MASK_REG, 0x1); return 0; } #else #define omap_rng_suspend NULL #define omap_rng_resume NULL #endif /* work with hotplug and coldplug */ MODULE_ALIAS("platform:omap_rng"); static struct platform_driver omap_rng_driver = { .driver = { .name = "omap_rng", .owner = THIS_MODULE, }, .probe = omap_rng_probe, .remove = __exit_p(omap_rng_remove), .suspend = omap_rng_suspend, .resume = omap_rng_resume }; static int __init omap_rng_init(void) { if (!cpu_is_omap16xx() && !cpu_is_omap24xx()) return -ENODEV; return platform_driver_register(&omap_rng_driver); } static void __exit omap_rng_exit(void) { platform_driver_unregister(&omap_rng_driver); } module_init(omap_rng_init); module_exit(omap_rng_exit); MODULE_AUTHOR("Deepak Saxena (and others)"); MODULE_LICENSE("GPL");
gpl-2.0
flar2/m8-GPE-5.0.1
drivers/scsi/qla4xxx/ql4_init.c
4807
32213
/* * QLogic iSCSI HBA Driver * Copyright (c) 2003-2010 QLogic Corporation * * See LICENSE.qla4xxx for copyright and licensing details. */ #include <scsi/iscsi_if.h> #include "ql4_def.h" #include "ql4_glbl.h" #include "ql4_dbg.h" #include "ql4_inline.h" static void ql4xxx_set_mac_number(struct scsi_qla_host *ha) { uint32_t value; uint8_t func_number; unsigned long flags; /* Get the function number */ spin_lock_irqsave(&ha->hardware_lock, flags); value = readw(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); func_number = (uint8_t) ((value >> 4) & 0x30); switch (value & ISP_CONTROL_FN_MASK) { case ISP_CONTROL_FN0_SCSI: ha->mac_index = 1; break; case ISP_CONTROL_FN1_SCSI: ha->mac_index = 3; break; default: DEBUG2(printk("scsi%ld: %s: Invalid function number, " "ispControlStatus = 0x%x\n", ha->host_no, __func__, value)); break; } DEBUG2(printk("scsi%ld: %s: mac_index %d.\n", ha->host_no, __func__, ha->mac_index)); } /** * qla4xxx_free_ddb - deallocate ddb * @ha: pointer to host adapter structure. * @ddb_entry: pointer to device database entry * * This routine marks a DDB entry INVALID **/ void qla4xxx_free_ddb(struct scsi_qla_host *ha, struct ddb_entry *ddb_entry) { /* Remove device pointer from index mapping arrays */ ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = (struct ddb_entry *) INVALID_ENTRY; ha->tot_ddbs--; } /** * qla4xxx_init_response_q_entries() - Initializes response queue entries. * @ha: HA context * * Beginning of request ring has initialization control block already built * by nvram config routine. **/ static void qla4xxx_init_response_q_entries(struct scsi_qla_host *ha) { uint16_t cnt; struct response *pkt; pkt = (struct response *)ha->response_ptr; for (cnt = 0; cnt < RESPONSE_QUEUE_DEPTH; cnt++) { pkt->signature = RESPONSE_PROCESSED; pkt++; } } /** * qla4xxx_init_rings - initialize hw queues * @ha: pointer to host adapter structure. * * This routine initializes the internal queues for the specified adapter. 
* The QLA4010 requires us to restart the queues at index 0. * The QLA4000 doesn't care, so just default to QLA4010's requirement. **/ int qla4xxx_init_rings(struct scsi_qla_host *ha) { unsigned long flags = 0; int i; /* Initialize request queue. */ spin_lock_irqsave(&ha->hardware_lock, flags); ha->request_out = 0; ha->request_in = 0; ha->request_ptr = &ha->request_ring[ha->request_in]; ha->req_q_count = REQUEST_QUEUE_DEPTH; /* Initialize response queue. */ ha->response_in = 0; ha->response_out = 0; ha->response_ptr = &ha->response_ring[ha->response_out]; if (is_qla8022(ha)) { writel(0, (unsigned long __iomem *)&ha->qla4_8xxx_reg->req_q_out); writel(0, (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_in); writel(0, (unsigned long __iomem *)&ha->qla4_8xxx_reg->rsp_q_out); } else { /* * Initialize DMA Shadow registers. The firmware is really * supposed to take care of this, but on some uniprocessor * systems, the shadow registers aren't cleared-- causing * the interrupt_handler to think there are responses to be * processed when there aren't. */ ha->shadow_regs->req_q_out = __constant_cpu_to_le32(0); ha->shadow_regs->rsp_q_in = __constant_cpu_to_le32(0); wmb(); writel(0, &ha->reg->req_q_in); writel(0, &ha->reg->rsp_q_out); readl(&ha->reg->rsp_q_out); } qla4xxx_init_response_q_entries(ha); /* Initialize mabilbox active array */ for (i = 0; i < MAX_MRB; i++) ha->active_mrb_array[i] = NULL; spin_unlock_irqrestore(&ha->hardware_lock, flags); return QLA_SUCCESS; } /** * qla4xxx_get_sys_info - validate adapter MAC address(es) * @ha: pointer to host adapter structure. 
* **/ int qla4xxx_get_sys_info(struct scsi_qla_host *ha) { struct flash_sys_info *sys_info; dma_addr_t sys_info_dma; int status = QLA_ERROR; sys_info = dma_alloc_coherent(&ha->pdev->dev, sizeof(*sys_info), &sys_info_dma, GFP_KERNEL); if (sys_info == NULL) { DEBUG2(printk("scsi%ld: %s: Unable to allocate dma buffer.\n", ha->host_no, __func__)); goto exit_get_sys_info_no_free; } memset(sys_info, 0, sizeof(*sys_info)); /* Get flash sys info */ if (qla4xxx_get_flash(ha, sys_info_dma, FLASH_OFFSET_SYS_INFO, sizeof(*sys_info)) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: get_flash FLASH_OFFSET_SYS_INFO " "failed\n", ha->host_no, __func__)); goto exit_get_sys_info; } /* Save M.A.C. address & serial_number */ memcpy(ha->my_mac, &sys_info->physAddr[0].address[0], min(sizeof(ha->my_mac), sizeof(sys_info->physAddr[0].address))); memcpy(ha->serial_number, &sys_info->acSerialNumber, min(sizeof(ha->serial_number), sizeof(sys_info->acSerialNumber))); status = QLA_SUCCESS; exit_get_sys_info: dma_free_coherent(&ha->pdev->dev, sizeof(*sys_info), sys_info, sys_info_dma); exit_get_sys_info_no_free: return status; } /** * qla4xxx_init_local_data - initialize adapter specific local data * @ha: pointer to host adapter structure. 
* **/ static int qla4xxx_init_local_data(struct scsi_qla_host *ha) { /* Initialize aen queue */ ha->aen_q_count = MAX_AEN_ENTRIES; return qla4xxx_get_firmware_status(ha); } static uint8_t qla4xxx_wait_for_ip_config(struct scsi_qla_host *ha) { uint8_t ipv4_wait = 0; uint8_t ipv6_wait = 0; int8_t ip_address[IPv6_ADDR_LEN] = {0} ; /* If both IPv4 & IPv6 are enabled, possibly only one * IP address may be acquired, so check to see if we * need to wait for another */ if (is_ipv4_enabled(ha) && is_ipv6_enabled(ha)) { if (((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0) && ((ha->addl_fw_state & FW_ADDSTATE_DHCPv4_LEASE_ACQUIRED) == 0)) { ipv4_wait = 1; } if (((ha->ip_config.ipv6_addl_options & IPV6_ADDOPT_NEIGHBOR_DISCOVERY_ADDR_ENABLE) != 0) && ((ha->ip_config.ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) || (ha->ip_config.ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) || (ha->ip_config.ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING))) { ipv6_wait = 1; if ((ha->ip_config.ipv6_link_local_state == IP_ADDRSTATE_PREFERRED) || (ha->ip_config.ipv6_addr0_state == IP_ADDRSTATE_PREFERRED) || (ha->ip_config.ipv6_addr1_state == IP_ADDRSTATE_PREFERRED)) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: " "Preferred IP configured." " Don't wait!\n", ha->host_no, __func__)); ipv6_wait = 0; } if (memcmp(&ha->ip_config.ipv6_default_router_addr, ip_address, IPv6_ADDR_LEN) == 0) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: " "No Router configured. " "Don't wait!\n", ha->host_no, __func__)); ipv6_wait = 0; } if ((ha->ip_config.ipv6_default_router_state == IPV6_RTRSTATE_MANUAL) && (ha->ip_config.ipv6_link_local_state == IP_ADDRSTATE_TENTATIVE) && (memcmp(&ha->ip_config.ipv6_link_local_addr, &ha->ip_config.ipv6_default_router_addr, 4) == 0)) { DEBUG2(printk("scsi%ld: %s: LinkLocal Router & " "IP configured. 
Don't wait!\n", ha->host_no, __func__)); ipv6_wait = 0; } } if (ipv4_wait || ipv6_wait) { DEBUG2(printk("scsi%ld: %s: Wait for additional " "IP(s) \"", ha->host_no, __func__)); if (ipv4_wait) DEBUG2(printk("IPv4 ")); if (ha->ip_config.ipv6_link_local_state == IP_ADDRSTATE_ACQUIRING) DEBUG2(printk("IPv6LinkLocal ")); if (ha->ip_config.ipv6_addr0_state == IP_ADDRSTATE_ACQUIRING) DEBUG2(printk("IPv6Addr0 ")); if (ha->ip_config.ipv6_addr1_state == IP_ADDRSTATE_ACQUIRING) DEBUG2(printk("IPv6Addr1 ")); DEBUG2(printk("\"\n")); } } return ipv4_wait|ipv6_wait; } static int qla4xxx_fw_ready(struct scsi_qla_host *ha) { uint32_t timeout_count; int ready = 0; DEBUG2(ql4_printk(KERN_INFO, ha, "Waiting for Firmware Ready..\n")); for (timeout_count = ADAPTER_INIT_TOV; timeout_count > 0; timeout_count--) { if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) qla4xxx_get_dhcp_ip_address(ha); /* Get firmware state. */ if (qla4xxx_get_firmware_state(ha) != QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: unable to get firmware " "state\n", ha->host_no, __func__)); break; } if (ha->firmware_state & FW_STATE_ERROR) { DEBUG2(printk("scsi%ld: %s: an unrecoverable error has" " occurred\n", ha->host_no, __func__)); break; } if (ha->firmware_state & FW_STATE_CONFIG_WAIT) { /* * The firmware has not yet been issued an Initialize * Firmware command, so issue it now. */ if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) break; /* Go back and test for ready state - no wait. */ continue; } if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:" "AUTOCONNECT in progress\n", ha->host_no, __func__)); } if (ha->firmware_state & FW_STATE_CONFIGURING_IP) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: fwstate:" " CONFIGURING IP\n", ha->host_no, __func__)); /* * Check for link state after 15 secs and if link is * still DOWN then, cable is unplugged. Ignore "DHCP * in Progress/CONFIGURING IP" bit to check if firmware * is in ready state or not after 15 secs. 
* This is applicable for both 2.x & 3.x firmware */ if (timeout_count <= (ADAPTER_INIT_TOV - 15)) { if (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) { DEBUG2(printk(KERN_INFO "scsi%ld: %s:" " LINK UP (Cable plugged)\n", ha->host_no, __func__)); } else if (ha->firmware_state & (FW_STATE_CONFIGURING_IP | FW_STATE_READY)) { DEBUG2(printk(KERN_INFO "scsi%ld: %s: " "LINK DOWN (Cable unplugged)\n", ha->host_no, __func__)); ha->firmware_state = FW_STATE_READY; } } } if (ha->firmware_state == FW_STATE_READY) { /* If DHCP IP Addr is available, retrieve it now. */ if (test_and_clear_bit(DPC_GET_DHCP_IP_ADDR, &ha->dpc_flags)) qla4xxx_get_dhcp_ip_address(ha); if (!qla4xxx_wait_for_ip_config(ha) || timeout_count == 1) { DEBUG2(ql4_printk(KERN_INFO, ha, "Firmware Ready..\n")); /* The firmware is ready to process SCSI commands. */ DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: MEDIA TYPE" " - %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_OPTICAL_MEDIA) != 0 ? "OPTICAL" : "COPPER")); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: DHCPv4 STATE" " Enabled %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_DHCPv4_ENABLED) != 0 ? "YES" : "NO")); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: LINK %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_LINK_UP) != 0 ? "UP" : "DOWN")); DEBUG2(ql4_printk(KERN_INFO, ha, "scsi%ld: %s: iSNS Service " "Started %s\n", ha->host_no, __func__, (ha->addl_fw_state & FW_ADDSTATE_ISNS_SVC_ENABLED) != 0 ? 
"YES" : "NO")); ready = 1; break; } } DEBUG2(printk("scsi%ld: %s: waiting on fw, state=%x:%x - " "seconds expired= %d\n", ha->host_no, __func__, ha->firmware_state, ha->addl_fw_state, timeout_count)); if (is_qla4032(ha) && !(ha->addl_fw_state & FW_ADDSTATE_LINK_UP) && (timeout_count < ADAPTER_INIT_TOV - 5)) { break; } msleep(1000); } /* end of for */ if (timeout_count <= 0) DEBUG2(printk("scsi%ld: %s: FW Initialization timed out!\n", ha->host_no, __func__)); if (ha->firmware_state & FW_STATE_CONFIGURING_IP) { DEBUG2(printk("scsi%ld: %s: FW initialized, but is reporting " "it's waiting to configure an IP address\n", ha->host_no, __func__)); ready = 1; } else if (ha->firmware_state & FW_STATE_WAIT_AUTOCONNECT) { DEBUG2(printk("scsi%ld: %s: FW initialized, but " "auto-discovery still in process\n", ha->host_no, __func__)); ready = 1; } return ready; } /** * qla4xxx_init_firmware - initializes the firmware. * @ha: pointer to host adapter structure. * **/ static int qla4xxx_init_firmware(struct scsi_qla_host *ha) { int status = QLA_ERROR; if (is_aer_supported(ha) && test_bit(AF_PCI_CHANNEL_IO_PERM_FAILURE, &ha->flags)) return status; /* For 82xx, stop firmware before initializing because if BIOS * has previously initialized firmware, then driver's initialize * firmware will fail. 
*/ if (is_qla8022(ha)) qla4_8xxx_stop_firmware(ha); ql4_printk(KERN_INFO, ha, "Initializing firmware..\n"); if (qla4xxx_initialize_fw_cb(ha) == QLA_ERROR) { DEBUG2(printk("scsi%ld: %s: Failed to initialize firmware " "control block\n", ha->host_no, __func__)); return status; } if (!qla4xxx_fw_ready(ha)) return status; return qla4xxx_get_firmware_status(ha); } static void qla4xxx_set_model_info(struct scsi_qla_host *ha) { uint16_t board_id_string[8]; int i; int size = sizeof(ha->nvram->isp4022.boardIdStr); int offset = offsetof(struct eeprom_data, isp4022.boardIdStr) / 2; for (i = 0; i < (size / 2) ; i++) { board_id_string[i] = rd_nvram_word(ha, offset); offset += 1; } memcpy(ha->model_name, board_id_string, size); } static int qla4xxx_config_nvram(struct scsi_qla_host *ha) { unsigned long flags; union external_hw_config_reg extHwConfig; DEBUG2(printk("scsi%ld: %s: Get EEProm parameters \n", ha->host_no, __func__)); if (ql4xxx_lock_flash(ha) != QLA_SUCCESS) return QLA_ERROR; if (ql4xxx_lock_nvram(ha) != QLA_SUCCESS) { ql4xxx_unlock_flash(ha); return QLA_ERROR; } /* Get EEPRom Parameters from NVRAM and validate */ ql4_printk(KERN_INFO, ha, "Configuring NVRAM ...\n"); if (qla4xxx_is_nvram_configuration_valid(ha) == QLA_SUCCESS) { spin_lock_irqsave(&ha->hardware_lock, flags); extHwConfig.Asuint32_t = rd_nvram_word(ha, eeprom_ext_hw_conf_offset(ha)); spin_unlock_irqrestore(&ha->hardware_lock, flags); } else { ql4_printk(KERN_WARNING, ha, "scsi%ld: %s: EEProm checksum invalid. 
" "Please update your EEPROM\n", ha->host_no, __func__); /* Attempt to set defaults */ if (is_qla4010(ha)) extHwConfig.Asuint32_t = 0x1912; else if (is_qla4022(ha) | is_qla4032(ha)) extHwConfig.Asuint32_t = 0x0023; else return QLA_ERROR; } if (is_qla4022(ha) || is_qla4032(ha)) qla4xxx_set_model_info(ha); else strcpy(ha->model_name, "QLA4010"); DEBUG(printk("scsi%ld: %s: Setting extHwConfig to 0xFFFF%04x\n", ha->host_no, __func__, extHwConfig.Asuint32_t)); spin_lock_irqsave(&ha->hardware_lock, flags); writel((0xFFFF << 16) | extHwConfig.Asuint32_t, isp_ext_hw_conf(ha)); readl(isp_ext_hw_conf(ha)); spin_unlock_irqrestore(&ha->hardware_lock, flags); ql4xxx_unlock_nvram(ha); ql4xxx_unlock_flash(ha); return QLA_SUCCESS; } /** * qla4_8xxx_pci_config() - Setup ISP82xx PCI configuration registers. * @ha: HA context */ void qla4_8xxx_pci_config(struct scsi_qla_host *ha) { pci_set_master(ha->pdev); } void qla4xxx_pci_config(struct scsi_qla_host *ha) { uint16_t w; int status; ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n"); pci_set_master(ha->pdev); status = pci_set_mwi(ha->pdev); /* * We want to respect framework's setting of PCI configuration space * command register and also want to make sure that all bits of * interest to us are properly set in command register. */ pci_read_config_word(ha->pdev, PCI_COMMAND, &w); w |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR; w &= ~PCI_COMMAND_INTX_DISABLE; pci_write_config_word(ha->pdev, PCI_COMMAND, w); } static int qla4xxx_start_firmware_from_flash(struct scsi_qla_host *ha) { int status = QLA_ERROR; unsigned long max_wait_time; unsigned long flags; uint32_t mbox_status; ql4_printk(KERN_INFO, ha, "Starting firmware ...\n"); /* * Start firmware from flash ROM * * WORKAROUND: Stuff a non-constant value that the firmware can * use as a seed for a random number generator in MB7 prior to * setting BOOT_ENABLE. 
Fixes problem where the TCP * connections use the same TCP ports after each reboot, * causing some connections to not get re-established. */ DEBUG(printk("scsi%d: %s: Start firmware from flash ROM\n", ha->host_no, __func__)); spin_lock_irqsave(&ha->hardware_lock, flags); writel(jiffies, &ha->reg->mailbox[7]); if (is_qla4022(ha) | is_qla4032(ha)) writel(set_rmask(NVR_WRITE_ENABLE), &ha->reg->u1.isp4022.nvram); writel(2, &ha->reg->mailbox[6]); readl(&ha->reg->mailbox[6]); writel(set_rmask(CSR_BOOT_ENABLE), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); /* Wait for firmware to come UP. */ DEBUG2(printk(KERN_INFO "scsi%ld: %s: Wait up to %d seconds for " "boot firmware to complete...\n", ha->host_no, __func__, FIRMWARE_UP_TOV)); max_wait_time = jiffies + (FIRMWARE_UP_TOV * HZ); do { uint32_t ctrl_status; spin_lock_irqsave(&ha->hardware_lock, flags); ctrl_status = readw(&ha->reg->ctrl_status); mbox_status = readw(&ha->reg->mailbox[0]); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (ctrl_status & set_rmask(CSR_SCSI_PROCESSOR_INTR)) break; if (mbox_status == MBOX_STS_COMMAND_COMPLETE) break; DEBUG2(printk(KERN_INFO "scsi%ld: %s: Waiting for boot " "firmware to complete... 
ctrl_sts=0x%x, remaining=%ld\n", ha->host_no, __func__, ctrl_status, max_wait_time)); msleep_interruptible(250); } while (!time_after_eq(jiffies, max_wait_time)); if (mbox_status == MBOX_STS_COMMAND_COMPLETE) { DEBUG(printk(KERN_INFO "scsi%ld: %s: Firmware has started\n", ha->host_no, __func__)); spin_lock_irqsave(&ha->hardware_lock, flags); writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); status = QLA_SUCCESS; } else { printk(KERN_INFO "scsi%ld: %s: Boot firmware failed " "- mbox status 0x%x\n", ha->host_no, __func__, mbox_status); status = QLA_ERROR; } return status; } int ql4xxx_lock_drvr_wait(struct scsi_qla_host *a) { #define QL4_LOCK_DRVR_WAIT 60 #define QL4_LOCK_DRVR_SLEEP 1 int drvr_wait = QL4_LOCK_DRVR_WAIT; while (drvr_wait) { if (ql4xxx_lock_drvr(a) == 0) { ssleep(QL4_LOCK_DRVR_SLEEP); if (drvr_wait) { DEBUG2(printk("scsi%ld: %s: Waiting for " "Global Init Semaphore(%d)...\n", a->host_no, __func__, drvr_wait)); } drvr_wait -= QL4_LOCK_DRVR_SLEEP; } else { DEBUG2(printk("scsi%ld: %s: Global Init Semaphore " "acquired\n", a->host_no, __func__)); return QLA_SUCCESS; } } return QLA_ERROR; } /** * qla4xxx_start_firmware - starts qla4xxx firmware * @ha: Pointer to host adapter structure. * * This routine performs the necessary steps to start the firmware for * the QLA4010 adapter. **/ int qla4xxx_start_firmware(struct scsi_qla_host *ha) { unsigned long flags = 0; uint32_t mbox_status; int status = QLA_ERROR; int soft_reset = 1; int config_chip = 0; if (is_qla4022(ha) | is_qla4032(ha)) ql4xxx_set_mac_number(ha); if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) return QLA_ERROR; spin_lock_irqsave(&ha->hardware_lock, flags); DEBUG2(printk("scsi%ld: %s: port_ctrl = 0x%08X\n", ha->host_no, __func__, readw(isp_port_ctrl(ha)))); DEBUG(printk("scsi%ld: %s: port_status = 0x%08X\n", ha->host_no, __func__, readw(isp_port_status(ha)))); /* Is Hardware already initialized? 
*/ if ((readw(isp_port_ctrl(ha)) & 0x8000) != 0) { DEBUG(printk("scsi%ld: %s: Hardware has already been " "initialized\n", ha->host_no, __func__)); /* Receive firmware boot acknowledgement */ mbox_status = readw(&ha->reg->mailbox[0]); DEBUG2(printk("scsi%ld: %s: H/W Config complete - mbox[0]= " "0x%x\n", ha->host_no, __func__, mbox_status)); /* Is firmware already booted? */ if (mbox_status == 0) { /* F/W not running, must be config by net driver */ config_chip = 1; soft_reset = 0; } else { writel(set_rmask(CSR_SCSI_PROCESSOR_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); writel(set_rmask(CSR_SCSI_COMPLETION_INTR), &ha->reg->ctrl_status); readl(&ha->reg->ctrl_status); spin_unlock_irqrestore(&ha->hardware_lock, flags); if (qla4xxx_get_firmware_state(ha) == QLA_SUCCESS) { DEBUG2(printk("scsi%ld: %s: Get firmware " "state -- state = 0x%x\n", ha->host_no, __func__, ha->firmware_state)); /* F/W is running */ if (ha->firmware_state & FW_STATE_CONFIG_WAIT) { DEBUG2(printk("scsi%ld: %s: Firmware " "in known state -- " "config and " "boot, state = 0x%x\n", ha->host_no, __func__, ha->firmware_state)); config_chip = 1; soft_reset = 0; } } else { DEBUG2(printk("scsi%ld: %s: Firmware in " "unknown state -- resetting," " state = " "0x%x\n", ha->host_no, __func__, ha->firmware_state)); } spin_lock_irqsave(&ha->hardware_lock, flags); } } else { DEBUG(printk("scsi%ld: %s: H/W initialization hasn't been " "started - resetting\n", ha->host_no, __func__)); } spin_unlock_irqrestore(&ha->hardware_lock, flags); DEBUG(printk("scsi%ld: %s: Flags soft_rest=%d, config= %d\n ", ha->host_no, __func__, soft_reset, config_chip)); if (soft_reset) { DEBUG(printk("scsi%ld: %s: Issue Soft Reset\n", ha->host_no, __func__)); status = qla4xxx_soft_reset(ha); /* NOTE: acquires drvr * lock again, but ok */ if (status == QLA_ERROR) { DEBUG(printk("scsi%d: %s: Soft Reset failed!\n", ha->host_no, __func__)); ql4xxx_unlock_drvr(ha); return QLA_ERROR; } config_chip = 1; /* Reset clears the 
semaphore, so acquire again */ if (ql4xxx_lock_drvr_wait(ha) != QLA_SUCCESS) return QLA_ERROR; } if (config_chip) { if ((status = qla4xxx_config_nvram(ha)) == QLA_SUCCESS) status = qla4xxx_start_firmware_from_flash(ha); } ql4xxx_unlock_drvr(ha); if (status == QLA_SUCCESS) { if (test_and_clear_bit(AF_GET_CRASH_RECORD, &ha->flags)) qla4xxx_get_crash_record(ha); } else { DEBUG(printk("scsi%ld: %s: Firmware has NOT started\n", ha->host_no, __func__)); } return status; } /** * qla4xxx_free_ddb_index - Free DDBs reserved by firmware * @ha: pointer to adapter structure * * Since firmware is not running in autoconnect mode the DDB indices should * be freed so that when login happens from user space there are free DDB * indices available. **/ void qla4xxx_free_ddb_index(struct scsi_qla_host *ha) { int max_ddbs; int ret; uint32_t idx = 0, next_idx = 0; uint32_t state = 0, conn_err = 0; max_ddbs = is_qla40XX(ha) ? MAX_DEV_DB_ENTRIES_40XX : MAX_DEV_DB_ENTRIES; for (idx = 0; idx < max_ddbs; idx = next_idx) { ret = qla4xxx_get_fwddb_entry(ha, idx, NULL, 0, NULL, &next_idx, &state, &conn_err, NULL, NULL); if (ret == QLA_ERROR) { next_idx++; continue; } if (state == DDB_DS_NO_CONNECTION_ACTIVE || state == DDB_DS_SESSION_FAILED) { DEBUG2(ql4_printk(KERN_INFO, ha, "Freeing DDB index = 0x%x\n", idx)); ret = qla4xxx_clear_ddb_entry(ha, idx); if (ret == QLA_ERROR) ql4_printk(KERN_ERR, ha, "Unable to clear DDB index = " "0x%x\n", idx); } if (next_idx == 0) break; } } /** * qla4xxx_initialize_adapter - initiailizes hba * @ha: Pointer to host adapter structure. * * This routine parforms all of the steps necessary to initialize the adapter. 
* **/ int qla4xxx_initialize_adapter(struct scsi_qla_host *ha, int is_reset) { int status = QLA_ERROR; ha->eeprom_cmd_data = 0; ql4_printk(KERN_INFO, ha, "Configuring PCI space...\n"); ha->isp_ops->pci_config(ha); ha->isp_ops->disable_intrs(ha); /* Initialize the Host adapter request/response queues and firmware */ if (ha->isp_ops->start_firmware(ha) == QLA_ERROR) goto exit_init_hba; if (qla4xxx_about_firmware(ha) == QLA_ERROR) goto exit_init_hba; if (ha->isp_ops->get_sys_info(ha) == QLA_ERROR) goto exit_init_hba; if (qla4xxx_init_local_data(ha) == QLA_ERROR) goto exit_init_hba; status = qla4xxx_init_firmware(ha); if (status == QLA_ERROR) goto exit_init_hba; if (is_reset == RESET_ADAPTER) qla4xxx_build_ddb_list(ha, is_reset); set_bit(AF_ONLINE, &ha->flags); exit_init_hba: if (is_qla8022(ha) && (status == QLA_ERROR)) { /* Since interrupts are registered in start_firmware for * 82xx, release them here if initialize_adapter fails */ qla4xxx_free_irqs(ha); } DEBUG2(printk("scsi%ld: initialize adapter: %s\n", ha->host_no, status == QLA_ERROR ? 
"FAILED" : "SUCCEEDED")); return status; } int qla4xxx_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, struct ddb_entry *ddb_entry, uint32_t state) { uint32_t old_fw_ddb_device_state; int status = QLA_ERROR; old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB - old state = 0x%x, new state = 0x%x for " "index [%d]\n", __func__, ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); ddb_entry->fw_ddb_device_state = state; switch (old_fw_ddb_device_state) { case DDB_DS_LOGIN_IN_PROCESS: switch (state) { case DDB_DS_SESSION_ACTIVE: case DDB_DS_DISCOVERY: ddb_entry->unblock_sess(ddb_entry->sess); qla4xxx_update_session_conn_param(ha, ddb_entry); status = QLA_SUCCESS; break; case DDB_DS_SESSION_FAILED: case DDB_DS_NO_CONNECTION_ACTIVE: iscsi_conn_login_event(ddb_entry->conn, ISCSI_CONN_STATE_FREE); status = QLA_SUCCESS; break; } break; case DDB_DS_SESSION_ACTIVE: switch (state) { case DDB_DS_SESSION_FAILED: /* * iscsi_session failure will cause userspace to * stop the connection which in turn would block the * iscsi_session and start relogin */ iscsi_session_failure(ddb_entry->sess->dd_data, ISCSI_ERR_CONN_FAILED); status = QLA_SUCCESS; break; case DDB_DS_NO_CONNECTION_ACTIVE: clear_bit(fw_ddb_index, ha->ddb_idx_map); status = QLA_SUCCESS; break; } break; case DDB_DS_SESSION_FAILED: switch (state) { case DDB_DS_SESSION_ACTIVE: case DDB_DS_DISCOVERY: ddb_entry->unblock_sess(ddb_entry->sess); qla4xxx_update_session_conn_param(ha, ddb_entry); status = QLA_SUCCESS; break; case DDB_DS_SESSION_FAILED: iscsi_session_failure(ddb_entry->sess->dd_data, ISCSI_ERR_CONN_FAILED); status = QLA_SUCCESS; break; } break; default: DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n", __func__)); break; } return status; } void qla4xxx_arm_relogin_timer(struct ddb_entry *ddb_entry) { /* * This triggers a relogin. After the relogin_timer * expires, the relogin gets scheduled. 
We must wait a * minimum amount of time since receiving an 0x8014 AEN * with failed device_state or a logout response before * we can issue another relogin. * * Firmware pads this timeout: (time2wait +1). * Driver retry to login should be longer than F/W. * Otherwise F/W will fail * set_ddb() mbx cmd with 0x4005 since it still * counting down its time2wait. */ atomic_set(&ddb_entry->relogin_timer, 0); atomic_set(&ddb_entry->retry_relogin_timer, ddb_entry->default_time2wait + 4); } int qla4xxx_flash_ddb_change(struct scsi_qla_host *ha, uint32_t fw_ddb_index, struct ddb_entry *ddb_entry, uint32_t state) { uint32_t old_fw_ddb_device_state; int status = QLA_ERROR; old_fw_ddb_device_state = ddb_entry->fw_ddb_device_state; DEBUG2(ql4_printk(KERN_INFO, ha, "%s: DDB - old state = 0x%x, new state = 0x%x for " "index [%d]\n", __func__, ddb_entry->fw_ddb_device_state, state, fw_ddb_index)); ddb_entry->fw_ddb_device_state = state; switch (old_fw_ddb_device_state) { case DDB_DS_LOGIN_IN_PROCESS: case DDB_DS_NO_CONNECTION_ACTIVE: switch (state) { case DDB_DS_SESSION_ACTIVE: ddb_entry->unblock_sess(ddb_entry->sess); qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry); status = QLA_SUCCESS; break; case DDB_DS_SESSION_FAILED: iscsi_block_session(ddb_entry->sess); if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) qla4xxx_arm_relogin_timer(ddb_entry); status = QLA_SUCCESS; break; } break; case DDB_DS_SESSION_ACTIVE: switch (state) { case DDB_DS_SESSION_FAILED: iscsi_block_session(ddb_entry->sess); if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) qla4xxx_arm_relogin_timer(ddb_entry); status = QLA_SUCCESS; break; } break; case DDB_DS_SESSION_FAILED: switch (state) { case DDB_DS_SESSION_ACTIVE: ddb_entry->unblock_sess(ddb_entry->sess); qla4xxx_update_session_conn_fwddb_param(ha, ddb_entry); status = QLA_SUCCESS; break; case DDB_DS_SESSION_FAILED: if (!test_bit(DF_RELOGIN, &ddb_entry->flags)) qla4xxx_arm_relogin_timer(ddb_entry); status = QLA_SUCCESS; break; } break; default: 
DEBUG2(ql4_printk(KERN_INFO, ha, "%s: Unknown Event\n", __func__)); break; } return status; } /** * qla4xxx_process_ddb_changed - process ddb state change * @ha - Pointer to host adapter structure. * @fw_ddb_index - Firmware's device database index * @state - Device state * * This routine processes a Decive Database Changed AEN Event. **/ int qla4xxx_process_ddb_changed(struct scsi_qla_host *ha, uint32_t fw_ddb_index, uint32_t state, uint32_t conn_err) { struct ddb_entry *ddb_entry; int status = QLA_ERROR; /* check for out of range index */ if (fw_ddb_index >= MAX_DDB_ENTRIES) goto exit_ddb_event; /* Get the corresponging ddb entry */ ddb_entry = qla4xxx_lookup_ddb_by_fw_index(ha, fw_ddb_index); /* Device does not currently exist in our database. */ if (ddb_entry == NULL) { ql4_printk(KERN_ERR, ha, "%s: No ddb_entry at FW index [%d]\n", __func__, fw_ddb_index); if (state == DDB_DS_NO_CONNECTION_ACTIVE) clear_bit(fw_ddb_index, ha->ddb_idx_map); goto exit_ddb_event; } ddb_entry->ddb_change(ha, fw_ddb_index, ddb_entry, state); exit_ddb_event: return status; } /** * qla4xxx_login_flash_ddb - Login to target (DDB) * @cls_session: Pointer to the session to login * * This routine logins to the target. 
* Issues setddb and conn open mbx **/ void qla4xxx_login_flash_ddb(struct iscsi_cls_session *cls_session) { struct iscsi_session *sess; struct ddb_entry *ddb_entry; struct scsi_qla_host *ha; struct dev_db_entry *fw_ddb_entry = NULL; dma_addr_t fw_ddb_dma; uint32_t mbx_sts = 0; int ret; sess = cls_session->dd_data; ddb_entry = sess->dd_data; ha = ddb_entry->ha; if (!test_bit(AF_LINK_UP, &ha->flags)) return; if (ddb_entry->ddb_type != FLASH_DDB) { DEBUG2(ql4_printk(KERN_INFO, ha, "Skipping login to non FLASH DB")); goto exit_login; } fw_ddb_entry = dma_pool_alloc(ha->fw_ddb_dma_pool, GFP_KERNEL, &fw_ddb_dma); if (fw_ddb_entry == NULL) { DEBUG2(ql4_printk(KERN_ERR, ha, "Out of memory\n")); goto exit_login; } if (ddb_entry->fw_ddb_index == INVALID_ENTRY) { ret = qla4xxx_get_ddb_index(ha, &ddb_entry->fw_ddb_index); if (ret == QLA_ERROR) goto exit_login; ha->fw_ddb_index_map[ddb_entry->fw_ddb_index] = ddb_entry; ha->tot_ddbs++; } memcpy(fw_ddb_entry, &ddb_entry->fw_ddb_entry, sizeof(struct dev_db_entry)); ddb_entry->sess->target_id = ddb_entry->fw_ddb_index; ret = qla4xxx_set_ddb_entry(ha, ddb_entry->fw_ddb_index, fw_ddb_dma, &mbx_sts); if (ret == QLA_ERROR) { DEBUG2(ql4_printk(KERN_ERR, ha, "Set DDB failed\n")); goto exit_login; } ddb_entry->fw_ddb_device_state = DDB_DS_LOGIN_IN_PROCESS; ret = qla4xxx_conn_open(ha, ddb_entry->fw_ddb_index); if (ret == QLA_ERROR) { ql4_printk(KERN_ERR, ha, "%s: Login failed: %s\n", __func__, sess->targetname); goto exit_login; } exit_login: if (fw_ddb_entry) dma_pool_free(ha->fw_ddb_dma_pool, fw_ddb_entry, fw_ddb_dma); }
gpl-2.0
DC07/spirit_ghost
drivers/rtc/rtc-msm7x00a.c
4807
6582
/* drivers/rtc/rtc-msm7x00a.c * * Copyright (C) 2008 Google, Inc. * Author: San Mehat <san@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/version.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/init.h> #include <linux/types.h> #include <linux/rtc.h> #include <linux/msm_rpcrouter.h> #include <mach/msm_rpcrouter.h> #define RTC_DEBUG 0 extern void msm_pm_set_max_sleep_time(int64_t sleep_time_ns); #if CONFIG_MSM_AMSS_VERSION >= 6350 || defined(CONFIG_ARCH_QSD8X50) #define APP_TIMEREMOTE_PDEV_NAME "rs30000048:00010000" #else #define APP_TIMEREMOTE_PDEV_NAME "rs30000048:0da5b528" #endif #define TIMEREMOTE_PROCEEDURE_SET_JULIAN 6 #define TIMEREMOTE_PROCEEDURE_GET_JULIAN 7 struct rpc_time_julian { uint32_t year; uint32_t month; uint32_t day; uint32_t hour; uint32_t minute; uint32_t second; uint32_t day_of_week; }; static struct msm_rpc_endpoint *ep; static struct rtc_device *rtc; static unsigned long rtcalarm_time; static int msmrtc_timeremote_set_time(struct device *dev, struct rtc_time *tm) { int rc; struct timeremote_set_julian_req { struct rpc_request_hdr hdr; uint32_t opt_arg; struct rpc_time_julian time; } req; struct timeremote_set_julian_rep { struct rpc_reply_hdr hdr; } rep; if (tm->tm_year < 1900) tm->tm_year += 1900; if (tm->tm_year < 1970) return -EINVAL; #if RTC_DEBUG printk(KERN_DEBUG "%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u)\n", __func__, tm->tm_mon, tm->tm_mday, tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday); #endif req.opt_arg = cpu_to_be32(1); req.time.year 
= cpu_to_be32(tm->tm_year); req.time.month = cpu_to_be32(tm->tm_mon + 1); req.time.day = cpu_to_be32(tm->tm_mday); req.time.hour = cpu_to_be32(tm->tm_hour); req.time.minute = cpu_to_be32(tm->tm_min); req.time.second = cpu_to_be32(tm->tm_sec); req.time.day_of_week = cpu_to_be32(tm->tm_wday); rc = msm_rpc_call_reply(ep, TIMEREMOTE_PROCEEDURE_SET_JULIAN, &req, sizeof(req), &rep, sizeof(rep), 5 * HZ); return rc; } static int msmrtc_timeremote_read_time(struct device *dev, struct rtc_time *tm) { int rc; struct timeremote_get_julian_req { struct rpc_request_hdr hdr; uint32_t julian_time_not_null; } req; struct timeremote_get_julian_rep { struct rpc_reply_hdr hdr; uint32_t opt_arg; struct rpc_time_julian time; } rep; req.julian_time_not_null = cpu_to_be32(1); rc = msm_rpc_call_reply(ep, TIMEREMOTE_PROCEEDURE_GET_JULIAN, &req, sizeof(req), &rep, sizeof(rep), 5 * HZ); if (rc < 0) return rc; if (!be32_to_cpu(rep.opt_arg)) { printk(KERN_ERR "%s: No data from RTC\n", __func__); return -ENODATA; } tm->tm_year = be32_to_cpu(rep.time.year); tm->tm_mon = be32_to_cpu(rep.time.month); tm->tm_mday = be32_to_cpu(rep.time.day); tm->tm_hour = be32_to_cpu(rep.time.hour); tm->tm_min = be32_to_cpu(rep.time.minute); tm->tm_sec = be32_to_cpu(rep.time.second); tm->tm_wday = be32_to_cpu(rep.time.day_of_week); #if RTC_DEBUG printk(KERN_DEBUG "%s: %.2u/%.2u/%.4u %.2u:%.2u:%.2u (%.2u)\n", __func__, tm->tm_mon, tm->tm_mday, tm->tm_year, tm->tm_hour, tm->tm_min, tm->tm_sec, tm->tm_wday); #endif tm->tm_year -= 1900; /* RTC layer expects years to start at 1900 */ tm->tm_mon--; /* RTC layer expects mons to be 0 based */ if (rtc_valid_tm(tm) < 0) { dev_err(dev, "retrieved date/time is not valid.\n"); rtc_time_to_tm(0, tm); } return 0; } static int msmrtc_virtual_alarm_set(struct device *dev, struct rtc_wkalrm *a) { unsigned long now = get_seconds(); if (!a->enabled) { rtcalarm_time = 0; return 0; } else rtc_tm_to_time(&a->time, &rtcalarm_time); if (now > rtcalarm_time) { printk(KERN_ERR "%s: Attempt to 
set alarm in the past\n", __func__); rtcalarm_time = 0; return -EINVAL; } return 0; } static struct rtc_class_ops msm_rtc_ops = { .read_time = msmrtc_timeremote_read_time, .set_time = msmrtc_timeremote_set_time, .set_alarm = msmrtc_virtual_alarm_set, }; static void msmrtc_alarmtimer_expired(unsigned long _data) { #if RTC_DEBUG printk(KERN_DEBUG "%s: Generating alarm event (src %lu)\n", rtc->name, _data); #endif rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF); rtcalarm_time = 0; } static int msmrtc_probe(struct platform_device *pdev) { struct rpcsvr_platform_device *rdev = container_of(pdev, struct rpcsvr_platform_device, base); ep = msm_rpc_connect(rdev->prog, rdev->vers, 0); if (IS_ERR(ep)) { printk(KERN_ERR "%s: init rpc failed! rc = %ld\n", __func__, PTR_ERR(ep)); return PTR_ERR(ep); } rtc = rtc_device_register("msm_rtc", &pdev->dev, &msm_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) { printk(KERN_ERR "%s: Can't register RTC device (%ld)\n", pdev->name, PTR_ERR(rtc)); return PTR_ERR(rtc); } return 0; } static unsigned long msmrtc_get_seconds(void) { struct rtc_time tm; unsigned long now; msmrtc_timeremote_read_time(NULL, &tm); rtc_tm_to_time(&tm, &now); return now; } static int msmrtc_suspend(struct platform_device *dev, pm_message_t state) { if (rtcalarm_time) { unsigned long now = msmrtc_get_seconds(); int diff = rtcalarm_time - now; if (diff <= 0) { msmrtc_alarmtimer_expired(1); msm_pm_set_max_sleep_time(0); return 0; } msm_pm_set_max_sleep_time((int64_t) ((int64_t) diff * NSEC_PER_SEC)); } else msm_pm_set_max_sleep_time(0); return 0; } static int msmrtc_resume(struct platform_device *dev) { if (rtcalarm_time) { unsigned long now = msmrtc_get_seconds(); int diff = rtcalarm_time - now; if (diff <= 0) msmrtc_alarmtimer_expired(2); } return 0; } static struct platform_driver msmrtc_driver = { .probe = msmrtc_probe, .suspend = msmrtc_suspend, .resume = msmrtc_resume, .driver = { .name = APP_TIMEREMOTE_PDEV_NAME, .owner = THIS_MODULE, }, }; static int __init msmrtc_init(void) 
{ rtcalarm_time = 0; return platform_driver_register(&msmrtc_driver); } module_init(msmrtc_init); MODULE_DESCRIPTION("RTC driver for Qualcomm MSM7x00a chipsets"); MODULE_AUTHOR("San Mehat <san@android.com>"); MODULE_LICENSE("GPL");
gpl-2.0
k2wl/kernel-gt-i9082-stock-based
drivers/rtc/rtc-r9701.c
4807
4399
/* * Driver for Epson RTC-9701JE * * Copyright (C) 2008 Magnus Damm * * Based on rtc-max6902.c * * Copyright (C) 2006 8D Technologies inc. * Copyright (C) 2004 Compulab Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/device.h> #include <linux/init.h> #include <linux/rtc.h> #include <linux/spi/spi.h> #include <linux/bcd.h> #include <linux/delay.h> #include <linux/bitops.h> #define RSECCNT 0x00 /* Second Counter */ #define RMINCNT 0x01 /* Minute Counter */ #define RHRCNT 0x02 /* Hour Counter */ #define RWKCNT 0x03 /* Week Counter */ #define RDAYCNT 0x04 /* Day Counter */ #define RMONCNT 0x05 /* Month Counter */ #define RYRCNT 0x06 /* Year Counter */ #define R100CNT 0x07 /* Y100 Counter */ #define RMINAR 0x08 /* Minute Alarm */ #define RHRAR 0x09 /* Hour Alarm */ #define RWKAR 0x0a /* Week/Day Alarm */ #define RTIMCNT 0x0c /* Interval Timer */ #define REXT 0x0d /* Extension Register */ #define RFLAG 0x0e /* RTC Flag Register */ #define RCR 0x0f /* RTC Control Register */ static int write_reg(struct device *dev, int address, unsigned char data) { struct spi_device *spi = to_spi_device(dev); unsigned char buf[2]; buf[0] = address & 0x7f; buf[1] = data; return spi_write(spi, buf, ARRAY_SIZE(buf)); } static int read_regs(struct device *dev, unsigned char *regs, int no_regs) { struct spi_device *spi = to_spi_device(dev); u8 txbuf[1], rxbuf[1]; int k, ret; ret = 0; for (k = 0; ret == 0 && k < no_regs; k++) { txbuf[0] = 0x80 | regs[k]; ret = spi_write_then_read(spi, txbuf, 1, rxbuf, 1); regs[k] = rxbuf[0]; } return ret; } static int r9701_get_datetime(struct device *dev, struct rtc_time *dt) { int ret; unsigned char buf[] = { RSECCNT, RMINCNT, RHRCNT, RDAYCNT, RMONCNT, RYRCNT }; ret = read_regs(dev, buf, ARRAY_SIZE(buf)); if (ret) 
return ret; memset(dt, 0, sizeof(*dt)); dt->tm_sec = bcd2bin(buf[0]); /* RSECCNT */ dt->tm_min = bcd2bin(buf[1]); /* RMINCNT */ dt->tm_hour = bcd2bin(buf[2]); /* RHRCNT */ dt->tm_mday = bcd2bin(buf[3]); /* RDAYCNT */ dt->tm_mon = bcd2bin(buf[4]) - 1; /* RMONCNT */ dt->tm_year = bcd2bin(buf[5]) + 100; /* RYRCNT */ /* the rtc device may contain illegal values on power up * according to the data sheet. make sure they are valid. */ return rtc_valid_tm(dt); } static int r9701_set_datetime(struct device *dev, struct rtc_time *dt) { int ret, year; year = dt->tm_year + 1900; if (year >= 2100 || year < 2000) return -EINVAL; ret = write_reg(dev, RHRCNT, bin2bcd(dt->tm_hour)); ret = ret ? ret : write_reg(dev, RMINCNT, bin2bcd(dt->tm_min)); ret = ret ? ret : write_reg(dev, RSECCNT, bin2bcd(dt->tm_sec)); ret = ret ? ret : write_reg(dev, RDAYCNT, bin2bcd(dt->tm_mday)); ret = ret ? ret : write_reg(dev, RMONCNT, bin2bcd(dt->tm_mon + 1)); ret = ret ? ret : write_reg(dev, RYRCNT, bin2bcd(dt->tm_year - 100)); ret = ret ? 
ret : write_reg(dev, RWKCNT, 1 << dt->tm_wday); return ret; } static const struct rtc_class_ops r9701_rtc_ops = { .read_time = r9701_get_datetime, .set_time = r9701_set_datetime, }; static int __devinit r9701_probe(struct spi_device *spi) { struct rtc_device *rtc; unsigned char tmp; int res; rtc = rtc_device_register("r9701", &spi->dev, &r9701_rtc_ops, THIS_MODULE); if (IS_ERR(rtc)) return PTR_ERR(rtc); dev_set_drvdata(&spi->dev, rtc); tmp = R100CNT; res = read_regs(&spi->dev, &tmp, 1); if (res || tmp != 0x20) { rtc_device_unregister(rtc); return res; } return 0; } static int __devexit r9701_remove(struct spi_device *spi) { struct rtc_device *rtc = dev_get_drvdata(&spi->dev); rtc_device_unregister(rtc); return 0; } static struct spi_driver r9701_driver = { .driver = { .name = "rtc-r9701", .owner = THIS_MODULE, }, .probe = r9701_probe, .remove = __devexit_p(r9701_remove), }; static __init int r9701_init(void) { return spi_register_driver(&r9701_driver); } module_init(r9701_init); static __exit void r9701_exit(void) { spi_unregister_driver(&r9701_driver); } module_exit(r9701_exit); MODULE_DESCRIPTION("r9701 spi RTC driver"); MODULE_AUTHOR("Magnus Damm <damm@opensource.se>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:rtc-r9701");
gpl-2.0
Joshndroid/kernel_samsung_lt03wifi
drivers/staging/comedi/drivers/me4000.c
4807
69271
/*
   comedi/drivers/me4000.c
   Source code for the Meilhaus ME-4000 board family.

   COMEDI - Linux Control and Measurement Device Interface
   Copyright (C) 2000 David A. Schleef <ds@schleef.org>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 2 of the License, or
   (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
Driver: me4000
Description: Meilhaus ME-4000 series boards
Devices: [Meilhaus] ME-4650 (me4000), ME-4670i, ME-4680, ME-4680i, ME-4680is
Author: gg (Guenter Gebhardt <g.gebhardt@meilhaus.com>)
Updated: Mon, 18 Mar 2002 15:34:01 -0800
Status: broken (no support for loading firmware)

Supports:

    - Analog Input
    - Analog Output
    - Digital I/O
    - Counter

Configuration Options:

    [0] - PCI bus number (optional)
    [1] - PCI slot number (optional)

    If bus/slot is not specified, the first available PCI
    device will be used.

The firmware required by these boards is available in the
comedi_nonfree_firmware tarball available from
http://www.comedi.org.  However, the driver's support for
loading the firmware through comedi_config is currently
broken.

 */

#include <linux/interrupt.h>
#include "../comedidev.h"

#include <linux/delay.h>
#include <linux/list.h>
#include <linux/spinlock.h>

#include "comedi_pci.h"
#include "me4000.h"
#if 0
/* file removed due to GPL incompatibility */
#include "me4000_fw.h"
#endif

/*=============================================================================
  PCI device table.
  This is used by modprobe to translate PCI IDs to drivers.
  ===========================================================================*/
static DEFINE_PCI_DEVICE_TABLE(me4000_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4650) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4660) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4661) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4662) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4663) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4670) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4671) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4672) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4673) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4680) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4681) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4682) },
	{ PCI_DEVICE(PCI_VENDOR_ID_MEILHAUS, 0x4683) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, me4000_pci_table);

/*
 * Per-model capability table.  Column meaning (from struct me4000_board,
 * declared in me4000.h): name, PCI device id, AO {count, fifo?}, AI
 * {count, sample-&-hold, diff, isolated?}, DIO {count}, CNT {count} —
 * NOTE(review): exact field names live in me4000.h; verify there.
 */
static const struct me4000_board me4000_boards[] = {
	{"ME-4650", 0x4650, {0, 0}, {16, 0, 0, 0}, {4}, {0} },

	{"ME-4660", 0x4660, {0, 0}, {32, 0, 16, 0}, {4}, {3} },
	{"ME-4660i", 0x4661, {0, 0}, {32, 0, 16, 0}, {4}, {3} },
	{"ME-4660s", 0x4662, {0, 0}, {32, 8, 16, 0}, {4}, {3} },
	{"ME-4660is", 0x4663, {0, 0}, {32, 8, 16, 0}, {4}, {3} },

	{"ME-4670", 0x4670, {4, 0}, {32, 0, 16, 1}, {4}, {3} },
	{"ME-4670i", 0x4671, {4, 0}, {32, 0, 16, 1}, {4}, {3} },
	{"ME-4670s", 0x4672, {4, 0}, {32, 8, 16, 1}, {4}, {3} },
	{"ME-4670is", 0x4673, {4, 0}, {32, 8, 16, 1}, {4}, {3} },

	{"ME-4680", 0x4680, {4, 4}, {32, 0, 16, 1}, {4}, {3} },
	{"ME-4680i", 0x4681, {4, 4}, {32, 0, 16, 1}, {4}, {3} },
	{"ME-4680s", 0x4682, {4, 4}, {32, 8, 16, 1}, {4}, {3} },
	{"ME-4680is", 0x4683, {4, 4}, {32, 8, 16, 1}, {4}, {3} },

	{0},
};

/* Number of real entries, excluding the {0} terminator. */
#define ME4000_BOARD_VERSIONS (ARRAY_SIZE(me4000_boards) - 1)

/*-----------------------------------------------------------------------------
  Comedi function prototypes
  ---------------------------------------------------------------------------*/
static int me4000_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it);
static int me4000_detach(struct comedi_device *dev);
static struct comedi_driver driver_me4000 = {
	.driver_name = "me4000",
	.module = THIS_MODULE,
	.attach = me4000_attach,
	.detach = me4000_detach,
};

/*-----------------------------------------------------------------------------
  Meilhaus function prototypes
  ---------------------------------------------------------------------------*/
static int me4000_probe(struct comedi_device *dev, struct comedi_devconfig *it);
static int get_registers(struct comedi_device *dev, struct pci_dev *pci_dev_p);
static int init_board_info(struct comedi_device *dev,
			   struct pci_dev *pci_dev_p);
static int init_ao_context(struct comedi_device *dev);
static int init_ai_context(struct comedi_device *dev);
static int init_dio_context(struct comedi_device *dev);
static int init_cnt_context(struct comedi_device *dev);
static int xilinx_download(struct comedi_device *dev);
static int reset_board(struct comedi_device *dev);

static int me4000_dio_insn_bits(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data);

static int me4000_dio_insn_config(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  struct comedi_insn *insn,
				  unsigned int *data);

static int cnt_reset(struct comedi_device *dev, unsigned int channel);

static int cnt_config(struct comedi_device *dev,
		      unsigned int channel, unsigned int mode);

static int me4000_cnt_insn_config(struct comedi_device *dev,
				  struct comedi_subdevice *s,
				  struct comedi_insn *insn,
				  unsigned int *data);

static int me4000_cnt_insn_write(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_insn *insn, unsigned int *data);

static int me4000_cnt_insn_read(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data);

static int me4000_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *subdevice,
			       struct comedi_insn *insn, unsigned int *data);

static int me4000_ai_cancel(struct comedi_device *dev, struct
comedi_subdevice *s);

static int ai_check_chanlist(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_cmd *cmd);

static int ai_round_cmd_args(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_cmd *cmd,
			     unsigned int *init_ticks,
			     unsigned int *scan_ticks,
			     unsigned int *chan_ticks);

static int ai_prepare(struct comedi_device *dev,
		      struct comedi_subdevice *s,
		      struct comedi_cmd *cmd,
		      unsigned int init_ticks,
		      unsigned int scan_ticks, unsigned int chan_ticks);

static int ai_write_chanlist(struct comedi_device *dev,
			     struct comedi_subdevice *s,
			     struct comedi_cmd *cmd);

static irqreturn_t me4000_ai_isr(int irq, void *dev_id);

static int me4000_ai_do_cmd_test(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_cmd *cmd);

static int me4000_ai_do_cmd(struct comedi_device *dev,
			    struct comedi_subdevice *s);

static int me4000_ao_insn_write(struct comedi_device *dev,
				struct comedi_subdevice *s,
				struct comedi_insn *insn, unsigned int *data);

static int me4000_ao_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data);

/*-----------------------------------------------------------------------------
  Meilhaus inline functions
  ---------------------------------------------------------------------------*/

/* Byte write to an I/O port, with optional port-trace debugging. */
static inline void me4000_outb(struct comedi_device *dev, unsigned char value,
			       unsigned long port)
{
	PORT_PDEBUG("--> 0x%02X port 0x%04lX\n", value, port);
	outb(value, port);
}

/* 32-bit write to an I/O port, with optional port-trace debugging. */
static inline void me4000_outl(struct comedi_device *dev, unsigned long value,
			       unsigned long port)
{
	PORT_PDEBUG("--> 0x%08lX port 0x%04lX\n", value, port);
	outl(value, port);
}

/* 32-bit read from an I/O port, with optional port-trace debugging. */
static inline unsigned long me4000_inl(struct comedi_device *dev,
				       unsigned long port)
{
	unsigned long value;
	value = inl(port);
	PORT_PDEBUG("<-- 0x%08lX port 0x%04lX\n", value, port);
	return value;
}

/* Byte read from an I/O port, with optional port-trace debugging. */
static inline unsigned char me4000_inb(struct comedi_device *dev,
				       unsigned long port)
{
	unsigned char value;
	value = inb(port);
PORT_PDEBUG("<-- 0x%08X port 0x%04lX\n", value, port); return value; } static const struct comedi_lrange me4000_ai_range = { 4, { UNI_RANGE(2.5), UNI_RANGE(10), BIP_RANGE(2.5), BIP_RANGE(10), } }; static const struct comedi_lrange me4000_ao_range = { 1, { BIP_RANGE(10), } }; static int me4000_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int result; CALL_PDEBUG("In me4000_attach()\n"); result = me4000_probe(dev, it); if (result) return result; /* * Allocate the subdevice structures. alloc_subdevice() is a * convenient macro defined in comedidev.h. It relies on * n_subdevices being set correctly. */ if (alloc_subdevices(dev, 4) < 0) return -ENOMEM; /*========================================================================= Analog input subdevice ========================================================================*/ s = dev->subdevices + 0; if (thisboard->ai.count) { s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF; s->n_chan = thisboard->ai.count; s->maxdata = 0xFFFF; /* 16 bit ADC */ s->len_chanlist = ME4000_AI_CHANNEL_LIST_COUNT; s->range_table = &me4000_ai_range; s->insn_read = me4000_ai_insn_read; if (info->irq > 0) { if (request_irq(info->irq, me4000_ai_isr, IRQF_SHARED, "ME-4000", dev)) { printk ("comedi%d: me4000: me4000_attach(): " "Unable to allocate irq\n", dev->minor); } else { dev->read_subdev = s; s->subdev_flags |= SDF_CMD_READ; s->cancel = me4000_ai_cancel; s->do_cmdtest = me4000_ai_do_cmd_test; s->do_cmd = me4000_ai_do_cmd; } } else { printk(KERN_WARNING "comedi%d: me4000: me4000_attach(): " "No interrupt available\n", dev->minor); } } else { s->type = COMEDI_SUBD_UNUSED; } /*========================================================================= Analog output subdevice ========================================================================*/ s = dev->subdevices + 1; if (thisboard->ao.count) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITEABLE | 
SDF_COMMON | SDF_GROUND; s->n_chan = thisboard->ao.count; s->maxdata = 0xFFFF; /* 16 bit DAC */ s->range_table = &me4000_ao_range; s->insn_write = me4000_ao_insn_write; s->insn_read = me4000_ao_insn_read; } else { s->type = COMEDI_SUBD_UNUSED; } /*========================================================================= Digital I/O subdevice ========================================================================*/ s = dev->subdevices + 2; if (thisboard->dio.count) { s->type = COMEDI_SUBD_DIO; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = thisboard->dio.count * 8; s->maxdata = 1; s->range_table = &range_digital; s->insn_bits = me4000_dio_insn_bits; s->insn_config = me4000_dio_insn_config; } else { s->type = COMEDI_SUBD_UNUSED; } /* * Check for optoisolated ME-4000 version. If one the first * port is a fixed output port and the second is a fixed input port. */ if (!me4000_inl(dev, info->dio_context.dir_reg)) { s->io_bits |= 0xFF; me4000_outl(dev, ME4000_DIO_CTRL_BIT_MODE_0, info->dio_context.dir_reg); } /*========================================================================= Counter subdevice ========================================================================*/ s = dev->subdevices + 3; if (thisboard->cnt.count) { s->type = COMEDI_SUBD_COUNTER; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = thisboard->cnt.count; s->maxdata = 0xFFFF; /* 16 bit counters */ s->insn_read = me4000_cnt_insn_read; s->insn_write = me4000_cnt_insn_write; s->insn_config = me4000_cnt_insn_config; } else { s->type = COMEDI_SUBD_UNUSED; } return 0; } static int me4000_probe(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci_dev *pci_device = NULL; int result, i; struct me4000_board *board; CALL_PDEBUG("In me4000_probe()\n"); /* Allocate private memory */ if (alloc_private(dev, sizeof(struct me4000_info)) < 0) return -ENOMEM; /* * Probe the device to determine what device in the series it is. 
*/ for_each_pci_dev(pci_device) { if (pci_device->vendor == PCI_VENDOR_ID_MEILHAUS) { for (i = 0; i < ME4000_BOARD_VERSIONS; i++) { if (me4000_boards[i].device_id == pci_device->device) { /* * Was a particular * bus/slot requested? */ if ((it->options[0] != 0) || (it->options[1] != 0)) { /* * Are we on the wrong * bus/slot? */ if (pci_device->bus->number != it->options[0] || PCI_SLOT(pci_device->devfn) != it->options[1]) { continue; } } dev->board_ptr = me4000_boards + i; board = (struct me4000_board *) dev->board_ptr; info->pci_dev_p = pci_device; goto found; } } } } printk(KERN_ERR "comedi%d: me4000: me4000_probe(): " "No supported board found (req. bus/slot : %d/%d)\n", dev->minor, it->options[0], it->options[1]); return -ENODEV; found: printk(KERN_INFO "comedi%d: me4000: me4000_probe(): " "Found %s at PCI bus %d, slot %d\n", dev->minor, me4000_boards[i].name, pci_device->bus->number, PCI_SLOT(pci_device->devfn)); /* Set data in device structure */ dev->board_name = board->name; /* Enable PCI device and request regions */ result = comedi_pci_enable(pci_device, dev->board_name); if (result) { printk(KERN_ERR "comedi%d: me4000: me4000_probe(): Cannot enable PCI " "device and request I/O regions\n", dev->minor); return result; } /* Get the PCI base registers */ result = get_registers(dev, pci_device); if (result) { printk(KERN_ERR "comedi%d: me4000: me4000_probe(): " "Cannot get registers\n", dev->minor); return result; } /* Initialize board info */ result = init_board_info(dev, pci_device); if (result) { printk(KERN_ERR "comedi%d: me4000: me4000_probe(): " "Cannot init baord info\n", dev->minor); return result; } /* Init analog output context */ result = init_ao_context(dev); if (result) { printk(KERN_ERR "comedi%d: me4000: me4000_probe(): " "Cannot init ao context\n", dev->minor); return result; } /* Init analog input context */ result = init_ai_context(dev); if (result) { printk(KERN_ERR "comedi%d: me4000: me4000_probe(): " "Cannot init ai context\n", 
dev->minor); return result; } /* Init digital I/O context */ result = init_dio_context(dev); if (result) { printk(KERN_ERR "comedi%d: me4000: me4000_probe(): " "Cannot init dio context\n", dev->minor); return result; } /* Init counter context */ result = init_cnt_context(dev); if (result) { printk(KERN_ERR "comedi%d: me4000: me4000_probe(): " "Cannot init cnt context\n", dev->minor); return result; } /* Download the xilinx firmware */ result = xilinx_download(dev); if (result) { printk(KERN_ERR "comedi%d: me4000: me4000_probe(): " "Can't download firmware\n", dev->minor); return result; } /* Make a hardware reset */ result = reset_board(dev); if (result) { printk(KERN_ERR "comedi%d: me4000: me4000_probe(): Can't reset board\n", dev->minor); return result; } return 0; } static int get_registers(struct comedi_device *dev, struct pci_dev *pci_dev_p) { CALL_PDEBUG("In get_registers()\n"); /*--------------------------- plx regbase -------------------------------*/ info->plx_regbase = pci_resource_start(pci_dev_p, 1); if (info->plx_regbase == 0) { printk(KERN_ERR "comedi%d: me4000: get_registers(): " "PCI base address 1 is not available\n", dev->minor); return -ENODEV; } info->plx_regbase_size = pci_resource_len(pci_dev_p, 1); /*--------------------------- me4000 regbase ----------------------------*/ info->me4000_regbase = pci_resource_start(pci_dev_p, 2); if (info->me4000_regbase == 0) { printk(KERN_ERR "comedi%d: me4000: get_registers(): " "PCI base address 2 is not available\n", dev->minor); return -ENODEV; } info->me4000_regbase_size = pci_resource_len(pci_dev_p, 2); /*--------------------------- timer regbase ------------------------------*/ info->timer_regbase = pci_resource_start(pci_dev_p, 3); if (info->timer_regbase == 0) { printk(KERN_ERR "comedi%d: me4000: get_registers(): " "PCI base address 3 is not available\n", dev->minor); return -ENODEV; } info->timer_regbase_size = pci_resource_len(pci_dev_p, 3); /*--------------------------- program regbase 
----------------------------*/ info->program_regbase = pci_resource_start(pci_dev_p, 5); if (info->program_regbase == 0) { printk(KERN_ERR "comedi%d: me4000: get_registers(): " "PCI base address 5 is not available\n", dev->minor); return -ENODEV; } info->program_regbase_size = pci_resource_len(pci_dev_p, 5); return 0; } static int init_board_info(struct comedi_device *dev, struct pci_dev *pci_dev_p) { int result; CALL_PDEBUG("In init_board_info()\n"); /* Init spin locks */ /* spin_lock_init(&info->preload_lock); */ /* spin_lock_init(&info->ai_ctrl_lock); */ /* Get the serial number */ result = pci_read_config_dword(pci_dev_p, 0x2C, &info->serial_no); if (result != PCIBIOS_SUCCESSFUL) return result; /* Get the hardware revision */ result = pci_read_config_byte(pci_dev_p, 0x08, &info->hw_revision); if (result != PCIBIOS_SUCCESSFUL) return result; /* Get the vendor id */ info->vendor_id = pci_dev_p->vendor; /* Get the device id */ info->device_id = pci_dev_p->device; /* Get the irq assigned to the board */ info->irq = pci_dev_p->irq; return 0; } static int init_ao_context(struct comedi_device *dev) { int i; CALL_PDEBUG("In init_ao_context()\n"); for (i = 0; i < thisboard->ao.count; i++) { /* spin_lock_init(&info->ao_context[i].use_lock); */ info->ao_context[i].irq = info->irq; switch (i) { case 0: info->ao_context[i].ctrl_reg = info->me4000_regbase + ME4000_AO_00_CTRL_REG; info->ao_context[i].status_reg = info->me4000_regbase + ME4000_AO_00_STATUS_REG; info->ao_context[i].fifo_reg = info->me4000_regbase + ME4000_AO_00_FIFO_REG; info->ao_context[i].single_reg = info->me4000_regbase + ME4000_AO_00_SINGLE_REG; info->ao_context[i].timer_reg = info->me4000_regbase + ME4000_AO_00_TIMER_REG; info->ao_context[i].irq_status_reg = info->me4000_regbase + ME4000_IRQ_STATUS_REG; info->ao_context[i].preload_reg = info->me4000_regbase + ME4000_AO_LOADSETREG_XX; break; case 1: info->ao_context[i].ctrl_reg = info->me4000_regbase + ME4000_AO_01_CTRL_REG; info->ao_context[i].status_reg 
= info->me4000_regbase + ME4000_AO_01_STATUS_REG; info->ao_context[i].fifo_reg = info->me4000_regbase + ME4000_AO_01_FIFO_REG; info->ao_context[i].single_reg = info->me4000_regbase + ME4000_AO_01_SINGLE_REG; info->ao_context[i].timer_reg = info->me4000_regbase + ME4000_AO_01_TIMER_REG; info->ao_context[i].irq_status_reg = info->me4000_regbase + ME4000_IRQ_STATUS_REG; info->ao_context[i].preload_reg = info->me4000_regbase + ME4000_AO_LOADSETREG_XX; break; case 2: info->ao_context[i].ctrl_reg = info->me4000_regbase + ME4000_AO_02_CTRL_REG; info->ao_context[i].status_reg = info->me4000_regbase + ME4000_AO_02_STATUS_REG; info->ao_context[i].fifo_reg = info->me4000_regbase + ME4000_AO_02_FIFO_REG; info->ao_context[i].single_reg = info->me4000_regbase + ME4000_AO_02_SINGLE_REG; info->ao_context[i].timer_reg = info->me4000_regbase + ME4000_AO_02_TIMER_REG; info->ao_context[i].irq_status_reg = info->me4000_regbase + ME4000_IRQ_STATUS_REG; info->ao_context[i].preload_reg = info->me4000_regbase + ME4000_AO_LOADSETREG_XX; break; case 3: info->ao_context[i].ctrl_reg = info->me4000_regbase + ME4000_AO_03_CTRL_REG; info->ao_context[i].status_reg = info->me4000_regbase + ME4000_AO_03_STATUS_REG; info->ao_context[i].fifo_reg = info->me4000_regbase + ME4000_AO_03_FIFO_REG; info->ao_context[i].single_reg = info->me4000_regbase + ME4000_AO_03_SINGLE_REG; info->ao_context[i].timer_reg = info->me4000_regbase + ME4000_AO_03_TIMER_REG; info->ao_context[i].irq_status_reg = info->me4000_regbase + ME4000_IRQ_STATUS_REG; info->ao_context[i].preload_reg = info->me4000_regbase + ME4000_AO_LOADSETREG_XX; break; default: break; } } return 0; } static int init_ai_context(struct comedi_device *dev) { CALL_PDEBUG("In init_ai_context()\n"); info->ai_context.irq = info->irq; info->ai_context.ctrl_reg = info->me4000_regbase + ME4000_AI_CTRL_REG; info->ai_context.status_reg = info->me4000_regbase + ME4000_AI_STATUS_REG; info->ai_context.channel_list_reg = info->me4000_regbase + 
ME4000_AI_CHANNEL_LIST_REG;
	info->ai_context.data_reg = info->me4000_regbase + ME4000_AI_DATA_REG;
	info->ai_context.chan_timer_reg =
	    info->me4000_regbase + ME4000_AI_CHAN_TIMER_REG;
	info->ai_context.chan_pre_timer_reg =
	    info->me4000_regbase + ME4000_AI_CHAN_PRE_TIMER_REG;
	info->ai_context.scan_timer_low_reg =
	    info->me4000_regbase + ME4000_AI_SCAN_TIMER_LOW_REG;
	info->ai_context.scan_timer_high_reg =
	    info->me4000_regbase + ME4000_AI_SCAN_TIMER_HIGH_REG;
	info->ai_context.scan_pre_timer_low_reg =
	    info->me4000_regbase + ME4000_AI_SCAN_PRE_TIMER_LOW_REG;
	info->ai_context.scan_pre_timer_high_reg =
	    info->me4000_regbase + ME4000_AI_SCAN_PRE_TIMER_HIGH_REG;
	info->ai_context.start_reg =
	    info->me4000_regbase + ME4000_AI_START_REG;
	info->ai_context.irq_status_reg =
	    info->me4000_regbase + ME4000_IRQ_STATUS_REG;
	info->ai_context.sample_counter_reg =
	    info->me4000_regbase + ME4000_AI_SAMPLE_COUNTER_REG;

	return 0;
}

/*
 * Compute the digital-I/O register addresses (direction, control and
 * the four 8-bit port data registers) from the ME-4000 register base.
 * Always succeeds.
 */
static int init_dio_context(struct comedi_device *dev)
{
	CALL_PDEBUG("In init_dio_context()\n");

	info->dio_context.dir_reg =
	    info->me4000_regbase + ME4000_DIO_DIR_REG;
	info->dio_context.ctrl_reg =
	    info->me4000_regbase + ME4000_DIO_CTRL_REG;
	info->dio_context.port_0_reg =
	    info->me4000_regbase + ME4000_DIO_PORT_0_REG;
	info->dio_context.port_1_reg =
	    info->me4000_regbase + ME4000_DIO_PORT_1_REG;
	info->dio_context.port_2_reg =
	    info->me4000_regbase + ME4000_DIO_PORT_2_REG;
	info->dio_context.port_3_reg =
	    info->me4000_regbase + ME4000_DIO_PORT_3_REG;

	return 0;
}

/*
 * Compute the counter register addresses (control plus three counter
 * channels) from the separate timer register base.  Always succeeds.
 */
static int init_cnt_context(struct comedi_device *dev)
{
	CALL_PDEBUG("In init_cnt_context()\n");

	info->cnt_context.ctrl_reg =
	    info->timer_regbase + ME4000_CNT_CTRL_REG;
	info->cnt_context.counter_0_reg =
	    info->timer_regbase + ME4000_CNT_COUNTER_0_REG;
	info->cnt_context.counter_1_reg =
	    info->timer_regbase + ME4000_CNT_COUNTER_1_REG;
	info->cnt_context.counter_2_reg =
	    info->timer_regbase + ME4000_CNT_COUNTER_2_REG;

	return 0;
}

/*
 * The Xilinx firmware blob (me4000_fw.h) was removed for GPL
 * incompatibility; this flag makes xilinx_download() bail out instead
 * of dereferencing the missing data (driver status: broken).
 */
#define FIRMWARE_NOT_AVAILABLE 1
#if FIRMWARE_NOT_AVAILABLE
extern unsigned char *xilinx_firm;
#endif

/*
 * Download the FPGA bitstream to the Xilinx via the PLX local bus.
 * Returns 0 on success or -EIO on any handshake failure.
 *
 * NOTE(review): `queue` is initialized but never waited on — looks like
 * leftover dead code.  Since FIRMWARE_NOT_AVAILABLE is hard-wired to 1,
 * the actual download branch below is currently unreachable.
 */
static int xilinx_download(struct comedi_device *dev)
{
	u32 value = 0;
	wait_queue_head_t queue;
	int idx = 0;
	int size = 0;

	CALL_PDEBUG("In xilinx_download()\n");

	init_waitqueue_head(&queue);

	/*
	 * Set PLX local interrupt 2 polarity to high.
	 * Interrupt is thrown by init pin of xilinx.
	 */
	outl(0x10, info->plx_regbase + PLX_INTCSR);

	/* Set /CS and /WRITE of the Xilinx */
	value = inl(info->plx_regbase + PLX_ICR);
	value |= 0x100;
	outl(value, info->plx_regbase + PLX_ICR);

	/* Init Xilinx with CS1 (dummy read triggers the chip select) */
	inb(info->program_regbase + 0xC8);

	/* Wait until /INIT pin is set */
	udelay(20);
	if (!(inl(info->plx_regbase + PLX_INTCSR) & 0x20)) {
		printk(KERN_ERR "comedi%d: me4000: xilinx_download(): "
		       "Can't init Xilinx\n", dev->minor);
		return -EIO;
	}

	/* Reset /CS and /WRITE of the Xilinx */
	value = inl(info->plx_regbase + PLX_ICR);
	value &= ~0x100;
	outl(value, info->plx_regbase + PLX_ICR);
	if (FIRMWARE_NOT_AVAILABLE) {
		comedi_error(dev, "xilinx firmware unavailable "
			     "due to licensing, aborting");
		return -EIO;
	} else {
		/* Download Xilinx firmware */
		/* First 4 bytes of the blob carry the big-endian size */
		size = (xilinx_firm[0] << 24) + (xilinx_firm[1] << 16) +
		    (xilinx_firm[2] << 8) + xilinx_firm[3];
		udelay(10);

		/* Payload starts at offset 16 in the blob */
		for (idx = 0; idx < size; idx++) {
			outb(xilinx_firm[16 + idx], info->program_regbase);
			udelay(10);

			/* Check if BUSY flag is low */
			if (inl(info->plx_regbase + PLX_ICR) & 0x20) {
				printk(KERN_ERR
				       "comedi%d: me4000: xilinx_download(): "
				       "Xilinx is still busy (idx = %d)\n",
				       dev->minor, idx);
				return -EIO;
			}
		}
	}

	/* If done flag is high download was successful */
	if (inl(info->plx_regbase + PLX_ICR) & 0x4) {
	} else {
		printk(KERN_ERR "comedi%d: me4000: xilinx_download(): "
		       "DONE flag is not set\n", dev->minor);
		printk(KERN_ERR "comedi%d: me4000: xilinx_download(): "
		       "Download not successful\n", dev->minor);
		return -EIO;
	}

	/* Set /CS and /WRITE */
	value = inl(info->plx_regbase + PLX_ICR);
	value |= 0x100;
	outl(value, info->plx_regbase + PLX_ICR);

	return 0;
}

/*
 * Put the board into a known quiescent state: hardware reset via the PLX,
 * DACs forced to mid-scale (0 V), AI/AO conversions stopped, PLX interrupts
 * enabled, AO demux adjusted, and DIO port 0 set to output on isolated
 * board variants.
 */
static int reset_board(struct comedi_device *dev)
{
	unsigned long icr;

	CALL_PDEBUG("In reset_board()\n");

	/* Make a hardware reset (pulse bit 30 of the PLX ICR) */
	icr = me4000_inl(dev, info->plx_regbase + PLX_ICR);
	icr |= 0x40000000;
	me4000_outl(dev, icr, info->plx_regbase + PLX_ICR);
	icr &= ~0x40000000;
	me4000_outl(dev, icr, info->plx_regbase + PLX_ICR);

	/* 0x8000 to the DACs means an output voltage of 0V */
	me4000_outl(dev, 0x8000,
		    info->me4000_regbase + ME4000_AO_00_SINGLE_REG);
	me4000_outl(dev, 0x8000,
		    info->me4000_regbase + ME4000_AO_01_SINGLE_REG);
	me4000_outl(dev, 0x8000,
		    info->me4000_regbase + ME4000_AO_02_SINGLE_REG);
	me4000_outl(dev, 0x8000,
		    info->me4000_regbase + ME4000_AO_03_SINGLE_REG);

	/* Set both stop bits in the analog input control register */
	me4000_outl(dev,
		    ME4000_AI_CTRL_BIT_IMMEDIATE_STOP | ME4000_AI_CTRL_BIT_STOP,
		    info->me4000_regbase + ME4000_AI_CTRL_REG);

	/* Set both stop bits in the analog output control register */
	me4000_outl(dev,
		    ME4000_AO_CTRL_BIT_IMMEDIATE_STOP | ME4000_AO_CTRL_BIT_STOP,
		    info->me4000_regbase + ME4000_AO_00_CTRL_REG);
	me4000_outl(dev,
		    ME4000_AO_CTRL_BIT_IMMEDIATE_STOP | ME4000_AO_CTRL_BIT_STOP,
		    info->me4000_regbase + ME4000_AO_01_CTRL_REG);
	me4000_outl(dev,
		    ME4000_AO_CTRL_BIT_IMMEDIATE_STOP | ME4000_AO_CTRL_BIT_STOP,
		    info->me4000_regbase + ME4000_AO_02_CTRL_REG);
	me4000_outl(dev,
		    ME4000_AO_CTRL_BIT_IMMEDIATE_STOP | ME4000_AO_CTRL_BIT_STOP,
		    info->me4000_regbase + ME4000_AO_03_CTRL_REG);

	/* Enable interrupts on the PLX */
	me4000_outl(dev, 0x43, info->plx_regbase + PLX_INTCSR);

	/* Set the adjustment register for AO demux */
	me4000_outl(dev, ME4000_AO_DEMUX_ADJUST_VALUE,
		    info->me4000_regbase + ME4000_AO_DEMUX_ADJUST_REG);

	/*
	 * Set digital I/O direction for port 0
	 * to output on isolated versions
	 */
	if (!(me4000_inl(dev, info->me4000_regbase + ME4000_DIO_DIR_REG) & 0x1)) {
		me4000_outl(dev, 0x1,
			    info->me4000_regbase + ME4000_DIO_CTRL_REG);
	}

	return 0;
}

/*
 * Driver detach: quiesce the hardware, release the PCI regions and drop
 * the device reference.  Safe to call when attach failed part-way
 * (all pointers are checked before use).
 */
static int me4000_detach(struct comedi_device *dev)
{
	CALL_PDEBUG("In me4000_detach()\n");

	if (info) {
		if (info->pci_dev_p) {
			reset_board(dev);
			if (info->plx_regbase)
comedi_pci_disable(info->pci_dev_p);
			pci_dev_put(info->pci_dev_p);
		}
	}

	return 0;
}

/*=============================================================================
  Analog input section
  ===========================================================================*/

/*
 * Single-shot analog input read (INSN_READ).  Builds a one-entry channel
 * list from chanspec, triggers a conversion by a dummy read of the start
 * register, then fetches one 16-bit sample from the data FIFO.
 * Returns 1 (samples read) on success, negative errno on bad parameters.
 */
static int me4000_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *subdevice,
			       struct comedi_insn *insn, unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);
	int rang = CR_RANGE(insn->chanspec);
	int aref = CR_AREF(insn->chanspec);

	unsigned long entry = 0;
	unsigned long tmp;
	long lval;

	CALL_PDEBUG("In me4000_ai_insn_read()\n");

	if (insn->n == 0) {
		return 0;
	} else if (insn->n > 1) {
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_insn_read(): "
		       "Invalid instruction length %d\n", dev->minor, insn->n);
		return -EINVAL;
	}

	/* Ranges 0/1 are unipolar (2.5 V / 10 V), 2/3 bipolar (2.5 V / 10 V) */
	switch (rang) {
	case 0:
		entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_2_5;
		break;
	case 1:
		entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_10;
		break;
	case 2:
		entry |= ME4000_AI_LIST_RANGE_BIPOLAR_2_5;
		break;
	case 3:
		entry |= ME4000_AI_LIST_RANGE_BIPOLAR_10;
		break;
	default:
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_insn_read(): "
		       "Invalid range specified\n", dev->minor);
		return -EINVAL;
	}

	switch (aref) {
	case AREF_GROUND:
	case AREF_COMMON:
		if (chan >= thisboard->ai.count) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_insn_read(): "
			       "Analog input is not available\n", dev->minor);
			return -EINVAL;
		}
		entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED | chan;
		break;

	case AREF_DIFF:
		/* Differential mode requires a bipolar range (2 or 3) */
		if (rang == 0 || rang == 1) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_insn_read(): "
			       "Range must be bipolar when aref = diff\n",
			       dev->minor);
			return -EINVAL;
		}

		if (chan >= thisboard->ai.diff_count) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_insn_read(): "
			       "Analog input is not available\n", dev->minor);
			return -EINVAL;
		}
		entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL | chan;
		break;
	default:
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_insn_read(): "
		       "Invalid aref specified\n", dev->minor);
		return -EINVAL;
	}

	entry |= ME4000_AI_LIST_LAST_ENTRY;

	/* Clear channel list, data fifo and both stop bits */
	tmp = me4000_inl(dev, info->ai_context.ctrl_reg);
	tmp &= ~(ME4000_AI_CTRL_BIT_CHANNEL_FIFO |
		 ME4000_AI_CTRL_BIT_DATA_FIFO |
		 ME4000_AI_CTRL_BIT_STOP | ME4000_AI_CTRL_BIT_IMMEDIATE_STOP);
	me4000_outl(dev, tmp, info->ai_context.ctrl_reg);

	/* Set the acquisition mode to single */
	tmp &= ~(ME4000_AI_CTRL_BIT_MODE_0 | ME4000_AI_CTRL_BIT_MODE_1 |
		 ME4000_AI_CTRL_BIT_MODE_2);
	me4000_outl(dev, tmp, info->ai_context.ctrl_reg);

	/* Enable channel list and data fifo */
	tmp |= ME4000_AI_CTRL_BIT_CHANNEL_FIFO | ME4000_AI_CTRL_BIT_DATA_FIFO;
	me4000_outl(dev, tmp, info->ai_context.ctrl_reg);

	/* Generate channel list entry */
	me4000_outl(dev, entry, info->ai_context.channel_list_reg);

	/* Set the timer to maximum sample rate */
	me4000_outl(dev, ME4000_AI_MIN_TICKS, info->ai_context.chan_timer_reg);
	me4000_outl(dev, ME4000_AI_MIN_TICKS,
		    info->ai_context.chan_pre_timer_reg);

	/* Start conversion by dummy read */
	me4000_inl(dev, info->ai_context.start_reg);

	/* Wait until ready */
	udelay(10);
	if (!
(me4000_inl(dev, info->ai_context.status_reg) & ME4000_AI_STATUS_BIT_EF_DATA)) { printk(KERN_ERR "comedi%d: me4000: me4000_ai_insn_read(): " "Value not available after wait\n", dev->minor); return -EIO; } /* Read value from data fifo */ lval = me4000_inl(dev, info->ai_context.data_reg) & 0xFFFF; data[0] = lval ^ 0x8000; return 1; } static int me4000_ai_cancel(struct comedi_device *dev, struct comedi_subdevice *s) { unsigned long tmp; CALL_PDEBUG("In me4000_ai_cancel()\n"); /* Stop any running conversion */ tmp = me4000_inl(dev, info->ai_context.ctrl_reg); tmp &= ~(ME4000_AI_CTRL_BIT_STOP | ME4000_AI_CTRL_BIT_IMMEDIATE_STOP); me4000_outl(dev, tmp, info->ai_context.ctrl_reg); /* Clear the control register */ me4000_outl(dev, 0x0, info->ai_context.ctrl_reg); return 0; } static int ai_check_chanlist(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { int aref; int i; CALL_PDEBUG("In ai_check_chanlist()\n"); /* Check whether a channel list is available */ if (!cmd->chanlist_len) { printk(KERN_ERR "comedi%d: me4000: ai_check_chanlist(): " "No channel list available\n", dev->minor); return -EINVAL; } /* Check the channel list size */ if (cmd->chanlist_len > ME4000_AI_CHANNEL_LIST_COUNT) { printk(KERN_ERR "comedi%d: me4000: ai_check_chanlist(): " "Channel list is to large\n", dev->minor); return -EINVAL; } /* Check the pointer */ if (!cmd->chanlist) { printk(KERN_ERR "comedi%d: me4000: ai_check_chanlist(): " "NULL pointer to channel list\n", dev->minor); return -EFAULT; } /* Check whether aref is equal for all entries */ aref = CR_AREF(cmd->chanlist[0]); for (i = 0; i < cmd->chanlist_len; i++) { if (CR_AREF(cmd->chanlist[i]) != aref) { printk(KERN_ERR "comedi%d: me4000: ai_check_chanlist(): " "Mode is not equal for all entries\n", dev->minor); return -EINVAL; } } /* Check whether channels are available for this ending */ if (aref == SDF_DIFF) { for (i = 0; i < cmd->chanlist_len; i++) { if (CR_CHAN(cmd->chanlist[i]) >= thisboard->ai.diff_count) 
{ printk(KERN_ERR "comedi%d: me4000: ai_check_chanlist():" " Channel number to high\n", dev->minor); return -EINVAL; } } } else { for (i = 0; i < cmd->chanlist_len; i++) { if (CR_CHAN(cmd->chanlist[i]) >= thisboard->ai.count) { printk(KERN_ERR "comedi%d: me4000: ai_check_chanlist(): " "Channel number to high\n", dev->minor); return -EINVAL; } } } /* Check if bipolar is set for all entries when in differential mode */ if (aref == SDF_DIFF) { for (i = 0; i < cmd->chanlist_len; i++) { if (CR_RANGE(cmd->chanlist[i]) != 1 && CR_RANGE(cmd->chanlist[i]) != 2) { printk(KERN_ERR "comedi%d: me4000: ai_check_chanlist(): " "Bipolar is not selected in " "differential mode\n", dev->minor); return -EINVAL; } } } return 0; } static int ai_round_cmd_args(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd, unsigned int *init_ticks, unsigned int *scan_ticks, unsigned int *chan_ticks) { int rest; CALL_PDEBUG("In ai_round_cmd_args()\n"); *init_ticks = 0; *scan_ticks = 0; *chan_ticks = 0; PDEBUG("ai_round_cmd_arg(): start_arg = %d\n", cmd->start_arg); PDEBUG("ai_round_cmd_arg(): scan_begin_arg = %d\n", cmd->scan_begin_arg); PDEBUG("ai_round_cmd_arg(): convert_arg = %d\n", cmd->convert_arg); if (cmd->start_arg) { *init_ticks = (cmd->start_arg * 33) / 1000; rest = (cmd->start_arg * 33) % 1000; if (cmd->flags & TRIG_ROUND_NEAREST) { if (rest > 33) (*init_ticks)++; } else if (cmd->flags & TRIG_ROUND_UP) { if (rest) (*init_ticks)++; } } if (cmd->scan_begin_arg) { *scan_ticks = (cmd->scan_begin_arg * 33) / 1000; rest = (cmd->scan_begin_arg * 33) % 1000; if (cmd->flags & TRIG_ROUND_NEAREST) { if (rest > 33) (*scan_ticks)++; } else if (cmd->flags & TRIG_ROUND_UP) { if (rest) (*scan_ticks)++; } } if (cmd->convert_arg) { *chan_ticks = (cmd->convert_arg * 33) / 1000; rest = (cmd->convert_arg * 33) % 1000; if (cmd->flags & TRIG_ROUND_NEAREST) { if (rest > 33) (*chan_ticks)++; } else if (cmd->flags & TRIG_ROUND_UP) { if (rest) (*chan_ticks)++; } } 
PDEBUG("ai_round_cmd_args(): init_ticks = %d\n", *init_ticks); PDEBUG("ai_round_cmd_args(): scan_ticks = %d\n", *scan_ticks); PDEBUG("ai_round_cmd_args(): chan_ticks = %d\n", *chan_ticks); return 0; } static void ai_write_timer(struct comedi_device *dev, unsigned int init_ticks, unsigned int scan_ticks, unsigned int chan_ticks) { CALL_PDEBUG("In ai_write_timer()\n"); me4000_outl(dev, init_ticks - 1, info->ai_context.scan_pre_timer_low_reg); me4000_outl(dev, 0x0, info->ai_context.scan_pre_timer_high_reg); if (scan_ticks) { me4000_outl(dev, scan_ticks - 1, info->ai_context.scan_timer_low_reg); me4000_outl(dev, 0x0, info->ai_context.scan_timer_high_reg); } me4000_outl(dev, chan_ticks - 1, info->ai_context.chan_pre_timer_reg); me4000_outl(dev, chan_ticks - 1, info->ai_context.chan_timer_reg); } static int ai_prepare(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd, unsigned int init_ticks, unsigned int scan_ticks, unsigned int chan_ticks) { unsigned long tmp = 0; CALL_PDEBUG("In ai_prepare()\n"); /* Write timer arguments */ ai_write_timer(dev, init_ticks, scan_ticks, chan_ticks); /* Reset control register */ me4000_outl(dev, tmp, info->ai_context.ctrl_reg); /* Start sources */ if ((cmd->start_src == TRIG_EXT && cmd->scan_begin_src == TRIG_TIMER && cmd->convert_src == TRIG_TIMER) || (cmd->start_src == TRIG_EXT && cmd->scan_begin_src == TRIG_FOLLOW && cmd->convert_src == TRIG_TIMER)) { tmp = ME4000_AI_CTRL_BIT_MODE_1 | ME4000_AI_CTRL_BIT_CHANNEL_FIFO | ME4000_AI_CTRL_BIT_DATA_FIFO; } else if (cmd->start_src == TRIG_EXT && cmd->scan_begin_src == TRIG_EXT && cmd->convert_src == TRIG_TIMER) { tmp = ME4000_AI_CTRL_BIT_MODE_2 | ME4000_AI_CTRL_BIT_CHANNEL_FIFO | ME4000_AI_CTRL_BIT_DATA_FIFO; } else if (cmd->start_src == TRIG_EXT && cmd->scan_begin_src == TRIG_EXT && cmd->convert_src == TRIG_EXT) { tmp = ME4000_AI_CTRL_BIT_MODE_0 | ME4000_AI_CTRL_BIT_MODE_1 | ME4000_AI_CTRL_BIT_CHANNEL_FIFO | ME4000_AI_CTRL_BIT_DATA_FIFO; } else { tmp = 
ME4000_AI_CTRL_BIT_MODE_0 | ME4000_AI_CTRL_BIT_CHANNEL_FIFO | ME4000_AI_CTRL_BIT_DATA_FIFO; } /* Stop triggers */ if (cmd->stop_src == TRIG_COUNT) { me4000_outl(dev, cmd->chanlist_len * cmd->stop_arg, info->ai_context.sample_counter_reg); tmp |= ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ; } else if (cmd->stop_src == TRIG_NONE && cmd->scan_end_src == TRIG_COUNT) { me4000_outl(dev, cmd->scan_end_arg, info->ai_context.sample_counter_reg); tmp |= ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ; } else { tmp |= ME4000_AI_CTRL_BIT_HF_IRQ; } /* Write the setup to the control register */ me4000_outl(dev, tmp, info->ai_context.ctrl_reg); /* Write the channel list */ ai_write_chanlist(dev, s, cmd); return 0; } static int ai_write_chanlist(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_cmd *cmd) { unsigned int entry; unsigned int chan; unsigned int rang; unsigned int aref; int i; CALL_PDEBUG("In ai_write_chanlist()\n"); for (i = 0; i < cmd->chanlist_len; i++) { chan = CR_CHAN(cmd->chanlist[i]); rang = CR_RANGE(cmd->chanlist[i]); aref = CR_AREF(cmd->chanlist[i]); entry = chan; if (rang == 0) entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_2_5; else if (rang == 1) entry |= ME4000_AI_LIST_RANGE_UNIPOLAR_10; else if (rang == 2) entry |= ME4000_AI_LIST_RANGE_BIPOLAR_2_5; else entry |= ME4000_AI_LIST_RANGE_BIPOLAR_10; if (aref == SDF_DIFF) entry |= ME4000_AI_LIST_INPUT_DIFFERENTIAL; else entry |= ME4000_AI_LIST_INPUT_SINGLE_ENDED; me4000_outl(dev, entry, info->ai_context.channel_list_reg); } return 0; } static int me4000_ai_do_cmd(struct comedi_device *dev, struct comedi_subdevice *s) { int err; unsigned int init_ticks = 0; unsigned int scan_ticks = 0; unsigned int chan_ticks = 0; struct comedi_cmd *cmd = &s->async->cmd; CALL_PDEBUG("In me4000_ai_do_cmd()\n"); /* Reset the analog input */ err = me4000_ai_cancel(dev, s); if (err) return err; /* Round the timer arguments */ err = ai_round_cmd_args(dev, s, cmd, &init_ticks, &scan_ticks, &chan_ticks); if 
(err)
		return err;

	/* Prepare the AI for acquisition */
	err = ai_prepare(dev, s, cmd, init_ticks, scan_ticks, chan_ticks);
	if (err)
		return err;

	/* Start acquisition by dummy read */
	me4000_inl(dev, info->ai_context.start_reg);

	return 0;
}

/*
 * me4000_ai_do_cmd_test():
 *
 * The demo cmd.c in ./comedilib/demo specifies 6 return values:
 * - success
 * - invalid source
 * - source conflict
 * - invalid argument
 * - argument conflict
 * - invalid chanlist
 * So I tried to adopt this scheme.
 */
static int me4000_ai_do_cmd_test(struct comedi_device *dev,
				 struct comedi_subdevice *s,
				 struct comedi_cmd *cmd)
{
	unsigned int init_ticks;
	unsigned int chan_ticks;
	unsigned int scan_ticks;
	int err = 0;

	CALL_PDEBUG("In me4000_ai_do_cmd_test()\n");

	PDEBUG("me4000_ai_do_cmd_test(): subdev         = %d\n", cmd->subdev);
	PDEBUG("me4000_ai_do_cmd_test(): flags          = %08X\n", cmd->flags);
	PDEBUG("me4000_ai_do_cmd_test(): start_src      = %08X\n",
	       cmd->start_src);
	PDEBUG("me4000_ai_do_cmd_test(): start_arg      = %d\n",
	       cmd->start_arg);
	PDEBUG("me4000_ai_do_cmd_test(): scan_begin_src = %08X\n",
	       cmd->scan_begin_src);
	PDEBUG("me4000_ai_do_cmd_test(): scan_begin_arg = %d\n",
	       cmd->scan_begin_arg);
	PDEBUG("me4000_ai_do_cmd_test(): convert_src    = %08X\n",
	       cmd->convert_src);
	PDEBUG("me4000_ai_do_cmd_test(): convert_arg    = %d\n",
	       cmd->convert_arg);
	PDEBUG("me4000_ai_do_cmd_test(): scan_end_src   = %08X\n",
	       cmd->scan_end_src);
	PDEBUG("me4000_ai_do_cmd_test(): scan_end_arg   = %d\n",
	       cmd->scan_end_arg);
	PDEBUG("me4000_ai_do_cmd_test(): stop_src       = %08X\n",
	       cmd->stop_src);
	PDEBUG("me4000_ai_do_cmd_test(): stop_arg       = %d\n", cmd->stop_arg);
	PDEBUG("me4000_ai_do_cmd_test(): chanlist       = %d\n",
	       (unsigned int)cmd->chanlist);
	PDEBUG("me4000_ai_do_cmd_test(): chanlist_len   = %d\n",
	       cmd->chanlist_len);

	/* Only rounding flags are implemented */
	cmd->flags &= TRIG_ROUND_NEAREST | TRIG_ROUND_UP | TRIG_ROUND_DOWN;

	/* Round the timer arguments */
	ai_round_cmd_args(dev, s, cmd, &init_ticks, &scan_ticks, &chan_ticks);

	/*
	 * Stage 1. Check if the trigger sources are generally valid.
	 */
	switch (cmd->start_src) {
	case TRIG_NOW:
	case TRIG_EXT:
		break;
	case TRIG_ANY:
		cmd->start_src &= TRIG_NOW | TRIG_EXT;
		err++;
		break;
	default:
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Invalid start source\n", dev->minor);
		cmd->start_src = TRIG_NOW;
		err++;
	}
	switch (cmd->scan_begin_src) {
	case TRIG_FOLLOW:
	case TRIG_TIMER:
	case TRIG_EXT:
		break;
	case TRIG_ANY:
		cmd->scan_begin_src &= TRIG_FOLLOW | TRIG_TIMER | TRIG_EXT;
		err++;
		break;
	default:
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Invalid scan begin source\n", dev->minor);
		cmd->scan_begin_src = TRIG_FOLLOW;
		err++;
	}
	switch (cmd->convert_src) {
	case TRIG_TIMER:
	case TRIG_EXT:
		break;
	case TRIG_ANY:
		cmd->convert_src &= TRIG_TIMER | TRIG_EXT;
		err++;
		break;
	default:
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Invalid convert source\n", dev->minor);
		cmd->convert_src = TRIG_TIMER;
		err++;
	}
	switch (cmd->scan_end_src) {
	case TRIG_NONE:
	case TRIG_COUNT:
		break;
	case TRIG_ANY:
		cmd->scan_end_src &= TRIG_NONE | TRIG_COUNT;
		err++;
		break;
	default:
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Invalid scan end source\n", dev->minor);
		cmd->scan_end_src = TRIG_NONE;
		err++;
	}
	switch (cmd->stop_src) {
	case TRIG_NONE:
	case TRIG_COUNT:
		break;
	case TRIG_ANY:
		cmd->stop_src &= TRIG_NONE | TRIG_COUNT;
		err++;
		break;
	default:
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Invalid stop source\n", dev->minor);
		cmd->stop_src = TRIG_NONE;
		err++;
	}
	if (err)
		return 1;

	/*
	 * Stage 2. Check for trigger source conflicts.
	 * Only the combinations listed below are supported by the hardware;
	 * the empty bodies mark the accepted combinations.
	 */
	if (cmd->start_src == TRIG_NOW &&
	    cmd->scan_begin_src == TRIG_TIMER &&
	    cmd->convert_src == TRIG_TIMER) {
	} else if (cmd->start_src == TRIG_NOW &&
		   cmd->scan_begin_src == TRIG_FOLLOW &&
		   cmd->convert_src == TRIG_TIMER) {
	} else if (cmd->start_src == TRIG_EXT &&
		   cmd->scan_begin_src == TRIG_TIMER &&
		   cmd->convert_src == TRIG_TIMER) {
	} else if (cmd->start_src == TRIG_EXT &&
		   cmd->scan_begin_src == TRIG_FOLLOW &&
		   cmd->convert_src == TRIG_TIMER) {
	} else if (cmd->start_src == TRIG_EXT &&
		   cmd->scan_begin_src == TRIG_EXT &&
		   cmd->convert_src == TRIG_TIMER) {
	} else if (cmd->start_src == TRIG_EXT &&
		   cmd->scan_begin_src == TRIG_EXT &&
		   cmd->convert_src == TRIG_EXT) {
	} else {
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Invalid start trigger combination\n", dev->minor);
		cmd->start_src = TRIG_NOW;
		cmd->scan_begin_src = TRIG_FOLLOW;
		cmd->convert_src = TRIG_TIMER;
		err++;
	}

	if (cmd->stop_src == TRIG_NONE && cmd->scan_end_src == TRIG_NONE) {
	} else if (cmd->stop_src == TRIG_COUNT &&
		   cmd->scan_end_src == TRIG_NONE) {
	} else if (cmd->stop_src == TRIG_NONE &&
		   cmd->scan_end_src == TRIG_COUNT) {
	} else if (cmd->stop_src == TRIG_COUNT &&
		   cmd->scan_end_src == TRIG_COUNT) {
	} else {
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Invalid stop trigger combination\n", dev->minor);
		cmd->stop_src = TRIG_NONE;
		cmd->scan_end_src = TRIG_NONE;
		err++;
	}
	if (err)
		return 2;

	/*
	 * Stage 3. Check if arguments are generally valid.
	 */
	if (cmd->chanlist_len < 1) {
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "No channel list\n", dev->minor);
		cmd->chanlist_len = 1;
		err++;
	}
	if (init_ticks < 66) {
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Start arg to low\n", dev->minor);
		cmd->start_arg = 2000;
		err++;
	}
	if (scan_ticks && scan_ticks < 67) {
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Scan begin arg to low\n", dev->minor);
		cmd->scan_begin_arg = 2031;
		err++;
	}
	if (chan_ticks < 66) {
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_do_cmd_test(): "
		       "Convert arg to low\n", dev->minor);
		cmd->convert_arg = 2000;
		err++;
	}

	if (err)
		return 3;

	/*
	 * Stage 4. Check for argument conflicts.
	 */
	if (cmd->start_src == TRIG_NOW &&
	    cmd->scan_begin_src == TRIG_TIMER &&
	    cmd->convert_src == TRIG_TIMER) {

		/* Check timer arguments */
		if (init_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid start arg\n", dev->minor);
			cmd->start_arg = 2000;	/* 66 ticks at least */
			err++;
		}
		if (chan_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid convert arg\n", dev->minor);
			cmd->convert_arg = 2000;	/* 66 ticks at least */
			err++;
		}
		if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid scan end arg\n", dev->minor);

			/* At least one tick more */
			cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31;
			err++;
		}
	} else if (cmd->start_src == TRIG_NOW &&
		   cmd->scan_begin_src == TRIG_FOLLOW &&
		   cmd->convert_src == TRIG_TIMER) {

		/* Check timer arguments */
		if (init_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid start arg\n", dev->minor);
			cmd->start_arg = 2000;	/* 66 ticks at least */
			err++;
		}
		if (chan_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid convert arg\n", dev->minor);
			cmd->convert_arg = 2000;	/* 66 ticks at least */
			err++;
		}
	} else if (cmd->start_src == TRIG_EXT &&
		   cmd->scan_begin_src == TRIG_TIMER &&
		   cmd->convert_src == TRIG_TIMER) {

		/* Check timer arguments */
		if (init_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid start arg\n", dev->minor);
			cmd->start_arg = 2000;	/* 66 ticks at least */
			err++;
		}
		if (chan_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid convert arg\n", dev->minor);
			cmd->convert_arg = 2000;	/* 66 ticks at least */
			err++;
		}
		if (scan_ticks <= cmd->chanlist_len * chan_ticks) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid scan end arg\n", dev->minor);

			/* At least one tick more */
			cmd->scan_end_arg = 2000 * cmd->chanlist_len + 31;
			err++;
		}
	} else if (cmd->start_src == TRIG_EXT &&
		   cmd->scan_begin_src == TRIG_FOLLOW &&
		   cmd->convert_src == TRIG_TIMER) {

		/* Check timer arguments */
		if (init_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid start arg\n", dev->minor);
			cmd->start_arg = 2000;	/* 66 ticks at least */
			err++;
		}
		if (chan_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid convert arg\n", dev->minor);
			cmd->convert_arg = 2000;	/* 66 ticks at least */
			err++;
		}
	} else if (cmd->start_src == TRIG_EXT &&
		   cmd->scan_begin_src == TRIG_EXT &&
		   cmd->convert_src == TRIG_TIMER) {

		/* Check timer arguments */
		if (init_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid start arg\n", dev->minor);
			cmd->start_arg = 2000;	/* 66 ticks at least */
			err++;
		}
		if (chan_ticks < ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid convert arg\n", dev->minor);
			cmd->convert_arg = 2000;	/* 66 ticks at least */
			err++;
		}
	} else if (cmd->start_src == TRIG_EXT &&
		   cmd->scan_begin_src == TRIG_EXT &&
		   cmd->convert_src == TRIG_EXT) {

		/* Check timer arguments */
		if (init_ticks
< ME4000_AI_MIN_TICKS) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid start arg\n", dev->minor);
			cmd->start_arg = 2000;	/* 66 ticks at least */
			err++;
		}
	}
	if (cmd->stop_src == TRIG_COUNT) {
		if (cmd->stop_arg == 0) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid stop arg\n", dev->minor);
			cmd->stop_arg = 1;
			err++;
		}
	}
	if (cmd->scan_end_src == TRIG_COUNT) {
		if (cmd->scan_end_arg == 0) {
			printk(KERN_ERR
			       "comedi%d: me4000: me4000_ai_do_cmd_test(): "
			       "Invalid scan end arg\n", dev->minor);
			cmd->scan_end_arg = 1;
			err++;
		}
	}

	if (err)
		return 4;

	/*
	 * Stage 5. Check the channel list.
	 */
	if (ai_check_chanlist(dev, s, cmd))
		return 5;

	return 0;
}

/*
 * AI interrupt handler.  Two interrupt sources are serviced:
 * - FIFO half full: drain half (or all, on overflow) of the data FIFO
 *   into the comedi buffer;
 * - sample counter: the acquisition is complete, drain the remainder and
 *   signal end-of-acquisition.
 *
 * NOTE(review): the status bits are read through ctrl_reg here (and via
 * raw inl() in places) rather than status_reg/me4000_inl() — presumably
 * the control and status registers share an address on this hardware;
 * confirm against the ME-4000 register map before changing.
 */
static irqreturn_t me4000_ai_isr(int irq, void *dev_id)
{
	unsigned int tmp;
	struct comedi_device *dev = dev_id;
	struct comedi_subdevice *s = dev->subdevices;
	struct me4000_ai_context *ai_context = &info->ai_context;
	int i;
	int c = 0;
	long lval;

	ISR_PDEBUG("me4000_ai_isr() is executed\n");

	if (!dev->attached) {
		ISR_PDEBUG("me4000_ai_isr() premature interrupt\n");
		return IRQ_NONE;
	}

	/* Reset all events */
	s->async->events = 0;

	/* Check if irq number is right */
	if (irq != ai_context->irq) {
		printk(KERN_ERR "comedi%d: me4000: me4000_ai_isr(): "
		       "Incorrect interrupt num: %d\n", dev->minor, irq);
		return IRQ_HANDLED;
	}

	if (me4000_inl(dev, ai_context->irq_status_reg) &
	    ME4000_IRQ_STATUS_BIT_AI_HF) {
		ISR_PDEBUG
		    ("me4000_ai_isr(): Fifo half full interrupt occurred\n");

		/* Read status register to find out what happened */
		tmp = me4000_inl(dev, ai_context->ctrl_reg);

		if (!(tmp & ME4000_AI_STATUS_BIT_FF_DATA) &&
		    !(tmp & ME4000_AI_STATUS_BIT_HF_DATA) &&
		    (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
			ISR_PDEBUG("me4000_ai_isr(): Fifo full\n");
			c = ME4000_AI_FIFO_COUNT;

			/*
			 * FIFO overflow, so stop conversion
			 * and disable all interrupts
			 */
			tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
			tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
				 ME4000_AI_CTRL_BIT_SC_IRQ);
			me4000_outl(dev, tmp, ai_context->ctrl_reg);

			s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;

			printk(KERN_ERR "comedi%d: me4000: me4000_ai_isr(): "
			       "FIFO overflow\n", dev->minor);
		} else if ((tmp & ME4000_AI_STATUS_BIT_FF_DATA) &&
			   !(tmp & ME4000_AI_STATUS_BIT_HF_DATA) &&
			   (tmp & ME4000_AI_STATUS_BIT_EF_DATA)) {
			ISR_PDEBUG("me4000_ai_isr(): Fifo half full\n");

			s->async->events |= COMEDI_CB_BLOCK;

			c = ME4000_AI_FIFO_COUNT / 2;
		} else {
			printk(KERN_ERR "comedi%d: me4000: me4000_ai_isr(): "
			       "Can't determine state of fifo\n", dev->minor);
			c = 0;

			/*
			 * Undefined state, so stop conversion
			 * and disable all interrupts
			 */
			tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
			tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
				 ME4000_AI_CTRL_BIT_SC_IRQ);
			me4000_outl(dev, tmp, ai_context->ctrl_reg);

			s->async->events |= COMEDI_CB_ERROR | COMEDI_CB_EOA;

			printk(KERN_ERR "comedi%d: me4000: me4000_ai_isr(): "
			       "Undefined FIFO state\n", dev->minor);
		}

		ISR_PDEBUG("me4000_ai_isr(): Try to read %d values\n", c);

		for (i = 0; i < c; i++) {
			/* Read value from data fifo */
			lval = inl(ai_context->data_reg) & 0xFFFF;
			lval ^= 0x8000;

			if (!comedi_buf_put(s->async, lval)) {
				/*
				 * Buffer overflow, so stop conversion
				 * and disable all interrupts
				 */
				tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP;
				tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ |
					 ME4000_AI_CTRL_BIT_SC_IRQ);
				me4000_outl(dev, tmp, ai_context->ctrl_reg);

				s->async->events |= COMEDI_CB_OVERFLOW;

				printk(KERN_ERR
				       "comedi%d: me4000: me4000_ai_isr(): "
				       "Buffer overflow\n", dev->minor);

				break;
			}
		}

		/* Work is done, so reset the interrupt */
		ISR_PDEBUG("me4000_ai_isr(): Reset fifo half full interrupt\n");
		tmp |= ME4000_AI_CTRL_BIT_HF_IRQ_RESET;
		me4000_outl(dev, tmp, ai_context->ctrl_reg);
		tmp &= ~ME4000_AI_CTRL_BIT_HF_IRQ_RESET;
		me4000_outl(dev, tmp, ai_context->ctrl_reg);
	}

	if (me4000_inl(dev, ai_context->irq_status_reg) &
	    ME4000_IRQ_STATUS_BIT_SC) {
		ISR_PDEBUG
		    ("me4000_ai_isr(): Sample counter interrupt occurred\n");

		s->async->events |= COMEDI_CB_BLOCK | COMEDI_CB_EOA;

		/*
		 * Acquisition is complete, so stop
		 * conversion and disable all interrupts
		 */
tmp = me4000_inl(dev, ai_context->ctrl_reg); tmp |= ME4000_AI_CTRL_BIT_IMMEDIATE_STOP; tmp &= ~(ME4000_AI_CTRL_BIT_HF_IRQ | ME4000_AI_CTRL_BIT_SC_IRQ); me4000_outl(dev, tmp, ai_context->ctrl_reg); /* Poll data until fifo empty */ while (inl(ai_context->ctrl_reg) & ME4000_AI_STATUS_BIT_EF_DATA) { /* Read value from data fifo */ lval = inl(ai_context->data_reg) & 0xFFFF; lval ^= 0x8000; if (!comedi_buf_put(s->async, lval)) { printk(KERN_ERR "comedi%d: me4000: me4000_ai_isr(): " "Buffer overflow\n", dev->minor); s->async->events |= COMEDI_CB_OVERFLOW; break; } } /* Work is done, so reset the interrupt */ ISR_PDEBUG ("me4000_ai_isr(): Reset interrupt from sample counter\n"); tmp |= ME4000_AI_CTRL_BIT_SC_IRQ_RESET; me4000_outl(dev, tmp, ai_context->ctrl_reg); tmp &= ~ME4000_AI_CTRL_BIT_SC_IRQ_RESET; me4000_outl(dev, tmp, ai_context->ctrl_reg); } ISR_PDEBUG("me4000_ai_isr(): Events = 0x%X\n", s->async->events); if (s->async->events) comedi_event(dev, s); return IRQ_HANDLED; } /*============================================================================= Analog output section ===========================================================================*/ static int me4000_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan = CR_CHAN(insn->chanspec); int rang = CR_RANGE(insn->chanspec); int aref = CR_AREF(insn->chanspec); unsigned long tmp; CALL_PDEBUG("In me4000_ao_insn_write()\n"); if (insn->n == 0) { return 0; } else if (insn->n > 1) { printk(KERN_ERR "comedi%d: me4000: me4000_ao_insn_write(): " "Invalid instruction length %d\n", dev->minor, insn->n); return -EINVAL; } if (chan >= thisboard->ao.count) { printk(KERN_ERR "comedi%d: me4000: me4000_ao_insn_write(): " "Invalid channel %d\n", dev->minor, insn->n); return -EINVAL; } if (rang != 0) { printk(KERN_ERR "comedi%d: me4000: me4000_ao_insn_write(): " "Invalid range %d\n", dev->minor, insn->n); return -EINVAL; } if (aref != AREF_GROUND && aref != 
AREF_COMMON) { printk(KERN_ERR "comedi%d: me4000: me4000_ao_insn_write(): " "Invalid aref %d\n", dev->minor, insn->n); return -EINVAL; } /* Stop any running conversion */ tmp = me4000_inl(dev, info->ao_context[chan].ctrl_reg); tmp |= ME4000_AO_CTRL_BIT_IMMEDIATE_STOP; me4000_outl(dev, tmp, info->ao_context[chan].ctrl_reg); /* Clear control register and set to single mode */ me4000_outl(dev, 0x0, info->ao_context[chan].ctrl_reg); /* Write data value */ me4000_outl(dev, data[0], info->ao_context[chan].single_reg); /* Store in the mirror */ info->ao_context[chan].mirror = data[0]; return 1; } static int me4000_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int chan = CR_CHAN(insn->chanspec); if (insn->n == 0) { return 0; } else if (insn->n > 1) { printk ("comedi%d: me4000: me4000_ao_insn_read(): " "Invalid instruction length\n", dev->minor); return -EINVAL; } data[0] = info->ao_context[chan].mirror; return 1; } /*============================================================================= Digital I/O section ===========================================================================*/ static int me4000_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { CALL_PDEBUG("In me4000_dio_insn_bits()\n"); /* Length of data must be 2 (mask and new data, see below) */ if (insn->n == 0) return 0; if (insn->n != 2) { printk ("comedi%d: me4000: me4000_dio_insn_bits(): " "Invalid instruction length\n", dev->minor); return -EINVAL; } /* * The insn data consists of a mask in data[0] and the new data * in data[1]. The mask defines which bits we are concerning about. * The new data must be anded with the mask. * Each channel corresponds to a bit. 
*/ if (data[0]) { /* Check if requested ports are configured for output */ if ((s->io_bits & data[0]) != data[0]) return -EIO; s->state &= ~data[0]; s->state |= data[0] & data[1]; /* Write out the new digital output lines */ me4000_outl(dev, (s->state >> 0) & 0xFF, info->dio_context.port_0_reg); me4000_outl(dev, (s->state >> 8) & 0xFF, info->dio_context.port_1_reg); me4000_outl(dev, (s->state >> 16) & 0xFF, info->dio_context.port_2_reg); me4000_outl(dev, (s->state >> 24) & 0xFF, info->dio_context.port_3_reg); } /* On return, data[1] contains the value of the digital input and output lines. */ data[1] = ((me4000_inl(dev, info->dio_context.port_0_reg) & 0xFF) << 0) | ((me4000_inl(dev, info->dio_context.port_1_reg) & 0xFF) << 8) | ((me4000_inl(dev, info->dio_context.port_2_reg) & 0xFF) << 16) | ((me4000_inl(dev, info->dio_context.port_3_reg) & 0xFF) << 24); return 2; } static int me4000_dio_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned long tmp; int chan = CR_CHAN(insn->chanspec); CALL_PDEBUG("In me4000_dio_insn_config()\n"); switch (data[0]) { default: return -EINVAL; case INSN_CONFIG_DIO_QUERY: data[1] = (s->io_bits & (1 << chan)) ? COMEDI_OUTPUT : COMEDI_INPUT; return insn->n; case INSN_CONFIG_DIO_INPUT: case INSN_CONFIG_DIO_OUTPUT: break; } /* * The input or output configuration of each digital line is * configured by a special insn_config instruction. chanspec * contains the channel to be changed, and data[0] contains the * value INSN_CONFIG_DIO_INPUT or INSN_CONFIG_DIO_OUTPUT. * On the ME-4000 it is only possible to switch port wise (8 bit) */ tmp = me4000_inl(dev, info->dio_context.ctrl_reg); if (data[0] == INSN_CONFIG_DIO_OUTPUT) { if (chan < 8) { s->io_bits |= 0xFF; tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 | ME4000_DIO_CTRL_BIT_MODE_1); tmp |= ME4000_DIO_CTRL_BIT_MODE_0; } else if (chan < 16) { /* * Chech for optoisolated ME-4000 version. 
* If one the first port is a fixed output * port and the second is a fixed input port. */ if (!me4000_inl(dev, info->dio_context.dir_reg)) return -ENODEV; s->io_bits |= 0xFF00; tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 | ME4000_DIO_CTRL_BIT_MODE_3); tmp |= ME4000_DIO_CTRL_BIT_MODE_2; } else if (chan < 24) { s->io_bits |= 0xFF0000; tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_4 | ME4000_DIO_CTRL_BIT_MODE_5); tmp |= ME4000_DIO_CTRL_BIT_MODE_4; } else if (chan < 32) { s->io_bits |= 0xFF000000; tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_6 | ME4000_DIO_CTRL_BIT_MODE_7); tmp |= ME4000_DIO_CTRL_BIT_MODE_6; } else { return -EINVAL; } } else { if (chan < 8) { /* * Chech for optoisolated ME-4000 version. * If one the first port is a fixed output * port and the second is a fixed input port. */ if (!me4000_inl(dev, info->dio_context.dir_reg)) return -ENODEV; s->io_bits &= ~0xFF; tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_0 | ME4000_DIO_CTRL_BIT_MODE_1); } else if (chan < 16) { s->io_bits &= ~0xFF00; tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_2 | ME4000_DIO_CTRL_BIT_MODE_3); } else if (chan < 24) { s->io_bits &= ~0xFF0000; tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_4 | ME4000_DIO_CTRL_BIT_MODE_5); } else if (chan < 32) { s->io_bits &= ~0xFF000000; tmp &= ~(ME4000_DIO_CTRL_BIT_MODE_6 | ME4000_DIO_CTRL_BIT_MODE_7); } else { return -EINVAL; } } me4000_outl(dev, tmp, info->dio_context.ctrl_reg); return 1; } /*============================================================================= Counter section ===========================================================================*/ static int cnt_reset(struct comedi_device *dev, unsigned int channel) { CALL_PDEBUG("In cnt_reset()\n"); switch (channel) { case 0: me4000_outb(dev, 0x30, info->cnt_context.ctrl_reg); me4000_outb(dev, 0x00, info->cnt_context.counter_0_reg); me4000_outb(dev, 0x00, info->cnt_context.counter_0_reg); break; case 1: me4000_outb(dev, 0x70, info->cnt_context.ctrl_reg); me4000_outb(dev, 0x00, info->cnt_context.counter_1_reg); me4000_outb(dev, 0x00, 
info->cnt_context.counter_1_reg); break; case 2: me4000_outb(dev, 0xB0, info->cnt_context.ctrl_reg); me4000_outb(dev, 0x00, info->cnt_context.counter_2_reg); me4000_outb(dev, 0x00, info->cnt_context.counter_2_reg); break; default: printk(KERN_ERR "comedi%d: me4000: cnt_reset(): Invalid channel\n", dev->minor); return -EINVAL; } return 0; } static int cnt_config(struct comedi_device *dev, unsigned int channel, unsigned int mode) { int tmp = 0; CALL_PDEBUG("In cnt_config()\n"); switch (channel) { case 0: tmp |= ME4000_CNT_COUNTER_0; break; case 1: tmp |= ME4000_CNT_COUNTER_1; break; case 2: tmp |= ME4000_CNT_COUNTER_2; break; default: printk(KERN_ERR "comedi%d: me4000: cnt_config(): Invalid channel\n", dev->minor); return -EINVAL; } switch (mode) { case 0: tmp |= ME4000_CNT_MODE_0; break; case 1: tmp |= ME4000_CNT_MODE_1; break; case 2: tmp |= ME4000_CNT_MODE_2; break; case 3: tmp |= ME4000_CNT_MODE_3; break; case 4: tmp |= ME4000_CNT_MODE_4; break; case 5: tmp |= ME4000_CNT_MODE_5; break; default: printk(KERN_ERR "comedi%d: me4000: cnt_config(): Invalid counter mode\n", dev->minor); return -EINVAL; } /* Write the control word */ tmp |= 0x30; me4000_outb(dev, tmp, info->cnt_context.ctrl_reg); return 0; } static int me4000_cnt_insn_config(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int err; CALL_PDEBUG("In me4000_cnt_insn_config()\n"); switch (data[0]) { case GPCT_RESET: if (insn->n != 1) { printk(KERN_ERR "comedi%d: me4000: me4000_cnt_insn_config(): " "Invalid instruction length%d\n", dev->minor, insn->n); return -EINVAL; } err = cnt_reset(dev, insn->chanspec); if (err) return err; break; case GPCT_SET_OPERATION: if (insn->n != 2) { printk(KERN_ERR "comedi%d: me4000: me4000_cnt_insn_config(): " "Invalid instruction length%d\n", dev->minor, insn->n); return -EINVAL; } err = cnt_config(dev, insn->chanspec, data[1]); if (err) return err; break; default: printk(KERN_ERR "comedi%d: me4000: 
me4000_cnt_insn_config(): " "Invalid instruction\n", dev->minor); return -EINVAL; } return 2; } static int me4000_cnt_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned short tmp; CALL_PDEBUG("In me4000_cnt_insn_read()\n"); if (insn->n == 0) return 0; if (insn->n > 1) { printk(KERN_ERR "comedi%d: me4000: me4000_cnt_insn_read(): " "Invalid instruction length %d\n", dev->minor, insn->n); return -EINVAL; } switch (insn->chanspec) { case 0: tmp = me4000_inb(dev, info->cnt_context.counter_0_reg); data[0] = tmp; tmp = me4000_inb(dev, info->cnt_context.counter_0_reg); data[0] |= tmp << 8; break; case 1: tmp = me4000_inb(dev, info->cnt_context.counter_1_reg); data[0] = tmp; tmp = me4000_inb(dev, info->cnt_context.counter_1_reg); data[0] |= tmp << 8; break; case 2: tmp = me4000_inb(dev, info->cnt_context.counter_2_reg); data[0] = tmp; tmp = me4000_inb(dev, info->cnt_context.counter_2_reg); data[0] |= tmp << 8; break; default: printk(KERN_ERR "comedi%d: me4000: me4000_cnt_insn_read(): " "Invalid channel %d\n", dev->minor, insn->chanspec); return -EINVAL; } return 1; } static int me4000_cnt_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned short tmp; CALL_PDEBUG("In me4000_cnt_insn_write()\n"); if (insn->n == 0) { return 0; } else if (insn->n > 1) { printk(KERN_ERR "comedi%d: me4000: me4000_cnt_insn_write(): " "Invalid instruction length %d\n", dev->minor, insn->n); return -EINVAL; } switch (insn->chanspec) { case 0: tmp = data[0] & 0xFF; me4000_outb(dev, tmp, info->cnt_context.counter_0_reg); tmp = (data[0] >> 8) & 0xFF; me4000_outb(dev, tmp, info->cnt_context.counter_0_reg); break; case 1: tmp = data[0] & 0xFF; me4000_outb(dev, tmp, info->cnt_context.counter_1_reg); tmp = (data[0] >> 8) & 0xFF; me4000_outb(dev, tmp, info->cnt_context.counter_1_reg); break; case 2: tmp = data[0] & 0xFF; me4000_outb(dev, tmp, 
info->cnt_context.counter_2_reg); tmp = (data[0] >> 8) & 0xFF; me4000_outb(dev, tmp, info->cnt_context.counter_2_reg); break; default: printk(KERN_ERR "comedi%d: me4000: me4000_cnt_insn_write(): " "Invalid channel %d\n", dev->minor, insn->chanspec); return -EINVAL; } return 1; } static int __devinit driver_me4000_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_me4000.driver_name); } static void __devexit driver_me4000_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_me4000_pci_driver = { .id_table = me4000_pci_table, .probe = &driver_me4000_pci_probe, .remove = __devexit_p(&driver_me4000_pci_remove) }; static int __init driver_me4000_init_module(void) { int retval; retval = comedi_driver_register(&driver_me4000); if (retval < 0) return retval; driver_me4000_pci_driver.name = (char *)driver_me4000.driver_name; return pci_register_driver(&driver_me4000_pci_driver); } static void __exit driver_me4000_cleanup_module(void) { pci_unregister_driver(&driver_me4000_pci_driver); comedi_driver_unregister(&driver_me4000); } module_init(driver_me4000_init_module); module_exit(driver_me4000_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
Snuzzo/PLUS_kernel
arch/avr32/boards/atngw100/mrmt.c
9671
10132
/* * Board-specific setup code for Remote Media Terminal 1 (RMT1) * add-on board for the ATNGW100 Network Gateway * * Copyright (C) 2008 Mediama Technologies * Based on ATNGW100 Network Gateway (Copyright (C) Atmel) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/linkage.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/fb.h> #include <linux/leds.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/atmel_serial.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <video/atmel_lcdc.h> #include <sound/atmel-ac97c.h> #include <asm/delay.h> #include <asm/io.h> #include <asm/setup.h> #include <mach/at32ap700x.h> #include <mach/board.h> #include <mach/init.h> #include <mach/portmux.h> /* Define board-specifoic GPIO assignments */ #define PIN_LCD_BL GPIO_PIN_PA(28) #define PWM_CH_BL 0 /* Must match with GPIO pin definition */ #define PIN_LCD_DISP GPIO_PIN_PA(31) #define PIN_AC97_RST_N GPIO_PIN_PA(30) #define PB_EXTINT_BASE 25 #define TS_IRQ 0 #define PIN_TS_EXTINT GPIO_PIN_PB(PB_EXTINT_BASE+TS_IRQ) #define PIN_PB_LEFT GPIO_PIN_PB(11) #define PIN_PB_RIGHT GPIO_PIN_PB(12) #define PIN_PWR_SW_N GPIO_PIN_PB(14) #define PIN_PWR_ON GPIO_PIN_PB(13) #define PIN_ZB_RST_N GPIO_PIN_PA(21) #define PIN_BT_RST GPIO_PIN_PA(22) #define PIN_LED_SYS GPIO_PIN_PA(16) #define PIN_LED_A GPIO_PIN_PA(19) #define PIN_LED_B GPIO_PIN_PE(19) #ifdef CONFIG_BOARD_MRMT_LCD_LQ043T3DX0X /* Sharp LQ043T3DX0x (or compatible) panel */ static struct fb_videomode __initdata lcd_fb_modes[] = { { .name = "480x272 @ 59.94Hz", .refresh = 59.94, .xres = 480, .yres = 272, .pixclock = KHZ2PICOS(9000), .left_margin = 2, .right_margin = 2, .upper_margin = 3, .lower_margin = 9, .hsync_len = 41, .vsync_len = 1, .sync = 0, .vmode = 
FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs __initdata lcd_fb_default_monspecs = { .manufacturer = "SHA", .monitor = "LQ043T3DX02", .modedb = lcd_fb_modes, .modedb_len = ARRAY_SIZE(lcd_fb_modes), .hfmin = 14915, .hfmax = 17638, .vfmin = 53, .vfmax = 61, .dclkmax = 9260000, }; static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE | ATMEL_LCDC_INVCLK_NORMAL | ATMEL_LCDC_MEMOR_BIG), .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, .default_monspecs = &lcd_fb_default_monspecs, .guard_time = 2, }; #endif #ifdef CONFIG_BOARD_MRMT_LCD_KWH043GM08 /* Sharp KWH043GM08-Fxx (or compatible) panel */ static struct fb_videomode __initdata lcd_fb_modes[] = { { .name = "480x272 @ 59.94Hz", .refresh = 59.94, .xres = 480, .yres = 272, .pixclock = KHZ2PICOS(9000), .left_margin = 2, .right_margin = 2, .upper_margin = 3, .lower_margin = 9, .hsync_len = 41, .vsync_len = 1, .sync = 0, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs __initdata lcd_fb_default_monspecs = { .manufacturer = "FOR", .monitor = "KWH043GM08", .modedb = lcd_fb_modes, .modedb_len = ARRAY_SIZE(lcd_fb_modes), .hfmin = 14915, .hfmax = 17638, .vfmin = 53, .vfmax = 61, .dclkmax = 9260000, }; static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE | ATMEL_LCDC_INVCLK_INVERTED | ATMEL_LCDC_MEMOR_BIG), .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, .default_monspecs = &lcd_fb_default_monspecs, .guard_time = 2, }; #endif #ifdef CONFIG_BOARD_MRMT_AC97 static struct ac97c_platform_data __initdata ac97c0_data = { .reset_pin = PIN_AC97_RST_N, }; #endif #ifdef CONFIG_BOARD_MRMT_UCB1400_TS /* NOTE: IRQ assignment relies on kernel module parameter */ static struct platform_device rmt_ts_device = { .name = 
"ucb1400_ts", .id = -1, } }; #endif #ifdef CONFIG_BOARD_MRMT_BL_PWM /* PWM LEDs: LCD Backlight, etc */ static struct gpio_led rmt_pwm_led[] = { /* here the "gpio" is actually a PWM channel */ { .name = "backlight", .gpio = PWM_CH_BL, }, }; static struct gpio_led_platform_data rmt_pwm_led_data = { .num_leds = ARRAY_SIZE(rmt_pwm_led), .leds = rmt_pwm_led, }; static struct platform_device rmt_pwm_led_dev = { .name = "leds-atmel-pwm", .id = -1, .dev = { .platform_data = &rmt_pwm_led_data, }, }; #endif #ifdef CONFIG_BOARD_MRMT_ADS7846_TS static int ads7846_pendown_state(void) { return !gpio_get_value( PIN_TS_EXTINT ); /* PENIRQ.*/ } static struct ads7846_platform_data ads_info = { .model = 7846, .keep_vref_on = 0, /* Use external VREF pin */ .vref_delay_usecs = 0, .vref_mv = 3300, /* VREF = 3.3V */ .settle_delay_usecs = 800, .penirq_recheck_delay_usecs = 800, .x_plate_ohms = 750, .y_plate_ohms = 300, .pressure_max = 4096, .debounce_max = 1, .debounce_rep = 0, .debounce_tol = (~0), .get_pendown_state = ads7846_pendown_state, .filter = NULL, .filter_init = NULL, }; static struct spi_board_info spi01_board_info[] __initdata = { { .modalias = "ads7846", .max_speed_hz = 31250*26, .bus_num = 0, .chip_select = 1, .platform_data = &ads_info, .irq = AT32_EXTINT(TS_IRQ), }, }; #endif /* GPIO Keys: left, right, power, etc */ static const struct gpio_keys_button rmt_gpio_keys_buttons[] = { [0] = { .type = EV_KEY, .code = KEY_POWER, .gpio = PIN_PWR_SW_N, .active_low = 1, .desc = "power button", }, [1] = { .type = EV_KEY, .code = KEY_LEFT, .gpio = PIN_PB_LEFT, .active_low = 1, .desc = "left button", }, [2] = { .type = EV_KEY, .code = KEY_RIGHT, .gpio = PIN_PB_RIGHT, .active_low = 1, .desc = "right button", }, }; static const struct gpio_keys_platform_data rmt_gpio_keys_data = { .nbuttons = ARRAY_SIZE(rmt_gpio_keys_buttons), .buttons = (void *) rmt_gpio_keys_buttons, }; static struct platform_device rmt_gpio_keys = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = (void *) 
&rmt_gpio_keys_data, } }; #ifdef CONFIG_BOARD_MRMT_RTC_I2C static struct i2c_board_info __initdata mrmt1_i2c_rtc = { I2C_BOARD_INFO("s35390a", 0x30), .irq = 0, }; #endif static void mrmt_power_off(void) { /* PWR_ON=0 will force power off */ gpio_set_value( PIN_PWR_ON, 0 ); } static int __init mrmt1_init(void) { gpio_set_value( PIN_PWR_ON, 1 ); /* Ensure PWR_ON is enabled */ pm_power_off = mrmt_power_off; /* Setup USARTS (other than console) */ at32_map_usart(2, 1, 0); /* USART 2: /dev/ttyS1, RMT1:DB9M */ at32_map_usart(3, 2, ATMEL_USART_RTS | ATMEL_USART_CTS); /* USART 3: /dev/ttyS2, RMT1:Wireless, w/ RTS/CTS */ at32_add_device_usart(1); at32_add_device_usart(2); /* Select GPIO Key pins */ at32_select_gpio( PIN_PWR_SW_N, AT32_GPIOF_DEGLITCH); at32_select_gpio( PIN_PB_LEFT, AT32_GPIOF_DEGLITCH); at32_select_gpio( PIN_PB_RIGHT, AT32_GPIOF_DEGLITCH); platform_device_register(&rmt_gpio_keys); #ifdef CONFIG_BOARD_MRMT_RTC_I2C i2c_register_board_info(0, &mrmt1_i2c_rtc, 1); #endif #ifndef CONFIG_BOARD_MRMT_LCD_DISABLE /* User "alternate" LCDC inferface on Port E & D */ /* NB: exclude LCDC_CC pin, as NGW100 reserves it for other use */ at32_add_device_lcdc(0, &rmt_lcdc_data, fbmem_start, fbmem_size, (ATMEL_LCDC_ALT_24BIT | ATMEL_LCDC_PE_DVAL ) ); #endif #ifdef CONFIG_BOARD_MRMT_AC97 at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH); #endif #ifdef CONFIG_BOARD_MRMT_ADS7846_TS /* Select the Touchscreen interrupt pin mode */ at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); irq_set_irq_type(AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING); at32_spi_setup_slaves(0,spi01_board_info,ARRAY_SIZE(spi01_board_info)); spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info)); #endif #ifdef CONFIG_BOARD_MRMT_UCB1400_TS /* Select the Touchscreen interrupt pin mode */ at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); platform_device_register(&rmt_ts_device); #endif 
at32_select_gpio( PIN_LCD_DISP, AT32_GPIOF_OUTPUT ); gpio_request( PIN_LCD_DISP, "LCD_DISP" ); gpio_direction_output( PIN_LCD_DISP, 0 ); /* LCD DISP */ #ifdef CONFIG_BOARD_MRMT_LCD_DISABLE /* Keep Backlight and DISP off */ at32_select_gpio( PIN_LCD_BL, AT32_GPIOF_OUTPUT ); gpio_request( PIN_LCD_BL, "LCD_BL" ); gpio_direction_output( PIN_LCD_BL, 0 ); /* Backlight */ #else gpio_set_value( PIN_LCD_DISP, 1 ); /* DISP asserted first */ #ifdef CONFIG_BOARD_MRMT_BL_PWM /* Use PWM for Backlight controls */ at32_add_device_pwm(1 << PWM_CH_BL); platform_device_register(&rmt_pwm_led_dev); #else /* Backlight always on */ udelay( 1 ); at32_select_gpio( PIN_LCD_BL, AT32_GPIOF_OUTPUT ); gpio_request( PIN_LCD_BL, "LCD_BL" ); gpio_direction_output( PIN_LCD_BL, 1 ); #endif #endif /* Make sure BT and Zigbee modules in reset */ at32_select_gpio( PIN_BT_RST, AT32_GPIOF_OUTPUT ); gpio_request( PIN_BT_RST, "BT_RST" ); gpio_direction_output( PIN_BT_RST, 1 ); /* BT Module in Reset */ at32_select_gpio( PIN_ZB_RST_N, AT32_GPIOF_OUTPUT ); gpio_request( PIN_ZB_RST_N, "ZB_RST_N" ); gpio_direction_output( PIN_ZB_RST_N, 0 ); /* XBee Module in Reset */ #ifdef CONFIG_BOARD_MRMT_WIRELESS_ZB udelay( 1000 ); /* Unreset the XBee Module */ gpio_set_value( PIN_ZB_RST_N, 1 ); #endif #ifdef CONFIG_BOARD_MRMT_WIRELESS_BT udelay( 1000 ); /* Unreset the BT Module */ gpio_set_value( PIN_BT_RST, 0 ); #endif return 0; } arch_initcall(mrmt1_init); static int __init mrmt1_early_init(void) { /* To maintain power-on signal in case boot loader did not already */ at32_select_gpio( PIN_PWR_ON, AT32_GPIOF_OUTPUT ); gpio_request( PIN_PWR_ON, "PIN_PWR_ON" ); gpio_direction_output( PIN_PWR_ON, 1 ); return 0; } core_initcall(mrmt1_early_init);
gpl-2.0
javelinanddart/shamu
arch/avr32/boards/atngw100/mrmt.c
9671
10132
/* * Board-specific setup code for Remote Media Terminal 1 (RMT1) * add-on board for the ATNGW100 Network Gateway * * Copyright (C) 2008 Mediama Technologies * Based on ATNGW100 Network Gateway (Copyright (C) Atmel) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/irq.h> #include <linux/linkage.h> #include <linux/platform_device.h> #include <linux/types.h> #include <linux/fb.h> #include <linux/leds.h> #include <linux/input.h> #include <linux/gpio_keys.h> #include <linux/atmel_serial.h> #include <linux/spi/spi.h> #include <linux/spi/ads7846.h> #include <video/atmel_lcdc.h> #include <sound/atmel-ac97c.h> #include <asm/delay.h> #include <asm/io.h> #include <asm/setup.h> #include <mach/at32ap700x.h> #include <mach/board.h> #include <mach/init.h> #include <mach/portmux.h> /* Define board-specifoic GPIO assignments */ #define PIN_LCD_BL GPIO_PIN_PA(28) #define PWM_CH_BL 0 /* Must match with GPIO pin definition */ #define PIN_LCD_DISP GPIO_PIN_PA(31) #define PIN_AC97_RST_N GPIO_PIN_PA(30) #define PB_EXTINT_BASE 25 #define TS_IRQ 0 #define PIN_TS_EXTINT GPIO_PIN_PB(PB_EXTINT_BASE+TS_IRQ) #define PIN_PB_LEFT GPIO_PIN_PB(11) #define PIN_PB_RIGHT GPIO_PIN_PB(12) #define PIN_PWR_SW_N GPIO_PIN_PB(14) #define PIN_PWR_ON GPIO_PIN_PB(13) #define PIN_ZB_RST_N GPIO_PIN_PA(21) #define PIN_BT_RST GPIO_PIN_PA(22) #define PIN_LED_SYS GPIO_PIN_PA(16) #define PIN_LED_A GPIO_PIN_PA(19) #define PIN_LED_B GPIO_PIN_PE(19) #ifdef CONFIG_BOARD_MRMT_LCD_LQ043T3DX0X /* Sharp LQ043T3DX0x (or compatible) panel */ static struct fb_videomode __initdata lcd_fb_modes[] = { { .name = "480x272 @ 59.94Hz", .refresh = 59.94, .xres = 480, .yres = 272, .pixclock = KHZ2PICOS(9000), .left_margin = 2, .right_margin = 2, .upper_margin = 3, .lower_margin = 9, .hsync_len = 41, .vsync_len = 1, .sync = 0, .vmode = 
FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs __initdata lcd_fb_default_monspecs = { .manufacturer = "SHA", .monitor = "LQ043T3DX02", .modedb = lcd_fb_modes, .modedb_len = ARRAY_SIZE(lcd_fb_modes), .hfmin = 14915, .hfmax = 17638, .vfmin = 53, .vfmax = 61, .dclkmax = 9260000, }; static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE | ATMEL_LCDC_INVCLK_NORMAL | ATMEL_LCDC_MEMOR_BIG), .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, .default_monspecs = &lcd_fb_default_monspecs, .guard_time = 2, }; #endif #ifdef CONFIG_BOARD_MRMT_LCD_KWH043GM08 /* Sharp KWH043GM08-Fxx (or compatible) panel */ static struct fb_videomode __initdata lcd_fb_modes[] = { { .name = "480x272 @ 59.94Hz", .refresh = 59.94, .xres = 480, .yres = 272, .pixclock = KHZ2PICOS(9000), .left_margin = 2, .right_margin = 2, .upper_margin = 3, .lower_margin = 9, .hsync_len = 41, .vsync_len = 1, .sync = 0, .vmode = FB_VMODE_NONINTERLACED, }, }; static struct fb_monspecs __initdata lcd_fb_default_monspecs = { .manufacturer = "FOR", .monitor = "KWH043GM08", .modedb = lcd_fb_modes, .modedb_len = ARRAY_SIZE(lcd_fb_modes), .hfmin = 14915, .hfmax = 17638, .vfmin = 53, .vfmax = 61, .dclkmax = 9260000, }; static struct atmel_lcdfb_info __initdata rmt_lcdc_data = { .default_bpp = 24, .default_dmacon = ATMEL_LCDC_DMAEN | ATMEL_LCDC_DMA2DEN, .default_lcdcon2 = (ATMEL_LCDC_DISTYPE_TFT | ATMEL_LCDC_CLKMOD_ALWAYSACTIVE | ATMEL_LCDC_INVCLK_INVERTED | ATMEL_LCDC_MEMOR_BIG), .lcd_wiring_mode = ATMEL_LCDC_WIRING_RGB, .default_monspecs = &lcd_fb_default_monspecs, .guard_time = 2, }; #endif #ifdef CONFIG_BOARD_MRMT_AC97 static struct ac97c_platform_data __initdata ac97c0_data = { .reset_pin = PIN_AC97_RST_N, }; #endif #ifdef CONFIG_BOARD_MRMT_UCB1400_TS /* NOTE: IRQ assignment relies on kernel module parameter */ static struct platform_device rmt_ts_device = { .name = 
"ucb1400_ts", .id = -1, } }; #endif #ifdef CONFIG_BOARD_MRMT_BL_PWM /* PWM LEDs: LCD Backlight, etc */ static struct gpio_led rmt_pwm_led[] = { /* here the "gpio" is actually a PWM channel */ { .name = "backlight", .gpio = PWM_CH_BL, }, }; static struct gpio_led_platform_data rmt_pwm_led_data = { .num_leds = ARRAY_SIZE(rmt_pwm_led), .leds = rmt_pwm_led, }; static struct platform_device rmt_pwm_led_dev = { .name = "leds-atmel-pwm", .id = -1, .dev = { .platform_data = &rmt_pwm_led_data, }, }; #endif #ifdef CONFIG_BOARD_MRMT_ADS7846_TS static int ads7846_pendown_state(void) { return !gpio_get_value( PIN_TS_EXTINT ); /* PENIRQ.*/ } static struct ads7846_platform_data ads_info = { .model = 7846, .keep_vref_on = 0, /* Use external VREF pin */ .vref_delay_usecs = 0, .vref_mv = 3300, /* VREF = 3.3V */ .settle_delay_usecs = 800, .penirq_recheck_delay_usecs = 800, .x_plate_ohms = 750, .y_plate_ohms = 300, .pressure_max = 4096, .debounce_max = 1, .debounce_rep = 0, .debounce_tol = (~0), .get_pendown_state = ads7846_pendown_state, .filter = NULL, .filter_init = NULL, }; static struct spi_board_info spi01_board_info[] __initdata = { { .modalias = "ads7846", .max_speed_hz = 31250*26, .bus_num = 0, .chip_select = 1, .platform_data = &ads_info, .irq = AT32_EXTINT(TS_IRQ), }, }; #endif /* GPIO Keys: left, right, power, etc */ static const struct gpio_keys_button rmt_gpio_keys_buttons[] = { [0] = { .type = EV_KEY, .code = KEY_POWER, .gpio = PIN_PWR_SW_N, .active_low = 1, .desc = "power button", }, [1] = { .type = EV_KEY, .code = KEY_LEFT, .gpio = PIN_PB_LEFT, .active_low = 1, .desc = "left button", }, [2] = { .type = EV_KEY, .code = KEY_RIGHT, .gpio = PIN_PB_RIGHT, .active_low = 1, .desc = "right button", }, }; static const struct gpio_keys_platform_data rmt_gpio_keys_data = { .nbuttons = ARRAY_SIZE(rmt_gpio_keys_buttons), .buttons = (void *) rmt_gpio_keys_buttons, }; static struct platform_device rmt_gpio_keys = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = (void *) 
&rmt_gpio_keys_data, } }; #ifdef CONFIG_BOARD_MRMT_RTC_I2C static struct i2c_board_info __initdata mrmt1_i2c_rtc = { I2C_BOARD_INFO("s35390a", 0x30), .irq = 0, }; #endif static void mrmt_power_off(void) { /* PWR_ON=0 will force power off */ gpio_set_value( PIN_PWR_ON, 0 ); } static int __init mrmt1_init(void) { gpio_set_value( PIN_PWR_ON, 1 ); /* Ensure PWR_ON is enabled */ pm_power_off = mrmt_power_off; /* Setup USARTS (other than console) */ at32_map_usart(2, 1, 0); /* USART 2: /dev/ttyS1, RMT1:DB9M */ at32_map_usart(3, 2, ATMEL_USART_RTS | ATMEL_USART_CTS); /* USART 3: /dev/ttyS2, RMT1:Wireless, w/ RTS/CTS */ at32_add_device_usart(1); at32_add_device_usart(2); /* Select GPIO Key pins */ at32_select_gpio( PIN_PWR_SW_N, AT32_GPIOF_DEGLITCH); at32_select_gpio( PIN_PB_LEFT, AT32_GPIOF_DEGLITCH); at32_select_gpio( PIN_PB_RIGHT, AT32_GPIOF_DEGLITCH); platform_device_register(&rmt_gpio_keys); #ifdef CONFIG_BOARD_MRMT_RTC_I2C i2c_register_board_info(0, &mrmt1_i2c_rtc, 1); #endif #ifndef CONFIG_BOARD_MRMT_LCD_DISABLE /* User "alternate" LCDC inferface on Port E & D */ /* NB: exclude LCDC_CC pin, as NGW100 reserves it for other use */ at32_add_device_lcdc(0, &rmt_lcdc_data, fbmem_start, fbmem_size, (ATMEL_LCDC_ALT_24BIT | ATMEL_LCDC_PE_DVAL ) ); #endif #ifdef CONFIG_BOARD_MRMT_AC97 at32_add_device_ac97c(0, &ac97c0_data, AC97C_BOTH); #endif #ifdef CONFIG_BOARD_MRMT_ADS7846_TS /* Select the Touchscreen interrupt pin mode */ at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); irq_set_irq_type(AT32_EXTINT(TS_IRQ), IRQ_TYPE_EDGE_FALLING); at32_spi_setup_slaves(0,spi01_board_info,ARRAY_SIZE(spi01_board_info)); spi_register_board_info(spi01_board_info,ARRAY_SIZE(spi01_board_info)); #endif #ifdef CONFIG_BOARD_MRMT_UCB1400_TS /* Select the Touchscreen interrupt pin mode */ at32_select_periph( GPIO_PIOB_BASE, 1 << (PB_EXTINT_BASE+TS_IRQ), GPIO_PERIPH_A, AT32_GPIOF_DEGLITCH); platform_device_register(&rmt_ts_device); #endif 
at32_select_gpio( PIN_LCD_DISP, AT32_GPIOF_OUTPUT ); gpio_request( PIN_LCD_DISP, "LCD_DISP" ); gpio_direction_output( PIN_LCD_DISP, 0 ); /* LCD DISP */ #ifdef CONFIG_BOARD_MRMT_LCD_DISABLE /* Keep Backlight and DISP off */ at32_select_gpio( PIN_LCD_BL, AT32_GPIOF_OUTPUT ); gpio_request( PIN_LCD_BL, "LCD_BL" ); gpio_direction_output( PIN_LCD_BL, 0 ); /* Backlight */ #else gpio_set_value( PIN_LCD_DISP, 1 ); /* DISP asserted first */ #ifdef CONFIG_BOARD_MRMT_BL_PWM /* Use PWM for Backlight controls */ at32_add_device_pwm(1 << PWM_CH_BL); platform_device_register(&rmt_pwm_led_dev); #else /* Backlight always on */ udelay( 1 ); at32_select_gpio( PIN_LCD_BL, AT32_GPIOF_OUTPUT ); gpio_request( PIN_LCD_BL, "LCD_BL" ); gpio_direction_output( PIN_LCD_BL, 1 ); #endif #endif /* Make sure BT and Zigbee modules in reset */ at32_select_gpio( PIN_BT_RST, AT32_GPIOF_OUTPUT ); gpio_request( PIN_BT_RST, "BT_RST" ); gpio_direction_output( PIN_BT_RST, 1 ); /* BT Module in Reset */ at32_select_gpio( PIN_ZB_RST_N, AT32_GPIOF_OUTPUT ); gpio_request( PIN_ZB_RST_N, "ZB_RST_N" ); gpio_direction_output( PIN_ZB_RST_N, 0 ); /* XBee Module in Reset */ #ifdef CONFIG_BOARD_MRMT_WIRELESS_ZB udelay( 1000 ); /* Unreset the XBee Module */ gpio_set_value( PIN_ZB_RST_N, 1 ); #endif #ifdef CONFIG_BOARD_MRMT_WIRELESS_BT udelay( 1000 ); /* Unreset the BT Module */ gpio_set_value( PIN_BT_RST, 0 ); #endif return 0; } arch_initcall(mrmt1_init); static int __init mrmt1_early_init(void) { /* To maintain power-on signal in case boot loader did not already */ at32_select_gpio( PIN_PWR_ON, AT32_GPIOF_OUTPUT ); gpio_request( PIN_PWR_ON, "PIN_PWR_ON" ); gpio_direction_output( PIN_PWR_ON, 1 ); return 0; } core_initcall(mrmt1_early_init);
gpl-2.0
rancher/linux
block/blk-mq-cpumap.c
200
2598
/* * CPU <-> hardware queue mapping helpers * * Copyright (C) 2013-2014 Jens Axboe */ #include <linux/kernel.h> #include <linux/threads.h> #include <linux/module.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/cpu.h> #include <linux/blk-mq.h> #include "blk.h" #include "blk-mq.h" static int cpu_to_queue_index(unsigned int nr_cpus, unsigned int nr_queues, const int cpu) { return cpu * nr_queues / nr_cpus; } static int get_first_sibling(unsigned int cpu) { unsigned int ret; ret = cpumask_first(topology_sibling_cpumask(cpu)); if (ret < nr_cpu_ids) return ret; return cpu; } int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, const struct cpumask *online_mask) { unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; cpumask_var_t cpus; if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) return 1; cpumask_clear(cpus); nr_cpus = nr_uniq_cpus = 0; for_each_cpu(i, online_mask) { nr_cpus++; first_sibling = get_first_sibling(i); if (!cpumask_test_cpu(first_sibling, cpus)) nr_uniq_cpus++; cpumask_set_cpu(i, cpus); } queue = 0; for_each_possible_cpu(i) { if (!cpumask_test_cpu(i, online_mask)) { map[i] = 0; continue; } /* * Easy case - we have equal or more hardware queues. Or * there are no thread siblings to take into account. Do * 1:1 if enough, or sequential mapping if less. */ if (nr_queues >= nr_cpus || nr_cpus == nr_uniq_cpus) { map[i] = cpu_to_queue_index(nr_cpus, nr_queues, queue); queue++; continue; } /* * Less then nr_cpus queues, and we have some number of * threads per cores. Map sibling threads to the same * queue. 
*/ first_sibling = get_first_sibling(i); if (first_sibling == i) { map[i] = cpu_to_queue_index(nr_uniq_cpus, nr_queues, queue); queue++; } else map[i] = map[first_sibling]; } free_cpumask_var(cpus); return 0; } unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) { unsigned int *map; /* If cpus are offline, map them to first hctx */ map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, set->numa_node); if (!map) return NULL; if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) return map; kfree(map); return NULL; } /* * We have no quick way of doing reverse lookups. This is only used at * queue init time, so runtime isn't important. */ int blk_mq_hw_queue_to_node(unsigned int *mq_map, unsigned int index) { int i; for_each_possible_cpu(i) { if (index == mq_map[i]) return cpu_to_node(i); } return NUMA_NO_NODE; }
gpl-2.0
loongson-community/linux-loongson-community
drivers/scsi/aha1542.c
200
29280
/* * Driver for Adaptec AHA-1542 SCSI host adapters * * Copyright (C) 1992 Tommy Thorn * Copyright (C) 1993, 1994, 1995 Eric Youngdale * Copyright (C) 2015 Ondrej Zary */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/isa.h> #include <linux/pnp.h> #include <linux/slab.h> #include <linux/io.h> #include <asm/dma.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <scsi/scsi_host.h> #include "aha1542.h" #define MAXBOARDS 4 static bool isapnp = 1; module_param(isapnp, bool, 0); MODULE_PARM_DESC(isapnp, "enable PnP support (default=1)"); static int io[MAXBOARDS] = { 0x330, 0x334, 0, 0 }; module_param_array(io, int, NULL, 0); MODULE_PARM_DESC(io, "base IO address of controller (0x130,0x134,0x230,0x234,0x330,0x334, default=0x330,0x334)"); /* time AHA spends on the AT-bus during data transfer */ static int bus_on[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 11us */ module_param_array(bus_on, int, NULL, 0); MODULE_PARM_DESC(bus_on, "bus on time [us] (2-15, default=-1 [HW default: 11])"); /* time AHA spends off the bus (not to monopolize it) during data transfer */ static int bus_off[MAXBOARDS] = { -1, -1, -1, -1 }; /* power-on default: 4us */ module_param_array(bus_off, int, NULL, 0); MODULE_PARM_DESC(bus_off, "bus off time [us] (1-64, default=-1 [HW default: 4])"); /* default is jumper selected (J1 on 1542A), factory default = 5 MB/s */ static int dma_speed[MAXBOARDS] = { -1, -1, -1, -1 }; module_param_array(dma_speed, int, NULL, 0); MODULE_PARM_DESC(dma_speed, "DMA speed [MB/s] (5,6,7,8,10, default=-1 [by jumper])"); #define BIOS_TRANSLATION_6432 1 /* Default case these days */ #define BIOS_TRANSLATION_25563 2 /* Big disk case */ struct aha1542_hostdata { /* This will effectively start both of them at the first mailbox */ int bios_translation; /* Mapping bios uses - for 
compatibility */ int aha1542_last_mbi_used; int aha1542_last_mbo_used; struct scsi_cmnd *int_cmds[AHA1542_MAILBOXES]; struct mailbox mb[2 * AHA1542_MAILBOXES]; struct ccb ccb[AHA1542_MAILBOXES]; }; static inline void aha1542_intr_reset(u16 base) { outb(IRST, CONTROL(base)); } static inline bool wait_mask(u16 port, u8 mask, u8 allof, u8 noneof, int timeout) { bool delayed = true; if (timeout == 0) { timeout = 3000000; delayed = false; } while (1) { u8 bits = inb(port) & mask; if ((bits & allof) == allof && ((bits & noneof) == 0)) break; if (delayed) mdelay(1); if (--timeout == 0) return false; } return true; } static int aha1542_outb(unsigned int base, u8 val) { if (!wait_mask(STATUS(base), CDF, 0, CDF, 0)) return 1; outb(val, DATA(base)); return 0; } static int aha1542_out(unsigned int base, u8 *buf, int len) { while (len--) { if (!wait_mask(STATUS(base), CDF, 0, CDF, 0)) return 1; outb(*buf++, DATA(base)); } if (!wait_mask(INTRFLAGS(base), INTRMASK, HACC, 0, 0)) return 1; return 0; } /* Only used at boot time, so we do not need to worry about latency as much here */ static int aha1542_in(unsigned int base, u8 *buf, int len, int timeout) { while (len--) { if (!wait_mask(STATUS(base), DF, DF, 0, timeout)) return 1; *buf++ = inb(DATA(base)); } return 0; } static int makecode(unsigned hosterr, unsigned scsierr) { switch (hosterr) { case 0x0: case 0xa: /* Linked command complete without error and linked normally */ case 0xb: /* Linked command complete without error, interrupt generated */ hosterr = 0; break; case 0x11: /* Selection time out-The initiator selection or target reselection was not complete within the SCSI Time out period */ hosterr = DID_TIME_OUT; break; case 0x12: /* Data overrun/underrun-The target attempted to transfer more data than was allocated by the Data Length field or the sum of the Scatter / Gather Data Length fields. */ case 0x13: /* Unexpected bus free-The target dropped the SCSI BSY at an unexpected time. 
*/ case 0x15: /* MBO command was not 00, 01 or 02-The first byte of the CB was invalid. This usually indicates a software failure. */ case 0x16: /* Invalid CCB Operation Code-The first byte of the CCB was invalid. This usually indicates a software failure. */ case 0x17: /* Linked CCB does not have the same LUN-A subsequent CCB of a set of linked CCB's does not specify the same logical unit number as the first. */ case 0x18: /* Invalid Target Direction received from Host-The direction of a Target Mode CCB was invalid. */ case 0x19: /* Duplicate CCB Received in Target Mode-More than once CCB was received to service data transfer between the same target LUN and initiator SCSI ID in the same direction. */ case 0x1a: /* Invalid CCB or Segment List Parameter-A segment list with a zero length segment or invalid segment list boundaries was received. A CCB parameter was invalid. */ #ifdef DEBUG printk("Aha1542: %x %x\n", hosterr, scsierr); #endif hosterr = DID_ERROR; /* Couldn't find any better */ break; case 0x14: /* Target bus phase sequence failure-An invalid bus phase or bus phase sequence was requested by the target. The host adapter will generate a SCSI Reset Condition, notifying the host with a SCRD interrupt */ hosterr = DID_RESET; break; default: printk(KERN_ERR "aha1542: makecode: unknown hoststatus %x\n", hosterr); break; } return scsierr | (hosterr << 16); } static int aha1542_test_port(struct Scsi_Host *sh) { u8 inquiry_result[4]; int i; /* Quick and dirty test for presence of the card. */ if (inb(STATUS(sh->io_port)) == 0xff) return 0; /* Reset the adapter. I ought to make a hard reset, but it's not really necessary */ /* In case some other card was probing here, reset interrupts */ aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ outb(SRST | IRST /*|SCRST */ , CONTROL(sh->io_port)); mdelay(20); /* Wait a little bit for things to settle down. 
*/ /* Expect INIT and IDLE, any of the others are bad */ if (!wait_mask(STATUS(sh->io_port), STATMASK, INIT | IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) return 0; /* Shouldn't have generated any interrupts during reset */ if (inb(INTRFLAGS(sh->io_port)) & INTRMASK) return 0; /* Perform a host adapter inquiry instead so we do not need to set up the mailboxes ahead of time */ aha1542_outb(sh->io_port, CMD_INQUIRY); for (i = 0; i < 4; i++) { if (!wait_mask(STATUS(sh->io_port), DF, DF, 0, 0)) return 0; inquiry_result[i] = inb(DATA(sh->io_port)); } /* Reading port should reset DF */ if (inb(STATUS(sh->io_port)) & DF) return 0; /* When HACC, command is completed, and we're though testing */ if (!wait_mask(INTRFLAGS(sh->io_port), HACC, HACC, 0, 0)) return 0; /* Clear interrupts */ outb(IRST, CONTROL(sh->io_port)); return 1; } static irqreturn_t aha1542_interrupt(int irq, void *dev_id) { struct Scsi_Host *sh = dev_id; struct aha1542_hostdata *aha1542 = shost_priv(sh); void (*my_done)(struct scsi_cmnd *) = NULL; int errstatus, mbi, mbo, mbistatus; int number_serviced; unsigned long flags; struct scsi_cmnd *tmp_cmd; int flag; struct mailbox *mb = aha1542->mb; struct ccb *ccb = aha1542->ccb; #ifdef DEBUG { flag = inb(INTRFLAGS(sh->io_port)); shost_printk(KERN_DEBUG, sh, "aha1542_intr_handle: "); if (!(flag & ANYINTR)) printk("no interrupt?"); if (flag & MBIF) printk("MBIF "); if (flag & MBOA) printk("MBOF "); if (flag & HACC) printk("HACC "); if (flag & SCRD) printk("SCRD "); printk("status %02x\n", inb(STATUS(sh->io_port))); }; #endif number_serviced = 0; spin_lock_irqsave(sh->host_lock, flags); while (1) { flag = inb(INTRFLAGS(sh->io_port)); /* Check for unusual interrupts. If any of these happen, we should probably do something special, but for now just printing a message is sufficient. A SCSI reset detected is something that we really need to deal with in some way. 
*/ if (flag & ~MBIF) { if (flag & MBOA) printk("MBOF "); if (flag & HACC) printk("HACC "); if (flag & SCRD) printk("SCRD "); } aha1542_intr_reset(sh->io_port); mbi = aha1542->aha1542_last_mbi_used + 1; if (mbi >= 2 * AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES; do { if (mb[mbi].status != 0) break; mbi++; if (mbi >= 2 * AHA1542_MAILBOXES) mbi = AHA1542_MAILBOXES; } while (mbi != aha1542->aha1542_last_mbi_used); if (mb[mbi].status == 0) { spin_unlock_irqrestore(sh->host_lock, flags); /* Hmm, no mail. Must have read it the last time around */ if (!number_serviced) shost_printk(KERN_WARNING, sh, "interrupt received, but no mail.\n"); return IRQ_HANDLED; }; mbo = (scsi2int(mb[mbi].ccbptr) - (isa_virt_to_bus(&ccb[0]))) / sizeof(struct ccb); mbistatus = mb[mbi].status; mb[mbi].status = 0; aha1542->aha1542_last_mbi_used = mbi; #ifdef DEBUG if (ccb[mbo].tarstat | ccb[mbo].hastat) shost_printk(KERN_DEBUG, sh, "aha1542_command: returning %x (status %d)\n", ccb[mbo].tarstat + ((int) ccb[mbo].hastat << 16), mb[mbi].status); #endif if (mbistatus == 3) continue; /* Aborted command not found */ #ifdef DEBUG shost_printk(KERN_DEBUG, sh, "...done %d %d\n", mbo, mbi); #endif tmp_cmd = aha1542->int_cmds[mbo]; if (!tmp_cmd || !tmp_cmd->scsi_done) { spin_unlock_irqrestore(sh->host_lock, flags); shost_printk(KERN_WARNING, sh, "Unexpected interrupt\n"); shost_printk(KERN_WARNING, sh, "tarstat=%x, hastat=%x idlun=%x ccb#=%d\n", ccb[mbo].tarstat, ccb[mbo].hastat, ccb[mbo].idlun, mbo); return IRQ_HANDLED; } my_done = tmp_cmd->scsi_done; kfree(tmp_cmd->host_scribble); tmp_cmd->host_scribble = NULL; /* Fetch the sense data, and tuck it away, in the required slot. 
The Adaptec automatically fetches it, and there is no guarantee that we will still have it in the cdb when we come back */ if (ccb[mbo].tarstat == 2) memcpy(tmp_cmd->sense_buffer, &ccb[mbo].cdb[ccb[mbo].cdblen], SCSI_SENSE_BUFFERSIZE); /* is there mail :-) */ /* more error checking left out here */ if (mbistatus != 1) /* This is surely wrong, but I don't know what's right */ errstatus = makecode(ccb[mbo].hastat, ccb[mbo].tarstat); else errstatus = 0; #ifdef DEBUG if (errstatus) shost_printk(KERN_DEBUG, sh, "(aha1542 error:%x %x %x) ", errstatus, ccb[mbo].hastat, ccb[mbo].tarstat); if (ccb[mbo].tarstat == 2) print_hex_dump_bytes("sense: ", DUMP_PREFIX_NONE, &ccb[mbo].cdb[ccb[mbo].cdblen], 12); if (errstatus) printk("aha1542_intr_handle: returning %6x\n", errstatus); #endif tmp_cmd->result = errstatus; aha1542->int_cmds[mbo] = NULL; /* This effectively frees up the mailbox slot, as far as queuecommand is concerned */ my_done(tmp_cmd); number_serviced++; }; } static int aha1542_queuecommand(struct Scsi_Host *sh, struct scsi_cmnd *cmd) { struct aha1542_hostdata *aha1542 = shost_priv(sh); u8 direction; u8 target = cmd->device->id; u8 lun = cmd->device->lun; unsigned long flags; int bufflen = scsi_bufflen(cmd); int mbo, sg_count; struct mailbox *mb = aha1542->mb; struct ccb *ccb = aha1542->ccb; struct chain *cptr; if (*cmd->cmnd == REQUEST_SENSE) { /* Don't do the command - we have the sense data already */ cmd->result = 0; cmd->scsi_done(cmd); return 0; } #ifdef DEBUG { int i = -1; if (*cmd->cmnd == READ_10 || *cmd->cmnd == WRITE_10) i = xscsi2int(cmd->cmnd + 2); else if (*cmd->cmnd == READ_6 || *cmd->cmnd == WRITE_6) i = scsi2int(cmd->cmnd + 2); shost_printk(KERN_DEBUG, sh, "aha1542_queuecommand: dev %d cmd %02x pos %d len %d", target, *cmd->cmnd, i, bufflen); print_hex_dump_bytes("command: ", DUMP_PREFIX_NONE, cmd->cmnd, cmd->cmd_len); } #endif if (bufflen) { /* allocate memory before taking host_lock */ sg_count = scsi_sg_count(cmd); cptr = kmalloc(sizeof(*cptr) * 
sg_count, GFP_KERNEL | GFP_DMA); if (!cptr) return SCSI_MLQUEUE_HOST_BUSY; } /* Use the outgoing mailboxes in a round-robin fashion, because this is how the host adapter will scan for them */ spin_lock_irqsave(sh->host_lock, flags); mbo = aha1542->aha1542_last_mbo_used + 1; if (mbo >= AHA1542_MAILBOXES) mbo = 0; do { if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL) break; mbo++; if (mbo >= AHA1542_MAILBOXES) mbo = 0; } while (mbo != aha1542->aha1542_last_mbo_used); if (mb[mbo].status || aha1542->int_cmds[mbo]) panic("Unable to find empty mailbox for aha1542.\n"); aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from screwing with this cdb. */ aha1542->aha1542_last_mbo_used = mbo; #ifdef DEBUG shost_printk(KERN_DEBUG, sh, "Sending command (%d %p)...", mbo, cmd->scsi_done); #endif any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */ memset(&ccb[mbo], 0, sizeof(struct ccb)); ccb[mbo].cdblen = cmd->cmd_len; direction = 0; if (*cmd->cmnd == READ_10 || *cmd->cmnd == READ_6) direction = 8; else if (*cmd->cmnd == WRITE_10 || *cmd->cmnd == WRITE_6) direction = 16; memcpy(ccb[mbo].cdb, cmd->cmnd, ccb[mbo].cdblen); if (bufflen) { struct scatterlist *sg; int i; ccb[mbo].op = 2; /* SCSI Initiator Command w/scatter-gather */ cmd->host_scribble = (void *)cptr; scsi_for_each_sg(cmd, sg, sg_count, i) { any2scsi(cptr[i].dataptr, isa_page_to_bus(sg_page(sg)) + sg->offset); any2scsi(cptr[i].datalen, sg->length); }; any2scsi(ccb[mbo].datalen, sg_count * sizeof(struct chain)); any2scsi(ccb[mbo].dataptr, isa_virt_to_bus(cptr)); #ifdef DEBUG shost_printk(KERN_DEBUG, sh, "cptr %p: ", cptr); print_hex_dump_bytes("cptr: ", DUMP_PREFIX_NONE, cptr, 18); #endif } else { ccb[mbo].op = 0; /* SCSI Initiator Command */ cmd->host_scribble = NULL; any2scsi(ccb[mbo].datalen, 0); any2scsi(ccb[mbo].dataptr, 0); }; ccb[mbo].idlun = (target & 7) << 5 | direction | (lun & 7); /*SCSI Target Id */ ccb[mbo].rsalen = 16; 
ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0; ccb[mbo].commlinkid = 0; #ifdef DEBUG print_hex_dump_bytes("sending: ", DUMP_PREFIX_NONE, &ccb[mbo], sizeof(ccb[mbo]) - 10); printk("aha1542_queuecommand: now waiting for interrupt "); #endif mb[mbo].status = 1; aha1542_outb(cmd->device->host->io_port, CMD_START_SCSI); spin_unlock_irqrestore(sh->host_lock, flags); return 0; } /* Initialize mailboxes */ static void setup_mailboxes(struct Scsi_Host *sh) { struct aha1542_hostdata *aha1542 = shost_priv(sh); int i; struct mailbox *mb = aha1542->mb; struct ccb *ccb = aha1542->ccb; u8 mb_cmd[5] = { CMD_MBINIT, AHA1542_MAILBOXES, 0, 0, 0}; for (i = 0; i < AHA1542_MAILBOXES; i++) { mb[i].status = mb[AHA1542_MAILBOXES + i].status = 0; any2scsi(mb[i].ccbptr, isa_virt_to_bus(&ccb[i])); }; aha1542_intr_reset(sh->io_port); /* reset interrupts, so they don't block */ any2scsi((mb_cmd + 2), isa_virt_to_bus(mb)); if (aha1542_out(sh->io_port, mb_cmd, 5)) shost_printk(KERN_ERR, sh, "failed setting up mailboxes\n"); aha1542_intr_reset(sh->io_port); } static int aha1542_getconfig(struct Scsi_Host *sh) { u8 inquiry_result[3]; int i; i = inb(STATUS(sh->io_port)); if (i & DF) { i = inb(DATA(sh->io_port)); }; aha1542_outb(sh->io_port, CMD_RETCONF); aha1542_in(sh->io_port, inquiry_result, 3, 0); if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) shost_printk(KERN_ERR, sh, "error querying board settings\n"); aha1542_intr_reset(sh->io_port); switch (inquiry_result[0]) { case 0x80: sh->dma_channel = 7; break; case 0x40: sh->dma_channel = 6; break; case 0x20: sh->dma_channel = 5; break; case 0x01: sh->dma_channel = 0; break; case 0: /* This means that the adapter, although Adaptec 1542 compatible, doesn't use a DMA channel. Currently only aware of the BusLogic BT-445S VL-Bus adapter which needs this. 
*/ sh->dma_channel = 0xFF; break; default: shost_printk(KERN_ERR, sh, "Unable to determine DMA channel.\n"); return -1; }; switch (inquiry_result[1]) { case 0x40: sh->irq = 15; break; case 0x20: sh->irq = 14; break; case 0x8: sh->irq = 12; break; case 0x4: sh->irq = 11; break; case 0x2: sh->irq = 10; break; case 0x1: sh->irq = 9; break; default: shost_printk(KERN_ERR, sh, "Unable to determine IRQ level.\n"); return -1; }; sh->this_id = inquiry_result[2] & 7; return 0; } /* This function should only be called for 1542C boards - we can detect the special firmware settings and unlock the board */ static int aha1542_mbenable(struct Scsi_Host *sh) { static u8 mbenable_cmd[3]; static u8 mbenable_result[2]; int retval; retval = BIOS_TRANSLATION_6432; aha1542_outb(sh->io_port, CMD_EXTBIOS); if (aha1542_in(sh->io_port, mbenable_result, 2, 100)) return retval; if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 100)) goto fail; aha1542_intr_reset(sh->io_port); if ((mbenable_result[0] & 0x08) || mbenable_result[1]) { mbenable_cmd[0] = CMD_MBENABLE; mbenable_cmd[1] = 0; mbenable_cmd[2] = mbenable_result[1]; if ((mbenable_result[0] & 0x08) && (mbenable_result[1] & 0x03)) retval = BIOS_TRANSLATION_25563; if (aha1542_out(sh->io_port, mbenable_cmd, 3)) goto fail; }; while (0) { fail: shost_printk(KERN_ERR, sh, "Mailbox init failed\n"); } aha1542_intr_reset(sh->io_port); return retval; } /* Query the board to find out if it is a 1542 or a 1740, or whatever. 
*/ static int aha1542_query(struct Scsi_Host *sh) { struct aha1542_hostdata *aha1542 = shost_priv(sh); u8 inquiry_result[4]; int i; i = inb(STATUS(sh->io_port)); if (i & DF) { i = inb(DATA(sh->io_port)); }; aha1542_outb(sh->io_port, CMD_INQUIRY); aha1542_in(sh->io_port, inquiry_result, 4, 0); if (!wait_mask(INTRFLAGS(sh->io_port), INTRMASK, HACC, 0, 0)) shost_printk(KERN_ERR, sh, "error querying card type\n"); aha1542_intr_reset(sh->io_port); aha1542->bios_translation = BIOS_TRANSLATION_6432; /* Default case */ /* For an AHA1740 series board, we ignore the board since there is a hardware bug which can lead to wrong blocks being returned if the board is operating in the 1542 emulation mode. Since there is an extended mode driver, we simply ignore the board and let the 1740 driver pick it up. */ if (inquiry_result[0] == 0x43) { shost_printk(KERN_INFO, sh, "Emulation mode not supported for AHA-1740 hardware, use aha1740 driver instead.\n"); return 1; }; /* Always call this - boards that do not support extended bios translation will ignore the command, and we will set the proper default */ aha1542->bios_translation = aha1542_mbenable(sh); return 0; } static u8 dma_speed_hw(int dma_speed) { switch (dma_speed) { case 5: return 0x00; case 6: return 0x04; case 7: return 0x01; case 8: return 0x02; case 10: return 0x03; } return 0xff; /* invalid */ } /* Set the Bus on/off-times as not to ruin floppy performance */ static void aha1542_set_bus_times(struct Scsi_Host *sh, int bus_on, int bus_off, int dma_speed) { if (bus_on > 0) { u8 oncmd[] = { CMD_BUSON_TIME, clamp(bus_on, 2, 15) }; aha1542_intr_reset(sh->io_port); if (aha1542_out(sh->io_port, oncmd, 2)) goto fail; } if (bus_off > 0) { u8 offcmd[] = { CMD_BUSOFF_TIME, clamp(bus_off, 1, 64) }; aha1542_intr_reset(sh->io_port); if (aha1542_out(sh->io_port, offcmd, 2)) goto fail; } if (dma_speed_hw(dma_speed) != 0xff) { u8 dmacmd[] = { CMD_DMASPEED, dma_speed_hw(dma_speed) }; aha1542_intr_reset(sh->io_port); if 
(aha1542_out(sh->io_port, dmacmd, 2)) goto fail; } aha1542_intr_reset(sh->io_port); return; fail: shost_printk(KERN_ERR, sh, "setting bus on/off-time failed\n"); aha1542_intr_reset(sh->io_port); } /* return non-zero on detection */ static struct Scsi_Host *aha1542_hw_init(struct scsi_host_template *tpnt, struct device *pdev, int indx) { unsigned int base_io = io[indx]; struct Scsi_Host *sh; struct aha1542_hostdata *aha1542; char dma_info[] = "no DMA"; if (base_io == 0) return NULL; if (!request_region(base_io, AHA1542_REGION_SIZE, "aha1542")) return NULL; sh = scsi_host_alloc(tpnt, sizeof(struct aha1542_hostdata)); if (!sh) goto release; aha1542 = shost_priv(sh); sh->unique_id = base_io; sh->io_port = base_io; sh->n_io_port = AHA1542_REGION_SIZE; aha1542->aha1542_last_mbi_used = 2 * AHA1542_MAILBOXES - 1; aha1542->aha1542_last_mbo_used = AHA1542_MAILBOXES - 1; if (!aha1542_test_port(sh)) goto unregister; aha1542_set_bus_times(sh, bus_on[indx], bus_off[indx], dma_speed[indx]); if (aha1542_query(sh)) goto unregister; if (aha1542_getconfig(sh) == -1) goto unregister; if (sh->dma_channel != 0xFF) snprintf(dma_info, sizeof(dma_info), "DMA %d", sh->dma_channel); shost_printk(KERN_INFO, sh, "Adaptec AHA-1542 (SCSI-ID %d) at IO 0x%x, IRQ %d, %s\n", sh->this_id, base_io, sh->irq, dma_info); if (aha1542->bios_translation == BIOS_TRANSLATION_25563) shost_printk(KERN_INFO, sh, "Using extended bios translation\n"); setup_mailboxes(sh); if (request_irq(sh->irq, aha1542_interrupt, 0, "aha1542", sh)) { shost_printk(KERN_ERR, sh, "Unable to allocate IRQ.\n"); goto unregister; } if (sh->dma_channel != 0xFF) { if (request_dma(sh->dma_channel, "aha1542")) { shost_printk(KERN_ERR, sh, "Unable to allocate DMA channel.\n"); goto free_irq; } if (sh->dma_channel == 0 || sh->dma_channel >= 5) { set_dma_mode(sh->dma_channel, DMA_MODE_CASCADE); enable_dma(sh->dma_channel); } } if (scsi_add_host(sh, pdev)) goto free_dma; scsi_scan_host(sh); return sh; free_dma: if (sh->dma_channel != 0xff) 
free_dma(sh->dma_channel); free_irq: free_irq(sh->irq, sh); unregister: scsi_host_put(sh); release: release_region(base_io, AHA1542_REGION_SIZE); return NULL; } static int aha1542_release(struct Scsi_Host *sh) { scsi_remove_host(sh); if (sh->dma_channel != 0xff) free_dma(sh->dma_channel); if (sh->irq) free_irq(sh->irq, sh); if (sh->io_port && sh->n_io_port) release_region(sh->io_port, sh->n_io_port); scsi_host_put(sh); return 0; } /* * This is a device reset. This is handled by sending a special command * to the device. */ static int aha1542_dev_reset(struct scsi_cmnd *cmd) { struct Scsi_Host *sh = cmd->device->host; struct aha1542_hostdata *aha1542 = shost_priv(sh); unsigned long flags; struct mailbox *mb = aha1542->mb; u8 target = cmd->device->id; u8 lun = cmd->device->lun; int mbo; struct ccb *ccb = aha1542->ccb; spin_lock_irqsave(sh->host_lock, flags); mbo = aha1542->aha1542_last_mbo_used + 1; if (mbo >= AHA1542_MAILBOXES) mbo = 0; do { if (mb[mbo].status == 0 && aha1542->int_cmds[mbo] == NULL) break; mbo++; if (mbo >= AHA1542_MAILBOXES) mbo = 0; } while (mbo != aha1542->aha1542_last_mbo_used); if (mb[mbo].status || aha1542->int_cmds[mbo]) panic("Unable to find empty mailbox for aha1542.\n"); aha1542->int_cmds[mbo] = cmd; /* This will effectively prevent someone else from screwing with this cdb. 
*/ aha1542->aha1542_last_mbo_used = mbo; any2scsi(mb[mbo].ccbptr, isa_virt_to_bus(&ccb[mbo])); /* This gets trashed for some reason */ memset(&ccb[mbo], 0, sizeof(struct ccb)); ccb[mbo].op = 0x81; /* BUS DEVICE RESET */ ccb[mbo].idlun = (target & 7) << 5 | (lun & 7); /*SCSI Target Id */ ccb[mbo].linkptr[0] = ccb[mbo].linkptr[1] = ccb[mbo].linkptr[2] = 0; ccb[mbo].commlinkid = 0; /* * Now tell the 1542 to flush all pending commands for this * target */ aha1542_outb(sh->io_port, CMD_START_SCSI); spin_unlock_irqrestore(sh->host_lock, flags); scmd_printk(KERN_WARNING, cmd, "Trying device reset for target\n"); return SUCCESS; } static int aha1542_reset(struct scsi_cmnd *cmd, u8 reset_cmd) { struct Scsi_Host *sh = cmd->device->host; struct aha1542_hostdata *aha1542 = shost_priv(sh); unsigned long flags; int i; spin_lock_irqsave(sh->host_lock, flags); /* * This does a scsi reset for all devices on the bus. * In principle, we could also reset the 1542 - should * we do this? Try this first, and we can add that later * if it turns out to be useful. */ outb(reset_cmd, CONTROL(cmd->device->host->io_port)); if (!wait_mask(STATUS(cmd->device->host->io_port), STATMASK, IDLE, STST | DIAGF | INVDCMD | DF | CDF, 0)) { spin_unlock_irqrestore(sh->host_lock, flags); return FAILED; } /* * We need to do this too before the 1542 can interact with * us again after host reset. */ if (reset_cmd & HRST) setup_mailboxes(cmd->device->host); /* * Now try to pick up the pieces. For all pending commands, * free any internal data structures, and basically clear things * out. We do not try and restart any commands or anything - * the strategy handler takes care of that crap. 
*/ shost_printk(KERN_WARNING, cmd->device->host, "Sent BUS RESET to scsi host %d\n", cmd->device->host->host_no); for (i = 0; i < AHA1542_MAILBOXES; i++) { if (aha1542->int_cmds[i] != NULL) { struct scsi_cmnd *tmp_cmd; tmp_cmd = aha1542->int_cmds[i]; if (tmp_cmd->device->soft_reset) { /* * If this device implements the soft reset option, * then it is still holding onto the command, and * may yet complete it. In this case, we don't * flush the data. */ continue; } kfree(tmp_cmd->host_scribble); tmp_cmd->host_scribble = NULL; aha1542->int_cmds[i] = NULL; aha1542->mb[i].status = 0; } } spin_unlock_irqrestore(sh->host_lock, flags); return SUCCESS; } static int aha1542_bus_reset(struct scsi_cmnd *cmd) { return aha1542_reset(cmd, SCRST); } static int aha1542_host_reset(struct scsi_cmnd *cmd) { return aha1542_reset(cmd, HRST | SCRST); } static int aha1542_biosparam(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int geom[]) { struct aha1542_hostdata *aha1542 = shost_priv(sdev->host); if (capacity >= 0x200000 && aha1542->bios_translation == BIOS_TRANSLATION_25563) { /* Please verify that this is the same as what DOS returns */ geom[0] = 255; /* heads */ geom[1] = 63; /* sectors */ } else { geom[0] = 64; /* heads */ geom[1] = 32; /* sectors */ } geom[2] = sector_div(capacity, geom[0] * geom[1]); /* cylinders */ return 0; } MODULE_LICENSE("GPL"); static struct scsi_host_template driver_template = { .module = THIS_MODULE, .proc_name = "aha1542", .name = "Adaptec 1542", .queuecommand = aha1542_queuecommand, .eh_device_reset_handler= aha1542_dev_reset, .eh_bus_reset_handler = aha1542_bus_reset, .eh_host_reset_handler = aha1542_host_reset, .bios_param = aha1542_biosparam, .can_queue = AHA1542_MAILBOXES, .this_id = 7, .sg_tablesize = 16, .cmd_per_lun = 1, .unchecked_isa_dma = 1, .use_clustering = ENABLE_CLUSTERING, }; static int aha1542_isa_match(struct device *pdev, unsigned int ndev) { struct Scsi_Host *sh = aha1542_hw_init(&driver_template, pdev, ndev); 
if (!sh) return 0; dev_set_drvdata(pdev, sh); return 1; } static int aha1542_isa_remove(struct device *pdev, unsigned int ndev) { aha1542_release(dev_get_drvdata(pdev)); dev_set_drvdata(pdev, NULL); return 0; } static struct isa_driver aha1542_isa_driver = { .match = aha1542_isa_match, .remove = aha1542_isa_remove, .driver = { .name = "aha1542" }, }; static int isa_registered; #ifdef CONFIG_PNP static struct pnp_device_id aha1542_pnp_ids[] = { { .id = "ADP1542" }, { .id = "" } }; MODULE_DEVICE_TABLE(pnp, aha1542_pnp_ids); static int aha1542_pnp_probe(struct pnp_dev *pdev, const struct pnp_device_id *id) { int indx; struct Scsi_Host *sh; for (indx = 0; indx < ARRAY_SIZE(io); indx++) { if (io[indx]) continue; if (pnp_activate_dev(pdev) < 0) continue; io[indx] = pnp_port_start(pdev, 0); /* The card can be queried for its DMA, we have the DMA set up that is enough */ dev_info(&pdev->dev, "ISAPnP found an AHA1535 at I/O 0x%03X", io[indx]); } sh = aha1542_hw_init(&driver_template, &pdev->dev, indx); if (!sh) return -ENODEV; pnp_set_drvdata(pdev, sh); return 0; } static void aha1542_pnp_remove(struct pnp_dev *pdev) { aha1542_release(pnp_get_drvdata(pdev)); pnp_set_drvdata(pdev, NULL); } static struct pnp_driver aha1542_pnp_driver = { .name = "aha1542", .id_table = aha1542_pnp_ids, .probe = aha1542_pnp_probe, .remove = aha1542_pnp_remove, }; static int pnp_registered; #endif /* CONFIG_PNP */ static int __init aha1542_init(void) { int ret = 0; #ifdef CONFIG_PNP if (isapnp) { ret = pnp_register_driver(&aha1542_pnp_driver); if (!ret) pnp_registered = 1; } #endif ret = isa_register_driver(&aha1542_isa_driver, MAXBOARDS); if (!ret) isa_registered = 1; #ifdef CONFIG_PNP if (pnp_registered) ret = 0; #endif if (isa_registered) ret = 0; return ret; } static void __exit aha1542_exit(void) { #ifdef CONFIG_PNP if (pnp_registered) pnp_unregister_driver(&aha1542_pnp_driver); #endif if (isa_registered) isa_unregister_driver(&aha1542_isa_driver); } module_init(aha1542_init); 
module_exit(aha1542_exit);
gpl-2.0
Chairshot215/starship_kernel_moto_shamu
drivers/spi/spi-s3c24xx.c
2248
16709
/* * Copyright (c) 2006 Ben Dooks * Copyright 2006-2009 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/init.h> #include <linux/spinlock.h> #include <linux/workqueue.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/spi/spi.h> #include <linux/spi/spi_bitbang.h> #include <linux/spi/s3c24xx.h> #include <linux/module.h> #include <plat/regs-spi.h> #include <plat/fiq.h> #include <asm/fiq.h> #include "spi-s3c24xx-fiq.h" /** * s3c24xx_spi_devstate - per device data * @hz: Last frequency calculated for @sppre field. * @mode: Last mode setting for the @spcon field. * @spcon: Value to write to the SPCON register. * @sppre: Value to write to the SPPRE register. 
*/ struct s3c24xx_spi_devstate { unsigned int hz; unsigned int mode; u8 spcon; u8 sppre; }; enum spi_fiq_mode { FIQ_MODE_NONE = 0, FIQ_MODE_TX = 1, FIQ_MODE_RX = 2, FIQ_MODE_TXRX = 3, }; struct s3c24xx_spi { /* bitbang has to be first */ struct spi_bitbang bitbang; struct completion done; void __iomem *regs; int irq; int len; int count; struct fiq_handler fiq_handler; enum spi_fiq_mode fiq_mode; unsigned char fiq_inuse; unsigned char fiq_claimed; void (*set_cs)(struct s3c2410_spi_info *spi, int cs, int pol); /* data buffers */ const unsigned char *tx; unsigned char *rx; struct clk *clk; struct resource *ioarea; struct spi_master *master; struct spi_device *curdev; struct device *dev; struct s3c2410_spi_info *pdata; }; #define SPCON_DEFAULT (S3C2410_SPCON_MSTR | S3C2410_SPCON_SMOD_INT) #define SPPIN_DEFAULT (S3C2410_SPPIN_KEEP) static inline struct s3c24xx_spi *to_hw(struct spi_device *sdev) { return spi_master_get_devdata(sdev->master); } static void s3c24xx_spi_gpiocs(struct s3c2410_spi_info *spi, int cs, int pol) { gpio_set_value(spi->pin_cs, pol); } static void s3c24xx_spi_chipsel(struct spi_device *spi, int value) { struct s3c24xx_spi_devstate *cs = spi->controller_state; struct s3c24xx_spi *hw = to_hw(spi); unsigned int cspol = spi->mode & SPI_CS_HIGH ? 1 : 0; /* change the chipselect state and the state of the spi engine clock */ switch (value) { case BITBANG_CS_INACTIVE: hw->set_cs(hw->pdata, spi->chip_select, cspol^1); writeb(cs->spcon, hw->regs + S3C2410_SPCON); break; case BITBANG_CS_ACTIVE: writeb(cs->spcon | S3C2410_SPCON_ENSCK, hw->regs + S3C2410_SPCON); hw->set_cs(hw->pdata, spi->chip_select, cspol); break; } } static int s3c24xx_spi_update_state(struct spi_device *spi, struct spi_transfer *t) { struct s3c24xx_spi *hw = to_hw(spi); struct s3c24xx_spi_devstate *cs = spi->controller_state; unsigned int bpw; unsigned int hz; unsigned int div; unsigned long clk; bpw = t ? t->bits_per_word : spi->bits_per_word; hz = t ? 
t->speed_hz : spi->max_speed_hz; if (!bpw) bpw = 8; if (!hz) hz = spi->max_speed_hz; if (bpw != 8) { dev_err(&spi->dev, "invalid bits-per-word (%d)\n", bpw); return -EINVAL; } if (spi->mode != cs->mode) { u8 spcon = SPCON_DEFAULT | S3C2410_SPCON_ENSCK; if (spi->mode & SPI_CPHA) spcon |= S3C2410_SPCON_CPHA_FMTB; if (spi->mode & SPI_CPOL) spcon |= S3C2410_SPCON_CPOL_HIGH; cs->mode = spi->mode; cs->spcon = spcon; } if (cs->hz != hz) { clk = clk_get_rate(hw->clk); div = DIV_ROUND_UP(clk, hz * 2) - 1; if (div > 255) div = 255; dev_dbg(&spi->dev, "pre-scaler=%d (wanted %d, got %ld)\n", div, hz, clk / (2 * (div + 1))); cs->hz = hz; cs->sppre = div; } return 0; } static int s3c24xx_spi_setupxfer(struct spi_device *spi, struct spi_transfer *t) { struct s3c24xx_spi_devstate *cs = spi->controller_state; struct s3c24xx_spi *hw = to_hw(spi); int ret; ret = s3c24xx_spi_update_state(spi, t); if (!ret) writeb(cs->sppre, hw->regs + S3C2410_SPPRE); return ret; } static int s3c24xx_spi_setup(struct spi_device *spi) { struct s3c24xx_spi_devstate *cs = spi->controller_state; struct s3c24xx_spi *hw = to_hw(spi); int ret; /* allocate settings on the first call */ if (!cs) { cs = kzalloc(sizeof(struct s3c24xx_spi_devstate), GFP_KERNEL); if (!cs) { dev_err(&spi->dev, "no memory for controller state\n"); return -ENOMEM; } cs->spcon = SPCON_DEFAULT; cs->hz = -1; spi->controller_state = cs; } /* initialise the state from the device */ ret = s3c24xx_spi_update_state(spi, NULL); if (ret) return ret; spin_lock(&hw->bitbang.lock); if (!hw->bitbang.busy) { hw->bitbang.chipselect(spi, BITBANG_CS_INACTIVE); /* need to ndelay for 0.5 clocktick ? */ } spin_unlock(&hw->bitbang.lock); return 0; } static void s3c24xx_spi_cleanup(struct spi_device *spi) { kfree(spi->controller_state); } static inline unsigned int hw_txbyte(struct s3c24xx_spi *hw, int count) { return hw->tx ? hw->tx[count] : 0; } #ifdef CONFIG_SPI_S3C24XX_FIQ /* Support for FIQ based pseudo-DMA to improve the transfer speed. 
* * This code uses the assembly helper in spi_s3c24xx_spi.S which is * used by the FIQ core to move data between main memory and the peripheral * block. Since this is code running on the processor, there is no problem * with cache coherency of the buffers, so we can use any buffer we like. */ /** * struct spi_fiq_code - FIQ code and header * @length: The length of the code fragment, excluding this header. * @ack_offset: The offset from @data to the word to place the IRQ ACK bit at. * @data: The code itself to install as a FIQ handler. */ struct spi_fiq_code { u32 length; u32 ack_offset; u8 data[0]; }; extern struct spi_fiq_code s3c24xx_spi_fiq_txrx; extern struct spi_fiq_code s3c24xx_spi_fiq_tx; extern struct spi_fiq_code s3c24xx_spi_fiq_rx; /** * ack_bit - turn IRQ into IRQ acknowledgement bit * @irq: The interrupt number * * Returns the bit to write to the interrupt acknowledge register. */ static inline u32 ack_bit(unsigned int irq) { return 1 << (irq - IRQ_EINT0); } /** * s3c24xx_spi_tryfiq - attempt to claim and setup FIQ for transfer * @hw: The hardware state. * * Claim the FIQ handler (only one can be active at any one time) and * then setup the correct transfer code for this transfer. * * This call updates all the necessary state information if successful, * so the caller does not need to do anything more than start the transfer * as normal, since the IRQ will have been re-routed to the FIQ handler. 
*/ void s3c24xx_spi_tryfiq(struct s3c24xx_spi *hw) { struct pt_regs regs; enum spi_fiq_mode mode; struct spi_fiq_code *code; int ret; if (!hw->fiq_claimed) { /* try and claim fiq if we haven't got it, and if not * then return and simply use another transfer method */ ret = claim_fiq(&hw->fiq_handler); if (ret) return; } if (hw->tx && !hw->rx) mode = FIQ_MODE_TX; else if (hw->rx && !hw->tx) mode = FIQ_MODE_RX; else mode = FIQ_MODE_TXRX; regs.uregs[fiq_rspi] = (long)hw->regs; regs.uregs[fiq_rrx] = (long)hw->rx; regs.uregs[fiq_rtx] = (long)hw->tx + 1; regs.uregs[fiq_rcount] = hw->len - 1; regs.uregs[fiq_rirq] = (long)S3C24XX_VA_IRQ; set_fiq_regs(&regs); if (hw->fiq_mode != mode) { u32 *ack_ptr; hw->fiq_mode = mode; switch (mode) { case FIQ_MODE_TX: code = &s3c24xx_spi_fiq_tx; break; case FIQ_MODE_RX: code = &s3c24xx_spi_fiq_rx; break; case FIQ_MODE_TXRX: code = &s3c24xx_spi_fiq_txrx; break; default: code = NULL; } BUG_ON(!code); ack_ptr = (u32 *)&code->data[code->ack_offset]; *ack_ptr = ack_bit(hw->irq); set_fiq_handler(&code->data, code->length); } s3c24xx_set_fiq(hw->irq, true); hw->fiq_mode = mode; hw->fiq_inuse = 1; } /** * s3c24xx_spi_fiqop - FIQ core code callback * @pw: Data registered with the handler * @release: Whether this is a release or a return. * * Called by the FIQ code when another module wants to use the FIQ, so * return whether we are currently using this or not and then update our * internal state. */ static int s3c24xx_spi_fiqop(void *pw, int release) { struct s3c24xx_spi *hw = pw; int ret = 0; if (release) { if (hw->fiq_inuse) ret = -EBUSY; /* note, we do not need to unroute the FIQ, as the FIQ * vector code de-routes it to signal the end of transfer */ hw->fiq_mode = FIQ_MODE_NONE; hw->fiq_claimed = 0; } else { hw->fiq_claimed = 1; } return ret; } /** * s3c24xx_spi_initfiq - setup the information for the FIQ core * @hw: The hardware state. * * Setup the fiq_handler block to pass to the FIQ core. 
*/ static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *hw) { hw->fiq_handler.dev_id = hw; hw->fiq_handler.name = dev_name(hw->dev); hw->fiq_handler.fiq_op = s3c24xx_spi_fiqop; } /** * s3c24xx_spi_usefiq - return if we should be using FIQ. * @hw: The hardware state. * * Return true if the platform data specifies whether this channel is * allowed to use the FIQ. */ static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *hw) { return hw->pdata->use_fiq; } /** * s3c24xx_spi_usingfiq - return if channel is using FIQ * @spi: The hardware state. * * Return whether the channel is currently using the FIQ (separate from * whether the FIQ is claimed). */ static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *spi) { return spi->fiq_inuse; } #else static inline void s3c24xx_spi_initfiq(struct s3c24xx_spi *s) { } static inline void s3c24xx_spi_tryfiq(struct s3c24xx_spi *s) { } static inline bool s3c24xx_spi_usefiq(struct s3c24xx_spi *s) { return false; } static inline bool s3c24xx_spi_usingfiq(struct s3c24xx_spi *s) { return false; } #endif /* CONFIG_SPI_S3C24XX_FIQ */ static int s3c24xx_spi_txrx(struct spi_device *spi, struct spi_transfer *t) { struct s3c24xx_spi *hw = to_hw(spi); hw->tx = t->tx_buf; hw->rx = t->rx_buf; hw->len = t->len; hw->count = 0; init_completion(&hw->done); hw->fiq_inuse = 0; if (s3c24xx_spi_usefiq(hw) && t->len >= 3) s3c24xx_spi_tryfiq(hw); /* send the first byte */ writeb(hw_txbyte(hw, 0), hw->regs + S3C2410_SPTDAT); wait_for_completion(&hw->done); return hw->count; } static irqreturn_t s3c24xx_spi_irq(int irq, void *dev) { struct s3c24xx_spi *hw = dev; unsigned int spsta = readb(hw->regs + S3C2410_SPSTA); unsigned int count = hw->count; if (spsta & S3C2410_SPSTA_DCOL) { dev_dbg(hw->dev, "data-collision\n"); complete(&hw->done); goto irq_done; } if (!(spsta & S3C2410_SPSTA_READY)) { dev_dbg(hw->dev, "spi not ready for tx?\n"); complete(&hw->done); goto irq_done; } if (!s3c24xx_spi_usingfiq(hw)) { hw->count++; if (hw->rx) hw->rx[count] = 
readb(hw->regs + S3C2410_SPRDAT); count++; if (count < hw->len) writeb(hw_txbyte(hw, count), hw->regs + S3C2410_SPTDAT); else complete(&hw->done); } else { hw->count = hw->len; hw->fiq_inuse = 0; if (hw->rx) hw->rx[hw->len-1] = readb(hw->regs + S3C2410_SPRDAT); complete(&hw->done); } irq_done: return IRQ_HANDLED; } static void s3c24xx_spi_initialsetup(struct s3c24xx_spi *hw) { /* for the moment, permanently enable the clock */ clk_enable(hw->clk); /* program defaults into the registers */ writeb(0xff, hw->regs + S3C2410_SPPRE); writeb(SPPIN_DEFAULT, hw->regs + S3C2410_SPPIN); writeb(SPCON_DEFAULT, hw->regs + S3C2410_SPCON); if (hw->pdata) { if (hw->set_cs == s3c24xx_spi_gpiocs) gpio_direction_output(hw->pdata->pin_cs, 1); if (hw->pdata->gpio_setup) hw->pdata->gpio_setup(hw->pdata, 1); } } static int s3c24xx_spi_probe(struct platform_device *pdev) { struct s3c2410_spi_info *pdata; struct s3c24xx_spi *hw; struct spi_master *master; struct resource *res; int err = 0; master = spi_alloc_master(&pdev->dev, sizeof(struct s3c24xx_spi)); if (master == NULL) { dev_err(&pdev->dev, "No memory for spi_master\n"); err = -ENOMEM; goto err_nomem; } hw = spi_master_get_devdata(master); memset(hw, 0, sizeof(struct s3c24xx_spi)); hw->master = spi_master_get(master); hw->pdata = pdata = pdev->dev.platform_data; hw->dev = &pdev->dev; if (pdata == NULL) { dev_err(&pdev->dev, "No platform data supplied\n"); err = -ENOENT; goto err_no_pdata; } platform_set_drvdata(pdev, hw); init_completion(&hw->done); /* initialise fiq handler */ s3c24xx_spi_initfiq(hw); /* setup the master state. 
*/ /* the spi->mode bits understood by this driver: */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH; master->num_chipselect = hw->pdata->num_cs; master->bus_num = pdata->bus_num; /* setup the state for the bitbang driver */ hw->bitbang.master = hw->master; hw->bitbang.setup_transfer = s3c24xx_spi_setupxfer; hw->bitbang.chipselect = s3c24xx_spi_chipsel; hw->bitbang.txrx_bufs = s3c24xx_spi_txrx; hw->master->setup = s3c24xx_spi_setup; hw->master->cleanup = s3c24xx_spi_cleanup; dev_dbg(hw->dev, "bitbang at %p\n", &hw->bitbang); /* find and map our resources */ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); err = -ENOENT; goto err_no_iores; } hw->ioarea = request_mem_region(res->start, resource_size(res), pdev->name); if (hw->ioarea == NULL) { dev_err(&pdev->dev, "Cannot reserve region\n"); err = -ENXIO; goto err_no_iores; } hw->regs = ioremap(res->start, resource_size(res)); if (hw->regs == NULL) { dev_err(&pdev->dev, "Cannot map IO\n"); err = -ENXIO; goto err_no_iomap; } hw->irq = platform_get_irq(pdev, 0); if (hw->irq < 0) { dev_err(&pdev->dev, "No IRQ specified\n"); err = -ENOENT; goto err_no_irq; } err = request_irq(hw->irq, s3c24xx_spi_irq, 0, pdev->name, hw); if (err) { dev_err(&pdev->dev, "Cannot claim IRQ\n"); goto err_no_irq; } hw->clk = clk_get(&pdev->dev, "spi"); if (IS_ERR(hw->clk)) { dev_err(&pdev->dev, "No clock for device\n"); err = PTR_ERR(hw->clk); goto err_no_clk; } /* setup any gpio we can */ if (!pdata->set_cs) { if (pdata->pin_cs < 0) { dev_err(&pdev->dev, "No chipselect pin\n"); err = -EINVAL; goto err_register; } err = gpio_request(pdata->pin_cs, dev_name(&pdev->dev)); if (err) { dev_err(&pdev->dev, "Failed to get gpio for cs\n"); goto err_register; } hw->set_cs = s3c24xx_spi_gpiocs; gpio_direction_output(pdata->pin_cs, 1); } else hw->set_cs = pdata->set_cs; s3c24xx_spi_initialsetup(hw); /* register our spi controller */ err = spi_bitbang_start(&hw->bitbang); 
if (err) { dev_err(&pdev->dev, "Failed to register SPI master\n"); goto err_register; } return 0; err_register: if (hw->set_cs == s3c24xx_spi_gpiocs) gpio_free(pdata->pin_cs); clk_disable(hw->clk); clk_put(hw->clk); err_no_clk: free_irq(hw->irq, hw); err_no_irq: iounmap(hw->regs); err_no_iomap: release_resource(hw->ioarea); kfree(hw->ioarea); err_no_iores: err_no_pdata: spi_master_put(hw->master); err_nomem: return err; } static int s3c24xx_spi_remove(struct platform_device *dev) { struct s3c24xx_spi *hw = platform_get_drvdata(dev); platform_set_drvdata(dev, NULL); spi_bitbang_stop(&hw->bitbang); clk_disable(hw->clk); clk_put(hw->clk); free_irq(hw->irq, hw); iounmap(hw->regs); if (hw->set_cs == s3c24xx_spi_gpiocs) gpio_free(hw->pdata->pin_cs); release_resource(hw->ioarea); kfree(hw->ioarea); spi_master_put(hw->master); return 0; } #ifdef CONFIG_PM static int s3c24xx_spi_suspend(struct device *dev) { struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); if (hw->pdata && hw->pdata->gpio_setup) hw->pdata->gpio_setup(hw->pdata, 0); clk_disable(hw->clk); return 0; } static int s3c24xx_spi_resume(struct device *dev) { struct s3c24xx_spi *hw = platform_get_drvdata(to_platform_device(dev)); s3c24xx_spi_initialsetup(hw); return 0; } static const struct dev_pm_ops s3c24xx_spi_pmops = { .suspend = s3c24xx_spi_suspend, .resume = s3c24xx_spi_resume, }; #define S3C24XX_SPI_PMOPS &s3c24xx_spi_pmops #else #define S3C24XX_SPI_PMOPS NULL #endif /* CONFIG_PM */ MODULE_ALIAS("platform:s3c2410-spi"); static struct platform_driver s3c24xx_spi_driver = { .probe = s3c24xx_spi_probe, .remove = s3c24xx_spi_remove, .driver = { .name = "s3c2410-spi", .owner = THIS_MODULE, .pm = S3C24XX_SPI_PMOPS, }, }; module_platform_driver(s3c24xx_spi_driver); MODULE_DESCRIPTION("S3C24XX SPI Driver"); MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); MODULE_LICENSE("GPL");
gpl-2.0
michaelspacejames/android_kernel_cyanogen_msm8916
drivers/power/avs/smartreflex.c
2248
30013
/* * OMAP SmartReflex Voltage Control * * Author: Thara Gopinath <thara@ti.com> * * Copyright (C) 2012 Texas Instruments, Inc. * Thara Gopinath <thara@ti.com> * * Copyright (C) 2008 Nokia Corporation * Kalle Jokiniemi * * Copyright (C) 2007 Texas Instruments, Inc. * Lesly A M <x0080970@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/power/smartreflex.h> #define SMARTREFLEX_NAME_LEN 16 #define NVALUE_NAME_LEN 40 #define SR_DISABLE_TIMEOUT 200 /* sr_list contains all the instances of smartreflex module */ static LIST_HEAD(sr_list); static struct omap_sr_class_data *sr_class; static struct omap_sr_pmic_data *sr_pmic_data; static struct dentry *sr_dbg_dir; static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value) { __raw_writel(value, (sr->base + offset)); } static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask, u32 value) { u32 reg_val; /* * Smartreflex error config register is special as it contains * certain status bits which if written a 1 into means a clear * of those bits. So in order to make sure no accidental write of * 1 happens to those status bits, do a clear of them in the read * value. This mean this API doesn't rewrite values in these bits * if they are currently set, but does allow the caller to write * those bits. 
*/ if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1) mask |= ERRCONFIG_STATUS_V1_MASK; else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2) mask |= ERRCONFIG_VPBOUNDINTST_V2; reg_val = __raw_readl(sr->base + offset); reg_val &= ~mask; value &= mask; reg_val |= value; __raw_writel(reg_val, (sr->base + offset)); } static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset) { return __raw_readl(sr->base + offset); } static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm) { struct omap_sr *sr_info; if (!voltdm) { pr_err("%s: Null voltage domain passed!\n", __func__); return ERR_PTR(-EINVAL); } list_for_each_entry(sr_info, &sr_list, node) { if (voltdm == sr_info->voltdm) return sr_info; } return ERR_PTR(-ENODATA); } static irqreturn_t sr_interrupt(int irq, void *data) { struct omap_sr *sr_info = data; u32 status = 0; switch (sr_info->ip_type) { case SR_TYPE_V1: /* Read the status bits */ status = sr_read_reg(sr_info, ERRCONFIG_V1); /* Clear them by writing back */ sr_write_reg(sr_info, ERRCONFIG_V1, status); break; case SR_TYPE_V2: /* Read the status bits */ status = sr_read_reg(sr_info, IRQSTATUS); /* Clear them by writing back */ sr_write_reg(sr_info, IRQSTATUS, status); break; default: dev_err(&sr_info->pdev->dev, "UNKNOWN IP type %d\n", sr_info->ip_type); return IRQ_NONE; } if (sr_class->notify) sr_class->notify(sr_info, status); return IRQ_HANDLED; } static void sr_set_clk_length(struct omap_sr *sr) { struct clk *fck; u32 fclk_speed; fck = clk_get(&sr->pdev->dev, "fck"); if (IS_ERR(fck)) { dev_err(&sr->pdev->dev, "%s: unable to get fck for device %s\n", __func__, dev_name(&sr->pdev->dev)); return; } fclk_speed = clk_get_rate(fck); clk_put(fck); switch (fclk_speed) { case 12000000: sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK; break; case 13000000: sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK; break; case 19200000: sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK; break; case 26000000: sr->clk_length = SRCLKLENGTH_26MHZ_SYSCLK; break; case 
38400000: sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK; break; default: dev_err(&sr->pdev->dev, "%s: Invalid fclk rate: %d\n", __func__, fclk_speed); break; } } static void sr_start_vddautocomp(struct omap_sr *sr) { if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n", __func__); return; } if (!sr_class->enable(sr)) sr->autocomp_active = true; } static void sr_stop_vddautocomp(struct omap_sr *sr) { if (!sr_class || !(sr_class->disable)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n", __func__); return; } if (sr->autocomp_active) { sr_class->disable(sr, 1); sr->autocomp_active = false; } } /* * This function handles the intializations which have to be done * only when both sr device and class driver regiter has * completed. This will be attempted to be called from both sr class * driver register and sr device intializtion API's. Only one call * will ultimately succeed. * * Currently this function registers interrupt handler for a particular SR * if smartreflex class driver is already registered and has * requested for interrupts and the SR interrupt line in present. */ static int sr_late_init(struct omap_sr *sr_info) { struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data; struct resource *mem; int ret = 0; if (sr_class->notify && sr_class->notify_flags && sr_info->irq) { ret = request_irq(sr_info->irq, sr_interrupt, 0, sr_info->name, sr_info); if (ret) goto error; disable_irq(sr_info->irq); } if (pdata && pdata->enable_on_init) sr_start_vddautocomp(sr_info); return ret; error: iounmap(sr_info->base); mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); list_del(&sr_info->node); dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" "interrupt handler. 
Smartreflex will" "not function as desired\n", __func__); kfree(sr_info); return ret; } static void sr_v1_disable(struct omap_sr *sr) { int timeout = 0; int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST | ERRCONFIG_MCUBOUNDINTST; /* Enable MCUDisableAcknowledge interrupt */ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN); /* SRCONFIG - disable SR */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0); /* Disable all other SR interrupts and clear the status as needed */ if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1) errconf_val |= ERRCONFIG_VPBOUNDINTST_V1; sr_modify_reg(sr, ERRCONFIG_V1, (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1), errconf_val); /* * Wait for SR to be disabled. * wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us. */ sr_test_cond_timeout((sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT, timeout); if (timeout >= SR_DISABLE_TIMEOUT) dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n", __func__); /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTST); } static void sr_v2_disable(struct omap_sr *sr) { int timeout = 0; /* Enable MCUDisableAcknowledge interrupt */ sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT); /* SRCONFIG - disable SR */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0); /* * Disable all other SR interrupts and clear the status * write to status register ONLY on need basis - only if status * is set. 
*/ if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2) sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2, ERRCONFIG_VPBOUNDINTST_V2); else sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2, 0x0); sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT | IRQENABLE_MCUBOUNDSINT)); sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT | IRQSTATUS_MCBOUNDSINT)); /* * Wait for SR to be disabled. * wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us. */ sr_test_cond_timeout((sr_read_reg(sr, IRQSTATUS) & IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT, timeout); if (timeout >= SR_DISABLE_TIMEOUT) dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n", __func__); /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */ sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT); sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT); } static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row( struct omap_sr *sr, u32 efuse_offs) { int i; if (!sr->nvalue_table) { dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n", __func__); return NULL; } for (i = 0; i < sr->nvalue_count; i++) { if (sr->nvalue_table[i].efuse_offs == efuse_offs) return &sr->nvalue_table[i]; } return NULL; } /* Public Functions */ /** * sr_configure_errgen() - Configures the smrtreflex to perform AVS using the * error generator module. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the smartreflex class driver to * configure the error generator module inside the smartreflex module. * SR settings if using the ERROR module inside Smartreflex. * SR CLASS 3 by default uses only the ERROR module where as * SR CLASS 2 can choose between ERROR module and MINMAXAVG * module. Returns 0 on success and error value in case of failure. 
*/ int sr_configure_errgen(struct voltagedomain *voltdm) { u32 sr_config, sr_errconfig, errconfig_offs; u32 vpboundint_en, vpboundint_st; u32 senp_en = 0, senn_en = 0; u8 senp_shift, senn_shift; struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return PTR_ERR(sr); } if (!sr->clk_length) sr_set_clk_length(sr); senp_en = sr->senp_mod; senn_en = sr->senn_mod; sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) | SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN; switch (sr->ip_type) { case SR_TYPE_V1: sr_config |= SRCONFIG_DELAYCTRL; senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT; senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT; errconfig_offs = ERRCONFIG_V1; vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1; vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1; break; case SR_TYPE_V2: senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT; senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT; errconfig_offs = ERRCONFIG_V2; vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2; vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2; break; default: dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex" "module without specifying the ip\n", __func__); return -EINVAL; } sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift)); sr_write_reg(sr, SRCONFIG, sr_config); sr_errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) | (sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) | (sr->err_minlimit << ERRCONFIG_ERRMINLIMIT_SHIFT); sr_modify_reg(sr, errconfig_offs, (SR_ERRWEIGHT_MASK | SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK), sr_errconfig); /* Enabling the interrupts if the ERROR module is used */ sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st), vpboundint_en); return 0; } /** * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component * @voltdm: VDD pointer to which the SR module to be configured belongs to. 
* * This API is to be called from the smartreflex class driver to * disable the error generator module inside the smartreflex module. * * Returns 0 on success and error value in case of failure. */ int sr_disable_errgen(struct voltagedomain *voltdm) { u32 errconfig_offs; u32 vpboundint_en, vpboundint_st; struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return PTR_ERR(sr); } switch (sr->ip_type) { case SR_TYPE_V1: errconfig_offs = ERRCONFIG_V1; vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1; vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1; break; case SR_TYPE_V2: errconfig_offs = ERRCONFIG_V2; vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2; vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2; break; default: dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex" "module without specifying the ip\n", __func__); return -EINVAL; } /* Disable the interrupts of ERROR module */ sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0); /* Disable the Sensor and errorgen */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0); return 0; } /** * sr_configure_minmax() - Configures the smrtreflex to perform AVS using the * minmaxavg module. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the smartreflex class driver to * configure the minmaxavg module inside the smartreflex module. * SR settings if using the ERROR module inside Smartreflex. * SR CLASS 3 by default uses only the ERROR module where as * SR CLASS 2 can choose between ERROR module and MINMAXAVG * module. Returns 0 on success and error value in case of failure. 
*/ int sr_configure_minmax(struct voltagedomain *voltdm) { u32 sr_config, sr_avgwt; u32 senp_en = 0, senn_en = 0; u8 senp_shift, senn_shift; struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return PTR_ERR(sr); } if (!sr->clk_length) sr_set_clk_length(sr); senp_en = sr->senp_mod; senn_en = sr->senn_mod; sr_config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) | SRCONFIG_SENENABLE | (sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT); switch (sr->ip_type) { case SR_TYPE_V1: sr_config |= SRCONFIG_DELAYCTRL; senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT; senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT; break; case SR_TYPE_V2: senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT; senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT; break; default: dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex" "module without specifying the ip\n", __func__); return -EINVAL; } sr_config |= ((senn_en << senn_shift) | (senp_en << senp_shift)); sr_write_reg(sr, SRCONFIG, sr_config); sr_avgwt = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) | (sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT); sr_write_reg(sr, AVGWEIGHT, sr_avgwt); /* * Enabling the interrupts if MINMAXAVG module is used. 
* TODO: check if all the interrupts are mandatory */ switch (sr->ip_type) { case SR_TYPE_V1: sr_modify_reg(sr, ERRCONFIG_V1, (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUBOUNDINTEN), (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST | ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST)); break; case SR_TYPE_V2: sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT | IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT); sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT | IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT); break; default: dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex" "module without specifying the ip\n", __func__); return -EINVAL; } return 0; } /** * sr_enable() - Enables the smartreflex module. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * @volt: The voltage at which the Voltage domain associated with * the smartreflex module is operating at. * This is required only to program the correct Ntarget value. * * This API is to be called from the smartreflex class driver to * enable a smartreflex module. Returns 0 on success. Returns error * value if the voltage passed is wrong or if ntarget value is wrong. 
*/ int sr_enable(struct voltagedomain *voltdm, unsigned long volt) { struct omap_volt_data *volt_data; struct omap_sr *sr = _sr_lookup(voltdm); struct omap_sr_nvalue_table *nvalue_row; int ret; if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return PTR_ERR(sr); } volt_data = omap_voltage_get_voltdata(sr->voltdm, volt); if (IS_ERR(volt_data)) { dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table" "for nominal voltage %ld\n", __func__, volt); return PTR_ERR(volt_data); } nvalue_row = sr_retrieve_nvalue_row(sr, volt_data->sr_efuse_offs); if (!nvalue_row) { dev_warn(&sr->pdev->dev, "%s: failure getting SR data for this voltage %ld\n", __func__, volt); return -ENODATA; } /* errminlimit is opp dependent and hence linked to voltage */ sr->err_minlimit = nvalue_row->errminlimit; pm_runtime_get_sync(&sr->pdev->dev); /* Check if SR is already enabled. If yes do nothing */ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) return 0; /* Configure SR */ ret = sr_class->configure(sr); if (ret) return ret; sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue); /* SRCONFIG - enable SR */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE); return 0; } /** * sr_disable() - Disables the smartreflex module. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the smartreflex class driver to * disable a smartreflex module. */ void sr_disable(struct voltagedomain *voltdm) { struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return; } /* Check if SR clocks are already disabled. If yes do nothing */ if (pm_runtime_suspended(&sr->pdev->dev)) return; /* * Disable SR if only it is indeed enabled. Else just * disable the clocks. 
*/ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) { switch (sr->ip_type) { case SR_TYPE_V1: sr_v1_disable(sr); break; case SR_TYPE_V2: sr_v2_disable(sr); break; default: dev_err(&sr->pdev->dev, "UNKNOWN IP type %d\n", sr->ip_type); } } pm_runtime_put_sync_suspend(&sr->pdev->dev); } /** * sr_register_class() - API to register a smartreflex class parameters. * @class_data: The structure containing various sr class specific data. * * This API is to be called by the smartreflex class driver to register itself * with the smartreflex driver during init. Returns 0 on success else the * error value. */ int sr_register_class(struct omap_sr_class_data *class_data) { struct omap_sr *sr_info; if (!class_data) { pr_warning("%s:, Smartreflex class data passed is NULL\n", __func__); return -EINVAL; } if (sr_class) { pr_warning("%s: Smartreflex class driver already registered\n", __func__); return -EBUSY; } sr_class = class_data; /* * Call into late init to do intializations that require * both sr driver and sr class driver to be initiallized. */ list_for_each_entry(sr_info, &sr_list, node) sr_late_init(sr_info); return 0; } /** * omap_sr_enable() - API to enable SR clocks and to call into the * registered smartreflex class enable API. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the kernel in order to enable * a particular smartreflex module. This API will do the initial * configurations to turn on the smartreflex module and in turn call * into the registered smartreflex class enable API. 
*/ void omap_sr_enable(struct voltagedomain *voltdm) { struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return; } if (!sr->autocomp_active) return; if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" "registered\n", __func__); return; } sr_class->enable(sr); } /** * omap_sr_disable() - API to disable SR without resetting the voltage * processor voltage * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the kernel in order to disable * a particular smartreflex module. This API will in turn call * into the registered smartreflex class disable API. This API will tell * the smartreflex class disable not to reset the VP voltage after * disabling smartreflex. */ void omap_sr_disable(struct voltagedomain *voltdm) { struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return; } if (!sr->autocomp_active) return; if (!sr_class || !(sr_class->disable)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" "registered\n", __func__); return; } sr_class->disable(sr, 0); } /** * omap_sr_disable_reset_volt() - API to disable SR and reset the * voltage processor voltage * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the kernel in order to disable * a particular smartreflex module. This API will in turn call * into the registered smartreflex class disable API. This API will tell * the smartreflex class disable to reset the VP voltage after * disabling smartreflex. 
*/ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm) { struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return; } if (!sr->autocomp_active) return; if (!sr_class || !(sr_class->disable)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" "registered\n", __func__); return; } sr_class->disable(sr, 1); } /** * omap_sr_register_pmic() - API to register pmic specific info. * @pmic_data: The structure containing pmic specific data. * * This API is to be called from the PMIC specific code to register with * smartreflex driver pmic specific info. Currently the only info required * is the smartreflex init on the PMIC side. */ void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data) { if (!pmic_data) { pr_warning("%s: Trying to register NULL PMIC data structure" "with smartreflex\n", __func__); return; } sr_pmic_data = pmic_data; } /* PM Debug FS entries to enable and disable smartreflex. */ static int omap_sr_autocomp_show(void *data, u64 *val) { struct omap_sr *sr_info = data; if (!sr_info) { pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } *val = sr_info->autocomp_active; return 0; } static int omap_sr_autocomp_store(void *data, u64 val) { struct omap_sr *sr_info = data; if (!sr_info) { pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } /* Sanity check */ if (val > 1) { pr_warning("%s: Invalid argument %lld\n", __func__, val); return -EINVAL; } /* control enable/disable only if there is a delta in value */ if (sr_info->autocomp_active != val) { if (!val) sr_stop_vddautocomp(sr_info); else sr_start_vddautocomp(sr_info); } return 0; } DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show, omap_sr_autocomp_store, "%llu\n"); static int __init omap_sr_probe(struct platform_device *pdev) { struct omap_sr *sr_info; struct omap_sr_data *pdata = pdev->dev.platform_data; struct resource *mem, *irq; struct dentry *nvalue_dir; 
int i, ret = 0; sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL); if (!sr_info) { dev_err(&pdev->dev, "%s: unable to allocate sr_info\n", __func__); return -ENOMEM; } platform_set_drvdata(pdev, sr_info); if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); ret = -EINVAL; goto err_free_devinfo; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "%s: no mem resource\n", __func__); ret = -ENODEV; goto err_free_devinfo; } mem = request_mem_region(mem->start, resource_size(mem), dev_name(&pdev->dev)); if (!mem) { dev_err(&pdev->dev, "%s: no mem region\n", __func__); ret = -EBUSY; goto err_free_devinfo; } irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); pm_runtime_enable(&pdev->dev); pm_runtime_irq_safe(&pdev->dev); sr_info->name = kasprintf(GFP_KERNEL, "%s", pdata->name); if (!sr_info->name) { dev_err(&pdev->dev, "%s: Unable to alloc SR instance name\n", __func__); ret = -ENOMEM; goto err_release_region; } sr_info->pdev = pdev; sr_info->srid = pdev->id; sr_info->voltdm = pdata->voltdm; sr_info->nvalue_table = pdata->nvalue_table; sr_info->nvalue_count = pdata->nvalue_count; sr_info->senn_mod = pdata->senn_mod; sr_info->senp_mod = pdata->senp_mod; sr_info->err_weight = pdata->err_weight; sr_info->err_maxlimit = pdata->err_maxlimit; sr_info->accum_data = pdata->accum_data; sr_info->senn_avgweight = pdata->senn_avgweight; sr_info->senp_avgweight = pdata->senp_avgweight; sr_info->autocomp_active = false; sr_info->ip_type = pdata->ip_type; sr_info->base = ioremap(mem->start, resource_size(mem)); if (!sr_info->base) { dev_err(&pdev->dev, "%s: ioremap fail\n", __func__); ret = -ENOMEM; goto err_free_name; } if (irq) sr_info->irq = irq->start; sr_set_clk_length(sr_info); list_add(&sr_info->node, &sr_list); /* * Call into late init to do intializations that require * both sr driver and sr class driver to be initiallized. 
*/ if (sr_class) { ret = sr_late_init(sr_info); if (ret) { pr_warning("%s: Error in SR late init\n", __func__); goto err_iounmap; } } dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__); if (!sr_dbg_dir) { sr_dbg_dir = debugfs_create_dir("smartreflex", NULL); if (IS_ERR_OR_NULL(sr_dbg_dir)) { ret = PTR_ERR(sr_dbg_dir); pr_err("%s:sr debugfs dir creation failed(%d)\n", __func__, ret); goto err_iounmap; } } sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir); if (IS_ERR_OR_NULL(sr_info->dbg_dir)) { dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", __func__); ret = PTR_ERR(sr_info->dbg_dir); goto err_debugfs; } (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir, (void *)sr_info, &pm_sr_fops); (void) debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir, &sr_info->err_weight); (void) debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir, &sr_info->err_maxlimit); nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir); if (IS_ERR_OR_NULL(nvalue_dir)) { dev_err(&pdev->dev, "%s: Unable to create debugfs directory" "for n-values\n", __func__); ret = PTR_ERR(nvalue_dir); goto err_debugfs; } if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) { dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. 
Cannot create debugfs entries for n-values\n", __func__, sr_info->name); ret = -ENODATA; goto err_debugfs; } for (i = 0; i < sr_info->nvalue_count; i++) { char name[NVALUE_NAME_LEN + 1]; snprintf(name, sizeof(name), "volt_%lu", sr_info->nvalue_table[i].volt_nominal); (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, &(sr_info->nvalue_table[i].nvalue)); snprintf(name, sizeof(name), "errminlimit_%lu", sr_info->nvalue_table[i].volt_nominal); (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, &(sr_info->nvalue_table[i].errminlimit)); } return ret; err_debugfs: debugfs_remove_recursive(sr_info->dbg_dir); err_iounmap: list_del(&sr_info->node); iounmap(sr_info->base); err_free_name: kfree(sr_info->name); err_release_region: release_mem_region(mem->start, resource_size(mem)); err_free_devinfo: kfree(sr_info); return ret; } static int omap_sr_remove(struct platform_device *pdev) { struct omap_sr_data *pdata = pdev->dev.platform_data; struct omap_sr *sr_info; struct resource *mem; if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); return -EINVAL; } sr_info = _sr_lookup(pdata->voltdm); if (IS_ERR(sr_info)) { dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", __func__); return PTR_ERR(sr_info); } if (sr_info->autocomp_active) sr_stop_vddautocomp(sr_info); if (sr_info->dbg_dir) debugfs_remove_recursive(sr_info->dbg_dir); list_del(&sr_info->node); iounmap(sr_info->base); kfree(sr_info->name); kfree(sr_info); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); return 0; } static void omap_sr_shutdown(struct platform_device *pdev) { struct omap_sr_data *pdata = pdev->dev.platform_data; struct omap_sr *sr_info; if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); return; } sr_info = _sr_lookup(pdata->voltdm); if (IS_ERR(sr_info)) { dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", __func__); return; } if (sr_info->autocomp_active) 
sr_stop_vddautocomp(sr_info); return; } static struct platform_driver smartreflex_driver = { .remove = omap_sr_remove, .shutdown = omap_sr_shutdown, .driver = { .name = "smartreflex", }, }; static int __init sr_init(void) { int ret = 0; /* * sr_init is a late init. If by then a pmic specific API is not * registered either there is no need for anything to be done on * the PMIC side or somebody has forgotten to register a PMIC * handler. Warn for the second condition. */ if (sr_pmic_data && sr_pmic_data->sr_pmic_init) sr_pmic_data->sr_pmic_init(); else pr_warning("%s: No PMIC hook to init smartreflex\n", __func__); ret = platform_driver_probe(&smartreflex_driver, omap_sr_probe); if (ret) { pr_err("%s: platform driver register failed for SR\n", __func__); return ret; } return 0; } late_initcall(sr_init); static void __exit sr_exit(void) { platform_driver_unregister(&smartreflex_driver); } module_exit(sr_exit); MODULE_DESCRIPTION("OMAP Smartreflex Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Texas Instruments Inc");
gpl-2.0
jetonbacaj/SomeKernel_920P_OL1
sound/soc/codecs/wm9705.c
2248
12250
/* * wm9705.c -- ALSA Soc WM9705 codec support * * Copyright 2008 Ian Molton <spyro@f2s.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; Version 2 of the License only. * */ #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include <sound/soc.h> #include "wm9705.h" /* * WM9705 register cache */ static const u16 wm9705_reg[] = { 0x6150, 0x8000, 0x8000, 0x8000, /* 0x0 */ 0x0000, 0x8000, 0x8008, 0x8008, /* 0x8 */ 0x8808, 0x8808, 0x8808, 0x8808, /* 0x10 */ 0x8808, 0x0000, 0x8000, 0x0000, /* 0x18 */ 0x0000, 0x0000, 0x0000, 0x000f, /* 0x20 */ 0x0605, 0x0000, 0xbb80, 0x0000, /* 0x28 */ 0x0000, 0xbb80, 0x0000, 0x0000, /* 0x30 */ 0x0000, 0x2000, 0x0000, 0x0000, /* 0x38 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 0x40 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 0x48 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 0x50 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 0x58 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 0x60 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 0x68 */ 0x0000, 0x0808, 0x0000, 0x0006, /* 0x70 */ 0x0000, 0x0000, 0x574d, 0x4c05, /* 0x78 */ }; static const struct snd_kcontrol_new wm9705_snd_ac97_controls[] = { SOC_DOUBLE("Master Playback Volume", AC97_MASTER, 8, 0, 31, 1), SOC_SINGLE("Master Playback Switch", AC97_MASTER, 15, 1, 1), SOC_DOUBLE("Headphone Playback Volume", AC97_HEADPHONE, 8, 0, 31, 1), SOC_SINGLE("Headphone Playback Switch", AC97_HEADPHONE, 15, 1, 1), SOC_DOUBLE("PCM Playback Volume", AC97_PCM, 8, 0, 31, 1), SOC_SINGLE("PCM Playback Switch", AC97_PCM, 15, 1, 1), SOC_SINGLE("Mono Playback Volume", AC97_MASTER_MONO, 0, 31, 1), SOC_SINGLE("Mono Playback Switch", AC97_MASTER_MONO, 15, 1, 1), SOC_SINGLE("PCBeep Playback Volume", AC97_PC_BEEP, 1, 15, 1), SOC_SINGLE("Phone Playback Volume", 
AC97_PHONE, 0, 31, 1), SOC_DOUBLE("Line Playback Volume", AC97_LINE, 8, 0, 31, 1), SOC_DOUBLE("CD Playback Volume", AC97_CD, 8, 0, 31, 1), SOC_SINGLE("Mic Playback Volume", AC97_MIC, 0, 31, 1), SOC_SINGLE("Mic 20dB Boost Switch", AC97_MIC, 6, 1, 0), SOC_DOUBLE("Capture Volume", AC97_REC_GAIN, 8, 0, 15, 0), SOC_SINGLE("Capture Switch", AC97_REC_GAIN, 15, 1, 1), }; static const char *wm9705_mic[] = {"Mic 1", "Mic 2"}; static const char *wm9705_rec_sel[] = {"Mic", "CD", "NC", "NC", "Line", "Stereo Mix", "Mono Mix", "Phone"}; static const struct soc_enum wm9705_enum_mic = SOC_ENUM_SINGLE(AC97_GENERAL_PURPOSE, 8, 2, wm9705_mic); static const struct soc_enum wm9705_enum_rec_l = SOC_ENUM_SINGLE(AC97_REC_SEL, 8, 8, wm9705_rec_sel); static const struct soc_enum wm9705_enum_rec_r = SOC_ENUM_SINGLE(AC97_REC_SEL, 0, 8, wm9705_rec_sel); /* Headphone Mixer */ static const struct snd_kcontrol_new wm9705_hp_mixer_controls[] = { SOC_DAPM_SINGLE("PCBeep Playback Switch", AC97_PC_BEEP, 15, 1, 1), SOC_DAPM_SINGLE("CD Playback Switch", AC97_CD, 15, 1, 1), SOC_DAPM_SINGLE("Mic Playback Switch", AC97_MIC, 15, 1, 1), SOC_DAPM_SINGLE("Phone Playback Switch", AC97_PHONE, 15, 1, 1), SOC_DAPM_SINGLE("Line Playback Switch", AC97_LINE, 15, 1, 1), }; /* Mic source */ static const struct snd_kcontrol_new wm9705_mic_src_controls = SOC_DAPM_ENUM("Route", wm9705_enum_mic); /* Capture source */ static const struct snd_kcontrol_new wm9705_capture_selectl_controls = SOC_DAPM_ENUM("Route", wm9705_enum_rec_l); static const struct snd_kcontrol_new wm9705_capture_selectr_controls = SOC_DAPM_ENUM("Route", wm9705_enum_rec_r); /* DAPM widgets */ static const struct snd_soc_dapm_widget wm9705_dapm_widgets[] = { SND_SOC_DAPM_MUX("Mic Source", SND_SOC_NOPM, 0, 0, &wm9705_mic_src_controls), SND_SOC_DAPM_MUX("Left Capture Source", SND_SOC_NOPM, 0, 0, &wm9705_capture_selectl_controls), SND_SOC_DAPM_MUX("Right Capture Source", SND_SOC_NOPM, 0, 0, &wm9705_capture_selectr_controls), SND_SOC_DAPM_DAC("Left DAC", "Left 
HiFi Playback", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_DAC("Right DAC", "Right HiFi Playback", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_MIXER_NAMED_CTL("HP Mixer", SND_SOC_NOPM, 0, 0, &wm9705_hp_mixer_controls[0], ARRAY_SIZE(wm9705_hp_mixer_controls)), SND_SOC_DAPM_MIXER("Mono Mixer", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_ADC("Left ADC", "Left HiFi Capture", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_ADC("Right ADC", "Right HiFi Capture", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_PGA("Headphone PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("Speaker PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("Line PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("Line out PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("Mono PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("Phone PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("Mic PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("PCBEEP PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("CD PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_PGA("ADC PGA", SND_SOC_NOPM, 0, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("HPOUTL"), SND_SOC_DAPM_OUTPUT("HPOUTR"), SND_SOC_DAPM_OUTPUT("LOUT"), SND_SOC_DAPM_OUTPUT("ROUT"), SND_SOC_DAPM_OUTPUT("MONOOUT"), SND_SOC_DAPM_INPUT("PHONE"), SND_SOC_DAPM_INPUT("LINEINL"), SND_SOC_DAPM_INPUT("LINEINR"), SND_SOC_DAPM_INPUT("CDINL"), SND_SOC_DAPM_INPUT("CDINR"), SND_SOC_DAPM_INPUT("PCBEEP"), SND_SOC_DAPM_INPUT("MIC1"), SND_SOC_DAPM_INPUT("MIC2"), }; /* Audio map * WM9705 has no switches to disable the route from the inputs to the HP mixer * so in order to prevent active inputs from forcing the audio outputs to be * constantly enabled, we use the mutes on those inputs to simulate such * controls. 
*/ static const struct snd_soc_dapm_route wm9705_audio_map[] = { /* HP mixer */ {"HP Mixer", "PCBeep Playback Switch", "PCBEEP PGA"}, {"HP Mixer", "CD Playback Switch", "CD PGA"}, {"HP Mixer", "Mic Playback Switch", "Mic PGA"}, {"HP Mixer", "Phone Playback Switch", "Phone PGA"}, {"HP Mixer", "Line Playback Switch", "Line PGA"}, {"HP Mixer", NULL, "Left DAC"}, {"HP Mixer", NULL, "Right DAC"}, /* mono mixer */ {"Mono Mixer", NULL, "HP Mixer"}, /* outputs */ {"Headphone PGA", NULL, "HP Mixer"}, {"HPOUTL", NULL, "Headphone PGA"}, {"HPOUTR", NULL, "Headphone PGA"}, {"Line out PGA", NULL, "HP Mixer"}, {"LOUT", NULL, "Line out PGA"}, {"ROUT", NULL, "Line out PGA"}, {"Mono PGA", NULL, "Mono Mixer"}, {"MONOOUT", NULL, "Mono PGA"}, /* inputs */ {"CD PGA", NULL, "CDINL"}, {"CD PGA", NULL, "CDINR"}, {"Line PGA", NULL, "LINEINL"}, {"Line PGA", NULL, "LINEINR"}, {"Phone PGA", NULL, "PHONE"}, {"Mic Source", "Mic 1", "MIC1"}, {"Mic Source", "Mic 2", "MIC2"}, {"Mic PGA", NULL, "Mic Source"}, {"PCBEEP PGA", NULL, "PCBEEP"}, /* Left capture selector */ {"Left Capture Source", "Mic", "Mic Source"}, {"Left Capture Source", "CD", "CDINL"}, {"Left Capture Source", "Line", "LINEINL"}, {"Left Capture Source", "Stereo Mix", "HP Mixer"}, {"Left Capture Source", "Mono Mix", "HP Mixer"}, {"Left Capture Source", "Phone", "PHONE"}, /* Right capture source */ {"Right Capture Source", "Mic", "Mic Source"}, {"Right Capture Source", "CD", "CDINR"}, {"Right Capture Source", "Line", "LINEINR"}, {"Right Capture Source", "Stereo Mix", "HP Mixer"}, {"Right Capture Source", "Mono Mix", "HP Mixer"}, {"Right Capture Source", "Phone", "PHONE"}, {"ADC PGA", NULL, "Left Capture Source"}, {"ADC PGA", NULL, "Right Capture Source"}, /* ADC's */ {"Left ADC", NULL, "ADC PGA"}, {"Right ADC", NULL, "ADC PGA"}, }; /* We use a register cache to enhance read performance. 
*/ static unsigned int ac97_read(struct snd_soc_codec *codec, unsigned int reg) { u16 *cache = codec->reg_cache; switch (reg) { case AC97_RESET: case AC97_VENDOR_ID1: case AC97_VENDOR_ID2: return soc_ac97_ops.read(codec->ac97, reg); default: reg = reg >> 1; if (reg >= (ARRAY_SIZE(wm9705_reg))) return -EIO; return cache[reg]; } } static int ac97_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int val) { u16 *cache = codec->reg_cache; soc_ac97_ops.write(codec->ac97, reg, val); reg = reg >> 1; if (reg < (ARRAY_SIZE(wm9705_reg))) cache[reg] = val; return 0; } static int ac97_prepare(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_codec *codec = dai->codec; int reg; u16 vra; vra = ac97_read(codec, AC97_EXTENDED_STATUS); ac97_write(codec, AC97_EXTENDED_STATUS, vra | 0x1); if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) reg = AC97_PCM_FRONT_DAC_RATE; else reg = AC97_PCM_LR_ADC_RATE; return ac97_write(codec, reg, substream->runtime->rate); } #define WM9705_AC97_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 | \ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | \ SNDRV_PCM_RATE_32000 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000) static const struct snd_soc_dai_ops wm9705_dai_ops = { .prepare = ac97_prepare, }; static struct snd_soc_dai_driver wm9705_dai[] = { { .name = "wm9705-hifi", .ac97_control = 1, .playback = { .stream_name = "HiFi Playback", .channels_min = 1, .channels_max = 2, .rates = WM9705_AC97_RATES, .formats = SND_SOC_STD_AC97_FMTS, }, .capture = { .stream_name = "HiFi Capture", .channels_min = 1, .channels_max = 2, .rates = WM9705_AC97_RATES, .formats = SND_SOC_STD_AC97_FMTS, }, .ops = &wm9705_dai_ops, }, { .name = "wm9705-aux", .playback = { .stream_name = "Aux Playback", .channels_min = 1, .channels_max = 1, .rates = WM9705_AC97_RATES, .formats = SNDRV_PCM_FMTBIT_S16_LE, }, } }; static int wm9705_reset(struct snd_soc_codec *codec) { if (soc_ac97_ops.reset) { soc_ac97_ops.reset(codec->ac97); if 
(ac97_read(codec, 0) == wm9705_reg[0]) return 0; /* Success */ } return -EIO; } #ifdef CONFIG_PM static int wm9705_soc_suspend(struct snd_soc_codec *codec) { soc_ac97_ops.write(codec->ac97, AC97_POWERDOWN, 0xffff); return 0; } static int wm9705_soc_resume(struct snd_soc_codec *codec) { int i, ret; u16 *cache = codec->reg_cache; ret = wm9705_reset(codec); if (ret < 0) { printk(KERN_ERR "could not reset AC97 codec\n"); return ret; } for (i = 2; i < ARRAY_SIZE(wm9705_reg) << 1; i += 2) { soc_ac97_ops.write(codec->ac97, i, cache[i>>1]); } return 0; } #else #define wm9705_soc_suspend NULL #define wm9705_soc_resume NULL #endif static int wm9705_soc_probe(struct snd_soc_codec *codec) { int ret = 0; printk(KERN_INFO "WM9705 SoC Audio Codec\n"); ret = snd_soc_new_ac97_codec(codec, &soc_ac97_ops, 0); if (ret < 0) { printk(KERN_ERR "wm9705: failed to register AC97 codec\n"); return ret; } ret = wm9705_reset(codec); if (ret) goto reset_err; snd_soc_add_codec_controls(codec, wm9705_snd_ac97_controls, ARRAY_SIZE(wm9705_snd_ac97_controls)); return 0; reset_err: snd_soc_free_ac97_codec(codec); return ret; } static int wm9705_soc_remove(struct snd_soc_codec *codec) { snd_soc_free_ac97_codec(codec); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm9705 = { .probe = wm9705_soc_probe, .remove = wm9705_soc_remove, .suspend = wm9705_soc_suspend, .resume = wm9705_soc_resume, .read = ac97_read, .write = ac97_write, .reg_cache_size = ARRAY_SIZE(wm9705_reg), .reg_word_size = sizeof(u16), .reg_cache_step = 2, .reg_cache_default = wm9705_reg, .dapm_widgets = wm9705_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm9705_dapm_widgets), .dapm_routes = wm9705_audio_map, .num_dapm_routes = ARRAY_SIZE(wm9705_audio_map), }; static int wm9705_probe(struct platform_device *pdev) { return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm9705, wm9705_dai, ARRAY_SIZE(wm9705_dai)); } static int wm9705_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); return 0; } 
static struct platform_driver wm9705_codec_driver = { .driver = { .name = "wm9705-codec", .owner = THIS_MODULE, }, .probe = wm9705_probe, .remove = wm9705_remove, }; module_platform_driver(wm9705_codec_driver); MODULE_DESCRIPTION("ASoC WM9705 driver"); MODULE_AUTHOR("Ian Molton"); MODULE_LICENSE("GPL v2");
gpl-2.0
FlukeNetworks/snackers-kernel
drivers/net/caif/caif_spi_slave.c
2248
6704
/* * Copyright (C) ST-Ericsson AB 2010 * Contact: Sjur Brendeland / sjur.brandeland@stericsson.com * Author: Daniel Martensson / Daniel.Martensson@stericsson.com * License terms: GNU General Public License (GPL) version 2. */ #include <linux/version.h> #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/string.h> #include <linux/semaphore.h> #include <linux/workqueue.h> #include <linux/completion.h> #include <linux/list.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/debugfs.h> #include <net/caif/caif_spi.h> #ifndef CONFIG_CAIF_SPI_SYNC #define SPI_DATA_POS 0 static inline int forward_to_spi_cmd(struct cfspi *cfspi) { return cfspi->rx_cpck_len; } #else #define SPI_DATA_POS SPI_CMD_SZ static inline int forward_to_spi_cmd(struct cfspi *cfspi) { return 0; } #endif int spi_frm_align = 2; /* * SPI padding options. * Warning: must be a base of 2 (& operation used) and can not be zero ! */ int spi_up_head_align = 1 << 1; int spi_up_tail_align = 1 << 0; int spi_down_head_align = 1 << 2; int spi_down_tail_align = 1 << 1; #ifdef CONFIG_DEBUG_FS static inline void debugfs_store_prev(struct cfspi *cfspi) { /* Store previous command for debugging reasons.*/ cfspi->pcmd = cfspi->cmd; /* Store previous transfer. */ cfspi->tx_ppck_len = cfspi->tx_cpck_len; cfspi->rx_ppck_len = cfspi->rx_cpck_len; } #else static inline void debugfs_store_prev(struct cfspi *cfspi) { } #endif void cfspi_xfer(struct work_struct *work) { struct cfspi *cfspi; u8 *ptr = NULL; unsigned long flags; int ret; cfspi = container_of(work, struct cfspi, work); /* Initialize state. */ cfspi->cmd = SPI_CMD_EOT; for (;;) { cfspi_dbg_state(cfspi, CFSPI_STATE_WAITING); /* Wait for master talk or transmit event. 
*/ wait_event_interruptible(cfspi->wait, test_bit(SPI_XFER, &cfspi->state) || test_bit(SPI_TERMINATE, &cfspi->state)); if (test_bit(SPI_TERMINATE, &cfspi->state)) return; #if CFSPI_DBG_PREFILL /* Prefill buffers for easier debugging. */ memset(cfspi->xfer.va_tx, 0xFF, SPI_DMA_BUF_LEN); memset(cfspi->xfer.va_rx, 0xFF, SPI_DMA_BUF_LEN); #endif /* CFSPI_DBG_PREFILL */ cfspi_dbg_state(cfspi, CFSPI_STATE_AWAKE); /* Check whether we have a committed frame. */ if (cfspi->tx_cpck_len) { int len; cfspi_dbg_state(cfspi, CFSPI_STATE_FETCH_PKT); /* Copy committed SPI frames after the SPI indication. */ ptr = (u8 *) cfspi->xfer.va_tx; ptr += SPI_IND_SZ; len = cfspi_xmitfrm(cfspi, ptr, cfspi->tx_cpck_len); WARN_ON(len != cfspi->tx_cpck_len); } cfspi_dbg_state(cfspi, CFSPI_STATE_GET_NEXT); /* Get length of next frame to commit. */ cfspi->tx_npck_len = cfspi_xmitlen(cfspi); WARN_ON(cfspi->tx_npck_len > SPI_DMA_BUF_LEN); /* * Add indication and length at the beginning of the frame, * using little endian. */ ptr = (u8 *) cfspi->xfer.va_tx; *ptr++ = SPI_CMD_IND; *ptr++ = (SPI_CMD_IND & 0xFF00) >> 8; *ptr++ = cfspi->tx_npck_len & 0x00FF; *ptr++ = (cfspi->tx_npck_len & 0xFF00) >> 8; /* Calculate length of DMAs. */ cfspi->xfer.tx_dma_len = cfspi->tx_cpck_len + SPI_IND_SZ; cfspi->xfer.rx_dma_len = cfspi->rx_cpck_len + SPI_CMD_SZ; /* Add SPI TX frame alignment padding, if necessary. */ if (cfspi->tx_cpck_len && (cfspi->xfer.tx_dma_len % spi_frm_align)) { cfspi->xfer.tx_dma_len += spi_frm_align - (cfspi->xfer.tx_dma_len % spi_frm_align); } /* Add SPI RX frame alignment padding, if necessary. */ if (cfspi->rx_cpck_len && (cfspi->xfer.rx_dma_len % spi_frm_align)) { cfspi->xfer.rx_dma_len += spi_frm_align - (cfspi->xfer.rx_dma_len % spi_frm_align); } cfspi_dbg_state(cfspi, CFSPI_STATE_INIT_XFER); /* Start transfer. 
*/ ret = cfspi->dev->init_xfer(&cfspi->xfer, cfspi->dev); WARN_ON(ret); cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_ACTIVE); /* * TODO: We might be able to make an assumption if this is the * first loop. Make sure that minimum toggle time is respected. */ udelay(MIN_TRANSITION_TIME_USEC); cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_ACTIVE); /* Signal that we are ready to receive data. */ cfspi->dev->sig_xfer(true, cfspi->dev); cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_XFER_DONE); /* Wait for transfer completion. */ wait_for_completion(&cfspi->comp); cfspi_dbg_state(cfspi, CFSPI_STATE_XFER_DONE); if (cfspi->cmd == SPI_CMD_EOT) { /* * Clear the master talk bit. A xfer is always at * least two bursts. */ clear_bit(SPI_SS_ON, &cfspi->state); } cfspi_dbg_state(cfspi, CFSPI_STATE_WAIT_INACTIVE); /* Make sure that the minimum toggle time is respected. */ if (SPI_XFER_TIME_USEC(cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz) < MIN_TRANSITION_TIME_USEC) { udelay(MIN_TRANSITION_TIME_USEC - SPI_XFER_TIME_USEC (cfspi->xfer.tx_dma_len, cfspi->dev->clk_mhz)); } cfspi_dbg_state(cfspi, CFSPI_STATE_SIG_INACTIVE); /* De-assert transfer signal. */ cfspi->dev->sig_xfer(false, cfspi->dev); /* Check whether we received a CAIF packet. */ if (cfspi->rx_cpck_len) { int len; cfspi_dbg_state(cfspi, CFSPI_STATE_DELIVER_PKT); /* Parse SPI frame. */ ptr = ((u8 *)(cfspi->xfer.va_rx + SPI_DATA_POS)); len = cfspi_rxfrm(cfspi, ptr, cfspi->rx_cpck_len); WARN_ON(len != cfspi->rx_cpck_len); } /* Check the next SPI command and length. */ ptr = (u8 *) cfspi->xfer.va_rx; ptr += forward_to_spi_cmd(cfspi); cfspi->cmd = *ptr++; cfspi->cmd |= ((*ptr++) << 8) & 0xFF00; cfspi->rx_npck_len = *ptr++; cfspi->rx_npck_len |= ((*ptr++) << 8) & 0xFF00; WARN_ON(cfspi->rx_npck_len > SPI_DMA_BUF_LEN); WARN_ON(cfspi->cmd > SPI_CMD_EOT); debugfs_store_prev(cfspi); /* Check whether the master issued an EOT command. */ if (cfspi->cmd == SPI_CMD_EOT) { /* Reset state. 
*/ cfspi->tx_cpck_len = 0; cfspi->rx_cpck_len = 0; } else { /* Update state. */ cfspi->tx_cpck_len = cfspi->tx_npck_len; cfspi->rx_cpck_len = cfspi->rx_npck_len; } /* * Check whether we need to clear the xfer bit. * Spin lock needed for packet insertion. * Test and clear of different bits * are not supported. */ spin_lock_irqsave(&cfspi->lock, flags); if (cfspi->cmd == SPI_CMD_EOT && !cfspi_xmitlen(cfspi) && !test_bit(SPI_SS_ON, &cfspi->state)) clear_bit(SPI_XFER, &cfspi->state); spin_unlock_irqrestore(&cfspi->lock, flags); } } struct platform_driver cfspi_spi_driver = { .probe = cfspi_spi_probe, .remove = cfspi_spi_remove, .driver = { .name = "cfspi_sspi", .owner = THIS_MODULE, }, };
gpl-2.0
Albinoman887/android_kernel_htc_msm8660
arch/s390/hypfs/inode.c
2760
12560
/* * arch/s390/hypfs/inode.c * Hypervisor filesystem for Linux on s390. * * Copyright IBM Corp. 2006, 2008 * Author(s): Michael Holzheu <holzheu@de.ibm.com> */ #define KMSG_COMPONENT "hypfs" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/types.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/namei.h> #include <linux/vfs.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/time.h> #include <linux/parser.h> #include <linux/sysfs.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <asm/ebcdic.h> #include "hypfs.h" #define HYPFS_MAGIC 0x687970 /* ASCII 'hyp' */ #define TMP_SIZE 64 /* size of temporary buffers */ static struct dentry *hypfs_create_update_file(struct super_block *sb, struct dentry *dir); struct hypfs_sb_info { uid_t uid; /* uid used for files and dirs */ gid_t gid; /* gid used for files and dirs */ struct dentry *update_file; /* file to trigger update */ time_t last_update; /* last update time in secs since 1970 */ struct mutex lock; /* lock to protect update process */ }; static const struct file_operations hypfs_file_ops; static struct file_system_type hypfs_type; static const struct super_operations hypfs_s_ops; /* start of list of all dentries, which have to be deleted on update */ static struct dentry *hypfs_last_dentry; static void hypfs_update_update(struct super_block *sb) { struct hypfs_sb_info *sb_info = sb->s_fs_info; struct inode *inode = sb_info->update_file->d_inode; sb_info->last_update = get_seconds(); inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME; } /* directory tree removal functions */ static void hypfs_add_dentry(struct dentry *dentry) { dentry->d_fsdata = hypfs_last_dentry; hypfs_last_dentry = dentry; } static inline int hypfs_positive(struct dentry *dentry) { return dentry->d_inode && !d_unhashed(dentry); } static void hypfs_remove(struct dentry *dentry) { struct dentry *parent; parent = dentry->d_parent; if (!parent || 
!parent->d_inode) return; mutex_lock(&parent->d_inode->i_mutex); if (hypfs_positive(dentry)) { if (S_ISDIR(dentry->d_inode->i_mode)) simple_rmdir(parent->d_inode, dentry); else simple_unlink(parent->d_inode, dentry); } d_delete(dentry); dput(dentry); mutex_unlock(&parent->d_inode->i_mutex); } static void hypfs_delete_tree(struct dentry *root) { while (hypfs_last_dentry) { struct dentry *next_dentry; next_dentry = hypfs_last_dentry->d_fsdata; hypfs_remove(hypfs_last_dentry); hypfs_last_dentry = next_dentry; } } static struct inode *hypfs_make_inode(struct super_block *sb, int mode) { struct inode *ret = new_inode(sb); if (ret) { struct hypfs_sb_info *hypfs_info = sb->s_fs_info; ret->i_mode = mode; ret->i_uid = hypfs_info->uid; ret->i_gid = hypfs_info->gid; ret->i_atime = ret->i_mtime = ret->i_ctime = CURRENT_TIME; if (mode & S_IFDIR) ret->i_nlink = 2; else ret->i_nlink = 1; } return ret; } static void hypfs_evict_inode(struct inode *inode) { end_writeback(inode); kfree(inode->i_private); } static int hypfs_open(struct inode *inode, struct file *filp) { char *data = filp->f_path.dentry->d_inode->i_private; struct hypfs_sb_info *fs_info; if (filp->f_mode & FMODE_WRITE) { if (!(inode->i_mode & S_IWUGO)) return -EACCES; } if (filp->f_mode & FMODE_READ) { if (!(inode->i_mode & S_IRUGO)) return -EACCES; } fs_info = inode->i_sb->s_fs_info; if(data) { mutex_lock(&fs_info->lock); filp->private_data = kstrdup(data, GFP_KERNEL); if (!filp->private_data) { mutex_unlock(&fs_info->lock); return -ENOMEM; } mutex_unlock(&fs_info->lock); } return nonseekable_open(inode, filp); } static ssize_t hypfs_aio_read(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t offset) { char *data; ssize_t ret; struct file *filp = iocb->ki_filp; /* XXX: temporary */ char __user *buf = iov[0].iov_base; size_t count = iov[0].iov_len; if (nr_segs != 1) return -EINVAL; data = filp->private_data; ret = simple_read_from_buffer(buf, count, &offset, data, strlen(data)); if (ret <= 0) 
return ret; iocb->ki_pos += ret; file_accessed(filp); return ret; } static ssize_t hypfs_aio_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t offset) { int rc; struct super_block *sb; struct hypfs_sb_info *fs_info; size_t count = iov_length(iov, nr_segs); sb = iocb->ki_filp->f_path.dentry->d_inode->i_sb; fs_info = sb->s_fs_info; /* * Currently we only allow one update per second for two reasons: * 1. diag 204 is VERY expensive * 2. If several processes do updates in parallel and then read the * hypfs data, the likelihood of collisions is reduced, if we restrict * the minimum update interval. A collision occurs, if during the * data gathering of one process another process triggers an update * If the first process wants to ensure consistent data, it has * to restart data collection in this case. */ mutex_lock(&fs_info->lock); if (fs_info->last_update == get_seconds()) { rc = -EBUSY; goto out; } hypfs_delete_tree(sb->s_root); if (MACHINE_IS_VM) rc = hypfs_vm_create_files(sb, sb->s_root); else rc = hypfs_diag_create_files(sb, sb->s_root); if (rc) { pr_err("Updating the hypfs tree failed\n"); hypfs_delete_tree(sb->s_root); goto out; } hypfs_update_update(sb); rc = count; out: mutex_unlock(&fs_info->lock); return rc; } static int hypfs_release(struct inode *inode, struct file *filp) { kfree(filp->private_data); return 0; } enum { opt_uid, opt_gid, opt_err }; static const match_table_t hypfs_tokens = { {opt_uid, "uid=%u"}, {opt_gid, "gid=%u"}, {opt_err, NULL} }; static int hypfs_parse_options(char *options, struct super_block *sb) { char *str; substring_t args[MAX_OPT_ARGS]; if (!options) return 0; while ((str = strsep(&options, ",")) != NULL) { int token, option; struct hypfs_sb_info *hypfs_info = sb->s_fs_info; if (!*str) continue; token = match_token(str, hypfs_tokens, args); switch (token) { case opt_uid: if (match_int(&args[0], &option)) return -EINVAL; hypfs_info->uid = option; break; case opt_gid: if (match_int(&args[0], &option)) 
return -EINVAL; hypfs_info->gid = option; break; case opt_err: default: pr_err("%s is not a valid mount option\n", str); return -EINVAL; } } return 0; } static int hypfs_show_options(struct seq_file *s, struct vfsmount *mnt) { struct hypfs_sb_info *hypfs_info = mnt->mnt_sb->s_fs_info; seq_printf(s, ",uid=%u", hypfs_info->uid); seq_printf(s, ",gid=%u", hypfs_info->gid); return 0; } static int hypfs_fill_super(struct super_block *sb, void *data, int silent) { struct inode *root_inode; struct dentry *root_dentry; int rc = 0; struct hypfs_sb_info *sbi; sbi = kzalloc(sizeof(struct hypfs_sb_info), GFP_KERNEL); if (!sbi) return -ENOMEM; mutex_init(&sbi->lock); sbi->uid = current_uid(); sbi->gid = current_gid(); sb->s_fs_info = sbi; sb->s_blocksize = PAGE_CACHE_SIZE; sb->s_blocksize_bits = PAGE_CACHE_SHIFT; sb->s_magic = HYPFS_MAGIC; sb->s_op = &hypfs_s_ops; if (hypfs_parse_options(data, sb)) return -EINVAL; root_inode = hypfs_make_inode(sb, S_IFDIR | 0755); if (!root_inode) return -ENOMEM; root_inode->i_op = &simple_dir_inode_operations; root_inode->i_fop = &simple_dir_operations; sb->s_root = root_dentry = d_alloc_root(root_inode); if (!root_dentry) { iput(root_inode); return -ENOMEM; } if (MACHINE_IS_VM) rc = hypfs_vm_create_files(sb, root_dentry); else rc = hypfs_diag_create_files(sb, root_dentry); if (rc) return rc; sbi->update_file = hypfs_create_update_file(sb, root_dentry); if (IS_ERR(sbi->update_file)) return PTR_ERR(sbi->update_file); hypfs_update_update(sb); pr_info("Hypervisor filesystem mounted\n"); return 0; } static struct dentry *hypfs_mount(struct file_system_type *fst, int flags, const char *devname, void *data) { return mount_single(fst, flags, data, hypfs_fill_super); } static void hypfs_kill_super(struct super_block *sb) { struct hypfs_sb_info *sb_info = sb->s_fs_info; if (sb->s_root) hypfs_delete_tree(sb->s_root); if (sb_info->update_file) hypfs_remove(sb_info->update_file); kfree(sb->s_fs_info); sb->s_fs_info = NULL; kill_litter_super(sb); } static 
struct dentry *hypfs_create_file(struct super_block *sb, struct dentry *parent, const char *name, char *data, mode_t mode) { struct dentry *dentry; struct inode *inode; mutex_lock(&parent->d_inode->i_mutex); dentry = lookup_one_len(name, parent, strlen(name)); if (IS_ERR(dentry)) { dentry = ERR_PTR(-ENOMEM); goto fail; } inode = hypfs_make_inode(sb, mode); if (!inode) { dput(dentry); dentry = ERR_PTR(-ENOMEM); goto fail; } if (mode & S_IFREG) { inode->i_fop = &hypfs_file_ops; if (data) inode->i_size = strlen(data); else inode->i_size = 0; } else if (mode & S_IFDIR) { inode->i_op = &simple_dir_inode_operations; inode->i_fop = &simple_dir_operations; parent->d_inode->i_nlink++; } else BUG(); inode->i_private = data; d_instantiate(dentry, inode); dget(dentry); fail: mutex_unlock(&parent->d_inode->i_mutex); return dentry; } struct dentry *hypfs_mkdir(struct super_block *sb, struct dentry *parent, const char *name) { struct dentry *dentry; dentry = hypfs_create_file(sb, parent, name, NULL, S_IFDIR | DIR_MODE); if (IS_ERR(dentry)) return dentry; hypfs_add_dentry(dentry); return dentry; } static struct dentry *hypfs_create_update_file(struct super_block *sb, struct dentry *dir) { struct dentry *dentry; dentry = hypfs_create_file(sb, dir, "update", NULL, S_IFREG | UPDATE_FILE_MODE); /* * We do not put the update file on the 'delete' list with * hypfs_add_dentry(), since it should not be removed when the tree * is updated. 
*/ return dentry; } struct dentry *hypfs_create_u64(struct super_block *sb, struct dentry *dir, const char *name, __u64 value) { char *buffer; char tmp[TMP_SIZE]; struct dentry *dentry; snprintf(tmp, TMP_SIZE, "%llu\n", (unsigned long long int)value); buffer = kstrdup(tmp, GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); dentry = hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE); if (IS_ERR(dentry)) { kfree(buffer); return ERR_PTR(-ENOMEM); } hypfs_add_dentry(dentry); return dentry; } struct dentry *hypfs_create_str(struct super_block *sb, struct dentry *dir, const char *name, char *string) { char *buffer; struct dentry *dentry; buffer = kmalloc(strlen(string) + 2, GFP_KERNEL); if (!buffer) return ERR_PTR(-ENOMEM); sprintf(buffer, "%s\n", string); dentry = hypfs_create_file(sb, dir, name, buffer, S_IFREG | REG_FILE_MODE); if (IS_ERR(dentry)) { kfree(buffer); return ERR_PTR(-ENOMEM); } hypfs_add_dentry(dentry); return dentry; } static const struct file_operations hypfs_file_ops = { .open = hypfs_open, .release = hypfs_release, .read = do_sync_read, .write = do_sync_write, .aio_read = hypfs_aio_read, .aio_write = hypfs_aio_write, .llseek = no_llseek, }; static struct file_system_type hypfs_type = { .owner = THIS_MODULE, .name = "s390_hypfs", .mount = hypfs_mount, .kill_sb = hypfs_kill_super }; static const struct super_operations hypfs_s_ops = { .statfs = simple_statfs, .evict_inode = hypfs_evict_inode, .show_options = hypfs_show_options, }; static struct kobject *s390_kobj; static int __init hypfs_init(void) { int rc; rc = hypfs_dbfs_init(); if (rc) return rc; if (hypfs_diag_init()) { rc = -ENODATA; goto fail_dbfs_exit; } if (hypfs_vm_init()) { rc = -ENODATA; goto fail_hypfs_diag_exit; } s390_kobj = kobject_create_and_add("s390", hypervisor_kobj); if (!s390_kobj) { rc = -ENOMEM; goto fail_hypfs_vm_exit; } rc = register_filesystem(&hypfs_type); if (rc) goto fail_filesystem; return 0; fail_filesystem: kobject_put(s390_kobj); fail_hypfs_vm_exit: 
hypfs_vm_exit(); fail_hypfs_diag_exit: hypfs_diag_exit(); fail_dbfs_exit: hypfs_dbfs_exit(); pr_err("Initialization of hypfs failed with rc=%i\n", rc); return rc; } static void __exit hypfs_exit(void) { hypfs_diag_exit(); hypfs_vm_exit(); hypfs_dbfs_exit(); unregister_filesystem(&hypfs_type); kobject_put(s390_kobj); } module_init(hypfs_init) module_exit(hypfs_exit) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael Holzheu <holzheu@de.ibm.com>"); MODULE_DESCRIPTION("s390 Hypervisor Filesystem");
gpl-2.0
cypresskr/linux
arch/alpha/kernel/signal.c
3016
15312
/* * linux/arch/alpha/kernel/signal.c * * Copyright (C) 1995 Linus Torvalds * * 1997-11-02 Modified for POSIX.1b signals by Richard Henderson */ #include <linux/sched.h> #include <linux/kernel.h> #include <linux/signal.h> #include <linux/errno.h> #include <linux/wait.h> #include <linux/ptrace.h> #include <linux/unistd.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/tty.h> #include <linux/binfmts.h> #include <linux/bitops.h> #include <linux/syscalls.h> #include <linux/tracehook.h> #include <asm/uaccess.h> #include <asm/sigcontext.h> #include <asm/ucontext.h> #include "proto.h" #define DEBUG_SIG 0 #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) asmlinkage void ret_from_sys_call(void); /* * The OSF/1 sigprocmask calling sequence is different from the * C sigprocmask() sequence.. */ SYSCALL_DEFINE2(osf_sigprocmask, int, how, unsigned long, newmask) { sigset_t oldmask; sigset_t mask; unsigned long res; siginitset(&mask, newmask & _BLOCKABLE); res = sigprocmask(how, &mask, &oldmask); if (!res) { force_successful_syscall_return(); res = oldmask.sig[0]; } return res; } SYSCALL_DEFINE3(osf_sigaction, int, sig, const struct osf_sigaction __user *, act, struct osf_sigaction __user *, oact) { struct k_sigaction new_ka, old_ka; int ret; if (act) { old_sigset_t mask; if (!access_ok(VERIFY_READ, act, sizeof(*act)) || __get_user(new_ka.sa.sa_handler, &act->sa_handler) || __get_user(new_ka.sa.sa_flags, &act->sa_flags) || __get_user(mask, &act->sa_mask)) return -EFAULT; siginitset(&new_ka.sa.sa_mask, mask); new_ka.ka_restorer = NULL; } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? 
&old_ka : NULL); if (!ret && oact) { if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || __put_user(old_ka.sa.sa_flags, &oact->sa_flags) || __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask)) return -EFAULT; } return ret; } SYSCALL_DEFINE5(rt_sigaction, int, sig, const struct sigaction __user *, act, struct sigaction __user *, oact, size_t, sigsetsize, void __user *, restorer) { struct k_sigaction new_ka, old_ka; int ret; /* XXX: Don't preclude handling different sized sigset_t's. */ if (sigsetsize != sizeof(sigset_t)) return -EINVAL; if (act) { new_ka.ka_restorer = restorer; if (copy_from_user(&new_ka.sa, act, sizeof(*act))) return -EFAULT; } ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); if (!ret && oact) { if (copy_to_user(oact, &old_ka.sa, sizeof(*oact))) return -EFAULT; } return ret; } /* * Do a signal return; undo the signal stack. */ #if _NSIG_WORDS > 1 # error "Non SA_SIGINFO frame needs rearranging" #endif struct sigframe { struct sigcontext sc; unsigned int retcode[3]; }; struct rt_sigframe { struct siginfo info; struct ucontext uc; unsigned int retcode[3]; }; /* If this changes, userland unwinders that Know Things about our signal frame will break. Do not undertake lightly. It also implies an ABI change wrt the size of siginfo_t, which may cause some pain. */ extern char compile_time_assert [offsetof(struct rt_sigframe, uc.uc_mcontext) == 176 ? 
1 : -1]; #define INSN_MOV_R30_R16 0x47fe0410 #define INSN_LDI_R0 0x201f0000 #define INSN_CALLSYS 0x00000083 static long restore_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs) { unsigned long usp; struct switch_stack *sw = (struct switch_stack *)regs - 1; long i, err = __get_user(regs->pc, &sc->sc_pc); current_thread_info()->restart_block.fn = do_no_restart_syscall; sw->r26 = (unsigned long) ret_from_sys_call; err |= __get_user(regs->r0, sc->sc_regs+0); err |= __get_user(regs->r1, sc->sc_regs+1); err |= __get_user(regs->r2, sc->sc_regs+2); err |= __get_user(regs->r3, sc->sc_regs+3); err |= __get_user(regs->r4, sc->sc_regs+4); err |= __get_user(regs->r5, sc->sc_regs+5); err |= __get_user(regs->r6, sc->sc_regs+6); err |= __get_user(regs->r7, sc->sc_regs+7); err |= __get_user(regs->r8, sc->sc_regs+8); err |= __get_user(sw->r9, sc->sc_regs+9); err |= __get_user(sw->r10, sc->sc_regs+10); err |= __get_user(sw->r11, sc->sc_regs+11); err |= __get_user(sw->r12, sc->sc_regs+12); err |= __get_user(sw->r13, sc->sc_regs+13); err |= __get_user(sw->r14, sc->sc_regs+14); err |= __get_user(sw->r15, sc->sc_regs+15); err |= __get_user(regs->r16, sc->sc_regs+16); err |= __get_user(regs->r17, sc->sc_regs+17); err |= __get_user(regs->r18, sc->sc_regs+18); err |= __get_user(regs->r19, sc->sc_regs+19); err |= __get_user(regs->r20, sc->sc_regs+20); err |= __get_user(regs->r21, sc->sc_regs+21); err |= __get_user(regs->r22, sc->sc_regs+22); err |= __get_user(regs->r23, sc->sc_regs+23); err |= __get_user(regs->r24, sc->sc_regs+24); err |= __get_user(regs->r25, sc->sc_regs+25); err |= __get_user(regs->r26, sc->sc_regs+26); err |= __get_user(regs->r27, sc->sc_regs+27); err |= __get_user(regs->r28, sc->sc_regs+28); err |= __get_user(regs->gp, sc->sc_regs+29); err |= __get_user(usp, sc->sc_regs+30); wrusp(usp); for (i = 0; i < 31; i++) err |= __get_user(sw->fp[i], sc->sc_fpregs+i); err |= __get_user(sw->fp[31], &sc->sc_fpcr); return err; } /* Note that this syscall is also used by 
setcontext(3) to install a given sigcontext. This because it's impossible to set *all* registers and transfer control from userland. */ asmlinkage void do_sigreturn(struct sigcontext __user *sc) { struct pt_regs *regs = current_pt_regs(); sigset_t set; /* Verify that it's a good sigcontext before using it */ if (!access_ok(VERIFY_READ, sc, sizeof(*sc))) goto give_sigsegv; if (__get_user(set.sig[0], &sc->sc_mask)) goto give_sigsegv; set_current_blocked(&set); if (restore_sigcontext(sc, regs)) goto give_sigsegv; /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt (current)) { siginfo_t info; info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_BRKPT; info.si_addr = (void __user *) regs->pc; info.si_trapno = 0; send_sig_info(SIGTRAP, &info, current); } return; give_sigsegv: force_sig(SIGSEGV, current); } asmlinkage void do_rt_sigreturn(struct rt_sigframe __user *frame) { struct pt_regs *regs = current_pt_regs(); sigset_t set; /* Verify that it's a good ucontext_t before using it */ if (!access_ok(VERIFY_READ, &frame->uc, sizeof(frame->uc))) goto give_sigsegv; if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set))) goto give_sigsegv; set_current_blocked(&set); if (restore_sigcontext(&frame->uc.uc_mcontext, regs)) goto give_sigsegv; /* Send SIGTRAP if we're single-stepping: */ if (ptrace_cancel_bpt (current)) { siginfo_t info; info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_BRKPT; info.si_addr = (void __user *) regs->pc; info.si_trapno = 0; send_sig_info(SIGTRAP, &info, current); } return; give_sigsegv: force_sig(SIGSEGV, current); } /* * Set up a signal frame. 
*/ static inline void __user * get_sigframe(struct ksignal *ksig, unsigned long sp, size_t frame_size) { return (void __user *)((sigsp(sp, ksig) - frame_size) & -32ul); } static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs, unsigned long mask, unsigned long sp) { struct switch_stack *sw = (struct switch_stack *)regs - 1; long i, err = 0; err |= __put_user(on_sig_stack((unsigned long)sc), &sc->sc_onstack); err |= __put_user(mask, &sc->sc_mask); err |= __put_user(regs->pc, &sc->sc_pc); err |= __put_user(8, &sc->sc_ps); err |= __put_user(regs->r0 , sc->sc_regs+0); err |= __put_user(regs->r1 , sc->sc_regs+1); err |= __put_user(regs->r2 , sc->sc_regs+2); err |= __put_user(regs->r3 , sc->sc_regs+3); err |= __put_user(regs->r4 , sc->sc_regs+4); err |= __put_user(regs->r5 , sc->sc_regs+5); err |= __put_user(regs->r6 , sc->sc_regs+6); err |= __put_user(regs->r7 , sc->sc_regs+7); err |= __put_user(regs->r8 , sc->sc_regs+8); err |= __put_user(sw->r9 , sc->sc_regs+9); err |= __put_user(sw->r10 , sc->sc_regs+10); err |= __put_user(sw->r11 , sc->sc_regs+11); err |= __put_user(sw->r12 , sc->sc_regs+12); err |= __put_user(sw->r13 , sc->sc_regs+13); err |= __put_user(sw->r14 , sc->sc_regs+14); err |= __put_user(sw->r15 , sc->sc_regs+15); err |= __put_user(regs->r16, sc->sc_regs+16); err |= __put_user(regs->r17, sc->sc_regs+17); err |= __put_user(regs->r18, sc->sc_regs+18); err |= __put_user(regs->r19, sc->sc_regs+19); err |= __put_user(regs->r20, sc->sc_regs+20); err |= __put_user(regs->r21, sc->sc_regs+21); err |= __put_user(regs->r22, sc->sc_regs+22); err |= __put_user(regs->r23, sc->sc_regs+23); err |= __put_user(regs->r24, sc->sc_regs+24); err |= __put_user(regs->r25, sc->sc_regs+25); err |= __put_user(regs->r26, sc->sc_regs+26); err |= __put_user(regs->r27, sc->sc_regs+27); err |= __put_user(regs->r28, sc->sc_regs+28); err |= __put_user(regs->gp , sc->sc_regs+29); err |= __put_user(sp, sc->sc_regs+30); err |= __put_user(0, sc->sc_regs+31); for (i = 
0; i < 31; i++) err |= __put_user(sw->fp[i], sc->sc_fpregs+i); err |= __put_user(0, sc->sc_fpregs+31); err |= __put_user(sw->fp[31], &sc->sc_fpcr); err |= __put_user(regs->trap_a0, &sc->sc_traparg_a0); err |= __put_user(regs->trap_a1, &sc->sc_traparg_a1); err |= __put_user(regs->trap_a2, &sc->sc_traparg_a2); return err; } static int setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { unsigned long oldsp, r26, err = 0; struct sigframe __user *frame; oldsp = rdusp(); frame = get_sigframe(ksig, oldsp, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; err |= setup_sigcontext(&frame->sc, regs, set->sig[0], oldsp); if (err) return -EFAULT; /* Set up to return from userspace. If provided, use a stub already in userspace. */ r26 = (unsigned long) ksig->ka.ka_restorer; if (!r26) { err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0); err |= __put_user(INSN_LDI_R0+__NR_sigreturn, frame->retcode+1); err |= __put_user(INSN_CALLSYS, frame->retcode+2); imb(); r26 = (unsigned long) frame->retcode; } /* Check that everything was written properly. */ if (err) return err; /* "Return" to the handler */ regs->r26 = r26; regs->r27 = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->r16 = ksig->sig; /* a0: signal number */ regs->r17 = 0; /* a1: exception code */ regs->r18 = (unsigned long) &frame->sc; /* a2: sigcontext pointer */ wrusp((unsigned long) frame); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->pc, regs->r26); #endif return 0; } static int setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs) { unsigned long oldsp, r26, err = 0; struct rt_sigframe __user *frame; oldsp = rdusp(); frame = get_sigframe(ksig, oldsp, sizeof(*frame)); if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame))) return -EFAULT; err |= copy_siginfo_to_user(&frame->info, &ksig->info); /* Create the ucontext. 
*/ err |= __put_user(0, &frame->uc.uc_flags); err |= __put_user(0, &frame->uc.uc_link); err |= __put_user(set->sig[0], &frame->uc.uc_osf_sigmask); err |= __save_altstack(&frame->uc.uc_stack, oldsp); err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, set->sig[0], oldsp); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) return -EFAULT; /* Set up to return from userspace. If provided, use a stub already in userspace. */ r26 = (unsigned long) ksig->ka.ka_restorer; if (!r26) { err |= __put_user(INSN_MOV_R30_R16, frame->retcode+0); err |= __put_user(INSN_LDI_R0+__NR_rt_sigreturn, frame->retcode+1); err |= __put_user(INSN_CALLSYS, frame->retcode+2); imb(); r26 = (unsigned long) frame->retcode; } if (err) return -EFAULT; /* "Return" to the handler */ regs->r26 = r26; regs->r27 = regs->pc = (unsigned long) ksig->ka.sa.sa_handler; regs->r16 = ksig->sig; /* a0: signal number */ regs->r17 = (unsigned long) &frame->info; /* a1: siginfo pointer */ regs->r18 = (unsigned long) &frame->uc; /* a2: ucontext pointer */ wrusp((unsigned long) frame); #if DEBUG_SIG printk("SIG deliver (%s:%d): sp=%p pc=%p ra=%p\n", current->comm, current->pid, frame, regs->pc, regs->r26); #endif return 0; } /* * OK, we're invoking a handler. 
*/ static inline void handle_signal(struct ksignal *ksig, struct pt_regs *regs) { sigset_t *oldset = sigmask_to_save(); int ret; if (ksig->ka.sa.sa_flags & SA_SIGINFO) ret = setup_rt_frame(ksig, oldset, regs); else ret = setup_frame(ksig, oldset, regs); signal_setup_done(ret, ksig, 0); } static inline void syscall_restart(unsigned long r0, unsigned long r19, struct pt_regs *regs, struct k_sigaction *ka) { switch (regs->r0) { case ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { case ERESTARTNOHAND: regs->r0 = EINTR; break; } /* fallthrough */ case ERESTARTNOINTR: regs->r0 = r0; /* reset v0 and a3 and replay syscall */ regs->r19 = r19; regs->pc -= 4; break; case ERESTART_RESTARTBLOCK: regs->r0 = EINTR; break; } } /* * Note that 'init' is a special process: it doesn't get signals it doesn't * want to handle. Thus you cannot kill init even with a SIGKILL even by * mistake. * * Note that we go through the signals twice: once to check the signals that * the kernel can handle, and then we build all the user-level signal handling * stack-frames in one go after that. * * "r0" and "r19" are the registers we need to restore for system call * restart. "r0" is also used as an indicator whether we can restart at * all (if we get here from anything but a syscall return, it will be 0) */ static void do_signal(struct pt_regs *regs, unsigned long r0, unsigned long r19) { unsigned long single_stepping = ptrace_cancel_bpt(current); struct ksignal ksig; /* This lets the debugger run, ... */ if (get_signal(&ksig)) { /* ... so re-check the single stepping. */ single_stepping |= ptrace_cancel_bpt(current); /* Whee! Actually deliver the signal. */ if (r0) syscall_restart(r0, r19, regs, &ksig.ka); handle_signal(&ksig, regs); } else { single_stepping |= ptrace_cancel_bpt(current); if (r0) { switch (regs->r0) { case ERESTARTNOHAND: case ERESTARTSYS: case ERESTARTNOINTR: /* Reset v0 and a3 and replay syscall. 
*/ regs->r0 = r0; regs->r19 = r19; regs->pc -= 4; break; case ERESTART_RESTARTBLOCK: /* Set v0 to the restart_syscall and replay */ regs->r0 = __NR_restart_syscall; regs->pc -= 4; break; } } restore_saved_sigmask(); } if (single_stepping) ptrace_set_bpt(current); /* re-set breakpoint */ } void do_work_pending(struct pt_regs *regs, unsigned long thread_flags, unsigned long r0, unsigned long r19) { do { if (thread_flags & _TIF_NEED_RESCHED) { schedule(); } else { local_irq_enable(); if (thread_flags & _TIF_SIGPENDING) { do_signal(regs, r0, r19); r0 = 0; } else { clear_thread_flag(TIF_NOTIFY_RESUME); tracehook_notify_resume(regs); } } local_irq_disable(); thread_flags = current_thread_info()->flags; } while (thread_flags & _TIF_WORK_MASK); }
gpl-2.0
Nicklas373/Hana-CoreUX-Kernel_MSM8627-AOSP_7.0
arch/hexagon/kernel/signal.c
3016
9307
/* * Signal support for Hexagon processor * * Copyright (c) 2010-2011, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. */ #include <linux/linkage.h> #include <linux/syscalls.h> #include <linux/freezer.h> #include <linux/tracehook.h> #include <asm/registers.h> #include <asm/thread_info.h> #include <asm/unistd.h> #include <asm/uaccess.h> #include <asm/ucontext.h> #include <asm/cacheflush.h> #include <asm/signal.h> #include <asm/vdso.h> #define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP))) struct rt_sigframe { unsigned long tramp[2]; struct siginfo info; struct ucontext uc; }; static void __user *get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size) { unsigned long sp = regs->r29; /* Switch to signal stack if appropriate */ if ((ka->sa.sa_flags & SA_ONSTACK) && (sas_ss_flags(sp) == 0)) sp = current->sas_ss_sp + current->sas_ss_size; return (void __user *)((sp - frame_size) & ~(sizeof(long long) - 1)); } static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { unsigned long tmp; int err = 0; err |= copy_to_user(&sc->sc_regs.r0, &regs->r00, 32*sizeof(unsigned long)); err |= __put_user(regs->sa0, &sc->sc_regs.sa0); err |= __put_user(regs->lc0, &sc->sc_regs.lc0); err |= __put_user(regs->sa1, &sc->sc_regs.sa1); err |= __put_user(regs->lc1, &sc->sc_regs.lc1); err |= 
__put_user(regs->m0, &sc->sc_regs.m0); err |= __put_user(regs->m1, &sc->sc_regs.m1); err |= __put_user(regs->usr, &sc->sc_regs.usr); err |= __put_user(regs->preds, &sc->sc_regs.p3_0); err |= __put_user(regs->gp, &sc->sc_regs.gp); err |= __put_user(regs->ugp, &sc->sc_regs.ugp); tmp = pt_elr(regs); err |= __put_user(tmp, &sc->sc_regs.pc); tmp = pt_cause(regs); err |= __put_user(tmp, &sc->sc_regs.cause); tmp = pt_badva(regs); err |= __put_user(tmp, &sc->sc_regs.badva); return err; } static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) { unsigned long tmp; int err = 0; err |= copy_from_user(&regs->r00, &sc->sc_regs.r0, 32 * sizeof(unsigned long)); err |= __get_user(regs->sa0, &sc->sc_regs.sa0); err |= __get_user(regs->lc0, &sc->sc_regs.lc0); err |= __get_user(regs->sa1, &sc->sc_regs.sa1); err |= __get_user(regs->lc1, &sc->sc_regs.lc1); err |= __get_user(regs->m0, &sc->sc_regs.m0); err |= __get_user(regs->m1, &sc->sc_regs.m1); err |= __get_user(regs->usr, &sc->sc_regs.usr); err |= __get_user(regs->preds, &sc->sc_regs.p3_0); err |= __get_user(regs->gp, &sc->sc_regs.gp); err |= __get_user(regs->ugp, &sc->sc_regs.ugp); err |= __get_user(tmp, &sc->sc_regs.pc); pt_set_elr(regs, tmp); return err; } /* * Setup signal stack frame with siginfo structure */ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info, sigset_t *set, struct pt_regs *regs) { int err = 0; struct rt_sigframe __user *frame; struct hexagon_vdso *vdso = current->mm->context.vdso; frame = get_sigframe(ka, regs, sizeof(struct rt_sigframe)); if (!access_ok(VERIFY_WRITE, frame, sizeof(struct rt_sigframe))) goto sigsegv; if (copy_siginfo_to_user(&frame->info, info)) goto sigsegv; /* The on-stack signal trampoline is no longer executed; * however, the libgcc signal frame unwinding code checks for * the presence of these two numeric magic values. 
*/ err |= __put_user(0x7800d166, &frame->tramp[0]); err |= __put_user(0x5400c004, &frame->tramp[1]); err |= setup_sigcontext(regs, &frame->uc.uc_mcontext); err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); if (err) goto sigsegv; /* Load r0/r1 pair with signumber/siginfo pointer... */ regs->r0100 = ((unsigned long long)((unsigned long)&frame->info) << 32) | (unsigned long long)signr; regs->r02 = (unsigned long) &frame->uc; regs->r31 = (unsigned long) vdso->rt_signal_trampoline; pt_psp(regs) = (unsigned long) frame; pt_set_elr(regs, (unsigned long)ka->sa.sa_handler); return 0; sigsegv: force_sigsegv(signr, current); return -EFAULT; } /* * Setup invocation of signal handler */ static int handle_signal(int sig, siginfo_t *info, struct k_sigaction *ka, sigset_t *oldset, struct pt_regs *regs) { int rc; /* * If we're handling a signal that aborted a system call, * set up the error return value before adding the signal * frame to the stack. */ if (regs->syscall_nr >= 0) { switch (regs->r00) { case -ERESTART_RESTARTBLOCK: case -ERESTARTNOHAND: regs->r00 = -EINTR; break; case -ERESTARTSYS: if (!(ka->sa.sa_flags & SA_RESTART)) { regs->r00 = -EINTR; break; } /* Fall through */ case -ERESTARTNOINTR: regs->r06 = regs->syscall_nr; pt_set_elr(regs, pt_elr(regs) - 4); regs->r00 = regs->restart_r0; break; default: break; } } /* * Set up the stack frame; not doing the SA_SIGINFO thing. We * only set up the rt_frame flavor. */ rc = setup_rt_frame(sig, ka, info, oldset, regs); /* If there was an error on setup, no signal was delivered. */ if (rc) return rc; block_sigmask(ka, sig); return 0; } /* * Called from return-from-event code. 
*/ static void do_signal(struct pt_regs *regs) { struct k_sigaction sigact; siginfo_t info; int signo; if (!user_mode(regs)) return; if (try_to_freeze()) goto no_signal; signo = get_signal_to_deliver(&info, &sigact, regs, NULL); if (signo > 0) { sigset_t *oldset; if (test_thread_flag(TIF_RESTORE_SIGMASK)) oldset = &current->saved_sigmask; else oldset = &current->blocked; if (handle_signal(signo, &info, &sigact, oldset, regs) == 0) { /* * Successful delivery case. The saved sigmask is * stored in the signal frame, and will be restored * by sigreturn. We can clear the TIF flag. */ clear_thread_flag(TIF_RESTORE_SIGMASK); tracehook_signal_handler(signo, &info, &sigact, regs, test_thread_flag(TIF_SINGLESTEP)); } return; } no_signal: /* * If we came from a system call, handle the restart. */ if (regs->syscall_nr >= 0) { switch (regs->r00) { case -ERESTARTNOHAND: case -ERESTARTSYS: case -ERESTARTNOINTR: regs->r06 = regs->syscall_nr; break; case -ERESTART_RESTARTBLOCK: regs->r06 = __NR_restart_syscall; break; default: goto no_restart; } pt_set_elr(regs, pt_elr(regs) - 4); regs->r00 = regs->restart_r0; } no_restart: /* If there's no signal to deliver, put the saved sigmask back */ if (test_thread_flag(TIF_RESTORE_SIGMASK)) { clear_thread_flag(TIF_RESTORE_SIGMASK); sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL); } } void do_notify_resume(struct pt_regs *regs, unsigned long thread_info_flags) { if (thread_info_flags & _TIF_SIGPENDING) do_signal(regs); if (thread_info_flags & _TIF_NOTIFY_RESUME) { clear_thread_flag(TIF_NOTIFY_RESUME); if (current->replacement_session_keyring) key_replace_session_keyring(); } } /* * Architecture-specific wrappers for signal-related system calls */ asmlinkage int sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss) { struct pt_regs *regs = current_thread_info()->regs; return do_sigaltstack(uss, uoss, regs->r29); } asmlinkage int sys_rt_sigreturn(void) { struct pt_regs *regs = current_thread_info()->regs; struct rt_sigframe 
__user *frame; sigset_t blocked; frame = (struct rt_sigframe __user *)pt_psp(regs); if (!access_ok(VERIFY_READ, frame, sizeof(*frame))) goto badframe; if (__copy_from_user(&blocked, &frame->uc.uc_sigmask, sizeof(blocked))) goto badframe; sigdelsetmask(&blocked, ~_BLOCKABLE); set_current_blocked(&blocked); if (restore_sigcontext(regs, &frame->uc.uc_mcontext)) goto badframe; /* Restore the user's stack as well */ pt_psp(regs) = regs->r29; /* * Leave a trace in the stack frame that this was a sigreturn. * If the system call is to replay, we've already restored the * number in the GPR slot and it will be regenerated on the * new system call trap entry. Note that if restore_sigcontext() * did something other than a bulk copy of the pt_regs struct, * we could avoid this assignment by simply not overwriting * regs->syscall_nr. */ regs->syscall_nr = __NR_rt_sigreturn; /* * If we were meticulous, we'd only call this if we knew that * we were actually going to use an alternate stack, and we'd * consider any error to be fatal. What we do here, in common * with many other architectures, is call it blindly and only * consider the -EFAULT return case to be proof of a problem. */ if (do_sigaltstack(&frame->uc.uc_stack, NULL, pt_psp(regs)) == -EFAULT) goto badframe; return 0; badframe: force_sig(SIGSEGV, current); return 0; }
gpl-2.0
versusx/android_kernel_samsung_logan2g
drivers/net/wireless/bcm4329/bcmsdstd.c
4040
90214
/* * 'Standard' SDIO HOST CONTROLLER driver * * Copyright (C) 1999-2010, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. * * $Id: bcmsdstd.c,v 1.64.4.1.4.4.2.18 2010/08/17 17:00:48 Exp $ */ #include <typedefs.h> #include <bcmdevs.h> #include <bcmendian.h> #include <bcmutils.h> #include <osl.h> #include <siutils.h> #include <sdio.h> /* SDIO Device and Protocol Specs */ #include <sdioh.h> /* SDIO Host Controller Specification */ #include <bcmsdbus.h> /* bcmsdh to/from specific controller APIs */ #include <sdiovar.h> /* ioctl/iovars */ #include <pcicfg.h> #define SD_PAGE_BITS 12 #define SD_PAGE (1 << SD_PAGE_BITS) #include <bcmsdstd.h> /* Globals */ uint sd_msglevel = SDH_ERROR_VAL; uint sd_hiok = TRUE; /* Use hi-speed mode if available? 
*/ uint sd_sdmode = SDIOH_MODE_SD4; /* Use SD4 mode by default */ uint sd_f2_blocksize = 64; /* Default blocksize */ #ifdef BCMSDYIELD bool sd_yieldcpu = TRUE; /* Allow CPU yielding for buffer requests */ uint sd_minyield = 0; /* Minimum xfer size to allow CPU yield */ bool sd_forcerb = FALSE; /* Force sync readback in intrs_on/off */ #endif uint sd_divisor = 2; /* Default 48MHz/2 = 24MHz */ uint sd_power = 1; /* Default to SD Slot powered ON */ uint sd_clock = 1; /* Default to SD Clock turned ON */ uint sd_pci_slot = 0xFFFFffff; /* Used to force selection of a particular PCI slot */ uint8 sd_dma_mode = DMA_MODE_SDMA; /* Default to SDMA for now */ uint sd_toctl = 7; static bool trap_errs = FALSE; static const char *dma_mode_description[] = { "PIO", "SDMA", "ADMA1", "32b ADMA2", "64b ADMA2" }; /* Prototypes */ static bool sdstd_start_clock(sdioh_info_t *sd, uint16 divisor); static bool sdstd_start_power(sdioh_info_t *sd); static bool sdstd_bus_width(sdioh_info_t *sd, int width); static int sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode); static int sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode); static int sdstd_card_enablefuncs(sdioh_info_t *sd); static void sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count); static int sdstd_cmd_issue(sdioh_info_t *sd, bool use_dma, uint32 cmd, uint32 arg); static int sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data); static int sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data); static int sdstd_driver_init(sdioh_info_t *sd); static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset); static int sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes, uint32 *data); static int sdstd_abort(sdioh_info_t *sd, uint func); static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg); static int set_client_block_size(sdioh_info_t *sd, int func, int blocksize); 
/* Forward declarations: DMA buffer / ADMA descriptor helpers (defined later in this file). */
static void sd_map_dma(sdioh_info_t * sd);
static void sd_unmap_dma(sdioh_info_t * sd);
static void sd_clear_adma_dscr_buf(sdioh_info_t *sd);
static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data);
static void sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index,
                                      uint32 addr_phys, uint16 length, uint16 flags);
static void sd_dump_adma_dscr(sdioh_info_t *sd);
static void sdstd_dumpregs(sdioh_info_t *sd);

/*
 * Private register access routines.
 *
 * All of these access the standard SD host controller registers through the
 * memory-mapped window at sd->mem_space, using volatile pointers so the
 * compiler cannot cache, reorder, or elide the MMIO accesses.  'reg' is the
 * byte offset of the register inside that window.  None of these are atomic
 * with respect to other CPUs or the controller itself.
 */

/* 16 bit PCI regs */

/* Read a 16-bit host controller register at byte offset 'reg'. */
extern uint16 sdstd_rreg16(sdioh_info_t *sd, uint reg);
uint16
sdstd_rreg16(sdioh_info_t *sd, uint reg)
{
	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
	sd_ctrl(("16: R Reg 0x%02x, Data 0x%x\n", reg, data));
	return data;
}

/* Write 16-bit value 'data' to the register at byte offset 'reg'. */
extern void sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data);
void
sdstd_wreg16(sdioh_info_t *sd, uint reg, uint16 data)
{
	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
	sd_ctrl(("16: W Reg 0x%02x, Data 0x%x\n", reg, data));
}

/* Read-modify-write: set the bits of 'val' in the 16-bit register 'reg'. */
static void
sdstd_or_reg16(sdioh_info_t *sd, uint reg, uint16 val)
{
	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
	sd_ctrl(("16: OR Reg 0x%02x, Val 0x%x\n", reg, val));
	data |= val;
	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
}

/* Read-modify-write: replace the bits selected by 'mask' in the 16-bit
 * register 'reg' with 'val & mask'.
 * NOTE(review): 'mask' is declared int16, so ~mask is computed after
 * promotion to int and may sign-extend; the high bits are then discarded by
 * the uint16 store, so the net effect matches a uint16 mask, but a uint16
 * parameter would be clearer -- confirm before changing the signature.
 */
static void
sdstd_mod_reg16(sdioh_info_t *sd, uint reg, int16 mask, uint16 val)
{
	volatile uint16 data = *(volatile uint16 *)(sd->mem_space + reg);
	sd_ctrl(("16: MOD Reg 0x%02x, Mask 0x%x, Val 0x%x\n", reg, mask, val));
	data &= ~mask;
	data |= (val & mask);
	*(volatile uint16 *)(sd->mem_space + reg) = (uint16)data;
}

/* 32 bit PCI regs */

/* Read a 32-bit host controller register at byte offset 'reg'. */
static uint32
sdstd_rreg(sdioh_info_t *sd, uint reg)
{
	volatile uint32 data = *(volatile uint32 *)(sd->mem_space + reg);
	sd_ctrl(("32: R Reg 0x%02x, Data 0x%x\n", reg, data));
	return data;
}

/* Write 32-bit value 'data' to the register at byte offset 'reg'. */
static inline void
sdstd_wreg(sdioh_info_t *sd, uint reg, uint32 data)
{
	*(volatile uint32 *)(sd->mem_space + reg) = (uint32)data;
	sd_ctrl(("32: W Reg 0x%02x, Data 0x%x\n", reg, data));
}

/* 8 bit PCI regs */
static inline void
sdstd_wreg8(sdioh_info_t *sd, uint reg, uint8 data) { *(volatile uint8 *)(sd->mem_space + reg) = (uint8)data; sd_ctrl(("08: W Reg 0x%02x, Data 0x%x\n", reg, data)); } static uint8 sdstd_rreg8(sdioh_info_t *sd, uint reg) { volatile uint8 data = *(volatile uint8 *)(sd->mem_space + reg); sd_ctrl(("08: R Reg 0x%02x, Data 0x%x\n", reg, data)); return data; } /* * Private work routines */ sdioh_info_t *glob_sd; /* * Public entry points & extern's */ extern sdioh_info_t * sdioh_attach(osl_t *osh, void *bar0, uint irq) { sdioh_info_t *sd; sd_trace(("%s\n", __FUNCTION__)); if ((sd = (sdioh_info_t *)MALLOC(osh, sizeof(sdioh_info_t))) == NULL) { sd_err(("sdioh_attach: out of memory, malloced %d bytes\n", MALLOCED(osh))); return NULL; } bzero((char *)sd, sizeof(sdioh_info_t)); glob_sd = sd; sd->osh = osh; if (sdstd_osinit(sd) != 0) { sd_err(("%s:sdstd_osinit() failed\n", __FUNCTION__)); MFREE(sd->osh, sd, sizeof(sdioh_info_t)); return NULL; } sd->mem_space = (volatile char *)sdstd_reg_map(osh, (uintptr)bar0, SDIOH_REG_WINSZ); sd_init_dma(sd); sd->irq = irq; if (sd->mem_space == NULL) { sd_err(("%s:ioremap() failed\n", __FUNCTION__)); sdstd_osfree(sd); MFREE(sd->osh, sd, sizeof(sdioh_info_t)); return NULL; } sd_info(("%s:sd->mem_space = %p\n", __FUNCTION__, sd->mem_space)); sd->intr_handler = NULL; sd->intr_handler_arg = NULL; sd->intr_handler_valid = FALSE; /* Set defaults */ sd->sd_blockmode = TRUE; sd->use_client_ints = TRUE; sd->sd_dma_mode = sd_dma_mode; if (!sd->sd_blockmode) sd->sd_dma_mode = DMA_MODE_NONE; if (sdstd_driver_init(sd) != SUCCESS) { /* If host CPU was reset without resetting SD bus or SD device, the device will still have its RCA but driver no longer knows what it is (since driver has been restarted). go through once to clear the RCA and a gain reassign it. 
*/ sd_info(("driver_init failed - Reset RCA and try again\n")); if (sdstd_driver_init(sd) != SUCCESS) { sd_err(("%s:driver_init() failed()\n", __FUNCTION__)); if (sd->mem_space) { sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ); sd->mem_space = NULL; } sdstd_osfree(sd); MFREE(sd->osh, sd, sizeof(sdioh_info_t)); return (NULL); } } OSL_DMADDRWIDTH(osh, 32); /* Always map DMA buffers, so we can switch between DMA modes. */ sd_map_dma(sd); if (sdstd_register_irq(sd, irq) != SUCCESS) { sd_err(("%s: sdstd_register_irq() failed for irq = %d\n", __FUNCTION__, irq)); sdstd_free_irq(sd->irq, sd); if (sd->mem_space) { sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ); sd->mem_space = NULL; } sdstd_osfree(sd); MFREE(sd->osh, sd, sizeof(sdioh_info_t)); return (NULL); } sd_trace(("%s: Done\n", __FUNCTION__)); return sd; } extern SDIOH_API_RC sdioh_detach(osl_t *osh, sdioh_info_t *sd) { sd_trace(("%s\n", __FUNCTION__)); if (sd) { sd_unmap_dma(sd); sdstd_wreg16(sd, SD_IntrSignalEnable, 0); sd_trace(("%s: freeing irq %d\n", __FUNCTION__, sd->irq)); sdstd_free_irq(sd->irq, sd); if (sd->card_init_done) sdstd_reset(sd, 1, 1); if (sd->mem_space) { sdstd_reg_unmap(osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ); sd->mem_space = NULL; } sdstd_osfree(sd); MFREE(sd->osh, sd, sizeof(sdioh_info_t)); } return SDIOH_API_RC_SUCCESS; } /* Configure callback to client when we receive client interrupt */ extern SDIOH_API_RC sdioh_interrupt_register(sdioh_info_t *sd, sdioh_cb_fn_t fn, void *argh) { sd_trace(("%s: Entering\n", __FUNCTION__)); sd->intr_handler = fn; sd->intr_handler_arg = argh; sd->intr_handler_valid = TRUE; return SDIOH_API_RC_SUCCESS; } extern SDIOH_API_RC sdioh_interrupt_deregister(sdioh_info_t *sd) { sd_trace(("%s: Entering\n", __FUNCTION__)); sd->intr_handler_valid = FALSE; sd->intr_handler = NULL; sd->intr_handler_arg = NULL; return SDIOH_API_RC_SUCCESS; } extern SDIOH_API_RC sdioh_interrupt_query(sdioh_info_t *sd, bool *onoff) { sd_trace(("%s: 
Entering\n", __FUNCTION__)); *onoff = sd->client_intr_enabled; return SDIOH_API_RC_SUCCESS; } #if defined(DHD_DEBUG) extern bool sdioh_interrupt_pending(sdioh_info_t *sd) { uint16 intrstatus; intrstatus = sdstd_rreg16(sd, SD_IntrStatus); return !!(intrstatus & CLIENT_INTR); } #endif uint sdioh_query_iofnum(sdioh_info_t *sd) { return sd->num_funcs; } /* IOVar table */ enum { IOV_MSGLEVEL = 1, IOV_BLOCKMODE, IOV_BLOCKSIZE, IOV_DMA, IOV_USEINTS, IOV_NUMINTS, IOV_NUMLOCALINTS, IOV_HOSTREG, IOV_DEVREG, IOV_DIVISOR, IOV_SDMODE, IOV_HISPEED, IOV_HCIREGS, IOV_POWER, IOV_YIELDCPU, IOV_MINYIELD, IOV_FORCERB, IOV_CLOCK }; const bcm_iovar_t sdioh_iovars[] = { {"sd_msglevel", IOV_MSGLEVEL, 0, IOVT_UINT32, 0 }, {"sd_blockmode", IOV_BLOCKMODE, 0, IOVT_BOOL, 0 }, {"sd_blocksize", IOV_BLOCKSIZE, 0, IOVT_UINT32, 0 }, /* ((fn << 16) | size) */ {"sd_dma", IOV_DMA, 0, IOVT_UINT32, 0 }, #ifdef BCMSDYIELD {"sd_yieldcpu", IOV_YIELDCPU, 0, IOVT_BOOL, 0 }, {"sd_minyield", IOV_MINYIELD, 0, IOVT_UINT32, 0 }, {"sd_forcerb", IOV_FORCERB, 0, IOVT_BOOL, 0 }, #endif {"sd_ints", IOV_USEINTS, 0, IOVT_BOOL, 0 }, {"sd_numints", IOV_NUMINTS, 0, IOVT_UINT32, 0 }, {"sd_numlocalints", IOV_NUMLOCALINTS, 0, IOVT_UINT32, 0 }, {"sd_hostreg", IOV_HOSTREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, {"sd_devreg", IOV_DEVREG, 0, IOVT_BUFFER, sizeof(sdreg_t) }, {"sd_divisor", IOV_DIVISOR, 0, IOVT_UINT32, 0 }, {"sd_power", IOV_POWER, 0, IOVT_UINT32, 0 }, {"sd_clock", IOV_CLOCK, 0, IOVT_UINT32, 0 }, {"sd_mode", IOV_SDMODE, 0, IOVT_UINT32, 100}, {"sd_highspeed", IOV_HISPEED, 0, IOVT_UINT32, 0}, {NULL, 0, 0, 0, 0 } }; int sdioh_iovar_op(sdioh_info_t *si, const char *name, void *params, int plen, void *arg, int len, bool set) { const bcm_iovar_t *vi = NULL; int bcmerror = 0; int val_size; int32 int_val = 0; bool bool_val; uint32 actionid; ASSERT(name); ASSERT(len >= 0); /* Get must have return space; Set does not take qualifiers */ ASSERT(set || (arg && len)); ASSERT(!set || (!params && !plen)); sd_trace(("%s: Enter (%s 
%s)\n", __FUNCTION__, (set ? "set" : "get"), name)); if ((vi = bcm_iovar_lookup(sdioh_iovars, name)) == NULL) { bcmerror = BCME_UNSUPPORTED; goto exit; } if ((bcmerror = bcm_iovar_lencheck(vi, arg, len, set)) != 0) goto exit; /* Set up params so get and set can share the convenience variables */ if (params == NULL) { params = arg; plen = len; } if (vi->type == IOVT_VOID) val_size = 0; else if (vi->type == IOVT_BUFFER) val_size = len; else val_size = sizeof(int); if (plen >= (int)sizeof(int_val)) bcopy(params, &int_val, sizeof(int_val)); bool_val = (int_val != 0) ? TRUE : FALSE; actionid = set ? IOV_SVAL(vi->varid) : IOV_GVAL(vi->varid); switch (actionid) { case IOV_GVAL(IOV_MSGLEVEL): int_val = (int32)sd_msglevel; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_MSGLEVEL): sd_msglevel = int_val; break; case IOV_GVAL(IOV_BLOCKMODE): int_val = (int32)si->sd_blockmode; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_BLOCKMODE): si->sd_blockmode = (bool)int_val; /* Haven't figured out how to make non-block mode with DMA */ if (!si->sd_blockmode) si->sd_dma_mode = DMA_MODE_NONE; break; #ifdef BCMSDYIELD case IOV_GVAL(IOV_YIELDCPU): int_val = sd_yieldcpu; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_YIELDCPU): sd_yieldcpu = (bool)int_val; break; case IOV_GVAL(IOV_MINYIELD): int_val = sd_minyield; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_MINYIELD): sd_minyield = (bool)int_val; break; case IOV_GVAL(IOV_FORCERB): int_val = sd_forcerb; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_FORCERB): sd_forcerb = (bool)int_val; break; #endif /* BCMSDYIELD */ case IOV_GVAL(IOV_BLOCKSIZE): if ((uint32)int_val > si->num_funcs) { bcmerror = BCME_BADARG; break; } int_val = (int32)si->client_block_size[int_val]; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_BLOCKSIZE): { uint func = ((uint32)int_val >> 16); uint blksize = (uint16)int_val; uint maxsize; if (func > si->num_funcs) { bcmerror = BCME_BADARG; break; } switch 
(func) { case 0: maxsize = 32; break; case 1: maxsize = BLOCK_SIZE_4318; break; case 2: maxsize = BLOCK_SIZE_4328; break; default: maxsize = 0; } if (blksize > maxsize) { bcmerror = BCME_BADARG; break; } if (!blksize) { blksize = maxsize; } /* Now set it */ sdstd_lock(si); bcmerror = set_client_block_size(si, func, blksize); sdstd_unlock(si); break; } case IOV_GVAL(IOV_DMA): int_val = (int32)si->sd_dma_mode; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_DMA): si->sd_dma_mode = (char)int_val; sdstd_set_dma_mode(si, si->sd_dma_mode); break; case IOV_GVAL(IOV_USEINTS): int_val = (int32)si->use_client_ints; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_USEINTS): si->use_client_ints = (bool)int_val; if (si->use_client_ints) si->intmask |= CLIENT_INTR; else si->intmask &= ~CLIENT_INTR; break; case IOV_GVAL(IOV_DIVISOR): int_val = (uint32)sd_divisor; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_DIVISOR): sd_divisor = int_val; if (!sdstd_start_clock(si, (uint16)sd_divisor)) { sd_err(("set clock failed!\n")); bcmerror = BCME_ERROR; } break; case IOV_GVAL(IOV_POWER): int_val = (uint32)sd_power; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_POWER): sd_power = int_val; if (sd_power == 1) { if (sdstd_driver_init(si) != SUCCESS) { sd_err(("set SD Slot power failed!\n")); bcmerror = BCME_ERROR; } else { sd_err(("SD Slot Powered ON.\n")); } } else { uint8 pwr = 0; pwr = SFIELD(pwr, PWR_BUS_EN, 0); sdstd_wreg8(si, SD_PwrCntrl, pwr); /* Set Voltage level */ sd_err(("SD Slot Powered OFF.\n")); } break; case IOV_GVAL(IOV_CLOCK): int_val = (uint32)sd_clock; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_CLOCK): sd_clock = int_val; if (sd_clock == 1) { sd_info(("SD Clock turned ON.\n")); if (!sdstd_start_clock(si, (uint16)sd_divisor)) { sd_err(("sdstd_start_clock failed\n")); bcmerror = BCME_ERROR; } } else { /* turn off HC clock */ sdstd_wreg16(si, SD_ClockCntrl, sdstd_rreg16(si, SD_ClockCntrl) & ~((uint16)0x4)); sd_info(("SD Clock 
turned OFF.\n")); } break; case IOV_GVAL(IOV_SDMODE): int_val = (uint32)sd_sdmode; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_SDMODE): sd_sdmode = int_val; if (!sdstd_bus_width(si, sd_sdmode)) { sd_err(("sdstd_bus_width failed\n")); bcmerror = BCME_ERROR; } break; case IOV_GVAL(IOV_HISPEED): int_val = (uint32)sd_hiok; bcopy(&int_val, arg, val_size); break; case IOV_SVAL(IOV_HISPEED): sd_hiok = int_val; bcmerror = sdstd_set_highspeed_mode(si, (bool)sd_hiok); break; case IOV_GVAL(IOV_NUMINTS): int_val = (int32)si->intrcount; bcopy(&int_val, arg, val_size); break; case IOV_GVAL(IOV_NUMLOCALINTS): int_val = (int32)si->local_intrcount; bcopy(&int_val, arg, val_size); break; case IOV_GVAL(IOV_HOSTREG): { sdreg_t *sd_ptr = (sdreg_t *)params; if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); bcmerror = BCME_BADARG; break; } sd_trace(("%s: rreg%d at offset %d\n", __FUNCTION__, (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 16 : 32), sd_ptr->offset)); if (sd_ptr->offset & 1) int_val = sdstd_rreg8(si, sd_ptr->offset); else if (sd_ptr->offset & 2) int_val = sdstd_rreg16(si, sd_ptr->offset); else int_val = sdstd_rreg(si, sd_ptr->offset); bcopy(&int_val, arg, sizeof(int_val)); break; } case IOV_SVAL(IOV_HOSTREG): { sdreg_t *sd_ptr = (sdreg_t *)params; if (sd_ptr->offset < SD_SysAddr || sd_ptr->offset > SD_MaxCurCap) { sd_err(("%s: bad offset 0x%x\n", __FUNCTION__, sd_ptr->offset)); bcmerror = BCME_BADARG; break; } sd_trace(("%s: wreg%d value 0x%08x at offset %d\n", __FUNCTION__, sd_ptr->value, (sd_ptr->offset & 1) ? 8 : ((sd_ptr->offset & 2) ? 
16 : 32), sd_ptr->offset));
		/* Register width is encoded in the offset alignment:
		 * odd offset -> 8-bit, (offset & 2) -> 16-bit, else 32-bit.
		 */
		if (sd_ptr->offset & 1)
			sdstd_wreg8(si, sd_ptr->offset, (uint8)sd_ptr->value);
		else if (sd_ptr->offset & 2)
			sdstd_wreg16(si, sd_ptr->offset, (uint16)sd_ptr->value);
		else
			sdstd_wreg(si, sd_ptr->offset, (uint32)sd_ptr->value);
		break;
	}

	/* Read one byte of a device (card) register via sdioh_cfg_read. */
	case IOV_GVAL(IOV_DEVREG):
	{
		sdreg_t *sd_ptr = (sdreg_t *)params;
		uint8 data;

		if (sdioh_cfg_read(si, sd_ptr->func, sd_ptr->offset, &data)) {
			bcmerror = BCME_SDIO_ERROR;
			break;
		}

		int_val = (int)data;
		bcopy(&int_val, arg, sizeof(int_val));
		break;
	}

	/* Write one byte of a device (card) register via sdioh_cfg_write. */
	case IOV_SVAL(IOV_DEVREG):
	{
		sdreg_t *sd_ptr = (sdreg_t *)params;
		uint8 data = (uint8)sd_ptr->value;

		if (sdioh_cfg_write(si, sd_ptr->func, sd_ptr->offset, &data)) {
			bcmerror = BCME_SDIO_ERROR;
			break;
		}
		break;
	}

	default:
		bcmerror = BCME_UNSUPPORTED;
		break;
	}
exit:
	return bcmerror;
}

/* Read one byte from function 'fnc_num' register space at 'addr' into *data.
 * Returns the status from sdioh_request_byte (SDIOH_API_RC_SUCCESS on success).
 */
extern SDIOH_API_RC
sdioh_cfg_read(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
{
	SDIOH_API_RC status;
	/* No lock needed since sdioh_request_byte does locking */
	status = sdioh_request_byte(sd, SDIOH_READ, fnc_num, addr, data);
	return status;
}

/* Write the single byte *data to function 'fnc_num' register space at 'addr'. */
extern SDIOH_API_RC
sdioh_cfg_write(sdioh_info_t *sd, uint fnc_num, uint32 addr, uint8 *data)
{
	/* No lock needed since sdioh_request_byte does locking */
	SDIOH_API_RC status;
	status = sdioh_request_byte(sd, SDIOH_WRITE, fnc_num, addr, data);
	return status;
}

/* Copy 'length' bytes of function 'func's CIS into 'cisd', one byte per
 * CMD52 read starting at the cached CIS pointer (sd->func_cis_ptr[func]).
 * If no CIS pointer is known for the function, the output buffer is zeroed
 * and SDIOH_API_RC_FAIL is returned.
 */
extern SDIOH_API_RC
sdioh_cis_read(sdioh_info_t *sd, uint func, uint8 *cisd, uint32 length)
{
	uint32 count;
	int offset;
	uint32 foo;
	uint8 *cis = cisd;

	sd_trace(("%s: Func = %d\n", __FUNCTION__, func));

	if (!sd->func_cis_ptr[func]) {
		bzero(cis, length);
		return SDIOH_API_RC_FAIL;
	}

	sdstd_lock(sd);
	*cis = 0;
	for (count = 0; count < length; count++) {
		offset = sd->func_cis_ptr[func] + count;
		/* Each byte is fetched with an individual F0 register read. */
		if (sdstd_card_regread(sd, 0, offset, 1, &foo)) {
			sd_err(("%s: regread failed: Can't read CIS\n", __FUNCTION__));
			sdstd_unlock(sd);
			return SDIOH_API_RC_FAIL;
		}
		*cis = (uint8)(foo & 0xff);
		cis++;
	}
	sdstd_unlock(sd);
	return SDIOH_API_RC_SUCCESS;
}

/* Single-byte CMD52 register access (body continues on the next source line). */
extern SDIOH_API_RC
sdioh_request_byte(sdioh_info_t
*sd, uint rw, uint func, uint regaddr, uint8 *byte) { int status; uint32 cmd_arg; uint32 rsp5; sdstd_lock(sd); cmd_arg = 0; cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func); cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr); cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, rw == SDIOH_READ ? 0 : 1); cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); cmd_arg = SFIELD(cmd_arg, CMD52_DATA, rw == SDIOH_READ ? 0 : *byte); if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg)) != SUCCESS) { sdstd_unlock(sd); return status; } sdstd_cmd_getrsp(sd, &rsp5, 1); if (sdstd_rreg16 (sd, SD_ErrorIntrStatus) != 0) { sd_err(("%s: 1: ErrorintrStatus 0x%x\n", __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus))); } if (GFIELD(rsp5, RSP5_FLAGS) != 0x10) sd_err(("%s: rsp5 flags is 0x%x\t %d\n", __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func)); if (GFIELD(rsp5, RSP5_STUFF)) sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); if (rw == SDIOH_READ) *byte = GFIELD(rsp5, RSP5_DATA); sdstd_unlock(sd); return SDIOH_API_RC_SUCCESS; } extern SDIOH_API_RC sdioh_request_word(sdioh_info_t *sd, uint cmd_type, uint rw, uint func, uint addr, uint32 *word, uint nbytes) { int status; bool swap = FALSE; sdstd_lock(sd); if (rw == SDIOH_READ) { status = sdstd_card_regread(sd, func, addr, nbytes, word); if (swap) *word = BCMSWAP32(*word); } else { if (swap) *word = BCMSWAP32(*word); status = sdstd_card_regwrite(sd, func, addr, nbytes, *word); } sdstd_unlock(sd); return (status == SUCCESS ? 
SDIOH_API_RC_SUCCESS : SDIOH_API_RC_FAIL); } extern SDIOH_API_RC sdioh_request_buffer(sdioh_info_t *sd, uint pio_dma, uint fix_inc, uint rw, uint func, uint addr, uint reg_width, uint buflen_u, uint8 *buffer, void *pkt) { int len; int buflen = (int)buflen_u; bool fifo = (fix_inc == SDIOH_DATA_FIX); uint8 *localbuf = NULL, *tmpbuf = NULL; uint tmplen = 0; bool local_blockmode = sd->sd_blockmode; sdstd_lock(sd); ASSERT(reg_width == 4); ASSERT(buflen_u < (1 << 30)); ASSERT(sd->client_block_size[func]); sd_data(("%s: %c len %d r_cnt %d t_cnt %d, pkt @0x%p\n", __FUNCTION__, rw == SDIOH_READ ? 'R' : 'W', buflen_u, sd->r_cnt, sd->t_cnt, pkt)); /* Break buffer down into blocksize chunks: * Bytemode: 1 block at a time. * Blockmode: Multiples of blocksizes at a time w/ max of SD_PAGE. * Both: leftovers are handled last (will be sent via bytemode). */ while (buflen > 0) { if (local_blockmode) { /* Max xfer is Page size */ len = MIN(SD_PAGE, buflen); /* Round down to a block boundry */ if (buflen > sd->client_block_size[func]) len = (len/sd->client_block_size[func]) * sd->client_block_size[func]; if ((func == SDIO_FUNC_1) && ((len % 4) == 3) && (rw == SDIOH_WRITE)) { tmplen = len; sd_err(("%s: Rounding up buffer to mod4 length.\n", __FUNCTION__)); len++; tmpbuf = buffer; if ((localbuf = (uint8 *)MALLOC(sd->osh, len)) == NULL) { sd_err(("out of memory, malloced %d bytes\n", MALLOCED(sd->osh))); sdstd_unlock(sd); return SDIOH_API_RC_FAIL; } bcopy(buffer, localbuf, len); buffer = localbuf; } } else { /* Byte mode: One block at a time */ len = MIN(sd->client_block_size[func], buflen); } if (sdstd_card_buf(sd, rw, func, fifo, addr, len, (uint32 *)buffer) != SUCCESS) { sdstd_unlock(sd); return SDIOH_API_RC_FAIL; } if (local_blockmode) { if ((func == SDIO_FUNC_1) && ((tmplen % 4) == 3) && (rw == SDIOH_WRITE)) { if (localbuf) MFREE(sd->osh, localbuf, len); len--; buffer = tmpbuf; sd_err(("%s: Restoring back buffer ptr and len.\n", __FUNCTION__)); } } buffer += len; buflen -= len; if 
(!fifo) addr += len; } sdstd_unlock(sd); return SDIOH_API_RC_SUCCESS; } static int sdstd_abort(sdioh_info_t *sd, uint func) { int err = 0; int retries; uint16 cmd_reg; uint32 cmd_arg; uint32 rsp5; uint8 rflags; uint16 int_reg = 0; uint16 plain_intstatus; /* Argument is write to F0 (CCCR) IOAbort with function number */ cmd_arg = 0; cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, SDIO_FUNC_0); cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, SDIOD_CCCR_IOABORT); cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SD_IO_OP_WRITE); cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0); cmd_arg = SFIELD(cmd_arg, CMD52_DATA, func); /* Command is CMD52 write */ cmd_reg = 0; cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48_BUSY); cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1); cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1); cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0); cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_ABORT); cmd_reg = SFIELD(cmd_reg, CMD_INDEX, SDIOH_CMD_52); if (sd->sd_mode == SDIOH_MODE_SPI) { cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0); cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0); } /* Wait for CMD_INHIBIT to go away as per spec section 3.6.1.1 */ retries = RETRIES_SMALL; while (GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CMD_INHIBIT)) { if (retries == RETRIES_SMALL) sd_err(("%s: Waiting for Command Inhibit, state 0x%08x\n", __FUNCTION__, sdstd_rreg(sd, SD_PresentState))); if (!--retries) { sd_err(("%s: Command Inhibit timeout, state 0x%08x\n", __FUNCTION__, sdstd_rreg(sd, SD_PresentState))); if (trap_errs) ASSERT(0); err = BCME_SDIO_ERROR; goto done; } } /* Clear errors from any previous commands */ if ((plain_intstatus = sdstd_rreg16(sd, SD_ErrorIntrStatus)) != 0) { sd_err(("abort: clearing errstat 0x%04x\n", plain_intstatus)); sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus); } plain_intstatus = sdstd_rreg16(sd, SD_IntrStatus); if (plain_intstatus & ~(SFIELD(0, INTSTAT_CARD_INT, 1))) { sd_err(("abort: intstatus 0x%04x\n", plain_intstatus)); if (GFIELD(plain_intstatus, INTSTAT_CMD_COMPLETE)) { 
sd_err(("SDSTD_ABORT: CMD COMPLETE SET BEFORE COMMAND GIVEN!!!\n")); } if (GFIELD(plain_intstatus, INTSTAT_CARD_REMOVAL)) { sd_err(("SDSTD_ABORT: INTSTAT_CARD_REMOVAL\n")); err = BCME_NODEVICE; goto done; } } /* Issue the command */ sdstd_wreg(sd, SD_Arg0, cmd_arg); sdstd_wreg16(sd, SD_Command, cmd_reg); /* In interrupt mode return, expect later CMD_COMPLETE interrupt */ if (!sd->polled_mode) return err; /* Otherwise, wait for the command to complete */ retries = RETRIES_LARGE; do { int_reg = sdstd_rreg16(sd, SD_IntrStatus); } while (--retries && (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) && (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0)); /* If command completion fails, do a cmd reset and note the error */ if (!retries) { sd_err(("%s: CMD_COMPLETE timeout: intr 0x%04x err 0x%04x state 0x%08x\n", __FUNCTION__, int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus), sdstd_rreg(sd, SD_PresentState))); sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1)); retries = RETRIES_LARGE; do { sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__)); } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset), SW_RESET_CMD)) && retries--); if (!retries) { sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__)); } if (trap_errs) ASSERT(0); err = BCME_SDIO_ERROR; } /* Clear Command Complete interrupt */ int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1); sdstd_wreg16(sd, SD_IntrStatus, int_reg); /* Check for Errors */ if ((plain_intstatus = sdstd_rreg16 (sd, SD_ErrorIntrStatus)) != 0) { sd_err(("%s: ErrorintrStatus: 0x%x, " "(intrstatus = 0x%x, present state 0x%x) clearing\n", __FUNCTION__, plain_intstatus, sdstd_rreg16(sd, SD_IntrStatus), sdstd_rreg(sd, SD_PresentState))); sdstd_wreg16(sd, SD_ErrorIntrStatus, plain_intstatus); sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1)); retries = RETRIES_LARGE; do { sd_trace(("%s: waiting for DAT line reset\n", __FUNCTION__)); } while ((GFIELD(sdstd_rreg8(sd, SD_SoftwareReset), SW_RESET_DAT)) && retries--); if (!retries) { 
sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__)); } if (trap_errs) ASSERT(0); /* ABORT is dataless, only cmd errs count */ if (plain_intstatus & ERRINT_CMD_ERRS) err = BCME_SDIO_ERROR; } /* If command failed don't bother looking at response */ if (err) goto done; /* Otherwise, check the response */ sdstd_cmd_getrsp(sd, &rsp5, 1); rflags = GFIELD(rsp5, RSP5_FLAGS); if (rflags & SD_RSP_R5_ERRBITS) { sd_err(("%s: R5 flags include errbits: 0x%02x\n", __FUNCTION__, rflags)); /* The CRC error flag applies to the previous command */ if (rflags & (SD_RSP_R5_ERRBITS & ~SD_RSP_R5_COM_CRC_ERROR)) { err = BCME_SDIO_ERROR; goto done; } } if (((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x10) && ((rflags & (SD_RSP_R5_IO_CURRENTSTATE0 | SD_RSP_R5_IO_CURRENTSTATE1)) != 0x20)) { sd_err(("%s: R5 flags has bad state: 0x%02x\n", __FUNCTION__, rflags)); err = BCME_SDIO_ERROR; goto done; } if (GFIELD(rsp5, RSP5_STUFF)) { sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n", __FUNCTION__, GFIELD(rsp5, RSP5_STUFF))); err = BCME_SDIO_ERROR; goto done; } done: if (err == BCME_NODEVICE) return err; sdstd_wreg8(sd, SD_SoftwareReset, SFIELD(SFIELD(0, SW_RESET_DAT, 1), SW_RESET_CMD, 1)); retries = RETRIES_LARGE; do { rflags = sdstd_rreg8(sd, SD_SoftwareReset); if (!GFIELD(rflags, SW_RESET_DAT) && !GFIELD(rflags, SW_RESET_CMD)) break; } while (--retries); if (!retries) { sd_err(("%s: Timeout waiting for DAT/CMD reset: 0x%02x\n", __FUNCTION__, rflags)); err = BCME_SDIO_ERROR; } return err; } extern int sdioh_abort(sdioh_info_t *sd, uint fnum) { int ret; sdstd_lock(sd); ret = sdstd_abort(sd, fnum); sdstd_unlock(sd); return ret; } int sdioh_start(sdioh_info_t *sd, int stage) { return SUCCESS; } int sdioh_stop(sdioh_info_t *sd) { return SUCCESS; } static int sdstd_check_errs(sdioh_info_t *sdioh_info, uint32 cmd, uint32 arg) { uint16 regval; uint retries; uint function = 0; /* If no errors, we're done */ if ((regval = sdstd_rreg16(sdioh_info, 
SD_ErrorIntrStatus)) == 0) return SUCCESS; sd_info(("%s: ErrorIntrStatus 0x%04x (clearing), IntrStatus 0x%04x PresentState 0x%08x\n", __FUNCTION__, regval, sdstd_rreg16(sdioh_info, SD_IntrStatus), sdstd_rreg(sdioh_info, SD_PresentState))); sdstd_wreg16(sdioh_info, SD_ErrorIntrStatus, regval); /* On command error, issue CMD reset */ if (regval & ERRINT_CMD_ERRS) { sd_trace(("%s: issuing CMD reset\n", __FUNCTION__)); sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1)); for (retries = RETRIES_LARGE; retries; retries--) if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_CMD))) break; if (!retries) { sd_err(("%s: Timeout waiting for CMD line reset\n", __FUNCTION__)); } } /* On data error, issue DAT reset */ if (regval & ERRINT_DATA_ERRS) { sd_trace(("%s: issuing DAT reset\n", __FUNCTION__)); sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_DAT, 1)); for (retries = RETRIES_LARGE; retries; retries--) if (!(GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset), SW_RESET_DAT))) break; if (!retries) { sd_err(("%s: Timeout waiting for DAT line reset\n", __FUNCTION__)); } } /* For an IO command (CMD52 or CMD53) issue an abort to the appropriate function */ if (cmd == SDIOH_CMD_53) function = GFIELD(arg, CMD53_FUNCTION); else if (cmd == SDIOH_CMD_52) function = GFIELD(arg, CMD52_FUNCTION); if (function) { sd_trace(("%s: requesting abort for function %d after cmd %d\n", __FUNCTION__, function, cmd)); sdstd_abort(sdioh_info, function); } if (trap_errs) ASSERT(0); return ERROR; } /* * Private/Static work routines */ static bool sdstd_reset(sdioh_info_t *sd, bool host_reset, bool client_reset) { int retries = RETRIES_LARGE; uchar regval; if (!sd) return TRUE; sdstd_lock(sd); /* Reset client card */ if (client_reset && (sd->adapter_slot != -1)) { if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOABORT, 1, 0x8) != SUCCESS) sd_err(("%s: Cannot write to card reg 0x%x\n", __FUNCTION__, SDIOD_CCCR_IOABORT)); else sd->card_rca = 0; } /* Reset host 
controller */
	if (host_reset) {
		regval = SFIELD(0, SW_RESET_ALL, 1);
		sdstd_wreg8(sd, SD_SoftwareReset, regval);
		/* Poll until the controller clears the self-clearing reset bit. */
		do {
			sd_trace(("%s: waiting for reset\n", __FUNCTION__));
		} while ((sdstd_rreg8(sd, SD_SoftwareReset) & regval) && retries--);

		if (!retries) {
			sd_err(("%s: Timeout waiting for host reset\n", __FUNCTION__));
			sdstd_unlock(sd);
			return (FALSE);
		}

		/* A reset should reset bus back to 1 bit mode */
		sd->sd_mode = SDIOH_MODE_SD1;
		sdstd_set_dma_mode(sd, sd->sd_dma_mode);
	}
	sdstd_unlock(sd);
	return TRUE;
}

/* Disable device interrupt */
/* Mask the client-card interrupt (CLIENT_INTR) in the interrupt
 * signal-enable register; no-op unless client interrupts are in use.
 */
void
sdstd_devintr_off(sdioh_info_t *sd)
{
	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
	if (sd->use_client_ints) {
		sd->intmask &= ~CLIENT_INTR;
		sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
		sdstd_rreg16(sd, SD_IntrSignalEnable);	/* Sync readback */
	}
}

/* Enable device interrupt */
/* Unmask the client-card interrupt.  Must be called without the sd lock
 * held (asserted below).
 */
void
sdstd_devintr_on(sdioh_info_t *sd)
{
	ASSERT(sd->lockcount == 0);
	sd_trace(("%s: %d\n", __FUNCTION__, sd->use_client_ints));
	if (sd->use_client_ints) {
		uint16 status = sdstd_rreg16(sd, SD_IntrStatusEnable);
		/* NOTE(review): the card-interrupt status-enable bit is pulsed
		 * off and back on here -- presumably to re-arm detection of a
		 * still-pending card interrupt; confirm against the SD Host
		 * Controller spec before changing.
		 */
		sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(status, INTSTAT_CARD_INT, 0));
		sdstd_wreg16(sd, SD_IntrStatusEnable, status);
		sd->intmask |= CLIENT_INTR;
		sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
		sdstd_rreg16(sd, SD_IntrSignalEnable);	/* Sync readback */
	}
}

#ifdef BCMSDYIELD
/* Enable/disable other interrupts */
/* Unmask the normal-status bits in 'norm' (plus the error summary bit and
 * the error bits in 'err', when 'err' is nonzero).
 */
void
sdstd_intrs_on(sdioh_info_t *sd, uint16 norm, uint16 err)
{
	if (err) {
		norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
		sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, err);
	}

	sd->intmask |= norm;
	sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
	if (sd_forcerb)
		sdstd_rreg16(sd, SD_IntrSignalEnable);	/* Sync readback */
}

/* Mask the normal-status bits in 'norm'; when 'err' is nonzero, also mask
 * the error summary bit and disable all error interrupt signals.
 */
void
sdstd_intrs_off(sdioh_info_t *sd, uint16 norm, uint16 err)
{
	if (err) {
		norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
		sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
	}

	sd->intmask &= ~norm;
	sdstd_wreg16(sd, SD_IntrSignalEnable, sd->intmask);
	if (sd_forcerb)
		sdstd_rreg16(sd, SD_IntrSignalEnable);	/*
Sync readback */ } #endif /* BCMSDYIELD */ static int sdstd_host_init(sdioh_info_t *sd) { int num_slots, full_slot; uint8 reg8; uint32 card_ins; int slot, first_bar = 0; bool detect_slots = FALSE; uint bar; /* Check for Arasan ID */ if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_SI_IMAGE) { sd_info(("%s: Found Arasan Standard SDIO Host Controller\n", __FUNCTION__)); sd->controller_type = SDIOH_TYPE_ARASAN_HDK; detect_slots = TRUE; } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_BROADCOM) { sd_info(("%s: Found Broadcom 27xx Standard SDIO Host Controller\n", __FUNCTION__)); sd->controller_type = SDIOH_TYPE_BCM27XX; detect_slots = FALSE; } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_TI) { sd_info(("%s: Found TI PCIxx21 Standard SDIO Host Controller\n", __FUNCTION__)); sd->controller_type = SDIOH_TYPE_TI_PCIXX21; detect_slots = TRUE; } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_RICOH) { sd_info(("%s: Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host Adapter\n", __FUNCTION__)); sd->controller_type = SDIOH_TYPE_RICOH_R5C822; detect_slots = TRUE; } else if ((OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) & 0xFFFF) == VENDOR_JMICRON) { sd_info(("%s: JMicron Standard SDIO Host Controller\n", __FUNCTION__)); sd->controller_type = SDIOH_TYPE_JMICRON; detect_slots = TRUE; } else { return ERROR; } /* * Determine num of slots * Search each slot */ first_bar = OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0x7; num_slots = (OSL_PCI_READ_CONFIG(sd->osh, SD_SlotInfo, 4) & 0xff) >> 4; num_slots &= 7; num_slots++; /* map bits to num slots according to spec */ if (OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_VID, 4) == ((SDIOH_FPGA_ID << 16) | VENDOR_BROADCOM)) { sd_err(("%s: Found Broadcom Standard SDIO Host Controller FPGA\n", __FUNCTION__)); /* Set BAR0 Window to SDIOSTH core */ OSL_PCI_WRITE_CONFIG(sd->osh, PCI_BAR0_WIN, 4, 0x18001000); /* Set defaults particular to this 
controller. */
		detect_slots = TRUE;
		num_slots = 1;
		first_bar = 0;

		/* Controller supports ADMA2, so turn it on here. */
		sd->sd_dma_mode = DMA_MODE_ADMA2;
	}

	/* Map in each slot on the board and query it to see if a
	 * card is inserted.  Use the first populated slot found.
	 */
	if (sd->mem_space) {
		sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
		sd->mem_space = NULL;
	}

	full_slot = -1;

	for (slot = 0; slot < num_slots; slot++) {
		/* Each slot's register window lives behind its own PCI BAR. */
		bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(slot + first_bar)), 4);
		sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh,
		                                               (uintptr)bar, SDIOH_REG_WINSZ);

		sd->adapter_slot = -1;

		if (detect_slots) {
			card_ins = GFIELD(sdstd_rreg(sd, SD_PresentState), PRES_CARD_PRESENT);
		} else {
			card_ins = TRUE;
		}

		if (card_ins) {
			sd_info(("%s: SDIO slot %d: Full\n", __FUNCTION__, slot));
			if (full_slot < 0)
				full_slot = slot;
		} else {
			sd_info(("%s: SDIO slot %d: Empty\n", __FUNCTION__, slot));
		}

		/* Unmap the probe window; the chosen slot is re-mapped below. */
		if (sd->mem_space) {
			sdstd_reg_unmap(sd->osh, (uintptr)sd->mem_space, SDIOH_REG_WINSZ);
			sd->mem_space = NULL;
		}
	}

	if (full_slot < 0) {
		sd_err(("No slots on SDIO controller are populated\n"));
		return -1;
	}

	/* Map in the first populated slot for the remainder of the session. */
	bar = OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4);
	sd->mem_space = (volatile char *)sdstd_reg_map(sd->osh, (uintptr)bar, SDIOH_REG_WINSZ);

	sd_err(("Using slot %d at BAR%d [0x%08x] mem_space 0x%p\n",
	        full_slot,
	        (full_slot + first_bar),
	        OSL_PCI_READ_CONFIG(sd->osh, PCI_CFG_BAR0 + (4*(full_slot + first_bar)), 4),
	        sd->mem_space));

	sd->adapter_slot = full_slot;

	/* Low byte of the version register is the spec version; high byte is
	 * the vendor's revision.
	 */
	sd->version = sdstd_rreg16(sd, SD_HostControllerVersion) & 0xFF;
	switch (sd->version) {
	case 0:
		sd_err(("Host Controller version 1.0, Vendor Revision: 0x%02x\n",
		        sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
		break;
	case 1:
	case 2:
		sd_err(("Host Controller version 2.0, Vendor Revision: 0x%02x\n",
		        sdstd_rreg16(sd, SD_HostControllerVersion) >> 8));
		break;
	default:
		sd_err(("%s: Host Controller version 0x%02x not supported.\n",
		        __FUNCTION__, sd->version));
		break;
	}

	sd->caps = sdstd_rreg(sd, SD_Capabilities);	/* Cache this for later use */
	sd->curr_caps = sdstd_rreg(sd, SD_MaxCurCap);

	sdstd_set_dma_mode(sd, sd->sd_dma_mode);

	sdstd_reset(sd, 1, 0);

	/* Read SD4/SD1 mode */
	if ((reg8 = sdstd_rreg8(sd, SD_HostCntrl))) {
		if (reg8 & SD4_MODE) {
			sd_err(("%s: Host cntrlr already in 4 bit mode: 0x%x\n",
			        __FUNCTION__, reg8));
		}
	}

	/* Default power on mode is SD1 */
	sd->sd_mode = SDIOH_MODE_SD1;
	sd->polled_mode = TRUE;
	sd->host_init_done = TRUE;
	sd->card_init_done = FALSE;
	sd->adapter_slot = full_slot;

	return (SUCCESS);
}

#define CMD5_RETRIES 200

/*
 * Issue CMD5 (IO_SEND_OP_COND) until the card reports ready or retries
 * run out.  On success the raw R4 response is left in *cmd_rsp.
 * Returns SUCCESS, ERROR on ready-timeout, or the CMD5 issue status.
 */
static int
get_ocr(sdioh_info_t *sd, uint32 *cmd_arg, uint32 *cmd_rsp)
{
	int retries, status;

	/* Get the Card's Operation Condition.  Occasionally the board
	 * takes a while to become ready
	 */
	retries = CMD5_RETRIES;
	do {
		*cmd_rsp = 0;
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_5, *cmd_arg))
		    != SUCCESS) {
			sd_err(("%s: CMD5 failed\n", __FUNCTION__));
			return status;
		}
		sdstd_cmd_getrsp(sd, cmd_rsp, 1);
		if (!GFIELD(*cmd_rsp, RSP4_CARD_READY))
			sd_trace(("%s: Waiting for card to become ready\n", __FUNCTION__));
	} while ((!GFIELD(*cmd_rsp, RSP4_CARD_READY)) && --retries);
	if (!retries)
		return ERROR;

	return (SUCCESS);
}

/*
 * Bring the inserted card up to an operational state: clear/enable status
 * bits, start a slow init clock, power the slot, enumerate via CMD3/CMD7
 * (unless in SPI mode), enable the I/O functions, set bus width and block
 * sizes, enable client interrupts in CCCR, then switch to the target
 * clock (optionally in high-speed mode).  Returns SUCCESS or ERROR/status.
 */
static int
sdstd_client_init(sdioh_info_t *sd)
{
	uint32 cmd_arg, cmd_rsp;
	int status;
	uint8 fn_ints;

	sd_trace(("%s: Powering up slot %d\n", __FUNCTION__, sd->adapter_slot));

	/* Clear any pending ints */
	sdstd_wreg16(sd, SD_IntrStatus, 0x1ff);
	sdstd_wreg16(sd, SD_ErrorIntrStatus, 0x0fff);

	/* Enable both Normal and Error Status.  This does not enable
	 * interrupts, it only enables the status bits to
	 * become 'live'
	 */
	sdstd_wreg16(sd, SD_IntrStatusEnable, 0x1ff);
	sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, 0xffff);

	sdstd_wreg16(sd, SD_IntrSignalEnable, 0);	/* Disable ints for now. */

	/* Start at ~400KHz clock rate for initialization */
	if (!sdstd_start_clock(sd, 128)) {
		sd_err(("sdstd_start_clock failed\n"));
		return ERROR;
	}
	if (!sdstd_start_power(sd)) {
		sd_err(("sdstd_start_power failed\n"));
		return ERROR;
	}

	/* sd->num_funcs was set from the R4 response in sdstd_start_power() */
	if (sd->num_funcs == 0) {
		sd_err(("%s: No IO funcs!\n", __FUNCTION__));
		return ERROR;
	}

	/* In SPI mode, issue CMD0 first */
	if (sd->sd_mode == SDIOH_MODE_SPI) {
		cmd_arg = 0;
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_0, cmd_arg))
		    != SUCCESS) {
			sd_err(("BCMSDIOH: cardinit: CMD0 failed!\n"));
			return status;
		}
	}

	if (sd->sd_mode != SDIOH_MODE_SPI) {
		uint16 rsp6_status;

		/* Card is operational. Ask it to send an RCA */
		cmd_arg = 0;
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_3, cmd_arg))
		    != SUCCESS) {
			sd_err(("%s: CMD3 failed!\n", __FUNCTION__));
			return status;
		}

		/* Verify the card status returned with the cmd response */
		sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
		rsp6_status = GFIELD(cmd_rsp, RSP6_STATUS);
		if (GFIELD(rsp6_status, RSP6STAT_COM_CRC_ERROR) ||
		    GFIELD(rsp6_status, RSP6STAT_ILLEGAL_CMD) ||
		    GFIELD(rsp6_status, RSP6STAT_ERROR)) {
			sd_err(("%s: CMD3 response error. Response = 0x%x!\n",
			        __FUNCTION__, rsp6_status));
			return ERROR;
		}

		/* Save the Card's RCA */
		sd->card_rca = GFIELD(cmd_rsp, RSP6_IO_RCA);
		sd_info(("RCA is 0x%x\n", sd->card_rca));

		if (rsp6_status)
			sd_err(("raw status is 0x%x\n", rsp6_status));

		/* Select the card */
		cmd_arg = SFIELD(0, CMD7_RCA, sd->card_rca);
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_7, cmd_arg))
		    != SUCCESS) {
			sd_err(("%s: CMD7 failed!\n", __FUNCTION__));
			return status;
		}
		sdstd_cmd_getrsp(sd, &cmd_rsp, 1);
		if (cmd_rsp != SDIOH_CMD7_EXP_STATUS) {
			sd_err(("%s: CMD7 response error. Response = 0x%x!\n",
			        __FUNCTION__, cmd_rsp));
			return ERROR;
		}
	}

	sdstd_card_enablefuncs(sd);

	if (!sdstd_bus_width(sd, sd_sdmode)) {
		sd_err(("sdstd_bus_width failed\n"));
		return ERROR;
	}

	set_client_block_size(sd, 1, BLOCK_SIZE_4318);
	fn_ints = INTR_CTL_FUNC1_EN;

	if (sd->num_funcs >= 2) {
		set_client_block_size(sd, 2, sd_f2_blocksize /* BLOCK_SIZE_4328 */);
		fn_ints |= INTR_CTL_FUNC2_EN;
	}

	/* Enable/Disable Client interrupts */
	/* Turn on here but disable at host controller? */
	if (sdstd_card_regwrite(sd, 0, SDIOD_CCCR_INTEN, 1,
	                        (fn_ints | INTR_CTL_MASTER_EN)) != SUCCESS) {
		sd_err(("%s: Could not enable ints in CCCR\n", __FUNCTION__));
		return ERROR;
	}

	/* Switch to High-speed clocking mode if both host and device support it */
	sdstd_set_highspeed_mode(sd, (bool)sd_hiok);

	/* After configuring for High-Speed mode, set the desired clock rate. */
	if (!sdstd_start_clock(sd, (uint16)sd_divisor)) {
		sd_err(("sdstd_start_clock failed\n"));
		return ERROR;
	}

	sd->card_init_done = TRUE;

	return SUCCESS;
}

/*
 * Enable or disable high-speed clocking.  When HSMode is TRUE the device's
 * CCCR speed-control register is checked for SHS support; if supported, EHS
 * is set on the card and the host's HI_SPEED_EN bit is set.  When FALSE,
 * the card's EHS bit is cleared (if set) and the host bit is cleared.
 * Returns BCME_OK, BCME_ERROR, BCME_SDIO_ERROR, or a regread/regwrite status.
 */
static int
sdstd_set_highspeed_mode(sdioh_info_t *sd, bool HSMode)
{
	uint32 regdata;
	int status;
	uint8 reg8;

	reg8 = sdstd_rreg8(sd, SD_HostCntrl);

	if (HSMode == TRUE) {
		if (sd_hiok && (GFIELD(sd->caps, CAP_HIGHSPEED)) == 0) {
			sd_err(("Host Controller does not support hi-speed mode.\n"));
			return BCME_ERROR;
		}

		sd_info(("Attempting to enable High-Speed mode.\n"));

		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
		                                 1, &regdata)) != SUCCESS) {
			return BCME_SDIO_ERROR;
		}
		if (regdata & SDIO_SPEED_SHS) {
			sd_info(("Device supports High-Speed mode.\n"));
			regdata |= SDIO_SPEED_EHS;

			sd_info(("Writing %08x to Card at %08x\n",
			         regdata, SDIOD_CCCR_SPEED_CONTROL));
			if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
			                                  1, regdata)) != BCME_OK) {
				return BCME_SDIO_ERROR;
			}

			/* Read back to confirm the EHS write took effect */
			if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
			                                 1, &regdata)) != BCME_OK) {
				return BCME_SDIO_ERROR;
			}

			sd_info(("Read %08x to Card at %08x\n",
			         regdata, SDIOD_CCCR_SPEED_CONTROL));

			reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 1);

			sd_err(("High-speed clocking mode enabled.\n"));
		} else {
			sd_err(("Device does not support High-Speed Mode.\n"));
			reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
		}
	} else {
		/* Force off device bit */
		if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
		                                 1, &regdata)) != BCME_OK) {
			return status;
		}
		if (regdata & SDIO_SPEED_EHS) {
			regdata &= ~SDIO_SPEED_EHS;
			if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_SPEED_CONTROL,
			                                  1, regdata)) != BCME_OK) {
				return status;
			}
		}

		sd_err(("High-speed clocking mode disabled.\n"));
		reg8 = SFIELD(reg8, HOST_HI_SPEED_EN, 0);
	}

	sdstd_wreg8(sd, SD_HostCntrl, reg8);

	return BCME_OK;
}

/* Select DMA Mode:
 * If dma_mode == DMA_MODE_AUTO, pick the "best" mode.
 * Otherwise, pick the selected mode if supported.
 * If not supported, use PIO mode.
 *
 * NOTE(review): the dma_mode parameter is never referenced; the switch is
 * driven entirely by the previous value of sd->sd_dma_mode.  Callers
 * apparently must set sd->sd_dma_mode before calling — verify intent.
 */
static int
sdstd_set_dma_mode(sdioh_info_t *sd, int8 dma_mode)
{
	uint8 reg8, dma_sel_bits = SDIOH_SDMA_MODE;
	int8 prev_dma_mode = sd->sd_dma_mode;

	switch (prev_dma_mode) {
	case DMA_MODE_AUTO:
		sd_dma(("%s: Selecting best DMA mode supported by controller.\n",
		        __FUNCTION__));
		/* Preference order: ADMA2 > ADMA1 > SDMA > PIO */
		if (GFIELD(sd->caps, CAP_ADMA2)) {
			sd->sd_dma_mode = DMA_MODE_ADMA2;
			dma_sel_bits = SDIOH_ADMA2_MODE;
		} else if (GFIELD(sd->caps, CAP_ADMA1)) {
			sd->sd_dma_mode = DMA_MODE_ADMA1;
			dma_sel_bits = SDIOH_ADMA1_MODE;
		} else if (GFIELD(sd->caps, CAP_DMA)) {
			sd->sd_dma_mode = DMA_MODE_SDMA;
		} else {
			sd->sd_dma_mode = DMA_MODE_NONE;
		}
		break;
	case DMA_MODE_NONE:
		sd->sd_dma_mode = DMA_MODE_NONE;
		break;
	case DMA_MODE_SDMA:
		if (GFIELD(sd->caps, CAP_DMA)) {
			sd->sd_dma_mode = DMA_MODE_SDMA;
		} else {
			sd_err(("%s: SDMA not supported by controller.\n", __FUNCTION__));
			sd->sd_dma_mode = DMA_MODE_NONE;
		}
		break;
	case DMA_MODE_ADMA1:
		if (GFIELD(sd->caps, CAP_ADMA1)) {
			sd->sd_dma_mode = DMA_MODE_ADMA1;
			dma_sel_bits = SDIOH_ADMA1_MODE;
		} else {
			sd_err(("%s: ADMA1 not supported by controller.\n", __FUNCTION__));
			sd->sd_dma_mode = DMA_MODE_NONE;
		}
		break;
	case DMA_MODE_ADMA2:
		if (GFIELD(sd->caps, CAP_ADMA2)) {
			sd->sd_dma_mode = DMA_MODE_ADMA2;
			dma_sel_bits = SDIOH_ADMA2_MODE;
		} else {
			sd_err(("%s: ADMA2 not supported by controller.\n", __FUNCTION__));
			sd->sd_dma_mode = DMA_MODE_NONE;
		}
		break;
	case DMA_MODE_ADMA2_64:
		sd_err(("%s: 64b ADMA2 not supported by driver.\n", __FUNCTION__));
		sd->sd_dma_mode = DMA_MODE_NONE;
		break;
	default:
		sd_err(("%s: Unsupported DMA Mode %d requested.\n",
		        __FUNCTION__, prev_dma_mode));
		sd->sd_dma_mode = DMA_MODE_NONE;
		break;
	}

	/* clear SysAddr, only used for SDMA */
	sdstd_wreg(sd, SD_SysAddr, 0);

	sd_err(("%s: %s mode selected.\n", __FUNCTION__,
	        dma_mode_description[sd->sd_dma_mode]));

	reg8 = sdstd_rreg8(sd, SD_HostCntrl);
	reg8 = SFIELD(reg8, HOST_DMA_SEL, dma_sel_bits);
	sdstd_wreg8(sd, SD_HostCntrl, reg8);
	sd_dma(("%s: SD_HostCntrl=0x%02x\n", __FUNCTION__, reg8));

	return BCME_OK;
}

/*
 * Program the SD clock divisor and (re)enable the SD clock.
 * Sequence follows the host-controller spec: disable clock, set divisor,
 * enable internal clock, poll for stability, then gate the clock to the
 * card.  Also scales the data-timeout counter by the divisor.
 * Returns TRUE on success, FALSE if the clock never stabilizes or the
 * base clock frequency is unrecognized.
 */
bool
sdstd_start_clock(sdioh_info_t *sd, uint16 new_sd_divisor)
{
	uint rc, count;
	uint16 divisor;

	/* turn off HC clock */
	sdstd_wreg16(sd, SD_ClockCntrl,
	             sdstd_rreg16(sd, SD_ClockCntrl) & ~((uint16)0x4)); /* Disable the HC clock */

	/* Set divisor */
	divisor = (new_sd_divisor >> 1) << 8;

	sd_info(("Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));
	sdstd_mod_reg16(sd, SD_ClockCntrl, 0xff00, divisor);
	sd_info(("%s: Using clock divisor of %d (regval 0x%04x)\n", __FUNCTION__,
	         new_sd_divisor, divisor));
	sd_info(("Primary Clock Freq = %d MHz\n", GFIELD(sd->caps, CAP_TO_CLKFREQ)));

	if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 50) {
		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
		        ((50 % new_sd_divisor) ? (50000 / new_sd_divisor) : (50 / new_sd_divisor)),
		        ((50 % new_sd_divisor) ? "KHz" : "MHz")));
	} else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 48) {
		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
		        ((48 % new_sd_divisor) ? (48000 / new_sd_divisor) : (48 / new_sd_divisor)),
		        ((48 % new_sd_divisor) ? "KHz" : "MHz")));
	} else if (GFIELD(sd->caps, CAP_TO_CLKFREQ) == 33) {
		sd_info(("%s: Resulting SDIO clock is %d %s\n", __FUNCTION__,
		        ((33 % new_sd_divisor) ? (33000 / new_sd_divisor) : (33 / new_sd_divisor)),
		        ((33 % new_sd_divisor) ? "KHz" : "MHz")));
	} else if (sd->controller_type == SDIOH_TYPE_BCM27XX) {
		/* BCM27XX controller: no clock-rate message printed here */
	} else {
		sd_err(("Need to determine divisor for %d MHz clocks\n",
		        GFIELD(sd->caps, CAP_TO_CLKFREQ)));
		sd_err(("Consult SD Host Controller Spec: Clock Control Register\n"));
		return (FALSE);
	}

	sdstd_or_reg16(sd, SD_ClockCntrl, 0x1); /* Enable the clock */

	/* Wait for clock to stabilize */
	rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
	count = 0;
	while (!rc) {
		OSL_DELAY(1);
		sd_info(("Waiting for clock to become stable 0x%x\n", rc));
		rc = (sdstd_rreg16(sd, SD_ClockCntrl) & 2);
		count++;
		if (count > 10000) {
			sd_err(("%s:Clocks failed to stabilize after %u attempts",
			        __FUNCTION__, count));
			return (FALSE);
		}
	}

	/* Turn on clock */
	sdstd_or_reg16(sd, SD_ClockCntrl, 0x4);

	/* Set timeout control (adjust default value based on divisor).
	 * Disabling timeout interrupts during setting is advised by host spec.
	 */
	{
		uint16 regdata;
		uint toval;

		toval = sd_toctl;
		divisor = new_sd_divisor;

		/* Reduce the timeout exponent by one per halving of the clock */
		while (toval && !(divisor & 1)) {
			toval -= 1;
			divisor >>= 1;
		}

		regdata = sdstd_rreg16(sd, SD_ErrorIntrStatusEnable);
		sdstd_wreg16(sd, SD_ErrorIntrStatusEnable,
		             (regdata & ~ERRINT_DATA_TIMEOUT_BIT));
		sdstd_wreg8(sd, SD_TimeoutCntrl, (uint8)toval);
		sdstd_wreg16(sd, SD_ErrorIntrStatusEnable, regdata);
	}

	OSL_DELAY(2);

	sd_info(("Final Clock control is 0x%x\n", sdstd_rreg16(sd, SD_ClockCntrl)));

	return TRUE;
}

/*
 * Apply bus power to the slot at the highest supported voltage, then use
 * CMD5 to read the card's OCR, record the number of I/O functions, verify
 * 3.3V support, and finally request the full operating voltage range.
 * Returns TRUE on success, FALSE on OCR timeout.
 *
 * NOTE(review): the "does not support I/O" / "3.3 volts" paths return
 * ERROR from a bool-returning function; if ERROR is nonzero these read as
 * TRUE (success) to callers testing truthiness — verify against ERROR's
 * definition.
 */
bool
sdstd_start_power(sdioh_info_t *sd)
{
	char *s;
	uint32 cmd_arg;
	uint32 cmd_rsp;
	uint8 pwr = 0;
	int volts;

	volts = 0;
	s = NULL;
	/* Pick the highest voltage the controller advertises; later checks
	 * win, so 3.3V is preferred over 3.0V over 1.8V.
	 */
	if (GFIELD(sd->caps, CAP_VOLT_1_8)) {
		volts = 5;
		s = "1.8";
	}
	if (GFIELD(sd->caps, CAP_VOLT_3_0)) {
		volts = 6;
		s = "3.0";
	}
	if (GFIELD(sd->caps, CAP_VOLT_3_3)) {
		volts = 7;
		s = "3.3";
	}
	pwr = SFIELD(pwr, PWR_VOLTS, volts);
	pwr = SFIELD(pwr, PWR_BUS_EN, 1);
	sdstd_wreg8(sd, SD_PwrCntrl, pwr); /* Set Voltage level */
	sd_info(("Setting Bus Power to %s Volts\n", s));

	/* Wait for power to stabilize, Dongle takes longer than NIC. */
	OSL_DELAY(250000);

	/* Get the Card's Operation Condition.  Occasionally the board
	 * takes a while to become ready
	 */
	cmd_arg = 0;
	cmd_rsp = 0;
	if (get_ocr(sd, &cmd_arg, &cmd_rsp) != SUCCESS) {
		sd_err(("%s: Failed to get OCR bailing\n", __FUNCTION__));
		sdstd_reset(sd, 0, 1);
		return FALSE;
	}

	sd_info(("mem_present = %d\n", GFIELD(cmd_rsp, RSP4_MEM_PRESENT)));
	sd_info(("num_funcs = %d\n", GFIELD(cmd_rsp, RSP4_NUM_FUNCS)));
	sd_info(("card_ready = %d\n", GFIELD(cmd_rsp, RSP4_CARD_READY)));
	sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)))
;
	/* Verify that the card supports I/O mode */
	if (GFIELD(cmd_rsp, RSP4_NUM_FUNCS) == 0) {
		sd_err(("%s: Card does not support I/O\n", __FUNCTION__));
		return ERROR;
	}
	sd->num_funcs = GFIELD(cmd_rsp, RSP4_NUM_FUNCS);

	/* Examine voltage: Arasan only supports 3.3 volts,
	 * so look for 3.2-3.3 Volts and also 3.3-3.4 volts.
	 */
	if ((GFIELD(cmd_rsp, RSP4_IO_OCR) & (0x3 << 20)) == 0) {
		sd_err(("This client does not support 3.3 volts!\n"));
		return ERROR;
	}
	sd_info(("Leaving bus power at 3.3 Volts\n"));

	/* Re-issue CMD5 with the full voltage window to set the card's OCR */
	cmd_arg = SFIELD(0, CMD5_OCR, 0xfff000);
	cmd_rsp = 0;
	get_ocr(sd, &cmd_arg, &cmd_rsp);
	sd_info(("OCR = 0x%x\n", GFIELD(cmd_rsp, RSP4_IO_OCR)));
	return TRUE;
}

/*
 * Set the data bus width (SD1 or SD4) on both the card (CCCR bus-interface
 * control register) and the host controller, and record it in sd->sd_mode.
 * Returns TRUE on success, or the regread/regwrite status cast to bool.
 */
bool
sdstd_bus_width(sdioh_info_t *sd, int new_mode)
{
	uint32 regdata;
	int status;
	uint8 reg8;

	sd_trace(("%s\n", __FUNCTION__));
	if (sd->sd_mode == new_mode) {
		sd_info(("%s: Already at width %d\n", __FUNCTION__, new_mode));
		/* Could exit, but continue just in case... */
	}

	/* Set client side via reg 0x7 in CCCR */
	if ((status = sdstd_card_regread(sd, 0, SDIOD_CCCR_BICTRL, 1, &regdata)) != SUCCESS)
		return (bool)status;
	regdata &= ~BUS_SD_DATA_WIDTH_MASK;
	if (new_mode == SDIOH_MODE_SD4) {
		sd_info(("Changing to SD4 Mode\n"));
		regdata |= SD4_MODE;
	} else if (new_mode == SDIOH_MODE_SD1) {
		sd_info(("Changing to SD1 Mode\n"));
	} else {
		sd_err(("SPI Mode not supported by Standard Host Controller\n"));
	}
	if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_BICTRL, 1, regdata)) != SUCCESS)
		return (bool)status;

	/* Set host side via Host reg */
	reg8 = sdstd_rreg8(sd, SD_HostCntrl) & ~SD4_MODE;
	if (new_mode == SDIOH_MODE_SD4)
		reg8 |= SD4_MODE;
	sdstd_wreg8(sd, SD_HostCntrl, reg8);

	sd->sd_mode = new_mode;

	return TRUE;
}

/* Top-level init: bring up the host controller, then the client card. */
static int
sdstd_driver_init(sdioh_info_t *sd)
{
	sd_trace(("%s\n", __FUNCTION__));
	if ((sdstd_host_init(sd)) != SUCCESS) {
		return ERROR;
	}
	if (sdstd_client_init(sd) != SUCCESS) {
		return ERROR;
	}
	return SUCCESS;
}

/*
 * Assemble a 24-bit little-endian CIS pointer from three consecutive
 * byte-wide card registers starting at regaddr, masked to the valid
 * 17-bit address range.
 */
static int
sdstd_get_cisaddr(sdioh_info_t *sd, uint32 regaddr)
{
	/* read 24 bits and return valid 17 bit addr */
	int i;
	uint32 scratch, regdata;
	uint8 *ptr = (uint8 *)&scratch;
	for (i = 0; i < 3; i++) {
		if ((sdstd_card_regread(sd, 0, regaddr, 1, &regdata)) != SUCCESS)
			sd_err(("%s: Can't read!\n", __FUNCTION__));
		*ptr++ = (uint8) regdata;
		regaddr++;
	}
	/* Only the lower 17-bits are valid */
	scratch = ltoh32(scratch);
	scratch &= 0x0001FFFF;
	return (scratch);
}

/*
 * Read the common and per-function CIS pointers into sd->func_cis_ptr[]
 * and enable I/O function 1 via the CCCR I/O-enable register.
 * Returns SUCCESS or the regwrite status.
 */
static int
sdstd_card_enablefuncs(sdioh_info_t *sd)
{
	int status;
	uint32 regdata;
	uint32 fbraddr;
	uint8 func;

	sd_trace(("%s\n", __FUNCTION__));

	/* Get the Card's common CIS address */
	sd->com_cis_ptr = sdstd_get_cisaddr(sd, SDIOD_CCCR_CISPTR_0);
	sd->func_cis_ptr[0] = sd->com_cis_ptr;
	sd_info(("%s: Card's Common CIS Ptr = 0x%x\n", __FUNCTION__, sd->com_cis_ptr));

	/* Get the Card's function CIS (for each function) */
	for (fbraddr = SDIOD_FBR_STARTADDR, func = 1;
	     func <= sd->num_funcs; func++, fbraddr += SDIOD_FBR_SIZE) {
		sd->func_cis_ptr[func] = sdstd_get_cisaddr(sd, SDIOD_FBR_CISPTR_0 + fbraddr);
		sd_info(("%s: Function %d CIS Ptr = 0x%x\n",
		         __FUNCTION__, func, sd->func_cis_ptr[func]));
	}

	/* Enable function 1 on the card */
	regdata = SDIO_FUNC_ENABLE_1;
	if ((status = sdstd_card_regwrite(sd, 0, SDIOD_CCCR_IOEN, 1, regdata)) != SUCCESS)
		return status;

	return SUCCESS;
}

/* Read client card reg.
 * Uses CMD52 for func-0 or single-byte reads, CMD53 byte mode otherwise;
 * in polled mode waits for buffer-read-ready and transfer-complete status.
 * Returns SUCCESS, ERROR, or the cmd-issue status; *data receives the value.
 */
static int
sdstd_card_regread(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 *data)
{
	int status;
	uint32 cmd_arg;
	uint32 rsp5;

	cmd_arg = 0;

	if ((func == 0) || (regsize == 1)) {
		cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
		cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
		cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_READ);
		cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
		cmd_arg = SFIELD(cmd_arg, CMD52_DATA, 0);

		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
		    != SUCCESS)
			return status;
		sdstd_cmd_getrsp(sd, &rsp5, 1);
		if (sdstd_rreg16(sd, SD_ErrorIntrStatus) != 0) {
			sd_err(("%s: 1: ErrorintrStatus 0x%x\n",
			        __FUNCTION__, sdstd_rreg16(sd, SD_ErrorIntrStatus)));
		}

		if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
			sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
			        __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));

		if (GFIELD(rsp5, RSP5_STUFF))
			sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
			        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));
		*data = GFIELD(rsp5, RSP5_DATA);
	} else {
		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
		cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
		cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);

		sd->data_xfer_count = regsize;

		/* sdstd_cmd_issue() returns with the command complete bit
		 * in the ISR already cleared
		 */
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
		    != SUCCESS)
			return status;

		sdstd_cmd_getrsp(sd, &rsp5, 1);

		if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
			sd_err(("%s: rsp5 flags is 0x%x\t %d\n",
			        __FUNCTION__, GFIELD(rsp5, RSP5_FLAGS), func));

		if (GFIELD(rsp5, RSP5_STUFF))
			sd_err(("%s: rsp5 stuff is 0x%x: should be 0\n",
			        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));

		if (sd->polled_mode) {
			volatile uint16 int_reg;
			int retries = RETRIES_LARGE;

			/* Wait for Read Buffer to become ready */
			do {
				int_reg = sdstd_rreg16(sd, SD_IntrStatus);
			} while (--retries && (GFIELD(int_reg, INTSTAT_BUF_READ_READY) == 0));

			if (!retries) {
				sd_err(("%s: Timeout on Buf_Read_Ready: "
				        "intStat: 0x%x errint: 0x%x PresentState 0x%x\n",
				        __FUNCTION__, int_reg,
				        sdstd_rreg16(sd, SD_ErrorIntrStatus),
				        sdstd_rreg(sd, SD_PresentState)));
				sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
				return (ERROR);
			}

			/* Have Buffer Ready, so clear it and read the data */
			sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_BUF_READ_READY, 1));
			if (regsize == 2)
				*data = sdstd_rreg16(sd, SD_BufferDataPort0);
			else
				*data = sdstd_rreg(sd, SD_BufferDataPort0);

			/* Check Status.
			 * After the data is read, the Transfer Complete bit should be on
			 */
			retries = RETRIES_LARGE;
			do {
				int_reg = sdstd_rreg16(sd, SD_IntrStatus);
			} while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));

			/* Check for any errors from the data phase */
			if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
				return ERROR;

			if (!retries) {
				sd_err(("%s: Timeout on xfer complete: "
				        "intr 0x%04x err 0x%04x state 0x%08x\n",
				        __FUNCTION__, int_reg,
				        sdstd_rreg16(sd, SD_ErrorIntrStatus),
				        sdstd_rreg(sd, SD_PresentState)));
				return (ERROR);
			}

			sdstd_wreg16(sd, SD_IntrStatus, SFIELD(0, INTSTAT_XFER_COMPLETE, 1));
		}
	}
	if (sd->polled_mode) {
		/* Truncate 16-bit reads (polled path reads a full 32-bit port) */
		if (regsize == 2)
			*data &= 0xffff;
	}
	return SUCCESS;
}

/*
 * ISR-side dispatch: returns FALSE if none of our masked interrupt bits
 * are set (possibly a shared IRQ).  A card interrupt is temporarily masked
 * at the status-enable level and handed to the registered client handler;
 * any other (local/host) interrupt disables signaling, sets got_hcint, and
 * saves the status for the waiting thread.
 */
bool
check_client_intr(sdioh_info_t *sd)
{
	uint16 raw_int, cur_int, old_int;

	raw_int = sdstd_rreg16(sd, SD_IntrStatus);
	cur_int = raw_int & sd->intmask;

	if (!cur_int) {
		/* Not an error -- might share interrupts... */
		return FALSE;
	}

	if (GFIELD(cur_int, INTSTAT_CARD_INT)) {
		/* Mask the card interrupt while the client handler runs */
		old_int = sdstd_rreg16(sd, SD_IntrStatusEnable);
		sdstd_wreg16(sd, SD_IntrStatusEnable, SFIELD(old_int, INTSTAT_CARD_INT, 0));

		if (sd->client_intr_enabled && sd->use_client_ints) {
			sd->intrcount++;
			ASSERT(sd->intr_handler);
			ASSERT(sd->intr_handler_arg);
			(sd->intr_handler)(sd->intr_handler_arg);
		} else {
			sd_err(("%s: Not ready for intr: enabled %d, handler %p\n",
			        __FUNCTION__, sd->client_intr_enabled, sd->intr_handler));
		}
		sdstd_wreg16(sd, SD_IntrStatusEnable, old_int);
	} else {
		/* Local interrupt: disable, set flag, and save intrstatus */
		sdstd_wreg16(sd, SD_IntrSignalEnable, 0);
		sdstd_wreg16(sd, SD_ErrorIntrSignalEnable, 0);
		sd->local_intrcount++;
		sd->got_hcint = TRUE;
		sd->last_intrstatus = cur_int;
	}
	return TRUE;
}

/*
 * Busy-wait until any of the requested normal (norm) or error (err) status
 * bits is set, or retries expire.  Folds an error indication into the
 * saved status via INTSTAT_ERROR_INT and records it in last_intrstatus.
 */
void
sdstd_spinbits(sdioh_info_t *sd, uint16 norm, uint16 err)
{
	uint16 int_reg, err_reg;
	int retries = RETRIES_LARGE;

	do {
		int_reg = sdstd_rreg16(sd, SD_IntrStatus);
		err_reg = sdstd_rreg16(sd, SD_ErrorIntrStatus);
	} while (--retries && !(int_reg & norm) && !(err_reg & err));

	norm |= sd->intmask;
	if (err_reg & err)
		norm = SFIELD(norm, INTSTAT_ERROR_INT, 1);
	sd->last_intrstatus = int_reg & norm;
}

/* write a client register.
 * Uses CMD52 for func-0 or single-byte writes, CMD53 byte mode otherwise;
 * in polled mode waits for buffer-write-ready, pushes the data, then waits
 * for transfer-complete.  Returns SUCCESS, ERROR, or the cmd-issue status.
 */
static int
sdstd_card_regwrite(sdioh_info_t *sd, int func, uint32 regaddr, int regsize, uint32 data)
{
	int status;
	uint32 cmd_arg, rsp5, flags;

	cmd_arg = 0;

	if ((func == 0) || (regsize == 1)) {
		cmd_arg = SFIELD(cmd_arg, CMD52_FUNCTION, func);
		cmd_arg = SFIELD(cmd_arg, CMD52_REG_ADDR, regaddr);
		cmd_arg = SFIELD(cmd_arg, CMD52_RW_FLAG, SDIOH_XFER_TYPE_WRITE);
		cmd_arg = SFIELD(cmd_arg, CMD52_RAW, 0);
		cmd_arg = SFIELD(cmd_arg, CMD52_DATA, data & 0xff);
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_52, cmd_arg))
		    != SUCCESS)
			return status;
		sdstd_cmd_getrsp(sd, &rsp5, 1);
		flags = GFIELD(rsp5, RSP5_FLAGS);
		if (flags && (flags != 0x10))
			sd_err(("%s: rsp5.rsp5.flags = 0x%x, expecting 0x10\n",
			        __FUNCTION__,  flags));
	} else {
		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, regsize);
		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);
		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
		cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
		cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, regaddr);
		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);

		sd->data_xfer_count = regsize;

		/* sdstd_cmd_issue() returns with the command complete bit
		 * in the ISR already cleared
		 */
		if ((status = sdstd_cmd_issue(sd, USE_DMA(sd), SDIOH_CMD_53, cmd_arg))
		    != SUCCESS)
			return status;

		sdstd_cmd_getrsp(sd, &rsp5, 1);

		if (GFIELD(rsp5, RSP5_FLAGS) != 0x10)
			sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10\n",
			        __FUNCTION__,  GFIELD(rsp5, RSP5_FLAGS)));
		if (GFIELD(rsp5, RSP5_STUFF))
			sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
			        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));

		if (sd->polled_mode) {
			uint16 int_reg;
			int retries = RETRIES_LARGE;

			/* Wait for Write Buffer to become ready */
			do {
				int_reg = sdstd_rreg16(sd, SD_IntrStatus);
			} while (--retries && (GFIELD(int_reg, INTSTAT_BUF_WRITE_READY) == 0));

			if (!retries) {
				sd_err(("%s: Timeout on Buf_Write_Ready: intStat: 0x%x "
				        "errint: 0x%x PresentState 0x%x\n",
				        __FUNCTION__, int_reg,
				        sdstd_rreg16(sd, SD_ErrorIntrStatus),
				        sdstd_rreg(sd, SD_PresentState)));
				sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg);
				return (ERROR);
			}
			/* Clear Write Buf Ready bit */
			int_reg = 0;
			int_reg = SFIELD(int_reg, INTSTAT_BUF_WRITE_READY, 1);
			sdstd_wreg16(sd, SD_IntrStatus, int_reg);

			/* At this point we have Buffer Ready, so write the data */
			if (regsize == 2)
				sdstd_wreg16(sd, SD_BufferDataPort0, (uint16) data);
			else
				sdstd_wreg(sd, SD_BufferDataPort0, data);

			/* Wait for Transfer Complete */
			retries = RETRIES_LARGE;
			do {
				int_reg = sdstd_rreg16(sd, SD_IntrStatus);
			} while (--retries && (GFIELD(int_reg, INTSTAT_XFER_COMPLETE) == 0));

			/* Check for any errors from the data phase */
			if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg))
				return ERROR;

			if (retries == 0) {
				sd_err(("%s: Timeout for xfer complete; State = 0x%x, "
				        "intr state=0x%x, Errintstatus 0x%x rcnt %d, tcnt %d\n",
				        __FUNCTION__, sdstd_rreg(sd, SD_PresentState),
				        int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus),
				        sd->r_cnt, sd->t_cnt));
			}
			/* Clear the status bits.
			 * NOTE(review): this writes int_reg (with CARD_INT masked off)
			 * back to the write-1-to-clear status register, clearing
			 * whatever bits were set at the last read — confirm intended.
			 */
			sdstd_wreg16(sd, SD_IntrStatus, SFIELD(int_reg, INTSTAT_CARD_INT, 0));
		}
	}
	return SUCCESS;
}

/* Copy up to four 32-bit response words from the controller's response
 * registers into rsp_buffer (count is clamped to 4).
 */
void
sdstd_cmd_getrsp(sdioh_info_t *sd, uint32 *rsp_buffer, int count /* num 32 bit words */)
{
	int rsp_count;
	int respaddr = SD_Response0;

	if (count > 4)
		count = 4;

	for (rsp_count = 0; rsp_count < count; rsp_count++) {
		*rsp_buffer++ = sdstd_rreg(sd, respaddr);
		respaddr += 4;
	}
}

/*
 * Build and issue one SDIO command (CMD0/3/5/7/15/52/53).  For CMD53 this
 * also programs the transfer-mode, block-size/count and (if enabled) the
 * SDMA/ADMA descriptors before the command register write.  In polled mode
 * the function waits for command-complete (or error), attempting a CMD-line
 * soft reset on timeout.  Returns SUCCESS or ERROR.
 */
static int
sdstd_cmd_issue(sdioh_info_t *sdioh_info, bool use_dma, uint32 cmd, uint32 arg)
{
	uint16 cmd_reg;
	int retries;
	uint32 cmd_arg;
	uint16 xfer_reg = 0;

	if ((sdioh_info->sd_mode == SDIOH_MODE_SPI) &&
	    ((cmd == SDIOH_CMD_3) || (cmd == SDIOH_CMD_7) || (cmd == SDIOH_CMD_15))) {
		sd_err(("%s: Cmd %d is not for SPI\n", __FUNCTION__, cmd));
		return ERROR;
	}

	retries = RETRIES_SMALL;
	while ((GFIELD(sdstd_rreg(sdioh_info, SD_PresentState), PRES_CMD_INHIBIT))
	       && --retries) {
		if (retries == RETRIES_SMALL)
			sd_err(("%s: Waiting for Command Inhibit cmd = %d 0x%x\n",
			        __FUNCTION__, cmd, sdstd_rreg(sdioh_info, SD_PresentState)));
	}
	if (!retries) {
		sd_err(("%s: Command Inhibit timeout\n", __FUNCTION__));
		if (trap_errs)
			ASSERT(0);
		return ERROR;
	}

	cmd_reg = 0;
	switch (cmd) {
	case SDIOH_CMD_0:       /* Set Card to Idle State - No Response */
		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
		break;

	case SDIOH_CMD_3:       /* Ask card to send RCA - Response R6 */
		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
		break;

	case SDIOH_CMD_5:       /* Send Operation condition - Response R4 */
		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
		break;

	case SDIOH_CMD_7:       /* Select card - Response R1 */
		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
		break;

	case SDIOH_CMD_15:      /* Set card to inactive state - Response None */
		sd_data(("%s: CMD%d\n", __FUNCTION__, cmd));
		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_NONE);
		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
		break;

	case SDIOH_CMD_52:      /* IO R/W Direct (single byte) - Response R5 */
		sd_data(("%s: CMD52 func(%d) addr(0x%x) %s data(0x%x)\n",
		         __FUNCTION__,
		         GFIELD(arg, CMD52_FUNCTION),
		         GFIELD(arg, CMD52_REG_ADDR),
		         GFIELD(arg, CMD52_RW_FLAG) ? "W" : "R",
		         GFIELD(arg, CMD52_DATA)));

		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);
		break;

	case SDIOH_CMD_53:      /* IO R/W Extended (multiple bytes/blocks) */
		sd_data(("%s: CMD53 func(%d) addr(0x%x) %s mode(%s) cnt(%d), %s\n",
		         __FUNCTION__,
		         GFIELD(arg, CMD53_FUNCTION),
		         GFIELD(arg, CMD53_REG_ADDR),
		         GFIELD(arg, CMD53_RW_FLAG) ? "W" : "R",
		         GFIELD(arg, CMD53_BLK_MODE) ? "Block" : "Byte",
		         GFIELD(arg, CMD53_BYTE_BLK_CNT),
		         GFIELD(arg, CMD53_OP_CODE) ? "Incrementing addr" : "Single addr"));

		cmd_arg = arg;
		xfer_reg = 0;

		cmd_reg = SFIELD(cmd_reg, CMD_RESP_TYPE, RESP_TYPE_48);
		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 1);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 1);
		cmd_reg = SFIELD(cmd_reg, CMD_DATA_EN, 1);
		cmd_reg = SFIELD(cmd_reg, CMD_TYPE, CMD_TYPE_NORMAL);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX, cmd);

		/* DMA is only used for block-mode transfers */
		use_dma = USE_DMA(sdioh_info) && GFIELD(cmd_arg, CMD53_BLK_MODE);

		if (GFIELD(cmd_arg, CMD53_BLK_MODE)) {
			uint16 blocksize;
			uint16 blockcount;
			int func;

			ASSERT(sdioh_info->sd_blockmode);

			func = GFIELD(cmd_arg, CMD53_FUNCTION);
			blocksize = MIN((int)sdioh_info->data_xfer_count,
			                sdioh_info->client_block_size[func]);
			blockcount = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);

			/* data_xfer_cnt is already setup so that for multiblock mode,
			 * it is the entire buffer length.  For non-block or single block,
			 * it is < 64 bytes
			 */
			if (use_dma) {
				switch (sdioh_info->sd_dma_mode) {
				case DMA_MODE_SDMA:
					sd_dma(("%s: SDMA: SysAddr reg was 0x%x now 0x%x\n",
					        __FUNCTION__,
					        sdstd_rreg(sdioh_info, SD_SysAddr),
					        (uint32)sdioh_info->dma_phys));
					sdstd_wreg(sdioh_info, SD_SysAddr,
					           sdioh_info->dma_phys);
					break;
				case DMA_MODE_ADMA1:
				case DMA_MODE_ADMA2:
					sd_dma(("%s: ADMA: Using ADMA\n", __FUNCTION__));
					/* Single descriptor covering the whole transfer */
					sd_create_adma_descriptor(sdioh_info, 0,
					                          sdioh_info->dma_phys,
					                          blockcount*blocksize,
					                          ADMA2_ATTRIBUTE_VALID |
					                          ADMA2_ATTRIBUTE_END |
					                          ADMA2_ATTRIBUTE_INT |
					                          ADMA2_ATTRIBUTE_ACT_TRAN);
					/* Dump descriptor if DMA debugging is enabled. */
					if (sd_msglevel & SDH_DMA_VAL) {
						sd_dump_adma_dscr(sdioh_info);
					}
					sdstd_wreg(sdioh_info, SD_ADMA_SysAddr,
					           sdioh_info->adma2_dscr_phys);
					break;
				default:
					sd_err(("%s: unsupported DMA mode %d.\n",
					        __FUNCTION__, sdioh_info->sd_dma_mode));
					break;
				}
			}

			sd_trace(("%s: Setting block count %d, block size %d bytes\n",
			          __FUNCTION__, blockcount, blocksize));
			sdstd_wreg16(sdioh_info, SD_BlockSize, blocksize);
			sdstd_wreg16(sdioh_info, SD_BlockCount, blockcount);

			xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, use_dma);

			/* NOTE(review): func index is hardcoded to 1 here even though
			 * `func` was extracted from cmd_arg above — verify whether
			 * set_client_block_size(sdioh_info, func, blocksize) was
			 * intended for multi-function transfers.
			 */
			if (sdioh_info->client_block_size[func] != blocksize)
				set_client_block_size(sdioh_info, 1, blocksize);

			if (blockcount > 1) {
				xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 1);
				xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 1);
				xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
			} else {
				xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);
				xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0);
				xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
			}

			if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
				xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
			else
				xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);

			retries = RETRIES_SMALL;
			while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
			              PRES_DAT_INHIBIT) && --retries)
				sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
				        __FUNCTION__, cmd));
			if (!retries) {
				sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
				if (trap_errs)
					ASSERT(0);
				return ERROR;
			}
			sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);

		} else {	/* Non block mode */
			uint16 bytes = GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT);
			/* The byte/block count field only has 9 bits,
			 * so, to do a 512-byte bytemode transfer, this
			 * field will contain 0, but we need to tell the
			 * controller we're transferring 512 bytes.
			 */
			if (bytes == 0) bytes = 512;

			if (use_dma)
				sdstd_wreg(sdioh_info, SD_SysAddr, sdioh_info->dma_phys);

			/* PCI: Transfer Mode register 0x0c */
			xfer_reg = SFIELD(xfer_reg, XFER_DMA_ENABLE, bytes <= 4 ? 0 : use_dma);
			xfer_reg = SFIELD(xfer_reg, XFER_CMD_12_EN, 0);
			if (GFIELD(cmd_arg, CMD53_RW_FLAG) == SDIOH_XFER_TYPE_READ)
				xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 1);
			else
				xfer_reg = SFIELD(xfer_reg, XFER_DATA_DIRECTION, 0);
			/* See table 2-8 Host Controller spec ver 1.00 */
			xfer_reg = SFIELD(xfer_reg, XFER_BLK_COUNT_EN, 0); /* Dont care */
			xfer_reg = SFIELD(xfer_reg, XFER_MULTI_BLOCK, 0);

			sdstd_wreg16(sdioh_info, SD_BlockSize, bytes);
			sdstd_wreg16(sdioh_info, SD_BlockCount, 1);

			retries = RETRIES_SMALL;
			while (GFIELD(sdstd_rreg(sdioh_info, SD_PresentState),
			              PRES_DAT_INHIBIT) && --retries)
				sd_err(("%s: Waiting for Data Inhibit cmd = %d\n",
				        __FUNCTION__, cmd));
			if (!retries) {
				sd_err(("%s: Data Inhibit timeout\n", __FUNCTION__));
				if (trap_errs)
					ASSERT(0);
				return ERROR;
			}
			sdstd_wreg16(sdioh_info, SD_TransferMode, xfer_reg);
		}
		break;

	default:
		sd_err(("%s: Unknown command\n", __FUNCTION__));
		return ERROR;
	}

	if (sdioh_info->sd_mode == SDIOH_MODE_SPI) {
		cmd_reg = SFIELD(cmd_reg, CMD_CRC_EN, 0);
		cmd_reg = SFIELD(cmd_reg, CMD_INDEX_EN, 0);
	}

	/* Setup and issue the SDIO command */
	sdstd_wreg(sdioh_info, SD_Arg0, arg);
	sdstd_wreg16(sdioh_info, SD_Command, cmd_reg);

	/* If we are in polled mode, wait for the command to complete.
	 * In interrupt mode, return immediately. The calling function will
	 * know that the command has completed when the CMDATDONE interrupt
	 * is asserted
	 */
	if (sdioh_info->polled_mode) {
		uint16 int_reg = 0;
		int retries = RETRIES_LARGE;

		do {
			int_reg = sdstd_rreg16(sdioh_info, SD_IntrStatus);
		} while (--retries &&
		         (GFIELD(int_reg, INTSTAT_ERROR_INT) == 0) &&
		         (GFIELD(int_reg, INTSTAT_CMD_COMPLETE) == 0));

		if (!retries) {
			sd_err(("%s: CMD_COMPLETE timeout: intrStatus: 0x%x "
			        "error stat 0x%x state 0x%x\n",
			        __FUNCTION__, int_reg,
			        sdstd_rreg16(sdioh_info, SD_ErrorIntrStatus),
			        sdstd_rreg(sdioh_info, SD_PresentState)));

			/* Attempt to reset CMD line when we get a CMD timeout */
			sdstd_wreg8(sdioh_info, SD_SoftwareReset, SFIELD(0, SW_RESET_CMD, 1));
			retries = RETRIES_LARGE;
			do {
				sd_trace(("%s: waiting for CMD line reset\n", __FUNCTION__));
			} while ((GFIELD(sdstd_rreg8(sdioh_info, SD_SoftwareReset),
			                 SW_RESET_CMD)) && retries--);

			if (!retries) {
				sd_err(("%s: Timeout waiting for CMD line reset\n",
				        __FUNCTION__));
			}

			if (trap_errs)
				ASSERT(0);
			return (ERROR);
		}

		/* Clear Command Complete interrupt */
		int_reg = SFIELD(0, INTSTAT_CMD_COMPLETE, 1);
		sdstd_wreg16(sdioh_info, SD_IntrStatus, int_reg);

		/* Check for Errors */
		if (sdstd_check_errs(sdioh_info, cmd, arg)) {
			if (trap_errs)
				ASSERT(0);
			return ERROR;
		}
	}
	return SUCCESS;
}

/*
 * CMD53 bulk data transfer to/from the card (rw: SDIOH_READ/write).
 * Chooses block vs byte mode (block mode skipped for transfers smaller
 * than the function's block size), builds the CMD53 argument, issues the
 * command, and moves the data by PIO or DMA.
 * (Definition continues beyond this chunk.)
 */
static int
sdstd_card_buf(sdioh_info_t *sd, int rw, int func, bool fifo, uint32 addr, int nbytes,
               uint32 *data)
{
	int status;
	uint32 cmd_arg;
	uint32 rsp5;
	uint16 int_reg, int_bit;
	uint flags;
	int num_blocks, blocksize;
	bool local_blockmode, local_dma;
	bool read = rw == SDIOH_READ ? 1 : 0;
	bool yield = FALSE;

	ASSERT(nbytes);

	cmd_arg = 0;

	sd_data(("%s: %s 53 addr 0x%x, len %d bytes, r_cnt %d t_cnt %d\n",
	         __FUNCTION__, read ? "Rd" : "Wr", addr, nbytes, sd->r_cnt, sd->t_cnt));

	if (read) sd->r_cnt++; else sd->t_cnt++;

	local_blockmode = sd->sd_blockmode;
	local_dma = USE_DMA(sd);

	/* Don't bother with block mode on small xfers */
	if (nbytes < sd->client_block_size[func]) {
		sd_data(("setting local blockmode to false: nbytes (%d) != block_size (%d)\n",
		         nbytes, sd->client_block_size[func]));
		local_blockmode = FALSE;
		local_dma = FALSE;
	}

	if (local_blockmode) {
		blocksize = MIN(sd->client_block_size[func], nbytes);
		num_blocks = nbytes/blocksize;
		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, num_blocks);
		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 1);
	} else {
		num_blocks = 1;
		blocksize = nbytes;
		cmd_arg = SFIELD(cmd_arg, CMD53_BYTE_BLK_CNT, nbytes);
		cmd_arg = SFIELD(cmd_arg, CMD53_BLK_MODE, 0);
	}

	/* Writes must stage the payload into the DMA buffer before issue */
	if (local_dma && !read) {
		bcopy(data, sd->dma_buf, nbytes);
		sd_sync_dma(sd, read, nbytes);
	}

	if (fifo)
		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 0);
	else
		cmd_arg = SFIELD(cmd_arg, CMD53_OP_CODE, 1);

	cmd_arg = SFIELD(cmd_arg, CMD53_FUNCTION, func);
	cmd_arg = SFIELD(cmd_arg, CMD53_REG_ADDR, addr);
	if (read)
		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_READ);
	else
		cmd_arg = SFIELD(cmd_arg, CMD53_RW_FLAG, SDIOH_XFER_TYPE_WRITE);

	sd->data_xfer_count = nbytes;

	/* sdstd_cmd_issue() returns with the command complete bit
	 * in the ISR already cleared
	 */
	if ((status = sdstd_cmd_issue(sd, local_dma, SDIOH_CMD_53, cmd_arg)) != SUCCESS) {
		sd_err(("%s: cmd_issue failed for %s\n", __FUNCTION__,
		        (read ? "read" : "write")));
		return status;
	}

	sdstd_cmd_getrsp(sd, &rsp5, 1);

	if ((flags = GFIELD(rsp5, RSP5_FLAGS)) != 0x10) {
		sd_err(("%s: Rsp5: nbytes %d, dma %d blockmode %d, read %d "
		        "numblocks %d, blocksize %d\n",
		        __FUNCTION__, nbytes, local_dma, local_dma, read,
		        num_blocks, blocksize));

		if (flags & 1)
			sd_err(("%s: rsp5: Command not accepted: arg out of range 0x%x, "
			        "bytes %d dma %d\n",
			        __FUNCTION__, flags,
			        GFIELD(cmd_arg, CMD53_BYTE_BLK_CNT),
			        GFIELD(cmd_arg, CMD53_BLK_MODE)));
		if (flags & 0x8)
			sd_err(("%s: Rsp5: General Error\n", __FUNCTION__));

		sd_err(("%s: rsp5 flags = 0x%x, expecting 0x10 returning error\n",
		        __FUNCTION__,  flags));
		if (trap_errs)
			ASSERT(0);
		return ERROR;
	}

	if (GFIELD(rsp5, RSP5_STUFF))
		sd_err(("%s: rsp5 stuff is 0x%x: expecting 0\n",
		        __FUNCTION__, GFIELD(rsp5, RSP5_STUFF)));

#ifdef BCMSDYIELD
	yield = sd_yieldcpu && ((uint)nbytes >= sd_minyield);
#endif

	if (!local_dma) {
		int bytes, i;
		uint32 tmp;
		/* PIO path: move each block through the buffer-data port */
		for (i = 0; i < num_blocks; i++) {
			int words;

			/* Decide which status bit we're waiting for */
			if (read)
				int_bit = SFIELD(0, INTSTAT_BUF_READ_READY, 1);
			else
				int_bit = SFIELD(0, INTSTAT_BUF_WRITE_READY, 1);

			/* If not on, wait for it (or for xfer error) */
			int_reg = sdstd_rreg16(sd, SD_IntrStatus);
			if (!(int_reg & int_bit))
				int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS,
				                         yield);

			/* Confirm we got the bit w/o error */
			if (!(int_reg & int_bit) || GFIELD(int_reg, INTSTAT_ERROR_INT)) {
				sd_err(("%s: Error or timeout for Buf_%s_Ready: intStat: 0x%x "
				        "errint: 0x%x PresentState 0x%x\n",
				        __FUNCTION__, read ?
"Read" : "Write", int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus), sdstd_rreg(sd, SD_PresentState))); sdstd_dumpregs(sd); sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg); return (ERROR); } /* Clear Buf Ready bit */ sdstd_wreg16(sd, SD_IntrStatus, int_bit); /* At this point we have Buffer Ready, write the data 4 bytes at a time */ for (words = blocksize/4; words; words--) { if (read) *data = sdstd_rreg(sd, SD_BufferDataPort0); else sdstd_wreg(sd, SD_BufferDataPort0, *data); data++; } bytes = blocksize % 4; /* If no leftover bytes, go to next block */ if (!bytes) continue; switch (bytes) { case 1: /* R/W 8 bits */ if (read) *(data++) = (uint32)(sdstd_rreg8(sd, SD_BufferDataPort0)); else sdstd_wreg8(sd, SD_BufferDataPort0, (uint8)(*(data++) & 0xff)); break; case 2: /* R/W 16 bits */ if (read) *(data++) = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0); else sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)(*(data++))); break; case 3: /* R/W 24 bits: * SD_BufferDataPort0[0-15] | SD_BufferDataPort1[16-23] */ if (read) { tmp = (uint32)sdstd_rreg16(sd, SD_BufferDataPort0); tmp |= ((uint32)(sdstd_rreg8(sd, SD_BufferDataPort1)) << 16); *(data++) = tmp; } else { tmp = *(data++); sdstd_wreg16(sd, SD_BufferDataPort0, (uint16)tmp & 0xffff); sdstd_wreg8(sd, SD_BufferDataPort1, (uint8)((tmp >> 16) & 0xff)); } break; default: sd_err(("%s: Unexpected bytes leftover %d\n", __FUNCTION__, bytes)); ASSERT(0); break; } } } /* End PIO processing */ /* Wait for Transfer Complete or Transfer Error */ int_bit = SFIELD(0, INTSTAT_XFER_COMPLETE, 1); /* If not on, wait for it (or for xfer error) */ int_reg = sdstd_rreg16(sd, SD_IntrStatus); if (!(int_reg & int_bit)) int_reg = sdstd_waitbits(sd, int_bit, ERRINT_TRANSFER_ERRS, yield); /* Check for any errors from the data phase */ if (sdstd_check_errs(sd, SDIOH_CMD_53, cmd_arg)) return ERROR; /* May have gotten a software timeout if not blocking? 
*/ int_reg = sdstd_rreg16(sd, SD_IntrStatus); if (!(int_reg & int_bit)) { sd_err(("%s: Error or Timeout for xfer complete; %s, dma %d, State 0x%08x, " "intr 0x%04x, Err 0x%04x, len = %d, rcnt %d, tcnt %d\n", __FUNCTION__, read ? "R" : "W", local_dma, sdstd_rreg(sd, SD_PresentState), int_reg, sdstd_rreg16(sd, SD_ErrorIntrStatus), nbytes, sd->r_cnt, sd->t_cnt)); sdstd_dumpregs(sd); return ERROR; } /* Clear the status bits */ int_reg = int_bit; if (local_dma) { /* DMA Complete */ /* Reads in particular don't have DMA_COMPLETE set */ int_reg = SFIELD(int_reg, INTSTAT_DMA_INT, 1); } sdstd_wreg16(sd, SD_IntrStatus, int_reg); /* Fetch data */ if (local_dma && read) { sd_sync_dma(sd, read, nbytes); bcopy(sd->dma_buf, data, nbytes); } return SUCCESS; } static int set_client_block_size(sdioh_info_t *sd, int func, int block_size) { int base; int err = 0; sd_err(("%s: Setting block size %d, func %d\n", __FUNCTION__, block_size, func)); sd->client_block_size[func] = block_size; /* Set the block size in the SDIO Card register */ base = func * SDIOD_FBR_SIZE; err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_0, 1, block_size & 0xff); if (!err) { err = sdstd_card_regwrite(sd, 0, base+SDIOD_CCCR_BLKSIZE_1, 1, (block_size >> 8) & 0xff); } /* Do not set the block size in the SDIO Host register, that * is func dependent and will get done on an individual * transaction basis */ return (err ? 
BCME_SDIO_ERROR : 0); } /* Reset and re-initialize the device */ int sdioh_sdio_reset(sdioh_info_t *si) { uint8 hreg; /* Reset the attached device (use slower clock for safety) */ sdstd_start_clock(si, 128); sdstd_reset(si, 0, 1); /* Reset portions of the host state accordingly */ hreg = sdstd_rreg8(si, SD_HostCntrl); hreg = SFIELD(hreg, HOST_HI_SPEED_EN, 0); hreg = SFIELD(hreg, HOST_DATA_WIDTH, 0); si->sd_mode = SDIOH_MODE_SD1; /* Reinitialize the card */ si->card_init_done = FALSE; return sdstd_client_init(si); } static void sd_map_dma(sdioh_info_t * sd) { void *va; if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, &sd->dma_start_phys, 0x12, 12)) == NULL) { sd->sd_dma_mode = DMA_MODE_NONE; sd->dma_start_buf = 0; sd->dma_buf = (void *)0; sd->dma_phys = 0; sd->alloced_dma_size = SD_PAGE; sd_err(("%s: DMA_ALLOC failed. Disabling DMA support.\n", __FUNCTION__)); } else { sd->dma_start_buf = va; sd->dma_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE); sd->dma_phys = ROUNDUP((sd->dma_start_phys), SD_PAGE); sd->alloced_dma_size = SD_PAGE; sd_err(("%s: Mapped DMA Buffer %dbytes @virt/phys: %p/0x%lx\n", __FUNCTION__, sd->alloced_dma_size, sd->dma_buf, sd->dma_phys)); sd_fill_dma_data_buf(sd, 0xA5); } if ((va = DMA_ALLOC_CONSISTENT(sd->osh, SD_PAGE, &sd->adma2_dscr_start_phys, 0x12, 12)) == NULL) { sd->sd_dma_mode = DMA_MODE_NONE; sd->adma2_dscr_start_buf = 0; sd->adma2_dscr_buf = (void *)0; sd->adma2_dscr_phys = 0; sd->alloced_adma2_dscr_size = 0; sd_err(("%s: DMA_ALLOC failed for descriptor buffer. 
" "Disabling DMA support.\n", __FUNCTION__)); } else { sd->adma2_dscr_start_buf = va; sd->adma2_dscr_buf = (void *)ROUNDUP((uintptr)va, SD_PAGE); sd->adma2_dscr_phys = ROUNDUP((sd->adma2_dscr_start_phys), SD_PAGE); sd->alloced_adma2_dscr_size = SD_PAGE; } sd_err(("%s: Mapped ADMA2 Descriptor Buffer %dbytes @virt/phys: %p/0x%lx\n", __FUNCTION__, sd->alloced_adma2_dscr_size, sd->adma2_dscr_buf, sd->adma2_dscr_phys)); sd_clear_adma_dscr_buf(sd); } static void sd_unmap_dma(sdioh_info_t * sd) { if (sd->dma_start_buf) { DMA_FREE_CONSISTENT(sd->osh, sd->dma_start_buf, sd->alloced_dma_size, sd->dma_start_phys, 0x12); } if (sd->adma2_dscr_start_buf) { DMA_FREE_CONSISTENT(sd->osh, sd->adma2_dscr_start_buf, sd->alloced_adma2_dscr_size, sd->adma2_dscr_start_phys, 0x12); } } static void sd_clear_adma_dscr_buf(sdioh_info_t *sd) { bzero((char *)sd->adma2_dscr_buf, SD_PAGE); sd_dump_adma_dscr(sd); } static void sd_fill_dma_data_buf(sdioh_info_t *sd, uint8 data) { memset((char *)sd->dma_buf, data, SD_PAGE); } static void sd_create_adma_descriptor(sdioh_info_t *sd, uint32 index, uint32 addr_phys, uint16 length, uint16 flags) { adma2_dscr_32b_t *adma2_dscr_table; adma1_dscr_t *adma1_dscr_table; adma2_dscr_table = sd->adma2_dscr_buf; adma1_dscr_table = sd->adma2_dscr_buf; switch (sd->sd_dma_mode) { case DMA_MODE_ADMA2: sd_dma(("%s: creating ADMA2 descriptor for index %d\n", __FUNCTION__, index)); adma2_dscr_table[index].phys_addr = addr_phys; adma2_dscr_table[index].len_attr = length << 16; adma2_dscr_table[index].len_attr |= flags; break; case DMA_MODE_ADMA1: /* ADMA1 requires two descriptors, one for len * and the other for data transfer */ index <<= 1; sd_dma(("%s: creating ADMA1 descriptor for index %d\n", __FUNCTION__, index)); adma1_dscr_table[index].phys_addr_attr = length << 12; adma1_dscr_table[index].phys_addr_attr |= (ADMA1_ATTRIBUTE_ACT_SET | ADMA2_ATTRIBUTE_VALID); adma1_dscr_table[index+1].phys_addr_attr = addr_phys & 0xFFFFF000; adma1_dscr_table[index+1].phys_addr_attr 
|= (flags & 0x3f); break; default: sd_err(("%s: cannot create ADMA descriptor for DMA mode %d\n", __FUNCTION__, sd->sd_dma_mode)); break; } } static void sd_dump_adma_dscr(sdioh_info_t *sd) { adma2_dscr_32b_t *adma2_dscr_table; adma1_dscr_t *adma1_dscr_table; uint32 i = 0; uint16 flags; char flags_str[32]; ASSERT(sd->adma2_dscr_buf != NULL); adma2_dscr_table = sd->adma2_dscr_buf; adma1_dscr_table = sd->adma2_dscr_buf; switch (sd->sd_dma_mode) { case DMA_MODE_ADMA2: sd_err(("ADMA2 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n", SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys)); sd_err((" #[Descr VA ] Buffer PA | Len | Flags (5:4 2 1 0)" " |\n")); while (adma2_dscr_table->len_attr & ADMA2_ATTRIBUTE_VALID) { flags = adma2_dscr_table->len_attr & 0xFFFF; sprintf(flags_str, "%s%s%s%s", ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == ADMA2_ATTRIBUTE_ACT_LINK) ? "LINK " : ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " : ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "RSV ", (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "), (flags & ADMA2_ATTRIBUTE_END ? "END " : " "), (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : "")); sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | 0x%04x (%s) |\n", i, adma2_dscr_table, adma2_dscr_table->phys_addr, adma2_dscr_table->len_attr >> 16, flags, flags_str)); i++; /* Follow LINK descriptors or skip to next. */ if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == ADMA2_ATTRIBUTE_ACT_LINK) { adma2_dscr_table = phys_to_virt( adma2_dscr_table->phys_addr); } else { adma2_dscr_table++; } } break; case DMA_MODE_ADMA1: sd_err(("ADMA1 Descriptor Table (%dbytes) @virt/phys: %p/0x%lx\n", SD_PAGE, sd->adma2_dscr_buf, sd->adma2_dscr_phys)); sd_err((" #[Descr VA ] Buffer PA | Flags (5:4 2 1 0) |\n")); for (i = 0; adma1_dscr_table->phys_addr_attr & ADMA2_ATTRIBUTE_VALID; i++) { flags = adma1_dscr_table->phys_addr_attr & 0x3F; sprintf(flags_str, "%s%s%s%s", ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == ADMA2_ATTRIBUTE_ACT_LINK) ? 
"LINK " : ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == ADMA2_ATTRIBUTE_ACT_TRAN) ? "TRAN " : ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == ADMA2_ATTRIBUTE_ACT_NOP) ? "NOP " : "SET ", (flags & ADMA2_ATTRIBUTE_INT ? "INT " : " "), (flags & ADMA2_ATTRIBUTE_END ? "END " : " "), (flags & ADMA2_ATTRIBUTE_VALID ? "VALID" : "")); sd_err(("%2d[0x%p]: 0x%08x | 0x%04x | (%s) |\n", i, adma1_dscr_table, adma1_dscr_table->phys_addr_attr & 0xFFFFF000, flags, flags_str)); /* Follow LINK descriptors or skip to next. */ if ((flags & ADMA2_ATTRIBUTE_ACT_LINK) == ADMA2_ATTRIBUTE_ACT_LINK) { adma1_dscr_table = phys_to_virt( adma1_dscr_table->phys_addr_attr & 0xFFFFF000); } else { adma1_dscr_table++; } } break; default: sd_err(("Unknown DMA Descriptor Table Format.\n")); break; } } static void sdstd_dumpregs(sdioh_info_t *sd) { sd_err(("IntrStatus: 0x%04x ErrorIntrStatus 0x%04x\n", sdstd_rreg16(sd, SD_IntrStatus), sdstd_rreg16(sd, SD_ErrorIntrStatus))); sd_err(("IntrStatusEnable: 0x%04x ErrorIntrStatusEnable 0x%04x\n", sdstd_rreg16(sd, SD_IntrStatusEnable), sdstd_rreg16(sd, SD_ErrorIntrStatusEnable))); sd_err(("IntrSignalEnable: 0x%04x ErrorIntrSignalEnable 0x%04x\n", sdstd_rreg16(sd, SD_IntrSignalEnable), sdstd_rreg16(sd, SD_ErrorIntrSignalEnable))); }
gpl-2.0
klquicksall/Galaxy-Nexus-JB
net/ieee802154/nl-mac.c
4040
16109
/* * Netlink inteface for IEEE 802.15.4 stack * * Copyright 2007, 2008 Siemens AG * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. * * Written by: * Sergey Lapin <slapin@ossfans.org> * Dmitry Eremin-Solenikov <dbaryshkov@gmail.com> * Maxim Osipov <maxim.osipov@siemens.com> */ #include <linux/gfp.h> #include <linux/kernel.h> #include <linux/if_arp.h> #include <linux/netdevice.h> #include <net/netlink.h> #include <net/genetlink.h> #include <net/sock.h> #include <linux/nl802154.h> #include <net/af_ieee802154.h> #include <net/nl802154.h> #include <net/ieee802154.h> #include <net/ieee802154_netdev.h> #include <net/wpan-phy.h> #include "ieee802154.h" static struct genl_multicast_group ieee802154_coord_mcgrp = { .name = IEEE802154_MCAST_COORD_NAME, }; static struct genl_multicast_group ieee802154_beacon_mcgrp = { .name = IEEE802154_MCAST_BEACON_NAME, }; int ieee802154_nl_assoc_indic(struct net_device *dev, struct ieee802154_addr *addr, u8 cap) { struct sk_buff *msg; pr_debug("%s\n", __func__); if (addr->addr_type != IEEE802154_ADDR_LONG) { pr_err("%s: received non-long source address!\n", __func__); return -EINVAL; } msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_INDIC); if (!msg) return -ENOBUFS; NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr); 
NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, addr->hwaddr); NLA_PUT_U8(msg, IEEE802154_ATTR_CAPABILITY, cap); return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } EXPORT_SYMBOL(ieee802154_nl_assoc_indic); int ieee802154_nl_assoc_confirm(struct net_device *dev, u16 short_addr, u8 status) { struct sk_buff *msg; pr_debug("%s\n", __func__); msg = ieee802154_nl_create(0, IEEE802154_ASSOCIATE_CONF); if (!msg) return -ENOBUFS; NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr); NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, short_addr); NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } EXPORT_SYMBOL(ieee802154_nl_assoc_confirm); int ieee802154_nl_disassoc_indic(struct net_device *dev, struct ieee802154_addr *addr, u8 reason) { struct sk_buff *msg; pr_debug("%s\n", __func__); msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_INDIC); if (!msg) return -ENOBUFS; NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr); if (addr->addr_type == IEEE802154_ADDR_LONG) NLA_PUT(msg, IEEE802154_ATTR_SRC_HW_ADDR, IEEE802154_ADDR_LEN, addr->hwaddr); else NLA_PUT_U16(msg, IEEE802154_ATTR_SRC_SHORT_ADDR, addr->short_addr); NLA_PUT_U8(msg, IEEE802154_ATTR_REASON, reason); return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } EXPORT_SYMBOL(ieee802154_nl_disassoc_indic); int ieee802154_nl_disassoc_confirm(struct net_device *dev, u8 status) { struct sk_buff *msg; pr_debug("%s\n", __func__); msg = ieee802154_nl_create(0, IEEE802154_DISASSOCIATE_CONF); if (!msg) return -ENOBUFS; 
NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr); NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } EXPORT_SYMBOL(ieee802154_nl_disassoc_confirm); int ieee802154_nl_beacon_indic(struct net_device *dev, u16 panid, u16 coord_addr) { struct sk_buff *msg; pr_debug("%s\n", __func__); msg = ieee802154_nl_create(0, IEEE802154_BEACON_NOTIFY_INDIC); if (!msg) return -ENOBUFS; NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr); NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_SHORT_ADDR, coord_addr); NLA_PUT_U16(msg, IEEE802154_ATTR_COORD_PAN_ID, panid); return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } EXPORT_SYMBOL(ieee802154_nl_beacon_indic); int ieee802154_nl_scan_confirm(struct net_device *dev, u8 status, u8 scan_type, u32 unscanned, u8 page, u8 *edl/* , struct list_head *pan_desc_list */) { struct sk_buff *msg; pr_debug("%s\n", __func__); msg = ieee802154_nl_create(0, IEEE802154_SCAN_CONF); if (!msg) return -ENOBUFS; NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr); NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); NLA_PUT_U8(msg, IEEE802154_ATTR_SCAN_TYPE, scan_type); NLA_PUT_U32(msg, IEEE802154_ATTR_CHANNELS, unscanned); NLA_PUT_U8(msg, IEEE802154_ATTR_PAGE, page); if (edl) NLA_PUT(msg, IEEE802154_ATTR_ED_LIST, 27, edl); return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } EXPORT_SYMBOL(ieee802154_nl_scan_confirm); int 
ieee802154_nl_start_confirm(struct net_device *dev, u8 status) { struct sk_buff *msg; pr_debug("%s\n", __func__); msg = ieee802154_nl_create(0, IEEE802154_START_CONF); if (!msg) return -ENOBUFS; NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr); NLA_PUT_U8(msg, IEEE802154_ATTR_STATUS, status); return ieee802154_nl_mcast(msg, ieee802154_coord_mcgrp.id); nla_put_failure: nlmsg_free(msg); return -ENOBUFS; } EXPORT_SYMBOL(ieee802154_nl_start_confirm); static int ieee802154_nl_fill_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct net_device *dev) { void *hdr; struct wpan_phy *phy; pr_debug("%s\n", __func__); hdr = genlmsg_put(msg, 0, seq, &nl802154_family, flags, IEEE802154_LIST_IFACE); if (!hdr) goto out; phy = ieee802154_mlme_ops(dev)->get_phy(dev); BUG_ON(!phy); NLA_PUT_STRING(msg, IEEE802154_ATTR_DEV_NAME, dev->name); NLA_PUT_STRING(msg, IEEE802154_ATTR_PHY_NAME, wpan_phy_name(phy)); NLA_PUT_U32(msg, IEEE802154_ATTR_DEV_INDEX, dev->ifindex); NLA_PUT(msg, IEEE802154_ATTR_HW_ADDR, IEEE802154_ADDR_LEN, dev->dev_addr); NLA_PUT_U16(msg, IEEE802154_ATTR_SHORT_ADDR, ieee802154_mlme_ops(dev)->get_short_addr(dev)); NLA_PUT_U16(msg, IEEE802154_ATTR_PAN_ID, ieee802154_mlme_ops(dev)->get_pan_id(dev)); wpan_phy_put(phy); return genlmsg_end(msg, hdr); nla_put_failure: wpan_phy_put(phy); genlmsg_cancel(msg, hdr); out: return -EMSGSIZE; } /* Requests from userspace */ static struct net_device *ieee802154_nl_get_dev(struct genl_info *info) { struct net_device *dev; if (info->attrs[IEEE802154_ATTR_DEV_NAME]) { char name[IFNAMSIZ + 1]; nla_strlcpy(name, info->attrs[IEEE802154_ATTR_DEV_NAME], sizeof(name)); dev = dev_get_by_name(&init_net, name); } else if (info->attrs[IEEE802154_ATTR_DEV_INDEX]) dev = dev_get_by_index(&init_net, nla_get_u32(info->attrs[IEEE802154_ATTR_DEV_INDEX])); else return NULL; if (!dev) return NULL; if 
(dev->type != ARPHRD_IEEE802154) { dev_put(dev); return NULL; } return dev; } static int ieee802154_associate_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; u8 page; int ret = -EINVAL; if (!info->attrs[IEEE802154_ATTR_CHANNEL] || !info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || (!info->attrs[IEEE802154_ATTR_COORD_HW_ADDR] && !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]) || !info->attrs[IEEE802154_ATTR_CAPABILITY]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (info->attrs[IEEE802154_ATTR_COORD_HW_ADDR]) { addr.addr_type = IEEE802154_ADDR_LONG; nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_COORD_HW_ADDR], IEEE802154_ADDR_LEN); } else { addr.addr_type = IEEE802154_ADDR_SHORT; addr.short_addr = nla_get_u16( info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); } addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); if (info->attrs[IEEE802154_ATTR_PAGE]) page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); else page = 0; ret = ieee802154_mlme_ops(dev)->assoc_req(dev, &addr, nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]), page, nla_get_u8(info->attrs[IEEE802154_ATTR_CAPABILITY])); dev_put(dev); return ret; } static int ieee802154_associate_resp(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; int ret = -EINVAL; if (!info->attrs[IEEE802154_ATTR_STATUS] || !info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] || !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; addr.addr_type = IEEE802154_ADDR_LONG; nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], IEEE802154_ADDR_LEN); addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); ret = ieee802154_mlme_ops(dev)->assoc_resp(dev, &addr, nla_get_u16(info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]), nla_get_u8(info->attrs[IEEE802154_ATTR_STATUS])); dev_put(dev); return ret; } static int 
ieee802154_disassociate_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; int ret = -EINVAL; if ((!info->attrs[IEEE802154_ATTR_DEST_HW_ADDR] && !info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]) || !info->attrs[IEEE802154_ATTR_REASON]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; if (info->attrs[IEEE802154_ATTR_DEST_HW_ADDR]) { addr.addr_type = IEEE802154_ADDR_LONG; nla_memcpy(addr.hwaddr, info->attrs[IEEE802154_ATTR_DEST_HW_ADDR], IEEE802154_ADDR_LEN); } else { addr.addr_type = IEEE802154_ADDR_SHORT; addr.short_addr = nla_get_u16( info->attrs[IEEE802154_ATTR_DEST_SHORT_ADDR]); } addr.pan_id = ieee802154_mlme_ops(dev)->get_pan_id(dev); ret = ieee802154_mlme_ops(dev)->disassoc_req(dev, &addr, nla_get_u8(info->attrs[IEEE802154_ATTR_REASON])); dev_put(dev); return ret; } /* * PANid, channel, beacon_order = 15, superframe_order = 15, * PAN_coordinator, battery_life_extension = 0, * coord_realignment = 0, security_enable = 0 */ static int ieee802154_start_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; struct ieee802154_addr addr; u8 channel, bcn_ord, sf_ord; u8 page; int pan_coord, blx, coord_realign; int ret; if (!info->attrs[IEEE802154_ATTR_COORD_PAN_ID] || !info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR] || !info->attrs[IEEE802154_ATTR_CHANNEL] || !info->attrs[IEEE802154_ATTR_BCN_ORD] || !info->attrs[IEEE802154_ATTR_SF_ORD] || !info->attrs[IEEE802154_ATTR_PAN_COORD] || !info->attrs[IEEE802154_ATTR_BAT_EXT] || !info->attrs[IEEE802154_ATTR_COORD_REALIGN] ) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; addr.addr_type = IEEE802154_ADDR_SHORT; addr.short_addr = nla_get_u16( info->attrs[IEEE802154_ATTR_COORD_SHORT_ADDR]); addr.pan_id = nla_get_u16(info->attrs[IEEE802154_ATTR_COORD_PAN_ID]); channel = nla_get_u8(info->attrs[IEEE802154_ATTR_CHANNEL]); bcn_ord = nla_get_u8(info->attrs[IEEE802154_ATTR_BCN_ORD]); sf_ord = 
nla_get_u8(info->attrs[IEEE802154_ATTR_SF_ORD]); pan_coord = nla_get_u8(info->attrs[IEEE802154_ATTR_PAN_COORD]); blx = nla_get_u8(info->attrs[IEEE802154_ATTR_BAT_EXT]); coord_realign = nla_get_u8(info->attrs[IEEE802154_ATTR_COORD_REALIGN]); if (info->attrs[IEEE802154_ATTR_PAGE]) page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); else page = 0; if (addr.short_addr == IEEE802154_ADDR_BROADCAST) { ieee802154_nl_start_confirm(dev, IEEE802154_NO_SHORT_ADDRESS); dev_put(dev); return -EINVAL; } ret = ieee802154_mlme_ops(dev)->start_req(dev, &addr, channel, page, bcn_ord, sf_ord, pan_coord, blx, coord_realign); dev_put(dev); return ret; } static int ieee802154_scan_req(struct sk_buff *skb, struct genl_info *info) { struct net_device *dev; int ret; u8 type; u32 channels; u8 duration; u8 page; if (!info->attrs[IEEE802154_ATTR_SCAN_TYPE] || !info->attrs[IEEE802154_ATTR_CHANNELS] || !info->attrs[IEEE802154_ATTR_DURATION]) return -EINVAL; dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; type = nla_get_u8(info->attrs[IEEE802154_ATTR_SCAN_TYPE]); channels = nla_get_u32(info->attrs[IEEE802154_ATTR_CHANNELS]); duration = nla_get_u8(info->attrs[IEEE802154_ATTR_DURATION]); if (info->attrs[IEEE802154_ATTR_PAGE]) page = nla_get_u8(info->attrs[IEEE802154_ATTR_PAGE]); else page = 0; ret = ieee802154_mlme_ops(dev)->scan_req(dev, type, channels, page, duration); dev_put(dev); return ret; } static int ieee802154_list_iface(struct sk_buff *skb, struct genl_info *info) { /* Request for interface name, index, type, IEEE address, PAN Id, short address */ struct sk_buff *msg; struct net_device *dev = NULL; int rc = -ENOBUFS; pr_debug("%s\n", __func__); dev = ieee802154_nl_get_dev(info); if (!dev) return -ENODEV; msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL); if (!msg) goto out_dev; rc = ieee802154_nl_fill_iface(msg, info->snd_pid, info->snd_seq, 0, dev); if (rc < 0) goto out_free; dev_put(dev); return genlmsg_reply(msg, info); out_free: nlmsg_free(msg); out_dev: dev_put(dev); return 
rc; } static int ieee802154_dump_iface(struct sk_buff *skb, struct netlink_callback *cb) { struct net *net = sock_net(skb->sk); struct net_device *dev; int idx; int s_idx = cb->args[0]; pr_debug("%s\n", __func__); idx = 0; for_each_netdev(net, dev) { if (idx < s_idx || (dev->type != ARPHRD_IEEE802154)) goto cont; if (ieee802154_nl_fill_iface(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, dev) < 0) break; cont: idx++; } cb->args[0] = idx; return skb->len; } static struct genl_ops ieee802154_coordinator_ops[] = { IEEE802154_OP(IEEE802154_ASSOCIATE_REQ, ieee802154_associate_req), IEEE802154_OP(IEEE802154_ASSOCIATE_RESP, ieee802154_associate_resp), IEEE802154_OP(IEEE802154_DISASSOCIATE_REQ, ieee802154_disassociate_req), IEEE802154_OP(IEEE802154_SCAN_REQ, ieee802154_scan_req), IEEE802154_OP(IEEE802154_START_REQ, ieee802154_start_req), IEEE802154_DUMP(IEEE802154_LIST_IFACE, ieee802154_list_iface, ieee802154_dump_iface), }; /* * No need to unregister as family unregistration will do it. */ int nl802154_mac_register(void) { int i; int rc; rc = genl_register_mc_group(&nl802154_family, &ieee802154_coord_mcgrp); if (rc) return rc; rc = genl_register_mc_group(&nl802154_family, &ieee802154_beacon_mcgrp); if (rc) return rc; for (i = 0; i < ARRAY_SIZE(ieee802154_coordinator_ops); i++) { rc = genl_register_ops(&nl802154_family, &ieee802154_coordinator_ops[i]); if (rc) return rc; } return 0; }
gpl-2.0
VanirAOSP/kernel_htc_msm8974
arch/blackfin/mach-bf561/boards/ezkit.c
4552
15346
/* * Copyright 2004-2009 Analog Devices Inc. * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> #include <asm/dpmc.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "ADI BF561-EZKIT"; #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) #include <linux/usb/isp1760.h> static struct resource bfin_isp1760_resources[] = { [0] = { .start = 0x2C0F0000, .end = 0x203C0000 + 0xfffff, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_PF10, .end = IRQ_PF10, .flags = IORESOURCE_IRQ, }, }; static struct isp1760_platform_data isp1760_priv = { .is_isp1761 = 0, .bus_width_16 = 1, .port1_otg = 0, .analog_oc = 0, .dack_polarity_high = 0, .dreq_polarity_high = 0, }; static struct platform_device bfin_isp1760_device = { .name = "isp1760", .id = 0, .dev = { .platform_data = &isp1760_priv, }, .num_resources = ARRAY_SIZE(bfin_isp1760_resources), .resource = bfin_isp1760_resources, }; #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include <linux/usb/isp1362.h> static struct resource isp1362_hcd_resources[] = { { .start = 0x2c060000, .end = 0x2c060000, .flags = IORESOURCE_MEM, }, { .start = 0x2c060004, .end = 0x2c060004, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF8, .end = IRQ_PF8, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, }, }; static struct isp1362_platform_data isp1362_priv = { .sel15Kres = 1, .clknotstop = 0, .oc_enable = 0, .int_act_high = 0, .int_edge_triggered = 0, .remote_wakeup_connected = 0, .no_power_switching = 1, .power_switching_mode = 0, }; static struct platform_device 
isp1362_hcd_device = { .name = "isp1362-hcd", .id = 0, .dev = { .platform_data = &isp1362_priv, }, .num_resources = ARRAY_SIZE(isp1362_hcd_resources), .resource = isp1362_hcd_resources, }; #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) static struct resource net2272_bfin_resources[] = { { .start = 0x2C000000, .end = 0x2C000000 + 0x7F, .flags = IORESOURCE_MEM, }, { .start = 1, .flags = IORESOURCE_BUS, }, { .start = IRQ_PF10, .end = IRQ_PF10, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, }; static struct platform_device net2272_bfin_device = { .name = "net2272", .id = -1, .num_resources = ARRAY_SIZE(net2272_bfin_resources), .resource = net2272_bfin_resources, }; #endif /* * USB-LAN EzExtender board * Driver needs to know address, irq and flag pin. */ #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) #include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_32BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { { .name = "smc91x-regs", .start = 0x2C010300, .end = 0x2C010300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF9, .end = IRQ_PF9, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = BFIN_UART_THR, .end = BFIN_UART_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART_TX, .end = IRQ_UART_TX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART_RX, .end = IRQ_UART_RX, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART_ERROR, .end = IRQ_UART_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART_TX, .end = CH_UART_TX, .flags = IORESOURCE_DMA, }, { .start = 
CH_UART_RX, .end = CH_UART_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition ezkit_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x1C0000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size = 0x800000 - 0x40000 - 0x1C0000 - 0x2000 * 8, .offset = MTDPART_OFS_APPEND, }, { .name = "config(nor)", .size = 0x2000 * 7, .offset = MTDPART_OFS_APPEND, }, { .name = "u-boot env(nor)", .size = 0x2000, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data ezkit_flash_data = { .width = 2, .parts = ezkit_partitions, .nr_parts = ARRAY_SIZE(ezkit_partitions), }; static struct resource ezkit_flash_resource = { .start = 0x20000000, .end = 0x207fffff, .flags = IORESOURCE_MEM, }; static struct platform_device ezkit_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &ezkit_flash_data, }, .num_resources = 1, .resource = &ezkit_flash_resource, }; #endif #if defined(CONFIG_SPI_BFIN5XX) || 
defined(CONFIG_SPI_BFIN5XX_MODULE) /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_SND_BF5XX_SOC_AD183X) \ || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) { .modalias = "ad183x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 4, .platform_data = "ad1836", /* only includes chip name for the moment */ .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_SPI_SPIDEV) || defined(CONFIG_SPI_SPIDEV_MODULE) { .modalias = "spidev", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, }, #endif }; #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) #include <linux/input.h> #include <linux/gpio_keys.h> static struct gpio_keys_button bfin_gpio_keys_table[] = { {BTN_0, GPIO_PF5, 1, "gpio-keys: BTN0"}, {BTN_1, GPIO_PF6, 1, "gpio-keys: BTN1"}, {BTN_2, GPIO_PF7, 1, "gpio-keys: BTN2"}, {BTN_3, GPIO_PF8, 1, "gpio-keys: BTN3"}, }; static struct gpio_keys_platform_data bfin_gpio_keys_data = { .buttons = bfin_gpio_keys_table, .nbuttons = ARRAY_SIZE(bfin_gpio_keys_table), }; static struct platform_device bfin_device_gpiokeys = { .name = "gpio-keys", .dev = { .platform_data = &bfin_gpio_keys_data, 
}, }; #endif #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) #include <linux/i2c-gpio.h> static struct i2c_gpio_platform_data i2c_gpio_data = { .sda_pin = GPIO_PF1, .scl_pin = GPIO_PF0, .sda_is_open_drain = 0, .scl_is_open_drain = 0, .udelay = 10, }; static struct platform_device i2c_gpio_device = { .name = "i2c-gpio", .id = 0, .dev = { .platform_data = &i2c_gpio_data, }, }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_085, 250000000), VRPAIR(VLEV_090, 300000000), VRPAIR(VLEV_095, 313000000), VRPAIR(VLEV_100, 350000000), VRPAIR(VLEV_105, 400000000), VRPAIR(VLEV_110, 444000000), VRPAIR(VLEV_115, 450000000), VRPAIR(VLEV_120, 475000000), VRPAIR(VLEV_125, 500000000), VRPAIR(VLEV_130, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; #if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE) #include <linux/videodev2.h> #include <media/blackfin/bfin_capture.h> #include <media/blackfin/ppi.h> static const unsigned short ppi_req[] = { P_PPI0_D0, P_PPI0_D1, P_PPI0_D2, P_PPI0_D3, P_PPI0_D4, P_PPI0_D5, P_PPI0_D6, P_PPI0_D7, P_PPI0_CLK, P_PPI0_FS1, P_PPI0_FS2, 0, }; static const struct ppi_info ppi_info = { .type = PPI_TYPE_PPI, .dma_ch = CH_PPI0, .irq_err = IRQ_PPI1_ERROR, .base = (void __iomem *)PPI0_CONTROL, .pin_req = ppi_req, }; #if defined(CONFIG_VIDEO_ADV7183) \ || defined(CONFIG_VIDEO_ADV7183_MODULE) #include <media/adv7183.h> static struct v4l2_input adv7183_inputs[] = { { .index = 0, .name = "Composite", .type = V4L2_INPUT_TYPE_CAMERA, .std = V4L2_STD_ALL, }, { .index = 1, .name = "S-Video", .type = V4L2_INPUT_TYPE_CAMERA, .std = V4L2_STD_ALL, }, { .index = 2, .name = "Component", .type = V4L2_INPUT_TYPE_CAMERA, .std = V4L2_STD_ALL, }, 
}; static struct bcap_route adv7183_routes[] = { { .input = ADV7183_COMPOSITE4, .output = ADV7183_8BIT_OUT, }, { .input = ADV7183_SVIDEO0, .output = ADV7183_8BIT_OUT, }, { .input = ADV7183_COMPONENT0, .output = ADV7183_8BIT_OUT, }, }; static const unsigned adv7183_gpio[] = { GPIO_PF13, /* reset pin */ GPIO_PF2, /* output enable pin */ }; static struct bfin_capture_config bfin_capture_data = { .card_name = "BF561", .inputs = adv7183_inputs, .num_inputs = ARRAY_SIZE(adv7183_inputs), .routes = adv7183_routes, .i2c_adapter_id = 0, .board_info = { .type = "adv7183", .addr = 0x20, .platform_data = (void *)adv7183_gpio, }, .ppi_info = &ppi_info, .ppi_control = (PACK_EN | DLEN_8 | DMA32 | FLD_SEL), }; #endif static struct platform_device bfin_capture_device = { .name = "bfin_capture", .dev = { .platform_data = &bfin_capture_data, }, }; #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) static struct platform_device bfin_i2s = { .name = "bfin-i2s", .id = CONFIG_SND_BF5XX_SPORT_NUM, /* TODO: add platform data here */ }; #endif #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) static struct platform_device bfin_tdm = { .name = "bfin-tdm", .id = CONFIG_SND_BF5XX_SPORT_NUM, /* TODO: add platform data here */ }; #endif #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) static struct platform_device bfin_ac97 = { .name = "bfin-ac97", .id = CONFIG_SND_BF5XX_SPORT_NUM, /* TODO: add platform data here */ }; #endif static struct platform_device *ezkit_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) &net2272_bfin_device, #endif #if defined(CONFIG_USB_ISP1760_HCD) || defined(CONFIG_USB_ISP1760_HCD_MODULE) &bfin_isp1760_device, #endif #if defined(CONFIG_SPI_BFIN5XX) || defined(CONFIG_SPI_BFIN5XX_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_SERIAL_BFIN) || 
defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #endif #if defined(CONFIG_KEYBOARD_GPIO) || defined(CONFIG_KEYBOARD_GPIO_MODULE) &bfin_device_gpiokeys, #endif #if defined(CONFIG_I2C_GPIO) || defined(CONFIG_I2C_GPIO_MODULE) &i2c_gpio_device, #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) &isp1362_hcd_device, #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &ezkit_flash_device, #endif #if defined(CONFIG_VIDEO_BLACKFIN_CAPTURE) \ || defined(CONFIG_VIDEO_BLACKFIN_CAPTURE_MODULE) &bfin_capture_device, #endif #if defined(CONFIG_SND_BF5XX_I2S) || defined(CONFIG_SND_BF5XX_I2S_MODULE) &bfin_i2s, #endif #if defined(CONFIG_SND_BF5XX_TDM) || defined(CONFIG_SND_BF5XX_TDM_MODULE) &bfin_tdm, #endif #if defined(CONFIG_SND_BF5XX_AC97) || defined(CONFIG_SND_BF5XX_AC97_MODULE) &bfin_ac97, #endif }; static int __init net2272_init(void) { #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) int ret; ret = gpio_request(GPIO_PF11, "net2272"); if (ret) return ret; /* Reset the USB chip */ gpio_direction_output(GPIO_PF11, 0); mdelay(2); gpio_set_value(GPIO_PF11, 1); #endif return 0; } static int __init ezkit_init(void) { int ret; printk(KERN_INFO "%s(): registering device resources\n", __func__); ret = platform_add_devices(ezkit_devices, ARRAY_SIZE(ezkit_devices)); if (ret < 0) return ret; #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) bfin_write_FIO0_DIR(bfin_read_FIO0_DIR() | (1 << 12)); SSYNC(); #endif #if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) bfin_write_FIO0_DIR(bfin_read_FIO0_DIR() | (1 << 15)); bfin_write_FIO0_FLAG_S(1 << 15); SSYNC(); /* * This initialization lasts for approximately 4500 MCLKs. 
* MCLK = 12.288MHz */ udelay(400); #endif if (net2272_init()) pr_warning("unable to configure net2272; it probably won't work\n"); spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); return 0; } arch_initcall(ezkit_init); static struct platform_device *ezkit_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(ezkit_early_devices, ARRAY_SIZE(ezkit_early_devices)); }
gpl-2.0
AK-Kernel/AK-OnePone
arch/mips/bcm63xx/irq.c
4552
12406
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr> * Copyright (C) 2008 Nicolas Schichan <nschichan@freebox.fr> */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/irq.h> #include <asm/irq_cpu.h> #include <asm/mipsregs.h> #include <bcm63xx_cpu.h> #include <bcm63xx_regs.h> #include <bcm63xx_io.h> #include <bcm63xx_irq.h> static void __dispatch_internal(void) __maybe_unused; static void __dispatch_internal_64(void) __maybe_unused; static void __internal_irq_mask_32(unsigned int irq) __maybe_unused; static void __internal_irq_mask_64(unsigned int irq) __maybe_unused; static void __internal_irq_unmask_32(unsigned int irq) __maybe_unused; static void __internal_irq_unmask_64(unsigned int irq) __maybe_unused; #ifndef BCMCPU_RUNTIME_DETECT #ifdef CONFIG_BCM63XX_CPU_6338 #define irq_stat_reg PERF_IRQSTAT_6338_REG #define irq_mask_reg PERF_IRQMASK_6338_REG #define irq_bits 32 #define is_ext_irq_cascaded 0 #define ext_irq_start 0 #define ext_irq_end 0 #define ext_irq_count 4 #define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6338 #define ext_irq_cfg_reg2 0 #endif #ifdef CONFIG_BCM63XX_CPU_6345 #define irq_stat_reg PERF_IRQSTAT_6345_REG #define irq_mask_reg PERF_IRQMASK_6345_REG #define irq_bits 32 #define is_ext_irq_cascaded 0 #define ext_irq_start 0 #define ext_irq_end 0 #define ext_irq_count 0 #define ext_irq_cfg_reg1 0 #define ext_irq_cfg_reg2 0 #endif #ifdef CONFIG_BCM63XX_CPU_6348 #define irq_stat_reg PERF_IRQSTAT_6348_REG #define irq_mask_reg PERF_IRQMASK_6348_REG #define irq_bits 32 #define is_ext_irq_cascaded 0 #define ext_irq_start 0 #define ext_irq_end 0 #define ext_irq_count 4 #define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6348 #define ext_irq_cfg_reg2 0 #endif #ifdef CONFIG_BCM63XX_CPU_6358 #define irq_stat_reg 
PERF_IRQSTAT_6358_REG #define irq_mask_reg PERF_IRQMASK_6358_REG #define irq_bits 32 #define is_ext_irq_cascaded 1 #define ext_irq_start (BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE) #define ext_irq_end (BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE) #define ext_irq_count 4 #define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6358 #define ext_irq_cfg_reg2 0 #endif #ifdef CONFIG_BCM63XX_CPU_6368 #define irq_stat_reg PERF_IRQSTAT_6368_REG #define irq_mask_reg PERF_IRQMASK_6368_REG #define irq_bits 64 #define is_ext_irq_cascaded 1 #define ext_irq_start (BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE) #define ext_irq_end (BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE) #define ext_irq_count 6 #define ext_irq_cfg_reg1 PERF_EXTIRQ_CFG_REG_6368 #define ext_irq_cfg_reg2 PERF_EXTIRQ_CFG_REG2_6368 #endif #if irq_bits == 32 #define dispatch_internal __dispatch_internal #define internal_irq_mask __internal_irq_mask_32 #define internal_irq_unmask __internal_irq_unmask_32 #else #define dispatch_internal __dispatch_internal_64 #define internal_irq_mask __internal_irq_mask_64 #define internal_irq_unmask __internal_irq_unmask_64 #endif #define irq_stat_addr (bcm63xx_regset_address(RSET_PERF) + irq_stat_reg) #define irq_mask_addr (bcm63xx_regset_address(RSET_PERF) + irq_mask_reg) static inline void bcm63xx_init_irq(void) { } #else /* ! 
BCMCPU_RUNTIME_DETECT */ static u32 irq_stat_addr, irq_mask_addr; static void (*dispatch_internal)(void); static int is_ext_irq_cascaded; static unsigned int ext_irq_count; static unsigned int ext_irq_start, ext_irq_end; static unsigned int ext_irq_cfg_reg1, ext_irq_cfg_reg2; static void (*internal_irq_mask)(unsigned int irq); static void (*internal_irq_unmask)(unsigned int irq); static void bcm63xx_init_irq(void) { int irq_bits; irq_stat_addr = bcm63xx_regset_address(RSET_PERF); irq_mask_addr = bcm63xx_regset_address(RSET_PERF); switch (bcm63xx_get_cpu_id()) { case BCM6338_CPU_ID: irq_stat_addr += PERF_IRQSTAT_6338_REG; irq_mask_addr += PERF_IRQMASK_6338_REG; irq_bits = 32; break; case BCM6345_CPU_ID: irq_stat_addr += PERF_IRQSTAT_6345_REG; irq_mask_addr += PERF_IRQMASK_6345_REG; irq_bits = 32; break; case BCM6348_CPU_ID: irq_stat_addr += PERF_IRQSTAT_6348_REG; irq_mask_addr += PERF_IRQMASK_6348_REG; irq_bits = 32; ext_irq_count = 4; ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6348; break; case BCM6358_CPU_ID: irq_stat_addr += PERF_IRQSTAT_6358_REG; irq_mask_addr += PERF_IRQMASK_6358_REG; irq_bits = 32; ext_irq_count = 4; is_ext_irq_cascaded = 1; ext_irq_start = BCM_6358_EXT_IRQ0 - IRQ_INTERNAL_BASE; ext_irq_end = BCM_6358_EXT_IRQ3 - IRQ_INTERNAL_BASE; ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6358; break; case BCM6368_CPU_ID: irq_stat_addr += PERF_IRQSTAT_6368_REG; irq_mask_addr += PERF_IRQMASK_6368_REG; irq_bits = 64; ext_irq_count = 6; is_ext_irq_cascaded = 1; ext_irq_start = BCM_6368_EXT_IRQ0 - IRQ_INTERNAL_BASE; ext_irq_end = BCM_6368_EXT_IRQ5 - IRQ_INTERNAL_BASE; ext_irq_cfg_reg1 = PERF_EXTIRQ_CFG_REG_6368; ext_irq_cfg_reg2 = PERF_EXTIRQ_CFG_REG2_6368; break; default: BUG(); } if (irq_bits == 32) { dispatch_internal = __dispatch_internal; internal_irq_mask = __internal_irq_mask_32; internal_irq_unmask = __internal_irq_unmask_32; } else { dispatch_internal = __dispatch_internal_64; internal_irq_mask = __internal_irq_mask_64; internal_irq_unmask = 
__internal_irq_unmask_64; } } #endif /* ! BCMCPU_RUNTIME_DETECT */ static inline u32 get_ext_irq_perf_reg(int irq) { if (irq < 4) return ext_irq_cfg_reg1; return ext_irq_cfg_reg2; } static inline void handle_internal(int intbit) { if (is_ext_irq_cascaded && intbit >= ext_irq_start && intbit <= ext_irq_end) do_IRQ(intbit - ext_irq_start + IRQ_EXTERNAL_BASE); else do_IRQ(intbit + IRQ_INTERNAL_BASE); } /* * dispatch internal devices IRQ (uart, enet, watchdog, ...). do not * prioritize any interrupt relatively to another. the static counter * will resume the loop where it ended the last time we left this * function. */ static void __dispatch_internal(void) { u32 pending; static int i; pending = bcm_readl(irq_stat_addr) & bcm_readl(irq_mask_addr); if (!pending) return ; while (1) { int to_call = i; i = (i + 1) & 0x1f; if (pending & (1 << to_call)) { handle_internal(to_call); break; } } } static void __dispatch_internal_64(void) { u64 pending; static int i; pending = bcm_readq(irq_stat_addr) & bcm_readq(irq_mask_addr); if (!pending) return ; while (1) { int to_call = i; i = (i + 1) & 0x3f; if (pending & (1ull << to_call)) { handle_internal(to_call); break; } } } asmlinkage void plat_irq_dispatch(void) { u32 cause; do { cause = read_c0_cause() & read_c0_status() & ST0_IM; if (!cause) break; if (cause & CAUSEF_IP7) do_IRQ(7); if (cause & CAUSEF_IP2) dispatch_internal(); if (!is_ext_irq_cascaded) { if (cause & CAUSEF_IP3) do_IRQ(IRQ_EXT_0); if (cause & CAUSEF_IP4) do_IRQ(IRQ_EXT_1); if (cause & CAUSEF_IP5) do_IRQ(IRQ_EXT_2); if (cause & CAUSEF_IP6) do_IRQ(IRQ_EXT_3); } } while (1); } /* * internal IRQs operations: only mask/unmask on PERF irq mask * register. 
*/ static void __internal_irq_mask_32(unsigned int irq) { u32 mask; mask = bcm_readl(irq_mask_addr); mask &= ~(1 << irq); bcm_writel(mask, irq_mask_addr); } static void __internal_irq_mask_64(unsigned int irq) { u64 mask; mask = bcm_readq(irq_mask_addr); mask &= ~(1ull << irq); bcm_writeq(mask, irq_mask_addr); } static void __internal_irq_unmask_32(unsigned int irq) { u32 mask; mask = bcm_readl(irq_mask_addr); mask |= (1 << irq); bcm_writel(mask, irq_mask_addr); } static void __internal_irq_unmask_64(unsigned int irq) { u64 mask; mask = bcm_readq(irq_mask_addr); mask |= (1ull << irq); bcm_writeq(mask, irq_mask_addr); } static void bcm63xx_internal_irq_mask(struct irq_data *d) { internal_irq_mask(d->irq - IRQ_INTERNAL_BASE); } static void bcm63xx_internal_irq_unmask(struct irq_data *d) { internal_irq_unmask(d->irq - IRQ_INTERNAL_BASE); } /* * external IRQs operations: mask/unmask and clear on PERF external * irq control register. */ static void bcm63xx_external_irq_mask(struct irq_data *d) { unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; u32 reg, regaddr; regaddr = get_ext_irq_perf_reg(irq); reg = bcm_perf_readl(regaddr); if (BCMCPU_IS_6348()) reg &= ~EXTIRQ_CFG_MASK_6348(irq % 4); else reg &= ~EXTIRQ_CFG_MASK(irq % 4); bcm_perf_writel(reg, regaddr); if (is_ext_irq_cascaded) internal_irq_mask(irq + ext_irq_start); } static void bcm63xx_external_irq_unmask(struct irq_data *d) { unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; u32 reg, regaddr; regaddr = get_ext_irq_perf_reg(irq); reg = bcm_perf_readl(regaddr); if (BCMCPU_IS_6348()) reg |= EXTIRQ_CFG_MASK_6348(irq % 4); else reg |= EXTIRQ_CFG_MASK(irq % 4); bcm_perf_writel(reg, regaddr); if (is_ext_irq_cascaded) internal_irq_unmask(irq + ext_irq_start); } static void bcm63xx_external_irq_clear(struct irq_data *d) { unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; u32 reg, regaddr; regaddr = get_ext_irq_perf_reg(irq); reg = bcm_perf_readl(regaddr); if (BCMCPU_IS_6348()) reg |= EXTIRQ_CFG_CLEAR_6348(irq % 4); else reg |= 
EXTIRQ_CFG_CLEAR(irq % 4); bcm_perf_writel(reg, regaddr); } static int bcm63xx_external_irq_set_type(struct irq_data *d, unsigned int flow_type) { unsigned int irq = d->irq - IRQ_EXTERNAL_BASE; u32 reg, regaddr; int levelsense, sense, bothedge; flow_type &= IRQ_TYPE_SENSE_MASK; if (flow_type == IRQ_TYPE_NONE) flow_type = IRQ_TYPE_LEVEL_LOW; levelsense = sense = bothedge = 0; switch (flow_type) { case IRQ_TYPE_EDGE_BOTH: bothedge = 1; break; case IRQ_TYPE_EDGE_RISING: sense = 1; break; case IRQ_TYPE_EDGE_FALLING: break; case IRQ_TYPE_LEVEL_HIGH: levelsense = 1; sense = 1; break; case IRQ_TYPE_LEVEL_LOW: levelsense = 1; break; default: printk(KERN_ERR "bogus flow type combination given !\n"); return -EINVAL; } regaddr = get_ext_irq_perf_reg(irq); reg = bcm_perf_readl(regaddr); irq %= 4; if (BCMCPU_IS_6348()) { if (levelsense) reg |= EXTIRQ_CFG_LEVELSENSE_6348(irq); else reg &= ~EXTIRQ_CFG_LEVELSENSE_6348(irq); if (sense) reg |= EXTIRQ_CFG_SENSE_6348(irq); else reg &= ~EXTIRQ_CFG_SENSE_6348(irq); if (bothedge) reg |= EXTIRQ_CFG_BOTHEDGE_6348(irq); else reg &= ~EXTIRQ_CFG_BOTHEDGE_6348(irq); } if (BCMCPU_IS_6338() || BCMCPU_IS_6358() || BCMCPU_IS_6368()) { if (levelsense) reg |= EXTIRQ_CFG_LEVELSENSE(irq); else reg &= ~EXTIRQ_CFG_LEVELSENSE(irq); if (sense) reg |= EXTIRQ_CFG_SENSE(irq); else reg &= ~EXTIRQ_CFG_SENSE(irq); if (bothedge) reg |= EXTIRQ_CFG_BOTHEDGE(irq); else reg &= ~EXTIRQ_CFG_BOTHEDGE(irq); } bcm_perf_writel(reg, regaddr); irqd_set_trigger_type(d, flow_type); if (flow_type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) __irq_set_handler_locked(d->irq, handle_level_irq); else __irq_set_handler_locked(d->irq, handle_edge_irq); return IRQ_SET_MASK_OK_NOCOPY; } static struct irq_chip bcm63xx_internal_irq_chip = { .name = "bcm63xx_ipic", .irq_mask = bcm63xx_internal_irq_mask, .irq_unmask = bcm63xx_internal_irq_unmask, }; static struct irq_chip bcm63xx_external_irq_chip = { .name = "bcm63xx_epic", .irq_ack = bcm63xx_external_irq_clear, .irq_mask = 
bcm63xx_external_irq_mask, .irq_unmask = bcm63xx_external_irq_unmask, .irq_set_type = bcm63xx_external_irq_set_type, }; static struct irqaction cpu_ip2_cascade_action = { .handler = no_action, .name = "cascade_ip2", .flags = IRQF_NO_THREAD, }; static struct irqaction cpu_ext_cascade_action = { .handler = no_action, .name = "cascade_extirq", .flags = IRQF_NO_THREAD, }; void __init arch_init_irq(void) { int i; bcm63xx_init_irq(); mips_cpu_irq_init(); for (i = IRQ_INTERNAL_BASE; i < NR_IRQS; ++i) irq_set_chip_and_handler(i, &bcm63xx_internal_irq_chip, handle_level_irq); for (i = IRQ_EXTERNAL_BASE; i < IRQ_EXTERNAL_BASE + ext_irq_count; ++i) irq_set_chip_and_handler(i, &bcm63xx_external_irq_chip, handle_edge_irq); if (!is_ext_irq_cascaded) { for (i = 3; i < 3 + ext_irq_count; ++i) setup_irq(MIPS_CPU_IRQ_BASE + i, &cpu_ext_cascade_action); } setup_irq(MIPS_CPU_IRQ_BASE + 2, &cpu_ip2_cascade_action); }
gpl-2.0
IndieBeto/moggy
drivers/video/backlight/corgi_lcd.c
4808
16857
/* * LCD/Backlight Driver for Sharp Zaurus Handhelds (various models) * * Copyright (c) 2004-2006 Richard Purdie * * Based on Sharp's 2.4 Backlight Driver * * Copyright (c) 2008 Marvell International Ltd. * Converted to SPI device based LCD/Backlight device driver * by Eric Miao <eric.miao@marvell.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/gpio.h> #include <linux/fb.h> #include <linux/lcd.h> #include <linux/spi/spi.h> #include <linux/spi/corgi_lcd.h> #include <linux/slab.h> #include <asm/mach/sharpsl_param.h> #define POWER_IS_ON(pwr) ((pwr) <= FB_BLANK_NORMAL) /* Register Addresses */ #define RESCTL_ADRS 0x00 #define PHACTRL_ADRS 0x01 #define DUTYCTRL_ADRS 0x02 #define POWERREG0_ADRS 0x03 #define POWERREG1_ADRS 0x04 #define GPOR3_ADRS 0x05 #define PICTRL_ADRS 0x06 #define POLCTRL_ADRS 0x07 /* Register Bit Definitions */ #define RESCTL_QVGA 0x01 #define RESCTL_VGA 0x00 #define POWER1_VW_ON 0x01 /* VW Supply FET ON */ #define POWER1_GVSS_ON 0x02 /* GVSS(-8V) Power Supply ON */ #define POWER1_VDD_ON 0x04 /* VDD(8V),SVSS(-4V) Power Supply ON */ #define POWER1_VW_OFF 0x00 /* VW Supply FET OFF */ #define POWER1_GVSS_OFF 0x00 /* GVSS(-8V) Power Supply OFF */ #define POWER1_VDD_OFF 0x00 /* VDD(8V),SVSS(-4V) Power Supply OFF */ #define POWER0_COM_DCLK 0x01 /* COM Voltage DC Bias DAC Serial Data Clock */ #define POWER0_COM_DOUT 0x02 /* COM Voltage DC Bias DAC Serial Data Out */ #define POWER0_DAC_ON 0x04 /* DAC Power Supply ON */ #define POWER0_COM_ON 0x08 /* COM Power Supply ON */ #define POWER0_VCC5_ON 0x10 /* VCC5 Power Supply ON */ #define POWER0_DAC_OFF 0x00 /* DAC Power Supply OFF */ #define POWER0_COM_OFF 0x00 /* COM Power Supply OFF */ #define POWER0_VCC5_OFF 0x00 /* VCC5 Power Supply OFF */ #define 
PICTRL_INIT_STATE 0x01 #define PICTRL_INIOFF 0x02 #define PICTRL_POWER_DOWN 0x04 #define PICTRL_COM_SIGNAL_OFF 0x08 #define PICTRL_DAC_SIGNAL_OFF 0x10 #define POLCTRL_SYNC_POL_FALL 0x01 #define POLCTRL_EN_POL_FALL 0x02 #define POLCTRL_DATA_POL_FALL 0x04 #define POLCTRL_SYNC_ACT_H 0x08 #define POLCTRL_EN_ACT_L 0x10 #define POLCTRL_SYNC_POL_RISE 0x00 #define POLCTRL_EN_POL_RISE 0x00 #define POLCTRL_DATA_POL_RISE 0x00 #define POLCTRL_SYNC_ACT_L 0x00 #define POLCTRL_EN_ACT_H 0x00 #define PHACTRL_PHASE_MANUAL 0x01 #define DEFAULT_PHAD_QVGA (9) #define DEFAULT_COMADJ (125) struct corgi_lcd { struct spi_device *spi_dev; struct lcd_device *lcd_dev; struct backlight_device *bl_dev; int limit_mask; int intensity; int power; int mode; char buf[2]; int gpio_backlight_on; int gpio_backlight_cont; int gpio_backlight_cont_inverted; void (*kick_battery)(void); }; static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int reg, uint8_t val); static struct corgi_lcd *the_corgi_lcd; static unsigned long corgibl_flags; #define CORGIBL_SUSPENDED 0x01 #define CORGIBL_BATTLOW 0x02 /* * This is only a pseudo I2C interface. We can't use the standard kernel * routines as the interface is write only. We just assume the data is acked... 
*/ static void lcdtg_ssp_i2c_send(struct corgi_lcd *lcd, uint8_t data) { corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, data); udelay(10); } static void lcdtg_i2c_send_bit(struct corgi_lcd *lcd, uint8_t data) { lcdtg_ssp_i2c_send(lcd, data); lcdtg_ssp_i2c_send(lcd, data | POWER0_COM_DCLK); lcdtg_ssp_i2c_send(lcd, data); } static void lcdtg_i2c_send_start(struct corgi_lcd *lcd, uint8_t base) { lcdtg_ssp_i2c_send(lcd, base | POWER0_COM_DCLK | POWER0_COM_DOUT); lcdtg_ssp_i2c_send(lcd, base | POWER0_COM_DCLK); lcdtg_ssp_i2c_send(lcd, base); } static void lcdtg_i2c_send_stop(struct corgi_lcd *lcd, uint8_t base) { lcdtg_ssp_i2c_send(lcd, base); lcdtg_ssp_i2c_send(lcd, base | POWER0_COM_DCLK); lcdtg_ssp_i2c_send(lcd, base | POWER0_COM_DCLK | POWER0_COM_DOUT); } static void lcdtg_i2c_send_byte(struct corgi_lcd *lcd, uint8_t base, uint8_t data) { int i; for (i = 0; i < 8; i++) { if (data & 0x80) lcdtg_i2c_send_bit(lcd, base | POWER0_COM_DOUT); else lcdtg_i2c_send_bit(lcd, base); data <<= 1; } } static void lcdtg_i2c_wait_ack(struct corgi_lcd *lcd, uint8_t base) { lcdtg_i2c_send_bit(lcd, base); } static void lcdtg_set_common_voltage(struct corgi_lcd *lcd, uint8_t base_data, uint8_t data) { /* Set Common Voltage to M62332FP via I2C */ lcdtg_i2c_send_start(lcd, base_data); lcdtg_i2c_send_byte(lcd, base_data, 0x9c); lcdtg_i2c_wait_ack(lcd, base_data); lcdtg_i2c_send_byte(lcd, base_data, 0x00); lcdtg_i2c_wait_ack(lcd, base_data); lcdtg_i2c_send_byte(lcd, base_data, data); lcdtg_i2c_wait_ack(lcd, base_data); lcdtg_i2c_send_stop(lcd, base_data); } static int corgi_ssp_lcdtg_send(struct corgi_lcd *lcd, int adrs, uint8_t data) { struct spi_message msg; struct spi_transfer xfer = { .len = 1, .cs_change = 1, .tx_buf = lcd->buf, }; lcd->buf[0] = ((adrs & 0x07) << 5) | (data & 0x1f); spi_message_init(&msg); spi_message_add_tail(&xfer, &msg); return spi_sync(lcd->spi_dev, &msg); } /* Set Phase Adjust */ static void lcdtg_set_phadadj(struct corgi_lcd *lcd, int mode) { int adj; switch(mode) { 
case CORGI_LCD_MODE_VGA: /* Setting for VGA */ adj = sharpsl_param.phadadj; adj = (adj < 0) ? PHACTRL_PHASE_MANUAL : PHACTRL_PHASE_MANUAL | ((adj & 0xf) << 1); break; case CORGI_LCD_MODE_QVGA: default: /* Setting for QVGA */ adj = (DEFAULT_PHAD_QVGA << 1) | PHACTRL_PHASE_MANUAL; break; } corgi_ssp_lcdtg_send(lcd, PHACTRL_ADRS, adj); } static void corgi_lcd_power_on(struct corgi_lcd *lcd) { int comadj; /* Initialize Internal Logic & Port */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_POWER_DOWN | PICTRL_INIOFF | PICTRL_INIT_STATE | PICTRL_COM_SIGNAL_OFF | PICTRL_DAC_SIGNAL_OFF); corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_OFF | POWER0_COM_OFF | POWER0_VCC5_OFF); corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF); /* VDD(+8V), SVSS(-4V) ON */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON); mdelay(3); /* DAC ON */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF); /* INIB = H, INI = L */ /* PICTL[0] = H , PICTL[1] = PICTL[2] = PICTL[4] = L */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_INIT_STATE | PICTRL_COM_SIGNAL_OFF); /* Set Common Voltage */ comadj = sharpsl_param.comadj; if (comadj < 0) comadj = DEFAULT_COMADJ; lcdtg_set_common_voltage(lcd, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF, comadj); /* VCC5 ON, DAC ON */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON); /* GVSS(-8V) ON, VDD ON */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON); mdelay(2); /* COM SIGNAL ON (PICTL[3] = L) */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_INIT_STATE); /* COM ON, DAC ON, VCC5_ON */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_COM_DCLK | POWER0_COM_DOUT | POWER0_DAC_ON | POWER0_COM_ON | POWER0_VCC5_ON); /* VW ON, GVSS ON, VDD ON */ 
corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_ON | POWER1_GVSS_ON | POWER1_VDD_ON); /* Signals output enable */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, 0); /* Set Phase Adjust */ lcdtg_set_phadadj(lcd, lcd->mode); /* Initialize for Input Signals from ATI */ corgi_ssp_lcdtg_send(lcd, POLCTRL_ADRS, POLCTRL_SYNC_POL_RISE | POLCTRL_EN_POL_RISE | POLCTRL_DATA_POL_RISE | POLCTRL_SYNC_ACT_L | POLCTRL_EN_ACT_H); udelay(1000); switch (lcd->mode) { case CORGI_LCD_MODE_VGA: corgi_ssp_lcdtg_send(lcd, RESCTL_ADRS, RESCTL_VGA); break; case CORGI_LCD_MODE_QVGA: default: corgi_ssp_lcdtg_send(lcd, RESCTL_ADRS, RESCTL_QVGA); break; } } static void corgi_lcd_power_off(struct corgi_lcd *lcd) { /* 60Hz x 2 frame = 16.7msec x 2 = 33.4 msec */ msleep(34); /* (1)VW OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_ON | POWER1_VDD_ON); /* (2)COM OFF */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_COM_SIGNAL_OFF); corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON); /* (3)Set Common Voltage Bias 0V */ lcdtg_set_common_voltage(lcd, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_ON, 0); /* (4)GVSS OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_ON); /* (5)VCC5 OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_DAC_ON | POWER0_COM_OFF | POWER0_VCC5_OFF); /* (6)Set PDWN, INIOFF, DACOFF */ corgi_ssp_lcdtg_send(lcd, PICTRL_ADRS, PICTRL_INIOFF | PICTRL_DAC_SIGNAL_OFF | PICTRL_POWER_DOWN | PICTRL_COM_SIGNAL_OFF); /* (7)DAC OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG0_ADRS, POWER0_DAC_OFF | POWER0_COM_OFF | POWER0_VCC5_OFF); /* (8)VDD OFF */ corgi_ssp_lcdtg_send(lcd, POWERREG1_ADRS, POWER1_VW_OFF | POWER1_GVSS_OFF | POWER1_VDD_OFF); } static int corgi_lcd_set_mode(struct lcd_device *ld, struct fb_videomode *m) { struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev); int mode = CORGI_LCD_MODE_QVGA; if (m->xres == 640 || m->xres == 480) mode = CORGI_LCD_MODE_VGA; if (lcd->mode == mode) 
return 0; lcdtg_set_phadadj(lcd, mode); switch (mode) { case CORGI_LCD_MODE_VGA: corgi_ssp_lcdtg_send(lcd, RESCTL_ADRS, RESCTL_VGA); break; case CORGI_LCD_MODE_QVGA: default: corgi_ssp_lcdtg_send(lcd, RESCTL_ADRS, RESCTL_QVGA); break; } lcd->mode = mode; return 0; } static int corgi_lcd_set_power(struct lcd_device *ld, int power) { struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev); if (POWER_IS_ON(power) && !POWER_IS_ON(lcd->power)) corgi_lcd_power_on(lcd); if (!POWER_IS_ON(power) && POWER_IS_ON(lcd->power)) corgi_lcd_power_off(lcd); lcd->power = power; return 0; } static int corgi_lcd_get_power(struct lcd_device *ld) { struct corgi_lcd *lcd = dev_get_drvdata(&ld->dev); return lcd->power; } static struct lcd_ops corgi_lcd_ops = { .get_power = corgi_lcd_get_power, .set_power = corgi_lcd_set_power, .set_mode = corgi_lcd_set_mode, }; static int corgi_bl_get_intensity(struct backlight_device *bd) { struct corgi_lcd *lcd = dev_get_drvdata(&bd->dev); return lcd->intensity; } static int corgi_bl_set_intensity(struct corgi_lcd *lcd, int intensity) { int cont; if (intensity > 0x10) intensity += 0x10; corgi_ssp_lcdtg_send(lcd, DUTYCTRL_ADRS, intensity); /* Bit 5 via GPIO_BACKLIGHT_CONT */ cont = !!(intensity & 0x20) ^ lcd->gpio_backlight_cont_inverted; if (gpio_is_valid(lcd->gpio_backlight_cont)) gpio_set_value(lcd->gpio_backlight_cont, cont); if (gpio_is_valid(lcd->gpio_backlight_on)) gpio_set_value(lcd->gpio_backlight_on, intensity); if (lcd->kick_battery) lcd->kick_battery(); lcd->intensity = intensity; return 0; } static int corgi_bl_update_status(struct backlight_device *bd) { struct corgi_lcd *lcd = dev_get_drvdata(&bd->dev); int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; if (corgibl_flags & CORGIBL_SUSPENDED) intensity = 0; if ((corgibl_flags & CORGIBL_BATTLOW) && intensity > lcd->limit_mask) intensity = lcd->limit_mask; return corgi_bl_set_intensity(lcd, 
intensity); } void corgi_lcd_limit_intensity(int limit) { if (limit) corgibl_flags |= CORGIBL_BATTLOW; else corgibl_flags &= ~CORGIBL_BATTLOW; backlight_update_status(the_corgi_lcd->bl_dev); } EXPORT_SYMBOL(corgi_lcd_limit_intensity); static const struct backlight_ops corgi_bl_ops = { .get_brightness = corgi_bl_get_intensity, .update_status = corgi_bl_update_status, }; #ifdef CONFIG_PM static int corgi_lcd_suspend(struct spi_device *spi, pm_message_t state) { struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev); corgibl_flags |= CORGIBL_SUSPENDED; corgi_bl_set_intensity(lcd, 0); corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN); return 0; } static int corgi_lcd_resume(struct spi_device *spi) { struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev); corgibl_flags &= ~CORGIBL_SUSPENDED; corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK); backlight_update_status(lcd->bl_dev); return 0; } #else #define corgi_lcd_suspend NULL #define corgi_lcd_resume NULL #endif static int setup_gpio_backlight(struct corgi_lcd *lcd, struct corgi_lcd_platform_data *pdata) { struct spi_device *spi = lcd->spi_dev; int err; lcd->gpio_backlight_on = -1; lcd->gpio_backlight_cont = -1; if (gpio_is_valid(pdata->gpio_backlight_on)) { err = gpio_request(pdata->gpio_backlight_on, "BL_ON"); if (err) { dev_err(&spi->dev, "failed to request GPIO%d for " "backlight_on\n", pdata->gpio_backlight_on); return err; } lcd->gpio_backlight_on = pdata->gpio_backlight_on; gpio_direction_output(lcd->gpio_backlight_on, 0); } if (gpio_is_valid(pdata->gpio_backlight_cont)) { err = gpio_request(pdata->gpio_backlight_cont, "BL_CONT"); if (err) { dev_err(&spi->dev, "failed to request GPIO%d for " "backlight_cont\n", pdata->gpio_backlight_cont); goto err_free_backlight_on; } lcd->gpio_backlight_cont = pdata->gpio_backlight_cont; /* spitz and akita use both GPIOs for backlight, and * have inverted polarity of GPIO_BACKLIGHT_CONT */ if (gpio_is_valid(lcd->gpio_backlight_on)) { lcd->gpio_backlight_cont_inverted = 1; 
gpio_direction_output(lcd->gpio_backlight_cont, 1); } else { lcd->gpio_backlight_cont_inverted = 0; gpio_direction_output(lcd->gpio_backlight_cont, 0); } } return 0; err_free_backlight_on: if (gpio_is_valid(lcd->gpio_backlight_on)) gpio_free(lcd->gpio_backlight_on); return err; } static int __devinit corgi_lcd_probe(struct spi_device *spi) { struct backlight_properties props; struct corgi_lcd_platform_data *pdata = spi->dev.platform_data; struct corgi_lcd *lcd; int ret = 0; if (pdata == NULL) { dev_err(&spi->dev, "platform data not available\n"); return -EINVAL; } lcd = kzalloc(sizeof(struct corgi_lcd), GFP_KERNEL); if (!lcd) { dev_err(&spi->dev, "failed to allocate memory\n"); return -ENOMEM; } lcd->spi_dev = spi; lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev, lcd, &corgi_lcd_ops); if (IS_ERR(lcd->lcd_dev)) { ret = PTR_ERR(lcd->lcd_dev); goto err_free_lcd; } lcd->power = FB_BLANK_POWERDOWN; lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_RAW; props.max_brightness = pdata->max_intensity; lcd->bl_dev = backlight_device_register("corgi_bl", &spi->dev, lcd, &corgi_bl_ops, &props); if (IS_ERR(lcd->bl_dev)) { ret = PTR_ERR(lcd->bl_dev); goto err_unregister_lcd; } lcd->bl_dev->props.brightness = pdata->default_intensity; lcd->bl_dev->props.power = FB_BLANK_UNBLANK; ret = setup_gpio_backlight(lcd, pdata); if (ret) goto err_unregister_bl; lcd->kick_battery = pdata->kick_battery; dev_set_drvdata(&spi->dev, lcd); corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_UNBLANK); backlight_update_status(lcd->bl_dev); lcd->limit_mask = pdata->limit_mask; the_corgi_lcd = lcd; return 0; err_unregister_bl: backlight_device_unregister(lcd->bl_dev); err_unregister_lcd: lcd_device_unregister(lcd->lcd_dev); err_free_lcd: kfree(lcd); return ret; } static int __devexit corgi_lcd_remove(struct spi_device *spi) { struct corgi_lcd *lcd = dev_get_drvdata(&spi->dev); lcd->bl_dev->props.power = 
FB_BLANK_UNBLANK; lcd->bl_dev->props.brightness = 0; backlight_update_status(lcd->bl_dev); backlight_device_unregister(lcd->bl_dev); if (gpio_is_valid(lcd->gpio_backlight_on)) gpio_free(lcd->gpio_backlight_on); if (gpio_is_valid(lcd->gpio_backlight_cont)) gpio_free(lcd->gpio_backlight_cont); corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN); lcd_device_unregister(lcd->lcd_dev); kfree(lcd); return 0; } static struct spi_driver corgi_lcd_driver = { .driver = { .name = "corgi-lcd", .owner = THIS_MODULE, }, .probe = corgi_lcd_probe, .remove = __devexit_p(corgi_lcd_remove), .suspend = corgi_lcd_suspend, .resume = corgi_lcd_resume, }; module_spi_driver(corgi_lcd_driver); MODULE_DESCRIPTION("LCD and backlight driver for SHARP C7x0/Cxx00"); MODULE_AUTHOR("Eric Miao <eric.miao@marvell.com>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:corgi-lcd");
gpl-2.0
tako0910/m7GPE
drivers/pci/pcie/portdrv_core.c
4808
15644
/* * File: portdrv_core.c * Purpose: PCI Express Port Bus Driver's Core Functions * * Copyright (C) 2004 Intel * Copyright (C) Tom Long Nguyen (tom.l.nguyen@intel.com) */ #include <linux/module.h> #include <linux/pci.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/pm.h> #include <linux/string.h> #include <linux/slab.h> #include <linux/pcieport_if.h> #include <linux/aer.h> #include "../pci.h" #include "portdrv.h" bool pciehp_msi_disabled; static int __init pciehp_setup(char *str) { if (!strncmp(str, "nomsi", 5)) pciehp_msi_disabled = true; return 1; } __setup("pcie_hp=", pciehp_setup); /** * release_pcie_device - free PCI Express port service device structure * @dev: Port service device to release * * Invoked automatically when device is being removed in response to * device_unregister(dev). Release all resources being claimed. */ static void release_pcie_device(struct device *dev) { kfree(to_pcie_device(dev)); } /** * pcie_port_msix_add_entry - add entry to given array of MSI-X entries * @entries: Array of MSI-X entries * @new_entry: Index of the entry to add to the array * @nr_entries: Number of entries aleady in the array * * Return value: Position of the added entry in the array */ static int pcie_port_msix_add_entry( struct msix_entry *entries, int new_entry, int nr_entries) { int j; for (j = 0; j < nr_entries; j++) if (entries[j].entry == new_entry) return j; entries[j].entry = new_entry; return j; } /** * pcie_port_enable_msix - try to set up MSI-X as interrupt mode for given port * @dev: PCI Express port to handle * @vectors: Array of interrupt vectors to populate * @mask: Bitmask of port capabilities returned by get_port_device_capability() * * Return value: 0 on success, error code on failure */ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) { struct msix_entry *msix_entries; int idx[PCIE_PORT_DEVICE_MAXSERVICES]; int nr_entries, status, pos, i, nvec; u16 reg16; u32 reg32; nr_entries = 
pci_msix_table_size(dev); if (!nr_entries) return -EINVAL; if (nr_entries > PCIE_PORT_MAX_MSIX_ENTRIES) nr_entries = PCIE_PORT_MAX_MSIX_ENTRIES; msix_entries = kzalloc(sizeof(*msix_entries) * nr_entries, GFP_KERNEL); if (!msix_entries) return -ENOMEM; /* * Allocate as many entries as the port wants, so that we can check * which of them will be useful. Moreover, if nr_entries is correctly * equal to the number of entries this port actually uses, we'll happily * go through without any tricks. */ for (i = 0; i < nr_entries; i++) msix_entries[i].entry = i; status = pci_enable_msix(dev, msix_entries, nr_entries); if (status) goto Exit; for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) idx[i] = -1; status = -EIO; nvec = 0; if (mask & (PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP)) { int entry; /* * The code below follows the PCI Express Base Specification 2.0 * stating in Section 6.1.6 that "PME and Hot-Plug Event * interrupts (when both are implemented) always share the same * MSI or MSI-X vector, as indicated by the Interrupt Message * Number field in the PCI Express Capabilities register", where * according to Section 7.8.2 of the specification "For MSI-X, * the value in this field indicates which MSI-X Table entry is * used to generate the interrupt message." 
*/ pos = pci_pcie_cap(dev); pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); entry = (reg16 & PCI_EXP_FLAGS_IRQ) >> 9; if (entry >= nr_entries) goto Error; i = pcie_port_msix_add_entry(msix_entries, entry, nvec); if (i == nvec) nvec++; idx[PCIE_PORT_SERVICE_PME_SHIFT] = i; idx[PCIE_PORT_SERVICE_HP_SHIFT] = i; } if (mask & PCIE_PORT_SERVICE_AER) { int entry; /* * The code below follows Section 7.10.10 of the PCI Express * Base Specification 2.0 stating that bits 31-27 of the Root * Error Status Register contain a value indicating which of the * MSI/MSI-X vectors assigned to the port is going to be used * for AER, where "For MSI-X, the value in this register * indicates which MSI-X Table entry is used to generate the * interrupt message." */ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR); pci_read_config_dword(dev, pos + PCI_ERR_ROOT_STATUS, &reg32); entry = reg32 >> 27; if (entry >= nr_entries) goto Error; i = pcie_port_msix_add_entry(msix_entries, entry, nvec); if (i == nvec) nvec++; idx[PCIE_PORT_SERVICE_AER_SHIFT] = i; } /* * If nvec is equal to the allocated number of entries, we can just use * what we have. Otherwise, the port has some extra entries not for the * services we know and we need to work around that. */ if (nvec == nr_entries) { status = 0; } else { /* Drop the temporary MSI-X setup */ pci_disable_msix(dev); /* Now allocate the MSI-X vectors for real */ status = pci_enable_msix(dev, msix_entries, nvec); if (status) goto Exit; } for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) vectors[i] = idx[i] >= 0 ? 
msix_entries[idx[i]].vector : -1; Exit: kfree(msix_entries); return status; Error: pci_disable_msix(dev); goto Exit; } /** * init_service_irqs - initialize irqs for PCI Express port services * @dev: PCI Express port to handle * @irqs: Array of irqs to populate * @mask: Bitmask of port capabilities returned by get_port_device_capability() * * Return value: Interrupt mode associated with the port */ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) { int i, irq = -1; /* We have to use INTx if MSI cannot be used for PCIe PME or pciehp. */ if (((mask & PCIE_PORT_SERVICE_PME) && pcie_pme_no_msi()) || ((mask & PCIE_PORT_SERVICE_HP) && pciehp_no_msi())) { if (dev->pin) irq = dev->irq; goto no_msi; } /* Try to use MSI-X if supported */ if (!pcie_port_enable_msix(dev, irqs, mask)) return 0; /* We're not going to use MSI-X, so try MSI and fall back to INTx */ if (!pci_enable_msi(dev) || dev->pin) irq = dev->irq; no_msi: for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) irqs[i] = irq; irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; if (irq < 0) return -ENODEV; return 0; } static void cleanup_service_irqs(struct pci_dev *dev) { if (dev->msix_enabled) pci_disable_msix(dev); else if (dev->msi_enabled) pci_disable_msi(dev); } /** * get_port_device_capability - discover capabilities of a PCI Express port * @dev: PCI Express port to examine * * The capabilities are read from the port's PCI Express configuration registers * as described in PCI Express Base Specification 1.0a sections 7.8.2, 7.8.9 and * 7.9 - 7.11. 
* * Return value: Bitmask of discovered port capabilities */ static int get_port_device_capability(struct pci_dev *dev) { int services = 0, pos; u16 reg16; u32 reg32; int cap_mask; int err; if (pcie_ports_disabled) return 0; err = pcie_port_platform_notify(dev, &cap_mask); if (!pcie_ports_auto) { cap_mask = PCIE_PORT_SERVICE_PME | PCIE_PORT_SERVICE_HP | PCIE_PORT_SERVICE_VC; if (pci_aer_available()) cap_mask |= PCIE_PORT_SERVICE_AER; } else if (err) { return 0; } pos = pci_pcie_cap(dev); pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16); /* Hot-Plug Capable */ if ((cap_mask & PCIE_PORT_SERVICE_HP) && (reg16 & PCI_EXP_FLAGS_SLOT)) { pci_read_config_dword(dev, pos + PCI_EXP_SLTCAP, &reg32); if (reg32 & PCI_EXP_SLTCAP_HPC) { services |= PCIE_PORT_SERVICE_HP; /* * Disable hot-plug interrupts in case they have been * enabled by the BIOS and the hot-plug service driver * is not loaded. */ pos += PCI_EXP_SLTCTL; pci_read_config_word(dev, pos, &reg16); reg16 &= ~(PCI_EXP_SLTCTL_CCIE | PCI_EXP_SLTCTL_HPIE); pci_write_config_word(dev, pos, reg16); } } /* AER capable */ if ((cap_mask & PCIE_PORT_SERVICE_AER) && pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR)) { services |= PCIE_PORT_SERVICE_AER; /* * Disable AER on this port in case it's been enabled by the * BIOS (the AER service driver will enable it when necessary). */ pci_disable_pcie_error_reporting(dev); } /* VC support */ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_VC)) services |= PCIE_PORT_SERVICE_VC; /* Root ports are capable of generating PME too */ if ((cap_mask & PCIE_PORT_SERVICE_PME) && dev->pcie_type == PCI_EXP_TYPE_ROOT_PORT) { services |= PCIE_PORT_SERVICE_PME; /* * Disable PME interrupt on this port in case it's been enabled * by the BIOS (the PME service driver will enable it when * necessary). 
*/ pcie_pme_interrupt_enable(dev, false); } return services; } /** * pcie_device_init - allocate and initialize PCI Express port service device * @pdev: PCI Express port to associate the service device with * @service: Type of service to associate with the service device * @irq: Interrupt vector to associate with the service device */ static int pcie_device_init(struct pci_dev *pdev, int service, int irq) { int retval; struct pcie_device *pcie; struct device *device; pcie = kzalloc(sizeof(*pcie), GFP_KERNEL); if (!pcie) return -ENOMEM; pcie->port = pdev; pcie->irq = irq; pcie->service = service; /* Initialize generic device interface */ device = &pcie->device; device->bus = &pcie_port_bus_type; device->release = release_pcie_device; /* callback to free pcie dev */ dev_set_name(device, "%s:pcie%02x", pci_name(pdev), get_descriptor_id(pdev->pcie_type, service)); device->parent = &pdev->dev; device_enable_async_suspend(device); retval = device_register(device); if (retval) kfree(pcie); else get_device(device); return retval; } /** * pcie_port_device_register - register PCI Express port * @dev: PCI Express port to register * * Allocate the port extension structure and register services associated with * the port. */ int pcie_port_device_register(struct pci_dev *dev) { int status, capabilities, i, nr_service; int irqs[PCIE_PORT_DEVICE_MAXSERVICES]; /* Enable PCI Express port device */ status = pci_enable_device(dev); if (status) return status; /* Get and check PCI Express port services */ capabilities = get_port_device_capability(dev); if (!capabilities) return 0; pci_set_master(dev); /* * Initialize service irqs. Don't use service devices that * require interrupts if there is no way to generate them. 
*/ status = init_service_irqs(dev, irqs, capabilities); if (status) { capabilities &= PCIE_PORT_SERVICE_VC; if (!capabilities) goto error_disable; } /* Allocate child services if any */ status = -ENODEV; nr_service = 0; for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { int service = 1 << i; if (!(capabilities & service)) continue; if (!pcie_device_init(dev, service, irqs[i])) nr_service++; } if (!nr_service) goto error_cleanup_irqs; return 0; error_cleanup_irqs: cleanup_service_irqs(dev); error_disable: pci_disable_device(dev); return status; } #ifdef CONFIG_PM static int suspend_iter(struct device *dev, void *data) { struct pcie_port_service_driver *service_driver; if ((dev->bus == &pcie_port_bus_type) && dev->driver) { service_driver = to_service_driver(dev->driver); if (service_driver->suspend) service_driver->suspend(to_pcie_device(dev)); } return 0; } /** * pcie_port_device_suspend - suspend port services associated with a PCIe port * @dev: PCI Express port to handle */ int pcie_port_device_suspend(struct device *dev) { return device_for_each_child(dev, NULL, suspend_iter); } static int resume_iter(struct device *dev, void *data) { struct pcie_port_service_driver *service_driver; if ((dev->bus == &pcie_port_bus_type) && (dev->driver)) { service_driver = to_service_driver(dev->driver); if (service_driver->resume) service_driver->resume(to_pcie_device(dev)); } return 0; } /** * pcie_port_device_suspend - resume port services associated with a PCIe port * @dev: PCI Express port to handle */ int pcie_port_device_resume(struct device *dev) { return device_for_each_child(dev, NULL, resume_iter); } #endif /* PM */ static int remove_iter(struct device *dev, void *data) { if (dev->bus == &pcie_port_bus_type) { put_device(dev); device_unregister(dev); } return 0; } /** * pcie_port_device_remove - unregister PCI Express port service devices * @dev: PCI Express port the service devices to unregister are associated with * * Remove PCI Express port service devices 
associated with given port and * disable MSI-X or MSI for the port. */ void pcie_port_device_remove(struct pci_dev *dev) { device_for_each_child(&dev->dev, NULL, remove_iter); cleanup_service_irqs(dev); pci_disable_device(dev); } /** * pcie_port_probe_service - probe driver for given PCI Express port service * @dev: PCI Express port service device to probe against * * If PCI Express port service driver is registered with * pcie_port_service_register(), this function will be called by the driver core * whenever match is found between the driver and a port service device. */ static int pcie_port_probe_service(struct device *dev) { struct pcie_device *pciedev; struct pcie_port_service_driver *driver; int status; if (!dev || !dev->driver) return -ENODEV; driver = to_service_driver(dev->driver); if (!driver || !driver->probe) return -ENODEV; pciedev = to_pcie_device(dev); status = driver->probe(pciedev); if (!status) { dev_printk(KERN_DEBUG, dev, "service driver %s loaded\n", driver->name); get_device(dev); } return status; } /** * pcie_port_remove_service - detach driver from given PCI Express port service * @dev: PCI Express port service device to handle * * If PCI Express port service driver is registered with * pcie_port_service_register(), this function will be called by the driver core * when device_unregister() is called for the port service device associated * with the driver. 
*/ static int pcie_port_remove_service(struct device *dev) { struct pcie_device *pciedev; struct pcie_port_service_driver *driver; if (!dev || !dev->driver) return 0; pciedev = to_pcie_device(dev); driver = to_service_driver(dev->driver); if (driver && driver->remove) { dev_printk(KERN_DEBUG, dev, "unloading service driver %s\n", driver->name); driver->remove(pciedev); put_device(dev); } return 0; } /** * pcie_port_shutdown_service - shut down given PCI Express port service * @dev: PCI Express port service device to handle * * If PCI Express port service driver is registered with * pcie_port_service_register(), this function will be called by the driver core * when device_shutdown() is called for the port service device associated * with the driver. */ static void pcie_port_shutdown_service(struct device *dev) {} /** * pcie_port_service_register - register PCI Express port service driver * @new: PCI Express port service driver to register */ int pcie_port_service_register(struct pcie_port_service_driver *new) { if (pcie_ports_disabled) return -ENODEV; new->driver.name = (char *)new->name; new->driver.bus = &pcie_port_bus_type; new->driver.probe = pcie_port_probe_service; new->driver.remove = pcie_port_remove_service; new->driver.shutdown = pcie_port_shutdown_service; return driver_register(&new->driver); } EXPORT_SYMBOL(pcie_port_service_register); /** * pcie_port_service_unregister - unregister PCI Express port service driver * @drv: PCI Express port service driver to unregister */ void pcie_port_service_unregister(struct pcie_port_service_driver *drv) { driver_unregister(&drv->driver); } EXPORT_SYMBOL(pcie_port_service_unregister);
gpl-2.0
CyanogenMod/android_kernel_samsung_lt03wifi
drivers/power/ds2781_battery.c
4808
21870
/* * 1-wire client/driver for the Maxim/Dallas DS2781 Stand-Alone Fuel Gauge IC * * Author: Renata Sayakhova <renata@oktetlabs.ru> * * Based on ds2780_battery drivers * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/slab.h> #include <linux/param.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/power_supply.h> #include <linux/idr.h> #include "../w1/w1.h" #include "../w1/slaves/w1_ds2781.h" /* Current unit measurement in uA for a 1 milli-ohm sense resistor */ #define DS2781_CURRENT_UNITS 1563 /* Charge unit measurement in uAh for a 1 milli-ohm sense resistor */ #define DS2781_CHARGE_UNITS 6250 /* Number of bytes in user EEPROM space */ #define DS2781_USER_EEPROM_SIZE (DS2781_EEPROM_BLOCK0_END - \ DS2781_EEPROM_BLOCK0_START + 1) /* Number of bytes in parameter EEPROM space */ #define DS2781_PARAM_EEPROM_SIZE (DS2781_EEPROM_BLOCK1_END - \ DS2781_EEPROM_BLOCK1_START + 1) struct ds2781_device_info { struct device *dev; struct power_supply bat; struct device *w1_dev; struct task_struct *mutex_holder; }; enum current_types { CURRENT_NOW, CURRENT_AVG, }; static const char model[] = "DS2781"; static const char manufacturer[] = "Maxim/Dallas"; static inline struct ds2781_device_info * to_ds2781_device_info(struct power_supply *psy) { return container_of(psy, struct ds2781_device_info, bat); } static inline struct power_supply *to_power_supply(struct device *dev) { return dev_get_drvdata(dev); } static inline int ds2781_battery_io(struct ds2781_device_info *dev_info, char *buf, int addr, size_t count, int io) { if (dev_info->mutex_holder == current) return w1_ds2781_io_nolock(dev_info->w1_dev, buf, addr, count, io); else return w1_ds2781_io(dev_info->w1_dev, buf, addr, count, io); } int w1_ds2781_read(struct ds2781_device_info *dev_info, char *buf, int addr, size_t 
count) { return ds2781_battery_io(dev_info, buf, addr, count, 0); } static inline int ds2781_read8(struct ds2781_device_info *dev_info, u8 *val, int addr) { return ds2781_battery_io(dev_info, val, addr, sizeof(u8), 0); } static int ds2781_read16(struct ds2781_device_info *dev_info, s16 *val, int addr) { int ret; u8 raw[2]; ret = ds2781_battery_io(dev_info, raw, addr, sizeof(raw), 0); if (ret < 0) return ret; *val = (raw[0] << 8) | raw[1]; return 0; } static inline int ds2781_read_block(struct ds2781_device_info *dev_info, u8 *val, int addr, size_t count) { return ds2781_battery_io(dev_info, val, addr, count, 0); } static inline int ds2781_write(struct ds2781_device_info *dev_info, u8 *val, int addr, size_t count) { return ds2781_battery_io(dev_info, val, addr, count, 1); } static inline int ds2781_store_eeprom(struct device *dev, int addr) { return w1_ds2781_eeprom_cmd(dev, addr, W1_DS2781_COPY_DATA); } static inline int ds2781_recall_eeprom(struct device *dev, int addr) { return w1_ds2781_eeprom_cmd(dev, addr, W1_DS2781_RECALL_DATA); } static int ds2781_save_eeprom(struct ds2781_device_info *dev_info, int reg) { int ret; ret = ds2781_store_eeprom(dev_info->w1_dev, reg); if (ret < 0) return ret; ret = ds2781_recall_eeprom(dev_info->w1_dev, reg); if (ret < 0) return ret; return 0; } /* Set sense resistor value in mhos */ static int ds2781_set_sense_register(struct ds2781_device_info *dev_info, u8 conductance) { int ret; ret = ds2781_write(dev_info, &conductance, DS2781_RSNSP, sizeof(u8)); if (ret < 0) return ret; return ds2781_save_eeprom(dev_info, DS2781_RSNSP); } /* Get RSGAIN value from 0 to 1.999 in steps of 0.001 */ static int ds2781_get_rsgain_register(struct ds2781_device_info *dev_info, u16 *rsgain) { return ds2781_read16(dev_info, rsgain, DS2781_RSGAIN_MSB); } /* Set RSGAIN value from 0 to 1.999 in steps of 0.001 */ static int ds2781_set_rsgain_register(struct ds2781_device_info *dev_info, u16 rsgain) { int ret; u8 raw[] = {rsgain >> 8, rsgain & 0xFF}; ret 
= ds2781_write(dev_info, raw, DS2781_RSGAIN_MSB, sizeof(raw)); if (ret < 0) return ret; return ds2781_save_eeprom(dev_info, DS2781_RSGAIN_MSB); } static int ds2781_get_voltage(struct ds2781_device_info *dev_info, int *voltage_uV) { int ret; char val[2]; int voltage_raw; ret = w1_ds2781_read(dev_info, val, DS2781_VOLT_MSB, 2 * sizeof(u8)); if (ret < 0) return ret; /* * The voltage value is located in 10 bits across the voltage MSB * and LSB registers in two's compliment form * Sign bit of the voltage value is in bit 7 of the voltage MSB register * Bits 9 - 3 of the voltage value are in bits 6 - 0 of the * voltage MSB register * Bits 2 - 0 of the voltage value are in bits 7 - 5 of the * voltage LSB register */ voltage_raw = (val[0] << 3) | (val[1] >> 5); /* DS2781 reports voltage in units of 9.76mV, but the battery class * reports in units of uV, so convert by multiplying by 9760. */ *voltage_uV = voltage_raw * 9760; return 0; } static int ds2781_get_temperature(struct ds2781_device_info *dev_info, int *temp) { int ret; char val[2]; int temp_raw; ret = w1_ds2781_read(dev_info, val, DS2781_TEMP_MSB, 2 * sizeof(u8)); if (ret < 0) return ret; /* * The temperature value is located in 10 bits across the temperature * MSB and LSB registers in two's compliment form * Sign bit of the temperature value is in bit 7 of the temperature * MSB register * Bits 9 - 3 of the temperature value are in bits 6 - 0 of the * temperature MSB register * Bits 2 - 0 of the temperature value are in bits 7 - 5 of the * temperature LSB register */ temp_raw = ((val[0]) << 3) | (val[1] >> 5); *temp = temp_raw + (temp_raw / 4); return 0; } static int ds2781_get_current(struct ds2781_device_info *dev_info, enum current_types type, int *current_uA) { int ret, sense_res; s16 current_raw; u8 sense_res_raw, reg_msb; /* * The units of measurement for current are dependent on the value of * the sense resistor. 
*/ ret = ds2781_read8(dev_info, &sense_res_raw, DS2781_RSNSP); if (ret < 0) return ret; if (sense_res_raw == 0) { dev_err(dev_info->dev, "sense resistor value is 0\n"); return -EINVAL; } sense_res = 1000 / sense_res_raw; if (type == CURRENT_NOW) reg_msb = DS2781_CURRENT_MSB; else if (type == CURRENT_AVG) reg_msb = DS2781_IAVG_MSB; else return -EINVAL; /* * The current value is located in 16 bits across the current MSB * and LSB registers in two's compliment form * Sign bit of the current value is in bit 7 of the current MSB register * Bits 14 - 8 of the current value are in bits 6 - 0 of the current * MSB register * Bits 7 - 0 of the current value are in bits 7 - 0 of the current * LSB register */ ret = ds2781_read16(dev_info, &current_raw, reg_msb); if (ret < 0) return ret; *current_uA = current_raw * (DS2781_CURRENT_UNITS / sense_res); return 0; } static int ds2781_get_accumulated_current(struct ds2781_device_info *dev_info, int *accumulated_current) { int ret, sense_res; s16 current_raw; u8 sense_res_raw; /* * The units of measurement for accumulated current are dependent on * the value of the sense resistor. 
*/ ret = ds2781_read8(dev_info, &sense_res_raw, DS2781_RSNSP); if (ret < 0) return ret; if (sense_res_raw == 0) { dev_err(dev_info->dev, "sense resistor value is 0\n"); return -EINVAL; } sense_res = 1000 / sense_res_raw; /* * The ACR value is located in 16 bits across the ACR MSB and * LSB registers * Bits 15 - 8 of the ACR value are in bits 7 - 0 of the ACR * MSB register * Bits 7 - 0 of the ACR value are in bits 7 - 0 of the ACR * LSB register */ ret = ds2781_read16(dev_info, &current_raw, DS2781_ACR_MSB); if (ret < 0) return ret; *accumulated_current = current_raw * (DS2781_CHARGE_UNITS / sense_res); return 0; } static int ds2781_get_capacity(struct ds2781_device_info *dev_info, int *capacity) { int ret; u8 raw; ret = ds2781_read8(dev_info, &raw, DS2781_RARC); if (ret < 0) return ret; *capacity = raw; return 0; } static int ds2781_get_status(struct ds2781_device_info *dev_info, int *status) { int ret, current_uA, capacity; ret = ds2781_get_current(dev_info, CURRENT_NOW, &current_uA); if (ret < 0) return ret; ret = ds2781_get_capacity(dev_info, &capacity); if (ret < 0) return ret; if (power_supply_am_i_supplied(&dev_info->bat)) { if (capacity == 100) *status = POWER_SUPPLY_STATUS_FULL; else if (current_uA > 50000) *status = POWER_SUPPLY_STATUS_CHARGING; else *status = POWER_SUPPLY_STATUS_NOT_CHARGING; } else { *status = POWER_SUPPLY_STATUS_DISCHARGING; } return 0; } static int ds2781_get_charge_now(struct ds2781_device_info *dev_info, int *charge_now) { int ret; u16 charge_raw; /* * The RAAC value is located in 16 bits across the RAAC MSB and * LSB registers * Bits 15 - 8 of the RAAC value are in bits 7 - 0 of the RAAC * MSB register * Bits 7 - 0 of the RAAC value are in bits 7 - 0 of the RAAC * LSB register */ ret = ds2781_read16(dev_info, &charge_raw, DS2781_RAAC_MSB); if (ret < 0) return ret; *charge_now = charge_raw * 1600; return 0; } static int ds2781_get_control_register(struct ds2781_device_info *dev_info, u8 *control_reg) { return ds2781_read8(dev_info, 
control_reg, DS2781_CONTROL); } static int ds2781_set_control_register(struct ds2781_device_info *dev_info, u8 control_reg) { int ret; ret = ds2781_write(dev_info, &control_reg, DS2781_CONTROL, sizeof(u8)); if (ret < 0) return ret; return ds2781_save_eeprom(dev_info, DS2781_CONTROL); } static int ds2781_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { int ret = 0; struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); switch (psp) { case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = ds2781_get_voltage(dev_info, &val->intval); break; case POWER_SUPPLY_PROP_TEMP: ret = ds2781_get_temperature(dev_info, &val->intval); break; case POWER_SUPPLY_PROP_MODEL_NAME: val->strval = model; break; case POWER_SUPPLY_PROP_MANUFACTURER: val->strval = manufacturer; break; case POWER_SUPPLY_PROP_CURRENT_NOW: ret = ds2781_get_current(dev_info, CURRENT_NOW, &val->intval); break; case POWER_SUPPLY_PROP_CURRENT_AVG: ret = ds2781_get_current(dev_info, CURRENT_AVG, &val->intval); break; case POWER_SUPPLY_PROP_STATUS: ret = ds2781_get_status(dev_info, &val->intval); break; case POWER_SUPPLY_PROP_CAPACITY: ret = ds2781_get_capacity(dev_info, &val->intval); break; case POWER_SUPPLY_PROP_CHARGE_COUNTER: ret = ds2781_get_accumulated_current(dev_info, &val->intval); break; case POWER_SUPPLY_PROP_CHARGE_NOW: ret = ds2781_get_charge_now(dev_info, &val->intval); break; default: ret = -EINVAL; } return ret; } static enum power_supply_property ds2781_battery_props[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_TEMP, POWER_SUPPLY_PROP_MODEL_NAME, POWER_SUPPLY_PROP_MANUFACTURER, POWER_SUPPLY_PROP_CURRENT_NOW, POWER_SUPPLY_PROP_CURRENT_AVG, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_CHARGE_COUNTER, POWER_SUPPLY_PROP_CHARGE_NOW, }; static ssize_t ds2781_get_pmod_enabled(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u8 control_reg; struct power_supply *psy = 
to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); /* Get power mode */ ret = ds2781_get_control_register(dev_info, &control_reg); if (ret < 0) return ret; return sprintf(buf, "%d\n", !!(control_reg & DS2781_CONTROL_PMOD)); } static ssize_t ds2781_set_pmod_enabled(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; u8 control_reg, new_setting; struct power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); /* Set power mode */ ret = ds2781_get_control_register(dev_info, &control_reg); if (ret < 0) return ret; ret = kstrtou8(buf, 0, &new_setting); if (ret < 0) return ret; if ((new_setting != 0) && (new_setting != 1)) { dev_err(dev_info->dev, "Invalid pmod setting (0 or 1)\n"); return -EINVAL; } if (new_setting) control_reg |= DS2781_CONTROL_PMOD; else control_reg &= ~DS2781_CONTROL_PMOD; ret = ds2781_set_control_register(dev_info, control_reg); if (ret < 0) return ret; return count; } static ssize_t ds2781_get_sense_resistor_value(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u8 sense_resistor; struct power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); ret = ds2781_read8(dev_info, &sense_resistor, DS2781_RSNSP); if (ret < 0) return ret; ret = sprintf(buf, "%d\n", sense_resistor); return ret; } static ssize_t ds2781_set_sense_resistor_value(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; u8 new_setting; struct power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); ret = kstrtou8(buf, 0, &new_setting); if (ret < 0) return ret; ret = ds2781_set_sense_register(dev_info, new_setting); if (ret < 0) return ret; return count; } static ssize_t ds2781_get_rsgain_setting(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u16 rsgain; struct 
power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); ret = ds2781_get_rsgain_register(dev_info, &rsgain); if (ret < 0) return ret; return sprintf(buf, "%d\n", rsgain); } static ssize_t ds2781_set_rsgain_setting(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; u16 new_setting; struct power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); ret = kstrtou16(buf, 0, &new_setting); if (ret < 0) return ret; /* Gain can only be from 0 to 1.999 in steps of .001 */ if (new_setting > 1999) { dev_err(dev_info->dev, "Invalid rsgain setting (0 - 1999)\n"); return -EINVAL; } ret = ds2781_set_rsgain_register(dev_info, new_setting); if (ret < 0) return ret; return count; } static ssize_t ds2781_get_pio_pin(struct device *dev, struct device_attribute *attr, char *buf) { int ret; u8 sfr; struct power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); ret = ds2781_read8(dev_info, &sfr, DS2781_SFR); if (ret < 0) return ret; ret = sprintf(buf, "%d\n", sfr & DS2781_SFR_PIOSC); return ret; } static ssize_t ds2781_set_pio_pin(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int ret; u8 new_setting; struct power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); ret = kstrtou8(buf, 0, &new_setting); if (ret < 0) return ret; if ((new_setting != 0) && (new_setting != 1)) { dev_err(dev_info->dev, "Invalid pio_pin setting (0 or 1)\n"); return -EINVAL; } ret = ds2781_write(dev_info, &new_setting, DS2781_SFR, sizeof(u8)); if (ret < 0) return ret; return count; } static ssize_t ds2781_read_param_eeprom_bin(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); struct power_supply *psy = 
to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); count = min_t(loff_t, count, DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1 - off); return ds2781_read_block(dev_info, buf, DS2781_EEPROM_BLOCK1_START + off, count); } static ssize_t ds2781_write_param_eeprom_bin(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); struct power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); int ret; count = min_t(loff_t, count, DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1 - off); ret = ds2781_write(dev_info, buf, DS2781_EEPROM_BLOCK1_START + off, count); if (ret < 0) return ret; ret = ds2781_save_eeprom(dev_info, DS2781_EEPROM_BLOCK1_START); if (ret < 0) return ret; return count; } static struct bin_attribute ds2781_param_eeprom_bin_attr = { .attr = { .name = "param_eeprom", .mode = S_IRUGO | S_IWUSR, }, .size = DS2781_EEPROM_BLOCK1_END - DS2781_EEPROM_BLOCK1_START + 1, .read = ds2781_read_param_eeprom_bin, .write = ds2781_write_param_eeprom_bin, }; static ssize_t ds2781_read_user_eeprom_bin(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); struct power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = to_ds2781_device_info(psy); count = min_t(loff_t, count, DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1 - off); return ds2781_read_block(dev_info, buf, DS2781_EEPROM_BLOCK0_START + off, count); } static ssize_t ds2781_write_user_eeprom_bin(struct file *filp, struct kobject *kobj, struct bin_attribute *bin_attr, char *buf, loff_t off, size_t count) { struct device *dev = container_of(kobj, struct device, kobj); struct power_supply *psy = to_power_supply(dev); struct ds2781_device_info *dev_info = 
to_ds2781_device_info(psy); int ret; count = min_t(loff_t, count, DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1 - off); ret = ds2781_write(dev_info, buf, DS2781_EEPROM_BLOCK0_START + off, count); if (ret < 0) return ret; ret = ds2781_save_eeprom(dev_info, DS2781_EEPROM_BLOCK0_START); if (ret < 0) return ret; return count; } static struct bin_attribute ds2781_user_eeprom_bin_attr = { .attr = { .name = "user_eeprom", .mode = S_IRUGO | S_IWUSR, }, .size = DS2781_EEPROM_BLOCK0_END - DS2781_EEPROM_BLOCK0_START + 1, .read = ds2781_read_user_eeprom_bin, .write = ds2781_write_user_eeprom_bin, }; static DEVICE_ATTR(pmod_enabled, S_IRUGO | S_IWUSR, ds2781_get_pmod_enabled, ds2781_set_pmod_enabled); static DEVICE_ATTR(sense_resistor_value, S_IRUGO | S_IWUSR, ds2781_get_sense_resistor_value, ds2781_set_sense_resistor_value); static DEVICE_ATTR(rsgain_setting, S_IRUGO | S_IWUSR, ds2781_get_rsgain_setting, ds2781_set_rsgain_setting); static DEVICE_ATTR(pio_pin, S_IRUGO | S_IWUSR, ds2781_get_pio_pin, ds2781_set_pio_pin); static struct attribute *ds2781_attributes[] = { &dev_attr_pmod_enabled.attr, &dev_attr_sense_resistor_value.attr, &dev_attr_rsgain_setting.attr, &dev_attr_pio_pin.attr, NULL }; static const struct attribute_group ds2781_attr_group = { .attrs = ds2781_attributes, }; static int __devinit ds2781_battery_probe(struct platform_device *pdev) { int ret = 0; struct ds2781_device_info *dev_info; dev_info = kzalloc(sizeof(*dev_info), GFP_KERNEL); if (!dev_info) { ret = -ENOMEM; goto fail; } platform_set_drvdata(pdev, dev_info); dev_info->dev = &pdev->dev; dev_info->w1_dev = pdev->dev.parent; dev_info->bat.name = dev_name(&pdev->dev); dev_info->bat.type = POWER_SUPPLY_TYPE_BATTERY; dev_info->bat.properties = ds2781_battery_props; dev_info->bat.num_properties = ARRAY_SIZE(ds2781_battery_props); dev_info->bat.get_property = ds2781_battery_get_property; dev_info->mutex_holder = current; ret = power_supply_register(&pdev->dev, &dev_info->bat); if (ret) { 
dev_err(dev_info->dev, "failed to register battery\n"); goto fail_free_info; } ret = sysfs_create_group(&dev_info->bat.dev->kobj, &ds2781_attr_group); if (ret) { dev_err(dev_info->dev, "failed to create sysfs group\n"); goto fail_unregister; } ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj, &ds2781_param_eeprom_bin_attr); if (ret) { dev_err(dev_info->dev, "failed to create param eeprom bin file"); goto fail_remove_group; } ret = sysfs_create_bin_file(&dev_info->bat.dev->kobj, &ds2781_user_eeprom_bin_attr); if (ret) { dev_err(dev_info->dev, "failed to create user eeprom bin file"); goto fail_remove_bin_file; } dev_info->mutex_holder = NULL; return 0; fail_remove_bin_file: sysfs_remove_bin_file(&dev_info->bat.dev->kobj, &ds2781_param_eeprom_bin_attr); fail_remove_group: sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2781_attr_group); fail_unregister: power_supply_unregister(&dev_info->bat); fail_free_info: kfree(dev_info); fail: return ret; } static int __devexit ds2781_battery_remove(struct platform_device *pdev) { struct ds2781_device_info *dev_info = platform_get_drvdata(pdev); dev_info->mutex_holder = current; /* remove attributes */ sysfs_remove_group(&dev_info->bat.dev->kobj, &ds2781_attr_group); power_supply_unregister(&dev_info->bat); kfree(dev_info); return 0; } static struct platform_driver ds2781_battery_driver = { .driver = { .name = "ds2781-battery", }, .probe = ds2781_battery_probe, .remove = __devexit_p(ds2781_battery_remove), }; static int __init ds2781_battery_init(void) { return platform_driver_register(&ds2781_battery_driver); } static void __exit ds2781_battery_exit(void) { platform_driver_unregister(&ds2781_battery_driver); } module_init(ds2781_battery_init); module_exit(ds2781_battery_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Renata Sayakhova <renata@oktetlabs.ru>"); MODULE_DESCRIPTION("Maxim/Dallas DS2781 Stand-Alone Fuel Gauage IC driver"); MODULE_ALIAS("platform:ds2781-battery");
gpl-2.0
ReflexBow/ghost
drivers/staging/media/go7007/go7007-v4l2.c
4808
45806
/* * Copyright (C) 2005-2006 Micronas USA Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/version.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/unistd.h> #include <linux/time.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/videodev2.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-subdev.h> #include <linux/i2c.h> #include <linux/mutex.h> #include <linux/uaccess.h> #include "go7007.h" #include "go7007-priv.h" #include "wis-i2c.h" /* Temporary defines until accepted in v4l-dvb */ #ifndef V4L2_MPEG_STREAM_TYPE_MPEG_ELEM #define V4L2_MPEG_STREAM_TYPE_MPEG_ELEM 6 /* MPEG elementary stream */ #endif #ifndef V4L2_MPEG_VIDEO_ENCODING_MPEG_4 #define V4L2_MPEG_VIDEO_ENCODING_MPEG_4 3 #endif #define call_all(dev, o, f, args...) 
\ v4l2_device_call_until_err(dev, 0, o, f, ##args) static void deactivate_buffer(struct go7007_buffer *gobuf) { int i; if (gobuf->state != BUF_STATE_IDLE) { list_del(&gobuf->stream); gobuf->state = BUF_STATE_IDLE; } if (gobuf->page_count > 0) { for (i = 0; i < gobuf->page_count; ++i) page_cache_release(gobuf->pages[i]); gobuf->page_count = 0; } } static void abort_queued(struct go7007 *go) { struct go7007_buffer *gobuf, *next; list_for_each_entry_safe(gobuf, next, &go->stream, stream) { deactivate_buffer(gobuf); } } static int go7007_streamoff(struct go7007 *go) { int retval = -EINVAL; unsigned long flags; mutex_lock(&go->hw_lock); if (go->streaming) { go->streaming = 0; go7007_stream_stop(go); spin_lock_irqsave(&go->spinlock, flags); abort_queued(go); spin_unlock_irqrestore(&go->spinlock, flags); go7007_reset_encoder(go); retval = 0; } mutex_unlock(&go->hw_lock); return 0; } static int go7007_open(struct file *file) { struct go7007 *go = video_get_drvdata(video_devdata(file)); struct go7007_file *gofh; if (go->status != STATUS_ONLINE) return -EBUSY; gofh = kmalloc(sizeof(struct go7007_file), GFP_KERNEL); if (gofh == NULL) return -ENOMEM; ++go->ref_count; gofh->go = go; mutex_init(&gofh->lock); gofh->buf_count = 0; file->private_data = gofh; return 0; } static int go7007_release(struct file *file) { struct go7007_file *gofh = file->private_data; struct go7007 *go = gofh->go; if (gofh->buf_count > 0) { go7007_streamoff(go); go->in_use = 0; kfree(gofh->bufs); gofh->buf_count = 0; } kfree(gofh); if (--go->ref_count == 0) kfree(go); file->private_data = NULL; return 0; } static u32 get_frame_type_flag(struct go7007_buffer *gobuf, int format) { u8 *f = page_address(gobuf->pages[0]); switch (format) { case GO7007_FORMAT_MJPEG: return V4L2_BUF_FLAG_KEYFRAME; case GO7007_FORMAT_MPEG4: switch ((f[gobuf->frame_offset + 4] >> 6) & 0x3) { case 0: return V4L2_BUF_FLAG_KEYFRAME; case 1: return V4L2_BUF_FLAG_PFRAME; case 2: return V4L2_BUF_FLAG_BFRAME; default: return 0; } case 
GO7007_FORMAT_MPEG1: case GO7007_FORMAT_MPEG2: switch ((f[gobuf->frame_offset + 5] >> 3) & 0x7) { case 1: return V4L2_BUF_FLAG_KEYFRAME; case 2: return V4L2_BUF_FLAG_PFRAME; case 3: return V4L2_BUF_FLAG_BFRAME; default: return 0; } } return 0; } static int set_capture_size(struct go7007 *go, struct v4l2_format *fmt, int try) { int sensor_height = 0, sensor_width = 0; int width, height, i; if (fmt != NULL && fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MJPEG && fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG && fmt->fmt.pix.pixelformat != V4L2_PIX_FMT_MPEG4) return -EINVAL; switch (go->standard) { case GO7007_STD_NTSC: sensor_width = 720; sensor_height = 480; break; case GO7007_STD_PAL: sensor_width = 720; sensor_height = 576; break; case GO7007_STD_OTHER: sensor_width = go->board_info->sensor_width; sensor_height = go->board_info->sensor_height; break; } if (fmt == NULL) { width = sensor_width; height = sensor_height; } else if (go->board_info->sensor_flags & GO7007_SENSOR_SCALING) { if (fmt->fmt.pix.width > sensor_width) width = sensor_width; else if (fmt->fmt.pix.width < 144) width = 144; else width = fmt->fmt.pix.width & ~0x0f; if (fmt->fmt.pix.height > sensor_height) height = sensor_height; else if (fmt->fmt.pix.height < 96) height = 96; else height = fmt->fmt.pix.height & ~0x0f; } else { int requested_size = fmt->fmt.pix.width * fmt->fmt.pix.height; int sensor_size = sensor_width * sensor_height; if (64 * requested_size < 9 * sensor_size) { width = sensor_width / 4; height = sensor_height / 4; } else if (64 * requested_size < 36 * sensor_size) { width = sensor_width / 2; height = sensor_height / 2; } else { width = sensor_width; height = sensor_height; } width &= ~0xf; height &= ~0xf; } if (fmt != NULL) { u32 pixelformat = fmt->fmt.pix.pixelformat; memset(fmt, 0, sizeof(*fmt)); fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; fmt->fmt.pix.width = width; fmt->fmt.pix.height = height; fmt->fmt.pix.pixelformat = pixelformat; fmt->fmt.pix.field = V4L2_FIELD_NONE; 
fmt->fmt.pix.bytesperline = 0; fmt->fmt.pix.sizeimage = GO7007_BUF_SIZE; fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; /* ?? */ } if (try) return 0; go->width = width; go->height = height; go->encoder_h_offset = go->board_info->sensor_h_offset; go->encoder_v_offset = go->board_info->sensor_v_offset; for (i = 0; i < 4; ++i) go->modet[i].enable = 0; for (i = 0; i < 1624; ++i) go->modet_map[i] = 0; if (go->board_info->sensor_flags & GO7007_SENSOR_SCALING) { struct v4l2_mbus_framefmt mbus_fmt; mbus_fmt.code = V4L2_MBUS_FMT_FIXED; if (fmt != NULL) mbus_fmt.width = fmt->fmt.pix.width; else mbus_fmt.width = width; if (height > sensor_height / 2) { mbus_fmt.height = height / 2; go->encoder_v_halve = 0; } else { mbus_fmt.height = height; go->encoder_v_halve = 1; } call_all(&go->v4l2_dev, video, s_mbus_fmt, &mbus_fmt); } else { if (width <= sensor_width / 4) { go->encoder_h_halve = 1; go->encoder_v_halve = 1; go->encoder_subsample = 1; } else if (width <= sensor_width / 2) { go->encoder_h_halve = 1; go->encoder_v_halve = 1; go->encoder_subsample = 0; } else { go->encoder_h_halve = 0; go->encoder_v_halve = 0; go->encoder_subsample = 0; } } if (fmt == NULL) return 0; switch (fmt->fmt.pix.pixelformat) { case V4L2_PIX_FMT_MPEG: if (go->format == GO7007_FORMAT_MPEG1 || go->format == GO7007_FORMAT_MPEG2 || go->format == GO7007_FORMAT_MPEG4) break; go->format = GO7007_FORMAT_MPEG1; go->pali = 0; go->aspect_ratio = GO7007_RATIO_1_1; go->gop_size = go->sensor_framerate / 1000; go->ipb = 0; go->closed_gop = 1; go->repeat_seqhead = 1; go->seq_header_enable = 1; go->gop_header_enable = 1; go->dvd_mode = 0; break; /* Backwards compatibility only! 
*/ case V4L2_PIX_FMT_MPEG4: if (go->format == GO7007_FORMAT_MPEG4) break; go->format = GO7007_FORMAT_MPEG4; go->pali = 0xf5; go->aspect_ratio = GO7007_RATIO_1_1; go->gop_size = go->sensor_framerate / 1000; go->ipb = 0; go->closed_gop = 1; go->repeat_seqhead = 1; go->seq_header_enable = 1; go->gop_header_enable = 1; go->dvd_mode = 0; break; case V4L2_PIX_FMT_MJPEG: go->format = GO7007_FORMAT_MJPEG; go->pali = 0; go->aspect_ratio = GO7007_RATIO_1_1; go->gop_size = 0; go->ipb = 0; go->closed_gop = 0; go->repeat_seqhead = 0; go->seq_header_enable = 0; go->gop_header_enable = 0; go->dvd_mode = 0; break; } return 0; } #if 0 static int clip_to_modet_map(struct go7007 *go, int region, struct v4l2_clip *clip_list) { struct v4l2_clip clip, *clip_ptr; int x, y, mbnum; /* Check if coordinates are OK and if any macroblocks are already * used by other regions (besides 0) */ clip_ptr = clip_list; while (clip_ptr) { if (copy_from_user(&clip, clip_ptr, sizeof(clip))) return -EFAULT; if (clip.c.left < 0 || (clip.c.left & 0xF) || clip.c.width <= 0 || (clip.c.width & 0xF)) return -EINVAL; if (clip.c.left + clip.c.width > go->width) return -EINVAL; if (clip.c.top < 0 || (clip.c.top & 0xF) || clip.c.height <= 0 || (clip.c.height & 0xF)) return -EINVAL; if (clip.c.top + clip.c.height > go->height) return -EINVAL; for (y = 0; y < clip.c.height; y += 16) for (x = 0; x < clip.c.width; x += 16) { mbnum = (go->width >> 4) * ((clip.c.top + y) >> 4) + ((clip.c.left + x) >> 4); if (go->modet_map[mbnum] != 0 && go->modet_map[mbnum] != region) return -EBUSY; } clip_ptr = clip.next; } /* Clear old region macroblocks */ for (mbnum = 0; mbnum < 1624; ++mbnum) if (go->modet_map[mbnum] == region) go->modet_map[mbnum] = 0; /* Claim macroblocks in this list */ clip_ptr = clip_list; while (clip_ptr) { if (copy_from_user(&clip, clip_ptr, sizeof(clip))) return -EFAULT; for (y = 0; y < clip.c.height; y += 16) for (x = 0; x < clip.c.width; x += 16) { mbnum = (go->width >> 4) * ((clip.c.top + y) >> 4) + 
((clip.c.left + x) >> 4); go->modet_map[mbnum] = region; } clip_ptr = clip.next; } return 0; } #endif static int mpeg_query_ctrl(struct v4l2_queryctrl *ctrl) { static const u32 mpeg_ctrls[] = { V4L2_CID_MPEG_CLASS, V4L2_CID_MPEG_STREAM_TYPE, V4L2_CID_MPEG_VIDEO_ENCODING, V4L2_CID_MPEG_VIDEO_ASPECT, V4L2_CID_MPEG_VIDEO_GOP_SIZE, V4L2_CID_MPEG_VIDEO_GOP_CLOSURE, V4L2_CID_MPEG_VIDEO_BITRATE, 0 }; static const u32 *ctrl_classes[] = { mpeg_ctrls, NULL }; ctrl->id = v4l2_ctrl_next(ctrl_classes, ctrl->id); switch (ctrl->id) { case V4L2_CID_MPEG_CLASS: return v4l2_ctrl_query_fill(ctrl, 0, 0, 0, 0); case V4L2_CID_MPEG_STREAM_TYPE: return v4l2_ctrl_query_fill(ctrl, V4L2_MPEG_STREAM_TYPE_MPEG2_DVD, V4L2_MPEG_STREAM_TYPE_MPEG_ELEM, 1, V4L2_MPEG_STREAM_TYPE_MPEG_ELEM); case V4L2_CID_MPEG_VIDEO_ENCODING: return v4l2_ctrl_query_fill(ctrl, V4L2_MPEG_VIDEO_ENCODING_MPEG_1, V4L2_MPEG_VIDEO_ENCODING_MPEG_4, 1, V4L2_MPEG_VIDEO_ENCODING_MPEG_2); case V4L2_CID_MPEG_VIDEO_ASPECT: return v4l2_ctrl_query_fill(ctrl, V4L2_MPEG_VIDEO_ASPECT_1x1, V4L2_MPEG_VIDEO_ASPECT_16x9, 1, V4L2_MPEG_VIDEO_ASPECT_1x1); case V4L2_CID_MPEG_VIDEO_GOP_SIZE: return v4l2_ctrl_query_fill(ctrl, 0, 34, 1, 15); case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: return v4l2_ctrl_query_fill(ctrl, 0, 1, 1, 0); case V4L2_CID_MPEG_VIDEO_BITRATE: return v4l2_ctrl_query_fill(ctrl, 64000, 10000000, 1, 1500000); default: return -EINVAL; } return 0; } static int mpeg_s_ctrl(struct v4l2_control *ctrl, struct go7007 *go) { /* pretty sure we can't change any of these while streaming */ if (go->streaming) return -EBUSY; switch (ctrl->id) { case V4L2_CID_MPEG_STREAM_TYPE: switch (ctrl->value) { case V4L2_MPEG_STREAM_TYPE_MPEG2_DVD: go->format = GO7007_FORMAT_MPEG2; go->bitrate = 9800000; go->gop_size = 15; go->pali = 0x48; go->closed_gop = 1; go->repeat_seqhead = 0; go->seq_header_enable = 1; go->gop_header_enable = 1; go->dvd_mode = 1; break; case V4L2_MPEG_STREAM_TYPE_MPEG_ELEM: /* todo: */ break; default: return -EINVAL; } break; case 
V4L2_CID_MPEG_VIDEO_ENCODING: switch (ctrl->value) { case V4L2_MPEG_VIDEO_ENCODING_MPEG_1: go->format = GO7007_FORMAT_MPEG1; go->pali = 0; break; case V4L2_MPEG_VIDEO_ENCODING_MPEG_2: go->format = GO7007_FORMAT_MPEG2; /*if (mpeg->pali >> 24 == 2) go->pali = mpeg->pali & 0xff; else*/ go->pali = 0x48; break; case V4L2_MPEG_VIDEO_ENCODING_MPEG_4: go->format = GO7007_FORMAT_MPEG4; /*if (mpeg->pali >> 24 == 4) go->pali = mpeg->pali & 0xff; else*/ go->pali = 0xf5; break; default: return -EINVAL; } go->gop_header_enable = /*mpeg->flags & GO7007_MPEG_OMIT_GOP_HEADER ? 0 :*/ 1; /*if (mpeg->flags & GO7007_MPEG_REPEAT_SEQHEADER) go->repeat_seqhead = 1; else*/ go->repeat_seqhead = 0; go->dvd_mode = 0; break; case V4L2_CID_MPEG_VIDEO_ASPECT: if (go->format == GO7007_FORMAT_MJPEG) return -EINVAL; switch (ctrl->value) { case V4L2_MPEG_VIDEO_ASPECT_1x1: go->aspect_ratio = GO7007_RATIO_1_1; break; case V4L2_MPEG_VIDEO_ASPECT_4x3: go->aspect_ratio = GO7007_RATIO_4_3; break; case V4L2_MPEG_VIDEO_ASPECT_16x9: go->aspect_ratio = GO7007_RATIO_16_9; break; case V4L2_MPEG_VIDEO_ASPECT_221x100: default: return -EINVAL; } break; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: if (ctrl->value < 0 || ctrl->value > 34) return -EINVAL; go->gop_size = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: if (ctrl->value != 0 && ctrl->value != 1) return -EINVAL; go->closed_gop = ctrl->value; break; case V4L2_CID_MPEG_VIDEO_BITRATE: /* Upper bound is kind of arbitrary here */ if (ctrl->value < 64000 || ctrl->value > 10000000) return -EINVAL; go->bitrate = ctrl->value; break; default: return -EINVAL; } return 0; } static int mpeg_g_ctrl(struct v4l2_control *ctrl, struct go7007 *go) { switch (ctrl->id) { case V4L2_CID_MPEG_STREAM_TYPE: if (go->dvd_mode) ctrl->value = V4L2_MPEG_STREAM_TYPE_MPEG2_DVD; else ctrl->value = V4L2_MPEG_STREAM_TYPE_MPEG_ELEM; break; case V4L2_CID_MPEG_VIDEO_ENCODING: switch (go->format) { case GO7007_FORMAT_MPEG1: ctrl->value = V4L2_MPEG_VIDEO_ENCODING_MPEG_1; break; case 
GO7007_FORMAT_MPEG2: ctrl->value = V4L2_MPEG_VIDEO_ENCODING_MPEG_2; break; case GO7007_FORMAT_MPEG4: ctrl->value = V4L2_MPEG_VIDEO_ENCODING_MPEG_4; break; default: return -EINVAL; } break; case V4L2_CID_MPEG_VIDEO_ASPECT: switch (go->aspect_ratio) { case GO7007_RATIO_1_1: ctrl->value = V4L2_MPEG_VIDEO_ASPECT_1x1; break; case GO7007_RATIO_4_3: ctrl->value = V4L2_MPEG_VIDEO_ASPECT_4x3; break; case GO7007_RATIO_16_9: ctrl->value = V4L2_MPEG_VIDEO_ASPECT_16x9; break; default: return -EINVAL; } break; case V4L2_CID_MPEG_VIDEO_GOP_SIZE: ctrl->value = go->gop_size; break; case V4L2_CID_MPEG_VIDEO_GOP_CLOSURE: ctrl->value = go->closed_gop; break; case V4L2_CID_MPEG_VIDEO_BITRATE: ctrl->value = go->bitrate; break; default: return -EINVAL; } return 0; } static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { struct go7007 *go = ((struct go7007_file *) priv)->go; strlcpy(cap->driver, "go7007", sizeof(cap->driver)); strlcpy(cap->card, go->name, sizeof(cap->card)); #if 0 strlcpy(cap->bus_info, dev_name(&dev->udev->dev), sizeof(cap->bus_info)); #endif cap->version = KERNEL_VERSION(0, 9, 8); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; /* | V4L2_CAP_AUDIO; */ if (go->board_info->flags & GO7007_BOARD_HAS_TUNER) cap->capabilities |= V4L2_CAP_TUNER; return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *fmt) { char *desc = NULL; switch (fmt->index) { case 0: fmt->pixelformat = V4L2_PIX_FMT_MJPEG; desc = "Motion-JPEG"; break; case 1: fmt->pixelformat = V4L2_PIX_FMT_MPEG; desc = "MPEG1/MPEG2/MPEG4"; break; default: return -EINVAL; } fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; fmt->flags = V4L2_FMT_FLAG_COMPRESSED; strncpy(fmt->description, desc, sizeof(fmt->description)); return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct go7007 *go = ((struct go7007_file *) priv)->go; fmt->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; fmt->fmt.pix.width = 
go->width; fmt->fmt.pix.height = go->height; fmt->fmt.pix.pixelformat = (go->format == GO7007_FORMAT_MJPEG) ? V4L2_PIX_FMT_MJPEG : V4L2_PIX_FMT_MPEG; fmt->fmt.pix.field = V4L2_FIELD_NONE; fmt->fmt.pix.bytesperline = 0; fmt->fmt.pix.sizeimage = GO7007_BUF_SIZE; fmt->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct go7007 *go = ((struct go7007_file *) priv)->go; return set_capture_size(go, fmt, 1); } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *fmt) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (go->streaming) return -EBUSY; return set_capture_size(go, fmt, 0); } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *req) { struct go7007_file *gofh = priv; struct go7007 *go = gofh->go; int retval = -EBUSY; unsigned int count, i; if (go->streaming) return retval; if (req->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || req->memory != V4L2_MEMORY_MMAP) return -EINVAL; mutex_lock(&gofh->lock); for (i = 0; i < gofh->buf_count; ++i) if (gofh->bufs[i].mapped > 0) goto unlock_and_return; mutex_lock(&go->hw_lock); if (go->in_use > 0 && gofh->buf_count == 0) { mutex_unlock(&go->hw_lock); goto unlock_and_return; } if (gofh->buf_count > 0) kfree(gofh->bufs); retval = -ENOMEM; count = req->count; if (count > 0) { if (count < 2) count = 2; if (count > 32) count = 32; gofh->bufs = kcalloc(count, sizeof(struct go7007_buffer), GFP_KERNEL); if (!gofh->bufs) { mutex_unlock(&go->hw_lock); goto unlock_and_return; } for (i = 0; i < count; ++i) { gofh->bufs[i].go = go; gofh->bufs[i].index = i; gofh->bufs[i].state = BUF_STATE_IDLE; gofh->bufs[i].mapped = 0; } go->in_use = 1; } else { go->in_use = 0; } gofh->buf_count = count; mutex_unlock(&go->hw_lock); mutex_unlock(&gofh->lock); memset(req, 0, sizeof(*req)); req->count = count; req->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; req->memory = V4L2_MEMORY_MMAP; return 0; 
unlock_and_return: mutex_unlock(&gofh->lock); return retval; } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct go7007_file *gofh = priv; int retval = -EINVAL; unsigned int index; if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return retval; index = buf->index; mutex_lock(&gofh->lock); if (index >= gofh->buf_count) goto unlock_and_return; memset(buf, 0, sizeof(*buf)); buf->index = index; buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; switch (gofh->bufs[index].state) { case BUF_STATE_QUEUED: buf->flags = V4L2_BUF_FLAG_QUEUED; break; case BUF_STATE_DONE: buf->flags = V4L2_BUF_FLAG_DONE; break; default: buf->flags = 0; } if (gofh->bufs[index].mapped) buf->flags |= V4L2_BUF_FLAG_MAPPED; buf->memory = V4L2_MEMORY_MMAP; buf->m.offset = index * GO7007_BUF_SIZE; buf->length = GO7007_BUF_SIZE; mutex_unlock(&gofh->lock); return 0; unlock_and_return: mutex_unlock(&gofh->lock); return retval; } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct go7007_file *gofh = priv; struct go7007 *go = gofh->go; struct go7007_buffer *gobuf; unsigned long flags; int retval = -EINVAL; int ret; if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE || buf->memory != V4L2_MEMORY_MMAP) return retval; mutex_lock(&gofh->lock); if (buf->index < 0 || buf->index >= gofh->buf_count) goto unlock_and_return; gobuf = &gofh->bufs[buf->index]; if (!gobuf->mapped) goto unlock_and_return; retval = -EBUSY; if (gobuf->state != BUF_STATE_IDLE) goto unlock_and_return; /* offset will be 0 until we really support USERPTR streaming */ gobuf->offset = gobuf->user_addr & ~PAGE_MASK; gobuf->bytesused = 0; gobuf->frame_offset = 0; gobuf->modet_active = 0; if (gobuf->offset > 0) gobuf->page_count = GO7007_BUF_PAGES + 1; else gobuf->page_count = GO7007_BUF_PAGES; retval = -ENOMEM; down_read(&current->mm->mmap_sem); ret = get_user_pages(current, current->mm, gobuf->user_addr & PAGE_MASK, gobuf->page_count, 1, 1, gobuf->pages, NULL); 
up_read(&current->mm->mmap_sem); if (ret != gobuf->page_count) { int i; for (i = 0; i < ret; ++i) page_cache_release(gobuf->pages[i]); gobuf->page_count = 0; goto unlock_and_return; } gobuf->state = BUF_STATE_QUEUED; spin_lock_irqsave(&go->spinlock, flags); list_add_tail(&gobuf->stream, &go->stream); spin_unlock_irqrestore(&go->spinlock, flags); mutex_unlock(&gofh->lock); return 0; unlock_and_return: mutex_unlock(&gofh->lock); return retval; } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct go7007_file *gofh = priv; struct go7007 *go = gofh->go; struct go7007_buffer *gobuf; int retval = -EINVAL; unsigned long flags; u32 frame_type_flag; DEFINE_WAIT(wait); if (buf->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return retval; if (buf->memory != V4L2_MEMORY_MMAP) return retval; mutex_lock(&gofh->lock); if (list_empty(&go->stream)) goto unlock_and_return; gobuf = list_entry(go->stream.next, struct go7007_buffer, stream); retval = -EAGAIN; if (gobuf->state != BUF_STATE_DONE && !(file->f_flags & O_NONBLOCK)) { for (;;) { prepare_to_wait(&go->frame_waitq, &wait, TASK_INTERRUPTIBLE); if (gobuf->state == BUF_STATE_DONE) break; if (signal_pending(current)) { retval = -ERESTARTSYS; break; } schedule(); } finish_wait(&go->frame_waitq, &wait); } if (gobuf->state != BUF_STATE_DONE) goto unlock_and_return; spin_lock_irqsave(&go->spinlock, flags); deactivate_buffer(gobuf); spin_unlock_irqrestore(&go->spinlock, flags); frame_type_flag = get_frame_type_flag(gobuf, go->format); gobuf->state = BUF_STATE_IDLE; memset(buf, 0, sizeof(*buf)); buf->index = gobuf->index; buf->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; buf->bytesused = gobuf->bytesused; buf->flags = V4L2_BUF_FLAG_MAPPED | frame_type_flag; buf->field = V4L2_FIELD_NONE; buf->timestamp = gobuf->timestamp; buf->sequence = gobuf->seq; buf->memory = V4L2_MEMORY_MMAP; buf->m.offset = gobuf->index * GO7007_BUF_SIZE; buf->length = GO7007_BUF_SIZE; buf->reserved = gobuf->modet_active; 
mutex_unlock(&gofh->lock); return 0; unlock_and_return: mutex_unlock(&gofh->lock); return retval; } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct go7007_file *gofh = priv; struct go7007 *go = gofh->go; int retval = 0; if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; mutex_lock(&gofh->lock); mutex_lock(&go->hw_lock); if (!go->streaming) { go->streaming = 1; go->next_seq = 0; go->active_buf = NULL; if (go7007_start_encoder(go) < 0) retval = -EIO; else retval = 0; } mutex_unlock(&go->hw_lock); mutex_unlock(&gofh->lock); return retval; } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct go7007_file *gofh = priv; struct go7007 *go = gofh->go; if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; mutex_lock(&gofh->lock); go7007_streamoff(go); mutex_unlock(&gofh->lock); return 0; } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *query) { struct go7007 *go = ((struct go7007_file *) priv)->go; int id = query->id; if (0 == call_all(&go->v4l2_dev, core, queryctrl, query)) return 0; query->id = id; return mpeg_query_ctrl(query); } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (0 == call_all(&go->v4l2_dev, core, g_ctrl, ctrl)) return 0; return mpeg_g_ctrl(ctrl, go); } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (0 == call_all(&go->v4l2_dev, core, s_ctrl, ctrl)) return 0; return mpeg_s_ctrl(ctrl, go); } static int vidioc_g_parm(struct file *filp, void *priv, struct v4l2_streamparm *parm) { struct go7007 *go = ((struct go7007_file *) priv)->go; struct v4l2_fract timeperframe = { .numerator = 1001 * go->fps_scale, .denominator = go->sensor_framerate, }; if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; parm->parm.capture.capability |= 
V4L2_CAP_TIMEPERFRAME; parm->parm.capture.timeperframe = timeperframe; return 0; } static int vidioc_s_parm(struct file *filp, void *priv, struct v4l2_streamparm *parm) { struct go7007 *go = ((struct go7007_file *) priv)->go; unsigned int n, d; if (parm->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (parm->parm.capture.capturemode != 0) return -EINVAL; n = go->sensor_framerate * parm->parm.capture.timeperframe.numerator; d = 1001 * parm->parm.capture.timeperframe.denominator; if (n != 0 && d != 0 && n > d) go->fps_scale = (n + d/2) / d; else go->fps_scale = 1; return 0; } /* VIDIOC_ENUMSTD on go7007 were used for enumerating the supported fps and its resolution, when the device is not connected to TV. This is were an API abuse, probably used by the lack of specific IOCTL's to enumerate it, by the time the driver was written. However, since kernel 2.6.19, two new ioctls (VIDIOC_ENUM_FRAMEINTERVALS and VIDIOC_ENUM_FRAMESIZES) were added for this purpose. The two functions below implement the newer ioctls */ static int vidioc_enum_framesizes(struct file *filp, void *priv, struct v4l2_frmsizeenum *fsize) { struct go7007 *go = ((struct go7007_file *) priv)->go; /* Return -EINVAL, if it is a TV board */ if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) || (go->board_info->sensor_flags & GO7007_SENSOR_TV)) return -EINVAL; if (fsize->index > 0) return -EINVAL; fsize->type = V4L2_FRMSIZE_TYPE_DISCRETE; fsize->discrete.width = go->board_info->sensor_width; fsize->discrete.height = go->board_info->sensor_height; return 0; } static int vidioc_enum_frameintervals(struct file *filp, void *priv, struct v4l2_frmivalenum *fival) { struct go7007 *go = ((struct go7007_file *) priv)->go; /* Return -EINVAL, if it is a TV board */ if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) || (go->board_info->sensor_flags & GO7007_SENSOR_TV)) return -EINVAL; if (fival->index > 0) return -EINVAL; fival->type = V4L2_FRMIVAL_TYPE_DISCRETE; fival->discrete.numerator = 1001; 
fival->discrete.denominator = go->board_info->sensor_framerate; return 0; } static int vidioc_g_std(struct file *file, void *priv, v4l2_std_id *std) { struct go7007 *go = ((struct go7007_file *) priv)->go; switch (go->standard) { case GO7007_STD_NTSC: *std = V4L2_STD_NTSC; break; case GO7007_STD_PAL: *std = V4L2_STD_PAL; break; default: return -EINVAL; } return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *std) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (go->streaming) return -EBUSY; if (!(go->board_info->sensor_flags & GO7007_SENSOR_TV) && *std != 0) return -EINVAL; if (*std == 0) return -EINVAL; if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) && go->input == go->board_info->num_inputs - 1) { if (!go->i2c_adapter_online) return -EIO; if (call_all(&go->v4l2_dev, core, s_std, *std) < 0) return -EINVAL; } if (*std & V4L2_STD_NTSC) { go->standard = GO7007_STD_NTSC; go->sensor_framerate = 30000; } else if (*std & V4L2_STD_PAL) { go->standard = GO7007_STD_PAL; go->sensor_framerate = 25025; } else if (*std & V4L2_STD_SECAM) { go->standard = GO7007_STD_PAL; go->sensor_framerate = 25025; } else return -EINVAL; call_all(&go->v4l2_dev, core, s_std, *std); set_capture_size(go, NULL, 0); return 0; } static int vidioc_querystd(struct file *file, void *priv, v4l2_std_id *std) { struct go7007 *go = ((struct go7007_file *) priv)->go; if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) && go->input == go->board_info->num_inputs - 1) { if (!go->i2c_adapter_online) return -EIO; return call_all(&go->v4l2_dev, video, querystd, std); } else if (go->board_info->sensor_flags & GO7007_SENSOR_TV) *std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM; else *std = 0; return 0; } static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (inp->index >= go->board_info->num_inputs) return -EINVAL; strncpy(inp->name, go->board_info->inputs[inp->index].name, 
sizeof(inp->name)); /* If this board has a tuner, it will be the last input */ if ((go->board_info->flags & GO7007_BOARD_HAS_TUNER) && inp->index == go->board_info->num_inputs - 1) inp->type = V4L2_INPUT_TYPE_TUNER; else inp->type = V4L2_INPUT_TYPE_CAMERA; inp->audioset = 0; inp->tuner = 0; if (go->board_info->sensor_flags & GO7007_SENSOR_TV) inp->std = V4L2_STD_NTSC | V4L2_STD_PAL | V4L2_STD_SECAM; else inp->std = 0; return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *input) { struct go7007 *go = ((struct go7007_file *) priv)->go; *input = go->input; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int input) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (input >= go->board_info->num_inputs) return -EINVAL; if (go->streaming) return -EBUSY; go->input = input; return call_all(&go->v4l2_dev, video, s_routing, input, 0, 0); } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER)) return -EINVAL; if (t->index != 0) return -EINVAL; if (!go->i2c_adapter_online) return -EIO; return call_all(&go->v4l2_dev, tuner, g_tuner, t); } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER)) return -EINVAL; if (t->index != 0) return -EINVAL; if (!go->i2c_adapter_online) return -EIO; switch (go->board_id) { case GO7007_BOARDID_PX_TV402U_NA: case GO7007_BOARDID_PX_TV402U_JP: /* No selectable options currently */ if (t->audmode != V4L2_TUNER_MODE_STEREO) return -EINVAL; break; } return call_all(&go->v4l2_dev, tuner, s_tuner, t); } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER)) return -EINVAL; if 
(!go->i2c_adapter_online) return -EIO; f->type = V4L2_TUNER_ANALOG_TV; return call_all(&go->v4l2_dev, tuner, g_frequency, f); } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (!(go->board_info->flags & GO7007_BOARD_HAS_TUNER)) return -EINVAL; if (!go->i2c_adapter_online) return -EIO; return call_all(&go->v4l2_dev, tuner, s_frequency, f); } static int vidioc_cropcap(struct file *file, void *priv, struct v4l2_cropcap *cropcap) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (cropcap->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; /* These specify the raw input of the sensor */ switch (go->standard) { case GO7007_STD_NTSC: cropcap->bounds.top = 0; cropcap->bounds.left = 0; cropcap->bounds.width = 720; cropcap->bounds.height = 480; cropcap->defrect.top = 0; cropcap->defrect.left = 0; cropcap->defrect.width = 720; cropcap->defrect.height = 480; break; case GO7007_STD_PAL: cropcap->bounds.top = 0; cropcap->bounds.left = 0; cropcap->bounds.width = 720; cropcap->bounds.height = 576; cropcap->defrect.top = 0; cropcap->defrect.left = 0; cropcap->defrect.width = 720; cropcap->defrect.height = 576; break; case GO7007_STD_OTHER: cropcap->bounds.top = 0; cropcap->bounds.left = 0; cropcap->bounds.width = go->board_info->sensor_width; cropcap->bounds.height = go->board_info->sensor_height; cropcap->defrect.top = 0; cropcap->defrect.left = 0; cropcap->defrect.width = go->board_info->sensor_width; cropcap->defrect.height = go->board_info->sensor_height; break; } return 0; } static int vidioc_g_crop(struct file *file, void *priv, struct v4l2_crop *crop) { struct go7007 *go = ((struct go7007_file *) priv)->go; if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; crop->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; /* These specify the raw input of the sensor */ switch (go->standard) { case GO7007_STD_NTSC: crop->c.top = 0; crop->c.left = 0; crop->c.width = 720; 
crop->c.height = 480; break; case GO7007_STD_PAL: crop->c.top = 0; crop->c.left = 0; crop->c.width = 720; crop->c.height = 576; break; case GO7007_STD_OTHER: crop->c.top = 0; crop->c.left = 0; crop->c.width = go->board_info->sensor_width; crop->c.height = go->board_info->sensor_height; break; } return 0; } /* FIXME: vidioc_s_crop is not really implemented!!! */ static int vidioc_s_crop(struct file *file, void *priv, struct v4l2_crop *crop) { if (crop->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; return 0; } static int vidioc_g_jpegcomp(struct file *file, void *priv, struct v4l2_jpegcompression *params) { memset(params, 0, sizeof(*params)); params->quality = 50; /* ?? */ params->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } static int vidioc_s_jpegcomp(struct file *file, void *priv, struct v4l2_jpegcompression *params) { if (params->quality != 50 || params->jpeg_markers != (V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT)) return -EINVAL; return 0; } /* FIXME: Those ioctls are private, and not needed, since several standard extended controls already provide streaming control. 
So, those ioctls should be converted into vidioc_g_ext_ctrls() and vidioc_s_ext_ctrls() */ #if 0 /* Temporary ioctls for controlling compression characteristics */ case GO7007IOC_S_BITRATE: { int *bitrate = arg; if (go->streaming) return -EINVAL; /* Upper bound is kind of arbitrary here */ if (*bitrate < 64000 || *bitrate > 10000000) return -EINVAL; go->bitrate = *bitrate; return 0; } case GO7007IOC_G_BITRATE: { int *bitrate = arg; *bitrate = go->bitrate; return 0; } case GO7007IOC_S_COMP_PARAMS: { struct go7007_comp_params *comp = arg; if (go->format == GO7007_FORMAT_MJPEG) return -EINVAL; if (comp->gop_size > 0) go->gop_size = comp->gop_size; else go->gop_size = go->sensor_framerate / 1000; if (go->gop_size != 15) go->dvd_mode = 0; /*go->ipb = comp->max_b_frames > 0;*/ /* completely untested */ if (go->board_info->sensor_flags & GO7007_SENSOR_TV) { switch (comp->aspect_ratio) { case GO7007_ASPECT_RATIO_4_3_NTSC: case GO7007_ASPECT_RATIO_4_3_PAL: go->aspect_ratio = GO7007_RATIO_4_3; break; case GO7007_ASPECT_RATIO_16_9_NTSC: case GO7007_ASPECT_RATIO_16_9_PAL: go->aspect_ratio = GO7007_RATIO_16_9; break; default: go->aspect_ratio = GO7007_RATIO_1_1; break; } } if (comp->flags & GO7007_COMP_OMIT_SEQ_HEADER) { go->dvd_mode = 0; go->seq_header_enable = 0; } else { go->seq_header_enable = 1; } /* fall-through */ } case GO7007IOC_G_COMP_PARAMS: { struct go7007_comp_params *comp = arg; if (go->format == GO7007_FORMAT_MJPEG) return -EINVAL; memset(comp, 0, sizeof(*comp)); comp->gop_size = go->gop_size; comp->max_b_frames = go->ipb ? 
2 : 0; switch (go->aspect_ratio) { case GO7007_RATIO_4_3: if (go->standard == GO7007_STD_NTSC) comp->aspect_ratio = GO7007_ASPECT_RATIO_4_3_NTSC; else comp->aspect_ratio = GO7007_ASPECT_RATIO_4_3_PAL; break; case GO7007_RATIO_16_9: if (go->standard == GO7007_STD_NTSC) comp->aspect_ratio = GO7007_ASPECT_RATIO_16_9_NTSC; else comp->aspect_ratio = GO7007_ASPECT_RATIO_16_9_PAL; break; default: comp->aspect_ratio = GO7007_ASPECT_RATIO_1_1; break; } if (go->closed_gop) comp->flags |= GO7007_COMP_CLOSED_GOP; if (!go->seq_header_enable) comp->flags |= GO7007_COMP_OMIT_SEQ_HEADER; return 0; } case GO7007IOC_S_MPEG_PARAMS: { struct go7007_mpeg_params *mpeg = arg; if (go->format != GO7007_FORMAT_MPEG1 && go->format != GO7007_FORMAT_MPEG2 && go->format != GO7007_FORMAT_MPEG4) return -EINVAL; if (mpeg->flags & GO7007_MPEG_FORCE_DVD_MODE) { go->format = GO7007_FORMAT_MPEG2; go->bitrate = 9800000; go->gop_size = 15; go->pali = 0x48; go->closed_gop = 1; go->repeat_seqhead = 0; go->seq_header_enable = 1; go->gop_header_enable = 1; go->dvd_mode = 1; } else { switch (mpeg->mpeg_video_standard) { case GO7007_MPEG_VIDEO_MPEG1: go->format = GO7007_FORMAT_MPEG1; go->pali = 0; break; case GO7007_MPEG_VIDEO_MPEG2: go->format = GO7007_FORMAT_MPEG2; if (mpeg->pali >> 24 == 2) go->pali = mpeg->pali & 0xff; else go->pali = 0x48; break; case GO7007_MPEG_VIDEO_MPEG4: go->format = GO7007_FORMAT_MPEG4; if (mpeg->pali >> 24 == 4) go->pali = mpeg->pali & 0xff; else go->pali = 0xf5; break; default: return -EINVAL; } go->gop_header_enable = mpeg->flags & GO7007_MPEG_OMIT_GOP_HEADER ? 
0 : 1; if (mpeg->flags & GO7007_MPEG_REPEAT_SEQHEADER) go->repeat_seqhead = 1; else go->repeat_seqhead = 0; go->dvd_mode = 0; } /* fall-through */ } case GO7007IOC_G_MPEG_PARAMS: { struct go7007_mpeg_params *mpeg = arg; memset(mpeg, 0, sizeof(*mpeg)); switch (go->format) { case GO7007_FORMAT_MPEG1: mpeg->mpeg_video_standard = GO7007_MPEG_VIDEO_MPEG1; mpeg->pali = 0; break; case GO7007_FORMAT_MPEG2: mpeg->mpeg_video_standard = GO7007_MPEG_VIDEO_MPEG2; mpeg->pali = GO7007_MPEG_PROFILE(2, go->pali); break; case GO7007_FORMAT_MPEG4: mpeg->mpeg_video_standard = GO7007_MPEG_VIDEO_MPEG4; mpeg->pali = GO7007_MPEG_PROFILE(4, go->pali); break; default: return -EINVAL; } if (!go->gop_header_enable) mpeg->flags |= GO7007_MPEG_OMIT_GOP_HEADER; if (go->repeat_seqhead) mpeg->flags |= GO7007_MPEG_REPEAT_SEQHEADER; if (go->dvd_mode) mpeg->flags |= GO7007_MPEG_FORCE_DVD_MODE; return 0; } case GO7007IOC_S_MD_PARAMS: { struct go7007_md_params *mdp = arg; if (mdp->region > 3) return -EINVAL; if (mdp->trigger > 0) { go->modet[mdp->region].pixel_threshold = mdp->pixel_threshold >> 1; go->modet[mdp->region].motion_threshold = mdp->motion_threshold >> 1; go->modet[mdp->region].mb_threshold = mdp->trigger >> 1; go->modet[mdp->region].enable = 1; } else go->modet[mdp->region].enable = 0; /* fall-through */ } case GO7007IOC_G_MD_PARAMS: { struct go7007_md_params *mdp = arg; int region = mdp->region; if (mdp->region > 3) return -EINVAL; memset(mdp, 0, sizeof(struct go7007_md_params)); mdp->region = region; if (!go->modet[region].enable) return 0; mdp->pixel_threshold = (go->modet[region].pixel_threshold << 1) + 1; mdp->motion_threshold = (go->modet[region].motion_threshold << 1) + 1; mdp->trigger = (go->modet[region].mb_threshold << 1) + 1; return 0; } case GO7007IOC_S_MD_REGION: { struct go7007_md_region *region = arg; if (region->region < 1 || region->region > 3) return -EINVAL; return clip_to_modet_map(go, region->region, region->clips); } #endif static ssize_t go7007_read(struct file 
*file, char __user *data, size_t count, loff_t *ppos) { return -EINVAL; } static void go7007_vm_open(struct vm_area_struct *vma) { struct go7007_buffer *gobuf = vma->vm_private_data; ++gobuf->mapped; } static void go7007_vm_close(struct vm_area_struct *vma) { struct go7007_buffer *gobuf = vma->vm_private_data; unsigned long flags; if (--gobuf->mapped == 0) { spin_lock_irqsave(&gobuf->go->spinlock, flags); deactivate_buffer(gobuf); spin_unlock_irqrestore(&gobuf->go->spinlock, flags); } } /* Copied from videobuf-dma-sg.c */ static int go7007_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page; page = alloc_page(GFP_USER | __GFP_DMA32); if (!page) return VM_FAULT_OOM; clear_user_highpage(page, (unsigned long)vmf->virtual_address); vmf->page = page; return 0; } static struct vm_operations_struct go7007_vm_ops = { .open = go7007_vm_open, .close = go7007_vm_close, .fault = go7007_vm_fault, }; static int go7007_mmap(struct file *file, struct vm_area_struct *vma) { struct go7007_file *gofh = file->private_data; unsigned int index; if (gofh->go->status != STATUS_ONLINE) return -EIO; if (!(vma->vm_flags & VM_SHARED)) return -EINVAL; /* only support VM_SHARED mapping */ if (vma->vm_end - vma->vm_start != GO7007_BUF_SIZE) return -EINVAL; /* must map exactly one full buffer */ mutex_lock(&gofh->lock); index = vma->vm_pgoff / GO7007_BUF_PAGES; if (index >= gofh->buf_count) { mutex_unlock(&gofh->lock); return -EINVAL; /* trying to map beyond requested buffers */ } if (index * GO7007_BUF_PAGES != vma->vm_pgoff) { mutex_unlock(&gofh->lock); return -EINVAL; /* offset is not aligned on buffer boundary */ } if (gofh->bufs[index].mapped > 0) { mutex_unlock(&gofh->lock); return -EBUSY; } gofh->bufs[index].mapped = 1; gofh->bufs[index].user_addr = vma->vm_start; vma->vm_ops = &go7007_vm_ops; vma->vm_flags |= VM_DONTEXPAND; vma->vm_flags &= ~VM_IO; vma->vm_private_data = &gofh->bufs[index]; mutex_unlock(&gofh->lock); return 0; } static unsigned int 
go7007_poll(struct file *file, poll_table *wait) { struct go7007_file *gofh = file->private_data; struct go7007_buffer *gobuf; if (list_empty(&gofh->go->stream)) return POLLERR; gobuf = list_entry(gofh->go->stream.next, struct go7007_buffer, stream); poll_wait(file, &gofh->go->frame_waitq, wait); if (gobuf->state == BUF_STATE_DONE) return POLLIN | POLLRDNORM; return 0; } static void go7007_vfl_release(struct video_device *vfd) { struct go7007 *go = video_get_drvdata(vfd); video_device_release(vfd); if (--go->ref_count == 0) kfree(go); } static struct v4l2_file_operations go7007_fops = { .owner = THIS_MODULE, .open = go7007_open, .release = go7007_release, .ioctl = video_ioctl2, .read = go7007_read, .mmap = go7007_mmap, .poll = go7007_poll, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_g_std = vidioc_g_std, .vidioc_s_std = vidioc_s_std, .vidioc_querystd = vidioc_querystd, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_g_parm = vidioc_g_parm, .vidioc_s_parm = vidioc_s_parm, .vidioc_enum_framesizes = vidioc_enum_framesizes, .vidioc_enum_frameintervals = vidioc_enum_frameintervals, .vidioc_cropcap = vidioc_cropcap, .vidioc_g_crop = vidioc_g_crop, .vidioc_s_crop = vidioc_s_crop, .vidioc_g_jpegcomp = 
vidioc_g_jpegcomp, .vidioc_s_jpegcomp = vidioc_s_jpegcomp, }; static struct video_device go7007_template = { .name = "go7007", .fops = &go7007_fops, .release = go7007_vfl_release, .ioctl_ops = &video_ioctl_ops, .tvnorms = V4L2_STD_ALL, .current_norm = V4L2_STD_NTSC, }; int go7007_v4l2_init(struct go7007 *go) { int rv; go->video_dev = video_device_alloc(); if (go->video_dev == NULL) return -ENOMEM; *go->video_dev = go7007_template; go->video_dev->parent = go->dev; rv = video_register_device(go->video_dev, VFL_TYPE_GRABBER, -1); if (rv < 0) { video_device_release(go->video_dev); go->video_dev = NULL; return rv; } rv = v4l2_device_register(go->dev, &go->v4l2_dev); if (rv < 0) { video_device_release(go->video_dev); go->video_dev = NULL; return rv; } video_set_drvdata(go->video_dev, go); ++go->ref_count; printk(KERN_INFO "%s: registered device %s [v4l2]\n", go->video_dev->name, video_device_node_name(go->video_dev)); return 0; } void go7007_v4l2_remove(struct go7007 *go) { unsigned long flags; mutex_lock(&go->hw_lock); if (go->streaming) { go->streaming = 0; go7007_stream_stop(go); spin_lock_irqsave(&go->spinlock, flags); abort_queued(go); spin_unlock_irqrestore(&go->spinlock, flags); } mutex_unlock(&go->hw_lock); if (go->video_dev) video_unregister_device(go->video_dev); v4l2_device_unregister(&go->v4l2_dev); }
gpl-2.0
TeskeVirtualSystem/odroid_mptcp
sound/isa/sb/sb16_main.c
4808
27389
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Routines for control of 16-bit SoundBlaster cards and clones * Note: This is very ugly hardware which uses one 8-bit DMA channel and * second 16-bit DMA channel. Unfortunately 8-bit DMA channel can't * transfer 16-bit samples and 16-bit DMA channels can't transfer * 8-bit samples. This make full duplex more complicated than * can be... People, don't buy these soundcards for full 16-bit * duplex!!! * Note: 16-bit wide is assigned to first direction which made request. * With full duplex - playback is preferred with abstract layer. * * Note: Some chip revisions have hardware bug. Changing capture * channel from full-duplex 8bit DMA to 16bit DMA will block * 16bit DMA transfers from DSP chip (capture) until 8bit transfer * to DSP chip (playback) starts. This bug can be avoided with * "16bit DMA Allocation" setting set to Playback or Capture. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <asm/io.h> #include <asm/dma.h> #include <linux/init.h> #include <linux/time.h> #include <sound/core.h> #include <sound/sb.h> #include <sound/sb16_csp.h> #include <sound/mpu401.h> #include <sound/control.h> #include <sound/info.h> MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Routines for control of 16-bit SoundBlaster cards and clones"); MODULE_LICENSE("GPL"); #ifdef CONFIG_SND_SB16_CSP static void snd_sb16_csp_playback_prepare(struct snd_sb *chip, struct snd_pcm_runtime *runtime) { if (chip->hardware == SB_HW_16CSP) { struct snd_sb_csp *csp = chip->csp; if (csp->running & SNDRV_SB_CSP_ST_LOADED) { /* manually loaded codec */ if ((csp->mode & SNDRV_SB_CSP_MODE_DSP_WRITE) && ((1U << runtime->format) == csp->acc_format)) { /* Supported runtime PCM format for playback */ if (csp->ops.csp_use(csp) == 0) { /* If CSP was successfully acquired */ goto __start_CSP; } } else if ((csp->mode & SNDRV_SB_CSP_MODE_QSOUND) && (csp->q_enabled)) { /* QSound decoder is loaded and enabled */ if ((1 << runtime->format) & (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE)) { /* Only for simple PCM formats */ if (csp->ops.csp_use(csp) == 0) { /* If CSP was successfully acquired */ goto __start_CSP; } } } } else if (csp->ops.csp_use(csp) == 0) { /* Acquire CSP and try to autoload hardware codec */ if (csp->ops.csp_autoload(csp, runtime->format, SNDRV_SB_CSP_MODE_DSP_WRITE)) { /* Unsupported format, release CSP */ csp->ops.csp_unuse(csp); } else { __start_CSP: /* Try to start CSP */ if (csp->ops.csp_start(csp, (chip->mode & SB_MODE_PLAYBACK_16) ? SNDRV_SB_CSP_SAMPLE_16BIT : SNDRV_SB_CSP_SAMPLE_8BIT, (runtime->channels > 1) ? 
SNDRV_SB_CSP_STEREO : SNDRV_SB_CSP_MONO)) { /* Failed, release CSP */ csp->ops.csp_unuse(csp); } else { /* Success, CSP acquired and running */ chip->open = SNDRV_SB_CSP_MODE_DSP_WRITE; } } } } } static void snd_sb16_csp_capture_prepare(struct snd_sb *chip, struct snd_pcm_runtime *runtime) { if (chip->hardware == SB_HW_16CSP) { struct snd_sb_csp *csp = chip->csp; if (csp->running & SNDRV_SB_CSP_ST_LOADED) { /* manually loaded codec */ if ((csp->mode & SNDRV_SB_CSP_MODE_DSP_READ) && ((1U << runtime->format) == csp->acc_format)) { /* Supported runtime PCM format for capture */ if (csp->ops.csp_use(csp) == 0) { /* If CSP was successfully acquired */ goto __start_CSP; } } } else if (csp->ops.csp_use(csp) == 0) { /* Acquire CSP and try to autoload hardware codec */ if (csp->ops.csp_autoload(csp, runtime->format, SNDRV_SB_CSP_MODE_DSP_READ)) { /* Unsupported format, release CSP */ csp->ops.csp_unuse(csp); } else { __start_CSP: /* Try to start CSP */ if (csp->ops.csp_start(csp, (chip->mode & SB_MODE_CAPTURE_16) ? SNDRV_SB_CSP_SAMPLE_16BIT : SNDRV_SB_CSP_SAMPLE_8BIT, (runtime->channels > 1) ? 
SNDRV_SB_CSP_STEREO : SNDRV_SB_CSP_MONO)) { /* Failed, release CSP */ csp->ops.csp_unuse(csp); } else { /* Success, CSP acquired and running */ chip->open = SNDRV_SB_CSP_MODE_DSP_READ; } } } } } static void snd_sb16_csp_update(struct snd_sb *chip) { if (chip->hardware == SB_HW_16CSP) { struct snd_sb_csp *csp = chip->csp; if (csp->qpos_changed) { spin_lock(&chip->reg_lock); csp->ops.csp_qsound_transfer (csp); spin_unlock(&chip->reg_lock); } } } static void snd_sb16_csp_playback_open(struct snd_sb *chip, struct snd_pcm_runtime *runtime) { /* CSP decoders (QSound excluded) support only 16bit transfers */ if (chip->hardware == SB_HW_16CSP) { struct snd_sb_csp *csp = chip->csp; if (csp->running & SNDRV_SB_CSP_ST_LOADED) { /* manually loaded codec */ if (csp->mode & SNDRV_SB_CSP_MODE_DSP_WRITE) { runtime->hw.formats |= csp->acc_format; } } else { /* autoloaded codecs */ runtime->hw.formats |= SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW | SNDRV_PCM_FMTBIT_IMA_ADPCM; } } } static void snd_sb16_csp_playback_close(struct snd_sb *chip) { if ((chip->hardware == SB_HW_16CSP) && (chip->open == SNDRV_SB_CSP_MODE_DSP_WRITE)) { struct snd_sb_csp *csp = chip->csp; if (csp->ops.csp_stop(csp) == 0) { csp->ops.csp_unuse(csp); chip->open = 0; } } } static void snd_sb16_csp_capture_open(struct snd_sb *chip, struct snd_pcm_runtime *runtime) { /* CSP coders support only 16bit transfers */ if (chip->hardware == SB_HW_16CSP) { struct snd_sb_csp *csp = chip->csp; if (csp->running & SNDRV_SB_CSP_ST_LOADED) { /* manually loaded codec */ if (csp->mode & SNDRV_SB_CSP_MODE_DSP_READ) { runtime->hw.formats |= csp->acc_format; } } else { /* autoloaded codecs */ runtime->hw.formats |= SNDRV_PCM_FMTBIT_MU_LAW | SNDRV_PCM_FMTBIT_A_LAW | SNDRV_PCM_FMTBIT_IMA_ADPCM; } } } static void snd_sb16_csp_capture_close(struct snd_sb *chip) { if ((chip->hardware == SB_HW_16CSP) && (chip->open == SNDRV_SB_CSP_MODE_DSP_READ)) { struct snd_sb_csp *csp = chip->csp; if (csp->ops.csp_stop(csp) == 0) { 
csp->ops.csp_unuse(csp); chip->open = 0; } } } #else #define snd_sb16_csp_playback_prepare(chip, runtime) /*nop*/ #define snd_sb16_csp_capture_prepare(chip, runtime) /*nop*/ #define snd_sb16_csp_update(chip) /*nop*/ #define snd_sb16_csp_playback_open(chip, runtime) /*nop*/ #define snd_sb16_csp_playback_close(chip) /*nop*/ #define snd_sb16_csp_capture_open(chip, runtime) /*nop*/ #define snd_sb16_csp_capture_close(chip) /*nop*/ #endif static void snd_sb16_setup_rate(struct snd_sb *chip, unsigned short rate, int channel) { unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); if (chip->mode & (channel == SNDRV_PCM_STREAM_PLAYBACK ? SB_MODE_PLAYBACK_16 : SB_MODE_CAPTURE_16)) snd_sb_ack_16bit(chip); else snd_sb_ack_8bit(chip); if (!(chip->mode & SB_RATE_LOCK)) { chip->locked_rate = rate; snd_sbdsp_command(chip, SB_DSP_SAMPLE_RATE_IN); snd_sbdsp_command(chip, rate >> 8); snd_sbdsp_command(chip, rate & 0xff); snd_sbdsp_command(chip, SB_DSP_SAMPLE_RATE_OUT); snd_sbdsp_command(chip, rate >> 8); snd_sbdsp_command(chip, rate & 0xff); } spin_unlock_irqrestore(&chip->reg_lock, flags); } static int snd_sb16_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { return snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params)); } static int snd_sb16_hw_free(struct snd_pcm_substream *substream) { snd_pcm_lib_free_pages(substream); return 0; } static int snd_sb16_playback_prepare(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned char format; unsigned int size, count, dma; snd_sb16_csp_playback_prepare(chip, runtime); if (snd_pcm_format_unsigned(runtime->format) > 0) { format = runtime->channels > 1 ? SB_DSP4_MODE_UNS_STEREO : SB_DSP4_MODE_UNS_MONO; } else { format = runtime->channels > 1 ? 
SB_DSP4_MODE_SIGN_STEREO : SB_DSP4_MODE_SIGN_MONO; } snd_sb16_setup_rate(chip, runtime->rate, SNDRV_PCM_STREAM_PLAYBACK); size = chip->p_dma_size = snd_pcm_lib_buffer_bytes(substream); dma = (chip->mode & SB_MODE_PLAYBACK_8) ? chip->dma8 : chip->dma16; snd_dma_program(dma, runtime->dma_addr, size, DMA_MODE_WRITE | DMA_AUTOINIT); count = snd_pcm_lib_period_bytes(substream); spin_lock_irqsave(&chip->reg_lock, flags); if (chip->mode & SB_MODE_PLAYBACK_16) { count >>= 1; count--; snd_sbdsp_command(chip, SB_DSP4_OUT16_AI); snd_sbdsp_command(chip, format); snd_sbdsp_command(chip, count & 0xff); snd_sbdsp_command(chip, count >> 8); snd_sbdsp_command(chip, SB_DSP_DMA16_OFF); } else { count--; snd_sbdsp_command(chip, SB_DSP4_OUT8_AI); snd_sbdsp_command(chip, format); snd_sbdsp_command(chip, count & 0xff); snd_sbdsp_command(chip, count >> 8); snd_sbdsp_command(chip, SB_DSP_DMA8_OFF); } spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_sb16_playback_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_sb *chip = snd_pcm_substream_chip(substream); int result = 0; spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: chip->mode |= SB_RATE_LOCK_PLAYBACK; snd_sbdsp_command(chip, chip->mode & SB_MODE_PLAYBACK_16 ? SB_DSP_DMA16_ON : SB_DSP_DMA8_ON); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: snd_sbdsp_command(chip, chip->mode & SB_MODE_PLAYBACK_16 ? SB_DSP_DMA16_OFF : SB_DSP_DMA8_OFF); /* next two lines are needed for some types of DSP4 (SB AWE 32 - 4.13) */ if (chip->mode & SB_RATE_LOCK_CAPTURE) snd_sbdsp_command(chip, chip->mode & SB_MODE_CAPTURE_16 ? 
SB_DSP_DMA16_ON : SB_DSP_DMA8_ON); chip->mode &= ~SB_RATE_LOCK_PLAYBACK; break; default: result = -EINVAL; } spin_unlock(&chip->reg_lock); return result; } static int snd_sb16_capture_prepare(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; unsigned char format; unsigned int size, count, dma; snd_sb16_csp_capture_prepare(chip, runtime); if (snd_pcm_format_unsigned(runtime->format) > 0) { format = runtime->channels > 1 ? SB_DSP4_MODE_UNS_STEREO : SB_DSP4_MODE_UNS_MONO; } else { format = runtime->channels > 1 ? SB_DSP4_MODE_SIGN_STEREO : SB_DSP4_MODE_SIGN_MONO; } snd_sb16_setup_rate(chip, runtime->rate, SNDRV_PCM_STREAM_CAPTURE); size = chip->c_dma_size = snd_pcm_lib_buffer_bytes(substream); dma = (chip->mode & SB_MODE_CAPTURE_8) ? chip->dma8 : chip->dma16; snd_dma_program(dma, runtime->dma_addr, size, DMA_MODE_READ | DMA_AUTOINIT); count = snd_pcm_lib_period_bytes(substream); spin_lock_irqsave(&chip->reg_lock, flags); if (chip->mode & SB_MODE_CAPTURE_16) { count >>= 1; count--; snd_sbdsp_command(chip, SB_DSP4_IN16_AI); snd_sbdsp_command(chip, format); snd_sbdsp_command(chip, count & 0xff); snd_sbdsp_command(chip, count >> 8); snd_sbdsp_command(chip, SB_DSP_DMA16_OFF); } else { count--; snd_sbdsp_command(chip, SB_DSP4_IN8_AI); snd_sbdsp_command(chip, format); snd_sbdsp_command(chip, count & 0xff); snd_sbdsp_command(chip, count >> 8); snd_sbdsp_command(chip, SB_DSP_DMA8_OFF); } spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_sb16_capture_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_sb *chip = snd_pcm_substream_chip(substream); int result = 0; spin_lock(&chip->reg_lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: case SNDRV_PCM_TRIGGER_RESUME: chip->mode |= SB_RATE_LOCK_CAPTURE; snd_sbdsp_command(chip, chip->mode & SB_MODE_CAPTURE_16 ? 
SB_DSP_DMA16_ON : SB_DSP_DMA8_ON); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_SUSPEND: snd_sbdsp_command(chip, chip->mode & SB_MODE_CAPTURE_16 ? SB_DSP_DMA16_OFF : SB_DSP_DMA8_OFF); /* next two lines are needed for some types of DSP4 (SB AWE 32 - 4.13) */ if (chip->mode & SB_RATE_LOCK_PLAYBACK) snd_sbdsp_command(chip, chip->mode & SB_MODE_PLAYBACK_16 ? SB_DSP_DMA16_ON : SB_DSP_DMA8_ON); chip->mode &= ~SB_RATE_LOCK_CAPTURE; break; default: result = -EINVAL; } spin_unlock(&chip->reg_lock); return result; } irqreturn_t snd_sb16dsp_interrupt(int irq, void *dev_id) { struct snd_sb *chip = dev_id; unsigned char status; int ok; spin_lock(&chip->mixer_lock); status = snd_sbmixer_read(chip, SB_DSP4_IRQSTATUS); spin_unlock(&chip->mixer_lock); if ((status & SB_IRQTYPE_MPUIN) && chip->rmidi_callback) chip->rmidi_callback(irq, chip->rmidi->private_data); if (status & SB_IRQTYPE_8BIT) { ok = 0; if (chip->mode & SB_MODE_PLAYBACK_8) { snd_pcm_period_elapsed(chip->playback_substream); snd_sb16_csp_update(chip); ok++; } if (chip->mode & SB_MODE_CAPTURE_8) { snd_pcm_period_elapsed(chip->capture_substream); ok++; } spin_lock(&chip->reg_lock); if (!ok) snd_sbdsp_command(chip, SB_DSP_DMA8_OFF); snd_sb_ack_8bit(chip); spin_unlock(&chip->reg_lock); } if (status & SB_IRQTYPE_16BIT) { ok = 0; if (chip->mode & SB_MODE_PLAYBACK_16) { snd_pcm_period_elapsed(chip->playback_substream); snd_sb16_csp_update(chip); ok++; } if (chip->mode & SB_MODE_CAPTURE_16) { snd_pcm_period_elapsed(chip->capture_substream); ok++; } spin_lock(&chip->reg_lock); if (!ok) snd_sbdsp_command(chip, SB_DSP_DMA16_OFF); snd_sb_ack_16bit(chip); spin_unlock(&chip->reg_lock); } return IRQ_HANDLED; } /* */ static snd_pcm_uframes_t snd_sb16_playback_pointer(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); unsigned int dma; size_t ptr; dma = (chip->mode & SB_MODE_PLAYBACK_8) ? 
chip->dma8 : chip->dma16; ptr = snd_dma_pointer(dma, chip->p_dma_size); return bytes_to_frames(substream->runtime, ptr); } static snd_pcm_uframes_t snd_sb16_capture_pointer(struct snd_pcm_substream *substream) { struct snd_sb *chip = snd_pcm_substream_chip(substream); unsigned int dma; size_t ptr; dma = (chip->mode & SB_MODE_CAPTURE_8) ? chip->dma8 : chip->dma16; ptr = snd_dma_pointer(dma, chip->c_dma_size); return bytes_to_frames(substream->runtime, ptr); } /* */ static struct snd_pcm_hardware snd_sb16_playback = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID), .formats = 0, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_44100, .rate_min = 4000, .rate_max = 44100, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; static struct snd_pcm_hardware snd_sb16_capture = { .info = (SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_MMAP_VALID), .formats = 0, .rates = SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_44100, .rate_min = 4000, .rate_max = 44100, .channels_min = 1, .channels_max = 2, .buffer_bytes_max = (128*1024), .period_bytes_min = 64, .period_bytes_max = (128*1024), .periods_min = 1, .periods_max = 1024, .fifo_size = 0, }; /* * open/close */ static int snd_sb16_playback_open(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; spin_lock_irqsave(&chip->open_lock, flags); if (chip->mode & SB_MODE_PLAYBACK) { spin_unlock_irqrestore(&chip->open_lock, flags); return -EAGAIN; } runtime->hw = snd_sb16_playback; /* skip if 16 bit DMA was reserved for capture */ if (chip->force_mode16 & SB_MODE_CAPTURE_16) goto __skip_16bit; if (chip->dma16 >= 0 && !(chip->mode & SB_MODE_CAPTURE_16)) { chip->mode |= SB_MODE_PLAYBACK_16; runtime->hw.formats = 
SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE; /* Vibra16X hack */ if (chip->dma16 <= 3) { runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = 64 * 1024; } else { snd_sb16_csp_playback_open(chip, runtime); } goto __open_ok; } __skip_16bit: if (chip->dma8 >= 0 && !(chip->mode & SB_MODE_CAPTURE_8)) { chip->mode |= SB_MODE_PLAYBACK_8; /* DSP v 4.xx can transfer 16bit data through 8bit DMA channel, SBHWPG 2-7 */ if (chip->dma16 < 0) { runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE; chip->mode |= SB_MODE_PLAYBACK_16; } else { runtime->hw.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8; } runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = 64 * 1024; goto __open_ok; } spin_unlock_irqrestore(&chip->open_lock, flags); return -EAGAIN; __open_ok: if (chip->hardware == SB_HW_ALS100) runtime->hw.rate_max = 48000; if (chip->hardware == SB_HW_CS5530) { runtime->hw.buffer_bytes_max = 32 * 1024; runtime->hw.periods_min = 2; runtime->hw.rate_min = 44100; } if (chip->mode & SB_RATE_LOCK) runtime->hw.rate_min = runtime->hw.rate_max = chip->locked_rate; chip->playback_substream = substream; spin_unlock_irqrestore(&chip->open_lock, flags); return 0; } static int snd_sb16_playback_close(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); snd_sb16_csp_playback_close(chip); spin_lock_irqsave(&chip->open_lock, flags); chip->playback_substream = NULL; chip->mode &= ~SB_MODE_PLAYBACK; spin_unlock_irqrestore(&chip->open_lock, flags); return 0; } static int snd_sb16_capture_open(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; spin_lock_irqsave(&chip->open_lock, flags); if (chip->mode & SB_MODE_CAPTURE) { spin_unlock_irqrestore(&chip->open_lock, flags); return -EAGAIN; } runtime->hw = snd_sb16_capture; /* skip if 16 bit DMA was reserved for playback */ if 
(chip->force_mode16 & SB_MODE_PLAYBACK_16) goto __skip_16bit; if (chip->dma16 >= 0 && !(chip->mode & SB_MODE_PLAYBACK_16)) { chip->mode |= SB_MODE_CAPTURE_16; runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE; /* Vibra16X hack */ if (chip->dma16 <= 3) { runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = 64 * 1024; } else { snd_sb16_csp_capture_open(chip, runtime); } goto __open_ok; } __skip_16bit: if (chip->dma8 >= 0 && !(chip->mode & SB_MODE_PLAYBACK_8)) { chip->mode |= SB_MODE_CAPTURE_8; /* DSP v 4.xx can transfer 16bit data through 8bit DMA channel, SBHWPG 2-7 */ if (chip->dma16 < 0) { runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_U16_LE; chip->mode |= SB_MODE_CAPTURE_16; } else { runtime->hw.formats = SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S8; } runtime->hw.buffer_bytes_max = runtime->hw.period_bytes_max = 64 * 1024; goto __open_ok; } spin_unlock_irqrestore(&chip->open_lock, flags); return -EAGAIN; __open_ok: if (chip->hardware == SB_HW_ALS100) runtime->hw.rate_max = 48000; if (chip->hardware == SB_HW_CS5530) { runtime->hw.buffer_bytes_max = 32 * 1024; runtime->hw.periods_min = 2; runtime->hw.rate_min = 44100; } if (chip->mode & SB_RATE_LOCK) runtime->hw.rate_min = runtime->hw.rate_max = chip->locked_rate; chip->capture_substream = substream; spin_unlock_irqrestore(&chip->open_lock, flags); return 0; } static int snd_sb16_capture_close(struct snd_pcm_substream *substream) { unsigned long flags; struct snd_sb *chip = snd_pcm_substream_chip(substream); snd_sb16_csp_capture_close(chip); spin_lock_irqsave(&chip->open_lock, flags); chip->capture_substream = NULL; chip->mode &= ~SB_MODE_CAPTURE; spin_unlock_irqrestore(&chip->open_lock, flags); return 0; } /* * DMA control interface */ static int snd_sb16_set_dma_mode(struct snd_sb *chip, int what) { if (chip->dma8 < 0 || chip->dma16 < 0) { if (snd_BUG_ON(what)) return -EINVAL; return 0; } if (what == 0) { chip->force_mode16 = 0; } else if (what == 1) { 
chip->force_mode16 = SB_MODE_PLAYBACK_16; } else if (what == 2) { chip->force_mode16 = SB_MODE_CAPTURE_16; } else { return -EINVAL; } return 0; } static int snd_sb16_get_dma_mode(struct snd_sb *chip) { if (chip->dma8 < 0 || chip->dma16 < 0) return 0; switch (chip->force_mode16) { case SB_MODE_PLAYBACK_16: return 1; case SB_MODE_CAPTURE_16: return 2; default: return 0; } } static int snd_sb16_dma_control_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { static char *texts[3] = { "Auto", "Playback", "Capture" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 3; if (uinfo->value.enumerated.item > 2) uinfo->value.enumerated.item = 2; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_sb16_dma_control_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sb *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; spin_lock_irqsave(&chip->reg_lock, flags); ucontrol->value.enumerated.item[0] = snd_sb16_get_dma_mode(chip); spin_unlock_irqrestore(&chip->reg_lock, flags); return 0; } static int snd_sb16_dma_control_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_sb *chip = snd_kcontrol_chip(kcontrol); unsigned long flags; unsigned char nval, oval; int change; if ((nval = ucontrol->value.enumerated.item[0]) > 2) return -EINVAL; spin_lock_irqsave(&chip->reg_lock, flags); oval = snd_sb16_get_dma_mode(chip); change = nval != oval; snd_sb16_set_dma_mode(chip, nval); spin_unlock_irqrestore(&chip->reg_lock, flags); return change; } static struct snd_kcontrol_new snd_sb16_dma_control = { .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = "16-bit DMA Allocation", .info = snd_sb16_dma_control_info, .get = snd_sb16_dma_control_get, .put = snd_sb16_dma_control_put }; /* * Initialization part */ int snd_sb16dsp_configure(struct snd_sb * chip) { unsigned long flags; unsigned char irqreg = 0, dmareg = 0, 
mpureg; unsigned char realirq, realdma, realmpureg; /* note: mpu register should be present only on SB16 Vibra soundcards */ // printk(KERN_DEBUG "codec->irq=%i, codec->dma8=%i, codec->dma16=%i\n", chip->irq, chip->dma8, chip->dma16); spin_lock_irqsave(&chip->mixer_lock, flags); mpureg = snd_sbmixer_read(chip, SB_DSP4_MPUSETUP) & ~0x06; spin_unlock_irqrestore(&chip->mixer_lock, flags); switch (chip->irq) { case 2: case 9: irqreg |= SB_IRQSETUP_IRQ9; break; case 5: irqreg |= SB_IRQSETUP_IRQ5; break; case 7: irqreg |= SB_IRQSETUP_IRQ7; break; case 10: irqreg |= SB_IRQSETUP_IRQ10; break; default: return -EINVAL; } if (chip->dma8 >= 0) { switch (chip->dma8) { case 0: dmareg |= SB_DMASETUP_DMA0; break; case 1: dmareg |= SB_DMASETUP_DMA1; break; case 3: dmareg |= SB_DMASETUP_DMA3; break; default: return -EINVAL; } } if (chip->dma16 >= 0 && chip->dma16 != chip->dma8) { switch (chip->dma16) { case 5: dmareg |= SB_DMASETUP_DMA5; break; case 6: dmareg |= SB_DMASETUP_DMA6; break; case 7: dmareg |= SB_DMASETUP_DMA7; break; default: return -EINVAL; } } switch (chip->mpu_port) { case 0x300: mpureg |= 0x04; break; case 0x330: mpureg |= 0x00; break; default: mpureg |= 0x02; /* disable MPU */ } spin_lock_irqsave(&chip->mixer_lock, flags); snd_sbmixer_write(chip, SB_DSP4_IRQSETUP, irqreg); realirq = snd_sbmixer_read(chip, SB_DSP4_IRQSETUP); snd_sbmixer_write(chip, SB_DSP4_DMASETUP, dmareg); realdma = snd_sbmixer_read(chip, SB_DSP4_DMASETUP); snd_sbmixer_write(chip, SB_DSP4_MPUSETUP, mpureg); realmpureg = snd_sbmixer_read(chip, SB_DSP4_MPUSETUP); spin_unlock_irqrestore(&chip->mixer_lock, flags); if ((~realirq) & irqreg || (~realdma) & dmareg) { snd_printk(KERN_ERR "SB16 [0x%lx]: unable to set DMA & IRQ (PnP device?)\n", chip->port); snd_printk(KERN_ERR "SB16 [0x%lx]: wanted: irqreg=0x%x, dmareg=0x%x, mpureg = 0x%x\n", chip->port, realirq, realdma, realmpureg); snd_printk(KERN_ERR "SB16 [0x%lx]: got: irqreg=0x%x, dmareg=0x%x, mpureg = 0x%x\n", chip->port, irqreg, dmareg, mpureg); 
return -ENODEV; } return 0; } static struct snd_pcm_ops snd_sb16_playback_ops = { .open = snd_sb16_playback_open, .close = snd_sb16_playback_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sb16_hw_params, .hw_free = snd_sb16_hw_free, .prepare = snd_sb16_playback_prepare, .trigger = snd_sb16_playback_trigger, .pointer = snd_sb16_playback_pointer, }; static struct snd_pcm_ops snd_sb16_capture_ops = { .open = snd_sb16_capture_open, .close = snd_sb16_capture_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_sb16_hw_params, .hw_free = snd_sb16_hw_free, .prepare = snd_sb16_capture_prepare, .trigger = snd_sb16_capture_trigger, .pointer = snd_sb16_capture_pointer, }; int snd_sb16dsp_pcm(struct snd_sb * chip, int device, struct snd_pcm ** rpcm) { struct snd_card *card = chip->card; struct snd_pcm *pcm; int err; if (rpcm) *rpcm = NULL; if ((err = snd_pcm_new(card, "SB16 DSP", device, 1, 1, &pcm)) < 0) return err; sprintf(pcm->name, "DSP v%i.%i", chip->version >> 8, chip->version & 0xff); pcm->info_flags = SNDRV_PCM_INFO_JOINT_DUPLEX; pcm->private_data = chip; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_sb16_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_sb16_capture_ops); if (chip->dma16 >= 0 && chip->dma8 != chip->dma16) snd_ctl_add(card, snd_ctl_new1(&snd_sb16_dma_control, chip)); else pcm->info_flags = SNDRV_PCM_INFO_HALF_DUPLEX; snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV, snd_dma_isa_data(), 64*1024, 128*1024); if (rpcm) *rpcm = pcm; return 0; } const struct snd_pcm_ops *snd_sb16dsp_get_pcm_ops(int direction) { return direction == SNDRV_PCM_STREAM_PLAYBACK ? 
&snd_sb16_playback_ops : &snd_sb16_capture_ops; } EXPORT_SYMBOL(snd_sb16dsp_pcm); EXPORT_SYMBOL(snd_sb16dsp_get_pcm_ops); EXPORT_SYMBOL(snd_sb16dsp_configure); EXPORT_SYMBOL(snd_sb16dsp_interrupt); /* * INIT part */ static int __init alsa_sb16_init(void) { return 0; } static void __exit alsa_sb16_exit(void) { } module_init(alsa_sb16_init) module_exit(alsa_sb16_exit)
gpl-2.0
TeamHorizon/android_kernel_oneplus_msm8974
net/batman-adv/originator.c
4808
16881
/* * Copyright (C) 2009-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA * */ #include "main.h" #include "originator.h" #include "hash.h" #include "translation-table.h" #include "routing.h" #include "gateway_client.h" #include "hard-interface.h" #include "unicast.h" #include "soft-interface.h" static void purge_orig(struct work_struct *work); static void start_purge_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->orig_work, purge_orig); queue_delayed_work(bat_event_workqueue, &bat_priv->orig_work, 1 * HZ); } /* returns 1 if they are the same originator */ static int compare_orig(const struct hlist_node *node, const void *data2) { const void *data1 = container_of(node, struct orig_node, hash_entry); return (memcmp(data1, data2, ETH_ALEN) == 0 ? 
1 : 0); } int originator_init(struct bat_priv *bat_priv) { if (bat_priv->orig_hash) return 1; bat_priv->orig_hash = hash_new(1024); if (!bat_priv->orig_hash) goto err; start_purge_timer(bat_priv); return 1; err: return 0; } void neigh_node_free_ref(struct neigh_node *neigh_node) { if (atomic_dec_and_test(&neigh_node->refcount)) kfree_rcu(neigh_node, rcu); } /* increases the refcounter of a found router */ struct neigh_node *orig_node_get_router(struct orig_node *orig_node) { struct neigh_node *router; rcu_read_lock(); router = rcu_dereference(orig_node->router); if (router && !atomic_inc_not_zero(&router->refcount)) router = NULL; rcu_read_unlock(); return router; } struct neigh_node *create_neighbor(struct orig_node *orig_node, struct orig_node *orig_neigh_node, const uint8_t *neigh, struct hard_iface *if_incoming) { struct bat_priv *bat_priv = netdev_priv(if_incoming->soft_iface); struct neigh_node *neigh_node; bat_dbg(DBG_BATMAN, bat_priv, "Creating new last-hop neighbor of originator\n"); neigh_node = kzalloc(sizeof(*neigh_node), GFP_ATOMIC); if (!neigh_node) return NULL; INIT_HLIST_NODE(&neigh_node->list); INIT_LIST_HEAD(&neigh_node->bonding_list); spin_lock_init(&neigh_node->tq_lock); memcpy(neigh_node->addr, neigh, ETH_ALEN); neigh_node->orig_node = orig_neigh_node; neigh_node->if_incoming = if_incoming; /* extra reference for return */ atomic_set(&neigh_node->refcount, 2); spin_lock_bh(&orig_node->neigh_list_lock); hlist_add_head_rcu(&neigh_node->list, &orig_node->neigh_list); spin_unlock_bh(&orig_node->neigh_list_lock); return neigh_node; } static void orig_node_free_rcu(struct rcu_head *rcu) { struct hlist_node *node, *node_tmp; struct neigh_node *neigh_node, *tmp_neigh_node; struct orig_node *orig_node; orig_node = container_of(rcu, struct orig_node, rcu); spin_lock_bh(&orig_node->neigh_list_lock); /* for all bonding members ... 
*/ list_for_each_entry_safe(neigh_node, tmp_neigh_node, &orig_node->bond_list, bonding_list) { list_del_rcu(&neigh_node->bonding_list); neigh_node_free_ref(neigh_node); } /* for all neighbors towards this originator ... */ hlist_for_each_entry_safe(neigh_node, node, node_tmp, &orig_node->neigh_list, list) { hlist_del_rcu(&neigh_node->list); neigh_node_free_ref(neigh_node); } spin_unlock_bh(&orig_node->neigh_list_lock); frag_list_free(&orig_node->frag_list); tt_global_del_orig(orig_node->bat_priv, orig_node, "originator timed out"); kfree(orig_node->tt_buff); kfree(orig_node->bcast_own); kfree(orig_node->bcast_own_sum); kfree(orig_node); } void orig_node_free_ref(struct orig_node *orig_node) { if (atomic_dec_and_test(&orig_node->refcount)) call_rcu(&orig_node->rcu, orig_node_free_rcu); } void originator_free(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->orig_hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct orig_node *orig_node; uint32_t i; if (!hash) return; cancel_delayed_work_sync(&bat_priv->orig_work); bat_priv->orig_hash = NULL; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(orig_node, node, node_tmp, head, hash_entry) { hlist_del_rcu(node); orig_node_free_ref(orig_node); } spin_unlock_bh(list_lock); } hash_destroy(hash); } /* this function finds or creates an originator entry for the given * address if it does not exits */ struct orig_node *get_orig_node(struct bat_priv *bat_priv, const uint8_t *addr) { struct orig_node *orig_node; int size; int hash_added; orig_node = orig_hash_find(bat_priv, addr); if (orig_node) return orig_node; bat_dbg(DBG_BATMAN, bat_priv, "Creating new originator: %pM\n", addr); orig_node = kzalloc(sizeof(*orig_node), GFP_ATOMIC); if (!orig_node) return NULL; INIT_HLIST_HEAD(&orig_node->neigh_list); INIT_LIST_HEAD(&orig_node->bond_list); 
spin_lock_init(&orig_node->ogm_cnt_lock); spin_lock_init(&orig_node->bcast_seqno_lock); spin_lock_init(&orig_node->neigh_list_lock); spin_lock_init(&orig_node->tt_buff_lock); /* extra reference for return */ atomic_set(&orig_node->refcount, 2); orig_node->tt_initialised = false; orig_node->tt_poss_change = false; orig_node->bat_priv = bat_priv; memcpy(orig_node->orig, addr, ETH_ALEN); orig_node->router = NULL; orig_node->tt_crc = 0; atomic_set(&orig_node->last_ttvn, 0); orig_node->tt_buff = NULL; orig_node->tt_buff_len = 0; atomic_set(&orig_node->tt_size, 0); orig_node->bcast_seqno_reset = jiffies - 1 - msecs_to_jiffies(RESET_PROTECTION_MS); orig_node->batman_seqno_reset = jiffies - 1 - msecs_to_jiffies(RESET_PROTECTION_MS); atomic_set(&orig_node->bond_candidates, 0); size = bat_priv->num_ifaces * sizeof(unsigned long) * NUM_WORDS; orig_node->bcast_own = kzalloc(size, GFP_ATOMIC); if (!orig_node->bcast_own) goto free_orig_node; size = bat_priv->num_ifaces * sizeof(uint8_t); orig_node->bcast_own_sum = kzalloc(size, GFP_ATOMIC); INIT_LIST_HEAD(&orig_node->frag_list); orig_node->last_frag_packet = 0; if (!orig_node->bcast_own_sum) goto free_bcast_own; hash_added = hash_add(bat_priv->orig_hash, compare_orig, choose_orig, orig_node, &orig_node->hash_entry); if (hash_added != 0) goto free_bcast_own_sum; return orig_node; free_bcast_own_sum: kfree(orig_node->bcast_own_sum); free_bcast_own: kfree(orig_node->bcast_own); free_orig_node: kfree(orig_node); return NULL; } static bool purge_orig_neighbors(struct bat_priv *bat_priv, struct orig_node *orig_node, struct neigh_node **best_neigh_node) { struct hlist_node *node, *node_tmp; struct neigh_node *neigh_node; bool neigh_purged = false; *best_neigh_node = NULL; spin_lock_bh(&orig_node->neigh_list_lock); /* for all neighbors towards this originator ... 
*/ hlist_for_each_entry_safe(neigh_node, node, node_tmp, &orig_node->neigh_list, list) { if ((has_timed_out(neigh_node->last_valid, PURGE_TIMEOUT)) || (neigh_node->if_incoming->if_status == IF_INACTIVE) || (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) { if ((neigh_node->if_incoming->if_status == IF_INACTIVE) || (neigh_node->if_incoming->if_status == IF_NOT_IN_USE) || (neigh_node->if_incoming->if_status == IF_TO_BE_REMOVED)) bat_dbg(DBG_BATMAN, bat_priv, "neighbor purge: originator %pM, neighbor: %pM, iface: %s\n", orig_node->orig, neigh_node->addr, neigh_node->if_incoming->net_dev->name); else bat_dbg(DBG_BATMAN, bat_priv, "neighbor timeout: originator %pM, neighbor: %pM, last_valid: %lu\n", orig_node->orig, neigh_node->addr, (neigh_node->last_valid / HZ)); neigh_purged = true; hlist_del_rcu(&neigh_node->list); bonding_candidate_del(orig_node, neigh_node); neigh_node_free_ref(neigh_node); } else { if ((!*best_neigh_node) || (neigh_node->tq_avg > (*best_neigh_node)->tq_avg)) *best_neigh_node = neigh_node; } } spin_unlock_bh(&orig_node->neigh_list_lock); return neigh_purged; } static bool purge_orig_node(struct bat_priv *bat_priv, struct orig_node *orig_node) { struct neigh_node *best_neigh_node; if (has_timed_out(orig_node->last_valid, 2 * PURGE_TIMEOUT)) { bat_dbg(DBG_BATMAN, bat_priv, "Originator timeout: originator %pM, last_valid %lu\n", orig_node->orig, (orig_node->last_valid / HZ)); return true; } else { if (purge_orig_neighbors(bat_priv, orig_node, &best_neigh_node)) update_route(bat_priv, orig_node, best_neigh_node); } return false; } static void _purge_orig(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->orig_hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; spinlock_t *list_lock; /* spinlock to protect write access */ struct orig_node *orig_node; uint32_t i; if (!hash) return; /* for all origins... 
*/ for (i = 0; i < hash->size; i++) { head = &hash->table[i]; list_lock = &hash->list_locks[i]; spin_lock_bh(list_lock); hlist_for_each_entry_safe(orig_node, node, node_tmp, head, hash_entry) { if (purge_orig_node(bat_priv, orig_node)) { if (orig_node->gw_flags) gw_node_delete(bat_priv, orig_node); hlist_del_rcu(node); orig_node_free_ref(orig_node); continue; } if (has_timed_out(orig_node->last_frag_packet, FRAG_TIMEOUT)) frag_list_free(&orig_node->frag_list); } spin_unlock_bh(list_lock); } gw_node_purge(bat_priv); gw_election(bat_priv); softif_neigh_purge(bat_priv); } static void purge_orig(struct work_struct *work) { struct delayed_work *delayed_work = container_of(work, struct delayed_work, work); struct bat_priv *bat_priv = container_of(delayed_work, struct bat_priv, orig_work); _purge_orig(bat_priv); start_purge_timer(bat_priv); } void purge_orig_ref(struct bat_priv *bat_priv) { _purge_orig(bat_priv); } int orig_seq_print_text(struct seq_file *seq, void *offset) { struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); struct hashtable_t *hash = bat_priv->orig_hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; struct hard_iface *primary_if; struct orig_node *orig_node; struct neigh_node *neigh_node, *neigh_node_tmp; int batman_count = 0; int last_seen_secs; int last_seen_msecs; uint32_t i; int ret = 0; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) { ret = seq_printf(seq, "BATMAN mesh %s disabled - please specify interfaces to enable it\n", net_dev->name); goto out; } if (primary_if->if_status != IF_ACTIVE) { ret = seq_printf(seq, "BATMAN mesh %s disabled - primary interface not active\n", net_dev->name); goto out; } seq_printf(seq, "[B.A.T.M.A.N. 
adv %s, MainIF/MAC: %s/%pM (%s)]\n", SOURCE_VERSION, primary_if->net_dev->name, primary_if->net_dev->dev_addr, net_dev->name); seq_printf(seq, " %-15s %s (%s/%i) %17s [%10s]: %20s ...\n", "Originator", "last-seen", "#", TQ_MAX_VALUE, "Nexthop", "outgoingIF", "Potential nexthops"); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { neigh_node = orig_node_get_router(orig_node); if (!neigh_node) continue; if (neigh_node->tq_avg == 0) goto next; last_seen_secs = jiffies_to_msecs(jiffies - orig_node->last_valid) / 1000; last_seen_msecs = jiffies_to_msecs(jiffies - orig_node->last_valid) % 1000; seq_printf(seq, "%pM %4i.%03is (%3i) %pM [%10s]:", orig_node->orig, last_seen_secs, last_seen_msecs, neigh_node->tq_avg, neigh_node->addr, neigh_node->if_incoming->net_dev->name); hlist_for_each_entry_rcu(neigh_node_tmp, node_tmp, &orig_node->neigh_list, list) { seq_printf(seq, " %pM (%3i)", neigh_node_tmp->addr, neigh_node_tmp->tq_avg); } seq_printf(seq, "\n"); batman_count++; next: neigh_node_free_ref(neigh_node); } rcu_read_unlock(); } if (batman_count == 0) seq_printf(seq, "No batman nodes in range ...\n"); out: if (primary_if) hardif_free_ref(primary_if); return ret; } static int orig_node_add_if(struct orig_node *orig_node, int max_if_num) { void *data_ptr; data_ptr = kmalloc(max_if_num * sizeof(unsigned long) * NUM_WORDS, GFP_ATOMIC); if (!data_ptr) return -1; memcpy(data_ptr, orig_node->bcast_own, (max_if_num - 1) * sizeof(unsigned long) * NUM_WORDS); kfree(orig_node->bcast_own); orig_node->bcast_own = data_ptr; data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); if (!data_ptr) return -1; memcpy(data_ptr, orig_node->bcast_own_sum, (max_if_num - 1) * sizeof(uint8_t)); kfree(orig_node->bcast_own_sum); orig_node->bcast_own_sum = data_ptr; return 0; } int orig_hash_add_if(struct hard_iface *hard_iface, int max_if_num) { struct bat_priv *bat_priv = 
netdev_priv(hard_iface->soft_iface); struct hashtable_t *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; struct orig_node *orig_node; uint32_t i; int ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { spin_lock_bh(&orig_node->ogm_cnt_lock); ret = orig_node_add_if(orig_node, max_if_num); spin_unlock_bh(&orig_node->ogm_cnt_lock); if (ret == -1) goto err; } rcu_read_unlock(); } return 0; err: rcu_read_unlock(); return -ENOMEM; } static int orig_node_del_if(struct orig_node *orig_node, int max_if_num, int del_if_num) { void *data_ptr = NULL; int chunk_size; /* last interface was removed */ if (max_if_num == 0) goto free_bcast_own; chunk_size = sizeof(unsigned long) * NUM_WORDS; data_ptr = kmalloc(max_if_num * chunk_size, GFP_ATOMIC); if (!data_ptr) return -1; /* copy first part */ memcpy(data_ptr, orig_node->bcast_own, del_if_num * chunk_size); /* copy second part */ memcpy((char *)data_ptr + del_if_num * chunk_size, orig_node->bcast_own + ((del_if_num + 1) * chunk_size), (max_if_num - del_if_num) * chunk_size); free_bcast_own: kfree(orig_node->bcast_own); orig_node->bcast_own = data_ptr; if (max_if_num == 0) goto free_own_sum; data_ptr = kmalloc(max_if_num * sizeof(uint8_t), GFP_ATOMIC); if (!data_ptr) return -1; memcpy(data_ptr, orig_node->bcast_own_sum, del_if_num * sizeof(uint8_t)); memcpy((char *)data_ptr + del_if_num * sizeof(uint8_t), orig_node->bcast_own_sum + ((del_if_num + 1) * sizeof(uint8_t)), (max_if_num - del_if_num) * sizeof(uint8_t)); free_own_sum: kfree(orig_node->bcast_own_sum); orig_node->bcast_own_sum = data_ptr; return 0; } int orig_hash_del_if(struct hard_iface *hard_iface, int max_if_num) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct hashtable_t *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head 
*head; struct hard_iface *hard_iface_tmp; struct orig_node *orig_node; uint32_t i; int ret; /* resize all orig nodes because orig_node->bcast_own(_sum) depend on * if_num */ for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { spin_lock_bh(&orig_node->ogm_cnt_lock); ret = orig_node_del_if(orig_node, max_if_num, hard_iface->if_num); spin_unlock_bh(&orig_node->ogm_cnt_lock); if (ret == -1) goto err; } rcu_read_unlock(); } /* renumber remaining batman interfaces _inside_ of orig_hash_lock */ rcu_read_lock(); list_for_each_entry_rcu(hard_iface_tmp, &hardif_list, list) { if (hard_iface_tmp->if_status == IF_NOT_IN_USE) continue; if (hard_iface == hard_iface_tmp) continue; if (hard_iface->soft_iface != hard_iface_tmp->soft_iface) continue; if (hard_iface_tmp->if_num > hard_iface->if_num) hard_iface_tmp->if_num--; } rcu_read_unlock(); hard_iface->if_num = -1; return 0; err: rcu_read_unlock(); return -ENOMEM; }
gpl-2.0
garynych/hammerhead_kernel
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
4808
7021
/******************************************************************************* This contains the functions to handle the platform driver. Copyright (C) 2007-2011 STMicroelectronics Ltd This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include <linux/platform_device.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_net.h> #include "stmmac.h" #ifdef CONFIG_OF static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat, const char **mac) { struct device_node *np = pdev->dev.of_node; if (!np) return -ENODEV; *mac = of_get_mac_address(np); plat->interface = of_get_phy_mode(np); plat->mdio_bus_data = devm_kzalloc(&pdev->dev, sizeof(struct stmmac_mdio_bus_data), GFP_KERNEL); /* * Currently only the properties needed on SPEAr600 * are provided. All other properties should be added * once needed on other platforms. 
*/ if (of_device_is_compatible(np, "st,spear600-gmac")) { plat->pbl = 8; plat->has_gmac = 1; plat->pmt = 1; } return 0; } #else static int __devinit stmmac_probe_config_dt(struct platform_device *pdev, struct plat_stmmacenet_data *plat, const char **mac) { return -ENOSYS; } #endif /* CONFIG_OF */ /** * stmmac_pltfr_probe * @pdev: platform device pointer * Description: platform_device probe function. It allocates * the necessary resources and invokes the main to init * the net device, register the mdio bus etc. */ static int stmmac_pltfr_probe(struct platform_device *pdev) { int ret = 0; struct resource *res; void __iomem *addr = NULL; struct stmmac_priv *priv = NULL; struct plat_stmmacenet_data *plat_dat = NULL; const char *mac = NULL; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) return -ENODEV; if (!request_mem_region(res->start, resource_size(res), pdev->name)) { pr_err("%s: ERROR: memory allocation failed" "cannot get the I/O addr 0x%x\n", __func__, (unsigned int)res->start); return -EBUSY; } addr = ioremap(res->start, resource_size(res)); if (!addr) { pr_err("%s: ERROR: memory mapping failed", __func__); ret = -ENOMEM; goto out_release_region; } if (pdev->dev.of_node) { plat_dat = devm_kzalloc(&pdev->dev, sizeof(struct plat_stmmacenet_data), GFP_KERNEL); if (!plat_dat) { pr_err("%s: ERROR: no memory", __func__); ret = -ENOMEM; goto out_unmap; } ret = stmmac_probe_config_dt(pdev, plat_dat, &mac); if (ret) { pr_err("%s: main dt probe failed", __func__); goto out_unmap; } } else { plat_dat = pdev->dev.platform_data; } /* Custom initialisation (if needed)*/ if (plat_dat->init) { ret = plat_dat->init(pdev); if (unlikely(ret)) goto out_unmap; } priv = stmmac_dvr_probe(&(pdev->dev), plat_dat, addr); if (!priv) { pr_err("%s: main driver probe failed", __func__); goto out_unmap; } /* Get MAC address if available (DT) */ if (mac) memcpy(priv->dev->dev_addr, mac, ETH_ALEN); /* Get the MAC information */ priv->dev->irq = platform_get_irq_byname(pdev, 
"macirq"); if (priv->dev->irq == -ENXIO) { pr_err("%s: ERROR: MAC IRQ configuration " "information not found\n", __func__); ret = -ENXIO; goto out_unmap; } /* * On some platforms e.g. SPEAr the wake up irq differs from the mac irq * The external wake up irq can be passed through the platform code * named as "eth_wake_irq" * * In case the wake up interrupt is not passed from the platform * so the driver will continue to use the mac irq (ndev->irq) */ priv->wol_irq = platform_get_irq_byname(pdev, "eth_wake_irq"); if (priv->wol_irq == -ENXIO) priv->wol_irq = priv->dev->irq; platform_set_drvdata(pdev, priv->dev); pr_debug("STMMAC platform driver registration completed"); return 0; out_unmap: iounmap(addr); platform_set_drvdata(pdev, NULL); out_release_region: release_mem_region(res->start, resource_size(res)); return ret; } /** * stmmac_pltfr_remove * @pdev: platform device pointer * Description: this function calls the main to free the net resources * and calls the platforms hook and release the resources (e.g. mem). 
*/ static int stmmac_pltfr_remove(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct stmmac_priv *priv = netdev_priv(ndev); struct resource *res; int ret = stmmac_dvr_remove(ndev); if (priv->plat->exit) priv->plat->exit(pdev); if (priv->plat->exit) priv->plat->exit(pdev); platform_set_drvdata(pdev, NULL); iounmap((void *)priv->ioaddr); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(res->start, resource_size(res)); return ret; } #ifdef CONFIG_PM static int stmmac_pltfr_suspend(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); return stmmac_suspend(ndev); } static int stmmac_pltfr_resume(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); return stmmac_resume(ndev); } int stmmac_pltfr_freeze(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); return stmmac_freeze(ndev); } int stmmac_pltfr_restore(struct device *dev) { struct net_device *ndev = dev_get_drvdata(dev); return stmmac_restore(ndev); } static const struct dev_pm_ops stmmac_pltfr_pm_ops = { .suspend = stmmac_pltfr_suspend, .resume = stmmac_pltfr_resume, .freeze = stmmac_pltfr_freeze, .thaw = stmmac_pltfr_restore, .restore = stmmac_pltfr_restore, }; #else static const struct dev_pm_ops stmmac_pltfr_pm_ops; #endif /* CONFIG_PM */ static const struct of_device_id stmmac_dt_ids[] = { { .compatible = "st,spear600-gmac", }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, stmmac_dt_ids); static struct platform_driver stmmac_driver = { .probe = stmmac_pltfr_probe, .remove = stmmac_pltfr_remove, .driver = { .name = STMMAC_RESOURCE_NAME, .owner = THIS_MODULE, .pm = &stmmac_pltfr_pm_ops, .of_match_table = of_match_ptr(stmmac_dt_ids), }, }; module_platform_driver(stmmac_driver); MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet PLATFORM driver"); MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>"); MODULE_LICENSE("GPL");
gpl-2.0
Ozric/kernel_asus_OzricErnel
drivers/scsi/pas16.c
5064
18258
#define AUTOSENSE #define PSEUDO_DMA #define FOO #define UNSAFE /* Not unsafe for PAS16 -- use it */ #define PDEBUG 0 /* * This driver adapted from Drew Eckhardt's Trantor T128 driver * * Copyright 1993, Drew Eckhardt * Visionary Computing * (Unix and Linux consulting and custom programming) * drew@colorado.edu * +1 (303) 666-5836 * * ( Based on T128 - DISTRIBUTION RELEASE 3. ) * * Modified to work with the Pro Audio Spectrum/Studio 16 * by John Weidman. * * * For more information, please consult * * Media Vision * (510) 770-8600 * (800) 348-7116 * * and * * NCR 5380 Family * SCSI Protocol Controller * Databook * * NCR Microelectronics * 1635 Aeroplaza Drive * Colorado Springs, CO 80916 * 1+ (719) 578-3400 * 1+ (800) 334-5454 */ /* * Options : * AUTOSENSE - if defined, REQUEST SENSE will be performed automatically * for commands that return with a CHECK CONDITION status. * * LIMIT_TRANSFERSIZE - if defined, limit the pseudo-dma transfers to 512 * bytes at a time. Since interrupts are disabled by default during * these transfers, we might need this to give reasonable interrupt * service time if the transfer size gets too large. * * PSEUDO_DMA - enables PSEUDO-DMA hardware, should give a 3-4X performance * increase compared to polled I/O. * * PARITY - enable parity checking. Not supported. * * SCSI2 - enable support for SCSI-II tagged queueing. Untested. * * UNSAFE - leave interrupts enabled during pseudo-DMA transfers. This * parameter comes from the NCR5380 code. It is NOT unsafe with * the PAS16 and you should use it. If you don't you will have * a problem with dropped characters during high speed * communications during SCSI transfers. If you really don't * want to use UNSAFE you can try defining LIMIT_TRANSFERSIZE or * twiddle with the transfer size in the high level code. * * USLEEP - enable support for devices that don't disconnect. Untested. * * The card is detected and initialized in one of several ways : * 1. 
Autoprobe (default) - There are many different models of * the Pro Audio Spectrum/Studio 16, and I only have one of * them, so this may require a little tweaking. An interrupt * is triggered to autoprobe for the interrupt line. Note: * with the newer model boards, the interrupt is set via * software after reset using the default_irq for the * current board number. * * 2. With command line overrides - pas16=port,irq may be * used on the LILO command line to override the defaults. * * 3. With the PAS16_OVERRIDE compile time define. This is * specified as an array of address, irq tuples. Ie, for * one board at the default 0x388 address, IRQ10, I could say * -DPAS16_OVERRIDE={{0x388, 10}} * NOTE: Untested. * * 4. When included as a module, with arguments passed on the command line: * pas16_irq=xx the interrupt * pas16_addr=xx the port * e.g. "modprobe pas16 pas16_addr=0x388 pas16_irq=5" * * Note that if the override methods are used, place holders must * be specified for other boards in the system. * * * Configuration notes : * The current driver does not support interrupt sharing with the * sound portion of the card. If you use the same irq for the * scsi port and sound you will have problems. Either use * a different irq for the scsi port or don't use interrupts * for the scsi port. * * If you have problems with your card not being recognized, use * the LILO command line override. Try to get it recognized without * interrupts. Ie, for a board at the default 0x388 base port, * boot: linux pas16=0x388,255 * * SCSI_IRQ_NONE (255) should be specified for no interrupt, * IRQ_AUTO (254) to autoprobe for an IRQ line if overridden * on the command line. 
* * (IRQ_AUTO == 254, SCSI_IRQ_NONE == 255 in NCR5380.h) */ #include <linux/module.h> #include <asm/system.h> #include <linux/signal.h> #include <linux/proc_fs.h> #include <asm/io.h> #include <asm/dma.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/stat.h> #include <linux/init.h> #include "scsi.h" #include <scsi/scsi_host.h> #include "pas16.h" #define AUTOPROBE_IRQ #include "NCR5380.h" static int pas_maxi = 0; static int pas_wmaxi = 0; static unsigned short pas16_addr = 0; static int pas16_irq = 0; static const int scsi_irq_translate[] = { 0, 0, 1, 2, 3, 4, 5, 6, 0, 0, 7, 8, 9, 0, 10, 11 }; /* The default_irqs array contains values used to set the irq into the * board via software (as must be done on newer model boards without * irq jumpers on the board). The first value in the array will be * assigned to logical board 0, the next to board 1, etc. */ static int default_irqs[] __initdata = { PAS16_DEFAULT_BOARD_1_IRQ, PAS16_DEFAULT_BOARD_2_IRQ, PAS16_DEFAULT_BOARD_3_IRQ, PAS16_DEFAULT_BOARD_4_IRQ }; static struct override { unsigned short io_port; int irq; } overrides #ifdef PAS16_OVERRIDE [] __initdata = PAS16_OVERRIDE; #else [4] __initdata = {{0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO}, {0,IRQ_AUTO}}; #endif #define NO_OVERRIDES ARRAY_SIZE(overrides) static struct base { unsigned short io_port; int noauto; } bases[] __initdata = { {PAS16_DEFAULT_BASE_1, 0}, {PAS16_DEFAULT_BASE_2, 0}, {PAS16_DEFAULT_BASE_3, 0}, {PAS16_DEFAULT_BASE_4, 0} }; #define NO_BASES ARRAY_SIZE(bases) static const unsigned short pas16_offset[ 8 ] = { 0x1c00, /* OUTPUT_DATA_REG */ 0x1c01, /* INITIATOR_COMMAND_REG */ 0x1c02, /* MODE_REG */ 0x1c03, /* TARGET_COMMAND_REG */ 0x3c00, /* STATUS_REG ro, SELECT_ENABLE_REG wo */ 0x3c01, /* BUS_AND_STATUS_REG ro, START_DMA_SEND_REG wo */ 0x3c02, /* INPUT_DATA_REGISTER ro, (N/A on PAS16 ?) 
* START_DMA_TARGET_RECEIVE_REG wo */ 0x3c03, /* RESET_PARITY_INTERRUPT_REG ro, * START_DMA_INITIATOR_RECEIVE_REG wo */ }; /*----------------------------------------------------------------*/ /* the following will set the monitor border color (useful to find where something crashed or gets stuck at */ /* 1 = blue 2 = green 3 = cyan 4 = red 5 = magenta 6 = yellow 7 = white */ #if 1 #define rtrc(i) {inb(0x3da); outb(0x31, 0x3c0); outb((i), 0x3c0);} #else #define rtrc(i) {} #endif /* * Function : enable_board( int board_num, unsigned short port ) * * Purpose : set address in new model board * * Inputs : board_num - logical board number 0-3, port - base address * */ static void __init enable_board( int board_num, unsigned short port ) { outb( 0xbc + board_num, MASTER_ADDRESS_PTR ); outb( port >> 2, MASTER_ADDRESS_PTR ); } /* * Function : init_board( unsigned short port, int irq ) * * Purpose : Set the board up to handle the SCSI interface * * Inputs : port - base address of the board, * irq - irq to assign to the SCSI port * force_irq - set it even if it conflicts with sound driver * */ static void __init init_board( unsigned short io_port, int irq, int force_irq ) { unsigned int tmp; unsigned int pas_irq_code; /* Initialize the SCSI part of the board */ outb( 0x30, io_port + P_TIMEOUT_COUNTER_REG ); /* Timeout counter */ outb( 0x01, io_port + P_TIMEOUT_STATUS_REG_OFFSET ); /* Reset TC */ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */ NCR5380_read( RESET_PARITY_INTERRUPT_REG ); /* Set the SCSI interrupt pointer without mucking up the sound * interrupt pointer in the same byte. */ pas_irq_code = ( irq < 16 ) ? 
scsi_irq_translate[irq] : 0; tmp = inb( io_port + IO_CONFIG_3 ); if( (( tmp & 0x0f ) == pas_irq_code) && pas_irq_code > 0 && !force_irq ) { printk( "pas16: WARNING: Can't use same irq as sound " "driver -- interrupts disabled\n" ); /* Set up the drive parameters, disable 5380 interrupts */ outb( 0x4d, io_port + SYS_CONFIG_4 ); } else { tmp = ( tmp & 0x0f ) | ( pas_irq_code << 4 ); outb( tmp, io_port + IO_CONFIG_3 ); /* Set up the drive parameters and enable 5380 interrupts */ outb( 0x6d, io_port + SYS_CONFIG_4 ); } } /* * Function : pas16_hw_detect( unsigned short board_num ) * * Purpose : determine if a pas16 board is present * * Inputs : board_num - logical board number ( 0 - 3 ) * * Returns : 0 if board not found, 1 if found. */ static int __init pas16_hw_detect( unsigned short board_num ) { unsigned char board_rev, tmp; unsigned short io_port = bases[ board_num ].io_port; /* See if we can find a PAS16 board at the address associated * with this logical board number. */ /* First, attempt to take a newer model board out of reset and * give it a base address. This shouldn't affect older boards. */ enable_board( board_num, io_port ); /* Now see if it looks like a PAS16 board */ board_rev = inb( io_port + PCB_CONFIG ); if( board_rev == 0xff ) return 0; tmp = board_rev ^ 0xe0; outb( tmp, io_port + PCB_CONFIG ); tmp = inb( io_port + PCB_CONFIG ); outb( board_rev, io_port + PCB_CONFIG ); if( board_rev != tmp ) /* Not a PAS-16 */ return 0; if( ( inb( io_port + OPERATION_MODE_1 ) & 0x03 ) != 0x03 ) return 0; /* return if no SCSI interface found */ /* Mediavision has some new model boards that return ID bits * that indicate a SCSI interface, but they're not (LMS). We'll * put in an additional test to try to weed them out. */ outb( 0x01, io_port + WAIT_STATE ); /* 1 Wait state */ NCR5380_write( MODE_REG, 0x20 ); /* Is it really SCSI? */ if( NCR5380_read( MODE_REG ) != 0x20 ) /* Write to a reg. */ return 0; /* and try to read */ NCR5380_write( MODE_REG, 0x00 ); /* it back. 
*/ if( NCR5380_read( MODE_REG ) != 0x00 ) return 0; return 1; } /* * Function : pas16_setup(char *str, int *ints) * * Purpose : LILO command line initialization of the overrides array, * * Inputs : str - unused, ints - array of integer parameters with ints[0] * equal to the number of ints. * */ void __init pas16_setup(char *str, int *ints) { static int commandline_current = 0; int i; if (ints[0] != 2) printk("pas16_setup : usage pas16=io_port,irq\n"); else if (commandline_current < NO_OVERRIDES) { overrides[commandline_current].io_port = (unsigned short) ints[1]; overrides[commandline_current].irq = ints[2]; for (i = 0; i < NO_BASES; ++i) if (bases[i].io_port == (unsigned short) ints[1]) { bases[i].noauto = 1; break; } ++commandline_current; } } /* * Function : int pas16_detect(struct scsi_host_template * tpnt) * * Purpose : detects and initializes PAS16 controllers * that were autoprobed, overridden on the LILO command line, * or specified at compile time. * * Inputs : tpnt - template for this SCSI adapter. * * Returns : 1 if a host adapter was found, 0 if not. * */ int __init pas16_detect(struct scsi_host_template * tpnt) { static int current_override = 0; static unsigned short current_base = 0; struct Scsi_Host *instance; unsigned short io_port; int count; tpnt->proc_name = "pas16"; tpnt->proc_info = &pas16_proc_info; if (pas16_addr != 0) { overrides[0].io_port = pas16_addr; /* * This is how we avoid seeing more than * one host adapter at the same I/O port. * Cribbed shamelessly from pas16_setup(). 
*/ for (count = 0; count < NO_BASES; ++count) if (bases[count].io_port == pas16_addr) { bases[count].noauto = 1; break; } } if (pas16_irq != 0) overrides[0].irq = pas16_irq; for (count = 0; current_override < NO_OVERRIDES; ++current_override) { io_port = 0; if (overrides[current_override].io_port) { io_port = overrides[current_override].io_port; enable_board( current_override, io_port ); init_board( io_port, overrides[current_override].irq, 1 ); } else for (; !io_port && (current_base < NO_BASES); ++current_base) { #if (PDEBUG & PDEBUG_INIT) printk("scsi-pas16 : probing io_port %04x\n", (unsigned int) bases[current_base].io_port); #endif if ( !bases[current_base].noauto && pas16_hw_detect( current_base ) ){ io_port = bases[current_base].io_port; init_board( io_port, default_irqs[ current_base ], 0 ); #if (PDEBUG & PDEBUG_INIT) printk("scsi-pas16 : detected board.\n"); #endif } } #if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT) printk("scsi-pas16 : io_port = %04x\n", (unsigned int) io_port); #endif if (!io_port) break; instance = scsi_register (tpnt, sizeof(struct NCR5380_hostdata)); if(instance == NULL) break; instance->io_port = io_port; NCR5380_init(instance, 0); if (overrides[current_override].irq != IRQ_AUTO) instance->irq = overrides[current_override].irq; else instance->irq = NCR5380_probe_irq(instance, PAS16_IRQS); if (instance->irq != SCSI_IRQ_NONE) if (request_irq(instance->irq, pas16_intr, IRQF_DISABLED, "pas16", instance)) { printk("scsi%d : IRQ%d not free, interrupts disabled\n", instance->host_no, instance->irq); instance->irq = SCSI_IRQ_NONE; } if (instance->irq == SCSI_IRQ_NONE) { printk("scsi%d : interrupts not enabled. 
for better interactive performance,\n", instance->host_no); printk("scsi%d : please jumper the board for a free IRQ.\n", instance->host_no); /* Disable 5380 interrupts, leave drive params the same */ outb( 0x4d, io_port + SYS_CONFIG_4 ); outb( (inb(io_port + IO_CONFIG_3) & 0x0f), io_port + IO_CONFIG_3 ); } #if defined(PDEBUG) && (PDEBUG & PDEBUG_INIT) printk("scsi%d : irq = %d\n", instance->host_no, instance->irq); #endif printk("scsi%d : at 0x%04x", instance->host_no, (int) instance->io_port); if (instance->irq == SCSI_IRQ_NONE) printk (" interrupts disabled"); else printk (" irq %d", instance->irq); printk(" options CAN_QUEUE=%d CMD_PER_LUN=%d release=%d", CAN_QUEUE, CMD_PER_LUN, PAS16_PUBLIC_RELEASE); NCR5380_print_options(instance); printk("\n"); ++current_override; ++count; } return count; } /* * Function : int pas16_biosparam(Disk *disk, struct block_device *dev, int *ip) * * Purpose : Generates a BIOS / DOS compatible H-C-S mapping for * the specified device / size. * * Inputs : size = size of device in sectors (512 bytes), dev = block device * major / minor, ip[] = {heads, sectors, cylinders} * * Returns : always 0 (success), initializes ip * */ /* * XXX Most SCSI boards use this mapping, I could be incorrect. Some one * using hard disks on a trantor should verify that this mapping corresponds * to that used by the BIOS / ASPI driver by running the linux fdisk program * and matching the H_C_S coordinates to what DOS uses. */ int pas16_biosparam(struct scsi_device *sdev, struct block_device *dev, sector_t capacity, int * ip) { int size = capacity; ip[0] = 64; ip[1] = 32; ip[2] = size >> 11; /* I think I have it as /(32*64) */ if( ip[2] > 1024 ) { /* yes, >, not >= */ ip[0]=255; ip[1]=63; ip[2]=size/(63*255); if( ip[2] > 1023 ) /* yes >1023... 
*/ ip[2] = 1023; } return 0; } /* * Function : int NCR5380_pread (struct Scsi_Host *instance, * unsigned char *dst, int len) * * Purpose : Fast 5380 pseudo-dma read function, transfers len bytes to * dst * * Inputs : dst = destination, len = length in bytes * * Returns : 0 on success, non zero on a failure such as a watchdog * timeout. */ static inline int NCR5380_pread (struct Scsi_Host *instance, unsigned char *dst, int len) { register unsigned char *d = dst; register unsigned short reg = (unsigned short) (instance->io_port + P_DATA_REG_OFFSET); register int i = len; int ii = 0; while ( !(inb(instance->io_port + P_STATUS_REG_OFFSET) & P_ST_RDY) ) ++ii; insb( reg, d, i ); if ( inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) { outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET); printk("scsi%d : watchdog timer fired in NCR5380_pread()\n", instance->host_no); return -1; } if (ii > pas_maxi) pas_maxi = ii; return 0; } /* * Function : int NCR5380_pwrite (struct Scsi_Host *instance, * unsigned char *src, int len) * * Purpose : Fast 5380 pseudo-dma write function, transfers len bytes from * src * * Inputs : src = source, len = length in bytes * * Returns : 0 on success, non zero on a failure such as a watchdog * timeout. 
*/ static inline int NCR5380_pwrite (struct Scsi_Host *instance, unsigned char *src, int len) { register unsigned char *s = src; register unsigned short reg = (instance->io_port + P_DATA_REG_OFFSET); register int i = len; int ii = 0; while ( !((inb(instance->io_port + P_STATUS_REG_OFFSET)) & P_ST_RDY) ) ++ii; outsb( reg, s, i ); if (inb(instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET) & P_TS_TIM) { outb( P_TS_CT, instance->io_port + P_TIMEOUT_STATUS_REG_OFFSET); printk("scsi%d : watchdog timer fired in NCR5380_pwrite()\n", instance->host_no); return -1; } if (ii > pas_maxi) pas_wmaxi = ii; return 0; } #include "NCR5380.c" static int pas16_release(struct Scsi_Host *shost) { if (shost->irq) free_irq(shost->irq, shost); NCR5380_exit(shost); if (shost->dma_channel != 0xff) free_dma(shost->dma_channel); if (shost->io_port && shost->n_io_port) release_region(shost->io_port, shost->n_io_port); scsi_unregister(shost); return 0; } static struct scsi_host_template driver_template = { .name = "Pro Audio Spectrum-16 SCSI", .detect = pas16_detect, .release = pas16_release, .queuecommand = pas16_queue_command, .eh_abort_handler = pas16_abort, .eh_bus_reset_handler = pas16_bus_reset, .bios_param = pas16_biosparam, .can_queue = CAN_QUEUE, .this_id = 7, .sg_tablesize = SG_ALL, .cmd_per_lun = CMD_PER_LUN, .use_clustering = DISABLE_CLUSTERING, }; #include "scsi_module.c" #ifdef MODULE module_param(pas16_addr, ushort, 0); module_param(pas16_irq, int, 0); #endif MODULE_LICENSE("GPL");
gpl-2.0
GalaxyTab4/bliss_kernel_samsung_s3ve3g
drivers/i2c/busses/i2c-sis5595.c
5064
11864
/* Copyright (c) 1998, 1999 Frodo Looijaard <frodol@dds.nl> and Philip Edelbrock <phil@netroedge.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Note: we assume there can only be one SIS5595 with one SMBus interface */ /* Note: all have mfr. ID 0x1039. SUPPORTED PCI ID 5595 0008 Note: these chips contain a 0008 device which is incompatible with the 5595. We recognize these by the presence of the listed "blacklist" PCI ID and refuse to load. 
NOT SUPPORTED PCI ID BLACKLIST PCI ID 540 0008 0540 550 0008 0550 5513 0008 5511 5581 0008 5597 5582 0008 5597 5597 0008 5597 5598 0008 5597/5598 630 0008 0630 645 0008 0645 646 0008 0646 648 0008 0648 650 0008 0650 651 0008 0651 730 0008 0730 735 0008 0735 745 0008 0745 746 0008 0746 */ /* TO DO: * Add Block Transfers (ugly, but supported by the adapter) * Add adapter resets */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/acpi.h> #include <linux/io.h> static int blacklist[] = { PCI_DEVICE_ID_SI_540, PCI_DEVICE_ID_SI_550, PCI_DEVICE_ID_SI_630, PCI_DEVICE_ID_SI_645, PCI_DEVICE_ID_SI_646, PCI_DEVICE_ID_SI_648, PCI_DEVICE_ID_SI_650, PCI_DEVICE_ID_SI_651, PCI_DEVICE_ID_SI_730, PCI_DEVICE_ID_SI_735, PCI_DEVICE_ID_SI_745, PCI_DEVICE_ID_SI_746, PCI_DEVICE_ID_SI_5511, /* 5513 chip has the 0008 device but that ID shows up in other chips so we use the 5511 ID for recognition */ PCI_DEVICE_ID_SI_5597, PCI_DEVICE_ID_SI_5598, 0, /* terminates the list */ }; /* Length of ISA address segment */ #define SIS5595_EXTENT 8 /* SIS5595 SMBus registers */ #define SMB_STS_LO 0x00 #define SMB_STS_HI 0x01 #define SMB_CTL_LO 0x02 #define SMB_CTL_HI 0x03 #define SMB_ADDR 0x04 #define SMB_CMD 0x05 #define SMB_PCNT 0x06 #define SMB_CNT 0x07 #define SMB_BYTE 0x08 #define SMB_DEV 0x10 #define SMB_DB0 0x11 #define SMB_DB1 0x12 #define SMB_HAA 0x13 /* PCI Address Constants */ #define SMB_INDEX 0x38 #define SMB_DAT 0x39 #define SIS5595_ENABLE_REG 0x40 #define ACPI_BASE 0x90 /* Other settings */ #define MAX_TIMEOUT 500 /* SIS5595 constants */ #define SIS5595_QUICK 0x00 #define SIS5595_BYTE 0x02 #define SIS5595_BYTE_DATA 0x04 #define SIS5595_WORD_DATA 0x06 #define SIS5595_PROC_CALL 0x08 #define SIS5595_BLOCK_DATA 0x0A /* insmod parameters */ /* If force_addr is set to anything different from 0, we forcibly enable the device at the given address. 
*/ static u16 force_addr; module_param(force_addr, ushort, 0); MODULE_PARM_DESC(force_addr, "Initialize the base address of the i2c controller"); static struct pci_driver sis5595_driver; static unsigned short sis5595_base; static struct pci_dev *sis5595_pdev; static u8 sis5595_read(u8 reg) { outb(reg, sis5595_base + SMB_INDEX); return inb(sis5595_base + SMB_DAT); } static void sis5595_write(u8 reg, u8 data) { outb(reg, sis5595_base + SMB_INDEX); outb(data, sis5595_base + SMB_DAT); } static int __devinit sis5595_setup(struct pci_dev *SIS5595_dev) { u16 a; u8 val; int *i; int retval; /* Look for imposters */ for (i = blacklist; *i != 0; i++) { struct pci_dev *dev; dev = pci_get_device(PCI_VENDOR_ID_SI, *i, NULL); if (dev) { dev_err(&SIS5595_dev->dev, "Looked for SIS5595 but found unsupported device %.4x\n", *i); pci_dev_put(dev); return -ENODEV; } } /* Determine the address of the SMBus areas */ pci_read_config_word(SIS5595_dev, ACPI_BASE, &sis5595_base); if (sis5595_base == 0 && force_addr == 0) { dev_err(&SIS5595_dev->dev, "ACPI base address uninitialized - upgrade BIOS or use force_addr=0xaddr\n"); return -ENODEV; } if (force_addr) sis5595_base = force_addr & ~(SIS5595_EXTENT - 1); dev_dbg(&SIS5595_dev->dev, "ACPI Base address: %04x\n", sis5595_base); /* NB: We grab just the two SMBus registers here, but this may still * interfere with ACPI :-( */ retval = acpi_check_region(sis5595_base + SMB_INDEX, 2, sis5595_driver.name); if (retval) return retval; if (!request_region(sis5595_base + SMB_INDEX, 2, sis5595_driver.name)) { dev_err(&SIS5595_dev->dev, "SMBus registers 0x%04x-0x%04x already in use!\n", sis5595_base + SMB_INDEX, sis5595_base + SMB_INDEX + 1); return -ENODEV; } if (force_addr) { dev_info(&SIS5595_dev->dev, "forcing ISA address 0x%04X\n", sis5595_base); if (pci_write_config_word(SIS5595_dev, ACPI_BASE, sis5595_base) != PCIBIOS_SUCCESSFUL) goto error; if (pci_read_config_word(SIS5595_dev, ACPI_BASE, &a) != PCIBIOS_SUCCESSFUL) goto error; if ((a & 
~(SIS5595_EXTENT - 1)) != sis5595_base) { /* doesn't work for some chips! */ dev_err(&SIS5595_dev->dev, "force address failed - not supported?\n"); goto error; } } if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val) != PCIBIOS_SUCCESSFUL) goto error; if ((val & 0x80) == 0) { dev_info(&SIS5595_dev->dev, "enabling ACPI\n"); if (pci_write_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, val | 0x80) != PCIBIOS_SUCCESSFUL) goto error; if (pci_read_config_byte(SIS5595_dev, SIS5595_ENABLE_REG, &val) != PCIBIOS_SUCCESSFUL) goto error; if ((val & 0x80) == 0) { /* doesn't work for some chips? */ dev_err(&SIS5595_dev->dev, "ACPI enable failed - not supported?\n"); goto error; } } /* Everything is happy */ return 0; error: release_region(sis5595_base + SMB_INDEX, 2); return -ENODEV; } static int sis5595_transaction(struct i2c_adapter *adap) { int temp; int result = 0; int timeout = 0; /* Make sure the SMBus host is ready to start transmitting */ temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8); if (temp != 0x00) { dev_dbg(&adap->dev, "SMBus busy (%04x). Resetting...\n", temp); sis5595_write(SMB_STS_LO, temp & 0xff); sis5595_write(SMB_STS_HI, temp >> 8); if ((temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8)) != 0x00) { dev_dbg(&adap->dev, "Failed! (%02x)\n", temp); return -EBUSY; } else { dev_dbg(&adap->dev, "Successful!\n"); } } /* start the transaction by setting bit 4 */ sis5595_write(SMB_CTL_LO, sis5595_read(SMB_CTL_LO) | 0x10); /* We will always wait for a fraction of a second! */ do { msleep(1); temp = sis5595_read(SMB_STS_LO); } while (!(temp & 0x40) && (timeout++ < MAX_TIMEOUT)); /* If the SMBus is still busy, we give up */ if (timeout > MAX_TIMEOUT) { dev_dbg(&adap->dev, "SMBus Timeout!\n"); result = -ETIMEDOUT; } if (temp & 0x10) { dev_dbg(&adap->dev, "Error: Failed bus transaction\n"); result = -ENXIO; } if (temp & 0x20) { dev_err(&adap->dev, "Bus collision! 
SMBus may be locked until " "next hard reset (or not...)\n"); /* Clock stops and slave is stuck in mid-transmission */ result = -EIO; } temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8); if (temp != 0x00) { sis5595_write(SMB_STS_LO, temp & 0xff); sis5595_write(SMB_STS_HI, temp >> 8); } temp = sis5595_read(SMB_STS_LO) + (sis5595_read(SMB_STS_HI) << 8); if (temp != 0x00) dev_dbg(&adap->dev, "Failed reset at end of transaction (%02x)\n", temp); return result; } /* Return negative errno on error. */ static s32 sis5595_access(struct i2c_adapter *adap, u16 addr, unsigned short flags, char read_write, u8 command, int size, union i2c_smbus_data *data) { int status; switch (size) { case I2C_SMBUS_QUICK: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); size = SIS5595_QUICK; break; case I2C_SMBUS_BYTE: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); if (read_write == I2C_SMBUS_WRITE) sis5595_write(SMB_CMD, command); size = SIS5595_BYTE; break; case I2C_SMBUS_BYTE_DATA: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis5595_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) sis5595_write(SMB_BYTE, data->byte); size = SIS5595_BYTE_DATA; break; case I2C_SMBUS_PROC_CALL: case I2C_SMBUS_WORD_DATA: sis5595_write(SMB_ADDR, ((addr & 0x7f) << 1) | (read_write & 0x01)); sis5595_write(SMB_CMD, command); if (read_write == I2C_SMBUS_WRITE) { sis5595_write(SMB_BYTE, data->word & 0xff); sis5595_write(SMB_BYTE + 1, (data->word & 0xff00) >> 8); } size = (size == I2C_SMBUS_PROC_CALL) ? 
SIS5595_PROC_CALL : SIS5595_WORD_DATA; break; default: dev_warn(&adap->dev, "Unsupported transaction %d\n", size); return -EOPNOTSUPP; } sis5595_write(SMB_CTL_LO, ((size & 0x0E))); status = sis5595_transaction(adap); if (status) return status; if ((size != SIS5595_PROC_CALL) && ((read_write == I2C_SMBUS_WRITE) || (size == SIS5595_QUICK))) return 0; switch (size) { case SIS5595_BYTE: case SIS5595_BYTE_DATA: data->byte = sis5595_read(SMB_BYTE); break; case SIS5595_WORD_DATA: case SIS5595_PROC_CALL: data->word = sis5595_read(SMB_BYTE) + (sis5595_read(SMB_BYTE + 1) << 8); break; } return 0; } static u32 sis5595_func(struct i2c_adapter *adapter) { return I2C_FUNC_SMBUS_QUICK | I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_PROC_CALL; } static const struct i2c_algorithm smbus_algorithm = { .smbus_xfer = sis5595_access, .functionality = sis5595_func, }; static struct i2c_adapter sis5595_adapter = { .owner = THIS_MODULE, .class = I2C_CLASS_HWMON | I2C_CLASS_SPD, .algo = &smbus_algorithm, }; static DEFINE_PCI_DEVICE_TABLE(sis5595_ids) = { { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, { 0, } }; MODULE_DEVICE_TABLE (pci, sis5595_ids); static int __devinit sis5595_probe(struct pci_dev *dev, const struct pci_device_id *id) { int err; if (sis5595_setup(dev)) { dev_err(&dev->dev, "SIS5595 not detected, module not inserted.\n"); return -ENODEV; } /* set up the sysfs linkage to our parent device */ sis5595_adapter.dev.parent = &dev->dev; snprintf(sis5595_adapter.name, sizeof(sis5595_adapter.name), "SMBus SIS5595 adapter at %04x", sis5595_base + SMB_INDEX); err = i2c_add_adapter(&sis5595_adapter); if (err) { release_region(sis5595_base + SMB_INDEX, 2); return err; } /* Always return failure here. This is to allow other drivers to bind * to this pci device. We don't really want to have control over the * pci device, we only wanted to read as few register values from it. 
*/ sis5595_pdev = pci_dev_get(dev); return -ENODEV; } static struct pci_driver sis5595_driver = { .name = "sis5595_smbus", .id_table = sis5595_ids, .probe = sis5595_probe, }; static int __init i2c_sis5595_init(void) { return pci_register_driver(&sis5595_driver); } static void __exit i2c_sis5595_exit(void) { pci_unregister_driver(&sis5595_driver); if (sis5595_pdev) { i2c_del_adapter(&sis5595_adapter); release_region(sis5595_base + SMB_INDEX, 2); pci_dev_put(sis5595_pdev); sis5595_pdev = NULL; } } MODULE_AUTHOR("Frodo Looijaard <frodol@dds.nl>"); MODULE_DESCRIPTION("SIS5595 SMBus driver"); MODULE_LICENSE("GPL"); module_init(i2c_sis5595_init); module_exit(i2c_sis5595_exit);
gpl-2.0
dekadev/Deka-kernel-CM10.1-3.4
fs/proc/proc_devtree.c
8136
5399
/*
 * proc_devtree.c - handles /proc/device-tree
 *
 * Copyright 1997 Paul Mackerras
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/stat.h>
#include <linux/string.h>
#include <linux/of.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/prom.h>
#include <asm/uaccess.h>
#include "internal.h"

/* Record the proc entry back-pointer on the device node, where the
 * architecture supports live device-tree fixups; a no-op otherwise. */
static inline void set_node_proc_entry(struct device_node *np,
				       struct proc_dir_entry *de)
{
#ifdef HAVE_ARCH_DEVTREE_FIXUPS
	np->pde = de;
#endif
}

/* Root of the exported tree: /proc/device-tree. */
static struct proc_dir_entry *proc_device_tree;

/*
 * Supply data on a read from /proc/device-tree/node/property.
 */
static int property_proc_show(struct seq_file *m, void *v)
{
	struct property *pp = m->private;

	seq_write(m, pp->value, pp->length);
	return 0;
}

static int property_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, property_proc_show, PDE(inode)->data);
}

static const struct file_operations property_proc_fops = {
	.owner		= THIS_MODULE,
	.open		= property_proc_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/*
 * For a node with a name like "gc@10", we make symlinks called "gc"
 * and "@10" to it.
 */

/*
 * Add a property to a node.
 * Returns the created proc entry, or NULL on failure. Entries whose
 * name starts with "security-" are owner-readable only and report a
 * zero size so the property length itself is not disclosed.
 */
static struct proc_dir_entry *
__proc_device_tree_add_prop(struct proc_dir_entry *de, struct property *pp,
		const char *name)
{
	struct proc_dir_entry *ent;

	/*
	 * Unfortunately proc_register puts each new entry
	 * at the beginning of the list. So we rearrange them.
	 */
	ent = proc_create_data(name,
			       strncmp(name, "security-", 9) ? S_IRUGO : S_IRUSR,
			       de, &property_proc_fops, pp);
	if (ent == NULL)
		return NULL;

	if (!strncmp(name, "security-", 9))
		ent->size = 0; /* don't leak number of password chars */
	else
		ent->size = pp->length;

	return ent;
}

void proc_device_tree_add_prop(struct proc_dir_entry *pde, struct property *prop)
{
	__proc_device_tree_add_prop(pde, prop, prop->name);
}

void proc_device_tree_remove_prop(struct proc_dir_entry *pde,
				  struct property *prop)
{
	remove_proc_entry(prop->name, pde);
}

/* Swap the proc entry's backing data from oldprop to newprop in place;
 * the entry is located by matching its ->data against oldprop. */
void proc_device_tree_update_prop(struct proc_dir_entry *pde,
				  struct property *newprop,
				  struct property *oldprop)
{
	struct proc_dir_entry *ent;

	for (ent = pde->subdir; ent != NULL; ent = ent->next)
		if (ent->data == oldprop)
			break;
	if (ent == NULL) {
		printk(KERN_WARNING "device-tree: property \"%s\" "
		       " does not exist\n", oldprop->name);
	} else {
		ent->data = newprop;
		ent->size = newprop->length;
	}
}

/*
 * Various dodgy firmware might give us nodes and/or properties with
 * conflicting names. That's generally ok, except for exporting via /proc,
 * so munge names here to ensure they're unique.
 */

/* Return 1 if `de` already has a child entry named `name`, else 0.
 * Walks the subdir list under proc_subdir_lock. */
static int duplicate_name(struct proc_dir_entry *de, const char *name)
{
	struct proc_dir_entry *ent;
	int found = 0;

	spin_lock(&proc_subdir_lock);

	for (ent = de->subdir; ent != NULL; ent = ent->next) {
		if (strcmp(ent->name, name) == 0) {
			found = 1;
			break;
		}
	}

	spin_unlock(&proc_subdir_lock);

	return found;
}

/*
 * Produce a unique name of the form "<name>#<i>" under `de`, bumping i
 * (and reallocating the buffer if the suffix grows) until no collision
 * remains. On allocation failure the original name is returned as-is.
 * NOTE(review): the returned kmalloc'ed string is never kfree'd by
 * callers — it appears to live as long as the proc entry itself; confirm
 * this is intentional before changing ownership.
 */
static const char *fixup_name(struct device_node *np, struct proc_dir_entry *de,
		const char *name)
{
	char *fixed_name;
	int fixup_len = strlen(name) + 2 + 1; /* name + #x + \0 */
	int i = 1, size;

realloc:
	fixed_name = kmalloc(fixup_len, GFP_KERNEL);
	if (fixed_name == NULL) {
		printk(KERN_ERR "device-tree: Out of memory trying to fixup "
				"name \"%s\"\n", name);
		return name;
	}

retry:
	size = snprintf(fixed_name, fixup_len, "%s#%d", name, i);
	size++; /* account for NULL */

	if (size > fixup_len) {
		/* We ran out of space, free and reallocate. */
		kfree(fixed_name);
		fixup_len = size;
		goto realloc;
	}

	if (duplicate_name(de, fixed_name)) {
		/* Multiple duplicates. Retry with a different offset. */
		i++;
		goto retry;
	}

	printk(KERN_WARNING "device-tree: Duplicate name in %s, "
			"renamed to \"%s\"\n", np->full_name, fixed_name);

	return fixed_name;
}

/*
 * Process a node, adding entries for its children and its properties.
 * Recurses depth-first over children, then exports each property of
 * this node; duplicate names are disambiguated via fixup_name().
 */
void proc_device_tree_add_node(struct device_node *np,
			       struct proc_dir_entry *de)
{
	struct property *pp;
	struct proc_dir_entry *ent;
	struct device_node *child;
	const char *p;

	set_node_proc_entry(np, de);
	for (child = NULL; (child = of_get_next_child(np, child));) {
		/* Use everything after the last slash, or the full name */
		p = strrchr(child->full_name, '/');
		if (!p)
			p = child->full_name;
		else
			++p;

		if (duplicate_name(de, p))
			p = fixup_name(np, de, p);

		ent = proc_mkdir(p, de);
		if (ent == NULL)
			break;
		proc_device_tree_add_node(child, ent);
	}
	/* Only meaningful after an early `break`: child then still holds a
	 * reference from of_get_next_child(). On normal loop exit child is
	 * NULL and this is a no-op. */
	of_node_put(child);

	for (pp = np->properties; pp != NULL; pp = pp->next) {
		p = pp->name;

		/* '/' cannot appear in a proc entry name; skip such props. */
		if (strchr(p, '/'))
			continue;

		if (duplicate_name(de, p))
			p = fixup_name(np, de, p);

		ent = __proc_device_tree_add_prop(de, pp, p);
		if (ent == NULL)
			break;
	}
}

/*
 * Called on initialization to set up the /proc/device-tree subtree
 */
void __init proc_device_tree_init(void)
{
	struct device_node *root;

	proc_device_tree = proc_mkdir("device-tree", NULL);
	if (proc_device_tree == NULL)
		return;
	root = of_find_node_by_path("/");
	if (root == NULL) {
		pr_debug("/proc/device-tree: can't find root\n");
		return;
	}
	proc_device_tree_add_node(root, proc_device_tree);
	of_node_put(root);
}
gpl-2.0
TEAM-Gummy/android_kernel_sony_msm8x27
net/netfilter/xt_recent.c
8136
17113
/* * Copyright (c) 2006 Patrick McHardy <kaber@trash.net> * Copyright © CC Computer Consultants GmbH, 2007 - 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This is a replacement of the old ipt_recent module, which carried the * following copyright notice: * * Author: Stephen Frost <sfrost@snowman.net> * Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/random.h> #include <linux/jhash.h> #include <linux/bitops.h> #include <linux/skbuff.h> #include <linux/inet.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_recent.h> MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_recent"); MODULE_ALIAS("ip6t_recent"); static unsigned int ip_list_tot = 100; static unsigned int ip_pkt_list_tot = 20; static unsigned int ip_list_hash_size = 0; static unsigned int ip_list_perms = 0644; static unsigned int ip_list_uid = 0; static unsigned int ip_list_gid = 0; module_param(ip_list_tot, uint, 0400); module_param(ip_pkt_list_tot, uint, 0400); module_param(ip_list_hash_size, uint, 0400); module_param(ip_list_perms, uint, 0400); module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR); module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list"); MODULE_PARM_DESC(ip_pkt_list_tot, "number of 
packets per IP address to remember (max. 255)"); MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs"); MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files"); MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files"); MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files"); struct recent_entry { struct list_head list; struct list_head lru_list; union nf_inet_addr addr; u_int16_t family; u_int8_t ttl; u_int8_t index; u_int16_t nstamps; unsigned long stamps[0]; }; struct recent_table { struct list_head list; char name[XT_RECENT_NAME_LEN]; unsigned int refcnt; unsigned int entries; struct list_head lru_list; struct list_head iphash[0]; }; struct recent_net { struct list_head tables; #ifdef CONFIG_PROC_FS struct proc_dir_entry *xt_recent; #endif }; static int recent_net_id; static inline struct recent_net *recent_pernet(struct net *net) { return net_generic(net, recent_net_id); } static DEFINE_SPINLOCK(recent_lock); static DEFINE_MUTEX(recent_mutex); #ifdef CONFIG_PROC_FS static const struct file_operations recent_old_fops, recent_mt_fops; #endif static u_int32_t hash_rnd __read_mostly; static bool hash_rnd_inited __read_mostly; static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr) { return jhash_1word((__force u32)addr->ip, hash_rnd) & (ip_list_hash_size - 1); } static inline unsigned int recent_entry_hash6(const union nf_inet_addr *addr) { return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) & (ip_list_hash_size - 1); } static struct recent_entry * recent_entry_lookup(const struct recent_table *table, const union nf_inet_addr *addrp, u_int16_t family, u_int8_t ttl) { struct recent_entry *e; unsigned int h; if (family == NFPROTO_IPV4) h = recent_entry_hash4(addrp); else h = recent_entry_hash6(addrp); list_for_each_entry(e, &table->iphash[h], list) if (e->family == family && memcmp(&e->addr, addrp, sizeof(e->addr)) == 0 && (ttl == e->ttl 
|| ttl == 0 || e->ttl == 0)) return e; return NULL; } static void recent_entry_remove(struct recent_table *t, struct recent_entry *e) { list_del(&e->list); list_del(&e->lru_list); kfree(e); t->entries--; } /* * Drop entries with timestamps older then 'time'. */ static void recent_entry_reap(struct recent_table *t, unsigned long time) { struct recent_entry *e; /* * The head of the LRU list is always the oldest entry. */ e = list_entry(t->lru_list.next, struct recent_entry, lru_list); /* * The last time stamp is the most recent. */ if (time_after(time, e->stamps[e->index-1])) recent_entry_remove(t, e); } static struct recent_entry * recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr, u_int16_t family, u_int8_t ttl) { struct recent_entry *e; if (t->entries >= ip_list_tot) { e = list_entry(t->lru_list.next, struct recent_entry, lru_list); recent_entry_remove(t, e); } e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * ip_pkt_list_tot, GFP_ATOMIC); if (e == NULL) return NULL; memcpy(&e->addr, addr, sizeof(e->addr)); e->ttl = ttl; e->stamps[0] = jiffies; e->nstamps = 1; e->index = 1; e->family = family; if (family == NFPROTO_IPV4) list_add_tail(&e->list, &t->iphash[recent_entry_hash4(addr)]); else list_add_tail(&e->list, &t->iphash[recent_entry_hash6(addr)]); list_add_tail(&e->lru_list, &t->lru_list); t->entries++; return e; } static void recent_entry_update(struct recent_table *t, struct recent_entry *e) { e->index %= ip_pkt_list_tot; e->stamps[e->index++] = jiffies; if (e->index > e->nstamps) e->nstamps = e->index; list_move_tail(&e->lru_list, &t->lru_list); } static struct recent_table *recent_table_lookup(struct recent_net *recent_net, const char *name) { struct recent_table *t; list_for_each_entry(t, &recent_net->tables, list) if (!strcmp(t->name, name)) return t; return NULL; } static void recent_table_flush(struct recent_table *t) { struct recent_entry *e, *next; unsigned int i; for (i = 0; i < ip_list_hash_size; i++) list_for_each_entry_safe(e, 
next, &t->iphash[i], list) recent_entry_remove(t, e); } static bool recent_mt(const struct sk_buff *skb, struct xt_action_param *par) { struct net *net = dev_net(par->in ? par->in : par->out); struct recent_net *recent_net = recent_pernet(net); const struct xt_recent_mtinfo *info = par->matchinfo; struct recent_table *t; struct recent_entry *e; union nf_inet_addr addr = {}; u_int8_t ttl; bool ret = info->invert; if (par->family == NFPROTO_IPV4) { const struct iphdr *iph = ip_hdr(skb); if (info->side == XT_RECENT_DEST) addr.ip = iph->daddr; else addr.ip = iph->saddr; ttl = iph->ttl; } else { const struct ipv6hdr *iph = ipv6_hdr(skb); if (info->side == XT_RECENT_DEST) memcpy(&addr.in6, &iph->daddr, sizeof(addr.in6)); else memcpy(&addr.in6, &iph->saddr, sizeof(addr.in6)); ttl = iph->hop_limit; } /* use TTL as seen before forwarding */ if (par->out != NULL && skb->sk == NULL) ttl++; spin_lock_bh(&recent_lock); t = recent_table_lookup(recent_net, info->name); e = recent_entry_lookup(t, &addr, par->family, (info->check_set & XT_RECENT_TTL) ? 
ttl : 0); if (e == NULL) { if (!(info->check_set & XT_RECENT_SET)) goto out; e = recent_entry_init(t, &addr, par->family, ttl); if (e == NULL) par->hotdrop = true; ret = !ret; goto out; } if (info->check_set & XT_RECENT_SET) ret = !ret; else if (info->check_set & XT_RECENT_REMOVE) { recent_entry_remove(t, e); ret = !ret; } else if (info->check_set & (XT_RECENT_CHECK | XT_RECENT_UPDATE)) { unsigned long time = jiffies - info->seconds * HZ; unsigned int i, hits = 0; for (i = 0; i < e->nstamps; i++) { if (info->seconds && time_after(time, e->stamps[i])) continue; if (!info->hit_count || ++hits >= info->hit_count) { ret = !ret; break; } } /* info->seconds must be non-zero */ if (info->check_set & XT_RECENT_REAP) recent_entry_reap(t, time); } if (info->check_set & XT_RECENT_SET || (info->check_set & XT_RECENT_UPDATE && ret)) { recent_entry_update(t, e); e->ttl = ttl; } out: spin_unlock_bh(&recent_lock); return ret; } static int recent_mt_check(const struct xt_mtchk_param *par) { struct recent_net *recent_net = recent_pernet(par->net); const struct xt_recent_mtinfo *info = par->matchinfo; struct recent_table *t; #ifdef CONFIG_PROC_FS struct proc_dir_entry *pde; #endif unsigned i; int ret = -EINVAL; if (unlikely(!hash_rnd_inited)) { get_random_bytes(&hash_rnd, sizeof(hash_rnd)); hash_rnd_inited = true; } if (info->check_set & ~XT_RECENT_VALID_FLAGS) { pr_info("Unsupported user space flags (%08x)\n", info->check_set); return -EINVAL; } if (hweight8(info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE | XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1) return -EINVAL; if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) && (info->seconds || info->hit_count || (info->check_set & XT_RECENT_MODIFIERS))) return -EINVAL; if ((info->check_set & XT_RECENT_REAP) && !info->seconds) return -EINVAL; if (info->hit_count > ip_pkt_list_tot) { pr_info("hitcount (%u) is larger than " "packets to be remembered (%u)\n", info->hit_count, ip_pkt_list_tot); return -EINVAL; } if (info->name[0] == 
'\0' || strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN) return -EINVAL; mutex_lock(&recent_mutex); t = recent_table_lookup(recent_net, info->name); if (t != NULL) { t->refcnt++; ret = 0; goto out; } t = kzalloc(sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size, GFP_KERNEL); if (t == NULL) { ret = -ENOMEM; goto out; } t->refcnt = 1; strcpy(t->name, info->name); INIT_LIST_HEAD(&t->lru_list); for (i = 0; i < ip_list_hash_size; i++) INIT_LIST_HEAD(&t->iphash[i]); #ifdef CONFIG_PROC_FS pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent, &recent_mt_fops, t); if (pde == NULL) { kfree(t); ret = -ENOMEM; goto out; } pde->uid = ip_list_uid; pde->gid = ip_list_gid; #endif spin_lock_bh(&recent_lock); list_add_tail(&t->list, &recent_net->tables); spin_unlock_bh(&recent_lock); ret = 0; out: mutex_unlock(&recent_mutex); return ret; } static void recent_mt_destroy(const struct xt_mtdtor_param *par) { struct recent_net *recent_net = recent_pernet(par->net); const struct xt_recent_mtinfo *info = par->matchinfo; struct recent_table *t; mutex_lock(&recent_mutex); t = recent_table_lookup(recent_net, info->name); if (--t->refcnt == 0) { spin_lock_bh(&recent_lock); list_del(&t->list); spin_unlock_bh(&recent_lock); #ifdef CONFIG_PROC_FS remove_proc_entry(t->name, recent_net->xt_recent); #endif recent_table_flush(t); kfree(t); } mutex_unlock(&recent_mutex); } #ifdef CONFIG_PROC_FS struct recent_iter_state { const struct recent_table *table; unsigned int bucket; }; static void *recent_seq_start(struct seq_file *seq, loff_t *pos) __acquires(recent_lock) { struct recent_iter_state *st = seq->private; const struct recent_table *t = st->table; struct recent_entry *e; loff_t p = *pos; spin_lock_bh(&recent_lock); for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++) list_for_each_entry(e, &t->iphash[st->bucket], list) if (p-- == 0) return e; return NULL; } static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos) { struct 
recent_iter_state *st = seq->private; const struct recent_table *t = st->table; const struct recent_entry *e = v; const struct list_head *head = e->list.next; while (head == &t->iphash[st->bucket]) { if (++st->bucket >= ip_list_hash_size) return NULL; head = t->iphash[st->bucket].next; } (*pos)++; return list_entry(head, struct recent_entry, list); } static void recent_seq_stop(struct seq_file *s, void *v) __releases(recent_lock) { spin_unlock_bh(&recent_lock); } static int recent_seq_show(struct seq_file *seq, void *v) { const struct recent_entry *e = v; unsigned int i; i = (e->index - 1) % ip_pkt_list_tot; if (e->family == NFPROTO_IPV4) seq_printf(seq, "src=%pI4 ttl: %u last_seen: %lu oldest_pkt: %u", &e->addr.ip, e->ttl, e->stamps[i], e->index); else seq_printf(seq, "src=%pI6 ttl: %u last_seen: %lu oldest_pkt: %u", &e->addr.in6, e->ttl, e->stamps[i], e->index); for (i = 0; i < e->nstamps; i++) seq_printf(seq, "%s %lu", i ? "," : "", e->stamps[i]); seq_printf(seq, "\n"); return 0; } static const struct seq_operations recent_seq_ops = { .start = recent_seq_start, .next = recent_seq_next, .stop = recent_seq_stop, .show = recent_seq_show, }; static int recent_seq_open(struct inode *inode, struct file *file) { struct proc_dir_entry *pde = PDE(inode); struct recent_iter_state *st; st = __seq_open_private(file, &recent_seq_ops, sizeof(*st)); if (st == NULL) return -ENOMEM; st->table = pde->data; return 0; } static ssize_t recent_mt_proc_write(struct file *file, const char __user *input, size_t size, loff_t *loff) { const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode); struct recent_table *t = pde->data; struct recent_entry *e; char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")]; const char *c = buf; union nf_inet_addr addr = {}; u_int16_t family; bool add, succ; if (size == 0) return 0; if (size > sizeof(buf)) size = sizeof(buf); if (copy_from_user(buf, input, size) != 0) return -EFAULT; /* Strict protocol! 
*/ if (*loff != 0) return -ESPIPE; switch (*c) { case '/': /* flush table */ spin_lock_bh(&recent_lock); recent_table_flush(t); spin_unlock_bh(&recent_lock); return size; case '-': /* remove address */ add = false; break; case '+': /* add address */ add = true; break; default: pr_info("Need \"+ip\", \"-ip\" or \"/\"\n"); return -EINVAL; } ++c; --size; if (strnchr(c, size, ':') != NULL) { family = NFPROTO_IPV6; succ = in6_pton(c, size, (void *)&addr, '\n', NULL); } else { family = NFPROTO_IPV4; succ = in4_pton(c, size, (void *)&addr, '\n', NULL); } if (!succ) { pr_info("illegal address written to procfs\n"); return -EINVAL; } spin_lock_bh(&recent_lock); e = recent_entry_lookup(t, &addr, family, 0); if (e == NULL) { if (add) recent_entry_init(t, &addr, family, 0); } else { if (add) recent_entry_update(t, e); else recent_entry_remove(t, e); } spin_unlock_bh(&recent_lock); /* Note we removed one above */ *loff += size + 1; return size + 1; } static const struct file_operations recent_mt_fops = { .open = recent_seq_open, .read = seq_read, .write = recent_mt_proc_write, .release = seq_release_private, .owner = THIS_MODULE, .llseek = seq_lseek, }; static int __net_init recent_proc_net_init(struct net *net) { struct recent_net *recent_net = recent_pernet(net); recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net); if (!recent_net->xt_recent) return -ENOMEM; return 0; } static void __net_exit recent_proc_net_exit(struct net *net) { proc_net_remove(net, "xt_recent"); } #else static inline int recent_proc_net_init(struct net *net) { return 0; } static inline void recent_proc_net_exit(struct net *net) { } #endif /* CONFIG_PROC_FS */ static int __net_init recent_net_init(struct net *net) { struct recent_net *recent_net = recent_pernet(net); INIT_LIST_HEAD(&recent_net->tables); return recent_proc_net_init(net); } static void __net_exit recent_net_exit(struct net *net) { struct recent_net *recent_net = recent_pernet(net); BUG_ON(!list_empty(&recent_net->tables)); 
recent_proc_net_exit(net); } static struct pernet_operations recent_net_ops = { .init = recent_net_init, .exit = recent_net_exit, .id = &recent_net_id, .size = sizeof(struct recent_net), }; static struct xt_match recent_mt_reg[] __read_mostly = { { .name = "recent", .revision = 0, .family = NFPROTO_IPV4, .match = recent_mt, .matchsize = sizeof(struct xt_recent_mtinfo), .checkentry = recent_mt_check, .destroy = recent_mt_destroy, .me = THIS_MODULE, }, { .name = "recent", .revision = 0, .family = NFPROTO_IPV6, .match = recent_mt, .matchsize = sizeof(struct xt_recent_mtinfo), .checkentry = recent_mt_check, .destroy = recent_mt_destroy, .me = THIS_MODULE, }, }; static int __init recent_mt_init(void) { int err; if (!ip_list_tot || !ip_pkt_list_tot || ip_pkt_list_tot > 255) return -EINVAL; ip_list_hash_size = 1 << fls(ip_list_tot); err = register_pernet_subsys(&recent_net_ops); if (err) return err; err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); if (err) unregister_pernet_subsys(&recent_net_ops); return err; } static void __exit recent_mt_exit(void) { xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg)); unregister_pernet_subsys(&recent_net_ops); } module_init(recent_mt_init); module_exit(recent_mt_exit);
gpl-2.0
J-Team/android_kernel_samsung_u8500
drivers/video/cg3.c
8136
11698
/* cg3.c: CGTHREE frame buffer driver * * Copyright (C) 2003, 2006 David S. Miller (davem@davemloft.net) * Copyright (C) 1996,1998 Jakub Jelinek (jj@ultra.linux.cz) * Copyright (C) 1996 Miguel de Icaza (miguel@nuclecu.unam.mx) * Copyright (C) 1997 Eddie C. Dost (ecd@skynet.be) * * Driver layout based loosely on tgafb.c, see that file for credits. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/fb.h> #include <linux/mm.h> #include <linux/of_device.h> #include <asm/io.h> #include <asm/fbio.h> #include "sbuslib.h" /* * Local functions. */ static int cg3_setcolreg(unsigned, unsigned, unsigned, unsigned, unsigned, struct fb_info *); static int cg3_blank(int, struct fb_info *); static int cg3_mmap(struct fb_info *, struct vm_area_struct *); static int cg3_ioctl(struct fb_info *, unsigned int, unsigned long); /* * Frame buffer operations */ static struct fb_ops cg3_ops = { .owner = THIS_MODULE, .fb_setcolreg = cg3_setcolreg, .fb_blank = cg3_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_mmap = cg3_mmap, .fb_ioctl = cg3_ioctl, #ifdef CONFIG_COMPAT .fb_compat_ioctl = sbusfb_compat_ioctl, #endif }; /* Control Register Constants */ #define CG3_CR_ENABLE_INTS 0x80 #define CG3_CR_ENABLE_VIDEO 0x40 #define CG3_CR_ENABLE_TIMING 0x20 #define CG3_CR_ENABLE_CURCMP 0x10 #define CG3_CR_XTAL_MASK 0x0c #define CG3_CR_DIVISOR_MASK 0x03 /* Status Register Constants */ #define CG3_SR_PENDING_INT 0x80 #define CG3_SR_RES_MASK 0x70 #define CG3_SR_1152_900_76_A 0x40 #define CG3_SR_1152_900_76_B 0x60 #define CG3_SR_ID_MASK 0x0f #define CG3_SR_ID_COLOR 0x01 #define CG3_SR_ID_MONO 0x02 #define CG3_SR_ID_MONO_ECL 0x03 enum cg3_type { CG3_AT_66HZ = 0, CG3_AT_76HZ, CG3_RDI }; struct bt_regs { u32 addr; u32 color_map; u32 control; u32 cursor; }; struct cg3_regs { struct bt_regs cmap; u8 control; u8 status; u8 cursor_start; u8 
cursor_end; u8 h_blank_start; u8 h_blank_end; u8 h_sync_start; u8 h_sync_end; u8 comp_sync_end; u8 v_blank_start_high; u8 v_blank_start_low; u8 v_blank_end; u8 v_sync_start; u8 v_sync_end; u8 xfer_holdoff_start; u8 xfer_holdoff_end; }; /* Offset of interesting structures in the OBIO space */ #define CG3_REGS_OFFSET 0x400000UL #define CG3_RAM_OFFSET 0x800000UL struct cg3_par { spinlock_t lock; struct cg3_regs __iomem *regs; u32 sw_cmap[((256 * 3) + 3) / 4]; u32 flags; #define CG3_FLAG_BLANKED 0x00000001 #define CG3_FLAG_RDI 0x00000002 unsigned long which_io; }; /** * cg3_setcolreg - Optional function. Sets a color register. * @regno: boolean, 0 copy local, 1 get_user() function * @red: frame buffer colormap structure * @green: The green value which can be up to 16 bits wide * @blue: The blue value which can be up to 16 bits wide. * @transp: If supported the alpha value which can be up to 16 bits wide. * @info: frame buffer info structure * * The cg3 palette is loaded with 4 color values at each time * so you end up with: (rgb)(r), (gb)(rg), (b)(rgb), and so on. * We keep a sw copy of the hw cmap to assist us in this esoteric * loading procedure. */ static int cg3_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct cg3_par *par = (struct cg3_par *) info->par; struct bt_regs __iomem *bt = &par->regs->cmap; unsigned long flags; u32 *p32; u8 *p8; int count; if (regno >= 256) return 1; red >>= 8; green >>= 8; blue >>= 8; spin_lock_irqsave(&par->lock, flags); p8 = (u8 *)par->sw_cmap + (regno * 3); p8[0] = red; p8[1] = green; p8[2] = blue; #define D4M3(x) ((((x)>>2)<<1) + ((x)>>2)) /* (x/4)*3 */ #define D4M4(x) ((x)&~0x3) /* (x/4)*4 */ count = 3; p32 = &par->sw_cmap[D4M3(regno)]; sbus_writel(D4M4(regno), &bt->addr); while (count--) sbus_writel(*p32++, &bt->color_map); #undef D4M3 #undef D4M4 spin_unlock_irqrestore(&par->lock, flags); return 0; } /** * cg3_blank - Optional function. Blanks the display. 
* @blank_mode: the blank mode we want. * @info: frame buffer structure that represents a single frame buffer */ static int cg3_blank(int blank, struct fb_info *info) { struct cg3_par *par = (struct cg3_par *) info->par; struct cg3_regs __iomem *regs = par->regs; unsigned long flags; u8 val; spin_lock_irqsave(&par->lock, flags); switch (blank) { case FB_BLANK_UNBLANK: /* Unblanking */ val = sbus_readb(&regs->control); val |= CG3_CR_ENABLE_VIDEO; sbus_writeb(val, &regs->control); par->flags &= ~CG3_FLAG_BLANKED; break; case FB_BLANK_NORMAL: /* Normal blanking */ case FB_BLANK_VSYNC_SUSPEND: /* VESA blank (vsync off) */ case FB_BLANK_HSYNC_SUSPEND: /* VESA blank (hsync off) */ case FB_BLANK_POWERDOWN: /* Poweroff */ val = sbus_readb(&regs->control); val &= ~CG3_CR_ENABLE_VIDEO; sbus_writeb(val, &regs->control); par->flags |= CG3_FLAG_BLANKED; break; } spin_unlock_irqrestore(&par->lock, flags); return 0; } static struct sbus_mmap_map cg3_mmap_map[] = { { .voff = CG3_MMAP_OFFSET, .poff = CG3_RAM_OFFSET, .size = SBUS_MMAP_FBSIZE(1) }, { .size = 0 } }; static int cg3_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct cg3_par *par = (struct cg3_par *)info->par; return sbusfb_mmap_helper(cg3_mmap_map, info->fix.smem_start, info->fix.smem_len, par->which_io, vma); } static int cg3_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { return sbusfb_ioctl_helper(cmd, arg, info, FBTYPE_SUN3COLOR, 8, info->fix.smem_len); } /* * Initialisation */ static void __devinit cg3_init_fix(struct fb_info *info, int linebytes, struct device_node *dp) { strlcpy(info->fix.id, dp->name, sizeof(info->fix.id)); info->fix.type = FB_TYPE_PACKED_PIXELS; info->fix.visual = FB_VISUAL_PSEUDOCOLOR; info->fix.line_length = linebytes; info->fix.accel = FB_ACCEL_SUN_CGTHREE; } static void __devinit cg3_rdi_maybe_fixup_var(struct fb_var_screeninfo *var, struct device_node *dp) { const char *params; char *p; int ww, hh; params = of_get_property(dp, "params", NULL); if (params) { 
ww = simple_strtoul(params, &p, 10); if (ww && *p == 'x') { hh = simple_strtoul(p + 1, &p, 10); if (hh && *p == '-') { if (var->xres != ww || var->yres != hh) { var->xres = var->xres_virtual = ww; var->yres = var->yres_virtual = hh; } } } } } static u8 cg3regvals_66hz[] __devinitdata = { /* 1152 x 900, 66 Hz */ 0x14, 0xbb, 0x15, 0x2b, 0x16, 0x04, 0x17, 0x14, 0x18, 0xae, 0x19, 0x03, 0x1a, 0xa8, 0x1b, 0x24, 0x1c, 0x01, 0x1d, 0x05, 0x1e, 0xff, 0x1f, 0x01, 0x10, 0x20, 0 }; static u8 cg3regvals_76hz[] __devinitdata = { /* 1152 x 900, 76 Hz */ 0x14, 0xb7, 0x15, 0x27, 0x16, 0x03, 0x17, 0x0f, 0x18, 0xae, 0x19, 0x03, 0x1a, 0xae, 0x1b, 0x2a, 0x1c, 0x01, 0x1d, 0x09, 0x1e, 0xff, 0x1f, 0x01, 0x10, 0x24, 0 }; static u8 cg3regvals_rdi[] __devinitdata = { /* 640 x 480, cgRDI */ 0x14, 0x70, 0x15, 0x20, 0x16, 0x08, 0x17, 0x10, 0x18, 0x06, 0x19, 0x02, 0x1a, 0x31, 0x1b, 0x51, 0x1c, 0x06, 0x1d, 0x0c, 0x1e, 0xff, 0x1f, 0x01, 0x10, 0x22, 0 }; static u8 *cg3_regvals[] __devinitdata = { cg3regvals_66hz, cg3regvals_76hz, cg3regvals_rdi }; static u_char cg3_dacvals[] __devinitdata = { 4, 0xff, 5, 0x00, 6, 0x70, 7, 0x00, 0 }; static int __devinit cg3_do_default_mode(struct cg3_par *par) { enum cg3_type type; u8 *p; if (par->flags & CG3_FLAG_RDI) type = CG3_RDI; else { u8 status = sbus_readb(&par->regs->status), mon; if ((status & CG3_SR_ID_MASK) == CG3_SR_ID_COLOR) { mon = status & CG3_SR_RES_MASK; if (mon == CG3_SR_1152_900_76_A || mon == CG3_SR_1152_900_76_B) type = CG3_AT_76HZ; else type = CG3_AT_66HZ; } else { printk(KERN_ERR "cgthree: can't handle SR %02x\n", status); return -EINVAL; } } for (p = cg3_regvals[type]; *p; p += 2) { u8 __iomem *regp = &((u8 __iomem *)par->regs)[p[0]]; sbus_writeb(p[1], regp); } for (p = cg3_dacvals; *p; p += 2) { u8 __iomem *regp; regp = (u8 __iomem *)&par->regs->cmap.addr; sbus_writeb(p[0], regp); regp = (u8 __iomem *)&par->regs->cmap.control; sbus_writeb(p[1], regp); } return 0; } static int __devinit cg3_probe(struct platform_device *op) { struct 
device_node *dp = op->dev.of_node; struct fb_info *info; struct cg3_par *par; int linebytes, err; info = framebuffer_alloc(sizeof(struct cg3_par), &op->dev); err = -ENOMEM; if (!info) goto out_err; par = info->par; spin_lock_init(&par->lock); info->fix.smem_start = op->resource[0].start; par->which_io = op->resource[0].flags & IORESOURCE_BITS; sbusfb_fill_var(&info->var, dp, 8); info->var.red.length = 8; info->var.green.length = 8; info->var.blue.length = 8; if (!strcmp(dp->name, "cgRDI")) par->flags |= CG3_FLAG_RDI; if (par->flags & CG3_FLAG_RDI) cg3_rdi_maybe_fixup_var(&info->var, dp); linebytes = of_getintprop_default(dp, "linebytes", info->var.xres); info->fix.smem_len = PAGE_ALIGN(linebytes * info->var.yres); par->regs = of_ioremap(&op->resource[0], CG3_REGS_OFFSET, sizeof(struct cg3_regs), "cg3 regs"); if (!par->regs) goto out_release_fb; info->flags = FBINFO_DEFAULT; info->fbops = &cg3_ops; info->screen_base = of_ioremap(&op->resource[0], CG3_RAM_OFFSET, info->fix.smem_len, "cg3 ram"); if (!info->screen_base) goto out_unmap_regs; cg3_blank(FB_BLANK_UNBLANK, info); if (!of_find_property(dp, "width", NULL)) { err = cg3_do_default_mode(par); if (err) goto out_unmap_screen; } if (fb_alloc_cmap(&info->cmap, 256, 0)) goto out_unmap_screen; fb_set_cmap(&info->cmap, info); cg3_init_fix(info, linebytes, dp); err = register_framebuffer(info); if (err < 0) goto out_dealloc_cmap; dev_set_drvdata(&op->dev, info); printk(KERN_INFO "%s: cg3 at %lx:%lx\n", dp->full_name, par->which_io, info->fix.smem_start); return 0; out_dealloc_cmap: fb_dealloc_cmap(&info->cmap); out_unmap_screen: of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); out_unmap_regs: of_iounmap(&op->resource[0], par->regs, sizeof(struct cg3_regs)); out_release_fb: framebuffer_release(info); out_err: return err; } static int __devexit cg3_remove(struct platform_device *op) { struct fb_info *info = dev_get_drvdata(&op->dev); struct cg3_par *par = info->par; unregister_framebuffer(info); 
fb_dealloc_cmap(&info->cmap); of_iounmap(&op->resource[0], par->regs, sizeof(struct cg3_regs)); of_iounmap(&op->resource[0], info->screen_base, info->fix.smem_len); framebuffer_release(info); dev_set_drvdata(&op->dev, NULL); return 0; } static const struct of_device_id cg3_match[] = { { .name = "cgthree", }, { .name = "cgRDI", }, {}, }; MODULE_DEVICE_TABLE(of, cg3_match); static struct platform_driver cg3_driver = { .driver = { .name = "cg3", .owner = THIS_MODULE, .of_match_table = cg3_match, }, .probe = cg3_probe, .remove = __devexit_p(cg3_remove), }; static int __init cg3_init(void) { if (fb_get_options("cg3fb", NULL)) return -ENODEV; return platform_driver_register(&cg3_driver); } static void __exit cg3_exit(void) { platform_driver_unregister(&cg3_driver); } module_init(cg3_init); module_exit(cg3_exit); MODULE_DESCRIPTION("framebuffer driver for CGthree chipsets"); MODULE_AUTHOR("David S. Miller <davem@davemloft.net>"); MODULE_VERSION("2.0"); MODULE_LICENSE("GPL");
gpl-2.0
flxo/android_kernel_samsung_skomer
net/netfilter/xt_recent.c
8136
17113
/* * Copyright (c) 2006 Patrick McHardy <kaber@trash.net> * Copyright © CC Computer Consultants GmbH, 2007 - 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This is a replacement of the old ipt_recent module, which carried the * following copyright notice: * * Author: Stephen Frost <sfrost@snowman.net> * Copyright 2002-2003, Stephen Frost, 2.5.x port by laforge@netfilter.org */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/list.h> #include <linux/random.h> #include <linux/jhash.h> #include <linux/bitops.h> #include <linux/skbuff.h> #include <linux/inet.h> #include <linux/slab.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_recent.h> MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_AUTHOR("Jan Engelhardt <jengelh@medozas.de>"); MODULE_DESCRIPTION("Xtables: \"recently-seen\" host matching"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_recent"); MODULE_ALIAS("ip6t_recent"); static unsigned int ip_list_tot = 100; static unsigned int ip_pkt_list_tot = 20; static unsigned int ip_list_hash_size = 0; static unsigned int ip_list_perms = 0644; static unsigned int ip_list_uid = 0; static unsigned int ip_list_gid = 0; module_param(ip_list_tot, uint, 0400); module_param(ip_pkt_list_tot, uint, 0400); module_param(ip_list_hash_size, uint, 0400); module_param(ip_list_perms, uint, 0400); module_param(ip_list_uid, uint, S_IRUGO | S_IWUSR); module_param(ip_list_gid, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(ip_list_tot, "number of IPs to remember per list"); MODULE_PARM_DESC(ip_pkt_list_tot, "number of 
packets per IP address to remember (max. 255)"); MODULE_PARM_DESC(ip_list_hash_size, "size of hash table used to look up IPs"); MODULE_PARM_DESC(ip_list_perms, "permissions on /proc/net/xt_recent/* files"); MODULE_PARM_DESC(ip_list_uid, "default owner of /proc/net/xt_recent/* files"); MODULE_PARM_DESC(ip_list_gid, "default owning group of /proc/net/xt_recent/* files"); struct recent_entry { struct list_head list; struct list_head lru_list; union nf_inet_addr addr; u_int16_t family; u_int8_t ttl; u_int8_t index; u_int16_t nstamps; unsigned long stamps[0]; }; struct recent_table { struct list_head list; char name[XT_RECENT_NAME_LEN]; unsigned int refcnt; unsigned int entries; struct list_head lru_list; struct list_head iphash[0]; }; struct recent_net { struct list_head tables; #ifdef CONFIG_PROC_FS struct proc_dir_entry *xt_recent; #endif }; static int recent_net_id; static inline struct recent_net *recent_pernet(struct net *net) { return net_generic(net, recent_net_id); } static DEFINE_SPINLOCK(recent_lock); static DEFINE_MUTEX(recent_mutex); #ifdef CONFIG_PROC_FS static const struct file_operations recent_old_fops, recent_mt_fops; #endif static u_int32_t hash_rnd __read_mostly; static bool hash_rnd_inited __read_mostly; static inline unsigned int recent_entry_hash4(const union nf_inet_addr *addr) { return jhash_1word((__force u32)addr->ip, hash_rnd) & (ip_list_hash_size - 1); } static inline unsigned int recent_entry_hash6(const union nf_inet_addr *addr) { return jhash2((u32 *)addr->ip6, ARRAY_SIZE(addr->ip6), hash_rnd) & (ip_list_hash_size - 1); } static struct recent_entry * recent_entry_lookup(const struct recent_table *table, const union nf_inet_addr *addrp, u_int16_t family, u_int8_t ttl) { struct recent_entry *e; unsigned int h; if (family == NFPROTO_IPV4) h = recent_entry_hash4(addrp); else h = recent_entry_hash6(addrp); list_for_each_entry(e, &table->iphash[h], list) if (e->family == family && memcmp(&e->addr, addrp, sizeof(e->addr)) == 0 && (ttl == e->ttl 
|| ttl == 0 || e->ttl == 0)) return e; return NULL; }

/* Unlink @e from its hash chain and the LRU list and free it. */
static void recent_entry_remove(struct recent_table *t, struct recent_entry *e)
{
	list_del(&e->list);
	list_del(&e->lru_list);
	kfree(e);
	t->entries--;
}

/*
 * Drop entries with timestamps older than 'time'.
 */
static void recent_entry_reap(struct recent_table *t, unsigned long time)
{
	struct recent_entry *e;

	/*
	 * The head of the LRU list is always the oldest entry.
	 */
	e = list_entry(t->lru_list.next, struct recent_entry, lru_list);

	/*
	 * The last time stamp is the most recent.
	 */
	if (time_after(time, e->stamps[e->index-1]))
		recent_entry_remove(t, e);
}

/*
 * Allocate and insert a new entry for @addr.  If the table is already at
 * ip_list_tot entries, the least-recently-used entry is evicted first.
 * Called under recent_lock; allocation is GFP_ATOMIC for that reason.
 * Returns NULL on allocation failure.
 */
static struct recent_entry *
recent_entry_init(struct recent_table *t, const union nf_inet_addr *addr,
		  u_int16_t family, u_int8_t ttl)
{
	struct recent_entry *e;

	if (t->entries >= ip_list_tot) {
		e = list_entry(t->lru_list.next, struct recent_entry, lru_list);
		recent_entry_remove(t, e);
	}
	/* stamps[] is a flexible-length tail sized by ip_pkt_list_tot */
	e = kmalloc(sizeof(*e) + sizeof(e->stamps[0]) * ip_pkt_list_tot,
		    GFP_ATOMIC);
	if (e == NULL)
		return NULL;
	memcpy(&e->addr, addr, sizeof(e->addr));
	e->ttl = ttl;
	e->stamps[0] = jiffies;
	e->nstamps = 1;
	e->index = 1;
	e->family = family;
	if (family == NFPROTO_IPV4)
		list_add_tail(&e->list, &t->iphash[recent_entry_hash4(addr)]);
	else
		list_add_tail(&e->list, &t->iphash[recent_entry_hash6(addr)]);
	list_add_tail(&e->lru_list, &t->lru_list);
	t->entries++;
	return e;
}

/*
 * Record a fresh timestamp in @e's circular stamp buffer and move the
 * entry to the most-recently-used end of the LRU list.
 */
static void recent_entry_update(struct recent_table *t, struct recent_entry *e)
{
	e->index %= ip_pkt_list_tot;
	e->stamps[e->index++] = jiffies;
	if (e->index > e->nstamps)
		e->nstamps = e->index;
	list_move_tail(&e->lru_list, &t->lru_list);
}

/* Find the named table in this netns, or NULL. Caller holds recent_lock
 * or recent_mutex. */
static struct recent_table *recent_table_lookup(struct recent_net *recent_net,
						const char *name)
{
	struct recent_table *t;

	list_for_each_entry(t, &recent_net->tables, list)
		if (!strcmp(t->name, name))
			return t;
	return NULL;
}

/* Remove and free every entry in @t. */
static void recent_table_flush(struct recent_table *t)
{
	struct recent_entry *e, *next;
	unsigned int i;

	for (i = 0; i < ip_list_hash_size; i++)
		list_for_each_entry_safe(e, next, &t->iphash[i], list)
			recent_entry_remove(t, e);
}

/*
 * Match routine.  Extracts source/destination address and TTL from the
 * packet, looks the address up in the configured table and applies the
 * SET/CHECK/UPDATE/REMOVE action requested by the rule.  Returns the
 * match result (possibly inverted via info->invert).
 */
static bool
recent_mt(const struct sk_buff *skb, struct xt_action_param *par)
{
	struct net *net = dev_net(par->in ? par->in : par->out);
	struct recent_net *recent_net = recent_pernet(net);
	const struct xt_recent_mtinfo *info = par->matchinfo;
	struct recent_table *t;
	struct recent_entry *e;
	union nf_inet_addr addr = {};
	u_int8_t ttl;
	bool ret = info->invert;

	if (par->family == NFPROTO_IPV4) {
		const struct iphdr *iph = ip_hdr(skb);

		if (info->side == XT_RECENT_DEST)
			addr.ip = iph->daddr;
		else
			addr.ip = iph->saddr;

		ttl = iph->ttl;
	} else {
		const struct ipv6hdr *iph = ipv6_hdr(skb);

		if (info->side == XT_RECENT_DEST)
			memcpy(&addr.in6, &iph->daddr, sizeof(addr.in6));
		else
			memcpy(&addr.in6, &iph->saddr, sizeof(addr.in6));

		ttl = iph->hop_limit;
	}

	/* use TTL as seen before forwarding */
	if (par->out != NULL && skb->sk == NULL)
		ttl++;

	spin_lock_bh(&recent_lock);
	t = recent_table_lookup(recent_net, info->name);
	e = recent_entry_lookup(t, &addr, par->family,
				(info->check_set & XT_RECENT_TTL) ? ttl : 0);
	if (e == NULL) {
		if (!(info->check_set & XT_RECENT_SET))
			goto out;
		e = recent_entry_init(t, &addr, par->family, ttl);
		if (e == NULL)
			par->hotdrop = true;
		ret = !ret;
		goto out;
	}

	if (info->check_set & XT_RECENT_SET)
		ret = !ret;
	else if (info->check_set & XT_RECENT_REMOVE) {
		recent_entry_remove(t, e);
		ret = !ret;
	} else if (info->check_set & (XT_RECENT_CHECK | XT_RECENT_UPDATE)) {
		unsigned long time = jiffies - info->seconds * HZ;
		unsigned int i, hits = 0;

		for (i = 0; i < e->nstamps; i++) {
			if (info->seconds && time_after(time, e->stamps[i]))
				continue;
			if (!info->hit_count || ++hits >= info->hit_count) {
				ret = !ret;
				break;
			}
		}

		/* info->seconds must be non-zero */
		if (info->check_set & XT_RECENT_REAP)
			recent_entry_reap(t, time);
	}

	if (info->check_set & XT_RECENT_SET ||
	    (info->check_set & XT_RECENT_UPDATE && ret)) {
		recent_entry_update(t, e);
		e->ttl = ttl;
	}
out:
	spin_unlock_bh(&recent_lock);
	return ret;
}

/*
 * checkentry: validate the rule parameters, then either take a reference
 * on an existing table of the same name or create a new one (including
 * its proc entry).  Serialized by recent_mutex.
 */
static int recent_mt_check(const struct xt_mtchk_param *par)
{
	struct recent_net *recent_net = recent_pernet(par->net);
	const struct xt_recent_mtinfo *info = par->matchinfo;
	struct recent_table *t;
#ifdef CONFIG_PROC_FS
	struct proc_dir_entry *pde;
#endif
	unsigned i;
	int ret = -EINVAL;

	/* lazily seed the hash on first rule insertion */
	if (unlikely(!hash_rnd_inited)) {
		get_random_bytes(&hash_rnd, sizeof(hash_rnd));
		hash_rnd_inited = true;
	}
	if (info->check_set & ~XT_RECENT_VALID_FLAGS) {
		pr_info("Unsupported user space flags (%08x)\n",
			info->check_set);
		return -EINVAL;
	}
	/* exactly one of SET/REMOVE/CHECK/UPDATE must be given */
	if (hweight8(info->check_set &
		     (XT_RECENT_SET | XT_RECENT_REMOVE |
		      XT_RECENT_CHECK | XT_RECENT_UPDATE)) != 1)
		return -EINVAL;
	if ((info->check_set & (XT_RECENT_SET | XT_RECENT_REMOVE)) &&
	    (info->seconds || info->hit_count ||
	    (info->check_set & XT_RECENT_MODIFIERS)))
		return -EINVAL;
	if ((info->check_set & XT_RECENT_REAP) && !info->seconds)
		return -EINVAL;
	if (info->hit_count > ip_pkt_list_tot) {
		pr_info("hitcount (%u) is larger than "
			"packets to be remembered (%u)\n",
			info->hit_count, ip_pkt_list_tot);
		return -EINVAL;
	}
	if (info->name[0] == '\0' ||
	    strnlen(info->name, XT_RECENT_NAME_LEN) == XT_RECENT_NAME_LEN)
		return -EINVAL;

	mutex_lock(&recent_mutex);
	t = recent_table_lookup(recent_net, info->name);
	if (t != NULL) {
		t->refcnt++;
		ret = 0;
		goto out;
	}

	t = kzalloc(sizeof(*t) + sizeof(t->iphash[0]) * ip_list_hash_size,
		    GFP_KERNEL);
	if (t == NULL) {
		ret = -ENOMEM;
		goto out;
	}
	t->refcnt = 1;
	strcpy(t->name, info->name);
	INIT_LIST_HEAD(&t->lru_list);
	for (i = 0; i < ip_list_hash_size; i++)
		INIT_LIST_HEAD(&t->iphash[i]);
#ifdef CONFIG_PROC_FS
	pde = proc_create_data(t->name, ip_list_perms, recent_net->xt_recent,
			       &recent_mt_fops, t);
	if (pde == NULL) {
		kfree(t);
		ret = -ENOMEM;
		goto out;
	}
	pde->uid = ip_list_uid;
	pde->gid = ip_list_gid;
#endif
	spin_lock_bh(&recent_lock);
	list_add_tail(&t->list, &recent_net->tables);
	spin_unlock_bh(&recent_lock);
	ret = 0;
out:
	mutex_unlock(&recent_mutex);
	return ret;
}

/* Drop one reference on the rule's table; tear it down on last put. */
static void recent_mt_destroy(const struct xt_mtdtor_param *par)
{
	struct recent_net *recent_net = recent_pernet(par->net);
	const struct xt_recent_mtinfo *info = par->matchinfo;
	struct recent_table *t;

	mutex_lock(&recent_mutex);
	t = recent_table_lookup(recent_net, info->name);
	if (--t->refcnt == 0) {
		spin_lock_bh(&recent_lock);
		list_del(&t->list);
		spin_unlock_bh(&recent_lock);
#ifdef CONFIG_PROC_FS
		remove_proc_entry(t->name, recent_net->xt_recent);
#endif
		recent_table_flush(t);
		kfree(t);
	}
	mutex_unlock(&recent_mutex);
}

#ifdef CONFIG_PROC_FS
/* seq_file cursor: which table and which hash bucket we are walking */
struct recent_iter_state {
	const struct recent_table *table;
	unsigned int bucket;
};

static void *recent_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(recent_lock)
{
	struct recent_iter_state *st = seq->private;
	const struct recent_table *t = st->table;
	struct recent_entry *e;
	loff_t p = *pos;

	spin_lock_bh(&recent_lock);

	/* skip forward to the *pos'th entry across all buckets */
	for (st->bucket = 0; st->bucket < ip_list_hash_size; st->bucket++)
		list_for_each_entry(e, &t->iphash[st->bucket], list)
			if (p-- == 0)
				return e;
	return NULL;
}

static void *recent_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct recent_iter_state *st = seq->private;
	const struct recent_table *t = st->table;
	const struct recent_entry *e = v;
	const struct list_head *head = e->list.next;

	/* advance to the next non-empty bucket when the current one ends */
	while (head == &t->iphash[st->bucket]) {
		if (++st->bucket >= ip_list_hash_size)
			return NULL;
		head = t->iphash[st->bucket].next;
	}
	(*pos)++;
	return list_entry(head, struct recent_entry, list);
}

static void recent_seq_stop(struct seq_file *s, void *v)
	__releases(recent_lock)
{
	spin_unlock_bh(&recent_lock);
}

/* Emit one table entry: address, ttl, newest stamp, then all stamps. */
static int recent_seq_show(struct seq_file *seq, void *v)
{
	const struct recent_entry *e = v;
	unsigned int i;

	i = (e->index - 1) % ip_pkt_list_tot;
	if (e->family == NFPROTO_IPV4)
		seq_printf(seq, "src=%pI4 ttl: %u last_seen: %lu oldest_pkt: %u",
			   &e->addr.ip, e->ttl, e->stamps[i], e->index);
	else
		seq_printf(seq, "src=%pI6 ttl: %u last_seen: %lu oldest_pkt: %u",
			   &e->addr.in6, e->ttl, e->stamps[i], e->index);
	for (i = 0; i < e->nstamps; i++)
		seq_printf(seq, "%s %lu", i ? "," : "", e->stamps[i]);
	seq_printf(seq, "\n");
	return 0;
}

static const struct seq_operations recent_seq_ops = {
	.start		= recent_seq_start,
	.next		= recent_seq_next,
	.stop		= recent_seq_stop,
	.show		= recent_seq_show,
};

static int recent_seq_open(struct inode *inode, struct file *file)
{
	struct proc_dir_entry *pde = PDE(inode);
	struct recent_iter_state *st;

	st = __seq_open_private(file, &recent_seq_ops, sizeof(*st));
	if (st == NULL)
		return -ENOMEM;

	st->table = pde->data;
	return 0;
}

/*
 * proc write interface: "+addr" adds/updates, "-addr" removes,
 * "/" flushes the whole table.  Accepts IPv4 or IPv6 (detected by ':').
 */
static ssize_t recent_mt_proc_write(struct file *file, const char __user *input,
				    size_t size, loff_t *loff)
{
	const struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
	struct recent_table *t = pde->data;
	struct recent_entry *e;
	char buf[sizeof("+b335:1d35:1e55:dead:c0de:1715:5afe:c0de")];
	const char *c = buf;
	union nf_inet_addr addr = {};
	u_int16_t family;
	bool add, succ;

	if (size == 0)
		return 0;
	if (size > sizeof(buf))
		size = sizeof(buf);
	if (copy_from_user(buf, input, size) != 0)
		return -EFAULT;

	/* Strict protocol! */
	if (*loff != 0)
		return -ESPIPE;
	switch (*c) {
	case '/': /* flush table */
		spin_lock_bh(&recent_lock);
		recent_table_flush(t);
		spin_unlock_bh(&recent_lock);
		return size;
	case '-': /* remove address */
		add = false;
		break;
	case '+': /* add address */
		add = true;
		break;
	default:
		pr_info("Need \"+ip\", \"-ip\" or \"/\"\n");
		return -EINVAL;
	}

	++c;
	--size;
	if (strnchr(c, size, ':') != NULL) {
		family = NFPROTO_IPV6;
		succ = in6_pton(c, size, (void *)&addr, '\n', NULL);
	} else {
		family = NFPROTO_IPV4;
		succ = in4_pton(c, size, (void *)&addr, '\n', NULL);
	}

	if (!succ) {
		pr_info("illegal address written to procfs\n");
		return -EINVAL;
	}

	spin_lock_bh(&recent_lock);
	e = recent_entry_lookup(t, &addr, family, 0);
	if (e == NULL) {
		if (add)
			recent_entry_init(t, &addr, family, 0);
	} else {
		if (add)
			recent_entry_update(t, e);
		else
			recent_entry_remove(t, e);
	}
	spin_unlock_bh(&recent_lock);
	/* Note we removed one above */
	*loff += size + 1;
	return size + 1;
}

static const struct file_operations recent_mt_fops = {
	.open    = recent_seq_open,
	.read    = seq_read,
	.write   = recent_mt_proc_write,
	.release = seq_release_private,
	.owner   = THIS_MODULE,
	.llseek = seq_lseek,
};

/* Create the per-netns /proc/net/xt_recent directory. */
static int __net_init recent_proc_net_init(struct net *net)
{
	struct recent_net *recent_net = recent_pernet(net);

	recent_net->xt_recent = proc_mkdir("xt_recent", net->proc_net);
	if (!recent_net->xt_recent)
		return -ENOMEM;
	return 0;
}

static void __net_exit recent_proc_net_exit(struct net *net)
{
	proc_net_remove(net, "xt_recent");
}
#else
static inline int recent_proc_net_init(struct net *net)
{
	return 0;
}

static inline void recent_proc_net_exit(struct net *net)
{
}
#endif /* CONFIG_PROC_FS */

static int __net_init recent_net_init(struct net *net)
{
	struct recent_net *recent_net = recent_pernet(net);

	INIT_LIST_HEAD(&recent_net->tables);
	return recent_proc_net_init(net);
}

static void __net_exit recent_net_exit(struct net *net)
{
	struct recent_net *recent_net = recent_pernet(net);

	/* all rules referencing tables must be gone by now */
	BUG_ON(!list_empty(&recent_net->tables));
	recent_proc_net_exit(net);
}

static struct pernet_operations recent_net_ops = {
	.init	= recent_net_init,
	.exit	= recent_net_exit,
	.id	= &recent_net_id,
	.size	= sizeof(struct recent_net),
};

static struct xt_match recent_mt_reg[] __read_mostly = {
	{
		.name       = "recent",
		.revision   = 0,
		.family     = NFPROTO_IPV4,
		.match      = recent_mt,
		.matchsize  = sizeof(struct xt_recent_mtinfo),
		.checkentry = recent_mt_check,
		.destroy    = recent_mt_destroy,
		.me         = THIS_MODULE,
	},
	{
		.name       = "recent",
		.revision   = 0,
		.family     = NFPROTO_IPV6,
		.match      = recent_mt,
		.matchsize  = sizeof(struct xt_recent_mtinfo),
		.checkentry = recent_mt_check,
		.destroy    = recent_mt_destroy,
		.me         = THIS_MODULE,
	},
};

static int __init recent_mt_init(void)
{
	int err;

	/* ip_pkt_list_tot must fit the u8 stamp index */
	if (!ip_list_tot || !ip_pkt_list_tot || ip_pkt_list_tot > 255)
		return -EINVAL;
	ip_list_hash_size = 1 << fls(ip_list_tot);

	err = register_pernet_subsys(&recent_net_ops);
	if (err)
		return err;
	err = xt_register_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
	if (err)
		unregister_pernet_subsys(&recent_net_ops);
	return err;
}

static void __exit recent_mt_exit(void)
{
	xt_unregister_matches(recent_mt_reg, ARRAY_SIZE(recent_mt_reg));
	unregister_pernet_subsys(&recent_net_ops);
}

module_init(recent_mt_init);
module_exit(recent_mt_exit);
gpl-2.0
kelledge/linux
arch/x86/platform/olpc/olpc_dt.c
10184
6753
/*
 * OLPC-specific OFW device tree support code.
 *
 * Paul Mackerras	August 1996.
 * Copyright (C) 1996-2005 Paul Mackerras.
 *
 * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
 *   {engebret|bergner}@us.ibm.com
 *
 * Adapted for sparc by David S. Miller davem@davemloft.net
 * Adapted for x86/OLPC by Andres Salomon <dilinger@queued.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/bootmem.h>
#include <linux/of.h>
#include <linux/of_platform.h>
#include <linux/of_pdt.h>
#include <asm/olpc.h>
#include <asm/olpc_ofw.h>

/*
 * Thin wrappers around the OFW client interface.  Each packs its
 * arguments/results into the args[]/res[] arrays that olpc_ofw() expects
 * and treats a phandle of -1 (OFW's "no node") as failure.
 */

/* Return the next sibling of @node, or 0 if there is none / on error. */
static phandle __init olpc_dt_getsibling(phandle node)
{
	const void *args[] = { (void *)node };
	void *res[] = { &node };

	if ((s32)node == -1)
		return 0;

	if (olpc_ofw("peer", args, res) || (s32)node == -1)
		return 0;

	return node;
}

/* Return the first child of @node, or 0 if there is none / on error. */
static phandle __init olpc_dt_getchild(phandle node)
{
	const void *args[] = { (void *)node };
	void *res[] = { &node };

	if ((s32)node == -1)
		return 0;

	if (olpc_ofw("child", args, res) || (s32)node == -1) {
		pr_err("PROM: %s: fetching child failed!\n", __func__);
		return 0;
	}

	return node;
}

/* Return the byte length of property @prop on @node, or -1 on error. */
static int __init olpc_dt_getproplen(phandle node, const char *prop)
{
	const void *args[] = { (void *)node, prop };
	int len;
	void *res[] = { &len };

	if ((s32)node == -1)
		return -1;

	if (olpc_ofw("getproplen", args, res)) {
		pr_err("PROM: %s: getproplen failed!\n", __func__);
		return -1;
	}

	return len;
}

/*
 * Copy property @prop of @node into @buf (at most @bufsize bytes).
 * Returns the property length, or -1 if it is missing, empty, or would
 * not fit in the buffer.
 */
static int __init olpc_dt_getproperty(phandle node, const char *prop,
		char *buf, int bufsize)
{
	int plen;

	plen = olpc_dt_getproplen(node, prop);
	if (plen > bufsize || plen < 1) {
		return -1;
	} else {
		const void *args[] = { (void *)node, prop, buf, (void *)plen };
		void *res[] = { &plen };

		if (olpc_ofw("getprop", args, res)) {
			pr_err("PROM: %s: getprop failed!\n", __func__);
			return -1;
		}
	}

	return plen;
}

/* Fetch the property name following @prev into @buf; 0 on success, -1
 * on error or when no further property exists. */
static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf)
{
	const void *args[] = { (void *)node, prev, buf };
	int success;
	void *res[] = { &success };

	buf[0] = '\0';

	if ((s32)node == -1)
		return -1;

	if (olpc_ofw("nextprop", args, res) || success != 1)
		return -1;

	return 0;
}

/* Convert @node to its full path in @buf; path length returned in *len. */
static int __init olpc_dt_pkg2path(phandle node, char *buf,
		const int buflen, int *len)
{
	const void *args[] = { (void *)node, buf, (void *)buflen };
	void *res[] = { len };

	if ((s32)node == -1)
		return -1;

	if (olpc_ofw("package-to-path", args, res) || *len < 1)
		return -1;

	return 0;
}

/* total bytes handed out by prom_early_alloc(), for the boot banner */
static unsigned int prom_early_allocated __initdata;

/*
 * Early allocator for device-tree construction.  Grabs bootmem in
 * chunks and carves them up; memory is never freed (it lives for the
 * kernel's lifetime as part of the device tree).  Returned memory is
 * zeroed.  Not thread-safe; __init-time use only.
 */
void * __init prom_early_alloc(unsigned long size)
{
	static u8 *mem;
	static size_t free_mem;
	void *res;

	if (free_mem < size) {
		const size_t chunk_size = max(PAGE_SIZE, size);

		/*
		 * To minimize the number of allocations, grab at least
		 * PAGE_SIZE of memory (that's an arbitrary choice that's
		 * fast enough on the platforms we care about while minimizing
		 * wasted bootmem) and hand off chunks of it to callers.
		 */
		res = alloc_bootmem(chunk_size);
		BUG_ON(!res);
		prom_early_allocated += chunk_size;
		memset(res, 0, chunk_size);
		free_mem = chunk_size;
		mem = res;
	}

	/* allocate from the local cache */
	free_mem -= size;
	res = mem;
	mem += size;
	return res;
}

static struct of_pdt_ops prom_olpc_ops __initdata = {
	.nextprop = olpc_dt_nextprop,
	.getproplen = olpc_dt_getproplen,
	.getproperty = olpc_dt_getproperty,
	.getchild = olpc_dt_getchild,
	.getsibling = olpc_dt_getsibling,
	.pkg2path = olpc_dt_pkg2path,
};

/* Look up @path in the OFW tree; returns 0 when not found. */
static phandle __init olpc_dt_finddevice(const char *path)
{
	phandle node;
	const void *args[] = { path };
	void *res[] = { &node };

	if (olpc_ofw("finddevice", args, res)) {
		pr_err("olpc_dt: finddevice failed!\n");
		return 0;
	}

	if ((s32) node == -1)
		return 0;

	return node;
}

/* Execute a Forth snippet in the firmware; returns its result or -1. */
static int __init olpc_dt_interpret(const char *words)
{
	int result;
	const void *args[] = { words };
	void *res[] = { &result };

	if (olpc_ofw("interpret", args, res)) {
		pr_err("olpc_dt: interpret failed!\n");
		return -1;
	}
	return result;
}

/*
 * Extract board revision directly from OFW device tree.
 * We can't use olpc_platform_info because that hasn't been set up yet.
 */
static u32 __init olpc_dt_get_board_revision(void)
{
	phandle node;
	__be32 rev;
	int r;

	node = olpc_dt_finddevice("/");
	if (!node)
		return 0;

	r = olpc_dt_getproperty(node, "board-revision-int",
				(char *) &rev, sizeof(rev));
	if (r < 0)
		return 0;

	return be32_to_cpu(rev);
}

/*
 * Patch the firmware device tree on machines whose OFW predates the
 * compatible-string convention: adds olpc,xo1-battery / dcon / rtc
 * markers by interpreting Forth in the firmware.  No-op on new firmware.
 */
void __init olpc_dt_fixup(void)
{
	int r;
	char buf[64];
	phandle node;
	u32 board_rev;

	node = olpc_dt_finddevice("/battery@0");
	if (!node)
		return;

	/*
	 * If the battery node has a compatible property, we are running a new
	 * enough firmware and don't have fixups to make.
	 */
	r = olpc_dt_getproperty(node, "compatible", buf, sizeof(buf));
	if (r > 0)
		return;

	pr_info("PROM DT: Old firmware detected, applying fixes\n");

	/* Add olpc,xo1-battery compatible marker to battery node */
	olpc_dt_interpret("\" /battery@0\" find-device"
		" \" olpc,xo1-battery\" +compatible"
		" device-end");

	board_rev = olpc_dt_get_board_revision();
	if (!board_rev)
		return;

	if (board_rev >= olpc_board_pre(0xd0)) {
		/* XO-1.5: add dcon device */
		olpc_dt_interpret("\" /pci/display@1\" find-device"
			" new-device"
			" \" dcon\" device-name \" olpc,xo1-dcon\" +compatible"
			" finish-device device-end");
	} else {
		/* XO-1: add dcon device, mark RTC as olpc,xo1-rtc */
		olpc_dt_interpret("\" /pci/display@1,1\" find-device"
			" new-device"
			" \" dcon\" device-name \" olpc,xo1-dcon\" +compatible"
			" finish-device device-end"
			" \" /rtc\" find-device"
			" \" olpc,xo1-rtc\" +compatible"
			" device-end");
	}
}

/* Entry point: copy the OFW tree into the kernel's device tree. */
void __init olpc_dt_build_devicetree(void)
{
	phandle root;

	if (!olpc_ofw_is_installed())
		return;

	olpc_dt_fixup();

	root = olpc_dt_getsibling(0);
	if (!root) {
		pr_err("PROM: unable to get root node from OFW!\n");
		return;
	}
	of_pdt_build_devicetree(root, &prom_olpc_ops);

	pr_info("PROM DT: Built device tree with %u bytes of memory.\n",
			prom_early_allocated);
}

/* A list of DT node/bus matches that we want to expose as platform devices */
static struct of_device_id __initdata of_ids[] = {
	{ .compatible = "olpc,xo1-battery" },
	{ .compatible = "olpc,xo1-dcon" },
	{ .compatible = "olpc,xo1-rtc" },
	{},
};

static int __init olpc_create_platform_devices(void)
{
	if (machine_is_olpc())
		return of_platform_bus_probe(NULL, of_ids, NULL);
	else
		return 0;
}
device_initcall(olpc_create_platform_devices);
gpl-2.0
CyanHacker-Lollipop/kernel_samsung_jf
arch/arm/mach-s5pc100/setup-ide.c
10952
1549
/* linux/arch/arm/mach-s5pc100/setup-ide.c * * Copyright (c) 2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * S5PC100 setup information for IDE * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/gpio.h> #include <linux/io.h> #include <mach/regs-clock.h> #include <plat/gpio-cfg.h> static void s5pc100_ide_cfg_gpios(unsigned int base, unsigned int nr) { s3c_gpio_cfgrange_nopull(base, nr, S3C_GPIO_SFN(4)); for (; nr > 0; nr--, base++) s5p_gpio_set_drvstr(base, S5P_GPIO_DRVSTR_LV4); } void s5pc100_ide_setup_gpio(void) { u32 reg; /* Independent CF interface, CF chip select configuration */ reg = readl(S5PC100_MEM_SYS_CFG) & (~0x3f); writel(reg | MEM_SYS_CFG_EBI_FIX_PRI_CFCON, S5PC100_MEM_SYS_CFG); /* CF_Add[0 - 2], CF_IORDY, CF_INTRQ, CF_DMARQ, CF_DMARST, CF_DMACK */ s5pc100_ide_cfg_gpios(S5PC100_GPJ0(0), 8); /*CF_Data[0 - 7] */ s5pc100_ide_cfg_gpios(S5PC100_GPJ2(0), 8); /* CF_Data[8 - 15] */ s5pc100_ide_cfg_gpios(S5PC100_GPJ3(0), 8); /* CF_CS0, CF_CS1, CF_IORD, CF_IOWR */ s5pc100_ide_cfg_gpios(S5PC100_GPJ4(0), 4); /* EBI_OE, EBI_WE */ s3c_gpio_cfgpin_range(S5PC100_GPK0(6), 2, S3C_GPIO_SFN(0)); /* CF_OE, CF_WE */ s3c_gpio_cfgrange_nopull(S5PC100_GPK1(6), 8, S3C_GPIO_SFN(2)); /* CF_CD */ s3c_gpio_cfgpin(S5PC100_GPK3(5), S3C_GPIO_SFN(2)); s3c_gpio_setpull(S5PC100_GPK3(5), S3C_GPIO_PULL_NONE); }
gpl-2.0
cwyy/kernel
drivers/usb/storage/sierra_ms.c
457
5487
#include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include <linux/usb.h> #include "usb.h" #include "transport.h" #include "protocol.h" #include "scsiglue.h" #include "sierra_ms.h" #include "debug.h" #define SWIMS_USB_REQUEST_SetSwocMode 0x0B #define SWIMS_USB_REQUEST_GetSwocInfo 0x0A #define SWIMS_USB_INDEX_SetMode 0x0000 #define SWIMS_SET_MODE_Modem 0x0001 #define TRU_NORMAL 0x01 #define TRU_FORCE_MS 0x02 #define TRU_FORCE_MODEM 0x03 static unsigned int swi_tru_install = 1; module_param(swi_tru_install, uint, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(swi_tru_install, "TRU-Install mode (1=Full Logic (def)," " 2=Force CD-Rom, 3=Force Modem)"); struct swoc_info { __u8 rev; __u8 reserved[8]; __u16 LinuxSKU; __u16 LinuxVer; __u8 reserved2[47]; } __attribute__((__packed__)); static bool containsFullLinuxPackage(struct swoc_info *swocInfo) { if ((swocInfo->LinuxSKU >= 0x2100 && swocInfo->LinuxSKU <= 0x2FFF) || (swocInfo->LinuxSKU >= 0x7100 && swocInfo->LinuxSKU <= 0x7FFF)) return true; else return false; } static int sierra_set_ms_mode(struct usb_device *udev, __u16 eSWocMode) { int result; US_DEBUGP("SWIMS: %s", "DEVICE MODE SWITCH\n"); result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SWIMS_USB_REQUEST_SetSwocMode, /* __u8 request */ USB_TYPE_VENDOR | USB_DIR_OUT, /* __u8 request type */ eSWocMode, /* __u16 value */ 0x0000, /* __u16 index */ NULL, /* void *data */ 0, /* __u16 size */ USB_CTRL_SET_TIMEOUT); /* int timeout */ return result; } static int sierra_get_swoc_info(struct usb_device *udev, struct swoc_info *swocInfo) { int result; US_DEBUGP("SWIMS: Attempting to get TRU-Install info.\n"); result = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), SWIMS_USB_REQUEST_GetSwocInfo, /* __u8 request */ USB_TYPE_VENDOR | USB_DIR_IN, /* __u8 request type */ 0, /* __u16 value */ 0, /* __u16 index */ (void *) swocInfo, /* void *data */ sizeof(struct swoc_info), /* __u16 size */ USB_CTRL_SET_TIMEOUT); /* int timeout 
*/ swocInfo->LinuxSKU = le16_to_cpu(swocInfo->LinuxSKU); swocInfo->LinuxVer = le16_to_cpu(swocInfo->LinuxVer); return result; } static void debug_swoc(struct swoc_info *swocInfo) { US_DEBUGP("SWIMS: SWoC Rev: %02d \n", swocInfo->rev); US_DEBUGP("SWIMS: Linux SKU: %04X \n", swocInfo->LinuxSKU); US_DEBUGP("SWIMS: Linux Version: %04X \n", swocInfo->LinuxVer); } static ssize_t show_truinst(struct device *dev, struct device_attribute *attr, char *buf) { struct swoc_info *swocInfo; struct usb_interface *intf = to_usb_interface(dev); struct usb_device *udev = interface_to_usbdev(intf); int result; if (swi_tru_install == TRU_FORCE_MS) { result = snprintf(buf, PAGE_SIZE, "Forced Mass Storage\n"); } else { swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL); if (!swocInfo) { US_DEBUGP("SWIMS: Allocation failure\n"); snprintf(buf, PAGE_SIZE, "Error\n"); return -ENOMEM; } result = sierra_get_swoc_info(udev, swocInfo); if (result < 0) { US_DEBUGP("SWIMS: failed SWoC query\n"); kfree(swocInfo); snprintf(buf, PAGE_SIZE, "Error\n"); return -EIO; } debug_swoc(swocInfo); result = snprintf(buf, PAGE_SIZE, "REV=%02d SKU=%04X VER=%04X\n", swocInfo->rev, swocInfo->LinuxSKU, swocInfo->LinuxVer); kfree(swocInfo); } return result; } static DEVICE_ATTR(truinst, S_IWUGO | S_IRUGO, show_truinst, NULL); int sierra_ms_init(struct us_data *us) { int result, retries; signed long delay_t; struct swoc_info *swocInfo; struct usb_device *udev; struct Scsi_Host *sh; struct scsi_device *sd; delay_t = 2; retries = 3; result = 0; udev = us->pusb_dev; sh = us_to_host(us); sd = scsi_get_host_dev(sh); US_DEBUGP("SWIMS: sierra_ms_init called\n"); /* Force Modem mode */ if (swi_tru_install == TRU_FORCE_MODEM) { US_DEBUGP("SWIMS: %s", "Forcing Modem Mode\n"); result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); if (result < 0) US_DEBUGP("SWIMS: Failed to switch to modem mode.\n"); return -EIO; } /* Force Mass Storage mode (keep CD-Rom) */ else if (swi_tru_install == TRU_FORCE_MS) { US_DEBUGP("SWIMS: 
%s", "Forcing Mass Storage Mode\n"); goto complete; } /* Normal TRU-Install Logic */ else { US_DEBUGP("SWIMS: %s", "Normal SWoC Logic\n"); swocInfo = kmalloc(sizeof(struct swoc_info), GFP_KERNEL); if (!swocInfo) { US_DEBUGP("SWIMS: %s", "Allocation failure\n"); return -ENOMEM; } retries = 3; do { retries--; result = sierra_get_swoc_info(udev, swocInfo); if (result < 0) { US_DEBUGP("SWIMS: %s", "Failed SWoC query\n"); schedule_timeout_uninterruptible(2*HZ); } } while (retries && result < 0); if (result < 0) { US_DEBUGP("SWIMS: %s", "Completely failed SWoC query\n"); kfree(swocInfo); return -EIO; } debug_swoc(swocInfo); /* If there is not Linux software on the TRU-Install device * then switch to modem mode */ if (!containsFullLinuxPackage(swocInfo)) { US_DEBUGP("SWIMS: %s", "Switching to Modem Mode\n"); result = sierra_set_ms_mode(udev, SWIMS_SET_MODE_Modem); if (result < 0) US_DEBUGP("SWIMS: Failed to switch modem\n"); kfree(swocInfo); return -EIO; } kfree(swocInfo); } complete: result = device_create_file(&us->pusb_intf->dev, &dev_attr_truinst); return 0; }
gpl-2.0
CAFans/android_kernel_lge_msm8974
arch/x86/crypto/sha1_ssse3_glue.c
713
5909
/*
 * Cryptographic API.
 *
 * Glue code for the SHA1 Secure Hash Algorithm assembler implementation using
 * Supplemental SSE3 instructions.
 *
 * This file is based on sha1_generic.c
 *
 * Copyright (c) Alan Smithee.
 * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk>
 * Copyright (c) Jean-Francois Dive <jef@linuxbe.org>
 * Copyright (c) Mathias Krause <minipli@googlemail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 *
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <crypto/internal/hash.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/cryptohash.h>
#include <linux/types.h>
#include <crypto/sha.h>
#include <asm/byteorder.h>
#include <asm/i387.h>
#include <asm/xcr.h>
#include <asm/xsave.h>

asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
				     unsigned int rounds);
#ifdef CONFIG_AS_AVX
asmlinkage void sha1_transform_avx(u32 *digest, const char *data,
				   unsigned int rounds);
#endif

/* selected at module init: SSSE3 or AVX block-transform routine */
static asmlinkage void (*sha1_transform_asm)(u32 *, const char *,
					     unsigned int);

/* Reset the hash state to the SHA-1 initial constants. */
static int sha1_ssse3_init(struct shash_desc *desc)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	*sctx = (struct sha1_state){
		.state = { SHA1_H0, SHA1_H1, SHA1_H2, SHA1_H3, SHA1_H4 },
	};

	return 0;
}

/*
 * Core update, must be called between kernel_fpu_begin()/end().
 * @partial is the number of bytes already buffered; completes that
 * block first, then transforms as many whole blocks as possible and
 * buffers the remainder.
 */
static int __sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
			       unsigned int len, unsigned int partial)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int done = 0;

	sctx->count += len;

	if (partial) {
		done = SHA1_BLOCK_SIZE - partial;
		memcpy(sctx->buffer + partial, data, done);
		sha1_transform_asm(sctx->state, sctx->buffer, 1);
	}

	if (len - done >= SHA1_BLOCK_SIZE) {
		const unsigned int rounds = (len - done) / SHA1_BLOCK_SIZE;

		sha1_transform_asm(sctx->state, data + done, rounds);
		done += rounds * SHA1_BLOCK_SIZE;
	}

	memcpy(sctx->buffer, data + done, len - done);

	return 0;
}

/* .update: buffer small inputs; fall back to the generic C
 * implementation when the FPU is unavailable (e.g. in IRQ context). */
static int sha1_ssse3_update(struct shash_desc *desc, const u8 *data,
			     unsigned int len)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int partial = sctx->count % SHA1_BLOCK_SIZE;
	int res;

	/* Handle the fast case right here */
	if (partial + len < SHA1_BLOCK_SIZE) {
		sctx->count += len;
		memcpy(sctx->buffer + partial, data, len);
		return 0;
	}

	if (!irq_fpu_usable()) {
		res = crypto_sha1_update(desc, data, len);
	} else {
		kernel_fpu_begin();
		res = __sha1_ssse3_update(desc, data, len, partial);
		kernel_fpu_end();
	}

	return res;
}


/* Add padding and return the message digest. */
static int sha1_ssse3_final(struct shash_desc *desc, u8 *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);
	unsigned int i, index, padlen;
	__be32 *dst = (__be32 *)out;
	__be64 bits;
	static const u8 padding[SHA1_BLOCK_SIZE] = { 0x80, };

	/* message length in bits, big-endian, appended after padding */
	bits = cpu_to_be64(sctx->count << 3);

	/* Pad out to 56 mod 64 and append length */
	index = sctx->count % SHA1_BLOCK_SIZE;
	padlen = (index < 56) ? (56 - index) : ((SHA1_BLOCK_SIZE+56) - index);
	if (!irq_fpu_usable()) {
		crypto_sha1_update(desc, padding, padlen);
		crypto_sha1_update(desc, (const u8 *)&bits, sizeof(bits));
	} else {
		kernel_fpu_begin();
		/* We need to fill a whole block for __sha1_ssse3_update() */
		if (padlen <= 56) {
			sctx->count += padlen;
			memcpy(sctx->buffer + index, padding, padlen);
		} else {
			__sha1_ssse3_update(desc, padding, padlen, index);
		}
		__sha1_ssse3_update(desc, (const u8 *)&bits, sizeof(bits), 56);
		kernel_fpu_end();
	}

	/* Store state in digest */
	for (i = 0; i < 5; i++)
		dst[i] = cpu_to_be32(sctx->state[i]);

	/* Wipe context */
	memset(sctx, 0, sizeof(*sctx));

	return 0;
}

/* export/import: the whole sha1_state is the serialized form */
static int sha1_ssse3_export(struct shash_desc *desc, void *out)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(out, sctx, sizeof(*sctx));

	return 0;
}

static int sha1_ssse3_import(struct shash_desc *desc, const void *in)
{
	struct sha1_state *sctx = shash_desc_ctx(desc);

	memcpy(sctx, in, sizeof(*sctx));

	return 0;
}

static struct shash_alg alg = {
	.digestsize	=	SHA1_DIGEST_SIZE,
	.init		=	sha1_ssse3_init,
	.update		=	sha1_ssse3_update,
	.final		=	sha1_ssse3_final,
	.export		=	sha1_ssse3_export,
	.import		=	sha1_ssse3_import,
	.descsize	=	sizeof(struct sha1_state),
	.statesize	=	sizeof(struct sha1_state),
	.base		=	{
		.cra_name	=	"sha1",
		.cra_driver_name=	"sha1-ssse3",
		.cra_priority	=	150,
		.cra_flags	=	CRYPTO_ALG_TYPE_SHASH,
		.cra_blocksize	=	SHA1_BLOCK_SIZE,
		.cra_module	=	THIS_MODULE,
	}
};

#ifdef CONFIG_AS_AVX
/* AVX is only usable when the OS saves/restores YMM state (OSXSAVE and
 * the SSE+YMM bits set in XCR0). */
static bool __init avx_usable(void)
{
	u64 xcr0;

	if (!cpu_has_avx || !cpu_has_osxsave)
		return false;

	xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
	if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
		pr_info("AVX detected but unusable.\n");
		return false;
	}

	return true;
}
#endif

static int __init sha1_ssse3_mod_init(void)
{
	/* test for SSSE3 first */
	if (cpu_has_ssse3)
		sha1_transform_asm = sha1_transform_ssse3;

#ifdef CONFIG_AS_AVX
	/* allow AVX to override SSSE3, it's a little faster */
	if (avx_usable())
		sha1_transform_asm = sha1_transform_avx;
#endif

	if (sha1_transform_asm) {
		pr_info("Using %s optimized SHA-1 implementation\n",
		        sha1_transform_asm == sha1_transform_ssse3 ? "SSSE3"
		                                                   : "AVX");
		return crypto_register_shash(&alg);
	}
	pr_info("Neither AVX nor SSSE3 is available/usable.\n");

	return -ENODEV;
}

static void __exit sha1_ssse3_mod_fini(void)
{
	crypto_unregister_shash(&alg);
}

module_init(sha1_ssse3_mod_init);
module_exit(sha1_ssse3_mod_fini);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm, Supplemental SSE3 accelerated");

MODULE_ALIAS_CRYPTO("sha1");
gpl-2.0
lategoodbye/linux-mxs-power
drivers/net/wireless/ath/ath9k/dfs.c
713
5271
/*
 * Copyright (c) 2008-2011 Atheros Communications Inc.
 * Copyright (c) 2011 Neratec Solutions AG
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "hw.h"
#include "hw-ops.h"
#include "ath9k.h"
#include "dfs.h"
#include "dfs_debug.h"

/* internal struct to pass radar data */
struct ath_radar_data {
	u8 pulse_bw_info;
	u8 rssi;
	u8 ext_rssi;
	u8 pulse_length_ext;
	u8 pulse_length_pri;
};

/* convert pulse duration to usecs, considering clock mode */
static u32 dur_to_usecs(struct ath_hw *ah, u32 dur)
{
	const u32 AR93X_NSECS_PER_DUR = 800;
	const u32 AR93X_NSECS_PER_DUR_FAST = (8000 / 11);
	u32 nsecs;

	if (IS_CHAN_A_FAST_CLOCK(ah, ah->curchan))
		nsecs = dur * AR93X_NSECS_PER_DUR_FAST;
	else
		nsecs = dur * AR93X_NSECS_PER_DUR;

	/* round to nearest microsecond */
	return (nsecs + 500) / 1000;
}

#define PRI_CH_RADAR_FOUND 0x01
#define EXT_CH_RADAR_FOUND 0x02

/*
 * Turn raw descriptor fields (@ard) into a pulse_event (@pe): pick the
 * duration/RSSI from whichever channel (primary/extension/both) the
 * pulse was seen on, discard bogus reports, and convert the duration to
 * microseconds.  Returns false when the event should be ignored.
 */
static bool
ath9k_postprocess_radar_event(struct ath_softc *sc,
			      struct ath_radar_data *ard,
			      struct pulse_event *pe)
{
	u8 rssi;
	u16 dur;

	/*
	 * Only the last 2 bits of the BW info are relevant, they indicate
	 * which channel the radar was detected in.
	 */
	ard->pulse_bw_info &= 0x03;

	switch (ard->pulse_bw_info) {
	case PRI_CH_RADAR_FOUND:
		/* radar in ctrl channel */
		dur = ard->pulse_length_pri;
		DFS_STAT_INC(sc, pri_phy_errors);
		/*
		 * cannot use ctrl channel RSSI
		 * if extension channel is stronger
		 */
		rssi = (ard->ext_rssi >= (ard->rssi + 3)) ? 0 : ard->rssi;
		break;
	case EXT_CH_RADAR_FOUND:
		/* radar in extension channel */
		dur = ard->pulse_length_ext;
		DFS_STAT_INC(sc, ext_phy_errors);
		/*
		 * cannot use extension channel RSSI
		 * if control channel is stronger
		 */
		rssi = (ard->rssi >= (ard->ext_rssi + 12)) ? 0 : ard->ext_rssi;
		break;
	case (PRI_CH_RADAR_FOUND | EXT_CH_RADAR_FOUND):
		/*
		 * Conducted testing, when pulse is on DC, both pri and ext
		 * durations are reported to be same
		 *
		 * Radiated testing, when pulse is on DC, different pri and
		 * ext durations are reported, so take the larger of the two
		 */
		if (ard->pulse_length_ext >= ard->pulse_length_pri)
			dur = ard->pulse_length_ext;
		else
			dur = ard->pulse_length_pri;
		DFS_STAT_INC(sc, dc_phy_errors);

		/* when both are present use stronger one */
		rssi = (ard->rssi < ard->ext_rssi) ? ard->ext_rssi : ard->rssi;
		break;
	default:
		/*
		 * Bogus bandwidth info was received in descriptor,
		 * so ignore this PHY error
		 */
		DFS_STAT_INC(sc, bwinfo_discards);
		return false;
	}

	if (rssi == 0) {
		DFS_STAT_INC(sc, rssi_discards);
		return false;
	}

	/*
	 * TODO: check chirping pulses
	 *       checks for chirping are dependent on the DFS regulatory domain
	 *       used, which is yet TBD
	 */

	/* convert duration to usecs */
	pe->width = dur_to_usecs(sc->sc_ah, dur);
	pe->rssi = rssi;

	DFS_STAT_INC(sc, pulses_detected);
	return true;
}
#undef PRI_CH_RADAR_FOUND
#undef EXT_CH_RADAR_FOUND

/*
 * DFS: check PHY-error for radar pulse and feed the detector
 */
void ath9k_dfs_process_phyerr(struct ath_softc *sc, void *data,
			      struct ath_rx_status *rs, u64 mactime)
{
	struct ath_radar_data ard;
	u16 datalen;
	char *vdata_end;
	struct pulse_event pe;
	struct ath_hw *ah = sc->sc_ah;
	struct ath_common *common = ath9k_hw_common(ah);

	DFS_STAT_INC(sc, pulses_total);
	if ((rs->rs_phyerr != ATH9K_PHYERR_RADAR) &&
	    (rs->rs_phyerr != ATH9K_PHYERR_FALSE_RADAR_EXT)) {
		ath_dbg(common, DFS,
			"Error: rs_phyer=0x%x not a radar error\n",
			rs->rs_phyerr);
		DFS_STAT_INC(sc, pulses_no_dfs);
		return;
	}

	datalen = rs->rs_datalen;
	if (datalen == 0) {
		DFS_STAT_INC(sc, datalen_discards);
		return;
	}

	ard.rssi = rs->rs_rssi_ctl[0];
	ard.ext_rssi = rs->rs_rssi_ext[0];

	/*
	 * hardware stores this as 8 bit signed value.
	 * we will cap it at 0 if it is a negative number
	 */
	if (ard.rssi & 0x80)
		ard.rssi = 0;
	if (ard.ext_rssi & 0x80)
		ard.ext_rssi = 0;

	/* the pulse descriptor is appended at the end of the frame data */
	vdata_end = (char *)data + datalen;
	ard.pulse_bw_info = vdata_end[-1];
	ard.pulse_length_ext = vdata_end[-2];
	ard.pulse_length_pri = vdata_end[-3];

	pe.freq = ah->curchan->channel;
	pe.ts = mactime;
	if (ath9k_postprocess_radar_event(sc, &ard, &pe)) {
		struct dfs_pattern_detector *pd = sc->dfs_detector;
		ath_dbg(common, DFS,
			"ath9k_dfs_process_phyerr: channel=%d, ts=%llu, "
			"width=%d, rssi=%d, delta_ts=%llu\n",
			pe.freq, pe.ts, pe.width, pe.rssi,
			pe.ts - sc->dfs_prev_pulse_ts);
		sc->dfs_prev_pulse_ts = pe.ts;
		DFS_STAT_INC(sc, pulses_processed);
		if (pd != NULL && pd->add_pulse(pd, &pe)) {
			DFS_STAT_INC(sc, radar_detected);
			ieee80211_radar_detected(sc->hw);
		}
	}
}
gpl-2.0
Ordenkrieger/android_kernel_cyanogen_msm8974
drivers/ata/libata-transport.c
969
19817
/* * Copyright 2008 ioogle, Inc. All rights reserved. * Released under GPL v2. * * Libata transport class. * * The ATA transport class contains common code to deal with ATA HBAs, * an approximated representation of ATA topologies in the driver model, * and various sysfs attributes to expose these topologies and management * interfaces to user-space. * * There are 3 objects defined in in this class: * - ata_port * - ata_link * - ata_device * Each port has a link object. Each link can have up to two devices for PATA * and generally one for SATA. * If there is SATA port multiplier [PMP], 15 additional ata_link object are * created. * * These objects are created when the ata host is initialized and when a PMP is * found. They are removed only when the HBA is removed, cleaned before the * error handler runs. */ #include <linux/kernel.h> #include <linux/blkdev.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <scsi/scsi_transport.h> #include <linux/libata.h> #include <linux/hdreg.h> #include <linux/uaccess.h> #include <linux/pm_runtime.h> #include "libata.h" #include "libata-transport.h" #define ATA_PORT_ATTRS 2 #define ATA_LINK_ATTRS 3 #define ATA_DEV_ATTRS 9 struct scsi_transport_template; struct scsi_transport_template *ata_scsi_transport_template; struct ata_internal { struct scsi_transport_template t; struct device_attribute private_port_attrs[ATA_PORT_ATTRS]; struct device_attribute private_link_attrs[ATA_LINK_ATTRS]; struct device_attribute private_dev_attrs[ATA_DEV_ATTRS]; struct transport_container link_attr_cont; struct transport_container dev_attr_cont; /* * The array of null terminated pointers to attributes * needed by scsi_sysfs.c */ struct device_attribute *link_attrs[ATA_LINK_ATTRS + 1]; struct device_attribute *port_attrs[ATA_PORT_ATTRS + 1]; struct device_attribute *dev_attrs[ATA_DEV_ATTRS + 1]; }; #define to_ata_internal(tmpl) container_of(tmpl, struct ata_internal, t) #define tdev_to_device(d) \ container_of((d), struct ata_device, tdev) 
#define transport_class_to_dev(dev) \ tdev_to_device((dev)->parent) #define tdev_to_link(d) \ container_of((d), struct ata_link, tdev) #define transport_class_to_link(dev) \ tdev_to_link((dev)->parent) #define tdev_to_port(d) \ container_of((d), struct ata_port, tdev) #define transport_class_to_port(dev) \ tdev_to_port((dev)->parent) /* Device objects are always created whit link objects */ static int ata_tdev_add(struct ata_device *dev); static void ata_tdev_delete(struct ata_device *dev); /* * Hack to allow attributes of the same name in different objects. */ #define ATA_DEVICE_ATTR(_prefix,_name,_mode,_show,_store) \ struct device_attribute device_attr_##_prefix##_##_name = \ __ATTR(_name,_mode,_show,_store) #define ata_bitfield_name_match(title, table) \ static ssize_t \ get_ata_##title##_names(u32 table_key, char *buf) \ { \ char *prefix = ""; \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value & table_key) { \ len += sprintf(buf + len, "%s%s", \ prefix, table[i].name); \ prefix = ", "; \ } \ } \ len += sprintf(buf + len, "\n"); \ return len; \ } #define ata_bitfield_name_search(title, table) \ static ssize_t \ get_ata_##title##_names(u32 table_key, char *buf) \ { \ ssize_t len = 0; \ int i; \ \ for (i = 0; i < ARRAY_SIZE(table); i++) { \ if (table[i].value == table_key) { \ len += sprintf(buf + len, "%s", \ table[i].name); \ break; \ } \ } \ len += sprintf(buf + len, "\n"); \ return len; \ } static struct { u32 value; char *name; } ata_class_names[] = { { ATA_DEV_UNKNOWN, "unknown" }, { ATA_DEV_ATA, "ata" }, { ATA_DEV_ATA_UNSUP, "ata" }, { ATA_DEV_ATAPI, "atapi" }, { ATA_DEV_ATAPI_UNSUP, "atapi" }, { ATA_DEV_PMP, "pmp" }, { ATA_DEV_PMP_UNSUP, "pmp" }, { ATA_DEV_SEMB, "semb" }, { ATA_DEV_SEMB_UNSUP, "semb" }, { ATA_DEV_NONE, "none" } }; ata_bitfield_name_search(class, ata_class_names) static struct { u32 value; char *name; } ata_err_names[] = { { AC_ERR_DEV, "DeviceError" }, { AC_ERR_HSM, "HostStateMachineError" }, { 
AC_ERR_TIMEOUT, "Timeout" }, { AC_ERR_MEDIA, "MediaError" }, { AC_ERR_ATA_BUS, "BusError" }, { AC_ERR_HOST_BUS, "HostBusError" }, { AC_ERR_SYSTEM, "SystemError" }, { AC_ERR_INVALID, "InvalidArg" }, { AC_ERR_OTHER, "Unknown" }, { AC_ERR_NODEV_HINT, "NoDeviceHint" }, { AC_ERR_NCQ, "NCQError" } }; ata_bitfield_name_match(err, ata_err_names) static struct { u32 value; char *name; } ata_xfer_names[] = { { XFER_UDMA_7, "XFER_UDMA_7" }, { XFER_UDMA_6, "XFER_UDMA_6" }, { XFER_UDMA_5, "XFER_UDMA_5" }, { XFER_UDMA_4, "XFER_UDMA_4" }, { XFER_UDMA_3, "XFER_UDMA_3" }, { XFER_UDMA_2, "XFER_UDMA_2" }, { XFER_UDMA_1, "XFER_UDMA_1" }, { XFER_UDMA_0, "XFER_UDMA_0" }, { XFER_MW_DMA_4, "XFER_MW_DMA_4" }, { XFER_MW_DMA_3, "XFER_MW_DMA_3" }, { XFER_MW_DMA_2, "XFER_MW_DMA_2" }, { XFER_MW_DMA_1, "XFER_MW_DMA_1" }, { XFER_MW_DMA_0, "XFER_MW_DMA_0" }, { XFER_SW_DMA_2, "XFER_SW_DMA_2" }, { XFER_SW_DMA_1, "XFER_SW_DMA_1" }, { XFER_SW_DMA_0, "XFER_SW_DMA_0" }, { XFER_PIO_6, "XFER_PIO_6" }, { XFER_PIO_5, "XFER_PIO_5" }, { XFER_PIO_4, "XFER_PIO_4" }, { XFER_PIO_3, "XFER_PIO_3" }, { XFER_PIO_2, "XFER_PIO_2" }, { XFER_PIO_1, "XFER_PIO_1" }, { XFER_PIO_0, "XFER_PIO_0" }, { XFER_PIO_SLOW, "XFER_PIO_SLOW" } }; ata_bitfield_name_match(xfer,ata_xfer_names) /* * ATA Port attributes */ #define ata_port_show_simple(field, name, format_string, cast) \ static ssize_t \ show_ata_port_##name(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct ata_port *ap = transport_class_to_port(dev); \ \ return snprintf(buf, 20, format_string, cast ap->field); \ } #define ata_port_simple_attr(field, name, format_string, type) \ ata_port_show_simple(field, name, format_string, (type)) \ static DEVICE_ATTR(name, S_IRUGO, show_ata_port_##name, NULL) ata_port_simple_attr(nr_pmp_links, nr_pmp_links, "%d\n", int); ata_port_simple_attr(stats.idle_irq, idle_irq, "%ld\n", unsigned long); static DECLARE_TRANSPORT_CLASS(ata_port_class, "ata_port", NULL, NULL, NULL); static void ata_tport_release(struct device 
*dev) { put_device(dev->parent); } /** * ata_is_port -- check if a struct device represents a ATA port * @dev: device to check * * Returns: * %1 if the device represents a ATA Port, %0 else */ int ata_is_port(const struct device *dev) { return dev->release == ata_tport_release; } static int ata_tport_match(struct attribute_container *cont, struct device *dev) { if (!ata_is_port(dev)) return 0; return &ata_scsi_transport_template->host_attrs.ac == cont; } /** * ata_tport_delete -- remove ATA PORT * @port: ATA PORT to remove * * Removes the specified ATA PORT. Remove the associated link as well. */ void ata_tport_delete(struct ata_port *ap) { struct device *dev = &ap->tdev; ata_tlink_delete(&ap->link); transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); put_device(dev); } /** ata_tport_add - initialize a transport ATA port structure * * @parent: parent device * @ap: existing ata_port structure * * Initialize a ATA port structure for sysfs. It will be added to the device * tree below the device specified by @parent which could be a PCI device. 
* * Returns %0 on success */ int ata_tport_add(struct device *parent, struct ata_port *ap) { int error; struct device *dev = &ap->tdev; device_initialize(dev); dev->type = &ata_port_type; dev->parent = get_device(parent); dev->release = ata_tport_release; dev_set_name(dev, "ata%d", ap->print_id); transport_setup_device(dev); error = device_add(dev); if (error) { goto tport_err; } device_enable_async_suspend(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); pm_runtime_forbid(dev); transport_add_device(dev); transport_configure_device(dev); error = ata_tlink_add(&ap->link); if (error) { goto tport_link_err; } return 0; tport_link_err: transport_remove_device(dev); device_del(dev); tport_err: transport_destroy_device(dev); put_device(dev); return error; } /* * ATA link attributes */ static int noop(int x) { return x; } #define ata_link_show_linkspeed(field, format) \ static ssize_t \ show_ata_link_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct ata_link *link = transport_class_to_link(dev); \ \ return sprintf(buf, "%s\n", sata_spd_string(format(link->field))); \ } #define ata_link_linkspeed_attr(field, format) \ ata_link_show_linkspeed(field, format) \ static DEVICE_ATTR(field, S_IRUGO, show_ata_link_##field, NULL) ata_link_linkspeed_attr(hw_sata_spd_limit, fls); ata_link_linkspeed_attr(sata_spd_limit, fls); ata_link_linkspeed_attr(sata_spd, noop); static DECLARE_TRANSPORT_CLASS(ata_link_class, "ata_link", NULL, NULL, NULL); static void ata_tlink_release(struct device *dev) { put_device(dev->parent); } /** * ata_is_link -- check if a struct device represents a ATA link * @dev: device to check * * Returns: * %1 if the device represents a ATA link, %0 else */ int ata_is_link(const struct device *dev) { return dev->release == ata_tlink_release; } static int ata_tlink_match(struct attribute_container *cont, struct device *dev) { struct ata_internal* i = to_ata_internal(ata_scsi_transport_template); if (!ata_is_link(dev)) return 
0; return &i->link_attr_cont.ac == cont; } /** * ata_tlink_delete -- remove ATA LINK * @port: ATA LINK to remove * * Removes the specified ATA LINK. remove associated ATA device(s) as well. */ void ata_tlink_delete(struct ata_link *link) { struct device *dev = &link->tdev; struct ata_device *ata_dev; ata_for_each_dev(ata_dev, link, ALL) { ata_tdev_delete(ata_dev); } transport_remove_device(dev); device_del(dev); transport_destroy_device(dev); put_device(dev); } /** * ata_tlink_add -- initialize a transport ATA link structure * @link: allocated ata_link structure. * * Initialize an ATA LINK structure for sysfs. It will be added in the * device tree below the ATA PORT it belongs to. * * Returns %0 on success */ int ata_tlink_add(struct ata_link *link) { struct device *dev = &link->tdev; struct ata_port *ap = link->ap; struct ata_device *ata_dev; int error; device_initialize(dev); dev->parent = get_device(&ap->tdev); dev->release = ata_tlink_release; if (ata_is_host_link(link)) dev_set_name(dev, "link%d", ap->print_id); else dev_set_name(dev, "link%d.%d", ap->print_id, link->pmp); transport_setup_device(dev); error = device_add(dev); if (error) { goto tlink_err; } transport_add_device(dev); transport_configure_device(dev); ata_for_each_dev(ata_dev, link, ALL) { error = ata_tdev_add(ata_dev); if (error) { goto tlink_dev_err; } } return 0; tlink_dev_err: while (--ata_dev >= link->device) { ata_tdev_delete(ata_dev); } transport_remove_device(dev); device_del(dev); tlink_err: transport_destroy_device(dev); put_device(dev); return error; } /* * ATA device attributes */ #define ata_dev_show_class(title, field) \ static ssize_t \ show_ata_dev_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct ata_device *ata_dev = transport_class_to_dev(dev); \ \ return get_ata_##title##_names(ata_dev->field, buf); \ } #define ata_dev_attr(title, field) \ ata_dev_show_class(title, field) \ static DEVICE_ATTR(field, S_IRUGO, show_ata_dev_##field, NULL) 
ata_dev_attr(class, class); ata_dev_attr(xfer, pio_mode); ata_dev_attr(xfer, dma_mode); ata_dev_attr(xfer, xfer_mode); #define ata_dev_show_simple(field, format_string, cast) \ static ssize_t \ show_ata_dev_##field(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct ata_device *ata_dev = transport_class_to_dev(dev); \ \ return snprintf(buf, 20, format_string, cast ata_dev->field); \ } #define ata_dev_simple_attr(field, format_string, type) \ ata_dev_show_simple(field, format_string, (type)) \ static DEVICE_ATTR(field, S_IRUGO, \ show_ata_dev_##field, NULL) ata_dev_simple_attr(spdn_cnt, "%d\n", int); struct ata_show_ering_arg { char* buf; int written; }; static int ata_show_ering(struct ata_ering_entry *ent, void *void_arg) { struct ata_show_ering_arg* arg = void_arg; struct timespec time; jiffies_to_timespec(ent->timestamp,&time); arg->written += sprintf(arg->buf + arg->written, "[%5lu.%06lu]", time.tv_sec, time.tv_nsec); arg->written += get_ata_err_names(ent->err_mask, arg->buf + arg->written); return 0; } static ssize_t show_ata_dev_ering(struct device *dev, struct device_attribute *attr, char *buf) { struct ata_device *ata_dev = transport_class_to_dev(dev); struct ata_show_ering_arg arg = { buf, 0 }; ata_ering_map(&ata_dev->ering, ata_show_ering, &arg); return arg.written; } static DEVICE_ATTR(ering, S_IRUGO, show_ata_dev_ering, NULL); static ssize_t show_ata_dev_id(struct device *dev, struct device_attribute *attr, char *buf) { struct ata_device *ata_dev = transport_class_to_dev(dev); int written = 0, i = 0; if (ata_dev->class == ATA_DEV_PMP) return 0; for(i=0;i<ATA_ID_WORDS;i++) { written += snprintf(buf+written, 20, "%04x%c", ata_dev->id[i], ((i+1) & 7) ? 
' ' : '\n'); } return written; } static DEVICE_ATTR(id, S_IRUGO, show_ata_dev_id, NULL); static ssize_t show_ata_dev_gscr(struct device *dev, struct device_attribute *attr, char *buf) { struct ata_device *ata_dev = transport_class_to_dev(dev); int written = 0, i = 0; if (ata_dev->class != ATA_DEV_PMP) return 0; for(i=0;i<SATA_PMP_GSCR_DWORDS;i++) { written += snprintf(buf+written, 20, "%08x%c", ata_dev->gscr[i], ((i+1) & 3) ? ' ' : '\n'); } if (SATA_PMP_GSCR_DWORDS & 3) buf[written-1] = '\n'; return written; } static DEVICE_ATTR(gscr, S_IRUGO, show_ata_dev_gscr, NULL); static DECLARE_TRANSPORT_CLASS(ata_dev_class, "ata_device", NULL, NULL, NULL); static void ata_tdev_release(struct device *dev) { put_device(dev->parent); } /** * ata_is_ata_dev -- check if a struct device represents a ATA device * @dev: device to check * * Returns: * %1 if the device represents a ATA device, %0 else */ int ata_is_ata_dev(const struct device *dev) { return dev->release == ata_tdev_release; } static int ata_tdev_match(struct attribute_container *cont, struct device *dev) { struct ata_internal* i = to_ata_internal(ata_scsi_transport_template); if (!ata_is_ata_dev(dev)) return 0; return &i->dev_attr_cont.ac == cont; } /** * ata_tdev_free -- free a ATA LINK * @dev: ATA PHY to free * * Frees the specified ATA PHY. * * Note: * This function must only be called on a PHY that has not * successfully been added using ata_tdev_add(). */ static void ata_tdev_free(struct ata_device *dev) { transport_destroy_device(&dev->tdev); put_device(&dev->tdev); } /** * ata_tdev_delete -- remove ATA device * @port: ATA PORT to remove * * Removes the specified ATA device. */ static void ata_tdev_delete(struct ata_device *ata_dev) { struct device *dev = &ata_dev->tdev; transport_remove_device(dev); device_del(dev); ata_tdev_free(ata_dev); } /** * ata_tdev_add -- initialize a transport ATA device structure. * @ata_dev: ata_dev structure. * * Initialize an ATA device structure for sysfs. 
It will be added in the * device tree below the ATA LINK device it belongs to. * * Returns %0 on success */ static int ata_tdev_add(struct ata_device *ata_dev) { struct device *dev = &ata_dev->tdev; struct ata_link *link = ata_dev->link; struct ata_port *ap = link->ap; int error; device_initialize(dev); dev->parent = get_device(&link->tdev); dev->release = ata_tdev_release; if (ata_is_host_link(link)) dev_set_name(dev, "dev%d.%d", ap->print_id,ata_dev->devno); else dev_set_name(dev, "dev%d.%d.0", ap->print_id, link->pmp); transport_setup_device(dev); error = device_add(dev); if (error) { ata_tdev_free(ata_dev); return error; } transport_add_device(dev); transport_configure_device(dev); return 0; } /* * Setup / Teardown code */ #define SETUP_TEMPLATE(attrb, field, perm, test) \ i->private_##attrb[count] = dev_attr_##field; \ i->private_##attrb[count].attr.mode = perm; \ i->attrb[count] = &i->private_##attrb[count]; \ if (test) \ count++ #define SETUP_LINK_ATTRIBUTE(field) \ SETUP_TEMPLATE(link_attrs, field, S_IRUGO, 1) #define SETUP_PORT_ATTRIBUTE(field) \ SETUP_TEMPLATE(port_attrs, field, S_IRUGO, 1) #define SETUP_DEV_ATTRIBUTE(field) \ SETUP_TEMPLATE(dev_attrs, field, S_IRUGO, 1) /** * ata_attach_transport -- instantiate ATA transport template */ struct scsi_transport_template *ata_attach_transport(void) { struct ata_internal *i; int count; i = kzalloc(sizeof(struct ata_internal), GFP_KERNEL); if (!i) return NULL; i->t.eh_strategy_handler = ata_scsi_error; i->t.eh_timed_out = ata_scsi_timed_out; i->t.user_scan = ata_scsi_user_scan; i->t.host_attrs.ac.attrs = &i->port_attrs[0]; i->t.host_attrs.ac.class = &ata_port_class.class; i->t.host_attrs.ac.match = ata_tport_match; transport_container_register(&i->t.host_attrs); i->link_attr_cont.ac.class = &ata_link_class.class; i->link_attr_cont.ac.attrs = &i->link_attrs[0]; i->link_attr_cont.ac.match = ata_tlink_match; transport_container_register(&i->link_attr_cont); i->dev_attr_cont.ac.class = &ata_dev_class.class; 
i->dev_attr_cont.ac.attrs = &i->dev_attrs[0]; i->dev_attr_cont.ac.match = ata_tdev_match; transport_container_register(&i->dev_attr_cont); count = 0; SETUP_PORT_ATTRIBUTE(nr_pmp_links); SETUP_PORT_ATTRIBUTE(idle_irq); BUG_ON(count > ATA_PORT_ATTRS); i->port_attrs[count] = NULL; count = 0; SETUP_LINK_ATTRIBUTE(hw_sata_spd_limit); SETUP_LINK_ATTRIBUTE(sata_spd_limit); SETUP_LINK_ATTRIBUTE(sata_spd); BUG_ON(count > ATA_LINK_ATTRS); i->link_attrs[count] = NULL; count = 0; SETUP_DEV_ATTRIBUTE(class); SETUP_DEV_ATTRIBUTE(pio_mode); SETUP_DEV_ATTRIBUTE(dma_mode); SETUP_DEV_ATTRIBUTE(xfer_mode); SETUP_DEV_ATTRIBUTE(spdn_cnt); SETUP_DEV_ATTRIBUTE(ering); SETUP_DEV_ATTRIBUTE(id); SETUP_DEV_ATTRIBUTE(gscr); BUG_ON(count > ATA_DEV_ATTRS); i->dev_attrs[count] = NULL; return &i->t; } /** * ata_release_transport -- release ATA transport template instance * @t: transport template instance */ void ata_release_transport(struct scsi_transport_template *t) { struct ata_internal *i = to_ata_internal(t); transport_container_unregister(&i->t.host_attrs); transport_container_unregister(&i->link_attr_cont); transport_container_unregister(&i->dev_attr_cont); kfree(i); } __init int libata_transport_init(void) { int error; error = transport_class_register(&ata_link_class); if (error) goto out_unregister_transport; error = transport_class_register(&ata_port_class); if (error) goto out_unregister_link; error = transport_class_register(&ata_dev_class); if (error) goto out_unregister_port; return 0; out_unregister_port: transport_class_unregister(&ata_port_class); out_unregister_link: transport_class_unregister(&ata_link_class); out_unregister_transport: return error; } void __exit libata_transport_exit(void) { transport_class_unregister(&ata_link_class); transport_class_unregister(&ata_port_class); transport_class_unregister(&ata_dev_class); }
gpl-2.0
techomancer/kernel-galaxytab
fs/ocfs2/locks.c
1225
3465
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * locks.c * * Userspace file locking support * * Copyright (C) 2007 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. */ #include <linux/fs.h> #include <linux/fcntl.h> #define MLOG_MASK_PREFIX ML_INODE #include <cluster/masklog.h> #include "ocfs2.h" #include "dlmglue.h" #include "file.h" #include "inode.h" #include "locks.h" static int ocfs2_do_flock(struct file *file, struct inode *inode, int cmd, struct file_lock *fl) { int ret = 0, level = 0, trylock = 0; struct ocfs2_file_private *fp = file->private_data; struct ocfs2_lock_res *lockres = &fp->fp_flock; if (fl->fl_type == F_WRLCK) level = 1; if (!IS_SETLKW(cmd)) trylock = 1; mutex_lock(&fp->fp_mutex); if (lockres->l_flags & OCFS2_LOCK_ATTACHED && lockres->l_level > LKM_NLMODE) { int old_level = 0; if (lockres->l_level == LKM_EXMODE) old_level = 1; if (level == old_level) goto out; /* * Converting an existing lock is not guaranteed to be * atomic, so we can get away with simply unlocking * here and allowing the lock code to try at the new * level. 
*/ flock_lock_file_wait(file, &(struct file_lock){.fl_type = F_UNLCK}); ocfs2_file_unlock(file); } ret = ocfs2_file_lock(file, level, trylock); if (ret) { if (ret == -EAGAIN && trylock) ret = -EWOULDBLOCK; else mlog_errno(ret); goto out; } ret = flock_lock_file_wait(file, fl); out: mutex_unlock(&fp->fp_mutex); return ret; } static int ocfs2_do_funlock(struct file *file, int cmd, struct file_lock *fl) { int ret; struct ocfs2_file_private *fp = file->private_data; mutex_lock(&fp->fp_mutex); ocfs2_file_unlock(file); ret = flock_lock_file_wait(file, fl); mutex_unlock(&fp->fp_mutex); return ret; } /* * Overall flow of ocfs2_flock() was influenced by gfs2_flock(). */ int ocfs2_flock(struct file *file, int cmd, struct file_lock *fl) { struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (!(fl->fl_flags & FL_FLOCK)) return -ENOLCK; if (__mandatory_lock(inode)) return -ENOLCK; if ((osb->s_mount_opt & OCFS2_MOUNT_LOCALFLOCKS) || ocfs2_mount_local(osb)) return flock_lock_file_wait(file, fl); if (fl->fl_type == F_UNLCK) return ocfs2_do_funlock(file, cmd, fl); else return ocfs2_do_flock(file, inode, cmd, fl); } int ocfs2_lock(struct file *file, int cmd, struct file_lock *fl) { struct inode *inode = file->f_mapping->host; struct ocfs2_super *osb = OCFS2_SB(inode->i_sb); if (!(fl->fl_flags & FL_POSIX)) return -ENOLCK; if (__mandatory_lock(inode) && fl->fl_type != F_UNLCK) return -ENOLCK; return ocfs2_plock(osb->cconn, OCFS2_I(inode)->ip_blkno, file, cmd, fl); }
gpl-2.0
YUPlayGod/android_kernel_yu_msm8916
fs/cachefiles/xattr.c
2249
6569
/* CacheFiles extended attribute management * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public Licence * as published by the Free Software Foundation; either version * 2 of the Licence, or (at your option) any later version. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/fsnotify.h> #include <linux/quotaops.h> #include <linux/xattr.h> #include <linux/slab.h> #include "internal.h" static const char cachefiles_xattr_cache[] = XATTR_USER_PREFIX "CacheFiles.cache"; /* * check the type label on an object * - done using xattrs */ int cachefiles_check_object_type(struct cachefiles_object *object) { struct dentry *dentry = object->dentry; char type[3], xtype[3]; int ret; ASSERT(dentry); ASSERT(dentry->d_inode); if (!object->fscache.cookie) strcpy(type, "C3"); else snprintf(type, 3, "%02x", object->fscache.cookie->def->type); _enter("%p{%s}", object, type); /* attempt to install a type label directly */ ret = vfs_setxattr(dentry, cachefiles_xattr_cache, type, 2, XATTR_CREATE); if (ret == 0) { _debug("SET"); /* we succeeded */ goto error; } if (ret != -EEXIST) { kerror("Can't set xattr on %*.*s [%lu] (err %d)", dentry->d_name.len, dentry->d_name.len, dentry->d_name.name, dentry->d_inode->i_ino, -ret); goto error; } /* read the current type label */ ret = vfs_getxattr(dentry, cachefiles_xattr_cache, xtype, 3); if (ret < 0) { if (ret == -ERANGE) goto bad_type_length; kerror("Can't read xattr on %*.*s [%lu] (err %d)", dentry->d_name.len, dentry->d_name.len, dentry->d_name.name, dentry->d_inode->i_ino, -ret); goto error; } /* check the type is what we're expecting */ if (ret != 2) goto bad_type_length; if (xtype[0] != type[0] || xtype[1] != type[1]) goto bad_type; ret = 0; error: _leave(" = %d", ret); return ret; bad_type_length: 
kerror("Cache object %lu type xattr length incorrect", dentry->d_inode->i_ino); ret = -EIO; goto error; bad_type: xtype[2] = 0; kerror("Cache object %*.*s [%lu] type %s not %s", dentry->d_name.len, dentry->d_name.len, dentry->d_name.name, dentry->d_inode->i_ino, xtype, type); ret = -EIO; goto error; } /* * set the state xattr on a cache file */ int cachefiles_set_object_xattr(struct cachefiles_object *object, struct cachefiles_xattr *auxdata) { struct dentry *dentry = object->dentry; int ret; ASSERT(object->fscache.cookie); ASSERT(dentry); _enter("%p,#%d", object, auxdata->len); /* attempt to install the cache metadata directly */ _debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len); ret = vfs_setxattr(dentry, cachefiles_xattr_cache, &auxdata->type, auxdata->len, XATTR_CREATE); if (ret < 0 && ret != -ENOMEM) cachefiles_io_error_obj( object, "Failed to set xattr with error %d", ret); _leave(" = %d", ret); return ret; } /* * update the state xattr on a cache file */ int cachefiles_update_object_xattr(struct cachefiles_object *object, struct cachefiles_xattr *auxdata) { struct dentry *dentry = object->dentry; int ret; ASSERT(object->fscache.cookie); ASSERT(dentry); _enter("%p,#%d", object, auxdata->len); /* attempt to install the cache metadata directly */ _debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len); ret = vfs_setxattr(dentry, cachefiles_xattr_cache, &auxdata->type, auxdata->len, XATTR_REPLACE); if (ret < 0 && ret != -ENOMEM) cachefiles_io_error_obj( object, "Failed to update xattr with error %d", ret); _leave(" = %d", ret); return ret; } /* * check the state xattr on a cache file * - return -ESTALE if the object should be deleted */ int cachefiles_check_object_xattr(struct cachefiles_object *object, struct cachefiles_xattr *auxdata) { struct cachefiles_xattr *auxbuf; struct dentry *dentry = object->dentry; int ret; _enter("%p,#%d", object, auxdata->len); ASSERT(dentry); ASSERT(dentry->d_inode); auxbuf = kmalloc(sizeof(struct 
cachefiles_xattr) + 512, cachefiles_gfp); if (!auxbuf) { _leave(" = -ENOMEM"); return -ENOMEM; } /* read the current type label */ ret = vfs_getxattr(dentry, cachefiles_xattr_cache, &auxbuf->type, 512 + 1); if (ret < 0) { if (ret == -ENODATA) goto stale; /* no attribute - power went off * mid-cull? */ if (ret == -ERANGE) goto bad_type_length; cachefiles_io_error_obj(object, "Can't read xattr on %lu (err %d)", dentry->d_inode->i_ino, -ret); goto error; } /* check the on-disk object */ if (ret < 1) goto bad_type_length; if (auxbuf->type != auxdata->type) goto stale; auxbuf->len = ret; /* consult the netfs */ if (object->fscache.cookie->def->check_aux) { enum fscache_checkaux result; unsigned int dlen; dlen = auxbuf->len - 1; _debug("checkaux %s #%u", object->fscache.cookie->def->name, dlen); result = fscache_check_aux(&object->fscache, &auxbuf->data, dlen); switch (result) { /* entry okay as is */ case FSCACHE_CHECKAUX_OKAY: goto okay; /* entry requires update */ case FSCACHE_CHECKAUX_NEEDS_UPDATE: break; /* entry requires deletion */ case FSCACHE_CHECKAUX_OBSOLETE: goto stale; default: BUG(); } /* update the current label */ ret = vfs_setxattr(dentry, cachefiles_xattr_cache, &auxdata->type, auxdata->len, XATTR_REPLACE); if (ret < 0) { cachefiles_io_error_obj(object, "Can't update xattr on %lu" " (error %d)", dentry->d_inode->i_ino, -ret); goto error; } } okay: ret = 0; error: kfree(auxbuf); _leave(" = %d", ret); return ret; bad_type_length: kerror("Cache object %lu xattr length incorrect", dentry->d_inode->i_ino); ret = -EIO; goto error; stale: ret = -ESTALE; goto error; } /* * remove the object's xattr to mark it stale */ int cachefiles_remove_object_xattr(struct cachefiles_cache *cache, struct dentry *dentry) { int ret; ret = vfs_removexattr(dentry, cachefiles_xattr_cache); if (ret < 0) { if (ret == -ENOENT || ret == -ENODATA) ret = 0; else if (ret != -ENOMEM) cachefiles_io_error(cache, "Can't remove xattr from %lu" " (error %d)", dentry->d_inode->i_ino, -ret); } 
_leave(" = %d", ret); return ret; }
gpl-2.0
pombredanne/bcm11351
drivers/scsi/csiostor/csio_scsi.c
2249
69256
/* * This file is part of the Chelsio FCoE driver for Linux. * * Copyright (c) 2008-2012 Chelsio Communications, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/device.h> #include <linux/delay.h> #include <linux/ctype.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/compiler.h> #include <linux/export.h> #include <linux/module.h> #include <asm/unaligned.h> #include <asm/page.h> #include <scsi/scsi.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport_fc.h> #include "csio_hw.h" #include "csio_lnode.h" #include "csio_rnode.h" #include "csio_scsi.h" #include "csio_init.h" int csio_scsi_eqsize = 65536; int csio_scsi_iqlen = 128; int csio_scsi_ioreqs = 2048; uint32_t csio_max_scan_tmo; uint32_t csio_delta_scan_tmo = 5; int csio_lun_qdepth = 32; static int csio_ddp_descs = 128; static int csio_do_abrt_cls(struct csio_hw *, struct csio_ioreq *, bool); static void csio_scsis_uninit(struct csio_ioreq *, enum csio_scsi_ev); static void csio_scsis_io_active(struct csio_ioreq *, enum csio_scsi_ev); static void csio_scsis_tm_active(struct csio_ioreq *, enum csio_scsi_ev); static void csio_scsis_aborting(struct csio_ioreq *, enum csio_scsi_ev); static void csio_scsis_closing(struct csio_ioreq *, enum csio_scsi_ev); static void csio_scsis_shost_cmpl_await(struct csio_ioreq *, enum csio_scsi_ev); /* * csio_scsi_match_io - Match an ioreq with the given SCSI level data. * @ioreq: The I/O request * @sld: Level information * * Should be called with lock held. 
 *
 */
static bool
csio_scsi_match_io(struct csio_ioreq *ioreq, struct csio_scsi_level_data *sld)
{
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(ioreq);

	switch (sld->level) {
	case CSIO_LEV_LUN:
		/* LUN-level match additionally requires a live SCSI command */
		if (scmnd == NULL)
			return false;

		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode) &&
			((uint64_t)scmnd->device->lun == sld->oslun));

	case CSIO_LEV_RNODE:
		return ((ioreq->lnode == sld->lnode) &&
			(ioreq->rnode == sld->rnode));
	case CSIO_LEV_LNODE:
		return (ioreq->lnode == sld->lnode);
	case CSIO_LEV_ALL:
		return true;
	default:
		return false;
	}
}

/*
 * csio_scsi_gather_active_ios - Gather active I/Os based on level
 * @scm: SCSI module
 * @sld: Level information
 * @dest: The queue where these I/Os have to be gathered.
 *
 * Moves every ioreq on the module's active_q that matches @sld onto
 * @dest. Should be called with lock held.
 */
static void
csio_scsi_gather_active_ios(struct csio_scsim *scm,
			    struct csio_scsi_level_data *sld,
			    struct list_head *dest)
{
	struct list_head *tmp, *next;

	if (list_empty(&scm->active_q))
		return;

	/* Just splice the entire active_q into dest */
	if (sld->level == CSIO_LEV_ALL) {
		list_splice_tail_init(&scm->active_q, dest);
		return;
	}

	list_for_each_safe(tmp, next, &scm->active_q) {
		if (csio_scsi_match_io((struct csio_ioreq *)tmp, sld)) {
			list_del_init(tmp);
			list_add_tail(tmp, dest);
		}
	}
}

/*
 * Returns 1 if the given FW error code indicates loss of the I-T nexus
 * (link down / remote device lost or logged out), 0 otherwise.
 */
static inline bool
csio_scsi_itnexus_loss_error(uint16_t error)
{
	switch (error) {
	case FW_ERR_LINK_DOWN:
	case FW_RDEV_NOT_READY:
	case FW_ERR_RDEV_LOST:
	case FW_ERR_RDEV_LOGO:
	case FW_ERR_RDEV_IMPL_LOGO:
		return 1;
	}
	return 0;
}

/*
 * csio_scsi_tag - Map the midlayer's queue-tag message onto an FCP
 * priority/task-attribute value.
 * @scmnd: SCSI command
 * @tag: Output - one of @hq/@oq/@sq, or 0 when the command is untagged.
 * @hq: Value to use for HEAD_OF_QUEUE_TAG
 * @oq: Value to use for ORDERED_QUEUE_TAG
 * @sq: Value to use for any other (simple) tag
 */
static inline void
csio_scsi_tag(struct scsi_cmnd *scmnd, uint8_t *tag, uint8_t hq,
	      uint8_t oq, uint8_t sq)
{
	char stag[2];

	if (scsi_populate_tag_msg(scmnd, stag)) {
		switch (stag[0]) {
		case HEAD_OF_QUEUE_TAG:
			*tag = hq;
			break;
		case ORDERED_QUEUE_TAG:
			*tag = oq;
			break;
		default:
			*tag = sq;
			break;
		}
	} else
		*tag = 0;
}

/*
 * csio_scsi_fcp_cmnd - Frame the SCSI FCP command payload.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 *
 * This routine is shared between FCP_WRITE, FCP_READ and FCP_CMD requests.
 */
static inline void
csio_scsi_fcp_cmnd(struct csio_ioreq *req, void *addr)
{
	struct fcp_cmnd *fcp_cmnd = (struct fcp_cmnd *)addr;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	/* Check for Task Management: SCp.Message != 0 carries the TM flags */
	if (likely(scmnd->SCp.Message == 0)) {
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = 0;
		fcp_cmnd->fc_cmdref = 0;
		fcp_cmnd->fc_pri_ta = 0;

		memcpy(fcp_cmnd->fc_cdb, scmnd->cmnd, 16);
		csio_scsi_tag(scmnd, &fcp_cmnd->fc_pri_ta,
			      FCP_PTA_HEADQ, FCP_PTA_ORDERED, FCP_PTA_SIMPLE);
		fcp_cmnd->fc_dl = cpu_to_be32(scsi_bufflen(scmnd));

		/*
		 * NOTE: the first 'else' below binds to the inner 'if'
		 * (datadir test), the second to the outer nsge test --
		 * nsge == 0 means no data transfer, so no flags.
		 */
		if (req->nsge)
			if (req->datadir == DMA_TO_DEVICE)
				fcp_cmnd->fc_flags = FCP_CFL_WRDATA;
			else
				fcp_cmnd->fc_flags = FCP_CFL_RDDATA;
		else
			fcp_cmnd->fc_flags = 0;
	} else {
		/* Task management request: only LUN and TM flags are valid */
		memset(fcp_cmnd, 0, sizeof(*fcp_cmnd));
		int_to_scsilun(scmnd->device->lun, &fcp_cmnd->fc_lun);
		fcp_cmnd->fc_tm_flags = (uint8_t)scmnd->SCp.Message;
	}
}

/*
 * csio_scsi_init_cmd_wr - Initialize the SCSI CMD WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry)
 *
 * Wrapper for populating fw_scsi_cmd_wr.
 */
static inline void
csio_scsi_init_cmd_wr(struct csio_ioreq *req, void *addr, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_cmd_wr *wr = (struct fw_scsi_cmd_wr *)addr;
	struct csio_dma_buf *dma_buf;
	/* Immediate-data length = size of the FCP_CMND payload */
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_CMD_WR) |
					  FW_SCSI_CMD_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
					    FW_WR_LEN16(
						DIV_ROUND_UP(size, 16)));

	/* cookie is echoed back in the completion to locate this ioreq */
	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	wr->r3 = 0;
	memset(&wr->r5, 0, 8);

	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r6 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r4_lo[0] = 0;
	wr->u.fcoe.r4_lo[1] = 0;

	/* Frame a FCP command, immediately following the WR header */
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)addr +
				    sizeof(struct fw_scsi_cmd_wr)));
}

/* WR size = FW WR header + immediate data, padded to 16 bytes */
#define CSIO_SCSI_CMD_WR_SZ(_imm)					\
	(sizeof(struct fw_scsi_cmd_wr) +	/* WR size */		\
	 ALIGN((_imm), 16))			/* Immed data */

#define CSIO_SCSI_CMD_WR_SZ_16(_imm)					\
			(ALIGN(CSIO_SCSI_CMD_WR_SZ((_imm)), 16))

/*
 * csio_scsi_cmd - Create a SCSI CMD WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue (req->eq_idx) and initializes it
 * with a SCSI CMD WR. On failure, req->drv_status carries the error.
 */
static inline void
csio_scsi_cmd(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	uint32_t size = CSIO_SCSI_CMD_WR_SZ_16(scsim->proto_cmd_len);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (unlikely(req->drv_status != 0))
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_cmd_wr(req, wrp.addr1, size);
	} else {
		/* WR slot wraps around the queue end */
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);

		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_cmd_wr(req, (void *)tmpwr, size);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}

/*
 * csio_scsi_init_ultptx_dsgl - Fill in a ULP_TX_SC_DSGL
 * @hw: HW module
 * @req: IO request
 * @sgl: ULP TX SGL pointer.
 *
 * The first entry goes into addr0/len0 of @sgl itself; subsequent entries
 * are packed pairwise into the ulptx_sge_pair array that follows it.
 */
static inline void
csio_scsi_init_ultptx_dsgl(struct csio_hw *hw, struct csio_ioreq *req,
			   struct ulptx_sgl *sgl)
{
	struct ulptx_sge_pair *sge_pair = NULL;
	struct scatterlist *sgel;
	uint32_t i = 0;
	uint32_t xfer_len;
	struct list_head *tmp;
	struct csio_dma_buf *dma_buf;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	sgl->cmd_nsge = htonl(ULPTX_CMD(ULP_TX_SC_DSGL) | ULPTX_MORE |
				     ULPTX_NSGE(req->nsge));
	/* Now add the data SGLs */
	if (likely(!req->dcopy)) {
		/* DDP-aligned case: program the midlayer's own SG list */
		scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(sg_dma_address(sgel));
				sgl->len0 = cpu_to_be32(sg_dma_len(sgel));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
				continue;
			}
			if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[1] = cpu_to_be32(
							sg_dma_len(sgel));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(
							sg_dma_address(sgel));
				sge_pair->len[0] = cpu_to_be32(
							sg_dma_len(sgel));
			}
		}
	} else {
		/* Program sg elements with driver's DDP buffer */
		xfer_len = scsi_bufflen(scmnd);
		list_for_each(tmp, &req->gen_list) {
			dma_buf = (struct csio_dma_buf *)tmp;
			if (i == 0) {
				sgl->addr0 = cpu_to_be64(dma_buf->paddr);
				sgl->len0 = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair = (struct ulptx_sge_pair *)(sgl + 1);
			} else if ((i - 1) & 0x1) {
				sge_pair->addr[1] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[1] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
				sge_pair++;
			} else {
				sge_pair->addr[0] = cpu_to_be64(dma_buf->paddr);
				sge_pair->len[0] = cpu_to_be32(
						min(xfer_len, dma_buf->len));
			}
			/* Last buffer may cover only part of its length */
			xfer_len -= min(xfer_len, dma_buf->len);
			i++;
		}
	}
}

/*
 * csio_scsi_init_read_wr - Initialize the READ SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 *
 * Wrapper for populating fw_scsi_read_wr.
 */
static inline void
csio_scsi_init_read_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_read_wr *wr = (struct fw_scsi_read_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	/* Immediate-data length = size of the FCP_CMND payload */
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_READ_WR) |
				     FW_SCSI_READ_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
				       FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
	/* cookie is echoed back in the completion to locate this ioreq */
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					sizeof(struct fw_scsi_read_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			      sizeof(struct fw_scsi_read_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/*
 * csio_scsi_init_write_wr - Initialize the WRITE SCSI WR.
 * @req: IO req structure.
 * @wrp: DMA location to place the payload.
 * @size: Size of WR (including FW WR + immed data + rsp SG entry + data SGL)
 *
 * Wrapper for populating fw_scsi_write_wr.
 */
static inline void
csio_scsi_init_write_wr(struct csio_ioreq *req, void *wrp, uint32_t size)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_write_wr *wr = (struct fw_scsi_write_wr *)wrp;
	struct ulptx_sgl *sgl;
	struct csio_dma_buf *dma_buf;
	/* Immediate-data length = size of the FCP_CMND payload */
	uint8_t imm = csio_hw_to_scsim(hw)->proto_cmd_len;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);

	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_WRITE_WR) |
				     FW_SCSI_WRITE_WR_IMMDLEN(imm));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
				       FW_WR_LEN16(DIV_ROUND_UP(size, 16)));
	/* cookie is echoed back in the completion to locate this ioreq */
	wr->cookie = (uintptr_t)req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t)(req->tmo);
	wr->use_xfer_cnt = 1;
	wr->xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	wr->ini_xfer_cnt = cpu_to_be32(scsi_bufflen(scmnd));
	/* Get RSP DMA buffer */
	dma_buf = &req->dma_buf;

	/* Prepare RSP SGL */
	wr->rsp_dmalen = cpu_to_be32(dma_buf->len);
	wr->rsp_dmaaddr = cpu_to_be64(dma_buf->paddr);

	wr->r4 = 0;

	wr->u.fcoe.ctl_pri = 0;
	wr->u.fcoe.cp_en_class = 0;
	wr->u.fcoe.r3_lo[0] = 0;
	wr->u.fcoe.r3_lo[1] = 0;
	csio_scsi_fcp_cmnd(req, (void *)((uintptr_t)wrp +
					sizeof(struct fw_scsi_write_wr)));

	/* Move WR pointer past command and immediate data */
	sgl = (struct ulptx_sgl *)((uintptr_t)wrp +
			      sizeof(struct fw_scsi_write_wr) + ALIGN(imm, 16));

	/* Fill in the DSGL */
	csio_scsi_init_ultptx_dsgl(hw, req, sgl);
}

/* Calculate WR size needed for fw_scsi_read_wr/fw_scsi_write_wr */
#define CSIO_SCSI_DATA_WRSZ(req, oper, sz, imm)				       \
do {									       \
	(sz) = sizeof(struct fw_scsi_##oper##_wr) +	/* WR size */	       \
	       ALIGN((imm), 16) +			/* Immed data */       \
	       sizeof(struct ulptx_sgl);		/* ulptx_sgl */	       \
									       \
	if (unlikely((req)->nsge > 1))					       \
		(sz) += (sizeof(struct ulptx_sge_pair) *		       \
				(ALIGN(((req)->nsge - 1), 2) / 2));	       \
							/* Data SGE */	       \
} while (0)

/*
 * csio_scsi_read - Create a SCSI READ WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI READ WR.
 *
 */
static inline void
csio_scsi_read(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, read, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	/* On failure, req->drv_status carries the error to the caller */
	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_read_wr(req, wrp.addr1, size);
		} else {
			/* WR slot wraps around the queue end */
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_read_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_scsi_write - Create a SCSI WRITE WR.
 * @req: IO req structure.
 *
 * Gets a WR slot in the egress queue and initializes it with
 * SCSI WRITE WR. On failure, req->drv_status carries the error.
 *
 */
static inline void
csio_scsi_write(struct csio_ioreq *req)
{
	struct csio_wr_pair wrp;
	uint32_t size;
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	CSIO_SCSI_DATA_WRSZ(req, write, size, scsim->proto_cmd_len);
	size = ALIGN(size, 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (likely(req->drv_status == 0)) {
		if (likely(wrp.size1 >= size)) {
			/* Initialize WR in one shot */
			csio_scsi_init_write_wr(req, wrp.addr1, size);
		} else {
			/* WR slot wraps around the queue end */
			uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
			/*
			 * Make a temporary copy of the WR and write back
			 * the copy into the WR pair.
			 */
			csio_scsi_init_write_wr(req, (void *)tmpwr, size);
			memcpy(wrp.addr1, tmpwr, wrp.size1);
			memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
		}
	}
}

/*
 * csio_setup_ddp - Setup DDP buffers for Read request.
 * @req: IO req structure.
 *
 * Checks SGLs/Data buffers are virtually contiguous required for DDP.
 * If contiguous, driver posts SGLs in the WR otherwise post internal
 * buffers for such request for DDP.
 */
static inline void
csio_setup_ddp(struct csio_scsim *scsim, struct csio_ioreq *req)
{
#ifdef __CSIO_DEBUG__
	struct csio_hw *hw = req->lnode->hwp;
#endif
	struct scatterlist *sgel = NULL;
	struct scsi_cmnd *scmnd = csio_scsi_cmnd(req);
	uint64_t sg_addr = 0;
	uint32_t ddp_pagesz = 4096;
	uint32_t buf_off;
	struct csio_dma_buf *dma_buf = NULL;
	uint32_t alloc_len = 0;
	uint32_t xfer_len = 0;
	uint32_t sg_len = 0;
	uint32_t i;

	/* Verify DDP alignment constraints over the whole SG list */
	scsi_for_each_sg(scmnd, sgel, req->nsge, i) {
		sg_addr = sg_dma_address(sgel);
		sg_len	= sg_dma_len(sgel);

		buf_off = sg_addr & (ddp_pagesz - 1);

		/* Except 1st buffer, all buffer addrs have to be Page aligned */
		if (i != 0 && buf_off) {
			csio_dbg(hw, "SGL addr not DDP aligned (%llx:%d)\n",
				 sg_addr, sg_len);
			goto unaligned;
		}

		/* Except last buffer, all buffers must end on page boundary */
		if ((i != (req->nsge - 1)) &&
			((buf_off + sg_len) & (ddp_pagesz - 1))) {
			csio_dbg(hw,
				 "SGL addr not ending on page boundary"
				 "(%llx:%d)\n", sg_addr, sg_len);
			goto unaligned;
		}
	}

	/* SGL's are virtually contiguous. HW will DDP to SGLs */
	req->dcopy = 0;
	csio_scsi_read(req);

	return;

unaligned:
	CSIO_INC_STATS(scsim, n_unaligned);
	/*
	 * For unaligned SGLs, driver will allocate internal DDP buffer.
	 * Once command is completed data from DDP buffer copied to SGLs
	 */
	req->dcopy = 1;

	/* Use gen_list to store the DDP buffers */
	INIT_LIST_HEAD(&req->gen_list);
	xfer_len = scsi_bufflen(scmnd);

	i = 0;
	/* Allocate ddp buffers for this request */
	while (alloc_len < xfer_len) {
		dma_buf = csio_get_scsi_ddp(scsim);
		if (dma_buf == NULL || i > scsim->max_sge) {
			/* Pool exhausted or too many SGEs for the HW */
			req->drv_status = -EBUSY;
			break;
		}
		alloc_len += dma_buf->len;
		/* Added to IO req */
		list_add_tail(&dma_buf->list, &req->gen_list);
		i++;
	}

	if (!req->drv_status) {
		/* set number of ddp bufs used */
		req->nsge = i;
		csio_scsi_read(req);
		return;
	}

	/* release dma descs gathered so far on the error path */
	if (i > 0)
		csio_put_scsi_ddp_list(scsim, &req->gen_list, i);
}

/*
 * csio_scsi_init_abrt_cls_wr - Initialize an ABORT/CLOSE WR.
 * @req: IO req structure.
 * @addr: DMA location to place the payload.
 * @size: Size of WR
 * @abort: abort OR close
 *
 * Wrapper for populating fw_scsi_abrt_cls_wr.
 */
static inline void
csio_scsi_init_abrt_cls_wr(struct csio_ioreq *req, void *addr, uint32_t size,
			   bool abort)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_rnode *rn = req->rnode;
	struct fw_scsi_abrt_cls_wr *wr = (struct fw_scsi_abrt_cls_wr *)addr;

	wr->op_immdlen = cpu_to_be32(FW_WR_OP(FW_SCSI_ABRT_CLS_WR));
	wr->flowid_len16 = cpu_to_be32(FW_WR_FLOWID(rn->flowid) |
					    FW_WR_LEN16(
						DIV_ROUND_UP(size, 16)));

	wr->cookie = (uintptr_t) req;
	wr->iqid = cpu_to_be16(csio_q_physiqid(hw, req->iq_idx));
	wr->tmo_val = (uint8_t) req->tmo;
	/* 0 for CHK_ALL_IO tells FW to look up t_cookie */
	wr->sub_opcode_to_chk_all_io =
				(FW_SCSI_ABRT_CLS_WR_SUB_OPCODE(abort) |
				 FW_SCSI_ABRT_CLS_WR_CHK_ALL_IO(0));
	wr->r3[0] = 0;
	wr->r3[1] = 0;
	wr->r3[2] = 0;
	wr->r3[3] = 0;
	/* Since we re-use the same ioreq for abort as well */
	wr->t_cookie = (uintptr_t) req;
}

/*
 * csio_scsi_abrt_cls - Create an ABORT or CLOSE WR for @req.
 * @req: IO req structure (re-used for the abort of itself).
 * @abort: true for abort, false for close.
 *
 * On failure, req->drv_status carries the error.
 */
static inline void
csio_scsi_abrt_cls(struct csio_ioreq *req, bool abort)
{
	struct csio_wr_pair wrp;
	struct csio_hw *hw = req->lnode->hwp;
	uint32_t size = ALIGN(sizeof(struct fw_scsi_abrt_cls_wr), 16);

	req->drv_status = csio_wr_get(hw, req->eq_idx, size, &wrp);
	if (req->drv_status != 0)
		return;

	if (wrp.size1 >= size) {
		/* Initialize WR in one shot */
		csio_scsi_init_abrt_cls_wr(req, wrp.addr1, size, abort);
	} else {
		/* WR slot wraps around the queue end */
		uint8_t *tmpwr = csio_q_eq_wrap(hw, req->eq_idx);
		/*
		 * Make a temporary copy of the WR and write back
		 * the copy into the WR pair.
		 */
		csio_scsi_init_abrt_cls_wr(req, (void *)tmpwr, size, abort);
		memcpy(wrp.addr1, tmpwr, wrp.size1);
		memcpy(wrp.addr2, tmpwr + wrp.size1, size - wrp.size1);
	}
}

/*****************************************************************************/
/* START: SCSI SM                                                            */
/*****************************************************************************/

/* Initial state: ioreq not yet issued to the hardware. */
static void
csio_scsis_uninit(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_START_IO:

		if (req->nsge) {
			if (req->datadir == DMA_TO_DEVICE) {
				req->dcopy = 0;
				csio_scsi_write(req);
			} else
				csio_setup_ddp(scsim, req);
		} else {
			/* No data transfer: plain CMD WR */
			csio_scsi_cmd(req);
		}

		if (likely(req->drv_status == 0)) {
			/* change state and enqueue on active_q */
			csio_set_state(&req->sm, csio_scsis_io_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_active);

			return;
		}
		break;

	case CSIO_SCSIE_START_TM:
		csio_scsi_cmd(req);
		if (req->drv_status == 0) {
			/*
			 * NOTE: We collect the affected I/Os prior to issuing
			 * LUN reset, and not after it. This is to prevent
			 * aborting I/Os that get issued after the LUN reset,
			 * but prior to LUN reset completion (in the event that
			 * the host stack has not blocked I/Os to a LUN that is
			 * being reset.
			 */
			csio_set_state(&req->sm, csio_scsis_tm_active);
			list_add_tail(&req->sm.sm_list, &scsim->active_q);
			csio_wr_issue(hw, req->eq_idx, false);
			CSIO_INC_STATS(scsim, n_tm_active);
		}
		return;

	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * NOTE:
		 * We could get here due to:
		 * - a window in the cleanup path of the SCSI module
		 *   (csio_scsi_abort_io()). Please see NOTE in this function.
		 * - a window in the time we tried to issue an abort/close
		 *   of a request to FW, and the FW completed the request
		 *   itself.
		 * Print a message for now, and return INVAL either way.
		 */
		req->drv_status = -EINVAL;
		csio_warn(hw, "Trying to abort/close completed IO:%p!\n", req);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

/* I/O has been issued to the hardware and awaits completion. */
static void
csio_scsis_io_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);
	struct csio_rnode *rn;

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		/*
		 * In MSIX mode, with multiple queues, the SCSI completions
		 * could reach us sooner than the FW events sent to indicate
		 * I-T nexus loss (link down, remote device logo etc). We
		 * don't want to be returning such I/Os to the upper layer
		 * immediately, since we wouldn't have reported the I-T nexus
		 * loss itself. This forces us to serialize such completions
		 * with the reporting of the I-T nexus loss. Therefore, we
		 * internally queue up such completions in the rnode.
		 * The reporting of I-T nexus loss to the upper layer is then
		 * followed by the returning of I/Os in this internal queue.
		 * Having another state along with another queue helps us take
		 * actions for events such as ABORT received while we are
		 * in this rnode queue.
		 */
		if (unlikely(req->wr_status != FW_SUCCESS)) {
			rn = req->rnode;
			/*
			 * FW says remote device is lost, but rnode
			 * doesnt reflect it.
			 */
			if (csio_scsi_itnexus_loss_error(req->wr_status) &&
			    csio_is_rnode_ready(rn)) {
				csio_set_state(&req->sm,
					       csio_scsis_shost_cmpl_await);
				list_add_tail(&req->sm.sm_list,
					      &rn->host_cmpl_q);
			}
		}

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

/* A task-management request (e.g. LUN reset) is outstanding at the FW. */
static void
csio_scsis_tm_active(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		CSIO_DEC_STATS(scm, n_tm_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);

		break;

	case CSIO_SCSIE_ABORT:
		csio_scsi_abrt_cls(req, SCSI_ABORT);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_aborting);
		}
		break;

	case CSIO_SCSIE_CLOSE:
		csio_scsi_abrt_cls(req, SCSI_CLOSE);
		if (req->drv_status == 0) {
			csio_wr_issue(hw, req->eq_idx, false);
			csio_set_state(&req->sm, csio_scsis_closing);
		}
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_tm_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

/* An ABORT WR for this ioreq is outstanding at the FW. */
static void
csio_scsis_aborting(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in aborting st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the ABORTED event that
		 * the original I/O was returned to driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the ABORT and completion of the I/O crossed each
		 * other), or any other return value. Once we are in aborting
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_ABORT:
		CSIO_INC_STATS(scm, n_abrt_dups);
		break;

	case CSIO_SCSIE_ABORTED:

		csio_dbg(hw, "abort of %p return status:0x%x drv_status:%x\n",
			 req, req->wr_status, req->drv_status);
		/*
		 * Check if original I/O WR completed before the Abort
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_warn(hw,
				  "Abort completed before original I/O,"
				   " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * There are the following possible scenarios:
		 * 1. The abort completed successfully, FW returned FW_SUCCESS.
		 * 2. The completion of an I/O and the receipt of
		 *    abort for that I/O by the FW crossed each other.
		 *    The FW returned FW_EINVAL. The original I/O would have
		 *    returned with FW_SUCCESS or any other SCSI error.
		 * 3. The FW couldn't send the abort out on the wire, as there
		 *    was an I-T nexus loss (link down, remote device logged
		 *    out etc). FW sent back an appropriate IT nexus loss status
		 *    for the abort.
		 * 4. FW sent an abort, but abort timed out (remote device
		 *    didn't respond). FW replied back with
		 *    FW_SCSI_ABORT_TIMEDOUT.
		 * 5. FW couldn't genuinely abort the request for some reason,
		 *    and sent us an error.
		 *
		 * The first 3 scenarios are treated as successful abort
		 * operations by the host, while the last 2 are failed attempts
		 * to abort. Manipulate the return value of the request
		 * appropriately, so that host can convey these results
		 * back to the upper layer.
		 */
		if ((req->wr_status == FW_SUCCESS) ||
		    (req->wr_status == FW_EINVAL) ||
		    csio_scsi_itnexus_loss_error(req->wr_status))
			req->wr_status = FW_SCSI_ABORT_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		/*
		 * We can receive this event from the module
		 * cleanup paths, if the FW forgot to reply to the ABORT WR
		 * and left this ioreq in this state. For now, just ignore
		 * the event. The CLOSE event is sent to this state, as
		 * the LINK may have already gone down.
		 */
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

/* A CLOSE WR for this ioreq is outstanding at the FW. */
static void
csio_scsis_closing(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	struct csio_hw *hw = req->lnode->hwp;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	switch (evt) {
	case CSIO_SCSIE_COMPLETED:
		csio_dbg(hw,
			 "ioreq %p recvd cmpltd (wr_status:%d) "
			 "in closing st\n", req, req->wr_status);
		/*
		 * Use -ECANCELED to explicitly tell the CLOSED event that
		 * the original I/O was returned to driver by FW.
		 * We don't really care if the I/O was returned with success by
		 * FW (because the CLOSE and completion of the I/O crossed each
		 * other), or any other return value. Once we are in aborting
		 * state, the success or failure of the I/O is unimportant to
		 * us.
		 */
		req->drv_status = -ECANCELED;
		break;

	case CSIO_SCSIE_CLOSED:
		/*
		 * Check if original I/O WR completed before the Close
		 * completion.
		 */
		if (req->drv_status != -ECANCELED) {
			csio_fatal(hw,
				   "Close completed before original I/O,"
				   " req:%p\n", req);
			CSIO_DB_ASSERT(0);
		}

		/*
		 * Either close succeeded, or we issued close to FW at the
		 * same time FW completed it to us. Either way, the I/O
		 * is closed.
		 */
		CSIO_DB_ASSERT((req->wr_status == FW_SUCCESS) ||
					(req->wr_status == FW_EINVAL));
		req->wr_status = FW_SCSI_CLOSE_REQUESTED;

		CSIO_DEC_STATS(scm, n_active);
		list_del_init(&req->sm.sm_list);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	case CSIO_SCSIE_CLOSE:
		break;

	case CSIO_SCSIE_DRVCLEANUP:
		req->wr_status = FW_HOSTERROR;
		CSIO_DEC_STATS(scm, n_active);
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;

	default:
		csio_dbg(hw, "Unhandled event:%d sent to req:%p\n", evt, req);
		CSIO_DB_ASSERT(0);
	}
}

/*
 * I/O completed with an I-T nexus loss status before the nexus-loss FW
 * event arrived; parked on the rnode's host_cmpl_q until it does.
 */
static void
csio_scsis_shost_cmpl_await(struct csio_ioreq *req, enum csio_scsi_ev evt)
{
	switch (evt) {
	case CSIO_SCSIE_ABORT:
	case CSIO_SCSIE_CLOSE:
		/*
		 * Just succeed the abort request, and hope that
		 * the remote device unregister path will cleanup
		 * this I/O to the upper layer within a sane
		 * amount of time.
		 */
		/*
		 * A close can come in during a LINK DOWN. The FW would have
		 * returned us the I/O back, but not the remote device lost
		 * FW event. In this interval, if the I/O times out at the upper
		 * layer, a close can come in. Take the same action as abort:
		 * return success, and hope that the remote device unregister
		 * path will cleanup this I/O. If the FW still doesn't send
		 * the msg, the close times out, and the upper layer resorts
		 * to the next level of error recovery.
		 */
		req->drv_status = 0;
		break;
	case CSIO_SCSIE_DRVCLEANUP:
		csio_set_state(&req->sm, csio_scsis_uninit);
		break;
	default:
		csio_dbg(req->lnode->hwp, "Unhandled event:%d sent to req:%p\n",
			 evt, req);
		CSIO_DB_ASSERT(0);
	}
}

/*
 * csio_scsi_cmpl_handler - WR completion handler for SCSI.
 * @hw: HW module.
 * @wr: The completed WR from the ingress queue.
 * @len: Length of the WR.
 * @flb: Freelist buffer array.
 * @priv: Private object
 * @scsiwr: Pointer to SCSI WR.
 *
 * This is the WR completion handler called per completion from the
 * ISR. It is called with lock held. It walks past the RSS and CPL message
 * header where the actual WR is present.
 * It then gets the status, WR handle (ioreq pointer) and the len of
 * the WR, based on WR opcode. Only on a non-good status is the entire
 * WR copied into the WR cache (ioreq->fw_wr).
 * The ioreq corresponding to the WR is returned to the caller.
 * NOTE: The SCSI queue doesn't allocate a freelist today, hence
 * no freelist buffer is expected.
 */
struct csio_ioreq *
csio_scsi_cmpl_handler(struct csio_hw *hw, void *wr, uint32_t len,
		     struct csio_fl_dma_buf *flb, void *priv, uint8_t **scsiwr)
{
	struct csio_ioreq *ioreq = NULL;
	struct cpl_fw6_msg *cpl;
	uint8_t *tempwr;
	uint8_t	status;
	struct csio_scsim *scm = csio_hw_to_scsim(hw);

	/* skip RSS header */
	cpl = (struct cpl_fw6_msg *)((uintptr_t)wr + sizeof(__be64));

	if (unlikely(cpl->opcode != CPL_FW6_MSG)) {
		csio_warn(hw, "Error: Invalid CPL msg %x recvd on SCSI q\n",
			  cpl->opcode);
		CSIO_INC_STATS(scm, n_inval_cplop);
		return NULL;
	}

	tempwr = (uint8_t *)(cpl->data);
	status = csio_wr_status(tempwr);
	*scsiwr = tempwr;

	/* First byte of the WR is its opcode */
	if (likely((*tempwr == FW_SCSI_READ_WR) ||
			(*tempwr == FW_SCSI_WRITE_WR) ||
			(*tempwr == FW_SCSI_CMD_WR))) {
		/* cookie was set to the ioreq pointer when the WR was built */
		ioreq = (struct csio_ioreq *)((uintptr_t)
				 (((struct fw_scsi_read_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;

		return ioreq;
	}

	if (*tempwr == FW_SCSI_ABRT_CLS_WR) {
		ioreq = (struct csio_ioreq *)((uintptr_t)
			 (((struct fw_scsi_abrt_cls_wr *)tempwr)->cookie));
		CSIO_DB_ASSERT(virt_addr_valid(ioreq));

		ioreq->wr_status = status;
		return ioreq;
	}

	csio_warn(hw, "WR with invalid opcode in SCSI IQ: %x\n", *tempwr);
	CSIO_INC_STATS(scm, n_inval_scsiop);
	return NULL;
}

/*
 * csio_scsi_cleanup_io_q - Cleanup the given queue.
 * @scm: SCSI module.
 * @q: Queue to be cleaned up.
 *
 * Called with lock held. Has to exit with lock held.
 */
void
csio_scsi_cleanup_io_q(struct csio_scsim *scm, struct list_head *q)
{
	struct csio_hw *hw = scm->hw;
	struct csio_ioreq *ioreq;
	struct list_head *tmp, *next;
	struct scsi_cmnd *scmnd;

	/* Call back the completion routines of the active_q */
	list_for_each_safe(tmp, next, q) {
		ioreq = (struct csio_ioreq *)tmp;
		csio_scsi_drvcleanup(ioreq);
		list_del_init(&ioreq->sm.sm_list);
		scmnd = csio_scsi_cmnd(ioreq);
		/* Drop hw lock across the callback; it may sleep/re-enter */
		spin_unlock_irq(&hw->lock);

		/*
		 * Upper layers may have cleared this command, hence this
		 * check to avoid accessing stale references.
		 */
		if (scmnd != NULL)
			ioreq->io_cbfn(hw, ioreq);

		spin_lock_irq(&scm->freelist_lock);
		csio_put_scsi_ioreq(scm, ioreq);
		spin_unlock_irq(&scm->freelist_lock);

		spin_lock_irq(&hw->lock);
	}
}

#define CSIO_SCSI_ABORT_Q_POLL_MS		2000

/*
 * csio_abrt_cls - Issue an abort (lnode ready) or close (lnode down)
 * for @ioreq, after re-checking that it still owns @scmnd (the command
 * may already have completed in a race with this path).
 */
static void
csio_abrt_cls(struct csio_ioreq *ioreq, struct scsi_cmnd *scmnd)
{
	struct csio_lnode *ln = ioreq->lnode;
	struct csio_hw *hw = ln->hwp;
	int ready = 0;
	struct csio_scsim *scsim = csio_hw_to_scsim(hw);
	int rv;

	if (csio_scsi_cmnd(ioreq) != scmnd) {
		CSIO_INC_STATS(scsim, n_abrt_race_comp);
		return;
	}

	ready = csio_is_lnode_ready(ln);

	rv = csio_do_abrt_cls(hw, ioreq, (ready ? SCSI_ABORT : SCSI_CLOSE));
	if (rv != 0) {
		if (ready)
			CSIO_INC_STATS(scsim, n_abrt_busy_error);
		else
			CSIO_INC_STATS(scsim, n_cls_busy_error);
	}
}

/*
 * csio_scsi_abort_io_q - Abort all I/Os on given queue
 * @scm: SCSI module.
 * @q: Queue to abort.
 * @tmo: Timeout in ms
 *
 * Attempt to abort all I/Os on given queue, and wait for a max
 * of tmo milliseconds for them to complete. Returns success
 * if all I/Os are aborted. Else returns -ETIMEDOUT.
 * Should be entered with lock held. Exits with lock held.
 * NOTE:
 * Lock has to be held across the loop that aborts I/Os, since dropping the lock
 * in between can cause the list to be corrupted. As a result, the caller
 * of this function has to ensure that the number of I/Os to be aborted
 * is finite enough to not cause lock-held-for-too-long issues.
 */
static int
csio_scsi_abort_io_q(struct csio_scsim *scm, struct list_head *q, uint32_t tmo)
{
	struct csio_hw *hw = scm->hw;
	struct list_head *tmp, *next;
	/* Number of poll iterations that fit into @tmo */
	int count = DIV_ROUND_UP(tmo, CSIO_SCSI_ABORT_Q_POLL_MS);
	struct scsi_cmnd *scmnd;

	if (list_empty(q))
		return 0;

	csio_dbg(hw, "Aborting SCSI I/Os\n");

	/* Now abort/close I/Os in the queue passed */
	list_for_each_safe(tmp, next, q) {
		scmnd = csio_scsi_cmnd((struct csio_ioreq *)tmp);
		csio_abrt_cls((struct csio_ioreq *)tmp, scmnd);
	}

	/* Wait till all active I/Os are completed/aborted/closed */
	while (!list_empty(q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all aborts completed */
	if (list_empty(q))
		return 0;

	return -ETIMEDOUT;
}

/*
 * csio_scsim_cleanup_io - Cleanup all I/Os in SCSI module.
 * @scm: SCSI module.
 * @abort: abort required.
 * Called with lock held, should exit with lock held.
 * Can sleep when waiting for I/Os to complete.
 */
int
csio_scsim_cleanup_io(struct csio_scsim *scm, bool abort)
{
	struct csio_hw *hw = scm->hw;
	int rv = 0;
	/* Poll for up to 60 seconds before resorting to abort/cleanup */
	int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS);

	/* No I/Os pending */
	if (list_empty(&scm->active_q))
		return 0;

	/* Wait until all active I/Os are completed */
	while (!list_empty(&scm->active_q) && count--) {
		spin_unlock_irq(&hw->lock);
		msleep(CSIO_SCSI_ABORT_Q_POLL_MS);
		spin_lock_irq(&hw->lock);
	}

	/* all I/Os completed */
	if (list_empty(&scm->active_q))
		return 0;

	/* Else abort */
	if (abort) {
		rv = csio_scsi_abort_io_q(scm, &scm->active_q, 30000);
		if (rv == 0)
			return rv;
		csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n");
	}

	/* Forcibly drive remaining ioreqs through DRVCLEANUP */
	csio_scsi_cleanup_io_q(scm, &scm->active_q);

	CSIO_DB_ASSERT(list_empty(&scm->active_q));

	return rv;
}

/*
 * csio_scsim_cleanup_io_lnode - Cleanup all I/Os of given lnode.
 * @scm: SCSI module.
 * @lnode: lnode
 *
 * Called with lock held, should exit with lock held.
 * Can sleep (with dropped lock) when waiting for I/Os to complete.
*/ int csio_scsim_cleanup_io_lnode(struct csio_scsim *scm, struct csio_lnode *ln) { struct csio_hw *hw = scm->hw; struct csio_scsi_level_data sld; int rv; int count = DIV_ROUND_UP(60 * 1000, CSIO_SCSI_ABORT_Q_POLL_MS); csio_dbg(hw, "Gathering all SCSI I/Os on lnode %p\n", ln); sld.level = CSIO_LEV_LNODE; sld.lnode = ln; INIT_LIST_HEAD(&ln->cmpl_q); csio_scsi_gather_active_ios(scm, &sld, &ln->cmpl_q); /* No I/Os pending on this lnode */ if (list_empty(&ln->cmpl_q)) return 0; /* Wait until all active I/Os on this lnode are completed */ while (!list_empty(&ln->cmpl_q) && count--) { spin_unlock_irq(&hw->lock); msleep(CSIO_SCSI_ABORT_Q_POLL_MS); spin_lock_irq(&hw->lock); } /* all I/Os completed */ if (list_empty(&ln->cmpl_q)) return 0; csio_dbg(hw, "Some I/Os pending on ln:%p, aborting them..\n", ln); /* I/Os are pending, abort them */ rv = csio_scsi_abort_io_q(scm, &ln->cmpl_q, 30000); if (rv != 0) { csio_dbg(hw, "Some I/O aborts timed out, cleaning up..\n"); csio_scsi_cleanup_io_q(scm, &ln->cmpl_q); } CSIO_DB_ASSERT(list_empty(&ln->cmpl_q)); return rv; } static ssize_t csio_show_hw_state(struct device *dev, struct device_attribute *attr, char *buf) { struct csio_lnode *ln = shost_priv(class_to_shost(dev)); struct csio_hw *hw = csio_lnode_to_hw(ln); if (csio_is_hw_ready(hw)) return snprintf(buf, PAGE_SIZE, "ready\n"); else return snprintf(buf, PAGE_SIZE, "not ready\n"); } /* Device reset */ static ssize_t csio_device_reset(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct csio_lnode *ln = shost_priv(class_to_shost(dev)); struct csio_hw *hw = csio_lnode_to_hw(ln); if (*buf != '1') return -EINVAL; /* Delete NPIV lnodes */ csio_lnodes_exit(hw, 1); /* Block upper IOs */ csio_lnodes_block_request(hw); spin_lock_irq(&hw->lock); csio_hw_reset(hw); spin_unlock_irq(&hw->lock); /* Unblock upper IOs */ csio_lnodes_unblock_request(hw); return count; } /* disable port */ static ssize_t csio_disable_port(struct device *dev, struct 
device_attribute *attr, const char *buf, size_t count) { struct csio_lnode *ln = shost_priv(class_to_shost(dev)); struct csio_hw *hw = csio_lnode_to_hw(ln); bool disable; if (*buf == '1' || *buf == '0') disable = (*buf == '1') ? true : false; else return -EINVAL; /* Block upper IOs */ csio_lnodes_block_by_port(hw, ln->portid); spin_lock_irq(&hw->lock); csio_disable_lnodes(hw, ln->portid, disable); spin_unlock_irq(&hw->lock); /* Unblock upper IOs */ csio_lnodes_unblock_by_port(hw, ln->portid); return count; } /* Show debug level */ static ssize_t csio_show_dbg_level(struct device *dev, struct device_attribute *attr, char *buf) { struct csio_lnode *ln = shost_priv(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%x\n", ln->params.log_level); } /* Store debug level */ static ssize_t csio_store_dbg_level(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct csio_lnode *ln = shost_priv(class_to_shost(dev)); struct csio_hw *hw = csio_lnode_to_hw(ln); uint32_t dbg_level = 0; if (!isdigit(buf[0])) return -EINVAL; if (sscanf(buf, "%i", &dbg_level)) return -EINVAL; ln->params.log_level = dbg_level; hw->params.log_level = dbg_level; return 0; } static DEVICE_ATTR(hw_state, S_IRUGO, csio_show_hw_state, NULL); static DEVICE_ATTR(device_reset, S_IRUGO | S_IWUSR, NULL, csio_device_reset); static DEVICE_ATTR(disable_port, S_IRUGO | S_IWUSR, NULL, csio_disable_port); static DEVICE_ATTR(dbg_level, S_IRUGO | S_IWUSR, csio_show_dbg_level, csio_store_dbg_level); static struct device_attribute *csio_fcoe_lport_attrs[] = { &dev_attr_hw_state, &dev_attr_device_reset, &dev_attr_disable_port, &dev_attr_dbg_level, NULL, }; static ssize_t csio_show_num_reg_rnodes(struct device *dev, struct device_attribute *attr, char *buf) { struct csio_lnode *ln = shost_priv(class_to_shost(dev)); return snprintf(buf, PAGE_SIZE, "%d\n", ln->num_reg_rnodes); } static DEVICE_ATTR(num_reg_rnodes, S_IRUGO, csio_show_num_reg_rnodes, NULL); static struct device_attribute 
*csio_fcoe_vport_attrs[] = { &dev_attr_num_reg_rnodes, &dev_attr_dbg_level, NULL, }; static inline uint32_t csio_scsi_copy_to_sgl(struct csio_hw *hw, struct csio_ioreq *req) { struct scsi_cmnd *scmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); struct scatterlist *sg; uint32_t bytes_left; uint32_t bytes_copy; uint32_t buf_off = 0; uint32_t start_off = 0; uint32_t sg_off = 0; void *sg_addr; void *buf_addr; struct csio_dma_buf *dma_buf; bytes_left = scsi_bufflen(scmnd); sg = scsi_sglist(scmnd); dma_buf = (struct csio_dma_buf *)csio_list_next(&req->gen_list); /* Copy data from driver buffer to SGs of SCSI CMD */ while (bytes_left > 0 && sg && dma_buf) { if (buf_off >= dma_buf->len) { buf_off = 0; dma_buf = (struct csio_dma_buf *) csio_list_next(dma_buf); continue; } if (start_off >= sg->length) { start_off -= sg->length; sg = sg_next(sg); continue; } buf_addr = dma_buf->vaddr + buf_off; sg_off = sg->offset + start_off; bytes_copy = min((dma_buf->len - buf_off), sg->length - start_off); bytes_copy = min((uint32_t)(PAGE_SIZE - (sg_off & ~PAGE_MASK)), bytes_copy); sg_addr = kmap_atomic(sg_page(sg) + (sg_off >> PAGE_SHIFT)); if (!sg_addr) { csio_err(hw, "failed to kmap sg:%p of ioreq:%p\n", sg, req); break; } csio_dbg(hw, "copy_to_sgl:sg_addr %p sg_off %d buf %p len %d\n", sg_addr, sg_off, buf_addr, bytes_copy); memcpy(sg_addr + (sg_off & ~PAGE_MASK), buf_addr, bytes_copy); kunmap_atomic(sg_addr); start_off += bytes_copy; buf_off += bytes_copy; bytes_left -= bytes_copy; } if (bytes_left > 0) return DID_ERROR; else return DID_OK; } /* * csio_scsi_err_handler - SCSI error handler. * @hw: HW module. * @req: IO request. 
* */ static inline void csio_scsi_err_handler(struct csio_hw *hw, struct csio_ioreq *req) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); struct csio_scsim *scm = csio_hw_to_scsim(hw); struct fcp_resp_with_ext *fcp_resp; struct fcp_resp_rsp_info *rsp_info; struct csio_dma_buf *dma_buf; uint8_t flags, scsi_status = 0; uint32_t host_status = DID_OK; uint32_t rsp_len = 0, sns_len = 0; struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); switch (req->wr_status) { case FW_HOSTERROR: if (unlikely(!csio_is_hw_ready(hw))) return; host_status = DID_ERROR; CSIO_INC_STATS(scm, n_hosterror); break; case FW_SCSI_RSP_ERR: dma_buf = &req->dma_buf; fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); flags = fcp_resp->resp.fr_flags; scsi_status = fcp_resp->resp.fr_status; if (flags & FCP_RSP_LEN_VAL) { rsp_len = be32_to_cpu(fcp_resp->ext.fr_rsp_len); if ((rsp_len != 0 && rsp_len != 4 && rsp_len != 8) || (rsp_info->rsp_code != FCP_TMF_CMPL)) { host_status = DID_ERROR; goto out; } } if ((flags & FCP_SNS_LEN_VAL) && fcp_resp->ext.fr_sns_len) { sns_len = be32_to_cpu(fcp_resp->ext.fr_sns_len); if (sns_len > SCSI_SENSE_BUFFERSIZE) sns_len = SCSI_SENSE_BUFFERSIZE; memcpy(cmnd->sense_buffer, &rsp_info->_fr_resvd[0] + rsp_len, sns_len); CSIO_INC_STATS(scm, n_autosense); } scsi_set_resid(cmnd, 0); /* Under run */ if (flags & FCP_RESID_UNDER) { scsi_set_resid(cmnd, be32_to_cpu(fcp_resp->ext.fr_resid)); if (!(flags & FCP_SNS_LEN_VAL) && (scsi_status == SAM_STAT_GOOD) && ((scsi_bufflen(cmnd) - scsi_get_resid(cmnd)) < cmnd->underflow)) host_status = DID_ERROR; } else if (flags & FCP_RESID_OVER) host_status = DID_ERROR; CSIO_INC_STATS(scm, n_rsperror); break; case FW_SCSI_OVER_FLOW_ERR: csio_warn(hw, "Over-flow error,cmnd:0x%x expected len:0x%x" " resid:0x%x\n", cmnd->cmnd[0], scsi_bufflen(cmnd), scsi_get_resid(cmnd)); host_status = DID_ERROR; CSIO_INC_STATS(scm, n_ovflerror); break; case 
FW_SCSI_UNDER_FLOW_ERR: csio_warn(hw, "Under-flow error,cmnd:0x%x expected" " len:0x%x resid:0x%x lun:0x%x ssn:0x%x\n", cmnd->cmnd[0], scsi_bufflen(cmnd), scsi_get_resid(cmnd), cmnd->device->lun, rn->flowid); host_status = DID_ERROR; CSIO_INC_STATS(scm, n_unflerror); break; case FW_SCSI_ABORT_REQUESTED: case FW_SCSI_ABORTED: case FW_SCSI_CLOSE_REQUESTED: csio_dbg(hw, "Req %p cmd:%p op:%x %s\n", req, cmnd, cmnd->cmnd[0], (req->wr_status == FW_SCSI_CLOSE_REQUESTED) ? "closed" : "aborted"); /* * csio_eh_abort_handler checks this value to * succeed or fail the abort request. */ host_status = DID_REQUEUE; if (req->wr_status == FW_SCSI_CLOSE_REQUESTED) CSIO_INC_STATS(scm, n_closed); else CSIO_INC_STATS(scm, n_aborted); break; case FW_SCSI_ABORT_TIMEDOUT: /* FW timed out the abort itself */ csio_dbg(hw, "FW timed out abort req:%p cmnd:%p status:%x\n", req, cmnd, req->wr_status); host_status = DID_ERROR; CSIO_INC_STATS(scm, n_abrt_timedout); break; case FW_RDEV_NOT_READY: /* * In firmware, a RDEV can get into this state * temporarily, before moving into dissapeared/lost * state. So, the driver should complete the request equivalent * to device-disappeared! 
*/ CSIO_INC_STATS(scm, n_rdev_nr_error); host_status = DID_ERROR; break; case FW_ERR_RDEV_LOST: CSIO_INC_STATS(scm, n_rdev_lost_error); host_status = DID_ERROR; break; case FW_ERR_RDEV_LOGO: CSIO_INC_STATS(scm, n_rdev_logo_error); host_status = DID_ERROR; break; case FW_ERR_RDEV_IMPL_LOGO: host_status = DID_ERROR; break; case FW_ERR_LINK_DOWN: CSIO_INC_STATS(scm, n_link_down_error); host_status = DID_ERROR; break; case FW_FCOE_NO_XCHG: CSIO_INC_STATS(scm, n_no_xchg_error); host_status = DID_ERROR; break; default: csio_err(hw, "Unknown SCSI FW WR status:%d req:%p cmnd:%p\n", req->wr_status, req, cmnd); CSIO_DB_ASSERT(0); CSIO_INC_STATS(scm, n_unknown_error); host_status = DID_ERROR; break; } out: if (req->nsge > 0) scsi_dma_unmap(cmnd); cmnd->result = (((host_status) << 16) | scsi_status); cmnd->scsi_done(cmnd); /* Wake up waiting threads */ csio_scsi_cmnd(req) = NULL; complete_all(&req->cmplobj); } /* * csio_scsi_cbfn - SCSI callback function. * @hw: HW module. * @req: IO request. * */ static void csio_scsi_cbfn(struct csio_hw *hw, struct csio_ioreq *req) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); uint8_t scsi_status = SAM_STAT_GOOD; uint32_t host_status = DID_OK; if (likely(req->wr_status == FW_SUCCESS)) { if (req->nsge > 0) { scsi_dma_unmap(cmnd); if (req->dcopy) host_status = csio_scsi_copy_to_sgl(hw, req); } cmnd->result = (((host_status) << 16) | scsi_status); cmnd->scsi_done(cmnd); csio_scsi_cmnd(req) = NULL; CSIO_INC_STATS(csio_hw_to_scsim(hw), n_tot_success); } else { /* Error handling */ csio_scsi_err_handler(hw, req); } } /** * csio_queuecommand - Entry point to kickstart an I/O request. * @host: The scsi_host pointer. * @cmnd: The I/O request from ML. * * This routine does the following: * - Checks for HW and Rnode module readiness. * - Gets a free ioreq structure (which is already initialized * to uninit during its allocation). * - Maps SG elements. * - Initializes ioreq members. * - Kicks off the SCSI state machine for this IO. 
* - Returns busy status on error. */ static int csio_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmnd) { struct csio_lnode *ln = shost_priv(host); struct csio_hw *hw = csio_lnode_to_hw(ln); struct csio_scsim *scsim = csio_hw_to_scsim(hw); struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); struct csio_ioreq *ioreq = NULL; unsigned long flags; int nsge = 0; int rv = SCSI_MLQUEUE_HOST_BUSY, nr; int retval; int cpu; struct csio_scsi_qset *sqset; struct fc_rport *rport = starget_to_rport(scsi_target(cmnd->device)); if (!blk_rq_cpu_valid(cmnd->request)) cpu = smp_processor_id(); else cpu = cmnd->request->cpu; sqset = &hw->sqset[ln->portid][cpu]; nr = fc_remote_port_chkready(rport); if (nr) { cmnd->result = nr; CSIO_INC_STATS(scsim, n_rn_nr_error); goto err_done; } if (unlikely(!csio_is_hw_ready(hw))) { cmnd->result = (DID_REQUEUE << 16); CSIO_INC_STATS(scsim, n_hw_nr_error); goto err_done; } /* Get req->nsge, if there are SG elements to be mapped */ nsge = scsi_dma_map(cmnd); if (unlikely(nsge < 0)) { CSIO_INC_STATS(scsim, n_dmamap_error); goto err; } /* Do we support so many mappings? */ if (unlikely(nsge > scsim->max_sge)) { csio_warn(hw, "More SGEs than can be supported." " SGEs: %d, Max SGEs: %d\n", nsge, scsim->max_sge); CSIO_INC_STATS(scsim, n_unsupp_sge_error); goto err_dma_unmap; } /* Get a free ioreq structure - SM is already set to uninit */ ioreq = csio_get_scsi_ioreq_lock(hw, scsim); if (!ioreq) { csio_err(hw, "Out of I/O request elements. 
Active #:%d\n", scsim->stats.n_active); CSIO_INC_STATS(scsim, n_no_req_error); goto err_dma_unmap; } ioreq->nsge = nsge; ioreq->lnode = ln; ioreq->rnode = rn; ioreq->iq_idx = sqset->iq_idx; ioreq->eq_idx = sqset->eq_idx; ioreq->wr_status = 0; ioreq->drv_status = 0; csio_scsi_cmnd(ioreq) = (void *)cmnd; ioreq->tmo = 0; ioreq->datadir = cmnd->sc_data_direction; if (cmnd->sc_data_direction == DMA_TO_DEVICE) { CSIO_INC_STATS(ln, n_output_requests); ln->stats.n_output_bytes += scsi_bufflen(cmnd); } else if (cmnd->sc_data_direction == DMA_FROM_DEVICE) { CSIO_INC_STATS(ln, n_input_requests); ln->stats.n_input_bytes += scsi_bufflen(cmnd); } else CSIO_INC_STATS(ln, n_control_requests); /* Set cbfn */ ioreq->io_cbfn = csio_scsi_cbfn; /* Needed during abort */ cmnd->host_scribble = (unsigned char *)ioreq; cmnd->SCp.Message = 0; /* Kick off SCSI IO SM on the ioreq */ spin_lock_irqsave(&hw->lock, flags); retval = csio_scsi_start_io(ioreq); spin_unlock_irqrestore(&hw->lock, flags); if (retval != 0) { csio_err(hw, "ioreq: %p couldnt be started, status:%d\n", ioreq, retval); CSIO_INC_STATS(scsim, n_busy_error); goto err_put_req; } return 0; err_put_req: csio_put_scsi_ioreq_lock(hw, scsim, ioreq); err_dma_unmap: if (nsge > 0) scsi_dma_unmap(cmnd); err: return rv; err_done: cmnd->scsi_done(cmnd); return 0; } static int csio_do_abrt_cls(struct csio_hw *hw, struct csio_ioreq *ioreq, bool abort) { int rv; int cpu = smp_processor_id(); struct csio_lnode *ln = ioreq->lnode; struct csio_scsi_qset *sqset = &hw->sqset[ln->portid][cpu]; ioreq->tmo = CSIO_SCSI_ABRT_TMO_MS; /* * Use current processor queue for posting the abort/close, but retain * the ingress queue ID of the original I/O being aborted/closed - we * need the abort/close completion to be received on the same queue * as the original I/O. 
*/ ioreq->eq_idx = sqset->eq_idx; if (abort == SCSI_ABORT) rv = csio_scsi_abort(ioreq); else rv = csio_scsi_close(ioreq); return rv; } static int csio_eh_abort_handler(struct scsi_cmnd *cmnd) { struct csio_ioreq *ioreq; struct csio_lnode *ln = shost_priv(cmnd->device->host); struct csio_hw *hw = csio_lnode_to_hw(ln); struct csio_scsim *scsim = csio_hw_to_scsim(hw); int ready = 0, ret; unsigned long tmo = 0; int rv; struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); ret = fc_block_scsi_eh(cmnd); if (ret) return ret; ioreq = (struct csio_ioreq *)cmnd->host_scribble; if (!ioreq) return SUCCESS; if (!rn) return FAILED; csio_dbg(hw, "Request to abort ioreq:%p cmd:%p cdb:%08llx" " ssni:0x%x lun:%d iq:0x%x\n", ioreq, cmnd, *((uint64_t *)cmnd->cmnd), rn->flowid, cmnd->device->lun, csio_q_physiqid(hw, ioreq->iq_idx)); if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) != cmnd) { CSIO_INC_STATS(scsim, n_abrt_race_comp); return SUCCESS; } ready = csio_is_lnode_ready(ln); tmo = CSIO_SCSI_ABRT_TMO_MS; spin_lock_irq(&hw->lock); rv = csio_do_abrt_cls(hw, ioreq, (ready ? 
SCSI_ABORT : SCSI_CLOSE)); spin_unlock_irq(&hw->lock); if (rv != 0) { if (rv == -EINVAL) { /* Return success, if abort/close request issued on * already completed IO */ return SUCCESS; } if (ready) CSIO_INC_STATS(scsim, n_abrt_busy_error); else CSIO_INC_STATS(scsim, n_cls_busy_error); goto inval_scmnd; } /* Wait for completion */ init_completion(&ioreq->cmplobj); wait_for_completion_timeout(&ioreq->cmplobj, msecs_to_jiffies(tmo)); /* FW didnt respond to abort within our timeout */ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { csio_err(hw, "Abort timed out -- req: %p\n", ioreq); CSIO_INC_STATS(scsim, n_abrt_timedout); inval_scmnd: if (ioreq->nsge > 0) scsi_dma_unmap(cmnd); spin_lock_irq(&hw->lock); csio_scsi_cmnd(ioreq) = NULL; spin_unlock_irq(&hw->lock); cmnd->result = (DID_ERROR << 16); cmnd->scsi_done(cmnd); return FAILED; } /* FW successfully aborted the request */ if (host_byte(cmnd->result) == DID_REQUEUE) { csio_info(hw, "Aborted SCSI command to (%d:%d) serial#:0x%lx\n", cmnd->device->id, cmnd->device->lun, cmnd->serial_number); return SUCCESS; } else { csio_info(hw, "Failed to abort SCSI command, (%d:%d) serial#:0x%lx\n", cmnd->device->id, cmnd->device->lun, cmnd->serial_number); return FAILED; } } /* * csio_tm_cbfn - TM callback function. * @hw: HW module. * @req: IO request. * * Cache the result in 'cmnd', since ioreq will be freed soon * after we return from here, and the waiting thread shouldnt trust * the ioreq contents. */ static void csio_tm_cbfn(struct csio_hw *hw, struct csio_ioreq *req) { struct scsi_cmnd *cmnd = (struct scsi_cmnd *)csio_scsi_cmnd(req); struct csio_dma_buf *dma_buf; uint8_t flags = 0; struct fcp_resp_with_ext *fcp_resp; struct fcp_resp_rsp_info *rsp_info; csio_dbg(hw, "req: %p in csio_tm_cbfn status: %d\n", req, req->wr_status); /* Cache FW return status */ cmnd->SCp.Status = req->wr_status; /* Special handling based on FCP response */ /* * FW returns us this error, if flags were set. 
FCP4 says * FCP_RSP_LEN_VAL in flags shall be set for TM completions. * So if a target were to set this bit, we expect that the * rsp_code is set to FCP_TMF_CMPL for a successful TM * completion. Any other rsp_code means TM operation failed. * If a target were to just ignore setting flags, we treat * the TM operation as success, and FW returns FW_SUCCESS. */ if (req->wr_status == FW_SCSI_RSP_ERR) { dma_buf = &req->dma_buf; fcp_resp = (struct fcp_resp_with_ext *)dma_buf->vaddr; rsp_info = (struct fcp_resp_rsp_info *)(fcp_resp + 1); flags = fcp_resp->resp.fr_flags; /* Modify return status if flags indicate success */ if (flags & FCP_RSP_LEN_VAL) if (rsp_info->rsp_code == FCP_TMF_CMPL) cmnd->SCp.Status = FW_SUCCESS; csio_dbg(hw, "TM FCP rsp code: %d\n", rsp_info->rsp_code); } /* Wake up the TM handler thread */ csio_scsi_cmnd(req) = NULL; } static int csio_eh_lun_reset_handler(struct scsi_cmnd *cmnd) { struct csio_lnode *ln = shost_priv(cmnd->device->host); struct csio_hw *hw = csio_lnode_to_hw(ln); struct csio_scsim *scsim = csio_hw_to_scsim(hw); struct csio_rnode *rn = (struct csio_rnode *)(cmnd->device->hostdata); struct csio_ioreq *ioreq = NULL; struct csio_scsi_qset *sqset; unsigned long flags; int retval; int count, ret; LIST_HEAD(local_q); struct csio_scsi_level_data sld; if (!rn) goto fail; csio_dbg(hw, "Request to reset LUN:%d (ssni:0x%x tgtid:%d)\n", cmnd->device->lun, rn->flowid, rn->scsi_id); if (!csio_is_lnode_ready(ln)) { csio_err(hw, "LUN reset cannot be issued on non-ready" " local node vnpi:0x%x (LUN:%d)\n", ln->vnp_flowid, cmnd->device->lun); goto fail; } /* Lnode is ready, now wait on rport node readiness */ ret = fc_block_scsi_eh(cmnd); if (ret) return ret; /* * If we have blocked in the previous call, at this point, either the * remote node has come back online, or device loss timer has fired * and the remote node is destroyed. 
Allow the LUN reset only for * the former case, since LUN reset is a TMF I/O on the wire, and we * need a valid session to issue it. */ if (fc_remote_port_chkready(rn->rport)) { csio_err(hw, "LUN reset cannot be issued on non-ready" " remote node ssni:0x%x (LUN:%d)\n", rn->flowid, cmnd->device->lun); goto fail; } /* Get a free ioreq structure - SM is already set to uninit */ ioreq = csio_get_scsi_ioreq_lock(hw, scsim); if (!ioreq) { csio_err(hw, "Out of IO request elements. Active # :%d\n", scsim->stats.n_active); goto fail; } sqset = &hw->sqset[ln->portid][smp_processor_id()]; ioreq->nsge = 0; ioreq->lnode = ln; ioreq->rnode = rn; ioreq->iq_idx = sqset->iq_idx; ioreq->eq_idx = sqset->eq_idx; csio_scsi_cmnd(ioreq) = cmnd; cmnd->host_scribble = (unsigned char *)ioreq; cmnd->SCp.Status = 0; cmnd->SCp.Message = FCP_TMF_LUN_RESET; ioreq->tmo = CSIO_SCSI_LUNRST_TMO_MS / 1000; /* * FW times the LUN reset for ioreq->tmo, so we got to wait a little * longer (10s for now) than that to allow FW to return the timed * out command. 
*/ count = DIV_ROUND_UP((ioreq->tmo + 10) * 1000, CSIO_SCSI_TM_POLL_MS); /* Set cbfn */ ioreq->io_cbfn = csio_tm_cbfn; /* Save of the ioreq info for later use */ sld.level = CSIO_LEV_LUN; sld.lnode = ioreq->lnode; sld.rnode = ioreq->rnode; sld.oslun = (uint64_t)cmnd->device->lun; spin_lock_irqsave(&hw->lock, flags); /* Kick off TM SM on the ioreq */ retval = csio_scsi_start_tm(ioreq); spin_unlock_irqrestore(&hw->lock, flags); if (retval != 0) { csio_err(hw, "Failed to issue LUN reset, req:%p, status:%d\n", ioreq, retval); goto fail_ret_ioreq; } csio_dbg(hw, "Waiting max %d secs for LUN reset completion\n", count * (CSIO_SCSI_TM_POLL_MS / 1000)); /* Wait for completion */ while ((((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) && count--) msleep(CSIO_SCSI_TM_POLL_MS); /* LUN reset timed-out */ if (((struct scsi_cmnd *)csio_scsi_cmnd(ioreq)) == cmnd) { csio_err(hw, "LUN reset (%d:%d) timed out\n", cmnd->device->id, cmnd->device->lun); spin_lock_irq(&hw->lock); csio_scsi_drvcleanup(ioreq); list_del_init(&ioreq->sm.sm_list); spin_unlock_irq(&hw->lock); goto fail_ret_ioreq; } /* LUN reset returned, check cached status */ if (cmnd->SCp.Status != FW_SUCCESS) { csio_err(hw, "LUN reset failed (%d:%d), status: %d\n", cmnd->device->id, cmnd->device->lun, cmnd->SCp.Status); goto fail; } /* LUN reset succeeded, Start aborting affected I/Os */ /* * Since the host guarantees during LUN reset that there * will not be any more I/Os to that LUN, until the LUN reset * completes, we gather pending I/Os after the LUN reset. 
*/ spin_lock_irq(&hw->lock); csio_scsi_gather_active_ios(scsim, &sld, &local_q); retval = csio_scsi_abort_io_q(scsim, &local_q, 30000); spin_unlock_irq(&hw->lock); /* Aborts may have timed out */ if (retval != 0) { csio_err(hw, "Attempt to abort I/Os during LUN reset of %d" " returned %d\n", cmnd->device->lun, retval); /* Return I/Os back to active_q */ spin_lock_irq(&hw->lock); list_splice_tail_init(&local_q, &scsim->active_q); spin_unlock_irq(&hw->lock); goto fail; } CSIO_INC_STATS(rn, n_lun_rst); csio_info(hw, "LUN reset occurred (%d:%d)\n", cmnd->device->id, cmnd->device->lun); return SUCCESS; fail_ret_ioreq: csio_put_scsi_ioreq_lock(hw, scsim, ioreq); fail: CSIO_INC_STATS(rn, n_lun_rst_fail); return FAILED; } static int csio_slave_alloc(struct scsi_device *sdev) { struct fc_rport *rport = starget_to_rport(scsi_target(sdev)); if (!rport || fc_remote_port_chkready(rport)) return -ENXIO; sdev->hostdata = *((struct csio_lnode **)(rport->dd_data)); return 0; } static int csio_slave_configure(struct scsi_device *sdev) { if (sdev->tagged_supported) scsi_activate_tcq(sdev, csio_lun_qdepth); else scsi_deactivate_tcq(sdev, csio_lun_qdepth); return 0; } static void csio_slave_destroy(struct scsi_device *sdev) { sdev->hostdata = NULL; } static int csio_scan_finished(struct Scsi_Host *shost, unsigned long time) { struct csio_lnode *ln = shost_priv(shost); int rv = 1; spin_lock_irq(shost->host_lock); if (!ln->hwp || csio_list_deleted(&ln->sm.sm_list)) goto out; rv = csio_scan_done(ln, jiffies, time, csio_max_scan_tmo * HZ, csio_delta_scan_tmo * HZ); out: spin_unlock_irq(shost->host_lock); return rv; } struct scsi_host_template csio_fcoe_shost_template = { .module = THIS_MODULE, .name = CSIO_DRV_DESC, .proc_name = KBUILD_MODNAME, .queuecommand = csio_queuecommand, .eh_abort_handler = csio_eh_abort_handler, .eh_device_reset_handler = csio_eh_lun_reset_handler, .slave_alloc = csio_slave_alloc, .slave_configure = csio_slave_configure, .slave_destroy = csio_slave_destroy, 
.scan_finished = csio_scan_finished, .this_id = -1, .sg_tablesize = CSIO_SCSI_MAX_SGE, .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = csio_fcoe_lport_attrs, .max_sectors = CSIO_MAX_SECTOR_SIZE, }; struct scsi_host_template csio_fcoe_shost_vport_template = { .module = THIS_MODULE, .name = CSIO_DRV_DESC, .proc_name = KBUILD_MODNAME, .queuecommand = csio_queuecommand, .eh_abort_handler = csio_eh_abort_handler, .eh_device_reset_handler = csio_eh_lun_reset_handler, .slave_alloc = csio_slave_alloc, .slave_configure = csio_slave_configure, .slave_destroy = csio_slave_destroy, .scan_finished = csio_scan_finished, .this_id = -1, .sg_tablesize = CSIO_SCSI_MAX_SGE, .cmd_per_lun = CSIO_MAX_CMD_PER_LUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = csio_fcoe_vport_attrs, .max_sectors = CSIO_MAX_SECTOR_SIZE, }; /* * csio_scsi_alloc_ddp_bufs - Allocate buffers for DDP of unaligned SGLs. * @scm: SCSI Module * @hw: HW device. * @buf_size: buffer size * @num_buf : Number of buffers. * * This routine allocates DMA buffers required for SCSI Data xfer, if * each SGL buffer for a SCSI Read request posted by SCSI midlayer are * not virtually contiguous. 
*/ static int csio_scsi_alloc_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw, int buf_size, int num_buf) { int n = 0; struct list_head *tmp; struct csio_dma_buf *ddp_desc = NULL; uint32_t unit_size = 0; if (!num_buf) return 0; if (!buf_size) return -EINVAL; INIT_LIST_HEAD(&scm->ddp_freelist); /* Align buf size to page size */ buf_size = (buf_size + PAGE_SIZE - 1) & PAGE_MASK; /* Initialize dma descriptors */ for (n = 0; n < num_buf; n++) { /* Set unit size to request size */ unit_size = buf_size; ddp_desc = kzalloc(sizeof(struct csio_dma_buf), GFP_KERNEL); if (!ddp_desc) { csio_err(hw, "Failed to allocate ddp descriptors," " Num allocated = %d.\n", scm->stats.n_free_ddp); goto no_mem; } /* Allocate Dma buffers for DDP */ ddp_desc->vaddr = pci_alloc_consistent(hw->pdev, unit_size, &ddp_desc->paddr); if (!ddp_desc->vaddr) { csio_err(hw, "SCSI response DMA buffer (ddp) allocation" " failed!\n"); kfree(ddp_desc); goto no_mem; } ddp_desc->len = unit_size; /* Added it to scsi ddp freelist */ list_add_tail(&ddp_desc->list, &scm->ddp_freelist); CSIO_INC_STATS(scm, n_free_ddp); } return 0; no_mem: /* release dma descs back to freelist and free dma memory */ list_for_each(tmp, &scm->ddp_freelist) { ddp_desc = (struct csio_dma_buf *) tmp; tmp = csio_list_prev(tmp); pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr, ddp_desc->paddr); list_del_init(&ddp_desc->list); kfree(ddp_desc); } scm->stats.n_free_ddp = 0; return -ENOMEM; } /* * csio_scsi_free_ddp_bufs - free DDP buffers of unaligned SGLs. * @scm: SCSI Module * @hw: HW device. * * This routine frees ddp buffers. 
*/ static void csio_scsi_free_ddp_bufs(struct csio_scsim *scm, struct csio_hw *hw) { struct list_head *tmp; struct csio_dma_buf *ddp_desc; /* release dma descs back to freelist and free dma memory */ list_for_each(tmp, &scm->ddp_freelist) { ddp_desc = (struct csio_dma_buf *) tmp; tmp = csio_list_prev(tmp); pci_free_consistent(hw->pdev, ddp_desc->len, ddp_desc->vaddr, ddp_desc->paddr); list_del_init(&ddp_desc->list); kfree(ddp_desc); } scm->stats.n_free_ddp = 0; } /** * csio_scsim_init - Initialize SCSI Module * @scm: SCSI Module * @hw: HW module * */ int csio_scsim_init(struct csio_scsim *scm, struct csio_hw *hw) { int i; struct csio_ioreq *ioreq; struct csio_dma_buf *dma_buf; INIT_LIST_HEAD(&scm->active_q); scm->hw = hw; scm->proto_cmd_len = sizeof(struct fcp_cmnd); scm->proto_rsp_len = CSIO_SCSI_RSP_LEN; scm->max_sge = CSIO_SCSI_MAX_SGE; spin_lock_init(&scm->freelist_lock); /* Pre-allocate ioreqs and initialize them */ INIT_LIST_HEAD(&scm->ioreq_freelist); for (i = 0; i < csio_scsi_ioreqs; i++) { ioreq = kzalloc(sizeof(struct csio_ioreq), GFP_KERNEL); if (!ioreq) { csio_err(hw, "I/O request element allocation failed, " " Num allocated = %d.\n", scm->stats.n_free_ioreq); goto free_ioreq; } /* Allocate Dma buffers for Response Payload */ dma_buf = &ioreq->dma_buf; dma_buf->vaddr = pci_pool_alloc(hw->scsi_pci_pool, GFP_KERNEL, &dma_buf->paddr); if (!dma_buf->vaddr) { csio_err(hw, "SCSI response DMA buffer allocation" " failed!\n"); kfree(ioreq); goto free_ioreq; } dma_buf->len = scm->proto_rsp_len; /* Set state to uninit */ csio_init_state(&ioreq->sm, csio_scsis_uninit); INIT_LIST_HEAD(&ioreq->gen_list); init_completion(&ioreq->cmplobj); list_add_tail(&ioreq->sm.sm_list, &scm->ioreq_freelist); CSIO_INC_STATS(scm, n_free_ioreq); } if (csio_scsi_alloc_ddp_bufs(scm, hw, PAGE_SIZE, csio_ddp_descs)) goto free_ioreq; return 0; free_ioreq: /* * Free up existing allocations, since an error * from here means we are returning for good */ while 
(!list_empty(&scm->ioreq_freelist)) { struct csio_sm *tmp; tmp = list_first_entry(&scm->ioreq_freelist, struct csio_sm, sm_list); list_del_init(&tmp->sm_list); ioreq = (struct csio_ioreq *)tmp; dma_buf = &ioreq->dma_buf; pci_pool_free(hw->scsi_pci_pool, dma_buf->vaddr, dma_buf->paddr); kfree(ioreq); } scm->stats.n_free_ioreq = 0; return -ENOMEM; } /** * csio_scsim_exit: Uninitialize SCSI Module * @scm: SCSI Module * */ void csio_scsim_exit(struct csio_scsim *scm) { struct csio_ioreq *ioreq; struct csio_dma_buf *dma_buf; while (!list_empty(&scm->ioreq_freelist)) { struct csio_sm *tmp; tmp = list_first_entry(&scm->ioreq_freelist, struct csio_sm, sm_list); list_del_init(&tmp->sm_list); ioreq = (struct csio_ioreq *)tmp; dma_buf = &ioreq->dma_buf; pci_pool_free(scm->hw->scsi_pci_pool, dma_buf->vaddr, dma_buf->paddr); kfree(ioreq); } scm->stats.n_free_ioreq = 0; csio_scsi_free_ddp_bufs(scm, scm->hw); }
gpl-2.0
championswimmer/kernel_sony_msm8960t
mm/util.c
3529
8341
#include <linux/mm.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/export.h> #include <linux/err.h> #include <linux/sched.h> #include <asm/uaccess.h> #include "internal.h" #define CREATE_TRACE_POINTS #include <trace/events/kmem.h> /** * kstrdup - allocate space for and copy an existing string * @s: the string to duplicate * @gfp: the GFP mask used in the kmalloc() call when allocating memory */ char *kstrdup(const char *s, gfp_t gfp) { size_t len; char *buf; if (!s) return NULL; len = strlen(s) + 1; buf = kmalloc_track_caller(len, gfp); if (buf) memcpy(buf, s, len); return buf; } EXPORT_SYMBOL(kstrdup); /** * kstrndup - allocate space for and copy an existing string * @s: the string to duplicate * @max: read at most @max chars from @s * @gfp: the GFP mask used in the kmalloc() call when allocating memory */ char *kstrndup(const char *s, size_t max, gfp_t gfp) { size_t len; char *buf; if (!s) return NULL; len = strnlen(s, max); buf = kmalloc_track_caller(len+1, gfp); if (buf) { memcpy(buf, s, len); buf[len] = '\0'; } return buf; } EXPORT_SYMBOL(kstrndup); /** * kmemdup - duplicate region of memory * * @src: memory region to duplicate * @len: memory region length * @gfp: GFP mask to use */ void *kmemdup(const void *src, size_t len, gfp_t gfp) { void *p; p = kmalloc_track_caller(len, gfp); if (p) memcpy(p, src, len); return p; } EXPORT_SYMBOL(kmemdup); /** * memdup_user - duplicate memory region from user space * * @src: source address in user space * @len: number of bytes to copy * * Returns an ERR_PTR() on failure. */ void *memdup_user(const void __user *src, size_t len) { void *p; /* * Always use GFP_KERNEL, since copy_from_user() can sleep and * cause pagefault, which makes it pointless to use GFP_NOFS * or GFP_ATOMIC. 
*/ p = kmalloc_track_caller(len, GFP_KERNEL); if (!p) return ERR_PTR(-ENOMEM); if (copy_from_user(p, src, len)) { kfree(p); return ERR_PTR(-EFAULT); } return p; } EXPORT_SYMBOL(memdup_user); /** * __krealloc - like krealloc() but don't free @p. * @p: object to reallocate memory for. * @new_size: how many bytes of memory are required. * @flags: the type of memory to allocate. * * This function is like krealloc() except it never frees the originally * allocated buffer. Use this if you don't want to free the buffer immediately * like, for example, with RCU. */ void *__krealloc(const void *p, size_t new_size, gfp_t flags) { void *ret; size_t ks = 0; if (unlikely(!new_size)) return ZERO_SIZE_PTR; if (p) ks = ksize(p); if (ks >= new_size) return (void *)p; ret = kmalloc_track_caller(new_size, flags); if (ret && p) memcpy(ret, p, ks); return ret; } EXPORT_SYMBOL(__krealloc); /** * krealloc - reallocate memory. The contents will remain unchanged. * @p: object to reallocate memory for. * @new_size: how many bytes of memory are required. * @flags: the type of memory to allocate. * * The contents of the object pointed to are preserved up to the * lesser of the new and old sizes. If @p is %NULL, krealloc() * behaves exactly like kmalloc(). If @size is 0 and @p is not a * %NULL pointer, the object pointed to is freed. */ void *krealloc(const void *p, size_t new_size, gfp_t flags) { void *ret; if (unlikely(!new_size)) { kfree(p); return ZERO_SIZE_PTR; } ret = __krealloc(p, new_size, flags); if (ret && p != ret) kfree(p); return ret; } EXPORT_SYMBOL(krealloc); /** * kzfree - like kfree but zero memory * @p: object to free memory of * * The memory of the object @p points to is zeroed before freed. * If @p is %NULL, kzfree() does nothing. * * Note: this function zeroes the whole allocated buffer which can be a good * deal bigger than the requested buffer size passed to kmalloc(). So be * careful when using this function in performance sensitive code. 
*/ void kzfree(const void *p) { size_t ks; void *mem = (void *)p; if (unlikely(ZERO_OR_NULL_PTR(mem))) return; ks = ksize(mem); memset(mem, 0, ks); kfree(mem); } EXPORT_SYMBOL(kzfree); /* * strndup_user - duplicate an existing string from user space * @s: The string to duplicate * @n: Maximum number of bytes to copy, including the trailing NUL. */ char *strndup_user(const char __user *s, long n) { char *p; long length; length = strnlen_user(s, n); if (!length) return ERR_PTR(-EFAULT); if (length > n) return ERR_PTR(-EINVAL); p = memdup_user(s, length); if (IS_ERR(p)) return p; p[length - 1] = '\0'; return p; } EXPORT_SYMBOL(strndup_user); void __vma_link_list(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, struct rb_node *rb_parent) { struct vm_area_struct *next; vma->vm_prev = prev; if (prev) { next = prev->vm_next; prev->vm_next = vma; } else { mm->mmap = vma; if (rb_parent) next = rb_entry(rb_parent, struct vm_area_struct, vm_rb); else next = NULL; } vma->vm_next = next; if (next) next->vm_prev = vma; } /* Check if the vma is being used as a stack by this task */ static int vm_is_stack_for_task(struct task_struct *t, struct vm_area_struct *vma) { return (vma->vm_start <= KSTK_ESP(t) && vma->vm_end >= KSTK_ESP(t)); } /* * Check if the vma is being used as a stack. * If is_group is non-zero, check in the entire thread group or else * just check in the current task. Returns the pid of the task that * the vma is stack for. 
*/ pid_t vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group) { pid_t ret = 0; if (vm_is_stack_for_task(task, vma)) return task->pid; if (in_group) { struct task_struct *t; rcu_read_lock(); if (!pid_alive(task)) goto done; t = task; do { if (vm_is_stack_for_task(t, vma)) { ret = t->pid; goto done; } } while_each_thread(task, t); done: rcu_read_unlock(); } return ret; } #if defined(CONFIG_MMU) && !defined(HAVE_ARCH_PICK_MMAP_LAYOUT) void arch_pick_mmap_layout(struct mm_struct *mm) { mm->mmap_base = TASK_UNMAPPED_BASE; mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } #endif /* * Like get_user_pages_fast() except its IRQ-safe in that it won't fall * back to the regular GUP. * If the architecture not support this function, simply return with no * page pinned */ int __attribute__((weak)) __get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { return 0; } EXPORT_SYMBOL_GPL(__get_user_pages_fast); /** * get_user_pages_fast() - pin user pages in memory * @start: starting user address * @nr_pages: number of pages from start to pin * @write: whether pages will be written to * @pages: array that receives pointers to the pages pinned. * Should be at least nr_pages long. * * Returns number of pages pinned. This may be fewer than the number * requested. If nr_pages is 0 or negative, returns 0. If no pages * were pinned, returns -errno. * * get_user_pages_fast provides equivalent functionality to get_user_pages, * operating on current and current->mm, with force=0 and vma=NULL. However * unlike get_user_pages, it must be called without mmap_sem held. * * get_user_pages_fast may take mmap_sem and page table locks, so no * assumptions can be made about lack of locking. get_user_pages_fast is to be * implemented in a way that is advantageous (vs get_user_pages()) when the * user memory area is already faulted in and present in ptes. 
However if the * pages have to be faulted in, it may turn out to be slightly slower so * callers need to carefully consider what to use. On many architectures, * get_user_pages_fast simply falls back to get_user_pages. */ int __attribute__((weak)) get_user_pages_fast(unsigned long start, int nr_pages, int write, struct page **pages) { struct mm_struct *mm = current->mm; int ret; down_read(&mm->mmap_sem); ret = get_user_pages(current, mm, start, nr_pages, write, 0, pages, NULL); up_read(&mm->mmap_sem); return ret; } EXPORT_SYMBOL_GPL(get_user_pages_fast); /* Tracepoints definitions. */ EXPORT_TRACEPOINT_SYMBOL(kmalloc); EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc); EXPORT_TRACEPOINT_SYMBOL(kmalloc_node); EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node); EXPORT_TRACEPOINT_SYMBOL(kfree); EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
gpl-2.0
robcore/machinex_kernelv2
arch/mips/alchemy/devboards/pb1500.c
4553
6058
/* * Pb1500 board support. * * Copyright (C) 2009 Manuel Lauss * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-db1x00/bcsr.h> #include <prom.h> #include "platform.h" const char *get_system_type(void) { return "PB1500"; } void __init board_setup(void) { u32 pin_func; u32 sys_freqctrl, sys_clksrc; bcsr_init(DB1000_BCSR_PHYS_ADDR, DB1000_BCSR_PHYS_ADDR + DB1000_BCSR_HEXLED_OFS); sys_clksrc = sys_freqctrl = pin_func = 0; /* Set AUX clock to 12 MHz * 8 = 96 MHz */ au_writel(8, SYS_AUXPLL); alchemy_gpio1_input_enable(); udelay(100); /* GPIO201 is input for PCMCIA card detect */ /* GPIO203 is input for PCMCIA interrupt request */ alchemy_gpio_direction_input(201); alchemy_gpio_direction_input(203); #if defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) /* Zero and disable FREQ2 */ sys_freqctrl = au_readl(SYS_FREQCTRL0); sys_freqctrl &= ~0xFFF00000; au_writel(sys_freqctrl, SYS_FREQCTRL0); /* zero and disable USBH/USBD clocks */ sys_clksrc = au_readl(SYS_CLKSRC); sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK | SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK); au_writel(sys_clksrc, 
SYS_CLKSRC); sys_freqctrl = au_readl(SYS_FREQCTRL0); sys_freqctrl &= ~0xFFF00000; sys_clksrc = au_readl(SYS_CLKSRC); sys_clksrc &= ~(SYS_CS_CUD | SYS_CS_DUD | SYS_CS_MUD_MASK | SYS_CS_CUH | SYS_CS_DUH | SYS_CS_MUH_MASK); /* FREQ2 = aux/2 = 48 MHz */ sys_freqctrl |= (0 << SYS_FC_FRDIV2_BIT) | SYS_FC_FE2 | SYS_FC_FS2; au_writel(sys_freqctrl, SYS_FREQCTRL0); /* * Route 48MHz FREQ2 into USB Host and/or Device */ sys_clksrc |= SYS_CS_MUX_FQ2 << SYS_CS_MUH_BIT; au_writel(sys_clksrc, SYS_CLKSRC); pin_func = au_readl(SYS_PINFUNC) & ~SYS_PF_USB; /* 2nd USB port is USB host */ pin_func |= SYS_PF_USB; au_writel(pin_func, SYS_PINFUNC); #endif /* defined(CONFIG_USB_OHCI_HCD) || defined(CONFIG_USB_OHCI_HCD_MODULE) */ #ifdef CONFIG_PCI { void __iomem *base = (void __iomem *)KSEG1ADDR(AU1500_PCI_PHYS_ADDR); /* Setup PCI bus controller */ __raw_writel(0x00003fff, base + PCI_REG_CMEM); __raw_writel(0xf0000000, base + PCI_REG_MWMASK_DEV); __raw_writel(0, base + PCI_REG_MWBASE_REV_CCL); __raw_writel(0x02a00356, base + PCI_REG_STATCMD); __raw_writel(0x00003c04, base + PCI_REG_PARAM); __raw_writel(0x00000008, base + PCI_REG_MBAR); wmb(); } #endif /* Enable sys bus clock divider when IDLE state or no bus activity. */ au_writel(au_readl(SYS_POWERCTRL) | (0x3 << 5), SYS_POWERCTRL); /* Enable the RTC if not already enabled */ if (!(au_readl(0xac000028) & 0x20)) { printk(KERN_INFO "enabling clock ...\n"); au_writel((au_readl(0xac000028) | 0x20), 0xac000028); } /* Put the clock in BCD mode */ if (au_readl(0xac00002c) & 0x4) { /* reg B */ au_writel(au_readl(0xac00002c) & ~0x4, 0xac00002c); au_sync(); } } /******************************************************************************/ static int pb1500_map_pci_irq(const struct pci_dev *d, u8 slot, u8 pin) { if ((slot < 12) || (slot > 13) || pin == 0) return -1; if (slot == 12) return (pin == 1) ? 
AU1500_PCI_INTA : 0xff; if (slot == 13) { switch (pin) { case 1: return AU1500_PCI_INTA; case 2: return AU1500_PCI_INTB; case 3: return AU1500_PCI_INTC; case 4: return AU1500_PCI_INTD; } } return -1; } static struct resource alchemy_pci_host_res[] = { [0] = { .start = AU1500_PCI_PHYS_ADDR, .end = AU1500_PCI_PHYS_ADDR + 0xfff, .flags = IORESOURCE_MEM, }, }; static struct alchemy_pci_platdata pb1500_pci_pd = { .board_map_irq = pb1500_map_pci_irq, .pci_cfg_set = PCI_CONFIG_AEN | PCI_CONFIG_R2H | PCI_CONFIG_R1H | PCI_CONFIG_CH | #if defined(__MIPSEB__) PCI_CONFIG_SIC_HWA_DAT | PCI_CONFIG_SM, #else 0, #endif }; static struct platform_device pb1500_pci_host = { .dev.platform_data = &pb1500_pci_pd, .name = "alchemy-pci", .id = 0, .num_resources = ARRAY_SIZE(alchemy_pci_host_res), .resource = alchemy_pci_host_res, }; static int __init pb1500_dev_init(void) { int swapped; irq_set_irq_type(AU1500_GPIO9_INT, IRQF_TRIGGER_LOW); /* CD0# */ irq_set_irq_type(AU1500_GPIO10_INT, IRQF_TRIGGER_LOW); /* CARD0 */ irq_set_irq_type(AU1500_GPIO11_INT, IRQF_TRIGGER_LOW); /* STSCHG0# */ irq_set_irq_type(AU1500_GPIO204_INT, IRQF_TRIGGER_HIGH); irq_set_irq_type(AU1500_GPIO201_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO202_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO203_INT, IRQF_TRIGGER_LOW); irq_set_irq_type(AU1500_GPIO205_INT, IRQF_TRIGGER_LOW); /* PCMCIA. single socket, identical to Pb1100 */ db1x_register_pcmcia_socket( AU1000_PCMCIA_ATTR_PHYS_ADDR, AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x000400000 - 1, AU1000_PCMCIA_MEM_PHYS_ADDR, AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1, AU1000_PCMCIA_IO_PHYS_ADDR, AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1, AU1500_GPIO11_INT, AU1500_GPIO9_INT, /* card / insert */ /*AU1500_GPIO10_INT*/0, 0, 0); /* stschg / eject / id */ swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1000_SWAPBOOT; db1x_register_norflash(64 * 1024 * 1024, 4, swapped); platform_device_register(&pb1500_pci_host); return 0; } arch_initcall(pb1500_dev_init);
gpl-2.0
Radium-Devices/Radium_taoshan
drivers/scsi/isci/remote_node_context.c
4809
20717
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "host.h" #include "isci.h" #include "remote_device.h" #include "remote_node_context.h" #include "scu_event_codes.h" #include "scu_task_context.h" #undef C #define C(a) (#a) const char *rnc_state_name(enum scis_sds_remote_node_context_states state) { static const char * const strings[] = RNC_STATES; return strings[state]; } #undef C /** * * @sci_rnc: The state of the remote node context object to check. * * This method will return true if the remote node context is in a READY state * otherwise it will return false bool true if the remote node context is in * the ready state. false if the remote node context is not in the ready state. 
*/ bool sci_remote_node_context_is_ready( struct sci_remote_node_context *sci_rnc) { u32 current_state = sci_rnc->sm.current_state_id; if (current_state == SCI_RNC_READY) { return true; } return false; } static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) { if (id < ihost->remote_node_entries && ihost->device_table[id]) return &ihost->remote_node_context_table[id]; return NULL; } static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc) { struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; int rni = sci_rnc->remote_node_index; union scu_remote_node_context *rnc; struct isci_host *ihost; __le64 sas_addr; ihost = idev->owning_port->owning_controller; rnc = sci_rnc_by_id(ihost, rni); memset(rnc, 0, sizeof(union scu_remote_node_context) * sci_remote_device_node_count(idev)); rnc->ssp.remote_node_index = rni; rnc->ssp.remote_node_port_width = idev->device_port_width; rnc->ssp.logical_port_index = idev->owning_port->physical_port_index; /* sas address is __be64, context ram format is __le64 */ sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr)); rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr); rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr); rnc->ssp.nexus_loss_timer_enable = true; rnc->ssp.check_bit = false; rnc->ssp.is_valid = false; rnc->ssp.is_remote_node_context = true; rnc->ssp.function_number = 0; rnc->ssp.arbitration_wait_time = 0; if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { rnc->ssp.connection_occupancy_timeout = ihost->user_parameters.stp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = ihost->user_parameters.stp_inactivity_timeout; } else { rnc->ssp.connection_occupancy_timeout = ihost->user_parameters.ssp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = ihost->user_parameters.ssp_inactivity_timeout; } rnc->ssp.initial_arbitration_wait_time = 0; /* Open Address Frame 
Parameters */ rnc->ssp.oaf_connection_rate = idev->connection_rate; rnc->ssp.oaf_features = 0; rnc->ssp.oaf_source_zone_group = 0; rnc->ssp.oaf_more_compatibility_features = 0; } /** * * @sci_rnc: * @callback: * @callback_parameter: * * This method will setup the remote node context object so it will transition * to its ready state. If the remote node context is already setup to * transition to its final state then this function does nothing. none */ static void sci_remote_node_context_setup_to_resume( struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter) { if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) { sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY; sci_rnc->user_callback = callback; sci_rnc->user_cookie = callback_parameter; } } static void sci_remote_node_context_setup_to_destory( struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter) { sci_rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL; sci_rnc->user_callback = callback; sci_rnc->user_cookie = callback_parameter; } /** * * * This method just calls the user callback function and then resets the * callback. 
*/ static void sci_remote_node_context_notify_user( struct sci_remote_node_context *rnc) { if (rnc->user_callback != NULL) { (*rnc->user_callback)(rnc->user_cookie); rnc->user_callback = NULL; rnc->user_cookie = NULL; } } static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) { if (rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) sci_remote_node_context_resume(rnc, rnc->user_callback, rnc->user_cookie); } static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) { union scu_remote_node_context *rnc_buffer; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; struct isci_host *ihost = idev->owning_port->owning_controller; rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); rnc_buffer->ssp.is_valid = true; if (!idev->is_direct_attached && (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))) { sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); } else { sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); if (idev->is_direct_attached) sci_port_setup_transports(idev->owning_port, sci_rnc->remote_node_index); } } static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc) { union scu_remote_node_context *rnc_buffer; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct isci_host *ihost = idev->owning_port->owning_controller; rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); rnc_buffer->ssp.is_valid = false; sci_remote_device_post_request(rnc_to_dev(sci_rnc), SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); } static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); /* Check to see if we have gotten back to the initial state because * someone requested to destroy the remote node context 
object. */ if (sm->previous_state_id == SCI_RNC_INVALIDATING) { rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; sci_remote_node_context_notify_user(rnc); } } static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); sci_remote_node_context_validate_context_buffer(sci_rnc); } static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); sci_remote_node_context_invalidate_context_buffer(rnc); } static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev; struct domain_device *dev; idev = rnc_to_dev(rnc); dev = idev->domain_dev; /* * For direct attached SATA devices we need to clear the TLCR * NCQ to TCi tag mapping on the phy and in cases where we * resume because of a target reset we also need to update * the STPTLDARNI register with the RNi of the device */ if ((dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) && idev->is_direct_attached) sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); } static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; if (rnc->user_callback) sci_remote_node_context_notify_user(rnc); } static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); sci_remote_node_context_continue_state_transitions(rnc); } static void 
sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); sci_remote_node_context_continue_state_transitions(rnc); } static const struct sci_base_state sci_remote_node_context_state_table[] = { [SCI_RNC_INITIAL] = { .enter_state = sci_remote_node_context_initial_state_enter, }, [SCI_RNC_POSTING] = { .enter_state = sci_remote_node_context_posting_state_enter, }, [SCI_RNC_INVALIDATING] = { .enter_state = sci_remote_node_context_invalidating_state_enter, }, [SCI_RNC_RESUMING] = { .enter_state = sci_remote_node_context_resuming_state_enter, }, [SCI_RNC_READY] = { .enter_state = sci_remote_node_context_ready_state_enter, }, [SCI_RNC_TX_SUSPENDED] = { .enter_state = sci_remote_node_context_tx_suspended_state_enter, }, [SCI_RNC_TX_RX_SUSPENDED] = { .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, }, [SCI_RNC_AWAIT_SUSPENSION] = { }, }; void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, u16 remote_node_index) { memset(rnc, 0, sizeof(struct sci_remote_node_context)); rnc->remote_node_index = remote_node_index; rnc->destination_state = SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_UNSPECIFIED; sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); } enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, u32 event_code) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_POSTING: switch (scu_get_event_code(event_code)) { case SCU_EVENT_POST_RNC_COMPLETE: sci_change_state(&sci_rnc->sm, SCI_RNC_READY); break; default: goto out; } break; case SCI_RNC_INVALIDATING: if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { if (sci_rnc->destination_state == SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_FINAL) state = SCI_RNC_INITIAL; else state = SCI_RNC_POSTING; sci_change_state(&sci_rnc->sm, state); 
} else { switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: /* We really dont care if the hardware is going to suspend * the device since it's being invalidated anyway */ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was " "suspeneded by hardware while being " "invalidated.\n", __func__, sci_rnc); break; default: goto out; } } break; case SCI_RNC_RESUMING: if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) { sci_change_state(&sci_rnc->sm, SCI_RNC_READY); } else { switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: /* We really dont care if the hardware is going to suspend * the device since it's being resumed anyway */ dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was " "suspeneded by hardware while being resumed.\n", __func__, sci_rnc); break; default: goto out; } } break; case SCI_RNC_READY: switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); sci_rnc->suspension_code = scu_get_event_specifier(event_code); break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); sci_rnc->suspension_code = scu_get_event_specifier(event_code); break; default: goto out; } break; case SCI_RNC_AWAIT_SUSPENSION: switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); sci_rnc->suspension_code = scu_get_event_specifier(event_code); break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); sci_rnc->suspension_code = scu_get_event_specifier(event_code); break; default: goto out; } break; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } return SCI_SUCCESS; out: 
dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: code: %#x state: %d\n", __func__, event_code, state); return SCI_FAILURE; } enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_INVALIDATING: sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; case SCI_RNC_POSTING: case SCI_RNC_RESUMING: case SCI_RNC_READY: case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: case SCI_RNC_AWAIT_SUSPENSION: sci_remote_node_context_setup_to_destory(sci_rnc, cb_fn, cb_p); sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); return SCI_SUCCESS; case SCI_RNC_INITIAL: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); /* We have decided that the destruct request on the remote node context * can not fail since it is either in the initial/destroyed state or is * can be destroyed. 
*/ return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_remote_node_context_suspend(struct sci_remote_node_context *sci_rnc, u32 suspend_type, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; if (state != SCI_RNC_READY) { dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } sci_rnc->user_callback = cb_fn; sci_rnc->user_cookie = cb_p; sci_rnc->suspension_code = suspend_type; if (suspend_type == SCI_SOFTWARE_SUSPENSION) { sci_remote_device_post_request(rnc_to_dev(sci_rnc), SCU_CONTEXT_COMMAND_POST_RNC_SUSPEND_TX); } sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); return SCI_SUCCESS; } enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_INITIAL: if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_STATE; sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); sci_remote_node_context_construct_buffer(sci_rnc); sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); return SCI_SUCCESS; case SCI_RNC_POSTING: case SCI_RNC_INVALIDATING: case SCI_RNC_RESUMING: if (sci_rnc->destination_state != SCIC_SDS_REMOTE_NODE_DESTINATION_STATE_READY) return SCI_FAILURE_INVALID_STATE; sci_rnc->user_callback = cb_fn; sci_rnc->user_cookie = cb_p; return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: { struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); /* TODO: consider adding a resume action of NONE, INVALIDATE, WRITE_TLCR */ 
if (dev->dev_type == SAS_END_DEV || dev_is_expander(dev)) sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) { if (idev->is_direct_attached) { /* @todo Fix this since I am being silly in writing to the STPTLDARNI register. */ sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); } else { sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); } } else return SCI_FAILURE; return SCI_SUCCESS; } case SCI_RNC_TX_RX_SUSPENDED: sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); return SCI_FAILURE_INVALID_STATE; case SCI_RNC_AWAIT_SUSPENSION: sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_READY: return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: case SCI_RNC_AWAIT_SUSPENSION: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; default: break; } dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: requested to start IO while still resuming, %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } enum sci_status sci_remote_node_context_start_task(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_RESUMING: case SCI_RNC_READY: case SCI_RNC_AWAIT_SUSPENSION: return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: sci_remote_node_context_resume(sci_rnc, NULL, NULL); return SCI_SUCCESS; 
default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return SCI_FAILURE_INVALID_STATE; } }
gpl-2.0
CyanogenMod/android_kernel_sony_msm8660
drivers/media/dvb/frontends/dib9000.c
4809
72872
/* * Linux-DVB Driver for DiBcom's DiB9000 and demodulator-family. * * Copyright (C) 2005-10 DiBcom (http://www.dibcom.fr/) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. */ #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/mutex.h> #include "dvb_math.h" #include "dvb_frontend.h" #include "dib9000.h" #include "dibx000_common.h" static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); #define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB9000: "); printk(args); printk("\n"); } } while (0) #define MAX_NUMBER_OF_FRONTENDS 6 struct i2c_device { struct i2c_adapter *i2c_adap; u8 i2c_addr; u8 *i2c_read_buffer; u8 *i2c_write_buffer; }; /* lock */ #define DIB_LOCK struct mutex #define DibAcquireLock(lock) mutex_lock_interruptible(lock) #define DibReleaseLock(lock) mutex_unlock(lock) #define DibInitLock(lock) mutex_init(lock) #define DibFreeLock(lock) struct dib9000_pid_ctrl { #define DIB9000_PID_FILTER_CTRL 0 #define DIB9000_PID_FILTER 1 u8 cmd; u8 id; u16 pid; u8 onoff; }; struct dib9000_state { struct i2c_device i2c; struct dibx000_i2c_master i2c_master; struct i2c_adapter tuner_adap; struct i2c_adapter component_bus; u16 revision; u8 reg_offs; enum frontend_tune_state tune_state; u32 status; struct dvb_frontend_parametersContext channel_status; u8 fe_id; #define DIB9000_GPIO_DEFAULT_DIRECTIONS 0xffff u16 gpio_dir; #define DIB9000_GPIO_DEFAULT_VALUES 0x0000 u16 gpio_val; #define DIB9000_GPIO_DEFAULT_PWM_POS 0xffff u16 gpio_pwm_pos; union { /* common for all chips */ struct { u8 mobile_mode:1; } host; struct { struct dib9000_fe_memory_map { u16 addr; u16 size; } fe_mm[18]; u8 memcmd; DIB_LOCK mbx_if_lock; /* to protect read/write operations */ DIB_LOCK mbx_lock; /* to protect the whole mailbox handling */ DIB_LOCK mem_lock; /* to protect the memory accesses 
*/ DIB_LOCK mem_mbx_lock; /* to protect the memory-based mailbox */ #define MBX_MAX_WORDS (256 - 200 - 2) #define DIB9000_MSG_CACHE_SIZE 2 u16 message_cache[DIB9000_MSG_CACHE_SIZE][MBX_MAX_WORDS]; u8 fw_is_running; } risc; } platform; union { /* common for all platforms */ struct { struct dib9000_config cfg; } d9; } chip; struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS]; u16 component_bus_speed; /* for the I2C transfer */ struct i2c_msg msg[2]; u8 i2c_write_buffer[255]; u8 i2c_read_buffer[255]; DIB_LOCK demod_lock; u8 get_frontend_internal; struct dib9000_pid_ctrl pid_ctrl[10]; s8 pid_ctrl_index; /* -1: empty list; -2: do not use the list */ }; static const u32 fe_info[44] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 }; enum dib9000_power_mode { DIB9000_POWER_ALL = 0, DIB9000_POWER_NO, DIB9000_POWER_INTERF_ANALOG_AGC, DIB9000_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD, DIB9000_POWER_COR4_CRY_ESRAM_MOUT_NUD, DIB9000_POWER_INTERFACE_ONLY, }; enum dib9000_out_messages { OUT_MSG_HBM_ACK, OUT_MSG_HOST_BUF_FAIL, OUT_MSG_REQ_VERSION, OUT_MSG_BRIDGE_I2C_W, OUT_MSG_BRIDGE_I2C_R, OUT_MSG_BRIDGE_APB_W, OUT_MSG_BRIDGE_APB_R, OUT_MSG_SCAN_CHANNEL, OUT_MSG_MONIT_DEMOD, OUT_MSG_CONF_GPIO, OUT_MSG_DEBUG_HELP, OUT_MSG_SUBBAND_SEL, OUT_MSG_ENABLE_TIME_SLICE, OUT_MSG_FE_FW_DL, OUT_MSG_FE_CHANNEL_SEARCH, OUT_MSG_FE_CHANNEL_TUNE, OUT_MSG_FE_SLEEP, OUT_MSG_FE_SYNC, OUT_MSG_CTL_MONIT, OUT_MSG_CONF_SVC, OUT_MSG_SET_HBM, OUT_MSG_INIT_DEMOD, OUT_MSG_ENABLE_DIVERSITY, OUT_MSG_SET_OUTPUT_MODE, OUT_MSG_SET_PRIORITARY_CHANNEL, OUT_MSG_ACK_FRG, OUT_MSG_INIT_PMU, }; enum dib9000_in_messages { IN_MSG_DATA, IN_MSG_FRAME_INFO, IN_MSG_CTL_MONIT, IN_MSG_ACK_FREE_ITEM, IN_MSG_DEBUG_BUF, IN_MSG_MPE_MONITOR, IN_MSG_RAWTS_MONITOR, IN_MSG_END_BRIDGE_I2C_RW, IN_MSG_END_BRIDGE_APB_RW, IN_MSG_VERSION, IN_MSG_END_OF_SCAN, IN_MSG_MONIT_DEMOD, IN_MSG_ERROR, IN_MSG_FE_FW_DL_DONE, IN_MSG_EVENT, IN_MSG_ACK_CHANGE_SVC, 
IN_MSG_HBM_PROF, }; /* memory_access requests */ #define FE_MM_W_CHANNEL 0 #define FE_MM_W_FE_INFO 1 #define FE_MM_RW_SYNC 2 #define FE_SYNC_CHANNEL 1 #define FE_SYNC_W_GENERIC_MONIT 2 #define FE_SYNC_COMPONENT_ACCESS 3 #define FE_MM_R_CHANNEL_SEARCH_STATE 3 #define FE_MM_R_CHANNEL_UNION_CONTEXT 4 #define FE_MM_R_FE_INFO 5 #define FE_MM_R_FE_MONITOR 6 #define FE_MM_W_CHANNEL_HEAD 7 #define FE_MM_W_CHANNEL_UNION 8 #define FE_MM_W_CHANNEL_CONTEXT 9 #define FE_MM_R_CHANNEL_UNION 10 #define FE_MM_R_CHANNEL_CONTEXT 11 #define FE_MM_R_CHANNEL_TUNE_STATE 12 #define FE_MM_R_GENERIC_MONITORING_SIZE 13 #define FE_MM_W_GENERIC_MONITORING 14 #define FE_MM_R_GENERIC_MONITORING 15 #define FE_MM_W_COMPONENT_ACCESS 16 #define FE_MM_RW_COMPONENT_ACCESS_BUFFER 17 static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address, u16 attribute, const u8 * tx, u32 txlen, u8 * b, u32 len); static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 address, u16 attribute, const u8 * b, u32 len); static u16 to_fw_output_mode(u16 mode) { switch (mode) { case OUTMODE_HIGH_Z: return 0; case OUTMODE_MPEG2_PAR_GATED_CLK: return 4; case OUTMODE_MPEG2_PAR_CONT_CLK: return 8; case OUTMODE_MPEG2_SERIAL: return 16; case OUTMODE_DIVERSITY: return 128; case OUTMODE_MPEG2_FIFO: return 2; case OUTMODE_ANALOG_ADC: return 1; default: return 0; } } static u16 dib9000_read16_attr(struct dib9000_state *state, u16 reg, u8 * b, u32 len, u16 attribute) { u32 chunk_size = 126; u32 l; int ret; if (state->platform.risc.fw_is_running && (reg < 1024)) return dib9000_risc_apb_access_read(state, reg, attribute, NULL, 0, b, len); memset(state->msg, 0, 2 * sizeof(struct i2c_msg)); state->msg[0].addr = state->i2c.i2c_addr >> 1; state->msg[0].flags = 0; state->msg[0].buf = state->i2c_write_buffer; state->msg[0].len = 2; state->msg[1].addr = state->i2c.i2c_addr >> 1; state->msg[1].flags = I2C_M_RD; state->msg[1].buf = b; state->msg[1].len = len; state->i2c_write_buffer[0] = reg >> 8; 
state->i2c_write_buffer[1] = reg & 0xff; if (attribute & DATA_BUS_ACCESS_MODE_8BIT) state->i2c_write_buffer[0] |= (1 << 5); if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT) state->i2c_write_buffer[0] |= (1 << 4); do { l = len < chunk_size ? len : chunk_size; state->msg[1].len = l; state->msg[1].buf = b; ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 2) != 2 ? -EREMOTEIO : 0; if (ret != 0) { dprintk("i2c read error on %d", reg); return -EREMOTEIO; } b += l; len -= l; if (!(attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)) reg += l / 2; } while ((ret == 0) && len); return 0; } static u16 dib9000_i2c_read16(struct i2c_device *i2c, u16 reg) { struct i2c_msg msg[2] = { {.addr = i2c->i2c_addr >> 1, .flags = 0, .buf = i2c->i2c_write_buffer, .len = 2}, {.addr = i2c->i2c_addr >> 1, .flags = I2C_M_RD, .buf = i2c->i2c_read_buffer, .len = 2}, }; i2c->i2c_write_buffer[0] = reg >> 8; i2c->i2c_write_buffer[1] = reg & 0xff; if (i2c_transfer(i2c->i2c_adap, msg, 2) != 2) { dprintk("read register %x error", reg); return 0; } return (i2c->i2c_read_buffer[0] << 8) | i2c->i2c_read_buffer[1]; } static inline u16 dib9000_read_word(struct dib9000_state *state, u16 reg) { if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2, 0) != 0) return 0; return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1]; } static inline u16 dib9000_read_word_attr(struct dib9000_state *state, u16 reg, u16 attribute) { if (dib9000_read16_attr(state, reg, state->i2c_read_buffer, 2, attribute) != 0) return 0; return (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1]; } #define dib9000_read16_noinc_attr(state, reg, b, len, attribute) dib9000_read16_attr(state, reg, b, len, (attribute) | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT) static u16 dib9000_write16_attr(struct dib9000_state *state, u16 reg, const u8 * buf, u32 len, u16 attribute) { u32 chunk_size = 126; u32 l; int ret; if (state->platform.risc.fw_is_running && (reg < 1024)) { if (dib9000_risc_apb_access_write 
(state, reg, DATA_BUS_ACCESS_MODE_16BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | attribute, buf, len) != 0) return -EINVAL; return 0; } memset(&state->msg[0], 0, sizeof(struct i2c_msg)); state->msg[0].addr = state->i2c.i2c_addr >> 1; state->msg[0].flags = 0; state->msg[0].buf = state->i2c_write_buffer; state->msg[0].len = len + 2; state->i2c_write_buffer[0] = (reg >> 8) & 0xff; state->i2c_write_buffer[1] = (reg) & 0xff; if (attribute & DATA_BUS_ACCESS_MODE_8BIT) state->i2c_write_buffer[0] |= (1 << 5); if (attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT) state->i2c_write_buffer[0] |= (1 << 4); do { l = len < chunk_size ? len : chunk_size; state->msg[0].len = l + 2; memcpy(&state->i2c_write_buffer[2], buf, l); ret = i2c_transfer(state->i2c.i2c_adap, state->msg, 1) != 1 ? -EREMOTEIO : 0; buf += l; len -= l; if (!(attribute & DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT)) reg += l / 2; } while ((ret == 0) && len); return ret; } static int dib9000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val) { struct i2c_msg msg = { .addr = i2c->i2c_addr >> 1, .flags = 0, .buf = i2c->i2c_write_buffer, .len = 4 }; i2c->i2c_write_buffer[0] = (reg >> 8) & 0xff; i2c->i2c_write_buffer[1] = reg & 0xff; i2c->i2c_write_buffer[2] = (val >> 8) & 0xff; i2c->i2c_write_buffer[3] = val & 0xff; return i2c_transfer(i2c->i2c_adap, &msg, 1) != 1 ? 
-EREMOTEIO : 0; } static inline int dib9000_write_word(struct dib9000_state *state, u16 reg, u16 val) { u8 b[2] = { val >> 8, val & 0xff }; return dib9000_write16_attr(state, reg, b, 2, 0); } static inline int dib9000_write_word_attr(struct dib9000_state *state, u16 reg, u16 val, u16 attribute) { u8 b[2] = { val >> 8, val & 0xff }; return dib9000_write16_attr(state, reg, b, 2, attribute); } #define dib9000_write(state, reg, buf, len) dib9000_write16_attr(state, reg, buf, len, 0) #define dib9000_write16_noinc(state, reg, buf, len) dib9000_write16_attr(state, reg, buf, len, DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT) #define dib9000_write16_noinc_attr(state, reg, buf, len, attribute) dib9000_write16_attr(state, reg, buf, len, DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT | (attribute)) #define dib9000_mbx_send(state, id, data, len) dib9000_mbx_send_attr(state, id, data, len, 0) #define dib9000_mbx_get_message(state, id, msg, len) dib9000_mbx_get_message_attr(state, id, msg, len, 0) #define MAC_IRQ (1 << 1) #define IRQ_POL_MSK (1 << 4) #define dib9000_risc_mem_read_chunks(state, b, len) dib9000_read16_attr(state, 1063, b, len, DATA_BUS_ACCESS_MODE_8BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT) #define dib9000_risc_mem_write_chunks(state, buf, len) dib9000_write16_attr(state, 1063, buf, len, DATA_BUS_ACCESS_MODE_8BIT | DATA_BUS_ACCESS_MODE_NO_ADDRESS_INCREMENT) static void dib9000_risc_mem_setup_cmd(struct dib9000_state *state, u32 addr, u32 len, u8 reading) { u8 b[14] = { 0 }; /* dprintk("%d memcmd: %d %d %d\n", state->fe_id, addr, addr+len, len); */ /* b[0] = 0 << 7; */ b[1] = 1; /* b[2] = 0; */ /* b[3] = 0; */ b[4] = (u8) (addr >> 8); b[5] = (u8) (addr & 0xff); /* b[10] = 0; */ /* b[11] = 0; */ b[12] = (u8) (addr >> 8); b[13] = (u8) (addr & 0xff); addr += len; /* b[6] = 0; */ /* b[7] = 0; */ b[8] = (u8) (addr >> 8); b[9] = (u8) (addr & 0xff); dib9000_write(state, 1056, b, 14); if (reading) dib9000_write_word(state, 1056, (1 << 15) | 1); state->platform.risc.memcmd = 
-1; /* if it was called directly reset it - to force a future setup-call to set it */ } static void dib9000_risc_mem_setup(struct dib9000_state *state, u8 cmd) { struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[cmd & 0x7f]; /* decide whether we need to "refresh" the memory controller */ if (state->platform.risc.memcmd == cmd && /* same command */ !(cmd & 0x80 && m->size < 67)) /* and we do not want to read something with less than 67 bytes looping - working around a bug in the memory controller */ return; dib9000_risc_mem_setup_cmd(state, m->addr, m->size, cmd & 0x80); state->platform.risc.memcmd = cmd; } static int dib9000_risc_mem_read(struct dib9000_state *state, u8 cmd, u8 * b, u16 len) { if (!state->platform.risc.fw_is_running) return -EIO; if (DibAcquireLock(&state->platform.risc.mem_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } dib9000_risc_mem_setup(state, cmd | 0x80); dib9000_risc_mem_read_chunks(state, b, len); DibReleaseLock(&state->platform.risc.mem_lock); return 0; } static int dib9000_risc_mem_write(struct dib9000_state *state, u8 cmd, const u8 * b) { struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[cmd]; if (!state->platform.risc.fw_is_running) return -EIO; if (DibAcquireLock(&state->platform.risc.mem_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } dib9000_risc_mem_setup(state, cmd); dib9000_risc_mem_write_chunks(state, b, m->size); DibReleaseLock(&state->platform.risc.mem_lock); return 0; } static int dib9000_firmware_download(struct dib9000_state *state, u8 risc_id, u16 key, const u8 * code, u32 len) { u16 offs; if (risc_id == 1) offs = 16; else offs = 0; /* config crtl reg */ dib9000_write_word(state, 1024 + offs, 0x000f); dib9000_write_word(state, 1025 + offs, 0); dib9000_write_word(state, 1031 + offs, key); dprintk("going to download %dB of microcode", len); if (dib9000_write16_noinc(state, 1026 + offs, (u8 *) code, (u16) len) != 0) { dprintk("error while downloading microcode for 
RISC %c", 'A' + risc_id); return -EIO; } dprintk("Microcode for RISC %c loaded", 'A' + risc_id); return 0; } static int dib9000_mbx_host_init(struct dib9000_state *state, u8 risc_id) { u16 mbox_offs; u16 reset_reg; u16 tries = 1000; if (risc_id == 1) mbox_offs = 16; else mbox_offs = 0; /* Reset mailbox */ dib9000_write_word(state, 1027 + mbox_offs, 0x8000); /* Read reset status */ do { reset_reg = dib9000_read_word(state, 1027 + mbox_offs); msleep(100); } while ((reset_reg & 0x8000) && --tries); if (reset_reg & 0x8000) { dprintk("MBX: init ERROR, no response from RISC %c", 'A' + risc_id); return -EIO; } dprintk("MBX: initialized"); return 0; } #define MAX_MAILBOX_TRY 100 static int dib9000_mbx_send_attr(struct dib9000_state *state, u8 id, u16 * data, u8 len, u16 attr) { u8 *d, b[2]; u16 tmp; u16 size; u32 i; int ret = 0; if (!state->platform.risc.fw_is_running) return -EINVAL; if (DibAcquireLock(&state->platform.risc.mbx_if_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } tmp = MAX_MAILBOX_TRY; do { size = dib9000_read_word_attr(state, 1043, attr) & 0xff; if ((size + len + 1) > MBX_MAX_WORDS && --tmp) { dprintk("MBX: RISC mbx full, retrying"); msleep(100); } else break; } while (1); /*dprintk( "MBX: size: %d", size); */ if (tmp == 0) { ret = -EINVAL; goto out; } #ifdef DUMP_MSG dprintk("--> %02x %d ", id, len + 1); for (i = 0; i < len; i++) dprintk("%04x ", data[i]); dprintk("\n"); #endif /* byte-order conversion - works on big (where it is not necessary) or little endian */ d = (u8 *) data; for (i = 0; i < len; i++) { tmp = data[i]; *d++ = tmp >> 8; *d++ = tmp & 0xff; } /* write msg */ b[0] = id; b[1] = len + 1; if (dib9000_write16_noinc_attr(state, 1045, b, 2, attr) != 0 || dib9000_write16_noinc_attr(state, 1045, (u8 *) data, len * 2, attr) != 0) { ret = -EIO; goto out; } /* update register nb_mes_in_RX */ ret = (u8) dib9000_write_word_attr(state, 1043, 1 << 14, attr); out: DibReleaseLock(&state->platform.risc.mbx_if_lock); return ret; } static u8 
dib9000_mbx_read(struct dib9000_state *state, u16 * data, u8 risc_id, u16 attr) { #ifdef DUMP_MSG u16 *d = data; #endif u16 tmp, i; u8 size; u8 mc_base; if (!state->platform.risc.fw_is_running) return 0; if (DibAcquireLock(&state->platform.risc.mbx_if_lock) < 0) { dprintk("could not get the lock"); return 0; } if (risc_id == 1) mc_base = 16; else mc_base = 0; /* Length and type in the first word */ *data = dib9000_read_word_attr(state, 1029 + mc_base, attr); size = *data & 0xff; if (size <= MBX_MAX_WORDS) { data++; size--; /* Initial word already read */ dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, size * 2, attr); /* to word conversion */ for (i = 0; i < size; i++) { tmp = *data; *data = (tmp >> 8) | (tmp << 8); data++; } #ifdef DUMP_MSG dprintk("<-- "); for (i = 0; i < size + 1; i++) dprintk("%04x ", d[i]); dprintk("\n"); #endif } else { dprintk("MBX: message is too big for message cache (%d), flushing message", size); size--; /* Initial word already read */ while (size--) dib9000_read16_noinc_attr(state, 1029 + mc_base, (u8 *) data, 2, attr); } /* Update register nb_mes_in_TX */ dib9000_write_word_attr(state, 1028 + mc_base, 1 << 14, attr); DibReleaseLock(&state->platform.risc.mbx_if_lock); return size + 1; } static int dib9000_risc_debug_buf(struct dib9000_state *state, u16 * data, u8 size) { u32 ts = data[1] << 16 | data[0]; char *b = (char *)&data[2]; b[2 * (size - 2) - 1] = '\0'; /* Bullet proof the buffer */ if (*b == '~') { b++; dprintk(b); } else dprintk("RISC%d: %d.%04d %s", state->fe_id, ts / 10000, ts % 10000, *b ? 
b : "<emtpy>"); return 1; } static int dib9000_mbx_fetch_to_cache(struct dib9000_state *state, u16 attr) { int i; u8 size; u16 *block; /* find a free slot */ for (i = 0; i < DIB9000_MSG_CACHE_SIZE; i++) { block = state->platform.risc.message_cache[i]; if (*block == 0) { size = dib9000_mbx_read(state, block, 1, attr); /* dprintk( "MBX: fetched %04x message to cache", *block); */ switch (*block >> 8) { case IN_MSG_DEBUG_BUF: dib9000_risc_debug_buf(state, block + 1, size); /* debug-messages are going to be printed right away */ *block = 0; /* free the block */ break; #if 0 case IN_MSG_DATA: /* FE-TRACE */ dib9000_risc_data_process(state, block + 1, size); *block = 0; break; #endif default: break; } return 1; } } dprintk("MBX: no free cache-slot found for new message..."); return -1; } static u8 dib9000_mbx_count(struct dib9000_state *state, u8 risc_id, u16 attr) { if (risc_id == 0) return (u8) (dib9000_read_word_attr(state, 1028, attr) >> 10) & 0x1f; /* 5 bit field */ else return (u8) (dib9000_read_word_attr(state, 1044, attr) >> 8) & 0x7f; /* 7 bit field */ } static int dib9000_mbx_process(struct dib9000_state *state, u16 attr) { int ret = 0; u16 tmp; if (!state->platform.risc.fw_is_running) return -1; if (DibAcquireLock(&state->platform.risc.mbx_lock) < 0) { dprintk("could not get the lock"); return -1; } if (dib9000_mbx_count(state, 1, attr)) /* 1=RiscB */ ret = dib9000_mbx_fetch_to_cache(state, attr); tmp = dib9000_read_word_attr(state, 1229, attr); /* Clear the IRQ */ /* if (tmp) */ /* dprintk( "cleared IRQ: %x", tmp); */ DibReleaseLock(&state->platform.risc.mbx_lock); return ret; } static int dib9000_mbx_get_message_attr(struct dib9000_state *state, u16 id, u16 * msg, u8 * size, u16 attr) { u8 i; u16 *block; u16 timeout = 30; *msg = 0; do { /* dib9000_mbx_get_from_cache(); */ for (i = 0; i < DIB9000_MSG_CACHE_SIZE; i++) { block = state->platform.risc.message_cache[i]; if ((*block >> 8) == id) { *size = (*block & 0xff) - 1; memcpy(msg, block + 1, (*size) * 2); 
*block = 0; /* free the block */ i = 0; /* signal that we found a message */ break; } } if (i == 0) break; if (dib9000_mbx_process(state, attr) == -1) /* try to fetch one message - if any */ return -1; } while (--timeout); if (timeout == 0) { dprintk("waiting for message %d timed out", id); return -1; } return i == 0; } static int dib9000_risc_check_version(struct dib9000_state *state) { u8 r[4]; u8 size; u16 fw_version = 0; if (dib9000_mbx_send(state, OUT_MSG_REQ_VERSION, &fw_version, 1) != 0) return -EIO; if (dib9000_mbx_get_message(state, IN_MSG_VERSION, (u16 *) r, &size) < 0) return -EIO; fw_version = (r[0] << 8) | r[1]; dprintk("RISC: ver: %d.%02d (IC: %d)", fw_version >> 10, fw_version & 0x3ff, (r[2] << 8) | r[3]); if ((fw_version >> 10) != 7) return -EINVAL; switch (fw_version & 0x3ff) { case 11: case 12: case 14: case 15: case 16: case 17: break; default: dprintk("RISC: invalid firmware version"); return -EINVAL; } dprintk("RISC: valid firmware version"); return 0; } static int dib9000_fw_boot(struct dib9000_state *state, const u8 * codeA, u32 lenA, const u8 * codeB, u32 lenB) { /* Reconfig pool mac ram */ dib9000_write_word(state, 1225, 0x02); /* A: 8k C, 4 k D - B: 32k C 6 k D - IRAM 96k */ dib9000_write_word(state, 1226, 0x05); /* Toggles IP crypto to Host APB interface. */ dib9000_write_word(state, 1542, 1); /* Set jump and no jump in the dma box */ dib9000_write_word(state, 1074, 0); dib9000_write_word(state, 1075, 0); /* Set MAC as APB Master. 
*/ dib9000_write_word(state, 1237, 0); /* Reset the RISCs */ if (codeA != NULL) dib9000_write_word(state, 1024, 2); else dib9000_write_word(state, 1024, 15); if (codeB != NULL) dib9000_write_word(state, 1040, 2); if (codeA != NULL) dib9000_firmware_download(state, 0, 0x1234, codeA, lenA); if (codeB != NULL) dib9000_firmware_download(state, 1, 0x1234, codeB, lenB); /* Run the RISCs */ if (codeA != NULL) dib9000_write_word(state, 1024, 0); if (codeB != NULL) dib9000_write_word(state, 1040, 0); if (codeA != NULL) if (dib9000_mbx_host_init(state, 0) != 0) return -EIO; if (codeB != NULL) if (dib9000_mbx_host_init(state, 1) != 0) return -EIO; msleep(100); state->platform.risc.fw_is_running = 1; if (dib9000_risc_check_version(state) != 0) return -EINVAL; state->platform.risc.memcmd = 0xff; return 0; } static u16 dib9000_identify(struct i2c_device *client) { u16 value; value = dib9000_i2c_read16(client, 896); if (value != 0x01b3) { dprintk("wrong Vendor ID (0x%x)", value); return 0; } value = dib9000_i2c_read16(client, 897); if (value != 0x4000 && value != 0x4001 && value != 0x4002 && value != 0x4003 && value != 0x4004 && value != 0x4005) { dprintk("wrong Device ID (0x%x)", value); return 0; } /* protect this driver to be used with 7000PC */ if (value == 0x4000 && dib9000_i2c_read16(client, 769) == 0x4000) { dprintk("this driver does not work with DiB7000PC"); return 0; } switch (value) { case 0x4000: dprintk("found DiB7000MA/PA/MB/PB"); break; case 0x4001: dprintk("found DiB7000HC"); break; case 0x4002: dprintk("found DiB7000MC"); break; case 0x4003: dprintk("found DiB9000A"); break; case 0x4004: dprintk("found DiB9000H"); break; case 0x4005: dprintk("found DiB9000M"); break; } return value; } static void dib9000_set_power_mode(struct dib9000_state *state, enum dib9000_power_mode mode) { /* by default everything is going to be powered off */ u16 reg_903 = 0x3fff, reg_904 = 0xffff, reg_905 = 0xffff, reg_906; u8 offset; if (state->revision == 0x4003 || state->revision == 
0x4004 || state->revision == 0x4005) offset = 1; else offset = 0; reg_906 = dib9000_read_word(state, 906 + offset) | 0x3; /* keep settings for RISC */ /* now, depending on the requested mode, we power on */ switch (mode) { /* power up everything in the demod */ case DIB9000_POWER_ALL: reg_903 = 0x0000; reg_904 = 0x0000; reg_905 = 0x0000; reg_906 = 0x0000; break; /* just leave power on the control-interfaces: GPIO and (I2C or SDIO or SRAM) */ case DIB9000_POWER_INTERFACE_ONLY: /* TODO power up either SDIO or I2C or SRAM */ reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 2)); break; case DIB9000_POWER_INTERF_ANALOG_AGC: reg_903 &= ~((1 << 15) | (1 << 14) | (1 << 11) | (1 << 10)); reg_905 &= ~((1 << 7) | (1 << 6) | (1 << 5) | (1 << 4) | (1 << 2)); reg_906 &= ~((1 << 0)); break; case DIB9000_POWER_COR4_DINTLV_ICIRM_EQUAL_CFROD: reg_903 = 0x0000; reg_904 = 0x801f; reg_905 = 0x0000; reg_906 &= ~((1 << 0)); break; case DIB9000_POWER_COR4_CRY_ESRAM_MOUT_NUD: reg_903 = 0x0000; reg_904 = 0x8000; reg_905 = 0x010b; reg_906 &= ~((1 << 0)); break; default: case DIB9000_POWER_NO: break; } /* always power down unused parts */ if (!state->platform.host.mobile_mode) reg_904 |= (1 << 7) | (1 << 6) | (1 << 4) | (1 << 2) | (1 << 1); /* P_sdio_select_clk = 0 on MC and after */ if (state->revision != 0x4000) reg_906 <<= 1; dib9000_write_word(state, 903 + offset, reg_903); dib9000_write_word(state, 904 + offset, reg_904); dib9000_write_word(state, 905 + offset, reg_905); dib9000_write_word(state, 906 + offset, reg_906); } static int dib9000_fw_reset(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; dib9000_write_word(state, 1817, 0x0003); dib9000_write_word(state, 1227, 1); dib9000_write_word(state, 1227, 0); switch ((state->revision = dib9000_identify(&state->i2c))) { case 0x4003: case 0x4004: case 0x4005: state->reg_offs = 1; break; default: return -EINVAL; } /* reset the i2c-master to use the host interface */ 
dibx000_reset_i2c_master(&state->i2c_master); dib9000_set_power_mode(state, DIB9000_POWER_ALL); /* unforce divstr regardless whether i2c enumeration was done or not */ dib9000_write_word(state, 1794, dib9000_read_word(state, 1794) & ~(1 << 1)); dib9000_write_word(state, 1796, 0); dib9000_write_word(state, 1805, 0x805); /* restart all parts */ dib9000_write_word(state, 898, 0xffff); dib9000_write_word(state, 899, 0xffff); dib9000_write_word(state, 900, 0x0001); dib9000_write_word(state, 901, 0xff19); dib9000_write_word(state, 902, 0x003c); dib9000_write_word(state, 898, 0); dib9000_write_word(state, 899, 0); dib9000_write_word(state, 900, 0); dib9000_write_word(state, 901, 0); dib9000_write_word(state, 902, 0); dib9000_write_word(state, 911, state->chip.d9.cfg.if_drives); dib9000_set_power_mode(state, DIB9000_POWER_INTERFACE_ONLY); return 0; } static int dib9000_risc_apb_access_read(struct dib9000_state *state, u32 address, u16 attribute, const u8 * tx, u32 txlen, u8 * b, u32 len) { u16 mb[10]; u8 i, s; if (address >= 1024 || !state->platform.risc.fw_is_running) return -EINVAL; /* dprintk( "APB access thru rd fw %d %x", address, attribute); */ mb[0] = (u16) address; mb[1] = len / 2; dib9000_mbx_send_attr(state, OUT_MSG_BRIDGE_APB_R, mb, 2, attribute); switch (dib9000_mbx_get_message_attr(state, IN_MSG_END_BRIDGE_APB_RW, mb, &s, attribute)) { case 1: s--; for (i = 0; i < s; i++) { b[i * 2] = (mb[i + 1] >> 8) & 0xff; b[i * 2 + 1] = (mb[i + 1]) & 0xff; } return 0; default: return -EIO; } return -EIO; } static int dib9000_risc_apb_access_write(struct dib9000_state *state, u32 address, u16 attribute, const u8 * b, u32 len) { u16 mb[10]; u8 s, i; if (address >= 1024 || !state->platform.risc.fw_is_running) return -EINVAL; /* dprintk( "APB access thru wr fw %d %x", address, attribute); */ mb[0] = (unsigned short)address; for (i = 0; i < len && i < 20; i += 2) mb[1 + (i / 2)] = (b[i] << 8 | b[i + 1]); dib9000_mbx_send_attr(state, OUT_MSG_BRIDGE_APB_W, mb, 1 + len / 2, 
attribute); return dib9000_mbx_get_message_attr(state, IN_MSG_END_BRIDGE_APB_RW, mb, &s, attribute) == 1 ? 0 : -EINVAL; } static int dib9000_fw_memmbx_sync(struct dib9000_state *state, u8 i) { u8 index_loop = 10; if (!state->platform.risc.fw_is_running) return 0; dib9000_risc_mem_write(state, FE_MM_RW_SYNC, &i); do { dib9000_risc_mem_read(state, FE_MM_RW_SYNC, state->i2c_read_buffer, 1); } while (state->i2c_read_buffer[0] && index_loop--); if (index_loop > 0) return 0; return -EIO; } static int dib9000_fw_init(struct dib9000_state *state) { struct dibGPIOFunction *f; u16 b[40] = { 0 }; u8 i; u8 size; if (dib9000_fw_boot(state, NULL, 0, state->chip.d9.cfg.microcode_B_fe_buffer, state->chip.d9.cfg.microcode_B_fe_size) != 0) return -EIO; /* initialize the firmware */ for (i = 0; i < ARRAY_SIZE(state->chip.d9.cfg.gpio_function); i++) { f = &state->chip.d9.cfg.gpio_function[i]; if (f->mask) { switch (f->function) { case BOARD_GPIO_FUNCTION_COMPONENT_ON: b[0] = (u16) f->mask; b[1] = (u16) f->direction; b[2] = (u16) f->value; break; case BOARD_GPIO_FUNCTION_COMPONENT_OFF: b[3] = (u16) f->mask; b[4] = (u16) f->direction; b[5] = (u16) f->value; break; } } } if (dib9000_mbx_send(state, OUT_MSG_CONF_GPIO, b, 15) != 0) return -EIO; /* subband */ b[0] = state->chip.d9.cfg.subband.size; /* type == 0 -> GPIO - PWM not yet supported */ for (i = 0; i < state->chip.d9.cfg.subband.size; i++) { b[1 + i * 4] = state->chip.d9.cfg.subband.subband[i].f_mhz; b[2 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.mask; b[3 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.direction; b[4 + i * 4] = (u16) state->chip.d9.cfg.subband.subband[i].gpio.value; } b[1 + i * 4] = 0; /* fe_id */ if (dib9000_mbx_send(state, OUT_MSG_SUBBAND_SEL, b, 2 + 4 * i) != 0) return -EIO; /* 0 - id, 1 - no_of_frontends */ b[0] = (0 << 8) | 1; /* 0 = i2c-address demod, 0 = tuner */ b[1] = (0 << 8) | (0); b[2] = (u16) (((state->chip.d9.cfg.xtal_clock_khz * 1000) >> 16) & 0xffff); b[3] = (u16) 
(((state->chip.d9.cfg.xtal_clock_khz * 1000)) & 0xffff); b[4] = (u16) ((state->chip.d9.cfg.vcxo_timer >> 16) & 0xffff); b[5] = (u16) ((state->chip.d9.cfg.vcxo_timer) & 0xffff); b[6] = (u16) ((state->chip.d9.cfg.timing_frequency >> 16) & 0xffff); b[7] = (u16) ((state->chip.d9.cfg.timing_frequency) & 0xffff); b[29] = state->chip.d9.cfg.if_drives; if (dib9000_mbx_send(state, OUT_MSG_INIT_DEMOD, b, ARRAY_SIZE(b)) != 0) return -EIO; if (dib9000_mbx_send(state, OUT_MSG_FE_FW_DL, NULL, 0) != 0) return -EIO; if (dib9000_mbx_get_message(state, IN_MSG_FE_FW_DL_DONE, b, &size) < 0) return -EIO; if (size > ARRAY_SIZE(b)) { dprintk("error : firmware returned %dbytes needed but the used buffer has only %dbytes\n Firmware init ABORTED", size, (int)ARRAY_SIZE(b)); return -EINVAL; } for (i = 0; i < size; i += 2) { state->platform.risc.fe_mm[i / 2].addr = b[i + 0]; state->platform.risc.fe_mm[i / 2].size = b[i + 1]; } return 0; } static void dib9000_fw_set_channel_head(struct dib9000_state *state) { u8 b[9]; u32 freq = state->fe[0]->dtv_property_cache.frequency / 1000; if (state->fe_id % 2) freq += 101; b[0] = (u8) ((freq >> 0) & 0xff); b[1] = (u8) ((freq >> 8) & 0xff); b[2] = (u8) ((freq >> 16) & 0xff); b[3] = (u8) ((freq >> 24) & 0xff); b[4] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 0) & 0xff); b[5] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 8) & 0xff); b[6] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 16) & 0xff); b[7] = (u8) ((state->fe[0]->dtv_property_cache.bandwidth_hz / 1000 >> 24) & 0xff); b[8] = 0x80; /* do not wait for CELL ID when doing autosearch */ if (state->fe[0]->dtv_property_cache.delivery_system == SYS_DVBT) b[8] |= 1; dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_HEAD, b); } static int dib9000_fw_get_channel(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; struct dibDVBTChannel { s8 spectrum_inversion; s8 nfft; s8 guard; s8 constellation; s8 hrch; s8 alpha; s8 
code_rate_hp; s8 code_rate_lp; s8 select_hp; s8 intlv_native; }; struct dibDVBTChannel *ch; int ret = 0; if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) { ret = -EIO; goto error; } dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_UNION, state->i2c_read_buffer, sizeof(struct dibDVBTChannel)); ch = (struct dibDVBTChannel *)state->i2c_read_buffer; switch (ch->spectrum_inversion & 0x7) { case 1: state->fe[0]->dtv_property_cache.inversion = INVERSION_ON; break; case 0: state->fe[0]->dtv_property_cache.inversion = INVERSION_OFF; break; default: case -1: state->fe[0]->dtv_property_cache.inversion = INVERSION_AUTO; break; } switch (ch->nfft) { case 0: state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_2K; break; case 2: state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_4K; break; case 1: state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K; break; default: case -1: state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_AUTO; break; } switch (ch->guard) { case 0: state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_32; break; case 1: state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_16; break; case 2: state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8; break; case 3: state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_4; break; default: case -1: state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_AUTO; break; } switch (ch->constellation) { case 2: state->fe[0]->dtv_property_cache.modulation = QAM_64; break; case 1: state->fe[0]->dtv_property_cache.modulation = QAM_16; break; case 0: state->fe[0]->dtv_property_cache.modulation = QPSK; break; default: case -1: state->fe[0]->dtv_property_cache.modulation = QAM_AUTO; break; } switch (ch->hrch) { case 0: state->fe[0]->dtv_property_cache.hierarchy = 
HIERARCHY_NONE; break; case 1: state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_1; break; default: case -1: state->fe[0]->dtv_property_cache.hierarchy = HIERARCHY_AUTO; break; } switch (ch->code_rate_hp) { case 1: state->fe[0]->dtv_property_cache.code_rate_HP = FEC_1_2; break; case 2: state->fe[0]->dtv_property_cache.code_rate_HP = FEC_2_3; break; case 3: state->fe[0]->dtv_property_cache.code_rate_HP = FEC_3_4; break; case 5: state->fe[0]->dtv_property_cache.code_rate_HP = FEC_5_6; break; case 7: state->fe[0]->dtv_property_cache.code_rate_HP = FEC_7_8; break; default: case -1: state->fe[0]->dtv_property_cache.code_rate_HP = FEC_AUTO; break; } switch (ch->code_rate_lp) { case 1: state->fe[0]->dtv_property_cache.code_rate_LP = FEC_1_2; break; case 2: state->fe[0]->dtv_property_cache.code_rate_LP = FEC_2_3; break; case 3: state->fe[0]->dtv_property_cache.code_rate_LP = FEC_3_4; break; case 5: state->fe[0]->dtv_property_cache.code_rate_LP = FEC_5_6; break; case 7: state->fe[0]->dtv_property_cache.code_rate_LP = FEC_7_8; break; default: case -1: state->fe[0]->dtv_property_cache.code_rate_LP = FEC_AUTO; break; } error: DibReleaseLock(&state->platform.risc.mem_mbx_lock); return ret; } static int dib9000_fw_set_channel_union(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; struct dibDVBTChannel { s8 spectrum_inversion; s8 nfft; s8 guard; s8 constellation; s8 hrch; s8 alpha; s8 code_rate_hp; s8 code_rate_lp; s8 select_hp; s8 intlv_native; }; struct dibDVBTChannel ch; switch (state->fe[0]->dtv_property_cache.inversion) { case INVERSION_ON: ch.spectrum_inversion = 1; break; case INVERSION_OFF: ch.spectrum_inversion = 0; break; default: case INVERSION_AUTO: ch.spectrum_inversion = -1; break; } switch (state->fe[0]->dtv_property_cache.transmission_mode) { case TRANSMISSION_MODE_2K: ch.nfft = 0; break; case TRANSMISSION_MODE_4K: ch.nfft = 2; break; case TRANSMISSION_MODE_8K: ch.nfft = 1; break; default: case TRANSMISSION_MODE_AUTO: ch.nfft = 
1; break; } switch (state->fe[0]->dtv_property_cache.guard_interval) { case GUARD_INTERVAL_1_32: ch.guard = 0; break; case GUARD_INTERVAL_1_16: ch.guard = 1; break; case GUARD_INTERVAL_1_8: ch.guard = 2; break; case GUARD_INTERVAL_1_4: ch.guard = 3; break; default: case GUARD_INTERVAL_AUTO: ch.guard = -1; break; } switch (state->fe[0]->dtv_property_cache.modulation) { case QAM_64: ch.constellation = 2; break; case QAM_16: ch.constellation = 1; break; case QPSK: ch.constellation = 0; break; default: case QAM_AUTO: ch.constellation = -1; break; } switch (state->fe[0]->dtv_property_cache.hierarchy) { case HIERARCHY_NONE: ch.hrch = 0; break; case HIERARCHY_1: case HIERARCHY_2: case HIERARCHY_4: ch.hrch = 1; break; default: case HIERARCHY_AUTO: ch.hrch = -1; break; } ch.alpha = 1; switch (state->fe[0]->dtv_property_cache.code_rate_HP) { case FEC_1_2: ch.code_rate_hp = 1; break; case FEC_2_3: ch.code_rate_hp = 2; break; case FEC_3_4: ch.code_rate_hp = 3; break; case FEC_5_6: ch.code_rate_hp = 5; break; case FEC_7_8: ch.code_rate_hp = 7; break; default: case FEC_AUTO: ch.code_rate_hp = -1; break; } switch (state->fe[0]->dtv_property_cache.code_rate_LP) { case FEC_1_2: ch.code_rate_lp = 1; break; case FEC_2_3: ch.code_rate_lp = 2; break; case FEC_3_4: ch.code_rate_lp = 3; break; case FEC_5_6: ch.code_rate_lp = 5; break; case FEC_7_8: ch.code_rate_lp = 7; break; default: case FEC_AUTO: ch.code_rate_lp = -1; break; } ch.select_hp = 1; ch.intlv_native = 1; dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_UNION, (u8 *) &ch); return 0; } static int dib9000_fw_tune(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; int ret = 10, search = state->channel_status.status == CHANNEL_STATUS_PARAMETERS_UNKNOWN; s8 i; switch (state->tune_state) { case CT_DEMOD_START: dib9000_fw_set_channel_head(state); /* write the channel context - a channel is initialized to 0, so it is OK */ dib9000_risc_mem_write(state, FE_MM_W_CHANNEL_CONTEXT, (u8 *) fe_info); 
/* hand the FE info block to the firmware, then kick off the search/tune */
dib9000_risc_mem_write(state, FE_MM_W_FE_INFO, (u8 *) fe_info);

		if (search)
			dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_SEARCH, NULL, 0);
		else {
			dib9000_fw_set_channel_union(fe);
			dib9000_mbx_send(state, OUT_MSG_FE_CHANNEL_TUNE, NULL, 0);
		}
		state->tune_state = CT_DEMOD_STEP_1;
		break;
	case CT_DEMOD_STEP_1:
		/* poll the 1-byte firmware state for the pending search or tune */
		if (search)
			dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_SEARCH_STATE, state->i2c_read_buffer, 1);
		else
			dib9000_risc_mem_read(state, FE_MM_R_CHANNEL_TUNE_STATE, state->i2c_read_buffer, 1);
		i = (s8)state->i2c_read_buffer[0];
		switch (i) {	/* something happened */
		case 0:
			/* still pending: keep polling */
			break;
		case -2:	/* tps locks are "slower" than MPEG locks -> even in autosearch data is OK here */
			if (search)
				state->status = FE_STATUS_DEMOD_SUCCESS;
			else {
				state->tune_state = CT_DEMOD_STOP;
				state->status = FE_STATUS_LOCKED;
			}
			break;
		default:
			state->status = FE_STATUS_TUNE_FAILED;
			state->tune_state = CT_DEMOD_STOP;
			break;
		}
		break;
	default:
		/* nothing left to do for this tune_state: caller need not call back */
		ret = FE_CALLBACK_TIME_NEVER;
		break;
	}

	return ret;
}

/*
 * Forward a diversity-input on/off request to the firmware via the mailbox.
 * Returns the dib9000_mbx_send() result.
 */
static int dib9000_fw_set_diversity_in(struct dvb_frontend *fe, int onoff)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u16 mode = (u16) onoff;
	return dib9000_mbx_send(state, OUT_MSG_ENABLE_DIVERSITY, &mode, 1);
}

/*
 * Program the transport-stream output interface: writes the host-side
 * register 1795 here; the smo_mode/firmware half of the setup continues
 * after this statement.  Returns -EINVAL for an unknown mode.
 */
static int dib9000_fw_set_output_mode(struct dvb_frontend *fe, int mode)
{
	struct dib9000_state *state = fe->demodulator_priv;
	u16 outreg, smo_mode;

	dprintk("setting output mode for demod %p to %d", fe, mode);

	switch (mode) {
	case OUTMODE_MPEG2_PAR_GATED_CLK:
		outreg = (1 << 10);	/* 0x0400 */
		break;
	case OUTMODE_MPEG2_PAR_CONT_CLK:
		outreg = (1 << 10) | (1 << 6);	/* 0x0440 */
		break;
	case OUTMODE_MPEG2_SERIAL:
		outreg = (1 << 10) | (2 << 6) | (0 << 1);	/* 0x0480; NOTE(review): was annotated 0x0482, but (0 << 1) keeps bit 1 clear */
		break;
	case OUTMODE_DIVERSITY:
		outreg = (1 << 10) | (4 << 6);	/* 0x0500 */
		break;
	case OUTMODE_MPEG2_FIFO:
		outreg = (1 << 10) | (5 << 6);
		break;
	case OUTMODE_HIGH_Z:
		outreg = 0;
		break;
	default:
		dprintk("Unhandled output_mode passed to be set for demod %p", &state->fe[0]);
		return -EINVAL;
	}
	dib9000_write_word(state, 1795, outreg);
switch (mode) { case OUTMODE_MPEG2_PAR_GATED_CLK: case OUTMODE_MPEG2_PAR_CONT_CLK: case OUTMODE_MPEG2_SERIAL: case OUTMODE_MPEG2_FIFO: smo_mode = (dib9000_read_word(state, 295) & 0x0010) | (1 << 1); if (state->chip.d9.cfg.output_mpeg2_in_188_bytes) smo_mode |= (1 << 5); dib9000_write_word(state, 295, smo_mode); break; } outreg = to_fw_output_mode(mode); return dib9000_mbx_send(state, OUT_MSG_SET_OUTPUT_MODE, &outreg, 1); } static int dib9000_tuner_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct dib9000_state *state = i2c_get_adapdata(i2c_adap); u16 i, len, t, index_msg; for (index_msg = 0; index_msg < num; index_msg++) { if (msg[index_msg].flags & I2C_M_RD) { /* read */ len = msg[index_msg].len; if (len > 16) len = 16; if (dib9000_read_word(state, 790) != 0) dprintk("TunerITF: read busy"); dib9000_write_word(state, 784, (u16) (msg[index_msg].addr)); dib9000_write_word(state, 787, (len / 2) - 1); dib9000_write_word(state, 786, 1); /* start read */ i = 1000; while (dib9000_read_word(state, 790) != (len / 2) && i) i--; if (i == 0) dprintk("TunerITF: read failed"); for (i = 0; i < len; i += 2) { t = dib9000_read_word(state, 785); msg[index_msg].buf[i] = (t >> 8) & 0xff; msg[index_msg].buf[i + 1] = (t) & 0xff; } if (dib9000_read_word(state, 790) != 0) dprintk("TunerITF: read more data than expected"); } else { i = 1000; while (dib9000_read_word(state, 789) && i) i--; if (i == 0) dprintk("TunerITF: write busy"); len = msg[index_msg].len; if (len > 16) len = 16; for (i = 0; i < len; i += 2) dib9000_write_word(state, 785, (msg[index_msg].buf[i] << 8) | msg[index_msg].buf[i + 1]); dib9000_write_word(state, 784, (u16) msg[index_msg].addr); dib9000_write_word(state, 787, (len / 2) - 1); dib9000_write_word(state, 786, 0); /* start write */ i = 1000; while (dib9000_read_word(state, 791) > 0 && i) i--; if (i == 0) dprintk("TunerITF: write failed"); } } return num; } int dib9000_fw_set_component_bus_speed(struct dvb_frontend *fe, u16 speed) { struct 
dib9000_state *state = fe->demodulator_priv; state->component_bus_speed = speed; return 0; } EXPORT_SYMBOL(dib9000_fw_set_component_bus_speed); static int dib9000_fw_component_bus_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg msg[], int num) { struct dib9000_state *state = i2c_get_adapdata(i2c_adap); u8 type = 0; /* I2C */ u8 port = DIBX000_I2C_INTERFACE_GPIO_3_4; u16 scl = state->component_bus_speed; /* SCL frequency */ struct dib9000_fe_memory_map *m = &state->platform.risc.fe_mm[FE_MM_RW_COMPONENT_ACCESS_BUFFER]; u8 p[13] = { 0 }; p[0] = type; p[1] = port; p[2] = msg[0].addr << 1; p[3] = (u8) scl & 0xff; /* scl */ p[4] = (u8) (scl >> 8); p[7] = 0; p[8] = 0; p[9] = (u8) (msg[0].len); p[10] = (u8) (msg[0].len >> 8); if ((num > 1) && (msg[1].flags & I2C_M_RD)) { p[11] = (u8) (msg[1].len); p[12] = (u8) (msg[1].len >> 8); } else { p[11] = 0; p[12] = 0; } if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) { dprintk("could not get the lock"); return 0; } dib9000_risc_mem_write(state, FE_MM_W_COMPONENT_ACCESS, p); { /* write-part */ dib9000_risc_mem_setup_cmd(state, m->addr, msg[0].len, 0); dib9000_risc_mem_write_chunks(state, msg[0].buf, msg[0].len); } /* do the transaction */ if (dib9000_fw_memmbx_sync(state, FE_SYNC_COMPONENT_ACCESS) < 0) { DibReleaseLock(&state->platform.risc.mem_mbx_lock); return 0; } /* read back any possible result */ if ((num > 1) && (msg[1].flags & I2C_M_RD)) dib9000_risc_mem_read(state, FE_MM_RW_COMPONENT_ACCESS_BUFFER, msg[1].buf, msg[1].len); DibReleaseLock(&state->platform.risc.mem_mbx_lock); return num; } static u32 dib9000_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C; } static struct i2c_algorithm dib9000_tuner_algo = { .master_xfer = dib9000_tuner_xfer, .functionality = dib9000_i2c_func, }; static struct i2c_algorithm dib9000_component_bus_algo = { .master_xfer = dib9000_fw_component_bus_xfer, .functionality = dib9000_i2c_func, }; struct i2c_adapter *dib9000_get_tuner_interface(struct dvb_frontend *fe) { 
struct dib9000_state *st = fe->demodulator_priv; return &st->tuner_adap; } EXPORT_SYMBOL(dib9000_get_tuner_interface); struct i2c_adapter *dib9000_get_component_bus_interface(struct dvb_frontend *fe) { struct dib9000_state *st = fe->demodulator_priv; return &st->component_bus; } EXPORT_SYMBOL(dib9000_get_component_bus_interface); struct i2c_adapter *dib9000_get_i2c_master(struct dvb_frontend *fe, enum dibx000_i2c_interface intf, int gating) { struct dib9000_state *st = fe->demodulator_priv; return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating); } EXPORT_SYMBOL(dib9000_get_i2c_master); int dib9000_set_i2c_adapter(struct dvb_frontend *fe, struct i2c_adapter *i2c) { struct dib9000_state *st = fe->demodulator_priv; st->i2c.i2c_adap = i2c; return 0; } EXPORT_SYMBOL(dib9000_set_i2c_adapter); static int dib9000_cfg_gpio(struct dib9000_state *st, u8 num, u8 dir, u8 val) { st->gpio_dir = dib9000_read_word(st, 773); st->gpio_dir &= ~(1 << num); /* reset the direction bit */ st->gpio_dir |= (dir & 0x1) << num; /* set the new direction */ dib9000_write_word(st, 773, st->gpio_dir); st->gpio_val = dib9000_read_word(st, 774); st->gpio_val &= ~(1 << num); /* reset the direction bit */ st->gpio_val |= (val & 0x01) << num; /* set the new value */ dib9000_write_word(st, 774, st->gpio_val); dprintk("gpio dir: %04x: gpio val: %04x", st->gpio_dir, st->gpio_val); return 0; } int dib9000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val) { struct dib9000_state *state = fe->demodulator_priv; return dib9000_cfg_gpio(state, num, dir, val); } EXPORT_SYMBOL(dib9000_set_gpio); int dib9000_fw_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff) { struct dib9000_state *state = fe->demodulator_priv; u16 val; int ret; if ((state->pid_ctrl_index != -2) && (state->pid_ctrl_index < 9)) { /* postpone the pid filtering cmd */ dprintk("pid filter cmd postpone"); state->pid_ctrl_index++; state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER_CTRL; 
state->pid_ctrl[state->pid_ctrl_index].onoff = onoff; return 0; } if (DibAcquireLock(&state->demod_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } val = dib9000_read_word(state, 294 + 1) & 0xffef; val |= (onoff & 0x1) << 4; dprintk("PID filter enabled %d", onoff); ret = dib9000_write_word(state, 294 + 1, val); DibReleaseLock(&state->demod_lock); return ret; } EXPORT_SYMBOL(dib9000_fw_pid_filter_ctrl); int dib9000_fw_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff) { struct dib9000_state *state = fe->demodulator_priv; int ret; if (state->pid_ctrl_index != -2) { /* postpone the pid filtering cmd */ dprintk("pid filter postpone"); if (state->pid_ctrl_index < 9) { state->pid_ctrl_index++; state->pid_ctrl[state->pid_ctrl_index].cmd = DIB9000_PID_FILTER; state->pid_ctrl[state->pid_ctrl_index].id = id; state->pid_ctrl[state->pid_ctrl_index].pid = pid; state->pid_ctrl[state->pid_ctrl_index].onoff = onoff; } else dprintk("can not add any more pid ctrl cmd"); return 0; } if (DibAcquireLock(&state->demod_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff); ret = dib9000_write_word(state, 300 + 1 + id, onoff ? 
(1 << 13) | pid : 0); DibReleaseLock(&state->demod_lock); return ret; } EXPORT_SYMBOL(dib9000_fw_pid_filter); int dib9000_firmware_post_pll_init(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; return dib9000_fw_init(state); } EXPORT_SYMBOL(dib9000_firmware_post_pll_init); static void dib9000_release(struct dvb_frontend *demod) { struct dib9000_state *st = demod->demodulator_priv; u8 index_frontend; for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (st->fe[index_frontend] != NULL); index_frontend++) dvb_frontend_detach(st->fe[index_frontend]); DibFreeLock(&state->platform.risc.mbx_if_lock); DibFreeLock(&state->platform.risc.mbx_lock); DibFreeLock(&state->platform.risc.mem_lock); DibFreeLock(&state->platform.risc.mem_mbx_lock); DibFreeLock(&state->demod_lock); dibx000_exit_i2c_master(&st->i2c_master); i2c_del_adapter(&st->tuner_adap); i2c_del_adapter(&st->component_bus); kfree(st->fe[0]); kfree(st); } static int dib9000_wakeup(struct dvb_frontend *fe) { return 0; } static int dib9000_sleep(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; u8 index_frontend; int ret = 0; if (DibAcquireLock(&state->demod_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { ret = state->fe[index_frontend]->ops.sleep(state->fe[index_frontend]); if (ret < 0) goto error; } ret = dib9000_mbx_send(state, OUT_MSG_FE_SLEEP, NULL, 0); error: DibReleaseLock(&state->demod_lock); return ret; } static int dib9000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; return 0; } static int dib9000_get_frontend(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; u8 index_frontend, sub_index_frontend; fe_status_t stat; int ret = 0; if (state->get_frontend_internal == 0) { if 
(DibAcquireLock(&state->demod_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } } for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat); if (stat & FE_HAS_SYNC) { dprintk("TPS lock on the slave%i", index_frontend); /* synchronize the cache with the other frontends */ state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend]); for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) { if (sub_index_frontend != index_frontend) { state->fe[sub_index_frontend]->dtv_property_cache.modulation = state->fe[index_frontend]->dtv_property_cache.modulation; state->fe[sub_index_frontend]->dtv_property_cache.inversion = state->fe[index_frontend]->dtv_property_cache.inversion; state->fe[sub_index_frontend]->dtv_property_cache.transmission_mode = state->fe[index_frontend]->dtv_property_cache.transmission_mode; state->fe[sub_index_frontend]->dtv_property_cache.guard_interval = state->fe[index_frontend]->dtv_property_cache.guard_interval; state->fe[sub_index_frontend]->dtv_property_cache.hierarchy = state->fe[index_frontend]->dtv_property_cache.hierarchy; state->fe[sub_index_frontend]->dtv_property_cache.code_rate_HP = state->fe[index_frontend]->dtv_property_cache.code_rate_HP; state->fe[sub_index_frontend]->dtv_property_cache.code_rate_LP = state->fe[index_frontend]->dtv_property_cache.code_rate_LP; state->fe[sub_index_frontend]->dtv_property_cache.rolloff = state->fe[index_frontend]->dtv_property_cache.rolloff; } } ret = 0; goto return_value; } } /* get the channel from master chip */ ret = dib9000_fw_get_channel(fe); if (ret != 0) goto return_value; /* synchronize the cache with the other frontends */ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); 
index_frontend++) { state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion; state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode; state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval; state->fe[index_frontend]->dtv_property_cache.modulation = fe->dtv_property_cache.modulation; state->fe[index_frontend]->dtv_property_cache.hierarchy = fe->dtv_property_cache.hierarchy; state->fe[index_frontend]->dtv_property_cache.code_rate_HP = fe->dtv_property_cache.code_rate_HP; state->fe[index_frontend]->dtv_property_cache.code_rate_LP = fe->dtv_property_cache.code_rate_LP; state->fe[index_frontend]->dtv_property_cache.rolloff = fe->dtv_property_cache.rolloff; } ret = 0; return_value: if (state->get_frontend_internal == 0) DibReleaseLock(&state->demod_lock); return ret; } static int dib9000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state) { struct dib9000_state *state = fe->demodulator_priv; state->tune_state = tune_state; if (tune_state == CT_DEMOD_START) state->status = FE_STATUS_TUNE_PENDING; return 0; } static u32 dib9000_get_status(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; return state->status; } static int dib9000_set_channel_status(struct dvb_frontend *fe, struct dvb_frontend_parametersContext *channel_status) { struct dib9000_state *state = fe->demodulator_priv; memcpy(&state->channel_status, channel_status, sizeof(struct dvb_frontend_parametersContext)); return 0; } static int dib9000_set_frontend(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; int sleep_time, sleep_time_slave; u32 frontend_status; u8 nbr_pending, exit_condition, index_frontend, index_frontend_success; struct dvb_frontend_parametersContext channel_status; /* check that the correct parameters are set */ if (state->fe[0]->dtv_property_cache.frequency == 0) { dprintk("dib9000: 
must specify frequency "); return 0; } if (state->fe[0]->dtv_property_cache.bandwidth_hz == 0) { dprintk("dib9000: must specify bandwidth "); return 0; } state->pid_ctrl_index = -1; /* postpone the pid filtering cmd */ if (DibAcquireLock(&state->demod_lock) < 0) { dprintk("could not get the lock"); return 0; } fe->dtv_property_cache.delivery_system = SYS_DVBT; /* set the master status */ if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO || state->fe[0]->dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO || state->fe[0]->dtv_property_cache.modulation == QAM_AUTO || state->fe[0]->dtv_property_cache.code_rate_HP == FEC_AUTO) { /* no channel specified, autosearch the channel */ state->channel_status.status = CHANNEL_STATUS_PARAMETERS_UNKNOWN; } else state->channel_status.status = CHANNEL_STATUS_PARAMETERS_SET; /* set mode and status for the different frontends */ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { dib9000_fw_set_diversity_in(state->fe[index_frontend], 1); /* synchronization of the cache */ memcpy(&state->fe[index_frontend]->dtv_property_cache, &fe->dtv_property_cache, sizeof(struct dtv_frontend_properties)); state->fe[index_frontend]->dtv_property_cache.delivery_system = SYS_DVBT; dib9000_fw_set_output_mode(state->fe[index_frontend], OUTMODE_HIGH_Z); dib9000_set_channel_status(state->fe[index_frontend], &state->channel_status); dib9000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START); } /* actual tune */ exit_condition = 0; /* 0: tune pending; 1: tune failed; 2:tune success */ index_frontend_success = 0; do { sleep_time = dib9000_fw_tune(state->fe[0]); for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend]); if (sleep_time == FE_CALLBACK_TIME_NEVER) sleep_time = sleep_time_slave; else if 
((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time)) sleep_time = sleep_time_slave; } if (sleep_time != FE_CALLBACK_TIME_NEVER) msleep(sleep_time / 10); else break; nbr_pending = 0; exit_condition = 0; index_frontend_success = 0; for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { frontend_status = -dib9000_get_status(state->fe[index_frontend]); if (frontend_status > -FE_STATUS_TUNE_PENDING) { exit_condition = 2; /* tune success */ index_frontend_success = index_frontend; break; } if (frontend_status == -FE_STATUS_TUNE_PENDING) nbr_pending++; /* some frontends are still tuning */ } if ((exit_condition != 2) && (nbr_pending == 0)) exit_condition = 1; /* if all tune are done and no success, exit: tune failed */ } while (exit_condition == 0); /* check the tune result */ if (exit_condition == 1) { /* tune failed */ dprintk("tune failed"); DibReleaseLock(&state->demod_lock); /* tune failed; put all the pid filtering cmd to junk */ state->pid_ctrl_index = -1; return 0; } dprintk("tune success on frontend%i", index_frontend_success); /* synchronize all the channel cache */ state->get_frontend_internal = 1; dib9000_get_frontend(state->fe[0]); state->get_frontend_internal = 0; /* retune the other frontends with the found channel */ channel_status.status = CHANNEL_STATUS_PARAMETERS_SET; for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { /* only retune the frontends which was not tuned success */ if (index_frontend != index_frontend_success) { dib9000_set_channel_status(state->fe[index_frontend], &channel_status); dib9000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START); } } do { sleep_time = FE_CALLBACK_TIME_NEVER; for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { if (index_frontend != index_frontend_success) { 
sleep_time_slave = dib9000_fw_tune(state->fe[index_frontend]); if (sleep_time == FE_CALLBACK_TIME_NEVER) sleep_time = sleep_time_slave; else if ((sleep_time_slave != FE_CALLBACK_TIME_NEVER) && (sleep_time_slave > sleep_time)) sleep_time = sleep_time_slave; } } if (sleep_time != FE_CALLBACK_TIME_NEVER) msleep(sleep_time / 10); else break; nbr_pending = 0; for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { if (index_frontend != index_frontend_success) { frontend_status = -dib9000_get_status(state->fe[index_frontend]); if ((index_frontend != index_frontend_success) && (frontend_status == -FE_STATUS_TUNE_PENDING)) nbr_pending++; /* some frontends are still tuning */ } } } while (nbr_pending != 0); /* set the output mode */ dib9000_fw_set_output_mode(state->fe[0], state->chip.d9.cfg.output_mode); for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) dib9000_fw_set_output_mode(state->fe[index_frontend], OUTMODE_DIVERSITY); /* turn off the diversity for the last frontend */ dib9000_fw_set_diversity_in(state->fe[index_frontend - 1], 0); DibReleaseLock(&state->demod_lock); if (state->pid_ctrl_index >= 0) { u8 index_pid_filter_cmd; u8 pid_ctrl_index = state->pid_ctrl_index; state->pid_ctrl_index = -2; for (index_pid_filter_cmd = 0; index_pid_filter_cmd <= pid_ctrl_index; index_pid_filter_cmd++) { if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER_CTRL) dib9000_fw_pid_filter_ctrl(state->fe[0], state->pid_ctrl[index_pid_filter_cmd].onoff); else if (state->pid_ctrl[index_pid_filter_cmd].cmd == DIB9000_PID_FILTER) dib9000_fw_pid_filter(state->fe[0], state->pid_ctrl[index_pid_filter_cmd].id, state->pid_ctrl[index_pid_filter_cmd].pid, state->pid_ctrl[index_pid_filter_cmd].onoff); } } /* do not postpone any more the pid filtering */ state->pid_ctrl_index = -2; return 0; } static u16 dib9000_read_lock(struct 
dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; return dib9000_read_word(state, 535); } static int dib9000_read_status(struct dvb_frontend *fe, fe_status_t * stat) { struct dib9000_state *state = fe->demodulator_priv; u8 index_frontend; u16 lock = 0, lock_slave = 0; if (DibAcquireLock(&state->demod_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) lock_slave |= dib9000_read_lock(state->fe[index_frontend]); lock = dib9000_read_word(state, 535); *stat = 0; if ((lock & 0x8000) || (lock_slave & 0x8000)) *stat |= FE_HAS_SIGNAL; if ((lock & 0x3000) || (lock_slave & 0x3000)) *stat |= FE_HAS_CARRIER; if ((lock & 0x0100) || (lock_slave & 0x0100)) *stat |= FE_HAS_VITERBI; if (((lock & 0x0038) == 0x38) || ((lock_slave & 0x0038) == 0x38)) *stat |= FE_HAS_SYNC; if ((lock & 0x0008) || (lock_slave & 0x0008)) *stat |= FE_HAS_LOCK; DibReleaseLock(&state->demod_lock); return 0; } static int dib9000_read_ber(struct dvb_frontend *fe, u32 * ber) { struct dib9000_state *state = fe->demodulator_priv; u16 *c; int ret = 0; if (DibAcquireLock(&state->demod_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) { dprintk("could not get the lock"); ret = -EINTR; goto error; } if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) { DibReleaseLock(&state->platform.risc.mem_mbx_lock); ret = -EIO; goto error; } dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, state->i2c_read_buffer, 16 * 2); DibReleaseLock(&state->platform.risc.mem_mbx_lock); c = (u16 *)state->i2c_read_buffer; *ber = c[10] << 16 | c[11]; error: DibReleaseLock(&state->demod_lock); return ret; } static int dib9000_read_signal_strength(struct dvb_frontend *fe, u16 * strength) { struct dib9000_state *state = fe->demodulator_priv; u8 index_frontend; u16 *c = (u16 *)state->i2c_read_buffer; u16 val; int 
ret = 0; if (DibAcquireLock(&state->demod_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } *strength = 0; for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { state->fe[index_frontend]->ops.read_signal_strength(state->fe[index_frontend], &val); if (val > 65535 - *strength) *strength = 65535; else *strength += val; } if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) { dprintk("could not get the lock"); ret = -EINTR; goto error; } if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) { DibReleaseLock(&state->platform.risc.mem_mbx_lock); ret = -EIO; goto error; } dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2); DibReleaseLock(&state->platform.risc.mem_mbx_lock); val = 65535 - c[4]; if (val > 65535 - *strength) *strength = 65535; else *strength += val; error: DibReleaseLock(&state->demod_lock); return ret; } static u32 dib9000_get_snr(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; u16 *c = (u16 *)state->i2c_read_buffer; u32 n, s, exp; u16 val; if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) { dprintk("could not get the lock"); return 0; } if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) { DibReleaseLock(&state->platform.risc.mem_mbx_lock); return 0; } dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2); DibReleaseLock(&state->platform.risc.mem_mbx_lock); val = c[7]; n = (val >> 4) & 0xff; exp = ((val & 0xf) << 2); val = c[8]; exp += ((val >> 14) & 0x3); if ((exp & 0x20) != 0) exp -= 0x40; n <<= exp + 16; s = (val >> 6) & 0xFF; exp = (val & 0x3F); if ((exp & 0x20) != 0) exp -= 0x40; s <<= exp + 16; if (n > 0) { u32 t = (s / n) << 16; return t + ((s << 16) - n * t) / n; } return 0xffffffff; } static int dib9000_read_snr(struct dvb_frontend *fe, u16 * snr) { struct dib9000_state *state = fe->demodulator_priv; u8 index_frontend; u32 snr_master; if (DibAcquireLock(&state->demod_lock) < 0) 
{ dprintk("could not get the lock"); return -EINTR; } snr_master = dib9000_get_snr(fe); for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) snr_master += dib9000_get_snr(state->fe[index_frontend]); if ((snr_master >> 16) != 0) { snr_master = 10 * intlog10(snr_master >> 16); *snr = snr_master / ((1 << 24) / 10); } else *snr = 0; DibReleaseLock(&state->demod_lock); return 0; } static int dib9000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc) { struct dib9000_state *state = fe->demodulator_priv; u16 *c = (u16 *)state->i2c_read_buffer; int ret = 0; if (DibAcquireLock(&state->demod_lock) < 0) { dprintk("could not get the lock"); return -EINTR; } if (DibAcquireLock(&state->platform.risc.mem_mbx_lock) < 0) { dprintk("could not get the lock"); ret = -EINTR; goto error; } if (dib9000_fw_memmbx_sync(state, FE_SYNC_CHANNEL) < 0) { DibReleaseLock(&state->platform.risc.mem_mbx_lock); ret = -EIO; goto error; } dib9000_risc_mem_read(state, FE_MM_R_FE_MONITOR, (u8 *) c, 16 * 2); DibReleaseLock(&state->platform.risc.mem_mbx_lock); *unc = c[12]; error: DibReleaseLock(&state->demod_lock); return ret; } int dib9000_i2c_enumeration(struct i2c_adapter *i2c, int no_of_demods, u8 default_addr, u8 first_addr) { int k = 0, ret = 0; u8 new_addr = 0; struct i2c_device client = {.i2c_adap = i2c }; client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL); if (!client.i2c_write_buffer) { dprintk("%s: not enough memory", __func__); return -ENOMEM; } client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL); if (!client.i2c_read_buffer) { dprintk("%s: not enough memory", __func__); ret = -ENOMEM; goto error_memory; } client.i2c_addr = default_addr + 16; dib9000_i2c_write16(&client, 1796, 0x0); for (k = no_of_demods - 1; k >= 0; k--) { /* designated i2c address */ new_addr = first_addr + (k << 1); client.i2c_addr = default_addr; dib9000_i2c_write16(&client, 1817, 3); dib9000_i2c_write16(&client, 1796, 0); 
dib9000_i2c_write16(&client, 1227, 1); dib9000_i2c_write16(&client, 1227, 0); client.i2c_addr = new_addr; dib9000_i2c_write16(&client, 1817, 3); dib9000_i2c_write16(&client, 1796, 0); dib9000_i2c_write16(&client, 1227, 1); dib9000_i2c_write16(&client, 1227, 0); if (dib9000_identify(&client) == 0) { client.i2c_addr = default_addr; if (dib9000_identify(&client) == 0) { dprintk("DiB9000 #%d: not identified", k); ret = -EIO; goto error; } } dib9000_i2c_write16(&client, 1795, (1 << 10) | (4 << 6)); dib9000_i2c_write16(&client, 1794, (new_addr << 2) | 2); dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr); } for (k = 0; k < no_of_demods; k++) { new_addr = first_addr | (k << 1); client.i2c_addr = new_addr; dib9000_i2c_write16(&client, 1794, (new_addr << 2)); dib9000_i2c_write16(&client, 1795, 0); } error: kfree(client.i2c_read_buffer); error_memory: kfree(client.i2c_write_buffer); return ret; } EXPORT_SYMBOL(dib9000_i2c_enumeration); int dib9000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave) { struct dib9000_state *state = fe->demodulator_priv; u8 index_frontend = 1; while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL)) index_frontend++; if (index_frontend < MAX_NUMBER_OF_FRONTENDS) { dprintk("set slave fe %p to index %i", fe_slave, index_frontend); state->fe[index_frontend] = fe_slave; return 0; } dprintk("too many slave frontend"); return -ENOMEM; } EXPORT_SYMBOL(dib9000_set_slave_frontend); int dib9000_remove_slave_frontend(struct dvb_frontend *fe) { struct dib9000_state *state = fe->demodulator_priv; u8 index_frontend = 1; while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL)) index_frontend++; if (index_frontend != 1) { dprintk("remove slave fe %p (index %i)", state->fe[index_frontend - 1], index_frontend - 1); state->fe[index_frontend] = NULL; return 0; } dprintk("no frontend to be removed"); return -ENODEV; } EXPORT_SYMBOL(dib9000_remove_slave_frontend); 
struct dvb_frontend *dib9000_get_slave_frontend(struct dvb_frontend *fe, int slave_index) { struct dib9000_state *state = fe->demodulator_priv; if (slave_index >= MAX_NUMBER_OF_FRONTENDS) return NULL; return state->fe[slave_index]; } EXPORT_SYMBOL(dib9000_get_slave_frontend); static struct dvb_frontend_ops dib9000_ops; struct dvb_frontend *dib9000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, const struct dib9000_config *cfg) { struct dvb_frontend *fe; struct dib9000_state *st; st = kzalloc(sizeof(struct dib9000_state), GFP_KERNEL); if (st == NULL) return NULL; fe = kzalloc(sizeof(struct dvb_frontend), GFP_KERNEL); if (fe == NULL) { kfree(st); return NULL; } memcpy(&st->chip.d9.cfg, cfg, sizeof(struct dib9000_config)); st->i2c.i2c_adap = i2c_adap; st->i2c.i2c_addr = i2c_addr; st->i2c.i2c_write_buffer = st->i2c_write_buffer; st->i2c.i2c_read_buffer = st->i2c_read_buffer; st->gpio_dir = DIB9000_GPIO_DEFAULT_DIRECTIONS; st->gpio_val = DIB9000_GPIO_DEFAULT_VALUES; st->gpio_pwm_pos = DIB9000_GPIO_DEFAULT_PWM_POS; DibInitLock(&st->platform.risc.mbx_if_lock); DibInitLock(&st->platform.risc.mbx_lock); DibInitLock(&st->platform.risc.mem_lock); DibInitLock(&st->platform.risc.mem_mbx_lock); DibInitLock(&st->demod_lock); st->get_frontend_internal = 0; st->pid_ctrl_index = -2; st->fe[0] = fe; fe->demodulator_priv = st; memcpy(&st->fe[0]->ops, &dib9000_ops, sizeof(struct dvb_frontend_ops)); /* Ensure the output mode remains at the previous default if it's * not specifically set by the caller. 
*/ if ((st->chip.d9.cfg.output_mode != OUTMODE_MPEG2_SERIAL) && (st->chip.d9.cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK)) st->chip.d9.cfg.output_mode = OUTMODE_MPEG2_FIFO; if (dib9000_identify(&st->i2c) == 0) goto error; dibx000_init_i2c_master(&st->i2c_master, DIB7000MC, st->i2c.i2c_adap, st->i2c.i2c_addr); st->tuner_adap.dev.parent = i2c_adap->dev.parent; strncpy(st->tuner_adap.name, "DIB9000_FW TUNER ACCESS", sizeof(st->tuner_adap.name)); st->tuner_adap.algo = &dib9000_tuner_algo; st->tuner_adap.algo_data = NULL; i2c_set_adapdata(&st->tuner_adap, st); if (i2c_add_adapter(&st->tuner_adap) < 0) goto error; st->component_bus.dev.parent = i2c_adap->dev.parent; strncpy(st->component_bus.name, "DIB9000_FW COMPONENT BUS ACCESS", sizeof(st->component_bus.name)); st->component_bus.algo = &dib9000_component_bus_algo; st->component_bus.algo_data = NULL; st->component_bus_speed = 340; i2c_set_adapdata(&st->component_bus, st); if (i2c_add_adapter(&st->component_bus) < 0) goto component_bus_add_error; dib9000_fw_reset(fe); return fe; component_bus_add_error: i2c_del_adapter(&st->tuner_adap); error: kfree(st); return NULL; } EXPORT_SYMBOL(dib9000_attach); static struct dvb_frontend_ops dib9000_ops = { .delsys = { SYS_DVBT }, .info = { .name = "DiBcom 9000", .frequency_min = 44250000, .frequency_max = 867250000, .frequency_stepsize = 62500, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO, }, .release = dib9000_release, .init = dib9000_wakeup, .sleep = dib9000_sleep, .set_frontend = dib9000_set_frontend, .get_tune_settings = dib9000_fe_get_tune_settings, .get_frontend = dib9000_get_frontend, .read_status = dib9000_read_status, .read_ber = dib9000_read_ber, .read_signal_strength = dib9000_read_signal_strength, .read_snr = 
dib9000_read_snr, .read_ucblocks = dib9000_read_unc_blocks, }; MODULE_AUTHOR("Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_AUTHOR("Olivier Grenie <ogrenie@dibcom.fr>"); MODULE_DESCRIPTION("Driver for the DiBcom 9000 COFDM demodulator"); MODULE_LICENSE("GPL");
gpl-2.0
raum1807/android_kernel_quanta_fg6q
drivers/net/ethernet/cadence/macb.c
4809
37174
/* * Cadence MACB/GEM Ethernet Controller driver * * Copyright (C) 2004-2006 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/dma-mapping.h> #include <linux/platform_data/macb.h> #include <linux/platform_device.h> #include <linux/phy.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_net.h> #include "macb.h" #define RX_BUFFER_SIZE 128 #define RX_RING_SIZE 512 #define RX_RING_BYTES (sizeof(struct dma_desc) * RX_RING_SIZE) /* Make the IP header word-aligned (the ethernet header is 14 bytes) */ #define RX_OFFSET 2 #define TX_RING_SIZE 128 #define DEF_TX_RING_PENDING (TX_RING_SIZE - 1) #define TX_RING_BYTES (sizeof(struct dma_desc) * TX_RING_SIZE) #define TX_RING_GAP(bp) \ (TX_RING_SIZE - (bp)->tx_pending) #define TX_BUFFS_AVAIL(bp) \ (((bp)->tx_tail <= (bp)->tx_head) ? 
\ (bp)->tx_tail + (bp)->tx_pending - (bp)->tx_head : \ (bp)->tx_tail - (bp)->tx_head - TX_RING_GAP(bp)) #define NEXT_TX(n) (((n) + 1) & (TX_RING_SIZE - 1)) #define NEXT_RX(n) (((n) + 1) & (RX_RING_SIZE - 1)) /* minimum number of free TX descriptors before waking up TX process */ #define MACB_TX_WAKEUP_THRESH (TX_RING_SIZE / 4) #define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \ | MACB_BIT(ISR_ROVR)) static void __macb_set_hwaddr(struct macb *bp) { u32 bottom; u16 top; bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr)); macb_or_gem_writel(bp, SA1B, bottom); top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4))); macb_or_gem_writel(bp, SA1T, top); } static void __init macb_get_hwaddr(struct macb *bp) { u32 bottom; u16 top; u8 addr[6]; bottom = macb_or_gem_readl(bp, SA1B); top = macb_or_gem_readl(bp, SA1T); addr[0] = bottom & 0xff; addr[1] = (bottom >> 8) & 0xff; addr[2] = (bottom >> 16) & 0xff; addr[3] = (bottom >> 24) & 0xff; addr[4] = top & 0xff; addr[5] = (top >> 8) & 0xff; if (is_valid_ether_addr(addr)) { memcpy(bp->dev->dev_addr, addr, sizeof(addr)); } else { netdev_info(bp->dev, "invalid hw address, using random\n"); eth_hw_addr_random(bp->dev); } } static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum) { struct macb *bp = bus->priv; int value; macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | MACB_BF(RW, MACB_MAN_READ) | MACB_BF(PHYA, mii_id) | MACB_BF(REGA, regnum) | MACB_BF(CODE, MACB_MAN_CODE))); /* wait for end of transfer */ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) cpu_relax(); value = MACB_BFEXT(DATA, macb_readl(bp, MAN)); return value; } static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) { struct macb *bp = bus->priv; macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF) | MACB_BF(RW, MACB_MAN_WRITE) | MACB_BF(PHYA, mii_id) | MACB_BF(REGA, regnum) | MACB_BF(CODE, MACB_MAN_CODE) | MACB_BF(DATA, value))); /* wait for end of transfer */ while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR))) cpu_relax(); 
return 0; } static int macb_mdio_reset(struct mii_bus *bus) { return 0; } static void macb_handle_link_change(struct net_device *dev) { struct macb *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; unsigned long flags; int status_change = 0; spin_lock_irqsave(&bp->lock, flags); if (phydev->link) { if ((bp->speed != phydev->speed) || (bp->duplex != phydev->duplex)) { u32 reg; reg = macb_readl(bp, NCFGR); reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD)); if (phydev->duplex) reg |= MACB_BIT(FD); if (phydev->speed == SPEED_100) reg |= MACB_BIT(SPD); macb_writel(bp, NCFGR, reg); bp->speed = phydev->speed; bp->duplex = phydev->duplex; status_change = 1; } } if (phydev->link != bp->link) { if (!phydev->link) { bp->speed = 0; bp->duplex = -1; } bp->link = phydev->link; status_change = 1; } spin_unlock_irqrestore(&bp->lock, flags); if (status_change) { if (phydev->link) netdev_info(dev, "link up (%d/%s)\n", phydev->speed, phydev->duplex == DUPLEX_FULL ? "Full" : "Half"); else netdev_info(dev, "link down\n"); } } /* based on au1000_eth. 
c*/ static int macb_mii_probe(struct net_device *dev) { struct macb *bp = netdev_priv(dev); struct phy_device *phydev; int ret; phydev = phy_find_first(bp->mii_bus); if (!phydev) { netdev_err(dev, "no PHY found\n"); return -1; } /* TODO : add pin_irq */ /* attach the mac to the phy */ ret = phy_connect_direct(dev, phydev, &macb_handle_link_change, 0, bp->phy_interface); if (ret) { netdev_err(dev, "Could not attach to PHY\n"); return ret; } /* mask with MAC supported features */ phydev->supported &= PHY_BASIC_FEATURES; phydev->advertising = phydev->supported; bp->link = 0; bp->speed = 0; bp->duplex = -1; bp->phy_dev = phydev; return 0; } static int macb_mii_init(struct macb *bp) { struct macb_platform_data *pdata; int err = -ENXIO, i; /* Enable management port */ macb_writel(bp, NCR, MACB_BIT(MPE)); bp->mii_bus = mdiobus_alloc(); if (bp->mii_bus == NULL) { err = -ENOMEM; goto err_out; } bp->mii_bus->name = "MACB_mii_bus"; bp->mii_bus->read = &macb_mdio_read; bp->mii_bus->write = &macb_mdio_write; bp->mii_bus->reset = &macb_mdio_reset; snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x", bp->pdev->name, bp->pdev->id); bp->mii_bus->priv = bp; bp->mii_bus->parent = &bp->dev->dev; pdata = bp->pdev->dev.platform_data; if (pdata) bp->mii_bus->phy_mask = pdata->phy_mask; bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL); if (!bp->mii_bus->irq) { err = -ENOMEM; goto err_out_free_mdiobus; } for (i = 0; i < PHY_MAX_ADDR; i++) bp->mii_bus->irq[i] = PHY_POLL; dev_set_drvdata(&bp->dev->dev, bp->mii_bus); if (mdiobus_register(bp->mii_bus)) goto err_out_free_mdio_irq; if (macb_mii_probe(bp->dev) != 0) { goto err_out_unregister_bus; } return 0; err_out_unregister_bus: mdiobus_unregister(bp->mii_bus); err_out_free_mdio_irq: kfree(bp->mii_bus->irq); err_out_free_mdiobus: mdiobus_free(bp->mii_bus); err_out: return err; } static void macb_update_stats(struct macb *bp) { u32 __iomem *reg = bp->regs + MACB_PFR; u32 *p = &bp->hw_stats.macb.rx_pause_frames; u32 *end = 
&bp->hw_stats.macb.tx_pause_frames + 1; WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4); for(; p < end; p++, reg++) *p += __raw_readl(reg); } static void macb_tx(struct macb *bp) { unsigned int tail; unsigned int head; u32 status; status = macb_readl(bp, TSR); macb_writel(bp, TSR, status); netdev_dbg(bp->dev, "macb_tx status = %02lx\n", (unsigned long)status); if (status & (MACB_BIT(UND) | MACB_BIT(TSR_RLE))) { int i; netdev_err(bp->dev, "TX %s, resetting buffers\n", status & MACB_BIT(UND) ? "underrun" : "retry limit exceeded"); /* Transfer ongoing, disable transmitter, to avoid confusion */ if (status & MACB_BIT(TGO)) macb_writel(bp, NCR, macb_readl(bp, NCR) & ~MACB_BIT(TE)); head = bp->tx_head; /*Mark all the buffer as used to avoid sending a lost buffer*/ for (i = 0; i < TX_RING_SIZE; i++) bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); /* Add wrap bit */ bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); /* free transmit buffer in upper layer*/ for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { struct ring_info *rp = &bp->tx_skb[tail]; struct sk_buff *skb = rp->skb; BUG_ON(skb == NULL); rmb(); dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, DMA_TO_DEVICE); rp->skb = NULL; dev_kfree_skb_irq(skb); } bp->tx_head = bp->tx_tail = 0; /* Enable the transmitter again */ if (status & MACB_BIT(TGO)) macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TE)); } if (!(status & MACB_BIT(COMP))) /* * This may happen when a buffer becomes complete * between reading the ISR and scanning the * descriptors. Nothing to worry about. 
*/ return; head = bp->tx_head; for (tail = bp->tx_tail; tail != head; tail = NEXT_TX(tail)) { struct ring_info *rp = &bp->tx_skb[tail]; struct sk_buff *skb = rp->skb; u32 bufstat; BUG_ON(skb == NULL); rmb(); bufstat = bp->tx_ring[tail].ctrl; if (!(bufstat & MACB_BIT(TX_USED))) break; netdev_dbg(bp->dev, "skb %u (data %p) TX complete\n", tail, skb->data); dma_unmap_single(&bp->pdev->dev, rp->mapping, skb->len, DMA_TO_DEVICE); bp->stats.tx_packets++; bp->stats.tx_bytes += skb->len; rp->skb = NULL; dev_kfree_skb_irq(skb); } bp->tx_tail = tail; if (netif_queue_stopped(bp->dev) && TX_BUFFS_AVAIL(bp) > MACB_TX_WAKEUP_THRESH) netif_wake_queue(bp->dev); } static int macb_rx_frame(struct macb *bp, unsigned int first_frag, unsigned int last_frag) { unsigned int len; unsigned int frag; unsigned int offset = 0; struct sk_buff *skb; len = MACB_BFEXT(RX_FRMLEN, bp->rx_ring[last_frag].ctrl); netdev_dbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n", first_frag, last_frag, len); skb = netdev_alloc_skb(bp->dev, len + RX_OFFSET); if (!skb) { bp->stats.rx_dropped++; for (frag = first_frag; ; frag = NEXT_RX(frag)) { bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); if (frag == last_frag) break; } wmb(); return 1; } skb_reserve(skb, RX_OFFSET); skb_checksum_none_assert(skb); skb_put(skb, len); for (frag = first_frag; ; frag = NEXT_RX(frag)) { unsigned int frag_len = RX_BUFFER_SIZE; if (offset + frag_len > len) { BUG_ON(frag != last_frag); frag_len = len - offset; } skb_copy_to_linear_data_offset(skb, offset, (bp->rx_buffers + (RX_BUFFER_SIZE * frag)), frag_len); offset += RX_BUFFER_SIZE; bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); wmb(); if (frag == last_frag) break; } skb->protocol = eth_type_trans(skb, bp->dev); bp->stats.rx_packets++; bp->stats.rx_bytes += len; netdev_dbg(bp->dev, "received skb of length %u, csum: %08x\n", skb->len, skb->csum); netif_receive_skb(skb); return 0; } /* Mark DMA descriptors from begin up to and not including end as unused */ static void 
discard_partial_frame(struct macb *bp, unsigned int begin, unsigned int end) { unsigned int frag; for (frag = begin; frag != end; frag = NEXT_RX(frag)) bp->rx_ring[frag].addr &= ~MACB_BIT(RX_USED); wmb(); /* * When this happens, the hardware stats registers for * whatever caused this is updated, so we don't have to record * anything. */ } static int macb_rx(struct macb *bp, int budget) { int received = 0; unsigned int tail = bp->rx_tail; int first_frag = -1; for (; budget > 0; tail = NEXT_RX(tail)) { u32 addr, ctrl; rmb(); addr = bp->rx_ring[tail].addr; ctrl = bp->rx_ring[tail].ctrl; if (!(addr & MACB_BIT(RX_USED))) break; if (ctrl & MACB_BIT(RX_SOF)) { if (first_frag != -1) discard_partial_frame(bp, first_frag, tail); first_frag = tail; } if (ctrl & MACB_BIT(RX_EOF)) { int dropped; BUG_ON(first_frag == -1); dropped = macb_rx_frame(bp, first_frag, tail); first_frag = -1; if (!dropped) { received++; budget--; } } } if (first_frag != -1) bp->rx_tail = first_frag; else bp->rx_tail = tail; return received; } static int macb_poll(struct napi_struct *napi, int budget) { struct macb *bp = container_of(napi, struct macb, napi); int work_done; u32 status; status = macb_readl(bp, RSR); macb_writel(bp, RSR, status); work_done = 0; netdev_dbg(bp->dev, "poll: status = %08lx, budget = %d\n", (unsigned long)status, budget); work_done = macb_rx(bp, budget); if (work_done < budget) { napi_complete(napi); /* * We've done what we can to clean the buffers. Make sure we * get notified when new packets arrive. 
*/ macb_writel(bp, IER, MACB_RX_INT_FLAGS); } /* TODO: Handle errors */ return work_done; } static irqreturn_t macb_interrupt(int irq, void *dev_id) { struct net_device *dev = dev_id; struct macb *bp = netdev_priv(dev); u32 status; status = macb_readl(bp, ISR); if (unlikely(!status)) return IRQ_NONE; spin_lock(&bp->lock); while (status) { /* close possible race with dev_close */ if (unlikely(!netif_running(dev))) { macb_writel(bp, IDR, ~0UL); break; } if (status & MACB_RX_INT_FLAGS) { /* * There's no point taking any more interrupts * until we have processed the buffers. The * scheduling call may fail if the poll routine * is already scheduled, so disable interrupts * now. */ macb_writel(bp, IDR, MACB_RX_INT_FLAGS); if (napi_schedule_prep(&bp->napi)) { netdev_dbg(bp->dev, "scheduling RX softirq\n"); __napi_schedule(&bp->napi); } } if (status & (MACB_BIT(TCOMP) | MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE))) macb_tx(bp); /* * Link change detection isn't possible with RMII, so we'll * add that if/when we get our hands on a full-blown MII PHY. */ if (status & MACB_BIT(ISR_ROVR)) { /* We missed at least one packet */ if (macb_is_gem(bp)) bp->hw_stats.gem.rx_overruns++; else bp->hw_stats.macb.rx_overruns++; } if (status & MACB_BIT(HRESP)) { /* * TODO: Reset the hardware, and maybe move the * netdev_err to a lower-priority context as well * (work queue?) */ netdev_err(dev, "DMA bus error: HRESP not OK\n"); } status = macb_readl(bp, ISR); } spin_unlock(&bp->lock); return IRQ_HANDLED; } #ifdef CONFIG_NET_POLL_CONTROLLER /* * Polling receive - used by netconsole and other diagnostic tools * to allow network i/o with interrupts disabled. 
*/ static void macb_poll_controller(struct net_device *dev) { unsigned long flags; local_irq_save(flags); macb_interrupt(dev->irq, dev); local_irq_restore(flags); } #endif static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct macb *bp = netdev_priv(dev); dma_addr_t mapping; unsigned int len, entry; u32 ctrl; unsigned long flags; #ifdef DEBUG netdev_dbg(bp->dev, "start_xmit: len %u head %p data %p tail %p end %p\n", skb->len, skb->head, skb->data, skb_tail_pointer(skb), skb_end_pointer(skb)); print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1, skb->data, 16, true); #endif len = skb->len; spin_lock_irqsave(&bp->lock, flags); /* This is a hard error, log it. */ if (TX_BUFFS_AVAIL(bp) < 1) { netif_stop_queue(dev); spin_unlock_irqrestore(&bp->lock, flags); netdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n"); netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n", bp->tx_head, bp->tx_tail); return NETDEV_TX_BUSY; } entry = bp->tx_head; netdev_dbg(bp->dev, "Allocated ring entry %u\n", entry); mapping = dma_map_single(&bp->pdev->dev, skb->data, len, DMA_TO_DEVICE); bp->tx_skb[entry].skb = skb; bp->tx_skb[entry].mapping = mapping; netdev_dbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n", skb->data, (unsigned long)mapping); ctrl = MACB_BF(TX_FRMLEN, len); ctrl |= MACB_BIT(TX_LAST); if (entry == (TX_RING_SIZE - 1)) ctrl |= MACB_BIT(TX_WRAP); bp->tx_ring[entry].addr = mapping; bp->tx_ring[entry].ctrl = ctrl; wmb(); entry = NEXT_TX(entry); bp->tx_head = entry; skb_tx_timestamp(skb); macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); if (TX_BUFFS_AVAIL(bp) < 1) netif_stop_queue(dev); spin_unlock_irqrestore(&bp->lock, flags); return NETDEV_TX_OK; } static void macb_free_consistent(struct macb *bp) { if (bp->tx_skb) { kfree(bp->tx_skb); bp->tx_skb = NULL; } if (bp->rx_ring) { dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES, bp->rx_ring, bp->rx_ring_dma); bp->rx_ring = NULL; } if (bp->tx_ring) { 
dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES, bp->tx_ring, bp->tx_ring_dma); bp->tx_ring = NULL; } if (bp->rx_buffers) { dma_free_coherent(&bp->pdev->dev, RX_RING_SIZE * RX_BUFFER_SIZE, bp->rx_buffers, bp->rx_buffers_dma); bp->rx_buffers = NULL; } } static int macb_alloc_consistent(struct macb *bp) { int size; size = TX_RING_SIZE * sizeof(struct ring_info); bp->tx_skb = kmalloc(size, GFP_KERNEL); if (!bp->tx_skb) goto out_err; size = RX_RING_BYTES; bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size, &bp->rx_ring_dma, GFP_KERNEL); if (!bp->rx_ring) goto out_err; netdev_dbg(bp->dev, "Allocated RX ring of %d bytes at %08lx (mapped %p)\n", size, (unsigned long)bp->rx_ring_dma, bp->rx_ring); size = TX_RING_BYTES; bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size, &bp->tx_ring_dma, GFP_KERNEL); if (!bp->tx_ring) goto out_err; netdev_dbg(bp->dev, "Allocated TX ring of %d bytes at %08lx (mapped %p)\n", size, (unsigned long)bp->tx_ring_dma, bp->tx_ring); size = RX_RING_SIZE * RX_BUFFER_SIZE; bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size, &bp->rx_buffers_dma, GFP_KERNEL); if (!bp->rx_buffers) goto out_err; netdev_dbg(bp->dev, "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n", size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers); return 0; out_err: macb_free_consistent(bp); return -ENOMEM; } static void macb_init_rings(struct macb *bp) { int i; dma_addr_t addr; addr = bp->rx_buffers_dma; for (i = 0; i < RX_RING_SIZE; i++) { bp->rx_ring[i].addr = addr; bp->rx_ring[i].ctrl = 0; addr += RX_BUFFER_SIZE; } bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP); for (i = 0; i < TX_RING_SIZE; i++) { bp->tx_ring[i].addr = 0; bp->tx_ring[i].ctrl = MACB_BIT(TX_USED); } bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP); bp->rx_tail = bp->tx_head = bp->tx_tail = 0; } static void macb_reset_hw(struct macb *bp) { /* Make sure we have the write buffer for ourselves */ wmb(); /* * Disable RX and TX (XXX: Should we halt the transmission * more 
gracefully?) */ macb_writel(bp, NCR, 0); /* Clear the stats registers (XXX: Update stats first?) */ macb_writel(bp, NCR, MACB_BIT(CLRSTAT)); /* Clear all status flags */ macb_writel(bp, TSR, ~0UL); macb_writel(bp, RSR, ~0UL); /* Disable all interrupts */ macb_writel(bp, IDR, ~0UL); macb_readl(bp, ISR); } static u32 gem_mdc_clk_div(struct macb *bp) { u32 config; unsigned long pclk_hz = clk_get_rate(bp->pclk); if (pclk_hz <= 20000000) config = GEM_BF(CLK, GEM_CLK_DIV8); else if (pclk_hz <= 40000000) config = GEM_BF(CLK, GEM_CLK_DIV16); else if (pclk_hz <= 80000000) config = GEM_BF(CLK, GEM_CLK_DIV32); else if (pclk_hz <= 120000000) config = GEM_BF(CLK, GEM_CLK_DIV48); else if (pclk_hz <= 160000000) config = GEM_BF(CLK, GEM_CLK_DIV64); else config = GEM_BF(CLK, GEM_CLK_DIV96); return config; } static u32 macb_mdc_clk_div(struct macb *bp) { u32 config; unsigned long pclk_hz; if (macb_is_gem(bp)) return gem_mdc_clk_div(bp); pclk_hz = clk_get_rate(bp->pclk); if (pclk_hz <= 20000000) config = MACB_BF(CLK, MACB_CLK_DIV8); else if (pclk_hz <= 40000000) config = MACB_BF(CLK, MACB_CLK_DIV16); else if (pclk_hz <= 80000000) config = MACB_BF(CLK, MACB_CLK_DIV32); else config = MACB_BF(CLK, MACB_CLK_DIV64); return config; } /* * Get the DMA bus width field of the network configuration register that we * should program. We find the width from decoding the design configuration * register to find the maximum supported data bus width. */ static u32 macb_dbw(struct macb *bp) { if (!macb_is_gem(bp)) return 0; switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) { case 4: return GEM_BF(DBW, GEM_DBW128); case 2: return GEM_BF(DBW, GEM_DBW64); case 1: default: return GEM_BF(DBW, GEM_DBW32); } } /* * Configure the receive DMA engine to use the correct receive buffer size. * This is a configurable parameter for GEM. 
*/ static void macb_configure_dma(struct macb *bp) { u32 dmacfg; if (macb_is_gem(bp)) { dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); dmacfg |= GEM_BF(RXBS, RX_BUFFER_SIZE / 64); gem_writel(bp, DMACFG, dmacfg); } } static void macb_init_hw(struct macb *bp) { u32 config; macb_reset_hw(bp); __macb_set_hwaddr(bp); config = macb_mdc_clk_div(bp); config |= MACB_BIT(PAE); /* PAuse Enable */ config |= MACB_BIT(DRFCS); /* Discard Rx FCS */ config |= MACB_BIT(BIG); /* Receive oversized frames */ if (bp->dev->flags & IFF_PROMISC) config |= MACB_BIT(CAF); /* Copy All Frames */ if (!(bp->dev->flags & IFF_BROADCAST)) config |= MACB_BIT(NBC); /* No BroadCast */ config |= macb_dbw(bp); macb_writel(bp, NCFGR, config); macb_configure_dma(bp); /* Initialize TX and RX buffers */ macb_writel(bp, RBQP, bp->rx_ring_dma); macb_writel(bp, TBQP, bp->tx_ring_dma); /* Enable TX and RX */ macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE)); /* Enable interrupts */ macb_writel(bp, IER, (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) | MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE) | MACB_BIT(TXERR) | MACB_BIT(TCOMP) | MACB_BIT(ISR_ROVR) | MACB_BIT(HRESP))); } /* * The hash address register is 64 bits long and takes up two * locations in the memory map. The least significant bits are stored * in EMAC_HSL and the most significant bits in EMAC_HSH. * * The unicast hash enable and the multicast hash enable bits in the * network configuration register enable the reception of hash matched * frames. The destination address is reduced to a 6 bit index into * the 64 bit hash register using the following hash function. The * hash function is an exclusive or of every sixth bit of the * destination address. 
* * hi[5] = da[5] ^ da[11] ^ da[17] ^ da[23] ^ da[29] ^ da[35] ^ da[41] ^ da[47] * hi[4] = da[4] ^ da[10] ^ da[16] ^ da[22] ^ da[28] ^ da[34] ^ da[40] ^ da[46] * hi[3] = da[3] ^ da[09] ^ da[15] ^ da[21] ^ da[27] ^ da[33] ^ da[39] ^ da[45] * hi[2] = da[2] ^ da[08] ^ da[14] ^ da[20] ^ da[26] ^ da[32] ^ da[38] ^ da[44] * hi[1] = da[1] ^ da[07] ^ da[13] ^ da[19] ^ da[25] ^ da[31] ^ da[37] ^ da[43] * hi[0] = da[0] ^ da[06] ^ da[12] ^ da[18] ^ da[24] ^ da[30] ^ da[36] ^ da[42] * * da[0] represents the least significant bit of the first byte * received, that is, the multicast/unicast indicator, and da[47] * represents the most significant bit of the last byte received. If * the hash index, hi[n], points to a bit that is set in the hash * register then the frame will be matched according to whether the * frame is multicast or unicast. A multicast match will be signalled * if the multicast hash enable bit is set, da[0] is 1 and the hash * index points to a bit set in the hash register. A unicast match * will be signalled if the unicast hash enable bit is set, da[0] is 0 * and the hash index points to a bit set in the hash register. To * receive all multicast frames, the hash register should be set with * all ones and the multicast hash enable bit should be set in the * network configuration register. */ static inline int hash_bit_value(int bitnr, __u8 *addr) { if (addr[bitnr / 8] & (1 << (bitnr % 8))) return 1; return 0; } /* * Return the hash index value for the specified address. */ static int hash_get_index(__u8 *addr) { int i, j, bitval; int hash_index = 0; for (j = 0; j < 6; j++) { for (i = 0, bitval = 0; i < 8; i++) bitval ^= hash_bit_value(i*6 + j, addr); hash_index |= (bitval << j); } return hash_index; } /* * Add multicast addresses to the internal multicast-hash table. 
*/ static void macb_sethashtable(struct net_device *dev) { struct netdev_hw_addr *ha; unsigned long mc_filter[2]; unsigned int bitnr; struct macb *bp = netdev_priv(dev); mc_filter[0] = mc_filter[1] = 0; netdev_for_each_mc_addr(ha, dev) { bitnr = hash_get_index(ha->addr); mc_filter[bitnr >> 5] |= 1 << (bitnr & 31); } macb_or_gem_writel(bp, HRB, mc_filter[0]); macb_or_gem_writel(bp, HRT, mc_filter[1]); } /* * Enable/Disable promiscuous and multicast modes. */ static void macb_set_rx_mode(struct net_device *dev) { unsigned long cfg; struct macb *bp = netdev_priv(dev); cfg = macb_readl(bp, NCFGR); if (dev->flags & IFF_PROMISC) /* Enable promiscuous mode */ cfg |= MACB_BIT(CAF); else if (dev->flags & (~IFF_PROMISC)) /* Disable promiscuous mode */ cfg &= ~MACB_BIT(CAF); if (dev->flags & IFF_ALLMULTI) { /* Enable all multicast mode */ macb_or_gem_writel(bp, HRB, -1); macb_or_gem_writel(bp, HRT, -1); cfg |= MACB_BIT(NCFGR_MTI); } else if (!netdev_mc_empty(dev)) { /* Enable specific multicasts */ macb_sethashtable(dev); cfg |= MACB_BIT(NCFGR_MTI); } else if (dev->flags & (~IFF_ALLMULTI)) { /* Disable all multicast mode */ macb_or_gem_writel(bp, HRB, 0); macb_or_gem_writel(bp, HRT, 0); cfg &= ~MACB_BIT(NCFGR_MTI); } macb_writel(bp, NCFGR, cfg); } static int macb_open(struct net_device *dev) { struct macb *bp = netdev_priv(dev); int err; netdev_dbg(bp->dev, "open\n"); /* if the phy is not yet register, retry later*/ if (!bp->phy_dev) return -EAGAIN; if (!is_valid_ether_addr(dev->dev_addr)) return -EADDRNOTAVAIL; err = macb_alloc_consistent(bp); if (err) { netdev_err(dev, "Unable to allocate DMA memory (error %d)\n", err); return err; } napi_enable(&bp->napi); macb_init_rings(bp); macb_init_hw(bp); /* schedule a link state check */ phy_start(bp->phy_dev); netif_start_queue(dev); return 0; } static int macb_close(struct net_device *dev) { struct macb *bp = netdev_priv(dev); unsigned long flags; netif_stop_queue(dev); napi_disable(&bp->napi); if (bp->phy_dev) 
phy_stop(bp->phy_dev); spin_lock_irqsave(&bp->lock, flags); macb_reset_hw(bp); netif_carrier_off(dev); spin_unlock_irqrestore(&bp->lock, flags); macb_free_consistent(bp); return 0; } static void gem_update_stats(struct macb *bp) { u32 __iomem *reg = bp->regs + GEM_OTX; u32 *p = &bp->hw_stats.gem.tx_octets_31_0; u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1; for (; p < end; p++, reg++) *p += __raw_readl(reg); } static struct net_device_stats *gem_get_stats(struct macb *bp) { struct gem_stats *hwstat = &bp->hw_stats.gem; struct net_device_stats *nstat = &bp->stats; gem_update_stats(bp); nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors + hwstat->rx_alignment_errors + hwstat->rx_resource_errors + hwstat->rx_overruns + hwstat->rx_oversize_frames + hwstat->rx_jabbers + hwstat->rx_undersized_frames + hwstat->rx_length_field_frame_errors); nstat->tx_errors = (hwstat->tx_late_collisions + hwstat->tx_excessive_collisions + hwstat->tx_underrun + hwstat->tx_carrier_sense_errors); nstat->multicast = hwstat->rx_multicast_frames; nstat->collisions = (hwstat->tx_single_collision_frames + hwstat->tx_multiple_collision_frames + hwstat->tx_excessive_collisions); nstat->rx_length_errors = (hwstat->rx_oversize_frames + hwstat->rx_jabbers + hwstat->rx_undersized_frames + hwstat->rx_length_field_frame_errors); nstat->rx_over_errors = hwstat->rx_resource_errors; nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors; nstat->rx_frame_errors = hwstat->rx_alignment_errors; nstat->rx_fifo_errors = hwstat->rx_overruns; nstat->tx_aborted_errors = hwstat->tx_excessive_collisions; nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors; nstat->tx_fifo_errors = hwstat->tx_underrun; return nstat; } static struct net_device_stats *macb_get_stats(struct net_device *dev) { struct macb *bp = netdev_priv(dev); struct net_device_stats *nstat = &bp->stats; struct macb_stats *hwstat = &bp->hw_stats.macb; if (macb_is_gem(bp)) return gem_get_stats(bp); /* read stats from 
hardware */ macb_update_stats(bp); /* Convert HW stats into netdevice stats */ nstat->rx_errors = (hwstat->rx_fcs_errors + hwstat->rx_align_errors + hwstat->rx_resource_errors + hwstat->rx_overruns + hwstat->rx_oversize_pkts + hwstat->rx_jabbers + hwstat->rx_undersize_pkts + hwstat->sqe_test_errors + hwstat->rx_length_mismatch); nstat->tx_errors = (hwstat->tx_late_cols + hwstat->tx_excessive_cols + hwstat->tx_underruns + hwstat->tx_carrier_errors); nstat->collisions = (hwstat->tx_single_cols + hwstat->tx_multiple_cols + hwstat->tx_excessive_cols); nstat->rx_length_errors = (hwstat->rx_oversize_pkts + hwstat->rx_jabbers + hwstat->rx_undersize_pkts + hwstat->rx_length_mismatch); nstat->rx_over_errors = hwstat->rx_resource_errors + hwstat->rx_overruns; nstat->rx_crc_errors = hwstat->rx_fcs_errors; nstat->rx_frame_errors = hwstat->rx_align_errors; nstat->rx_fifo_errors = hwstat->rx_overruns; /* XXX: What does "missed" mean? */ nstat->tx_aborted_errors = hwstat->tx_excessive_cols; nstat->tx_carrier_errors = hwstat->tx_carrier_errors; nstat->tx_fifo_errors = hwstat->tx_underruns; /* Don't know about heartbeat or window errors... 
*/ return nstat; } static int macb_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct macb *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_gset(phydev, cmd); } static int macb_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct macb *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; if (!phydev) return -ENODEV; return phy_ethtool_sset(phydev, cmd); } static void macb_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct macb *bp = netdev_priv(dev); strcpy(info->driver, bp->pdev->dev.driver->name); strcpy(info->version, "$Revision: 1.14 $"); strcpy(info->bus_info, dev_name(&bp->pdev->dev)); } static const struct ethtool_ops macb_ethtool_ops = { .get_settings = macb_get_settings, .set_settings = macb_set_settings, .get_drvinfo = macb_get_drvinfo, .get_link = ethtool_op_get_link, }; static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct macb *bp = netdev_priv(dev); struct phy_device *phydev = bp->phy_dev; if (!netif_running(dev)) return -EINVAL; if (!phydev) return -ENODEV; return phy_mii_ioctl(phydev, rq, cmd); } static const struct net_device_ops macb_netdev_ops = { .ndo_open = macb_open, .ndo_stop = macb_close, .ndo_start_xmit = macb_start_xmit, .ndo_set_rx_mode = macb_set_rx_mode, .ndo_get_stats = macb_get_stats, .ndo_do_ioctl = macb_ioctl, .ndo_validate_addr = eth_validate_addr, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = macb_poll_controller, #endif }; #if defined(CONFIG_OF) static const struct of_device_id macb_dt_ids[] = { { .compatible = "cdns,at32ap7000-macb" }, { .compatible = "cdns,at91sam9260-macb" }, { .compatible = "cdns,macb" }, { .compatible = "cdns,pc302-gem" }, { .compatible = "cdns,gem" }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, macb_dt_ids); static int __devinit macb_get_phy_mode_dt(struct 
platform_device *pdev) { struct device_node *np = pdev->dev.of_node; if (np) return of_get_phy_mode(np); return -ENODEV; } static int __devinit macb_get_hwaddr_dt(struct macb *bp) { struct device_node *np = bp->pdev->dev.of_node; if (np) { const char *mac = of_get_mac_address(np); if (mac) { memcpy(bp->dev->dev_addr, mac, ETH_ALEN); return 0; } } return -ENODEV; } #else static int __devinit macb_get_phy_mode_dt(struct platform_device *pdev) { return -ENODEV; } static int __devinit macb_get_hwaddr_dt(struct macb *bp) { return -ENODEV; } #endif static int __init macb_probe(struct platform_device *pdev) { struct macb_platform_data *pdata; struct resource *regs; struct net_device *dev; struct macb *bp; struct phy_device *phydev; u32 config; int err = -ENXIO; regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!regs) { dev_err(&pdev->dev, "no mmio resource defined\n"); goto err_out; } err = -ENOMEM; dev = alloc_etherdev(sizeof(*bp)); if (!dev) goto err_out; SET_NETDEV_DEV(dev, &pdev->dev); /* TODO: Actually, we have some interesting features... 
*/ dev->features |= 0; bp = netdev_priv(dev); bp->pdev = pdev; bp->dev = dev; spin_lock_init(&bp->lock); bp->pclk = clk_get(&pdev->dev, "pclk"); if (IS_ERR(bp->pclk)) { dev_err(&pdev->dev, "failed to get macb_clk\n"); goto err_out_free_dev; } clk_enable(bp->pclk); bp->hclk = clk_get(&pdev->dev, "hclk"); if (IS_ERR(bp->hclk)) { dev_err(&pdev->dev, "failed to get hclk\n"); goto err_out_put_pclk; } clk_enable(bp->hclk); bp->regs = ioremap(regs->start, resource_size(regs)); if (!bp->regs) { dev_err(&pdev->dev, "failed to map registers, aborting.\n"); err = -ENOMEM; goto err_out_disable_clocks; } dev->irq = platform_get_irq(pdev, 0); err = request_irq(dev->irq, macb_interrupt, 0, dev->name, dev); if (err) { dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n", dev->irq, err); goto err_out_iounmap; } dev->netdev_ops = &macb_netdev_ops; netif_napi_add(dev, &bp->napi, macb_poll, 64); dev->ethtool_ops = &macb_ethtool_ops; dev->base_addr = regs->start; /* Set MII management clock divider */ config = macb_mdc_clk_div(bp); config |= macb_dbw(bp); macb_writel(bp, NCFGR, config); err = macb_get_hwaddr_dt(bp); if (err < 0) macb_get_hwaddr(bp); err = macb_get_phy_mode_dt(pdev); if (err < 0) { pdata = pdev->dev.platform_data; if (pdata && pdata->is_rmii) bp->phy_interface = PHY_INTERFACE_MODE_RMII; else bp->phy_interface = PHY_INTERFACE_MODE_MII; } else { bp->phy_interface = err; } if (bp->phy_interface == PHY_INTERFACE_MODE_RMII) #if defined(CONFIG_ARCH_AT91) macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) | MACB_BIT(CLKEN))); #else macb_or_gem_writel(bp, USRIO, 0); #endif else #if defined(CONFIG_ARCH_AT91) macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN)); #else macb_or_gem_writel(bp, USRIO, MACB_BIT(MII)); #endif bp->tx_pending = DEF_TX_RING_PENDING; err = register_netdev(dev); if (err) { dev_err(&pdev->dev, "Cannot register net device, aborting.\n"); goto err_out_free_irq; } if (macb_mii_init(bp) != 0) { goto err_out_unregister_netdev; } platform_set_drvdata(pdev, dev); 
netdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n", macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr, dev->irq, dev->dev_addr); phydev = bp->phy_dev; netdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", phydev->drv->name, dev_name(&phydev->dev), phydev->irq); return 0; err_out_unregister_netdev: unregister_netdev(dev); err_out_free_irq: free_irq(dev->irq, dev); err_out_iounmap: iounmap(bp->regs); err_out_disable_clocks: clk_disable(bp->hclk); clk_put(bp->hclk); clk_disable(bp->pclk); err_out_put_pclk: clk_put(bp->pclk); err_out_free_dev: free_netdev(dev); err_out: platform_set_drvdata(pdev, NULL); return err; } static int __exit macb_remove(struct platform_device *pdev) { struct net_device *dev; struct macb *bp; dev = platform_get_drvdata(pdev); if (dev) { bp = netdev_priv(dev); if (bp->phy_dev) phy_disconnect(bp->phy_dev); mdiobus_unregister(bp->mii_bus); kfree(bp->mii_bus->irq); mdiobus_free(bp->mii_bus); unregister_netdev(dev); free_irq(dev->irq, dev); iounmap(bp->regs); clk_disable(bp->hclk); clk_put(bp->hclk); clk_disable(bp->pclk); clk_put(bp->pclk); free_netdev(dev); platform_set_drvdata(pdev, NULL); } return 0; } #ifdef CONFIG_PM static int macb_suspend(struct platform_device *pdev, pm_message_t state) { struct net_device *netdev = platform_get_drvdata(pdev); struct macb *bp = netdev_priv(netdev); netif_device_detach(netdev); clk_disable(bp->hclk); clk_disable(bp->pclk); return 0; } static int macb_resume(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); struct macb *bp = netdev_priv(netdev); clk_enable(bp->pclk); clk_enable(bp->hclk); netif_device_attach(netdev); return 0; } #else #define macb_suspend NULL #define macb_resume NULL #endif static struct platform_driver macb_driver = { .remove = __exit_p(macb_remove), .suspend = macb_suspend, .resume = macb_resume, .driver = { .name = "macb", .owner = THIS_MODULE, .of_match_table = of_match_ptr(macb_dt_ids), }, }; static int __init 
macb_init(void) { return platform_driver_probe(&macb_driver, macb_probe); } static void __exit macb_exit(void) { platform_driver_unregister(&macb_driver); } module_init(macb_init); module_exit(macb_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver"); MODULE_AUTHOR("Haavard Skinnemoen (Atmel)"); MODULE_ALIAS("platform:macb");
gpl-2.0
moongtaeng/android_kernel_pantech_ef56s
drivers/infiniband/hw/qib/qib_rc.c
4809
62388
/* * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/io.h> #include "qib.h" /* cut down ridiculously long IB macro names */ #define OP(x) IB_OPCODE_RC_##x static void rc_timeout(unsigned long arg); static u32 restart_sge(struct qib_sge_state *ss, struct qib_swqe *wqe, u32 psn, u32 pmtu) { u32 len; len = ((psn - wqe->psn) & QIB_PSN_MASK) * pmtu; ss->sge = wqe->sg_list[0]; ss->sg_list = wqe->sg_list + 1; ss->num_sge = wqe->wr.num_sge; ss->total_len = wqe->length; qib_skip_sge(ss, len, 0); return wqe->length - len; } static void start_timer(struct qib_qp *qp) { qp->s_flags |= QIB_S_TIMER; qp->s_timer.function = rc_timeout; /* 4.096 usec. * (1 << qp->timeout) */ qp->s_timer.expires = jiffies + qp->timeout_jiffies; add_timer(&qp->s_timer); } /** * qib_make_rc_ack - construct a response packet (ACK, NAK, or RDMA read) * @dev: the device for this QP * @qp: a pointer to the QP * @ohdr: a pointer to the IB header being constructed * @pmtu: the path MTU * * Return 1 if constructed; otherwise, return 0. * Note that we are in the responder's side of the QP context. * Note the QP s_lock must be held. */ static int qib_make_rc_ack(struct qib_ibdev *dev, struct qib_qp *qp, struct qib_other_headers *ohdr, u32 pmtu) { struct qib_ack_entry *e; u32 hwords; u32 len; u32 bth0; u32 bth2; /* Don't send an ACK if we aren't supposed to. */ if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) goto bail; /* header size in 32-bit words LRH+BTH = (8+12)/4. */ hwords = 5; switch (qp->s_ack_state) { case OP(RDMA_READ_RESPONSE_LAST): case OP(RDMA_READ_RESPONSE_ONLY): e = &qp->s_ack_queue[qp->s_tail_ack_queue]; if (e->rdma_sge.mr) { atomic_dec(&e->rdma_sge.mr->refcount); e->rdma_sge.mr = NULL; } /* FALLTHROUGH */ case OP(ATOMIC_ACKNOWLEDGE): /* * We can increment the tail pointer now that the last * response has been sent instead of only being * constructed. 
*/ if (++qp->s_tail_ack_queue > QIB_MAX_RDMA_ATOMIC) qp->s_tail_ack_queue = 0; /* FALLTHROUGH */ case OP(SEND_ONLY): case OP(ACKNOWLEDGE): /* Check for no next entry in the queue. */ if (qp->r_head_ack_queue == qp->s_tail_ack_queue) { if (qp->s_flags & QIB_S_ACK_PENDING) goto normal; goto bail; } e = &qp->s_ack_queue[qp->s_tail_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST)) { /* * If a RDMA read response is being resent and * we haven't seen the duplicate request yet, * then stop sending the remaining responses the * responder has seen until the requester resends it. */ len = e->rdma_sge.sge_length; if (len && !e->rdma_sge.mr) { qp->s_tail_ack_queue = qp->r_head_ack_queue; goto bail; } /* Copy SGE state in case we need to resend */ qp->s_rdma_mr = e->rdma_sge.mr; if (qp->s_rdma_mr) atomic_inc(&qp->s_rdma_mr->refcount); qp->s_ack_rdma_sge.sge = e->rdma_sge; qp->s_ack_rdma_sge.num_sge = 1; qp->s_cur_sge = &qp->s_ack_rdma_sge; if (len > pmtu) { len = pmtu; qp->s_ack_state = OP(RDMA_READ_RESPONSE_FIRST); } else { qp->s_ack_state = OP(RDMA_READ_RESPONSE_ONLY); e->sent = 1; } ohdr->u.aeth = qib_compute_aeth(qp); hwords++; qp->s_ack_rdma_psn = e->psn; bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK; } else { /* COMPARE_SWAP or FETCH_ADD */ qp->s_cur_sge = NULL; len = 0; qp->s_ack_state = OP(ATOMIC_ACKNOWLEDGE); ohdr->u.at.aeth = qib_compute_aeth(qp); ohdr->u.at.atomic_ack_eth[0] = cpu_to_be32(e->atomic_data >> 32); ohdr->u.at.atomic_ack_eth[1] = cpu_to_be32(e->atomic_data); hwords += sizeof(ohdr->u.at) / sizeof(u32); bth2 = e->psn & QIB_PSN_MASK; e->sent = 1; } bth0 = qp->s_ack_state << 24; break; case OP(RDMA_READ_RESPONSE_FIRST): qp->s_ack_state = OP(RDMA_READ_RESPONSE_MIDDLE); /* FALLTHROUGH */ case OP(RDMA_READ_RESPONSE_MIDDLE): qp->s_cur_sge = &qp->s_ack_rdma_sge; qp->s_rdma_mr = qp->s_ack_rdma_sge.sge.mr; if (qp->s_rdma_mr) atomic_inc(&qp->s_rdma_mr->refcount); len = qp->s_ack_rdma_sge.sge.sge_length; if (len > pmtu) len = pmtu; else { ohdr->u.aeth = 
qib_compute_aeth(qp); hwords++; qp->s_ack_state = OP(RDMA_READ_RESPONSE_LAST); e = &qp->s_ack_queue[qp->s_tail_ack_queue]; e->sent = 1; } bth0 = qp->s_ack_state << 24; bth2 = qp->s_ack_rdma_psn++ & QIB_PSN_MASK; break; default: normal: /* * Send a regular ACK. * Set the s_ack_state so we wait until after sending * the ACK before setting s_ack_state to ACKNOWLEDGE * (see above). */ qp->s_ack_state = OP(SEND_ONLY); qp->s_flags &= ~QIB_S_ACK_PENDING; qp->s_cur_sge = NULL; if (qp->s_nak_state) ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) | (qp->s_nak_state << QIB_AETH_CREDIT_SHIFT)); else ohdr->u.aeth = qib_compute_aeth(qp); hwords++; len = 0; bth0 = OP(ACKNOWLEDGE) << 24; bth2 = qp->s_ack_psn & QIB_PSN_MASK; } qp->s_rdma_ack_cnt++; qp->s_hdrwords = hwords; qp->s_cur_size = len; qib_make_ruc_header(qp, ohdr, bth0, bth2); return 1; bail: qp->s_ack_state = OP(ACKNOWLEDGE); qp->s_flags &= ~(QIB_S_RESP_PENDING | QIB_S_ACK_PENDING); return 0; } /** * qib_make_rc_req - construct a request packet (SEND, RDMA r/w, ATOMIC) * @qp: a pointer to the QP * * Return 1 if constructed; otherwise, return 0. */ int qib_make_rc_req(struct qib_qp *qp) { struct qib_ibdev *dev = to_idev(qp->ibqp.device); struct qib_other_headers *ohdr; struct qib_sge_state *ss; struct qib_swqe *wqe; u32 hwords; u32 len; u32 bth0; u32 bth2; u32 pmtu = qp->pmtu; char newreq; unsigned long flags; int ret = 0; int delta; ohdr = &qp->s_hdr.u.oth; if (qp->remote_ah_attr.ah_flags & IB_AH_GRH) ohdr = &qp->s_hdr.u.l.oth; /* * The lock is needed to synchronize between the sending tasklet, * the receive interrupt handler, and timeout resends. */ spin_lock_irqsave(&qp->s_lock, flags); /* Sending responses has higher priority over sending requests. 
*/ if ((qp->s_flags & QIB_S_RESP_PENDING) && qib_make_rc_ack(dev, qp, ohdr, pmtu)) goto done; if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_SEND_OK)) { if (!(ib_qib_state_ops[qp->state] & QIB_FLUSH_SEND)) goto bail; /* We are in the error state, flush the work request. */ if (qp->s_last == qp->s_head) goto bail; /* If DMAs are in progress, we can't flush immediately. */ if (atomic_read(&qp->s_dma_busy)) { qp->s_flags |= QIB_S_WAIT_DMA; goto bail; } wqe = get_swqe_ptr(qp, qp->s_last); qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ? IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR); /* will get called again */ goto done; } if (qp->s_flags & (QIB_S_WAIT_RNR | QIB_S_WAIT_ACK)) goto bail; if (qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) { if (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) { qp->s_flags |= QIB_S_WAIT_PSN; goto bail; } qp->s_sending_psn = qp->s_psn; qp->s_sending_hpsn = qp->s_psn - 1; } /* header size in 32-bit words LRH+BTH = (8+12)/4. */ hwords = 5; bth0 = 0; /* Send a request. */ wqe = get_swqe_ptr(qp, qp->s_cur); switch (qp->s_state) { default: if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_NEXT_SEND_OK)) goto bail; /* * Resend an old request or start a new one. * * We keep track of the current SWQE so that * we don't reset the "furthest progress" state * if we need to back up. */ newreq = 0; if (qp->s_cur == qp->s_tail) { /* Check if send work queue is empty. */ if (qp->s_tail == qp->s_head) goto bail; /* * If a fence is requested, wait for previous * RDMA read and atomic operations to finish. */ if ((wqe->wr.send_flags & IB_SEND_FENCE) && qp->s_num_rd_atomic) { qp->s_flags |= QIB_S_WAIT_FENCE; goto bail; } wqe->psn = qp->s_next_psn; newreq = 1; } /* * Note that we have to be careful not to modify the * original work request since we may need to resend * it. */ len = wqe->length; ss = &qp->s_sge; bth2 = qp->s_psn & QIB_PSN_MASK; switch (wqe->wr.opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: /* If no credit, return. 
*/ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; goto bail; } wqe->lpsn = wqe->psn; if (len > pmtu) { wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(SEND_FIRST); len = pmtu; break; } if (wqe->wr.opcode == IB_WR_SEND) qp->s_state = OP(SEND_ONLY); else { qp->s_state = OP(SEND_ONLY_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; bth2 |= IB_BTH_REQ_ACK; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; break; case IB_WR_RDMA_WRITE: if (newreq && !(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) qp->s_lsn++; /* FALLTHROUGH */ case IB_WR_RDMA_WRITE_WITH_IMM: /* If no credit, return. */ if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT) && qib_cmp24(wqe->ssn, qp->s_lsn + 1) > 0) { qp->s_flags |= QIB_S_WAIT_SSN_CREDIT; goto bail; } ohdr->u.rc.reth.vaddr = cpu_to_be64(wqe->wr.wr.rdma.remote_addr); ohdr->u.rc.reth.rkey = cpu_to_be32(wqe->wr.wr.rdma.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); hwords += sizeof(struct ib_reth) / sizeof(u32); wqe->lpsn = wqe->psn; if (len > pmtu) { wqe->lpsn += (len - 1) / pmtu; qp->s_state = OP(RDMA_WRITE_FIRST); len = pmtu; break; } if (wqe->wr.opcode == IB_WR_RDMA_WRITE) qp->s_state = OP(RDMA_WRITE_ONLY); else { qp->s_state = OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE); /* Immediate data comes after RETH */ ohdr->u.rc.imm_data = wqe->wr.ex.imm_data; hwords += 1; if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; } bth2 |= IB_BTH_REQ_ACK; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; break; case IB_WR_RDMA_READ: /* * Don't allow more operations to be started * than the QP limits allow. 
*/ if (newreq) { if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { qp->s_flags |= QIB_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) qp->s_lsn++; /* * Adjust s_next_psn to count the * expected number of responses. */ if (len > pmtu) qp->s_next_psn += (len - 1) / pmtu; wqe->lpsn = qp->s_next_psn++; } ohdr->u.rc.reth.vaddr = cpu_to_be64(wqe->wr.wr.rdma.remote_addr); ohdr->u.rc.reth.rkey = cpu_to_be32(wqe->wr.wr.rdma.rkey); ohdr->u.rc.reth.length = cpu_to_be32(len); qp->s_state = OP(RDMA_READ_REQUEST); hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); ss = NULL; len = 0; bth2 |= IB_BTH_REQ_ACK; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; break; case IB_WR_ATOMIC_CMP_AND_SWP: case IB_WR_ATOMIC_FETCH_AND_ADD: /* * Don't allow more operations to be started * than the QP limits allow. */ if (newreq) { if (qp->s_num_rd_atomic >= qp->s_max_rd_atomic) { qp->s_flags |= QIB_S_WAIT_RDMAR; goto bail; } qp->s_num_rd_atomic++; if (!(qp->s_flags & QIB_S_UNLIMITED_CREDIT)) qp->s_lsn++; wqe->lpsn = wqe->psn; } if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP) { qp->s_state = OP(COMPARE_SWAP); ohdr->u.atomic_eth.swap_data = cpu_to_be64( wqe->wr.wr.atomic.swap); ohdr->u.atomic_eth.compare_data = cpu_to_be64( wqe->wr.wr.atomic.compare_add); } else { qp->s_state = OP(FETCH_ADD); ohdr->u.atomic_eth.swap_data = cpu_to_be64( wqe->wr.wr.atomic.compare_add); ohdr->u.atomic_eth.compare_data = 0; } ohdr->u.atomic_eth.vaddr[0] = cpu_to_be32( wqe->wr.wr.atomic.remote_addr >> 32); ohdr->u.atomic_eth.vaddr[1] = cpu_to_be32( wqe->wr.wr.atomic.remote_addr); ohdr->u.atomic_eth.rkey = cpu_to_be32( wqe->wr.wr.atomic.rkey); hwords += sizeof(struct ib_atomic_eth) / sizeof(u32); ss = NULL; len = 0; bth2 |= IB_BTH_REQ_ACK; if (++qp->s_cur == qp->s_size) qp->s_cur = 0; break; default: goto bail; } qp->s_sge.sge = wqe->sg_list[0]; qp->s_sge.sg_list = wqe->sg_list + 1; qp->s_sge.num_sge = wqe->wr.num_sge; qp->s_sge.total_len = wqe->length; qp->s_len = wqe->length; 
if (newreq) { qp->s_tail++; if (qp->s_tail >= qp->s_size) qp->s_tail = 0; } if (wqe->wr.opcode == IB_WR_RDMA_READ) qp->s_psn = wqe->lpsn + 1; else { qp->s_psn++; if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) qp->s_next_psn = qp->s_psn; } break; case OP(RDMA_READ_RESPONSE_FIRST): /* * qp->s_state is normally set to the opcode of the * last packet constructed for new requests and therefore * is never set to RDMA read response. * RDMA_READ_RESPONSE_FIRST is used by the ACK processing * thread to indicate a SEND needs to be restarted from an * earlier PSN without interferring with the sending thread. * See qib_restart_rc(). */ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); /* FALLTHROUGH */ case OP(SEND_FIRST): qp->s_state = OP(SEND_MIDDLE); /* FALLTHROUGH */ case OP(SEND_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { len = pmtu; break; } if (wqe->wr.opcode == IB_WR_SEND) qp->s_state = OP(SEND_LAST); else { qp->s_state = OP(SEND_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; } if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; bth2 |= IB_BTH_REQ_ACK; qp->s_cur++; if (qp->s_cur >= qp->s_size) qp->s_cur = 0; break; case OP(RDMA_READ_RESPONSE_LAST): /* * qp->s_state is normally set to the opcode of the * last packet constructed for new requests and therefore * is never set to RDMA read response. * RDMA_READ_RESPONSE_LAST is used by the ACK processing * thread to indicate a RDMA write needs to be restarted from * an earlier PSN without interferring with the sending thread. * See qib_restart_rc(). 
*/ qp->s_len = restart_sge(&qp->s_sge, wqe, qp->s_psn, pmtu); /* FALLTHROUGH */ case OP(RDMA_WRITE_FIRST): qp->s_state = OP(RDMA_WRITE_MIDDLE); /* FALLTHROUGH */ case OP(RDMA_WRITE_MIDDLE): bth2 = qp->s_psn++ & QIB_PSN_MASK; if (qib_cmp24(qp->s_psn, qp->s_next_psn) > 0) qp->s_next_psn = qp->s_psn; ss = &qp->s_sge; len = qp->s_len; if (len > pmtu) { len = pmtu; break; } if (wqe->wr.opcode == IB_WR_RDMA_WRITE) qp->s_state = OP(RDMA_WRITE_LAST); else { qp->s_state = OP(RDMA_WRITE_LAST_WITH_IMMEDIATE); /* Immediate data comes after the BTH */ ohdr->u.imm_data = wqe->wr.ex.imm_data; hwords += 1; if (wqe->wr.send_flags & IB_SEND_SOLICITED) bth0 |= IB_BTH_SOLICITED; } bth2 |= IB_BTH_REQ_ACK; qp->s_cur++; if (qp->s_cur >= qp->s_size) qp->s_cur = 0; break; case OP(RDMA_READ_RESPONSE_MIDDLE): /* * qp->s_state is normally set to the opcode of the * last packet constructed for new requests and therefore * is never set to RDMA read response. * RDMA_READ_RESPONSE_MIDDLE is used by the ACK processing * thread to indicate a RDMA read needs to be restarted from * an earlier PSN without interferring with the sending thread. * See qib_restart_rc(). 
*/ len = ((qp->s_psn - wqe->psn) & QIB_PSN_MASK) * pmtu; ohdr->u.rc.reth.vaddr = cpu_to_be64(wqe->wr.wr.rdma.remote_addr + len); ohdr->u.rc.reth.rkey = cpu_to_be32(wqe->wr.wr.rdma.rkey); ohdr->u.rc.reth.length = cpu_to_be32(wqe->length - len); qp->s_state = OP(RDMA_READ_REQUEST); hwords += sizeof(ohdr->u.rc.reth) / sizeof(u32); bth2 = (qp->s_psn & QIB_PSN_MASK) | IB_BTH_REQ_ACK; qp->s_psn = wqe->lpsn + 1; ss = NULL; len = 0; qp->s_cur++; if (qp->s_cur == qp->s_size) qp->s_cur = 0; break; } qp->s_sending_hpsn = bth2; delta = (((int) bth2 - (int) wqe->psn) << 8) >> 8; if (delta && delta % QIB_PSN_CREDIT == 0) bth2 |= IB_BTH_REQ_ACK; if (qp->s_flags & QIB_S_SEND_ONE) { qp->s_flags &= ~QIB_S_SEND_ONE; qp->s_flags |= QIB_S_WAIT_ACK; bth2 |= IB_BTH_REQ_ACK; } qp->s_len -= len; qp->s_hdrwords = hwords; qp->s_cur_sge = ss; qp->s_cur_size = len; qib_make_ruc_header(qp, ohdr, bth0 | (qp->s_state << 24), bth2); done: ret = 1; goto unlock; bail: qp->s_flags &= ~QIB_S_BUSY; unlock: spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } /** * qib_send_rc_ack - Construct an ACK packet and send it * @qp: a pointer to the QP * * This is called from qib_rc_rcv() and qib_kreceive(). * Note that RDMA reads and atomics are handled in the * send side QP state and tasklet. */ void qib_send_rc_ack(struct qib_qp *qp) { struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device); struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct qib_pportdata *ppd = ppd_from_ibp(ibp); u64 pbc; u16 lrh0; u32 bth0; u32 hwords; u32 pbufn; u32 __iomem *piobuf; struct qib_ib_header hdr; struct qib_other_headers *ohdr; u32 control; unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) goto unlock; /* Don't send ACK or NAK if a RDMA read or atomic is pending. */ if ((qp->s_flags & QIB_S_RESP_PENDING) || qp->s_rdma_ack_cnt) goto queue_ack; /* Construct the header with s_lock held so APM doesn't change it. 
*/ ohdr = &hdr.u.oth; lrh0 = QIB_LRH_BTH; /* header size in 32-bit words LRH+BTH+AETH = (8+12+4)/4. */ hwords = 6; if (unlikely(qp->remote_ah_attr.ah_flags & IB_AH_GRH)) { hwords += qib_make_grh(ibp, &hdr.u.l.grh, &qp->remote_ah_attr.grh, hwords, 0); ohdr = &hdr.u.l.oth; lrh0 = QIB_LRH_GRH; } /* read pkey_index w/o lock (its atomic) */ bth0 = qib_get_pkey(ibp, qp->s_pkey_index) | (OP(ACKNOWLEDGE) << 24); if (qp->s_mig_state == IB_MIG_MIGRATED) bth0 |= IB_BTH_MIG_REQ; if (qp->r_nak_state) ohdr->u.aeth = cpu_to_be32((qp->r_msn & QIB_MSN_MASK) | (qp->r_nak_state << QIB_AETH_CREDIT_SHIFT)); else ohdr->u.aeth = qib_compute_aeth(qp); lrh0 |= ibp->sl_to_vl[qp->remote_ah_attr.sl] << 12 | qp->remote_ah_attr.sl << 4; hdr.lrh[0] = cpu_to_be16(lrh0); hdr.lrh[1] = cpu_to_be16(qp->remote_ah_attr.dlid); hdr.lrh[2] = cpu_to_be16(hwords + SIZE_OF_CRC); hdr.lrh[3] = cpu_to_be16(ppd->lid | qp->remote_ah_attr.src_path_bits); ohdr->bth[0] = cpu_to_be32(bth0); ohdr->bth[1] = cpu_to_be32(qp->remote_qpn); ohdr->bth[2] = cpu_to_be32(qp->r_ack_psn & QIB_PSN_MASK); spin_unlock_irqrestore(&qp->s_lock, flags); /* Don't try to send ACKs if the link isn't ACTIVE */ if (!(ppd->lflags & QIBL_LINKACTIVE)) goto done; control = dd->f_setpbc_control(ppd, hwords + SIZE_OF_CRC, qp->s_srate, lrh0 >> 12); /* length is + 1 for the control dword */ pbc = ((u64) control << 32) | (hwords + 1); piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn); if (!piobuf) { /* * We are out of PIO buffers at the moment. * Pass responsibility for sending the ACK to the * send tasklet so that when a PIO buffer becomes * available, the ACK is sent ahead of other outgoing * packets. */ spin_lock_irqsave(&qp->s_lock, flags); goto queue_ack; } /* * Write the pbc. * We have to flush after the PBC for correctness * on some cpus or WC buffer can be written out of order. 
*/ writeq(pbc, piobuf); if (dd->flags & QIB_PIO_FLUSH_WC) { u32 *hdrp = (u32 *) &hdr; qib_flush_wc(); qib_pio_copy(piobuf + 2, hdrp, hwords - 1); qib_flush_wc(); __raw_writel(hdrp[hwords - 1], piobuf + hwords + 1); } else qib_pio_copy(piobuf + 2, (u32 *) &hdr, hwords); if (dd->flags & QIB_USE_SPCL_TRIG) { u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023; qib_flush_wc(); __raw_writel(0xaebecede, piobuf + spcl_off); } qib_flush_wc(); qib_sendbuf_done(dd, pbufn); ibp->n_unicast_xmit++; goto done; queue_ack: if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) { ibp->n_rc_qacks++; qp->s_flags |= QIB_S_ACK_PENDING | QIB_S_RESP_PENDING; qp->s_nak_state = qp->r_nak_state; qp->s_ack_psn = qp->r_ack_psn; /* Schedule the send tasklet. */ qib_schedule_send(qp); } unlock: spin_unlock_irqrestore(&qp->s_lock, flags); done: return; } /** * reset_psn - reset the QP state to send starting from PSN * @qp: the QP * @psn: the packet sequence number to restart at * * This is called from qib_rc_rcv() to process an incoming RC ACK * for the given QP. * Called at interrupt level with the QP s_lock held. */ static void reset_psn(struct qib_qp *qp, u32 psn) { u32 n = qp->s_acked; struct qib_swqe *wqe = get_swqe_ptr(qp, n); u32 opcode; qp->s_cur = n; /* * If we are starting the request from the beginning, * let the normal send code handle initialization. */ if (qib_cmp24(psn, wqe->psn) <= 0) { qp->s_state = OP(SEND_LAST); goto done; } /* Find the work request opcode corresponding to the given PSN. */ opcode = wqe->wr.opcode; for (;;) { int diff; if (++n == qp->s_size) n = 0; if (n == qp->s_tail) break; wqe = get_swqe_ptr(qp, n); diff = qib_cmp24(psn, wqe->psn); if (diff < 0) break; qp->s_cur = n; /* * If we are starting the request from the beginning, * let the normal send code handle initialization. */ if (diff == 0) { qp->s_state = OP(SEND_LAST); goto done; } opcode = wqe->wr.opcode; } /* * Set the state to restart in the middle of a request. 
* Don't change the s_sge, s_cur_sge, or s_cur_size. * See qib_make_rc_req(). */ switch (opcode) { case IB_WR_SEND: case IB_WR_SEND_WITH_IMM: qp->s_state = OP(RDMA_READ_RESPONSE_FIRST); break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_WRITE_WITH_IMM: qp->s_state = OP(RDMA_READ_RESPONSE_LAST); break; case IB_WR_RDMA_READ: qp->s_state = OP(RDMA_READ_RESPONSE_MIDDLE); break; default: /* * This case shouldn't happen since its only * one PSN per req. */ qp->s_state = OP(SEND_LAST); } done: qp->s_psn = psn; /* * Set QIB_S_WAIT_PSN as qib_rc_complete() may start the timer * asynchronously before the send tasklet can get scheduled. * Doing it in qib_make_rc_req() is too late. */ if ((qib_cmp24(qp->s_psn, qp->s_sending_hpsn) <= 0) && (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) qp->s_flags |= QIB_S_WAIT_PSN; } /* * Back up requester to resend the last un-ACKed request. * The QP r_lock and s_lock should be held and interrupts disabled. */ static void qib_restart_rc(struct qib_qp *qp, u32 psn, int wait) { struct qib_swqe *wqe = get_swqe_ptr(qp, qp->s_acked); struct qib_ibport *ibp; if (qp->s_retry == 0) { if (qp->s_mig_state == IB_MIG_ARMED) { qib_migrate_qp(qp); qp->s_retry = qp->s_retry_cnt; } else if (qp->s_last == qp->s_acked) { qib_send_complete(qp, wqe, IB_WC_RETRY_EXC_ERR); qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); return; } else /* XXX need to handle delayed completion */ return; } else qp->s_retry--; ibp = to_iport(qp->ibqp.device, qp->port_num); if (wqe->wr.opcode == IB_WR_RDMA_READ) ibp->n_rc_resends++; else ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_RDMAR | QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_PSN | QIB_S_WAIT_ACK); if (wait) qp->s_flags |= QIB_S_SEND_ONE; reset_psn(qp, psn); } /* * This is called from s_timer for missing responses. 
*/ static void rc_timeout(unsigned long arg) { struct qib_qp *qp = (struct qib_qp *)arg; struct qib_ibport *ibp; unsigned long flags; spin_lock_irqsave(&qp->r_lock, flags); spin_lock(&qp->s_lock); if (qp->s_flags & QIB_S_TIMER) { ibp = to_iport(qp->ibqp.device, qp->port_num); ibp->n_rc_timeouts++; qp->s_flags &= ~QIB_S_TIMER; del_timer(&qp->s_timer); qib_restart_rc(qp, qp->s_last_psn + 1, 1); qib_schedule_send(qp); } spin_unlock(&qp->s_lock); spin_unlock_irqrestore(&qp->r_lock, flags); } /* * This is called from s_timer for RNR timeouts. */ void qib_rc_rnr_retry(unsigned long arg) { struct qib_qp *qp = (struct qib_qp *)arg; unsigned long flags; spin_lock_irqsave(&qp->s_lock, flags); if (qp->s_flags & QIB_S_WAIT_RNR) { qp->s_flags &= ~QIB_S_WAIT_RNR; del_timer(&qp->s_timer); qib_schedule_send(qp); } spin_unlock_irqrestore(&qp->s_lock, flags); } /* * Set qp->s_sending_psn to the next PSN after the given one. * This would be psn+1 except when RDMA reads are present. */ static void reset_sending_psn(struct qib_qp *qp, u32 psn) { struct qib_swqe *wqe; u32 n = qp->s_last; /* Find the work request corresponding to the given PSN. */ for (;;) { wqe = get_swqe_ptr(qp, n); if (qib_cmp24(psn, wqe->lpsn) <= 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ) qp->s_sending_psn = wqe->lpsn + 1; else qp->s_sending_psn = psn + 1; break; } if (++n == qp->s_size) n = 0; if (n == qp->s_tail) break; } } /* * This should be called with the QP s_lock held and interrupts disabled. 
*/ void qib_rc_send_complete(struct qib_qp *qp, struct qib_ib_header *hdr) { struct qib_other_headers *ohdr; struct qib_swqe *wqe; struct ib_wc wc; unsigned i; u32 opcode; u32 psn; if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_OR_FLUSH_SEND)) return; /* Find out where the BTH is */ if ((be16_to_cpu(hdr->lrh[0]) & 3) == QIB_LRH_BTH) ohdr = &hdr->u.oth; else ohdr = &hdr->u.l.oth; opcode = be32_to_cpu(ohdr->bth[0]) >> 24; if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) && opcode <= OP(ATOMIC_ACKNOWLEDGE)) { WARN_ON(!qp->s_rdma_ack_cnt); qp->s_rdma_ack_cnt--; return; } psn = be32_to_cpu(ohdr->bth[2]); reset_sending_psn(qp, psn); /* * Start timer after a packet requesting an ACK has been sent and * there are still requests that haven't been acked. */ if ((psn & IB_BTH_REQ_ACK) && qp->s_acked != qp->s_tail && !(qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR | QIB_S_WAIT_PSN)) && (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) start_timer(qp); while (qp->s_last != qp->s_acked) { wqe = get_swqe_ptr(qp, qp->s_last); if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) >= 0 && qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0) break; for (i = 0; i < wqe->wr.num_sge; i++) { struct qib_sge *sge = &wqe->sg_list[i]; atomic_dec(&sge->mr->refcount); } /* Post a send completion queue entry if requested. */ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { memset(&wc, 0, sizeof wc); wc.wr_id = wqe->wr.wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); } if (++qp->s_last >= qp->s_size) qp->s_last = 0; } /* * If we were waiting for sends to complete before resending, * and they are now complete, restart sending. 
*/ if (qp->s_flags & QIB_S_WAIT_PSN && qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { qp->s_flags &= ~QIB_S_WAIT_PSN; qp->s_sending_psn = qp->s_psn; qp->s_sending_hpsn = qp->s_psn - 1; qib_schedule_send(qp); } } static inline void update_last_psn(struct qib_qp *qp, u32 psn) { qp->s_last_psn = psn; } /* * Generate a SWQE completion. * This is similar to qib_send_complete but has to check to be sure * that the SGEs are not being referenced if the SWQE is being resent. */ static struct qib_swqe *do_rc_completion(struct qib_qp *qp, struct qib_swqe *wqe, struct qib_ibport *ibp) { struct ib_wc wc; unsigned i; /* * Don't decrement refcount and don't generate a * completion if the SWQE is being resent until the send * is finished. */ if (qib_cmp24(wqe->lpsn, qp->s_sending_psn) < 0 || qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) > 0) { for (i = 0; i < wqe->wr.num_sge; i++) { struct qib_sge *sge = &wqe->sg_list[i]; atomic_dec(&sge->mr->refcount); } /* Post a send completion queue entry if requested. */ if (!(qp->s_flags & QIB_S_SIGNAL_REQ_WR) || (wqe->wr.send_flags & IB_SEND_SIGNALED)) { memset(&wc, 0, sizeof wc); wc.wr_id = wqe->wr.wr_id; wc.status = IB_WC_SUCCESS; wc.opcode = ib_qib_wc_opcode[wqe->wr.opcode]; wc.byte_len = wqe->length; wc.qp = &qp->ibqp; qib_cq_enter(to_icq(qp->ibqp.send_cq), &wc, 0); } if (++qp->s_last >= qp->s_size) qp->s_last = 0; } else ibp->n_rc_delayed_comp++; qp->s_retry = qp->s_retry_cnt; update_last_psn(qp, wqe->lpsn); /* * If we are completing a request which is in the process of * being resent, we can stop resending it since we know the * responder has already seen it. 
*/ if (qp->s_acked == qp->s_cur) { if (++qp->s_cur >= qp->s_size) qp->s_cur = 0; qp->s_acked = qp->s_cur; wqe = get_swqe_ptr(qp, qp->s_cur); if (qp->s_acked != qp->s_tail) { qp->s_state = OP(SEND_LAST); qp->s_psn = wqe->psn; } } else { if (++qp->s_acked >= qp->s_size) qp->s_acked = 0; if (qp->state == IB_QPS_SQD && qp->s_acked == qp->s_cur) qp->s_draining = 0; wqe = get_swqe_ptr(qp, qp->s_acked); } return wqe; } /** * do_rc_ack - process an incoming RC ACK * @qp: the QP the ACK came in on * @psn: the packet sequence number of the ACK * @opcode: the opcode of the request that resulted in the ACK * * This is called from qib_rc_rcv_resp() to process an incoming RC ACK * for the given QP. * Called at interrupt level with the QP s_lock held. * Returns 1 if OK, 0 if current operation should be aborted (NAK). */ static int do_rc_ack(struct qib_qp *qp, u32 aeth, u32 psn, int opcode, u64 val, struct qib_ctxtdata *rcd) { struct qib_ibport *ibp; enum ib_wc_status status; struct qib_swqe *wqe; int ret = 0; u32 ack_psn; int diff; /* Remove QP from retry timer */ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); del_timer(&qp->s_timer); } /* * Note that NAKs implicitly ACK outstanding SEND and RDMA write * requests and implicitly NAK RDMA read and atomic requests issued * before the NAK'ed request. The MSN won't include the NAK'ed * request but will include an ACK'ed request(s). */ ack_psn = psn; if (aeth >> 29) ack_psn--; wqe = get_swqe_ptr(qp, qp->s_acked); ibp = to_iport(qp->ibqp.device, qp->port_num); /* * The MSN might be for a later WQE than the PSN indicates so * only complete WQEs that the PSN finishes. */ while ((diff = qib_cmp24(ack_psn, wqe->lpsn)) >= 0) { /* * RDMA_READ_RESPONSE_ONLY is a special case since * we want to generate completion events for everything * before the RDMA read, copy the data, then generate * the completion for the read. 
*/ if (wqe->wr.opcode == IB_WR_RDMA_READ && opcode == OP(RDMA_READ_RESPONSE_ONLY) && diff == 0) { ret = 1; goto bail; } /* * If this request is a RDMA read or atomic, and the ACK is * for a later operation, this ACK NAKs the RDMA read or * atomic. In other words, only a RDMA_READ_LAST or ONLY * can ACK a RDMA read and likewise for atomic ops. Note * that the NAK case can only happen if relaxed ordering is * used and requests are sent after an RDMA read or atomic * is sent but before the response is received. */ if ((wqe->wr.opcode == IB_WR_RDMA_READ && (opcode != OP(RDMA_READ_RESPONSE_LAST) || diff != 0)) || ((wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) && (opcode != OP(ATOMIC_ACKNOWLEDGE) || diff != 0))) { /* Retry this request. */ if (!(qp->r_flags & QIB_R_RDMAR_SEQ)) { qp->r_flags |= QIB_R_RDMAR_SEQ; qib_restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { qp->r_flags |= QIB_R_RSP_SEND; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } } /* * No need to process the ACK/NAK since we are * restarting an earlier request. 
*/ goto bail; } if (wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) { u64 *vaddr = wqe->sg_list[0].vaddr; *vaddr = val; } if (qp->s_num_rd_atomic && (wqe->wr.opcode == IB_WR_RDMA_READ || wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD)) { qp->s_num_rd_atomic--; /* Restart sending task if fence is complete */ if ((qp->s_flags & QIB_S_WAIT_FENCE) && !qp->s_num_rd_atomic) { qp->s_flags &= ~(QIB_S_WAIT_FENCE | QIB_S_WAIT_ACK); qib_schedule_send(qp); } else if (qp->s_flags & QIB_S_WAIT_RDMAR) { qp->s_flags &= ~(QIB_S_WAIT_RDMAR | QIB_S_WAIT_ACK); qib_schedule_send(qp); } } wqe = do_rc_completion(qp, wqe, ibp); if (qp->s_acked == qp->s_tail) break; } switch (aeth >> 29) { case 0: /* ACK */ ibp->n_rc_acks++; if (qp->s_acked != qp->s_tail) { /* * We are expecting more ACKs so * reset the retransmit timer. */ start_timer(qp); /* * We can stop resending the earlier packets and * continue with the next packet the receiver wants. */ if (qib_cmp24(qp->s_psn, psn) <= 0) reset_psn(qp, psn + 1); } else if (qib_cmp24(qp->s_psn, psn) <= 0) { qp->s_state = OP(SEND_LAST); qp->s_psn = psn + 1; } if (qp->s_flags & QIB_S_WAIT_ACK) { qp->s_flags &= ~QIB_S_WAIT_ACK; qib_schedule_send(qp); } qib_get_credit(qp, aeth); qp->s_rnr_retry = qp->s_rnr_retry_cnt; qp->s_retry = qp->s_retry_cnt; update_last_psn(qp, psn); ret = 1; goto bail; case 1: /* RNR NAK */ ibp->n_rnr_naks++; if (qp->s_acked == qp->s_tail) goto bail; if (qp->s_flags & QIB_S_WAIT_RNR) goto bail; if (qp->s_rnr_retry == 0) { status = IB_WC_RNR_RETRY_EXC_ERR; goto class_b; } if (qp->s_rnr_retry_cnt < 7) qp->s_rnr_retry--; /* The last valid PSN is the previous PSN. 
*/ update_last_psn(qp, psn - 1); ibp->n_rc_resends += (qp->s_psn - psn) & QIB_PSN_MASK; reset_psn(qp, psn); qp->s_flags &= ~(QIB_S_WAIT_SSN_CREDIT | QIB_S_WAIT_ACK); qp->s_flags |= QIB_S_WAIT_RNR; qp->s_timer.function = qib_rc_rnr_retry; qp->s_timer.expires = jiffies + usecs_to_jiffies( ib_qib_rnr_table[(aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK]); add_timer(&qp->s_timer); goto bail; case 3: /* NAK */ if (qp->s_acked == qp->s_tail) goto bail; /* The last valid PSN is the previous PSN. */ update_last_psn(qp, psn - 1); switch ((aeth >> QIB_AETH_CREDIT_SHIFT) & QIB_AETH_CREDIT_MASK) { case 0: /* PSN sequence error */ ibp->n_seq_naks++; /* * Back up to the responder's expected PSN. * Note that we might get a NAK in the middle of an * RDMA READ response which terminates the RDMA * READ. */ qib_restart_rc(qp, psn, 0); qib_schedule_send(qp); break; case 1: /* Invalid Request */ status = IB_WC_REM_INV_REQ_ERR; ibp->n_other_naks++; goto class_b; case 2: /* Remote Access Error */ status = IB_WC_REM_ACCESS_ERR; ibp->n_other_naks++; goto class_b; case 3: /* Remote Operation Error */ status = IB_WC_REM_OP_ERR; ibp->n_other_naks++; class_b: if (qp->s_last == qp->s_acked) { qib_send_complete(qp, wqe, status); qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); } break; default: /* Ignore other reserved NAK error codes */ goto reserved; } qp->s_retry = qp->s_retry_cnt; qp->s_rnr_retry = qp->s_rnr_retry_cnt; goto bail; default: /* 2: reserved */ reserved: /* Ignore reserved NAK codes. */ goto bail; } bail: return ret; } /* * We have seen an out of sequence RDMA read middle or last packet. * This ACKs SENDs and RDMA writes up to the first RDMA read or atomic SWQE. 
*/ static void rdma_seq_err(struct qib_qp *qp, struct qib_ibport *ibp, u32 psn, struct qib_ctxtdata *rcd) { struct qib_swqe *wqe; /* Remove QP from retry timer */ if (qp->s_flags & (QIB_S_TIMER | QIB_S_WAIT_RNR)) { qp->s_flags &= ~(QIB_S_TIMER | QIB_S_WAIT_RNR); del_timer(&qp->s_timer); } wqe = get_swqe_ptr(qp, qp->s_acked); while (qib_cmp24(psn, wqe->lpsn) > 0) { if (wqe->wr.opcode == IB_WR_RDMA_READ || wqe->wr.opcode == IB_WR_ATOMIC_CMP_AND_SWP || wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) break; wqe = do_rc_completion(qp, wqe, ibp); } ibp->n_rdma_seq++; qp->r_flags |= QIB_R_RDMAR_SEQ; qib_restart_rc(qp, qp->s_last_psn + 1, 0); if (list_empty(&qp->rspwait)) { qp->r_flags |= QIB_R_RSP_SEND; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } } /** * qib_rc_rcv_resp - process an incoming RC response packet * @ibp: the port this packet came in on * @ohdr: the other headers for this packet * @data: the packet data * @tlen: the packet length * @qp: the QP for this packet * @opcode: the opcode for this packet * @psn: the packet sequence number for this packet * @hdrsize: the header length * @pmtu: the path MTU * * This is called from qib_rc_rcv() to process an incoming RC response * packet for the given QP. * Called at interrupt level. */ static void qib_rc_rcv_resp(struct qib_ibport *ibp, struct qib_other_headers *ohdr, void *data, u32 tlen, struct qib_qp *qp, u32 opcode, u32 psn, u32 hdrsize, u32 pmtu, struct qib_ctxtdata *rcd) { struct qib_swqe *wqe; struct qib_pportdata *ppd = ppd_from_ibp(ibp); enum ib_wc_status status; unsigned long flags; int diff; u32 pad; u32 aeth; u64 val; if (opcode != OP(RDMA_READ_RESPONSE_MIDDLE)) { /* * If ACK'd PSN on SDMA busy list try to make progress to * reclaim SDMA credits. */ if ((qib_cmp24(psn, qp->s_sending_psn) >= 0) && (qib_cmp24(qp->s_sending_psn, qp->s_sending_hpsn) <= 0)) { /* * If send tasklet not running attempt to progress * SDMA queue. 
*/ if (!(qp->s_flags & QIB_S_BUSY)) { /* Acquire SDMA Lock */ spin_lock_irqsave(&ppd->sdma_lock, flags); /* Invoke sdma make progress */ qib_sdma_make_progress(ppd); /* Release SDMA Lock */ spin_unlock_irqrestore(&ppd->sdma_lock, flags); } } } spin_lock_irqsave(&qp->s_lock, flags); if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) goto ack_done; /* Ignore invalid responses. */ if (qib_cmp24(psn, qp->s_next_psn) >= 0) goto ack_done; /* Ignore duplicate responses. */ diff = qib_cmp24(psn, qp->s_last_psn); if (unlikely(diff <= 0)) { /* Update credits for "ghost" ACKs */ if (diff == 0 && opcode == OP(ACKNOWLEDGE)) { aeth = be32_to_cpu(ohdr->u.aeth); if ((aeth >> 29) == 0) qib_get_credit(qp, aeth); } goto ack_done; } /* * Skip everything other than the PSN we expect, if we are waiting * for a reply to a restarted RDMA read or atomic op. */ if (qp->r_flags & QIB_R_RDMAR_SEQ) { if (qib_cmp24(psn, qp->s_last_psn + 1) != 0) goto ack_done; qp->r_flags &= ~QIB_R_RDMAR_SEQ; } if (unlikely(qp->s_acked == qp->s_tail)) goto ack_done; wqe = get_swqe_ptr(qp, qp->s_acked); status = IB_WC_SUCCESS; switch (opcode) { case OP(ACKNOWLEDGE): case OP(ATOMIC_ACKNOWLEDGE): case OP(RDMA_READ_RESPONSE_FIRST): aeth = be32_to_cpu(ohdr->u.aeth); if (opcode == OP(ATOMIC_ACKNOWLEDGE)) { __be32 *p = ohdr->u.at.atomic_ack_eth; val = ((u64) be32_to_cpu(p[0]) << 32) | be32_to_cpu(p[1]); } else val = 0; if (!do_rc_ack(qp, aeth, psn, opcode, val, rcd) || opcode != OP(RDMA_READ_RESPONSE_FIRST)) goto ack_done; hdrsize += 4; wqe = get_swqe_ptr(qp, qp->s_acked); if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) goto ack_op_err; /* * If this is a response to a resent RDMA read, we * have to be careful to copy the data to the right * location. 
*/ qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, wqe, psn, pmtu); goto read_middle; case OP(RDMA_READ_RESPONSE_MIDDLE): /* no AETH, no ACK */ if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) goto ack_seq_err; if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) goto ack_op_err; read_middle: if (unlikely(tlen != (hdrsize + pmtu + 4))) goto ack_len_err; if (unlikely(pmtu >= qp->s_rdma_read_len)) goto ack_len_err; /* * We got a response so update the timeout. * 4.096 usec. * (1 << qp->timeout) */ qp->s_flags |= QIB_S_TIMER; mod_timer(&qp->s_timer, jiffies + qp->timeout_jiffies); if (qp->s_flags & QIB_S_WAIT_ACK) { qp->s_flags &= ~QIB_S_WAIT_ACK; qib_schedule_send(qp); } if (opcode == OP(RDMA_READ_RESPONSE_MIDDLE)) qp->s_retry = qp->s_retry_cnt; /* * Update the RDMA receive state but do the copy w/o * holding the locks and blocking interrupts. */ qp->s_rdma_read_len -= pmtu; update_last_psn(qp, psn); spin_unlock_irqrestore(&qp->s_lock, flags); qib_copy_sge(&qp->s_rdma_read_sge, data, pmtu, 0); goto bail; case OP(RDMA_READ_RESPONSE_ONLY): aeth = be32_to_cpu(ohdr->u.aeth); if (!do_rc_ack(qp, aeth, psn, opcode, 0, rcd)) goto ack_done; /* Get the number of bytes the message was padded by. */ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; /* * Check that the data size is >= 0 && <= pmtu. * Remember to account for the AETH header (4) and * ICRC (4). */ if (unlikely(tlen < (hdrsize + pad + 8))) goto ack_len_err; /* * If this is a response to a resent RDMA read, we * have to be careful to copy the data to the right * location. */ wqe = get_swqe_ptr(qp, qp->s_acked); qp->s_rdma_read_len = restart_sge(&qp->s_rdma_read_sge, wqe, psn, pmtu); goto read_last; case OP(RDMA_READ_RESPONSE_LAST): /* ACKs READ req. */ if (unlikely(qib_cmp24(psn, qp->s_last_psn + 1))) goto ack_seq_err; if (unlikely(wqe->wr.opcode != IB_WR_RDMA_READ)) goto ack_op_err; /* Get the number of bytes the message was padded by. 
*/ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; /* * Check that the data size is >= 1 && <= pmtu. * Remember to account for the AETH header (4) and * ICRC (4). */ if (unlikely(tlen <= (hdrsize + pad + 8))) goto ack_len_err; read_last: tlen -= hdrsize + pad + 8; if (unlikely(tlen != qp->s_rdma_read_len)) goto ack_len_err; aeth = be32_to_cpu(ohdr->u.aeth); qib_copy_sge(&qp->s_rdma_read_sge, data, tlen, 0); WARN_ON(qp->s_rdma_read_sge.num_sge); (void) do_rc_ack(qp, aeth, psn, OP(RDMA_READ_RESPONSE_LAST), 0, rcd); goto ack_done; } ack_op_err: status = IB_WC_LOC_QP_OP_ERR; goto ack_err; ack_seq_err: rdma_seq_err(qp, ibp, psn, rcd); goto ack_done; ack_len_err: status = IB_WC_LOC_LEN_ERR; ack_err: if (qp->s_last == qp->s_acked) { qib_send_complete(qp, wqe, status); qib_error_qp(qp, IB_WC_WR_FLUSH_ERR); } ack_done: spin_unlock_irqrestore(&qp->s_lock, flags); bail: return; } /** * qib_rc_rcv_error - process an incoming duplicate or error RC packet * @ohdr: the other headers for this packet * @data: the packet data * @qp: the QP for this packet * @opcode: the opcode for this packet * @psn: the packet sequence number for this packet * @diff: the difference between the PSN and the expected PSN * * This is called from qib_rc_rcv() to process an unexpected * incoming RC packet for the given QP. * Called at interrupt level. * Return 1 if no more processing is needed; otherwise return 0 to * schedule a response to be sent. */ static int qib_rc_rcv_error(struct qib_other_headers *ohdr, void *data, struct qib_qp *qp, u32 opcode, u32 psn, int diff, struct qib_ctxtdata *rcd) { struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num); struct qib_ack_entry *e; unsigned long flags; u8 i, prev; int old_req; if (diff > 0) { /* * Packet sequence error. * A NAK will ACK earlier sends and RDMA writes. * Don't queue the NAK if we already sent one. */ if (!qp->r_nak_state) { ibp->n_rc_seqnak++; qp->r_nak_state = IB_NAK_PSN_ERROR; /* Use the expected PSN. 
*/ qp->r_ack_psn = qp->r_psn; /* * Wait to send the sequence NAK until all packets * in the receive queue have been processed. * Otherwise, we end up propagating congestion. */ if (list_empty(&qp->rspwait)) { qp->r_flags |= QIB_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } } goto done; } /* * Handle a duplicate request. Don't re-execute SEND, RDMA * write or atomic op. Don't NAK errors, just silently drop * the duplicate request. Note that r_sge, r_len, and * r_rcv_len may be in use so don't modify them. * * We are supposed to ACK the earliest duplicate PSN but we * can coalesce an outstanding duplicate ACK. We have to * send the earliest so that RDMA reads can be restarted at * the requester's expected PSN. * * First, find where this duplicate PSN falls within the * ACKs previously sent. * old_req is true if there is an older response that is scheduled * to be sent before sending this one. */ e = NULL; old_req = 1; ibp->n_rc_dupreq++; spin_lock_irqsave(&qp->s_lock, flags); for (i = qp->r_head_ack_queue; ; i = prev) { if (i == qp->s_tail_ack_queue) old_req = 0; if (i) prev = i - 1; else prev = QIB_MAX_RDMA_ATOMIC; if (prev == qp->r_head_ack_queue) { e = NULL; break; } e = &qp->s_ack_queue[prev]; if (!e->opcode) { e = NULL; break; } if (qib_cmp24(psn, e->psn) >= 0) { if (prev == qp->s_tail_ack_queue && qib_cmp24(psn, e->lpsn) <= 0) old_req = 0; break; } } switch (opcode) { case OP(RDMA_READ_REQUEST): { struct ib_reth *reth; u32 offset; u32 len; /* * If we didn't find the RDMA read request in the ack queue, * we can ignore this request. */ if (!e || e->opcode != OP(RDMA_READ_REQUEST)) goto unlock_done; /* RETH comes after BTH */ reth = &ohdr->u.rc.reth; /* * Address range must be a subset of the original * request and start on pmtu boundaries. * We reuse the old ack_queue slot since the requester * should not back up and request an earlier PSN for the * same request. 
*/ offset = ((psn - e->psn) & QIB_PSN_MASK) * qp->pmtu; len = be32_to_cpu(reth->length); if (unlikely(offset + len != e->rdma_sge.sge_length)) goto unlock_done; if (e->rdma_sge.mr) { atomic_dec(&e->rdma_sge.mr->refcount); e->rdma_sge.mr = NULL; } if (len != 0) { u32 rkey = be32_to_cpu(reth->rkey); u64 vaddr = be64_to_cpu(reth->vaddr); int ok; ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto unlock_done; } else { e->rdma_sge.vaddr = NULL; e->rdma_sge.length = 0; e->rdma_sge.sge_length = 0; } e->psn = psn; if (old_req) goto unlock_done; qp->s_tail_ack_queue = prev; break; } case OP(COMPARE_SWAP): case OP(FETCH_ADD): { /* * If we didn't find the atomic request in the ack queue * or the send tasklet is already backed up to send an * earlier entry, we can ignore this request. */ if (!e || e->opcode != (u8) opcode || old_req) goto unlock_done; qp->s_tail_ack_queue = prev; break; } default: /* * Ignore this operation if it doesn't request an ACK * or an earlier RDMA read or atomic is going to be resent. */ if (!(psn & IB_BTH_REQ_ACK) || old_req) goto unlock_done; /* * Resend the most recent ACK if this request is * after all the previous RDMA reads and atomics. */ if (i == qp->r_head_ack_queue) { spin_unlock_irqrestore(&qp->s_lock, flags); qp->r_nak_state = 0; qp->r_ack_psn = qp->r_psn - 1; goto send_ack; } /* * Try to send a simple ACK to work around a Mellanox bug * which doesn't accept a RDMA read response or atomic * response as an ACK for earlier SENDs or RDMA writes. */ if (!(qp->s_flags & QIB_S_RESP_PENDING)) { spin_unlock_irqrestore(&qp->s_lock, flags); qp->r_nak_state = 0; qp->r_ack_psn = qp->s_ack_queue[i].psn - 1; goto send_ack; } /* * Resend the RDMA read or atomic op which * ACKs this duplicate request. 
*/ qp->s_tail_ack_queue = i; break; } qp->s_ack_state = OP(ACKNOWLEDGE); qp->s_flags |= QIB_S_RESP_PENDING; qp->r_nak_state = 0; qib_schedule_send(qp); unlock_done: spin_unlock_irqrestore(&qp->s_lock, flags); done: return 1; send_ack: return 0; } void qib_rc_error(struct qib_qp *qp, enum ib_wc_status err) { unsigned long flags; int lastwqe; spin_lock_irqsave(&qp->s_lock, flags); lastwqe = qib_error_qp(qp, err); spin_unlock_irqrestore(&qp->s_lock, flags); if (lastwqe) { struct ib_event ev; ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_QP_LAST_WQE_REACHED; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } } static inline void qib_update_ack_queue(struct qib_qp *qp, unsigned n) { unsigned next; next = n + 1; if (next > QIB_MAX_RDMA_ATOMIC) next = 0; qp->s_tail_ack_queue = next; qp->s_ack_state = OP(ACKNOWLEDGE); } /** * qib_rc_rcv - process an incoming RC packet * @rcd: the context pointer * @hdr: the header of this packet * @has_grh: true if the header has a GRH * @data: the packet data * @tlen: the packet length * @qp: the QP for this packet * * This is called from qib_qp_rcv() to process an incoming RC packet * for the given QP. * Called at interrupt level. */ void qib_rc_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) { struct qib_ibport *ibp = &rcd->ppd->ibport_data; struct qib_other_headers *ohdr; u32 opcode; u32 hdrsize; u32 psn; u32 pad; struct ib_wc wc; u32 pmtu = qp->pmtu; int diff; struct ib_reth *reth; unsigned long flags; int ret; /* Check for GRH */ if (!has_grh) { ohdr = &hdr->u.oth; hdrsize = 8 + 12; /* LRH + BTH */ } else { ohdr = &hdr->u.l.oth; hdrsize = 8 + 40 + 12; /* LRH + GRH + BTH */ } opcode = be32_to_cpu(ohdr->bth[0]); if (qib_ruc_check_hdr(ibp, hdr, has_grh, qp, opcode)) return; psn = be32_to_cpu(ohdr->bth[2]); opcode >>= 24; /* * Process responses (ACKs) before anything else. 
Note that the * packet sequence number will be for something in the send work * queue rather than the expected receive packet sequence number. * In other words, this QP is the requester. */ if (opcode >= OP(RDMA_READ_RESPONSE_FIRST) && opcode <= OP(ATOMIC_ACKNOWLEDGE)) { qib_rc_rcv_resp(ibp, ohdr, data, tlen, qp, opcode, psn, hdrsize, pmtu, rcd); return; } /* Compute 24 bits worth of difference. */ diff = qib_cmp24(psn, qp->r_psn); if (unlikely(diff)) { if (qib_rc_rcv_error(ohdr, data, qp, opcode, psn, diff, rcd)) return; goto send_ack; } /* Check for opcode sequence errors. */ switch (qp->r_state) { case OP(SEND_FIRST): case OP(SEND_MIDDLE): if (opcode == OP(SEND_MIDDLE) || opcode == OP(SEND_LAST) || opcode == OP(SEND_LAST_WITH_IMMEDIATE)) break; goto nack_inv; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_MIDDLE): if (opcode == OP(RDMA_WRITE_MIDDLE) || opcode == OP(RDMA_WRITE_LAST) || opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) break; goto nack_inv; default: if (opcode == OP(SEND_MIDDLE) || opcode == OP(SEND_LAST) || opcode == OP(SEND_LAST_WITH_IMMEDIATE) || opcode == OP(RDMA_WRITE_MIDDLE) || opcode == OP(RDMA_WRITE_LAST) || opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE)) goto nack_inv; /* * Note that it is up to the requester to not send a new * RDMA read or atomic operation before receiving an ACK * for the previous operation. */ break; } if (qp->state == IB_QPS_RTR && !(qp->r_flags & QIB_R_COMM_EST)) { qp->r_flags |= QIB_R_COMM_EST; if (qp->ibqp.event_handler) { struct ib_event ev; ev.device = qp->ibqp.device; ev.element.qp = &qp->ibqp; ev.event = IB_EVENT_COMM_EST; qp->ibqp.event_handler(&ev, qp->ibqp.qp_context); } } /* OK, process the packet. */ switch (opcode) { case OP(SEND_FIRST): ret = qib_get_rwqe(qp, 0); if (ret < 0) goto nack_op_err; if (!ret) goto rnr_nak; qp->r_rcv_len = 0; /* FALLTHROUGH */ case OP(SEND_MIDDLE): case OP(RDMA_WRITE_MIDDLE): send_middle: /* Check for invalid length PMTU or posted rwqe len. 
*/ if (unlikely(tlen != (hdrsize + pmtu + 4))) goto nack_inv; qp->r_rcv_len += pmtu; if (unlikely(qp->r_rcv_len > qp->r_len)) goto nack_inv; qib_copy_sge(&qp->r_sge, data, pmtu, 1); break; case OP(RDMA_WRITE_LAST_WITH_IMMEDIATE): /* consume RWQE */ ret = qib_get_rwqe(qp, 1); if (ret < 0) goto nack_op_err; if (!ret) goto rnr_nak; goto send_last_imm; case OP(SEND_ONLY): case OP(SEND_ONLY_WITH_IMMEDIATE): ret = qib_get_rwqe(qp, 0); if (ret < 0) goto nack_op_err; if (!ret) goto rnr_nak; qp->r_rcv_len = 0; if (opcode == OP(SEND_ONLY)) goto no_immediate_data; /* FALLTHROUGH for SEND_ONLY_WITH_IMMEDIATE */ case OP(SEND_LAST_WITH_IMMEDIATE): send_last_imm: wc.ex.imm_data = ohdr->u.imm_data; hdrsize += 4; wc.wc_flags = IB_WC_WITH_IMM; goto send_last; case OP(SEND_LAST): case OP(RDMA_WRITE_LAST): no_immediate_data: wc.wc_flags = 0; wc.ex.imm_data = 0; send_last: /* Get the number of bytes the message was padded by. */ pad = (be32_to_cpu(ohdr->bth[0]) >> 20) & 3; /* Check for invalid length. */ /* XXX LAST len should be >= 1 */ if (unlikely(tlen < (hdrsize + pad + 4))) goto nack_inv; /* Don't count the CRC. */ tlen -= (hdrsize + pad + 4); wc.byte_len = tlen + qp->r_rcv_len; if (unlikely(wc.byte_len > qp->r_len)) goto nack_inv; qib_copy_sge(&qp->r_sge, data, tlen, 1); while (qp->r_sge.num_sge) { atomic_dec(&qp->r_sge.sge.mr->refcount); if (--qp->r_sge.num_sge) qp->r_sge.sge = *qp->r_sge.sg_list++; } qp->r_msn++; if (!test_and_clear_bit(QIB_R_WRID_VALID, &qp->r_aflags)) break; wc.wr_id = qp->r_wr_id; wc.status = IB_WC_SUCCESS; if (opcode == OP(RDMA_WRITE_LAST_WITH_IMMEDIATE) || opcode == OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE)) wc.opcode = IB_WC_RECV_RDMA_WITH_IMM; else wc.opcode = IB_WC_RECV; wc.qp = &qp->ibqp; wc.src_qp = qp->remote_qpn; wc.slid = qp->remote_ah_attr.dlid; wc.sl = qp->remote_ah_attr.sl; /* zero fields that are N/A */ wc.vendor_err = 0; wc.pkey_index = 0; wc.dlid_path_bits = 0; wc.port_num = 0; /* Signal completion event if the solicited bit is set. 
*/ qib_cq_enter(to_icq(qp->ibqp.recv_cq), &wc, (ohdr->bth[0] & cpu_to_be32(IB_BTH_SOLICITED)) != 0); break; case OP(RDMA_WRITE_FIRST): case OP(RDMA_WRITE_ONLY): case OP(RDMA_WRITE_ONLY_WITH_IMMEDIATE): if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_WRITE))) goto nack_inv; /* consume RWQE */ reth = &ohdr->u.rc.reth; hdrsize += sizeof(*reth); qp->r_len = be32_to_cpu(reth->length); qp->r_rcv_len = 0; qp->r_sge.sg_list = NULL; if (qp->r_len != 0) { u32 rkey = be32_to_cpu(reth->rkey); u64 vaddr = be64_to_cpu(reth->vaddr); int ok; /* Check rkey & NAK */ ok = qib_rkey_ok(qp, &qp->r_sge.sge, qp->r_len, vaddr, rkey, IB_ACCESS_REMOTE_WRITE); if (unlikely(!ok)) goto nack_acc; qp->r_sge.num_sge = 1; } else { qp->r_sge.num_sge = 0; qp->r_sge.sge.mr = NULL; qp->r_sge.sge.vaddr = NULL; qp->r_sge.sge.length = 0; qp->r_sge.sge.sge_length = 0; } if (opcode == OP(RDMA_WRITE_FIRST)) goto send_middle; else if (opcode == OP(RDMA_WRITE_ONLY)) goto no_immediate_data; ret = qib_get_rwqe(qp, 1); if (ret < 0) goto nack_op_err; if (!ret) goto rnr_nak; wc.ex.imm_data = ohdr->u.rc.imm_data; hdrsize += 4; wc.wc_flags = IB_WC_WITH_IMM; goto send_last; case OP(RDMA_READ_REQUEST): { struct qib_ack_entry *e; u32 len; u8 next; if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_READ))) goto nack_inv; next = qp->r_head_ack_queue + 1; /* s_ack_queue is size QIB_MAX_RDMA_ATOMIC+1 so use > not >= */ if (next > QIB_MAX_RDMA_ATOMIC) next = 0; spin_lock_irqsave(&qp->s_lock, flags); if (unlikely(next == qp->s_tail_ack_queue)) { if (!qp->s_ack_queue[next].sent) goto nack_inv_unlck; qib_update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { atomic_dec(&e->rdma_sge.mr->refcount); e->rdma_sge.mr = NULL; } reth = &ohdr->u.rc.reth; len = be32_to_cpu(reth->length); if (len) { u32 rkey = be32_to_cpu(reth->rkey); u64 vaddr = be64_to_cpu(reth->vaddr); int ok; /* Check rkey & NAK */ ok = qib_rkey_ok(qp, &e->rdma_sge, len, vaddr, 
rkey, IB_ACCESS_REMOTE_READ); if (unlikely(!ok)) goto nack_acc_unlck; /* * Update the next expected PSN. We add 1 later * below, so only add the remainder here. */ if (len > pmtu) qp->r_psn += (len - 1) / pmtu; } else { e->rdma_sge.mr = NULL; e->rdma_sge.vaddr = NULL; e->rdma_sge.length = 0; e->rdma_sge.sge_length = 0; } e->opcode = opcode; e->sent = 0; e->psn = psn; e->lpsn = qp->r_psn; /* * We need to increment the MSN here instead of when we * finish sending the result since a duplicate request would * increment it more than once. */ qp->r_msn++; qp->r_psn++; qp->r_state = opcode; qp->r_nak_state = 0; qp->r_head_ack_queue = next; /* Schedule the send tasklet. */ qp->s_flags |= QIB_S_RESP_PENDING; qib_schedule_send(qp); goto sunlock; } case OP(COMPARE_SWAP): case OP(FETCH_ADD): { struct ib_atomic_eth *ateth; struct qib_ack_entry *e; u64 vaddr; atomic64_t *maddr; u64 sdata; u32 rkey; u8 next; if (unlikely(!(qp->qp_access_flags & IB_ACCESS_REMOTE_ATOMIC))) goto nack_inv; next = qp->r_head_ack_queue + 1; if (next > QIB_MAX_RDMA_ATOMIC) next = 0; spin_lock_irqsave(&qp->s_lock, flags); if (unlikely(next == qp->s_tail_ack_queue)) { if (!qp->s_ack_queue[next].sent) goto nack_inv_unlck; qib_update_ack_queue(qp, next); } e = &qp->s_ack_queue[qp->r_head_ack_queue]; if (e->opcode == OP(RDMA_READ_REQUEST) && e->rdma_sge.mr) { atomic_dec(&e->rdma_sge.mr->refcount); e->rdma_sge.mr = NULL; } ateth = &ohdr->u.atomic_eth; vaddr = ((u64) be32_to_cpu(ateth->vaddr[0]) << 32) | be32_to_cpu(ateth->vaddr[1]); if (unlikely(vaddr & (sizeof(u64) - 1))) goto nack_inv_unlck; rkey = be32_to_cpu(ateth->rkey); /* Check rkey & NAK */ if (unlikely(!qib_rkey_ok(qp, &qp->r_sge.sge, sizeof(u64), vaddr, rkey, IB_ACCESS_REMOTE_ATOMIC))) goto nack_acc_unlck; /* Perform atomic OP and save result. */ maddr = (atomic64_t *) qp->r_sge.sge.vaddr; sdata = be64_to_cpu(ateth->swap_data); e->atomic_data = (opcode == OP(FETCH_ADD)) ? 
(u64) atomic64_add_return(sdata, maddr) - sdata : (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr, be64_to_cpu(ateth->compare_data), sdata); atomic_dec(&qp->r_sge.sge.mr->refcount); qp->r_sge.num_sge = 0; e->opcode = opcode; e->sent = 0; e->psn = psn; e->lpsn = psn; qp->r_msn++; qp->r_psn++; qp->r_state = opcode; qp->r_nak_state = 0; qp->r_head_ack_queue = next; /* Schedule the send tasklet. */ qp->s_flags |= QIB_S_RESP_PENDING; qib_schedule_send(qp); goto sunlock; } default: /* NAK unknown opcodes. */ goto nack_inv; } qp->r_psn++; qp->r_state = opcode; qp->r_ack_psn = psn; qp->r_nak_state = 0; /* Send an ACK if requested or required. */ if (psn & (1 << 31)) goto send_ack; return; rnr_nak: qp->r_nak_state = IB_RNR_NAK | qp->r_min_rnr_timer; qp->r_ack_psn = qp->r_psn; /* Queue RNR NAK for later */ if (list_empty(&qp->rspwait)) { qp->r_flags |= QIB_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } return; nack_op_err: qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qp->r_nak_state = IB_NAK_REMOTE_OPERATIONAL_ERROR; qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ if (list_empty(&qp->rspwait)) { qp->r_flags |= QIB_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } return; nack_inv_unlck: spin_unlock_irqrestore(&qp->s_lock, flags); nack_inv: qib_rc_error(qp, IB_WC_LOC_QP_OP_ERR); qp->r_nak_state = IB_NAK_INVALID_REQUEST; qp->r_ack_psn = qp->r_psn; /* Queue NAK for later */ if (list_empty(&qp->rspwait)) { qp->r_flags |= QIB_R_RSP_NAK; atomic_inc(&qp->refcount); list_add_tail(&qp->rspwait, &rcd->qp_wait_list); } return; nack_acc_unlck: spin_unlock_irqrestore(&qp->s_lock, flags); nack_acc: qib_rc_error(qp, IB_WC_LOC_PROT_ERR); qp->r_nak_state = IB_NAK_REMOTE_ACCESS_ERROR; qp->r_ack_psn = qp->r_psn; send_ack: qib_send_rc_ack(qp); return; sunlock: spin_unlock_irqrestore(&qp->s_lock, flags); }
gpl-2.0
touchpro/android_kernel_lge_msm8226
arch/x86/kernel/trampoline.c
4809
1237
#include <linux/io.h> #include <linux/memblock.h> #include <asm/trampoline.h> #include <asm/cacheflush.h> #include <asm/pgtable.h> unsigned char *x86_trampoline_base; void __init setup_trampolines(void) { phys_addr_t mem; size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start); /* Has to be in very low memory so we can execute real-mode AP code. */ mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE); if (!mem) panic("Cannot allocate trampoline\n"); x86_trampoline_base = __va(mem); memblock_reserve(mem, size); printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n", x86_trampoline_base, (unsigned long long)mem, size); memcpy(x86_trampoline_base, x86_trampoline_start, size); } /* * setup_trampolines() gets called very early, to guarantee the * availability of low memory. This is before the proper kernel page * tables are set up, so we cannot set page permissions in that * function. Thus, we use an arch_initcall instead. */ static int __init configure_trampolines(void) { size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start); set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT); return 0; } arch_initcall(configure_trampolines);
gpl-2.0
PsychoGame/omnirom_kernel_lge_msm8974
drivers/isdn/gigaset/usb-gigaset.c
4809
25269
/* * USB driver for Gigaset 307x directly or using M105 Data. * * Copyright (c) 2001 by Stefan Eilers * and Hansjoerg Lipp <hjlipp@web.de>. * * This driver was derived from the USB skeleton driver by * Greg Kroah-Hartman <greg@kroah.com> * * ===================================================================== * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * ===================================================================== */ #include "gigaset.h" #include <linux/usb.h> #include <linux/module.h> #include <linux/moduleparam.h> /* Version Information */ #define DRIVER_AUTHOR "Hansjoerg Lipp <hjlipp@web.de>, Stefan Eilers" #define DRIVER_DESC "USB Driver for Gigaset 307x using M105" /* Module parameters */ static int startmode = SM_ISDN; static int cidmode = 1; module_param(startmode, int, S_IRUGO); module_param(cidmode, int, S_IRUGO); MODULE_PARM_DESC(startmode, "start in isdn4linux mode"); MODULE_PARM_DESC(cidmode, "Call-ID mode"); #define GIGASET_MINORS 1 #define GIGASET_MINOR 8 #define GIGASET_MODULENAME "usb_gigaset" #define GIGASET_DEVNAME "ttyGU" /* length limit according to Siemens 3070usb-protokoll.doc ch. 2.1 */ #define IF_WRITEBUF 264 /* Values for the Gigaset M105 Data */ #define USB_M105_VENDOR_ID 0x0681 #define USB_M105_PRODUCT_ID 0x0009 /* table of devices that work with this driver */ static const struct usb_device_id gigaset_table[] = { { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, gigaset_table); /* * Control requests (empty fields: 00) * * RT|RQ|VALUE|INDEX|LEN |DATA * In: * C1 08 01 * Get flags (1 byte). Bits: 0=dtr,1=rts,3-7:? * C1 0F ll ll * Get device information/status (llll: 0x200 and 0x40 seen). * Real size: I only saw MIN(llll,0x64). * Contents: seems to be always the same... 
* offset 0x00: Length of this structure (0x64) (len: 1,2,3 bytes) * offset 0x3c: String (16 bit chars): "MCCI USB Serial V2.0" * rest: ? * Out: * 41 11 * Initialize/reset device ? * 41 00 xx 00 * ? (xx=00 or 01; 01 on start, 00 on close) * 41 07 vv mm * Set/clear flags vv=value, mm=mask (see RQ 08) * 41 12 xx * Used before the following configuration requests are issued * (with xx=0x0f). I've seen other values<0xf, though. * 41 01 xx xx * Set baud rate. xxxx=ceil(0x384000/rate)=trunc(0x383fff/rate)+1. * 41 03 ps bb * Set byte size and parity. p: 0x20=even,0x10=odd,0x00=no parity * [ 0x30: m, 0x40: s ] * [s: 0: 1 stop bit; 1: 1.5; 2: 2] * bb: bits/byte (seen 7 and 8) * 41 13 -- -- -- -- 10 00 ww 00 00 00 xx 00 00 00 yy 00 00 00 zz 00 00 00 * ?? * Initialization: 01, 40, 00, 00 * Open device: 00 40, 00, 00 * yy and zz seem to be equal, either 0x00 or 0x0a * (ww,xx) pairs seen: (00,00), (00,40), (01,40), (09,80), (19,80) * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13). * xx is usually 0x00 but was 0x7e before starting data transfer * in unimodem mode. So, this might be an array of characters that * need special treatment ("commit all bufferd data"?), 11=^Q, 13=^S. * * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two * flags per packet. 
*/ /* functions called if a device of this driver is connected/disconnected */ static int gigaset_probe(struct usb_interface *interface, const struct usb_device_id *id); static void gigaset_disconnect(struct usb_interface *interface); /* functions called before/after suspend */ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message); static int gigaset_resume(struct usb_interface *intf); static int gigaset_pre_reset(struct usb_interface *intf); static struct gigaset_driver *driver; /* usb specific object needed to register this driver with the usb subsystem */ static struct usb_driver gigaset_usb_driver = { .name = GIGASET_MODULENAME, .probe = gigaset_probe, .disconnect = gigaset_disconnect, .id_table = gigaset_table, .suspend = gigaset_suspend, .resume = gigaset_resume, .reset_resume = gigaset_resume, .pre_reset = gigaset_pre_reset, .post_reset = gigaset_resume, }; struct usb_cardstate { struct usb_device *udev; /* usb device pointer */ struct usb_interface *interface; /* interface for this device */ int busy; /* bulk output in progress */ /* Output buffer */ unsigned char *bulk_out_buffer; int bulk_out_size; __u8 bulk_out_endpointAddr; struct urb *bulk_out_urb; /* Input buffer */ unsigned char *rcvbuf; int rcvbuf_size; struct urb *read_urb; __u8 int_in_endpointAddr; char bchars[6]; /* for request 0x19 */ }; static inline unsigned tiocm_to_gigaset(unsigned state) { return ((state & TIOCM_DTR) ? 1 : 0) | ((state & TIOCM_RTS) ? 2 : 0); } static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsigned new_state) { struct usb_device *udev = cs->hw.usb->udev; unsigned mask, val; int r; mask = tiocm_to_gigaset(old_state ^ new_state); val = tiocm_to_gigaset(new_state); gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask); r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41, (val & 0xff) | ((mask & 0xff) << 8), 0, NULL, 0, 2000 /* timeout? 
*/); if (r < 0) return r; return 0; } /* * Set M105 configuration value * using undocumented device commands reverse engineered from USB traces * of the Siemens Windows driver */ static int set_value(struct cardstate *cs, u8 req, u16 val) { struct usb_device *udev = cs->hw.usb->udev; int r, r2; gig_dbg(DEBUG_USBREQ, "request %02x (%04x)", (unsigned)req, (unsigned)val); r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x12, 0x41, 0xf /*?*/, 0, NULL, 0, 2000 /*?*/); /* no idea what this does */ if (r < 0) { dev_err(&udev->dev, "error %d on request 0x12\n", -r); return r; } r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), req, 0x41, val, 0, NULL, 0, 2000 /*?*/); if (r < 0) dev_err(&udev->dev, "error %d on request 0x%02x\n", -r, (unsigned)req); r2 = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41, 0, 0, cs->hw.usb->bchars, 6, 2000 /*?*/); if (r2 < 0) dev_err(&udev->dev, "error %d on request 0x19\n", -r2); return r < 0 ? r : (r2 < 0 ? r2 : 0); } /* * set the baud rate on the internal serial adapter * using the undocumented parameter setting command */ static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag) { u16 val; u32 rate; cflag &= CBAUD; switch (cflag) { case B300: rate = 300; break; case B600: rate = 600; break; case B1200: rate = 1200; break; case B2400: rate = 2400; break; case B4800: rate = 4800; break; case B9600: rate = 9600; break; case B19200: rate = 19200; break; case B38400: rate = 38400; break; case B57600: rate = 57600; break; case B115200: rate = 115200; break; default: rate = 9600; dev_err(cs->dev, "unsupported baudrate request 0x%x," " using default of B9600\n", cflag); } val = 0x383fff / rate + 1; return set_value(cs, 1, val); } /* * set the line format on the internal serial adapter * using the undocumented parameter setting command */ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag) { u16 val = 0; /* set the parity */ if (cflag & PARENB) val |= (cflag & PARODD) ? 
0x10 : 0x20; /* set the number of data bits */ switch (cflag & CSIZE) { case CS5: val |= 5 << 8; break; case CS6: val |= 6 << 8; break; case CS7: val |= 7 << 8; break; case CS8: val |= 8 << 8; break; default: dev_err(cs->dev, "CSIZE was not CS5-CS8, using default of 8\n"); val |= 8 << 8; break; } /* set the number of stop bits */ if (cflag & CSTOPB) { if ((cflag & CSIZE) == CS5) val |= 1; /* 1.5 stop bits */ else val |= 2; /* 2 stop bits */ } return set_value(cs, 3, val); } /*============================================================================*/ static int gigaset_init_bchannel(struct bc_state *bcs) { /* nothing to do for M10x */ gigaset_bchannel_up(bcs); return 0; } static int gigaset_close_bchannel(struct bc_state *bcs) { /* nothing to do for M10x */ gigaset_bchannel_down(bcs); return 0; } static int write_modem(struct cardstate *cs); static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb); /* Write tasklet handler: Continue sending current skb, or send command, or * start sending an skb from the send queue. */ static void gigaset_modem_fill(unsigned long data) { struct cardstate *cs = (struct cardstate *) data; struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ struct cmdbuf_t *cb; int again; gig_dbg(DEBUG_OUTPUT, "modem_fill"); if (cs->hw.usb->busy) { gig_dbg(DEBUG_OUTPUT, "modem_fill: busy"); return; } do { again = 0; if (!bcs->tx_skb) { /* no skb is being sent */ cb = cs->cmdbuf; if (cb) { /* commands to send? */ gig_dbg(DEBUG_OUTPUT, "modem_fill: cb"); if (send_cb(cs, cb) < 0) { gig_dbg(DEBUG_OUTPUT, "modem_fill: send_cb failed"); again = 1; /* no callback will be called! */ } } else { /* skbs to send? */ bcs->tx_skb = skb_dequeue(&bcs->squeue); if (bcs->tx_skb) gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)!", (unsigned long) bcs->tx_skb); } } if (bcs->tx_skb) { gig_dbg(DEBUG_OUTPUT, "modem_fill: tx_skb"); if (write_modem(cs) < 0) { gig_dbg(DEBUG_OUTPUT, "modem_fill: write_modem failed"); again = 1; /* no callback will be called! 
*/ } } } while (again); } /* * Interrupt Input URB completion routine */ static void gigaset_read_int_callback(struct urb *urb) { struct cardstate *cs = urb->context; struct inbuf_t *inbuf = cs->inbuf; int status = urb->status; int r; unsigned numbytes; unsigned char *src; unsigned long flags; if (!status) { numbytes = urb->actual_length; if (numbytes) { src = cs->hw.usb->rcvbuf; if (unlikely(*src)) dev_warn(cs->dev, "%s: There was no leading 0, but 0x%02x!\n", __func__, (unsigned) *src); ++src; /* skip leading 0x00 */ --numbytes; if (gigaset_fill_inbuf(inbuf, src, numbytes)) { gig_dbg(DEBUG_INTR, "%s-->BH", __func__); gigaset_schedule_event(inbuf->cs); } } else gig_dbg(DEBUG_INTR, "Received zero block length"); } else { /* The urb might have been killed. */ gig_dbg(DEBUG_ANY, "%s - nonzero status received: %d", __func__, status); if (status == -ENOENT || status == -ESHUTDOWN) /* killed or endpoint shutdown: don't resubmit */ return; } /* resubmit URB */ spin_lock_irqsave(&cs->lock, flags); if (!cs->connected) { spin_unlock_irqrestore(&cs->lock, flags); pr_err("%s: disconnected\n", __func__); return; } r = usb_submit_urb(urb, GFP_ATOMIC); spin_unlock_irqrestore(&cs->lock, flags); if (r) dev_err(cs->dev, "error %d resubmitting URB\n", -r); } /* This callback routine is called when data was transmitted to the device. */ static void gigaset_write_bulk_callback(struct urb *urb) { struct cardstate *cs = urb->context; int status = urb->status; unsigned long flags; switch (status) { case 0: /* normal completion */ break; case -ENOENT: /* killed */ gig_dbg(DEBUG_ANY, "%s: killed", __func__); cs->hw.usb->busy = 0; return; default: dev_err(cs->dev, "bulk transfer failed (status %d)\n", -status); /* That's all we can do. Communication problems are handled by timeouts or network protocols. 
*/ } spin_lock_irqsave(&cs->lock, flags); if (!cs->connected) { pr_err("%s: disconnected\n", __func__); } else { cs->hw.usb->busy = 0; tasklet_schedule(&cs->write_tasklet); } spin_unlock_irqrestore(&cs->lock, flags); } static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb) { struct cmdbuf_t *tcb; unsigned long flags; int count; int status = -ENOENT; struct usb_cardstate *ucs = cs->hw.usb; do { if (!cb->len) { tcb = cb; spin_lock_irqsave(&cs->cmdlock, flags); cs->cmdbytes -= cs->curlen; gig_dbg(DEBUG_OUTPUT, "send_cb: sent %u bytes, %u left", cs->curlen, cs->cmdbytes); cs->cmdbuf = cb = cb->next; if (cb) { cb->prev = NULL; cs->curlen = cb->len; } else { cs->lastcmdbuf = NULL; cs->curlen = 0; } spin_unlock_irqrestore(&cs->cmdlock, flags); if (tcb->wake_tasklet) tasklet_schedule(tcb->wake_tasklet); kfree(tcb); } if (cb) { count = min(cb->len, ucs->bulk_out_size); gig_dbg(DEBUG_OUTPUT, "send_cb: send %d bytes", count); usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, usb_sndbulkpipe(ucs->udev, ucs->bulk_out_endpointAddr & 0x0f), cb->buf + cb->offset, count, gigaset_write_bulk_callback, cs); cb->offset += count; cb->len -= count; ucs->busy = 1; spin_lock_irqsave(&cs->lock, flags); status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV; spin_unlock_irqrestore(&cs->lock, flags); if (status) { ucs->busy = 0; dev_err(cs->dev, "could not submit urb (error %d)\n", -status); cb->len = 0; /* skip urb => remove cb+wakeup in next loop cycle */ } } } while (cb && status); /* next command on error */ return status; } /* Send command to device. */ static int gigaset_write_cmd(struct cardstate *cs, struct cmdbuf_t *cb) { unsigned long flags; gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? 
DEBUG_TRANSCMD : DEBUG_LOCKCMD, "CMD Transmit", cb->len, cb->buf); spin_lock_irqsave(&cs->cmdlock, flags); cb->prev = cs->lastcmdbuf; if (cs->lastcmdbuf) cs->lastcmdbuf->next = cb; else { cs->cmdbuf = cb; cs->curlen = cb->len; } cs->cmdbytes += cb->len; cs->lastcmdbuf = cb; spin_unlock_irqrestore(&cs->cmdlock, flags); spin_lock_irqsave(&cs->lock, flags); if (cs->connected) tasklet_schedule(&cs->write_tasklet); spin_unlock_irqrestore(&cs->lock, flags); return cb->len; } static int gigaset_write_room(struct cardstate *cs) { unsigned bytes; bytes = cs->cmdbytes; return bytes < IF_WRITEBUF ? IF_WRITEBUF - bytes : 0; } static int gigaset_chars_in_buffer(struct cardstate *cs) { return cs->cmdbytes; } /* * set the break characters on the internal serial adapter * using undocumented device commands reverse engineered from USB traces * of the Siemens Windows driver */ static int gigaset_brkchars(struct cardstate *cs, const unsigned char buf[6]) { struct usb_device *udev = cs->hw.usb->udev; gigaset_dbg_buffer(DEBUG_USBREQ, "brkchars", 6, buf); memcpy(cs->hw.usb->bchars, buf, 6); return usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 0x19, 0x41, 0, 0, &buf, 6, 2000); } static int gigaset_freebcshw(struct bc_state *bcs) { /* unused */ return 1; } /* Initialize the b-channel structure */ static int gigaset_initbcshw(struct bc_state *bcs) { /* unused */ bcs->hw.usb = NULL; return 1; } static void gigaset_reinitbcshw(struct bc_state *bcs) { /* nothing to do for M10x */ } static void gigaset_freecshw(struct cardstate *cs) { tasklet_kill(&cs->write_tasklet); kfree(cs->hw.usb); } static int gigaset_initcshw(struct cardstate *cs) { struct usb_cardstate *ucs; cs->hw.usb = ucs = kmalloc(sizeof(struct usb_cardstate), GFP_KERNEL); if (!ucs) { pr_err("out of memory\n"); return 0; } ucs->bchars[0] = 0; ucs->bchars[1] = 0; ucs->bchars[2] = 0; ucs->bchars[3] = 0; ucs->bchars[4] = 0x11; ucs->bchars[5] = 0x13; ucs->bulk_out_buffer = NULL; ucs->bulk_out_urb = NULL; ucs->read_urb = NULL; 
tasklet_init(&cs->write_tasklet, gigaset_modem_fill, (unsigned long) cs); return 1; } /* Send data from current skb to the device. */ static int write_modem(struct cardstate *cs) { int ret = 0; int count; struct bc_state *bcs = &cs->bcs[0]; /* only one channel */ struct usb_cardstate *ucs = cs->hw.usb; unsigned long flags; gig_dbg(DEBUG_OUTPUT, "len: %d...", bcs->tx_skb->len); if (!bcs->tx_skb->len) { dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; return -EINVAL; } /* Copy data to bulk out buffer and transmit data */ count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count); skb_pull(bcs->tx_skb, count); ucs->busy = 1; gig_dbg(DEBUG_OUTPUT, "write_modem: send %d bytes", count); spin_lock_irqsave(&cs->lock, flags); if (cs->connected) { usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, usb_sndbulkpipe(ucs->udev, ucs->bulk_out_endpointAddr & 0x0f), ucs->bulk_out_buffer, count, gigaset_write_bulk_callback, cs); ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC); } else { ret = -ENODEV; } spin_unlock_irqrestore(&cs->lock, flags); if (ret) { dev_err(cs->dev, "could not submit urb (error %d)\n", -ret); ucs->busy = 0; } if (!bcs->tx_skb->len) { /* skb sent completely */ gigaset_skb_sent(bcs, bcs->tx_skb); gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!", (unsigned long) bcs->tx_skb); dev_kfree_skb_any(bcs->tx_skb); bcs->tx_skb = NULL; } return ret; } static int gigaset_probe(struct usb_interface *interface, const struct usb_device_id *id) { int retval; struct usb_device *udev = interface_to_usbdev(interface); struct usb_host_interface *hostif = interface->cur_altsetting; struct cardstate *cs = NULL; struct usb_cardstate *ucs = NULL; struct usb_endpoint_descriptor *endpoint; int buffer_size; gig_dbg(DEBUG_ANY, "%s: Check if device matches ...", __func__); /* See if the device offered us matches what we can accept */ if ((le16_to_cpu(udev->descriptor.idVendor) != USB_M105_VENDOR_ID) || 
(le16_to_cpu(udev->descriptor.idProduct) != USB_M105_PRODUCT_ID)) { gig_dbg(DEBUG_ANY, "device ID (0x%x, 0x%x) not for me - skip", le16_to_cpu(udev->descriptor.idVendor), le16_to_cpu(udev->descriptor.idProduct)); return -ENODEV; } if (hostif->desc.bInterfaceNumber != 0) { gig_dbg(DEBUG_ANY, "interface %d not for me - skip", hostif->desc.bInterfaceNumber); return -ENODEV; } if (hostif->desc.bAlternateSetting != 0) { dev_notice(&udev->dev, "unsupported altsetting %d - skip", hostif->desc.bAlternateSetting); return -ENODEV; } if (hostif->desc.bInterfaceClass != 255) { dev_notice(&udev->dev, "unsupported interface class %d - skip", hostif->desc.bInterfaceClass); return -ENODEV; } dev_info(&udev->dev, "%s: Device matched ... !\n", __func__); /* allocate memory for our device state and initialize it */ cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME); if (!cs) return -ENODEV; ucs = cs->hw.usb; /* save off device structure ptrs for later use */ usb_get_dev(udev); ucs->udev = udev; ucs->interface = interface; cs->dev = &interface->dev; /* save address of controller structure */ usb_set_intfdata(interface, cs); endpoint = &hostif->endpoint[0].desc; buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ucs->bulk_out_size = buffer_size; ucs->bulk_out_endpointAddr = endpoint->bEndpointAddress; ucs->bulk_out_buffer = kmalloc(buffer_size, GFP_KERNEL); if (!ucs->bulk_out_buffer) { dev_err(cs->dev, "Couldn't allocate bulk_out_buffer\n"); retval = -ENOMEM; goto error; } ucs->bulk_out_urb = usb_alloc_urb(0, GFP_KERNEL); if (!ucs->bulk_out_urb) { dev_err(cs->dev, "Couldn't allocate bulk_out_urb\n"); retval = -ENOMEM; goto error; } endpoint = &hostif->endpoint[1].desc; ucs->busy = 0; ucs->read_urb = usb_alloc_urb(0, GFP_KERNEL); if (!ucs->read_urb) { dev_err(cs->dev, "No free urbs available\n"); retval = -ENOMEM; goto error; } buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); ucs->rcvbuf_size = buffer_size; ucs->int_in_endpointAddr = endpoint->bEndpointAddress; 
ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL); if (!ucs->rcvbuf) { dev_err(cs->dev, "Couldn't allocate rcvbuf\n"); retval = -ENOMEM; goto error; } /* Fill the interrupt urb and send it to the core */ usb_fill_int_urb(ucs->read_urb, udev, usb_rcvintpipe(udev, endpoint->bEndpointAddress & 0x0f), ucs->rcvbuf, buffer_size, gigaset_read_int_callback, cs, endpoint->bInterval); retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL); if (retval) { dev_err(cs->dev, "Could not submit URB (error %d)\n", -retval); goto error; } /* tell common part that the device is ready */ if (startmode == SM_LOCKED) cs->mstate = MS_LOCKED; if (!gigaset_start(cs)) { tasklet_kill(&cs->write_tasklet); retval = -ENODEV; goto error; } return 0; error: usb_kill_urb(ucs->read_urb); kfree(ucs->bulk_out_buffer); usb_free_urb(ucs->bulk_out_urb); kfree(ucs->rcvbuf); usb_free_urb(ucs->read_urb); usb_set_intfdata(interface, NULL); ucs->read_urb = ucs->bulk_out_urb = NULL; ucs->rcvbuf = ucs->bulk_out_buffer = NULL; usb_put_dev(ucs->udev); ucs->udev = NULL; ucs->interface = NULL; gigaset_freecs(cs); return retval; } static void gigaset_disconnect(struct usb_interface *interface) { struct cardstate *cs; struct usb_cardstate *ucs; cs = usb_get_intfdata(interface); ucs = cs->hw.usb; dev_info(cs->dev, "disconnecting Gigaset USB adapter\n"); usb_kill_urb(ucs->read_urb); gigaset_stop(cs); usb_set_intfdata(interface, NULL); tasklet_kill(&cs->write_tasklet); usb_kill_urb(ucs->bulk_out_urb); kfree(ucs->bulk_out_buffer); usb_free_urb(ucs->bulk_out_urb); kfree(ucs->rcvbuf); usb_free_urb(ucs->read_urb); ucs->read_urb = ucs->bulk_out_urb = NULL; ucs->rcvbuf = ucs->bulk_out_buffer = NULL; usb_put_dev(ucs->udev); ucs->interface = NULL; ucs->udev = NULL; cs->dev = NULL; gigaset_freecs(cs); } /* gigaset_suspend * This function is called before the USB connection is suspended or reset. 
*/ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) { struct cardstate *cs = usb_get_intfdata(intf); /* stop activity */ cs->connected = 0; /* prevent rescheduling */ usb_kill_urb(cs->hw.usb->read_urb); tasklet_kill(&cs->write_tasklet); usb_kill_urb(cs->hw.usb->bulk_out_urb); gig_dbg(DEBUG_SUSPEND, "suspend complete"); return 0; } /* gigaset_resume * This function is called after the USB connection has been resumed or reset. */ static int gigaset_resume(struct usb_interface *intf) { struct cardstate *cs = usb_get_intfdata(intf); int rc; /* resubmit interrupt URB */ cs->connected = 1; rc = usb_submit_urb(cs->hw.usb->read_urb, GFP_KERNEL); if (rc) { dev_err(cs->dev, "Could not submit read URB (error %d)\n", -rc); return rc; } gig_dbg(DEBUG_SUSPEND, "resume complete"); return 0; } /* gigaset_pre_reset * This function is called before the USB connection is reset. */ static int gigaset_pre_reset(struct usb_interface *intf) { /* same as suspend */ return gigaset_suspend(intf, PMSG_ON); } static const struct gigaset_ops ops = { gigaset_write_cmd, gigaset_write_room, gigaset_chars_in_buffer, gigaset_brkchars, gigaset_init_bchannel, gigaset_close_bchannel, gigaset_initbcshw, gigaset_freebcshw, gigaset_reinitbcshw, gigaset_initcshw, gigaset_freecshw, gigaset_set_modem_ctrl, gigaset_baud_rate, gigaset_set_line_ctrl, gigaset_m10x_send_skb, gigaset_m10x_input, }; /* * This function is called while kernel-module is loaded */ static int __init usb_gigaset_init(void) { int result; /* allocate memory for our driver state and initialize it */ driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, GIGASET_MODULENAME, GIGASET_DEVNAME, &ops, THIS_MODULE); if (driver == NULL) goto error; /* register this driver with the USB subsystem */ result = usb_register(&gigaset_usb_driver); if (result < 0) { pr_err("error %d registering USB driver\n", -result); goto error; } pr_info(DRIVER_DESC "\n"); return 0; error: if (driver) gigaset_freedriver(driver); driver = 
NULL; return -1; } /* * This function is called while unloading the kernel-module */ static void __exit usb_gigaset_exit(void) { int i; gigaset_blockdriver(driver); /* => probe will fail * => no gigaset_start any more */ /* stop all connected devices */ for (i = 0; i < driver->minors; i++) gigaset_shutdown(driver->cs + i); /* from now on, no isdn callback should be possible */ /* deregister this driver with the USB subsystem */ usb_deregister(&gigaset_usb_driver); /* this will call the disconnect-callback */ /* from now on, no disconnect/probe callback should be running */ gigaset_freedriver(driver); driver = NULL; } module_init(usb_gigaset_init); module_exit(usb_gigaset_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
zlatinski/p-android-omap-3.4-new-ion-topic-sync-dma-buf-fence2
drivers/rtc/rtc-lpc32xx.c
4809
10192
/* * Copyright (C) 2010 NXP Semiconductors * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/io.h> /* * Clock and Power control register offsets */ #define LPC32XX_RTC_UCOUNT 0x00 #define LPC32XX_RTC_DCOUNT 0x04 #define LPC32XX_RTC_MATCH0 0x08 #define LPC32XX_RTC_MATCH1 0x0C #define LPC32XX_RTC_CTRL 0x10 #define LPC32XX_RTC_INTSTAT 0x14 #define LPC32XX_RTC_KEY 0x18 #define LPC32XX_RTC_SRAM 0x80 #define LPC32XX_RTC_CTRL_MATCH0 (1 << 0) #define LPC32XX_RTC_CTRL_MATCH1 (1 << 1) #define LPC32XX_RTC_CTRL_ONSW_MATCH0 (1 << 2) #define LPC32XX_RTC_CTRL_ONSW_MATCH1 (1 << 3) #define LPC32XX_RTC_CTRL_SW_RESET (1 << 4) #define LPC32XX_RTC_CTRL_CNTR_DIS (1 << 6) #define LPC32XX_RTC_CTRL_ONSW_FORCE_HI (1 << 7) #define LPC32XX_RTC_INTSTAT_MATCH0 (1 << 0) #define LPC32XX_RTC_INTSTAT_MATCH1 (1 << 1) #define LPC32XX_RTC_INTSTAT_ONSW (1 << 2) #define LPC32XX_RTC_KEY_ONSW_LOADVAL 0xB5C13F27 #define RTC_NAME "rtc-lpc32xx" #define rtc_readl(dev, reg) \ __raw_readl((dev)->rtc_base + (reg)) #define rtc_writel(dev, reg, val) \ __raw_writel((val), (dev)->rtc_base + (reg)) struct lpc32xx_rtc { void __iomem *rtc_base; int irq; unsigned char alarm_enabled; struct rtc_device *rtc; spinlock_t lock; }; static int lpc32xx_rtc_read_time(struct device *dev, struct rtc_time *time) { unsigned long elapsed_sec; struct lpc32xx_rtc *rtc = dev_get_drvdata(dev); elapsed_sec = rtc_readl(rtc, LPC32XX_RTC_UCOUNT); 
rtc_time_to_tm(elapsed_sec, time); return rtc_valid_tm(time); } static int lpc32xx_rtc_set_mmss(struct device *dev, unsigned long secs) { struct lpc32xx_rtc *rtc = dev_get_drvdata(dev); u32 tmp; spin_lock_irq(&rtc->lock); /* RTC must be disabled during count update */ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL); rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp | LPC32XX_RTC_CTRL_CNTR_DIS); rtc_writel(rtc, LPC32XX_RTC_UCOUNT, secs); rtc_writel(rtc, LPC32XX_RTC_DCOUNT, 0xFFFFFFFF - secs); rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp &= ~LPC32XX_RTC_CTRL_CNTR_DIS); spin_unlock_irq(&rtc->lock); return 0; } static int lpc32xx_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) { struct lpc32xx_rtc *rtc = dev_get_drvdata(dev); rtc_time_to_tm(rtc_readl(rtc, LPC32XX_RTC_MATCH0), &wkalrm->time); wkalrm->enabled = rtc->alarm_enabled; wkalrm->pending = !!(rtc_readl(rtc, LPC32XX_RTC_INTSTAT) & LPC32XX_RTC_INTSTAT_MATCH0); return rtc_valid_tm(&wkalrm->time); } static int lpc32xx_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *wkalrm) { struct lpc32xx_rtc *rtc = dev_get_drvdata(dev); unsigned long alarmsecs; u32 tmp; int ret; ret = rtc_tm_to_time(&wkalrm->time, &alarmsecs); if (ret < 0) { dev_warn(dev, "Failed to convert time: %d\n", ret); return ret; } spin_lock_irq(&rtc->lock); /* Disable alarm during update */ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL); rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp & ~LPC32XX_RTC_CTRL_MATCH0); rtc_writel(rtc, LPC32XX_RTC_MATCH0, alarmsecs); rtc->alarm_enabled = wkalrm->enabled; if (wkalrm->enabled) { rtc_writel(rtc, LPC32XX_RTC_INTSTAT, LPC32XX_RTC_INTSTAT_MATCH0); rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp | LPC32XX_RTC_CTRL_MATCH0); } spin_unlock_irq(&rtc->lock); return 0; } static int lpc32xx_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct lpc32xx_rtc *rtc = dev_get_drvdata(dev); u32 tmp; spin_lock_irq(&rtc->lock); tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL); if (enabled) { rtc->alarm_enabled = 1; tmp |= LPC32XX_RTC_CTRL_MATCH0; } else { 
rtc->alarm_enabled = 0; tmp &= ~LPC32XX_RTC_CTRL_MATCH0; } rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp); spin_unlock_irq(&rtc->lock); return 0; } static irqreturn_t lpc32xx_rtc_alarm_interrupt(int irq, void *dev) { struct lpc32xx_rtc *rtc = dev; spin_lock(&rtc->lock); /* Disable alarm interrupt */ rtc_writel(rtc, LPC32XX_RTC_CTRL, rtc_readl(rtc, LPC32XX_RTC_CTRL) & ~LPC32XX_RTC_CTRL_MATCH0); rtc->alarm_enabled = 0; /* * Write a large value to the match value so the RTC won't * keep firing the match status */ rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF); rtc_writel(rtc, LPC32XX_RTC_INTSTAT, LPC32XX_RTC_INTSTAT_MATCH0); spin_unlock(&rtc->lock); rtc_update_irq(rtc->rtc, 1, RTC_IRQF | RTC_AF); return IRQ_HANDLED; } static const struct rtc_class_ops lpc32xx_rtc_ops = { .read_time = lpc32xx_rtc_read_time, .set_mmss = lpc32xx_rtc_set_mmss, .read_alarm = lpc32xx_rtc_read_alarm, .set_alarm = lpc32xx_rtc_set_alarm, .alarm_irq_enable = lpc32xx_rtc_alarm_irq_enable, }; static int __devinit lpc32xx_rtc_probe(struct platform_device *pdev) { struct resource *res; struct lpc32xx_rtc *rtc; resource_size_t size; int rtcirq; u32 tmp; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "Can't get memory resource\n"); return -ENOENT; } rtcirq = platform_get_irq(pdev, 0); if (rtcirq < 0 || rtcirq >= NR_IRQS) { dev_warn(&pdev->dev, "Can't get interrupt resource\n"); rtcirq = -1; } rtc = devm_kzalloc(&pdev->dev, sizeof(*rtc), GFP_KERNEL); if (unlikely(!rtc)) { dev_err(&pdev->dev, "Can't allocate memory\n"); return -ENOMEM; } rtc->irq = rtcirq; size = resource_size(res); if (!devm_request_mem_region(&pdev->dev, res->start, size, pdev->name)) { dev_err(&pdev->dev, "RTC registers are not free\n"); return -EBUSY; } rtc->rtc_base = devm_ioremap(&pdev->dev, res->start, size); if (!rtc->rtc_base) { dev_err(&pdev->dev, "Can't map memory\n"); return -ENOMEM; } spin_lock_init(&rtc->lock); /* * The RTC is on a separate power domain and can keep it's state * across a 
chip power cycle. If the RTC has never been previously * setup, then set it up now for the first time. */ tmp = rtc_readl(rtc, LPC32XX_RTC_CTRL); if (rtc_readl(rtc, LPC32XX_RTC_KEY) != LPC32XX_RTC_KEY_ONSW_LOADVAL) { tmp &= ~(LPC32XX_RTC_CTRL_SW_RESET | LPC32XX_RTC_CTRL_CNTR_DIS | LPC32XX_RTC_CTRL_MATCH0 | LPC32XX_RTC_CTRL_MATCH1 | LPC32XX_RTC_CTRL_ONSW_MATCH0 | LPC32XX_RTC_CTRL_ONSW_MATCH1 | LPC32XX_RTC_CTRL_ONSW_FORCE_HI); rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp); /* Clear latched interrupt states */ rtc_writel(rtc, LPC32XX_RTC_MATCH0, 0xFFFFFFFF); rtc_writel(rtc, LPC32XX_RTC_INTSTAT, LPC32XX_RTC_INTSTAT_MATCH0 | LPC32XX_RTC_INTSTAT_MATCH1 | LPC32XX_RTC_INTSTAT_ONSW); /* Write key value to RTC so it won't reload on reset */ rtc_writel(rtc, LPC32XX_RTC_KEY, LPC32XX_RTC_KEY_ONSW_LOADVAL); } else { rtc_writel(rtc, LPC32XX_RTC_CTRL, tmp & ~LPC32XX_RTC_CTRL_MATCH0); } platform_set_drvdata(pdev, rtc); rtc->rtc = rtc_device_register(RTC_NAME, &pdev->dev, &lpc32xx_rtc_ops, THIS_MODULE); if (IS_ERR(rtc->rtc)) { dev_err(&pdev->dev, "Can't get RTC\n"); platform_set_drvdata(pdev, NULL); return PTR_ERR(rtc->rtc); } /* * IRQ is enabled after device registration in case alarm IRQ * is pending upon suspend exit. 
*/ if (rtc->irq >= 0) { if (devm_request_irq(&pdev->dev, rtc->irq, lpc32xx_rtc_alarm_interrupt, 0, pdev->name, rtc) < 0) { dev_warn(&pdev->dev, "Can't request interrupt.\n"); rtc->irq = -1; } else { device_init_wakeup(&pdev->dev, 1); } } return 0; } static int __devexit lpc32xx_rtc_remove(struct platform_device *pdev) { struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev); if (rtc->irq >= 0) device_init_wakeup(&pdev->dev, 0); platform_set_drvdata(pdev, NULL); rtc_device_unregister(rtc->rtc); return 0; } #ifdef CONFIG_PM static int lpc32xx_rtc_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev); if (rtc->irq >= 0) { if (device_may_wakeup(&pdev->dev)) enable_irq_wake(rtc->irq); else disable_irq_wake(rtc->irq); } return 0; } static int lpc32xx_rtc_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev); if (rtc->irq >= 0 && device_may_wakeup(&pdev->dev)) disable_irq_wake(rtc->irq); return 0; } /* Unconditionally disable the alarm */ static int lpc32xx_rtc_freeze(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev); spin_lock_irq(&rtc->lock); rtc_writel(rtc, LPC32XX_RTC_CTRL, rtc_readl(rtc, LPC32XX_RTC_CTRL) & ~LPC32XX_RTC_CTRL_MATCH0); spin_unlock_irq(&rtc->lock); return 0; } static int lpc32xx_rtc_thaw(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct lpc32xx_rtc *rtc = platform_get_drvdata(pdev); if (rtc->alarm_enabled) { spin_lock_irq(&rtc->lock); rtc_writel(rtc, LPC32XX_RTC_CTRL, rtc_readl(rtc, LPC32XX_RTC_CTRL) | LPC32XX_RTC_CTRL_MATCH0); spin_unlock_irq(&rtc->lock); } return 0; } static const struct dev_pm_ops lpc32xx_rtc_pm_ops = { .suspend = lpc32xx_rtc_suspend, .resume = lpc32xx_rtc_resume, .freeze = lpc32xx_rtc_freeze, .thaw = lpc32xx_rtc_thaw, .restore = lpc32xx_rtc_resume }; 
#define LPC32XX_RTC_PM_OPS (&lpc32xx_rtc_pm_ops) #else #define LPC32XX_RTC_PM_OPS NULL #endif static struct platform_driver lpc32xx_rtc_driver = { .probe = lpc32xx_rtc_probe, .remove = __devexit_p(lpc32xx_rtc_remove), .driver = { .name = RTC_NAME, .owner = THIS_MODULE, .pm = LPC32XX_RTC_PM_OPS }, }; module_platform_driver(lpc32xx_rtc_driver); MODULE_AUTHOR("Kevin Wells <wellsk40@gmail.com"); MODULE_DESCRIPTION("RTC driver for the LPC32xx SoC"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:rtc-lpc32xx");
gpl-2.0
alexandru-g/kernel_htc_m8_gpe
drivers/media/radio/radio-gemtek.c
4809
9567
/* * GemTek radio card driver * * Copyright 1998 Jonas Munsin <jmunsin@iki.fi> * * GemTek hasn't released any specs on the card, so the protocol had to * be reverse engineered with dosemu. * * Besides the protocol changes, this is mostly a copy of: * * RadioTrack II driver for Linux radio support (C) 1998 Ben Pfaff * * Based on RadioTrack I/RadioReveal (C) 1997 M. Kirkwood * Converted to new API by Alan Cox <alan@lxorguk.ukuu.org.uk> * Various bugfixes and enhancements by Russell Kroll <rkroll@exploits.org> * * Converted to the radio-isa framework by Hans Verkuil <hans.verkuil@cisco.com> * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> * * Note: this card seems to swap the left and right audio channels! * * Fully tested with the Keene USB FM Transmitter and the v4l2-compliance tool. */ #include <linux/module.h> /* Modules */ #include <linux/init.h> /* Initdata */ #include <linux/ioport.h> /* request_region */ #include <linux/delay.h> /* udelay */ #include <linux/videodev2.h> /* kernel radio structs */ #include <linux/mutex.h> #include <linux/io.h> /* outb, outb_p */ #include <linux/slab.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-device.h> #include "radio-isa.h" /* * Module info. */ MODULE_AUTHOR("Jonas Munsin, Pekka Seppänen <pexu@kapsi.fi>"); MODULE_DESCRIPTION("A driver for the GemTek Radio card."); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); /* * Module params. */ #ifndef CONFIG_RADIO_GEMTEK_PORT #define CONFIG_RADIO_GEMTEK_PORT -1 #endif #ifndef CONFIG_RADIO_GEMTEK_PROBE #define CONFIG_RADIO_GEMTEK_PROBE 1 #endif #define GEMTEK_MAX 4 static bool probe = CONFIG_RADIO_GEMTEK_PROBE; static bool hardmute; static int io[GEMTEK_MAX] = { [0] = CONFIG_RADIO_GEMTEK_PORT, [1 ... (GEMTEK_MAX - 1)] = -1 }; static int radio_nr[GEMTEK_MAX] = { [0 ... 
(GEMTEK_MAX - 1)] = -1 }; module_param(probe, bool, 0444); MODULE_PARM_DESC(probe, "Enable automatic device probing."); module_param(hardmute, bool, 0644); MODULE_PARM_DESC(hardmute, "Enable 'hard muting' by shutting down PLL, may " "reduce static noise."); module_param_array(io, int, NULL, 0444); MODULE_PARM_DESC(io, "Force I/O ports for the GemTek Radio card if automatic " "probing is disabled or fails. The most common I/O ports are: 0x20c " "0x30c, 0x24c or 0x34c (0x20c, 0x248 and 0x28c have been reported to " "work for the combined sound/radiocard)."); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_nr, "Radio device numbers"); /* * Frequency calculation constants. Intermediate frequency 10.52 MHz (nominal * value 10.7 MHz), reference divisor 6.39 kHz (nominal 6.25 kHz). */ #define FSCALE 8 #define IF_OFFSET ((unsigned int)(10.52 * 16000 * (1<<FSCALE))) #define REF_FREQ ((unsigned int)(6.39 * 16 * (1<<FSCALE))) #define GEMTEK_CK 0x01 /* Clock signal */ #define GEMTEK_DA 0x02 /* Serial data */ #define GEMTEK_CE 0x04 /* Chip enable */ #define GEMTEK_NS 0x08 /* No signal */ #define GEMTEK_MT 0x10 /* Line mute */ #define GEMTEK_STDF_3_125_KHZ 0x01 /* Standard frequency 3.125 kHz */ #define GEMTEK_PLL_OFF 0x07 /* PLL off */ #define BU2614_BUS_SIZE 32 /* BU2614 / BU2614FS bus size */ #define SHORT_DELAY 5 /* usec */ #define LONG_DELAY 75 /* usec */ struct gemtek { struct radio_isa_card isa; bool muted; u32 bu2614data; }; #define BU2614_FREQ_BITS 16 /* D0..D15, Frequency data */ #define BU2614_PORT_BITS 3 /* P0..P2, Output port control data */ #define BU2614_VOID_BITS 4 /* unused */ #define BU2614_FMES_BITS 1 /* CT, Frequency measurement beginning data */ #define BU2614_STDF_BITS 3 /* R0..R2, Standard frequency data */ #define BU2614_SWIN_BITS 1 /* S, Switch between FMIN / AMIN */ #define BU2614_SWAL_BITS 1 /* PS, Swallow counter division (AMIN only)*/ #define BU2614_VOID2_BITS 1 /* unused */ #define BU2614_FMUN_BITS 1 /* GT, Frequency 
measurement time & unlock */ #define BU2614_TEST_BITS 1 /* TS, Test data is input */ #define BU2614_FREQ_SHIFT 0 #define BU2614_PORT_SHIFT (BU2614_FREQ_BITS + BU2614_FREQ_SHIFT) #define BU2614_VOID_SHIFT (BU2614_PORT_BITS + BU2614_PORT_SHIFT) #define BU2614_FMES_SHIFT (BU2614_VOID_BITS + BU2614_VOID_SHIFT) #define BU2614_STDF_SHIFT (BU2614_FMES_BITS + BU2614_FMES_SHIFT) #define BU2614_SWIN_SHIFT (BU2614_STDF_BITS + BU2614_STDF_SHIFT) #define BU2614_SWAL_SHIFT (BU2614_SWIN_BITS + BU2614_SWIN_SHIFT) #define BU2614_VOID2_SHIFT (BU2614_SWAL_BITS + BU2614_SWAL_SHIFT) #define BU2614_FMUN_SHIFT (BU2614_VOID2_BITS + BU2614_VOID2_SHIFT) #define BU2614_TEST_SHIFT (BU2614_FMUN_BITS + BU2614_FMUN_SHIFT) #define MKMASK(field) (((1<<BU2614_##field##_BITS) - 1) << \ BU2614_##field##_SHIFT) #define BU2614_PORT_MASK MKMASK(PORT) #define BU2614_FREQ_MASK MKMASK(FREQ) #define BU2614_VOID_MASK MKMASK(VOID) #define BU2614_FMES_MASK MKMASK(FMES) #define BU2614_STDF_MASK MKMASK(STDF) #define BU2614_SWIN_MASK MKMASK(SWIN) #define BU2614_SWAL_MASK MKMASK(SWAL) #define BU2614_VOID2_MASK MKMASK(VOID2) #define BU2614_FMUN_MASK MKMASK(FMUN) #define BU2614_TEST_MASK MKMASK(TEST) /* * Set data which will be sent to BU2614FS. */ #define gemtek_bu2614_set(dev, field, data) ((dev)->bu2614data = \ ((dev)->bu2614data & ~field##_MASK) | ((data) << field##_SHIFT)) /* * Transmit settings to BU2614FS over GemTek IC. */ static void gemtek_bu2614_transmit(struct gemtek *gt) { struct radio_isa_card *isa = &gt->isa; int i, bit, q, mute; mute = gt->muted ? GEMTEK_MT : 0x00; outb_p(mute | GEMTEK_CE | GEMTEK_DA | GEMTEK_CK, isa->io); udelay(LONG_DELAY); for (i = 0, q = gt->bu2614data; i < 32; i++, q >>= 1) { bit = (q & 1) ? 
GEMTEK_DA : 0; outb_p(mute | GEMTEK_CE | bit, isa->io); udelay(SHORT_DELAY); outb_p(mute | GEMTEK_CE | bit | GEMTEK_CK, isa->io); udelay(SHORT_DELAY); } outb_p(mute | GEMTEK_DA | GEMTEK_CK, isa->io); udelay(SHORT_DELAY); } /* * Calculate divisor from FM-frequency for BU2614FS (3.125 KHz STDF expected). */ static unsigned long gemtek_convfreq(unsigned long freq) { return ((freq << FSCALE) + IF_OFFSET + REF_FREQ / 2) / REF_FREQ; } static struct radio_isa_card *gemtek_alloc(void) { struct gemtek *gt = kzalloc(sizeof(*gt), GFP_KERNEL); if (gt) gt->muted = true; return gt ? &gt->isa : NULL; } /* * Set FM-frequency. */ static int gemtek_s_frequency(struct radio_isa_card *isa, u32 freq) { struct gemtek *gt = container_of(isa, struct gemtek, isa); if (hardmute && gt->muted) return 0; gemtek_bu2614_set(gt, BU2614_PORT, 0); gemtek_bu2614_set(gt, BU2614_FMES, 0); gemtek_bu2614_set(gt, BU2614_SWIN, 0); /* FM-mode */ gemtek_bu2614_set(gt, BU2614_SWAL, 0); gemtek_bu2614_set(gt, BU2614_FMUN, 1); /* GT bit set */ gemtek_bu2614_set(gt, BU2614_TEST, 0); gemtek_bu2614_set(gt, BU2614_STDF, GEMTEK_STDF_3_125_KHZ); gemtek_bu2614_set(gt, BU2614_FREQ, gemtek_convfreq(freq)); gemtek_bu2614_transmit(gt); return 0; } /* * Set mute flag. */ static int gemtek_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol) { struct gemtek *gt = container_of(isa, struct gemtek, isa); int i; gt->muted = mute; if (hardmute) { if (!mute) return gemtek_s_frequency(isa, isa->freq); /* Turn off PLL, disable data output */ gemtek_bu2614_set(gt, BU2614_PORT, 0); gemtek_bu2614_set(gt, BU2614_FMES, 0); /* CT bit off */ gemtek_bu2614_set(gt, BU2614_SWIN, 0); /* FM-mode */ gemtek_bu2614_set(gt, BU2614_SWAL, 0); gemtek_bu2614_set(gt, BU2614_FMUN, 0); /* GT bit off */ gemtek_bu2614_set(gt, BU2614_TEST, 0); gemtek_bu2614_set(gt, BU2614_STDF, GEMTEK_PLL_OFF); gemtek_bu2614_set(gt, BU2614_FREQ, 0); gemtek_bu2614_transmit(gt); return 0; } /* Read bus contents (CE, CK and DA). 
*/ i = inb_p(isa->io); /* Write it back with mute flag set. */ outb_p((i >> 5) | (mute ? GEMTEK_MT : 0), isa->io); udelay(SHORT_DELAY); return 0; } static u32 gemtek_g_rxsubchans(struct radio_isa_card *isa) { if (inb_p(isa->io) & GEMTEK_NS) return V4L2_TUNER_SUB_MONO; return V4L2_TUNER_SUB_STEREO; } /* * Check if requested card acts like GemTek Radio card. */ static bool gemtek_probe(struct radio_isa_card *isa, int io) { int i, q; q = inb_p(io); /* Read bus contents before probing. */ /* Try to turn on CE, CK and DA respectively and check if card responds properly. */ for (i = 0; i < 3; ++i) { outb_p(1 << i, io); udelay(SHORT_DELAY); if ((inb_p(io) & ~GEMTEK_NS) != (0x17 | (1 << (i + 5)))) return false; } outb_p(q >> 5, io); /* Write bus contents back. */ udelay(SHORT_DELAY); return true; } static const struct radio_isa_ops gemtek_ops = { .alloc = gemtek_alloc, .probe = gemtek_probe, .s_mute_volume = gemtek_s_mute_volume, .s_frequency = gemtek_s_frequency, .g_rxsubchans = gemtek_g_rxsubchans, }; static const int gemtek_ioports[] = { 0x20c, 0x30c, 0x24c, 0x34c, 0x248, 0x28c }; static struct radio_isa_driver gemtek_driver = { .driver = { .match = radio_isa_match, .probe = radio_isa_probe, .remove = radio_isa_remove, .driver = { .name = "radio-gemtek", }, }, .io_params = io, .radio_nr_params = radio_nr, .io_ports = gemtek_ioports, .num_of_io_ports = ARRAY_SIZE(gemtek_ioports), .region_size = 1, .card = "GemTek Radio", .ops = &gemtek_ops, .has_stereo = true, }; static int __init gemtek_init(void) { gemtek_driver.probe = probe; return isa_register_driver(&gemtek_driver.driver, GEMTEK_MAX); } static void __exit gemtek_exit(void) { hardmute = 1; /* Turn off PLL */ isa_unregister_driver(&gemtek_driver.driver); } module_init(gemtek_init); module_exit(gemtek_exit);
gpl-2.0
obek/linux-sunxi
net/batman-adv/vis.c
4809
25452
/* * Copyright (C) 2008-2012 B.A.T.M.A.N. contributors: * * Simon Wunderlich * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA * */ #include "main.h" #include "send.h" #include "translation-table.h" #include "vis.h" #include "soft-interface.h" #include "hard-interface.h" #include "hash.h" #include "originator.h" #define MAX_VIS_PACKET_SIZE 1000 static void start_vis_timer(struct bat_priv *bat_priv); /* free the info */ static void free_info(struct kref *ref) { struct vis_info *info = container_of(ref, struct vis_info, refcount); struct bat_priv *bat_priv = info->bat_priv; struct recvlist_node *entry, *tmp; list_del_init(&info->send_list); spin_lock_bh(&bat_priv->vis_list_lock); list_for_each_entry_safe(entry, tmp, &info->recv_list, list) { list_del(&entry->list); kfree(entry); } spin_unlock_bh(&bat_priv->vis_list_lock); kfree_skb(info->skb_packet); kfree(info); } /* Compare two vis packets, used by the hashing algorithm */ static int vis_info_cmp(const struct hlist_node *node, const void *data2) { const struct vis_info *d1, *d2; const struct vis_packet *p1, *p2; d1 = container_of(node, struct vis_info, hash_entry); d2 = data2; p1 = (struct vis_packet *)d1->skb_packet->data; p2 = (struct vis_packet *)d2->skb_packet->data; return compare_eth(p1->vis_orig, p2->vis_orig); } /* hash function to choose an entry in a hash table of given size */ /* hash algorithm from 
http://en.wikipedia.org/wiki/Hash_table */ static uint32_t vis_info_choose(const void *data, uint32_t size) { const struct vis_info *vis_info = data; const struct vis_packet *packet; const unsigned char *key; uint32_t hash = 0; size_t i; packet = (struct vis_packet *)vis_info->skb_packet->data; key = packet->vis_orig; for (i = 0; i < ETH_ALEN; i++) { hash += key[i]; hash += (hash << 10); hash ^= (hash >> 6); } hash += (hash << 3); hash ^= (hash >> 11); hash += (hash << 15); return hash % size; } static struct vis_info *vis_hash_find(struct bat_priv *bat_priv, const void *data) { struct hashtable_t *hash = bat_priv->vis_hash; struct hlist_head *head; struct hlist_node *node; struct vis_info *vis_info, *vis_info_tmp = NULL; uint32_t index; if (!hash) return NULL; index = vis_info_choose(data, hash->size); head = &hash->table[index]; rcu_read_lock(); hlist_for_each_entry_rcu(vis_info, node, head, hash_entry) { if (!vis_info_cmp(node, data)) continue; vis_info_tmp = vis_info; break; } rcu_read_unlock(); return vis_info_tmp; } /* insert interface to the list of interfaces of one originator, if it * does not already exist in the list */ static void vis_data_insert_interface(const uint8_t *interface, struct hlist_head *if_list, bool primary) { struct if_list_entry *entry; struct hlist_node *pos; hlist_for_each_entry(entry, pos, if_list, list) { if (compare_eth(entry->addr, interface)) return; } /* it's a new address, add it to the list */ entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return; memcpy(entry->addr, interface, ETH_ALEN); entry->primary = primary; hlist_add_head(&entry->list, if_list); } static ssize_t vis_data_read_prim_sec(char *buff, const struct hlist_head *if_list) { struct if_list_entry *entry; struct hlist_node *pos; size_t len = 0; hlist_for_each_entry(entry, pos, if_list, list) { if (entry->primary) len += sprintf(buff + len, "PRIMARY, "); else len += sprintf(buff + len, "SEC %pM, ", entry->addr); } return len; } static size_t 
vis_data_count_prim_sec(struct hlist_head *if_list) { struct if_list_entry *entry; struct hlist_node *pos; size_t count = 0; hlist_for_each_entry(entry, pos, if_list, list) { if (entry->primary) count += 9; else count += 23; } return count; } /* read an entry */ static ssize_t vis_data_read_entry(char *buff, const struct vis_info_entry *entry, const uint8_t *src, bool primary) { /* maximal length: max(4+17+2, 3+17+1+3+2) == 26 */ if (primary && entry->quality == 0) return sprintf(buff, "TT %pM, ", entry->dest); else if (compare_eth(entry->src, src)) return sprintf(buff, "TQ %pM %d, ", entry->dest, entry->quality); return 0; } int vis_seq_print_text(struct seq_file *seq, void *offset) { struct hard_iface *primary_if; struct hlist_node *node; struct hlist_head *head; struct vis_info *info; struct vis_packet *packet; struct vis_info_entry *entries; struct net_device *net_dev = (struct net_device *)seq->private; struct bat_priv *bat_priv = netdev_priv(net_dev); struct hashtable_t *hash = bat_priv->vis_hash; HLIST_HEAD(vis_if_list); struct if_list_entry *entry; struct hlist_node *pos, *n; uint32_t i; int j, ret = 0; int vis_server = atomic_read(&bat_priv->vis_mode); size_t buff_pos, buf_size; char *buff; int compare; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) goto out; if (vis_server == VIS_TYPE_CLIENT_UPDATE) goto out; buf_size = 1; /* Estimate length */ spin_lock_bh(&bat_priv->vis_hash_lock); for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(info, node, head, hash_entry) { packet = (struct vis_packet *)info->skb_packet->data; entries = (struct vis_info_entry *) ((char *)packet + sizeof(*packet)); for (j = 0; j < packet->entries; j++) { if (entries[j].quality == 0) continue; compare = compare_eth(entries[j].src, packet->vis_orig); vis_data_insert_interface(entries[j].src, &vis_if_list, compare); } hlist_for_each_entry(entry, pos, &vis_if_list, list) { buf_size += 18 + 26 * packet->entries; /* add 
primary/secondary records */ if (compare_eth(entry->addr, packet->vis_orig)) buf_size += vis_data_count_prim_sec(&vis_if_list); buf_size += 1; } hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) { hlist_del(&entry->list); kfree(entry); } } rcu_read_unlock(); } buff = kmalloc(buf_size, GFP_ATOMIC); if (!buff) { spin_unlock_bh(&bat_priv->vis_hash_lock); ret = -ENOMEM; goto out; } buff[0] = '\0'; buff_pos = 0; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(info, node, head, hash_entry) { packet = (struct vis_packet *)info->skb_packet->data; entries = (struct vis_info_entry *) ((char *)packet + sizeof(*packet)); for (j = 0; j < packet->entries; j++) { if (entries[j].quality == 0) continue; compare = compare_eth(entries[j].src, packet->vis_orig); vis_data_insert_interface(entries[j].src, &vis_if_list, compare); } hlist_for_each_entry(entry, pos, &vis_if_list, list) { buff_pos += sprintf(buff + buff_pos, "%pM,", entry->addr); for (j = 0; j < packet->entries; j++) buff_pos += vis_data_read_entry( buff + buff_pos, &entries[j], entry->addr, entry->primary); /* add primary/secondary records */ if (compare_eth(entry->addr, packet->vis_orig)) buff_pos += vis_data_read_prim_sec(buff + buff_pos, &vis_if_list); buff_pos += sprintf(buff + buff_pos, "\n"); } hlist_for_each_entry_safe(entry, pos, n, &vis_if_list, list) { hlist_del(&entry->list); kfree(entry); } } rcu_read_unlock(); } spin_unlock_bh(&bat_priv->vis_hash_lock); seq_printf(seq, "%s", buff); kfree(buff); out: if (primary_if) hardif_free_ref(primary_if); return ret; } /* add the info packet to the send list, if it was not * already linked in. */ static void send_list_add(struct bat_priv *bat_priv, struct vis_info *info) { if (list_empty(&info->send_list)) { kref_get(&info->refcount); list_add_tail(&info->send_list, &bat_priv->vis_send_list); } } /* delete the info packet from the send list, if it was * linked in. 
*/ static void send_list_del(struct vis_info *info) { if (!list_empty(&info->send_list)) { list_del_init(&info->send_list); kref_put(&info->refcount, free_info); } } /* tries to add one entry to the receive list. */ static void recv_list_add(struct bat_priv *bat_priv, struct list_head *recv_list, const char *mac) { struct recvlist_node *entry; entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return; memcpy(entry->mac, mac, ETH_ALEN); spin_lock_bh(&bat_priv->vis_list_lock); list_add_tail(&entry->list, recv_list); spin_unlock_bh(&bat_priv->vis_list_lock); } /* returns 1 if this mac is in the recv_list */ static int recv_list_is_in(struct bat_priv *bat_priv, const struct list_head *recv_list, const char *mac) { const struct recvlist_node *entry; spin_lock_bh(&bat_priv->vis_list_lock); list_for_each_entry(entry, recv_list, list) { if (compare_eth(entry->mac, mac)) { spin_unlock_bh(&bat_priv->vis_list_lock); return 1; } } spin_unlock_bh(&bat_priv->vis_list_lock); return 0; } /* try to add the packet to the vis_hash. return NULL if invalid (e.g. too old, * broken.. ). vis hash must be locked outside. is_new is set when the packet * is newer than old entries in the hash. 
*/ static struct vis_info *add_packet(struct bat_priv *bat_priv, struct vis_packet *vis_packet, int vis_info_len, int *is_new, int make_broadcast) { struct vis_info *info, *old_info; struct vis_packet *search_packet, *old_packet; struct vis_info search_elem; struct vis_packet *packet; int hash_added; *is_new = 0; /* sanity check */ if (!bat_priv->vis_hash) return NULL; /* see if the packet is already in vis_hash */ search_elem.skb_packet = dev_alloc_skb(sizeof(*search_packet)); if (!search_elem.skb_packet) return NULL; search_packet = (struct vis_packet *)skb_put(search_elem.skb_packet, sizeof(*search_packet)); memcpy(search_packet->vis_orig, vis_packet->vis_orig, ETH_ALEN); old_info = vis_hash_find(bat_priv, &search_elem); kfree_skb(search_elem.skb_packet); if (old_info) { old_packet = (struct vis_packet *)old_info->skb_packet->data; if (!seq_after(ntohl(vis_packet->seqno), ntohl(old_packet->seqno))) { if (old_packet->seqno == vis_packet->seqno) { recv_list_add(bat_priv, &old_info->recv_list, vis_packet->sender_orig); return old_info; } else { /* newer packet is already in hash. */ return NULL; } } /* remove old entry */ hash_remove(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, old_info); send_list_del(old_info); kref_put(&old_info->refcount, free_info); } info = kmalloc(sizeof(*info), GFP_ATOMIC); if (!info) return NULL; info->skb_packet = dev_alloc_skb(sizeof(*packet) + vis_info_len + sizeof(struct ethhdr)); if (!info->skb_packet) { kfree(info); return NULL; } skb_reserve(info->skb_packet, sizeof(struct ethhdr)); packet = (struct vis_packet *)skb_put(info->skb_packet, sizeof(*packet) + vis_info_len); kref_init(&info->refcount); INIT_LIST_HEAD(&info->send_list); INIT_LIST_HEAD(&info->recv_list); info->first_seen = jiffies; info->bat_priv = bat_priv; memcpy(packet, vis_packet, sizeof(*packet) + vis_info_len); /* initialize and add new packet. 
*/ *is_new = 1; /* Make it a broadcast packet, if required */ if (make_broadcast) memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); /* repair if entries is longer than packet. */ if (packet->entries * sizeof(struct vis_info_entry) > vis_info_len) packet->entries = vis_info_len / sizeof(struct vis_info_entry); recv_list_add(bat_priv, &info->recv_list, packet->sender_orig); /* try to add it */ hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, info, &info->hash_entry); if (hash_added != 0) { /* did not work (for some reason) */ kref_put(&info->refcount, free_info); info = NULL; } return info; } /* handle the server sync packet, forward if needed. */ void receive_server_sync_packet(struct bat_priv *bat_priv, struct vis_packet *vis_packet, int vis_info_len) { struct vis_info *info; int is_new, make_broadcast; int vis_server = atomic_read(&bat_priv->vis_mode); make_broadcast = (vis_server == VIS_TYPE_SERVER_SYNC); spin_lock_bh(&bat_priv->vis_hash_lock); info = add_packet(bat_priv, vis_packet, vis_info_len, &is_new, make_broadcast); if (!info) goto end; /* only if we are server ourselves and packet is newer than the one in * hash.*/ if (vis_server == VIS_TYPE_SERVER_SYNC && is_new) send_list_add(bat_priv, info); end: spin_unlock_bh(&bat_priv->vis_hash_lock); } /* handle an incoming client update packet and schedule forward if needed. */ void receive_client_update_packet(struct bat_priv *bat_priv, struct vis_packet *vis_packet, int vis_info_len) { struct vis_info *info; struct vis_packet *packet; int is_new; int vis_server = atomic_read(&bat_priv->vis_mode); int are_target = 0; /* clients shall not broadcast. */ if (is_broadcast_ether_addr(vis_packet->target_orig)) return; /* Are we the target for this VIS packet? 
*/ if (vis_server == VIS_TYPE_SERVER_SYNC && is_my_mac(vis_packet->target_orig)) are_target = 1; spin_lock_bh(&bat_priv->vis_hash_lock); info = add_packet(bat_priv, vis_packet, vis_info_len, &is_new, are_target); if (!info) goto end; /* note that outdated packets will be dropped at this point. */ packet = (struct vis_packet *)info->skb_packet->data; /* send only if we're the target server or ... */ if (are_target && is_new) { packet->vis_type = VIS_TYPE_SERVER_SYNC; /* upgrade! */ send_list_add(bat_priv, info); /* ... we're not the recipient (and thus need to forward). */ } else if (!is_my_mac(packet->target_orig)) { send_list_add(bat_priv, info); } end: spin_unlock_bh(&bat_priv->vis_hash_lock); } /* Walk the originators and find the VIS server with the best tq. Set the packet * address to its address and return the best_tq. * * Must be called with the originator hash locked */ static int find_best_vis_server(struct bat_priv *bat_priv, struct vis_info *info) { struct hashtable_t *hash = bat_priv->orig_hash; struct neigh_node *router; struct hlist_node *node; struct hlist_head *head; struct orig_node *orig_node; struct vis_packet *packet; int best_tq = -1; uint32_t i; packet = (struct vis_packet *)info->skb_packet->data; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { router = orig_node_get_router(orig_node); if (!router) continue; if ((orig_node->flags & VIS_SERVER) && (router->tq_avg > best_tq)) { best_tq = router->tq_avg; memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); } neigh_node_free_ref(router); } rcu_read_unlock(); } return best_tq; } /* Return true if the vis packet is full. 
*/ static bool vis_packet_full(const struct vis_info *info) { const struct vis_packet *packet; packet = (struct vis_packet *)info->skb_packet->data; if (MAX_VIS_PACKET_SIZE / sizeof(struct vis_info_entry) < packet->entries + 1) return true; return false; } /* generates a packet of own vis data, * returns 0 on success, -1 if no packet could be generated */ static int generate_vis_packet(struct bat_priv *bat_priv) { struct hashtable_t *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; struct orig_node *orig_node; struct neigh_node *router; struct vis_info *info = bat_priv->my_vis_info; struct vis_packet *packet = (struct vis_packet *)info->skb_packet->data; struct vis_info_entry *entry; struct tt_common_entry *tt_common_entry; int best_tq = -1; uint32_t i; info->first_seen = jiffies; packet->vis_type = atomic_read(&bat_priv->vis_mode); memcpy(packet->target_orig, broadcast_addr, ETH_ALEN); packet->header.ttl = TTL; packet->seqno = htonl(ntohl(packet->seqno) + 1); packet->entries = 0; skb_trim(info->skb_packet, sizeof(*packet)); if (packet->vis_type == VIS_TYPE_CLIENT_UPDATE) { best_tq = find_best_vis_server(bat_priv, info); if (best_tq < 0) return -1; } for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { router = orig_node_get_router(orig_node); if (!router) continue; if (!compare_eth(router->addr, orig_node->orig)) goto next; if (router->if_incoming->if_status != IF_ACTIVE) goto next; if (router->tq_avg < 1) goto next; /* fill one entry into buffer. 
*/ entry = (struct vis_info_entry *) skb_put(info->skb_packet, sizeof(*entry)); memcpy(entry->src, router->if_incoming->net_dev->dev_addr, ETH_ALEN); memcpy(entry->dest, orig_node->orig, ETH_ALEN); entry->quality = router->tq_avg; packet->entries++; next: neigh_node_free_ref(router); if (vis_packet_full(info)) goto unlock; } rcu_read_unlock(); } hash = bat_priv->tt_local_hash; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(tt_common_entry, node, head, hash_entry) { entry = (struct vis_info_entry *) skb_put(info->skb_packet, sizeof(*entry)); memset(entry->src, 0, ETH_ALEN); memcpy(entry->dest, tt_common_entry->addr, ETH_ALEN); entry->quality = 0; /* 0 means TT */ packet->entries++; if (vis_packet_full(info)) goto unlock; } rcu_read_unlock(); } return 0; unlock: rcu_read_unlock(); return 0; } /* free old vis packets. Must be called with this vis_hash_lock * held */ static void purge_vis_packets(struct bat_priv *bat_priv) { uint32_t i; struct hashtable_t *hash = bat_priv->vis_hash; struct hlist_node *node, *node_tmp; struct hlist_head *head; struct vis_info *info; for (i = 0; i < hash->size; i++) { head = &hash->table[i]; hlist_for_each_entry_safe(info, node, node_tmp, head, hash_entry) { /* never purge own data. */ if (info == bat_priv->my_vis_info) continue; if (has_timed_out(info->first_seen, VIS_TIMEOUT)) { hlist_del(node); send_list_del(info); kref_put(&info->refcount, free_info); } } } } static void broadcast_vis_packet(struct bat_priv *bat_priv, struct vis_info *info) { struct neigh_node *router; struct hashtable_t *hash = bat_priv->orig_hash; struct hlist_node *node; struct hlist_head *head; struct orig_node *orig_node; struct vis_packet *packet; struct sk_buff *skb; struct hard_iface *hard_iface; uint8_t dstaddr[ETH_ALEN]; uint32_t i; packet = (struct vis_packet *)info->skb_packet->data; /* send to all routers in range. 
*/ for (i = 0; i < hash->size; i++) { head = &hash->table[i]; rcu_read_lock(); hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) { /* if it's a vis server and reachable, send it. */ if (!(orig_node->flags & VIS_SERVER)) continue; router = orig_node_get_router(orig_node); if (!router) continue; /* don't send it if we already received the packet from * this node. */ if (recv_list_is_in(bat_priv, &info->recv_list, orig_node->orig)) { neigh_node_free_ref(router); continue; } memcpy(packet->target_orig, orig_node->orig, ETH_ALEN); hard_iface = router->if_incoming; memcpy(dstaddr, router->addr, ETH_ALEN); neigh_node_free_ref(router); skb = skb_clone(info->skb_packet, GFP_ATOMIC); if (skb) send_skb_packet(skb, hard_iface, dstaddr); } rcu_read_unlock(); } } static void unicast_vis_packet(struct bat_priv *bat_priv, struct vis_info *info) { struct orig_node *orig_node; struct neigh_node *router = NULL; struct sk_buff *skb; struct vis_packet *packet; packet = (struct vis_packet *)info->skb_packet->data; orig_node = orig_hash_find(bat_priv, packet->target_orig); if (!orig_node) goto out; router = orig_node_get_router(orig_node); if (!router) goto out; skb = skb_clone(info->skb_packet, GFP_ATOMIC); if (skb) send_skb_packet(skb, router->if_incoming, router->addr); out: if (router) neigh_node_free_ref(router); if (orig_node) orig_node_free_ref(orig_node); } /* only send one vis packet. 
called from send_vis_packets() */ static void send_vis_packet(struct bat_priv *bat_priv, struct vis_info *info) { struct hard_iface *primary_if; struct vis_packet *packet; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) goto out; packet = (struct vis_packet *)info->skb_packet->data; if (packet->header.ttl < 2) { pr_debug("Error - can't send vis packet: ttl exceeded\n"); goto out; } memcpy(packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN); packet->header.ttl--; if (is_broadcast_ether_addr(packet->target_orig)) broadcast_vis_packet(bat_priv, info); else unicast_vis_packet(bat_priv, info); packet->header.ttl++; /* restore TTL */ out: if (primary_if) hardif_free_ref(primary_if); } /* called from timer; send (and maybe generate) vis packet. */ static void send_vis_packets(struct work_struct *work) { struct delayed_work *delayed_work = container_of(work, struct delayed_work, work); struct bat_priv *bat_priv = container_of(delayed_work, struct bat_priv, vis_work); struct vis_info *info; spin_lock_bh(&bat_priv->vis_hash_lock); purge_vis_packets(bat_priv); if (generate_vis_packet(bat_priv) == 0) { /* schedule if generation was successful */ send_list_add(bat_priv, bat_priv->my_vis_info); } while (!list_empty(&bat_priv->vis_send_list)) { info = list_first_entry(&bat_priv->vis_send_list, typeof(*info), send_list); kref_get(&info->refcount); spin_unlock_bh(&bat_priv->vis_hash_lock); send_vis_packet(bat_priv, info); spin_lock_bh(&bat_priv->vis_hash_lock); send_list_del(info); kref_put(&info->refcount, free_info); } spin_unlock_bh(&bat_priv->vis_hash_lock); start_vis_timer(bat_priv); } /* init the vis server. this may only be called when if_list is already * initialized (e.g. 
bat0 is initialized, interfaces have been added) */ int vis_init(struct bat_priv *bat_priv) { struct vis_packet *packet; int hash_added; if (bat_priv->vis_hash) return 1; spin_lock_bh(&bat_priv->vis_hash_lock); bat_priv->vis_hash = hash_new(256); if (!bat_priv->vis_hash) { pr_err("Can't initialize vis_hash\n"); goto err; } bat_priv->my_vis_info = kmalloc(MAX_VIS_PACKET_SIZE, GFP_ATOMIC); if (!bat_priv->my_vis_info) goto err; bat_priv->my_vis_info->skb_packet = dev_alloc_skb(sizeof(*packet) + MAX_VIS_PACKET_SIZE + sizeof(struct ethhdr)); if (!bat_priv->my_vis_info->skb_packet) goto free_info; skb_reserve(bat_priv->my_vis_info->skb_packet, sizeof(struct ethhdr)); packet = (struct vis_packet *)skb_put(bat_priv->my_vis_info->skb_packet, sizeof(*packet)); /* prefill the vis info */ bat_priv->my_vis_info->first_seen = jiffies - msecs_to_jiffies(VIS_INTERVAL); INIT_LIST_HEAD(&bat_priv->my_vis_info->recv_list); INIT_LIST_HEAD(&bat_priv->my_vis_info->send_list); kref_init(&bat_priv->my_vis_info->refcount); bat_priv->my_vis_info->bat_priv = bat_priv; packet->header.version = COMPAT_VERSION; packet->header.packet_type = BAT_VIS; packet->header.ttl = TTL; packet->seqno = 0; packet->entries = 0; INIT_LIST_HEAD(&bat_priv->vis_send_list); hash_added = hash_add(bat_priv->vis_hash, vis_info_cmp, vis_info_choose, bat_priv->my_vis_info, &bat_priv->my_vis_info->hash_entry); if (hash_added != 0) { pr_err("Can't add own vis packet into hash\n"); /* not in hash, need to remove it manually. 
*/ kref_put(&bat_priv->my_vis_info->refcount, free_info); goto err; } spin_unlock_bh(&bat_priv->vis_hash_lock); start_vis_timer(bat_priv); return 1; free_info: kfree(bat_priv->my_vis_info); bat_priv->my_vis_info = NULL; err: spin_unlock_bh(&bat_priv->vis_hash_lock); vis_quit(bat_priv); return 0; } /* Decrease the reference count on a hash item info */ static void free_info_ref(struct hlist_node *node, void *arg) { struct vis_info *info; info = container_of(node, struct vis_info, hash_entry); send_list_del(info); kref_put(&info->refcount, free_info); } /* shutdown vis-server */ void vis_quit(struct bat_priv *bat_priv) { if (!bat_priv->vis_hash) return; cancel_delayed_work_sync(&bat_priv->vis_work); spin_lock_bh(&bat_priv->vis_hash_lock); /* properly remove, kill timers ... */ hash_delete(bat_priv->vis_hash, free_info_ref, NULL); bat_priv->vis_hash = NULL; bat_priv->my_vis_info = NULL; spin_unlock_bh(&bat_priv->vis_hash_lock); } /* schedule packets for (re)transmission */ static void start_vis_timer(struct bat_priv *bat_priv) { INIT_DELAYED_WORK(&bat_priv->vis_work, send_vis_packets); queue_delayed_work(bat_event_workqueue, &bat_priv->vis_work, msecs_to_jiffies(VIS_INTERVAL)); }
gpl-2.0
bigbiff/kernel_asus_tf700t
net/ax25/ax25_ds_in.c
5065
7261
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> /* * State machine for state 1, Awaiting Connection State. * The handling of the timer(s) is in file ax25_ds_timer.c. * Handling of state 0 and connection release is in ax25.c. 
*/ static int ax25_ds_state1_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) { switch (frametype) { case AX25_SABM: ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); break; case AX25_SABME: ax25->modulus = AX25_EMODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW]; ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); break; case AX25_DISC: ax25_send_control(ax25, AX25_DM, pf, AX25_RESPONSE); break; case AX25_UA: ax25_calculate_rtt(ax25); ax25_stop_t1timer(ax25); ax25_start_t3timer(ax25); ax25_start_idletimer(ax25); ax25->vs = 0; ax25->va = 0; ax25->vr = 0; ax25->state = AX25_STATE_3; ax25->n2count = 0; if (ax25->sk != NULL) { bh_lock_sock(ax25->sk); ax25->sk->sk_state = TCP_ESTABLISHED; /* * For WAIT_SABM connections we will produce an accept * ready socket here */ if (!sock_flag(ax25->sk, SOCK_DEAD)) ax25->sk->sk_state_change(ax25->sk); bh_unlock_sock(ax25->sk); } ax25_dama_on(ax25); /* according to DK4EG's spec we are required to * send a RR RESPONSE FINAL NR=0. */ ax25_std_enquiry_response(ax25); break; case AX25_DM: if (pf) ax25_disconnect(ax25, ECONNREFUSED); break; default: if (pf) ax25_send_control(ax25, AX25_SABM, AX25_POLLON, AX25_COMMAND); break; } return 0; } /* * State machine for state 2, Awaiting Release State. * The handling of the timer(s) is in file ax25_ds_timer.c * Handling of state 0 and connection release is in ax25.c. 
*/ static int ax25_ds_state2_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int pf, int type) { switch (frametype) { case AX25_SABM: case AX25_SABME: ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_dama_off(ax25); break; case AX25_DISC: ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_dama_off(ax25); ax25_disconnect(ax25, 0); break; case AX25_DM: case AX25_UA: if (pf) { ax25_dama_off(ax25); ax25_disconnect(ax25, 0); } break; case AX25_I: case AX25_REJ: case AX25_RNR: case AX25_RR: if (pf) { ax25_send_control(ax25, AX25_DISC, AX25_POLLON, AX25_COMMAND); ax25_dama_off(ax25); } break; default: break; } return 0; } /* * State machine for state 3, Connected State. * The handling of the timer(s) is in file ax25_timer.c * Handling of state 0 and connection release is in ax25.c. */ static int ax25_ds_state3_machine(ax25_cb *ax25, struct sk_buff *skb, int frametype, int ns, int nr, int pf, int type) { int queued = 0; switch (frametype) { case AX25_SABM: case AX25_SABME: if (frametype == AX25_SABM) { ax25->modulus = AX25_MODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_WINDOW]; } else { ax25->modulus = AX25_EMODULUS; ax25->window = ax25->ax25_dev->values[AX25_VALUES_EWINDOW]; } ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_stop_t1timer(ax25); ax25_start_t3timer(ax25); ax25_start_idletimer(ax25); ax25->condition = 0x00; ax25->vs = 0; ax25->va = 0; ax25->vr = 0; ax25_requeue_frames(ax25); ax25_dama_on(ax25); break; case AX25_DISC: ax25_send_control(ax25, AX25_UA, pf, AX25_RESPONSE); ax25_dama_off(ax25); ax25_disconnect(ax25, 0); break; case AX25_DM: ax25_dama_off(ax25); ax25_disconnect(ax25, ECONNRESET); break; case AX25_RR: case AX25_RNR: if (frametype == AX25_RR) ax25->condition &= ~AX25_COND_PEER_RX_BUSY; else ax25->condition |= AX25_COND_PEER_RX_BUSY; if (ax25_validate_nr(ax25, nr)) { if (ax25_check_iframes_acked(ax25, nr)) ax25->n2count=0; if (type == AX25_COMMAND && pf) ax25_ds_enquiry_response(ax25); } else 
{ ax25_ds_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; case AX25_REJ: ax25->condition &= ~AX25_COND_PEER_RX_BUSY; if (ax25_validate_nr(ax25, nr)) { if (ax25->va != nr) ax25->n2count=0; ax25_frames_acked(ax25, nr); ax25_calculate_rtt(ax25); ax25_stop_t1timer(ax25); ax25_start_t3timer(ax25); ax25_requeue_frames(ax25); if (type == AX25_COMMAND && pf) ax25_ds_enquiry_response(ax25); } else { ax25_ds_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; } break; case AX25_I: if (!ax25_validate_nr(ax25, nr)) { ax25_ds_nr_error_recovery(ax25); ax25->state = AX25_STATE_1; break; } if (ax25->condition & AX25_COND_PEER_RX_BUSY) { ax25_frames_acked(ax25, nr); ax25->n2count = 0; } else { if (ax25_check_iframes_acked(ax25, nr)) ax25->n2count = 0; } if (ax25->condition & AX25_COND_OWN_RX_BUSY) { if (pf) ax25_ds_enquiry_response(ax25); break; } if (ns == ax25->vr) { ax25->vr = (ax25->vr + 1) % ax25->modulus; queued = ax25_rx_iframe(ax25, skb); if (ax25->condition & AX25_COND_OWN_RX_BUSY) ax25->vr = ns; /* ax25->vr - 1 */ ax25->condition &= ~AX25_COND_REJECT; if (pf) { ax25_ds_enquiry_response(ax25); } else { if (!(ax25->condition & AX25_COND_ACK_PENDING)) { ax25->condition |= AX25_COND_ACK_PENDING; ax25_start_t2timer(ax25); } } } else { if (ax25->condition & AX25_COND_REJECT) { if (pf) ax25_ds_enquiry_response(ax25); } else { ax25->condition |= AX25_COND_REJECT; ax25_ds_enquiry_response(ax25); ax25->condition &= ~AX25_COND_ACK_PENDING; } } break; case AX25_FRMR: case AX25_ILLEGAL: ax25_ds_establish_data_link(ax25); ax25->state = AX25_STATE_1; break; default: break; } return queued; } /* * Higher level upcall for a LAPB frame */ int ax25_ds_frame_in(ax25_cb *ax25, struct sk_buff *skb, int type) { int queued = 0, frametype, ns, nr, pf; frametype = ax25_decode(ax25, skb, &ns, &nr, &pf); switch (ax25->state) { case AX25_STATE_1: queued = ax25_ds_state1_machine(ax25, skb, frametype, pf, type); break; case AX25_STATE_2: queued = ax25_ds_state2_machine(ax25, skb, 
frametype, pf, type); break; case AX25_STATE_3: queued = ax25_ds_state3_machine(ax25, skb, frametype, ns, nr, pf, type); break; } return queued; }
gpl-2.0
Docker-J/kernel_msm
arch/s390/crypto/sha1_s390.c
7369
2805
/* * Cryptographic API. * * s390 implementation of the SHA1 Secure Hash Algorithm. * * Derived from cryptoapi implementation, adapted for in-place * scatterlist interface. Originally based on the public domain * implementation written by Steve Reid. * * s390 Version: * Copyright IBM Corp. 2003,2007 * Author(s): Thomas Spatzier * Jan Glauber (jan.glauber@de.ibm.com) * * Derived from "crypto/sha1_generic.c" * Copyright (c) Alan Smithee. * Copyright (c) Andrew McDonald <andrew@mcdonald.org.uk> * Copyright (c) Jean-Francois Dive <jef@linuxbe.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <linux/init.h> #include <linux/module.h> #include <crypto/sha.h> #include "crypt_s390.h" #include "sha.h" static int sha1_init(struct shash_desc *desc) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); sctx->state[0] = SHA1_H0; sctx->state[1] = SHA1_H1; sctx->state[2] = SHA1_H2; sctx->state[3] = SHA1_H3; sctx->state[4] = SHA1_H4; sctx->count = 0; sctx->func = KIMD_SHA_1; return 0; } static int sha1_export(struct shash_desc *desc, void *out) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); struct sha1_state *octx = out; octx->count = sctx->count; memcpy(octx->state, sctx->state, sizeof(octx->state)); memcpy(octx->buffer, sctx->buf, sizeof(octx->buffer)); return 0; } static int sha1_import(struct shash_desc *desc, const void *in) { struct s390_sha_ctx *sctx = shash_desc_ctx(desc); const struct sha1_state *ictx = in; sctx->count = ictx->count; memcpy(sctx->state, ictx->state, sizeof(ictx->state)); memcpy(sctx->buf, ictx->buffer, sizeof(ictx->buffer)); sctx->func = KIMD_SHA_1; return 0; } static struct shash_alg alg = { .digestsize = SHA1_DIGEST_SIZE, .init = sha1_init, .update = s390_sha_update, .final = s390_sha_final, .export = 
sha1_export, .import = sha1_import, .descsize = sizeof(struct s390_sha_ctx), .statesize = sizeof(struct sha1_state), .base = { .cra_name = "sha1", .cra_driver_name= "sha1-s390", .cra_priority = CRYPT_S390_PRIORITY, .cra_flags = CRYPTO_ALG_TYPE_SHASH, .cra_blocksize = SHA1_BLOCK_SIZE, .cra_module = THIS_MODULE, } }; static int __init sha1_s390_init(void) { if (!crypt_s390_func_available(KIMD_SHA_1, CRYPT_S390_MSA)) return -EOPNOTSUPP; return crypto_register_shash(&alg); } static void __exit sha1_s390_fini(void) { crypto_unregister_shash(&alg); } module_init(sha1_s390_init); module_exit(sha1_s390_fini); MODULE_ALIAS("sha1"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("SHA1 Secure Hash Algorithm");
gpl-2.0
MarginC/linux
drivers/net/wireless/ipw2x00/libipw_tx.c
9929
15423
/****************************************************************************** Copyright(c) 2003 - 2005 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The full GNU General Public License is included in this distribution in the file called LICENSE. Contact Information: Intel Linux Wireless <ilw@linux.intel.com> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <asm/uaccess.h> #include "libipw.h" /* 802.11 Data Frame ,-------------------------------------------------------------------. Bytes | 2 | 2 | 6 | 6 | 6 | 2 | 0..2312 | 4 | |------|------|---------|---------|---------|------|---------|------| Desc. 
| ctrl | dura | DA/RA | TA | SA | Sequ | Frame | fcs | | | tion | (BSSID) | | | ence | data | | `--------------------------------------------------| |------' Total: 28 non-data bytes `----.----' | .- 'Frame data' expands, if WEP enabled, to <----------' | V ,-----------------------. Bytes | 4 | 0-2296 | 4 | |-----|-----------|-----| Desc. | IV | Encrypted | ICV | | | Packet | | `-----| |-----' `-----.-----' | .- 'Encrypted Packet' expands to | V ,---------------------------------------------------. Bytes | 1 | 1 | 1 | 3 | 2 | 0-2304 | |------|------|---------|----------|------|---------| Desc. | SNAP | SNAP | Control |Eth Tunnel| Type | IP | | DSAP | SSAP | | | | Packet | | 0xAA | 0xAA |0x03 (UI)|0x00-00-F8| | | `---------------------------------------------------- Total: 8 non-data bytes 802.3 Ethernet Data Frame ,-----------------------------------------. Bytes | 6 | 6 | 2 | Variable | 4 | |-------|-------|------|-----------|------| Desc. | Dest. | Source| Type | IP Packet | fcs | | MAC | MAC | | | | `-----------------------------------------' Total: 18 non-data bytes In the event that fragmentation is required, the incoming payload is split into N parts of size ieee->fts. The first fragment contains the SNAP header and the remaining packets are just data. If encryption is enabled, each fragment payload size is reduced by enough space to add the prefix and postfix (IV and ICV totalling 8 bytes in the case of WEP) So if you have 1500 bytes of payload with ieee->fts set to 500 without encryption it will take 3 frames. With WEP it will take 4 frames as the payload of each frame is reduced to 492 bytes. * SKB visualization * * ,- skb->data * | * | ETHERNET HEADER ,-<-- PAYLOAD * | | 14 bytes from skb->data * | 2 bytes for Type --> ,T. | (sizeof ethhdr) * | | | | * |,-Dest.--. ,--Src.---. 
| | | * | 6 bytes| | 6 bytes | | | | * v | | | | | | * 0 | v 1 | v | v 2 * 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 * ^ | ^ | ^ | * | | | | | | * | | | | `T' <---- 2 bytes for Type * | | | | * | | '---SNAP--' <-------- 6 bytes for SNAP * | | * `-IV--' <-------------------- 4 bytes for IV (WEP) * * SNAP HEADER * */ static u8 P802_1H_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0xf8 }; static u8 RFC1042_OUI[P80211_OUI_LEN] = { 0x00, 0x00, 0x00 }; static int libipw_copy_snap(u8 * data, __be16 h_proto) { struct libipw_snap_hdr *snap; u8 *oui; snap = (struct libipw_snap_hdr *)data; snap->dsap = 0xaa; snap->ssap = 0xaa; snap->ctrl = 0x03; if (h_proto == htons(ETH_P_AARP) || h_proto == htons(ETH_P_IPX)) oui = P802_1H_OUI; else oui = RFC1042_OUI; snap->oui[0] = oui[0]; snap->oui[1] = oui[1]; snap->oui[2] = oui[2]; memcpy(data + SNAP_SIZE, &h_proto, sizeof(u16)); return SNAP_SIZE + sizeof(u16); } static int libipw_encrypt_fragment(struct libipw_device *ieee, struct sk_buff *frag, int hdr_len) { struct lib80211_crypt_data *crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; int res; if (crypt == NULL) return -1; /* To encrypt, frame format is: * IV (4 bytes), clear payload (including SNAP), ICV (4 bytes) */ atomic_inc(&crypt->refcnt); res = 0; if (crypt->ops && crypt->ops->encrypt_mpdu) res = crypt->ops->encrypt_mpdu(frag, hdr_len, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_INFO "%s: Encryption failed: len=%d.\n", ieee->dev->name, frag->len); ieee->ieee_stats.tx_discards++; return -1; } return 0; } void libipw_txb_free(struct libipw_txb *txb) { int i; if (unlikely(!txb)) return; for (i = 0; i < txb->nr_frags; i++) if (txb->fragments[i]) dev_kfree_skb_any(txb->fragments[i]); kfree(txb); } static struct libipw_txb *libipw_alloc_txb(int nr_frags, int txb_size, int headroom, gfp_t gfp_mask) { struct libipw_txb *txb; int i; txb = kmalloc(sizeof(struct libipw_txb) + (sizeof(u8 *) * nr_frags), gfp_mask); if (!txb) return NULL; memset(txb, 0, 
sizeof(struct libipw_txb)); txb->nr_frags = nr_frags; txb->frag_size = txb_size; for (i = 0; i < nr_frags; i++) { txb->fragments[i] = __dev_alloc_skb(txb_size + headroom, gfp_mask); if (unlikely(!txb->fragments[i])) { i--; break; } skb_reserve(txb->fragments[i], headroom); } if (unlikely(i != nr_frags)) { while (i >= 0) dev_kfree_skb_any(txb->fragments[i--]); kfree(txb); return NULL; } return txb; } static int libipw_classify(struct sk_buff *skb) { struct ethhdr *eth; struct iphdr *ip; eth = (struct ethhdr *)skb->data; if (eth->h_proto != htons(ETH_P_IP)) return 0; ip = ip_hdr(skb); switch (ip->tos & 0xfc) { case 0x20: return 2; case 0x40: return 1; case 0x60: return 3; case 0x80: return 4; case 0xa0: return 5; case 0xc0: return 6; case 0xe0: return 7; default: return 0; } } /* Incoming skb is converted to a txb which consists of * a block of 802.11 fragment packets (stored as skbs) */ netdev_tx_t libipw_xmit(struct sk_buff *skb, struct net_device *dev) { struct libipw_device *ieee = netdev_priv(dev); struct libipw_txb *txb = NULL; struct libipw_hdr_3addrqos *frag_hdr; int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size, rts_required; unsigned long flags; int encrypt, host_encrypt, host_encrypt_msdu; __be16 ether_type; int bytes, fc, hdr_len; struct sk_buff *skb_frag; struct libipw_hdr_3addrqos header = {/* Ensure zero initialized */ .duration_id = 0, .seq_ctl = 0, .qos_ctl = 0 }; u8 dest[ETH_ALEN], src[ETH_ALEN]; struct lib80211_crypt_data *crypt; int priority = skb->priority; int snapped = 0; if (ieee->is_queue_full && (*ieee->is_queue_full) (dev, priority)) return NETDEV_TX_BUSY; spin_lock_irqsave(&ieee->lock, flags); /* If there is no driver handler to take the TXB, dont' bother * creating it... 
*/ if (!ieee->hard_start_xmit) { printk(KERN_WARNING "%s: No xmit handler.\n", ieee->dev->name); goto success; } if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) { printk(KERN_WARNING "%s: skb too small (%d).\n", ieee->dev->name, skb->len); goto success; } ether_type = ((struct ethhdr *)skb->data)->h_proto; crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx]; encrypt = !(ether_type == htons(ETH_P_PAE) && ieee->ieee802_1x) && ieee->sec.encrypt; host_encrypt = ieee->host_encrypt && encrypt && crypt; host_encrypt_msdu = ieee->host_encrypt_msdu && encrypt && crypt; if (!encrypt && ieee->ieee802_1x && ieee->drop_unencrypted && ether_type != htons(ETH_P_PAE)) { dev->stats.tx_dropped++; goto success; } /* Save source and destination addresses */ skb_copy_from_linear_data(skb, dest, ETH_ALEN); skb_copy_from_linear_data_offset(skb, ETH_ALEN, src, ETH_ALEN); if (host_encrypt) fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA | IEEE80211_FCTL_PROTECTED; else fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA; if (ieee->iw_mode == IW_MODE_INFRA) { fc |= IEEE80211_FCTL_TODS; /* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */ memcpy(header.addr1, ieee->bssid, ETH_ALEN); memcpy(header.addr2, src, ETH_ALEN); memcpy(header.addr3, dest, ETH_ALEN); } else if (ieee->iw_mode == IW_MODE_ADHOC) { /* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */ memcpy(header.addr1, dest, ETH_ALEN); memcpy(header.addr2, src, ETH_ALEN); memcpy(header.addr3, ieee->bssid, ETH_ALEN); } hdr_len = LIBIPW_3ADDR_LEN; if (ieee->is_qos_active && ieee->is_qos_active(dev, skb)) { fc |= IEEE80211_STYPE_QOS_DATA; hdr_len += 2; skb->priority = libipw_classify(skb); header.qos_ctl |= cpu_to_le16(skb->priority & LIBIPW_QCTL_TID); } header.frame_ctl = cpu_to_le16(fc); /* Advance the SKB to the start of the payload */ skb_pull(skb, sizeof(struct ethhdr)); /* Determine total amount of storage required for TXB packets */ bytes = skb->len + SNAP_SIZE + sizeof(u16); /* Encrypt msdu first on the whole data 
packet. */ if ((host_encrypt || host_encrypt_msdu) && crypt && crypt->ops && crypt->ops->encrypt_msdu) { int res = 0; int len = bytes + hdr_len + crypt->ops->extra_msdu_prefix_len + crypt->ops->extra_msdu_postfix_len; struct sk_buff *skb_new = dev_alloc_skb(len); if (unlikely(!skb_new)) goto failed; skb_reserve(skb_new, crypt->ops->extra_msdu_prefix_len); memcpy(skb_put(skb_new, hdr_len), &header, hdr_len); snapped = 1; libipw_copy_snap(skb_put(skb_new, SNAP_SIZE + sizeof(u16)), ether_type); skb_copy_from_linear_data(skb, skb_put(skb_new, skb->len), skb->len); res = crypt->ops->encrypt_msdu(skb_new, hdr_len, crypt->priv); if (res < 0) { LIBIPW_ERROR("msdu encryption failed\n"); dev_kfree_skb_any(skb_new); goto failed; } dev_kfree_skb_any(skb); skb = skb_new; bytes += crypt->ops->extra_msdu_prefix_len + crypt->ops->extra_msdu_postfix_len; skb_pull(skb, hdr_len); } if (host_encrypt || ieee->host_open_frag) { /* Determine fragmentation size based on destination (multicast * and broadcast are not fragmented) */ if (is_multicast_ether_addr(dest) || is_broadcast_ether_addr(dest)) frag_size = MAX_FRAG_THRESHOLD; else frag_size = ieee->fts; /* Determine amount of payload per fragment. Regardless of if * this stack is providing the full 802.11 header, one will * eventually be affixed to this fragment -- so we must account * for it when determining the amount of payload space. 
*/ bytes_per_frag = frag_size - hdr_len; if (ieee->config & (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) bytes_per_frag -= LIBIPW_FCS_LEN; /* Each fragment may need to have room for encryption * pre/postfix */ if (host_encrypt) bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len + crypt->ops->extra_mpdu_postfix_len; /* Number of fragments is the total * bytes_per_frag / payload_per_fragment */ nr_frags = bytes / bytes_per_frag; bytes_last_frag = bytes % bytes_per_frag; if (bytes_last_frag) nr_frags++; else bytes_last_frag = bytes_per_frag; } else { nr_frags = 1; bytes_per_frag = bytes_last_frag = bytes; frag_size = bytes + hdr_len; } rts_required = (frag_size > ieee->rts && ieee->config & CFG_LIBIPW_RTS); if (rts_required) nr_frags++; /* When we allocate the TXB we allocate enough space for the reserve * and full fragment bytes (bytes_per_frag doesn't include prefix, * postfix, header, FCS, etc.) */ txb = libipw_alloc_txb(nr_frags, frag_size, ieee->tx_headroom, GFP_ATOMIC); if (unlikely(!txb)) { printk(KERN_WARNING "%s: Could not allocate TXB\n", ieee->dev->name); goto failed; } txb->encrypted = encrypt; if (host_encrypt) txb->payload_size = frag_size * (nr_frags - 1) + bytes_last_frag; else txb->payload_size = bytes; if (rts_required) { skb_frag = txb->fragments[0]; frag_hdr = (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len); /* * Set header frame_ctl to the RTS. */ header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_RTS); memcpy(frag_hdr, &header, hdr_len); /* * Restore header frame_ctl to the original data setting. 
*/ header.frame_ctl = cpu_to_le16(fc); if (ieee->config & (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) skb_put(skb_frag, 4); txb->rts_included = 1; i = 1; } else i = 0; for (; i < nr_frags; i++) { skb_frag = txb->fragments[i]; if (host_encrypt) skb_reserve(skb_frag, crypt->ops->extra_mpdu_prefix_len); frag_hdr = (struct libipw_hdr_3addrqos *)skb_put(skb_frag, hdr_len); memcpy(frag_hdr, &header, hdr_len); /* If this is not the last fragment, then add the MOREFRAGS * bit to the frame control */ if (i != nr_frags - 1) { frag_hdr->frame_ctl = cpu_to_le16(fc | IEEE80211_FCTL_MOREFRAGS); bytes = bytes_per_frag; } else { /* The last fragment takes the remaining length */ bytes = bytes_last_frag; } if (i == 0 && !snapped) { libipw_copy_snap(skb_put (skb_frag, SNAP_SIZE + sizeof(u16)), ether_type); bytes -= SNAP_SIZE + sizeof(u16); } skb_copy_from_linear_data(skb, skb_put(skb_frag, bytes), bytes); /* Advance the SKB... */ skb_pull(skb, bytes); /* Encryption routine will move the header forward in order * to insert the IV between the header and the payload */ if (host_encrypt) libipw_encrypt_fragment(ieee, skb_frag, hdr_len); if (ieee->config & (CFG_LIBIPW_COMPUTE_FCS | CFG_LIBIPW_RESERVE_FCS)) skb_put(skb_frag, 4); } success: spin_unlock_irqrestore(&ieee->lock, flags); dev_kfree_skb_any(skb); if (txb) { netdev_tx_t ret = (*ieee->hard_start_xmit)(txb, dev, priority); if (ret == NETDEV_TX_OK) { dev->stats.tx_packets++; dev->stats.tx_bytes += txb->payload_size; return NETDEV_TX_OK; } libipw_txb_free(txb); } return NETDEV_TX_OK; failed: spin_unlock_irqrestore(&ieee->lock, flags); netif_stop_queue(dev); dev->stats.tx_errors++; return NETDEV_TX_BUSY; } EXPORT_SYMBOL(libipw_xmit); EXPORT_SYMBOL(libipw_txb_free);
gpl-2.0
remicks/android_kernel_lge_hammerhead
drivers/video/matrox/matroxfb_accel.c
9929
14201
/* * * Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200 and G400 * * (c) 1998-2002 Petr Vandrovec <vandrove@vc.cvut.cz> * * Version: 1.65 2002/08/14 * * MTRR stuff: 1998 Tom Rini <trini@kernel.crashing.org> * * Contributors: "menion?" <menion@mindless.com> * Betatesting, fixes, ideas * * "Kurt Garloff" <garloff@suse.de> * Betatesting, fixes, ideas, videomodes, videomodes timmings * * "Tom Rini" <trini@kernel.crashing.org> * MTRR stuff, PPC cleanups, betatesting, fixes, ideas * * "Bibek Sahu" <scorpio@dodds.net> * Access device through readb|w|l and write b|w|l * Extensive debugging stuff * * "Daniel Haun" <haund@usa.net> * Testing, hardware cursor fixes * * "Scott Wood" <sawst46+@pitt.edu> * Fixes * * "Gerd Knorr" <kraxel@goldbach.isdn.cs.tu-berlin.de> * Betatesting * * "Kelly French" <targon@hazmat.com> * "Fernando Herrera" <fherrera@eurielec.etsit.upm.es> * Betatesting, bug reporting * * "Pablo Bianucci" <pbian@pccp.com.ar> * Fixes, ideas, betatesting * * "Inaky Perez Gonzalez" <inaky@peloncho.fis.ucm.es> * Fixes, enhandcements, ideas, betatesting * * "Ryuichi Oikawa" <roikawa@rr.iiij4u.or.jp> * PPC betatesting, PPC support, backward compatibility * * "Paul Womar" <Paul@pwomar.demon.co.uk> * "Owen Waller" <O.Waller@ee.qub.ac.uk> * PPC betatesting * * "Thomas Pornin" <pornin@bolet.ens.fr> * Alpha betatesting * * "Pieter van Leuven" <pvl@iae.nl> * "Ulf Jaenicke-Roessler" <ujr@physik.phy.tu-dresden.de> * G100 testing * * "H. 
Peter Arvin" <hpa@transmeta.com> * Ideas * * "Cort Dougan" <cort@cs.nmt.edu> * CHRP fixes and PReP cleanup * * "Mark Vojkovich" <mvojkovi@ucsd.edu> * G400 support * * (following author is not in any relation with this code, but his code * is included in this driver) * * Based on framebuffer driver for VBE 2.0 compliant graphic boards * (c) 1998 Gerd Knorr <kraxel@cs.tu-berlin.de> * * (following author is not in any relation with this code, but his ideas * were used when writing this driver) * * FreeVBE/AF (Matrox), "Shawn Hargreaves" <shawn@talula.demon.co.uk> * */ #include "matroxfb_accel.h" #include "matroxfb_DAC1064.h" #include "matroxfb_Ti3026.h" #include "matroxfb_misc.h" #define curr_ydstorg(x) ((x)->curr.ydstorg.pixels) #define mga_ydstlen(y,l) mga_outl(M_YDSTLEN | M_EXEC, ((y) << 16) | (l)) static inline void matrox_cfb4_pal(u_int32_t* pal) { unsigned int i; for (i = 0; i < 16; i++) { pal[i] = i * 0x11111111U; } } static inline void matrox_cfb8_pal(u_int32_t* pal) { unsigned int i; for (i = 0; i < 16; i++) { pal[i] = i * 0x01010101U; } } static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area); static void matroxfb_fillrect(struct fb_info* info, const struct fb_fillrect* rect); static void matroxfb_imageblit(struct fb_info* info, const struct fb_image* image); static void matroxfb_cfb4_fillrect(struct fb_info* info, const struct fb_fillrect* rect); static void matroxfb_cfb4_copyarea(struct fb_info* info, const struct fb_copyarea* area); void matrox_cfbX_init(struct matrox_fb_info *minfo) { u_int32_t maccess; u_int32_t mpitch; u_int32_t mopmode; int accel; DBG(__func__) mpitch = minfo->fbcon.var.xres_virtual; minfo->fbops.fb_copyarea = cfb_copyarea; minfo->fbops.fb_fillrect = cfb_fillrect; minfo->fbops.fb_imageblit = cfb_imageblit; minfo->fbops.fb_cursor = NULL; accel = (minfo->fbcon.var.accel_flags & FB_ACCELF_TEXT) == FB_ACCELF_TEXT; switch (minfo->fbcon.var.bits_per_pixel) { case 4: maccess = 0x00000000; /* accelerate as 8bpp 
video */ mpitch = (mpitch >> 1) | 0x8000; /* disable linearization */ mopmode = M_OPMODE_4BPP; matrox_cfb4_pal(minfo->cmap); if (accel && !(mpitch & 1)) { minfo->fbops.fb_copyarea = matroxfb_cfb4_copyarea; minfo->fbops.fb_fillrect = matroxfb_cfb4_fillrect; } break; case 8: maccess = 0x00000000; mopmode = M_OPMODE_8BPP; matrox_cfb8_pal(minfo->cmap); if (accel) { minfo->fbops.fb_copyarea = matroxfb_copyarea; minfo->fbops.fb_fillrect = matroxfb_fillrect; minfo->fbops.fb_imageblit = matroxfb_imageblit; } break; case 16: if (minfo->fbcon.var.green.length == 5) maccess = 0xC0000001; else maccess = 0x40000001; mopmode = M_OPMODE_16BPP; if (accel) { minfo->fbops.fb_copyarea = matroxfb_copyarea; minfo->fbops.fb_fillrect = matroxfb_fillrect; minfo->fbops.fb_imageblit = matroxfb_imageblit; } break; case 24: maccess = 0x00000003; mopmode = M_OPMODE_24BPP; if (accel) { minfo->fbops.fb_copyarea = matroxfb_copyarea; minfo->fbops.fb_fillrect = matroxfb_fillrect; minfo->fbops.fb_imageblit = matroxfb_imageblit; } break; case 32: maccess = 0x00000002; mopmode = M_OPMODE_32BPP; if (accel) { minfo->fbops.fb_copyarea = matroxfb_copyarea; minfo->fbops.fb_fillrect = matroxfb_fillrect; minfo->fbops.fb_imageblit = matroxfb_imageblit; } break; default: maccess = 0x00000000; mopmode = 0x00000000; break; /* turn off acceleration!!! 
*/ } mga_fifo(8); mga_outl(M_PITCH, mpitch); mga_outl(M_YDSTORG, curr_ydstorg(minfo)); if (minfo->capable.plnwt) mga_outl(M_PLNWT, -1); if (minfo->capable.srcorg) { mga_outl(M_SRCORG, 0); mga_outl(M_DSTORG, 0); } mga_outl(M_OPMODE, mopmode); mga_outl(M_CXBNDRY, 0xFFFF0000); mga_outl(M_YTOP, 0); mga_outl(M_YBOT, 0x01FFFFFF); mga_outl(M_MACCESS, maccess); minfo->accel.m_dwg_rect = M_DWG_TRAP | M_DWG_SOLID | M_DWG_ARZERO | M_DWG_SGNZERO | M_DWG_SHIFTZERO; if (isMilleniumII(minfo)) minfo->accel.m_dwg_rect |= M_DWG_TRANSC; minfo->accel.m_opmode = mopmode; } EXPORT_SYMBOL(matrox_cfbX_init); static void matrox_accel_bmove(struct matrox_fb_info *minfo, int vxres, int sy, int sx, int dy, int dx, int height, int width) { int start, end; CRITFLAGS DBG(__func__) CRITBEGIN if ((dy < sy) || ((dy == sy) && (dx <= sx))) { mga_fifo(2); mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO | M_DWG_BFCOL | M_DWG_REPLACE); mga_outl(M_AR5, vxres); width--; start = sy*vxres+sx+curr_ydstorg(minfo); end = start+width; } else { mga_fifo(3); mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE); mga_outl(M_SGN, 5); mga_outl(M_AR5, -vxres); width--; end = (sy+height-1)*vxres+sx+curr_ydstorg(minfo); start = end+width; dy += height-1; } mga_fifo(4); mga_outl(M_AR0, end); mga_outl(M_AR3, start); mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx); mga_ydstlen(dy, height); WaitTillIdle(); CRITEND } static void matrox_accel_bmove_lin(struct matrox_fb_info *minfo, int vxres, int sy, int sx, int dy, int dx, int height, int width) { int start, end; CRITFLAGS DBG(__func__) CRITBEGIN if ((dy < sy) || ((dy == sy) && (dx <= sx))) { mga_fifo(2); mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_SGNZERO | M_DWG_BFCOL | M_DWG_REPLACE); mga_outl(M_AR5, vxres); width--; start = sy*vxres+sx+curr_ydstorg(minfo); end = start+width; } else { mga_fifo(3); mga_outl(M_DWGCTL, M_DWG_BITBLT | M_DWG_SHIFTZERO | M_DWG_BFCOL | M_DWG_REPLACE); mga_outl(M_SGN, 5); mga_outl(M_AR5, 
-vxres); width--; end = (sy+height-1)*vxres+sx+curr_ydstorg(minfo); start = end+width; dy += height-1; } mga_fifo(5); mga_outl(M_AR0, end); mga_outl(M_AR3, start); mga_outl(M_FXBNDRY, ((dx+width)<<16) | dx); mga_outl(M_YDST, dy*vxres >> 5); mga_outl(M_LEN | M_EXEC, height); WaitTillIdle(); CRITEND } static void matroxfb_cfb4_copyarea(struct fb_info* info, const struct fb_copyarea* area) { struct matrox_fb_info *minfo = info2minfo(info); if ((area->sx | area->dx | area->width) & 1) cfb_copyarea(info, area); else matrox_accel_bmove_lin(minfo, minfo->fbcon.var.xres_virtual >> 1, area->sy, area->sx >> 1, area->dy, area->dx >> 1, area->height, area->width >> 1); } static void matroxfb_copyarea(struct fb_info* info, const struct fb_copyarea* area) { struct matrox_fb_info *minfo = info2minfo(info); matrox_accel_bmove(minfo, minfo->fbcon.var.xres_virtual, area->sy, area->sx, area->dy, area->dx, area->height, area->width); } static void matroxfb_accel_clear(struct matrox_fb_info *minfo, u_int32_t color, int sy, int sx, int height, int width) { CRITFLAGS DBG(__func__) CRITBEGIN mga_fifo(5); mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE); mga_outl(M_FCOL, color); mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx); mga_ydstlen(sy, height); WaitTillIdle(); CRITEND } static void matroxfb_fillrect(struct fb_info* info, const struct fb_fillrect* rect) { struct matrox_fb_info *minfo = info2minfo(info); switch (rect->rop) { case ROP_COPY: matroxfb_accel_clear(minfo, ((u_int32_t *)info->pseudo_palette)[rect->color], rect->dy, rect->dx, rect->height, rect->width); break; } } static void matroxfb_cfb4_clear(struct matrox_fb_info *minfo, u_int32_t bgx, int sy, int sx, int height, int width) { int whattodo; CRITFLAGS DBG(__func__) CRITBEGIN whattodo = 0; if (sx & 1) { sx ++; if (!width) return; width --; whattodo = 1; } if (width & 1) { whattodo |= 2; } width >>= 1; sx >>= 1; if (width) { mga_fifo(5); mga_outl(M_DWGCTL, minfo->accel.m_dwg_rect | M_DWG_REPLACE2); mga_outl(M_FCOL, 
bgx); mga_outl(M_FXBNDRY, ((sx + width) << 16) | sx); mga_outl(M_YDST, sy * minfo->fbcon.var.xres_virtual >> 6); mga_outl(M_LEN | M_EXEC, height); WaitTillIdle(); } if (whattodo) { u_int32_t step = minfo->fbcon.var.xres_virtual >> 1; vaddr_t vbase = minfo->video.vbase; if (whattodo & 1) { unsigned int uaddr = sy * step + sx - 1; u_int32_t loop; u_int8_t bgx2 = bgx & 0xF0; for (loop = height; loop > 0; loop --) { mga_writeb(vbase, uaddr, (mga_readb(vbase, uaddr) & 0x0F) | bgx2); uaddr += step; } } if (whattodo & 2) { unsigned int uaddr = sy * step + sx + width; u_int32_t loop; u_int8_t bgx2 = bgx & 0x0F; for (loop = height; loop > 0; loop --) { mga_writeb(vbase, uaddr, (mga_readb(vbase, uaddr) & 0xF0) | bgx2); uaddr += step; } } } CRITEND } static void matroxfb_cfb4_fillrect(struct fb_info* info, const struct fb_fillrect* rect) { struct matrox_fb_info *minfo = info2minfo(info); switch (rect->rop) { case ROP_COPY: matroxfb_cfb4_clear(minfo, ((u_int32_t *)info->pseudo_palette)[rect->color], rect->dy, rect->dx, rect->height, rect->width); break; } } static void matroxfb_1bpp_imageblit(struct matrox_fb_info *minfo, u_int32_t fgx, u_int32_t bgx, const u_int8_t *chardata, int width, int height, int yy, int xx) { u_int32_t step; u_int32_t ydstlen; u_int32_t xlen; u_int32_t ar0; u_int32_t charcell; u_int32_t fxbndry; vaddr_t mmio; int easy; CRITFLAGS DBG_HEAVY(__func__); step = (width + 7) >> 3; charcell = height * step; xlen = (charcell + 3) & ~3; ydstlen = (yy << 16) | height; if (width == step << 3) { ar0 = height * width - 1; easy = 1; } else { ar0 = width - 1; easy = 0; } CRITBEGIN mga_fifo(3); if (easy) mga_outl(M_DWGCTL, M_DWG_ILOAD | M_DWG_SGNZERO | M_DWG_SHIFTZERO | M_DWG_BMONOWF | M_DWG_LINEAR | M_DWG_REPLACE); else mga_outl(M_DWGCTL, M_DWG_ILOAD | M_DWG_SGNZERO | M_DWG_SHIFTZERO | M_DWG_BMONOWF | M_DWG_REPLACE); mga_outl(M_FCOL, fgx); mga_outl(M_BCOL, bgx); fxbndry = ((xx + width - 1) << 16) | xx; mmio = minfo->mmio.vbase; mga_fifo(6); mga_writel(mmio, M_FXBNDRY, 
fxbndry); mga_writel(mmio, M_AR0, ar0); mga_writel(mmio, M_AR3, 0); if (easy) { mga_writel(mmio, M_YDSTLEN | M_EXEC, ydstlen); mga_memcpy_toio(mmio, chardata, xlen); } else { mga_writel(mmio, M_AR5, 0); mga_writel(mmio, M_YDSTLEN | M_EXEC, ydstlen); if ((step & 3) == 0) { /* Great. Source has 32bit aligned lines, so we can feed them directly to the accelerator. */ mga_memcpy_toio(mmio, chardata, charcell); } else if (step == 1) { /* Special case for 1..8bit widths */ while (height--) { #if defined(__BIG_ENDIAN) fb_writel((*chardata) << 24, mmio.vaddr); #else fb_writel(*chardata, mmio.vaddr); #endif chardata++; } } else if (step == 2) { /* Special case for 9..15bit widths */ while (height--) { #if defined(__BIG_ENDIAN) fb_writel((*(u_int16_t*)chardata) << 16, mmio.vaddr); #else fb_writel(*(u_int16_t*)chardata, mmio.vaddr); #endif chardata += 2; } } else { /* Tell... well, why bother... */ while (height--) { size_t i; for (i = 0; i < step; i += 4) { /* Hope that there are at least three readable bytes beyond the end of bitmap */ fb_writel(get_unaligned((u_int32_t*)(chardata + i)),mmio.vaddr); } chardata += step; } } } WaitTillIdle(); CRITEND } static void matroxfb_imageblit(struct fb_info* info, const struct fb_image* image) { struct matrox_fb_info *minfo = info2minfo(info); DBG_HEAVY(__func__); if (image->depth == 1) { u_int32_t fgx, bgx; fgx = ((u_int32_t*)info->pseudo_palette)[image->fg_color]; bgx = ((u_int32_t*)info->pseudo_palette)[image->bg_color]; matroxfb_1bpp_imageblit(minfo, fgx, bgx, image->data, image->width, image->height, image->dy, image->dx); } else { /* Danger! image->depth is useless: logo painting code always passes framebuffer color depth here, although logo data are always 8bpp and info->pseudo_palette is changed to contain logo palette to be used (but only for true/direct-color... sic...). So do it completely in software... */ cfb_imageblit(info, image); } } MODULE_LICENSE("GPL");
gpl-2.0
val2k/linux
drivers/uwb/whc-rc.c
10441
13503
/* * Wireless Host Controller: Radio Control Interface (WHCI v0.95[2.3]) * Radio Control command/event transport to the UWB stack * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * Initialize and hook up the Radio Control interface. * * For each device probed, creates an 'struct whcrc' which contains * just the representation of the UWB Radio Controller, and the logic * for reading notifications and passing them to the UWB Core. * * So we initialize all of those, register the UWB Radio Controller * and setup the notification/event handle to pipe the notifications * to the UWB management Daemon. * * Once uwb_rc_add() is called, the UWB stack takes control, resets * the radio and readies the device to take commands the UWB * API/user-space. * * Note this driver is just a transport driver; the commands are * formed at the UWB stack and given to this driver who will deliver * them to the hw and transfer the replies/notifications back to the * UWB stack through the UWB daemon (UWBD). 
*/ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/sched.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/uwb.h> #include <linux/uwb/whci.h> #include <linux/uwb/umc.h> #include "uwb-internal.h" /** * Descriptor for an instance of the UWB Radio Control Driver that * attaches to the URC interface of the WHCI PCI card. * * Unless there is a lock specific to the 'data members', all access * is protected by uwb_rc->mutex. */ struct whcrc { struct umc_dev *umc_dev; struct uwb_rc *uwb_rc; /* UWB host controller */ unsigned long area; void __iomem *rc_base; size_t rc_len; spinlock_t irq_lock; void *evt_buf, *cmd_buf; dma_addr_t evt_dma_buf, cmd_dma_buf; wait_queue_head_t cmd_wq; struct work_struct event_work; }; /** * Execute an UWB RC command on WHCI/RC * * @rc: Instance of a Radio Controller that is a whcrc * @cmd: Buffer containing the RCCB and payload to execute * @cmd_size: Size of the command buffer. * * We copy the command into whcrc->cmd_buf (as it is pretty and * aligned`and physically contiguous) and then press the right keys in * the controller's URCCMD register to get it to read it. We might * have to wait for the cmd_sem to be open to us. * * NOTE: rc's mutex has to be locked */ static int whcrc_cmd(struct uwb_rc *uwb_rc, const struct uwb_rccb *cmd, size_t cmd_size) { int result = 0; struct whcrc *whcrc = uwb_rc->priv; struct device *dev = &whcrc->umc_dev->dev; u32 urccmd; if (cmd_size >= 4096) return -EINVAL; /* * If the URC is halted, then the hardware has reset itself. * Attempt to recover by restarting the device and then return * an error as it's likely that the current command isn't * valid for a newly started RC. 
*/ if (le_readl(whcrc->rc_base + URCSTS) & URCSTS_HALTED) { dev_err(dev, "requesting reset of halted radio controller\n"); uwb_rc_reset_all(uwb_rc); return -EIO; } result = wait_event_timeout(whcrc->cmd_wq, !(le_readl(whcrc->rc_base + URCCMD) & URCCMD_ACTIVE), HZ/2); if (result == 0) { dev_err(dev, "device is not ready to execute commands\n"); return -ETIMEDOUT; } memmove(whcrc->cmd_buf, cmd, cmd_size); le_writeq(whcrc->cmd_dma_buf, whcrc->rc_base + URCCMDADDR); spin_lock(&whcrc->irq_lock); urccmd = le_readl(whcrc->rc_base + URCCMD); urccmd &= ~(URCCMD_EARV | URCCMD_SIZE_MASK); le_writel(urccmd | URCCMD_ACTIVE | URCCMD_IWR | cmd_size, whcrc->rc_base + URCCMD); spin_unlock(&whcrc->irq_lock); return 0; } static int whcrc_reset(struct uwb_rc *rc) { struct whcrc *whcrc = rc->priv; return umc_controller_reset(whcrc->umc_dev); } /** * Reset event reception mechanism and tell hw we are ready to get more * * We have read all the events in the event buffer, so we are ready to * reset it to the beginning. * * This is only called during initialization or after an event buffer * has been retired. This means we can be sure that event processing * is disabled and it's safe to update the URCEVTADDR register. * * There's no need to wait for the event processing to start as the * URC will not clear URCCMD_ACTIVE until (internal) event buffer * space is available. 
*/ static void whcrc_enable_events(struct whcrc *whcrc) { u32 urccmd; le_writeq(whcrc->evt_dma_buf, whcrc->rc_base + URCEVTADDR); spin_lock(&whcrc->irq_lock); urccmd = le_readl(whcrc->rc_base + URCCMD) & ~URCCMD_ACTIVE; le_writel(urccmd | URCCMD_EARV, whcrc->rc_base + URCCMD); spin_unlock(&whcrc->irq_lock); } static void whcrc_event_work(struct work_struct *work) { struct whcrc *whcrc = container_of(work, struct whcrc, event_work); size_t size; u64 urcevtaddr; urcevtaddr = le_readq(whcrc->rc_base + URCEVTADDR); size = urcevtaddr & URCEVTADDR_OFFSET_MASK; uwb_rc_neh_grok(whcrc->uwb_rc, whcrc->evt_buf, size); whcrc_enable_events(whcrc); } /** * Catch interrupts? * * We ack inmediately (and expect the hw to do the right thing and * raise another IRQ if things have changed :) */ static irqreturn_t whcrc_irq_cb(int irq, void *_whcrc) { struct whcrc *whcrc = _whcrc; struct device *dev = &whcrc->umc_dev->dev; u32 urcsts; urcsts = le_readl(whcrc->rc_base + URCSTS); if (!(urcsts & URCSTS_INT_MASK)) return IRQ_NONE; le_writel(urcsts & URCSTS_INT_MASK, whcrc->rc_base + URCSTS); if (urcsts & URCSTS_HSE) { dev_err(dev, "host system error -- hardware halted\n"); /* FIXME: do something sensible here */ goto out; } if (urcsts & URCSTS_ER) schedule_work(&whcrc->event_work); if (urcsts & URCSTS_RCI) wake_up_all(&whcrc->cmd_wq); out: return IRQ_HANDLED; } /** * Initialize a UMC RC interface: map regions, get (shared) IRQ */ static int whcrc_setup_rc_umc(struct whcrc *whcrc) { int result = 0; struct device *dev = &whcrc->umc_dev->dev; struct umc_dev *umc_dev = whcrc->umc_dev; whcrc->area = umc_dev->resource.start; whcrc->rc_len = resource_size(&umc_dev->resource); result = -EBUSY; if (request_mem_region(whcrc->area, whcrc->rc_len, KBUILD_MODNAME) == NULL) { dev_err(dev, "can't request URC region (%zu bytes @ 0x%lx): %d\n", whcrc->rc_len, whcrc->area, result); goto error_request_region; } whcrc->rc_base = ioremap_nocache(whcrc->area, whcrc->rc_len); if (whcrc->rc_base == NULL) { 
dev_err(dev, "can't ioremap registers (%zu bytes @ 0x%lx): %d\n", whcrc->rc_len, whcrc->area, result); goto error_ioremap_nocache; } result = request_irq(umc_dev->irq, whcrc_irq_cb, IRQF_SHARED, KBUILD_MODNAME, whcrc); if (result < 0) { dev_err(dev, "can't allocate IRQ %d: %d\n", umc_dev->irq, result); goto error_request_irq; } result = -ENOMEM; whcrc->cmd_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, &whcrc->cmd_dma_buf, GFP_KERNEL); if (whcrc->cmd_buf == NULL) { dev_err(dev, "Can't allocate cmd transfer buffer\n"); goto error_cmd_buffer; } whcrc->evt_buf = dma_alloc_coherent(&umc_dev->dev, PAGE_SIZE, &whcrc->evt_dma_buf, GFP_KERNEL); if (whcrc->evt_buf == NULL) { dev_err(dev, "Can't allocate evt transfer buffer\n"); goto error_evt_buffer; } return 0; error_evt_buffer: dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, whcrc->cmd_dma_buf); error_cmd_buffer: free_irq(umc_dev->irq, whcrc); error_request_irq: iounmap(whcrc->rc_base); error_ioremap_nocache: release_mem_region(whcrc->area, whcrc->rc_len); error_request_region: return result; } /** * Release RC's UMC resources */ static void whcrc_release_rc_umc(struct whcrc *whcrc) { struct umc_dev *umc_dev = whcrc->umc_dev; dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->evt_buf, whcrc->evt_dma_buf); dma_free_coherent(&umc_dev->dev, PAGE_SIZE, whcrc->cmd_buf, whcrc->cmd_dma_buf); free_irq(umc_dev->irq, whcrc); iounmap(whcrc->rc_base); release_mem_region(whcrc->area, whcrc->rc_len); } /** * whcrc_start_rc - start a WHCI radio controller * @whcrc: the radio controller to start * * Reset the UMC device, start the radio controller, enable events and * finally enable interrupts. 
*/ static int whcrc_start_rc(struct uwb_rc *rc) { struct whcrc *whcrc = rc->priv; struct device *dev = &whcrc->umc_dev->dev; /* Reset the thing */ le_writel(URCCMD_RESET, whcrc->rc_base + URCCMD); if (whci_wait_for(dev, whcrc->rc_base + URCCMD, URCCMD_RESET, 0, 5000, "hardware reset") < 0) return -EBUSY; /* Set the event buffer, start the controller (enable IRQs later) */ le_writel(0, whcrc->rc_base + URCINTR); le_writel(URCCMD_RS, whcrc->rc_base + URCCMD); if (whci_wait_for(dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, 0, 5000, "radio controller start") < 0) return -ETIMEDOUT; whcrc_enable_events(whcrc); le_writel(URCINTR_EN_ALL, whcrc->rc_base + URCINTR); return 0; } /** * whcrc_stop_rc - stop a WHCI radio controller * @whcrc: the radio controller to stop * * Disable interrupts and cancel any pending event processing work * before clearing the Run/Stop bit. */ static void whcrc_stop_rc(struct uwb_rc *rc) { struct whcrc *whcrc = rc->priv; struct umc_dev *umc_dev = whcrc->umc_dev; le_writel(0, whcrc->rc_base + URCINTR); cancel_work_sync(&whcrc->event_work); le_writel(0, whcrc->rc_base + URCCMD); whci_wait_for(&umc_dev->dev, whcrc->rc_base + URCSTS, URCSTS_HALTED, URCSTS_HALTED, 100, "radio controller stop"); } static void whcrc_init(struct whcrc *whcrc) { spin_lock_init(&whcrc->irq_lock); init_waitqueue_head(&whcrc->cmd_wq); INIT_WORK(&whcrc->event_work, whcrc_event_work); } /** * Initialize the radio controller. * * NOTE: we setup whcrc->uwb_rc before calling uwb_rc_add(); in the * IRQ handler we use that to determine if the hw is ready to * handle events. Looks like a race condition, but it really is * not. 
*/ static int whcrc_probe(struct umc_dev *umc_dev) { int result; struct uwb_rc *uwb_rc; struct whcrc *whcrc; struct device *dev = &umc_dev->dev; result = -ENOMEM; uwb_rc = uwb_rc_alloc(); if (uwb_rc == NULL) { dev_err(dev, "unable to allocate RC instance\n"); goto error_rc_alloc; } whcrc = kzalloc(sizeof(*whcrc), GFP_KERNEL); if (whcrc == NULL) { dev_err(dev, "unable to allocate WHC-RC instance\n"); goto error_alloc; } whcrc_init(whcrc); whcrc->umc_dev = umc_dev; result = whcrc_setup_rc_umc(whcrc); if (result < 0) { dev_err(dev, "Can't setup RC UMC interface: %d\n", result); goto error_setup_rc_umc; } whcrc->uwb_rc = uwb_rc; uwb_rc->owner = THIS_MODULE; uwb_rc->cmd = whcrc_cmd; uwb_rc->reset = whcrc_reset; uwb_rc->start = whcrc_start_rc; uwb_rc->stop = whcrc_stop_rc; result = uwb_rc_add(uwb_rc, dev, whcrc); if (result < 0) goto error_rc_add; umc_set_drvdata(umc_dev, whcrc); return 0; error_rc_add: whcrc_release_rc_umc(whcrc); error_setup_rc_umc: kfree(whcrc); error_alloc: uwb_rc_put(uwb_rc); error_rc_alloc: return result; } /** * Clean up the radio control resources * * When we up the command semaphore, everybody possibly held trying to * execute a command should be granted entry and then they'll see the * host is quiescing and up it (so it will chain to the next waiter). * This should not happen (in any case), as we can only remove when * there are no handles open... 
*/ static void whcrc_remove(struct umc_dev *umc_dev) { struct whcrc *whcrc = umc_get_drvdata(umc_dev); struct uwb_rc *uwb_rc = whcrc->uwb_rc; umc_set_drvdata(umc_dev, NULL); uwb_rc_rm(uwb_rc); whcrc_release_rc_umc(whcrc); kfree(whcrc); uwb_rc_put(uwb_rc); } static int whcrc_pre_reset(struct umc_dev *umc) { struct whcrc *whcrc = umc_get_drvdata(umc); struct uwb_rc *uwb_rc = whcrc->uwb_rc; uwb_rc_pre_reset(uwb_rc); return 0; } static int whcrc_post_reset(struct umc_dev *umc) { struct whcrc *whcrc = umc_get_drvdata(umc); struct uwb_rc *uwb_rc = whcrc->uwb_rc; return uwb_rc_post_reset(uwb_rc); } /* PCI device ID's that we handle [so it gets loaded] */ static struct pci_device_id __used whcrc_id_table[] = { { PCI_DEVICE_CLASS(PCI_CLASS_WIRELESS_WHCI, ~0) }, { /* empty last entry */ } }; MODULE_DEVICE_TABLE(pci, whcrc_id_table); static struct umc_driver whcrc_driver = { .name = "whc-rc", .cap_id = UMC_CAP_ID_WHCI_RC, .probe = whcrc_probe, .remove = whcrc_remove, .pre_reset = whcrc_pre_reset, .post_reset = whcrc_post_reset, }; static int __init whcrc_driver_init(void) { return umc_driver_register(&whcrc_driver); } module_init(whcrc_driver_init); static void __exit whcrc_driver_exit(void) { umc_driver_unregister(&whcrc_driver); } module_exit(whcrc_driver_exit); MODULE_AUTHOR("Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com>"); MODULE_DESCRIPTION("Wireless Host Controller Radio Control Driver"); MODULE_LICENSE("GPL");
gpl-2.0
st47k3r/kernel_2.6.35.10-icong-gb-ea564c7
drivers/char/hw_random/geode-rng.c
12745
3049
/* * RNG driver for AMD Geode RNGs * * Copyright 2005 (c) MontaVista Software, Inc. * * with the majority of the code coming from: * * Hardware driver for the Intel/AMD/VIA Random Number Generators (RNG) * (c) Copyright 2003 Red Hat Inc <jgarzik@redhat.com> * * derived from * * Hardware driver for the AMD 768 Random Number Generator (RNG) * (c) Copyright 2001 Red Hat Inc * * derived from * * Hardware driver for Intel i810 Random Number Generator (RNG) * Copyright 2000,2001 Jeff Garzik <jgarzik@pobox.com> * Copyright 2000,2001 Philipp Rumpf <prumpf@mandrakesoft.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/hw_random.h> #include <linux/delay.h> #include <asm/io.h> #define PFX KBUILD_MODNAME ": " #define GEODE_RNG_DATA_REG 0x50 #define GEODE_RNG_STATUS_REG 0x54 /* * Data for PCI driver interface * * This data only exists for exporting the supported * PCI ids via MODULE_DEVICE_TABLE. We do not actually * register a pci_driver, because someone else might one day * want to register another driver on the same PCI id. 
*/ static const struct pci_device_id pci_tbl[] = { { PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_LX_AES), 0, }, { 0, }, /* terminate list */ }; MODULE_DEVICE_TABLE(pci, pci_tbl); static int geode_rng_data_read(struct hwrng *rng, u32 *data) { void __iomem *mem = (void __iomem *)rng->priv; *data = readl(mem + GEODE_RNG_DATA_REG); return 4; } static int geode_rng_data_present(struct hwrng *rng, int wait) { void __iomem *mem = (void __iomem *)rng->priv; int data, i; for (i = 0; i < 20; i++) { data = !!(readl(mem + GEODE_RNG_STATUS_REG)); if (data || !wait) break; udelay(10); } return data; } static struct hwrng geode_rng = { .name = "geode", .data_present = geode_rng_data_present, .data_read = geode_rng_data_read, }; static int __init mod_init(void) { int err = -ENODEV; struct pci_dev *pdev = NULL; const struct pci_device_id *ent; void __iomem *mem; unsigned long rng_base; for_each_pci_dev(pdev) { ent = pci_match_id(pci_tbl, pdev); if (ent) goto found; } /* Device not found. */ goto out; found: rng_base = pci_resource_start(pdev, 0); if (rng_base == 0) goto out; err = -ENOMEM; mem = ioremap(rng_base, 0x58); if (!mem) goto out; geode_rng.priv = (unsigned long)mem; printk(KERN_INFO "AMD Geode RNG detected\n"); err = hwrng_register(&geode_rng); if (err) { printk(KERN_ERR PFX "RNG registering failed (%d)\n", err); goto err_unmap; } out: return err; err_unmap: iounmap(mem); goto out; } static void __exit mod_exit(void) { void __iomem *mem = (void __iomem *)geode_rng.priv; hwrng_unregister(&geode_rng); iounmap(mem); } module_init(mod_init); module_exit(mod_exit); MODULE_DESCRIPTION("H/W RNG driver for AMD Geode LX CPUs"); MODULE_LICENSE("GPL");
gpl-2.0
somcom3x/tw_herc_kernel
drivers/scsi/aic7xxx/aiclib.c
14793
1599
/* * Implementation of Utility functions for all SCSI device types. * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $ * $Id$ */ #include "aiclib.h"
gpl-2.0
AOSP-SGS2/android_kernel_samsung_galaxynote
drivers/scsi/aic7xxx/aiclib.c
14793
1599
/* * Implementation of Utility functions for all SCSI device types. * * Copyright (c) 1997, 1998, 1999 Justin T. Gibbs. * Copyright (c) 1997, 1998 Kenneth D. Merry. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification, immediately at the beginning of the file. * 2. The name of the author may not be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * $FreeBSD: src/sys/cam/scsi/scsi_all.c,v 1.38 2002/09/23 04:56:35 mjacob Exp $ * $Id$ */ #include "aiclib.h"
gpl-2.0
yoctobsp/linux-yocto-3.14
fs/ocfs2/dlm/dlmrecovery.c
202
88008
/* -*- mode: c; c-basic-offset: 8; -*- * vim: noexpandtab sw=8 ts=8 sts=0: * * dlmrecovery.c * * recovery stuff * * Copyright (C) 2004 Oracle. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public * License along with this program; if not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 021110-1307, USA. * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/random.h> #include <linux/blkdev.h> #include <linux/socket.h> #include <linux/inet.h> #include <linux/timer.h> #include <linux/kthread.h> #include <linux/delay.h> #include "cluster/heartbeat.h" #include "cluster/nodemanager.h" #include "cluster/tcp.h" #include "dlmapi.h" #include "dlmcommon.h" #include "dlmdomain.h" #define MLOG_MASK_PREFIX (ML_DLM|ML_DLM_RECOVERY) #include "cluster/masklog.h" static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node); static int dlm_recovery_thread(void *data); static int dlm_do_recovery(struct dlm_ctxt *dlm); static int dlm_pick_recovery_master(struct dlm_ctxt *dlm); static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node); static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from, u8 dead_node); static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node); static 
inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res); static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres, const char *lockname, int namelen, int total_locks, u64 cookie, u8 flags, u8 master); static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm, struct dlm_migratable_lockres *mres, u8 send_to, struct dlm_lock_resource *res, int total_locks); static int dlm_process_recovery_data(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, struct dlm_migratable_lockres *mres); static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm); static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to); static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node); static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm, struct list_head *list, u8 dead_node); static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm, u8 dead_node, u8 new_master); static void dlm_reco_ast(void *astdata); static void dlm_reco_bast(void *astdata, int blocked_type); static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st); static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data); static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data); static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 *real_master); static u64 dlm_get_next_mig_cookie(void); static DEFINE_SPINLOCK(dlm_reco_state_lock); static DEFINE_SPINLOCK(dlm_mig_cookie_lock); static u64 dlm_mig_cookie = 1; static u64 dlm_get_next_mig_cookie(void) { u64 c; spin_lock(&dlm_mig_cookie_lock); c = dlm_mig_cookie; if (dlm_mig_cookie == (~0ULL)) dlm_mig_cookie = 1; else dlm_mig_cookie++; spin_unlock(&dlm_mig_cookie_lock); return c; } static inline void dlm_set_reco_dead_node(struct dlm_ctxt *dlm, u8 dead_node) { assert_spin_locked(&dlm->spinlock); if (dlm->reco.dead_node != dead_node) mlog(0, "%s: changing dead_node from %u to %u\n", dlm->name, dlm->reco.dead_node, 
dead_node); dlm->reco.dead_node = dead_node; } static inline void dlm_set_reco_master(struct dlm_ctxt *dlm, u8 master) { assert_spin_locked(&dlm->spinlock); mlog(0, "%s: changing new_master from %u to %u\n", dlm->name, dlm->reco.new_master, master); dlm->reco.new_master = master; } static inline void __dlm_reset_recovery(struct dlm_ctxt *dlm) { assert_spin_locked(&dlm->spinlock); clear_bit(dlm->reco.dead_node, dlm->recovery_map); dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM); dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM); } static inline void dlm_reset_recovery(struct dlm_ctxt *dlm) { spin_lock(&dlm->spinlock); __dlm_reset_recovery(dlm); spin_unlock(&dlm->spinlock); } /* Worker function used during recovery. */ void dlm_dispatch_work(struct work_struct *work) { struct dlm_ctxt *dlm = container_of(work, struct dlm_ctxt, dispatched_work); LIST_HEAD(tmp_list); struct dlm_work_item *item, *next; dlm_workfunc_t *workfunc; int tot=0; spin_lock(&dlm->work_lock); list_splice_init(&dlm->work_list, &tmp_list); spin_unlock(&dlm->work_lock); list_for_each_entry(item, &tmp_list, list) { tot++; } mlog(0, "%s: work thread has %d work items\n", dlm->name, tot); list_for_each_entry_safe(item, next, &tmp_list, list) { workfunc = item->func; list_del_init(&item->list); /* already have ref on dlm to avoid having * it disappear. just double-check. 
*/ BUG_ON(item->dlm != dlm); /* this is allowed to sleep and * call network stuff */ workfunc(item, item->data); dlm_put(dlm); kfree(item); } } /* * RECOVERY THREAD */ void dlm_kick_recovery_thread(struct dlm_ctxt *dlm) { /* wake the recovery thread * this will wake the reco thread in one of three places * 1) sleeping with no recovery happening * 2) sleeping with recovery mastered elsewhere * 3) recovery mastered here, waiting on reco data */ wake_up(&dlm->dlm_reco_thread_wq); } /* Launch the recovery thread */ int dlm_launch_recovery_thread(struct dlm_ctxt *dlm) { mlog(0, "starting dlm recovery thread...\n"); dlm->dlm_reco_thread_task = kthread_run(dlm_recovery_thread, dlm, "dlm_reco_thread"); if (IS_ERR(dlm->dlm_reco_thread_task)) { mlog_errno(PTR_ERR(dlm->dlm_reco_thread_task)); dlm->dlm_reco_thread_task = NULL; return -EINVAL; } return 0; } void dlm_complete_recovery_thread(struct dlm_ctxt *dlm) { if (dlm->dlm_reco_thread_task) { mlog(0, "waiting for dlm recovery thread to exit\n"); kthread_stop(dlm->dlm_reco_thread_task); dlm->dlm_reco_thread_task = NULL; } } /* * this is lame, but here's how recovery works... * 1) all recovery threads cluster wide will work on recovering * ONE node at a time * 2) negotiate who will take over all the locks for the dead node. * thats right... ALL the locks. * 3) once a new master is chosen, everyone scans all locks * and moves aside those mastered by the dead guy * 4) each of these locks should be locked until recovery is done * 5) the new master collects up all of secondary lock queue info * one lock at a time, forcing each node to communicate back * before continuing * 6) each secondary lock queue responds with the full known lock info * 7) once the new master has run all its locks, it sends a ALLDONE! 
* message to everyone * 8) upon receiving this message, the secondary queue node unlocks * and responds to the ALLDONE * 9) once the new master gets responses from everyone, he unlocks * everything and recovery for this dead node is done *10) go back to 2) while there are still dead nodes * */ static void dlm_print_reco_node_status(struct dlm_ctxt *dlm) { struct dlm_reco_node_data *ndata; struct dlm_lock_resource *res; mlog(ML_NOTICE, "%s(%d): recovery info, state=%s, dead=%u, master=%u\n", dlm->name, task_pid_nr(dlm->dlm_reco_thread_task), dlm->reco.state & DLM_RECO_STATE_ACTIVE ? "ACTIVE" : "inactive", dlm->reco.dead_node, dlm->reco.new_master); list_for_each_entry(ndata, &dlm->reco.node_data, list) { char *st = "unknown"; switch (ndata->state) { case DLM_RECO_NODE_DATA_INIT: st = "init"; break; case DLM_RECO_NODE_DATA_REQUESTING: st = "requesting"; break; case DLM_RECO_NODE_DATA_DEAD: st = "dead"; break; case DLM_RECO_NODE_DATA_RECEIVING: st = "receiving"; break; case DLM_RECO_NODE_DATA_REQUESTED: st = "requested"; break; case DLM_RECO_NODE_DATA_DONE: st = "done"; break; case DLM_RECO_NODE_DATA_FINALIZE_SENT: st = "finalize-sent"; break; default: st = "bad"; break; } mlog(ML_NOTICE, "%s: reco state, node %u, state=%s\n", dlm->name, ndata->node_num, st); } list_for_each_entry(res, &dlm->reco.resources, recovering) { mlog(ML_NOTICE, "%s: lockres %.*s on recovering list\n", dlm->name, res->lockname.len, res->lockname.name); } } #define DLM_RECO_THREAD_TIMEOUT_MS (5 * 1000) static int dlm_recovery_thread(void *data) { int status; struct dlm_ctxt *dlm = data; unsigned long timeout = msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS); mlog(0, "dlm thread running for %s...\n", dlm->name); while (!kthread_should_stop()) { if (dlm_domain_fully_joined(dlm)) { status = dlm_do_recovery(dlm); if (status == -EAGAIN) { /* do not sleep, recheck immediately. 
*/ continue; } if (status < 0) mlog_errno(status); } wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq, kthread_should_stop(), timeout); } mlog(0, "quitting DLM recovery thread\n"); return 0; } /* returns true when the recovery master has contacted us */ static int dlm_reco_master_ready(struct dlm_ctxt *dlm) { int ready; spin_lock(&dlm->spinlock); ready = (dlm->reco.new_master != O2NM_INVALID_NODE_NUM); spin_unlock(&dlm->spinlock); return ready; } /* returns true if node is no longer in the domain * could be dead or just not joined */ int dlm_is_node_dead(struct dlm_ctxt *dlm, u8 node) { int dead; spin_lock(&dlm->spinlock); dead = !test_bit(node, dlm->domain_map); spin_unlock(&dlm->spinlock); return dead; } /* returns true if node is no longer in the domain * could be dead or just not joined */ static int dlm_is_node_recovered(struct dlm_ctxt *dlm, u8 node) { int recovered; spin_lock(&dlm->spinlock); recovered = !test_bit(node, dlm->recovery_map); spin_unlock(&dlm->spinlock); return recovered; } void dlm_wait_for_node_death(struct dlm_ctxt *dlm, u8 node, int timeout) { if (dlm_is_node_dead(dlm, node)) return; printk(KERN_NOTICE "o2dlm: Waiting on the death of node %u in " "domain %s\n", node, dlm->name); if (timeout) wait_event_timeout(dlm->dlm_reco_thread_wq, dlm_is_node_dead(dlm, node), msecs_to_jiffies(timeout)); else wait_event(dlm->dlm_reco_thread_wq, dlm_is_node_dead(dlm, node)); } void dlm_wait_for_node_recovery(struct dlm_ctxt *dlm, u8 node, int timeout) { if (dlm_is_node_recovered(dlm, node)) return; printk(KERN_NOTICE "o2dlm: Waiting on the recovery of node %u in " "domain %s\n", node, dlm->name); if (timeout) wait_event_timeout(dlm->dlm_reco_thread_wq, dlm_is_node_recovered(dlm, node), msecs_to_jiffies(timeout)); else wait_event(dlm->dlm_reco_thread_wq, dlm_is_node_recovered(dlm, node)); } /* callers of the top-level api calls (dlmlock/dlmunlock) should * block on the dlm->reco.event when recovery is in progress. 
 * the dlm recovery thread will set this state when it begins
 * recovering a dead node (as the new master or not) and clear
 * the state and wake as soon as all affected lock resources have
 * been marked with the RECOVERY flag */
static int dlm_in_recovery(struct dlm_ctxt *dlm)
{
	int in_recovery;
	spin_lock(&dlm->spinlock);
	in_recovery = !!(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	spin_unlock(&dlm->spinlock);
	return in_recovery;
}

/* Block until the active recovery pass (if any) completes. */
void dlm_wait_for_recovery(struct dlm_ctxt *dlm)
{
	if (dlm_in_recovery(dlm)) {
		mlog(0, "%s: reco thread %d in recovery: "
		     "state=%d, master=%u, dead=%u\n",
		     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
		     dlm->reco.state, dlm->reco.new_master,
		     dlm->reco.dead_node);
	}
	wait_event(dlm->reco.event, !dlm_in_recovery(dlm));
}

/* Mark the domain as actively recovering.  Must not already be
 * active -- the recovery thread is the only caller. */
static void dlm_begin_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(dlm->reco.state & DLM_RECO_STATE_ACTIVE);
	printk(KERN_NOTICE "o2dlm: Begin recovery on domain %s for node %u\n",
	       dlm->name, dlm->reco.dead_node);
	dlm->reco.state |= DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
}

/* Clear the ACTIVE state and wake everyone parked in
 * dlm_wait_for_recovery(). */
static void dlm_end_recovery(struct dlm_ctxt *dlm)
{
	spin_lock(&dlm->spinlock);
	BUG_ON(!(dlm->reco.state & DLM_RECO_STATE_ACTIVE));
	dlm->reco.state &= ~DLM_RECO_STATE_ACTIVE;
	spin_unlock(&dlm->spinlock);
	printk(KERN_NOTICE "o2dlm: End recovery on domain %s\n", dlm->name);
	wake_up(&dlm->reco.event);
}

static void dlm_print_recovery_master(struct dlm_ctxt *dlm)
{
	printk(KERN_NOTICE "o2dlm: Node %u (%s) is the Recovery Master for the "
	       "dead node %u in domain %s\n", dlm->reco.new_master,
	       (dlm->node_num == dlm->reco.new_master ? "me" : "he"),
	       dlm->reco.dead_node, dlm->name);
}

/* One pass of the recovery state machine: pick a dead node out of the
 * recovery map, elect (or learn) the recovery master, and if that is
 * us, remaster the dead node's locks.  Returns 0 when there is nothing
 * (more) to do this pass, -EAGAIN to request an immediate re-run. */
static int dlm_do_recovery(struct dlm_ctxt *dlm)
{
	int status = 0;
	int ret;

	spin_lock(&dlm->spinlock);

	/* check to see if the new master has died */
	if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM &&
	    test_bit(dlm->reco.new_master, dlm->recovery_map)) {
		mlog(0, "new master %u died while recovering %u!\n",
		     dlm->reco.new_master, dlm->reco.dead_node);
		/* unset the new_master, leave dead_node */
		dlm_set_reco_master(dlm, O2NM_INVALID_NODE_NUM);
	}

	/* select a target to recover */
	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		int bit;

		bit = find_next_bit (dlm->recovery_map, O2NM_MAX_NODES, 0);
		if (bit >= O2NM_MAX_NODES || bit < 0)
			dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
		else
			dlm_set_reco_dead_node(dlm, bit);
	} else if (!test_bit(dlm->reco.dead_node, dlm->recovery_map)) {
		/* BUG? */
		mlog(ML_ERROR, "dead_node %u no longer in recovery map!\n",
		     dlm->reco.dead_node);
		dlm_set_reco_dead_node(dlm, O2NM_INVALID_NODE_NUM);
	}

	if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
		// mlog(0, "nothing to recover! sleeping now!\n");
		spin_unlock(&dlm->spinlock);
		/* return to main thread loop and sleep. */
		return 0;
	}
	mlog(0, "%s(%d):recovery thread found node %u in the recovery map!\n",
	     dlm->name, task_pid_nr(dlm->dlm_reco_thread_task),
	     dlm->reco.dead_node);
	spin_unlock(&dlm->spinlock);

	/* take write barrier */
	/* (stops the list reshuffling thread, proxy ast handling) */
	dlm_begin_recovery(dlm);

	if (dlm->reco.new_master == dlm->node_num)
		goto master_here;

	if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
		/* choose a new master, returns 0 if this node
		 * is the master, -EEXIST if it's another node.
		 * this does not return until a new master is chosen
		 * or recovery completes entirely. */
		ret = dlm_pick_recovery_master(dlm);
		if (!ret) {
			/* already notified everyone.  go. */
			goto master_here;
		}
		mlog(0, "another node will master this recovery session.\n");
	}

	dlm_print_recovery_master(dlm);

	/* it is safe to start everything back up here
	 * because all of the dead node's lock resources
	 * have been marked as in-recovery */
	dlm_end_recovery(dlm);

	/* sleep out in main dlm_recovery_thread loop. */
	return 0;

master_here:
	dlm_print_recovery_master(dlm);

	status = dlm_remaster_locks(dlm, dlm->reco.dead_node);
	if (status < 0) {
		/* we should never hit this anymore */
		mlog(ML_ERROR, "%s: Error %d remastering locks for node %u, "
		     "retrying.\n", dlm->name, status, dlm->reco.dead_node);
		/* yield a bit to allow any final network messages
		 * to get handled on remaining nodes */
		msleep(100);
	} else {
		/* success! see if any other nodes need recovery */
		mlog(0, "DONE mastering recovery of %s:%u here(this=%u)!\n",
		     dlm->name, dlm->reco.dead_node, dlm->node_num);
		spin_lock(&dlm->spinlock);
		__dlm_reset_recovery(dlm);
		dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
		spin_unlock(&dlm->spinlock);
	}
	dlm_end_recovery(dlm);

	/* continue and look for another dead node */
	return -EAGAIN;
}

/* As recovery master: collect lock state for @dead_node from every
 * live node, wait until all of them report DONE (or die), then send
 * the finalize message and fold the recovered lockreses into local
 * ownership.  Retries allocation and per-node requests internally, so
 * a negative return should not happen in practice. */
static int dlm_remaster_locks(struct dlm_ctxt *dlm, u8 dead_node)
{
	int status = 0;
	struct dlm_reco_node_data *ndata;
	int all_nodes_done;
	int destroy = 0;
	int pass = 0;

	do {
		/* we have become recovery master.  there is no escaping
		 * this, so just keep trying until we get it. */
		status = dlm_init_recovery_area(dlm, dead_node);
		if (status < 0) {
			mlog(ML_ERROR, "%s: failed to alloc recovery area, "
			     "retrying\n", dlm->name);
			msleep(1000);
		}
	} while (status != 0);

	/* safe to access the node data list without a lock, since this
	 * process is the only one to change the list */
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		BUG_ON(ndata->state != DLM_RECO_NODE_DATA_INIT);
		ndata->state = DLM_RECO_NODE_DATA_REQUESTING;

		mlog(0, "%s: Requesting lock info from node %u\n", dlm->name,
		     ndata->node_num);

		if (ndata->node_num == dlm->node_num) {
			/* no request needed for our own locks */
			ndata->state = DLM_RECO_NODE_DATA_DONE;
			continue;
		}

		do {
			status = dlm_request_all_locks(dlm, ndata->node_num,
						       dead_node);
			if (status < 0) {
				mlog_errno(status);
				if (dlm_is_host_down(status)) {
					/* node died, ignore it for recovery */
					status = 0;
					ndata->state = DLM_RECO_NODE_DATA_DEAD;
					/* wait for the domain map to catch up
					 * with the network state. */
					wait_event_timeout(dlm->dlm_reco_thread_wq,
							   dlm_is_node_dead(dlm,
								ndata->node_num),
							   msecs_to_jiffies(1000));
					mlog(0, "waited 1 sec for %u, "
					     "dead? %s\n", ndata->node_num,
					     dlm_is_node_dead(dlm, ndata->node_num) ?
					     "yes" : "no");
				} else {
					/* -ENOMEM on the other node */
					mlog(0, "%s: node %u returned "
					     "%d during recovery, retrying "
					     "after a short wait\n",
					     dlm->name, ndata->node_num,
					     status);
					msleep(100);
				}
			}
		} while (status != 0);

		spin_lock(&dlm_reco_state_lock);
		switch (ndata->state) {
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
			case DLM_RECO_NODE_DATA_REQUESTED:
				BUG();
				break;
			case DLM_RECO_NODE_DATA_DEAD:
				mlog(0, "node %u died after requesting "
				     "recovery info for node %u\n",
				     ndata->node_num, dead_node);
				/* fine.  don't need this node's info.
				 * continue without it. */
				break;
			case DLM_RECO_NODE_DATA_REQUESTING:
				ndata->state = DLM_RECO_NODE_DATA_REQUESTED;
				mlog(0, "now receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_RECEIVING:
				mlog(0, "already receiving recovery data from "
				     "node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
			case DLM_RECO_NODE_DATA_DONE:
				mlog(0, "already DONE receiving recovery data "
				     "from node %u for dead node %u\n",
				     ndata->node_num, dead_node);
				break;
		}
		spin_unlock(&dlm_reco_state_lock);
	}

	mlog(0, "%s: Done requesting all lock info\n", dlm->name);

	/* nodes should be sending reco data now
	 * just need to wait */

	while (1) {
		/* check all the nodes now to see if we are
		 * done, or if anyone died */
		all_nodes_done = 1;
		spin_lock(&dlm_reco_state_lock);
		list_for_each_entry(ndata, &dlm->reco.node_data, list) {
			mlog(0, "checking recovery state of node %u\n",
			     ndata->node_num);
			switch (ndata->state) {
				case DLM_RECO_NODE_DATA_INIT:
				case DLM_RECO_NODE_DATA_REQUESTING:
					mlog(ML_ERROR, "bad ndata state for "
					     "node %u: state=%d\n",
					     ndata->node_num, ndata->state);
					BUG();
					break;
				case DLM_RECO_NODE_DATA_DEAD:
					mlog(0, "node %u died after "
					     "requesting recovery info for "
					     "node %u\n", ndata->node_num,
					     dead_node);
					break;
				case DLM_RECO_NODE_DATA_RECEIVING:
				case DLM_RECO_NODE_DATA_REQUESTED:
					mlog(0, "%s: node %u still in state %s\n",
					     dlm->name, ndata->node_num,
					     ndata->state==DLM_RECO_NODE_DATA_RECEIVING ?
					     "receiving" : "requested");
					all_nodes_done = 0;
					break;
				case DLM_RECO_NODE_DATA_DONE:
					mlog(0, "%s: node %u state is done\n",
					     dlm->name, ndata->node_num);
					break;
				case DLM_RECO_NODE_DATA_FINALIZE_SENT:
					mlog(0, "%s: node %u state is finalize\n",
					     dlm->name, ndata->node_num);
					break;
			}
		}
		spin_unlock(&dlm_reco_state_lock);

		mlog(0, "pass #%d, all_nodes_done?: %s\n", ++pass,
		     all_nodes_done?"yes":"no");
		if (all_nodes_done) {
			int ret;

			/* Set this flag on recovery master to avoid
			 * a new recovery for another dead node start
			 * before the recovery is not done. That may
			 * cause recovery hung.*/
			spin_lock(&dlm->spinlock);
			dlm->reco.state |= DLM_RECO_STATE_FINALIZE;
			spin_unlock(&dlm->spinlock);

			/* all nodes are now in DLM_RECO_NODE_DATA_DONE state
			 * just send a finalize message to everyone and
			 * clean up */
			mlog(0, "all nodes are done! send finalize\n");
			ret = dlm_send_finalize_reco_message(dlm);
			if (ret < 0)
				mlog_errno(ret);

			spin_lock(&dlm->spinlock);
			dlm_finish_local_lockres_recovery(dlm, dead_node,
							  dlm->node_num);
			spin_unlock(&dlm->spinlock);
			mlog(0, "should be done with recovery!\n");

			mlog(0, "finishing recovery of %s at %lu, "
			     "dead=%u, this=%u, new=%u\n", dlm->name,
			     jiffies, dlm->reco.dead_node,
			     dlm->node_num, dlm->reco.new_master);
			destroy = 1;
			status = 0;
			/* rescan everything marked dirty along the way */
			dlm_kick_thread(dlm, NULL);
			break;
		}
		/* wait to be signalled, with periodic timeout
		 * to check for node death */
		wait_event_interruptible_timeout(dlm->dlm_reco_thread_wq,
					 kthread_should_stop(),
					 msecs_to_jiffies(DLM_RECO_THREAD_TIMEOUT_MS));

	}

	if (destroy)
		dlm_destroy_recovery_area(dlm, dead_node);

	return status;
}

/* Build dlm->reco.node_data: one tracking entry per live domain
 * member (excluding the dead node).  Returns -ENOMEM on allocation
 * failure after tearing down any partial list. */
static int dlm_init_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	int num=0;
	struct dlm_reco_node_data *ndata;

	spin_lock(&dlm->spinlock);
	memcpy(dlm->reco.node_map, dlm->domain_map, sizeof(dlm->domain_map));
	/* nodes can only be removed (by dying) after dropping
	 * this lock, and death will be trapped later, so this should do */
	spin_unlock(&dlm->spinlock);

	while (1) {
		num = find_next_bit (dlm->reco.node_map, O2NM_MAX_NODES, num);
		if (num >= O2NM_MAX_NODES) {
			break;
		}
		BUG_ON(num == dead_node);

		ndata = kzalloc(sizeof(*ndata), GFP_NOFS);
		if (!ndata) {
			dlm_destroy_recovery_area(dlm, dead_node);
			return -ENOMEM;
		}
		ndata->node_num = num;
		ndata->state = DLM_RECO_NODE_DATA_INIT;
		spin_lock(&dlm_reco_state_lock);
		list_add_tail(&ndata->list, &dlm->reco.node_data);
		spin_unlock(&dlm_reco_state_lock);
		num++;
	}

	return 0;
}

/* Free every entry on dlm->reco.node_data.  The list is detached
 * under dlm_reco_state_lock so entries can be freed lock-free. */
static void dlm_destroy_recovery_area(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_reco_node_data *ndata, *next;
	LIST_HEAD(tmplist);

	spin_lock(&dlm_reco_state_lock);
	list_splice_init(&dlm->reco.node_data, &tmplist);
	spin_unlock(&dlm_reco_state_lock);

	list_for_each_entry_safe(ndata, next, &tmplist, list) {
		list_del_init(&ndata->list);
		kfree(ndata);
	}
}

/* Ask @request_from to send us all of its lock state for @dead_node.
 * Returns 0 on success, the remote status, or a negative network
 * error (negative status handled by the caller's retry loop). */
static int dlm_request_all_locks(struct dlm_ctxt *dlm, u8 request_from,
				 u8 dead_node)
{
	struct dlm_lock_request lr;
	int ret;
	int status;

	mlog(0, "\n");

	mlog(0, "dlm_request_all_locks: dead node is %u, sending request "
	     "to %u\n", dead_node, request_from);

	memset(&lr, 0, sizeof(lr));
	lr.node_idx = dlm->node_num;
	lr.dead_node = dead_node;

	// send message
	ret = o2net_send_message(DLM_LOCK_REQUEST_MSG, dlm->key,
				 &lr, sizeof(lr), request_from, &status);

	/* negative status is handled by caller */
	if (ret < 0)
		mlog(ML_ERROR, "%s: Error %d send LOCK_REQUEST to node %u "
		     "to recover dead node %u\n", dlm->name, ret,
		     request_from, dead_node);
	else
		ret = status;
	// return from here, then
	// sleep until all received or error
	return ret;
}

/* Network handler for DLM_LOCK_REQUEST_MSG: the recovery master wants
 * our lock state for the dead node.  Queue the heavy lifting onto the
 * domain workqueue (dlm_request_all_locks_worker) and return. */
int dlm_request_all_locks_handler(struct o2net_msg *msg, u32 len, void *data,
				  void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_lock_request *lr = (struct dlm_lock_request *)msg->buf;
	char *buf = NULL;
	struct dlm_work_item *item = NULL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	if (lr->dead_node != dlm->reco.dead_node) {
		mlog(ML_ERROR, "%s: node %u sent dead_node=%u, but local "
		     "dead_node is %u\n", dlm->name, lr->node_idx,
		     lr->dead_node, dlm->reco.dead_node);
		dlm_print_reco_node_status(dlm);
		/* this is a hack */
		dlm_put(dlm);
		return -ENOMEM;
	}
	BUG_ON(lr->dead_node != dlm->reco.dead_node);

	item = kzalloc(sizeof(*item), GFP_NOFS);
	if (!item) {
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* this will get freed by dlm_request_all_locks_worker */
	buf = (char *) __get_free_page(GFP_NOFS);
	if (!buf) {
		kfree(item);
		dlm_put(dlm);
		return -ENOMEM;
	}

	/* queue up work for dlm_request_all_locks_worker */
	dlm_grab(dlm);	/* get an extra ref for the work item */
	dlm_init_work_item(dlm, item, dlm_request_all_locks_worker, buf);
	item->u.ral.reco_master = lr->node_idx;
	item->u.ral.dead_node = lr->dead_node;
	spin_lock(&dlm->work_lock);
	list_add_tail(&item->list, &dlm->work_list);
	spin_unlock(&dlm->work_lock);
	queue_work(dlm->dlm_worker, &dlm->dispatched_work);

	dlm_put(dlm);
	return 0;
}

/* Workqueue half of the lock-request handler: stream every lockres we
 * hold for the dead node to the recovery master, then send ALL DONE.
 * @data is the page allocated by the handler; freed here. */
static void dlm_request_all_locks_worker(struct dlm_work_item *item, void *data)
{
	struct dlm_migratable_lockres *mres;
	struct dlm_lock_resource *res;
	struct dlm_ctxt *dlm;
	LIST_HEAD(resources);
	int ret;
	u8 dead_node, reco_master;
	int skip_all_done = 0;

	dlm = item->dlm;
	dead_node = item->u.ral.dead_node;
	reco_master = item->u.ral.reco_master;
	mres = (struct dlm_migratable_lockres *)data;

	mlog(0, "%s: recovery worker started, dead=%u, master=%u\n",
	     dlm->name, dead_node, reco_master);

	if (dead_node != dlm->reco.dead_node ||
	    reco_master != dlm->reco.new_master) {
		/* worker could have been created before the recovery master
		 * died.  if so, do not continue, but do not error. */
		if (dlm->reco.new_master == O2NM_INVALID_NODE_NUM) {
			mlog(ML_NOTICE, "%s: will not send recovery state, "
			     "recovery master %u died, thread=(dead=%u,mas=%u)"
			     " current=(dead=%u,mas=%u)\n", dlm->name,
			     reco_master, dead_node, reco_master,
			     dlm->reco.dead_node, dlm->reco.new_master);
		} else {
			mlog(ML_NOTICE, "%s: reco state invalid: reco(dead=%u, "
			     "master=%u), request(dead=%u, master=%u)\n",
			     dlm->name, dlm->reco.dead_node,
			     dlm->reco.new_master, dead_node, reco_master);
		}
		goto leave;
	}

	/* lock resources should have already been moved to the
	 * dlm->reco.resources list.  now move items from that list
	 * to a temp list if the dead owner matches.  note that the
	 * whole cluster recovers only one node at a time, so we
	 * can safely move UNKNOWN lock resources for each recovery
	 * session. */
	dlm_move_reco_locks_to_list(dlm, &resources, dead_node);

	/* now we can begin blasting lockreses without the dlm lock */

	/* any errors returned will be due to the new_master dying,
	 * the dlm_reco_thread should detect this */
	list_for_each_entry(res, &resources, recovering) {
		ret = dlm_send_one_lockres(dlm, res, mres, reco_master,
					   DLM_MRES_RECOVERY);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery state for dead node %u, ret=%d\n", dlm->name,
			     reco_master, dead_node, ret);
			skip_all_done = 1;
			break;
		}
	}

	/* move the resources back to the list */
	spin_lock(&dlm->spinlock);
	list_splice_init(&resources, &dlm->reco.resources);
	spin_unlock(&dlm->spinlock);

	if (!skip_all_done) {
		ret = dlm_send_all_done_msg(dlm, dead_node, reco_master);
		if (ret < 0) {
			mlog(ML_ERROR, "%s: node %u went down while sending "
			     "recovery all-done for dead node %u, ret=%d\n",
			     dlm->name, reco_master, dead_node, ret);
		}
	}
leave:
	free_page((unsigned long)data);
}

/* Tell the recovery master we have finished sending all of our lock
 * state for @dead_node.  Any error other than the master itself dying
 * is fatal (BUG). */
static int dlm_send_all_done_msg(struct dlm_ctxt *dlm, u8 dead_node, u8 send_to)
{
	int ret, tmpret;
	struct dlm_reco_data_done done_msg;

	memset(&done_msg, 0, sizeof(done_msg));
	done_msg.node_idx = dlm->node_num;
	done_msg.dead_node = dead_node;
	mlog(0, "sending DATA DONE message to %u, "
	     "my node=%u, dead node=%u\n", send_to, done_msg.node_idx,
	     done_msg.dead_node);

	ret = o2net_send_message(DLM_RECO_DATA_DONE_MSG, dlm->key, &done_msg,
				 sizeof(done_msg), send_to, &tmpret);
	if (ret < 0) {
		mlog(ML_ERROR, "%s: Error %d send RECO_DATA_DONE to node %u "
		     "to recover dead node %u\n", dlm->name, ret, send_to,
		     dead_node);
		if (!dlm_is_host_down(ret)) {
			BUG();
		}
	} else
		ret = tmpret;
	return ret;
}

/* Network handler for DLM_RECO_DATA_DONE_MSG (recovery master side):
 * mark the sending node's reco entry DONE and kick the recovery
 * thread so it can re-check whether everyone has finished. */
int dlm_reco_data_done_handler(struct o2net_msg *msg, u32 len, void *data,
			       void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_reco_data_done *done = (struct dlm_reco_data_done *)msg->buf;
	struct dlm_reco_node_data *ndata = NULL;
	int ret = -EINVAL;

	if (!dlm_grab(dlm))
		return -EINVAL;

	mlog(0, "got DATA DONE: dead_node=%u, reco.dead_node=%u, "
	     "node_idx=%u, this node=%u\n", done->dead_node,
	     dlm->reco.dead_node, done->node_idx, dlm->node_num);

	mlog_bug_on_msg((done->dead_node != dlm->reco.dead_node),
			"Got DATA DONE: dead_node=%u, reco.dead_node=%u, "
			"node_idx=%u, this node=%u\n", done->dead_node,
			dlm->reco.dead_node, done->node_idx, dlm->node_num);

	spin_lock(&dlm_reco_state_lock);
	list_for_each_entry(ndata, &dlm->reco.node_data, list) {
		if (ndata->node_num != done->node_idx)
			continue;

		switch (ndata->state) {
			/* should have moved beyond INIT but not to FINALIZE yet */
			case DLM_RECO_NODE_DATA_INIT:
			case DLM_RECO_NODE_DATA_DEAD:
			case DLM_RECO_NODE_DATA_FINALIZE_SENT:
				mlog(ML_ERROR, "bad ndata state for node %u:"
				     " state=%d\n", ndata->node_num,
				     ndata->state);
				BUG();
				break;
			/* these states are possible at this point, anywhere along
			 * the line of recovery */
			case DLM_RECO_NODE_DATA_DONE:
			case DLM_RECO_NODE_DATA_RECEIVING:
			case DLM_RECO_NODE_DATA_REQUESTED:
			case DLM_RECO_NODE_DATA_REQUESTING:
				mlog(0, "node %u is DONE sending "
				     "recovery data!\n",
				     ndata->node_num);

				ndata->state = DLM_RECO_NODE_DATA_DONE;
				ret = 0;
				break;
		}
	}
	spin_unlock(&dlm_reco_state_lock);

	/* wake the recovery thread, some node is done */
	if (!ret)
		dlm_kick_recovery_thread(dlm);

	if (ret < 0)
		mlog(ML_ERROR, "failed to find recovery node data for node "
		     "%u\n", done->node_idx);
	dlm_put(dlm);

	mlog(0, "leaving reco data done handler, ret=%d\n", ret);
	return ret;
}

/* Move every lockres on dlm->reco.resources that was owned by
 * @dead_node (or has UNKNOWN owner) onto @list for sending to the
 * recovery master.  $RECOVERY lockreses are never sent; instead any
 * granted $RECOVERY lock held by the dead node is pruned here to
 * avoid later recovery hangs. */
static void dlm_move_reco_locks_to_list(struct dlm_ctxt *dlm,
					struct list_head *list,
					u8 dead_node)
{
	struct dlm_lock_resource *res, *next;
	struct dlm_lock *lock;

	spin_lock(&dlm->spinlock);
	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		/* always prune any $RECOVERY entries for dead nodes,
		 * otherwise hangs can occur during later recovery */
		if (dlm_is_recovery_lock(res->lockname.name,
					 res->lockname.len)) {
			spin_lock(&res->spinlock);
			list_for_each_entry(lock, &res->granted, list) {
				if (lock->ml.node == dead_node) {
					mlog(0, "AHA! there was "
					     "a $RECOVERY lock for dead "
					     "node %u (%s)!\n",
					     dead_node, dlm->name);
					list_del_init(&lock->list);
					dlm_lock_put(lock);
					break;
				}
			}
			spin_unlock(&res->spinlock);
			continue;
		}

		if (res->owner == dead_node) {
			mlog(0, "found lockres owned by dead node while "
			     "doing recovery for node %u. sending it.\n",
			     dead_node);
			list_move_tail(&res->recovering, list);
		} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "found UNKNOWN owner while doing recovery "
			     "for node %u. sending it.\n", dead_node);
			list_move_tail(&res->recovering, list);
		}
	}
	spin_unlock(&dlm->spinlock);
}

/* Count locks across the granted, converting and blocked queues.
 * Relies on the three list heads being laid out consecutively in
 * struct dlm_lock_resource (queue++ walks to the next queue). */
static inline int dlm_num_locks_in_lockres(struct dlm_lock_resource *res)
{
	int total_locks = 0;
	struct list_head *iter, *queue = &res->granted;
	int i;

	for (i=0; i<3; i++) {
		list_for_each(iter, queue)
			total_locks++;
		queue++;
	}
	return total_locks;
}

/* Send the currently-accumulated batch of migratable locks in @mres
 * to @send_to, tagging the final batch with DLM_MRES_ALL_DONE, then
 * reset @mres for the next batch.  @total_locks is how many locks
 * have been added so far across all batches. */
static int dlm_send_mig_lockres_msg(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lockres *mres,
				    u8 send_to,
				    struct dlm_lock_resource *res,
				    int total_locks)
{
	u64 mig_cookie = be64_to_cpu(mres->mig_cookie);
	int mres_total_locks = be32_to_cpu(mres->total_locks);
	int sz, ret = 0, status = 0;
	u8 orig_flags = mres->flags,
	   orig_master = mres->master;

	BUG_ON(mres->num_locks > DLM_MAX_MIGRATABLE_LOCKS);
	if (!mres->num_locks)
		return 0;

	sz = sizeof(struct dlm_migratable_lockres) +
		(mres->num_locks * sizeof(struct dlm_migratable_lock));

	/* add an all-done flag if we reached the last lock */
	orig_flags = mres->flags;
	BUG_ON(total_locks > mres_total_locks);
	if (total_locks == mres_total_locks)
		mres->flags |= DLM_MRES_ALL_DONE;

	mlog(0, "%s:%.*s: sending mig lockres (%s) to %u\n",
	     dlm->name, res->lockname.len, res->lockname.name,
	     orig_flags & DLM_MRES_MIGRATION ? "migration" : "recovery",
	     send_to);

	/* send it */
	ret = o2net_send_message(DLM_MIG_LOCKRES_MSG, dlm->key, mres,
				 sz, send_to, &status);
	if (ret < 0) {
		/* XXX: negative status is not handled.
		 * this will end up killing this node. */
		mlog(ML_ERROR, "%s: res %.*s, Error %d send MIG_LOCKRES to "
		     "node %u (%s)\n", dlm->name, mres->lockname_len,
		     mres->lockname, ret, send_to,
		     (orig_flags & DLM_MRES_MIGRATION ?
		      "migration" : "recovery"));
	} else {
		/* might get an -ENOMEM back here */
		ret = status;
		if (ret < 0) {
			mlog_errno(ret);

			if (ret == -EFAULT) {
				mlog(ML_ERROR, "node %u told me to kill "
				     "myself!\n", send_to);
				BUG();
			}
		}
	}

	/* zero and reinit the message buffer */
	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, mres_total_locks,
				    mig_cookie, orig_flags, orig_master);
	return ret;
}

/* Reset the (page-sized) migration message buffer and fill in the
 * per-lockres header fields. */
static void dlm_init_migratable_lockres(struct dlm_migratable_lockres *mres,
					const char *lockname, int namelen,
					int total_locks, u64 cookie,
					u8 flags, u8 master)
{
	/* mres here is one full page */
	clear_page(mres);
	mres->lockname_len = namelen;
	memcpy(mres->lockname, lockname, namelen);
	mres->num_locks = 0;
	mres->total_locks = cpu_to_be32(total_locks);
	mres->mig_cookie = cpu_to_be64(cookie);
	mres->flags = flags;
	mres->master = master;
}

/* Copy @lock's lvb into the outgoing @mres if this lock level can
 * carry a valid lvb.  BUGs if two locks on the same lockres disagree
 * about the lvb contents, since that means corruption. */
static void dlm_prepare_lvb_for_migration(struct dlm_lock *lock,
					  struct dlm_migratable_lockres *mres,
					  int queue)
{
	if (!lock->lksb)
		return;

	/* Ignore lvb in all locks in the blocked list */
	if (queue == DLM_BLOCKED_LIST)
		return;

	/* Only consider lvbs in locks with granted EX or PR lock levels */
	if (lock->ml.type != LKM_EXMODE && lock->ml.type != LKM_PRMODE)
		return;

	if (dlm_lvb_is_empty(mres->lvb)) {
		memcpy(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN);
		return;
	}

	/* Ensure the lvb copied for migration matches in other valid locks */
	if (!memcmp(mres->lvb, lock->lksb->lvb, DLM_LVB_LEN))
		return;

	mlog(ML_ERROR, "Mismatched lvb in lock cookie=%u:%llu, name=%.*s, "
	     "node=%u\n",
	     dlm_get_lock_cookie_node(be64_to_cpu(lock->ml.cookie)),
	     dlm_get_lock_cookie_seq(be64_to_cpu(lock->ml.cookie)),
	     lock->lockres->lockname.len, lock->lockres->lockname.name,
	     lock->ml.node);
	dlm_print_one_lock_resource(lock->lockres);
	BUG();
}

/* returns 1 if this lock fills the network structure,
 * 0
 * otherwise */
static int dlm_add_lock_to_array(struct dlm_lock *lock,
				 struct dlm_migratable_lockres *mres, int queue)
{
	struct dlm_migratable_lock *ml;
	int lock_num = mres->num_locks;

	ml = &(mres->ml[lock_num]);
	ml->cookie = lock->ml.cookie;
	ml->type = lock->ml.type;
	ml->convert_type = lock->ml.convert_type;
	ml->highest_blocked = lock->ml.highest_blocked;
	ml->list = queue;
	if (lock->lksb) {
		ml->flags = lock->lksb->flags;
		dlm_prepare_lvb_for_migration(lock, mres, queue);
	}
	ml->node = lock->ml.node;
	mres->num_locks++;
	/* we reached the max, send this network message */
	if (mres->num_locks == DLM_MAX_MIGRATABLE_LOCKS)
		return 1;
	return 0;
}

/* Append a specially-crafted "dummy" lock (all IVMODE, zero cookie,
 * blocked list) carrying only this node's number, so the receiver
 * keeps a mastery reference for a lockres that has no real locks. */
static void dlm_add_dummy_lock(struct dlm_ctxt *dlm,
			       struct dlm_migratable_lockres *mres)
{
	struct dlm_lock dummy;
	memset(&dummy, 0, sizeof(dummy));
	dummy.ml.cookie = 0;
	dummy.ml.type = LKM_IVMODE;
	dummy.ml.convert_type = LKM_IVMODE;
	dummy.ml.highest_blocked = LKM_IVMODE;
	dummy.lksb = NULL;
	dummy.ml.node = dlm->node_num;
	dlm_add_lock_to_array(&dummy, mres, DLM_BLOCKED_LIST);
}

/* Recognize the dummy lock created by dlm_add_dummy_lock(); on match,
 * report the sending node via @nodenum and return 1. */
static inline int dlm_is_dummy_lock(struct dlm_ctxt *dlm,
				    struct dlm_migratable_lock *ml,
				    u8 *nodenum)
{
	if (unlikely(ml->cookie == 0 &&
	    ml->type == LKM_IVMODE &&
	    ml->convert_type == LKM_IVMODE &&
	    ml->highest_blocked == LKM_IVMODE &&
	    ml->list == DLM_BLOCKED_LIST)) {
		*nodenum = ml->node;
		return 1;
	}
	return 0;
}

/* Ship every lock on @res (granted/converting/blocked queues) to
 * @send_to, batching locks into page-sized MIG_LOCKRES messages.  If
 * the lockres has no locks at all, a dummy lock is sent so the
 * receiver still records a mastery reference. */
int dlm_send_one_lockres(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			 struct dlm_migratable_lockres *mres,
			 u8 send_to, u8 flags)
{
	struct list_head *queue;
	int total_locks, i;
	u64 mig_cookie = 0;
	struct dlm_lock *lock;
	int ret = 0;

	BUG_ON(!(flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION)));

	mlog(0, "sending to %u\n", send_to);

	total_locks = dlm_num_locks_in_lockres(res);
	if (total_locks > DLM_MAX_MIGRATABLE_LOCKS) {
		/* rare, but possible */
		mlog(0, "argh. lockres has %d locks. this will "
		     "require more than one network packet to "
		     "migrate\n", total_locks);
		mig_cookie = dlm_get_next_mig_cookie();
	}

	dlm_init_migratable_lockres(mres, res->lockname.name,
				    res->lockname.len, total_locks,
				    mig_cookie, flags, res->owner);

	total_locks = 0;
	for (i=DLM_GRANTED_LIST; i<=DLM_BLOCKED_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			/* add another lock. */
			total_locks++;
			if (!dlm_add_lock_to_array(lock, mres, i))
				continue;

			/* this filled the lock message,
			 * we must send it immediately. */
			ret = dlm_send_mig_lockres_msg(dlm, mres, send_to,
						       res, total_locks);
			if (ret < 0)
				goto error;
		}
	}
	if (total_locks == 0) {
		/* send a dummy lock to indicate a mastery reference only */
		mlog(0, "%s:%.*s: sending dummy lock to %u, %s\n",
		     dlm->name, res->lockname.len, res->lockname.name,
		     send_to, flags & DLM_MRES_RECOVERY ? "recovery" :
		     "migration");
		dlm_add_dummy_lock(dlm, mres);
	}
	/* flush any remaining locks */
	ret = dlm_send_mig_lockres_msg(dlm, mres, send_to, res, total_locks);
	if (ret < 0)
		goto error;
	return ret;

error:
	mlog(ML_ERROR, "%s: dlm_send_mig_lockres_msg returned %d\n",
	     dlm->name, ret);
	if (!dlm_is_host_down(ret))
		BUG();
	mlog(0, "%s: node %u went down while sending %s "
	     "lockres %.*s\n", dlm->name, send_to,
	     flags & DLM_MRES_RECOVERY ? "recovery" : "migration",
	     res->lockname.len, res->lockname.name);
	return ret;
}

/*
 * this message will contain no more than one page worth of
 * recovery data, and it will work on only one lockres.
 * there may be many locks in this page, and we may need to wait
 * for additional packets to complete all the locks (rare, but
 * possible).
 */
/*
 * NOTE: the allocation error cases here are scary
 * we really cannot afford to fail an alloc in recovery
 * do we spin?
returning an error only delays the problem really */ int dlm_mig_lockres_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_migratable_lockres *mres = (struct dlm_migratable_lockres *)msg->buf; int ret = 0; u8 real_master; u8 extra_refs = 0; char *buf = NULL; struct dlm_work_item *item = NULL; struct dlm_lock_resource *res = NULL; if (!dlm_grab(dlm)) return -EINVAL; BUG_ON(!(mres->flags & (DLM_MRES_RECOVERY|DLM_MRES_MIGRATION))); real_master = mres->master; if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { /* cannot migrate a lockres with no master */ BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); } mlog(0, "%s message received from node %u\n", (mres->flags & DLM_MRES_RECOVERY) ? "recovery" : "migration", mres->master); if (mres->flags & DLM_MRES_ALL_DONE) mlog(0, "all done flag. all lockres data received!\n"); ret = -ENOMEM; buf = kmalloc(be16_to_cpu(msg->data_len), GFP_NOFS); item = kzalloc(sizeof(*item), GFP_NOFS); if (!buf || !item) goto leave; /* lookup the lock to see if we have a secondary queue for this * already... just add the locks in and this will have its owner * and RECOVERY flag changed when it completes. 
*/ res = dlm_lookup_lockres(dlm, mres->lockname, mres->lockname_len); if (res) { /* this will get a ref on res */ /* mark it as recovering/migrating and hash it */ spin_lock(&res->spinlock); if (mres->flags & DLM_MRES_RECOVERY) { res->state |= DLM_LOCK_RES_RECOVERING; } else { if (res->state & DLM_LOCK_RES_MIGRATING) { /* this is at least the second * lockres message */ mlog(0, "lock %.*s is already migrating\n", mres->lockname_len, mres->lockname); } else if (res->state & DLM_LOCK_RES_RECOVERING) { /* caller should BUG */ mlog(ML_ERROR, "node is attempting to migrate " "lock %.*s, but marked as recovering!\n", mres->lockname_len, mres->lockname); ret = -EFAULT; spin_unlock(&res->spinlock); dlm_lockres_put(res); goto leave; } res->state |= DLM_LOCK_RES_MIGRATING; } spin_unlock(&res->spinlock); } else { /* need to allocate, just like if it was * mastered here normally */ res = dlm_new_lockres(dlm, mres->lockname, mres->lockname_len); if (!res) goto leave; /* to match the ref that we would have gotten if * dlm_lookup_lockres had succeeded */ dlm_lockres_get(res); /* mark it as recovering/migrating and hash it */ if (mres->flags & DLM_MRES_RECOVERY) res->state |= DLM_LOCK_RES_RECOVERING; else res->state |= DLM_LOCK_RES_MIGRATING; spin_lock(&dlm->spinlock); __dlm_insert_lockres(dlm, res); spin_unlock(&dlm->spinlock); /* Add an extra ref for this lock-less lockres lest the * dlm_thread purges it before we get the chance to add * locks to it */ dlm_lockres_get(res); /* There are three refs that need to be put. * 1. Taken above. * 2. kref_init in dlm_new_lockres()->dlm_init_lockres(). * 3. dlm_lookup_lockres() * The first one is handled at the end of this function. The * other two are handled in the worker thread after locks have * been attached. Yes, we don't wait for purge time to match * kref_init. 
The lockres will still have atleast one ref * added because it is in the hash __dlm_insert_lockres() */ extra_refs++; /* now that the new lockres is inserted, * make it usable by other processes */ spin_lock(&res->spinlock); res->state &= ~DLM_LOCK_RES_IN_PROGRESS; spin_unlock(&res->spinlock); wake_up(&res->wq); } /* at this point we have allocated everything we need, * and we have a hashed lockres with an extra ref and * the proper res->state flags. */ ret = 0; spin_lock(&res->spinlock); /* drop this either when master requery finds a different master * or when a lock is added by the recovery worker */ dlm_lockres_grab_inflight_ref(dlm, res); if (mres->master == DLM_LOCK_RES_OWNER_UNKNOWN) { /* migration cannot have an unknown master */ BUG_ON(!(mres->flags & DLM_MRES_RECOVERY)); mlog(0, "recovery has passed me a lockres with an " "unknown owner.. will need to requery: " "%.*s\n", mres->lockname_len, mres->lockname); } else { /* take a reference now to pin the lockres, drop it * when locks are added in the worker */ dlm_change_lockres_owner(dlm, res, dlm->node_num); } spin_unlock(&res->spinlock); /* queue up work for dlm_mig_lockres_worker */ dlm_grab(dlm); /* get an extra ref for the work item */ memcpy(buf, msg->buf, be16_to_cpu(msg->data_len)); /* copy the whole message */ dlm_init_work_item(dlm, item, dlm_mig_lockres_worker, buf); item->u.ml.lockres = res; /* already have a ref */ item->u.ml.real_master = real_master; item->u.ml.extra_ref = extra_refs; spin_lock(&dlm->work_lock); list_add_tail(&item->list, &dlm->work_list); spin_unlock(&dlm->work_lock); queue_work(dlm->dlm_worker, &dlm->dispatched_work); leave: /* One extra ref taken needs to be put here */ if (extra_refs) dlm_lockres_put(res); dlm_put(dlm); if (ret < 0) { kfree(buf); kfree(item); mlog_errno(ret); } return ret; } static void dlm_mig_lockres_worker(struct dlm_work_item *item, void *data) { struct dlm_ctxt *dlm; struct dlm_migratable_lockres *mres; int ret = 0; struct dlm_lock_resource *res; u8 
real_master; u8 extra_ref; dlm = item->dlm; mres = (struct dlm_migratable_lockres *)data; res = item->u.ml.lockres; real_master = item->u.ml.real_master; extra_ref = item->u.ml.extra_ref; if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { /* this case is super-rare. only occurs if * node death happens during migration. */ again: ret = dlm_lockres_master_requery(dlm, res, &real_master); if (ret < 0) { mlog(0, "dlm_lockres_master_requery ret=%d\n", ret); goto again; } if (real_master == DLM_LOCK_RES_OWNER_UNKNOWN) { mlog(0, "lockres %.*s not claimed. " "this node will take it.\n", res->lockname.len, res->lockname.name); } else { spin_lock(&res->spinlock); dlm_lockres_drop_inflight_ref(dlm, res); spin_unlock(&res->spinlock); mlog(0, "master needs to respond to sender " "that node %u still owns %.*s\n", real_master, res->lockname.len, res->lockname.name); /* cannot touch this lockres */ goto leave; } } ret = dlm_process_recovery_data(dlm, res, mres); if (ret < 0) mlog(0, "dlm_process_recovery_data returned %d\n", ret); else mlog(0, "dlm_process_recovery_data succeeded\n"); if ((mres->flags & (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) == (DLM_MRES_MIGRATION|DLM_MRES_ALL_DONE)) { ret = dlm_finish_migration(dlm, res, mres->master); if (ret < 0) mlog_errno(ret); } leave: /* See comment in dlm_mig_lockres_handler() */ if (res) { if (extra_ref) dlm_lockres_put(res); dlm_lockres_put(res); } kfree(data); } static int dlm_lockres_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res, u8 *real_master) { struct dlm_node_iter iter; int nodenum; int ret = 0; *real_master = DLM_LOCK_RES_OWNER_UNKNOWN; /* we only reach here if one of the two nodes in a * migration died while the migration was in progress. * at this point we need to requery the master. we * know that the new_master got as far as creating * an mle on at least one node, but we do not know * if any nodes had actually cleared the mle and set * the master to the new_master. 
the old master
	 * is supposed to set the owner to UNKNOWN in the
	 * event of a new_master death, so the only possible
	 * responses that we can get from nodes here are
	 * that the master is new_master, or that the master
	 * is UNKNOWN.
	 * if all nodes come back with UNKNOWN then we know
	 * the lock needs remastering here.
	 * if any node comes back with a valid master, check
	 * to see if that master is the one that we are
	 * recovering. if so, then the new_master died and
	 * we need to remaster this lock. if not, then the
	 * new_master survived and that node will respond to
	 * other nodes about the owner.
	 * if there is an owner, this node needs to dump this
	 * lockres and alert the sender that this lockres
	 * was rejected. */
	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		/* do not send to self */
		if (nodenum == dlm->node_num)
			continue;
		ret = dlm_do_master_requery(dlm, res, nodenum, real_master);
		if (ret < 0) {
			mlog_errno(ret);
			if (!dlm_is_host_down(ret))
				BUG();
			/* host is down, so answer for that node would be
			 * DLM_LOCK_RES_OWNER_UNKNOWN. continue. */
		}
		if (*real_master != DLM_LOCK_RES_OWNER_UNKNOWN) {
			mlog(0, "lock master is %u\n", *real_master);
			break;
		}
	}
	return ret;
}

/*
 * Send a DLM_MASTER_REQUERY_MSG for @res to @nodenum and store the
 * reported owner in *real_master.  Returns 0 on a successful exchange,
 * or the o2net error.
 */
int dlm_do_master_requery(struct dlm_ctxt *dlm, struct dlm_lock_resource *res,
			  u8 nodenum, u8 *real_master)
{
	int ret = -EINVAL;
	struct dlm_master_requery req;
	int status = DLM_LOCK_RES_OWNER_UNKNOWN;

	memset(&req, 0, sizeof(req));
	req.node_idx = dlm->node_num;
	req.namelen = res->lockname.len;
	memcpy(req.name, res->lockname.name, res->lockname.len);

	ret = o2net_send_message(DLM_MASTER_REQUERY_MSG, dlm->key,
				 &req, sizeof(req), nodenum, &status);
	/* XXX: negative status not handled properly here.
*/ if (ret < 0) mlog(ML_ERROR, "Error %d when sending message %u (key " "0x%x) to node %u\n", ret, DLM_MASTER_REQUERY_MSG, dlm->key, nodenum); else { BUG_ON(status < 0); BUG_ON(status > DLM_LOCK_RES_OWNER_UNKNOWN); *real_master = (u8) (status & 0xff); mlog(0, "node %u responded to master requery with %u\n", nodenum, *real_master); ret = 0; } return ret; } /* this function cannot error, so unless the sending * or receiving of the message failed, the owner can * be trusted */ int dlm_master_requery_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_master_requery *req = (struct dlm_master_requery *)msg->buf; struct dlm_lock_resource *res = NULL; unsigned int hash; int master = DLM_LOCK_RES_OWNER_UNKNOWN; u32 flags = DLM_ASSERT_MASTER_REQUERY; if (!dlm_grab(dlm)) { /* since the domain has gone away on this * node, the proper response is UNKNOWN */ return master; } hash = dlm_lockid_hash(req->name, req->namelen); spin_lock(&dlm->spinlock); res = __dlm_lookup_lockres(dlm, req->name, req->namelen, hash); if (res) { spin_lock(&res->spinlock); master = res->owner; if (master == dlm->node_num) { int ret = dlm_dispatch_assert_master(dlm, res, 0, 0, flags); if (ret < 0) { mlog_errno(-ENOMEM); /* retry!? */ BUG(); } } else /* put.. incase we are not the master */ dlm_lockres_put(res); spin_unlock(&res->spinlock); } spin_unlock(&dlm->spinlock); dlm_put(dlm); return master; } static inline struct list_head * dlm_list_num_to_pointer(struct dlm_lock_resource *res, int list_num) { struct list_head *ret; BUG_ON(list_num < 0); BUG_ON(list_num > 2); ret = &(res->granted); ret += list_num; return ret; } /* TODO: do ast flush business * TODO: do MIGRATING and RECOVERING spinning */ /* * NOTE about in-flight requests during migration: * * Before attempting the migrate, the master has marked the lockres as * MIGRATING and then flushed all of its pending ASTS. 
So any in-flight
 * requests either got queued before the MIGRATING flag got set, in which
 * case the lock data will reflect the change and a return message is on
 * the way, or the request failed to get in before MIGRATING got set.  In
 * this case, the caller will be told to spin and wait for the MIGRATING
 * flag to be dropped, then recheck the master.
 * This holds true for the convert, cancel and unlock cases, and since lvb
 * updates are tied to these same messages, it applies to lvb updates as
 * well.  For the lock case, there is no way a lock can be on the master
 * queue and not be on the secondary queue, since the lock is always added
 * locally first.  This means that the new target node will never be sent
 * a lock that it does not already have on its list.
 * In total, this means that the local lock is correct and should not be
 * updated to match the one sent by the master.  Any messages sent back
 * from the master before the MIGRATING flag will bring the lock properly
 * up-to-date, and the change will be ordered properly for the waiter.
 * We will *not* attempt to modify the lock underneath the waiter.
 */
/*
 * Replay the locks described in @mres onto @res.  For locks owned by
 * this node (migration only) the existing lock is just moved to the
 * queue the master reported; for remote locks a new dlm_lock is
 * allocated and appended, and the lvb is merged in where valid.
 */
static int dlm_process_recovery_data(struct dlm_ctxt *dlm,
				     struct dlm_lock_resource *res,
				     struct dlm_migratable_lockres *mres)
{
	struct dlm_migratable_lock *ml;
	struct list_head *queue, *iter;
	struct list_head *tmpq = NULL;
	struct dlm_lock *newlock = NULL;
	struct dlm_lockstatus *lksb = NULL;
	int ret = 0;
	int i, j, bad;
	struct dlm_lock *lock;
	u8 from = O2NM_MAX_NODES;
	unsigned int added = 0;
	__be64 c;

	mlog(0, "running %d locks for this lockres\n", mres->num_locks);
	for (i=0; i<mres->num_locks; i++) {
		ml = &(mres->ml[i]);

		if (dlm_is_dummy_lock(dlm, ml, &from)) {
			/* placeholder, just need to set the refmap bit */
			BUG_ON(mres->num_locks != 1);
			mlog(0, "%s:%.*s: dummy lock for %u\n",
			     dlm->name, mres->lockname_len, mres->lockname,
			     from);
			spin_lock(&res->spinlock);
			dlm_lockres_set_refmap_bit(dlm, res, from);
			spin_unlock(&res->spinlock);
			added++;
			break;
		}
		BUG_ON(ml->highest_blocked != LKM_IVMODE);
		newlock = NULL;
		lksb = NULL;

		queue = dlm_list_num_to_pointer(res, ml->list);
		tmpq = NULL;

		/* if the lock is for the local node it needs to
		 * be moved to the proper location within the queue.
		 * do not allocate a new lock structure. */
		if (ml->node == dlm->node_num) {
			/* MIGRATION ONLY! */
			BUG_ON(!(mres->flags & DLM_MRES_MIGRATION));

			lock = NULL;
			spin_lock(&res->spinlock);
			/* search all three queues for the matching cookie */
			for (j = DLM_GRANTED_LIST; j <= DLM_BLOCKED_LIST; j++) {
				tmpq = dlm_list_idx_to_ptr(res, j);
				list_for_each(iter, tmpq) {
					lock = list_entry(iter,
						  struct dlm_lock, list);
					if (lock->ml.cookie == ml->cookie)
						break;
					lock = NULL;
				}
				if (lock)
					break;
			}

			/* lock is always created locally first, and
			 * destroyed locally last.
it must be on the list */
			if (!lock) {
				c = ml->cookie;
				mlog(ML_ERROR, "Could not find local lock "
					       "with cookie %u:%llu, node %u, "
					       "list %u, flags 0x%x, type %d, "
					       "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			if (lock->ml.node != ml->node) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "Mismatched node# in lock "
				     "cookie %u:%llu, name %.*s, node %u\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     res->lockname.len, res->lockname.name,
				     lock->ml.node);
				c = ml->cookie;
				mlog(ML_ERROR, "Migrate lock cookie %u:%llu, "
				     "node %u, list %u, flags 0x%x, type %d, "
				     "conv %d, highest blocked %d\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     ml->node, ml->list, ml->flags, ml->type,
				     ml->convert_type, ml->highest_blocked);
				__dlm_print_one_lock_resource(res);
				BUG();
			}

			/* the local lock was found on a different queue than
			 * the master reported; per the NOTE above the local
			 * state wins, so skip this entry entirely */
			if (tmpq != queue) {
				c = ml->cookie;
				mlog(0, "Lock cookie %u:%llu was on list %u "
				     "instead of list %u for %.*s\n",
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)),
				     j, ml->list, res->lockname.len,
				     res->lockname.name);
				__dlm_print_one_lock_resource(res);
				spin_unlock(&res->spinlock);
				continue;
			}

			/* see NOTE above about why we do not update
			 * to match the master here */

			/* move the lock to its proper place */
			/* do not alter lock refcount. switching lists. */
			list_move_tail(&lock->list, queue);
			spin_unlock(&res->spinlock);
			added++;

			mlog(0, "just reordered a local lock!\n");
			continue;
		}

		/* lock is for another node.
 */
		newlock = dlm_new_lock(ml->type, ml->node,
				       be64_to_cpu(ml->cookie), NULL);
		if (!newlock) {
			ret = -ENOMEM;
			goto leave;
		}
		lksb = newlock->lksb;
		dlm_lock_attach_lockres(newlock, res);

		if (ml->convert_type != LKM_IVMODE) {
			BUG_ON(queue != &res->converting);
			newlock->ml.convert_type = ml->convert_type;
		}
		lksb->flags |= (ml->flags &
				(DLM_LKSB_PUT_LVB|DLM_LKSB_GET_LVB));

		/* NL locks carry no lvb */
		if (ml->type == LKM_NLMODE)
			goto skip_lvb;

		/*
		 * If the lock is in the blocked list it can't have a valid lvb,
		 * so skip it
		 */
		if (ml->list == DLM_BLOCKED_LIST)
			goto skip_lvb;

		if (!dlm_lvb_is_empty(mres->lvb)) {
			if (lksb->flags & DLM_LKSB_PUT_LVB) {
				/* other node was trying to update
				 * lvb when node died. recreate the
				 * lksb with the updated lvb. */
				memcpy(lksb->lvb, mres->lvb, DLM_LVB_LEN);
				/* the lock resource lvb update must happen
				 * NOW, before the spinlock is dropped.
				 * we no longer wait for the AST to update
				 * the lvb. */
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			} else {
				/* otherwise, the node is sending its
				 * most recent valid lvb info */
				BUG_ON(ml->type != LKM_EXMODE &&
				       ml->type != LKM_PRMODE);
				if (!dlm_lvb_is_empty(res->lvb) &&
				    (ml->type == LKM_EXMODE ||
				     memcmp(res->lvb, mres->lvb, DLM_LVB_LEN))) {
					int i;
					mlog(ML_ERROR, "%s:%.*s: received bad "
					     "lvb! type=%d\n", dlm->name,
					     res->lockname.len,
					     res->lockname.name, ml->type);
					printk("lockres lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", res->lvb[i]);
					printk("]\nmigrated lvb=[");
					for (i=0; i<DLM_LVB_LEN; i++)
						printk("%02x", mres->lvb[i]);
					printk("]\n");
					dlm_print_one_lock_resource(res);
					BUG();
				}
				memcpy(res->lvb, mres->lvb, DLM_LVB_LEN);
			}
		}
skip_lvb:

		/* NOTE:
		 * wrt lock queue ordering and recovery:
		 *   1. order of locks on granted queue is
		 *      meaningless.
		 *   2. order of locks on converting queue is
		 *      LOST with the node death. sorry charlie.
		 *   3. order of locks on the blocked queue is
		 *      also LOST.
		 * order of locks does not affect integrity, it
		 * just means that a lock request may get pushed
		 * back in line as a result of the node death.
 * also note that for a given node the lock order
		 * for its secondary queue locks is preserved
		 * relative to each other, but clearly *not*
		 * preserved relative to locks from other nodes.
		 */
		bad = 0;
		spin_lock(&res->spinlock);
		/* guard against replaying the same lock twice */
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.cookie == ml->cookie) {
				c = lock->ml.cookie;
				mlog(ML_ERROR, "%s:%.*s: %u:%llu: lock already "
				     "exists on this lockres!\n", dlm->name,
				     res->lockname.len, res->lockname.name,
				     dlm_get_lock_cookie_node(be64_to_cpu(c)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(c)));

				mlog(ML_NOTICE, "sent lock: type=%d, conv=%d, "
				     "node=%u, cookie=%u:%llu, queue=%d\n",
				     ml->type, ml->convert_type, ml->node,
				     dlm_get_lock_cookie_node(be64_to_cpu(ml->cookie)),
				     dlm_get_lock_cookie_seq(be64_to_cpu(ml->cookie)),
				     ml->list);

				__dlm_print_one_lock_resource(res);
				bad = 1;
				break;
			}
		}
		if (!bad) {
			dlm_lock_get(newlock);
			list_add_tail(&newlock->list, queue);
			mlog(0, "%s:%.*s: added lock for node %u, "
			     "setting refmap bit\n", dlm->name,
			     res->lockname.len, res->lockname.name, ml->node);
			dlm_lockres_set_refmap_bit(dlm, res, ml->node);
			added++;
		}
		spin_unlock(&res->spinlock);
	}
	mlog(0, "done running all the locks\n");

leave:
	/* balance the ref taken when the work was queued */
	spin_lock(&res->spinlock);
	dlm_lockres_drop_inflight_ref(dlm, res);
	spin_unlock(&res->spinlock);

	if (ret < 0) {
		mlog_errno(ret);
		if (newlock)
			dlm_lock_put(newlock);
	}

	return ret;
}

/*
 * Mark @res RECOVERING, park it on dlm->reco.resources (taking a ref
 * held while it sits there), and revert/commit any lock, convert,
 * unlock or cancel operations that were pending against the dead
 * master.  Caller holds dlm->spinlock and res->spinlock.
 */
void dlm_move_lockres_to_recovery_list(struct dlm_ctxt *dlm,
				       struct dlm_lock_resource *res)
{
	int i;
	struct list_head *queue;
	struct dlm_lock *lock, *next;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);
	res->state |= DLM_LOCK_RES_RECOVERING;
	if (!list_empty(&res->recovering)) {
		mlog(0,
		     "Recovering res %s:%.*s, is already on recovery list!\n",
		     dlm->name, res->lockname.len, res->lockname.name);
		list_del_init(&res->recovering);
		dlm_lockres_put(res);
	}
	/* We need to hold a reference while on the recovery list */
	dlm_lockres_get(res);
	list_add_tail(&res->recovering,
&dlm->reco.resources);

	/* find any pending locks and put them back on proper list */
	for (i=DLM_BLOCKED_LIST; i>=DLM_GRANTED_LIST; i--) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry_safe(lock, next, queue, list) {
			dlm_lock_get(lock);
			if (lock->convert_pending) {
				/* move converting lock back to granted */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with convert pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_revert_pending_convert(res, lock);
				lock->convert_pending = 0;
			} else if (lock->lock_pending) {
				/* remove pending lock requests completely */
				BUG_ON(i != DLM_BLOCKED_LIST);
				mlog(0, "node died with lock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				/* lock will be floating until ref in
				 * dlmlock_remote is freed after the network
				 * call returns. ok for it to not be on any
				 * list since no ast can be called
				 * (the master is dead). */
				dlm_revert_pending_lock(res, lock);
				lock->lock_pending = 0;
			} else if (lock->unlock_pending) {
				/* if an unlock was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master. note that the dlm_unlock
				 * call is still responsible for calling
				 * the unlockast. that will happen after
				 * the network call times out. for now,
				 * just move lists to prepare the new
				 * recovery master. */
				BUG_ON(i != DLM_GRANTED_LIST);
				mlog(0, "node died with unlock pending "
				     "on %.*s. remove from blocked list and skip.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_unlock(res, lock);
				lock->unlock_pending = 0;
			} else if (lock->cancel_pending) {
				/* if a cancel was in progress, treat as
				 * if this had completed successfully
				 * before sending this lock state to the
				 * new master */
				BUG_ON(i != DLM_CONVERTING_LIST);
				mlog(0, "node died with cancel pending "
				     "on %.*s. move back to granted list.\n",
				     res->lockname.len, res->lockname.name);
				dlm_commit_pending_cancel(res, lock);
				lock->cancel_pending = 0;
			}
			dlm_lock_put(lock);
		}
	}
}

/* removes all recovered locks from the recovery list.
 * sets the res->owner to the new master.
 * unsets the RECOVERY flag and wakes waiters. */
static void dlm_finish_local_lockres_recovery(struct dlm_ctxt *dlm,
					      u8 dead_node, u8 new_master)
{
	int i;
	struct hlist_head *bucket;
	struct dlm_lock_resource *res, *next;

	assert_spin_locked(&dlm->spinlock);

	list_for_each_entry_safe(res, next, &dlm->reco.resources, recovering) {
		if (res->owner == dead_node) {
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			list_del_init(&res->recovering);
			spin_lock(&res->spinlock);
			/* new_master has our reference from
			 * the lock state sent during recovery */
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
			/* drop the ref taken when it was put on the
			 * recovery list */
			dlm_lockres_put(res);
		}
	}

	/* this will become unnecessary eventually, but
	 * for now we need to run the whole hash, clear
	 * the RECOVERING state and set the owner
	 * if necessary */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			if (!(res->state & DLM_LOCK_RES_RECOVERING))
				continue;

			if (res->owner != dead_node &&
			    res->owner != dlm->node_num)
				continue;

			if (!list_empty(&res->recovering)) {
				list_del_init(&res->recovering);
				dlm_lockres_put(res);
			}

			/* new_master has our reference from
			 * the lock state sent during recovery */
			mlog(0, "%s: res %.*s, Changing owner from %u to %u\n",
			     dlm->name, res->lockname.len, res->lockname.name,
			     res->owner, new_master);
			spin_lock(&res->spinlock);
			dlm_change_lockres_owner(dlm, res, new_master);
			res->state &= ~DLM_LOCK_RES_RECOVERING;
			if (__dlm_lockres_has_locks(res))
				__dlm_dirty_lockres(dlm, res);
			spin_unlock(&res->spinlock);
			wake_up(&res->wq);
		}
	}
}

/*
 * Decide whether a surviving lock's mode means the lvb can no longer be
 * trusted: locally, anything below PR; for the dead node's locks, an EX.
 */
static inline int dlm_lvb_needs_invalidation(struct dlm_lock *lock, int local)
{
	if (local) {
		if (lock->ml.type != LKM_EXMODE &&
		    lock->ml.type != LKM_PRMODE)
			return 1;
	} else if (lock->ml.type == LKM_EXMODE)
		return 1;
	return 0;
}

/*
 * Blank the lockres lvb (and the matching lksb lvbs) if the dead node
 * could have modified it.  Caller holds dlm->spinlock and res->spinlock.
 */
static void dlm_revalidate_lvb(struct dlm_ctxt *dlm,
			       struct dlm_lock_resource *res, u8 dead_node)
{
	struct list_head *queue;
	struct dlm_lock *lock;
	int blank_lvb = 0, local = 0;
	int i;
	u8 search_node;

	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	if (res->owner == dlm->node_num)
		/* if this node owned the lockres, and if the dead node
		 * had an EX when he died, blank out the lvb */
		search_node = dead_node;
	else {
		/* if this is a secondary lockres, and we had no EX or PR
		 * locks granted, we can no longer trust the lvb */
		search_node = dlm->node_num;
		local = 1;  /* check local state for valid lvb */
	}

	for (i=DLM_GRANTED_LIST; i<=DLM_CONVERTING_LIST; i++) {
		queue = dlm_list_idx_to_ptr(res, i);
		list_for_each_entry(lock, queue, list) {
			if (lock->ml.node == search_node) {
				if (dlm_lvb_needs_invalidation(lock, local)) {
					/* zero the lksb lvb and lockres lvb */
					blank_lvb = 1;
					memset(lock->lksb->lvb, 0, DLM_LVB_LEN);
				}
			}
		}
	}

	if (blank_lvb) {
		mlog(0, "clearing %.*s lvb, dead node %u had EX\n",
		     res->lockname.len, res->lockname.name, dead_node);
		memset(res->lvb, 0, DLM_LVB_LEN);
	}
}

/*
 * Drop every lock the dead node held on @res (this node is the master)
 * and clear the dead node's refmap bit.  Caller holds dlm->spinlock and
 * res->spinlock.
 */
static void dlm_free_dead_locks(struct dlm_ctxt *dlm,
				struct dlm_lock_resource *res, u8 dead_node)
{
	struct dlm_lock *lock, *next;
	unsigned int freed = 0;

	/* this node is the lockres master:
	 * 1) remove any stale locks for the dead node
	 * 2) if the dead node had an EX when he died, blank out the lvb */
	assert_spin_locked(&dlm->spinlock);
	assert_spin_locked(&res->spinlock);

	/* We do two dlm_lock_put().
One for removing from list and the other is
	 * to force the DLM_UNLOCK_FREE_LOCK action so as to free the locks */

	/* TODO: check pending_asts, pending_basts here */
	list_for_each_entry_safe(lock, next, &res->granted, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->converting, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}
	list_for_each_entry_safe(lock, next, &res->blocked, list) {
		if (lock->ml.node == dead_node) {
			list_del_init(&lock->list);
			dlm_lock_put(lock);
			/* Can't schedule DLM_UNLOCK_FREE_LOCK - do manually */
			dlm_lock_put(lock);
			freed++;
		}
	}

	if (freed) {
		mlog(0, "%s:%.*s: freed %u locks for dead node %u, "
		     "dropping ref from lockres\n", dlm->name,
		     res->lockname.len, res->lockname.name, freed, dead_node);
		if(!test_bit(dead_node, res->refmap)) {
			mlog(ML_ERROR, "%s:%.*s: freed %u locks for dead node %u, "
			     "but ref was not set\n", dlm->name,
			     res->lockname.len, res->lockname.name, freed, dead_node);
			__dlm_print_one_lock_resource(res);
		}
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	} else if (test_bit(dead_node, res->refmap)) {
		mlog(0, "%s:%.*s: dead node %u had a ref, but had "
		     "no locks and had not purged before dying\n", dlm->name,
		     res->lockname.len, res->lockname.name, dead_node);
		dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
	}

	/* do not kick thread yet */
	__dlm_dirty_lockres(dlm, res);
}

/* if this node is the recovery master, and there are no
 * locks for a given lockres owned by this node that are in
 * either PR or EX mode, zero out the lvb before requesting.
 *
 */
/*
 * Walk the whole lockres hash once for a freshly dead node: prune stale
 * mles and $RECOVERY entries, revalidate lvbs, and route each lockres to
 * either the recovery list (dead node was master) or dead-lock cleanup
 * (this node is master).  Called with dlm->spinlock held.
 */
static void dlm_do_local_recovery_cleanup(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_lock_resource *res;
	int i;
	struct hlist_head *bucket;
	struct dlm_lock *lock;

	/* purge any stale mles */
	dlm_clean_master_list(dlm, dead_node);

	/*
	 * now clean up all lock resources.  there are two rules:
	 *
	 * 1) if the dead node was the master, move the lockres
	 *    to the recovering list.  set the RECOVERING flag.
	 *    this lockres needs to be cleaned up before it can
	 *    be used further.
	 *
	 * 2) if this node was the master, remove all locks from
	 *    each of the lockres queues that were owned by the
	 *    dead node.  once recovery finishes, the dlm thread
	 *    can be kicked again to see if any ASTs or BASTs
	 *    need to be fired as a result.
	 */
	for (i = 0; i < DLM_HASH_BUCKETS; i++) {
		bucket = dlm_lockres_hash(dlm, i);
		hlist_for_each_entry(res, bucket, hash_node) {
			/* always prune any $RECOVERY entries for dead nodes,
			 * otherwise hangs can occur during later recovery */
			if (dlm_is_recovery_lock(res->lockname.name,
						 res->lockname.len)) {
				spin_lock(&res->spinlock);
				list_for_each_entry(lock, &res->granted, list) {
					if (lock->ml.node == dead_node) {
						mlog(0, "AHA! there was "
						     "a $RECOVERY lock for dead "
						     "node %u (%s)!\n",
						     dead_node, dlm->name);
						list_del_init(&lock->list);
						dlm_lock_put(lock);
						break;
					}
				}
				spin_unlock(&res->spinlock);
				continue;
			}
			spin_lock(&res->spinlock);
			/* zero the lvb if necessary */
			dlm_revalidate_lvb(dlm, res, dead_node);
			if (res->owner == dead_node) {
				if (res->state & DLM_LOCK_RES_DROPPING_REF) {
					mlog(ML_NOTICE, "%s: res %.*s, Skip "
					     "recovery as it is being freed\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name);
				} else
					dlm_move_lockres_to_recovery_list(dlm,
									  res);
			} else if (res->owner == dlm->node_num) {
				dlm_free_dead_locks(dlm, res, dead_node);
				__dlm_lockres_calc_usage(dlm, res);
			} else if (res->owner == DLM_LOCK_RES_OWNER_UNKNOWN) {
				if (test_bit(dead_node, res->refmap)) {
					mlog(0, "%s:%.*s: dead node %u had a ref, but had "
					     "no locks and had not purged before dying\n",
					     dlm->name, res->lockname.len,
					     res->lockname.name, dead_node);
					dlm_lockres_clear_refmap_bit(dlm, res, dead_node);
				}
			}
			spin_unlock(&res->spinlock);
		}
	}
}

/*
 * Core node-death bookkeeping: reset recovery/join state that referenced
 * the dead node, run local cleanup, fire heartbeat notifications and move
 * the node from the domain/live maps into the recovery map.  Caller holds
 * dlm->spinlock.
 */
static void __dlm_hb_node_down(struct dlm_ctxt *dlm, int idx)
{
	assert_spin_locked(&dlm->spinlock);

	if (dlm->reco.new_master == idx) {
		mlog(0, "%s: recovery master %d just died\n",
		     dlm->name, idx);
		if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
			/* finalize1 was reached, so it is safe to clear
			 * the new_master and dead_node.  that recovery
			 * is complete. */
			mlog(0, "%s: dead master %d had reached "
			     "finalize1 state, clearing\n", dlm->name, idx);
			dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE;
			__dlm_reset_recovery(dlm);
		}
	}

	/* Clean up join state on node death. */
	if (dlm->joining_node == idx) {
		mlog(0, "Clearing join state for node %u\n", idx);
		__dlm_set_joining_node(dlm, DLM_LOCK_RES_OWNER_UNKNOWN);
	}

	/* check to see if the node is already considered dead */
	if (!test_bit(idx, dlm->live_nodes_map)) {
		mlog(0, "for domain %s, node %d is already dead. "
		     "another node likely did recovery already.\n",
		     dlm->name, idx);
		return;
	}

	/* check to see if we do not care about this node */
	if (!test_bit(idx, dlm->domain_map)) {
		/* This also catches the case that we get a node down
		 * but haven't joined the domain yet. */
		mlog(0, "node %u already removed from domain!\n", idx);
		return;
	}

	clear_bit(idx, dlm->live_nodes_map);

	/* make sure local cleanup occurs before the heartbeat events */
	if (!test_bit(idx, dlm->recovery_map))
		dlm_do_local_recovery_cleanup(dlm, idx);

	/* notify anything attached to the heartbeat events */
	dlm_hb_event_notify_attached(dlm, idx, 0);

	mlog(0, "node %u being removed from domain map!\n", idx);
	clear_bit(idx, dlm->domain_map);
	clear_bit(idx, dlm->exit_domain_map);
	/* wake up migration waiters if a node goes down.
	 * perhaps later we can genericize this for other waiters. */
	wake_up(&dlm->migration_wq);

	if (test_bit(idx, dlm->recovery_map))
		mlog(0, "domain %s, node %u already added "
		     "to recovery map!\n", dlm->name, idx);
	else
		set_bit(idx, dlm->recovery_map);
}

/* o2hb callback: a node stopped heartbeating */
void dlm_hb_node_down_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	/*
	 * This will notify any dlm users that a node in our domain
	 * went away without notifying us first.
	 */
	if (test_bit(idx, dlm->domain_map))
		dlm_fire_domain_eviction_callbacks(dlm, idx);

	spin_lock(&dlm->spinlock);
	__dlm_hb_node_down(dlm, idx);
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

/* o2hb callback: a node started heartbeating */
void dlm_hb_node_up_cb(struct o2nm_node *node, int idx, void *data)
{
	struct dlm_ctxt *dlm = data;

	if (!dlm_grab(dlm))
		return;

	spin_lock(&dlm->spinlock);
	set_bit(idx, dlm->live_nodes_map);
	/* do NOT notify mle attached to the heartbeat events.
	 * new nodes are not interesting in mastery until joined.
 */
	spin_unlock(&dlm->spinlock);

	dlm_put(dlm);
}

/* logging-only ast/bast/unlockast callbacks for the $RECOVERY lock */
static void dlm_reco_ast(void *astdata)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "ast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_bast(void *astdata, int blocked_type)
{
	struct dlm_ctxt *dlm = astdata;
	mlog(0, "bast for recovery lock fired!, this=%u, dlm=%s\n",
	     dlm->node_num, dlm->name);
}
static void dlm_reco_unlock_ast(void *astdata, enum dlm_status st)
{
	mlog(0, "unlockast for recovery lock fired!\n");
}

/*
 * dlm_pick_recovery_master will continually attempt to use
 * dlmlock() on the special "$RECOVERY" lockres with the
 * LKM_NOQUEUE flag to get an EX.  every thread that enters
 * this function on each node racing to become the recovery
 * master will not stop attempting this until either:
 * a) this node gets the EX (and becomes the recovery master),
 * or b) dlm->reco.new_master gets set to some nodenum
 * != O2NM_INVALID_NODE_NUM (another node will do the reco).
 * so each time a recovery master is needed, the entire cluster
 * will sync at this point.  if the new master dies, that will
 * be detected in dlm_do_recovery */
static int dlm_pick_recovery_master(struct dlm_ctxt *dlm)
{
	enum dlm_status ret;
	struct dlm_lockstatus lksb;
	int status = -EINVAL;

	mlog(0, "starting recovery of %s at %lu, dead=%u, this=%u\n",
	     dlm->name, jiffies, dlm->reco.dead_node, dlm->node_num);
again:
	memset(&lksb, 0, sizeof(lksb));

	ret = dlmlock(dlm, LKM_EXMODE, &lksb, LKM_NOQUEUE|LKM_RECOVERY,
		      DLM_RECOVERY_LOCK_NAME, DLM_RECOVERY_LOCK_NAME_LEN,
		      dlm_reco_ast, dlm, dlm_reco_bast);

	mlog(0, "%s: dlmlock($RECOVERY) returned %d, lksb=%d\n",
	     dlm->name, ret, lksb.status);

	if (ret == DLM_NORMAL) {
		mlog(0, "dlm=%s dlmlock says I got it (this=%u)\n",
		     dlm->name, dlm->node_num);

		/* got the EX lock.
check to see if another node
		 * just became the reco master */
		if (dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: got reco EX lock, but %u will "
			     "do the recovery\n", dlm->name,
			     dlm->reco.new_master);
			status = -EEXIST;
		} else {
			status = 0;

			/* see if recovery was already finished elsewhere */
			spin_lock(&dlm->spinlock);
			if (dlm->reco.dead_node == O2NM_INVALID_NODE_NUM) {
				status = -EINVAL;
				mlog(0, "%s: got reco EX lock, but "
				     "node got recovered already\n", dlm->name);
				if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) {
					mlog(ML_ERROR, "%s: new master is %u "
					     "but no dead node!\n",
					     dlm->name, dlm->reco.new_master);
					BUG();
				}
			}
			spin_unlock(&dlm->spinlock);
		}

		/* if this node has actually become the recovery master,
		 * set the master and send the messages to begin recovery */
		if (!status) {
			mlog(0, "%s: dead=%u, this=%u, sending "
			     "begin_reco now\n", dlm->name,
			     dlm->reco.dead_node, dlm->node_num);
			status = dlm_send_begin_reco_message(dlm,
				      dlm->reco.dead_node);
			/* this always succeeds */
			BUG_ON(status);

			/* set the new_master to this node */
			spin_lock(&dlm->spinlock);
			dlm_set_reco_master(dlm, dlm->node_num);
			spin_unlock(&dlm->spinlock);
		}

		/* recovery lock is a special case. ast will not get fired,
		 * so just go ahead and unlock it. */
		ret = dlmunlock(dlm, &lksb, 0, dlm_reco_unlock_ast, dlm);
		if (ret == DLM_DENIED) {
			mlog(0, "got DLM_DENIED, trying LKM_CANCEL\n");
			ret = dlmunlock(dlm, &lksb, LKM_CANCEL,
					dlm_reco_unlock_ast, dlm);
		}
		if (ret != DLM_NORMAL) {
			/* this would really suck. this could only happen
			 * if there was a network error during the unlock
			 * because of node death. this means the unlock
			 * is actually "done" and the lock structure is
			 * even freed.  we can continue, but only
			 * because this specific lock name is special. */
			mlog(ML_ERROR, "dlmunlock returned %d\n", ret);
		}
	} else if (ret == DLM_NOTQUEUED) {
		mlog(0, "dlm=%s dlmlock says another node got it (this=%u)\n",
		     dlm->name, dlm->node_num);
		/* another node is master.
wait on
		 * reco.new_master != O2NM_INVALID_NODE_NUM
		 * for at most one second */
		wait_event_timeout(dlm->dlm_reco_thread_wq,
				   dlm_reco_master_ready(dlm),
				   msecs_to_jiffies(1000));
		if (!dlm_reco_master_ready(dlm)) {
			mlog(0, "%s: reco master taking awhile\n",
			     dlm->name);
			goto again;
		}
		/* another node has informed this one that it is reco master */
		mlog(0, "%s: reco master %u is ready to recover %u\n",
		     dlm->name, dlm->reco.new_master, dlm->reco.dead_node);
		status = -EEXIST;
	} else if (ret == DLM_RECOVERING) {
		mlog(0, "dlm=%s dlmlock says master node died (this=%u)\n",
		     dlm->name, dlm->node_num);
		goto again;
	} else {
		struct dlm_lock_resource *res;

		/* dlmlock returned something other than NOTQUEUED or NORMAL */
		mlog(ML_ERROR, "%s: got %s from dlmlock($RECOVERY), "
		     "lksb.status=%s\n", dlm->name, dlm_errname(ret),
		     dlm_errname(lksb.status));
		res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
					 DLM_RECOVERY_LOCK_NAME_LEN);
		if (res) {
			dlm_print_one_lock_resource(res);
			dlm_lockres_put(res);
		} else {
			mlog(ML_ERROR, "recovery lock not found\n");
		}
		BUG();
	}

	return status;
}

/*
 * Tell every other live node that this node is recovering @dead_node.
 * Retries indefinitely on -EAGAIN (peer still finalizing) and on other
 * send errors; a peer that is itself down is skipped.
 */
static int dlm_send_begin_reco_message(struct dlm_ctxt *dlm, u8 dead_node)
{
	struct dlm_begin_reco br;
	int ret = 0;
	struct dlm_node_iter iter;
	int nodenum;
	int status;

	mlog(0, "%s: dead node is %u\n", dlm->name, dead_node);

	spin_lock(&dlm->spinlock);
	dlm_node_iter_init(dlm->domain_map, &iter);
	spin_unlock(&dlm->spinlock);

	clear_bit(dead_node, iter.node_map);

	memset(&br, 0, sizeof(br));
	br.node_idx = dlm->node_num;
	br.dead_node = dead_node;

	while ((nodenum = dlm_node_iter_next(&iter)) >= 0) {
		ret = 0;
		if (nodenum == dead_node) {
			mlog(0, "not sending begin reco to dead node "
			     "%u\n", dead_node);
			continue;
		}
		if (nodenum == dlm->node_num) {
			mlog(0, "not sending begin reco to self\n");
			continue;
		}
retry:
		ret = -EINVAL;
		mlog(0, "attempting to send begin reco msg to %d\n",
		     nodenum);
		ret = o2net_send_message(DLM_BEGIN_RECO_MSG, dlm->key,
					 &br, sizeof(br), nodenum, &status);
		/* negative status is handled ok by caller here
 */
		if (ret >= 0)
			ret = status;
		if (dlm_is_host_down(ret)) {
			/* node is down.  not involved in recovery
			 * so just keep going */
			mlog(ML_NOTICE, "%s: node %u was down when sending "
			     "begin reco msg (%d)\n", dlm->name, nodenum, ret);
			ret = 0;
		}

		/*
		 * Prior to commit aad1b15310b9bcd59fa81ab8f2b1513b59553ea8,
		 * dlm_begin_reco_handler() returned EAGAIN and not -EAGAIN.
		 * We are handling both for compatibility reasons.
		 */
		if (ret == -EAGAIN || ret == EAGAIN) {
			mlog(0, "%s: trying to start recovery of node "
			     "%u, but node %u is waiting for last recovery "
			     "to complete, backoff for a bit\n", dlm->name,
			     dead_node, nodenum);
			msleep(100);
			goto retry;
		}
		if (ret < 0) {
			struct dlm_lock_resource *res;

			/* this is now a serious problem, possibly ENOMEM
			 * in the network stack.  must retry */
			mlog_errno(ret);
			mlog(ML_ERROR, "begin reco of dlm %s to node %u "
			     "returned %d\n", dlm->name, nodenum, ret);
			res = dlm_lookup_lockres(dlm, DLM_RECOVERY_LOCK_NAME,
						 DLM_RECOVERY_LOCK_NAME_LEN);
			if (res) {
				dlm_print_one_lock_resource(res);
				dlm_lockres_put(res);
			} else {
				mlog(ML_ERROR, "recovery lock not found\n");
			}
			/* sleep for a bit in hopes that we can avoid
			 * another ENOMEM */
			msleep(100);
			goto retry;
		}
	}

	return ret;
}

/*
 * Network handler for DLM_BEGIN_RECO_MSG: a peer announces it is the
 * recovery master for a dead node.  Records the new master/dead node and
 * forces local node-down processing if this node has not yet seen the
 * death.  Returns -EAGAIN while this node is still in finalize state.
 */
int dlm_begin_reco_handler(struct o2net_msg *msg, u32 len, void *data,
			   void **ret_data)
{
	struct dlm_ctxt *dlm = data;
	struct dlm_begin_reco *br = (struct dlm_begin_reco *)msg->buf;

	/* ok to return 0, domain has gone away */
	if (!dlm_grab(dlm))
		return 0;

	spin_lock(&dlm->spinlock);
	if (dlm->reco.state & DLM_RECO_STATE_FINALIZE) {
		mlog(0, "%s: node %u wants to recover node %u (%u:%u) "
		     "but this node is in finalize state, waiting on finalize2\n",
		     dlm->name, br->node_idx, br->dead_node,
		     dlm->reco.dead_node, dlm->reco.new_master);
		spin_unlock(&dlm->spinlock);
		dlm_put(dlm);
		return -EAGAIN;
	}
	spin_unlock(&dlm->spinlock);

	mlog(0, "%s: node %u wants to recover node %u (%u:%u)\n",
	     dlm->name, br->node_idx, br->dead_node,
	     dlm->reco.dead_node, dlm->reco.new_master);
dlm_fire_domain_eviction_callbacks(dlm, br->dead_node); spin_lock(&dlm->spinlock); if (dlm->reco.new_master != O2NM_INVALID_NODE_NUM) { if (test_bit(dlm->reco.new_master, dlm->recovery_map)) { mlog(0, "%s: new_master %u died, changing " "to %u\n", dlm->name, dlm->reco.new_master, br->node_idx); } else { mlog(0, "%s: new_master %u NOT DEAD, changing " "to %u\n", dlm->name, dlm->reco.new_master, br->node_idx); /* may not have seen the new master as dead yet */ } } if (dlm->reco.dead_node != O2NM_INVALID_NODE_NUM) { mlog(ML_NOTICE, "%s: dead_node previously set to %u, " "node %u changing it to %u\n", dlm->name, dlm->reco.dead_node, br->node_idx, br->dead_node); } dlm_set_reco_master(dlm, br->node_idx); dlm_set_reco_dead_node(dlm, br->dead_node); if (!test_bit(br->dead_node, dlm->recovery_map)) { mlog(0, "recovery master %u sees %u as dead, but this " "node has not yet. marking %u as dead\n", br->node_idx, br->dead_node, br->dead_node); if (!test_bit(br->dead_node, dlm->domain_map) || !test_bit(br->dead_node, dlm->live_nodes_map)) mlog(0, "%u not in domain/live_nodes map " "so setting it in reco map manually\n", br->dead_node); /* force the recovery cleanup in __dlm_hb_node_down * both of these will be cleared in a moment */ set_bit(br->dead_node, dlm->domain_map); set_bit(br->dead_node, dlm->live_nodes_map); __dlm_hb_node_down(dlm, br->dead_node); } spin_unlock(&dlm->spinlock); dlm_kick_recovery_thread(dlm); mlog(0, "%s: recovery started by node %u, for %u (%u:%u)\n", dlm->name, br->node_idx, br->dead_node, dlm->reco.dead_node, dlm->reco.new_master); dlm_put(dlm); return 0; } #define DLM_FINALIZE_STAGE2 0x01 static int dlm_send_finalize_reco_message(struct dlm_ctxt *dlm) { int ret = 0; struct dlm_finalize_reco fr; struct dlm_node_iter iter; int nodenum; int status; int stage = 1; mlog(0, "finishing recovery for node %s:%u, " "stage %d\n", dlm->name, dlm->reco.dead_node, stage); spin_lock(&dlm->spinlock); dlm_node_iter_init(dlm->domain_map, &iter); 
spin_unlock(&dlm->spinlock); stage2: memset(&fr, 0, sizeof(fr)); fr.node_idx = dlm->node_num; fr.dead_node = dlm->reco.dead_node; if (stage == 2) fr.flags |= DLM_FINALIZE_STAGE2; while ((nodenum = dlm_node_iter_next(&iter)) >= 0) { if (nodenum == dlm->node_num) continue; ret = o2net_send_message(DLM_FINALIZE_RECO_MSG, dlm->key, &fr, sizeof(fr), nodenum, &status); if (ret >= 0) ret = status; if (ret < 0) { mlog(ML_ERROR, "Error %d when sending message %u (key " "0x%x) to node %u\n", ret, DLM_FINALIZE_RECO_MSG, dlm->key, nodenum); if (dlm_is_host_down(ret)) { /* this has no effect on this recovery * session, so set the status to zero to * finish out the last recovery */ mlog(ML_ERROR, "node %u went down after this " "node finished recovery.\n", nodenum); ret = 0; continue; } break; } } if (stage == 1) { /* reset the node_iter back to the top and send finalize2 */ iter.curnode = -1; stage = 2; goto stage2; } return ret; } int dlm_finalize_reco_handler(struct o2net_msg *msg, u32 len, void *data, void **ret_data) { struct dlm_ctxt *dlm = data; struct dlm_finalize_reco *fr = (struct dlm_finalize_reco *)msg->buf; int stage = 1; /* ok to return 0, domain has gone away */ if (!dlm_grab(dlm)) return 0; if (fr->flags & DLM_FINALIZE_STAGE2) stage = 2; mlog(0, "%s: node %u finalizing recovery stage%d of " "node %u (%u:%u)\n", dlm->name, fr->node_idx, stage, fr->dead_node, dlm->reco.dead_node, dlm->reco.new_master); spin_lock(&dlm->spinlock); if (dlm->reco.new_master != fr->node_idx) { mlog(ML_ERROR, "node %u sent recovery finalize msg, but node " "%u is supposed to be the new master, dead=%u\n", fr->node_idx, dlm->reco.new_master, fr->dead_node); BUG(); } if (dlm->reco.dead_node != fr->dead_node) { mlog(ML_ERROR, "node %u sent recovery finalize msg for dead " "node %u, but node %u is supposed to be dead\n", fr->node_idx, fr->dead_node, dlm->reco.dead_node); BUG(); } switch (stage) { case 1: dlm_finish_local_lockres_recovery(dlm, fr->dead_node, fr->node_idx); if (dlm->reco.state 
& DLM_RECO_STATE_FINALIZE) { mlog(ML_ERROR, "%s: received finalize1 from " "new master %u for dead node %u, but " "this node has already received it!\n", dlm->name, fr->node_idx, fr->dead_node); dlm_print_reco_node_status(dlm); BUG(); } dlm->reco.state |= DLM_RECO_STATE_FINALIZE; spin_unlock(&dlm->spinlock); break; case 2: if (!(dlm->reco.state & DLM_RECO_STATE_FINALIZE)) { mlog(ML_ERROR, "%s: received finalize2 from " "new master %u for dead node %u, but " "this node did not have finalize1!\n", dlm->name, fr->node_idx, fr->dead_node); dlm_print_reco_node_status(dlm); BUG(); } dlm->reco.state &= ~DLM_RECO_STATE_FINALIZE; __dlm_reset_recovery(dlm); spin_unlock(&dlm->spinlock); dlm_kick_recovery_thread(dlm); break; default: BUG(); } mlog(0, "%s: recovery done, reco master was %u, dead now %u, master now %u\n", dlm->name, fr->node_idx, dlm->reco.dead_node, dlm->reco.new_master); dlm_put(dlm); return 0; }
gpl-2.0
eousphoros/android_kernel_samsung_noblelte
drivers/usb/wusbcore/devconnect.c
2250
33005
/* * WUSB Wire Adapter: Control/Data Streaming Interface (WUSB[8]) * Device Connect handling * * Copyright (C) 2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs * FIXME: this file needs to be broken up, it's grown too big * * * WUSB1.0[7.1, 7.5.1, ] * * WUSB device connection is kind of messy. Some background: * * When a device wants to connect it scans the UWB radio channels * looking for a WUSB Channel; a WUSB channel is defined by MMCs * (Micro Managed Commands or something like that) [see * Design-overview for more on this] . * * So, device scans the radio, finds MMCs and thus a host and checks * when the next DNTS is. It sends a Device Notification Connect * (DN_Connect); the host picks it up (through nep.c and notif.c, ends * up in wusb_devconnect_ack(), which creates a wusb_dev structure in * wusbhc->port[port_number].wusb_dev), assigns an unauth address * to the device (this means from 0x80 to 0xfe) and sends, in the MMC * a Connect Ack Information Element (ConnAck IE). * * So now the device now has a WUSB address. From now on, we use * that to talk to it in the RPipes. * * ASSUMPTIONS: * * - We use the the as device address the port number where it is * connected (port 0 doesn't exist). For unauth, it is 128 + that. 
* * ROADMAP: * * This file contains the logic for doing that--entry points: * * wusb_devconnect_ack() Ack a device until _acked() called. * Called by notif.c:wusb_handle_dn_connect() * when a DN_Connect is received. * * wusb_devconnect_acked() Ack done, release resources. * * wusb_handle_dn_alive() Called by notif.c:wusb_handle_dn() * for processing a DN_Alive pong from a device. * * wusb_handle_dn_disconnect()Called by notif.c:wusb_handle_dn() to * process a disconenct request from a * device. * * __wusb_dev_disable() Called by rh.c:wusbhc_rh_clear_port_feat() when * disabling a port. * * wusb_devconnect_create() Called when creating the host by * lc.c:wusbhc_create(). * * wusb_devconnect_destroy() Cleanup called removing the host. Called * by lc.c:wusbhc_destroy(). * * Each Wireless USB host maintains a list of DN_Connect requests * (actually we maintain a list of pending Connect Acks, the * wusbhc->ca_list). * * LIFE CYCLE OF port->wusb_dev * * Before the @wusbhc structure put()s the reference it owns for * port->wusb_dev [and clean the wusb_dev pointer], it needs to * lock @wusbhc->mutex. 
*/ #include <linux/jiffies.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/export.h> #include "wusbhc.h" static void wusbhc_devconnect_acked_work(struct work_struct *work); static void wusb_dev_free(struct wusb_dev *wusb_dev) { if (wusb_dev) { kfree(wusb_dev->set_gtk_req); usb_free_urb(wusb_dev->set_gtk_urb); kfree(wusb_dev); } } static struct wusb_dev *wusb_dev_alloc(struct wusbhc *wusbhc) { struct wusb_dev *wusb_dev; struct urb *urb; struct usb_ctrlrequest *req; wusb_dev = kzalloc(sizeof(*wusb_dev), GFP_KERNEL); if (wusb_dev == NULL) goto err; wusb_dev->wusbhc = wusbhc; INIT_WORK(&wusb_dev->devconnect_acked_work, wusbhc_devconnect_acked_work); urb = usb_alloc_urb(0, GFP_KERNEL); if (urb == NULL) goto err; wusb_dev->set_gtk_urb = urb; req = kmalloc(sizeof(*req), GFP_KERNEL); if (req == NULL) goto err; wusb_dev->set_gtk_req = req; req->bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE; req->bRequest = USB_REQ_SET_DESCRIPTOR; req->wValue = cpu_to_le16(USB_DT_KEY << 8 | wusbhc->gtk_index); req->wIndex = 0; req->wLength = cpu_to_le16(wusbhc->gtk.descr.bLength); return wusb_dev; err: wusb_dev_free(wusb_dev); return NULL; } /* * Using the Connect-Ack list, fill out the @wusbhc Connect-Ack WUSB IE * properly so that it can be added to the MMC. * * We just get the @wusbhc->ca_list and fill out the first four ones or * less (per-spec WUSB1.0[7.5, before T7-38). If the ConnectAck WUSB * IE is not allocated, we alloc it. 
* * @wusbhc->mutex must be taken */ static void wusbhc_fill_cack_ie(struct wusbhc *wusbhc) { unsigned cnt; struct wusb_dev *dev_itr; struct wuie_connect_ack *cack_ie; cack_ie = &wusbhc->cack_ie; cnt = 0; list_for_each_entry(dev_itr, &wusbhc->cack_list, cack_node) { cack_ie->blk[cnt].CDID = dev_itr->cdid; cack_ie->blk[cnt].bDeviceAddress = dev_itr->addr; if (++cnt >= WUIE_ELT_MAX) break; } cack_ie->hdr.bLength = sizeof(cack_ie->hdr) + cnt * sizeof(cack_ie->blk[0]); } /* * Register a new device that wants to connect * * A new device wants to connect, so we add it to the Connect-Ack * list. We give it an address in the unauthorized range (bit 8 set); * user space will have to drive authorization further on. * * @dev_addr: address to use for the device (which is also the port * number). * * @wusbhc->mutex must be taken */ static struct wusb_dev *wusbhc_cack_add(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc, const char *pr_cdid, u8 port_idx) { struct device *dev = wusbhc->dev; struct wusb_dev *wusb_dev; int new_connection = wusb_dn_connect_new_connection(dnc); u8 dev_addr; int result; /* Is it registered already? */ list_for_each_entry(wusb_dev, &wusbhc->cack_list, cack_node) if (!memcmp(&wusb_dev->cdid, &dnc->CDID, sizeof(wusb_dev->cdid))) return wusb_dev; /* We don't have it, create an entry, register it */ wusb_dev = wusb_dev_alloc(wusbhc); if (wusb_dev == NULL) return NULL; wusb_dev_init(wusb_dev); wusb_dev->cdid = dnc->CDID; wusb_dev->port_idx = port_idx; /* * Devices are always available within the cluster reservation * and since the hardware will take the intersection of the * per-device availability and the cluster reservation, the * per-device availability can simply be set to always * available. */ bitmap_fill(wusb_dev->availability.bm, UWB_NUM_MAS); /* FIXME: handle reconnects instead of assuming connects are always new. 
*/ if (1 && new_connection == 0) new_connection = 1; if (new_connection) { dev_addr = (port_idx + 2) | WUSB_DEV_ADDR_UNAUTH; dev_info(dev, "Connecting new WUSB device to address %u, " "port %u\n", dev_addr, port_idx); result = wusb_set_dev_addr(wusbhc, wusb_dev, dev_addr); if (result < 0) return NULL; } wusb_dev->entry_ts = jiffies; list_add_tail(&wusb_dev->cack_node, &wusbhc->cack_list); wusbhc->cack_count++; wusbhc_fill_cack_ie(wusbhc); return wusb_dev; } /* * Remove a Connect-Ack context entry from the HCs view * * @wusbhc->mutex must be taken */ static void wusbhc_cack_rm(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { list_del_init(&wusb_dev->cack_node); wusbhc->cack_count--; wusbhc_fill_cack_ie(wusbhc); } /* * @wusbhc->mutex must be taken */ static void wusbhc_devconnect_acked(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { wusbhc_cack_rm(wusbhc, wusb_dev); if (wusbhc->cack_count) wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); else wusbhc_mmcie_rm(wusbhc, &wusbhc->cack_ie.hdr); } static void wusbhc_devconnect_acked_work(struct work_struct *work) { struct wusb_dev *wusb_dev = container_of(work, struct wusb_dev, devconnect_acked_work); struct wusbhc *wusbhc = wusb_dev->wusbhc; mutex_lock(&wusbhc->mutex); wusbhc_devconnect_acked(wusbhc, wusb_dev); mutex_unlock(&wusbhc->mutex); wusb_dev_put(wusb_dev); } /* * Ack a device for connection * * FIXME: docs * * @pr_cdid: Printable CDID...hex Use @dnc->cdid for the real deal. * * So we get the connect ack IE (may have been allocated already), * find an empty connect block, an empty virtual port, create an * address with it (see below), make it an unauth addr [bit 7 set] and * set the MMC. * * Addresses: because WUSB hosts have no downstream hubs, we can do a * 1:1 mapping between 'port number' and device * address. 
This simplifies many things, as during this * initial connect phase the USB stack has no knoledge of * the device and hasn't assigned an address yet--we know * USB's choose_address() will use the same euristics we * use here, so we can assume which address will be assigned. * * USB stack always assigns address 1 to the root hub, so * to the port number we add 2 (thus virtual port #0 is * addr #2). * * @wusbhc shall be referenced */ static void wusbhc_devconnect_ack(struct wusbhc *wusbhc, struct wusb_dn_connect *dnc, const char *pr_cdid) { int result; struct device *dev = wusbhc->dev; struct wusb_dev *wusb_dev; struct wusb_port *port; unsigned idx, devnum; mutex_lock(&wusbhc->mutex); /* Check we are not handling it already */ for (idx = 0; idx < wusbhc->ports_max; idx++) { port = wusb_port_by_idx(wusbhc, idx); if (port->wusb_dev && memcmp(&dnc->CDID, &port->wusb_dev->cdid, sizeof(dnc->CDID)) == 0) goto error_unlock; } /* Look up those fake ports we have for a free one */ for (idx = 0; idx < wusbhc->ports_max; idx++) { port = wusb_port_by_idx(wusbhc, idx); if ((port->status & USB_PORT_STAT_POWER) && !(port->status & USB_PORT_STAT_CONNECTION)) break; } if (idx >= wusbhc->ports_max) { dev_err(dev, "Host controller can't connect more devices " "(%u already connected); device %s rejected\n", wusbhc->ports_max, pr_cdid); /* NOTE: we could send a WUIE_Disconnect here, but we haven't * event acked, so the device will eventually timeout the * connection, right? 
*/ goto error_unlock; } devnum = idx + 2; /* Make sure we are using no crypto on that "virtual port" */ wusbhc->set_ptk(wusbhc, idx, 0, NULL, 0); /* Grab a filled in Connect-Ack context, fill out the * Connect-Ack Wireless USB IE, set the MMC */ wusb_dev = wusbhc_cack_add(wusbhc, dnc, pr_cdid, idx); if (wusb_dev == NULL) goto error_unlock; result = wusbhc_mmcie_set(wusbhc, 0, 0, &wusbhc->cack_ie.hdr); if (result < 0) goto error_unlock; /* Give the device at least 2ms (WUSB1.0[7.5.1p3]), let's do * three for a good measure */ msleep(3); port->wusb_dev = wusb_dev; port->status |= USB_PORT_STAT_CONNECTION; port->change |= USB_PORT_STAT_C_CONNECTION; /* Now the port status changed to connected; khubd will * pick the change up and try to reset the port to bring it to * the enabled state--so this process returns up to the stack * and it calls back into wusbhc_rh_port_reset(). */ error_unlock: mutex_unlock(&wusbhc->mutex); return; } /* * Disconnect a Wireless USB device from its fake port * * Marks the port as disconnected so that khubd can pick up the change * and drops our knowledge about the device. 
* * Assumes there is a device connected * * @port_index: zero based port number * * NOTE: @wusbhc->mutex is locked * * WARNING: From here it is not very safe to access anything hanging off * wusb_dev */ static void __wusbhc_dev_disconnect(struct wusbhc *wusbhc, struct wusb_port *port) { struct wusb_dev *wusb_dev = port->wusb_dev; port->status &= ~(USB_PORT_STAT_CONNECTION | USB_PORT_STAT_ENABLE | USB_PORT_STAT_SUSPEND | USB_PORT_STAT_RESET | USB_PORT_STAT_LOW_SPEED | USB_PORT_STAT_HIGH_SPEED); port->change |= USB_PORT_STAT_C_CONNECTION | USB_PORT_STAT_C_ENABLE; if (wusb_dev) { dev_dbg(wusbhc->dev, "disconnecting device from port %d\n", wusb_dev->port_idx); if (!list_empty(&wusb_dev->cack_node)) list_del_init(&wusb_dev->cack_node); /* For the one in cack_add() */ wusb_dev_put(wusb_dev); } port->wusb_dev = NULL; /* After a device disconnects, change the GTK (see [WUSB] * section 6.2.11.2). */ if (wusbhc->active) wusbhc_gtk_rekey(wusbhc); /* The Wireless USB part has forgotten about the device already; now * khubd's timer will pick up the disconnection and remove the USB * device from the system */ } /* * Refresh the list of keep alives to emit in the MMC * * Some devices don't respond to keep alives unless they've been * authenticated, so skip unauthenticated devices. * * We only publish the first four devices that have a coming timeout * condition. Then when we are done processing those, we go for the * next ones. We ignore the ones that have timed out already (they'll * be purged). * * This might cause the first devices to timeout the last devices in * the port array...FIXME: come up with a better algorithm? * * Note we can't do much about MMC's ops errors; we hope next refresh * will kind of handle it. 
* * NOTE: @wusbhc->mutex is locked */ static void __wusbhc_keep_alive(struct wusbhc *wusbhc) { struct device *dev = wusbhc->dev; unsigned cnt; struct wusb_dev *wusb_dev; struct wusb_port *wusb_port; struct wuie_keep_alive *ie = &wusbhc->keep_alive_ie; unsigned keep_alives, old_keep_alives; old_keep_alives = ie->hdr.bLength - sizeof(ie->hdr); keep_alives = 0; for (cnt = 0; keep_alives < WUIE_ELT_MAX && cnt < wusbhc->ports_max; cnt++) { unsigned tt = msecs_to_jiffies(wusbhc->trust_timeout); wusb_port = wusb_port_by_idx(wusbhc, cnt); wusb_dev = wusb_port->wusb_dev; if (wusb_dev == NULL) continue; if (wusb_dev->usb_dev == NULL || !wusb_dev->usb_dev->authenticated) continue; if (time_after(jiffies, wusb_dev->entry_ts + tt)) { dev_err(dev, "KEEPALIVE: device %u timed out\n", wusb_dev->addr); __wusbhc_dev_disconnect(wusbhc, wusb_port); } else if (time_after(jiffies, wusb_dev->entry_ts + tt/2)) { /* Approaching timeout cut out, need to refresh */ ie->bDeviceAddress[keep_alives++] = wusb_dev->addr; } } if (keep_alives & 0x1) /* pad to even number ([WUSB] section 7.5.9) */ ie->bDeviceAddress[keep_alives++] = 0x7f; ie->hdr.bLength = sizeof(ie->hdr) + keep_alives*sizeof(ie->bDeviceAddress[0]); if (keep_alives > 0) wusbhc_mmcie_set(wusbhc, 10, 5, &ie->hdr); else if (old_keep_alives != 0) wusbhc_mmcie_rm(wusbhc, &ie->hdr); } /* * Do a run through all devices checking for timeouts */ static void wusbhc_keep_alive_run(struct work_struct *ws) { struct delayed_work *dw = to_delayed_work(ws); struct wusbhc *wusbhc = container_of(dw, struct wusbhc, keep_alive_timer); mutex_lock(&wusbhc->mutex); __wusbhc_keep_alive(wusbhc); mutex_unlock(&wusbhc->mutex); queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, msecs_to_jiffies(wusbhc->trust_timeout / 2)); } /* * Find the wusb_dev from its device address. * * The device can be found directly from the address (see * wusb_cack_add() for where the device address is set to port_idx * +2), except when the address is zero. 
*/ static struct wusb_dev *wusbhc_find_dev_by_addr(struct wusbhc *wusbhc, u8 addr) { int p; if (addr == 0xff) /* unconnected */ return NULL; if (addr > 0) { int port = (addr & ~0x80) - 2; if (port < 0 || port >= wusbhc->ports_max) return NULL; return wusb_port_by_idx(wusbhc, port)->wusb_dev; } /* Look for the device with address 0. */ for (p = 0; p < wusbhc->ports_max; p++) { struct wusb_dev *wusb_dev = wusb_port_by_idx(wusbhc, p)->wusb_dev; if (wusb_dev && wusb_dev->addr == addr) return wusb_dev; } return NULL; } /* * Handle a DN_Alive notification (WUSB1.0[7.6.1]) * * This just updates the device activity timestamp and then refreshes * the keep alive IE. * * @wusbhc shall be referenced and unlocked */ static void wusbhc_handle_dn_alive(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { mutex_lock(&wusbhc->mutex); wusb_dev->entry_ts = jiffies; __wusbhc_keep_alive(wusbhc); mutex_unlock(&wusbhc->mutex); } /* * Handle a DN_Connect notification (WUSB1.0[7.6.1]) * * @wusbhc * @pkt_hdr * @size: Size of the buffer where the notification resides; if the * notification data suggests there should be more data than * available, an error will be signaled and the whole buffer * consumed. * * @wusbhc->mutex shall be held */ static void wusbhc_handle_dn_connect(struct wusbhc *wusbhc, struct wusb_dn_hdr *dn_hdr, size_t size) { struct device *dev = wusbhc->dev; struct wusb_dn_connect *dnc; char pr_cdid[WUSB_CKHDID_STRSIZE]; static const char *beacon_behaviour[] = { "reserved", "self-beacon", "directed-beacon", "no-beacon" }; if (size < sizeof(*dnc)) { dev_err(dev, "DN CONNECT: short notification (%zu < %zu)\n", size, sizeof(*dnc)); return; } dnc = container_of(dn_hdr, struct wusb_dn_connect, hdr); ckhdid_printf(pr_cdid, sizeof(pr_cdid), &dnc->CDID); dev_info(dev, "DN CONNECT: device %s @ %x (%s) wants to %s\n", pr_cdid, wusb_dn_connect_prev_dev_addr(dnc), beacon_behaviour[wusb_dn_connect_beacon_behavior(dnc)], wusb_dn_connect_new_connection(dnc) ? 
"connect" : "reconnect"); /* ACK the connect */ wusbhc_devconnect_ack(wusbhc, dnc, pr_cdid); } /* * Handle a DN_Disconnect notification (WUSB1.0[7.6.1]) * * Device is going down -- do the disconnect. * * @wusbhc shall be referenced and unlocked */ static void wusbhc_handle_dn_disconnect(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev) { struct device *dev = wusbhc->dev; dev_info(dev, "DN DISCONNECT: device 0x%02x going down\n", wusb_dev->addr); mutex_lock(&wusbhc->mutex); __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, wusb_dev->port_idx)); mutex_unlock(&wusbhc->mutex); } /* * Handle a Device Notification coming a host * * The Device Notification comes from a host (HWA, DWA or WHCI) * wrapped in a set of headers. Somebody else has peeled off those * headers for us and we just get one Device Notifications. * * Invalid DNs (e.g., too short) are discarded. * * @wusbhc shall be referenced * * FIXMES: * - implement priorities as in WUSB1.0[Table 7-55]? */ void wusbhc_handle_dn(struct wusbhc *wusbhc, u8 srcaddr, struct wusb_dn_hdr *dn_hdr, size_t size) { struct device *dev = wusbhc->dev; struct wusb_dev *wusb_dev; if (size < sizeof(struct wusb_dn_hdr)) { dev_err(dev, "DN data shorter than DN header (%d < %d)\n", (int)size, (int)sizeof(struct wusb_dn_hdr)); return; } wusb_dev = wusbhc_find_dev_by_addr(wusbhc, srcaddr); if (wusb_dev == NULL && dn_hdr->bType != WUSB_DN_CONNECT) { dev_dbg(dev, "ignoring DN %d from unconnected device %02x\n", dn_hdr->bType, srcaddr); return; } switch (dn_hdr->bType) { case WUSB_DN_CONNECT: wusbhc_handle_dn_connect(wusbhc, dn_hdr, size); break; case WUSB_DN_ALIVE: wusbhc_handle_dn_alive(wusbhc, wusb_dev); break; case WUSB_DN_DISCONNECT: wusbhc_handle_dn_disconnect(wusbhc, wusb_dev); break; case WUSB_DN_MASAVAILCHANGED: case WUSB_DN_RWAKE: case WUSB_DN_SLEEP: /* FIXME: handle these DNs. */ break; case WUSB_DN_EPRDY: /* The hardware handles these. 
*/ break; default: dev_warn(dev, "unknown DN %u (%d octets) from %u\n", dn_hdr->bType, (int)size, srcaddr); } } EXPORT_SYMBOL_GPL(wusbhc_handle_dn); /* * Disconnect a WUSB device from a the cluster * * @wusbhc * @port Fake port where the device is (wusbhc index, not USB port number). * * In Wireless USB, a disconnect is basically telling the device he is * being disconnected and forgetting about him. * * We send the device a Device Disconnect IE (WUSB1.0[7.5.11]) for 100 * ms and then keep going. * * We don't do much in case of error; we always pretend we disabled * the port and disconnected the device. If physically the request * didn't get there (many things can fail in the way there), the stack * will reject the device's communication attempts. * * @wusbhc should be refcounted and locked */ void __wusbhc_dev_disable(struct wusbhc *wusbhc, u8 port_idx) { int result; struct device *dev = wusbhc->dev; struct wusb_dev *wusb_dev; struct wuie_disconnect *ie; wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; if (wusb_dev == NULL) { /* reset no device? ignore */ dev_dbg(dev, "DISCONNECT: no device at port %u, ignoring\n", port_idx); return; } __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); ie = kzalloc(sizeof(*ie), GFP_KERNEL); if (ie == NULL) return; ie->hdr.bLength = sizeof(*ie); ie->hdr.bIEIdentifier = WUIE_ID_DEVICE_DISCONNECT; ie->bDeviceAddress = wusb_dev->addr; result = wusbhc_mmcie_set(wusbhc, 0, 0, &ie->hdr); if (result < 0) dev_err(dev, "DISCONNECT: can't set MMC: %d\n", result); else { /* At least 6 MMCs, assuming at least 1 MMC per zone. */ msleep(7*4); wusbhc_mmcie_rm(wusbhc, &ie->hdr); } kfree(ie); } /* * Walk over the BOS descriptor, verify and grok it * * @usb_dev: referenced * @wusb_dev: referenced and unlocked * * The BOS descriptor is defined at WUSB1.0[7.4.1], and it defines a * "flexible" way to wrap all kinds of descriptors inside an standard * descriptor (wonder why they didn't use normal descriptors, * btw). 
Not like they lack code. * * At the end we go to look for the WUSB Device Capabilities * (WUSB1.0[7.4.1.1]) that is wrapped in a device capability descriptor * that is part of the BOS descriptor set. That tells us what does the * device support (dual role, beacon type, UWB PHY rates). */ static int wusb_dev_bos_grok(struct usb_device *usb_dev, struct wusb_dev *wusb_dev, struct usb_bos_descriptor *bos, size_t desc_size) { ssize_t result; struct device *dev = &usb_dev->dev; void *itr, *top; /* Walk over BOS capabilities, verify them */ itr = (void *)bos + sizeof(*bos); top = itr + desc_size - sizeof(*bos); while (itr < top) { struct usb_dev_cap_header *cap_hdr = itr; size_t cap_size; u8 cap_type; if (top - itr < sizeof(*cap_hdr)) { dev_err(dev, "Device BUG? premature end of BOS header " "data [offset 0x%02x]: only %zu bytes left\n", (int)(itr - (void *)bos), top - itr); result = -ENOSPC; goto error_bad_cap; } cap_size = cap_hdr->bLength; cap_type = cap_hdr->bDevCapabilityType; if (cap_size == 0) break; if (cap_size > top - itr) { dev_err(dev, "Device BUG? premature end of BOS data " "[offset 0x%02x cap %02x %zu bytes]: " "only %zu bytes left\n", (int)(itr - (void *)bos), cap_type, cap_size, top - itr); result = -EBADF; goto error_bad_cap; } switch (cap_type) { case USB_CAP_TYPE_WIRELESS_USB: if (cap_size != sizeof(*wusb_dev->wusb_cap_descr)) dev_err(dev, "Device BUG? WUSB Capability " "descriptor is %zu bytes vs %zu " "needed\n", cap_size, sizeof(*wusb_dev->wusb_cap_descr)); else wusb_dev->wusb_cap_descr = itr; break; default: dev_err(dev, "BUG? 
Unknown BOS capability 0x%02x " "(%zu bytes) at offset 0x%02x\n", cap_type, cap_size, (int)(itr - (void *)bos)); } itr += cap_size; } result = 0; error_bad_cap: return result; } /* * Add information from the BOS descriptors to the device * * @usb_dev: referenced * @wusb_dev: referenced and unlocked * * So what we do is we alloc a space for the BOS descriptor of 64 * bytes; read the first four bytes which include the wTotalLength * field (WUSB1.0[T7-26]) and if it fits in those 64 bytes, read the * whole thing. If not we realloc to that size. * * Then we call the groking function, that will fill up * wusb_dev->wusb_cap_descr, which is what we'll need later on. */ static int wusb_dev_bos_add(struct usb_device *usb_dev, struct wusb_dev *wusb_dev) { ssize_t result; struct device *dev = &usb_dev->dev; struct usb_bos_descriptor *bos; size_t alloc_size = 32, desc_size = 4; bos = kmalloc(alloc_size, GFP_KERNEL); if (bos == NULL) return -ENOMEM; result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size); if (result < 4) { dev_err(dev, "Can't get BOS descriptor or too short: %zd\n", result); goto error_get_descriptor; } desc_size = le16_to_cpu(bos->wTotalLength); if (desc_size >= alloc_size) { kfree(bos); alloc_size = desc_size; bos = kmalloc(alloc_size, GFP_KERNEL); if (bos == NULL) return -ENOMEM; } result = usb_get_descriptor(usb_dev, USB_DT_BOS, 0, bos, desc_size); if (result < 0 || result != desc_size) { dev_err(dev, "Can't get BOS descriptor or too short (need " "%zu bytes): %zd\n", desc_size, result); goto error_get_descriptor; } if (result < sizeof(*bos) || le16_to_cpu(bos->wTotalLength) != desc_size) { dev_err(dev, "Can't get BOS descriptor or too short (need " "%zu bytes): %zd\n", desc_size, result); goto error_get_descriptor; } result = wusb_dev_bos_grok(usb_dev, wusb_dev, bos, result); if (result < 0) goto error_bad_bos; wusb_dev->bos = bos; return 0; error_bad_bos: error_get_descriptor: kfree(bos); wusb_dev->wusb_cap_descr = NULL; return result; } 
static void wusb_dev_bos_rm(struct wusb_dev *wusb_dev) { kfree(wusb_dev->bos); wusb_dev->wusb_cap_descr = NULL; }; /* * USB stack's device addition Notifier Callback * * Called from drivers/usb/core/hub.c when a new device is added; we * use this hook to perform certain WUSB specific setup work on the * new device. As well, it is the first time we can connect the * wusb_dev and the usb_dev. So we note it down in wusb_dev and take a * reference that we'll drop. * * First we need to determine if the device is a WUSB device (else we * ignore it). For that we use the speed setting (USB_SPEED_WIRELESS) * [FIXME: maybe we'd need something more definitive]. If so, we track * it's usb_busd and from there, the WUSB HC. * * Because all WUSB HCs are contained in a 'struct wusbhc', voila, we * get the wusbhc for the device. * * We have a reference on @usb_dev (as we are called at the end of its * enumeration). * * NOTE: @usb_dev locked */ static void wusb_dev_add_ncb(struct usb_device *usb_dev) { int result = 0; struct wusb_dev *wusb_dev; struct wusbhc *wusbhc; struct device *dev = &usb_dev->dev; u8 port_idx; if (usb_dev->wusb == 0 || usb_dev->devnum == 1) return; /* skip non wusb and wusb RHs */ usb_set_device_state(usb_dev, USB_STATE_UNAUTHENTICATED); wusbhc = wusbhc_get_by_usb_dev(usb_dev); if (wusbhc == NULL) goto error_nodev; mutex_lock(&wusbhc->mutex); wusb_dev = __wusb_dev_get_by_usb_dev(wusbhc, usb_dev); port_idx = wusb_port_no_to_idx(usb_dev->portnum); mutex_unlock(&wusbhc->mutex); if (wusb_dev == NULL) goto error_nodev; wusb_dev->usb_dev = usb_get_dev(usb_dev); usb_dev->wusb_dev = wusb_dev_get(wusb_dev); result = wusb_dev_sec_add(wusbhc, usb_dev, wusb_dev); if (result < 0) { dev_err(dev, "Cannot enable security: %d\n", result); goto error_sec_add; } /* Now query the device for it's BOS and attach it to wusb_dev */ result = wusb_dev_bos_add(usb_dev, wusb_dev); if (result < 0) { dev_err(dev, "Cannot get BOS descriptors: %d\n", result); goto error_bos_add; } result = 
wusb_dev_sysfs_add(wusbhc, usb_dev, wusb_dev); if (result < 0) goto error_add_sysfs; out: wusb_dev_put(wusb_dev); wusbhc_put(wusbhc); error_nodev: return; wusb_dev_sysfs_rm(wusb_dev); error_add_sysfs: wusb_dev_bos_rm(wusb_dev); error_bos_add: wusb_dev_sec_rm(wusb_dev); error_sec_add: mutex_lock(&wusbhc->mutex); __wusbhc_dev_disconnect(wusbhc, wusb_port_by_idx(wusbhc, port_idx)); mutex_unlock(&wusbhc->mutex); goto out; } /* * Undo all the steps done at connection by the notifier callback * * NOTE: @usb_dev locked */ static void wusb_dev_rm_ncb(struct usb_device *usb_dev) { struct wusb_dev *wusb_dev = usb_dev->wusb_dev; if (usb_dev->wusb == 0 || usb_dev->devnum == 1) return; /* skip non wusb and wusb RHs */ wusb_dev_sysfs_rm(wusb_dev); wusb_dev_bos_rm(wusb_dev); wusb_dev_sec_rm(wusb_dev); wusb_dev->usb_dev = NULL; usb_dev->wusb_dev = NULL; wusb_dev_put(wusb_dev); usb_put_dev(usb_dev); } /* * Handle notifications from the USB stack (notifier call back) * * This is called when the USB stack does a * usb_{bus,device}_{add,remove}() so we can do WUSB specific * handling. It is called with [for the case of * USB_DEVICE_{ADD,REMOVE} with the usb_dev locked. 
*/ int wusb_usb_ncb(struct notifier_block *nb, unsigned long val, void *priv) { int result = NOTIFY_OK; switch (val) { case USB_DEVICE_ADD: wusb_dev_add_ncb(priv); break; case USB_DEVICE_REMOVE: wusb_dev_rm_ncb(priv); break; case USB_BUS_ADD: /* ignore (for now) */ case USB_BUS_REMOVE: break; default: WARN_ON(1); result = NOTIFY_BAD; }; return result; } /* * Return a referenced wusb_dev given a @wusbhc and @usb_dev */ struct wusb_dev *__wusb_dev_get_by_usb_dev(struct wusbhc *wusbhc, struct usb_device *usb_dev) { struct wusb_dev *wusb_dev; u8 port_idx; port_idx = wusb_port_no_to_idx(usb_dev->portnum); BUG_ON(port_idx > wusbhc->ports_max); wusb_dev = wusb_port_by_idx(wusbhc, port_idx)->wusb_dev; if (wusb_dev != NULL) /* ops, device is gone */ wusb_dev_get(wusb_dev); return wusb_dev; } EXPORT_SYMBOL_GPL(__wusb_dev_get_by_usb_dev); void wusb_dev_destroy(struct kref *_wusb_dev) { struct wusb_dev *wusb_dev = container_of(_wusb_dev, struct wusb_dev, refcnt); list_del_init(&wusb_dev->cack_node); wusb_dev_free(wusb_dev); } EXPORT_SYMBOL_GPL(wusb_dev_destroy); /* * Create all the device connect handling infrastructure * * This is basically the device info array, Connect Acknowledgement * (cack) lists, keep-alive timers (and delayed work thread). */ int wusbhc_devconnect_create(struct wusbhc *wusbhc) { wusbhc->keep_alive_ie.hdr.bIEIdentifier = WUIE_ID_KEEP_ALIVE; wusbhc->keep_alive_ie.hdr.bLength = sizeof(wusbhc->keep_alive_ie.hdr); INIT_DELAYED_WORK(&wusbhc->keep_alive_timer, wusbhc_keep_alive_run); wusbhc->cack_ie.hdr.bIEIdentifier = WUIE_ID_CONNECTACK; wusbhc->cack_ie.hdr.bLength = sizeof(wusbhc->cack_ie.hdr); INIT_LIST_HEAD(&wusbhc->cack_list); return 0; } /* * Release all resources taken by the devconnect stuff */ void wusbhc_devconnect_destroy(struct wusbhc *wusbhc) { /* no op */ } /* * wusbhc_devconnect_start - start accepting device connections * @wusbhc: the WUSB HC * * Sets the Host Info IE to accept all new connections. 
* * FIXME: This also enables the keep alives but this is not necessary * until there are connected and authenticated devices. */ int wusbhc_devconnect_start(struct wusbhc *wusbhc) { struct device *dev = wusbhc->dev; struct wuie_host_info *hi; int result; hi = kzalloc(sizeof(*hi), GFP_KERNEL); if (hi == NULL) return -ENOMEM; hi->hdr.bLength = sizeof(*hi); hi->hdr.bIEIdentifier = WUIE_ID_HOST_INFO; hi->attributes = cpu_to_le16((wusbhc->rsv->stream << 3) | WUIE_HI_CAP_ALL); hi->CHID = wusbhc->chid; result = wusbhc_mmcie_set(wusbhc, 0, 0, &hi->hdr); if (result < 0) { dev_err(dev, "Cannot add Host Info MMCIE: %d\n", result); goto error_mmcie_set; } wusbhc->wuie_host_info = hi; queue_delayed_work(wusbd, &wusbhc->keep_alive_timer, (wusbhc->trust_timeout*CONFIG_HZ)/1000/2); return 0; error_mmcie_set: kfree(hi); return result; } /* * wusbhc_devconnect_stop - stop managing connected devices * @wusbhc: the WUSB HC * * Disconnects any devices still connected, stops the keep alives and * removes the Host Info IE. */ void wusbhc_devconnect_stop(struct wusbhc *wusbhc) { int i; mutex_lock(&wusbhc->mutex); for (i = 0; i < wusbhc->ports_max; i++) { if (wusbhc->port[i].wusb_dev) __wusbhc_dev_disconnect(wusbhc, &wusbhc->port[i]); } mutex_unlock(&wusbhc->mutex); cancel_delayed_work_sync(&wusbhc->keep_alive_timer); wusbhc_mmcie_rm(wusbhc, &wusbhc->wuie_host_info->hdr); kfree(wusbhc->wuie_host_info); wusbhc->wuie_host_info = NULL; } /* * wusb_set_dev_addr - set the WUSB device address used by the host * @wusbhc: the WUSB HC the device is connect to * @wusb_dev: the WUSB device * @addr: new device address */ int wusb_set_dev_addr(struct wusbhc *wusbhc, struct wusb_dev *wusb_dev, u8 addr) { int result; wusb_dev->addr = addr; result = wusbhc->dev_info_set(wusbhc, wusb_dev); if (result < 0) dev_err(wusbhc->dev, "device %d: failed to set device " "address\n", wusb_dev->port_idx); else dev_info(wusbhc->dev, "device %d: %s addr %u\n", wusb_dev->port_idx, (addr & WUSB_DEV_ADDR_UNAUTH) ? 
"unauth" : "auth", wusb_dev->addr); return result; }
gpl-2.0
muftiarfan/DWI_xm
sound/pci/hda/patch_ca0132.c
3274
28340
/* * HD audio interface patch for Creative CA0132 chip * * Copyright (c) 2011, Creative Technology Ltd. * * Based on patch_ca0110.c * Copyright (c) 2008 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/mutex.h> #include <linux/module.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" #define WIDGET_CHIP_CTRL 0x15 #define WIDGET_DSP_CTRL 0x16 #define WUH_MEM_CONNID 10 #define DSP_MEM_CONNID 16 enum hda_cmd_vendor_io { /* for DspIO node */ VENDOR_DSPIO_SCP_WRITE_DATA_LOW = 0x000, VENDOR_DSPIO_SCP_WRITE_DATA_HIGH = 0x100, VENDOR_DSPIO_STATUS = 0xF01, VENDOR_DSPIO_SCP_POST_READ_DATA = 0x702, VENDOR_DSPIO_SCP_READ_DATA = 0xF02, VENDOR_DSPIO_DSP_INIT = 0x703, VENDOR_DSPIO_SCP_POST_COUNT_QUERY = 0x704, VENDOR_DSPIO_SCP_READ_COUNT = 0xF04, /* for ChipIO node */ VENDOR_CHIPIO_ADDRESS_LOW = 0x000, VENDOR_CHIPIO_ADDRESS_HIGH = 0x100, VENDOR_CHIPIO_STREAM_FORMAT = 0x200, VENDOR_CHIPIO_DATA_LOW = 0x300, VENDOR_CHIPIO_DATA_HIGH = 0x400, VENDOR_CHIPIO_GET_PARAMETER = 0xF00, VENDOR_CHIPIO_STATUS = 0xF01, VENDOR_CHIPIO_HIC_POST_READ = 0x702, VENDOR_CHIPIO_HIC_READ_DATA = 0xF03, VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE = 0x70A, VENDOR_CHIPIO_PLL_PMU_WRITE = 0x70C, VENDOR_CHIPIO_PLL_PMU_READ 
= 0xF0C, VENDOR_CHIPIO_8051_ADDRESS_LOW = 0x70D, VENDOR_CHIPIO_8051_ADDRESS_HIGH = 0x70E, VENDOR_CHIPIO_FLAG_SET = 0x70F, VENDOR_CHIPIO_FLAGS_GET = 0xF0F, VENDOR_CHIPIO_PARAMETER_SET = 0x710, VENDOR_CHIPIO_PARAMETER_GET = 0xF10, VENDOR_CHIPIO_PORT_ALLOC_CONFIG_SET = 0x711, VENDOR_CHIPIO_PORT_ALLOC_SET = 0x712, VENDOR_CHIPIO_PORT_ALLOC_GET = 0xF12, VENDOR_CHIPIO_PORT_FREE_SET = 0x713, VENDOR_CHIPIO_PARAMETER_EX_ID_GET = 0xF17, VENDOR_CHIPIO_PARAMETER_EX_ID_SET = 0x717, VENDOR_CHIPIO_PARAMETER_EX_VALUE_GET = 0xF18, VENDOR_CHIPIO_PARAMETER_EX_VALUE_SET = 0x718 }; /* * Control flag IDs */ enum control_flag_id { /* Connection manager stream setup is bypassed/enabled */ CONTROL_FLAG_C_MGR = 0, /* DSP DMA is bypassed/enabled */ CONTROL_FLAG_DMA = 1, /* 8051 'idle' mode is disabled/enabled */ CONTROL_FLAG_IDLE_ENABLE = 2, /* Tracker for the SPDIF-in path is bypassed/enabled */ CONTROL_FLAG_TRACKER = 3, /* DigitalOut to Spdif2Out connection is disabled/enabled */ CONTROL_FLAG_SPDIF2OUT = 4, /* Digital Microphone is disabled/enabled */ CONTROL_FLAG_DMIC = 5, /* ADC_B rate is 48 kHz/96 kHz */ CONTROL_FLAG_ADC_B_96KHZ = 6, /* ADC_C rate is 48 kHz/96 kHz */ CONTROL_FLAG_ADC_C_96KHZ = 7, /* DAC rate is 48 kHz/96 kHz (affects all DACs) */ CONTROL_FLAG_DAC_96KHZ = 8, /* DSP rate is 48 kHz/96 kHz */ CONTROL_FLAG_DSP_96KHZ = 9, /* SRC clock is 98 MHz/196 MHz (196 MHz forces rate to 96 KHz) */ CONTROL_FLAG_SRC_CLOCK_196MHZ = 10, /* SRC rate is 48 kHz/96 kHz (48 kHz disabled when clock is 196 MHz) */ CONTROL_FLAG_SRC_RATE_96KHZ = 11, /* Decode Loop (DSP->SRC->DSP) is disabled/enabled */ CONTROL_FLAG_DECODE_LOOP = 12, /* De-emphasis filter on DAC-1 disabled/enabled */ CONTROL_FLAG_DAC1_DEEMPHASIS = 13, /* De-emphasis filter on DAC-2 disabled/enabled */ CONTROL_FLAG_DAC2_DEEMPHASIS = 14, /* De-emphasis filter on DAC-3 disabled/enabled */ CONTROL_FLAG_DAC3_DEEMPHASIS = 15, /* High-pass filter on ADC_B disabled/enabled */ CONTROL_FLAG_ADC_B_HIGH_PASS = 16, /* High-pass filter on ADC_C 
disabled/enabled */ CONTROL_FLAG_ADC_C_HIGH_PASS = 17, /* Common mode on Port_A disabled/enabled */ CONTROL_FLAG_PORT_A_COMMON_MODE = 18, /* Common mode on Port_D disabled/enabled */ CONTROL_FLAG_PORT_D_COMMON_MODE = 19, /* Impedance for ramp generator on Port_A 16 Ohm/10K Ohm */ CONTROL_FLAG_PORT_A_10KOHM_LOAD = 20, /* Impedance for ramp generator on Port_D, 16 Ohm/10K Ohm */ CONTROL_FLAG_PORT_D_10K0HM_LOAD = 21, /* ASI rate is 48kHz/96kHz */ CONTROL_FLAG_ASI_96KHZ = 22, /* DAC power settings able to control attached ports no/yes */ CONTROL_FLAG_DACS_CONTROL_PORTS = 23, /* Clock Stop OK reporting is disabled/enabled */ CONTROL_FLAG_CONTROL_STOP_OK_ENABLE = 24, /* Number of control flags */ CONTROL_FLAGS_MAX = (CONTROL_FLAG_CONTROL_STOP_OK_ENABLE+1) }; /* * Control parameter IDs */ enum control_parameter_id { /* 0: force HDA, 1: allow DSP if HDA Spdif1Out stream is idle */ CONTROL_PARAM_SPDIF1_SOURCE = 2, /* Stream Control */ /* Select stream with the given ID */ CONTROL_PARAM_STREAM_ID = 24, /* Source connection point for the selected stream */ CONTROL_PARAM_STREAM_SOURCE_CONN_POINT = 25, /* Destination connection point for the selected stream */ CONTROL_PARAM_STREAM_DEST_CONN_POINT = 26, /* Number of audio channels in the selected stream */ CONTROL_PARAM_STREAMS_CHANNELS = 27, /*Enable control for the selected stream */ CONTROL_PARAM_STREAM_CONTROL = 28, /* Connection Point Control */ /* Select connection point with the given ID */ CONTROL_PARAM_CONN_POINT_ID = 29, /* Connection point sample rate */ CONTROL_PARAM_CONN_POINT_SAMPLE_RATE = 30, /* Node Control */ /* Select HDA node with the given ID */ CONTROL_PARAM_NODE_ID = 31 }; /* * Dsp Io Status codes */ enum hda_vendor_status_dspio { /* Success */ VENDOR_STATUS_DSPIO_OK = 0x00, /* Busy, unable to accept new command, the host must retry */ VENDOR_STATUS_DSPIO_BUSY = 0x01, /* SCP command queue is full */ VENDOR_STATUS_DSPIO_SCP_COMMAND_QUEUE_FULL = 0x02, /* SCP response queue is empty */ 
VENDOR_STATUS_DSPIO_SCP_RESPONSE_QUEUE_EMPTY = 0x03 }; /* * Chip Io Status codes */ enum hda_vendor_status_chipio { /* Success */ VENDOR_STATUS_CHIPIO_OK = 0x00, /* Busy, unable to accept new command, the host must retry */ VENDOR_STATUS_CHIPIO_BUSY = 0x01 }; /* * CA0132 sample rate */ enum ca0132_sample_rate { SR_6_000 = 0x00, SR_8_000 = 0x01, SR_9_600 = 0x02, SR_11_025 = 0x03, SR_16_000 = 0x04, SR_22_050 = 0x05, SR_24_000 = 0x06, SR_32_000 = 0x07, SR_44_100 = 0x08, SR_48_000 = 0x09, SR_88_200 = 0x0A, SR_96_000 = 0x0B, SR_144_000 = 0x0C, SR_176_400 = 0x0D, SR_192_000 = 0x0E, SR_384_000 = 0x0F, SR_COUNT = 0x10, SR_RATE_UNKNOWN = 0x1F }; /* * Scp Helper function */ enum get_set { IS_SET = 0, IS_GET = 1, }; /* * Duplicated from ca0110 codec */ static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac) { if (pin) { snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP); if (get_wcaps(codec, pin) & AC_WCAP_OUT_AMP) snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); } if (dac) snd_hda_codec_write(codec, dac, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO); } static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc) { if (pin) { snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80); if (get_wcaps(codec, pin) & AC_WCAP_IN_AMP) snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)); } if (adc) snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)); } static char *dirstr[2] = { "Playback", "Capture" }; static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int chan, int dir) { char namestr[44]; int type = dir ? 
HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type); sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int chan, int dir) { char namestr[44]; int type = dir ? HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type); sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } #define add_out_switch(codec, nid, pfx) _add_switch(codec, nid, pfx, 3, 0) #define add_out_volume(codec, nid, pfx) _add_volume(codec, nid, pfx, 3, 0) #define add_in_switch(codec, nid, pfx) _add_switch(codec, nid, pfx, 3, 1) #define add_in_volume(codec, nid, pfx) _add_volume(codec, nid, pfx, 3, 1) #define add_mono_switch(codec, nid, pfx, chan) \ _add_switch(codec, nid, pfx, chan, 0) #define add_mono_volume(codec, nid, pfx, chan) \ _add_volume(codec, nid, pfx, chan, 0) #define add_in_mono_switch(codec, nid, pfx, chan) \ _add_switch(codec, nid, pfx, chan, 1) #define add_in_mono_volume(codec, nid, pfx, chan) \ _add_volume(codec, nid, pfx, chan, 1) /* * CA0132 specific */ struct ca0132_spec { struct auto_pin_cfg autocfg; struct hda_multi_out multiout; hda_nid_t out_pins[AUTO_CFG_MAX_OUTS]; hda_nid_t dacs[AUTO_CFG_MAX_OUTS]; hda_nid_t hp_dac; hda_nid_t input_pins[AUTO_PIN_LAST]; hda_nid_t adcs[AUTO_PIN_LAST]; hda_nid_t dig_out; hda_nid_t dig_in; unsigned int num_inputs; long curr_hp_switch; long curr_hp_volume[2]; long curr_speaker_switch; struct mutex chipio_mutex; const char *input_labels[AUTO_PIN_LAST]; struct hda_pcm pcm_rec[2]; /* PCM information */ }; /* Chip access helper function */ static int chipio_send(struct hda_codec *codec, unsigned int reg, unsigned int data) { unsigned int res; int retry = 50; /* send bits of data specified by reg */ do { res = snd_hda_codec_read(codec, 
WIDGET_CHIP_CTRL, 0, reg, data); if (res == VENDOR_STATUS_CHIPIO_OK) return 0; } while (--retry); return -EIO; } /* * Write chip address through the vendor widget -- NOT protected by the Mutex! */ static int chipio_write_address(struct hda_codec *codec, unsigned int chip_addx) { int res; /* send low 16 bits of the address */ res = chipio_send(codec, VENDOR_CHIPIO_ADDRESS_LOW, chip_addx & 0xffff); if (res != -EIO) { /* send high 16 bits of the address */ res = chipio_send(codec, VENDOR_CHIPIO_ADDRESS_HIGH, chip_addx >> 16); } return res; } /* * Write data through the vendor widget -- NOT protected by the Mutex! */ static int chipio_write_data(struct hda_codec *codec, unsigned int data) { int res; /* send low 16 bits of the data */ res = chipio_send(codec, VENDOR_CHIPIO_DATA_LOW, data & 0xffff); if (res != -EIO) { /* send high 16 bits of the data */ res = chipio_send(codec, VENDOR_CHIPIO_DATA_HIGH, data >> 16); } return res; } /* * Read data through the vendor widget -- NOT protected by the Mutex! */ static int chipio_read_data(struct hda_codec *codec, unsigned int *data) { int res; /* post read */ res = chipio_send(codec, VENDOR_CHIPIO_HIC_POST_READ, 0); if (res != -EIO) { /* read status */ res = chipio_send(codec, VENDOR_CHIPIO_STATUS, 0); } if (res != -EIO) { /* read data */ *data = snd_hda_codec_read(codec, WIDGET_CHIP_CTRL, 0, VENDOR_CHIPIO_HIC_READ_DATA, 0); } return res; } /* * Write given value to the given address through the chip I/O widget. 
* protected by the Mutex */ static int chipio_write(struct hda_codec *codec, unsigned int chip_addx, const unsigned int data) { struct ca0132_spec *spec = codec->spec; int err; mutex_lock(&spec->chipio_mutex); /* write the address, and if successful proceed to write data */ err = chipio_write_address(codec, chip_addx); if (err < 0) goto exit; err = chipio_write_data(codec, data); if (err < 0) goto exit; exit: mutex_unlock(&spec->chipio_mutex); return err; } /* * Read the given address through the chip I/O widget * protected by the Mutex */ static int chipio_read(struct hda_codec *codec, unsigned int chip_addx, unsigned int *data) { struct ca0132_spec *spec = codec->spec; int err; mutex_lock(&spec->chipio_mutex); /* write the address, and if successful proceed to write data */ err = chipio_write_address(codec, chip_addx); if (err < 0) goto exit; err = chipio_read_data(codec, data); if (err < 0) goto exit; exit: mutex_unlock(&spec->chipio_mutex); return err; } /* * PCM stuffs */ static void ca0132_setup_stream(struct hda_codec *codec, hda_nid_t nid, u32 stream_tag, int channel_id, int format) { unsigned int oldval, newval; if (!nid) return; snd_printdd("ca0132_setup_stream: " "NID=0x%x, stream=0x%x, channel=%d, format=0x%x\n", nid, stream_tag, channel_id, format); /* update the format-id if changed */ oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_STREAM_FORMAT, 0); if (oldval != format) { msleep(20); snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, format); } oldval = snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_CONV, 0); newval = (stream_tag << 4) | channel_id; if (oldval != newval) { snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, newval); } } static void ca0132_cleanup_stream(struct hda_codec *codec, hda_nid_t nid) { snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_STREAM_FORMAT, 0); snd_hda_codec_write(codec, nid, 0, AC_VERB_SET_CHANNEL_STREAMID, 0); } /* * PCM callbacks */ static int ca0132_playback_pcm_prepare(struct 
hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; ca0132_setup_stream(codec, spec->dacs[0], stream_tag, 0, format); return 0; } static int ca0132_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; ca0132_cleanup_stream(codec, spec->dacs[0]); return 0; } /* * Digital out */ static int ca0132_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; ca0132_setup_stream(codec, spec->dig_out, stream_tag, 0, format); return 0; } static int ca0132_dig_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; ca0132_cleanup_stream(codec, spec->dig_out); return 0; } /* * Analog capture */ static int ca0132_capture_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; ca0132_setup_stream(codec, spec->adcs[substream->number], stream_tag, 0, format); return 0; } static int ca0132_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; ca0132_cleanup_stream(codec, spec->adcs[substream->number]); return 0; } /* * Digital capture */ static int ca0132_dig_capture_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; ca0132_setup_stream(codec, spec->dig_in, stream_tag, 0, format); return 0; } static int ca0132_dig_capture_pcm_cleanup(struct 
hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0132_spec *spec = codec->spec; ca0132_cleanup_stream(codec, spec->dig_in); return 0; } /* */ static struct hda_pcm_stream ca0132_pcm_analog_playback = { .substreams = 1, .channels_min = 2, .channels_max = 2, .ops = { .prepare = ca0132_playback_pcm_prepare, .cleanup = ca0132_playback_pcm_cleanup }, }; static struct hda_pcm_stream ca0132_pcm_analog_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, .ops = { .prepare = ca0132_capture_pcm_prepare, .cleanup = ca0132_capture_pcm_cleanup }, }; static struct hda_pcm_stream ca0132_pcm_digital_playback = { .substreams = 1, .channels_min = 2, .channels_max = 2, .ops = { .prepare = ca0132_dig_playback_pcm_prepare, .cleanup = ca0132_dig_playback_pcm_cleanup }, }; static struct hda_pcm_stream ca0132_pcm_digital_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, .ops = { .prepare = ca0132_dig_capture_pcm_prepare, .cleanup = ca0132_dig_capture_pcm_cleanup }, }; static int ca0132_build_pcms(struct hda_codec *codec) { struct ca0132_spec *spec = codec->spec; struct hda_pcm *info = spec->pcm_rec; codec->pcm_info = info; codec->num_pcms = 0; info->name = "CA0132 Analog"; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ca0132_pcm_analog_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dacs[0]; info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max = spec->multiout.max_channels; info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_analog_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_inputs; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0]; codec->num_pcms++; if (!spec->dig_out && !spec->dig_in) return 0; info++; info->name = "CA0132 Digital"; info->pcm_type = HDA_PCM_TYPE_SPDIF; if (spec->dig_out) { info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ca0132_pcm_digital_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dig_out; } if (spec->dig_in) { 
info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0132_pcm_digital_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in; } codec->num_pcms++; return 0; } #define REG_CODEC_MUTE 0x18b014 #define REG_CODEC_HP_VOL_L 0x18b070 #define REG_CODEC_HP_VOL_R 0x18b074 static int ca0132_hp_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct ca0132_spec *spec = codec->spec; long *valp = ucontrol->value.integer.value; *valp = spec->curr_hp_switch; return 0; } static int ca0132_hp_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct ca0132_spec *spec = codec->spec; long *valp = ucontrol->value.integer.value; unsigned int data; int err; /* any change? */ if (spec->curr_hp_switch == *valp) return 0; snd_hda_power_up(codec); err = chipio_read(codec, REG_CODEC_MUTE, &data); if (err < 0) goto exit; /* *valp 0 is mute, 1 is unmute */ data = (data & 0x7f) | (*valp ? 0 : 0x80); err = chipio_write(codec, REG_CODEC_MUTE, data); if (err < 0) goto exit; spec->curr_hp_switch = *valp; exit: snd_hda_power_down(codec); return err < 0 ? err : 1; } static int ca0132_speaker_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct ca0132_spec *spec = codec->spec; long *valp = ucontrol->value.integer.value; *valp = spec->curr_speaker_switch; return 0; } static int ca0132_speaker_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct ca0132_spec *spec = codec->spec; long *valp = ucontrol->value.integer.value; unsigned int data; int err; /* any change? 
*/ if (spec->curr_speaker_switch == *valp) return 0; snd_hda_power_up(codec); err = chipio_read(codec, REG_CODEC_MUTE, &data); if (err < 0) goto exit; /* *valp 0 is mute, 1 is unmute */ data = (data & 0xef) | (*valp ? 0 : 0x10); err = chipio_write(codec, REG_CODEC_MUTE, data); if (err < 0) goto exit; spec->curr_speaker_switch = *valp; exit: snd_hda_power_down(codec); return err < 0 ? err : 1; } static int ca0132_hp_volume_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct ca0132_spec *spec = codec->spec; long *valp = ucontrol->value.integer.value; *valp++ = spec->curr_hp_volume[0]; *valp = spec->curr_hp_volume[1]; return 0; } static int ca0132_hp_volume_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); struct ca0132_spec *spec = codec->spec; long *valp = ucontrol->value.integer.value; long left_vol, right_vol; unsigned int data; int val; int err; left_vol = *valp++; right_vol = *valp; /* any change? */ if ((spec->curr_hp_volume[0] == left_vol) && (spec->curr_hp_volume[1] == right_vol)) return 0; snd_hda_power_up(codec); err = chipio_read(codec, REG_CODEC_HP_VOL_L, &data); if (err < 0) goto exit; val = 31 - left_vol; data = (data & 0xe0) | val; err = chipio_write(codec, REG_CODEC_HP_VOL_L, data); if (err < 0) goto exit; val = 31 - right_vol; data = (data & 0xe0) | val; err = chipio_write(codec, REG_CODEC_HP_VOL_R, data); if (err < 0) goto exit; spec->curr_hp_volume[0] = left_vol; spec->curr_hp_volume[1] = right_vol; exit: snd_hda_power_down(codec); return err < 0 ? 
err : 1; } static int add_hp_switch(struct hda_codec *codec, hda_nid_t nid) { struct snd_kcontrol_new knew = HDA_CODEC_MUTE_MONO("Headphone Playback Switch", nid, 1, 0, HDA_OUTPUT); knew.get = ca0132_hp_switch_get; knew.put = ca0132_hp_switch_put; return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } static int add_hp_volume(struct hda_codec *codec, hda_nid_t nid) { struct snd_kcontrol_new knew = HDA_CODEC_VOLUME_MONO("Headphone Playback Volume", nid, 3, 0, HDA_OUTPUT); knew.get = ca0132_hp_volume_get; knew.put = ca0132_hp_volume_put; return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } static int add_speaker_switch(struct hda_codec *codec, hda_nid_t nid) { struct snd_kcontrol_new knew = HDA_CODEC_MUTE_MONO("Speaker Playback Switch", nid, 1, 0, HDA_OUTPUT); knew.get = ca0132_speaker_switch_get; knew.put = ca0132_speaker_switch_put; return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } static void ca0132_fix_hp_caps(struct hda_codec *codec) { struct ca0132_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; unsigned int caps; /* set mute-capable, 1db step, 32 steps, ofs 6 */ caps = 0x80031f06; snd_hda_override_amp_caps(codec, cfg->hp_pins[0], HDA_OUTPUT, caps); } static int ca0132_build_controls(struct hda_codec *codec) { struct ca0132_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i, err; if (spec->multiout.num_dacs) { err = add_speaker_switch(codec, spec->out_pins[0]); if (err < 0) return err; } if (cfg->hp_outs) { ca0132_fix_hp_caps(codec); err = add_hp_switch(codec, cfg->hp_pins[0]); if (err < 0) return err; err = add_hp_volume(codec, cfg->hp_pins[0]); if (err < 0) return err; } for (i = 0; i < spec->num_inputs; i++) { const char *label = spec->input_labels[i]; err = add_in_switch(codec, spec->adcs[i], label); if (err < 0) return err; err = add_in_volume(codec, spec->adcs[i], label); if (err < 0) return err; if (cfg->inputs[i].type == AUTO_PIN_MIC) { /* add Mic-Boost */ err = 
add_in_mono_volume(codec, spec->input_pins[i], "Mic Boost", 1); if (err < 0) return err; } } if (spec->dig_out) { err = snd_hda_create_spdif_out_ctls(codec, spec->dig_out, spec->dig_out); if (err < 0) return err; err = add_out_volume(codec, spec->dig_out, "IEC958"); if (err < 0) return err; } if (spec->dig_in) { err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in); if (err < 0) return err; err = add_in_volume(codec, spec->dig_in, "IEC958"); if (err < 0) return err; } return 0; } static void ca0132_set_ct_ext(struct hda_codec *codec, int enable) { /* Set Creative extension */ snd_printdd("SET CREATIVE EXTENSION\n"); snd_hda_codec_write(codec, WIDGET_CHIP_CTRL, 0, VENDOR_CHIPIO_CT_EXTENSIONS_ENABLE, enable); msleep(20); } static void ca0132_config(struct hda_codec *codec) { struct ca0132_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; /* line-outs */ cfg->line_outs = 1; cfg->line_out_pins[0] = 0x0b; /* front */ cfg->line_out_type = AUTO_PIN_LINE_OUT; spec->dacs[0] = 0x02; spec->out_pins[0] = 0x0b; spec->multiout.dac_nids = spec->dacs; spec->multiout.num_dacs = 1; spec->multiout.max_channels = 2; /* headphone */ cfg->hp_outs = 1; cfg->hp_pins[0] = 0x0f; spec->hp_dac = 0; spec->multiout.hp_nid = 0; /* inputs */ cfg->num_inputs = 2; /* Mic-in and line-in */ cfg->inputs[0].pin = 0x12; cfg->inputs[0].type = AUTO_PIN_MIC; cfg->inputs[1].pin = 0x11; cfg->inputs[1].type = AUTO_PIN_LINE_IN; /* Mic-in */ spec->input_pins[0] = 0x12; spec->input_labels[0] = "Mic-In"; spec->adcs[0] = 0x07; /* Line-In */ spec->input_pins[1] = 0x11; spec->input_labels[1] = "Line-In"; spec->adcs[1] = 0x08; spec->num_inputs = 2; } static void ca0132_init_chip(struct hda_codec *codec) { struct ca0132_spec *spec = codec->spec; mutex_init(&spec->chipio_mutex); } static void ca0132_exit_chip(struct hda_codec *codec) { /* put any chip cleanup stuffs here. 
*/ } static int ca0132_init(struct hda_codec *codec) { struct ca0132_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i; for (i = 0; i < spec->multiout.num_dacs; i++) { init_output(codec, spec->out_pins[i], spec->multiout.dac_nids[i]); } init_output(codec, cfg->hp_pins[0], spec->hp_dac); init_output(codec, cfg->dig_out_pins[0], spec->dig_out); for (i = 0; i < spec->num_inputs; i++) init_input(codec, spec->input_pins[i], spec->adcs[i]); init_input(codec, cfg->dig_in_pin, spec->dig_in); ca0132_set_ct_ext(codec, 1); return 0; } static void ca0132_free(struct hda_codec *codec) { ca0132_set_ct_ext(codec, 0); ca0132_exit_chip(codec); kfree(codec->spec); } static struct hda_codec_ops ca0132_patch_ops = { .build_controls = ca0132_build_controls, .build_pcms = ca0132_build_pcms, .init = ca0132_init, .free = ca0132_free, }; static int patch_ca0132(struct hda_codec *codec) { struct ca0132_spec *spec; snd_printdd("patch_ca0132\n"); spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; codec->spec = spec; ca0132_init_chip(codec); ca0132_config(codec); codec->patch_ops = ca0132_patch_ops; return 0; } /* * patch entries */ static struct hda_codec_preset snd_hda_preset_ca0132[] = { { .id = 0x11020011, .name = "CA0132", .patch = patch_ca0132 }, {} /* terminator */ }; MODULE_ALIAS("snd-hda-codec-id:11020011"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Creative CA0132, CA0132 HD-audio codec"); static struct hda_codec_preset_list ca0132_list = { .preset = snd_hda_preset_ca0132, .owner = THIS_MODULE, }; static int __init patch_ca0132_init(void) { return snd_hda_add_codec_preset(&ca0132_list); } static void __exit patch_ca0132_exit(void) { snd_hda_delete_codec_preset(&ca0132_list); } module_init(patch_ca0132_init) module_exit(patch_ca0132_exit)
gpl-2.0
gentu/android_kernel_zte_nx503a
arch/x86/mm/init_32.c
4810
25288
/* * * Copyright (C) 1995 Linus Torvalds * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/pci.h> #include <linux/pfn.h> #include <linux/poison.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/proc_fs.h> #include <linux/memory_hotplug.h> #include <linux/initrd.h> #include <linux/cpumask.h> #include <linux/gfp.h> #include <asm/asm.h> #include <asm/bios_ebda.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/dma.h> #include <asm/fixmap.h> #include <asm/e820.h> #include <asm/apic.h> #include <asm/bugs.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/olpc_ofw.h> #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/paravirt.h> #include <asm/setup.h> #include <asm/cacheflush.h> #include <asm/page_types.h> #include <asm/init.h> unsigned long highstart_pfn, highend_pfn; static noinline int do_test_wp_bit(void); bool __read_mostly __vmalloc_start_set = false; static __init void *alloc_low_page(void) { unsigned long pfn = pgt_buf_end++; void *adr; if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = __va(pfn * PAGE_SIZE); clear_page(adr); return adr; } /* * Creates a middle page table and puts a pointer to it in the * given global directory entry. This only returns the gd entry * in non-PAE compilation mode, since the middle layer is folded. 
*/ static pmd_t * __init one_md_table_init(pgd_t *pgd) { pud_t *pud; pmd_t *pmd_table; #ifdef CONFIG_X86_PAE if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { if (after_bootmem) pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); else pmd_table = (pmd_t *)alloc_low_page(); paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); pud = pud_offset(pgd, 0); BUG_ON(pmd_table != pmd_offset(pud, 0)); return pmd_table; } #endif pud = pud_offset(pgd, 0); pmd_table = pmd_offset(pud, 0); return pmd_table; } /* * Create a page table and place a pointer to it in a middle page * directory entry: */ static pte_t * __init one_page_table_init(pmd_t *pmd) { if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { pte_t *page_table = NULL; if (after_bootmem) { #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); #endif if (!page_table) page_table = (pte_t *)alloc_bootmem_pages(PAGE_SIZE); } else page_table = (pte_t *)alloc_low_page(); paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); BUG_ON(page_table != pte_offset_kernel(pmd, 0)); } return pte_offset_kernel(pmd, 0); } pmd_t * __init populate_extra_pmd(unsigned long vaddr) { int pgd_idx = pgd_index(vaddr); int pmd_idx = pmd_index(vaddr); return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx; } pte_t * __init populate_extra_pte(unsigned long vaddr) { int pte_idx = pte_index(vaddr); pmd_t *pmd; pmd = populate_extra_pmd(vaddr); return one_page_table_init(pmd) + pte_idx; } static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, unsigned long vaddr, pte_t *lastpte) { #ifdef CONFIG_HIGHMEM /* * Something (early fixmap) may already have put a pte * page here, which causes the page table allocation * to become nonlinear. Attempt to fix it, and if it * is still nonlinear then we have to bug. 
*/ int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT; if (pmd_idx_kmap_begin != pmd_idx_kmap_end && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) { pte_t *newpte; int i; BUG_ON(after_bootmem); newpte = alloc_low_page(); for (i = 0; i < PTRS_PER_PTE; i++) set_pte(newpte + i, pte[i]); paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); BUG_ON(newpte != pte_offset_kernel(pmd, 0)); __flush_tlb_all(); paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); pte = newpte; } BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1) && vaddr > fix_to_virt(FIX_KMAP_END) && lastpte && lastpte + PTRS_PER_PTE != pte); #endif return pte; } /* * This function initializes a certain range of kernel virtual memory * with new bootmem page tables, everywhere page tables are missing in * the given range. * * NOTE: The pagetables are allocated contiguous on the physical space * so we can cache the place of the first one and move around without * checking the pgd every time. 
*/ static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) { int pgd_idx, pmd_idx; unsigned long vaddr; pgd_t *pgd; pmd_t *pmd; pte_t *pte = NULL; vaddr = start; pgd_idx = pgd_index(vaddr); pmd_idx = pmd_index(vaddr); pgd = pgd_base + pgd_idx; for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { pmd = one_md_table_init(pgd); pmd = pmd + pmd_index(vaddr); for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { pte = page_table_kmap_check(one_page_table_init(pmd), pmd, vaddr, pte); vaddr += PMD_SIZE; } pmd_idx = 0; } } static inline int is_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) return 1; return 0; } /* * This maps the physical memory to kernel virtual address space, a total * of max_low_pfn pages, by creating page tables starting from address * PAGE_OFFSET: */ unsigned long __init kernel_physical_mapping_init(unsigned long start, unsigned long end, unsigned long page_size_mask) { int use_pse = page_size_mask == (1<<PG_LEVEL_2M); unsigned long last_map_addr = end; unsigned long start_pfn, end_pfn; pgd_t *pgd_base = swapper_pg_dir; int pgd_idx, pmd_idx, pte_ofs; unsigned long pfn; pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned pages_2m, pages_4k; int mapping_iter; start_pfn = start >> PAGE_SHIFT; end_pfn = end >> PAGE_SHIFT; /* * First iteration will setup identity mapping using large/small pages * based on use_pse, with other attributes same as set by * the early code in head_32.S * * Second iteration will setup the appropriate attributes (NX, GLOBAL..) * as desired for the kernel identity mapping. * * This two pass mechanism conforms to the TLB app note which says: * * "Software should not write to a paging-structure entry in a way * that would change, for any linear address, both the page size * and either the page frame or attributes." 
*/ mapping_iter = 1; if (!cpu_has_pse) use_pse = 0; repeat: pages_2m = pages_4k = 0; pfn = start_pfn; pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pgd = pgd_base + pgd_idx; for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { pmd = one_md_table_init(pgd); if (pfn >= end_pfn) continue; #ifdef CONFIG_X86_PAE pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pmd += pmd_idx; #else pmd_idx = 0; #endif for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; pmd++, pmd_idx++) { unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; /* * Map with big pages if possible, otherwise * create normal page tables: */ if (use_pse) { unsigned int addr2; pgprot_t prot = PAGE_KERNEL_LARGE; /* * first pass will use the same initial * identity mapping attribute + _PAGE_PSE. */ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR | _PAGE_PSE); addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1; if (is_kernel_text(addr) || is_kernel_text(addr2)) prot = PAGE_KERNEL_LARGE_EXEC; pages_2m++; if (mapping_iter == 1) set_pmd(pmd, pfn_pmd(pfn, init_prot)); else set_pmd(pmd, pfn_pmd(pfn, prot)); pfn += PTRS_PER_PTE; continue; } pte = one_page_table_init(pmd); pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pte += pte_ofs; for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { pgprot_t prot = PAGE_KERNEL; /* * first pass will use the same initial * identity mapping attribute. */ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); if (is_kernel_text(addr)) prot = PAGE_KERNEL_EXEC; pages_4k++; if (mapping_iter == 1) { set_pte(pte, pfn_pte(pfn, init_prot)); last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE; } else set_pte(pte, pfn_pte(pfn, prot)); } } } if (mapping_iter == 1) { /* * update direct mapping page count only in the first * iteration. */ update_page_count(PG_LEVEL_2M, pages_2m); update_page_count(PG_LEVEL_4K, pages_4k); /* * local global flush tlb, which will flush the previous * mappings present in both small and large page TLB's. 
*/ __flush_tlb_all(); /* * Second iteration will set the actual desired PTE attributes. */ mapping_iter = 2; goto repeat; } return last_map_addr; } pte_t *kmap_pte; pgprot_t kmap_prot; static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr) { return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), vaddr), vaddr); } static void __init kmap_init(void) { unsigned long kmap_vstart; /* * Cache the first kmap pte: */ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); kmap_pte = kmap_get_fixmap_pte(kmap_vstart); kmap_prot = PAGE_KERNEL; } #ifdef CONFIG_HIGHMEM static void __init permanent_kmaps_init(pgd_t *pgd_base) { unsigned long vaddr; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; vaddr = PKMAP_BASE; page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); pgd = swapper_pg_dir + pgd_index(vaddr); pud = pud_offset(pgd, vaddr); pmd = pmd_offset(pud, vaddr); pte = pte_offset_kernel(pmd, vaddr); pkmap_page_table = pte; } static void __init add_one_highpage_init(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); totalhigh_pages++; } void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn) { phys_addr_t start, end; u64 i; for_each_free_mem_range(i, nid, &start, &end, NULL) { unsigned long pfn = clamp_t(unsigned long, PFN_UP(start), start_pfn, end_pfn); unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end), start_pfn, end_pfn); for ( ; pfn < e_pfn; pfn++) if (pfn_valid(pfn)) add_one_highpage_init(pfn_to_page(pfn)); } } #else static inline void permanent_kmaps_init(pgd_t *pgd_base) { } #endif /* CONFIG_HIGHMEM */ void __init native_pagetable_setup_start(pgd_t *base) { unsigned long pfn, va; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; /* * Remove any mappings which extend past the end of physical * memory from the boot time page table: */ for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) { va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); pgd = base + 
pgd_index(va); if (!pgd_present(*pgd)) break; pud = pud_offset(pgd, va); pmd = pmd_offset(pud, va); if (!pmd_present(*pmd)) break; pte = pte_offset_kernel(pmd, va); if (!pte_present(*pte)) break; pte_clear(NULL, va, pte); } paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT); } void __init native_pagetable_setup_done(pgd_t *base) { } /* * Build a proper pagetable for the kernel mappings. Up until this * point, we've been running on some set of pagetables constructed by * the boot process. * * If we're booting on native hardware, this will be a pagetable * constructed in arch/x86/kernel/head_32.S. The root of the * pagetable will be swapper_pg_dir. * * If we're booting paravirtualized under a hypervisor, then there are * more options: we may already be running PAE, and the pagetable may * or may not be based in swapper_pg_dir. In any case, * paravirt_pagetable_setup_start() will set up swapper_pg_dir * appropriately for the rest of the initialization to work. * * In general, pagetable_init() assumes that the pagetable may already * be partially populated, and so it avoids stomping on any existing * mappings. */ void __init early_ioremap_page_table_range_init(void) { pgd_t *pgd_base = swapper_pg_dir; unsigned long vaddr, end; /* * Fixed mappings, only the page table structure has to be * created - mappings will be set by set_fixmap(): */ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; page_table_range_init(vaddr, end, pgd_base); early_ioremap_reset(); } static void __init pagetable_init(void) { pgd_t *pgd_base = swapper_pg_dir; permanent_kmaps_init(pgd_base); } pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); EXPORT_SYMBOL_GPL(__supported_pte_mask); /* user-defined highmem size */ static unsigned int highmem_pages = -1; /* * highmem=size forces highmem to be exactly 'size' bytes. * This works even on boxes that have no highmem otherwise. 
* This also works to reduce highmem size on bigger boxes. */ static int __init parse_highmem(char *arg) { if (!arg) return -EINVAL; highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT; return 0; } early_param("highmem", parse_highmem); #define MSG_HIGHMEM_TOO_BIG \ "highmem size (%luMB) is bigger than pages available (%luMB)!\n" #define MSG_LOWMEM_TOO_SMALL \ "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n" /* * All of RAM fits into lowmem - but if user wants highmem * artificially via the highmem=x boot parameter then create * it: */ void __init lowmem_pfn_init(void) { /* max_low_pfn is 0, we already have early_res support */ max_low_pfn = max_pfn; if (highmem_pages == -1) highmem_pages = 0; #ifdef CONFIG_HIGHMEM if (highmem_pages >= max_pfn) { printk(KERN_ERR MSG_HIGHMEM_TOO_BIG, pages_to_mb(highmem_pages), pages_to_mb(max_pfn)); highmem_pages = 0; } if (highmem_pages) { if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) { printk(KERN_ERR MSG_LOWMEM_TOO_SMALL, pages_to_mb(highmem_pages)); highmem_pages = 0; } max_low_pfn -= highmem_pages; } #else if (highmem_pages) printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n"); #endif } #define MSG_HIGHMEM_TOO_SMALL \ "only %luMB highmem pages available, ignoring highmem size of %luMB!\n" #define MSG_HIGHMEM_TRIMMED \ "Warning: only 4GB will be used. 
Use a HIGHMEM64G enabled kernel!\n" /* * We have more RAM than fits into lowmem - we try to put it into * highmem, also taking the highmem=x boot parameter into account: */ void __init highmem_pfn_init(void) { max_low_pfn = MAXMEM_PFN; if (highmem_pages == -1) highmem_pages = max_pfn - MAXMEM_PFN; if (highmem_pages + MAXMEM_PFN < max_pfn) max_pfn = MAXMEM_PFN + highmem_pages; if (highmem_pages + MAXMEM_PFN > max_pfn) { printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL, pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages)); highmem_pages = 0; } #ifndef CONFIG_HIGHMEM /* Maximum memory usable is what is directly addressable */ printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20); if (max_pfn > MAX_NONPAE_PFN) printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n"); else printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); max_pfn = MAXMEM_PFN; #else /* !CONFIG_HIGHMEM */ #ifndef CONFIG_HIGHMEM64G if (max_pfn > MAX_NONPAE_PFN) { max_pfn = MAX_NONPAE_PFN; printk(KERN_WARNING MSG_HIGHMEM_TRIMMED); } #endif /* !CONFIG_HIGHMEM64G */ #endif /* !CONFIG_HIGHMEM */ } /* * Determine low and high memory ranges: */ void __init find_low_pfn_range(void) { /* it could update max_pfn */ if (max_pfn <= MAXMEM_PFN) lowmem_pfn_init(); else highmem_pfn_init(); } #ifndef CONFIG_NEED_MULTIPLE_NODES void __init initmem_init(void) { #ifdef CONFIG_HIGHMEM highstart_pfn = highend_pfn = max_pfn; if (max_pfn > max_low_pfn) highstart_pfn = max_low_pfn; printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); sparse_memory_present_with_active_regions(0); #ifdef CONFIG_FLATMEM max_mapnr = num_physpages; #endif __vmalloc_start_set = true; printk(KERN_NOTICE "%ldMB LOWMEM available.\n", 
pages_to_mb(max_low_pfn)); setup_bootmem_allocator(); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ void __init setup_bootmem_allocator(void) { printk(KERN_INFO " mapped low ram: 0 - %08lx\n", max_pfn_mapped<<PAGE_SHIFT); printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); after_bootmem = 1; } /* * paging_init() sets up the page tables - note that the first 8MB are * already mapped by head.S. * * This routines also unmaps the page at virtual kernel address 0, so * that we can trap those pesky NULL-reference errors in the kernel. */ void __init paging_init(void) { pagetable_init(); __flush_tlb_all(); kmap_init(); /* * NOTE: at this point the bootmem allocator is fully available. */ olpc_dt_build_devicetree(); sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); zone_sizes_init(); } /* * Test if the WP bit works in supervisor mode. It isn't supported on 386's * and also on some strange 486's. All 586+'s are OK. This used to involve * black magic jumps to work around some nasty CPU bugs, but fortunately the * switch to using exceptions got rid of all that. */ static void __init test_wp_bit(void) { printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode..."); /* Any page-aligned address will do, the test is non-destructive */ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY); boot_cpu_data.wp_works_ok = do_test_wp_bit(); clear_fixmap(FIX_WP_TEST); if (!boot_cpu_data.wp_works_ok) { printk(KERN_CONT "No.\n"); #ifdef CONFIG_X86_WP_WORKS_OK panic( "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!"); #endif } else { printk(KERN_CONT "Ok.\n"); } } void __init mem_init(void) { int codesize, reservedpages, datasize, initsize; int tmp; pci_iommu_alloc(); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); #endif /* * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to * be done before free_all_bootmem(). 
Memblock use free low memory for * temporary data (see find_range_array()) and for this purpose can use * pages that was already passed to the buddy allocator, hence marked as * not accessible in the page tables when compiled with * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not * important here. */ set_highmem_pages_init(); /* this will put all low memory onto the freelists */ totalram_pages += free_all_bootmem(); reservedpages = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) /* * Only count reserved RAM pages: */ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) reservedpages++; codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " "%dk reserved, %dk data, %dk init, %ldk highmem)\n", nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, totalhigh_pages << (PAGE_SHIFT-10)); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, #endif VMALLOC_START, VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, (unsigned long)__va(0), (unsigned long)high_memory, ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, (unsigned long)&__init_begin, (unsigned long)&__init_end, ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext, (unsigned 
long)&_edata, ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, (unsigned long)&_text, (unsigned long)&_etext, ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. */ #define __FIXADDR_TOP (-PAGE_SIZE) #ifdef CONFIG_HIGHMEM BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE); #endif #define high_memory (-128UL << 20) BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); #undef high_memory #undef __FIXADDR_TOP #ifdef CONFIG_HIGHMEM BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); BUG_ON(VMALLOC_END > PKMAP_BASE); #endif BUG_ON(VMALLOC_START >= VMALLOC_END); BUG_ON((unsigned long)high_memory > VMALLOC_START); if (boot_cpu_data.wp_works_ok < 0) test_wp_bit(); } #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size) { struct pglist_data *pgdata = NODE_DATA(nid); struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM; unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; return __add_pages(nid, zone, start_pfn, nr_pages); } #endif /* * This function cannot be __init, since exceptions don't work in that * section. Put this after the callers, so that it cannot be inlined. 
*/ static noinline int do_test_wp_bit(void) { char tmp_reg; int flag; __asm__ __volatile__( " movb %0, %1 \n" "1: movb %1, %0 \n" " xorl %2, %2 \n" "2: \n" _ASM_EXTABLE(1b,2b) :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)), "=q" (tmp_reg), "=r" (flag) :"2" (1) :"memory"); return flag; } #ifdef CONFIG_DEBUG_RODATA const int rodata_test_data = 0xC3; EXPORT_SYMBOL_GPL(rodata_test_data); int kernel_set_to_readonly __read_mostly; void set_kernel_text_rw(void) { unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read write\n", start, start+size); set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); } void set_kernel_text_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read only\n", start, start+size); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); } static void mark_nxdata_nx(void) { /* * When this called, init has already been executed and released, * so everything past _etext should be NX. */ unsigned long start = PFN_ALIGN(_etext); /* * This comes from is_kernel_text upper limit. 
Also HPAGE where used: */ unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start; if (__supported_pte_mask & _PAGE_NX) printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10); set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT); } void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); kernel_set_to_readonly = 1; #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", start, start+size); set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); printk(KERN_INFO "Testing CPA: write protecting again\n"); set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); #endif start += size; size = (unsigned long)__end_rodata - start; set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", size >> 10); rodata_test(); #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size); set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Testing CPA: write protecting again\n"); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); #endif mark_nxdata_nx(); } #endif
gpl-2.0
yoAeroA00/android_kernel_nokia_msm8610
sound/pci/hda/hda_jack.c
4810
10079
/* * Jack-detection handling for HD-audio * * Copyright (c) 2011 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/export.h> #include <sound/core.h> #include <sound/control.h> #include <sound/jack.h> #include "hda_codec.h" #include "hda_local.h" #include "hda_jack.h" bool is_jack_detectable(struct hda_codec *codec, hda_nid_t nid) { if (codec->no_jack_detect) return false; if (!(snd_hda_query_pin_caps(codec, nid) & AC_PINCAP_PRES_DETECT)) return false; if (!codec->ignore_misc_bit && (get_defcfg_misc(snd_hda_codec_get_pincfg(codec, nid)) & AC_DEFCFG_MISC_NO_PRESENCE)) return false; if (!(get_wcaps(codec, nid) & AC_WCAP_UNSOL_CAP)) return false; return true; } EXPORT_SYMBOL_HDA(is_jack_detectable); /* execute pin sense measurement */ static u32 read_pin_sense(struct hda_codec *codec, hda_nid_t nid) { u32 pincap; if (!codec->no_trigger_sense) { pincap = snd_hda_query_pin_caps(codec, nid); if (pincap & AC_PINCAP_TRIG_REQ) /* need trigger? 
*/ snd_hda_codec_read(codec, nid, 0, AC_VERB_SET_PIN_SENSE, 0); } return snd_hda_codec_read(codec, nid, 0, AC_VERB_GET_PIN_SENSE, 0); } /** * snd_hda_jack_tbl_get - query the jack-table entry for the given NID */ struct hda_jack_tbl * snd_hda_jack_tbl_get(struct hda_codec *codec, hda_nid_t nid) { struct hda_jack_tbl *jack = codec->jacktbl.list; int i; if (!nid || !jack) return NULL; for (i = 0; i < codec->jacktbl.used; i++, jack++) if (jack->nid == nid) return jack; return NULL; } EXPORT_SYMBOL_HDA(snd_hda_jack_tbl_get); /** * snd_hda_jack_tbl_get_from_tag - query the jack-table entry for the given tag */ struct hda_jack_tbl * snd_hda_jack_tbl_get_from_tag(struct hda_codec *codec, unsigned char tag) { struct hda_jack_tbl *jack = codec->jacktbl.list; int i; if (!tag || !jack) return NULL; for (i = 0; i < codec->jacktbl.used; i++, jack++) if (jack->tag == tag) return jack; return NULL; } EXPORT_SYMBOL_HDA(snd_hda_jack_tbl_get_from_tag); /** * snd_hda_jack_tbl_new - create a jack-table entry for the given NID */ struct hda_jack_tbl * snd_hda_jack_tbl_new(struct hda_codec *codec, hda_nid_t nid) { struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid); if (jack) return jack; snd_array_init(&codec->jacktbl, sizeof(*jack), 16); jack = snd_array_new(&codec->jacktbl); if (!jack) return NULL; jack->nid = nid; jack->jack_dirty = 1; jack->tag = codec->jacktbl.used; return jack; } EXPORT_SYMBOL_HDA(snd_hda_jack_tbl_new); void snd_hda_jack_tbl_clear(struct hda_codec *codec) { #ifdef CONFIG_SND_HDA_INPUT_JACK /* free jack instances manually when clearing/reconfiguring */ if (!codec->bus->shutdown && codec->jacktbl.list) { struct hda_jack_tbl *jack = codec->jacktbl.list; int i; for (i = 0; i < codec->jacktbl.used; i++, jack++) { if (jack->jack) snd_device_free(codec->bus->card, jack->jack); } } #endif snd_array_free(&codec->jacktbl); } /* update the cached value and notification flag if needed */ static void jack_detect_update(struct hda_codec *codec, struct hda_jack_tbl 
*jack) { if (jack->jack_dirty || !jack->jack_detect) { jack->pin_sense = read_pin_sense(codec, jack->nid); jack->jack_dirty = 0; } } /** * snd_hda_set_dirty_all - Mark all the cached as dirty * * This function sets the dirty flag to all entries of jack table. * It's called from the resume path in hda_codec.c. */ void snd_hda_jack_set_dirty_all(struct hda_codec *codec) { struct hda_jack_tbl *jack = codec->jacktbl.list; int i; for (i = 0; i < codec->jacktbl.used; i++, jack++) if (jack->nid) jack->jack_dirty = 1; } EXPORT_SYMBOL_HDA(snd_hda_jack_set_dirty_all); /** * snd_hda_pin_sense - execute pin sense measurement * @codec: the CODEC to sense * @nid: the pin NID to sense * * Execute necessary pin sense measurement and return its Presence Detect, * Impedance, ELD Valid etc. status bits. */ u32 snd_hda_pin_sense(struct hda_codec *codec, hda_nid_t nid) { struct hda_jack_tbl *jack = snd_hda_jack_tbl_get(codec, nid); if (jack) { jack_detect_update(codec, jack); return jack->pin_sense; } return read_pin_sense(codec, nid); } EXPORT_SYMBOL_HDA(snd_hda_pin_sense); #define get_jack_plug_state(sense) !!(sense & AC_PINSENSE_PRESENCE) /** * snd_hda_jack_detect - query pin Presence Detect status * @codec: the CODEC to sense * @nid: the pin NID to sense * * Query and return the pin's Presence Detect status. 
*/ int snd_hda_jack_detect(struct hda_codec *codec, hda_nid_t nid) { u32 sense = snd_hda_pin_sense(codec, nid); return get_jack_plug_state(sense); } EXPORT_SYMBOL_HDA(snd_hda_jack_detect); /** * snd_hda_jack_detect_enable - enable the jack-detection */ int snd_hda_jack_detect_enable(struct hda_codec *codec, hda_nid_t nid, unsigned char action) { struct hda_jack_tbl *jack = snd_hda_jack_tbl_new(codec, nid); if (!jack) return -ENOMEM; if (jack->jack_detect) return 0; /* already registered */ jack->jack_detect = 1; if (action) jack->action = action; return snd_hda_codec_write_cache(codec, nid, 0, AC_VERB_SET_UNSOLICITED_ENABLE, AC_USRSP_EN | jack->tag); } EXPORT_SYMBOL_HDA(snd_hda_jack_detect_enable); /** * snd_hda_jack_report_sync - sync the states of all jacks and report if changed */ void snd_hda_jack_report_sync(struct hda_codec *codec) { struct hda_jack_tbl *jack = codec->jacktbl.list; int i, state; for (i = 0; i < codec->jacktbl.used; i++, jack++) if (jack->nid) { jack_detect_update(codec, jack); if (!jack->kctl) continue; state = get_jack_plug_state(jack->pin_sense); snd_kctl_jack_report(codec->bus->card, jack->kctl, state); #ifdef CONFIG_SND_HDA_INPUT_JACK if (jack->jack) snd_jack_report(jack->jack, state ? 
jack->type : 0); #endif } } EXPORT_SYMBOL_HDA(snd_hda_jack_report_sync); #ifdef CONFIG_SND_HDA_INPUT_JACK /* guess the jack type from the pin-config */ static int get_input_jack_type(struct hda_codec *codec, hda_nid_t nid) { unsigned int def_conf = snd_hda_codec_get_pincfg(codec, nid); switch (get_defcfg_device(def_conf)) { case AC_JACK_LINE_OUT: case AC_JACK_SPEAKER: return SND_JACK_LINEOUT; case AC_JACK_HP_OUT: return SND_JACK_HEADPHONE; case AC_JACK_SPDIF_OUT: case AC_JACK_DIG_OTHER_OUT: return SND_JACK_AVOUT; case AC_JACK_MIC_IN: return SND_JACK_MICROPHONE; default: return SND_JACK_LINEIN; } } static void hda_free_jack_priv(struct snd_jack *jack) { struct hda_jack_tbl *jacks = jack->private_data; jacks->nid = 0; jacks->jack = NULL; } #endif /** * snd_hda_jack_add_kctl - Add a kctl for the given pin * * This assigns a jack-detection kctl to the given pin. The kcontrol * will have the given name and index. */ int snd_hda_jack_add_kctl(struct hda_codec *codec, hda_nid_t nid, const char *name, int idx) { struct hda_jack_tbl *jack; struct snd_kcontrol *kctl; int err, state; jack = snd_hda_jack_tbl_new(codec, nid); if (!jack) return 0; if (jack->kctl) return 0; /* already created */ kctl = snd_kctl_jack_new(name, idx, codec); if (!kctl) return -ENOMEM; err = snd_hda_ctl_add(codec, nid, kctl); if (err < 0) return err; jack->kctl = kctl; state = snd_hda_jack_detect(codec, nid); snd_kctl_jack_report(codec->bus->card, kctl, state); #ifdef CONFIG_SND_HDA_INPUT_JACK jack->type = get_input_jack_type(codec, nid); err = snd_jack_new(codec->bus->card, name, jack->type, &jack->jack); if (err < 0) return err; jack->jack->private_data = jack; jack->jack->private_free = hda_free_jack_priv; snd_jack_report(jack->jack, state ? 
jack->type : 0); #endif return 0; } EXPORT_SYMBOL_HDA(snd_hda_jack_add_kctl); static int add_jack_kctl(struct hda_codec *codec, hda_nid_t nid, const struct auto_pin_cfg *cfg, char *lastname, int *lastidx) { unsigned int def_conf, conn; char name[44]; int idx, err; if (!nid) return 0; if (!is_jack_detectable(codec, nid)) return 0; def_conf = snd_hda_codec_get_pincfg(codec, nid); conn = get_defcfg_connect(def_conf); if (conn != AC_JACK_PORT_COMPLEX) return 0; snd_hda_get_pin_label(codec, nid, cfg, name, sizeof(name), &idx); if (!strcmp(name, lastname) && idx == *lastidx) idx++; strncpy(lastname, name, 44); *lastidx = idx; err = snd_hda_jack_add_kctl(codec, nid, name, idx); if (err < 0) return err; return snd_hda_jack_detect_enable(codec, nid, 0); } /** * snd_hda_jack_add_kctls - Add kctls for all pins included in the given pincfg */ int snd_hda_jack_add_kctls(struct hda_codec *codec, const struct auto_pin_cfg *cfg) { const hda_nid_t *p; int i, err, lastidx = 0; char lastname[44] = ""; for (i = 0, p = cfg->line_out_pins; i < cfg->line_outs; i++, p++) { err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx); if (err < 0) return err; } for (i = 0, p = cfg->hp_pins; i < cfg->hp_outs; i++, p++) { if (*p == *cfg->line_out_pins) /* might be duplicated */ break; err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx); if (err < 0) return err; } for (i = 0, p = cfg->speaker_pins; i < cfg->speaker_outs; i++, p++) { if (*p == *cfg->line_out_pins) /* might be duplicated */ break; err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx); if (err < 0) return err; } for (i = 0; i < cfg->num_inputs; i++) { err = add_jack_kctl(codec, cfg->inputs[i].pin, cfg, lastname, &lastidx); if (err < 0) return err; } for (i = 0, p = cfg->dig_out_pins; i < cfg->dig_outs; i++, p++) { err = add_jack_kctl(codec, *p, cfg, lastname, &lastidx); if (err < 0) return err; } err = add_jack_kctl(codec, cfg->dig_in_pin, cfg, lastname, &lastidx); if (err < 0) return err; err = add_jack_kctl(codec, 
cfg->mono_out_pin, cfg, lastname, &lastidx); if (err < 0) return err; return 0; } EXPORT_SYMBOL_HDA(snd_hda_jack_add_kctls);
gpl-2.0
grondinm/android_kernel_motorola_msm8974
drivers/media/radio/radio-maxiradio.c
4810
5810
/* * Guillemot Maxi Radio FM 2000 PCI radio card driver for Linux * (C) 2001 Dimitromanolakis Apostolos <apdim@grecian.net> * * Based in the radio Maestro PCI driver. Actually it uses the same chip * for radio but different pci controller. * * I didn't have any specs I reversed engineered the protocol from * the windows driver (radio.dll). * * The card uses the TEA5757 chip that includes a search function but it * is useless as I haven't found any way to read back the frequency. If * anybody does please mail me. * * For the pdf file see: * http://www.nxp.com/acrobat_download2/expired_datasheets/TEA5757_5759_3.pdf * * * CHANGES: * 0.75b * - better pci interface thanks to Francois Romieu <romieu@cogenit.fr> * * 0.75 Sun Feb 4 22:51:27 EET 2001 * - tiding up * - removed support for multiple devices as it didn't work anyway * * BUGS: * - card unmutes if you change frequency * * (c) 2006, 2007 by Mauro Carvalho Chehab <mchehab@infradead.org>: * - Conversion to V4L2 API * - Uses video_ioctl2 for parsing and to add debug support */ #include <linux/module.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/delay.h> #include <linux/mutex.h> #include <linux/pci.h> #include <linux/videodev2.h> #include <linux/io.h> #include <linux/slab.h> #include <sound/tea575x-tuner.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-fh.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-event.h> MODULE_AUTHOR("Dimitromanolakis Apostolos, apdim@grecian.net"); MODULE_DESCRIPTION("Radio driver for the Guillemot Maxi Radio FM2000."); MODULE_LICENSE("GPL"); MODULE_VERSION("1.0.0"); static int radio_nr = -1; module_param(radio_nr, int, 0644); MODULE_PARM_DESC(radio_nr, "Radio device number"); /* TEA5757 pin mappings */ static const int clk = 1, data = 2, wren = 4, mo_st = 8, power = 16; static atomic_t maxiradio_instance = ATOMIC_INIT(0); #define PCI_VENDOR_ID_GUILLEMOT 0x5046 #define PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO 0x1001 struct maxiradio { 
struct snd_tea575x tea; struct v4l2_device v4l2_dev; struct pci_dev *pdev; u16 io; /* base of radio io */ }; static inline struct maxiradio *to_maxiradio(struct v4l2_device *v4l2_dev) { return container_of(v4l2_dev, struct maxiradio, v4l2_dev); } static void maxiradio_tea575x_set_pins(struct snd_tea575x *tea, u8 pins) { struct maxiradio *dev = tea->private_data; u8 bits = 0; bits |= (pins & TEA575X_DATA) ? data : 0; bits |= (pins & TEA575X_CLK) ? clk : 0; bits |= (pins & TEA575X_WREN) ? wren : 0; bits |= power; outb(bits, dev->io); } /* Note: this card cannot read out the data of the shift registers, only the mono/stereo pin works. */ static u8 maxiradio_tea575x_get_pins(struct snd_tea575x *tea) { struct maxiradio *dev = tea->private_data; u8 bits = inb(dev->io); return ((bits & data) ? TEA575X_DATA : 0) | ((bits & mo_st) ? TEA575X_MOST : 0); } static void maxiradio_tea575x_set_direction(struct snd_tea575x *tea, bool output) { } static struct snd_tea575x_ops maxiradio_tea_ops = { .set_pins = maxiradio_tea575x_set_pins, .get_pins = maxiradio_tea575x_get_pins, .set_direction = maxiradio_tea575x_set_direction, }; static int __devinit maxiradio_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { struct maxiradio *dev; struct v4l2_device *v4l2_dev; int retval = -ENOMEM; dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { dev_err(&pdev->dev, "not enough memory\n"); return -ENOMEM; } v4l2_dev = &dev->v4l2_dev; v4l2_device_set_name(v4l2_dev, "maxiradio", &maxiradio_instance); retval = v4l2_device_register(&pdev->dev, v4l2_dev); if (retval < 0) { v4l2_err(v4l2_dev, "Could not register v4l2_device\n"); goto errfr; } dev->tea.private_data = dev; dev->tea.ops = &maxiradio_tea_ops; /* The data pin cannot be read. This may be a hardware limitation, or we just don't know how to read it. 
*/ dev->tea.cannot_read_data = true; dev->tea.v4l2_dev = v4l2_dev; dev->tea.radio_nr = radio_nr; strlcpy(dev->tea.card, "Maxi Radio FM2000", sizeof(dev->tea.card)); snprintf(dev->tea.bus_info, sizeof(dev->tea.bus_info), "PCI:%s", pci_name(pdev)); retval = -ENODEV; if (!request_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0), v4l2_dev->name)) { dev_err(&pdev->dev, "can't reserve I/O ports\n"); goto err_hdl; } if (pci_enable_device(pdev)) goto err_out_free_region; dev->io = pci_resource_start(pdev, 0); if (snd_tea575x_init(&dev->tea)) { printk(KERN_ERR "radio-maxiradio: Unable to detect TEA575x tuner\n"); goto err_out_free_region; } return 0; err_out_free_region: release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); err_hdl: v4l2_device_unregister(v4l2_dev); errfr: kfree(dev); return retval; } static void __devexit maxiradio_remove(struct pci_dev *pdev) { struct v4l2_device *v4l2_dev = dev_get_drvdata(&pdev->dev); struct maxiradio *dev = to_maxiradio(v4l2_dev); snd_tea575x_exit(&dev->tea); /* Turn off power */ outb(0, dev->io); v4l2_device_unregister(v4l2_dev); release_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0)); } static struct pci_device_id maxiradio_pci_tbl[] = { { PCI_VENDOR_ID_GUILLEMOT, PCI_DEVICE_ID_GUILLEMOT_MAXIRADIO, PCI_ANY_ID, PCI_ANY_ID, }, { 0 } }; MODULE_DEVICE_TABLE(pci, maxiradio_pci_tbl); static struct pci_driver maxiradio_driver = { .name = "radio-maxiradio", .id_table = maxiradio_pci_tbl, .probe = maxiradio_probe, .remove = __devexit_p(maxiradio_remove), }; static int __init maxiradio_init(void) { return pci_register_driver(&maxiradio_driver); } static void __exit maxiradio_exit(void) { pci_unregister_driver(&maxiradio_driver); } module_init(maxiradio_init); module_exit(maxiradio_exit);
gpl-2.0
cafecongnghe/android_kernel_lge_mako
net/tipc/msg.c
4810
9962
/* * net/tipc/msg.c: TIPC message header routines * * Copyright (c) 2000-2006, Ericsson AB * Copyright (c) 2005, 2010-2011, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" #include "msg.h" u32 tipc_msg_tot_importance(struct tipc_msg *m) { if (likely(msg_isdata(m))) { if (likely(msg_orignode(m) == tipc_own_addr)) return msg_importance(m); return msg_importance(m) + 4; } if ((msg_user(m) == MSG_FRAGMENTER) && (msg_type(m) == FIRST_FRAGMENT)) return msg_importance(msg_get_wrapped(m)); return msg_importance(m); } void tipc_msg_init(struct tipc_msg *m, u32 user, u32 type, u32 hsize, u32 destnode) { memset(m, 0, hsize); msg_set_version(m); msg_set_user(m, user); msg_set_hdr_sz(m, hsize); msg_set_size(m, hsize); msg_set_prevnode(m, tipc_own_addr); msg_set_type(m, type); msg_set_orignode(m, tipc_own_addr); msg_set_destnode(m, destnode); } /** * tipc_msg_build - create message using specified header and data * * Note: Caller must not hold any locks in case copy_from_user() is interrupted! * * Returns message data size or errno */ int tipc_msg_build(struct tipc_msg *hdr, struct iovec const *msg_sect, u32 num_sect, unsigned int total_len, int max_size, int usrmem, struct sk_buff **buf) { int dsz, sz, hsz, pos, res, cnt; dsz = total_len; pos = hsz = msg_hdr_sz(hdr); sz = hsz + dsz; msg_set_size(hdr, sz); if (unlikely(sz > max_size)) { *buf = NULL; return dsz; } *buf = tipc_buf_acquire(sz); if (!(*buf)) return -ENOMEM; skb_copy_to_linear_data(*buf, hdr, hsz); for (res = 1, cnt = 0; res && (cnt < num_sect); cnt++) { if (likely(usrmem)) res = !copy_from_user((*buf)->data + pos, msg_sect[cnt].iov_base, msg_sect[cnt].iov_len); else skb_copy_to_linear_data_offset(*buf, pos, msg_sect[cnt].iov_base, msg_sect[cnt].iov_len); pos += msg_sect[cnt].iov_len; } if (likely(res)) return dsz; kfree_skb(*buf); *buf = NULL; return -EFAULT; } #ifdef CONFIG_TIPC_DEBUG void tipc_msg_dbg(struct print_buf *buf, struct tipc_msg *msg, const char *str) { u32 usr = msg_user(msg); tipc_printf(buf, KERN_DEBUG); tipc_printf(buf, str); switch (usr) { case MSG_BUNDLER: tipc_printf(buf, "BNDL::"); tipc_printf(buf, "MSGS(%u):", msg_msgcnt(msg)); break; case 
BCAST_PROTOCOL: tipc_printf(buf, "BCASTP::"); break; case MSG_FRAGMENTER: tipc_printf(buf, "FRAGM::"); switch (msg_type(msg)) { case FIRST_FRAGMENT: tipc_printf(buf, "FIRST:"); break; case FRAGMENT: tipc_printf(buf, "BODY:"); break; case LAST_FRAGMENT: tipc_printf(buf, "LAST:"); break; default: tipc_printf(buf, "UNKNOWN:%x", msg_type(msg)); } tipc_printf(buf, "NO(%u/%u):", msg_long_msgno(msg), msg_fragm_no(msg)); break; case TIPC_LOW_IMPORTANCE: case TIPC_MEDIUM_IMPORTANCE: case TIPC_HIGH_IMPORTANCE: case TIPC_CRITICAL_IMPORTANCE: tipc_printf(buf, "DAT%u:", msg_user(msg)); if (msg_short(msg)) { tipc_printf(buf, "CON:"); break; } switch (msg_type(msg)) { case TIPC_CONN_MSG: tipc_printf(buf, "CON:"); break; case TIPC_MCAST_MSG: tipc_printf(buf, "MCST:"); break; case TIPC_NAMED_MSG: tipc_printf(buf, "NAM:"); break; case TIPC_DIRECT_MSG: tipc_printf(buf, "DIR:"); break; default: tipc_printf(buf, "UNKNOWN TYPE %u", msg_type(msg)); } if (msg_reroute_cnt(msg)) tipc_printf(buf, "REROUTED(%u):", msg_reroute_cnt(msg)); break; case NAME_DISTRIBUTOR: tipc_printf(buf, "NMD::"); switch (msg_type(msg)) { case PUBLICATION: tipc_printf(buf, "PUBL(%u):", (msg_size(msg) - msg_hdr_sz(msg)) / 20); /* Items */ break; case WITHDRAWAL: tipc_printf(buf, "WDRW:"); break; default: tipc_printf(buf, "UNKNOWN:%x", msg_type(msg)); } if (msg_reroute_cnt(msg)) tipc_printf(buf, "REROUTED(%u):", msg_reroute_cnt(msg)); break; case CONN_MANAGER: tipc_printf(buf, "CONN_MNG:"); switch (msg_type(msg)) { case CONN_PROBE: tipc_printf(buf, "PROBE:"); break; case CONN_PROBE_REPLY: tipc_printf(buf, "PROBE_REPLY:"); break; case CONN_ACK: tipc_printf(buf, "CONN_ACK:"); tipc_printf(buf, "ACK(%u):", msg_msgcnt(msg)); break; default: tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg)); } if (msg_reroute_cnt(msg)) tipc_printf(buf, "REROUTED(%u):", msg_reroute_cnt(msg)); break; case LINK_PROTOCOL: switch (msg_type(msg)) { case STATE_MSG: tipc_printf(buf, "STATE:"); tipc_printf(buf, "%s:", msg_probe(msg) ? 
"PRB" : ""); tipc_printf(buf, "NXS(%u):", msg_next_sent(msg)); tipc_printf(buf, "GAP(%u):", msg_seq_gap(msg)); tipc_printf(buf, "LSTBC(%u):", msg_last_bcast(msg)); break; case RESET_MSG: tipc_printf(buf, "RESET:"); if (msg_size(msg) != msg_hdr_sz(msg)) tipc_printf(buf, "BEAR:%s:", msg_data(msg)); break; case ACTIVATE_MSG: tipc_printf(buf, "ACTIVATE:"); break; default: tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg)); } tipc_printf(buf, "PLANE(%c):", msg_net_plane(msg)); tipc_printf(buf, "SESS(%u):", msg_session(msg)); break; case CHANGEOVER_PROTOCOL: tipc_printf(buf, "TUNL:"); switch (msg_type(msg)) { case DUPLICATE_MSG: tipc_printf(buf, "DUPL:"); break; case ORIGINAL_MSG: tipc_printf(buf, "ORIG:"); tipc_printf(buf, "EXP(%u)", msg_msgcnt(msg)); break; default: tipc_printf(buf, "UNKNOWN TYPE:%x", msg_type(msg)); } break; case LINK_CONFIG: tipc_printf(buf, "CFG:"); switch (msg_type(msg)) { case DSC_REQ_MSG: tipc_printf(buf, "DSC_REQ:"); break; case DSC_RESP_MSG: tipc_printf(buf, "DSC_RESP:"); break; default: tipc_printf(buf, "UNKNOWN TYPE:%x:", msg_type(msg)); break; } break; default: tipc_printf(buf, "UNKNOWN USER:"); } switch (usr) { case CONN_MANAGER: case TIPC_LOW_IMPORTANCE: case TIPC_MEDIUM_IMPORTANCE: case TIPC_HIGH_IMPORTANCE: case TIPC_CRITICAL_IMPORTANCE: switch (msg_errcode(msg)) { case TIPC_OK: break; case TIPC_ERR_NO_NAME: tipc_printf(buf, "NO_NAME:"); break; case TIPC_ERR_NO_PORT: tipc_printf(buf, "NO_PORT:"); break; case TIPC_ERR_NO_NODE: tipc_printf(buf, "NO_PROC:"); break; case TIPC_ERR_OVERLOAD: tipc_printf(buf, "OVERLOAD:"); break; case TIPC_CONN_SHUTDOWN: tipc_printf(buf, "SHUTDOWN:"); break; default: tipc_printf(buf, "UNKNOWN ERROR(%x):", msg_errcode(msg)); } default: break; } tipc_printf(buf, "HZ(%u):", msg_hdr_sz(msg)); tipc_printf(buf, "SZ(%u):", msg_size(msg)); tipc_printf(buf, "SQNO(%u):", msg_seqno(msg)); if (msg_non_seq(msg)) tipc_printf(buf, "NOSEQ:"); else tipc_printf(buf, "ACK(%u):", msg_ack(msg)); tipc_printf(buf, "BACK(%u):", 
msg_bcast_ack(msg)); tipc_printf(buf, "PRND(%x)", msg_prevnode(msg)); if (msg_isdata(msg)) { if (msg_named(msg)) { tipc_printf(buf, "NTYP(%u):", msg_nametype(msg)); tipc_printf(buf, "NINST(%u)", msg_nameinst(msg)); } } if ((usr != LINK_PROTOCOL) && (usr != LINK_CONFIG) && (usr != MSG_BUNDLER)) { if (!msg_short(msg)) { tipc_printf(buf, ":ORIG(%x:%u):", msg_orignode(msg), msg_origport(msg)); tipc_printf(buf, ":DEST(%x:%u):", msg_destnode(msg), msg_destport(msg)); } else { tipc_printf(buf, ":OPRT(%u):", msg_origport(msg)); tipc_printf(buf, ":DPRT(%u):", msg_destport(msg)); } } if (msg_user(msg) == NAME_DISTRIBUTOR) { tipc_printf(buf, ":ONOD(%x):", msg_orignode(msg)); tipc_printf(buf, ":DNOD(%x):", msg_destnode(msg)); } if (msg_user(msg) == LINK_CONFIG) { struct tipc_media_addr orig; tipc_printf(buf, ":DDOM(%x):", msg_dest_domain(msg)); tipc_printf(buf, ":NETID(%u):", msg_bc_netid(msg)); memcpy(orig.value, msg_media_addr(msg), sizeof(orig.value)); orig.media_id = 0; orig.broadcast = 0; tipc_media_addr_printf(buf, &orig); } if (msg_user(msg) == BCAST_PROTOCOL) { tipc_printf(buf, "BCNACK:AFTER(%u):", msg_bcgap_after(msg)); tipc_printf(buf, "TO(%u):", msg_bcgap_to(msg)); } tipc_printf(buf, "\n"); if ((usr == CHANGEOVER_PROTOCOL) && (msg_msgcnt(msg))) tipc_msg_dbg(buf, msg_get_wrapped(msg), " /"); if ((usr == MSG_FRAGMENTER) && (msg_type(msg) == FIRST_FRAGMENT)) tipc_msg_dbg(buf, msg_get_wrapped(msg), " /"); } #endif
gpl-2.0
manveru0/kernel_I9001_cfs
sound/arm/pxa2xx-pcm.c
4810
3191
/* * linux/sound/arm/pxa2xx-pcm.c -- ALSA PCM interface for the Intel PXA2xx chip * * Author: Nicolas Pitre * Created: Nov 30, 2004 * Copyright: (C) 2004 MontaVista Software, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <sound/core.h> #include <sound/pxa2xx-lib.h> #include "pxa2xx-pcm.h" static int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream) { struct pxa2xx_pcm_client *client = substream->private_data; __pxa2xx_pcm_prepare(substream); return client->prepare(substream); } static int pxa2xx_pcm_open(struct snd_pcm_substream *substream) { struct pxa2xx_pcm_client *client = substream->private_data; struct snd_pcm_runtime *runtime = substream->runtime; struct pxa2xx_runtime_data *rtd; int ret; ret = __pxa2xx_pcm_open(substream); if (ret) goto out; rtd = runtime->private_data; rtd->params = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ? 
client->playback_params : client->capture_params; ret = pxa_request_dma(rtd->params->name, DMA_PRIO_LOW, pxa2xx_pcm_dma_irq, substream); if (ret < 0) goto err2; rtd->dma_ch = ret; ret = client->startup(substream); if (!ret) goto out; pxa_free_dma(rtd->dma_ch); err2: __pxa2xx_pcm_close(substream); out: return ret; } static int pxa2xx_pcm_close(struct snd_pcm_substream *substream) { struct pxa2xx_pcm_client *client = substream->private_data; struct pxa2xx_runtime_data *rtd = substream->runtime->private_data; pxa_free_dma(rtd->dma_ch); client->shutdown(substream); return __pxa2xx_pcm_close(substream); } static struct snd_pcm_ops pxa2xx_pcm_ops = { .open = pxa2xx_pcm_open, .close = pxa2xx_pcm_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = __pxa2xx_pcm_hw_params, .hw_free = __pxa2xx_pcm_hw_free, .prepare = pxa2xx_pcm_prepare, .trigger = pxa2xx_pcm_trigger, .pointer = pxa2xx_pcm_pointer, .mmap = pxa2xx_pcm_mmap, }; static u64 pxa2xx_pcm_dmamask = 0xffffffff; int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client, struct snd_pcm **rpcm) { struct snd_pcm *pcm; int play = client->playback_params ? 1 : 0; int capt = client->capture_params ? 
1 : 0; int ret; ret = snd_pcm_new(card, "PXA2xx-PCM", 0, play, capt, &pcm); if (ret) goto out; pcm->private_data = client; pcm->private_free = pxa2xx_pcm_free_dma_buffers; if (!card->dev->dma_mask) card->dev->dma_mask = &pxa2xx_pcm_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = 0xffffffff; if (play) { int stream = SNDRV_PCM_STREAM_PLAYBACK; snd_pcm_set_ops(pcm, stream, &pxa2xx_pcm_ops); ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream); if (ret) goto out; } if (capt) { int stream = SNDRV_PCM_STREAM_CAPTURE; snd_pcm_set_ops(pcm, stream, &pxa2xx_pcm_ops); ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream); if (ret) goto out; } if (rpcm) *rpcm = pcm; ret = 0; out: return ret; } EXPORT_SYMBOL(pxa2xx_pcm_new); MODULE_AUTHOR("Nicolas Pitre"); MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module"); MODULE_LICENSE("GPL");
gpl-2.0
Hellybean/android_kernel_lge_hammerhead
sound/soc/codecs/wm8971.c
4810
21989
/* * wm8971.c -- WM8971 ALSA SoC Audio driver * * Copyright 2005 Lab126, Inc. * * Author: Kenneth Kiraly <kiraly@lab126.com> * * Based on wm8753.c by Liam Girdwood * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/i2c.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include "wm8971.h" #define WM8971_REG_COUNT 43 static struct workqueue_struct *wm8971_workq = NULL; /* codec private data */ struct wm8971_priv { enum snd_soc_control_type control_type; unsigned int sysclk; }; /* * wm8971 register cache * We can't read the WM8971 register space when we * are using 2 wire for device control, so we cache them instead. 
*/ static const u16 wm8971_reg[] = { 0x0097, 0x0097, 0x0079, 0x0079, /* 0 */ 0x0000, 0x0008, 0x0000, 0x000a, /* 4 */ 0x0000, 0x0000, 0x00ff, 0x00ff, /* 8 */ 0x000f, 0x000f, 0x0000, 0x0000, /* 12 */ 0x0000, 0x007b, 0x0000, 0x0032, /* 16 */ 0x0000, 0x00c3, 0x00c3, 0x00c0, /* 20 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 24 */ 0x0000, 0x0000, 0x0000, 0x0000, /* 28 */ 0x0000, 0x0000, 0x0050, 0x0050, /* 32 */ 0x0050, 0x0050, 0x0050, 0x0050, /* 36 */ 0x0079, 0x0079, 0x0079, /* 40 */ }; #define wm8971_reset(c) snd_soc_write(c, WM8971_RESET, 0) /* WM8971 Controls */ static const char *wm8971_bass[] = { "Linear Control", "Adaptive Boost" }; static const char *wm8971_bass_filter[] = { "130Hz @ 48kHz", "200Hz @ 48kHz" }; static const char *wm8971_treble[] = { "8kHz", "4kHz" }; static const char *wm8971_alc_func[] = { "Off", "Right", "Left", "Stereo" }; static const char *wm8971_ng_type[] = { "Constant PGA Gain", "Mute ADC Output" }; static const char *wm8971_deemp[] = { "None", "32kHz", "44.1kHz", "48kHz" }; static const char *wm8971_mono_mux[] = {"Stereo", "Mono (Left)", "Mono (Right)", "Digital Mono"}; static const char *wm8971_dac_phase[] = { "Non Inverted", "Inverted" }; static const char *wm8971_lline_mux[] = {"Line", "NC", "NC", "PGA", "Differential"}; static const char *wm8971_rline_mux[] = {"Line", "Mic", "NC", "PGA", "Differential"}; static const char *wm8971_lpga_sel[] = {"Line", "NC", "NC", "Differential"}; static const char *wm8971_rpga_sel[] = {"Line", "Mic", "NC", "Differential"}; static const char *wm8971_adcpol[] = {"Normal", "L Invert", "R Invert", "L + R Invert"}; static const struct soc_enum wm8971_enum[] = { SOC_ENUM_SINGLE(WM8971_BASS, 7, 2, wm8971_bass), /* 0 */ SOC_ENUM_SINGLE(WM8971_BASS, 6, 2, wm8971_bass_filter), SOC_ENUM_SINGLE(WM8971_TREBLE, 6, 2, wm8971_treble), SOC_ENUM_SINGLE(WM8971_ALC1, 7, 4, wm8971_alc_func), SOC_ENUM_SINGLE(WM8971_NGATE, 1, 2, wm8971_ng_type), /* 4 */ SOC_ENUM_SINGLE(WM8971_ADCDAC, 1, 4, wm8971_deemp), 
SOC_ENUM_SINGLE(WM8971_ADCTL1, 4, 4, wm8971_mono_mux), SOC_ENUM_SINGLE(WM8971_ADCTL1, 1, 2, wm8971_dac_phase), SOC_ENUM_SINGLE(WM8971_LOUTM1, 0, 5, wm8971_lline_mux), /* 8 */ SOC_ENUM_SINGLE(WM8971_ROUTM1, 0, 5, wm8971_rline_mux), SOC_ENUM_SINGLE(WM8971_LADCIN, 6, 4, wm8971_lpga_sel), SOC_ENUM_SINGLE(WM8971_RADCIN, 6, 4, wm8971_rpga_sel), SOC_ENUM_SINGLE(WM8971_ADCDAC, 5, 4, wm8971_adcpol), /* 12 */ SOC_ENUM_SINGLE(WM8971_ADCIN, 6, 4, wm8971_mono_mux), }; static const struct snd_kcontrol_new wm8971_snd_controls[] = { SOC_DOUBLE_R("Capture Volume", WM8971_LINVOL, WM8971_RINVOL, 0, 63, 0), SOC_DOUBLE_R("Capture ZC Switch", WM8971_LINVOL, WM8971_RINVOL, 6, 1, 0), SOC_DOUBLE_R("Capture Switch", WM8971_LINVOL, WM8971_RINVOL, 7, 1, 1), SOC_DOUBLE_R("Headphone Playback ZC Switch", WM8971_LOUT1V, WM8971_ROUT1V, 7, 1, 0), SOC_DOUBLE_R("Speaker Playback ZC Switch", WM8971_LOUT2V, WM8971_ROUT2V, 7, 1, 0), SOC_SINGLE("Mono Playback ZC Switch", WM8971_MOUTV, 7, 1, 0), SOC_DOUBLE_R("PCM Volume", WM8971_LDAC, WM8971_RDAC, 0, 255, 0), SOC_DOUBLE_R("Bypass Left Playback Volume", WM8971_LOUTM1, WM8971_LOUTM2, 4, 7, 1), SOC_DOUBLE_R("Bypass Right Playback Volume", WM8971_ROUTM1, WM8971_ROUTM2, 4, 7, 1), SOC_DOUBLE_R("Bypass Mono Playback Volume", WM8971_MOUTM1, WM8971_MOUTM2, 4, 7, 1), SOC_DOUBLE_R("Headphone Playback Volume", WM8971_LOUT1V, WM8971_ROUT1V, 0, 127, 0), SOC_DOUBLE_R("Speaker Playback Volume", WM8971_LOUT2V, WM8971_ROUT2V, 0, 127, 0), SOC_ENUM("Bass Boost", wm8971_enum[0]), SOC_ENUM("Bass Filter", wm8971_enum[1]), SOC_SINGLE("Bass Volume", WM8971_BASS, 0, 7, 1), SOC_SINGLE("Treble Volume", WM8971_TREBLE, 0, 7, 0), SOC_ENUM("Treble Cut-off", wm8971_enum[2]), SOC_SINGLE("Capture Filter Switch", WM8971_ADCDAC, 0, 1, 1), SOC_SINGLE("ALC Target Volume", WM8971_ALC1, 0, 7, 0), SOC_SINGLE("ALC Max Volume", WM8971_ALC1, 4, 7, 0), SOC_SINGLE("ALC Capture Target Volume", WM8971_ALC1, 0, 7, 0), SOC_SINGLE("ALC Capture Max Volume", WM8971_ALC1, 4, 7, 0), SOC_ENUM("ALC Capture 
Function", wm8971_enum[3]), SOC_SINGLE("ALC Capture ZC Switch", WM8971_ALC2, 7, 1, 0), SOC_SINGLE("ALC Capture Hold Time", WM8971_ALC2, 0, 15, 0), SOC_SINGLE("ALC Capture Decay Time", WM8971_ALC3, 4, 15, 0), SOC_SINGLE("ALC Capture Attack Time", WM8971_ALC3, 0, 15, 0), SOC_SINGLE("ALC Capture NG Threshold", WM8971_NGATE, 3, 31, 0), SOC_ENUM("ALC Capture NG Type", wm8971_enum[4]), SOC_SINGLE("ALC Capture NG Switch", WM8971_NGATE, 0, 1, 0), SOC_SINGLE("Capture 6dB Attenuate", WM8971_ADCDAC, 8, 1, 0), SOC_SINGLE("Playback 6dB Attenuate", WM8971_ADCDAC, 7, 1, 0), SOC_ENUM("Playback De-emphasis", wm8971_enum[5]), SOC_ENUM("Playback Function", wm8971_enum[6]), SOC_ENUM("Playback Phase", wm8971_enum[7]), SOC_DOUBLE_R("Mic Boost", WM8971_LADCIN, WM8971_RADCIN, 4, 3, 0), }; /* * DAPM Controls */ /* Left Mixer */ static const struct snd_kcontrol_new wm8971_left_mixer_controls[] = { SOC_DAPM_SINGLE("Playback Switch", WM8971_LOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8971_LOUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8971_LOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8971_LOUTM2, 7, 1, 0), }; /* Right Mixer */ static const struct snd_kcontrol_new wm8971_right_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8971_ROUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8971_ROUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Playback Switch", WM8971_ROUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8971_ROUTM2, 7, 1, 0), }; /* Mono Mixer */ static const struct snd_kcontrol_new wm8971_mono_mixer_controls[] = { SOC_DAPM_SINGLE("Left Playback Switch", WM8971_MOUTM1, 8, 1, 0), SOC_DAPM_SINGLE("Left Bypass Switch", WM8971_MOUTM1, 7, 1, 0), SOC_DAPM_SINGLE("Right Playback Switch", WM8971_MOUTM2, 8, 1, 0), SOC_DAPM_SINGLE("Right Bypass Switch", WM8971_MOUTM2, 7, 1, 0), }; /* Left Line Mux */ static const struct snd_kcontrol_new wm8971_left_line_controls = SOC_DAPM_ENUM("Route", wm8971_enum[8]); /* Right Line Mux */ static const struct 
snd_kcontrol_new wm8971_right_line_controls = SOC_DAPM_ENUM("Route", wm8971_enum[9]); /* Left PGA Mux */ static const struct snd_kcontrol_new wm8971_left_pga_controls = SOC_DAPM_ENUM("Route", wm8971_enum[10]); /* Right PGA Mux */ static const struct snd_kcontrol_new wm8971_right_pga_controls = SOC_DAPM_ENUM("Route", wm8971_enum[11]); /* Mono ADC Mux */ static const struct snd_kcontrol_new wm8971_monomux_controls = SOC_DAPM_ENUM("Route", wm8971_enum[13]); static const struct snd_soc_dapm_widget wm8971_dapm_widgets[] = { SND_SOC_DAPM_MIXER("Left Mixer", SND_SOC_NOPM, 0, 0, &wm8971_left_mixer_controls[0], ARRAY_SIZE(wm8971_left_mixer_controls)), SND_SOC_DAPM_MIXER("Right Mixer", SND_SOC_NOPM, 0, 0, &wm8971_right_mixer_controls[0], ARRAY_SIZE(wm8971_right_mixer_controls)), SND_SOC_DAPM_MIXER("Mono Mixer", WM8971_PWR2, 2, 0, &wm8971_mono_mixer_controls[0], ARRAY_SIZE(wm8971_mono_mixer_controls)), SND_SOC_DAPM_PGA("Right Out 2", WM8971_PWR2, 3, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 2", WM8971_PWR2, 4, 0, NULL, 0), SND_SOC_DAPM_PGA("Right Out 1", WM8971_PWR2, 5, 0, NULL, 0), SND_SOC_DAPM_PGA("Left Out 1", WM8971_PWR2, 6, 0, NULL, 0), SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8971_PWR2, 7, 0), SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8971_PWR2, 8, 0), SND_SOC_DAPM_PGA("Mono Out 1", WM8971_PWR2, 2, 0, NULL, 0), SND_SOC_DAPM_SUPPLY("Mic Bias", WM8971_PWR1, 1, 0, NULL, 0), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8971_PWR1, 2, 0), SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8971_PWR1, 3, 0), SND_SOC_DAPM_MUX("Left PGA Mux", WM8971_PWR1, 5, 0, &wm8971_left_pga_controls), SND_SOC_DAPM_MUX("Right PGA Mux", WM8971_PWR1, 4, 0, &wm8971_right_pga_controls), SND_SOC_DAPM_MUX("Left Line Mux", SND_SOC_NOPM, 0, 0, &wm8971_left_line_controls), SND_SOC_DAPM_MUX("Right Line Mux", SND_SOC_NOPM, 0, 0, &wm8971_right_line_controls), SND_SOC_DAPM_MUX("Left ADC Mux", SND_SOC_NOPM, 0, 0, &wm8971_monomux_controls), SND_SOC_DAPM_MUX("Right ADC Mux", SND_SOC_NOPM, 0, 0, 
&wm8971_monomux_controls), SND_SOC_DAPM_OUTPUT("LOUT1"), SND_SOC_DAPM_OUTPUT("ROUT1"), SND_SOC_DAPM_OUTPUT("LOUT2"), SND_SOC_DAPM_OUTPUT("ROUT2"), SND_SOC_DAPM_OUTPUT("MONO"), SND_SOC_DAPM_INPUT("LINPUT1"), SND_SOC_DAPM_INPUT("RINPUT1"), SND_SOC_DAPM_INPUT("MIC"), }; static const struct snd_soc_dapm_route wm8971_dapm_routes[] = { /* left mixer */ {"Left Mixer", "Playback Switch", "Left DAC"}, {"Left Mixer", "Left Bypass Switch", "Left Line Mux"}, {"Left Mixer", "Right Playback Switch", "Right DAC"}, {"Left Mixer", "Right Bypass Switch", "Right Line Mux"}, /* right mixer */ {"Right Mixer", "Left Playback Switch", "Left DAC"}, {"Right Mixer", "Left Bypass Switch", "Left Line Mux"}, {"Right Mixer", "Playback Switch", "Right DAC"}, {"Right Mixer", "Right Bypass Switch", "Right Line Mux"}, /* left out 1 */ {"Left Out 1", NULL, "Left Mixer"}, {"LOUT1", NULL, "Left Out 1"}, /* left out 2 */ {"Left Out 2", NULL, "Left Mixer"}, {"LOUT2", NULL, "Left Out 2"}, /* right out 1 */ {"Right Out 1", NULL, "Right Mixer"}, {"ROUT1", NULL, "Right Out 1"}, /* right out 2 */ {"Right Out 2", NULL, "Right Mixer"}, {"ROUT2", NULL, "Right Out 2"}, /* mono mixer */ {"Mono Mixer", "Left Playback Switch", "Left DAC"}, {"Mono Mixer", "Left Bypass Switch", "Left Line Mux"}, {"Mono Mixer", "Right Playback Switch", "Right DAC"}, {"Mono Mixer", "Right Bypass Switch", "Right Line Mux"}, /* mono out */ {"Mono Out", NULL, "Mono Mixer"}, {"MONO1", NULL, "Mono Out"}, /* Left Line Mux */ {"Left Line Mux", "Line", "LINPUT1"}, {"Left Line Mux", "PGA", "Left PGA Mux"}, {"Left Line Mux", "Differential", "Differential Mux"}, /* Right Line Mux */ {"Right Line Mux", "Line", "RINPUT1"}, {"Right Line Mux", "Mic", "MIC"}, {"Right Line Mux", "PGA", "Right PGA Mux"}, {"Right Line Mux", "Differential", "Differential Mux"}, /* Left PGA Mux */ {"Left PGA Mux", "Line", "LINPUT1"}, {"Left PGA Mux", "Differential", "Differential Mux"}, /* Right PGA Mux */ {"Right PGA Mux", "Line", "RINPUT1"}, {"Right PGA Mux", 
"Differential", "Differential Mux"}, /* Differential Mux */ {"Differential Mux", "Line", "LINPUT1"}, {"Differential Mux", "Line", "RINPUT1"}, /* Left ADC Mux */ {"Left ADC Mux", "Stereo", "Left PGA Mux"}, {"Left ADC Mux", "Mono (Left)", "Left PGA Mux"}, {"Left ADC Mux", "Digital Mono", "Left PGA Mux"}, /* Right ADC Mux */ {"Right ADC Mux", "Stereo", "Right PGA Mux"}, {"Right ADC Mux", "Mono (Right)", "Right PGA Mux"}, {"Right ADC Mux", "Digital Mono", "Right PGA Mux"}, /* ADC */ {"Left ADC", NULL, "Left ADC Mux"}, {"Right ADC", NULL, "Right ADC Mux"}, }; struct _coeff_div { u32 mclk; u32 rate; u16 fs; u8 sr:5; u8 usb:1; }; /* codec hifi mclk clock divider coefficients */ static const struct _coeff_div coeff_div[] = { /* 8k */ {12288000, 8000, 1536, 0x6, 0x0}, {11289600, 8000, 1408, 0x16, 0x0}, {18432000, 8000, 2304, 0x7, 0x0}, {16934400, 8000, 2112, 0x17, 0x0}, {12000000, 8000, 1500, 0x6, 0x1}, /* 11.025k */ {11289600, 11025, 1024, 0x18, 0x0}, {16934400, 11025, 1536, 0x19, 0x0}, {12000000, 11025, 1088, 0x19, 0x1}, /* 16k */ {12288000, 16000, 768, 0xa, 0x0}, {18432000, 16000, 1152, 0xb, 0x0}, {12000000, 16000, 750, 0xa, 0x1}, /* 22.05k */ {11289600, 22050, 512, 0x1a, 0x0}, {16934400, 22050, 768, 0x1b, 0x0}, {12000000, 22050, 544, 0x1b, 0x1}, /* 32k */ {12288000, 32000, 384, 0xc, 0x0}, {18432000, 32000, 576, 0xd, 0x0}, {12000000, 32000, 375, 0xa, 0x1}, /* 44.1k */ {11289600, 44100, 256, 0x10, 0x0}, {16934400, 44100, 384, 0x11, 0x0}, {12000000, 44100, 272, 0x11, 0x1}, /* 48k */ {12288000, 48000, 256, 0x0, 0x0}, {18432000, 48000, 384, 0x1, 0x0}, {12000000, 48000, 250, 0x0, 0x1}, /* 88.2k */ {11289600, 88200, 128, 0x1e, 0x0}, {16934400, 88200, 192, 0x1f, 0x0}, {12000000, 88200, 136, 0x1f, 0x1}, /* 96k */ {12288000, 96000, 128, 0xe, 0x0}, {18432000, 96000, 192, 0xf, 0x0}, {12000000, 96000, 125, 0xe, 0x1}, }; static int get_coeff(int mclk, int rate) { int i; for (i = 0; i < ARRAY_SIZE(coeff_div); i++) { if (coeff_div[i].rate == rate && coeff_div[i].mclk == mclk) return i; 
} return -EINVAL; } static int wm8971_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8971_priv *wm8971 = snd_soc_codec_get_drvdata(codec); switch (freq) { case 11289600: case 12000000: case 12288000: case 16934400: case 18432000: wm8971->sysclk = freq; return 0; } return -EINVAL; } static int wm8971_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 iface = 0; /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: iface = 0x0040; break; case SND_SOC_DAIFMT_CBS_CFS: break; default: return -EINVAL; } /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: iface |= 0x0002; break; case SND_SOC_DAIFMT_RIGHT_J: break; case SND_SOC_DAIFMT_LEFT_J: iface |= 0x0001; break; case SND_SOC_DAIFMT_DSP_A: iface |= 0x0003; break; case SND_SOC_DAIFMT_DSP_B: iface |= 0x0013; break; default: return -EINVAL; } /* clock inversion */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: break; case SND_SOC_DAIFMT_IB_IF: iface |= 0x0090; break; case SND_SOC_DAIFMT_IB_NF: iface |= 0x0080; break; case SND_SOC_DAIFMT_NB_IF: iface |= 0x0010; break; default: return -EINVAL; } snd_soc_write(codec, WM8971_IFACE, iface); return 0; } static int wm8971_pcm_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct wm8971_priv *wm8971 = snd_soc_codec_get_drvdata(codec); u16 iface = snd_soc_read(codec, WM8971_IFACE) & 0x1f3; u16 srate = snd_soc_read(codec, WM8971_SRATE) & 0x1c0; int coeff = get_coeff(wm8971->sysclk, params_rate(params)); /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: iface |= 0x0004; break; case 
SNDRV_PCM_FORMAT_S24_LE: iface |= 0x0008; break; case SNDRV_PCM_FORMAT_S32_LE: iface |= 0x000c; break; } /* set iface & srate */ snd_soc_write(codec, WM8971_IFACE, iface); if (coeff >= 0) snd_soc_write(codec, WM8971_SRATE, srate | (coeff_div[coeff].sr << 1) | coeff_div[coeff].usb); return 0; } static int wm8971_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 mute_reg = snd_soc_read(codec, WM8971_ADCDAC) & 0xfff7; if (mute) snd_soc_write(codec, WM8971_ADCDAC, mute_reg | 0x8); else snd_soc_write(codec, WM8971_ADCDAC, mute_reg); return 0; } static int wm8971_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { u16 pwr_reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e; switch (level) { case SND_SOC_BIAS_ON: /* set vmid to 50k and unmute dac */ snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x00c1); break; case SND_SOC_BIAS_PREPARE: break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) snd_soc_cache_sync(codec); /* mute dac and set vmid to 500k, enable VREF */ snd_soc_write(codec, WM8971_PWR1, pwr_reg | 0x0140); break; case SND_SOC_BIAS_OFF: snd_soc_write(codec, WM8971_PWR1, 0x0001); break; } codec->dapm.bias_level = level; return 0; } #define WM8971_RATES (SNDRV_PCM_RATE_8000 | SNDRV_PCM_RATE_11025 |\ SNDRV_PCM_RATE_16000 | SNDRV_PCM_RATE_22050 | SNDRV_PCM_RATE_44100 | \ SNDRV_PCM_RATE_48000 | SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000) #define WM8971_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops wm8971_dai_ops = { .hw_params = wm8971_pcm_hw_params, .digital_mute = wm8971_mute, .set_fmt = wm8971_set_dai_fmt, .set_sysclk = wm8971_set_dai_sysclk, }; static struct snd_soc_dai_driver wm8971_dai = { .name = "wm8971-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8971_RATES, .formats = WM8971_FORMATS,}, .capture = { .stream_name = "Capture", .channels_min = 1, 
.channels_max = 2, .rates = WM8971_RATES, .formats = WM8971_FORMATS,}, .ops = &wm8971_dai_ops, }; static void wm8971_work(struct work_struct *work) { struct snd_soc_dapm_context *dapm = container_of(work, struct snd_soc_dapm_context, delayed_work.work); struct snd_soc_codec *codec = dapm->codec; wm8971_set_bias_level(codec, codec->dapm.bias_level); } static int wm8971_suspend(struct snd_soc_codec *codec) { wm8971_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8971_resume(struct snd_soc_codec *codec) { u16 reg; wm8971_set_bias_level(codec, SND_SOC_BIAS_STANDBY); /* charge wm8971 caps */ if (codec->dapm.suspend_bias_level == SND_SOC_BIAS_ON) { reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e; snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0); codec->dapm.bias_level = SND_SOC_BIAS_ON; queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work, msecs_to_jiffies(1000)); } return 0; } static int wm8971_probe(struct snd_soc_codec *codec) { struct wm8971_priv *wm8971 = snd_soc_codec_get_drvdata(codec); int ret = 0; u16 reg; ret = snd_soc_codec_set_cache_io(codec, 7, 9, wm8971->control_type); if (ret < 0) { printk(KERN_ERR "wm8971: failed to set cache I/O: %d\n", ret); return ret; } INIT_DELAYED_WORK(&codec->dapm.delayed_work, wm8971_work); wm8971_workq = create_workqueue("wm8971"); if (wm8971_workq == NULL) return -ENOMEM; wm8971_reset(codec); /* charge output caps - set vmid to 5k for quick power up */ reg = snd_soc_read(codec, WM8971_PWR1) & 0xfe3e; snd_soc_write(codec, WM8971_PWR1, reg | 0x01c0); codec->dapm.bias_level = SND_SOC_BIAS_STANDBY; queue_delayed_work(wm8971_workq, &codec->dapm.delayed_work, msecs_to_jiffies(1000)); /* set the update bits */ snd_soc_update_bits(codec, WM8971_LDAC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8971_RDAC, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8971_LOUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8971_ROUT1V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8971_LOUT2V, 0x0100, 0x0100); 
snd_soc_update_bits(codec, WM8971_ROUT2V, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8971_LINVOL, 0x0100, 0x0100); snd_soc_update_bits(codec, WM8971_RINVOL, 0x0100, 0x0100); return ret; } /* power down chip */ static int wm8971_remove(struct snd_soc_codec *codec) { wm8971_set_bias_level(codec, SND_SOC_BIAS_OFF); if (wm8971_workq) destroy_workqueue(wm8971_workq); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8971 = { .probe = wm8971_probe, .remove = wm8971_remove, .suspend = wm8971_suspend, .resume = wm8971_resume, .set_bias_level = wm8971_set_bias_level, .reg_cache_size = ARRAY_SIZE(wm8971_reg), .reg_word_size = sizeof(u16), .reg_cache_default = wm8971_reg, .controls = wm8971_snd_controls, .num_controls = ARRAY_SIZE(wm8971_snd_controls), .dapm_widgets = wm8971_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8971_dapm_widgets), .dapm_routes = wm8971_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8971_dapm_routes), }; static __devinit int wm8971_i2c_probe(struct i2c_client *i2c, const struct i2c_device_id *id) { struct wm8971_priv *wm8971; int ret; wm8971 = devm_kzalloc(&i2c->dev, sizeof(struct wm8971_priv), GFP_KERNEL); if (wm8971 == NULL) return -ENOMEM; wm8971->control_type = SND_SOC_I2C; i2c_set_clientdata(i2c, wm8971); ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_wm8971, &wm8971_dai, 1); return ret; } static __devexit int wm8971_i2c_remove(struct i2c_client *client) { snd_soc_unregister_codec(&client->dev); return 0; } static const struct i2c_device_id wm8971_i2c_id[] = { { "wm8971", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, wm8971_i2c_id); static struct i2c_driver wm8971_i2c_driver = { .driver = { .name = "wm8971", .owner = THIS_MODULE, }, .probe = wm8971_i2c_probe, .remove = __devexit_p(wm8971_i2c_remove), .id_table = wm8971_i2c_id, }; static int __init wm8971_modinit(void) { int ret = 0; ret = i2c_add_driver(&wm8971_i2c_driver); if (ret != 0) { printk(KERN_ERR "Failed to register WM8971 I2C driver: %d\n", ret); } return ret; } 
module_init(wm8971_modinit); static void __exit wm8971_exit(void) { i2c_del_driver(&wm8971_i2c_driver); } module_exit(wm8971_exit); MODULE_DESCRIPTION("ASoC WM8971 driver"); MODULE_AUTHOR("Lab126"); MODULE_LICENSE("GPL");
gpl-2.0
rastomanchik/android_kernel_htc_primou_new
sound/arm/pxa2xx-pcm.c
4810
3191
/*
 * linux/sound/arm/pxa2xx-pcm.c -- ALSA PCM interface for the Intel PXA2xx chip
 *
 * Author:	Nicolas Pitre
 * Created:	Nov 30, 2004
 * Copyright:	(C) 2004 MontaVista Software, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <sound/core.h>
#include <sound/pxa2xx-lib.h>

#include "pxa2xx-pcm.h"

/*
 * Prepare: run the shared pxa2xx-lib preparation, then the client's own
 * prepare hook; the client's return value decides success.
 */
static int pxa2xx_pcm_prepare(struct snd_pcm_substream *substream)
{
	struct pxa2xx_pcm_client *client = substream->private_data;

	__pxa2xx_pcm_prepare(substream);

	return client->prepare(substream);
}

/*
 * Open: acquire the generic runtime state, a DMA channel for the chosen
 * direction, and finally notify the client.  Cleanup on failure unwinds
 * in strict reverse order of acquisition (goto-based).
 */
static int pxa2xx_pcm_open(struct snd_pcm_substream *substream)
{
	struct pxa2xx_pcm_client *client = substream->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	struct pxa2xx_runtime_data *rtd;
	int ret;

	ret = __pxa2xx_pcm_open(substream);
	if (ret)
		goto out;

	rtd = runtime->private_data;

	/* pick the DMA parameter set matching the stream direction */
	rtd->params = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ?
		      client->playback_params : client->capture_params;
	ret = pxa_request_dma(rtd->params->name, DMA_PRIO_LOW,
			      pxa2xx_pcm_dma_irq, substream);
	if (ret < 0)
		goto err2;
	rtd->dma_ch = ret;	/* pxa_request_dma returns the channel number */

	ret = client->startup(substream);
	if (!ret)
		goto out;

	pxa_free_dma(rtd->dma_ch);
 err2:
	__pxa2xx_pcm_close(substream);
 out:
	return ret;
}

/* Close: release the DMA channel, tell the client, free runtime state. */
static int pxa2xx_pcm_close(struct snd_pcm_substream *substream)
{
	struct pxa2xx_pcm_client *client = substream->private_data;
	struct pxa2xx_runtime_data *rtd = substream->runtime->private_data;

	pxa_free_dma(rtd->dma_ch);
	client->shutdown(substream);

	return __pxa2xx_pcm_close(substream);
}

static struct snd_pcm_ops pxa2xx_pcm_ops = {
	.open		= pxa2xx_pcm_open,
	.close		= pxa2xx_pcm_close,
	.ioctl		= snd_pcm_lib_ioctl,
	.hw_params	= __pxa2xx_pcm_hw_params,
	.hw_free	= __pxa2xx_pcm_hw_free,
	.prepare	= pxa2xx_pcm_prepare,
	.trigger	= pxa2xx_pcm_trigger,
	.pointer	= pxa2xx_pcm_pointer,
	.mmap		= pxa2xx_pcm_mmap,
};

/* 32-bit DMA mask used when the card's device has none set */
static u64 pxa2xx_pcm_dmamask = 0xffffffff;

/*
 * Create a new PCM device for 'card'.  Playback/capture substreams are
 * instantiated only when the client supplies the matching parameter set,
 * and each enabled direction gets a preallocated DMA buffer.
 */
int pxa2xx_pcm_new(struct snd_card *card, struct pxa2xx_pcm_client *client,
		   struct snd_pcm **rpcm)
{
	struct snd_pcm *pcm;
	int play = client->playback_params ? 1 : 0;
	int capt = client->capture_params ? 1 : 0;
	int ret;

	ret = snd_pcm_new(card, "PXA2xx-PCM", 0, play, capt, &pcm);
	if (ret)
		goto out;

	pcm->private_data = client;
	pcm->private_free = pxa2xx_pcm_free_dma_buffers;

	/* fall back to a full 32-bit DMA mask if the platform set none */
	if (!card->dev->dma_mask)
		card->dev->dma_mask = &pxa2xx_pcm_dmamask;
	if (!card->dev->coherent_dma_mask)
		card->dev->coherent_dma_mask = 0xffffffff;

	if (play) {
		int stream = SNDRV_PCM_STREAM_PLAYBACK;
		snd_pcm_set_ops(pcm, stream, &pxa2xx_pcm_ops);
		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream);
		if (ret)
			goto out;
	}
	if (capt) {
		int stream = SNDRV_PCM_STREAM_CAPTURE;
		snd_pcm_set_ops(pcm, stream, &pxa2xx_pcm_ops);
		ret = pxa2xx_pcm_preallocate_dma_buffer(pcm, stream);
		if (ret)
			goto out;
	}

	if (rpcm)
		*rpcm = pcm;
	ret = 0;

 out:
	return ret;
}
EXPORT_SYMBOL(pxa2xx_pcm_new);

MODULE_AUTHOR("Nicolas Pitre");
MODULE_DESCRIPTION("Intel PXA2xx PCM DMA module");
MODULE_LICENSE("GPL");
gpl-2.0
talnoah/m7-sense
arch/x86/mm/init_32.c
4810
25288
/* * * Copyright (C) 1995 Linus Torvalds * * Support of BIGMEM added by Gerhard Wichert, Siemens AG, July 1999 */ #include <linux/module.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/hugetlb.h> #include <linux/swap.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/pagemap.h> #include <linux/pci.h> #include <linux/pfn.h> #include <linux/poison.h> #include <linux/bootmem.h> #include <linux/memblock.h> #include <linux/proc_fs.h> #include <linux/memory_hotplug.h> #include <linux/initrd.h> #include <linux/cpumask.h> #include <linux/gfp.h> #include <asm/asm.h> #include <asm/bios_ebda.h> #include <asm/processor.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/dma.h> #include <asm/fixmap.h> #include <asm/e820.h> #include <asm/apic.h> #include <asm/bugs.h> #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/olpc_ofw.h> #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/paravirt.h> #include <asm/setup.h> #include <asm/cacheflush.h> #include <asm/page_types.h> #include <asm/init.h> unsigned long highstart_pfn, highend_pfn; static noinline int do_test_wp_bit(void); bool __read_mostly __vmalloc_start_set = false; static __init void *alloc_low_page(void) { unsigned long pfn = pgt_buf_end++; void *adr; if (pfn >= pgt_buf_top) panic("alloc_low_page: ran out of memory"); adr = __va(pfn * PAGE_SIZE); clear_page(adr); return adr; } /* * Creates a middle page table and puts a pointer to it in the * given global directory entry. This only returns the gd entry * in non-PAE compilation mode, since the middle layer is folded. 
*/ static pmd_t * __init one_md_table_init(pgd_t *pgd) { pud_t *pud; pmd_t *pmd_table; #ifdef CONFIG_X86_PAE if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { if (after_bootmem) pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); else pmd_table = (pmd_t *)alloc_low_page(); paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); pud = pud_offset(pgd, 0); BUG_ON(pmd_table != pmd_offset(pud, 0)); return pmd_table; } #endif pud = pud_offset(pgd, 0); pmd_table = pmd_offset(pud, 0); return pmd_table; } /* * Create a page table and place a pointer to it in a middle page * directory entry: */ static pte_t * __init one_page_table_init(pmd_t *pmd) { if (!(pmd_val(*pmd) & _PAGE_PRESENT)) { pte_t *page_table = NULL; if (after_bootmem) { #if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) page_table = (pte_t *) alloc_bootmem_pages(PAGE_SIZE); #endif if (!page_table) page_table = (pte_t *)alloc_bootmem_pages(PAGE_SIZE); } else page_table = (pte_t *)alloc_low_page(); paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); BUG_ON(page_table != pte_offset_kernel(pmd, 0)); } return pte_offset_kernel(pmd, 0); } pmd_t * __init populate_extra_pmd(unsigned long vaddr) { int pgd_idx = pgd_index(vaddr); int pmd_idx = pmd_index(vaddr); return one_md_table_init(swapper_pg_dir + pgd_idx) + pmd_idx; } pte_t * __init populate_extra_pte(unsigned long vaddr) { int pte_idx = pte_index(vaddr); pmd_t *pmd; pmd = populate_extra_pmd(vaddr); return one_page_table_init(pmd) + pte_idx; } static pte_t *__init page_table_kmap_check(pte_t *pte, pmd_t *pmd, unsigned long vaddr, pte_t *lastpte) { #ifdef CONFIG_HIGHMEM /* * Something (early fixmap) may already have put a pte * page here, which causes the page table allocation * to become nonlinear. Attempt to fix it, and if it * is still nonlinear then we have to bug. 
*/ int pmd_idx_kmap_begin = fix_to_virt(FIX_KMAP_END) >> PMD_SHIFT; int pmd_idx_kmap_end = fix_to_virt(FIX_KMAP_BEGIN) >> PMD_SHIFT; if (pmd_idx_kmap_begin != pmd_idx_kmap_end && (vaddr >> PMD_SHIFT) >= pmd_idx_kmap_begin && (vaddr >> PMD_SHIFT) <= pmd_idx_kmap_end && ((__pa(pte) >> PAGE_SHIFT) < pgt_buf_start || (__pa(pte) >> PAGE_SHIFT) >= pgt_buf_end)) { pte_t *newpte; int i; BUG_ON(after_bootmem); newpte = alloc_low_page(); for (i = 0; i < PTRS_PER_PTE; i++) set_pte(newpte + i, pte[i]); paravirt_alloc_pte(&init_mm, __pa(newpte) >> PAGE_SHIFT); set_pmd(pmd, __pmd(__pa(newpte)|_PAGE_TABLE)); BUG_ON(newpte != pte_offset_kernel(pmd, 0)); __flush_tlb_all(); paravirt_release_pte(__pa(pte) >> PAGE_SHIFT); pte = newpte; } BUG_ON(vaddr < fix_to_virt(FIX_KMAP_BEGIN - 1) && vaddr > fix_to_virt(FIX_KMAP_END) && lastpte && lastpte + PTRS_PER_PTE != pte); #endif return pte; } /* * This function initializes a certain range of kernel virtual memory * with new bootmem page tables, everywhere page tables are missing in * the given range. * * NOTE: The pagetables are allocated contiguous on the physical space * so we can cache the place of the first one and move around without * checking the pgd every time. 
*/ static void __init page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base) { int pgd_idx, pmd_idx; unsigned long vaddr; pgd_t *pgd; pmd_t *pmd; pte_t *pte = NULL; vaddr = start; pgd_idx = pgd_index(vaddr); pmd_idx = pmd_index(vaddr); pgd = pgd_base + pgd_idx; for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { pmd = one_md_table_init(pgd); pmd = pmd + pmd_index(vaddr); for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); pmd++, pmd_idx++) { pte = page_table_kmap_check(one_page_table_init(pmd), pmd, vaddr, pte); vaddr += PMD_SIZE; } pmd_idx = 0; } } static inline int is_kernel_text(unsigned long addr) { if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end) return 1; return 0; } /* * This maps the physical memory to kernel virtual address space, a total * of max_low_pfn pages, by creating page tables starting from address * PAGE_OFFSET: */ unsigned long __init kernel_physical_mapping_init(unsigned long start, unsigned long end, unsigned long page_size_mask) { int use_pse = page_size_mask == (1<<PG_LEVEL_2M); unsigned long last_map_addr = end; unsigned long start_pfn, end_pfn; pgd_t *pgd_base = swapper_pg_dir; int pgd_idx, pmd_idx, pte_ofs; unsigned long pfn; pgd_t *pgd; pmd_t *pmd; pte_t *pte; unsigned pages_2m, pages_4k; int mapping_iter; start_pfn = start >> PAGE_SHIFT; end_pfn = end >> PAGE_SHIFT; /* * First iteration will setup identity mapping using large/small pages * based on use_pse, with other attributes same as set by * the early code in head_32.S * * Second iteration will setup the appropriate attributes (NX, GLOBAL..) * as desired for the kernel identity mapping. * * This two pass mechanism conforms to the TLB app note which says: * * "Software should not write to a paging-structure entry in a way * that would change, for any linear address, both the page size * and either the page frame or attributes." 
*/ mapping_iter = 1; if (!cpu_has_pse) use_pse = 0; repeat: pages_2m = pages_4k = 0; pfn = start_pfn; pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pgd = pgd_base + pgd_idx; for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { pmd = one_md_table_init(pgd); if (pfn >= end_pfn) continue; #ifdef CONFIG_X86_PAE pmd_idx = pmd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pmd += pmd_idx; #else pmd_idx = 0; #endif for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; pmd++, pmd_idx++) { unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; /* * Map with big pages if possible, otherwise * create normal page tables: */ if (use_pse) { unsigned int addr2; pgprot_t prot = PAGE_KERNEL_LARGE; /* * first pass will use the same initial * identity mapping attribute + _PAGE_PSE. */ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR | _PAGE_PSE); addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + PAGE_OFFSET + PAGE_SIZE-1; if (is_kernel_text(addr) || is_kernel_text(addr2)) prot = PAGE_KERNEL_LARGE_EXEC; pages_2m++; if (mapping_iter == 1) set_pmd(pmd, pfn_pmd(pfn, init_prot)); else set_pmd(pmd, pfn_pmd(pfn, prot)); pfn += PTRS_PER_PTE; continue; } pte = one_page_table_init(pmd); pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); pte += pte_ofs; for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { pgprot_t prot = PAGE_KERNEL; /* * first pass will use the same initial * identity mapping attribute. */ pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); if (is_kernel_text(addr)) prot = PAGE_KERNEL_EXEC; pages_4k++; if (mapping_iter == 1) { set_pte(pte, pfn_pte(pfn, init_prot)); last_map_addr = (pfn << PAGE_SHIFT) + PAGE_SIZE; } else set_pte(pte, pfn_pte(pfn, prot)); } } } if (mapping_iter == 1) { /* * update direct mapping page count only in the first * iteration. */ update_page_count(PG_LEVEL_2M, pages_2m); update_page_count(PG_LEVEL_4K, pages_4k); /* * local global flush tlb, which will flush the previous * mappings present in both small and large page TLB's. 
*/ __flush_tlb_all(); /* * Second iteration will set the actual desired PTE attributes. */ mapping_iter = 2; goto repeat; } return last_map_addr; } pte_t *kmap_pte; pgprot_t kmap_prot; static inline pte_t *kmap_get_fixmap_pte(unsigned long vaddr) { return pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), vaddr), vaddr), vaddr); } static void __init kmap_init(void) { unsigned long kmap_vstart; /* * Cache the first kmap pte: */ kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN); kmap_pte = kmap_get_fixmap_pte(kmap_vstart); kmap_prot = PAGE_KERNEL; } #ifdef CONFIG_HIGHMEM static void __init permanent_kmaps_init(pgd_t *pgd_base) { unsigned long vaddr; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; vaddr = PKMAP_BASE; page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base); pgd = swapper_pg_dir + pgd_index(vaddr); pud = pud_offset(pgd, vaddr); pmd = pmd_offset(pud, vaddr); pte = pte_offset_kernel(pmd, vaddr); pkmap_page_table = pte; } static void __init add_one_highpage_init(struct page *page) { ClearPageReserved(page); init_page_count(page); __free_page(page); totalhigh_pages++; } void __init add_highpages_with_active_regions(int nid, unsigned long start_pfn, unsigned long end_pfn) { phys_addr_t start, end; u64 i; for_each_free_mem_range(i, nid, &start, &end, NULL) { unsigned long pfn = clamp_t(unsigned long, PFN_UP(start), start_pfn, end_pfn); unsigned long e_pfn = clamp_t(unsigned long, PFN_DOWN(end), start_pfn, end_pfn); for ( ; pfn < e_pfn; pfn++) if (pfn_valid(pfn)) add_one_highpage_init(pfn_to_page(pfn)); } } #else static inline void permanent_kmaps_init(pgd_t *pgd_base) { } #endif /* CONFIG_HIGHMEM */ void __init native_pagetable_setup_start(pgd_t *base) { unsigned long pfn, va; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; /* * Remove any mappings which extend past the end of physical * memory from the boot time page table: */ for (pfn = max_low_pfn + 1; pfn < 1<<(32-PAGE_SHIFT); pfn++) { va = PAGE_OFFSET + (pfn<<PAGE_SHIFT); pgd = base + 
pgd_index(va); if (!pgd_present(*pgd)) break; pud = pud_offset(pgd, va); pmd = pmd_offset(pud, va); if (!pmd_present(*pmd)) break; pte = pte_offset_kernel(pmd, va); if (!pte_present(*pte)) break; pte_clear(NULL, va, pte); } paravirt_alloc_pmd(&init_mm, __pa(base) >> PAGE_SHIFT); } void __init native_pagetable_setup_done(pgd_t *base) { } /* * Build a proper pagetable for the kernel mappings. Up until this * point, we've been running on some set of pagetables constructed by * the boot process. * * If we're booting on native hardware, this will be a pagetable * constructed in arch/x86/kernel/head_32.S. The root of the * pagetable will be swapper_pg_dir. * * If we're booting paravirtualized under a hypervisor, then there are * more options: we may already be running PAE, and the pagetable may * or may not be based in swapper_pg_dir. In any case, * paravirt_pagetable_setup_start() will set up swapper_pg_dir * appropriately for the rest of the initialization to work. * * In general, pagetable_init() assumes that the pagetable may already * be partially populated, and so it avoids stomping on any existing * mappings. */ void __init early_ioremap_page_table_range_init(void) { pgd_t *pgd_base = swapper_pg_dir; unsigned long vaddr, end; /* * Fixed mappings, only the page table structure has to be * created - mappings will be set by set_fixmap(): */ vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK; end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK; page_table_range_init(vaddr, end, pgd_base); early_ioremap_reset(); } static void __init pagetable_init(void) { pgd_t *pgd_base = swapper_pg_dir; permanent_kmaps_init(pgd_base); } pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); EXPORT_SYMBOL_GPL(__supported_pte_mask); /* user-defined highmem size */ static unsigned int highmem_pages = -1; /* * highmem=size forces highmem to be exactly 'size' bytes. * This works even on boxes that have no highmem otherwise. 
* This also works to reduce highmem size on bigger boxes. */ static int __init parse_highmem(char *arg) { if (!arg) return -EINVAL; highmem_pages = memparse(arg, &arg) >> PAGE_SHIFT; return 0; } early_param("highmem", parse_highmem); #define MSG_HIGHMEM_TOO_BIG \ "highmem size (%luMB) is bigger than pages available (%luMB)!\n" #define MSG_LOWMEM_TOO_SMALL \ "highmem size (%luMB) results in <64MB lowmem, ignoring it!\n" /* * All of RAM fits into lowmem - but if user wants highmem * artificially via the highmem=x boot parameter then create * it: */ void __init lowmem_pfn_init(void) { /* max_low_pfn is 0, we already have early_res support */ max_low_pfn = max_pfn; if (highmem_pages == -1) highmem_pages = 0; #ifdef CONFIG_HIGHMEM if (highmem_pages >= max_pfn) { printk(KERN_ERR MSG_HIGHMEM_TOO_BIG, pages_to_mb(highmem_pages), pages_to_mb(max_pfn)); highmem_pages = 0; } if (highmem_pages) { if (max_low_pfn - highmem_pages < 64*1024*1024/PAGE_SIZE) { printk(KERN_ERR MSG_LOWMEM_TOO_SMALL, pages_to_mb(highmem_pages)); highmem_pages = 0; } max_low_pfn -= highmem_pages; } #else if (highmem_pages) printk(KERN_ERR "ignoring highmem size on non-highmem kernel!\n"); #endif } #define MSG_HIGHMEM_TOO_SMALL \ "only %luMB highmem pages available, ignoring highmem size of %luMB!\n" #define MSG_HIGHMEM_TRIMMED \ "Warning: only 4GB will be used. 
Use a HIGHMEM64G enabled kernel!\n" /* * We have more RAM than fits into lowmem - we try to put it into * highmem, also taking the highmem=x boot parameter into account: */ void __init highmem_pfn_init(void) { max_low_pfn = MAXMEM_PFN; if (highmem_pages == -1) highmem_pages = max_pfn - MAXMEM_PFN; if (highmem_pages + MAXMEM_PFN < max_pfn) max_pfn = MAXMEM_PFN + highmem_pages; if (highmem_pages + MAXMEM_PFN > max_pfn) { printk(KERN_WARNING MSG_HIGHMEM_TOO_SMALL, pages_to_mb(max_pfn - MAXMEM_PFN), pages_to_mb(highmem_pages)); highmem_pages = 0; } #ifndef CONFIG_HIGHMEM /* Maximum memory usable is what is directly addressable */ printk(KERN_WARNING "Warning only %ldMB will be used.\n", MAXMEM>>20); if (max_pfn > MAX_NONPAE_PFN) printk(KERN_WARNING "Use a HIGHMEM64G enabled kernel.\n"); else printk(KERN_WARNING "Use a HIGHMEM enabled kernel.\n"); max_pfn = MAXMEM_PFN; #else /* !CONFIG_HIGHMEM */ #ifndef CONFIG_HIGHMEM64G if (max_pfn > MAX_NONPAE_PFN) { max_pfn = MAX_NONPAE_PFN; printk(KERN_WARNING MSG_HIGHMEM_TRIMMED); } #endif /* !CONFIG_HIGHMEM64G */ #endif /* !CONFIG_HIGHMEM */ } /* * Determine low and high memory ranges: */ void __init find_low_pfn_range(void) { /* it could update max_pfn */ if (max_pfn <= MAXMEM_PFN) lowmem_pfn_init(); else highmem_pfn_init(); } #ifndef CONFIG_NEED_MULTIPLE_NODES void __init initmem_init(void) { #ifdef CONFIG_HIGHMEM highstart_pfn = highend_pfn = max_pfn; if (max_pfn > max_low_pfn) highstart_pfn = max_low_pfn; printk(KERN_NOTICE "%ldMB HIGHMEM available.\n", pages_to_mb(highend_pfn - highstart_pfn)); num_physpages = highend_pfn; high_memory = (void *) __va(highstart_pfn * PAGE_SIZE - 1) + 1; #else num_physpages = max_low_pfn; high_memory = (void *) __va(max_low_pfn * PAGE_SIZE - 1) + 1; #endif memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0); sparse_memory_present_with_active_regions(0); #ifdef CONFIG_FLATMEM max_mapnr = num_physpages; #endif __vmalloc_start_set = true; printk(KERN_NOTICE "%ldMB LOWMEM available.\n", 
pages_to_mb(max_low_pfn)); setup_bootmem_allocator(); } #endif /* !CONFIG_NEED_MULTIPLE_NODES */ void __init setup_bootmem_allocator(void) { printk(KERN_INFO " mapped low ram: 0 - %08lx\n", max_pfn_mapped<<PAGE_SHIFT); printk(KERN_INFO " low ram: 0 - %08lx\n", max_low_pfn<<PAGE_SHIFT); after_bootmem = 1; } /* * paging_init() sets up the page tables - note that the first 8MB are * already mapped by head.S. * * This routines also unmaps the page at virtual kernel address 0, so * that we can trap those pesky NULL-reference errors in the kernel. */ void __init paging_init(void) { pagetable_init(); __flush_tlb_all(); kmap_init(); /* * NOTE: at this point the bootmem allocator is fully available. */ olpc_dt_build_devicetree(); sparse_memory_present_with_active_regions(MAX_NUMNODES); sparse_init(); zone_sizes_init(); } /* * Test if the WP bit works in supervisor mode. It isn't supported on 386's * and also on some strange 486's. All 586+'s are OK. This used to involve * black magic jumps to work around some nasty CPU bugs, but fortunately the * switch to using exceptions got rid of all that. */ static void __init test_wp_bit(void) { printk(KERN_INFO "Checking if this processor honours the WP bit even in supervisor mode..."); /* Any page-aligned address will do, the test is non-destructive */ __set_fixmap(FIX_WP_TEST, __pa(&swapper_pg_dir), PAGE_READONLY); boot_cpu_data.wp_works_ok = do_test_wp_bit(); clear_fixmap(FIX_WP_TEST); if (!boot_cpu_data.wp_works_ok) { printk(KERN_CONT "No.\n"); #ifdef CONFIG_X86_WP_WORKS_OK panic( "This kernel doesn't support CPU's with broken WP. Recompile it for a 386!"); #endif } else { printk(KERN_CONT "Ok.\n"); } } void __init mem_init(void) { int codesize, reservedpages, datasize, initsize; int tmp; pci_iommu_alloc(); #ifdef CONFIG_FLATMEM BUG_ON(!mem_map); #endif /* * With CONFIG_DEBUG_PAGEALLOC initialization of highmem pages has to * be done before free_all_bootmem(). 
Memblock use free low memory for * temporary data (see find_range_array()) and for this purpose can use * pages that was already passed to the buddy allocator, hence marked as * not accessible in the page tables when compiled with * CONFIG_DEBUG_PAGEALLOC. Otherwise order of initialization is not * important here. */ set_highmem_pages_init(); /* this will put all low memory onto the freelists */ totalram_pages += free_all_bootmem(); reservedpages = 0; for (tmp = 0; tmp < max_low_pfn; tmp++) /* * Only count reserved RAM pages: */ if (page_is_ram(tmp) && PageReserved(pfn_to_page(tmp))) reservedpages++; codesize = (unsigned long) &_etext - (unsigned long) &_text; datasize = (unsigned long) &_edata - (unsigned long) &_etext; initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " "%dk reserved, %dk data, %dk init, %ldk highmem)\n", nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10), codesize >> 10, reservedpages << (PAGE_SHIFT-10), datasize >> 10, initsize >> 10, totalhigh_pages << (PAGE_SHIFT-10)); printk(KERN_INFO "virtual kernel memory layout:\n" " fixmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #ifdef CONFIG_HIGHMEM " pkmap : 0x%08lx - 0x%08lx (%4ld kB)\n" #endif " vmalloc : 0x%08lx - 0x%08lx (%4ld MB)\n" " lowmem : 0x%08lx - 0x%08lx (%4ld MB)\n" " .init : 0x%08lx - 0x%08lx (%4ld kB)\n" " .data : 0x%08lx - 0x%08lx (%4ld kB)\n" " .text : 0x%08lx - 0x%08lx (%4ld kB)\n", FIXADDR_START, FIXADDR_TOP, (FIXADDR_TOP - FIXADDR_START) >> 10, #ifdef CONFIG_HIGHMEM PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE, (LAST_PKMAP*PAGE_SIZE) >> 10, #endif VMALLOC_START, VMALLOC_END, (VMALLOC_END - VMALLOC_START) >> 20, (unsigned long)__va(0), (unsigned long)high_memory, ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20, (unsigned long)&__init_begin, (unsigned long)&__init_end, ((unsigned long)&__init_end - (unsigned long)&__init_begin) >> 10, (unsigned long)&_etext, (unsigned 
long)&_edata, ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, (unsigned long)&_text, (unsigned long)&_etext, ((unsigned long)&_etext - (unsigned long)&_text) >> 10); /* * Check boundaries twice: Some fundamental inconsistencies can * be detected at build time already. */ #define __FIXADDR_TOP (-PAGE_SIZE) #ifdef CONFIG_HIGHMEM BUILD_BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); BUILD_BUG_ON(VMALLOC_END > PKMAP_BASE); #endif #define high_memory (-128UL << 20) BUILD_BUG_ON(VMALLOC_START >= VMALLOC_END); #undef high_memory #undef __FIXADDR_TOP #ifdef CONFIG_HIGHMEM BUG_ON(PKMAP_BASE + LAST_PKMAP*PAGE_SIZE > FIXADDR_START); BUG_ON(VMALLOC_END > PKMAP_BASE); #endif BUG_ON(VMALLOC_START >= VMALLOC_END); BUG_ON((unsigned long)high_memory > VMALLOC_START); if (boot_cpu_data.wp_works_ok < 0) test_wp_bit(); } #ifdef CONFIG_MEMORY_HOTPLUG int arch_add_memory(int nid, u64 start, u64 size) { struct pglist_data *pgdata = NODE_DATA(nid); struct zone *zone = pgdata->node_zones + ZONE_HIGHMEM; unsigned long start_pfn = start >> PAGE_SHIFT; unsigned long nr_pages = size >> PAGE_SHIFT; return __add_pages(nid, zone, start_pfn, nr_pages); } #endif /* * This function cannot be __init, since exceptions don't work in that * section. Put this after the callers, so that it cannot be inlined. 
*/ static noinline int do_test_wp_bit(void) { char tmp_reg; int flag; __asm__ __volatile__( " movb %0, %1 \n" "1: movb %1, %0 \n" " xorl %2, %2 \n" "2: \n" _ASM_EXTABLE(1b,2b) :"=m" (*(char *)fix_to_virt(FIX_WP_TEST)), "=q" (tmp_reg), "=r" (flag) :"2" (1) :"memory"); return flag; } #ifdef CONFIG_DEBUG_RODATA const int rodata_test_data = 0xC3; EXPORT_SYMBOL_GPL(rodata_test_data); int kernel_set_to_readonly __read_mostly; void set_kernel_text_rw(void) { unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read write\n", start, start+size); set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); } void set_kernel_text_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; if (!kernel_set_to_readonly) return; pr_debug("Set kernel text: %lx - %lx for read only\n", start, start+size); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); } static void mark_nxdata_nx(void) { /* * When this called, init has already been executed and released, * so everything past _etext should be NX. */ unsigned long start = PFN_ALIGN(_etext); /* * This comes from is_kernel_text upper limit. 
Also HPAGE where used: */ unsigned long size = (((unsigned long)__init_end + HPAGE_SIZE) & HPAGE_MASK) - start; if (__supported_pte_mask & _PAGE_NX) printk(KERN_INFO "NX-protecting the kernel data: %luk\n", size >> 10); set_pages_nx(virt_to_page(start), size >> PAGE_SHIFT); } void mark_rodata_ro(void) { unsigned long start = PFN_ALIGN(_text); unsigned long size = PFN_ALIGN(_etext) - start; set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Write protecting the kernel text: %luk\n", size >> 10); kernel_set_to_readonly = 1; #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: Reverting %lx-%lx\n", start, start+size); set_pages_rw(virt_to_page(start), size>>PAGE_SHIFT); printk(KERN_INFO "Testing CPA: write protecting again\n"); set_pages_ro(virt_to_page(start), size>>PAGE_SHIFT); #endif start += size; size = (unsigned long)__end_rodata - start; set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", size >> 10); rodata_test(); #ifdef CONFIG_CPA_DEBUG printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, start + size); set_pages_rw(virt_to_page(start), size >> PAGE_SHIFT); printk(KERN_INFO "Testing CPA: write protecting again\n"); set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); #endif mark_nxdata_nx(); } #endif
gpl-2.0
davidmueller13/flo_kernel
net/batman-adv/hard-interface.c
4810
17306
/* * Copyright (C) 2007-2012 B.A.T.M.A.N. contributors: * * Marek Lindner, Simon Wunderlich * * This program is free software; you can redistribute it and/or * modify it under the terms of version 2 of the GNU General Public * License as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA * */ #include "main.h" #include "hard-interface.h" #include "soft-interface.h" #include "send.h" #include "translation-table.h" #include "routing.h" #include "bat_sysfs.h" #include "originator.h" #include "hash.h" #include <linux/if_arp.h> static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev); void hardif_free_rcu(struct rcu_head *rcu) { struct hard_iface *hard_iface; hard_iface = container_of(rcu, struct hard_iface, rcu); dev_put(hard_iface->net_dev); kfree(hard_iface); } struct hard_iface *hardif_get_by_netdev(const struct net_device *net_dev) { struct hard_iface *hard_iface; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if (hard_iface->net_dev == net_dev && atomic_inc_not_zero(&hard_iface->refcount)) goto out; } hard_iface = NULL; out: rcu_read_unlock(); return hard_iface; } static int is_valid_iface(const struct net_device *net_dev) { if (net_dev->flags & IFF_LOOPBACK) return 0; if (net_dev->type != ARPHRD_ETHER) return 0; if (net_dev->addr_len != ETH_ALEN) return 0; /* no batman over batman */ if (softif_is_valid(net_dev)) return 0; /* Device is being bridged */ /* if (net_dev->priv_flags & IFF_BRIDGE_PORT) return 0; */ return 1; } 
static struct hard_iface *hardif_get_active(const struct net_device *soft_iface) { struct hard_iface *hard_iface; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if (hard_iface->soft_iface != soft_iface) continue; if (hard_iface->if_status == IF_ACTIVE && atomic_inc_not_zero(&hard_iface->refcount)) goto out; } hard_iface = NULL; out: rcu_read_unlock(); return hard_iface; } static void primary_if_update_addr(struct bat_priv *bat_priv) { struct vis_packet *vis_packet; struct hard_iface *primary_if; primary_if = primary_if_get_selected(bat_priv); if (!primary_if) goto out; vis_packet = (struct vis_packet *) bat_priv->my_vis_info->skb_packet->data; memcpy(vis_packet->vis_orig, primary_if->net_dev->dev_addr, ETH_ALEN); memcpy(vis_packet->sender_orig, primary_if->net_dev->dev_addr, ETH_ALEN); out: if (primary_if) hardif_free_ref(primary_if); } static void primary_if_select(struct bat_priv *bat_priv, struct hard_iface *new_hard_iface) { struct hard_iface *curr_hard_iface; ASSERT_RTNL(); if (new_hard_iface && !atomic_inc_not_zero(&new_hard_iface->refcount)) new_hard_iface = NULL; curr_hard_iface = rcu_dereference_protected(bat_priv->primary_if, 1); rcu_assign_pointer(bat_priv->primary_if, new_hard_iface); if (curr_hard_iface) hardif_free_ref(curr_hard_iface); if (!new_hard_iface) return; bat_priv->bat_algo_ops->bat_ogm_init_primary(new_hard_iface); primary_if_update_addr(bat_priv); } static bool hardif_is_iface_up(const struct hard_iface *hard_iface) { if (hard_iface->net_dev->flags & IFF_UP) return true; return false; } static void check_known_mac_addr(const struct net_device *net_dev) { const struct hard_iface *hard_iface; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if ((hard_iface->if_status != IF_ACTIVE) && (hard_iface->if_status != IF_TO_BE_ACTIVATED)) continue; if (hard_iface->net_dev == net_dev) continue; if (!compare_eth(hard_iface->net_dev->dev_addr, net_dev->dev_addr)) continue; pr_warning("The newly added 
mac address (%pM) already exists on: %s\n", net_dev->dev_addr, hard_iface->net_dev->name); pr_warning("It is strongly recommended to keep mac addresses unique to avoid problems!\n"); } rcu_read_unlock(); } int hardif_min_mtu(struct net_device *soft_iface) { const struct bat_priv *bat_priv = netdev_priv(soft_iface); const struct hard_iface *hard_iface; /* allow big frames if all devices are capable to do so * (have MTU > 1500 + BAT_HEADER_LEN) */ int min_mtu = ETH_DATA_LEN; if (atomic_read(&bat_priv->fragmentation)) goto out; rcu_read_lock(); list_for_each_entry_rcu(hard_iface, &hardif_list, list) { if ((hard_iface->if_status != IF_ACTIVE) && (hard_iface->if_status != IF_TO_BE_ACTIVATED)) continue; if (hard_iface->soft_iface != soft_iface) continue; min_mtu = min_t(int, hard_iface->net_dev->mtu - BAT_HEADER_LEN, min_mtu); } rcu_read_unlock(); out: return min_mtu; } /* adjusts the MTU if a new interface with a smaller MTU appeared. */ void update_min_mtu(struct net_device *soft_iface) { int min_mtu; min_mtu = hardif_min_mtu(soft_iface); if (soft_iface->mtu != min_mtu) soft_iface->mtu = min_mtu; } static void hardif_activate_interface(struct hard_iface *hard_iface) { struct bat_priv *bat_priv; struct hard_iface *primary_if = NULL; if (hard_iface->if_status != IF_INACTIVE) goto out; bat_priv = netdev_priv(hard_iface->soft_iface); bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); hard_iface->if_status = IF_TO_BE_ACTIVATED; /** * the first active interface becomes our primary interface or * the next active interface after the old primary interface was removed */ primary_if = primary_if_get_selected(bat_priv); if (!primary_if) primary_if_select(bat_priv, hard_iface); bat_info(hard_iface->soft_iface, "Interface activated: %s\n", hard_iface->net_dev->name); update_min_mtu(hard_iface->soft_iface); out: if (primary_if) hardif_free_ref(primary_if); } static void hardif_deactivate_interface(struct hard_iface *hard_iface) { if ((hard_iface->if_status != IF_ACTIVE) && 
(hard_iface->if_status != IF_TO_BE_ACTIVATED)) return; hard_iface->if_status = IF_INACTIVE; bat_info(hard_iface->soft_iface, "Interface deactivated: %s\n", hard_iface->net_dev->name); update_min_mtu(hard_iface->soft_iface); } int hardif_enable_interface(struct hard_iface *hard_iface, const char *iface_name) { struct bat_priv *bat_priv; struct net_device *soft_iface; int ret; if (hard_iface->if_status != IF_NOT_IN_USE) goto out; if (!atomic_inc_not_zero(&hard_iface->refcount)) goto out; /* hard-interface is part of a bridge */ if (hard_iface->net_dev->priv_flags & IFF_BRIDGE_PORT) pr_err("You are about to enable batman-adv on '%s' which already is part of a bridge. Unless you know exactly what you are doing this is probably wrong and won't work the way you think it would.\n", hard_iface->net_dev->name); soft_iface = dev_get_by_name(&init_net, iface_name); if (!soft_iface) { soft_iface = softif_create(iface_name); if (!soft_iface) { ret = -ENOMEM; goto err; } /* dev_get_by_name() increases the reference counter for us */ dev_hold(soft_iface); } if (!softif_is_valid(soft_iface)) { pr_err("Can't create batman mesh interface %s: already exists as regular interface\n", soft_iface->name); dev_put(soft_iface); ret = -EINVAL; goto err; } hard_iface->soft_iface = soft_iface; bat_priv = netdev_priv(hard_iface->soft_iface); bat_priv->bat_algo_ops->bat_ogm_init(hard_iface); if (!hard_iface->packet_buff) { bat_err(hard_iface->soft_iface, "Can't add interface packet (%s): out of memory\n", hard_iface->net_dev->name); ret = -ENOMEM; goto err; } hard_iface->if_num = bat_priv->num_ifaces; bat_priv->num_ifaces++; hard_iface->if_status = IF_INACTIVE; orig_hash_add_if(hard_iface, bat_priv->num_ifaces); hard_iface->batman_adv_ptype.type = __constant_htons(ETH_P_BATMAN); hard_iface->batman_adv_ptype.func = batman_skb_recv; hard_iface->batman_adv_ptype.dev = hard_iface->net_dev; dev_add_pack(&hard_iface->batman_adv_ptype); atomic_set(&hard_iface->seqno, 1); 
atomic_set(&hard_iface->frag_seqno, 1); bat_info(hard_iface->soft_iface, "Adding interface: %s\n", hard_iface->net_dev->name); if (atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < ETH_DATA_LEN + BAT_HEADER_LEN) bat_info(hard_iface->soft_iface, "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. Packets going over this interface will be fragmented on layer2 which could impact the performance. Setting the MTU to %zi would solve the problem.\n", hard_iface->net_dev->name, hard_iface->net_dev->mtu, ETH_DATA_LEN + BAT_HEADER_LEN); if (!atomic_read(&bat_priv->fragmentation) && hard_iface->net_dev->mtu < ETH_DATA_LEN + BAT_HEADER_LEN) bat_info(hard_iface->soft_iface, "The MTU of interface %s is too small (%i) to handle the transport of batman-adv packets. If you experience problems getting traffic through try increasing the MTU to %zi.\n", hard_iface->net_dev->name, hard_iface->net_dev->mtu, ETH_DATA_LEN + BAT_HEADER_LEN); if (hardif_is_iface_up(hard_iface)) hardif_activate_interface(hard_iface); else bat_err(hard_iface->soft_iface, "Not using interface %s (retrying later): interface not active\n", hard_iface->net_dev->name); /* begin scheduling originator messages on that interface */ schedule_bat_ogm(hard_iface); out: return 0; err: hardif_free_ref(hard_iface); return ret; } void hardif_disable_interface(struct hard_iface *hard_iface) { struct bat_priv *bat_priv = netdev_priv(hard_iface->soft_iface); struct hard_iface *primary_if = NULL; if (hard_iface->if_status == IF_ACTIVE) hardif_deactivate_interface(hard_iface); if (hard_iface->if_status != IF_INACTIVE) goto out; bat_info(hard_iface->soft_iface, "Removing interface: %s\n", hard_iface->net_dev->name); dev_remove_pack(&hard_iface->batman_adv_ptype); bat_priv->num_ifaces--; orig_hash_del_if(hard_iface, bat_priv->num_ifaces); primary_if = primary_if_get_selected(bat_priv); if (hard_iface == primary_if) { struct hard_iface *new_if; new_if = 
hardif_get_active(hard_iface->soft_iface); primary_if_select(bat_priv, new_if); if (new_if) hardif_free_ref(new_if); } kfree(hard_iface->packet_buff); hard_iface->packet_buff = NULL; hard_iface->if_status = IF_NOT_IN_USE; /* delete all references to this hard_iface */ purge_orig_ref(bat_priv); purge_outstanding_packets(bat_priv, hard_iface); dev_put(hard_iface->soft_iface); /* nobody uses this interface anymore */ if (!bat_priv->num_ifaces) softif_destroy(hard_iface->soft_iface); hard_iface->soft_iface = NULL; hardif_free_ref(hard_iface); out: if (primary_if) hardif_free_ref(primary_if); } static struct hard_iface *hardif_add_interface(struct net_device *net_dev) { struct hard_iface *hard_iface; int ret; ASSERT_RTNL(); ret = is_valid_iface(net_dev); if (ret != 1) goto out; dev_hold(net_dev); hard_iface = kmalloc(sizeof(*hard_iface), GFP_ATOMIC); if (!hard_iface) goto release_dev; ret = sysfs_add_hardif(&hard_iface->hardif_obj, net_dev); if (ret) goto free_if; hard_iface->if_num = -1; hard_iface->net_dev = net_dev; hard_iface->soft_iface = NULL; hard_iface->if_status = IF_NOT_IN_USE; INIT_LIST_HEAD(&hard_iface->list); /* extra reference for return */ atomic_set(&hard_iface->refcount, 2); check_known_mac_addr(hard_iface->net_dev); list_add_tail_rcu(&hard_iface->list, &hardif_list); return hard_iface; free_if: kfree(hard_iface); release_dev: dev_put(net_dev); out: return NULL; } static void hardif_remove_interface(struct hard_iface *hard_iface) { ASSERT_RTNL(); /* first deactivate interface */ if (hard_iface->if_status != IF_NOT_IN_USE) hardif_disable_interface(hard_iface); if (hard_iface->if_status != IF_NOT_IN_USE) return; hard_iface->if_status = IF_TO_BE_REMOVED; sysfs_del_hardif(&hard_iface->hardif_obj); hardif_free_ref(hard_iface); } void hardif_remove_interfaces(void) { struct hard_iface *hard_iface, *hard_iface_tmp; rtnl_lock(); list_for_each_entry_safe(hard_iface, hard_iface_tmp, &hardif_list, list) { list_del_rcu(&hard_iface->list); 
hardif_remove_interface(hard_iface); } rtnl_unlock(); } static int hard_if_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *net_dev = ptr; struct hard_iface *hard_iface = hardif_get_by_netdev(net_dev); struct hard_iface *primary_if = NULL; struct bat_priv *bat_priv; if (!hard_iface && event == NETDEV_REGISTER) hard_iface = hardif_add_interface(net_dev); if (!hard_iface) goto out; switch (event) { case NETDEV_UP: hardif_activate_interface(hard_iface); break; case NETDEV_GOING_DOWN: case NETDEV_DOWN: hardif_deactivate_interface(hard_iface); break; case NETDEV_UNREGISTER: list_del_rcu(&hard_iface->list); hardif_remove_interface(hard_iface); break; case NETDEV_CHANGEMTU: if (hard_iface->soft_iface) update_min_mtu(hard_iface->soft_iface); break; case NETDEV_CHANGEADDR: if (hard_iface->if_status == IF_NOT_IN_USE) goto hardif_put; check_known_mac_addr(hard_iface->net_dev); bat_priv = netdev_priv(hard_iface->soft_iface); bat_priv->bat_algo_ops->bat_ogm_update_mac(hard_iface); primary_if = primary_if_get_selected(bat_priv); if (!primary_if) goto hardif_put; if (hard_iface == primary_if) primary_if_update_addr(bat_priv); break; default: break; } hardif_put: hardif_free_ref(hard_iface); out: if (primary_if) hardif_free_ref(primary_if); return NOTIFY_DONE; } /* incoming packets with the batman ethertype received on any active hard * interface */ static int batman_skb_recv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { struct bat_priv *bat_priv; struct batman_ogm_packet *batman_ogm_packet; struct hard_iface *hard_iface; int ret; hard_iface = container_of(ptype, struct hard_iface, batman_adv_ptype); skb = skb_share_check(skb, GFP_ATOMIC); /* skb was released by skb_share_check() */ if (!skb) goto err_out; /* packet should hold at least type and version */ if (unlikely(!pskb_may_pull(skb, 2))) goto err_free; /* expect a valid ethernet header here. 
*/ if (unlikely(skb->mac_len != sizeof(struct ethhdr) || !skb_mac_header(skb))) goto err_free; if (!hard_iface->soft_iface) goto err_free; bat_priv = netdev_priv(hard_iface->soft_iface); if (atomic_read(&bat_priv->mesh_state) != MESH_ACTIVE) goto err_free; /* discard frames on not active interfaces */ if (hard_iface->if_status != IF_ACTIVE) goto err_free; batman_ogm_packet = (struct batman_ogm_packet *)skb->data; if (batman_ogm_packet->header.version != COMPAT_VERSION) { bat_dbg(DBG_BATMAN, bat_priv, "Drop packet: incompatible batman version (%i)\n", batman_ogm_packet->header.version); goto err_free; } /* all receive handlers return whether they received or reused * the supplied skb. if not, we have to free the skb. */ switch (batman_ogm_packet->header.packet_type) { /* batman originator packet */ case BAT_OGM: ret = recv_bat_ogm_packet(skb, hard_iface); break; /* batman icmp packet */ case BAT_ICMP: ret = recv_icmp_packet(skb, hard_iface); break; /* unicast packet */ case BAT_UNICAST: ret = recv_unicast_packet(skb, hard_iface); break; /* fragmented unicast packet */ case BAT_UNICAST_FRAG: ret = recv_ucast_frag_packet(skb, hard_iface); break; /* broadcast packet */ case BAT_BCAST: ret = recv_bcast_packet(skb, hard_iface); break; /* vis packet */ case BAT_VIS: ret = recv_vis_packet(skb, hard_iface); break; /* Translation table query (request or response) */ case BAT_TT_QUERY: ret = recv_tt_query(skb, hard_iface); break; /* Roaming advertisement */ case BAT_ROAM_ADV: ret = recv_roam_adv(skb, hard_iface); break; default: ret = NET_RX_DROP; } if (ret == NET_RX_DROP) kfree_skb(skb); /* return NET_RX_SUCCESS in any case as we * most probably dropped the packet for * routing-logical reasons. 
*/ return NET_RX_SUCCESS; err_free: kfree_skb(skb); err_out: return NET_RX_DROP; } /* This function returns true if the interface represented by ifindex is a * 802.11 wireless device */ bool is_wifi_iface(int ifindex) { struct net_device *net_device = NULL; bool ret = false; if (ifindex == NULL_IFINDEX) goto out; net_device = dev_get_by_index(&init_net, ifindex); if (!net_device) goto out; #ifdef CONFIG_WIRELESS_EXT /* pre-cfg80211 drivers have to implement WEXT, so it is possible to * check for wireless_handlers != NULL */ if (net_device->wireless_handlers) ret = true; else #endif /* cfg80211 drivers have to set ieee80211_ptr */ if (net_device->ieee80211_ptr) ret = true; out: if (net_device) dev_put(net_device); return ret; } struct notifier_block hard_if_notifier = { .notifier_call = hard_if_event, };
gpl-2.0
gproj-m/lge-kernel-gproj
drivers/hid/usbhid/hid-pidff.c
8394
37000
/* * Force feedback driver for USB HID PID compliant devices * * Copyright (c) 2005, 2006 Anssi Hannula <anssi.hannula@gmail.com> */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* #define DEBUG */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/input.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/hid.h> #include "usbhid.h" #define PID_EFFECTS_MAX 64 /* Report usage table used to put reports into an array */ #define PID_SET_EFFECT 0 #define PID_EFFECT_OPERATION 1 #define PID_DEVICE_GAIN 2 #define PID_POOL 3 #define PID_BLOCK_LOAD 4 #define PID_BLOCK_FREE 5 #define PID_DEVICE_CONTROL 6 #define PID_CREATE_NEW_EFFECT 7 #define PID_REQUIRED_REPORTS 7 #define PID_SET_ENVELOPE 8 #define PID_SET_CONDITION 9 #define PID_SET_PERIODIC 10 #define PID_SET_CONSTANT 11 #define PID_SET_RAMP 12 static const u8 pidff_reports[] = { 0x21, 0x77, 0x7d, 0x7f, 0x89, 0x90, 0x96, 0xab, 0x5a, 0x5f, 0x6e, 0x73, 0x74 }; /* device_control is really 0x95, but 0x96 specified as it is the usage of the only field in that report */ /* Value usage tables used to put fields and values into arrays */ #define PID_EFFECT_BLOCK_INDEX 0 #define PID_DURATION 1 #define PID_GAIN 2 #define PID_TRIGGER_BUTTON 3 #define PID_TRIGGER_REPEAT_INT 4 #define PID_DIRECTION_ENABLE 5 #define PID_START_DELAY 6 static const u8 pidff_set_effect[] 
= { 0x22, 0x50, 0x52, 0x53, 0x54, 0x56, 0xa7 }; #define PID_ATTACK_LEVEL 1 #define PID_ATTACK_TIME 2 #define PID_FADE_LEVEL 3 #define PID_FADE_TIME 4 static const u8 pidff_set_envelope[] = { 0x22, 0x5b, 0x5c, 0x5d, 0x5e }; #define PID_PARAM_BLOCK_OFFSET 1 #define PID_CP_OFFSET 2 #define PID_POS_COEFFICIENT 3 #define PID_NEG_COEFFICIENT 4 #define PID_POS_SATURATION 5 #define PID_NEG_SATURATION 6 #define PID_DEAD_BAND 7 static const u8 pidff_set_condition[] = { 0x22, 0x23, 0x60, 0x61, 0x62, 0x63, 0x64, 0x65 }; #define PID_MAGNITUDE 1 #define PID_OFFSET 2 #define PID_PHASE 3 #define PID_PERIOD 4 static const u8 pidff_set_periodic[] = { 0x22, 0x70, 0x6f, 0x71, 0x72 }; static const u8 pidff_set_constant[] = { 0x22, 0x70 }; #define PID_RAMP_START 1 #define PID_RAMP_END 2 static const u8 pidff_set_ramp[] = { 0x22, 0x75, 0x76 }; #define PID_RAM_POOL_AVAILABLE 1 static const u8 pidff_block_load[] = { 0x22, 0xac }; #define PID_LOOP_COUNT 1 static const u8 pidff_effect_operation[] = { 0x22, 0x7c }; static const u8 pidff_block_free[] = { 0x22 }; #define PID_DEVICE_GAIN_FIELD 0 static const u8 pidff_device_gain[] = { 0x7e }; #define PID_RAM_POOL_SIZE 0 #define PID_SIMULTANEOUS_MAX 1 #define PID_DEVICE_MANAGED_POOL 2 static const u8 pidff_pool[] = { 0x80, 0x83, 0xa9 }; /* Special field key tables used to put special field keys into arrays */ #define PID_ENABLE_ACTUATORS 0 #define PID_RESET 1 static const u8 pidff_device_control[] = { 0x97, 0x9a }; #define PID_CONSTANT 0 #define PID_RAMP 1 #define PID_SQUARE 2 #define PID_SINE 3 #define PID_TRIANGLE 4 #define PID_SAW_UP 5 #define PID_SAW_DOWN 6 #define PID_SPRING 7 #define PID_DAMPER 8 #define PID_INERTIA 9 #define PID_FRICTION 10 static const u8 pidff_effect_types[] = { 0x26, 0x27, 0x30, 0x31, 0x32, 0x33, 0x34, 0x40, 0x41, 0x42, 0x43 }; #define PID_BLOCK_LOAD_SUCCESS 0 #define PID_BLOCK_LOAD_FULL 1 static const u8 pidff_block_load_status[] = { 0x8c, 0x8d }; #define PID_EFFECT_START 0 #define PID_EFFECT_STOP 1 static const u8 
pidff_effect_operation_status[] = { 0x79, 0x7b }; struct pidff_usage { struct hid_field *field; s32 *value; }; struct pidff_device { struct hid_device *hid; struct hid_report *reports[sizeof(pidff_reports)]; struct pidff_usage set_effect[sizeof(pidff_set_effect)]; struct pidff_usage set_envelope[sizeof(pidff_set_envelope)]; struct pidff_usage set_condition[sizeof(pidff_set_condition)]; struct pidff_usage set_periodic[sizeof(pidff_set_periodic)]; struct pidff_usage set_constant[sizeof(pidff_set_constant)]; struct pidff_usage set_ramp[sizeof(pidff_set_ramp)]; struct pidff_usage device_gain[sizeof(pidff_device_gain)]; struct pidff_usage block_load[sizeof(pidff_block_load)]; struct pidff_usage pool[sizeof(pidff_pool)]; struct pidff_usage effect_operation[sizeof(pidff_effect_operation)]; struct pidff_usage block_free[sizeof(pidff_block_free)]; /* Special field is a field that is not composed of usage<->value pairs that pidff_usage values are */ /* Special field in create_new_effect */ struct hid_field *create_new_effect_type; /* Special fields in set_effect */ struct hid_field *set_effect_type; struct hid_field *effect_direction; /* Special field in device_control */ struct hid_field *device_control; /* Special field in block_load */ struct hid_field *block_load_status; /* Special field in effect_operation */ struct hid_field *effect_operation_status; int control_id[sizeof(pidff_device_control)]; int type_id[sizeof(pidff_effect_types)]; int status_id[sizeof(pidff_block_load_status)]; int operation_id[sizeof(pidff_effect_operation_status)]; int pid_id[PID_EFFECTS_MAX]; }; /* * Scale an unsigned value with range 0..max for the given field */ static int pidff_rescale(int i, int max, struct hid_field *field) { return i * (field->logical_maximum - field->logical_minimum) / max + field->logical_minimum; } /* * Scale a signed value in range -0x8000..0x7fff for the given field */ static int pidff_rescale_signed(int i, struct hid_field *field) { return i == 0 ? 0 : i > 0 ? 
i * field->logical_maximum / 0x7fff : i * field->logical_minimum / -0x8000; } static void pidff_set(struct pidff_usage *usage, u16 value) { usage->value[0] = pidff_rescale(value, 0xffff, usage->field); pr_debug("calculated from %d to %d\n", value, usage->value[0]); } static void pidff_set_signed(struct pidff_usage *usage, s16 value) { if (usage->field->logical_minimum < 0) usage->value[0] = pidff_rescale_signed(value, usage->field); else { if (value < 0) usage->value[0] = pidff_rescale(-value, 0x8000, usage->field); else usage->value[0] = pidff_rescale(value, 0x7fff, usage->field); } pr_debug("calculated from %d to %d\n", value, usage->value[0]); } /* * Send envelope report to the device */ static void pidff_set_envelope_report(struct pidff_device *pidff, struct ff_envelope *envelope) { pidff->set_envelope[PID_EFFECT_BLOCK_INDEX].value[0] = pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; pidff->set_envelope[PID_ATTACK_LEVEL].value[0] = pidff_rescale(envelope->attack_level > 0x7fff ? 0x7fff : envelope->attack_level, 0x7fff, pidff->set_envelope[PID_ATTACK_LEVEL].field); pidff->set_envelope[PID_FADE_LEVEL].value[0] = pidff_rescale(envelope->fade_level > 0x7fff ? 
0x7fff : envelope->fade_level, 0x7fff, pidff->set_envelope[PID_FADE_LEVEL].field); pidff->set_envelope[PID_ATTACK_TIME].value[0] = envelope->attack_length; pidff->set_envelope[PID_FADE_TIME].value[0] = envelope->fade_length; hid_dbg(pidff->hid, "attack %u => %d\n", envelope->attack_level, pidff->set_envelope[PID_ATTACK_LEVEL].value[0]); usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_ENVELOPE], USB_DIR_OUT); } /* * Test if the new envelope differs from old one */ static int pidff_needs_set_envelope(struct ff_envelope *envelope, struct ff_envelope *old) { return envelope->attack_level != old->attack_level || envelope->fade_level != old->fade_level || envelope->attack_length != old->attack_length || envelope->fade_length != old->fade_length; } /* * Send constant force report to the device */ static void pidff_set_constant_force_report(struct pidff_device *pidff, struct ff_effect *effect) { pidff->set_constant[PID_EFFECT_BLOCK_INDEX].value[0] = pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; pidff_set_signed(&pidff->set_constant[PID_MAGNITUDE], effect->u.constant.level); usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_CONSTANT], USB_DIR_OUT); } /* * Test if the constant parameters have changed between effects */ static int pidff_needs_set_constant(struct ff_effect *effect, struct ff_effect *old) { return effect->u.constant.level != old->u.constant.level; } /* * Send set effect report to the device */ static void pidff_set_effect_report(struct pidff_device *pidff, struct ff_effect *effect) { pidff->set_effect[PID_EFFECT_BLOCK_INDEX].value[0] = pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; pidff->set_effect_type->value[0] = pidff->create_new_effect_type->value[0]; pidff->set_effect[PID_DURATION].value[0] = effect->replay.length; pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = effect->trigger.button; pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = effect->trigger.interval; pidff->set_effect[PID_GAIN].value[0] = 
pidff->set_effect[PID_GAIN].field->logical_maximum; pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1; pidff->effect_direction->value[0] = pidff_rescale(effect->direction, 0xffff, pidff->effect_direction); pidff->set_effect[PID_START_DELAY].value[0] = effect->replay.delay; usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_EFFECT], USB_DIR_OUT); } /* * Test if the values used in set_effect have changed */ static int pidff_needs_set_effect(struct ff_effect *effect, struct ff_effect *old) { return effect->replay.length != old->replay.length || effect->trigger.interval != old->trigger.interval || effect->trigger.button != old->trigger.button || effect->direction != old->direction || effect->replay.delay != old->replay.delay; } /* * Send periodic effect report to the device */ static void pidff_set_periodic_report(struct pidff_device *pidff, struct ff_effect *effect) { pidff->set_periodic[PID_EFFECT_BLOCK_INDEX].value[0] = pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; pidff_set_signed(&pidff->set_periodic[PID_MAGNITUDE], effect->u.periodic.magnitude); pidff_set_signed(&pidff->set_periodic[PID_OFFSET], effect->u.periodic.offset); pidff_set(&pidff->set_periodic[PID_PHASE], effect->u.periodic.phase); pidff->set_periodic[PID_PERIOD].value[0] = effect->u.periodic.period; usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_PERIODIC], USB_DIR_OUT); } /* * Test if periodic effect parameters have changed */ static int pidff_needs_set_periodic(struct ff_effect *effect, struct ff_effect *old) { return effect->u.periodic.magnitude != old->u.periodic.magnitude || effect->u.periodic.offset != old->u.periodic.offset || effect->u.periodic.phase != old->u.periodic.phase || effect->u.periodic.period != old->u.periodic.period; } /* * Send condition effect reports to the device */ static void pidff_set_condition_report(struct pidff_device *pidff, struct ff_effect *effect) { int i; pidff->set_condition[PID_EFFECT_BLOCK_INDEX].value[0] = 
pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; for (i = 0; i < 2; i++) { pidff->set_condition[PID_PARAM_BLOCK_OFFSET].value[0] = i; pidff_set_signed(&pidff->set_condition[PID_CP_OFFSET], effect->u.condition[i].center); pidff_set_signed(&pidff->set_condition[PID_POS_COEFFICIENT], effect->u.condition[i].right_coeff); pidff_set_signed(&pidff->set_condition[PID_NEG_COEFFICIENT], effect->u.condition[i].left_coeff); pidff_set(&pidff->set_condition[PID_POS_SATURATION], effect->u.condition[i].right_saturation); pidff_set(&pidff->set_condition[PID_NEG_SATURATION], effect->u.condition[i].left_saturation); pidff_set(&pidff->set_condition[PID_DEAD_BAND], effect->u.condition[i].deadband); usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_CONDITION], USB_DIR_OUT); } } /* * Test if condition effect parameters have changed */ static int pidff_needs_set_condition(struct ff_effect *effect, struct ff_effect *old) { int i; int ret = 0; for (i = 0; i < 2; i++) { struct ff_condition_effect *cond = &effect->u.condition[i]; struct ff_condition_effect *old_cond = &old->u.condition[i]; ret |= cond->center != old_cond->center || cond->right_coeff != old_cond->right_coeff || cond->left_coeff != old_cond->left_coeff || cond->right_saturation != old_cond->right_saturation || cond->left_saturation != old_cond->left_saturation || cond->deadband != old_cond->deadband; } return ret; } /* * Send ramp force report to the device */ static void pidff_set_ramp_force_report(struct pidff_device *pidff, struct ff_effect *effect) { pidff->set_ramp[PID_EFFECT_BLOCK_INDEX].value[0] = pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; pidff_set_signed(&pidff->set_ramp[PID_RAMP_START], effect->u.ramp.start_level); pidff_set_signed(&pidff->set_ramp[PID_RAMP_END], effect->u.ramp.end_level); usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_RAMP], USB_DIR_OUT); } /* * Test if ramp force parameters have changed */ static int pidff_needs_set_ramp(struct ff_effect *effect, struct ff_effect *old) { 
return effect->u.ramp.start_level != old->u.ramp.start_level || effect->u.ramp.end_level != old->u.ramp.end_level; } /* * Send a request for effect upload to the device * * Returns 0 if device reported success, -ENOSPC if the device reported memory * is full. Upon unknown response the function will retry for 60 times, if * still unsuccessful -EIO is returned. */ static int pidff_request_effect_upload(struct pidff_device *pidff, int efnum) { int j; pidff->create_new_effect_type->value[0] = efnum; usbhid_submit_report(pidff->hid, pidff->reports[PID_CREATE_NEW_EFFECT], USB_DIR_OUT); hid_dbg(pidff->hid, "create_new_effect sent, type: %d\n", efnum); pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] = 0; pidff->block_load_status->value[0] = 0; usbhid_wait_io(pidff->hid); for (j = 0; j < 60; j++) { hid_dbg(pidff->hid, "pid_block_load requested\n"); usbhid_submit_report(pidff->hid, pidff->reports[PID_BLOCK_LOAD], USB_DIR_IN); usbhid_wait_io(pidff->hid); if (pidff->block_load_status->value[0] == pidff->status_id[PID_BLOCK_LOAD_SUCCESS]) { hid_dbg(pidff->hid, "device reported free memory: %d bytes\n", pidff->block_load[PID_RAM_POOL_AVAILABLE].value ? pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1); return 0; } if (pidff->block_load_status->value[0] == pidff->status_id[PID_BLOCK_LOAD_FULL]) { hid_dbg(pidff->hid, "not enough memory free: %d bytes\n", pidff->block_load[PID_RAM_POOL_AVAILABLE].value ? 
pidff->block_load[PID_RAM_POOL_AVAILABLE].value[0] : -1); return -ENOSPC; } } hid_err(pidff->hid, "pid_block_load failed 60 times\n"); return -EIO; } /* * Play the effect with PID id n times */ static void pidff_playback_pid(struct pidff_device *pidff, int pid_id, int n) { pidff->effect_operation[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id; if (n == 0) { pidff->effect_operation_status->value[0] = pidff->operation_id[PID_EFFECT_STOP]; } else { pidff->effect_operation_status->value[0] = pidff->operation_id[PID_EFFECT_START]; pidff->effect_operation[PID_LOOP_COUNT].value[0] = n; } usbhid_submit_report(pidff->hid, pidff->reports[PID_EFFECT_OPERATION], USB_DIR_OUT); } /** * Play the effect with effect id @effect_id for @value times */ static int pidff_playback(struct input_dev *dev, int effect_id, int value) { struct pidff_device *pidff = dev->ff->private; pidff_playback_pid(pidff, pidff->pid_id[effect_id], value); return 0; } /* * Erase effect with PID id */ static void pidff_erase_pid(struct pidff_device *pidff, int pid_id) { pidff->block_free[PID_EFFECT_BLOCK_INDEX].value[0] = pid_id; usbhid_submit_report(pidff->hid, pidff->reports[PID_BLOCK_FREE], USB_DIR_OUT); } /* * Stop and erase effect with effect_id */ static int pidff_erase_effect(struct input_dev *dev, int effect_id) { struct pidff_device *pidff = dev->ff->private; int pid_id = pidff->pid_id[effect_id]; hid_dbg(pidff->hid, "starting to erase %d/%d\n", effect_id, pidff->pid_id[effect_id]); /* Wait for the queue to clear. We do not want a full fifo to prevent the effect removal. 
*/ usbhid_wait_io(pidff->hid); pidff_playback_pid(pidff, pid_id, 0); pidff_erase_pid(pidff, pid_id); return 0; } /* * Effect upload handler */ static int pidff_upload_effect(struct input_dev *dev, struct ff_effect *effect, struct ff_effect *old) { struct pidff_device *pidff = dev->ff->private; int type_id; int error; switch (effect->type) { case FF_CONSTANT: if (!old) { error = pidff_request_effect_upload(pidff, pidff->type_id[PID_CONSTANT]); if (error) return error; } if (!old || pidff_needs_set_effect(effect, old)) pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_constant(effect, old)) pidff_set_constant_force_report(pidff, effect); if (!old || pidff_needs_set_envelope(&effect->u.constant.envelope, &old->u.constant.envelope)) pidff_set_envelope_report(pidff, &effect->u.constant.envelope); break; case FF_PERIODIC: if (!old) { switch (effect->u.periodic.waveform) { case FF_SQUARE: type_id = PID_SQUARE; break; case FF_TRIANGLE: type_id = PID_TRIANGLE; break; case FF_SINE: type_id = PID_SINE; break; case FF_SAW_UP: type_id = PID_SAW_UP; break; case FF_SAW_DOWN: type_id = PID_SAW_DOWN; break; default: hid_err(pidff->hid, "invalid waveform\n"); return -EINVAL; } error = pidff_request_effect_upload(pidff, pidff->type_id[type_id]); if (error) return error; } if (!old || pidff_needs_set_effect(effect, old)) pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_periodic(effect, old)) pidff_set_periodic_report(pidff, effect); if (!old || pidff_needs_set_envelope(&effect->u.periodic.envelope, &old->u.periodic.envelope)) pidff_set_envelope_report(pidff, &effect->u.periodic.envelope); break; case FF_RAMP: if (!old) { error = pidff_request_effect_upload(pidff, pidff->type_id[PID_RAMP]); if (error) return error; } if (!old || pidff_needs_set_effect(effect, old)) pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_ramp(effect, old)) pidff_set_ramp_force_report(pidff, effect); if (!old || 
pidff_needs_set_envelope(&effect->u.ramp.envelope, &old->u.ramp.envelope)) pidff_set_envelope_report(pidff, &effect->u.ramp.envelope); break; case FF_SPRING: if (!old) { error = pidff_request_effect_upload(pidff, pidff->type_id[PID_SPRING]); if (error) return error; } if (!old || pidff_needs_set_effect(effect, old)) pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_condition(effect, old)) pidff_set_condition_report(pidff, effect); break; case FF_FRICTION: if (!old) { error = pidff_request_effect_upload(pidff, pidff->type_id[PID_FRICTION]); if (error) return error; } if (!old || pidff_needs_set_effect(effect, old)) pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_condition(effect, old)) pidff_set_condition_report(pidff, effect); break; case FF_DAMPER: if (!old) { error = pidff_request_effect_upload(pidff, pidff->type_id[PID_DAMPER]); if (error) return error; } if (!old || pidff_needs_set_effect(effect, old)) pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_condition(effect, old)) pidff_set_condition_report(pidff, effect); break; case FF_INERTIA: if (!old) { error = pidff_request_effect_upload(pidff, pidff->type_id[PID_INERTIA]); if (error) return error; } if (!old || pidff_needs_set_effect(effect, old)) pidff_set_effect_report(pidff, effect); if (!old || pidff_needs_set_condition(effect, old)) pidff_set_condition_report(pidff, effect); break; default: hid_err(pidff->hid, "invalid type\n"); return -EINVAL; } if (!old) pidff->pid_id[effect->id] = pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]; hid_dbg(pidff->hid, "uploaded\n"); return 0; } /* * set_gain() handler */ static void pidff_set_gain(struct input_dev *dev, u16 gain) { struct pidff_device *pidff = dev->ff->private; pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], gain); usbhid_submit_report(pidff->hid, pidff->reports[PID_DEVICE_GAIN], USB_DIR_OUT); } static void pidff_autocenter(struct pidff_device *pidff, u16 magnitude) { struct hid_field *field 
= pidff->block_load[PID_EFFECT_BLOCK_INDEX].field; if (!magnitude) { pidff_playback_pid(pidff, field->logical_minimum, 0); return; } pidff_playback_pid(pidff, field->logical_minimum, 1); pidff->set_effect[PID_EFFECT_BLOCK_INDEX].value[0] = pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_minimum; pidff->set_effect_type->value[0] = pidff->type_id[PID_SPRING]; pidff->set_effect[PID_DURATION].value[0] = 0; pidff->set_effect[PID_TRIGGER_BUTTON].value[0] = 0; pidff->set_effect[PID_TRIGGER_REPEAT_INT].value[0] = 0; pidff_set(&pidff->set_effect[PID_GAIN], magnitude); pidff->set_effect[PID_DIRECTION_ENABLE].value[0] = 1; pidff->set_effect[PID_START_DELAY].value[0] = 0; usbhid_submit_report(pidff->hid, pidff->reports[PID_SET_EFFECT], USB_DIR_OUT); } /* * pidff_set_autocenter() handler */ static void pidff_set_autocenter(struct input_dev *dev, u16 magnitude) { struct pidff_device *pidff = dev->ff->private; pidff_autocenter(pidff, magnitude); } /* * Find fields from a report and fill a pidff_usage */ static int pidff_find_fields(struct pidff_usage *usage, const u8 *table, struct hid_report *report, int count, int strict) { int i, j, k, found; for (k = 0; k < count; k++) { found = 0; for (i = 0; i < report->maxfield; i++) { if (report->field[i]->maxusage != report->field[i]->report_count) { pr_debug("maxusage and report_count do not match, skipping\n"); continue; } for (j = 0; j < report->field[i]->maxusage; j++) { if (report->field[i]->usage[j].hid == (HID_UP_PID | table[k])) { pr_debug("found %d at %d->%d\n", k, i, j); usage[k].field = report->field[i]; usage[k].value = &report->field[i]->value[j]; found = 1; break; } } if (found) break; } if (!found && strict) { pr_debug("failed to locate %d\n", k); return -1; } } return 0; } /* * Return index into pidff_reports for the given usage */ static int pidff_check_usage(int usage) { int i; for (i = 0; i < sizeof(pidff_reports); i++) if (usage == (HID_UP_PID | pidff_reports[i])) return i; return -1; } /* * Find the reports 
and fill pidff->reports[] * report_type specifies either OUTPUT or FEATURE reports */ static void pidff_find_reports(struct hid_device *hid, int report_type, struct pidff_device *pidff) { struct hid_report *report; int i, ret; list_for_each_entry(report, &hid->report_enum[report_type].report_list, list) { if (report->maxfield < 1) continue; ret = pidff_check_usage(report->field[0]->logical); if (ret != -1) { hid_dbg(hid, "found usage 0x%02x from field->logical\n", pidff_reports[ret]); pidff->reports[ret] = report; continue; } /* * Sometimes logical collections are stacked to indicate * different usages for the report and the field, in which * case we want the usage of the parent. However, Linux HID * implementation hides this fact, so we have to dig it up * ourselves */ i = report->field[0]->usage[0].collection_index; if (i <= 0 || hid->collection[i - 1].type != HID_COLLECTION_LOGICAL) continue; ret = pidff_check_usage(hid->collection[i - 1].usage); if (ret != -1 && !pidff->reports[ret]) { hid_dbg(hid, "found usage 0x%02x from collection array\n", pidff_reports[ret]); pidff->reports[ret] = report; } } } /* * Test if the required reports have been found */ static int pidff_reports_ok(struct pidff_device *pidff) { int i; for (i = 0; i <= PID_REQUIRED_REPORTS; i++) { if (!pidff->reports[i]) { hid_dbg(pidff->hid, "%d missing\n", i); return 0; } } return 1; } /* * Find a field with a specific usage within a report */ static struct hid_field *pidff_find_special_field(struct hid_report *report, int usage, int enforce_min) { int i; for (i = 0; i < report->maxfield; i++) { if (report->field[i]->logical == (HID_UP_PID | usage) && report->field[i]->report_count > 0) { if (!enforce_min || report->field[i]->logical_minimum == 1) return report->field[i]; else { pr_err("logical_minimum is not 1 as it should be\n"); return NULL; } } } return NULL; } /* * Fill a pidff->*_id struct table */ static int pidff_find_special_keys(int *keys, struct hid_field *fld, const u8 *usagetable, 
int count) { int i, j; int found = 0; for (i = 0; i < count; i++) { for (j = 0; j < fld->maxusage; j++) { if (fld->usage[j].hid == (HID_UP_PID | usagetable[i])) { keys[i] = j + 1; found++; break; } } } return found; } #define PIDFF_FIND_SPECIAL_KEYS(keys, field, name) \ pidff_find_special_keys(pidff->keys, pidff->field, pidff_ ## name, \ sizeof(pidff_ ## name)) /* * Find and check the special fields */ static int pidff_find_special_fields(struct pidff_device *pidff) { hid_dbg(pidff->hid, "finding special fields\n"); pidff->create_new_effect_type = pidff_find_special_field(pidff->reports[PID_CREATE_NEW_EFFECT], 0x25, 1); pidff->set_effect_type = pidff_find_special_field(pidff->reports[PID_SET_EFFECT], 0x25, 1); pidff->effect_direction = pidff_find_special_field(pidff->reports[PID_SET_EFFECT], 0x57, 0); pidff->device_control = pidff_find_special_field(pidff->reports[PID_DEVICE_CONTROL], 0x96, 1); pidff->block_load_status = pidff_find_special_field(pidff->reports[PID_BLOCK_LOAD], 0x8b, 1); pidff->effect_operation_status = pidff_find_special_field(pidff->reports[PID_EFFECT_OPERATION], 0x78, 1); hid_dbg(pidff->hid, "search done\n"); if (!pidff->create_new_effect_type || !pidff->set_effect_type) { hid_err(pidff->hid, "effect lists not found\n"); return -1; } if (!pidff->effect_direction) { hid_err(pidff->hid, "direction field not found\n"); return -1; } if (!pidff->device_control) { hid_err(pidff->hid, "device control field not found\n"); return -1; } if (!pidff->block_load_status) { hid_err(pidff->hid, "block load status field not found\n"); return -1; } if (!pidff->effect_operation_status) { hid_err(pidff->hid, "effect operation field not found\n"); return -1; } pidff_find_special_keys(pidff->control_id, pidff->device_control, pidff_device_control, sizeof(pidff_device_control)); PIDFF_FIND_SPECIAL_KEYS(control_id, device_control, device_control); if (!PIDFF_FIND_SPECIAL_KEYS(type_id, create_new_effect_type, effect_types)) { hid_err(pidff->hid, "no effect types 
found\n"); return -1; } if (PIDFF_FIND_SPECIAL_KEYS(status_id, block_load_status, block_load_status) != sizeof(pidff_block_load_status)) { hid_err(pidff->hid, "block load status identifiers not found\n"); return -1; } if (PIDFF_FIND_SPECIAL_KEYS(operation_id, effect_operation_status, effect_operation_status) != sizeof(pidff_effect_operation_status)) { hid_err(pidff->hid, "effect operation identifiers not found\n"); return -1; } return 0; } /** * Find the implemented effect types */ static int pidff_find_effects(struct pidff_device *pidff, struct input_dev *dev) { int i; for (i = 0; i < sizeof(pidff_effect_types); i++) { int pidff_type = pidff->type_id[i]; if (pidff->set_effect_type->usage[pidff_type].hid != pidff->create_new_effect_type->usage[pidff_type].hid) { hid_err(pidff->hid, "effect type number %d is invalid\n", i); return -1; } } if (pidff->type_id[PID_CONSTANT]) set_bit(FF_CONSTANT, dev->ffbit); if (pidff->type_id[PID_RAMP]) set_bit(FF_RAMP, dev->ffbit); if (pidff->type_id[PID_SQUARE]) { set_bit(FF_SQUARE, dev->ffbit); set_bit(FF_PERIODIC, dev->ffbit); } if (pidff->type_id[PID_SINE]) { set_bit(FF_SINE, dev->ffbit); set_bit(FF_PERIODIC, dev->ffbit); } if (pidff->type_id[PID_TRIANGLE]) { set_bit(FF_TRIANGLE, dev->ffbit); set_bit(FF_PERIODIC, dev->ffbit); } if (pidff->type_id[PID_SAW_UP]) { set_bit(FF_SAW_UP, dev->ffbit); set_bit(FF_PERIODIC, dev->ffbit); } if (pidff->type_id[PID_SAW_DOWN]) { set_bit(FF_SAW_DOWN, dev->ffbit); set_bit(FF_PERIODIC, dev->ffbit); } if (pidff->type_id[PID_SPRING]) set_bit(FF_SPRING, dev->ffbit); if (pidff->type_id[PID_DAMPER]) set_bit(FF_DAMPER, dev->ffbit); if (pidff->type_id[PID_INERTIA]) set_bit(FF_INERTIA, dev->ffbit); if (pidff->type_id[PID_FRICTION]) set_bit(FF_FRICTION, dev->ffbit); return 0; } #define PIDFF_FIND_FIELDS(name, report, strict) \ pidff_find_fields(pidff->name, pidff_ ## name, \ pidff->reports[report], \ sizeof(pidff_ ## name), strict) /* * Fill and check the pidff_usages */ static int pidff_init_fields(struct 
pidff_device *pidff, struct input_dev *dev) { int envelope_ok = 0; if (PIDFF_FIND_FIELDS(set_effect, PID_SET_EFFECT, 1)) { hid_err(pidff->hid, "unknown set_effect report layout\n"); return -ENODEV; } PIDFF_FIND_FIELDS(block_load, PID_BLOCK_LOAD, 0); if (!pidff->block_load[PID_EFFECT_BLOCK_INDEX].value) { hid_err(pidff->hid, "unknown pid_block_load report layout\n"); return -ENODEV; } if (PIDFF_FIND_FIELDS(effect_operation, PID_EFFECT_OPERATION, 1)) { hid_err(pidff->hid, "unknown effect_operation report layout\n"); return -ENODEV; } if (PIDFF_FIND_FIELDS(block_free, PID_BLOCK_FREE, 1)) { hid_err(pidff->hid, "unknown pid_block_free report layout\n"); return -ENODEV; } if (!PIDFF_FIND_FIELDS(set_envelope, PID_SET_ENVELOPE, 1)) envelope_ok = 1; if (pidff_find_special_fields(pidff) || pidff_find_effects(pidff, dev)) return -ENODEV; if (!envelope_ok) { if (test_and_clear_bit(FF_CONSTANT, dev->ffbit)) hid_warn(pidff->hid, "has constant effect but no envelope\n"); if (test_and_clear_bit(FF_RAMP, dev->ffbit)) hid_warn(pidff->hid, "has ramp effect but no envelope\n"); if (test_and_clear_bit(FF_PERIODIC, dev->ffbit)) hid_warn(pidff->hid, "has periodic effect but no envelope\n"); } if (test_bit(FF_CONSTANT, dev->ffbit) && PIDFF_FIND_FIELDS(set_constant, PID_SET_CONSTANT, 1)) { hid_warn(pidff->hid, "unknown constant effect layout\n"); clear_bit(FF_CONSTANT, dev->ffbit); } if (test_bit(FF_RAMP, dev->ffbit) && PIDFF_FIND_FIELDS(set_ramp, PID_SET_RAMP, 1)) { hid_warn(pidff->hid, "unknown ramp effect layout\n"); clear_bit(FF_RAMP, dev->ffbit); } if ((test_bit(FF_SPRING, dev->ffbit) || test_bit(FF_DAMPER, dev->ffbit) || test_bit(FF_FRICTION, dev->ffbit) || test_bit(FF_INERTIA, dev->ffbit)) && PIDFF_FIND_FIELDS(set_condition, PID_SET_CONDITION, 1)) { hid_warn(pidff->hid, "unknown condition effect layout\n"); clear_bit(FF_SPRING, dev->ffbit); clear_bit(FF_DAMPER, dev->ffbit); clear_bit(FF_FRICTION, dev->ffbit); clear_bit(FF_INERTIA, dev->ffbit); } if (test_bit(FF_PERIODIC, dev->ffbit) 
&& PIDFF_FIND_FIELDS(set_periodic, PID_SET_PERIODIC, 1)) { hid_warn(pidff->hid, "unknown periodic effect layout\n"); clear_bit(FF_PERIODIC, dev->ffbit); } PIDFF_FIND_FIELDS(pool, PID_POOL, 0); if (!PIDFF_FIND_FIELDS(device_gain, PID_DEVICE_GAIN, 1)) set_bit(FF_GAIN, dev->ffbit); return 0; } /* * Reset the device */ static void pidff_reset(struct pidff_device *pidff) { struct hid_device *hid = pidff->hid; int i = 0; pidff->device_control->value[0] = pidff->control_id[PID_RESET]; /* We reset twice as sometimes hid_wait_io isn't waiting long enough */ usbhid_submit_report(hid, pidff->reports[PID_DEVICE_CONTROL], USB_DIR_OUT); usbhid_wait_io(hid); usbhid_submit_report(hid, pidff->reports[PID_DEVICE_CONTROL], USB_DIR_OUT); usbhid_wait_io(hid); pidff->device_control->value[0] = pidff->control_id[PID_ENABLE_ACTUATORS]; usbhid_submit_report(hid, pidff->reports[PID_DEVICE_CONTROL], USB_DIR_OUT); usbhid_wait_io(hid); /* pool report is sometimes messed up, refetch it */ usbhid_submit_report(hid, pidff->reports[PID_POOL], USB_DIR_IN); usbhid_wait_io(hid); if (pidff->pool[PID_SIMULTANEOUS_MAX].value) { while (pidff->pool[PID_SIMULTANEOUS_MAX].value[0] < 2) { if (i++ > 20) { hid_warn(pidff->hid, "device reports %d simultaneous effects\n", pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); break; } hid_dbg(pidff->hid, "pid_pool requested again\n"); usbhid_submit_report(hid, pidff->reports[PID_POOL], USB_DIR_IN); usbhid_wait_io(hid); } } } /* * Test if autocenter modification is using the supported method */ static int pidff_check_autocenter(struct pidff_device *pidff, struct input_dev *dev) { int error; /* * Let's find out if autocenter modification is supported * Specification doesn't specify anything, so we request an * effect upload and cancel it immediately. 
If the approved * effect id was one above the minimum, then we assume the first * effect id is a built-in spring type effect used for autocenter */ error = pidff_request_effect_upload(pidff, 1); if (error) { hid_err(pidff->hid, "upload request failed\n"); return error; } if (pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0] == pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_minimum + 1) { pidff_autocenter(pidff, 0xffff); set_bit(FF_AUTOCENTER, dev->ffbit); } else { hid_notice(pidff->hid, "device has unknown autocenter control method\n"); } pidff_erase_pid(pidff, pidff->block_load[PID_EFFECT_BLOCK_INDEX].value[0]); return 0; } /* * Check if the device is PID and initialize it */ int hid_pidff_init(struct hid_device *hid) { struct pidff_device *pidff; struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); struct input_dev *dev = hidinput->input; struct ff_device *ff; int max_effects; int error; hid_dbg(hid, "starting pid init\n"); if (list_empty(&hid->report_enum[HID_OUTPUT_REPORT].report_list)) { hid_dbg(hid, "not a PID device, no output report\n"); return -ENODEV; } pidff = kzalloc(sizeof(*pidff), GFP_KERNEL); if (!pidff) return -ENOMEM; pidff->hid = hid; pidff_find_reports(hid, HID_OUTPUT_REPORT, pidff); pidff_find_reports(hid, HID_FEATURE_REPORT, pidff); if (!pidff_reports_ok(pidff)) { hid_dbg(hid, "reports not ok, aborting\n"); error = -ENODEV; goto fail; } error = pidff_init_fields(pidff, dev); if (error) goto fail; pidff_reset(pidff); if (test_bit(FF_GAIN, dev->ffbit)) { pidff_set(&pidff->device_gain[PID_DEVICE_GAIN_FIELD], 0xffff); usbhid_submit_report(hid, pidff->reports[PID_DEVICE_GAIN], USB_DIR_OUT); } error = pidff_check_autocenter(pidff, dev); if (error) goto fail; max_effects = pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_maximum - pidff->block_load[PID_EFFECT_BLOCK_INDEX].field->logical_minimum + 1; hid_dbg(hid, "max effects is %d\n", max_effects); if (max_effects > PID_EFFECTS_MAX) max_effects = 
PID_EFFECTS_MAX; if (pidff->pool[PID_SIMULTANEOUS_MAX].value) hid_dbg(hid, "max simultaneous effects is %d\n", pidff->pool[PID_SIMULTANEOUS_MAX].value[0]); if (pidff->pool[PID_RAM_POOL_SIZE].value) hid_dbg(hid, "device memory size is %d bytes\n", pidff->pool[PID_RAM_POOL_SIZE].value[0]); if (pidff->pool[PID_DEVICE_MANAGED_POOL].value && pidff->pool[PID_DEVICE_MANAGED_POOL].value[0] == 0) { hid_notice(hid, "device does not support device managed pool\n"); goto fail; } error = input_ff_create(dev, max_effects); if (error) goto fail; ff = dev->ff; ff->private = pidff; ff->upload = pidff_upload_effect; ff->erase = pidff_erase_effect; ff->set_gain = pidff_set_gain; ff->set_autocenter = pidff_set_autocenter; ff->playback = pidff_playback; hid_info(dev, "Force feedback for USB HID PID devices by Anssi Hannula <anssi.hannula@gmail.com>\n"); return 0; fail: kfree(pidff); return error; }
gpl-2.0
chrisw957/gumstix-linux
drivers/media/radio/radio-trust.c
9674
6025
/* radio-trust.c - Trust FM Radio card driver for Linux 2.2 * by Eric Lammerts <eric@scintilla.utwente.nl> * * Based on radio-aztech.c. Original notes: * * Adapted to support the Video for Linux API by * Russell Kroll <rkroll@exploits.org>. Based on original tuner code by: * * Quay Ly * Donald Song * Jason Lewis (jlewis@twilight.vtc.vsc.edu) * Scott McGrath (smcgrath@twilight.vtc.vsc.edu) * William McGrath (wmcgrath@twilight.vtc.vsc.edu) * * Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org> */ #include <stdarg.h> #include <linux/module.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/videodev2.h> #include <linux/io.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include "radio-isa.h" MODULE_AUTHOR("Eric Lammerts, Russell Kroll, Quay Lu, Donald Song, Jason Lewis, Scott McGrath, William McGrath"); MODULE_DESCRIPTION("A driver for the Trust FM Radio card."); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1.99"); /* acceptable ports: 0x350 (JP3 shorted), 0x358 (JP3 open) */ #ifndef CONFIG_RADIO_TRUST_PORT #define CONFIG_RADIO_TRUST_PORT -1 #endif #define TRUST_MAX 2 static int io[TRUST_MAX] = { [0] = CONFIG_RADIO_TRUST_PORT, [1 ... (TRUST_MAX - 1)] = -1 }; static int radio_nr[TRUST_MAX] = { [0 ... (TRUST_MAX - 1)] = -1 }; module_param_array(io, int, NULL, 0444); MODULE_PARM_DESC(io, "I/O addresses of the Trust FM Radio card (0x350 or 0x358)"); module_param_array(radio_nr, int, NULL, 0444); MODULE_PARM_DESC(radio_nr, "Radio device numbers"); struct trust { struct radio_isa_card isa; int ioval; }; static struct radio_isa_card *trust_alloc(void) { struct trust *tr = kzalloc(sizeof(*tr), GFP_KERNEL); return tr ? 
&tr->isa : NULL; } /* i2c addresses */ #define TDA7318_ADDR 0x88 #define TSA6060T_ADDR 0xc4 #define TR_DELAY do { inb(tr->isa.io); inb(tr->isa.io); inb(tr->isa.io); } while (0) #define TR_SET_SCL outb(tr->ioval |= 2, tr->isa.io) #define TR_CLR_SCL outb(tr->ioval &= 0xfd, tr->isa.io) #define TR_SET_SDA outb(tr->ioval |= 1, tr->isa.io) #define TR_CLR_SDA outb(tr->ioval &= 0xfe, tr->isa.io) static void write_i2c(struct trust *tr, int n, ...) { unsigned char val, mask; va_list args; va_start(args, n); /* start condition */ TR_SET_SDA; TR_SET_SCL; TR_DELAY; TR_CLR_SDA; TR_CLR_SCL; TR_DELAY; for (; n; n--) { val = va_arg(args, unsigned); for (mask = 0x80; mask; mask >>= 1) { if (val & mask) TR_SET_SDA; else TR_CLR_SDA; TR_SET_SCL; TR_DELAY; TR_CLR_SCL; TR_DELAY; } /* acknowledge bit */ TR_SET_SDA; TR_SET_SCL; TR_DELAY; TR_CLR_SCL; TR_DELAY; } /* stop condition */ TR_CLR_SDA; TR_DELAY; TR_SET_SCL; TR_DELAY; TR_SET_SDA; TR_DELAY; va_end(args); } static int trust_s_mute_volume(struct radio_isa_card *isa, bool mute, int vol) { struct trust *tr = container_of(isa, struct trust, isa); tr->ioval = (tr->ioval & 0xf7) | (mute << 3); outb(tr->ioval, isa->io); write_i2c(tr, 2, TDA7318_ADDR, vol ^ 0x1f); return 0; } static int trust_s_stereo(struct radio_isa_card *isa, bool stereo) { struct trust *tr = container_of(isa, struct trust, isa); tr->ioval = (tr->ioval & 0xfb) | (!stereo << 2); outb(tr->ioval, isa->io); return 0; } static u32 trust_g_signal(struct radio_isa_card *isa) { int i, v; for (i = 0, v = 0; i < 100; i++) v |= inb(isa->io); return (v & 1) ? 
0 : 0xffff; } static int trust_s_frequency(struct radio_isa_card *isa, u32 freq) { struct trust *tr = container_of(isa, struct trust, isa); freq /= 160; /* Convert to 10 kHz units */ freq += 1070; /* Add 10.7 MHz IF */ write_i2c(tr, 5, TSA6060T_ADDR, (freq << 1) | 1, freq >> 7, 0x60 | ((freq >> 15) & 1), 0); return 0; } static int basstreble2chip[15] = { 0, 1, 2, 3, 4, 5, 6, 7, 14, 13, 12, 11, 10, 9, 8 }; static int trust_s_ctrl(struct v4l2_ctrl *ctrl) { struct radio_isa_card *isa = container_of(ctrl->handler, struct radio_isa_card, hdl); struct trust *tr = container_of(isa, struct trust, isa); switch (ctrl->id) { case V4L2_CID_AUDIO_BASS: write_i2c(tr, 2, TDA7318_ADDR, 0x60 | basstreble2chip[ctrl->val]); return 0; case V4L2_CID_AUDIO_TREBLE: write_i2c(tr, 2, TDA7318_ADDR, 0x70 | basstreble2chip[ctrl->val]); return 0; } return -EINVAL; } static const struct v4l2_ctrl_ops trust_ctrl_ops = { .s_ctrl = trust_s_ctrl, }; static int trust_initialize(struct radio_isa_card *isa) { struct trust *tr = container_of(isa, struct trust, isa); tr->ioval = 0xf; write_i2c(tr, 2, TDA7318_ADDR, 0x80); /* speaker att. LF = 0 dB */ write_i2c(tr, 2, TDA7318_ADDR, 0xa0); /* speaker att. RF = 0 dB */ write_i2c(tr, 2, TDA7318_ADDR, 0xc0); /* speaker att. LR = 0 dB */ write_i2c(tr, 2, TDA7318_ADDR, 0xe0); /* speaker att. 
RR = 0 dB */ write_i2c(tr, 2, TDA7318_ADDR, 0x40); /* stereo 1 input, gain = 18.75 dB */ v4l2_ctrl_new_std(&isa->hdl, &trust_ctrl_ops, V4L2_CID_AUDIO_BASS, 0, 15, 1, 8); v4l2_ctrl_new_std(&isa->hdl, &trust_ctrl_ops, V4L2_CID_AUDIO_TREBLE, 0, 15, 1, 8); return isa->hdl.error; } static const struct radio_isa_ops trust_ops = { .init = trust_initialize, .alloc = trust_alloc, .s_mute_volume = trust_s_mute_volume, .s_frequency = trust_s_frequency, .s_stereo = trust_s_stereo, .g_signal = trust_g_signal, }; static const int trust_ioports[] = { 0x350, 0x358 }; static struct radio_isa_driver trust_driver = { .driver = { .match = radio_isa_match, .probe = radio_isa_probe, .remove = radio_isa_remove, .driver = { .name = "radio-trust", }, }, .io_params = io, .radio_nr_params = radio_nr, .io_ports = trust_ioports, .num_of_io_ports = ARRAY_SIZE(trust_ioports), .region_size = 2, .card = "Trust FM Radio", .ops = &trust_ops, .has_stereo = true, .max_volume = 31, }; static int __init trust_init(void) { return isa_register_driver(&trust_driver.driver, TRUST_MAX); } static void __exit trust_exit(void) { isa_unregister_driver(&trust_driver.driver); } module_init(trust_init); module_exit(trust_exit);
gpl-2.0
surdupetru/android_kernel_huawei_msm8916
drivers/video/fb_notify.c
9930
1238
/* * linux/drivers/video/fb_notify.c * * Copyright (C) 2006 Antonino Daplas <adaplas@pol.net> * * 2001 - Documented with DocBook * - Brad Douglas <brad@neruo.com> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/fb.h> #include <linux/notifier.h> #include <linux/export.h> static BLOCKING_NOTIFIER_HEAD(fb_notifier_list); /** * fb_register_client - register a client notifier * @nb: notifier block to callback on events */ int fb_register_client(struct notifier_block *nb) { return blocking_notifier_chain_register(&fb_notifier_list, nb); } EXPORT_SYMBOL(fb_register_client); /** * fb_unregister_client - unregister a client notifier * @nb: notifier block to callback on events */ int fb_unregister_client(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&fb_notifier_list, nb); } EXPORT_SYMBOL(fb_unregister_client); /** * fb_notifier_call_chain - notify clients of fb_events * */ int fb_notifier_call_chain(unsigned long val, void *v) { return blocking_notifier_call_chain(&fb_notifier_list, val, v); } EXPORT_SYMBOL_GPL(fb_notifier_call_chain);
gpl-2.0
GustavoRD78/fw_z3
crypto/hmac.c
11466
7022
/* * Cryptographic API. * * HMAC: Keyed-Hashing for Message Authentication (RFC2104). * * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * * The HMAC implementation is derived from USAGI. * Copyright (c) 2002 Kazunori Miyazawa <miyazawa@linux-ipv6.org> / USAGI * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <crypto/internal/hash.h> #include <crypto/scatterwalk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/string.h> struct hmac_ctx { struct crypto_shash *hash; }; static inline void *align_ptr(void *p, unsigned int align) { return (void *)ALIGN((unsigned long)p, align); } static inline struct hmac_ctx *hmac_ctx(struct crypto_shash *tfm) { return align_ptr(crypto_shash_ctx_aligned(tfm) + crypto_shash_statesize(tfm) * 2, crypto_tfm_ctx_alignment()); } static int hmac_setkey(struct crypto_shash *parent, const u8 *inkey, unsigned int keylen) { int bs = crypto_shash_blocksize(parent); int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); char *ipad = crypto_shash_ctx_aligned(parent); char *opad = ipad + ss; struct hmac_ctx *ctx = align_ptr(opad + ss, crypto_tfm_ctx_alignment()); struct crypto_shash *hash = ctx->hash; struct { struct shash_desc shash; char ctx[crypto_shash_descsize(hash)]; } desc; unsigned int i; desc.shash.tfm = hash; desc.shash.flags = crypto_shash_get_flags(parent) & CRYPTO_TFM_REQ_MAY_SLEEP; if (keylen > bs) { int err; err = crypto_shash_digest(&desc.shash, inkey, keylen, ipad); if (err) return err; keylen = ds; } else memcpy(ipad, inkey, keylen); memset(ipad + keylen, 0, bs - keylen); memcpy(opad, ipad, bs); for (i = 0; i < bs; i++) { 
ipad[i] ^= 0x36; opad[i] ^= 0x5c; } return crypto_shash_init(&desc.shash) ?: crypto_shash_update(&desc.shash, ipad, bs) ?: crypto_shash_export(&desc.shash, ipad) ?: crypto_shash_init(&desc.shash) ?: crypto_shash_update(&desc.shash, opad, bs) ?: crypto_shash_export(&desc.shash, opad); } static int hmac_export(struct shash_desc *pdesc, void *out) { struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_export(desc, out); } static int hmac_import(struct shash_desc *pdesc, const void *in) { struct shash_desc *desc = shash_desc_ctx(pdesc); struct hmac_ctx *ctx = hmac_ctx(pdesc->tfm); desc->tfm = ctx->hash; desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_import(desc, in); } static int hmac_init(struct shash_desc *pdesc) { return hmac_import(pdesc, crypto_shash_ctx_aligned(pdesc->tfm)); } static int hmac_update(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes) { struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_update(desc, data, nbytes); } static int hmac_final(struct shash_desc *pdesc, u8 *out) { struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); char *opad = crypto_shash_ctx_aligned(parent) + ss; struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_final(desc, out) ?: crypto_shash_import(desc, opad) ?: crypto_shash_finup(desc, out, ds, out); } static int hmac_finup(struct shash_desc *pdesc, const u8 *data, unsigned int nbytes, u8 *out) { struct crypto_shash *parent = pdesc->tfm; int ds = crypto_shash_digestsize(parent); int ss = crypto_shash_statesize(parent); char *opad = crypto_shash_ctx_aligned(parent) + ss; struct shash_desc *desc = shash_desc_ctx(pdesc); desc->flags = pdesc->flags & CRYPTO_TFM_REQ_MAY_SLEEP; return crypto_shash_finup(desc, 
data, nbytes, out) ?: crypto_shash_import(desc, opad) ?: crypto_shash_finup(desc, out, ds, out); } static int hmac_init_tfm(struct crypto_tfm *tfm) { struct crypto_shash *parent = __crypto_shash_cast(tfm); struct crypto_shash *hash; struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_shash_spawn *spawn = crypto_instance_ctx(inst); struct hmac_ctx *ctx = hmac_ctx(parent); hash = crypto_spawn_shash(spawn); if (IS_ERR(hash)) return PTR_ERR(hash); parent->descsize = sizeof(struct shash_desc) + crypto_shash_descsize(hash); ctx->hash = hash; return 0; } static void hmac_exit_tfm(struct crypto_tfm *tfm) { struct hmac_ctx *ctx = hmac_ctx(__crypto_shash_cast(tfm)); crypto_free_shash(ctx->hash); } static int hmac_create(struct crypto_template *tmpl, struct rtattr **tb) { struct shash_instance *inst; struct crypto_alg *alg; struct shash_alg *salg; int err; int ds; int ss; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_SHASH); if (err) return err; salg = shash_attr_alg(tb[1], 0, 0); if (IS_ERR(salg)) return PTR_ERR(salg); err = -EINVAL; ds = salg->digestsize; ss = salg->statesize; alg = &salg->base; if (ds > alg->cra_blocksize || ss < alg->cra_blocksize) goto out_put_alg; inst = shash_alloc_instance("hmac", alg); err = PTR_ERR(inst); if (IS_ERR(inst)) goto out_put_alg; err = crypto_init_shash_spawn(shash_instance_ctx(inst), salg, shash_crypto_instance(inst)); if (err) goto out_free_inst; inst->alg.base.cra_priority = alg->cra_priority; inst->alg.base.cra_blocksize = alg->cra_blocksize; inst->alg.base.cra_alignmask = alg->cra_alignmask; ss = ALIGN(ss, alg->cra_alignmask + 1); inst->alg.digestsize = ds; inst->alg.statesize = ss; inst->alg.base.cra_ctxsize = sizeof(struct hmac_ctx) + ALIGN(ss * 2, crypto_tfm_ctx_alignment()); inst->alg.base.cra_init = hmac_init_tfm; inst->alg.base.cra_exit = hmac_exit_tfm; inst->alg.init = hmac_init; inst->alg.update = hmac_update; inst->alg.final = hmac_final; inst->alg.finup = hmac_finup; inst->alg.export = hmac_export; 
inst->alg.import = hmac_import; inst->alg.setkey = hmac_setkey; err = shash_register_instance(tmpl, inst); if (err) { out_free_inst: shash_free_instance(shash_crypto_instance(inst)); } out_put_alg: crypto_mod_put(alg); return err; } static struct crypto_template hmac_tmpl = { .name = "hmac", .create = hmac_create, .free = shash_free_instance, .module = THIS_MODULE, }; static int __init hmac_module_init(void) { return crypto_register_template(&hmac_tmpl); } static void __exit hmac_module_exit(void) { crypto_unregister_template(&hmac_tmpl); } module_init(hmac_module_init); module_exit(hmac_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("HMAC hash algorithm");
gpl-2.0
pluma320/kernel-moto-g
drivers/media/video/zoran/zr36050.c
13002
27029
/* * Zoran ZR36050 basic configuration functions * * Copyright (C) 2001 Wolfgang Scherr <scherr@net4you.at> * * $Id: zr36050.c,v 1.1.2.11 2003/08/03 14:54:53 rbultje Exp $ * * ------------------------------------------------------------------------ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * ------------------------------------------------------------------------ */ #define ZR050_VERSION "v0.7.1" #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/types.h> #include <linux/wait.h> /* I/O commands, error codes */ #include <asm/io.h> /* headerfile of this module */ #include "zr36050.h" /* codec io API */ #include "videocodec.h" /* it doesn't make sense to have more than 20 or so, just to prevent some unwanted loops */ #define MAX_CODECS 20 /* amount of chips attached via this driver */ static int zr36050_codecs; /* debugging is available via module parameter */ static int debug; module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug level (0-4)"); #define dprintk(num, format, args...) 
\ do { \ if (debug >= num) \ printk(format, ##args); \ } while (0) /* ========================================================================= Local hardware I/O functions: read/write via codec layer (registers are located in the master device) ========================================================================= */ /* read and write functions */ static u8 zr36050_read (struct zr36050 *ptr, u16 reg) { u8 value = 0; // just in case something is wrong... if (ptr->codec->master_data->readreg) value = (ptr->codec->master_data->readreg(ptr->codec, reg)) & 0xFF; else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing read!\n", ptr->name); dprintk(4, "%s: reading from 0x%04x: %02x\n", ptr->name, reg, value); return value; } static void zr36050_write (struct zr36050 *ptr, u16 reg, u8 value) { dprintk(4, "%s: writing 0x%02x to 0x%04x\n", ptr->name, value, reg); // just in case something is wrong... if (ptr->codec->master_data->writereg) ptr->codec->master_data->writereg(ptr->codec, reg, value); else dprintk(1, KERN_ERR "%s: invalid I/O setup, nothing written!\n", ptr->name); } /* ========================================================================= Local helper function: status read ========================================================================= */ /* status is kept in datastructure */ static u8 zr36050_read_status1 (struct zr36050 *ptr) { ptr->status1 = zr36050_read(ptr, ZR050_STATUS_1); zr36050_read(ptr, 0); return ptr->status1; } /* ========================================================================= Local helper function: scale factor read ========================================================================= */ /* scale factor is kept in datastructure */ static u16 zr36050_read_scalefactor (struct zr36050 *ptr) { ptr->scalefact = (zr36050_read(ptr, ZR050_SF_HI) << 8) | (zr36050_read(ptr, ZR050_SF_LO) & 0xFF); /* leave 0 selected for an eventually GO from master */ zr36050_read(ptr, 0); return ptr->scalefact; } /* 
========================================================================= Local helper function: wait if codec is ready to proceed (end of processing) or time is over ========================================================================= */ static void zr36050_wait_end (struct zr36050 *ptr) { int i = 0; while (!(zr36050_read_status1(ptr) & 0x4)) { udelay(1); if (i++ > 200000) { // 200ms, there is for sure something wrong!!! dprintk(1, "%s: timeout at wait_end (last status: 0x%02x)\n", ptr->name, ptr->status1); break; } } } /* ========================================================================= Local helper function: basic test of "connectivity", writes/reads to/from memory the SOF marker ========================================================================= */ static int zr36050_basic_test (struct zr36050 *ptr) { zr36050_write(ptr, ZR050_SOF_IDX, 0x00); zr36050_write(ptr, ZR050_SOF_IDX + 1, 0x00); if ((zr36050_read(ptr, ZR050_SOF_IDX) | zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0x0000) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to jpeg processor!\n", ptr->name); return -ENXIO; } zr36050_write(ptr, ZR050_SOF_IDX, 0xff); zr36050_write(ptr, ZR050_SOF_IDX + 1, 0xc0); if (((zr36050_read(ptr, ZR050_SOF_IDX) << 8) | zr36050_read(ptr, ZR050_SOF_IDX + 1)) != 0xffc0) { dprintk(1, KERN_ERR "%s: attach failed, can't connect to jpeg processor!\n", ptr->name); return -ENXIO; } zr36050_wait_end(ptr); if ((ptr->status1 & 0x4) == 0) { dprintk(1, KERN_ERR "%s: attach failed, jpeg processor failed (end flag)!\n", ptr->name); return -EBUSY; } return 0; /* looks good! 
*/ } /* ========================================================================= Local helper function: simple loop for pushing the init datasets ========================================================================= */ static int zr36050_pushit (struct zr36050 *ptr, u16 startreg, u16 len, const char *data) { int i = 0; dprintk(4, "%s: write data block to 0x%04x (len=%d)\n", ptr->name, startreg, len); while (i < len) { zr36050_write(ptr, startreg++, data[i++]); } return i; } /* ========================================================================= Basic datasets: jpeg baseline setup data (you find it on lots places in internet, or just extract it from any regular .jpg image...) Could be variable, but until it's not needed it they are just fixed to save memory. Otherwise expand zr36050 structure with arrays, push the values to it and initialize from there, as e.g. the linux zr36057/60 driver does it. ========================================================================= */ static const char zr36050_dqt[0x86] = { 0xff, 0xdb, //Marker: DQT 0x00, 0x84, //Length: 2*65+2 0x00, //Pq,Tq first table 0x10, 0x0b, 0x0c, 0x0e, 0x0c, 0x0a, 0x10, 0x0e, 0x0d, 0x0e, 0x12, 0x11, 0x10, 0x13, 0x18, 0x28, 0x1a, 0x18, 0x16, 0x16, 0x18, 0x31, 0x23, 0x25, 0x1d, 0x28, 0x3a, 0x33, 0x3d, 0x3c, 0x39, 0x33, 0x38, 0x37, 0x40, 0x48, 0x5c, 0x4e, 0x40, 0x44, 0x57, 0x45, 0x37, 0x38, 0x50, 0x6d, 0x51, 0x57, 0x5f, 0x62, 0x67, 0x68, 0x67, 0x3e, 0x4d, 0x71, 0x79, 0x70, 0x64, 0x78, 0x5c, 0x65, 0x67, 0x63, 0x01, //Pq,Tq second table 0x11, 0x12, 0x12, 0x18, 0x15, 0x18, 0x2f, 0x1a, 0x1a, 0x2f, 0x63, 0x42, 0x38, 0x42, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63, 0x63 }; static const char zr36050_dht[0x1a4] = { 0xff, 0xc4, //Marker: DHT 0x01, 0xa2, 
//Length: 2*AC, 2*DC 0x00, //DC first table 0x00, 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x01, //DC second table 0x00, 0x03, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B, 0x10, //AC first table 0x00, 0x02, 0x01, 0x03, 0x03, 0x02, 0x04, 0x03, 0x05, 0x05, 0x04, 0x04, 0x00, 0x00, 0x01, 0x7D, 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12, 0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07, 0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xA1, 0x08, 0x23, 0x42, 0xB1, 0xC1, 0x15, 0x52, 0xD1, 0xF0, 0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0A, 0x16, 0x17, 0x18, 0x19, 0x1A, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE1, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF1, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA, 0x11, //AC second table 0x00, 0x02, 0x01, 0x02, 0x04, 0x04, 0x03, 0x04, 0x07, 0x05, 0x04, 0x04, 0x00, 0x01, 0x02, 0x77, 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21, 0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71, 0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91, 0xA1, 0xB1, 0xC1, 0x09, 0x23, 0x33, 0x52, 0xF0, 0x15, 0x62, 0x72, 0xD1, 0x0A, 0x16, 0x24, 0x34, 0xE1, 0x25, 0xF1, 0x17, 0x18, 0x19, 0x1A, 0x26, 0x27, 0x28, 0x29, 0x2A, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3A, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 
0x49, 0x4A, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59, 0x5A, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69, 0x6A, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79, 0x7A, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89, 0x8A, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98, 0x99, 0x9A, 0xA2, 0xA3, 0xA4, 0xA5, 0xA6, 0xA7, 0xA8, 0xA9, 0xAA, 0xB2, 0xB3, 0xB4, 0xB5, 0xB6, 0xB7, 0xB8, 0xB9, 0xBA, 0xC2, 0xC3, 0xC4, 0xC5, 0xC6, 0xC7, 0xC8, 0xC9, 0xCA, 0xD2, 0xD3, 0xD4, 0xD5, 0xD6, 0xD7, 0xD8, 0xD9, 0xDA, 0xE2, 0xE3, 0xE4, 0xE5, 0xE6, 0xE7, 0xE8, 0xE9, 0xEA, 0xF2, 0xF3, 0xF4, 0xF5, 0xF6, 0xF7, 0xF8, 0xF9, 0xFA }; /* jpeg baseline setup, this is just fixed in this driver (YUV pictures) */ #define NO_OF_COMPONENTS 0x3 //Y,U,V #define BASELINE_PRECISION 0x8 //MCU size (?) static const char zr36050_tq[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's QT static const char zr36050_td[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's DC static const char zr36050_ta[8] = { 0, 1, 1, 0, 0, 0, 0, 0 }; //table idx's AC /* horizontal 422 decimation setup (maybe we support 411 or so later, too) */ static const char zr36050_decimation_h[8] = { 2, 1, 1, 0, 0, 0, 0, 0 }; static const char zr36050_decimation_v[8] = { 1, 1, 1, 0, 0, 0, 0, 0 }; /* ========================================================================= Local helper functions: calculation and setup of parameter-dependent JPEG baseline segments (needed for compression only) ========================================================================= */ /* ------------------------------------------------------------------------- */ /* SOF (start of frame) segment depends on width, height and sampling ratio of each color component */ static int zr36050_set_sof (struct zr36050 *ptr) { char sof_data[34]; // max. 
size of register set int i; dprintk(3, "%s: write SOF (%dx%d, %d components)\n", ptr->name, ptr->width, ptr->height, NO_OF_COMPONENTS); sof_data[0] = 0xff; sof_data[1] = 0xc0; sof_data[2] = 0x00; sof_data[3] = (3 * NO_OF_COMPONENTS) + 8; sof_data[4] = BASELINE_PRECISION; // only '8' possible with zr36050 sof_data[5] = (ptr->height) >> 8; sof_data[6] = (ptr->height) & 0xff; sof_data[7] = (ptr->width) >> 8; sof_data[8] = (ptr->width) & 0xff; sof_data[9] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sof_data[10 + (i * 3)] = i; // index identifier sof_data[11 + (i * 3)] = (ptr->h_samp_ratio[i] << 4) | (ptr->v_samp_ratio[i]); // sampling ratios sof_data[12 + (i * 3)] = zr36050_tq[i]; // Q table selection } return zr36050_pushit(ptr, ZR050_SOF_IDX, (3 * NO_OF_COMPONENTS) + 10, sof_data); } /* ------------------------------------------------------------------------- */ /* SOS (start of scan) segment depends on the used scan components of each color component */ static int zr36050_set_sos (struct zr36050 *ptr) { char sos_data[16]; // max. size of register set int i; dprintk(3, "%s: write SOS\n", ptr->name); sos_data[0] = 0xff; sos_data[1] = 0xda; sos_data[2] = 0x00; sos_data[3] = 2 + 1 + (2 * NO_OF_COMPONENTS) + 3; sos_data[4] = NO_OF_COMPONENTS; for (i = 0; i < NO_OF_COMPONENTS; i++) { sos_data[5 + (i * 2)] = i; // index sos_data[6 + (i * 2)] = (zr36050_td[i] << 4) | zr36050_ta[i]; // AC/DC tbl.sel. } sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 2] = 00; // scan start sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 3] = 0x3F; sos_data[2 + 1 + (2 * NO_OF_COMPONENTS) + 4] = 00; return zr36050_pushit(ptr, ZR050_SOS1_IDX, 4 + 1 + (2 * NO_OF_COMPONENTS) + 3, sos_data); } /* ------------------------------------------------------------------------- */ /* DRI (define restart interval) */ static int zr36050_set_dri (struct zr36050 *ptr) { char dri_data[6]; // max. 
size of register set dprintk(3, "%s: write DRI\n", ptr->name); dri_data[0] = 0xff; dri_data[1] = 0xdd; dri_data[2] = 0x00; dri_data[3] = 0x04; dri_data[4] = ptr->dri >> 8; dri_data[5] = ptr->dri & 0xff; return zr36050_pushit(ptr, ZR050_DRI_IDX, 6, dri_data); } /* ========================================================================= Setup function: Setup compression/decompression of Zoran's JPEG processor ( see also zoran 36050 manual ) ... sorry for the spaghetti code ... ========================================================================= */ static void zr36050_init (struct zr36050 *ptr) { int sum = 0; long bitcnt, tmp; if (ptr->mode == CODEC_DO_COMPRESSION) { dprintk(2, "%s: COMPRESSION SETUP\n", ptr->name); /* 050 communicates with 057 in master mode */ zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR); /* encoding table preload for compression */ zr36050_write(ptr, ZR050_MODE, ZR050_MO_COMP | ZR050_MO_TLM); zr36050_write(ptr, ZR050_OPTIONS, 0); /* disable all IRQs */ zr36050_write(ptr, ZR050_INT_REQ_0, 0); zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1 /* volume control settings */ /*zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol);*/ zr36050_write(ptr, ZR050_SF_HI, ptr->scalefact >> 8); zr36050_write(ptr, ZR050_SF_LO, ptr->scalefact & 0xff); zr36050_write(ptr, ZR050_AF_HI, 0xff); zr36050_write(ptr, ZR050_AF_M, 0xff); zr36050_write(ptr, ZR050_AF_LO, 0xff); /* setup the variable jpeg tables */ sum += zr36050_set_sof(ptr); sum += zr36050_set_sos(ptr); sum += zr36050_set_dri(ptr); /* setup the fixed jpeg tables - maybe variable, though - * (see table init section above) */ dprintk(3, "%s: write DQT, DHT, APP\n", ptr->name); sum += zr36050_pushit(ptr, ZR050_DQT_IDX, sizeof(zr36050_dqt), zr36050_dqt); sum += zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht), zr36050_dht); zr36050_write(ptr, ZR050_APP_IDX, 0xff); zr36050_write(ptr, ZR050_APP_IDX + 1, 0xe0 + ptr->app.appn); zr36050_write(ptr, ZR050_APP_IDX + 2, 0x00); zr36050_write(ptr, 
ZR050_APP_IDX + 3, ptr->app.len + 2); sum += zr36050_pushit(ptr, ZR050_APP_IDX + 4, 60, ptr->app.data) + 4; zr36050_write(ptr, ZR050_COM_IDX, 0xff); zr36050_write(ptr, ZR050_COM_IDX + 1, 0xfe); zr36050_write(ptr, ZR050_COM_IDX + 2, 0x00); zr36050_write(ptr, ZR050_COM_IDX + 3, ptr->com.len + 2); sum += zr36050_pushit(ptr, ZR050_COM_IDX + 4, 60, ptr->com.data) + 4; /* do the internal huffman table preload */ zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI); zr36050_write(ptr, ZR050_GO, 1); // launch codec zr36050_wait_end(ptr); dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status1); if ((ptr->status1 & 0x4) == 0) { dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! } /* setup misc. data for compression (target code sizes) */ /* size of compressed code to reach without header data */ sum = ptr->real_code_vol - sum; bitcnt = sum << 3; /* need the size in bits */ tmp = bitcnt >> 16; dprintk(3, "%s: code: csize=%d, tot=%d, bit=%ld, highbits=%ld\n", ptr->name, sum, ptr->real_code_vol, bitcnt, tmp); zr36050_write(ptr, ZR050_TCV_NET_HI, tmp >> 8); zr36050_write(ptr, ZR050_TCV_NET_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36050_write(ptr, ZR050_TCV_NET_ML, tmp >> 8); zr36050_write(ptr, ZR050_TCV_NET_LO, tmp & 0xff); bitcnt -= bitcnt >> 7; // bits without stuffing bitcnt -= ((bitcnt * 5) >> 6); // bits without eob tmp = bitcnt >> 16; dprintk(3, "%s: code: nettobit=%ld, highnettobits=%ld\n", ptr->name, bitcnt, tmp); zr36050_write(ptr, ZR050_TCV_DATA_HI, tmp >> 8); zr36050_write(ptr, ZR050_TCV_DATA_MH, tmp & 0xff); tmp = bitcnt & 0xffff; zr36050_write(ptr, ZR050_TCV_DATA_ML, tmp >> 8); zr36050_write(ptr, ZR050_TCV_DATA_LO, tmp & 0xff); /* compression setup with or without bitrate control */ zr36050_write(ptr, ZR050_MODE, ZR050_MO_COMP | ZR050_MO_PASS2 | (ptr->bitrate_ctrl ? 
ZR050_MO_BRC : 0)); /* this headers seem to deliver "valid AVI" jpeg frames */ zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DQT | ZR050_ME_DHT | ((ptr->app.len > 0) ? ZR050_ME_APP : 0) | ((ptr->com.len > 0) ? ZR050_ME_COM : 0)); } else { dprintk(2, "%s: EXPANSION SETUP\n", ptr->name); /* 050 communicates with 055 in master mode */ zr36050_write(ptr, ZR050_HARDWARE, ZR050_HW_MSTR | ZR050_HW_CFIS_2_CLK); /* encoding table preload */ zr36050_write(ptr, ZR050_MODE, ZR050_MO_TLM); /* disable all IRQs */ zr36050_write(ptr, ZR050_INT_REQ_0, 0); zr36050_write(ptr, ZR050_INT_REQ_1, 3); // low 2 bits always 1 dprintk(3, "%s: write DHT\n", ptr->name); zr36050_pushit(ptr, ZR050_DHT_IDX, sizeof(zr36050_dht), zr36050_dht); /* do the internal huffman table preload */ zr36050_write(ptr, ZR050_MARKERS_EN, ZR050_ME_DHTI); zr36050_write(ptr, ZR050_GO, 1); // launch codec zr36050_wait_end(ptr); dprintk(2, "%s: Status after table preload: 0x%02x\n", ptr->name, ptr->status1); if ((ptr->status1 & 0x4) == 0) { dprintk(1, KERN_ERR "%s: init aborted!\n", ptr->name); return; // something is wrong, its timed out!!!! } /* setup misc. 
data for expansion */ zr36050_write(ptr, ZR050_MODE, 0); zr36050_write(ptr, ZR050_MARKERS_EN, 0); } /* adr on selected, to allow GO from master */ zr36050_read(ptr, 0); } /* ========================================================================= CODEC API FUNCTIONS this functions are accessed by the master via the API structure ========================================================================= */ /* set compression/expansion mode and launches codec - this should be the last call from the master before starting processing */ static int zr36050_set_mode (struct videocodec *codec, int mode) { struct zr36050 *ptr = (struct zr36050 *) codec->data; dprintk(2, "%s: set_mode %d call\n", ptr->name, mode); if ((mode != CODEC_DO_EXPANSION) && (mode != CODEC_DO_COMPRESSION)) return -EINVAL; ptr->mode = mode; zr36050_init(ptr); return 0; } /* set picture size (norm is ignored as the codec doesn't know about it) */ static int zr36050_set_video (struct videocodec *codec, struct tvnorm *norm, struct vfe_settings *cap, struct vfe_polarity *pol) { struct zr36050 *ptr = (struct zr36050 *) codec->data; int size; dprintk(2, "%s: set_video %d.%d, %d/%d-%dx%d (0x%x) q%d call\n", ptr->name, norm->HStart, norm->VStart, cap->x, cap->y, cap->width, cap->height, cap->decimation, cap->quality); /* if () return -EINVAL; * trust the master driver that it knows what it does - so * we allow invalid startx/y and norm for now ... 
*/ ptr->width = cap->width / (cap->decimation & 0xff); ptr->height = cap->height / ((cap->decimation >> 8) & 0xff); /* (KM) JPEG quality */ size = ptr->width * ptr->height; size *= 16; /* size in bits */ /* apply quality setting */ size = size * cap->quality / 200; /* Minimum: 1kb */ if (size < 8192) size = 8192; /* Maximum: 7/8 of code buffer */ if (size > ptr->total_code_vol * 7) size = ptr->total_code_vol * 7; ptr->real_code_vol = size >> 3; /* in bytes */ /* Set max_block_vol here (previously in zr36050_init, moved * here for consistency with zr36060 code */ zr36050_write(ptr, ZR050_MBCV, ptr->max_block_vol); return 0; } /* additional control functions */ static int zr36050_control (struct videocodec *codec, int type, int size, void *data) { struct zr36050 *ptr = (struct zr36050 *) codec->data; int *ival = (int *) data; dprintk(2, "%s: control %d call with %d byte\n", ptr->name, type, size); switch (type) { case CODEC_G_STATUS: /* get last status */ if (size != sizeof(int)) return -EFAULT; zr36050_read_status1(ptr); *ival = ptr->status1; break; case CODEC_G_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; *ival = CODEC_MODE_BJPG; break; case CODEC_S_CODEC_MODE: if (size != sizeof(int)) return -EFAULT; if (*ival != CODEC_MODE_BJPG) return -EINVAL; /* not needed, do nothing */ return 0; case CODEC_G_VFE: case CODEC_S_VFE: /* not needed, do nothing */ return 0; case CODEC_S_MMAP: /* not available, give an error */ return -ENXIO; case CODEC_G_JPEG_TDS_BYTE: /* get target volume in byte */ if (size != sizeof(int)) return -EFAULT; *ival = ptr->total_code_vol; break; case CODEC_S_JPEG_TDS_BYTE: /* get target volume in byte */ if (size != sizeof(int)) return -EFAULT; ptr->total_code_vol = *ival; /* (Kieran Morrissey) * code copied from zr36060.c to ensure proper bitrate */ ptr->real_code_vol = (ptr->total_code_vol * 6) >> 3; break; case CODEC_G_JPEG_SCALE: /* get scaling factor */ if (size != sizeof(int)) return -EFAULT; *ival = zr36050_read_scalefactor(ptr); 
break; case CODEC_S_JPEG_SCALE: /* set scaling factor */ if (size != sizeof(int)) return -EFAULT; ptr->scalefact = *ival; break; case CODEC_G_JPEG_APP_DATA: { /* get appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; *app = ptr->app; break; } case CODEC_S_JPEG_APP_DATA: { /* set appn marker data */ struct jpeg_app_marker *app = data; if (size != sizeof(struct jpeg_app_marker)) return -EFAULT; ptr->app = *app; break; } case CODEC_G_JPEG_COM_DATA: { /* get comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct jpeg_com_marker)) return -EFAULT; *com = ptr->com; break; } case CODEC_S_JPEG_COM_DATA: { /* set comment marker data */ struct jpeg_com_marker *com = data; if (size != sizeof(struct jpeg_com_marker)) return -EFAULT; ptr->com = *com; break; } default: return -EINVAL; } return size; } /* ========================================================================= Exit and unregister function: Deinitializes Zoran's JPEG processor ========================================================================= */ static int zr36050_unset (struct videocodec *codec) { struct zr36050 *ptr = codec->data; if (ptr) { /* do wee need some codec deinit here, too ???? */ dprintk(1, "%s: finished codec #%d\n", ptr->name, ptr->num); kfree(ptr); codec->data = NULL; zr36050_codecs--; return 0; } return -EFAULT; } /* ========================================================================= Setup and registry function: Initializes Zoran's JPEG processor Also sets pixel size, average code size, mode (compr./decompr.) 
(the given size is determined by the processor with the video interface) ========================================================================= */ static int zr36050_setup (struct videocodec *codec) { struct zr36050 *ptr; int res; dprintk(2, "zr36050: initializing MJPEG subsystem #%d.\n", zr36050_codecs); if (zr36050_codecs == MAX_CODECS) { dprintk(1, KERN_ERR "zr36050: Can't attach more codecs!\n"); return -ENOSPC; } //mem structure init codec->data = ptr = kzalloc(sizeof(struct zr36050), GFP_KERNEL); if (NULL == ptr) { dprintk(1, KERN_ERR "zr36050: Can't get enough memory!\n"); return -ENOMEM; } snprintf(ptr->name, sizeof(ptr->name), "zr36050[%d]", zr36050_codecs); ptr->num = zr36050_codecs++; ptr->codec = codec; //testing res = zr36050_basic_test(ptr); if (res < 0) { zr36050_unset(codec); return res; } //final setup memcpy(ptr->h_samp_ratio, zr36050_decimation_h, 8); memcpy(ptr->v_samp_ratio, zr36050_decimation_v, 8); ptr->bitrate_ctrl = 0; /* 0 or 1 - fixed file size flag * (what is the difference?) 
*/ ptr->mode = CODEC_DO_COMPRESSION; ptr->width = 384; ptr->height = 288; ptr->total_code_vol = 16000; ptr->max_block_vol = 240; ptr->scalefact = 0x100; ptr->dri = 1; /* no app/com marker by default */ ptr->app.appn = 0; ptr->app.len = 0; ptr->com.len = 0; zr36050_init(ptr); dprintk(1, KERN_INFO "%s: codec attached and running\n", ptr->name); return 0; } static const struct videocodec zr36050_codec = { .owner = THIS_MODULE, .name = "zr36050", .magic = 0L, // magic not used .flags = CODEC_FLAG_JPEG | CODEC_FLAG_HARDWARE | CODEC_FLAG_ENCODER | CODEC_FLAG_DECODER, .type = CODEC_TYPE_ZR36050, .setup = zr36050_setup, // functionality .unset = zr36050_unset, .set_mode = zr36050_set_mode, .set_video = zr36050_set_video, .control = zr36050_control, // others are not used }; /* ========================================================================= HOOK IN DRIVER AS KERNEL MODULE ========================================================================= */ static int __init zr36050_init_module (void) { //dprintk(1, "ZR36050 driver %s\n",ZR050_VERSION); zr36050_codecs = 0; return videocodec_register(&zr36050_codec); } static void __exit zr36050_cleanup_module (void) { if (zr36050_codecs) { dprintk(1, "zr36050: something's wrong - %d codecs left somehow.\n", zr36050_codecs); } videocodec_unregister(&zr36050_codec); } module_init(zr36050_init_module); module_exit(zr36050_cleanup_module); MODULE_AUTHOR("Wolfgang Scherr <scherr@net4you.at>"); MODULE_DESCRIPTION("Driver module for ZR36050 jpeg processors " ZR050_VERSION); MODULE_LICENSE("GPL");
gpl-2.0
NoelMacwan/android_kernel_sony_msm8226
drivers/hid/usbhid/hid-core.c
203
43033
/* * USB HID support for Linux * * Copyright (c) 1999 Andreas Gal * Copyright (c) 2000-2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2005 Michael Haboustak <mike-@cinci.rr.com> for Concept2, Inc * Copyright (c) 2007-2008 Oliver Neukum * Copyright (c) 2006-2010 Jiri Kosina * Copyright 2011,2012 Sony Corporation * Copyright (c) 2012 Sony Mobile Communications AB. */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/mm.h> #include <linux/mutex.h> #include <linux/spinlock.h> #include <asm/unaligned.h> #include <asm/byteorder.h> #include <linux/input.h> #include <linux/wait.h> #include <linux/workqueue.h> #include <linux/usb.h> #include <linux/hid.h> #include <linux/hiddev.h> #include <linux/hid-debug.h> #include <linux/hidraw.h> #include "usbhid.h" /* * Version Information */ #define DRIVER_DESC "USB HID core driver" #define DRIVER_LICENSE "GPL" /* * Module parameters. */ static unsigned int hid_mousepoll_interval; module_param_named(mousepoll, hid_mousepoll_interval, uint, 0644); MODULE_PARM_DESC(mousepoll, "Polling interval of mice"); static unsigned int ignoreled; module_param_named(ignoreled, ignoreled, uint, 0644); MODULE_PARM_DESC(ignoreled, "Autosuspend with active leds"); /* Quirks specified at module load time */ static char *quirks_param[MAX_USBHID_BOOT_QUIRKS] = { [ 0 ... (MAX_USBHID_BOOT_QUIRKS - 1) ] = NULL }; module_param_array_named(quirks, quirks_param, charp, NULL, 0444); MODULE_PARM_DESC(quirks, "Add/modify USB HID quirks by specifying " " quirks=vendorID:productID:quirks" " where vendorID, productID, and quirks are all in" " 0x-prefixed hex"); /* * Input submission and I/O error handler. 
*/

/* Serializes usbhid_open()/usbhid_close() so hid->open bookkeeping and the
 * input URB start/stop happen exactly once per transition. */
static DEFINE_MUTEX(hid_open_mut);

static void hid_io_error(struct hid_device *hid);
static int hid_submit_out(struct hid_device *hid);
static int hid_submit_ctrl(struct hid_device *hid);
static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid);

/*
 * Start up the input URB.
 *
 * Submits the interrupt-IN URB unless the device is closed (hid->open == 0),
 * disconnected, idled for suspend (HID_REPORTED_IDLE), or the URB is already
 * in flight (HID_IN_RUNNING).  Returns the usb_submit_urb() status; 0 is also
 * returned when nothing needed to be submitted.
 */
static int hid_start_in(struct hid_device *hid)
{
	unsigned long flags;
	int rc = 0;
	struct usbhid_device *usbhid = hid->driver_data;

	spin_lock_irqsave(&usbhid->lock, flags);
	if (hid->open > 0 &&
			!test_bit(HID_DISCONNECTED, &usbhid->iofl) &&
			!test_bit(HID_REPORTED_IDLE, &usbhid->iofl) &&
			!test_and_set_bit(HID_IN_RUNNING, &usbhid->iofl)) {
		rc = usb_submit_urb(usbhid->urbin, GFP_ATOMIC);
		if (rc != 0)
			/* Submission failed; drop the flag so a retry can resubmit. */
			clear_bit(HID_IN_RUNNING, &usbhid->iofl);
	}
	spin_unlock_irqrestore(&usbhid->lock, flags);
	return rc;
}

/* I/O retry timer routine: try to restart input, escalate on failure. */
static void hid_retry_timeout(unsigned long _hid)
{
	struct hid_device *hid = (struct hid_device *) _hid;
	struct usbhid_device *usbhid = hid->driver_data;

	dev_dbg(&usbhid->intf->dev, "retrying intr urb\n");
	if (hid_start_in(hid))
		hid_io_error(hid);
}

/* Workqueue routine to reset the device or clear a halt */
static void hid_reset(struct work_struct *work)
{
	struct usbhid_device *usbhid =
		container_of(work, struct usbhid_device, reset_work);
	struct hid_device *hid = usbhid->hid;
	int rc = 0;

	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl)) {
		/* Endpoint stalled (-EPIPE): clear the halt and restart input. */
		dev_dbg(&usbhid->intf->dev, "clear halt\n");
		rc = usb_clear_halt(hid_to_usb_dev(hid), usbhid->urbin->pipe);
		clear_bit(HID_CLEAR_HALT, &usbhid->iofl);
		hid_start_in(hid);
	} else if (test_bit(HID_RESET_PENDING, &usbhid->iofl)) {
		/* Retries exhausted: perform a full port reset. */
		dev_dbg(&usbhid->intf->dev, "resetting device\n");
		rc = usb_lock_device_for_reset(hid_to_usb_dev(hid), usbhid->intf);
		if (rc == 0) {
			rc = usb_reset_device(hid_to_usb_dev(hid));
			usb_unlock_device(hid_to_usb_dev(hid));
		}
		clear_bit(HID_RESET_PENDING, &usbhid->iofl);
	}

	switch (rc) {
	case 0:
		/* Reset succeeded but input still stopped: re-enter error path. */
		if (!test_bit(HID_IN_RUNNING, &usbhid->iofl))
			hid_io_error(hid);
		break;
	default:
		hid_err(hid, "can't reset device, %s-%s/input%d, status %d\n",
			hid_to_usb_dev(hid)->bus->bus_name,
			hid_to_usb_dev(hid)->devpath,
			usbhid->ifnum, rc);
		/* FALLTHROUGH */
	case -EHOSTUNREACH:
	case -ENODEV:
	case -EINTR:
		/* Device is gone or reset was interrupted: nothing to report. */
		break;
	}
}

/*
 * Main I/O error handler.
 *
 * Retries input with exponential backoff (13, 26, 52, 104, 104... ms); once
 * retries have run for ~1 second without success, escalates to a full device
 * reset via the reset workqueue.  Called with the URB machinery stopped.
 */
static void hid_io_error(struct hid_device *hid)
{
	unsigned long flags;
	struct usbhid_device *usbhid = hid->driver_data;

	spin_lock_irqsave(&usbhid->lock, flags);

	/* Stop when disconnected */
	if (test_bit(HID_DISCONNECTED, &usbhid->iofl))
		goto done;

	/* If it has been a while since the last error, we'll assume
	 * this a brand new error and reset the retry timeout. */
	if (time_after(jiffies, usbhid->stop_retry + HZ/2))
		usbhid->retry_delay = 0;

	/* When an error occurs, retry at increasing intervals */
	if (usbhid->retry_delay == 0) {
		usbhid->retry_delay = 13;	/* Then 26, 52, 104, 104, ... */
		usbhid->stop_retry = jiffies + msecs_to_jiffies(1000);
	} else if (usbhid->retry_delay < 100)
		usbhid->retry_delay *= 2;

	if (time_after(jiffies, usbhid->stop_retry)) {

		/* Retries failed, so do a port reset */
		if (!test_and_set_bit(HID_RESET_PENDING, &usbhid->iofl)) {
			schedule_work(&usbhid->reset_work);
			goto done;
		}
	}

	mod_timer(&usbhid->io_retry,
			jiffies + msecs_to_jiffies(usbhid->retry_delay));
done:
	spin_unlock_irqrestore(&usbhid->lock, flags);
}

/* Tell the USB autosuspend core the device was just active. */
static void usbhid_mark_busy(struct usbhid_device *usbhid)
{
	struct usb_interface *intf = usbhid->intf;

	usb_mark_last_busy(interface_to_usbdev(intf));
}

/*
 * Kick the interrupt-OUT queue back into motion (used after resume).
 * Returns non-zero if there was queued work, 0 if the queue was empty,
 * or a negative autopm error.
 */
static int usbhid_restart_out_queue(struct usbhid_device *usbhid)
{
	struct hid_device *hid = usb_get_intfdata(usbhid->intf);
	int kicked;
	int r;

	if (!hid)
		return 0;

	if ((kicked = (usbhid->outhead != usbhid->outtail))) {
		dbg("Kicking head %d tail %d", usbhid->outhead, usbhid->outtail);

		/* Take an async PM reference; released when the queue drains. */
		r = usb_autopm_get_interface_async(usbhid->intf);
		if (r < 0)
			return r;
		/* Asynchronously flush queue. */
		set_bit(HID_OUT_RUNNING, &usbhid->iofl);
		if (hid_submit_out(hid)) {
			clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
			usb_autopm_put_interface_async(usbhid->intf);
		}
		wake_up(&usbhid->wait);
	}
	return kicked;
}

/* Same as usbhid_restart_out_queue(), but for the control queue. */
static int usbhid_restart_ctrl_queue(struct usbhid_device *usbhid)
{
	struct hid_device *hid = usb_get_intfdata(usbhid->intf);
	int kicked;
	int r;

	WARN_ON(hid == NULL);
	if (!hid)
		return 0;

	if ((kicked = (usbhid->ctrlhead != usbhid->ctrltail))) {
		dbg("Kicking head %d tail %d", usbhid->ctrlhead, usbhid->ctrltail);

		r = usb_autopm_get_interface_async(usbhid->intf);
		if (r < 0)
			return r;
		/* Asynchronously flush queue. */
		set_bit(HID_CTRL_RUNNING, &usbhid->iofl);
		if (hid_submit_ctrl(hid)) {
			clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
			usb_autopm_put_interface_async(usbhid->intf);
		}
		wake_up(&usbhid->wait);
	}
	return kicked;
}

/*
 * Input interrupt completion handler.
 */
static void hid_irq_in(struct urb *urb)
{
	struct hid_device *hid = urb->context;
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	switch (urb->status) {
	case 0:			/* success */
		usbhid_mark_busy(usbhid);
		usbhid->retry_delay = 0;
		hid_input_report(urb->context, HID_INPUT_REPORT,
				 urb->transfer_buffer,
				 urb->actual_length, 1);
		/*
		 * autosuspend refused while keys are pressed
		 * because most keyboards don't wake up when
		 * a key is released
		 */
		if (hid_check_keys_pressed(hid))
			set_bit(HID_KEYS_PRESSED, &usbhid->iofl);
		else
			clear_bit(HID_KEYS_PRESSED, &usbhid->iofl);
		break;
	case -EPIPE:		/* stall */
		usbhid_mark_busy(usbhid);
		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
		set_bit(HID_CLEAR_HALT, &usbhid->iofl);
		schedule_work(&usbhid->reset_work);
		return;
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -ESHUTDOWN:	/* unplug */
		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
		return;
	case -EILSEQ:		/* protocol error or unplug */
	case -EPROTO:		/* protocol error or unplug */
	case -ETIME:		/* protocol error or unplug */
	case -ETIMEDOUT:	/* Should never happen, but... */
		usbhid_mark_busy(usbhid);
		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
		hid_io_error(hid);
		return;
	default:		/* error */
		hid_warn(urb->dev, "input irq status %d received\n",
			 urb->status);
	}

	/* Resubmit to keep the interrupt-IN pipeline running. */
	status = usb_submit_urb(urb, GFP_ATOMIC);
	if (status) {
		clear_bit(HID_IN_RUNNING, &usbhid->iofl);
		if (status != -EPERM) {
			hid_err(hid, "can't resubmit intr, %s-%s/input%d, status %d\n",
				hid_to_usb_dev(hid)->bus->bus_name,
				hid_to_usb_dev(hid)->devpath,
				usbhid->ifnum, status);
			hid_io_error(hid);
		}
	}
}

/*
 * Submit the report at the tail of the output FIFO on the interrupt-OUT URB.
 * Must be called with HID_OUT_RUNNING owned by the caller.
 *
 * NOTE(review): raw_report is kfree()d before the submit result is known and
 * the memcpy length is derived from the report descriptor without being
 * clamped to usbhid->bufsize -- both were hardened in later upstream
 * kernels; confirm against upstream before relying on this path.
 */
static int hid_submit_out(struct hid_device *hid)
{
	struct hid_report *report;
	char *raw_report;
	struct usbhid_device *usbhid = hid->driver_data;
	int r;

	report = usbhid->out[usbhid->outtail].report;
	raw_report = usbhid->out[usbhid->outtail].raw_report;

	/* Wire length: report bits rounded up to bytes, +1 for a report ID. */
	usbhid->urbout->transfer_buffer_length = ((report->size - 1) >> 3) +
						 1 + (report->id > 0);
	usbhid->urbout->dev = hid_to_usb_dev(hid);
	memcpy(usbhid->outbuf, raw_report,
	       usbhid->urbout->transfer_buffer_length);
	kfree(raw_report);

	dbg_hid("submitting out urb\n");

	r = usb_submit_urb(usbhid->urbout, GFP_ATOMIC);
	if (r < 0) {
		hid_err(hid, "usb_submit_urb(out) failed: %d\n", r);
		return r;
	}
	/* Timestamp used by __usbhid_submit_report() to detect a stuck URB. */
	usbhid->last_out = jiffies;
	return 0;
}

/*
 * Submit the request at the tail of the control FIFO as a class-specific
 * Set_Report (OUT) or Get_Report (IN) control transfer.
 * Must be called with HID_CTRL_RUNNING owned by the caller.
 */
static int hid_submit_ctrl(struct hid_device *hid)
{
	struct hid_report *report;
	unsigned char dir;
	char *raw_report;
	int len, r;
	struct usbhid_device *usbhid = hid->driver_data;

	report = usbhid->ctrl[usbhid->ctrltail].report;
	raw_report = usbhid->ctrl[usbhid->ctrltail].raw_report;
	dir = usbhid->ctrl[usbhid->ctrltail].dir;

	len = ((report->size - 1) >> 3) + 1 + (report->id > 0);
	if (dir == USB_DIR_OUT) {
		usbhid->urbctrl->pipe = usb_sndctrlpipe(hid_to_usb_dev(hid), 0);
		usbhid->urbctrl->transfer_buffer_length = len;
		memcpy(usbhid->ctrlbuf, raw_report, len);
		kfree(raw_report);
	} else {
		int maxpacket, padlen;

		usbhid->urbctrl->pipe = usb_rcvctrlpipe(hid_to_usb_dev(hid), 0);
		maxpacket = usb_maxpacket(hid_to_usb_dev(hid),
					  usbhid->urbctrl->pipe, 0);
		if (maxpacket > 0) {
			/* Pad IN length to a whole number of packets, capped
			 * at the allocated buffer size, so a babbling device
			 * cannot overrun the buffer. */
			padlen = DIV_ROUND_UP(len, maxpacket);
			padlen *= maxpacket;
			if (padlen > usbhid->bufsize)
				padlen = usbhid->bufsize;
		} else
			padlen = 0;
		usbhid->urbctrl->transfer_buffer_length = padlen;
	}
	usbhid->urbctrl->dev = hid_to_usb_dev(hid);

	/* Build the HID class setup packet (USB HID 1.11, ch. 7.2). */
	usbhid->cr->bRequestType = USB_TYPE_CLASS | USB_RECIP_INTERFACE | dir;
	usbhid->cr->bRequest = (dir == USB_DIR_OUT) ?
			       HID_REQ_SET_REPORT : HID_REQ_GET_REPORT;
	usbhid->cr->wValue = cpu_to_le16(((report->type + 1) << 8) |
					 report->id);
	usbhid->cr->wIndex = cpu_to_le16(usbhid->ifnum);
	usbhid->cr->wLength = cpu_to_le16(len);

	dbg_hid("submitting ctrl urb: %s wValue=0x%04x wIndex=0x%04x wLength=%u\n",
		usbhid->cr->bRequest == HID_REQ_SET_REPORT ? "Set_Report" :
							     "Get_Report",
		usbhid->cr->wValue, usbhid->cr->wIndex, usbhid->cr->wLength);

	r = usb_submit_urb(usbhid->urbctrl, GFP_ATOMIC);
	if (r < 0) {
		hid_err(hid, "usb_submit_urb(ctrl) failed: %d\n", r);
		return r;
	}
	usbhid->last_ctrl = jiffies;
	return 0;
}

/*
 * Output interrupt completion handler.
 *
 * Advances the output FIFO tail (or drains it entirely on unplug) and chains
 * the next queued report; when the queue empties, drops HID_OUT_RUNNING and
 * the autopm reference taken when the queue was started.
 */
static void hid_irq_out(struct urb *urb)
{
	struct hid_device *hid = urb->context;
	struct usbhid_device *usbhid = hid->driver_data;
	unsigned long flags;
	int unplug = 0;

	switch (urb->status) {
	case 0:			/* success */
		break;
	case -ESHUTDOWN:	/* unplug */
		unplug = 1;
		/* fallthrough */
	case -EILSEQ:		/* protocol error or unplug */
	case -EPROTO:		/* protocol error or unplug */
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
		break;
	default:		/* error */
		hid_warn(urb->dev, "output irq status %d received\n",
			 urb->status);
	}

	spin_lock_irqsave(&usbhid->lock, flags);

	if (unplug)
		usbhid->outtail = usbhid->outhead;
	else
		usbhid->outtail = (usbhid->outtail + 1) &
				  (HID_OUTPUT_FIFO_SIZE - 1);

	if (usbhid->outhead != usbhid->outtail && !hid_submit_out(hid)) {
		/* Successfully submitted next urb in queue */
		spin_unlock_irqrestore(&usbhid->lock, flags);
		return;
	}

	clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
	spin_unlock_irqrestore(&usbhid->lock, flags);
	usb_autopm_put_interface_async(usbhid->intf);
	wake_up(&usbhid->wait);
}

/*
 * Control pipe completion handler.
*/
static void hid_ctrl(struct urb *urb)
{
	struct hid_device *hid = urb->context;
	struct usbhid_device *usbhid = hid->driver_data;
	int unplug = 0, status = urb->status;

	spin_lock(&usbhid->lock);

	switch (status) {
	case 0:			/* success */
		/* Get_Report completed: feed the data to the HID core. */
		if (usbhid->ctrl[usbhid->ctrltail].dir == USB_DIR_IN)
			hid_input_report(urb->context,
				usbhid->ctrl[usbhid->ctrltail].report->type,
				urb->transfer_buffer, urb->actual_length, 0);
		break;
	case -ESHUTDOWN:	/* unplug */
		unplug = 1;
		/* fallthrough */
	case -EILSEQ:		/* protocol error or unplug */
	case -EPROTO:		/* protocol error or unplug */
	case -ECONNRESET:	/* unlink */
	case -ENOENT:
	case -EPIPE:		/* report not available */
		break;
	default:		/* error */
		hid_warn(urb->dev, "ctrl urb status %d received\n", status);
	}

	if (unplug)
		usbhid->ctrltail = usbhid->ctrlhead;
	else
		usbhid->ctrltail = (usbhid->ctrltail + 1) &
				   (HID_CONTROL_FIFO_SIZE - 1);

	if (usbhid->ctrlhead != usbhid->ctrltail && !hid_submit_ctrl(hid)) {
		/* Successfully submitted next urb in queue */
		spin_unlock(&usbhid->lock);
		return;
	}

	/* Queue drained (or resubmit failed): release flag + PM reference. */
	clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
	spin_unlock(&usbhid->lock);
	usb_autopm_put_interface_async(usbhid->intf);
	wake_up(&usbhid->wait);
}

/*
 * Queue a report for transmission and start the appropriate queue if idle.
 * Output reports go to the interrupt-OUT FIFO when an OUT endpoint exists;
 * everything else goes to the control FIFO.  Caller must hold usbhid->lock.
 */
static void __usbhid_submit_report(struct hid_device *hid,
				   struct hid_report *report,
				   unsigned char dir)
{
	int head;
	struct usbhid_device *usbhid = hid->driver_data;
	/* Wire length: report bits rounded up to bytes, +1 for a report ID. */
	int len = ((report->size - 1) >> 3) + 1 + (report->id > 0);

	if ((hid->quirks & HID_QUIRK_NOGET) && dir == USB_DIR_IN)
		return;

	if (usbhid->urbout && dir == USB_DIR_OUT &&
	    report->type == HID_OUTPUT_REPORT) {
		/* One slot is sacrificed to distinguish full from empty. */
		if ((head = (usbhid->outhead + 1) &
			    (HID_OUTPUT_FIFO_SIZE - 1)) == usbhid->outtail) {
			hid_warn(hid, "output queue full\n");
			return;
		}

		usbhid->out[usbhid->outhead].raw_report =
			kmalloc(len, GFP_ATOMIC);
		if (!usbhid->out[usbhid->outhead].raw_report) {
			hid_warn(hid, "output queueing failed\n");
			return;
		}
		hid_output_report(report,
				  usbhid->out[usbhid->outhead].raw_report);
		usbhid->out[usbhid->outhead].report = report;
		usbhid->outhead = head;

		/* Try to awake from autosuspend... */
		if (usb_autopm_get_interface_async(usbhid->intf) < 0)
			return;

		/*
		 * But if still suspended, leave urb enqueued, don't submit.
		 * Submission will occur if/when resume() drains the queue.
		 */
		if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
			return;

		if (!test_and_set_bit(HID_OUT_RUNNING, &usbhid->iofl)) {
			if (hid_submit_out(hid)) {
				clear_bit(HID_OUT_RUNNING, &usbhid->iofl);
				usb_autopm_put_interface_async(usbhid->intf);
			}
			wake_up(&usbhid->wait);
		} else {
			/*
			 * the queue is known to run
			 * but an earlier request may be stuck
			 * we may need to time out
			 * no race because this is called under
			 * spinlock
			 */
			if (time_after(jiffies, usbhid->last_out + HZ * 5))
				usb_unlink_urb(usbhid->urbout);
		}
		return;
	}

	if ((head = (usbhid->ctrlhead + 1) &
		    (HID_CONTROL_FIFO_SIZE - 1)) == usbhid->ctrltail) {
		hid_warn(hid, "control queue full\n");
		return;
	}

	if (dir == USB_DIR_OUT) {
		usbhid->ctrl[usbhid->ctrlhead].raw_report =
			kmalloc(len, GFP_ATOMIC);
		if (!usbhid->ctrl[usbhid->ctrlhead].raw_report) {
			hid_warn(hid, "control queueing failed\n");
			return;
		}
		hid_output_report(report,
				  usbhid->ctrl[usbhid->ctrlhead].raw_report);
	}
	usbhid->ctrl[usbhid->ctrlhead].report = report;
	usbhid->ctrl[usbhid->ctrlhead].dir = dir;
	usbhid->ctrlhead = head;

	/* Try to awake from autosuspend... */
	if (usb_autopm_get_interface_async(usbhid->intf) < 0)
		return;

	/*
	 * If already suspended, leave urb enqueued, but don't submit.
	 * Submission will occur if/when resume() drains the queue.
	 */
	if (test_bit(HID_REPORTED_IDLE, &usbhid->iofl))
		return;

	if (!test_and_set_bit(HID_CTRL_RUNNING, &usbhid->iofl)) {
		if (hid_submit_ctrl(hid)) {
			clear_bit(HID_CTRL_RUNNING, &usbhid->iofl);
			usb_autopm_put_interface_async(usbhid->intf);
		}
		wake_up(&usbhid->wait);
	} else {
		/*
		 * the queue is known to run
		 * but an earlier request may be stuck
		 * we may need to time out
		 * no race because this is called under
		 * spinlock
		 */
		if (time_after(jiffies, usbhid->last_ctrl + HZ * 5))
			usb_unlink_urb(usbhid->urbctrl);
	}
}

/* Locked wrapper around __usbhid_submit_report() for external callers. */
void usbhid_submit_report(struct hid_device *hid, struct hid_report *report,
			  unsigned char dir)
{
	struct usbhid_device *usbhid = hid->driver_data;
	unsigned long flags;

	spin_lock_irqsave(&usbhid->lock, flags);
	__usbhid_submit_report(hid, report, dir);
	spin_unlock_irqrestore(&usbhid->lock, flags);
}
EXPORT_SYMBOL_GPL(usbhid_submit_report);

/* Workqueue routine to send requests to change LEDs */
static void hid_led(struct work_struct *work)
{
	struct usbhid_device *usbhid =
		container_of(work, struct usbhid_device, led_work);
	struct hid_device *hid = usbhid->hid;
	struct hid_field *field;
	unsigned long flags;

	field = hidinput_get_led_field(hid);
	if (!field) {
		hid_warn(hid, "LED event field not found\n");
		return;
	}

	spin_lock_irqsave(&usbhid->lock, flags);
	if (!test_bit(HID_DISCONNECTED, &usbhid->iofl)) {
		/* ledcount gates autosuspend in hid_suspend(). */
		usbhid->ledcount = hidinput_count_leds(hid);
		hid_dbg(usbhid->hid, "New ledcount = %u\n", usbhid->ledcount);
		__usbhid_submit_report(hid, field->report, USB_DIR_OUT);
	}
	spin_unlock_irqrestore(&usbhid->lock, flags);
}

/*
 * Input-core event callback: force feedback is delegated to the FF layer,
 * LED changes update the field under the lock and are then flushed by the
 * led workqueue.  Returns 0 on success, -1 for unsupported event types.
 */
static int usb_hidinput_input_event(struct input_dev *dev, unsigned int type,
				    unsigned int code, int value)
{
	struct hid_device *hid = input_get_drvdata(dev);
	struct usbhid_device *usbhid = hid->driver_data;
	struct hid_field *field;
	unsigned long flags;
	int offset;

	if (type == EV_FF)
		return input_ff_event(dev, type, code, value);

	if (type != EV_LED)
		return -1;

	if ((offset = hidinput_find_field(hid, type, code, &field)) == -1) {
		hid_warn(dev, "event field not found\n");
		return -1;
	}

	spin_lock_irqsave(&usbhid->lock, flags);
	hid_set_field(field, offset, value);
	spin_unlock_irqrestore(&usbhid->lock, flags);

	/*
	 * Defer performing requested LED action.
	 * This is more likely gather all LED changes into a single URB.
	 */
	schedule_work(&usbhid->led_work);

	return 0;
}

/*
 * Wait (up to 10s) for both the control and output queues to drain.
 * Returns 0 on success, -1 on timeout.
 */
int usbhid_wait_io(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	if (!wait_event_timeout(usbhid->wait,
				(!test_bit(HID_CTRL_RUNNING, &usbhid->iofl) &&
				!test_bit(HID_OUT_RUNNING, &usbhid->iofl)),
				10*HZ)) {
		dbg_hid("timeout waiting for ctrl or out queue to clear\n");
		return -1;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(usbhid_wait_io);

/* Issue a HID Set_Idle class request (USB HID 1.11, ch. 7.2.4). */
static int hid_set_idle(struct usb_device *dev, int ifnum, int report,
			int idle)
{
	return usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
		HID_REQ_SET_IDLE, USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		(idle << 8) | report,
		ifnum, NULL, 0, USB_CTRL_SET_TIMEOUT);
}

/*
 * Fetch a class descriptor (e.g. the report descriptor), retrying up to
 * 4 times on short or failed reads.  Returns the usb_control_msg() result.
 */
static int hid_get_class_descriptor(struct usb_device *dev, int ifnum,
		unsigned char type, void *buf, int size)
{
	int result, retries = 4;

	memset(buf, 0, size);

	do {
		result = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
				USB_REQ_GET_DESCRIPTOR,
				USB_RECIP_INTERFACE | USB_DIR_IN,
				(type << 8), ifnum, buf, size,
				USB_CTRL_GET_TIMEOUT);
		retries--;
	} while (result < size && retries);
	return result;
}

/*
 * First open starts the input URB and enables remote wakeup.
 * Serialized against usbhid_close() by hid_open_mut.
 * NOTE(review): a usb_autopm_get_interface() failure is reported as -EIO
 * rather than the underlying error code -- confirm callers don't need it.
 */
int usbhid_open(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;
	int res;

	mutex_lock(&hid_open_mut);
	if (!hid->open++) {
		res = usb_autopm_get_interface(usbhid->intf);
		/* the device must be awake to reliably request remote wakeup */
		if (res < 0) {
			hid->open--;
			mutex_unlock(&hid_open_mut);
			return -EIO;
		}
		usbhid->intf->needs_remote_wakeup = 1;
		if (hid_start_in(hid))
			hid_io_error(hid);
		usb_autopm_put_interface(usbhid->intf);
	}
	mutex_unlock(&hid_open_mut);
	return 0;
}

/* Last close stops retries, kills the input URB and drops remote wakeup. */
void usbhid_close(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	mutex_lock(&hid_open_mut);

	/* protecting hid->open to make sure we don't restart
	 * data acquisition due to a resumption we no longer
	 * care about
	 */
	spin_lock_irq(&usbhid->lock);
	if (!--hid->open) {
		spin_unlock_irq(&usbhid->lock);
		hid_cancel_delayed_stuff(usbhid);
		usb_kill_urb(usbhid->urbin);
		usbhid->intf->needs_remote_wakeup = 0;
	} else {
		spin_unlock_irq(&usbhid->lock);
	}
	mutex_unlock(&hid_open_mut);
}

/*
 * Initialize all reports
 *
 * Queues a Get_Report for every input and feature report, then waits for the
 * queues to drain, killing stuck URBs and re-waiting until they do.
 */
void usbhid_init_reports(struct hid_device *hid)
{
	struct hid_report *report;
	struct usbhid_device *usbhid = hid->driver_data;
	int err, ret;

	list_for_each_entry(report,
			    &hid->report_enum[HID_INPUT_REPORT].report_list,
			    list)
		usbhid_submit_report(hid, report, USB_DIR_IN);

	list_for_each_entry(report,
			    &hid->report_enum[HID_FEATURE_REPORT].report_list,
			    list)
		usbhid_submit_report(hid, report, USB_DIR_IN);

	err = 0;
	ret = usbhid_wait_io(hid);
	while (ret) {
		err |= ret;
		if (test_bit(HID_CTRL_RUNNING, &usbhid->iofl))
			usb_kill_urb(usbhid->urbctrl);
		if (test_bit(HID_OUT_RUNNING, &usbhid->iofl))
			usb_kill_urb(usbhid->urbout);
		ret = usbhid_wait_io(hid);
	}

	if (err)
		hid_warn(hid, "timeout initializing reports\n");
}

/*
 * Reset LEDs which BIOS might have left on. For now, just NumLock (0x01).
*/

/*
 * Search the output reports for a usage matching (page, hid_code) before the
 * input subsystem is wired up.  On success stores the field in *pfield and
 * returns the usage index within it; returns -1 if not found.
 */
static int hid_find_field_early(struct hid_device *hid, unsigned int page,
		unsigned int hid_code, struct hid_field **pfield)
{
	struct hid_report *report;
	struct hid_field *field;
	struct hid_usage *usage;
	int i, j;

	list_for_each_entry(report,
			    &hid->report_enum[HID_OUTPUT_REPORT].report_list,
			    list) {
		for (i = 0; i < report->maxfield; i++) {
			field = report->field[i];
			for (j = 0; j < field->maxusage; j++) {
				usage = &field->usage[j];
				if ((usage->hid & HID_USAGE_PAGE) == page &&
				    (usage->hid & 0xFFFF) == hid_code) {
					*pfield = field;
					return j;
				}
			}
		}
	}
	return -1;
}

/* Turn off the NumLock LED (usage 0x01 on the LED page) if present. */
void usbhid_set_leds(struct hid_device *hid)
{
	struct hid_field *field;
	int offset;

	if ((offset = hid_find_field_early(hid, HID_UP_LED, 0x01,
					   &field)) != -1) {
		hid_set_field(field, offset, 0);
		usbhid_submit_report(hid, field->report, USB_DIR_OUT);
	}
}
EXPORT_SYMBOL_GPL(usbhid_set_leds);

/*
 * Traverse the supplied list of reports and find the longest
 */
static void hid_find_max_report(struct hid_device *hid, unsigned int type,
				unsigned int *max)
{
	struct hid_report *report;
	unsigned int size;

	list_for_each_entry(report, &hid->report_enum[type].report_list,
			    list) {
		/* Byte length, +1 for the report ID if reports are numbered. */
		size = ((report->size - 1) >> 3) + 1 +
		       hid->report_enum[type].numbered;
		if (*max < size)
			*max = size;
	}
}

/*
 * Allocate the DMA-coherent in/out/ctrl transfer buffers and the control
 * setup packet.  Returns 0 on success, -1 if any allocation failed (the
 * caller frees via hid_free_buffers(); usb_free_coherent/kfree accept NULL).
 */
static int hid_alloc_buffers(struct usb_device *dev, struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	usbhid->inbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
			&usbhid->inbuf_dma);
	usbhid->outbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
			&usbhid->outbuf_dma);
	usbhid->cr = kmalloc(sizeof(*usbhid->cr), GFP_KERNEL);
	usbhid->ctrlbuf = usb_alloc_coherent(dev, usbhid->bufsize, GFP_KERNEL,
			&usbhid->ctrlbuf_dma);
	if (!usbhid->inbuf || !usbhid->outbuf || !usbhid->cr ||
	    !usbhid->ctrlbuf)
		return -1;

	return 0;
}

/*
 * Synchronous Get_Report over the control pipe (hidraw backend).
 * Returns the number of bytes read (including the report ID byte) or a
 * negative usb_control_msg() error.
 */
static int usbhid_get_raw_report(struct hid_device *hid,
		unsigned char report_number, __u8 *buf, size_t count,
		unsigned char report_type)
{
	struct usbhid_device *usbhid = hid->driver_data;
	struct usb_device *dev = hid_to_usb_dev(hid);
	struct usb_interface *intf = usbhid->intf;
	struct usb_host_interface *interface = intf->cur_altsetting;
	int skipped_report_id = 0;
	int ret;

	/* Byte 0 is the report number. Report data starts at byte 1.*/
	buf[0] = report_number;
	if (report_number == 0x0) {
		/* Offset the return buffer by 1, so that the report ID
		   will remain in byte 0. */
		buf++;
		count--;
		skipped_report_id = 1;
	}
	ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0),
		HID_REQ_GET_REPORT,
		USB_DIR_IN | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
		((report_type + 1) << 8) | report_number,
		interface->desc.bInterfaceNumber, buf, count,
		USB_CTRL_SET_TIMEOUT);

	/* count also the report id */
	if (ret > 0 && skipped_report_id)
		ret++;

	return ret;
}

/*
 * Synchronous raw-report write (hidraw backend).  Output reports go over the
 * interrupt-OUT pipe when one exists; feature reports (and, with
 * CONFIG_HID_SONY_PS3_CTRL_BT, the *_SKIPREPID pseudo-types) use a
 * Set_Report control transfer.  Returns bytes written (including the report
 * ID byte) or a negative error.
 */
static int usbhid_output_raw_report(struct hid_device *hid, __u8 *buf,
		size_t count, unsigned char report_type)
{
	struct usbhid_device *usbhid = hid->driver_data;
	struct usb_device *dev = hid_to_usb_dev(hid);
	struct usb_interface *intf = usbhid->intf;
	struct usb_host_interface *interface = intf->cur_altsetting;
	int ret;

#ifndef CONFIG_HID_SONY_PS3_CTRL_BT
	if (usbhid->urbout && report_type != HID_FEATURE_REPORT) {
#else
	if (usbhid->urbout && report_type != HID_FEATURE_REPORT &&
	    report_type != HID_FEATREP_SKIPREPID &&
	    report_type != HID_OUTREP_SKIPREPID) {
#endif
		int actual_length;
		int skipped_report_id = 0;

		if (buf[0] == 0x0) {
			/* Don't send the Report ID */
			buf++;
			count--;
			skipped_report_id = 1;
		}
		ret = usb_interrupt_msg(dev, usbhid->urbout->pipe,
			buf, count, &actual_length,
			USB_CTRL_SET_TIMEOUT);
		/* return the number of bytes transferred */
		if (ret == 0) {
			ret = actual_length;
			/* count also the report id */
			if (skipped_report_id)
				ret++;
		}
	} else {
		int skipped_report_id = 0;
		int report_id = buf[0];

		if (buf[0] == 0x0) {
			/* Don't send the Report ID */
			buf++;
			count--;
			skipped_report_id = 1;
		}
#ifdef CONFIG_HID_SONY_PS3_CTRL_BT
		/* Vendor pseudo-types: strip the ID byte and map back to the
		 * real HID report type before issuing the control transfer. */
		if (report_type == HID_FEATREP_SKIPREPID ||
		    report_type == HID_OUTREP_SKIPREPID) {
			buf++;
			count--;
			skipped_report_id = 1;
			switch (report_type) {
			case HID_FEATREP_SKIPREPID:
				report_type = HID_FEATURE_REPORT;
				break;
			case HID_OUTREP_SKIPREPID:
				report_type = HID_OUTPUT_REPORT;
				/* fallthrough */
			default:
				break;
			}
		}
#endif
		ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
			HID_REQ_SET_REPORT,
			USB_DIR_OUT | USB_TYPE_CLASS | USB_RECIP_INTERFACE,
			((report_type + 1) << 8) | report_id,
			interface->desc.bInterfaceNumber, buf, count,
			USB_CTRL_SET_TIMEOUT);
		/* count also the report id, if this was a numbered report. */
		if (ret > 0 && skipped_report_id)
			ret++;
	}

	return ret;
}

/* Restart both transfer queues after resume/reset. */
static void usbhid_restart_queues(struct usbhid_device *usbhid)
{
	if (usbhid->urbout)
		usbhid_restart_out_queue(usbhid);
	usbhid_restart_ctrl_queue(usbhid);
}

/* Release everything allocated by hid_alloc_buffers(). */
static void hid_free_buffers(struct usb_device *dev, struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	usb_free_coherent(dev, usbhid->bufsize, usbhid->inbuf,
			  usbhid->inbuf_dma);
	usb_free_coherent(dev, usbhid->bufsize, usbhid->outbuf,
			  usbhid->outbuf_dma);
	kfree(usbhid->cr);
	usb_free_coherent(dev, usbhid->bufsize, usbhid->ctrlbuf,
			  usbhid->ctrlbuf_dma);
}

/*
 * hid_ll_driver ->parse: apply quirks, locate and fetch the report
 * descriptor, and hand it to the HID core parser.
 */
static int usbhid_parse(struct hid_device *hid)
{
	struct usb_interface *intf = to_usb_interface(hid->dev.parent);
	struct usb_host_interface *interface = intf->cur_altsetting;
	struct usb_device *dev = interface_to_usbdev (intf);
	struct hid_descriptor *hdesc;
	u32 quirks = 0;
	unsigned int rsize = 0;
	char *rdesc;
	int ret, n;

	quirks = usbhid_lookup_quirk(le16_to_cpu(dev->descriptor.idVendor),
			le16_to_cpu(dev->descriptor.idProduct));

	if (quirks & HID_QUIRK_IGNORE)
		return -ENODEV;

	/* Many keyboards and mice don't like to be polled for reports,
	 * so we will always set the HID_QUIRK_NOGET flag for them. */
	if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT) {
		if (interface->desc.bInterfaceProtocol ==
			USB_INTERFACE_PROTOCOL_KEYBOARD ||
		    interface->desc.bInterfaceProtocol ==
			USB_INTERFACE_PROTOCOL_MOUSE)
			quirks |= HID_QUIRK_NOGET;
	}

	/* The HID descriptor may live in the interface's extra bytes or in
	 * the first endpoint's extra bytes. */
	if (usb_get_extra_descriptor(interface, HID_DT_HID, &hdesc) &&
	    (!interface->desc.bNumEndpoints ||
	     usb_get_extra_descriptor(&interface->endpoint[0], HID_DT_HID,
				      &hdesc))) {
		dbg_hid("class descriptor not present\n");
		return -ENODEV;
	}

	hid->version = le16_to_cpu(hdesc->bcdHID);
	hid->country = hdesc->bCountryCode;

	for (n = 0; n < hdesc->bNumDescriptors; n++)
		if (hdesc->desc[n].bDescriptorType == HID_DT_REPORT)
			rsize = le16_to_cpu(hdesc->desc[n].wDescriptorLength);

	if (!rsize || rsize > HID_MAX_DESCRIPTOR_SIZE) {
		dbg_hid("weird size of report descriptor (%u)\n", rsize);
		return -EINVAL;
	}

	if (!(rdesc = kmalloc(rsize, GFP_KERNEL))) {
		dbg_hid("couldn't allocate rdesc memory\n");
		return -ENOMEM;
	}

	hid_set_idle(dev, interface->desc.bInterfaceNumber, 0, 0);

	ret = hid_get_class_descriptor(dev, interface->desc.bInterfaceNumber,
			HID_DT_REPORT, rdesc, rsize);
	if (ret < 0) {
		dbg_hid("reading report descriptor failed\n");
		kfree(rdesc);
		goto err;
	}

	ret = hid_parse_report(hid, rdesc, rsize);
	kfree(rdesc);
	if (ret) {
		dbg_hid("parsing report descriptor failed\n");
		goto err;
	}

	hid->quirks |= quirks;

	return 0;
err:
	return ret;
}

/*
 * hid_ll_driver ->start: size and allocate buffers, set up the interrupt
 * IN/OUT and control URBs, optionally run the initial report reads, and set
 * LEDs/wakeup policy for boot-protocol keyboards.
 */
static int usbhid_start(struct hid_device *hid)
{
	struct usb_interface *intf = to_usb_interface(hid->dev.parent);
	struct usb_host_interface *interface = intf->cur_altsetting;
	struct usb_device *dev = interface_to_usbdev(intf);
	struct usbhid_device *usbhid = hid->driver_data;
	unsigned int n, insize = 0;
	int ret;

	clear_bit(HID_DISCONNECTED, &usbhid->iofl);

	/* bufsize = longest report of any type, clamped to the global max. */
	usbhid->bufsize = HID_MIN_BUFFER_SIZE;
	hid_find_max_report(hid, HID_INPUT_REPORT, &usbhid->bufsize);
	hid_find_max_report(hid, HID_OUTPUT_REPORT, &usbhid->bufsize);
	hid_find_max_report(hid, HID_FEATURE_REPORT, &usbhid->bufsize);

	if (usbhid->bufsize > HID_MAX_BUFFER_SIZE)
		usbhid->bufsize = HID_MAX_BUFFER_SIZE;

	hid_find_max_report(hid, HID_INPUT_REPORT, &insize);

	if (insize > HID_MAX_BUFFER_SIZE)
		insize = HID_MAX_BUFFER_SIZE;

	if (hid_alloc_buffers(dev, hid)) {
		ret = -ENOMEM;
		goto fail;
	}

	for (n = 0; n < interface->desc.bNumEndpoints; n++) {
		struct usb_endpoint_descriptor *endpoint;
		int pipe;
		int interval;

		endpoint = &interface->endpoint[n].desc;
		if (!usb_endpoint_xfer_int(endpoint))
			continue;

		interval = endpoint->bInterval;

		/* Some vendors give fullspeed interval on highspeed devides */
		if (hid->quirks & HID_QUIRK_FULLSPEED_INTERVAL &&
		    dev->speed == USB_SPEED_HIGH) {
			interval = fls(endpoint->bInterval*8);
			printk(KERN_INFO "%s: Fixing fullspeed to highspeed interval: %d -> %d\n",
			       hid->name, endpoint->bInterval, interval);
		}

		/* Change the polling interval of mice. */
		if (hid->collection->usage == HID_GD_MOUSE &&
		    hid_mousepoll_interval > 0)
			interval = hid_mousepoll_interval;

		ret = -ENOMEM;
		if (usb_endpoint_dir_in(endpoint)) {
			if (usbhid->urbin)
				continue;
			if (!(usbhid->urbin = usb_alloc_urb(0, GFP_KERNEL)))
				goto fail;
			pipe = usb_rcvintpipe(dev, endpoint->bEndpointAddress);
			usb_fill_int_urb(usbhid->urbin, dev, pipe,
					 usbhid->inbuf, insize,
					 hid_irq_in, hid, interval);
			usbhid->urbin->transfer_dma = usbhid->inbuf_dma;
			usbhid->urbin->transfer_flags |=
				URB_NO_TRANSFER_DMA_MAP;
		} else {
			if (usbhid->urbout)
				continue;
			if (!(usbhid->urbout = usb_alloc_urb(0, GFP_KERNEL)))
				goto fail;
			pipe = usb_sndintpipe(dev, endpoint->bEndpointAddress);
			usb_fill_int_urb(usbhid->urbout, dev, pipe,
					 usbhid->outbuf, 0,
					 hid_irq_out, hid, interval);
			usbhid->urbout->transfer_dma = usbhid->outbuf_dma;
			usbhid->urbout->transfer_flags |=
				URB_NO_TRANSFER_DMA_MAP;
		}
	}

	usbhid->urbctrl = usb_alloc_urb(0, GFP_KERNEL);
	if (!usbhid->urbctrl) {
		ret = -ENOMEM;
		goto fail;
	}

	/* Pipe and length are filled in per request by hid_submit_ctrl(). */
	usb_fill_control_urb(usbhid->urbctrl, dev, 0, (void *) usbhid->cr,
			     usbhid->ctrlbuf, 1, hid_ctrl, hid);
	usbhid->urbctrl->transfer_dma = usbhid->ctrlbuf_dma;
	usbhid->urbctrl->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;

	if (!(hid->quirks & HID_QUIRK_NO_INIT_REPORTS))
		usbhid_init_reports(hid);

	set_bit(HID_STARTED, &usbhid->iofl);

	/* Some keyboards don't work until their LEDs have been set.
	 * Since BIOSes do set the LEDs, it must be safe for any device
	 * that supports the keyboard boot protocol.
	 * In addition, enable remote wakeup by default for all keyboard
	 * devices supporting the boot protocol.
	 */
	if (interface->desc.bInterfaceSubClass == USB_INTERFACE_SUBCLASS_BOOT &&
	    interface->desc.bInterfaceProtocol ==
			USB_INTERFACE_PROTOCOL_KEYBOARD) {
		usbhid_set_leds(hid);
		device_set_wakeup_enable(&dev->dev, 1);
	}
	return 0;

fail:
	usb_free_urb(usbhid->urbin);
	usb_free_urb(usbhid->urbout);
	usb_free_urb(usbhid->urbctrl);
	usbhid->urbin = NULL;
	usbhid->urbout = NULL;
	usbhid->urbctrl = NULL;
	hid_free_buffers(dev, hid);
	return ret;
}

/* hid_ll_driver ->stop: kill URBs, cancel deferred work, free resources. */
static void usbhid_stop(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	if (WARN_ON(!usbhid))
		return;

	clear_bit(HID_STARTED, &usbhid->iofl);
	spin_lock_irq(&usbhid->lock);	/* Sync with error and led handlers */
	set_bit(HID_DISCONNECTED, &usbhid->iofl);
	spin_unlock_irq(&usbhid->lock);
	usb_kill_urb(usbhid->urbin);
	usb_kill_urb(usbhid->urbout);
	usb_kill_urb(usbhid->urbctrl);

	hid_cancel_delayed_stuff(usbhid);

	hid->claimed = 0;

	usb_free_urb(usbhid->urbin);
	usb_free_urb(usbhid->urbctrl);
	usb_free_urb(usbhid->urbout);
	usbhid->urbin = NULL; /* don't mess up next start */
	usbhid->urbctrl = NULL;
	usbhid->urbout = NULL;

	hid_free_buffers(hid_to_usb_dev(hid), hid);
}

/* hid_ll_driver ->power: map PM hints to autopm get/put. */
static int usbhid_power(struct hid_device *hid, int lvl)
{
	int r = 0;

	switch (lvl) {
	case PM_HINT_FULLON:
		r = usbhid_get_power(hid);
		break;
	case PM_HINT_NORMAL:
		usbhid_put_power(hid);
		break;
	}
	return r;
}

static struct hid_ll_driver usb_hid_driver = {
	.parse = usbhid_parse,
	.start = usbhid_start,
	.stop = usbhid_stop,
	.open = usbhid_open,
	.close = usbhid_close,
	.power = usbhid_power,
	.hidinput_input_event = usb_hidinput_input_event,
};

/*
 * USB probe: reject interfaces with no interrupt-IN endpoint, allocate the
 * hid_device and usbhid_device, fill in identification strings, and register
 * with the HID core (which in turn calls ->parse and ->start above).
 */
static int usbhid_probe(struct usb_interface *intf,
			const struct usb_device_id *id)
{
	struct usb_host_interface *interface = intf->cur_altsetting;
	struct usb_device *dev = interface_to_usbdev(intf);
	struct usbhid_device *usbhid;
	struct hid_device *hid;
	unsigned int n, has_in = 0;
	size_t len;
	int ret;

	dbg_hid("HID probe called for ifnum %d\n",
			intf->altsetting->desc.bInterfaceNumber);

	for (n = 0; n < interface->desc.bNumEndpoints; n++)
		if (usb_endpoint_is_int_in(&interface->endpoint[n].desc))
			has_in++;
	if (!has_in) {
		hid_err(intf, "couldn't find an input interrupt endpoint\n");
		return -ENODEV;
	}

	hid = hid_allocate_device();
	if (IS_ERR(hid))
		return PTR_ERR(hid);

	usb_set_intfdata(intf, hid);
	hid->ll_driver = &usb_hid_driver;
	hid->hid_get_raw_report = usbhid_get_raw_report;
	hid->hid_output_raw_report = usbhid_output_raw_report;
	hid->ff_init = hid_pidff_init;
#ifdef CONFIG_USB_HIDDEV
	hid->hiddev_connect = hiddev_connect;
	hid->hiddev_disconnect = hiddev_disconnect;
	hid->hiddev_hid_event = hiddev_hid_event;
	hid->hiddev_report_event = hiddev_report_event;
#endif
	hid->dev.parent = &intf->dev;
	hid->bus = BUS_USB;
	hid->vendor = le16_to_cpu(dev->descriptor.idVendor);
	hid->product = le16_to_cpu(dev->descriptor.idProduct);
	hid->name[0] = 0;
	hid->quirks = usbhid_lookup_quirk(hid->vendor, hid->product);
	if (intf->cur_altsetting->desc.bInterfaceProtocol ==
			USB_INTERFACE_PROTOCOL_MOUSE)
		hid->type = HID_TYPE_USBMOUSE;
	else if (intf->cur_altsetting->desc.bInterfaceProtocol == 0)
		hid->type = HID_TYPE_USBNONE;

	/* Build the display name from manufacturer + product strings,
	 * falling back to "HID vvvv:pppp". */
	if (dev->manufacturer)
		strlcpy(hid->name, dev->manufacturer, sizeof(hid->name));

	if (dev->product) {
		if (dev->manufacturer)
			strlcat(hid->name, " ", sizeof(hid->name));
		strlcat(hid->name, dev->product, sizeof(hid->name));
	}

	if (!strlen(hid->name))
		snprintf(hid->name, sizeof(hid->name), "HID %04x:%04x",
			 le16_to_cpu(dev->descriptor.idVendor),
			 le16_to_cpu(dev->descriptor.idProduct));

	usb_make_path(dev, hid->phys, sizeof(hid->phys));
	strlcat(hid->phys, "/input", sizeof(hid->phys));
	len = strlen(hid->phys);
	if (len < sizeof(hid->phys) - 1)
		snprintf(hid->phys + len, sizeof(hid->phys) - len,
			 "%d", intf->altsetting[0].desc.bInterfaceNumber);

	if (usb_string(dev, dev->descriptor.iSerialNumber, hid->uniq, 64) <= 0)
		hid->uniq[0] = 0;

	usbhid = kzalloc(sizeof(*usbhid), GFP_KERNEL);
	if (usbhid == NULL) {
		ret = -ENOMEM;
		goto err;
	}

	hid->driver_data = usbhid;
	usbhid->hid = hid;
	usbhid->intf = intf;
	usbhid->ifnum = interface->desc.bInterfaceNumber;

	init_waitqueue_head(&usbhid->wait);
	INIT_WORK(&usbhid->reset_work, hid_reset);
	setup_timer(&usbhid->io_retry, hid_retry_timeout, (unsigned long) hid);
	spin_lock_init(&usbhid->lock);

	INIT_WORK(&usbhid->led_work, hid_led);

	ret = hid_add_device(hid);
	if (ret) {
		if (ret != -ENODEV)
			hid_err(intf, "can't add hid device: %d\n", ret);
		goto err_free;
	}

	return 0;
err_free:
	kfree(usbhid);
err:
	hid_destroy_device(hid);
	return ret;
}

/* USB disconnect: tear down the hid device and free the private data. */
static void usbhid_disconnect(struct usb_interface *intf)
{
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid;

	if (WARN_ON(!hid))
		return;

	usbhid = hid->driver_data;
	hid_destroy_device(hid);
	kfree(usbhid);
}

/* Synchronously cancel the retry timer and all deferred work items. */
static void hid_cancel_delayed_stuff(struct usbhid_device *usbhid)
{
	del_timer_sync(&usbhid->io_retry);
	cancel_work_sync(&usbhid->reset_work);
	cancel_work_sync(&usbhid->led_work);
}

/* Quiesce all in-flight I/O (used around suspend/reset). */
static void hid_cease_io(struct usbhid_device *usbhid)
{
	del_timer_sync(&usbhid->io_retry);
	usb_kill_urb(usbhid->urbin);
	usb_kill_urb(usbhid->urbctrl);
	usb_kill_urb(usbhid->urbout);
}

/* Treat USB reset pretty much the same as suspend/resume */
static int hid_pre_reset(struct usb_interface *intf)
{
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid = hid->driver_data;

	spin_lock_irq(&usbhid->lock);
	set_bit(HID_RESET_PENDING, &usbhid->iofl);
	spin_unlock_irq(&usbhid->lock);
	hid_cease_io(usbhid);

	return 0;
}

/* Same routine used for post_reset and reset_resume */
static int hid_post_reset(struct usb_interface *intf)
{
	struct usb_device *dev = interface_to_usbdev (intf);
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	spin_lock_irq(&usbhid->lock);
	clear_bit(HID_RESET_PENDING, &usbhid->iofl);
	spin_unlock_irq(&usbhid->lock);
	hid_set_idle(dev, intf->cur_altsetting->desc.bInterfaceNumber, 0, 0);
	/* FIXME: Any more reinitialization needed? */
	status = hid_start_in(hid);
	if (status < 0)
		hid_io_error(hid);
	usbhid_restart_queues(usbhid);

	return 0;
}

/* Take a PM reference so the device stays (or becomes) resumed. */
int usbhid_get_power(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	return usb_autopm_get_interface(usbhid->intf);
}

/* Release the PM reference taken by usbhid_get_power(). */
void usbhid_put_power(struct hid_device *hid)
{
	struct usbhid_device *usbhid = hid->driver_data;

	usb_autopm_put_interface(usbhid->intf);
}


#ifdef CONFIG_PM
/*
 * Suspend handler.  For runtime (auto) suspend, refuses with -EBUSY while
 * any queue is active, a reset/halt is pending, keys are pressed, or LEDs
 * are lit (unless the ignoreled module option overrides); for system
 * suspend, waits for I/O to drain.  HID_REPORTED_IDLE blocks new
 * submissions until resume.
 */
static int hid_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	if (PMSG_IS_AUTO(message)) {
		spin_lock_irq(&usbhid->lock);	/* Sync with error handler */
		if (!test_bit(HID_RESET_PENDING, &usbhid->iofl) &&
		    !test_bit(HID_CLEAR_HALT, &usbhid->iofl) &&
		    !test_bit(HID_OUT_RUNNING, &usbhid->iofl) &&
		    !test_bit(HID_CTRL_RUNNING, &usbhid->iofl) &&
		    !test_bit(HID_KEYS_PRESSED, &usbhid->iofl) &&
		    (!usbhid->ledcount || ignoreled)) {
			set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
			spin_unlock_irq(&usbhid->lock);
			if (hid->driver && hid->driver->suspend) {
				status = hid->driver->suspend(hid, message);
				if (status < 0)
					return status;
			}
		} else {
			usbhid_mark_busy(usbhid);
			spin_unlock_irq(&usbhid->lock);
			return -EBUSY;
		}

	} else {
		if (hid->driver && hid->driver->suspend) {
			status = hid->driver->suspend(hid, message);
			if (status < 0)
				return status;
		}
		spin_lock_irq(&usbhid->lock);
		set_bit(HID_REPORTED_IDLE, &usbhid->iofl);
		spin_unlock_irq(&usbhid->lock);
		if (usbhid_wait_io(hid) < 0)
			return -EIO;
	}

	hid_cancel_delayed_stuff(usbhid);
	hid_cease_io(usbhid);

	if (PMSG_IS_AUTO(message) &&
	    test_bit(HID_KEYS_PRESSED, &usbhid->iofl)) {
		/* lost race against keypresses */
		status = hid_start_in(hid);
		if (status < 0)
			hid_io_error(hid);
		usbhid_mark_busy(usbhid);
		return -EBUSY;
	}
	dev_dbg(&intf->dev, "suspend\n");
	return 0;
}

/* Resume handler: restart input and flush any queued transfers. */
static int hid_resume(struct usb_interface *intf)
{
	struct hid_device *hid = usb_get_intfdata (intf);
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	if (!test_bit(HID_STARTED, &usbhid->iofl))
		return 0;

	clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
	usbhid_mark_busy(usbhid);

	/* Finish any halt-clear or reset that was pending at suspend time. */
	if (test_bit(HID_CLEAR_HALT, &usbhid->iofl) ||
	    test_bit(HID_RESET_PENDING, &usbhid->iofl))
		schedule_work(&usbhid->reset_work);
	usbhid->retry_delay = 0;
	status = hid_start_in(hid);
	if (status < 0)
		hid_io_error(hid);
	usbhid_restart_queues(usbhid);

	if (status >= 0 && hid->driver && hid->driver->resume) {
		int ret = hid->driver->resume(hid);
		if (ret < 0)
			status = ret;
	}
	dev_dbg(&intf->dev, "resume status %d\n", status);
	return 0;
}

/* Resume after a reset: reinitialize via hid_post_reset(). */
static int hid_reset_resume(struct usb_interface *intf)
{
	struct hid_device *hid = usb_get_intfdata(intf);
	struct usbhid_device *usbhid = hid->driver_data;
	int status;

	clear_bit(HID_REPORTED_IDLE, &usbhid->iofl);
	status = hid_post_reset(intf);
	if (status >= 0 && hid->driver && hid->driver->reset_resume) {
		int ret = hid->driver->reset_resume(hid);
		if (ret < 0)
			status = ret;
	}
	return status;
}

#endif /* CONFIG_PM */

/* Match every interface of class HID. */
static const struct usb_device_id hid_usb_ids[] = {
	{ .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS,
		.bInterfaceClass = USB_INTERFACE_CLASS_HID },
	{ }						/* Terminating entry */
};

MODULE_DEVICE_TABLE (usb, hid_usb_ids);

static struct usb_driver hid_driver = {
	.name = "usbhid",
	.probe = usbhid_probe,
	.disconnect = usbhid_disconnect,
#ifdef CONFIG_PM
	.suspend = hid_suspend,
	.resume = hid_resume,
	.reset_resume = hid_reset_resume,
#endif
	.pre_reset = hid_pre_reset,
	.post_reset = hid_post_reset,
	.id_table = hid_usb_ids,
	.supports_autosuspend = 1,
};

/* Catch-all HID id table for the generic USB hid_driver below. */
static const struct hid_device_id hid_usb_table[] = {
	{ HID_USB_DEVICE(HID_ANY_ID, HID_ANY_ID) },
	{ }
};

/* Look up the usb_interface bound to a given hidraw/hiddev minor. */
struct usb_interface *usbhid_find_interface(int minor)
{
	return usb_find_interface(&hid_driver, minor);
}

static struct hid_driver hid_usb_driver = {
	.name = "generic-usb",
	.id_table = hid_usb_table,
};

/*
 * Module init: register the generic HID driver, load quirks, then register
 * the USB driver.  Each step unwinds the previous ones on failure.
 */
static int __init hid_init(void)
{
	int retval = -ENOMEM;

	retval = hid_register_driver(&hid_usb_driver);
	if (retval)
		goto hid_register_fail;
	retval = usbhid_quirks_init(quirks_param);
	if (retval)
		goto usbhid_quirks_init_fail;
	retval = usb_register(&hid_driver);
	if (retval)
		goto usb_register_fail;
	printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_DESC "\n");

	return 0;
usb_register_fail:
	usbhid_quirks_exit();
usbhid_quirks_init_fail:
	hid_unregister_driver(&hid_usb_driver);
hid_register_fail:
	return retval;
}

/* Module exit: unregister in reverse order of hid_init(). */
static void __exit hid_exit(void)
{
	usb_deregister(&hid_driver);
	usbhid_quirks_exit();
	hid_unregister_driver(&hid_usb_driver);
}

module_init(hid_init);
module_exit(hid_exit);

MODULE_AUTHOR("Andreas Gal");
MODULE_AUTHOR("Vojtech Pavlik");
MODULE_AUTHOR("Jiri Kosina");
MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_LICENSE(DRIVER_LICENSE);
gpl-2.0
syshack/KVMGT-kernel
drivers/staging/xgifb/vb_init.c
459
37164
#include <linux/delay.h> #include <linux/vmalloc.h> #include "XGIfb.h" #include "vb_def.h" #include "vb_util.h" #include "vb_setmode.h" #include "vb_init.h" static const unsigned short XGINew_DDRDRAM_TYPE340[4][2] = { { 16, 0x45}, { 8, 0x35}, { 4, 0x31}, { 2, 0x21} }; static const unsigned short XGINew_DDRDRAM_TYPE20[12][2] = { { 128, 0x5D}, { 64, 0x59}, { 64, 0x4D}, { 32, 0x55}, { 32, 0x49}, { 32, 0x3D}, { 16, 0x51}, { 16, 0x45}, { 16, 0x39}, { 8, 0x41}, { 8, 0x35}, { 4, 0x31} }; #define XGIFB_ROM_SIZE 65536 static unsigned char XGINew_GetXG20DRAMType(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) { unsigned char data, temp; if (HwDeviceExtension->jChipType < XG20) { data = xgifb_reg_get(pVBInfo->P3c4, 0x39) & 0x02; if (data == 0) data = (xgifb_reg_get(pVBInfo->P3c4, 0x3A) & 0x02) >> 1; return data; } else if (HwDeviceExtension->jChipType == XG27) { temp = xgifb_reg_get(pVBInfo->P3c4, 0x3B); /* SR3B[7][3]MAA15 MAA11 (Power on Trapping) */ if (((temp & 0x88) == 0x80) || ((temp & 0x88) == 0x08)) data = 0; /* DDR */ else data = 1; /* DDRII */ return data; } else if (HwDeviceExtension->jChipType == XG21) { /* Independent GPIO control */ xgifb_reg_and(pVBInfo->P3d4, 0xB4, ~0x02); udelay(800); xgifb_reg_or(pVBInfo->P3d4, 0x4A, 0x80); /* Enable GPIOH read */ /* GPIOF 0:DVI 1:DVO */ data = xgifb_reg_get(pVBInfo->P3d4, 0x48); /* HOTPLUG_SUPPORT */ /* for current XG20 & XG21, GPIOH is floating, driver will * fix DDR temporarily */ /* DVI read GPIOH */ data &= 0x01; /* 1=DDRII, 0=DDR */ /* ~HOTPLUG_SUPPORT */ xgifb_reg_or(pVBInfo->P3d4, 0xB4, 0x02); return data; } else { data = xgifb_reg_get(pVBInfo->P3d4, 0x97) & 0x01; if (data == 1) data++; return data; } } static void XGINew_DDR1x_MRS_340(unsigned long P3c4, struct vb_device_info *pVBInfo) { xgifb_reg_set(P3c4, 0x18, 0x01); xgifb_reg_set(P3c4, 0x19, 0x20); xgifb_reg_set(P3c4, 0x16, 0x00); xgifb_reg_set(P3c4, 0x16, 0x80); mdelay(3); xgifb_reg_set(P3c4, 0x18, 0x00); xgifb_reg_set(P3c4, 0x19, 
0x20); xgifb_reg_set(P3c4, 0x16, 0x00); xgifb_reg_set(P3c4, 0x16, 0x80); udelay(60); xgifb_reg_set(P3c4, 0x18, pVBInfo->SR18[pVBInfo->ram_type]); /* SR18 */ xgifb_reg_set(P3c4, 0x19, 0x01); xgifb_reg_set(P3c4, 0x16, 0x03); xgifb_reg_set(P3c4, 0x16, 0x83); mdelay(1); xgifb_reg_set(P3c4, 0x1B, 0x03); udelay(500); xgifb_reg_set(P3c4, 0x18, pVBInfo->SR18[pVBInfo->ram_type]); /* SR18 */ xgifb_reg_set(P3c4, 0x19, 0x00); xgifb_reg_set(P3c4, 0x16, 0x03); xgifb_reg_set(P3c4, 0x16, 0x83); xgifb_reg_set(P3c4, 0x1B, 0x00); } static void XGINew_SetMemoryClock(struct vb_device_info *pVBInfo) { xgifb_reg_set(pVBInfo->P3c4, 0x28, pVBInfo->MCLKData[pVBInfo->ram_type].SR28); xgifb_reg_set(pVBInfo->P3c4, 0x29, pVBInfo->MCLKData[pVBInfo->ram_type].SR29); xgifb_reg_set(pVBInfo->P3c4, 0x2A, pVBInfo->MCLKData[pVBInfo->ram_type].SR2A); xgifb_reg_set(pVBInfo->P3c4, 0x2E, XGI340_ECLKData[pVBInfo->ram_type].SR2E); xgifb_reg_set(pVBInfo->P3c4, 0x2F, XGI340_ECLKData[pVBInfo->ram_type].SR2F); xgifb_reg_set(pVBInfo->P3c4, 0x30, XGI340_ECLKData[pVBInfo->ram_type].SR30); } static void XGINew_DDRII_Bootup_XG27( struct xgi_hw_device_info *HwDeviceExtension, unsigned long P3c4, struct vb_device_info *pVBInfo) { unsigned long P3d4 = P3c4 + 0x10; pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo); XGINew_SetMemoryClock(pVBInfo); /* Set Double Frequency */ xgifb_reg_set(P3d4, 0x97, pVBInfo->XGINew_CR97); /* CR97 */ udelay(200); xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS2 */ xgifb_reg_set(P3c4, 0x19, 0x80); /* Set SR19 */ xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */ udelay(15); xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */ udelay(15); xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS3 */ xgifb_reg_set(P3c4, 0x19, 0xC0); /* Set SR19 */ xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */ udelay(15); xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */ udelay(15); xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS1 */ xgifb_reg_set(P3c4, 0x19, 0x40); /* Set SR19 */ 
xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */ udelay(30); xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */ udelay(15); xgifb_reg_set(P3c4, 0x18, 0x42); /* Set SR18 */ /* MRS, DLL Enable */ xgifb_reg_set(P3c4, 0x19, 0x0A); /* Set SR19 */ xgifb_reg_set(P3c4, 0x16, 0x00); /* Set SR16 */ udelay(30); xgifb_reg_set(P3c4, 0x16, 0x00); /* Set SR16 */ xgifb_reg_set(P3c4, 0x16, 0x80); /* Set SR16 */ xgifb_reg_set(P3c4, 0x1B, 0x04); /* Set SR1B */ udelay(60); xgifb_reg_set(P3c4, 0x1B, 0x00); /* Set SR1B */ xgifb_reg_set(P3c4, 0x18, 0x42); /* Set SR18 */ /* MRS, DLL Reset */ xgifb_reg_set(P3c4, 0x19, 0x08); /* Set SR19 */ xgifb_reg_set(P3c4, 0x16, 0x00); /* Set SR16 */ udelay(30); xgifb_reg_set(P3c4, 0x16, 0x83); /* Set SR16 */ udelay(15); xgifb_reg_set(P3c4, 0x18, 0x80); /* Set SR18 */ /* MRS, ODT */ xgifb_reg_set(P3c4, 0x19, 0x46); /* Set SR19 */ xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */ udelay(30); xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */ udelay(15); xgifb_reg_set(P3c4, 0x18, 0x00); /* Set SR18 */ /* EMRS */ xgifb_reg_set(P3c4, 0x19, 0x40); /* Set SR19 */ xgifb_reg_set(P3c4, 0x16, 0x20); /* Set SR16 */ udelay(30); xgifb_reg_set(P3c4, 0x16, 0xA0); /* Set SR16 */ udelay(15); /* Set SR1B refresh control 000:close; 010:open */ xgifb_reg_set(P3c4, 0x1B, 0x04); udelay(200); } static void XGINew_DDR2_MRS_XG20(struct xgi_hw_device_info *HwDeviceExtension, unsigned long P3c4, struct vb_device_info *pVBInfo) { unsigned long P3d4 = P3c4 + 0x10; pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo); XGINew_SetMemoryClock(pVBInfo); xgifb_reg_set(P3d4, 0x97, 0x11); /* CR97 */ udelay(200); xgifb_reg_set(P3c4, 0x18, 0x00); /* EMRS2 */ xgifb_reg_set(P3c4, 0x19, 0x80); xgifb_reg_set(P3c4, 0x16, 0x05); xgifb_reg_set(P3c4, 0x16, 0x85); xgifb_reg_set(P3c4, 0x18, 0x00); /* EMRS3 */ xgifb_reg_set(P3c4, 0x19, 0xC0); xgifb_reg_set(P3c4, 0x16, 0x05); xgifb_reg_set(P3c4, 0x16, 0x85); xgifb_reg_set(P3c4, 0x18, 0x00); /* EMRS1 */ xgifb_reg_set(P3c4, 0x19, 0x40); 
xgifb_reg_set(P3c4, 0x16, 0x05); xgifb_reg_set(P3c4, 0x16, 0x85); xgifb_reg_set(P3c4, 0x18, 0x42); /* MRS1 */ xgifb_reg_set(P3c4, 0x19, 0x02); xgifb_reg_set(P3c4, 0x16, 0x05); xgifb_reg_set(P3c4, 0x16, 0x85); udelay(15); xgifb_reg_set(P3c4, 0x1B, 0x04); /* SR1B */ udelay(30); xgifb_reg_set(P3c4, 0x1B, 0x00); /* SR1B */ udelay(100); xgifb_reg_set(P3c4, 0x18, 0x42); /* MRS1 */ xgifb_reg_set(P3c4, 0x19, 0x00); xgifb_reg_set(P3c4, 0x16, 0x05); xgifb_reg_set(P3c4, 0x16, 0x85); udelay(200); } static void XGINew_DDR1x_MRS_XG20(unsigned long P3c4, struct vb_device_info *pVBInfo) { xgifb_reg_set(P3c4, 0x18, 0x01); xgifb_reg_set(P3c4, 0x19, 0x40); xgifb_reg_set(P3c4, 0x16, 0x00); xgifb_reg_set(P3c4, 0x16, 0x80); udelay(60); xgifb_reg_set(P3c4, 0x18, 0x00); xgifb_reg_set(P3c4, 0x19, 0x40); xgifb_reg_set(P3c4, 0x16, 0x00); xgifb_reg_set(P3c4, 0x16, 0x80); udelay(60); xgifb_reg_set(P3c4, 0x18, pVBInfo->SR18[pVBInfo->ram_type]); /* SR18 */ xgifb_reg_set(P3c4, 0x19, 0x01); xgifb_reg_set(P3c4, 0x16, 0x03); xgifb_reg_set(P3c4, 0x16, 0x83); mdelay(1); xgifb_reg_set(P3c4, 0x1B, 0x03); udelay(500); xgifb_reg_set(P3c4, 0x18, pVBInfo->SR18[pVBInfo->ram_type]); /* SR18 */ xgifb_reg_set(P3c4, 0x19, 0x00); xgifb_reg_set(P3c4, 0x16, 0x03); xgifb_reg_set(P3c4, 0x16, 0x83); xgifb_reg_set(P3c4, 0x1B, 0x00); } static void XGINew_DDR1x_DefaultRegister( struct xgi_hw_device_info *HwDeviceExtension, unsigned long Port, struct vb_device_info *pVBInfo) { unsigned long P3d4 = Port, P3c4 = Port - 0x10; if (HwDeviceExtension->jChipType >= XG20) { XGINew_SetMemoryClock(pVBInfo); xgifb_reg_set(P3d4, 0x82, pVBInfo->CR40[11][pVBInfo->ram_type]); /* CR82 */ xgifb_reg_set(P3d4, 0x85, pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR85 */ xgifb_reg_set(P3d4, 0x86, pVBInfo->CR40[13][pVBInfo->ram_type]); /* CR86 */ xgifb_reg_set(P3d4, 0x98, 0x01); xgifb_reg_set(P3d4, 0x9A, 0x02); XGINew_DDR1x_MRS_XG20(P3c4, pVBInfo); } else { XGINew_SetMemoryClock(pVBInfo); switch (HwDeviceExtension->jChipType) { case XG42: /* CR82 
*/ xgifb_reg_set(P3d4, 0x82, pVBInfo->CR40[11][pVBInfo->ram_type]); /* CR85 */ xgifb_reg_set(P3d4, 0x85, pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR86 */ xgifb_reg_set(P3d4, 0x86, pVBInfo->CR40[13][pVBInfo->ram_type]); break; default: xgifb_reg_set(P3d4, 0x82, 0x88); xgifb_reg_set(P3d4, 0x86, 0x00); /* Insert read command for delay */ xgifb_reg_get(P3d4, 0x86); xgifb_reg_set(P3d4, 0x86, 0x88); xgifb_reg_get(P3d4, 0x86); xgifb_reg_set(P3d4, 0x86, pVBInfo->CR40[13][pVBInfo->ram_type]); xgifb_reg_set(P3d4, 0x82, 0x77); xgifb_reg_set(P3d4, 0x85, 0x00); /* Insert read command for delay */ xgifb_reg_get(P3d4, 0x85); xgifb_reg_set(P3d4, 0x85, 0x88); /* Insert read command for delay */ xgifb_reg_get(P3d4, 0x85); /* CR85 */ xgifb_reg_set(P3d4, 0x85, pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR82 */ xgifb_reg_set(P3d4, 0x82, pVBInfo->CR40[11][pVBInfo->ram_type]); break; } xgifb_reg_set(P3d4, 0x97, 0x00); xgifb_reg_set(P3d4, 0x98, 0x01); xgifb_reg_set(P3d4, 0x9A, 0x02); XGINew_DDR1x_MRS_340(P3c4, pVBInfo); } } static void XGINew_DDR2_DefaultRegister( struct xgi_hw_device_info *HwDeviceExtension, unsigned long Port, struct vb_device_info *pVBInfo) { unsigned long P3d4 = Port, P3c4 = Port - 0x10; /* keep following setting sequence, each setting in * the same reg insert idle */ xgifb_reg_set(P3d4, 0x82, 0x77); xgifb_reg_set(P3d4, 0x86, 0x00); xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */ xgifb_reg_set(P3d4, 0x86, 0x88); xgifb_reg_get(P3d4, 0x86); /* Insert read command for delay */ /* CR86 */ xgifb_reg_set(P3d4, 0x86, pVBInfo->CR40[13][pVBInfo->ram_type]); xgifb_reg_set(P3d4, 0x82, 0x77); xgifb_reg_set(P3d4, 0x85, 0x00); xgifb_reg_get(P3d4, 0x85); /* Insert read command for delay */ xgifb_reg_set(P3d4, 0x85, 0x88); xgifb_reg_get(P3d4, 0x85); /* Insert read command for delay */ xgifb_reg_set(P3d4, 0x85, pVBInfo->CR40[12][pVBInfo->ram_type]); /* CR85 */ if (HwDeviceExtension->jChipType == XG27) /* CR82 */ xgifb_reg_set(P3d4, 0x82, 
pVBInfo->CR40[11][pVBInfo->ram_type]); else xgifb_reg_set(P3d4, 0x82, 0xA8); /* CR82 */ xgifb_reg_set(P3d4, 0x98, 0x01); xgifb_reg_set(P3d4, 0x9A, 0x02); if (HwDeviceExtension->jChipType == XG27) XGINew_DDRII_Bootup_XG27(HwDeviceExtension, P3c4, pVBInfo); else XGINew_DDR2_MRS_XG20(HwDeviceExtension, P3c4, pVBInfo); } static void XGI_SetDRAM_Helper(unsigned long P3d4, u8 seed, u8 temp2, u8 reg, u8 shift_factor, u8 mask1, u8 mask2) { u8 j; for (j = 0; j < 4; j++) { temp2 |= (((seed >> (2 * j)) & 0x03) << shift_factor); xgifb_reg_set(P3d4, reg, temp2); xgifb_reg_get(P3d4, reg); temp2 &= mask1; temp2 += mask2; } } static void XGINew_SetDRAMDefaultRegister340( struct xgi_hw_device_info *HwDeviceExtension, unsigned long Port, struct vb_device_info *pVBInfo) { unsigned char temp, temp1, temp2, temp3, j, k; unsigned long P3d4 = Port, P3c4 = Port - 0x10; xgifb_reg_set(P3d4, 0x6D, pVBInfo->CR40[8][pVBInfo->ram_type]); xgifb_reg_set(P3d4, 0x68, pVBInfo->CR40[5][pVBInfo->ram_type]); xgifb_reg_set(P3d4, 0x69, pVBInfo->CR40[6][pVBInfo->ram_type]); xgifb_reg_set(P3d4, 0x6A, pVBInfo->CR40[7][pVBInfo->ram_type]); /* CR6B DQS fine tune delay */ temp = 0xaa; XGI_SetDRAM_Helper(P3d4, temp, 0, 0x6B, 2, 0xF0, 0x10); /* CR6E DQM fine tune delay */ XGI_SetDRAM_Helper(P3d4, 0, 0, 0x6E, 2, 0xF0, 0x10); temp3 = 0; for (k = 0; k < 4; k++) { /* CR6E_D[1:0] select channel */ xgifb_reg_and_or(P3d4, 0x6E, 0xFC, temp3); XGI_SetDRAM_Helper(P3d4, 0, 0, 0x6F, 0, 0xF8, 0x08); temp3 += 0x01; } xgifb_reg_set(P3d4, 0x80, pVBInfo->CR40[9][pVBInfo->ram_type]); /* CR80 */ xgifb_reg_set(P3d4, 0x81, pVBInfo->CR40[10][pVBInfo->ram_type]); /* CR81 */ temp2 = 0x80; /* CR89 terminator type select */ XGI_SetDRAM_Helper(P3d4, 0, temp2, 0x89, 0, 0xF0, 0x10); temp = 0; temp1 = temp & 0x03; temp2 |= temp1; xgifb_reg_set(P3d4, 0x89, temp2); temp = pVBInfo->CR40[3][pVBInfo->ram_type]; temp1 = temp & 0x0F; temp2 = (temp >> 4) & 0x07; temp3 = temp & 0x80; xgifb_reg_set(P3d4, 0x45, temp1); /* CR45 */ xgifb_reg_set(P3d4, 
0x99, temp2); /* CR99 */ xgifb_reg_or(P3d4, 0x40, temp3); /* CR40_D[7] */ xgifb_reg_set(P3d4, 0x41, pVBInfo->CR40[0][pVBInfo->ram_type]); /* CR41 */ if (HwDeviceExtension->jChipType == XG27) xgifb_reg_set(P3d4, 0x8F, XG27_CR8F); /* CR8F */ for (j = 0; j <= 6; j++) /* CR90 - CR96 */ xgifb_reg_set(P3d4, (0x90 + j), pVBInfo->CR40[14 + j][pVBInfo->ram_type]); for (j = 0; j <= 2; j++) /* CRC3 - CRC5 */ xgifb_reg_set(P3d4, (0xC3 + j), pVBInfo->CR40[21 + j][pVBInfo->ram_type]); for (j = 0; j < 2; j++) /* CR8A - CR8B */ xgifb_reg_set(P3d4, (0x8A + j), pVBInfo->CR40[1 + j][pVBInfo->ram_type]); if (HwDeviceExtension->jChipType == XG42) xgifb_reg_set(P3d4, 0x8C, 0x87); xgifb_reg_set(P3d4, 0x59, pVBInfo->CR40[4][pVBInfo->ram_type]); /* CR59 */ xgifb_reg_set(P3d4, 0x83, 0x09); /* CR83 */ xgifb_reg_set(P3d4, 0x87, 0x00); /* CR87 */ xgifb_reg_set(P3d4, 0xCF, XG40_CRCF); /* CRCF */ if (pVBInfo->ram_type) { xgifb_reg_set(P3c4, 0x17, 0x80); /* SR17 DDRII */ if (HwDeviceExtension->jChipType == XG27) xgifb_reg_set(P3c4, 0x17, 0x02); /* SR17 DDRII */ } else { xgifb_reg_set(P3c4, 0x17, 0x00); /* SR17 DDR */ } xgifb_reg_set(P3c4, 0x1A, 0x87); /* SR1A */ temp = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo); if (temp == 0) { XGINew_DDR1x_DefaultRegister(HwDeviceExtension, P3d4, pVBInfo); } else { xgifb_reg_set(P3d4, 0xB0, 0x80); /* DDRII Dual frequency mode */ XGINew_DDR2_DefaultRegister(HwDeviceExtension, P3d4, pVBInfo); } xgifb_reg_set(P3c4, 0x1B, 0x03); /* SR1B */ } static unsigned short XGINew_SetDRAMSize20Reg( unsigned short dram_size, struct vb_device_info *pVBInfo) { unsigned short data = 0, memsize = 0; int RankSize; unsigned char ChannelNo; RankSize = dram_size * pVBInfo->ram_bus / 8; data = xgifb_reg_get(pVBInfo->P3c4, 0x13); data &= 0x80; if (data == 0x80) RankSize *= 2; data = 0; if (pVBInfo->ram_channel == 3) ChannelNo = 4; else ChannelNo = pVBInfo->ram_channel; if (ChannelNo * RankSize <= 256) { while ((RankSize >>= 1) > 0) data += 0x10; memsize = data >> 4; /* Fix DRAM 
Sizing Error */ xgifb_reg_set(pVBInfo->P3c4, 0x14, (xgifb_reg_get(pVBInfo->P3c4, 0x14) & 0x0F) | (data & 0xF0)); udelay(15); } return memsize; } static int XGINew_ReadWriteRest(unsigned short StopAddr, unsigned short StartAddr, struct vb_device_info *pVBInfo) { int i; unsigned long Position = 0; void __iomem *fbaddr = pVBInfo->FBAddr; writel(Position, fbaddr + Position); for (i = StartAddr; i <= StopAddr; i++) { Position = 1 << i; writel(Position, fbaddr + Position); } udelay(500); /* Fix #1759 Memory Size error in Multi-Adapter. */ Position = 0; if (readl(fbaddr + Position) != Position) return 0; for (i = StartAddr; i <= StopAddr; i++) { Position = 1 << i; if (readl(fbaddr + Position) != Position) return 0; } return 1; } static unsigned char XGINew_CheckFrequence(struct vb_device_info *pVBInfo) { unsigned char data; data = xgifb_reg_get(pVBInfo->P3d4, 0x97); if ((data & 0x10) == 0) { data = xgifb_reg_get(pVBInfo->P3c4, 0x39); data = (data & 0x02) >> 1; return data; } else { return data & 0x01; } } static void XGINew_CheckChannel(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) { unsigned char data; switch (HwDeviceExtension->jChipType) { case XG20: case XG21: data = xgifb_reg_get(pVBInfo->P3d4, 0x97); data = data & 0x01; pVBInfo->ram_channel = 1; /* XG20 "JUST" one channel */ if (data == 0) { /* Single_32_16 */ if ((HwDeviceExtension->ulVideoMemorySize - 1) > 0x1000000) { pVBInfo->ram_bus = 32; /* 32 bits */ /* 22bit + 2 rank + 32bit */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x52); udelay(15); if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1) return; if ((HwDeviceExtension->ulVideoMemorySize - 1) > 0x800000) { /* 22bit + 1 rank + 32bit */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x31); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x42); udelay(15); if (XGINew_ReadWriteRest(23, 23, pVBInfo) == 1) return; } } if ((HwDeviceExtension->ulVideoMemorySize - 1) > 0x800000) { pVBInfo->ram_bus = 16; /* 16 bits */ /* 22bit + 2 
rank + 16bit */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41); udelay(15); if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1) return; else xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x31); udelay(15); } } else { /* Dual_16_8 */ if ((HwDeviceExtension->ulVideoMemorySize - 1) > 0x800000) { pVBInfo->ram_bus = 16; /* 16 bits */ /* (0x31:12x8x2) 22bit + 2 rank */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1); /* 0x41:16Mx16 bit*/ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x41); udelay(15); if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1) return; if ((HwDeviceExtension->ulVideoMemorySize - 1) > 0x400000) { /* (0x31:12x8x2) 22bit + 1 rank */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x31); /* 0x31:8Mx16 bit*/ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x31); udelay(15); if (XGINew_ReadWriteRest(22, 22, pVBInfo) == 1) return; } } if ((HwDeviceExtension->ulVideoMemorySize - 1) > 0x400000) { pVBInfo->ram_bus = 8; /* 8 bits */ /* (0x31:12x8x2) 22bit + 2 rank */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xB1); /* 0x30:8Mx8 bit*/ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30); udelay(15); if (XGINew_ReadWriteRest(22, 21, pVBInfo) == 1) return; else /* (0x31:12x8x2) 22bit + 1 rank */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x31); udelay(15); } } break; case XG27: pVBInfo->ram_bus = 16; /* 16 bits */ pVBInfo->ram_channel = 1; /* Single channel */ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x51); /* 32Mx16 bit*/ break; case XG42: /* XG42 SR14 D[3] Reserve D[2] = 1, Dual Channel = 0, Single Channel It's Different from Other XG40 Series. 
*/ if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII, DDR2x */ pVBInfo->ram_bus = 32; /* 32 bits */ pVBInfo->ram_channel = 2; /* 2 Channel */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x44); if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1) return; xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x34); if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1) return; pVBInfo->ram_channel = 1; /* Single Channel */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x40); if (XGINew_ReadWriteRest(23, 22, pVBInfo) == 1) return; else { xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x30); } } else { /* DDR */ pVBInfo->ram_bus = 64; /* 64 bits */ pVBInfo->ram_channel = 1; /* 1 channels */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x52); if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1) return; else { xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x42); } } break; default: /* XG40 */ if (XGINew_CheckFrequence(pVBInfo) == 1) { /* DDRII */ pVBInfo->ram_bus = 32; /* 32 bits */ pVBInfo->ram_channel = 3; xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x4C); if (XGINew_ReadWriteRest(25, 23, pVBInfo) == 1) return; pVBInfo->ram_channel = 2; /* 2 channels */ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x48); if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1) return; xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x3C); if (XGINew_ReadWriteRest(24, 23, pVBInfo) == 1) { pVBInfo->ram_channel = 3; /* 4 channels */ } else { pVBInfo->ram_channel = 2; /* 2 channels */ xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x38); } } else { /* DDR */ pVBInfo->ram_bus = 64; /* 64 bits */ pVBInfo->ram_channel = 2; /* 2 channels */ xgifb_reg_set(pVBInfo->P3c4, 0x13, 0xA1); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x5A); if (XGINew_ReadWriteRest(25, 24, pVBInfo) == 1) { return; } else { 
xgifb_reg_set(pVBInfo->P3c4, 0x13, 0x21); xgifb_reg_set(pVBInfo->P3c4, 0x14, 0x4A); } } break; } } static int XGINew_DDRSizing340(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) { u8 i, size; unsigned short memsize, start_addr; const unsigned short (*dram_table)[2]; xgifb_reg_set(pVBInfo->P3c4, 0x15, 0x00); /* noninterleaving */ xgifb_reg_set(pVBInfo->P3c4, 0x1C, 0x00); /* nontiling */ XGINew_CheckChannel(HwDeviceExtension, pVBInfo); if (HwDeviceExtension->jChipType >= XG20) { dram_table = XGINew_DDRDRAM_TYPE20; size = ARRAY_SIZE(XGINew_DDRDRAM_TYPE20); start_addr = 5; } else { dram_table = XGINew_DDRDRAM_TYPE340; size = ARRAY_SIZE(XGINew_DDRDRAM_TYPE340); start_addr = 9; } for (i = 0; i < size; i++) { /* SetDRAMSizingType */ xgifb_reg_and_or(pVBInfo->P3c4, 0x13, 0x80, dram_table[i][1]); udelay(15); /* should delay 50 ns */ memsize = XGINew_SetDRAMSize20Reg(dram_table[i][0], pVBInfo); if (memsize == 0) continue; memsize += (pVBInfo->ram_channel - 2) + 20; if ((HwDeviceExtension->ulVideoMemorySize - 1) < (unsigned long) (1 << memsize)) continue; if (XGINew_ReadWriteRest(memsize, start_addr, pVBInfo) == 1) return 1; } return 0; } static void XGINew_SetDRAMSize_340(struct xgifb_video_info *xgifb_info, struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) { unsigned short data; pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress; XGISetModeNew(xgifb_info, HwDeviceExtension, 0x2e); data = xgifb_reg_get(pVBInfo->P3c4, 0x21); /* disable read cache */ xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data & 0xDF)); XGI_DisplayOff(xgifb_info, HwDeviceExtension, pVBInfo); XGINew_DDRSizing340(HwDeviceExtension, pVBInfo); data = xgifb_reg_get(pVBInfo->P3c4, 0x21); /* enable read cache */ xgifb_reg_set(pVBInfo->P3c4, 0x21, (unsigned short) (data | 0x20)); } static u8 *xgifb_copy_rom(struct pci_dev *dev, size_t *rom_size) { void __iomem *rom_address; u8 *rom_copy; rom_address = pci_map_rom(dev, rom_size); if 
(rom_address == NULL) return NULL; rom_copy = vzalloc(XGIFB_ROM_SIZE); if (rom_copy == NULL) goto done; *rom_size = min_t(size_t, *rom_size, XGIFB_ROM_SIZE); memcpy_fromio(rom_copy, rom_address, *rom_size); done: pci_unmap_rom(dev, rom_address); return rom_copy; } static bool xgifb_read_vbios(struct pci_dev *pdev) { struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev); u8 *vbios; unsigned long i; unsigned char j; struct XGI21_LVDSCapStruct *lvds; size_t vbios_size; int entry; vbios = xgifb_copy_rom(pdev, &vbios_size); if (vbios == NULL) { dev_err(&pdev->dev, "Video BIOS not available\n"); return false; } if (vbios_size <= 0x65) goto error; /* * The user can ignore the LVDS bit in the BIOS and force the display * type. */ if (!(vbios[0x65] & 0x1) && (!xgifb_info->display2_force || xgifb_info->display2 != XGIFB_DISP_LCD)) { vfree(vbios); return false; } if (vbios_size <= 0x317) goto error; i = vbios[0x316] | (vbios[0x317] << 8); if (vbios_size <= i - 1) goto error; j = vbios[i - 1]; if (j == 0) goto error; if (j == 0xff) j = 1; /* * Read the LVDS table index scratch register set by the BIOS. 
*/ entry = xgifb_reg_get(xgifb_info->dev_info.P3d4, 0x36); if (entry >= j) entry = 0; i += entry * 25; lvds = &xgifb_info->lvds_data; if (vbios_size <= i + 24) goto error; lvds->LVDS_Capability = vbios[i] | (vbios[i + 1] << 8); lvds->LVDSHT = vbios[i + 2] | (vbios[i + 3] << 8); lvds->LVDSVT = vbios[i + 4] | (vbios[i + 5] << 8); lvds->LVDSHDE = vbios[i + 6] | (vbios[i + 7] << 8); lvds->LVDSVDE = vbios[i + 8] | (vbios[i + 9] << 8); lvds->LVDSHFP = vbios[i + 10] | (vbios[i + 11] << 8); lvds->LVDSVFP = vbios[i + 12] | (vbios[i + 13] << 8); lvds->LVDSHSYNC = vbios[i + 14] | (vbios[i + 15] << 8); lvds->LVDSVSYNC = vbios[i + 16] | (vbios[i + 17] << 8); lvds->VCLKData1 = vbios[i + 18]; lvds->VCLKData2 = vbios[i + 19]; lvds->PSC_S1 = vbios[i + 20]; lvds->PSC_S2 = vbios[i + 21]; lvds->PSC_S3 = vbios[i + 22]; lvds->PSC_S4 = vbios[i + 23]; lvds->PSC_S5 = vbios[i + 24]; vfree(vbios); return true; error: dev_err(&pdev->dev, "Video BIOS corrupted\n"); vfree(vbios); return false; } static void XGINew_ChkSenseStatus(struct vb_device_info *pVBInfo) { unsigned short tempbx = 0, temp, tempcx, CR3CData; temp = xgifb_reg_get(pVBInfo->P3d4, 0x32); if (temp & Monitor1Sense) tempbx |= ActiveCRT1; if (temp & LCDSense) tempbx |= ActiveLCD; if (temp & Monitor2Sense) tempbx |= ActiveCRT2; if (temp & TVSense) { tempbx |= ActiveTV; if (temp & AVIDEOSense) tempbx |= (ActiveAVideo << 8); if (temp & SVIDEOSense) tempbx |= (ActiveSVideo << 8); if (temp & SCARTSense) tempbx |= (ActiveSCART << 8); if (temp & HiTVSense) tempbx |= (ActiveHiTV << 8); if (temp & YPbPrSense) tempbx |= (ActiveYPbPr << 8); } tempcx = xgifb_reg_get(pVBInfo->P3d4, 0x3d); tempcx |= (xgifb_reg_get(pVBInfo->P3d4, 0x3e) << 8); if (tempbx & tempcx) { CR3CData = xgifb_reg_get(pVBInfo->P3d4, 0x3c); if (!(CR3CData & DisplayDeviceFromCMOS)) tempcx = 0x1FF0; } else { tempcx = 0x1FF0; } tempbx &= tempcx; xgifb_reg_set(pVBInfo->P3d4, 0x3d, (tempbx & 0x00FF)); xgifb_reg_set(pVBInfo->P3d4, 0x3e, ((tempbx & 0xFF00) >> 8)); } static void 
XGINew_SetModeScratch(struct vb_device_info *pVBInfo) { unsigned short temp, tempcl = 0, tempch = 0, CR31Data, CR38Data; temp = xgifb_reg_get(pVBInfo->P3d4, 0x3d); temp |= xgifb_reg_get(pVBInfo->P3d4, 0x3e) << 8; temp |= (xgifb_reg_get(pVBInfo->P3d4, 0x31) & (DriverMode >> 8)) << 8; if (pVBInfo->IF_DEF_CRT2Monitor == 1) { if (temp & ActiveCRT2) tempcl = SetCRT2ToRAMDAC; } if (temp & ActiveLCD) { tempcl |= SetCRT2ToLCD; if (temp & DriverMode) { if (temp & ActiveTV) { tempch = SetToLCDA | EnableDualEdge; temp ^= SetCRT2ToLCD; if ((temp >> 8) & ActiveAVideo) tempcl |= SetCRT2ToAVIDEO; if ((temp >> 8) & ActiveSVideo) tempcl |= SetCRT2ToSVIDEO; if ((temp >> 8) & ActiveSCART) tempcl |= SetCRT2ToSCART; if (pVBInfo->IF_DEF_HiVision == 1) { if ((temp >> 8) & ActiveHiTV) tempcl |= SetCRT2ToHiVision; } if (pVBInfo->IF_DEF_YPbPr == 1) { if ((temp >> 8) & ActiveYPbPr) tempch |= SetYPbPr; } } } } else { if ((temp >> 8) & ActiveAVideo) tempcl |= SetCRT2ToAVIDEO; if ((temp >> 8) & ActiveSVideo) tempcl |= SetCRT2ToSVIDEO; if ((temp >> 8) & ActiveSCART) tempcl |= SetCRT2ToSCART; if (pVBInfo->IF_DEF_HiVision == 1) { if ((temp >> 8) & ActiveHiTV) tempcl |= SetCRT2ToHiVision; } if (pVBInfo->IF_DEF_YPbPr == 1) { if ((temp >> 8) & ActiveYPbPr) tempch |= SetYPbPr; } } tempcl |= SetSimuScanMode; if ((!(temp & ActiveCRT1)) && ((temp & ActiveLCD) || (temp & ActiveTV) || (temp & ActiveCRT2))) tempcl ^= (SetSimuScanMode | SwitchCRT2); if ((temp & ActiveLCD) && (temp & ActiveTV)) tempcl ^= (SetSimuScanMode | SwitchCRT2); xgifb_reg_set(pVBInfo->P3d4, 0x30, tempcl); CR31Data = xgifb_reg_get(pVBInfo->P3d4, 0x31); CR31Data &= ~(SetNotSimuMode >> 8); if (!(temp & ActiveCRT1)) CR31Data |= (SetNotSimuMode >> 8); CR31Data &= ~(DisableCRT2Display >> 8); if (!((temp & ActiveLCD) || (temp & ActiveTV) || (temp & ActiveCRT2))) CR31Data |= (DisableCRT2Display >> 8); xgifb_reg_set(pVBInfo->P3d4, 0x31, CR31Data); CR38Data = xgifb_reg_get(pVBInfo->P3d4, 0x38); CR38Data &= ~SetYPbPr; CR38Data |= tempch; 
xgifb_reg_set(pVBInfo->P3d4, 0x38, CR38Data); } static unsigned short XGINew_SenseLCD(struct xgi_hw_device_info *HwDeviceExtension, struct vb_device_info *pVBInfo) { unsigned short temp = HwDeviceExtension->ulCRT2LCDType; switch (HwDeviceExtension->ulCRT2LCDType) { case LCD_640x480: case LCD_1024x600: case LCD_1152x864: case LCD_1280x960: case LCD_1152x768: case LCD_1920x1440: case LCD_2048x1536: temp = 0; /* overwrite used ulCRT2LCDType */ break; case LCD_UNKNOWN: /* unknown lcd, do nothing */ return 0; } xgifb_reg_and_or(pVBInfo->P3d4, 0x36, 0xF0, temp); return 1; } static void XGINew_GetXG21Sense(struct pci_dev *pdev, struct vb_device_info *pVBInfo) { struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev); unsigned char Temp; if (xgifb_read_vbios(pdev)) { /* For XG21 LVDS */ xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense); /* LVDS on chip */ xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0); } else { /* Enable GPIOA/B read */ xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03); Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0xC0; if (Temp == 0xC0) { /* DVI & DVO GPIOA/B pull high */ XGINew_SenseLCD(&xgifb_info->hw_info, pVBInfo); xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense); /* Enable read GPIOF */ xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x20, 0x20); if (xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x04) Temp = 0xA0; /* Only DVO on chip */ else Temp = 0x80; /* TMDS on chip */ xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, Temp); /* Disable read GPIOF */ xgifb_reg_and(pVBInfo->P3d4, 0x4A, ~0x20); } } } static void XGINew_GetXG27Sense(struct vb_device_info *pVBInfo) { unsigned char Temp, bCR4A; bCR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A); /* Enable GPIOA/B/C read */ xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x07, 0x07); Temp = xgifb_reg_get(pVBInfo->P3d4, 0x48) & 0x07; xgifb_reg_set(pVBInfo->P3d4, 0x4A, bCR4A); if (Temp <= 0x02) { /* LVDS setting */ xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xC0); xgifb_reg_set(pVBInfo->P3d4, 0x30, 0x21); } else { /* TMDS/DVO setting */ 
xgifb_reg_and_or(pVBInfo->P3d4, 0x38, ~0xE0, 0xA0); } xgifb_reg_or(pVBInfo->P3d4, 0x32, LCDSense); } static unsigned char GetXG21FPBits(struct vb_device_info *pVBInfo) { unsigned char CR38, CR4A, temp; CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A); /* enable GPIOE read */ xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x10, 0x10); CR38 = xgifb_reg_get(pVBInfo->P3d4, 0x38); temp = 0; if ((CR38 & 0xE0) > 0x80) { temp = xgifb_reg_get(pVBInfo->P3d4, 0x48); temp &= 0x08; temp >>= 3; } xgifb_reg_set(pVBInfo->P3d4, 0x4A, CR4A); return temp; } static unsigned char GetXG27FPBits(struct vb_device_info *pVBInfo) { unsigned char CR4A, temp; CR4A = xgifb_reg_get(pVBInfo->P3d4, 0x4A); /* enable GPIOA/B/C read */ xgifb_reg_and_or(pVBInfo->P3d4, 0x4A, ~0x03, 0x03); temp = xgifb_reg_get(pVBInfo->P3d4, 0x48); if (temp > 2) temp = ((temp & 0x04) >> 1) | ((~temp) & 0x01); xgifb_reg_set(pVBInfo->P3d4, 0x4A, CR4A); return temp; } static bool xgifb_bridge_is_on(struct vb_device_info *vb_info) { u8 flag; flag = xgifb_reg_get(vb_info->Part4Port, 0x00); return flag == 1 || flag == 2; } unsigned char XGIInitNew(struct pci_dev *pdev) { struct xgifb_video_info *xgifb_info = pci_get_drvdata(pdev); struct xgi_hw_device_info *HwDeviceExtension = &xgifb_info->hw_info; struct vb_device_info VBINF; struct vb_device_info *pVBInfo = &VBINF; unsigned char i, temp = 0, temp1; pVBInfo->FBAddr = HwDeviceExtension->pjVideoMemoryAddress; if (pVBInfo->FBAddr == NULL) { dev_dbg(&pdev->dev, "pVBInfo->FBAddr == 0\n"); return 0; } XGIRegInit(pVBInfo, xgifb_info->vga_base); outb(0x67, pVBInfo->P3c2); InitTo330Pointer(HwDeviceExtension->jChipType, pVBInfo); /* Openkey */ xgifb_reg_set(pVBInfo->P3c4, 0x05, 0x86); /* GetXG21Sense (GPIO) */ if (HwDeviceExtension->jChipType == XG21) XGINew_GetXG21Sense(pdev, pVBInfo); if (HwDeviceExtension->jChipType == XG27) XGINew_GetXG27Sense(pVBInfo); /* Reset Extended register */ for (i = 0x06; i < 0x20; i++) xgifb_reg_set(pVBInfo->P3c4, i, 0); for (i = 0x21; i <= 0x27; i++) 
xgifb_reg_set(pVBInfo->P3c4, i, 0); for (i = 0x31; i <= 0x3B; i++) xgifb_reg_set(pVBInfo->P3c4, i, 0); /* Auto over driver for XG42 */ if (HwDeviceExtension->jChipType == XG42) xgifb_reg_set(pVBInfo->P3c4, 0x3B, 0xC0); for (i = 0x79; i <= 0x7C; i++) xgifb_reg_set(pVBInfo->P3d4, i, 0); if (HwDeviceExtension->jChipType >= XG20) xgifb_reg_set(pVBInfo->P3d4, 0x97, pVBInfo->XGINew_CR97); /* SetDefExt1Regs begin */ xgifb_reg_set(pVBInfo->P3c4, 0x07, XGI330_SR07); if (HwDeviceExtension->jChipType == XG27) { xgifb_reg_set(pVBInfo->P3c4, 0x40, XG27_SR40); xgifb_reg_set(pVBInfo->P3c4, 0x41, XG27_SR41); } xgifb_reg_set(pVBInfo->P3c4, 0x11, 0x0F); xgifb_reg_set(pVBInfo->P3c4, 0x1F, XGI330_SR1F); /* Frame buffer can read/write SR20 */ xgifb_reg_set(pVBInfo->P3c4, 0x20, 0xA0); /* H/W request for slow corner chip */ xgifb_reg_set(pVBInfo->P3c4, 0x36, 0x70); if (HwDeviceExtension->jChipType == XG27) xgifb_reg_set(pVBInfo->P3c4, 0x36, XG27_SR36); if (HwDeviceExtension->jChipType < XG20) { u32 Temp; /* Set AGP customize registers (in SetDefAGPRegs) Start */ for (i = 0x47; i <= 0x4C; i++) xgifb_reg_set(pVBInfo->P3d4, i, XGI340_AGPReg[i - 0x47]); for (i = 0x70; i <= 0x71; i++) xgifb_reg_set(pVBInfo->P3d4, i, XGI340_AGPReg[6 + i - 0x70]); for (i = 0x74; i <= 0x77; i++) xgifb_reg_set(pVBInfo->P3d4, i, XGI340_AGPReg[8 + i - 0x74]); pci_read_config_dword(pdev, 0x50, &Temp); Temp >>= 20; Temp &= 0xF; if (Temp == 1) xgifb_reg_set(pVBInfo->P3d4, 0x48, 0x20); /* CR48 */ } /* != XG20 */ /* Set PCI */ xgifb_reg_set(pVBInfo->P3c4, 0x23, XGI330_SR23); xgifb_reg_set(pVBInfo->P3c4, 0x24, XGI330_SR24); xgifb_reg_set(pVBInfo->P3c4, 0x25, 0); if (HwDeviceExtension->jChipType < XG20) { /* Set VB */ XGI_UnLockCRT2(pVBInfo); /* disable VideoCapture */ xgifb_reg_and_or(pVBInfo->Part0Port, 0x3F, 0xEF, 0x00); xgifb_reg_set(pVBInfo->Part1Port, 0x00, 0x00); /* chk if BCLK>=100MHz */ temp1 = xgifb_reg_get(pVBInfo->P3d4, 0x7B); xgifb_reg_set(pVBInfo->Part1Port, 0x02, XGI330_CRT2Data_1_2); 
xgifb_reg_set(pVBInfo->Part1Port, 0x2E, 0x08); /* use VB */ } /* != XG20 */ xgifb_reg_set(pVBInfo->P3c4, 0x27, 0x1F); if ((HwDeviceExtension->jChipType == XG42) && XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo) != 0) { /* Not DDR */ xgifb_reg_set(pVBInfo->P3c4, 0x31, (XGI330_SR31 & 0x3F) | 0x40); xgifb_reg_set(pVBInfo->P3c4, 0x32, (XGI330_SR32 & 0xFC) | 0x01); } else { xgifb_reg_set(pVBInfo->P3c4, 0x31, XGI330_SR31); xgifb_reg_set(pVBInfo->P3c4, 0x32, XGI330_SR32); } xgifb_reg_set(pVBInfo->P3c4, 0x33, XGI330_SR33); if (HwDeviceExtension->jChipType < XG20) { if (xgifb_bridge_is_on(pVBInfo)) { xgifb_reg_set(pVBInfo->Part2Port, 0x00, 0x1C); xgifb_reg_set(pVBInfo->Part4Port, 0x0D, XGI330_CRT2Data_4_D); xgifb_reg_set(pVBInfo->Part4Port, 0x0E, XGI330_CRT2Data_4_E); xgifb_reg_set(pVBInfo->Part4Port, 0x10, XGI330_CRT2Data_4_10); xgifb_reg_set(pVBInfo->Part4Port, 0x0F, 0x3F); XGI_LockCRT2(pVBInfo); } } /* != XG20 */ XGI_SenseCRT1(pVBInfo); if (HwDeviceExtension->jChipType == XG21) { xgifb_reg_and_or(pVBInfo->P3d4, 0x32, ~Monitor1Sense, Monitor1Sense); /* Z9 default has CRT */ temp = GetXG21FPBits(pVBInfo); xgifb_reg_and_or(pVBInfo->P3d4, 0x37, ~0x01, temp); } if (HwDeviceExtension->jChipType == XG27) { xgifb_reg_and_or(pVBInfo->P3d4, 0x32, ~Monitor1Sense, Monitor1Sense); /* Z9 default has CRT */ temp = GetXG27FPBits(pVBInfo); xgifb_reg_and_or(pVBInfo->P3d4, 0x37, ~0x03, temp); } pVBInfo->ram_type = XGINew_GetXG20DRAMType(HwDeviceExtension, pVBInfo); XGINew_SetDRAMDefaultRegister340(HwDeviceExtension, pVBInfo->P3d4, pVBInfo); XGINew_SetDRAMSize_340(xgifb_info, HwDeviceExtension, pVBInfo); xgifb_reg_set(pVBInfo->P3c4, 0x22, 0xfa); xgifb_reg_set(pVBInfo->P3c4, 0x21, 0xa3); XGINew_ChkSenseStatus(pVBInfo); XGINew_SetModeScratch(pVBInfo); xgifb_reg_set(pVBInfo->P3d4, 0x8c, 0x87); return 1; } /* end of init */
gpl-2.0
mike-dunn/linux-treo680
drivers/staging/lustre/lustre/obdclass/llog_ioctl.c
459
10707
/* * GPL HEADER START * * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 only, * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License version 2 for more details (a copy is included * in the LICENSE file that accompanied this code). * * You should have received a copy of the GNU General Public License * version 2 along with this program; If not, see * http://www.sun.com/software/products/lustre/docs/GPLv2.pdf * * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara, * CA 95054 USA or visit www.sun.com if you need additional information or * have any questions. * * GPL HEADER END */ /* * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved. * Use is subject to license terms. * * Copyright (c) 2011, 2012, Intel Corporation. */ /* * This file is part of Lustre, http://www.lustre.org/ * Lustre is a trademark of Sun Microsystems, Inc. 
 */

#define DEBUG_SUBSYSTEM S_LOG

#include <obd_class.h>
#include <lustre_log.h>
#include "llog_internal.h"

/*
 * Parse a log id of the form "#<id>#<seq>#<ogen>" into *logid.
 *
 * The three fields are separated by '#'; id and seq are parsed base-auto
 * (0x prefix allowed), ogen is parsed as hex.  NOTE: the input string is
 * modified in place ('#' separators are overwritten with '\0').
 *
 * Returns 0 on success, -EINVAL on any malformed input.
 */
static int str2logid(struct llog_logid *logid, char *str, int len)
{
	char *start, *end, *endp;
	__u64 id, seq;

	start = str;
	/* the string must begin with a '#' */
	if (*start != '#')
		return -EINVAL;

	start++;
	if (start - str >= len - 1)
		return -EINVAL;
	end = strchr(start, '#');
	if (end == NULL || end == start)
		return -EINVAL;

	/* terminate the id field and require the whole field to be numeric */
	*end = '\0';
	id = simple_strtoull(start, &endp, 0);
	if (endp != end)
		return -EINVAL;

	start = ++end;
	if (start - str >= len - 1)
		return -EINVAL;
	end = strchr(start, '#');
	if (end == NULL || end == start)
		return -EINVAL;

	/* terminate and parse the seq field the same way */
	*end = '\0';
	seq = simple_strtoull(start, &endp, 0);
	if (endp != end)
		return -EINVAL;

	ostid_set_seq(&logid->lgl_oi, seq);
	ostid_set_id(&logid->lgl_oi, id);

	/* remaining tail is the generation, in hex, up to end of string */
	start = ++end;
	if (start - str >= len - 1)
		return -EINVAL;
	logid->lgl_ogen = simple_strtoul(start, &endp, 16);
	if (*endp != '\0')
		return -EINVAL;
	return 0;
}

/*
 * llog_process() callback for OBD_IOC_LLOG_CHECK: walk the records of a
 * catalog (recursing into each plain log) and append one "[index] ... ok /
 * failed" line per record to the ioctl bulk buffer.
 *
 * The cursor state (out/remains/from/to) lives in static locals and is
 * (re)initialized on the first call of a walk, when ioc_data is non-NULL
 * with ioc_inllen1 > 0; recursive calls pass data == NULL so the state is
 * shared with the outer walk.  NOTE(review): static storage means this is
 * not safe for two concurrent CHECK ioctls — presumably serialized by the
 * caller; confirm.
 *
 * Returns 0 to continue, -LLOG_EEMPTY to stop the walk (past 'to' or out
 * of buffer space), or a negative errno on error.
 */
static int llog_check_cb(const struct lu_env *env, struct llog_handle *handle,
			 struct llog_rec_hdr *rec, void *data)
{
	struct obd_ioctl_data *ioc_data = (struct obd_ioctl_data *)data;
	static int l, remains, from, to;
	static char *out;
	char *endp;
	int cur_index, rc = 0;

	if (ioc_data && ioc_data->ioc_inllen1 > 0) {
		/* first record of the top-level walk: set up the cursor */
		l = 0;
		remains = ioc_data->ioc_inllen4 +
			  cfs_size_round(ioc_data->ioc_inllen1) +
			  cfs_size_round(ioc_data->ioc_inllen2) +
			  cfs_size_round(ioc_data->ioc_inllen3);
		/* inlbuf2/inlbuf3 carry the "from"/"to" index range */
		from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
		if (*endp != '\0')
			return -EINVAL;
		to = simple_strtol(ioc_data->ioc_inlbuf3, &endp, 0);
		if (*endp != '\0')
			return -EINVAL;
		/* mark state as initialized so we don't re-init next call */
		ioc_data->ioc_inllen1 = 0;
		out = ioc_data->ioc_bulk;
	}

	cur_index = rec->lrh_index;
	if (cur_index < from)
		return 0;
	if (to > 0 && cur_index > to)
		return -LLOG_EEMPTY;

	if (handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) {
		struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
		struct llog_handle *loghandle;

		if (rec->lrh_type != LLOG_LOGID_MAGIC) {
			/* NOTE(review): unlike the plain-log branch below,
			 * out/remains are not advanced here, and processing
			 * continues using lir despite the bad type */
			l = snprintf(out, remains, "[index]: %05d [type]: "
				     "%02x [len]: %04d failed\n",
				     cur_index, rec->lrh_type,
				     rec->lrh_len);
		}
		if (handle->lgh_ctxt == NULL)
			return -EOPNOTSUPP;
		rc = llog_cat_id2handle(env, handle, &loghandle, &lir->lid_id);
		if (rc) {
			CDEBUG(D_IOCTL, "cannot find log #"DOSTID"#%08x\n",
			       POSTID(&lir->lid_id.lgl_oi),
			       lir->lid_id.lgl_ogen);
			return rc;
		}
		/* recurse into the plain log; data==NULL keeps the cursor */
		rc = llog_process(env, loghandle, llog_check_cb, NULL, NULL);
		llog_handle_put(loghandle);
	} else {
		bool ok;

		/* only these record types are considered valid ("ok") */
		switch (rec->lrh_type) {
		case OST_SZ_REC:
		case MDS_UNLINK_REC:
		case MDS_UNLINK64_REC:
		case MDS_SETATTR64_REC:
		case OBD_CFG_REC:
		case LLOG_GEN_REC:
		case LLOG_HDR_MAGIC:
			ok = true;
			break;
		default:
			ok = false;
		}

		l = snprintf(out, remains, "[index]: %05d [type]: "
			     "%02x [len]: %04d %s\n",
			     cur_index, rec->lrh_type, rec->lrh_len,
			     ok ? "ok" : "failed");
		out += l;
		remains -= l;
		if (remains <= 0) {
			CERROR("%s: no space to print log records\n",
			       handle->lgh_ctxt->loc_obd->obd_name);
			return -LLOG_EEMPTY;
		}
	}
	return rc;
}

/*
 * llog_process() callback for OBD_IOC_LLOG_PRINT: format one line per record
 * into the ioctl bulk buffer — the logid for catalog entries, a parsed
 * config record for OBD_CFG_REC, or a generic index/type/len line otherwise.
 *
 * Shares the same static-cursor pattern (and the same concurrency caveat)
 * as llog_check_cb above.
 */
static int llog_print_cb(const struct lu_env *env, struct llog_handle *handle,
			 struct llog_rec_hdr *rec, void *data)
{
	struct obd_ioctl_data *ioc_data = (struct obd_ioctl_data *)data;
	static int l, remains, from, to;
	static char *out;
	char *endp;
	int cur_index;

	if (ioc_data != NULL && ioc_data->ioc_inllen1 > 0) {
		/* first call of a walk: initialize the output cursor */
		l = 0;
		remains = ioc_data->ioc_inllen4 +
			  cfs_size_round(ioc_data->ioc_inllen1) +
			  cfs_size_round(ioc_data->ioc_inllen2) +
			  cfs_size_round(ioc_data->ioc_inllen3);
		from = simple_strtol(ioc_data->ioc_inlbuf2, &endp, 0);
		if (*endp != '\0')
			return -EINVAL;
		to = simple_strtol(ioc_data->ioc_inlbuf3, &endp, 0);
		if (*endp != '\0')
			return -EINVAL;
		out = ioc_data->ioc_bulk;
		ioc_data->ioc_inllen1 = 0;
	}

	cur_index = rec->lrh_index;
	if (cur_index < from)
		return 0;
	if (to > 0 && cur_index > to)
		return -LLOG_EEMPTY;

	if (handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT) {
		struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;

		/* a catalog may only contain logid records */
		if (rec->lrh_type != LLOG_LOGID_MAGIC) {
			CERROR("invalid record in catalog\n");
			return -EINVAL;
		}

		l = snprintf(out, remains, "[index]: %05d [logid]: "
			     "#"DOSTID"#%08x\n", cur_index,
			     POSTID(&lir->lid_id.lgl_oi),
			     lir->lid_id.lgl_ogen);
	} else if (rec->lrh_type == OBD_CFG_REC) {
		int rc;

		/* config records get fully decoded, not just summarized */
		rc = class_config_parse_rec(rec, out, remains);
		if (rc < 0)
			return rc;
		l = rc;
	} else {
		l = snprintf(out, remains,
			     "[index]: %05d [type]: %02x [len]: %04d\n",
			     cur_index, rec->lrh_type, rec->lrh_len);
	}
	out += l;
	remains -= l;
	if (remains <= 0) {
		CERROR("not enough space for print log records\n");
		return -LLOG_EEMPTY;
	}

	return 0;
}

/*
 * Destroy the plain log identified by *logid and detach it from catalog
 * 'cat'.  Returns 0 on success, -ENOENT if the log cannot be opened, or
 * the error from llog_destroy().
 */
static int llog_remove_log(const struct lu_env *env, struct llog_handle *cat,
			   struct llog_logid *logid)
{
	struct llog_handle *log;
	int rc;

	rc = llog_cat_id2handle(env, cat, &log, logid);
	if (rc) {
		CDEBUG(D_IOCTL, "cannot find log #"DOSTID"#%08x\n",
		       POSTID(&logid->lgl_oi), logid->lgl_ogen);
		return -ENOENT;
	}

	rc = llog_destroy(env, log);
	if (rc) {
		CDEBUG(D_IOCTL, "cannot destroy log\n");
		GOTO(out, rc);
	}
	/* drop the catalog entry that pointed at the destroyed log */
	llog_cat_cleanup(env, cat, log, log->u.phd.phd_cookie.lgc_index);
out:
	llog_handle_put(log);
	return rc;
}

/*
 * llog_process() callback for OBD_IOC_LLOG_REMOVE on a whole catalog:
 * destroy the plain log referenced by each catalog record.
 */
static int llog_delete_cb(const struct lu_env *env, struct llog_handle *handle,
			  struct llog_rec_hdr *rec, void *data)
{
	struct llog_logid_rec *lir = (struct llog_logid_rec *)rec;
	int rc;

	if (rec->lrh_type != LLOG_LOGID_MAGIC)
		return -EINVAL;
	rc = llog_remove_log(env, handle, &lir->lid_id);

	return rc;
}

/*
 * Entry point for the llog ioctls (INFO/CHECK/PRINT/CANCEL/REMOVE).
 *
 * data->ioc_inlbuf1 names the log: "#id#seq#ogen" (see str2logid) or
 * "$<name>".  The named log is opened, the command dispatched, and the
 * handle closed via llog_cat_close()/llog_close() depending on whether it
 * is a catalog.  Returns 0 or a negative errno.
 */
int llog_ioctl(const struct lu_env *env, struct llog_ctxt *ctxt, int cmd,
	       struct obd_ioctl_data *data)
{
	struct llog_logid logid;
	int rc = 0;
	struct llog_handle *handle = NULL;

	if (*data->ioc_inlbuf1 == '#') {
		/* log addressed by id */
		rc = str2logid(&logid, data->ioc_inlbuf1, data->ioc_inllen1);
		if (rc)
			return rc;
		rc = llog_open(env, ctxt, &handle, &logid, NULL,
			       LLOG_OPEN_EXISTS);
		if (rc)
			return rc;
	} else if (*data->ioc_inlbuf1 == '$') {
		/* log addressed by name (after the '$') */
		char *name = data->ioc_inlbuf1 + 1;

		rc = llog_open(env, ctxt, &handle, NULL, name,
			       LLOG_OPEN_EXISTS);
		if (rc)
			return rc;
	} else {
		return -EINVAL;
	}

	rc = llog_init_handle(env, handle, 0, NULL);
	if (rc)
		GOTO(out_close, rc = -ENOENT);

	switch (cmd) {
	case OBD_IOC_LLOG_INFO: {
		int l;
		int remains = data->ioc_inllen2 +
			      cfs_size_round(data->ioc_inllen1);
		char *out = data->ioc_bulk;

		/* dump the header summary into the bulk buffer */
		l = snprintf(out, remains,
			     "logid: #"DOSTID"#%08x\n"
			     "flags: %x (%s)\n"
			     "records count: %d\n"
			     "last index: %d\n",
			     POSTID(&handle->lgh_id.lgl_oi),
			     handle->lgh_id.lgl_ogen,
			     handle->lgh_hdr->llh_flags,
			     handle->lgh_hdr->llh_flags &
			     LLOG_F_IS_CAT ? "cat" : "plain",
			     handle->lgh_hdr->llh_count,
			     handle->lgh_last_idx);
		out += l;
		remains -= l;
		if (remains <= 0) {
			CERROR("%s: not enough space for log header info\n",
			       ctxt->loc_obd->obd_name);
			rc = -ENOSPC;
		}
		break;
	}
	case OBD_IOC_LLOG_CHECK:
		LASSERT(data->ioc_inllen1 > 0);
		rc = llog_process(env, handle, llog_check_cb, data, NULL);
		/* -LLOG_EEMPTY only means the walk stopped early */
		if (rc == -LLOG_EEMPTY)
			rc = 0;
		else if (rc)
			GOTO(out_close, rc);
		break;
	case OBD_IOC_LLOG_PRINT:
		LASSERT(data->ioc_inllen1 > 0);
		rc = llog_process(env, handle, llog_print_cb, data, NULL);
		if (rc == -LLOG_EEMPTY)
			rc = 0;
		else if (rc)
			GOTO(out_close, rc);
		break;
	case OBD_IOC_LLOG_CANCEL: {
		struct llog_cookie cookie;
		struct llog_logid plain;
		char *endp;

		/* inlbuf3 holds the record index to cancel */
		cookie.lgc_index = simple_strtoul(data->ioc_inlbuf3,
						  &endp, 0);
		if (*endp != '\0')
			GOTO(out_close, rc = -EINVAL);

		if (handle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN) {
			/* plain log: cancel directly by index */
			rc = llog_cancel_rec(NULL, handle, cookie.lgc_index);
			GOTO(out_close, rc);
		} else if (!(handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)) {
			GOTO(out_close, rc = -EINVAL);
		}

		if (data->ioc_inlbuf2 == NULL) /* catalog but no logid */
			GOTO(out_close, rc = -ENOTTY);

		/* inlbuf2 names the plain log inside the catalog */
		rc = str2logid(&plain, data->ioc_inlbuf2, data->ioc_inllen2);
		if (rc)
			GOTO(out_close, rc);
		cookie.lgc_lgl = plain;
		rc = llog_cat_cancel_records(env, handle, 1, &cookie);
		if (rc)
			GOTO(out_close, rc);
		break;
	}
	case OBD_IOC_LLOG_REMOVE: {
		struct llog_logid plain;

		if (handle->lgh_hdr->llh_flags & LLOG_F_IS_PLAIN) {
			/* plain log: destroy it outright */
			rc = llog_destroy(env, handle);
			GOTO(out_close, rc);
		} else if (!(handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)) {
			GOTO(out_close, rc = -EINVAL);
		}

		/* NOTE(review): pointer compared with "> 0"; reads as a
		 * non-NULL check — confirm intent */
		if (data->ioc_inlbuf2 > 0) {
			/* remove indicate log from the catalog */
			rc = str2logid(&plain, data->ioc_inlbuf2,
				       data->ioc_inllen2);
			if (rc)
				GOTO(out_close, rc);
			rc = llog_remove_log(env, handle, &plain);
		} else {
			/* remove all the log of the catalog */
			rc = llog_process(env, handle, llog_delete_cb,
					  NULL, NULL);
			if (rc)
				GOTO(out_close, rc);
		}
		break;
	}
	default:
		CERROR("%s: Unknown ioctl cmd %#x\n",
		       ctxt->loc_obd->obd_name, cmd);
		GOTO(out_close, rc = -ENOTTY);
	}

out_close:
	/* catalogs and plain logs have different close paths */
	if (handle->lgh_hdr &&
	    handle->lgh_hdr->llh_flags & LLOG_F_IS_CAT)
		llog_cat_close(env, handle);
	else
		llog_close(env, handle);
	return rc;
}
EXPORT_SYMBOL(llog_ioctl);
gpl-2.0
Framework43/touchpad-kernel
arch/arm/mach-fsm/pmic8058-mpp.c
715
6328
/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 *
 */
/*
 * Qualcomm PMIC8058 MPP driver
 *
 * Exposes the PM8058 multi-purpose pins (MPPs) as a GPIO chip.  Two
 * implementations are provided: a legacy msm gpio_chip one when
 * CONFIG_GPIOLIB is unset, and a standard gpiolib one otherwise.
 */
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <linux/mfd/pmic8058.h>
#include <mach/mpp.h>
#include <linux/seq_file.h>

#ifndef CONFIG_GPIOLIB
#include "gpio_chip.h"
#endif

/* MPP Control Registers */
#define	SSBI_MPP_CNTRL_BASE	0x50
#define	SSBI_MPP_CNTRL(n)	(SSBI_MPP_CNTRL_BASE + (n))

/* MPP Type: bits [7:5] of the control register */
#define	PM8058_MPP_TYPE_MASK		0xE0
#define	PM8058_MPP_TYPE_SHIFT		5

/* MPP Config Level: bits [4:2] */
#define	PM8058_MPP_CONFIG_LVL_MASK	0x1C
#define	PM8058_MPP_CONFIG_LVL_SHIFT	2

/* MPP Config Control: bits [1:0] */
#define	PM8058_MPP_CONFIG_CTL_MASK	0x03

/*
 * Read the current input state of MPP 'mpp' via the PMIC's real-time
 * IRQ status.  Returns the status value or -EINVAL for a bad index.
 */
static int pm8058_mpp_get(struct gpio_chip *chip, unsigned mpp)
{
	struct pm8058_gpio_platform_data *pdata;
	struct pm8058_chip *pm_chip;

	if (mpp >= PM8058_MPPS || chip == NULL)
		return -EINVAL;

	pdata = chip->dev->platform_data;
	pm_chip = dev_get_drvdata(chip->dev);
	return pm8058_irq_get_rt_status(pm_chip, pdata->irq_base + mpp);
}

#ifndef CONFIG_GPIOLIB
/* Legacy (non-gpiolib) variant: map a global gpio number to its IRQ. */
static int pm8058_mpp_get_irq_num(struct gpio_chip *chip,
				  unsigned int gpio,
				  unsigned int *irqp,
				  unsigned long *irqnumflagsp)
{
	struct pm8058_gpio_platform_data *pdata;

	pdata = chip->dev->platform_data;
	/* convert global gpio number to a chip-relative offset */
	gpio -= chip->start;
	*irqp = pdata->irq_base + gpio;
	if (irqnumflagsp)
		*irqnumflagsp = 0;
	return 0;
}

/* Legacy variant: read an MPP addressed by global gpio number 'n'. */
static int pm8058_mpp_read(struct gpio_chip *chip, unsigned n)
{
	n -= chip->start;
	return pm8058_mpp_get(chip, n);
}

struct msm_gpio_chip pm8058_mpp_chip = {
	.chip = {
		.get_irq_num	= pm8058_mpp_get_irq_num,
		.read		= pm8058_mpp_read,
	}
};

/*
 * Program the control register of MPP 'mpp' with the given type,
 * level selection and control bits.  Returns 0 or a write error.
 */
int pm8058_mpp_config(unsigned mpp, unsigned type, unsigned level,
		      unsigned control)
{
	u8	config;
	int	rc;
	struct pm8058_chip *pm_chip;

	if (mpp >= PM8058_MPPS)
		return -EINVAL;

	pm_chip = dev_get_drvdata(pm8058_mpp_chip.chip.dev);

	/* pack type/level/control into a single control byte */
	config = (type << PM8058_MPP_TYPE_SHIFT) & PM8058_MPP_TYPE_MASK;
	config |= (level << PM8058_MPP_CONFIG_LVL_SHIFT) &
		  PM8058_MPP_CONFIG_LVL_MASK;
	config |= control & PM8058_MPP_CONFIG_CTL_MASK;

	rc = pm8058_write(pm_chip, SSBI_MPP_CNTRL(mpp), &config, 1);
	if (rc)
		pr_err("%s: pm8058_write(): rc=%d\n", __func__, rc);

	return rc;
}
EXPORT_SYMBOL(pm8058_mpp_config);

/* Legacy variant: register the chip over the platform-provided range. */
static int __devinit pm8058_mpp_probe(struct platform_device *pdev)
{
	int	rc;
	struct pm8058_gpio_platform_data *pdata = pdev->dev.platform_data;

	pm8058_mpp_chip.chip.dev = &pdev->dev;
	pm8058_mpp_chip.chip.start = pdata->gpio_base;
	pm8058_mpp_chip.chip.end = pdata->gpio_base + PM8058_MPPS - 1;
	rc = register_gpio_chip(&pm8058_mpp_chip.chip);
	pr_info("%s: register_gpio_chip(): rc=%d\n", __func__, rc);

	return rc;
}

static int __devexit pm8058_mpp_remove(struct platform_device *pdev)
{
	/* legacy gpio_chip API has no unregister here; nothing to undo */
	return 0;
}
#else
/* gpiolib variant: chip-relative offset to IRQ number. */
static int pm8058_mpp_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct pm8058_gpio_platform_data *pdata;

	pdata = chip->dev->platform_data;
	return pdata->irq_base + offset;
}

/* gpiolib variant: .get callback, offset is already chip-relative. */
static int pm8058_mpp_read(struct gpio_chip *chip, unsigned offset)
{
	return pm8058_mpp_get(chip, offset);
}

/*
 * debugfs dump: one line per MPP showing its label, configured type
 * (decoded from the control register), input state and raw control byte.
 */
static void pm8058_mpp_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
	/* names indexed by the 3-bit MPP type field */
	static const char *ctype[] = { "d_in", "d_out", "bi_dir", "a_in",
		"a_out", "sink", "dtest_sink", "dtest_out" };
	struct pm8058_chip *pm_chip = dev_get_drvdata(chip->dev);
	u8 type, state, ctrl;
	const char *label;
	int i;

	for (i = 0; i < PM8058_MPPS; i++) {
		pm8058_read(pm_chip, SSBI_MPP_CNTRL(i), &ctrl, 1);
		label = gpiochip_is_requested(chip, i);
		type = (ctrl & PM8058_MPP_TYPE_MASK) >>
			PM8058_MPP_TYPE_SHIFT;
		state = pm8058_mpp_get(chip, i);
		seq_printf(s, "gpio-%-3d (%-12.12s) %-10.10s"
			   " %s 0x%02x\n",
			   chip->base + i,
			   label ? label : "--",
			   ctype[type],
			   state ? "hi" : "lo",
			   ctrl);
	}
}

static struct gpio_chip pm8058_mpp_chip = {
	.label		= "pm8058-mpp",
	.to_irq		= pm8058_mpp_to_irq,
	.get		= pm8058_mpp_read,
	.dbg_show	= pm8058_mpp_dbg_show,
	.ngpio		= PM8058_MPPS,
	.can_sleep	= 1,
};

/*
 * gpiolib build of pm8058_mpp_config: identical register programming to
 * the legacy variant above, but fetches the pm8058 chip through the
 * plain gpio_chip's dev pointer.
 */
int pm8058_mpp_config(unsigned mpp, unsigned type, unsigned level,
		      unsigned control)
{
	u8	config;
	int	rc;
	struct pm8058_chip *pm_chip;

	if (mpp >= PM8058_MPPS)
		return -EINVAL;

	pm_chip = dev_get_drvdata(pm8058_mpp_chip.dev);

	config = (type << PM8058_MPP_TYPE_SHIFT) & PM8058_MPP_TYPE_MASK;
	config |= (level << PM8058_MPP_CONFIG_LVL_SHIFT) &
		  PM8058_MPP_CONFIG_LVL_MASK;
	config |= control & PM8058_MPP_CONFIG_CTL_MASK;

	rc = pm8058_write(pm_chip, SSBI_MPP_CNTRL(mpp), &config, 1);
	if (rc)
		pr_err("%s: pm8058_write(): rc=%d\n", __func__, rc);

	return rc;
}
EXPORT_SYMBOL(pm8058_mpp_config);

/* gpiolib variant: register the chip at the platform-provided base. */
static int __devinit pm8058_mpp_probe(struct platform_device *pdev)
{
	int	ret;
	struct pm8058_gpio_platform_data *pdata = pdev->dev.platform_data;

	pm8058_mpp_chip.dev = &pdev->dev;
	pm8058_mpp_chip.base = pdata->gpio_base;
	ret = gpiochip_add(&pm8058_mpp_chip);
	pr_info("%s: gpiochip_add(): rc=%d\n", __func__, ret);

	return ret;
}

static int __devexit pm8058_mpp_remove(struct platform_device *pdev)
{
	/* gpiochip_remove() returned int in this kernel generation */
	return gpiochip_remove(&pm8058_mpp_chip);
}
#endif

static struct platform_driver pm8058_mpp_driver = {
	.probe		= pm8058_mpp_probe,
	.remove		= __devexit_p(pm8058_mpp_remove),
	.driver		= {
		.name	= "pm8058-mpp",
		.owner	= THIS_MODULE,
	},
};

static int __init pm8058_mpp_init(void)
{
	return platform_driver_register(&pm8058_mpp_driver);
}

static void __exit pm8058_mpp_exit(void)
{
	platform_driver_unregister(&pm8058_mpp_driver);
}

/* register early so dependent drivers find the MPPs at probe time */
subsys_initcall(pm8058_mpp_init);
module_exit(pm8058_mpp_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("PMIC8058 MPP driver");
MODULE_VERSION("1.0");
MODULE_ALIAS("platform:pm8058-mpp");
gpl-2.0
jongh90/kvm
drivers/leds/leds-lm3533.c
971
18619
/* * leds-lm3533.c -- LM3533 LED driver * * Copyright (C) 2011-2012 Texas Instruments * * Author: Johan Hovold <jhovold@gmail.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/leds.h> #include <linux/mfd/core.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/mfd/lm3533.h> #define LM3533_LVCTRLBANK_MIN 2 #define LM3533_LVCTRLBANK_MAX 5 #define LM3533_LVCTRLBANK_COUNT 4 #define LM3533_RISEFALLTIME_MAX 7 #define LM3533_ALS_CHANNEL_LV_MIN 1 #define LM3533_ALS_CHANNEL_LV_MAX 2 #define LM3533_REG_CTRLBANK_BCONF_BASE 0x1b #define LM3533_REG_PATTERN_ENABLE 0x28 #define LM3533_REG_PATTERN_LOW_TIME_BASE 0x71 #define LM3533_REG_PATTERN_HIGH_TIME_BASE 0x72 #define LM3533_REG_PATTERN_RISETIME_BASE 0x74 #define LM3533_REG_PATTERN_FALLTIME_BASE 0x75 #define LM3533_REG_PATTERN_STEP 0x10 #define LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK 0x04 #define LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK 0x02 #define LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK 0x01 #define LM3533_LED_FLAG_PATTERN_ENABLE 1 struct lm3533_led { struct lm3533 *lm3533; struct lm3533_ctrlbank cb; struct led_classdev cdev; int id; struct mutex mutex; unsigned long flags; struct work_struct work; u8 new_brightness; }; static inline struct lm3533_led *to_lm3533_led(struct led_classdev *cdev) { return container_of(cdev, struct lm3533_led, cdev); } static inline int lm3533_led_get_ctrlbank_id(struct lm3533_led *led) { return led->id + 2; } static inline u8 lm3533_led_get_lv_reg(struct lm3533_led *led, u8 base) { return base + led->id; } static inline u8 lm3533_led_get_pattern(struct lm3533_led *led) { return led->id; } static inline u8 lm3533_led_get_pattern_reg(struct lm3533_led *led, u8 base) { return 
base + lm3533_led_get_pattern(led) * LM3533_REG_PATTERN_STEP; } static int lm3533_led_pattern_enable(struct lm3533_led *led, int enable) { u8 mask; u8 val; int pattern; int state; int ret = 0; dev_dbg(led->cdev.dev, "%s - %d\n", __func__, enable); mutex_lock(&led->mutex); state = test_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags); if ((enable && state) || (!enable && !state)) goto out; pattern = lm3533_led_get_pattern(led); mask = 1 << (2 * pattern); if (enable) val = mask; else val = 0; ret = lm3533_update(led->lm3533, LM3533_REG_PATTERN_ENABLE, val, mask); if (ret) { dev_err(led->cdev.dev, "failed to enable pattern %d (%d)\n", pattern, enable); goto out; } __change_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags); out: mutex_unlock(&led->mutex); return ret; } static void lm3533_led_work(struct work_struct *work) { struct lm3533_led *led = container_of(work, struct lm3533_led, work); dev_dbg(led->cdev.dev, "%s - %u\n", __func__, led->new_brightness); if (led->new_brightness == 0) lm3533_led_pattern_enable(led, 0); /* disable blink */ lm3533_ctrlbank_set_brightness(&led->cb, led->new_brightness); } static void lm3533_led_set(struct led_classdev *cdev, enum led_brightness value) { struct lm3533_led *led = to_lm3533_led(cdev); dev_dbg(led->cdev.dev, "%s - %d\n", __func__, value); led->new_brightness = value; schedule_work(&led->work); } static enum led_brightness lm3533_led_get(struct led_classdev *cdev) { struct lm3533_led *led = to_lm3533_led(cdev); u8 val; int ret; ret = lm3533_ctrlbank_get_brightness(&led->cb, &val); if (ret) return ret; dev_dbg(led->cdev.dev, "%s - %u\n", __func__, val); return val; } /* Pattern generator defines (delays in us). 
*/ #define LM3533_LED_DELAY1_VMIN 0x00 #define LM3533_LED_DELAY2_VMIN 0x3d #define LM3533_LED_DELAY3_VMIN 0x80 #define LM3533_LED_DELAY1_VMAX (LM3533_LED_DELAY2_VMIN - 1) #define LM3533_LED_DELAY2_VMAX (LM3533_LED_DELAY3_VMIN - 1) #define LM3533_LED_DELAY3_VMAX 0xff #define LM3533_LED_DELAY1_TMIN 16384U #define LM3533_LED_DELAY2_TMIN 1130496U #define LM3533_LED_DELAY3_TMIN 10305536U #define LM3533_LED_DELAY1_TMAX 999424U #define LM3533_LED_DELAY2_TMAX 9781248U #define LM3533_LED_DELAY3_TMAX 76890112U /* t_step = (t_max - t_min) / (v_max - v_min) */ #define LM3533_LED_DELAY1_TSTEP 16384 #define LM3533_LED_DELAY2_TSTEP 131072 #define LM3533_LED_DELAY3_TSTEP 524288 /* Delay limits for hardware accelerated blinking (in ms). */ #define LM3533_LED_DELAY_ON_MAX \ ((LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY2_TSTEP / 2) / 1000) #define LM3533_LED_DELAY_OFF_MAX \ ((LM3533_LED_DELAY3_TMAX + LM3533_LED_DELAY3_TSTEP / 2) / 1000) /* * Returns linear map of *t from [t_min,t_max] to [v_min,v_max] with a step * size of t_step, where * * t_step = (t_max - t_min) / (v_max - v_min) * * and updates *t to reflect the mapped value. */ static u8 time_to_val(unsigned *t, unsigned t_min, unsigned t_step, u8 v_min, u8 v_max) { unsigned val; val = (*t + t_step / 2 - t_min) / t_step + v_min; *t = t_step * (val - v_min) + t_min; return (u8)val; } /* * Returns time code corresponding to *delay (in ms) and updates *delay to * reflect actual hardware delay. * * Hardware supports 256 discrete delay times, divided into three groups with * the following ranges and step-sizes: * * [ 16, 999] [0x00, 0x3e] step 16 ms * [ 1130, 9781] [0x3d, 0x7f] step 131 ms * [10306, 76890] [0x80, 0xff] step 524 ms * * Note that delay group 3 is only available for delay_off. 
*/ static u8 lm3533_led_get_hw_delay(unsigned *delay) { unsigned t; u8 val; t = *delay * 1000; if (t >= (LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY3_TMIN) / 2) { t = clamp(t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TMAX); val = time_to_val(&t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TSTEP, LM3533_LED_DELAY3_VMIN, LM3533_LED_DELAY3_VMAX); } else if (t >= (LM3533_LED_DELAY1_TMAX + LM3533_LED_DELAY2_TMIN) / 2) { t = clamp(t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TMAX); val = time_to_val(&t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TSTEP, LM3533_LED_DELAY2_VMIN, LM3533_LED_DELAY2_VMAX); } else { t = clamp(t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TMAX); val = time_to_val(&t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TSTEP, LM3533_LED_DELAY1_VMIN, LM3533_LED_DELAY1_VMAX); } *delay = (t + 500) / 1000; return val; } /* * Set delay register base to *delay (in ms) and update *delay to reflect * actual hardware delay used. */ static u8 lm3533_led_delay_set(struct lm3533_led *led, u8 base, unsigned long *delay) { unsigned t; u8 val; u8 reg; int ret; t = (unsigned)*delay; /* Delay group 3 is only available for low time (delay off). 
*/ if (base != LM3533_REG_PATTERN_LOW_TIME_BASE) t = min(t, LM3533_LED_DELAY2_TMAX / 1000); val = lm3533_led_get_hw_delay(&t); dev_dbg(led->cdev.dev, "%s - %lu: %u (0x%02x)\n", __func__, *delay, t, val); reg = lm3533_led_get_pattern_reg(led, base); ret = lm3533_write(led->lm3533, reg, val); if (ret) dev_err(led->cdev.dev, "failed to set delay (%02x)\n", reg); *delay = t; return ret; } static int lm3533_led_delay_on_set(struct lm3533_led *led, unsigned long *t) { return lm3533_led_delay_set(led, LM3533_REG_PATTERN_HIGH_TIME_BASE, t); } static int lm3533_led_delay_off_set(struct lm3533_led *led, unsigned long *t) { return lm3533_led_delay_set(led, LM3533_REG_PATTERN_LOW_TIME_BASE, t); } static int lm3533_led_blink_set(struct led_classdev *cdev, unsigned long *delay_on, unsigned long *delay_off) { struct lm3533_led *led = to_lm3533_led(cdev); int ret; dev_dbg(led->cdev.dev, "%s - on = %lu, off = %lu\n", __func__, *delay_on, *delay_off); if (*delay_on > LM3533_LED_DELAY_ON_MAX || *delay_off > LM3533_LED_DELAY_OFF_MAX) return -EINVAL; if (*delay_on == 0 && *delay_off == 0) { *delay_on = 500; *delay_off = 500; } ret = lm3533_led_delay_on_set(led, delay_on); if (ret) return ret; ret = lm3533_led_delay_off_set(led, delay_off); if (ret) return ret; return lm3533_led_pattern_enable(led, 1); } static ssize_t show_id(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lm3533_led *led = to_lm3533_led(led_cdev); return scnprintf(buf, PAGE_SIZE, "%d\n", led->id); } /* * Pattern generator rise/fall times: * * 0 - 2048 us (default) * 1 - 262 ms * 2 - 524 ms * 3 - 1.049 s * 4 - 2.097 s * 5 - 4.194 s * 6 - 8.389 s * 7 - 16.78 s */ static ssize_t show_risefalltime(struct device *dev, struct device_attribute *attr, char *buf, u8 base) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lm3533_led *led = to_lm3533_led(led_cdev); ssize_t ret; u8 reg; u8 val; reg = lm3533_led_get_pattern_reg(led, base); 
ret = lm3533_read(led->lm3533, reg, &val); if (ret) return ret; return scnprintf(buf, PAGE_SIZE, "%x\n", val); } static ssize_t show_risetime(struct device *dev, struct device_attribute *attr, char *buf) { return show_risefalltime(dev, attr, buf, LM3533_REG_PATTERN_RISETIME_BASE); } static ssize_t show_falltime(struct device *dev, struct device_attribute *attr, char *buf) { return show_risefalltime(dev, attr, buf, LM3533_REG_PATTERN_FALLTIME_BASE); } static ssize_t store_risefalltime(struct device *dev, struct device_attribute *attr, const char *buf, size_t len, u8 base) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lm3533_led *led = to_lm3533_led(led_cdev); u8 val; u8 reg; int ret; if (kstrtou8(buf, 0, &val) || val > LM3533_RISEFALLTIME_MAX) return -EINVAL; reg = lm3533_led_get_pattern_reg(led, base); ret = lm3533_write(led->lm3533, reg, val); if (ret) return ret; return len; } static ssize_t store_risetime(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return store_risefalltime(dev, attr, buf, len, LM3533_REG_PATTERN_RISETIME_BASE); } static ssize_t store_falltime(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { return store_risefalltime(dev, attr, buf, len, LM3533_REG_PATTERN_FALLTIME_BASE); } static ssize_t show_als_channel(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lm3533_led *led = to_lm3533_led(led_cdev); unsigned channel; u8 reg; u8 val; int ret; reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE); ret = lm3533_read(led->lm3533, reg, &val); if (ret) return ret; channel = (val & LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK) + 1; return scnprintf(buf, PAGE_SIZE, "%u\n", channel); } static ssize_t store_als_channel(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lm3533_led *led = 
to_lm3533_led(led_cdev); unsigned channel; u8 reg; u8 val; u8 mask; int ret; if (kstrtouint(buf, 0, &channel)) return -EINVAL; if (channel < LM3533_ALS_CHANNEL_LV_MIN || channel > LM3533_ALS_CHANNEL_LV_MAX) return -EINVAL; reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE); mask = LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK; val = channel - 1; ret = lm3533_update(led->lm3533, reg, val, mask); if (ret) return ret; return len; } static ssize_t show_als_en(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lm3533_led *led = to_lm3533_led(led_cdev); bool enable; u8 reg; u8 val; int ret; reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE); ret = lm3533_read(led->lm3533, reg, &val); if (ret) return ret; enable = val & LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK; return scnprintf(buf, PAGE_SIZE, "%d\n", enable); } static ssize_t store_als_en(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lm3533_led *led = to_lm3533_led(led_cdev); unsigned enable; u8 reg; u8 mask; u8 val; int ret; if (kstrtouint(buf, 0, &enable)) return -EINVAL; reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE); mask = LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK; if (enable) val = mask; else val = 0; ret = lm3533_update(led->lm3533, reg, val, mask); if (ret) return ret; return len; } static ssize_t show_linear(struct device *dev, struct device_attribute *attr, char *buf) { struct led_classdev *led_cdev = dev_get_drvdata(dev); struct lm3533_led *led = to_lm3533_led(led_cdev); u8 reg; u8 val; int linear; int ret; reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE); ret = lm3533_read(led->lm3533, reg, &val); if (ret) return ret; if (val & LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK) linear = 1; else linear = 0; return scnprintf(buf, PAGE_SIZE, "%x\n", linear); } static ssize_t store_linear(struct device 
							*dev,
				struct device_attribute *attr,
				const char *buf, size_t len)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	unsigned long linear;
	u8 reg;
	u8 mask;
	u8 val;
	int ret;

	if (kstrtoul(buf, 0, &linear))
		return -EINVAL;

	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
	mask = LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK;

	/* Any non-zero value sets the mapping bit. */
	if (linear)
		val = mask;
	else
		val = 0;

	ret = lm3533_update(led->lm3533, reg, val, mask);
	if (ret)
		return ret;

	return len;
}

/* sysfs "pwm" show: PWM configuration of this LED's control bank. */
static ssize_t show_pwm(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	u8 val;
	int ret;

	ret = lm3533_ctrlbank_get_pwm(&led->cb, &val);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}

/* sysfs "pwm" store. */
static ssize_t store_pwm(struct device *dev,
					struct device_attribute *attr,
					const char *buf,
					size_t len)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	u8 val;
	int ret;

	if (kstrtou8(buf, 0, &val))
		return -EINVAL;

	ret = lm3533_ctrlbank_set_pwm(&led->cb, val);
	if (ret)
		return ret;

	return len;
}

static LM3533_ATTR_RW(als_channel);
static LM3533_ATTR_RW(als_en);
static LM3533_ATTR_RW(falltime);
static LM3533_ATTR_RO(id);
static LM3533_ATTR_RW(linear);
static LM3533_ATTR_RW(pwm);
static LM3533_ATTR_RW(risetime);

/*
 * The ALS attributes are hidden on controllers without ALS support; see
 * lm3533_led_attr_is_visible() below.
 */
static struct attribute *lm3533_led_attributes[] = {
	&dev_attr_als_channel.attr,
	&dev_attr_als_en.attr,
	&dev_attr_falltime.attr,
	&dev_attr_id.attr,
	&dev_attr_linear.attr,
	&dev_attr_pwm.attr,
	&dev_attr_risetime.attr,
	NULL,
};

/*
 * Attribute-group is_visible callback: drop the ALS attributes when the
 * chip has no ALS (body continues on the next chunk line).
 */
static umode_t lm3533_led_attr_is_visible(struct kobject *kobj,
					struct attribute *attr, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	umode_t mode = attr->mode;

	if (attr == &dev_attr_als_channel.attr ||
					attr == &dev_attr_als_en.attr) {
if (!led->lm3533->have_als) mode = 0; } return mode; }; static struct attribute_group lm3533_led_attribute_group = { .is_visible = lm3533_led_attr_is_visible, .attrs = lm3533_led_attributes }; static const struct attribute_group *lm3533_led_attribute_groups[] = { &lm3533_led_attribute_group, NULL }; static int lm3533_led_setup(struct lm3533_led *led, struct lm3533_led_platform_data *pdata) { int ret; ret = lm3533_ctrlbank_set_max_current(&led->cb, pdata->max_current); if (ret) return ret; return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm); } static int lm3533_led_probe(struct platform_device *pdev) { struct lm3533 *lm3533; struct lm3533_led_platform_data *pdata; struct lm3533_led *led; int ret; dev_dbg(&pdev->dev, "%s\n", __func__); lm3533 = dev_get_drvdata(pdev->dev.parent); if (!lm3533) return -EINVAL; pdata = dev_get_platdata(&pdev->dev); if (!pdata) { dev_err(&pdev->dev, "no platform data\n"); return -EINVAL; } if (pdev->id < 0 || pdev->id >= LM3533_LVCTRLBANK_COUNT) { dev_err(&pdev->dev, "illegal LED id %d\n", pdev->id); return -EINVAL; } led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL); if (!led) return -ENOMEM; led->lm3533 = lm3533; led->cdev.name = pdata->name; led->cdev.default_trigger = pdata->default_trigger; led->cdev.brightness_set = lm3533_led_set; led->cdev.brightness_get = lm3533_led_get; led->cdev.blink_set = lm3533_led_blink_set; led->cdev.brightness = LED_OFF; led->cdev.groups = lm3533_led_attribute_groups, led->id = pdev->id; mutex_init(&led->mutex); INIT_WORK(&led->work, lm3533_led_work); /* The class framework makes a callback to get brightness during * registration so use parent device (for error reporting) until * registered. 
*/ led->cb.lm3533 = lm3533; led->cb.id = lm3533_led_get_ctrlbank_id(led); led->cb.dev = lm3533->dev; platform_set_drvdata(pdev, led); ret = led_classdev_register(pdev->dev.parent, &led->cdev); if (ret) { dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id); return ret; } led->cb.dev = led->cdev.dev; ret = lm3533_led_setup(led, pdata); if (ret) goto err_unregister; ret = lm3533_ctrlbank_enable(&led->cb); if (ret) goto err_unregister; return 0; err_unregister: led_classdev_unregister(&led->cdev); flush_work(&led->work); return ret; } static int lm3533_led_remove(struct platform_device *pdev) { struct lm3533_led *led = platform_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s\n", __func__); lm3533_ctrlbank_disable(&led->cb); led_classdev_unregister(&led->cdev); flush_work(&led->work); return 0; } static void lm3533_led_shutdown(struct platform_device *pdev) { struct lm3533_led *led = platform_get_drvdata(pdev); dev_dbg(&pdev->dev, "%s\n", __func__); lm3533_ctrlbank_disable(&led->cb); lm3533_led_set(&led->cdev, LED_OFF); /* disable blink */ flush_work(&led->work); } static struct platform_driver lm3533_led_driver = { .driver = { .name = "lm3533-leds", }, .probe = lm3533_led_probe, .remove = lm3533_led_remove, .shutdown = lm3533_led_shutdown, }; module_platform_driver(lm3533_led_driver); MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>"); MODULE_DESCRIPTION("LM3533 LED driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:lm3533-leds");
gpl-2.0
danielstuart14/msm-3.10-8974
arch/arm/mach-davinci/da850.c
1227
36228
/* * TI DA850/OMAP-L138 chip specific setup * * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/ * * Derived from: arch/arm/mach-davinci/da830.c * Original Copyrights follow: * * 2009 (c) MontaVista Software, Inc. This file is licensed under * the terms of the GNU General Public License version 2. This program * is licensed "as is" without any warranty of any kind, whether express * or implied. */ #include <linux/gpio.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/cpufreq.h> #include <linux/regulator/consumer.h> #include <asm/mach/map.h> #include <mach/psc.h> #include <mach/irqs.h> #include <mach/cputype.h> #include <mach/common.h> #include <mach/time.h> #include <mach/da8xx.h> #include <mach/cpufreq.h> #include <mach/pm.h> #include <mach/gpio-davinci.h> #include "clock.h" #include "mux.h" /* SoC specific clock flags */ #define DA850_CLK_ASYNC3 BIT(16) #define DA850_PLL1_BASE 0x01e1a000 #define DA850_TIMER64P2_BASE 0x01f0c000 #define DA850_TIMER64P3_BASE 0x01f0d000 #define DA850_REF_FREQ 24000000 #define CFGCHIP3_ASYNC3_CLKSRC BIT(4) #define CFGCHIP3_PLL1_MASTER_LOCK BIT(5) #define CFGCHIP0_PLL_MASTER_LOCK BIT(4) static int da850_set_armrate(struct clk *clk, unsigned long rate); static int da850_round_armrate(struct clk *clk, unsigned long rate); static int da850_set_pll0rate(struct clk *clk, unsigned long armrate); static struct pll_data pll0_data = { .num = 1, .phys_base = DA8XX_PLL0_BASE, .flags = PLL_HAS_PREDIV | PLL_HAS_POSTDIV, }; static struct clk ref_clk = { .name = "ref_clk", .rate = DA850_REF_FREQ, .set_rate = davinci_simple_set_rate, }; static struct clk pll0_clk = { .name = "pll0", .parent = &ref_clk, .pll_data = &pll0_data, .flags = CLK_PLL, .set_rate = da850_set_pll0rate, }; static struct clk pll0_aux_clk = { .name = "pll0_aux_clk", .parent = &pll0_clk, .flags = CLK_PLL | PRE_PLL, }; static struct clk pll0_sysclk1 = { .name = "pll0_sysclk1", .parent = &pll0_clk, .flags = 
CLK_PLL, .div_reg = PLLDIV1, }; static struct clk pll0_sysclk2 = { .name = "pll0_sysclk2", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV2, }; static struct clk pll0_sysclk3 = { .name = "pll0_sysclk3", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV3, .set_rate = davinci_set_sysclk_rate, .maxrate = 100000000, }; static struct clk pll0_sysclk4 = { .name = "pll0_sysclk4", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV4, }; static struct clk pll0_sysclk5 = { .name = "pll0_sysclk5", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV5, }; static struct clk pll0_sysclk6 = { .name = "pll0_sysclk6", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV6, }; static struct clk pll0_sysclk7 = { .name = "pll0_sysclk7", .parent = &pll0_clk, .flags = CLK_PLL, .div_reg = PLLDIV7, }; static struct pll_data pll1_data = { .num = 2, .phys_base = DA850_PLL1_BASE, .flags = PLL_HAS_POSTDIV, }; static struct clk pll1_clk = { .name = "pll1", .parent = &ref_clk, .pll_data = &pll1_data, .flags = CLK_PLL, }; static struct clk pll1_aux_clk = { .name = "pll1_aux_clk", .parent = &pll1_clk, .flags = CLK_PLL | PRE_PLL, }; static struct clk pll1_sysclk2 = { .name = "pll1_sysclk2", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV2, }; static struct clk pll1_sysclk3 = { .name = "pll1_sysclk3", .parent = &pll1_clk, .flags = CLK_PLL, .div_reg = PLLDIV3, }; static struct clk i2c0_clk = { .name = "i2c0", .parent = &pll0_aux_clk, }; static struct clk timerp64_0_clk = { .name = "timer0", .parent = &pll0_aux_clk, }; static struct clk timerp64_1_clk = { .name = "timer1", .parent = &pll0_aux_clk, }; static struct clk arm_rom_clk = { .name = "arm_rom", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_ARM_RAM_ROM, .flags = ALWAYS_ENABLED, }; static struct clk tpcc0_clk = { .name = "tpcc0", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_TPCC, .flags = ALWAYS_ENABLED | CLK_PSC, }; static struct clk tptc0_clk = { .name = "tptc0", .parent = &pll0_sysclk2, .lpsc = 
DA8XX_LPSC0_TPTC0, .flags = ALWAYS_ENABLED, }; static struct clk tptc1_clk = { .name = "tptc1", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_TPTC1, .flags = ALWAYS_ENABLED, }; static struct clk tpcc1_clk = { .name = "tpcc1", .parent = &pll0_sysclk2, .lpsc = DA850_LPSC1_TPCC1, .gpsc = 1, .flags = CLK_PSC | ALWAYS_ENABLED, }; static struct clk tptc2_clk = { .name = "tptc2", .parent = &pll0_sysclk2, .lpsc = DA850_LPSC1_TPTC2, .gpsc = 1, .flags = ALWAYS_ENABLED, }; static struct clk pruss_clk = { .name = "pruss", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_PRUSS, }; static struct clk uart0_clk = { .name = "uart0", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_UART0, }; static struct clk uart1_clk = { .name = "uart1", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_UART1, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; static struct clk uart2_clk = { .name = "uart2", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_UART2, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; static struct clk aintc_clk = { .name = "aintc", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC0_AINTC, .flags = ALWAYS_ENABLED, }; static struct clk gpio_clk = { .name = "gpio", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC1_GPIO, .gpsc = 1, }; static struct clk i2c1_clk = { .name = "i2c1", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC1_I2C, .gpsc = 1, }; static struct clk emif3_clk = { .name = "emif3", .parent = &pll0_sysclk5, .lpsc = DA8XX_LPSC1_EMIF3C, .gpsc = 1, .flags = ALWAYS_ENABLED, }; static struct clk arm_clk = { .name = "arm", .parent = &pll0_sysclk6, .lpsc = DA8XX_LPSC0_ARM, .flags = ALWAYS_ENABLED, .set_rate = da850_set_armrate, .round_rate = da850_round_armrate, }; static struct clk rmii_clk = { .name = "rmii", .parent = &pll0_sysclk7, }; static struct clk emac_clk = { .name = "emac", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC1_CPGMAC, .gpsc = 1, }; static struct clk mcasp_clk = { .name = "mcasp", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_McASP0, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; static struct clk lcdc_clk 
= { .name = "lcdc", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_LCDC, .gpsc = 1, }; static struct clk mmcsd0_clk = { .name = "mmcsd0", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_MMC_SD, }; static struct clk mmcsd1_clk = { .name = "mmcsd1", .parent = &pll0_sysclk2, .lpsc = DA850_LPSC1_MMC_SD1, .gpsc = 1, }; static struct clk aemif_clk = { .name = "aemif", .parent = &pll0_sysclk3, .lpsc = DA8XX_LPSC0_EMIF25, .flags = ALWAYS_ENABLED, }; static struct clk usb11_clk = { .name = "usb11", .parent = &pll0_sysclk4, .lpsc = DA8XX_LPSC1_USB11, .gpsc = 1, }; static struct clk usb20_clk = { .name = "usb20", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_USB20, .gpsc = 1, }; static struct clk spi0_clk = { .name = "spi0", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC0_SPI0, }; static struct clk spi1_clk = { .name = "spi1", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_SPI1, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; static struct clk vpif_clk = { .name = "vpif", .parent = &pll0_sysclk2, .lpsc = DA850_LPSC1_VPIF, .gpsc = 1, }; static struct clk sata_clk = { .name = "sata", .parent = &pll0_sysclk2, .lpsc = DA850_LPSC1_SATA, .gpsc = 1, .flags = PSC_FORCE, }; static struct clk dsp_clk = { .name = "dsp", .parent = &pll0_sysclk1, .domain = DAVINCI_GPSC_DSPDOMAIN, .lpsc = DA8XX_LPSC0_GEM, .flags = PSC_LRST | PSC_FORCE, }; static struct clk ehrpwm_clk = { .name = "ehrpwm", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_PWM, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; #define DA8XX_EHRPWM_TBCLKSYNC BIT(12) static void ehrpwm_tblck_enable(struct clk *clk) { u32 val; val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP1_REG)); val |= DA8XX_EHRPWM_TBCLKSYNC; writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP1_REG)); } static void ehrpwm_tblck_disable(struct clk *clk) { u32 val; val = readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP1_REG)); val &= ~DA8XX_EHRPWM_TBCLKSYNC; writel(val, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP1_REG)); } static struct clk ehrpwm_tbclk = { .name = "ehrpwm_tbclk", .parent = &ehrpwm_clk, .clk_enable = 
ehrpwm_tblck_enable, .clk_disable = ehrpwm_tblck_disable, }; static struct clk ecap_clk = { .name = "ecap", .parent = &pll0_sysclk2, .lpsc = DA8XX_LPSC1_ECAP, .gpsc = 1, .flags = DA850_CLK_ASYNC3, }; static struct clk_lookup da850_clks[] = { CLK(NULL, "ref", &ref_clk), CLK(NULL, "pll0", &pll0_clk), CLK(NULL, "pll0_aux", &pll0_aux_clk), CLK(NULL, "pll0_sysclk1", &pll0_sysclk1), CLK(NULL, "pll0_sysclk2", &pll0_sysclk2), CLK(NULL, "pll0_sysclk3", &pll0_sysclk3), CLK(NULL, "pll0_sysclk4", &pll0_sysclk4), CLK(NULL, "pll0_sysclk5", &pll0_sysclk5), CLK(NULL, "pll0_sysclk6", &pll0_sysclk6), CLK(NULL, "pll0_sysclk7", &pll0_sysclk7), CLK(NULL, "pll1", &pll1_clk), CLK(NULL, "pll1_aux", &pll1_aux_clk), CLK(NULL, "pll1_sysclk2", &pll1_sysclk2), CLK(NULL, "pll1_sysclk3", &pll1_sysclk3), CLK("i2c_davinci.1", NULL, &i2c0_clk), CLK(NULL, "timer0", &timerp64_0_clk), CLK("watchdog", NULL, &timerp64_1_clk), CLK(NULL, "arm_rom", &arm_rom_clk), CLK(NULL, "tpcc0", &tpcc0_clk), CLK(NULL, "tptc0", &tptc0_clk), CLK(NULL, "tptc1", &tptc1_clk), CLK(NULL, "tpcc1", &tpcc1_clk), CLK(NULL, "tptc2", &tptc2_clk), CLK("pruss_uio", "pruss", &pruss_clk), CLK(NULL, "uart0", &uart0_clk), CLK(NULL, "uart1", &uart1_clk), CLK(NULL, "uart2", &uart2_clk), CLK(NULL, "aintc", &aintc_clk), CLK(NULL, "gpio", &gpio_clk), CLK("i2c_davinci.2", NULL, &i2c1_clk), CLK(NULL, "emif3", &emif3_clk), CLK(NULL, "arm", &arm_clk), CLK(NULL, "rmii", &rmii_clk), CLK("davinci_emac.1", NULL, &emac_clk), CLK("davinci-mcasp.0", NULL, &mcasp_clk), CLK("da8xx_lcdc.0", "fck", &lcdc_clk), CLK("da830-mmc.0", NULL, &mmcsd0_clk), CLK("da830-mmc.1", NULL, &mmcsd1_clk), CLK(NULL, "aemif", &aemif_clk), CLK(NULL, "usb11", &usb11_clk), CLK(NULL, "usb20", &usb20_clk), CLK("spi_davinci.0", NULL, &spi0_clk), CLK("spi_davinci.1", NULL, &spi1_clk), CLK("vpif", NULL, &vpif_clk), CLK("ahci", NULL, &sata_clk), CLK("davinci-rproc.0", NULL, &dsp_clk), CLK("ehrpwm", "fck", &ehrpwm_clk), CLK("ehrpwm", "tbclk", &ehrpwm_tbclk), CLK("ecap", "fck", 
&ecap_clk), CLK(NULL, NULL, NULL), }; /* * Device specific mux setup * * soc description mux mode mode mux dbg * reg offset mask mode */ static const struct mux_config da850_pins[] = { #ifdef CONFIG_DAVINCI_MUX /* UART0 function */ MUX_CFG(DA850, NUART0_CTS, 3, 24, 15, 2, false) MUX_CFG(DA850, NUART0_RTS, 3, 28, 15, 2, false) MUX_CFG(DA850, UART0_RXD, 3, 16, 15, 2, false) MUX_CFG(DA850, UART0_TXD, 3, 20, 15, 2, false) /* UART1 function */ MUX_CFG(DA850, UART1_RXD, 4, 24, 15, 2, false) MUX_CFG(DA850, UART1_TXD, 4, 28, 15, 2, false) /* UART2 function */ MUX_CFG(DA850, UART2_RXD, 4, 16, 15, 2, false) MUX_CFG(DA850, UART2_TXD, 4, 20, 15, 2, false) /* I2C1 function */ MUX_CFG(DA850, I2C1_SCL, 4, 16, 15, 4, false) MUX_CFG(DA850, I2C1_SDA, 4, 20, 15, 4, false) /* I2C0 function */ MUX_CFG(DA850, I2C0_SDA, 4, 12, 15, 2, false) MUX_CFG(DA850, I2C0_SCL, 4, 8, 15, 2, false) /* EMAC function */ MUX_CFG(DA850, MII_TXEN, 2, 4, 15, 8, false) MUX_CFG(DA850, MII_TXCLK, 2, 8, 15, 8, false) MUX_CFG(DA850, MII_COL, 2, 12, 15, 8, false) MUX_CFG(DA850, MII_TXD_3, 2, 16, 15, 8, false) MUX_CFG(DA850, MII_TXD_2, 2, 20, 15, 8, false) MUX_CFG(DA850, MII_TXD_1, 2, 24, 15, 8, false) MUX_CFG(DA850, MII_TXD_0, 2, 28, 15, 8, false) MUX_CFG(DA850, MII_RXCLK, 3, 0, 15, 8, false) MUX_CFG(DA850, MII_RXDV, 3, 4, 15, 8, false) MUX_CFG(DA850, MII_RXER, 3, 8, 15, 8, false) MUX_CFG(DA850, MII_CRS, 3, 12, 15, 8, false) MUX_CFG(DA850, MII_RXD_3, 3, 16, 15, 8, false) MUX_CFG(DA850, MII_RXD_2, 3, 20, 15, 8, false) MUX_CFG(DA850, MII_RXD_1, 3, 24, 15, 8, false) MUX_CFG(DA850, MII_RXD_0, 3, 28, 15, 8, false) MUX_CFG(DA850, MDIO_CLK, 4, 0, 15, 8, false) MUX_CFG(DA850, MDIO_D, 4, 4, 15, 8, false) MUX_CFG(DA850, RMII_TXD_0, 14, 12, 15, 8, false) MUX_CFG(DA850, RMII_TXD_1, 14, 8, 15, 8, false) MUX_CFG(DA850, RMII_TXEN, 14, 16, 15, 8, false) MUX_CFG(DA850, RMII_CRS_DV, 15, 4, 15, 8, false) MUX_CFG(DA850, RMII_RXD_0, 14, 24, 15, 8, false) MUX_CFG(DA850, RMII_RXD_1, 14, 20, 15, 8, false) MUX_CFG(DA850, RMII_RXER, 14, 
28, 15, 8, false) MUX_CFG(DA850, RMII_MHZ_50_CLK, 15, 0, 15, 0, false) /* McASP function */ MUX_CFG(DA850, ACLKR, 0, 0, 15, 1, false) MUX_CFG(DA850, ACLKX, 0, 4, 15, 1, false) MUX_CFG(DA850, AFSR, 0, 8, 15, 1, false) MUX_CFG(DA850, AFSX, 0, 12, 15, 1, false) MUX_CFG(DA850, AHCLKR, 0, 16, 15, 1, false) MUX_CFG(DA850, AHCLKX, 0, 20, 15, 1, false) MUX_CFG(DA850, AMUTE, 0, 24, 15, 1, false) MUX_CFG(DA850, AXR_15, 1, 0, 15, 1, false) MUX_CFG(DA850, AXR_14, 1, 4, 15, 1, false) MUX_CFG(DA850, AXR_13, 1, 8, 15, 1, false) MUX_CFG(DA850, AXR_12, 1, 12, 15, 1, false) MUX_CFG(DA850, AXR_11, 1, 16, 15, 1, false) MUX_CFG(DA850, AXR_10, 1, 20, 15, 1, false) MUX_CFG(DA850, AXR_9, 1, 24, 15, 1, false) MUX_CFG(DA850, AXR_8, 1, 28, 15, 1, false) MUX_CFG(DA850, AXR_7, 2, 0, 15, 1, false) MUX_CFG(DA850, AXR_6, 2, 4, 15, 1, false) MUX_CFG(DA850, AXR_5, 2, 8, 15, 1, false) MUX_CFG(DA850, AXR_4, 2, 12, 15, 1, false) MUX_CFG(DA850, AXR_3, 2, 16, 15, 1, false) MUX_CFG(DA850, AXR_2, 2, 20, 15, 1, false) MUX_CFG(DA850, AXR_1, 2, 24, 15, 1, false) MUX_CFG(DA850, AXR_0, 2, 28, 15, 1, false) /* LCD function */ MUX_CFG(DA850, LCD_D_7, 16, 8, 15, 2, false) MUX_CFG(DA850, LCD_D_6, 16, 12, 15, 2, false) MUX_CFG(DA850, LCD_D_5, 16, 16, 15, 2, false) MUX_CFG(DA850, LCD_D_4, 16, 20, 15, 2, false) MUX_CFG(DA850, LCD_D_3, 16, 24, 15, 2, false) MUX_CFG(DA850, LCD_D_2, 16, 28, 15, 2, false) MUX_CFG(DA850, LCD_D_1, 17, 0, 15, 2, false) MUX_CFG(DA850, LCD_D_0, 17, 4, 15, 2, false) MUX_CFG(DA850, LCD_D_15, 17, 8, 15, 2, false) MUX_CFG(DA850, LCD_D_14, 17, 12, 15, 2, false) MUX_CFG(DA850, LCD_D_13, 17, 16, 15, 2, false) MUX_CFG(DA850, LCD_D_12, 17, 20, 15, 2, false) MUX_CFG(DA850, LCD_D_11, 17, 24, 15, 2, false) MUX_CFG(DA850, LCD_D_10, 17, 28, 15, 2, false) MUX_CFG(DA850, LCD_D_9, 18, 0, 15, 2, false) MUX_CFG(DA850, LCD_D_8, 18, 4, 15, 2, false) MUX_CFG(DA850, LCD_PCLK, 18, 24, 15, 2, false) MUX_CFG(DA850, LCD_HSYNC, 19, 0, 15, 2, false) MUX_CFG(DA850, LCD_VSYNC, 19, 4, 15, 2, false) MUX_CFG(DA850, 
NLCD_AC_ENB_CS, 19, 24, 15, 2, false) /* MMC/SD0 function */ MUX_CFG(DA850, MMCSD0_DAT_0, 10, 8, 15, 2, false) MUX_CFG(DA850, MMCSD0_DAT_1, 10, 12, 15, 2, false) MUX_CFG(DA850, MMCSD0_DAT_2, 10, 16, 15, 2, false) MUX_CFG(DA850, MMCSD0_DAT_3, 10, 20, 15, 2, false) MUX_CFG(DA850, MMCSD0_CLK, 10, 0, 15, 2, false) MUX_CFG(DA850, MMCSD0_CMD, 10, 4, 15, 2, false) /* MMC/SD1 function */ MUX_CFG(DA850, MMCSD1_DAT_0, 18, 8, 15, 2, false) MUX_CFG(DA850, MMCSD1_DAT_1, 19, 16, 15, 2, false) MUX_CFG(DA850, MMCSD1_DAT_2, 19, 12, 15, 2, false) MUX_CFG(DA850, MMCSD1_DAT_3, 19, 8, 15, 2, false) MUX_CFG(DA850, MMCSD1_CLK, 18, 12, 15, 2, false) MUX_CFG(DA850, MMCSD1_CMD, 18, 16, 15, 2, false) /* EMIF2.5/EMIFA function */ MUX_CFG(DA850, EMA_D_7, 9, 0, 15, 1, false) MUX_CFG(DA850, EMA_D_6, 9, 4, 15, 1, false) MUX_CFG(DA850, EMA_D_5, 9, 8, 15, 1, false) MUX_CFG(DA850, EMA_D_4, 9, 12, 15, 1, false) MUX_CFG(DA850, EMA_D_3, 9, 16, 15, 1, false) MUX_CFG(DA850, EMA_D_2, 9, 20, 15, 1, false) MUX_CFG(DA850, EMA_D_1, 9, 24, 15, 1, false) MUX_CFG(DA850, EMA_D_0, 9, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_1, 12, 24, 15, 1, false) MUX_CFG(DA850, EMA_A_2, 12, 20, 15, 1, false) MUX_CFG(DA850, NEMA_CS_3, 7, 4, 15, 1, false) MUX_CFG(DA850, NEMA_CS_4, 7, 8, 15, 1, false) MUX_CFG(DA850, NEMA_WE, 7, 16, 15, 1, false) MUX_CFG(DA850, NEMA_OE, 7, 20, 15, 1, false) MUX_CFG(DA850, EMA_A_0, 12, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_3, 12, 16, 15, 1, false) MUX_CFG(DA850, EMA_A_4, 12, 12, 15, 1, false) MUX_CFG(DA850, EMA_A_5, 12, 8, 15, 1, false) MUX_CFG(DA850, EMA_A_6, 12, 4, 15, 1, false) MUX_CFG(DA850, EMA_A_7, 12, 0, 15, 1, false) MUX_CFG(DA850, EMA_A_8, 11, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_9, 11, 24, 15, 1, false) MUX_CFG(DA850, EMA_A_10, 11, 20, 15, 1, false) MUX_CFG(DA850, EMA_A_11, 11, 16, 15, 1, false) MUX_CFG(DA850, EMA_A_12, 11, 12, 15, 1, false) MUX_CFG(DA850, EMA_A_13, 11, 8, 15, 1, false) MUX_CFG(DA850, EMA_A_14, 11, 4, 15, 1, false) MUX_CFG(DA850, EMA_A_15, 11, 0, 15, 1, false) 
MUX_CFG(DA850, EMA_A_16, 10, 28, 15, 1, false) MUX_CFG(DA850, EMA_A_17, 10, 24, 15, 1, false) MUX_CFG(DA850, EMA_A_18, 10, 20, 15, 1, false) MUX_CFG(DA850, EMA_A_19, 10, 16, 15, 1, false) MUX_CFG(DA850, EMA_A_20, 10, 12, 15, 1, false) MUX_CFG(DA850, EMA_A_21, 10, 8, 15, 1, false) MUX_CFG(DA850, EMA_A_22, 10, 4, 15, 1, false) MUX_CFG(DA850, EMA_A_23, 10, 0, 15, 1, false) MUX_CFG(DA850, EMA_D_8, 8, 28, 15, 1, false) MUX_CFG(DA850, EMA_D_9, 8, 24, 15, 1, false) MUX_CFG(DA850, EMA_D_10, 8, 20, 15, 1, false) MUX_CFG(DA850, EMA_D_11, 8, 16, 15, 1, false) MUX_CFG(DA850, EMA_D_12, 8, 12, 15, 1, false) MUX_CFG(DA850, EMA_D_13, 8, 8, 15, 1, false) MUX_CFG(DA850, EMA_D_14, 8, 4, 15, 1, false) MUX_CFG(DA850, EMA_D_15, 8, 0, 15, 1, false) MUX_CFG(DA850, EMA_BA_1, 5, 24, 15, 1, false) MUX_CFG(DA850, EMA_CLK, 6, 0, 15, 1, false) MUX_CFG(DA850, EMA_WAIT_1, 6, 24, 15, 1, false) MUX_CFG(DA850, NEMA_CS_2, 7, 0, 15, 1, false) /* GPIO function */ MUX_CFG(DA850, GPIO2_4, 6, 12, 15, 8, false) MUX_CFG(DA850, GPIO2_6, 6, 4, 15, 8, false) MUX_CFG(DA850, GPIO2_8, 5, 28, 15, 8, false) MUX_CFG(DA850, GPIO2_15, 5, 0, 15, 8, false) MUX_CFG(DA850, GPIO3_12, 7, 12, 15, 8, false) MUX_CFG(DA850, GPIO3_13, 7, 8, 15, 8, false) MUX_CFG(DA850, GPIO4_0, 10, 28, 15, 8, false) MUX_CFG(DA850, GPIO4_1, 10, 24, 15, 8, false) MUX_CFG(DA850, GPIO6_9, 13, 24, 15, 8, false) MUX_CFG(DA850, GPIO6_10, 13, 20, 15, 8, false) MUX_CFG(DA850, GPIO6_13, 13, 8, 15, 8, false) MUX_CFG(DA850, RTC_ALARM, 0, 28, 15, 2, false) /* VPIF Capture */ MUX_CFG(DA850, VPIF_DIN0, 15, 4, 15, 1, false) MUX_CFG(DA850, VPIF_DIN1, 15, 0, 15, 1, false) MUX_CFG(DA850, VPIF_DIN2, 14, 28, 15, 1, false) MUX_CFG(DA850, VPIF_DIN3, 14, 24, 15, 1, false) MUX_CFG(DA850, VPIF_DIN4, 14, 20, 15, 1, false) MUX_CFG(DA850, VPIF_DIN5, 14, 16, 15, 1, false) MUX_CFG(DA850, VPIF_DIN6, 14, 12, 15, 1, false) MUX_CFG(DA850, VPIF_DIN7, 14, 8, 15, 1, false) MUX_CFG(DA850, VPIF_DIN8, 16, 4, 15, 1, false) MUX_CFG(DA850, VPIF_DIN9, 16, 0, 15, 1, false) MUX_CFG(DA850, 
VPIF_DIN10, 15, 28, 15, 1, false) MUX_CFG(DA850, VPIF_DIN11, 15, 24, 15, 1, false) MUX_CFG(DA850, VPIF_DIN12, 15, 20, 15, 1, false) MUX_CFG(DA850, VPIF_DIN13, 15, 16, 15, 1, false) MUX_CFG(DA850, VPIF_DIN14, 15, 12, 15, 1, false) MUX_CFG(DA850, VPIF_DIN15, 15, 8, 15, 1, false) MUX_CFG(DA850, VPIF_CLKIN0, 14, 0, 15, 1, false) MUX_CFG(DA850, VPIF_CLKIN1, 14, 4, 15, 1, false) MUX_CFG(DA850, VPIF_CLKIN2, 19, 8, 15, 1, false) MUX_CFG(DA850, VPIF_CLKIN3, 19, 16, 15, 1, false) /* VPIF Display */ MUX_CFG(DA850, VPIF_DOUT0, 17, 4, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT1, 17, 0, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT2, 16, 28, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT3, 16, 24, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT4, 16, 20, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT5, 16, 16, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT6, 16, 12, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT7, 16, 8, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT8, 18, 4, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT9, 18, 0, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT10, 17, 28, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT11, 17, 24, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT12, 17, 20, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT13, 17, 16, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT14, 17, 12, 15, 1, false) MUX_CFG(DA850, VPIF_DOUT15, 17, 8, 15, 1, false) MUX_CFG(DA850, VPIF_CLKO2, 19, 12, 15, 1, false) MUX_CFG(DA850, VPIF_CLKO3, 19, 20, 15, 1, false) #endif }; const short da850_i2c0_pins[] __initconst = { DA850_I2C0_SDA, DA850_I2C0_SCL, -1 }; const short da850_i2c1_pins[] __initconst = { DA850_I2C1_SCL, DA850_I2C1_SDA, -1 }; const short da850_lcdcntl_pins[] __initconst = { DA850_LCD_D_0, DA850_LCD_D_1, DA850_LCD_D_2, DA850_LCD_D_3, DA850_LCD_D_4, DA850_LCD_D_5, DA850_LCD_D_6, DA850_LCD_D_7, DA850_LCD_D_8, DA850_LCD_D_9, DA850_LCD_D_10, DA850_LCD_D_11, DA850_LCD_D_12, DA850_LCD_D_13, DA850_LCD_D_14, DA850_LCD_D_15, DA850_LCD_PCLK, DA850_LCD_HSYNC, DA850_LCD_VSYNC, DA850_NLCD_AC_ENB_CS, -1 }; const short da850_vpif_capture_pins[] __initdata = { DA850_VPIF_DIN0, 
DA850_VPIF_DIN1, DA850_VPIF_DIN2, DA850_VPIF_DIN3, DA850_VPIF_DIN4, DA850_VPIF_DIN5, DA850_VPIF_DIN6, DA850_VPIF_DIN7, DA850_VPIF_DIN8, DA850_VPIF_DIN9, DA850_VPIF_DIN10, DA850_VPIF_DIN11, DA850_VPIF_DIN12, DA850_VPIF_DIN13, DA850_VPIF_DIN14, DA850_VPIF_DIN15, DA850_VPIF_CLKIN0, DA850_VPIF_CLKIN1, DA850_VPIF_CLKIN2, DA850_VPIF_CLKIN3, -1 }; const short da850_vpif_display_pins[] __initdata = { DA850_VPIF_DOUT0, DA850_VPIF_DOUT1, DA850_VPIF_DOUT2, DA850_VPIF_DOUT3, DA850_VPIF_DOUT4, DA850_VPIF_DOUT5, DA850_VPIF_DOUT6, DA850_VPIF_DOUT7, DA850_VPIF_DOUT8, DA850_VPIF_DOUT9, DA850_VPIF_DOUT10, DA850_VPIF_DOUT11, DA850_VPIF_DOUT12, DA850_VPIF_DOUT13, DA850_VPIF_DOUT14, DA850_VPIF_DOUT15, DA850_VPIF_CLKO2, DA850_VPIF_CLKO3, -1 }; /* FIQ are pri 0-1; otherwise 2-7, with 7 lowest priority */ static u8 da850_default_priorities[DA850_N_CP_INTC_IRQ] = { [IRQ_DA8XX_COMMTX] = 7, [IRQ_DA8XX_COMMRX] = 7, [IRQ_DA8XX_NINT] = 7, [IRQ_DA8XX_EVTOUT0] = 7, [IRQ_DA8XX_EVTOUT1] = 7, [IRQ_DA8XX_EVTOUT2] = 7, [IRQ_DA8XX_EVTOUT3] = 7, [IRQ_DA8XX_EVTOUT4] = 7, [IRQ_DA8XX_EVTOUT5] = 7, [IRQ_DA8XX_EVTOUT6] = 7, [IRQ_DA8XX_EVTOUT7] = 7, [IRQ_DA8XX_CCINT0] = 7, [IRQ_DA8XX_CCERRINT] = 7, [IRQ_DA8XX_TCERRINT0] = 7, [IRQ_DA8XX_AEMIFINT] = 7, [IRQ_DA8XX_I2CINT0] = 7, [IRQ_DA8XX_MMCSDINT0] = 7, [IRQ_DA8XX_MMCSDINT1] = 7, [IRQ_DA8XX_ALLINT0] = 7, [IRQ_DA8XX_RTC] = 7, [IRQ_DA8XX_SPINT0] = 7, [IRQ_DA8XX_TINT12_0] = 7, [IRQ_DA8XX_TINT34_0] = 7, [IRQ_DA8XX_TINT12_1] = 7, [IRQ_DA8XX_TINT34_1] = 7, [IRQ_DA8XX_UARTINT0] = 7, [IRQ_DA8XX_KEYMGRINT] = 7, [IRQ_DA850_MPUADDRERR0] = 7, [IRQ_DA8XX_CHIPINT0] = 7, [IRQ_DA8XX_CHIPINT1] = 7, [IRQ_DA8XX_CHIPINT2] = 7, [IRQ_DA8XX_CHIPINT3] = 7, [IRQ_DA8XX_TCERRINT1] = 7, [IRQ_DA8XX_C0_RX_THRESH_PULSE] = 7, [IRQ_DA8XX_C0_RX_PULSE] = 7, [IRQ_DA8XX_C0_TX_PULSE] = 7, [IRQ_DA8XX_C0_MISC_PULSE] = 7, [IRQ_DA8XX_C1_RX_THRESH_PULSE] = 7, [IRQ_DA8XX_C1_RX_PULSE] = 7, [IRQ_DA8XX_C1_TX_PULSE] = 7, [IRQ_DA8XX_C1_MISC_PULSE] = 7, [IRQ_DA8XX_MEMERR] = 7, [IRQ_DA8XX_GPIO0] = 7, 
[IRQ_DA8XX_GPIO1] = 7, [IRQ_DA8XX_GPIO2] = 7, [IRQ_DA8XX_GPIO3] = 7, [IRQ_DA8XX_GPIO4] = 7, [IRQ_DA8XX_GPIO5] = 7, [IRQ_DA8XX_GPIO6] = 7, [IRQ_DA8XX_GPIO7] = 7, [IRQ_DA8XX_GPIO8] = 7, [IRQ_DA8XX_I2CINT1] = 7, [IRQ_DA8XX_LCDINT] = 7, [IRQ_DA8XX_UARTINT1] = 7, [IRQ_DA8XX_MCASPINT] = 7, [IRQ_DA8XX_ALLINT1] = 7, [IRQ_DA8XX_SPINT1] = 7, [IRQ_DA8XX_UHPI_INT1] = 7, [IRQ_DA8XX_USB_INT] = 7, [IRQ_DA8XX_IRQN] = 7, [IRQ_DA8XX_RWAKEUP] = 7, [IRQ_DA8XX_UARTINT2] = 7, [IRQ_DA8XX_DFTSSINT] = 7, [IRQ_DA8XX_EHRPWM0] = 7, [IRQ_DA8XX_EHRPWM0TZ] = 7, [IRQ_DA8XX_EHRPWM1] = 7, [IRQ_DA8XX_EHRPWM1TZ] = 7, [IRQ_DA850_SATAINT] = 7, [IRQ_DA850_TINTALL_2] = 7, [IRQ_DA8XX_ECAP0] = 7, [IRQ_DA8XX_ECAP1] = 7, [IRQ_DA8XX_ECAP2] = 7, [IRQ_DA850_MMCSDINT0_1] = 7, [IRQ_DA850_MMCSDINT1_1] = 7, [IRQ_DA850_T12CMPINT0_2] = 7, [IRQ_DA850_T12CMPINT1_2] = 7, [IRQ_DA850_T12CMPINT2_2] = 7, [IRQ_DA850_T12CMPINT3_2] = 7, [IRQ_DA850_T12CMPINT4_2] = 7, [IRQ_DA850_T12CMPINT5_2] = 7, [IRQ_DA850_T12CMPINT6_2] = 7, [IRQ_DA850_T12CMPINT7_2] = 7, [IRQ_DA850_T12CMPINT0_3] = 7, [IRQ_DA850_T12CMPINT1_3] = 7, [IRQ_DA850_T12CMPINT2_3] = 7, [IRQ_DA850_T12CMPINT3_3] = 7, [IRQ_DA850_T12CMPINT4_3] = 7, [IRQ_DA850_T12CMPINT5_3] = 7, [IRQ_DA850_T12CMPINT6_3] = 7, [IRQ_DA850_T12CMPINT7_3] = 7, [IRQ_DA850_RPIINT] = 7, [IRQ_DA850_VPIFINT] = 7, [IRQ_DA850_CCINT1] = 7, [IRQ_DA850_CCERRINT1] = 7, [IRQ_DA850_TCERRINT2] = 7, [IRQ_DA850_TINTALL_3] = 7, [IRQ_DA850_MCBSP0RINT] = 7, [IRQ_DA850_MCBSP0XINT] = 7, [IRQ_DA850_MCBSP1RINT] = 7, [IRQ_DA850_MCBSP1XINT] = 7, [IRQ_DA8XX_ARMCLKSTOPREQ] = 7, }; static struct map_desc da850_io_desc[] = { { .virtual = IO_VIRT, .pfn = __phys_to_pfn(IO_PHYS), .length = IO_SIZE, .type = MT_DEVICE }, { .virtual = DA8XX_CP_INTC_VIRT, .pfn = __phys_to_pfn(DA8XX_CP_INTC_BASE), .length = DA8XX_CP_INTC_SIZE, .type = MT_DEVICE }, }; static u32 da850_psc_bases[] = { DA8XX_PSC0_BASE, DA8XX_PSC1_BASE }; /* Contents of JTAG ID register used to identify exact cpu type */ static struct davinci_id da850_ids[] = { { 
		.variant	= 0x0,
		.part_no	= 0xb7d1,
		.manufacturer	= 0x017,	/* 0x02f >> 1 */
		.cpu_id		= DAVINCI_CPU_ID_DA850,
		.name		= "da850/omap-l138",
	},
	{
		.variant	= 0x1,
		.part_no	= 0xb7d1,
		.manufacturer	= 0x017,	/* 0x02f >> 1 */
		.cpu_id		= DAVINCI_CPU_ID_DA850,
		.name		= "da850/omap-l138/am18x",
	},
};

static struct davinci_timer_instance da850_timer_instance[4] = {
	{
		.base		= DA8XX_TIMER64P0_BASE,
		.bottom_irq	= IRQ_DA8XX_TINT12_0,
		.top_irq	= IRQ_DA8XX_TINT34_0,
	},
	{
		.base		= DA8XX_TIMER64P1_BASE,
		.bottom_irq	= IRQ_DA8XX_TINT12_1,
		.top_irq	= IRQ_DA8XX_TINT34_1,
	},
	{
		.base		= DA850_TIMER64P2_BASE,
		.bottom_irq	= IRQ_DA850_TINT12_2,
		.top_irq	= IRQ_DA850_TINT34_2,
	},
	{
		.base		= DA850_TIMER64P3_BASE,
		.bottom_irq	= IRQ_DA850_TINT12_3,
		.top_irq	= IRQ_DA850_TINT34_3,
	},
};

/*
 * T0_BOT: Timer 0, bottom  : Used for clock_event
 * T0_TOP: Timer 0, top     : Used for clocksource
 * T1_BOT, T1_TOP: Timer 1, bottom & top: Used for watchdog timer
 */
static struct davinci_timer_info da850_timer_info = {
	.timers		= da850_timer_instance,
	.clockevent_id	= T0_BOT,
	.clocksource_id	= T0_TOP,
};

/*
 * Re-parent every clock carrying DA850_CLK_ASYNC3 to pll1_sysclk2 when
 * @pllnum is non-zero (or back to pll0_sysclk2 otherwise) and switch the
 * ASYNC3 clock-source bit in CFGCHIP3 to match.
 */
static void da850_set_async3_src(int pllnum)
{
	struct clk *clk, *newparent = pllnum ? &pll1_sysclk2 : &pll0_sysclk2;
	struct clk_lookup *c;
	unsigned int v;
	int ret;

	for (c = da850_clks; c->clk; c++) {
		clk = c->clk;
		if (clk->flags & DA850_CLK_ASYNC3) {
			ret = clk_set_parent(clk, newparent);
			WARN(ret, "DA850: unable to re-parent clock %s",
								clk->name);
		}
	}

	v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
	if (pllnum)
		v |= CFGCHIP3_ASYNC3_CLKSRC;
	else
		v &= ~CFGCHIP3_ASYNC3_CLKSRC;
	__raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG));
}

#ifdef CONFIG_CPU_FREQ
/*
 * Notes:
 * According to the TRM, minimum PLLM results in maximum power savings.
 * The OPP definitions below should keep the PLLM as low as possible.
 *
 * The output of the PLLM must be between 300 to 600 MHz.
*/ struct da850_opp { unsigned int freq; /* in KHz */ unsigned int prediv; unsigned int mult; unsigned int postdiv; unsigned int cvdd_min; /* in uV */ unsigned int cvdd_max; /* in uV */ }; static const struct da850_opp da850_opp_456 = { .freq = 456000, .prediv = 1, .mult = 19, .postdiv = 1, .cvdd_min = 1300000, .cvdd_max = 1350000, }; static const struct da850_opp da850_opp_408 = { .freq = 408000, .prediv = 1, .mult = 17, .postdiv = 1, .cvdd_min = 1300000, .cvdd_max = 1350000, }; static const struct da850_opp da850_opp_372 = { .freq = 372000, .prediv = 2, .mult = 31, .postdiv = 1, .cvdd_min = 1200000, .cvdd_max = 1320000, }; static const struct da850_opp da850_opp_300 = { .freq = 300000, .prediv = 1, .mult = 25, .postdiv = 2, .cvdd_min = 1200000, .cvdd_max = 1320000, }; static const struct da850_opp da850_opp_200 = { .freq = 200000, .prediv = 1, .mult = 25, .postdiv = 3, .cvdd_min = 1100000, .cvdd_max = 1160000, }; static const struct da850_opp da850_opp_96 = { .freq = 96000, .prediv = 1, .mult = 20, .postdiv = 5, .cvdd_min = 1000000, .cvdd_max = 1050000, }; #define OPP(freq) \ { \ .driver_data = (unsigned int) &da850_opp_##freq, \ .frequency = freq * 1000, \ } static struct cpufreq_frequency_table da850_freq_table[] = { OPP(456), OPP(408), OPP(372), OPP(300), OPP(200), OPP(96), { .driver_data = 0, .frequency = CPUFREQ_TABLE_END, }, }; #ifdef CONFIG_REGULATOR static int da850_set_voltage(unsigned int index); static int da850_regulator_init(void); #endif static struct davinci_cpufreq_config cpufreq_info = { .freq_table = da850_freq_table, #ifdef CONFIG_REGULATOR .init = da850_regulator_init, .set_voltage = da850_set_voltage, #endif }; #ifdef CONFIG_REGULATOR static struct regulator *cvdd; static int da850_set_voltage(unsigned int index) { struct da850_opp *opp; if (!cvdd) return -ENODEV; opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data; return regulator_set_voltage(cvdd, opp->cvdd_min, opp->cvdd_max); } static int da850_regulator_init(void) { 
cvdd = regulator_get(NULL, "cvdd"); if (WARN(IS_ERR(cvdd), "Unable to obtain voltage regulator for CVDD;" " voltage scaling unsupported\n")) { return PTR_ERR(cvdd); } return 0; } #endif static struct platform_device da850_cpufreq_device = { .name = "cpufreq-davinci", .dev = { .platform_data = &cpufreq_info, }, .id = -1, }; unsigned int da850_max_speed = 300000; int da850_register_cpufreq(char *async_clk) { int i; /* cpufreq driver can help keep an "async" clock constant */ if (async_clk) clk_add_alias("async", da850_cpufreq_device.name, async_clk, NULL); for (i = 0; i < ARRAY_SIZE(da850_freq_table); i++) { if (da850_freq_table[i].frequency <= da850_max_speed) { cpufreq_info.freq_table = &da850_freq_table[i]; break; } } return platform_device_register(&da850_cpufreq_device); } static int da850_round_armrate(struct clk *clk, unsigned long rate) { int i, ret = 0, diff; unsigned int best = (unsigned int) -1; struct cpufreq_frequency_table *table = cpufreq_info.freq_table; rate /= 1000; /* convert to kHz */ for (i = 0; table[i].frequency != CPUFREQ_TABLE_END; i++) { diff = table[i].frequency - rate; if (diff < 0) diff = -diff; if (diff < best) { best = diff; ret = table[i].frequency; } } return ret * 1000; } static int da850_set_armrate(struct clk *clk, unsigned long index) { struct clk *pllclk = &pll0_clk; return clk_set_rate(pllclk, index); } static int da850_set_pll0rate(struct clk *clk, unsigned long index) { unsigned int prediv, mult, postdiv; struct da850_opp *opp; struct pll_data *pll = clk->pll_data; int ret; opp = (struct da850_opp *) cpufreq_info.freq_table[index].driver_data; prediv = opp->prediv; mult = opp->mult; postdiv = opp->postdiv; ret = davinci_set_pllrate(pll, prediv, mult, postdiv); if (WARN_ON(ret)) return ret; return 0; } #else int __init da850_register_cpufreq(char *async_clk) { return 0; } static int da850_set_armrate(struct clk *clk, unsigned long rate) { return -EINVAL; } static int da850_set_pll0rate(struct clk *clk, unsigned long armrate) { 
return -EINVAL; } static int da850_round_armrate(struct clk *clk, unsigned long rate) { return clk->rate; } #endif int __init da850_register_pm(struct platform_device *pdev) { int ret; struct davinci_pm_config *pdata = pdev->dev.platform_data; ret = davinci_cfg_reg(DA850_RTC_ALARM); if (ret) return ret; pdata->ddr2_ctlr_base = da8xx_get_mem_ctlr(); pdata->deepsleep_reg = DA8XX_SYSCFG1_VIRT(DA8XX_DEEPSLEEP_REG); pdata->ddrpsc_num = DA8XX_LPSC1_EMIF3C; pdata->cpupll_reg_base = ioremap(DA8XX_PLL0_BASE, SZ_4K); if (!pdata->cpupll_reg_base) return -ENOMEM; pdata->ddrpll_reg_base = ioremap(DA850_PLL1_BASE, SZ_4K); if (!pdata->ddrpll_reg_base) { ret = -ENOMEM; goto no_ddrpll_mem; } pdata->ddrpsc_reg_base = ioremap(DA8XX_PSC1_BASE, SZ_4K); if (!pdata->ddrpsc_reg_base) { ret = -ENOMEM; goto no_ddrpsc_mem; } return platform_device_register(pdev); no_ddrpsc_mem: iounmap(pdata->ddrpll_reg_base); no_ddrpll_mem: iounmap(pdata->cpupll_reg_base); return ret; } /* VPIF resource, platform data */ static u64 da850_vpif_dma_mask = DMA_BIT_MASK(32); static struct resource da850_vpif_resource[] = { { .start = DA8XX_VPIF_BASE, .end = DA8XX_VPIF_BASE + 0xfff, .flags = IORESOURCE_MEM, } }; static struct platform_device da850_vpif_dev = { .name = "vpif", .id = -1, .dev = { .dma_mask = &da850_vpif_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = da850_vpif_resource, .num_resources = ARRAY_SIZE(da850_vpif_resource), }; static struct resource da850_vpif_display_resource[] = { { .start = IRQ_DA850_VPIFINT, .end = IRQ_DA850_VPIFINT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da850_vpif_display_dev = { .name = "vpif_display", .id = -1, .dev = { .dma_mask = &da850_vpif_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = da850_vpif_display_resource, .num_resources = ARRAY_SIZE(da850_vpif_display_resource), }; static struct resource da850_vpif_capture_resource[] = { { .start = IRQ_DA850_VPIFINT, .end = IRQ_DA850_VPIFINT, .flags = IORESOURCE_IRQ, }, { 
.start = IRQ_DA850_VPIFINT, .end = IRQ_DA850_VPIFINT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device da850_vpif_capture_dev = { .name = "vpif_capture", .id = -1, .dev = { .dma_mask = &da850_vpif_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = da850_vpif_capture_resource, .num_resources = ARRAY_SIZE(da850_vpif_capture_resource), }; int __init da850_register_vpif(void) { return platform_device_register(&da850_vpif_dev); } int __init da850_register_vpif_display(struct vpif_display_config *display_config) { da850_vpif_display_dev.dev.platform_data = display_config; return platform_device_register(&da850_vpif_display_dev); } int __init da850_register_vpif_capture(struct vpif_capture_config *capture_config) { da850_vpif_capture_dev.dev.platform_data = capture_config; return platform_device_register(&da850_vpif_capture_dev); } static struct davinci_soc_info davinci_soc_info_da850 = { .io_desc = da850_io_desc, .io_desc_num = ARRAY_SIZE(da850_io_desc), .jtag_id_reg = DA8XX_SYSCFG0_BASE + DA8XX_JTAG_ID_REG, .ids = da850_ids, .ids_num = ARRAY_SIZE(da850_ids), .cpu_clks = da850_clks, .psc_bases = da850_psc_bases, .psc_bases_num = ARRAY_SIZE(da850_psc_bases), .pinmux_base = DA8XX_SYSCFG0_BASE + 0x120, .pinmux_pins = da850_pins, .pinmux_pins_num = ARRAY_SIZE(da850_pins), .intc_base = DA8XX_CP_INTC_BASE, .intc_type = DAVINCI_INTC_TYPE_CP_INTC, .intc_irq_prios = da850_default_priorities, .intc_irq_num = DA850_N_CP_INTC_IRQ, .timer_info = &da850_timer_info, .gpio_type = GPIO_TYPE_DAVINCI, .gpio_base = DA8XX_GPIO_BASE, .gpio_num = 144, .gpio_irq = IRQ_DA8XX_GPIO0, .serial_dev = &da8xx_serial_device, .emac_pdata = &da8xx_emac_pdata, .sram_dma = DA8XX_SHARED_RAM_BASE, .sram_len = SZ_128K, }; void __init da850_init(void) { unsigned int v; davinci_common_init(&davinci_soc_info_da850); da8xx_syscfg0_base = ioremap(DA8XX_SYSCFG0_BASE, SZ_4K); if (WARN(!da8xx_syscfg0_base, "Unable to map syscfg0 module")) return; da8xx_syscfg1_base = 
ioremap(DA8XX_SYSCFG1_BASE, SZ_4K); if (WARN(!da8xx_syscfg1_base, "Unable to map syscfg1 module")) return; /* * Move the clock source of Async3 domain to PLL1 SYSCLK2. * This helps keeping the peripherals on this domain insulated * from CPU frequency changes caused by DVFS. The firmware sets * both PLL0 and PLL1 to the same frequency so, there should not * be any noticeable change even in non-DVFS use cases. */ da850_set_async3_src(1); /* Unlock writing to PLL0 registers */ v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP0_REG)); v &= ~CFGCHIP0_PLL_MASTER_LOCK; __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP0_REG)); /* Unlock writing to PLL1 registers */ v = __raw_readl(DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG)); v &= ~CFGCHIP3_PLL1_MASTER_LOCK; __raw_writel(v, DA8XX_SYSCFG0_VIRT(DA8XX_CFGCHIP3_REG)); }
gpl-2.0
djvoleur/S6-UniKernel_v4
net/netfilter/ipvs/ip_vs_sync.c
1483
52659
/* * IPVS An implementation of the IP virtual server support for the * LINUX operating system. IPVS is now implemented as a module * over the NetFilter framework. IPVS can be used to build a * high-performance and highly available server based on a * cluster of servers. * * Version 1, is capable of handling both version 0 and 1 messages. * Version 0 is the plain old format. * Note Version 0 receivers will just drop Ver 1 messages. * Version 1 is capable of handle IPv6, Persistence data, * time-outs, and firewall marks. * In ver.1 "ip_vs_sync_conn_options" will be sent in netw. order. * Ver. 0 can be turned on by sysctl -w net.ipv4.vs.sync_version=0 * * Definitions Message: is a complete datagram * Sync_conn: is a part of a Message * Param Data is an option to a Sync_conn. * * Authors: Wensong Zhang <wensong@linuxvirtualserver.org> * * ip_vs_sync: sync connection info from master load balancer to backups * through multicast * * Changes: * Alexandre Cassen : Added master & backup support at a time. * Alexandre Cassen : Added SyncID support for incoming sync * messages filtering. * Justin Ossevoort : Fix endian problem on sync message size. * Hans Schillstrom : Added Version 1: i.e. IPv6, * Persistence support, fwmark and time-out. 
*/ #define KMSG_COMPONENT "IPVS" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/module.h> #include <linux/slab.h> #include <linux/inetdevice.h> #include <linux/net.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/skbuff.h> #include <linux/in.h> #include <linux/igmp.h> /* for ip_mc_join_group */ #include <linux/udp.h> #include <linux/err.h> #include <linux/kthread.h> #include <linux/wait.h> #include <linux/kernel.h> #include <asm/unaligned.h> /* Used for ntoh_seq and hton_seq */ #include <net/ip.h> #include <net/sock.h> #include <net/ip_vs.h> #define IP_VS_SYNC_GROUP 0xe0000051 /* multicast addr - 224.0.0.81 */ #define IP_VS_SYNC_PORT 8848 /* multicast port */ #define SYNC_PROTO_VER 1 /* Protocol version in header */ static struct lock_class_key __ipvs_sync_key; /* * IPVS sync connection entry * Version 0, i.e. original version. */ struct ip_vs_sync_conn_v0 { __u8 reserved; /* Protocol, addresses and port numbers */ __u8 protocol; /* Which protocol (TCP/UDP) */ __be16 cport; __be16 vport; __be16 dport; __be32 caddr; /* client address */ __be32 vaddr; /* virtual address */ __be32 daddr; /* destination address */ /* Flags and state transition */ __be16 flags; /* status flags */ __be16 state; /* state info */ /* The sequence options start here */ }; struct ip_vs_sync_conn_options { struct ip_vs_seq in_seq; /* incoming seq. struct */ struct ip_vs_seq out_seq; /* outgoing seq. struct */ }; /* Sync Connection format (sync_conn) 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Type | Protocol | Ver. 
| Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Flags | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | State | cport | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | vport | dport | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | fwmark | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | timeout (in sec.) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | ... | | IP-Addresses (v4 or v6) | | ... | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Optional Parameters. +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Param. Type | Param. Length | Param. data | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | ... | | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | Param Type | Param. Length | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Param data | | Last Param data should be padded for 32 bit alignment | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ */ /* * Type 0, IPv4 sync connection format */ struct ip_vs_sync_v4 { __u8 type; __u8 protocol; /* Which protocol (TCP/UDP) */ __be16 ver_size; /* Version msb 4 bits */ /* Flags and state transition */ __be32 flags; /* status flags */ __be16 state; /* state info */ /* Protocol, addresses and port numbers */ __be16 cport; __be16 vport; __be16 dport; __be32 fwmark; /* Firewall mark from skb */ __be32 timeout; /* cp timeout */ __be32 caddr; /* client address */ __be32 vaddr; /* virtual address */ __be32 daddr; /* destination address */ /* The sequence options start here */ /* PE data padded to 32bit alignment after seq. 
options */ }; /* * Type 2 messages IPv6 */ struct ip_vs_sync_v6 { __u8 type; __u8 protocol; /* Which protocol (TCP/UDP) */ __be16 ver_size; /* Version msb 4 bits */ /* Flags and state transition */ __be32 flags; /* status flags */ __be16 state; /* state info */ /* Protocol, addresses and port numbers */ __be16 cport; __be16 vport; __be16 dport; __be32 fwmark; /* Firewall mark from skb */ __be32 timeout; /* cp timeout */ struct in6_addr caddr; /* client address */ struct in6_addr vaddr; /* virtual address */ struct in6_addr daddr; /* destination address */ /* The sequence options start here */ /* PE data padded to 32bit alignment after seq. options */ }; union ip_vs_sync_conn { struct ip_vs_sync_v4 v4; struct ip_vs_sync_v6 v6; }; /* Bits in Type field in above */ #define STYPE_INET6 0 #define STYPE_F_INET6 (1 << STYPE_INET6) #define SVER_SHIFT 12 /* Shift to get version */ #define SVER_MASK 0x0fff /* Mask to strip version */ #define IPVS_OPT_SEQ_DATA 1 #define IPVS_OPT_PE_DATA 2 #define IPVS_OPT_PE_NAME 3 #define IPVS_OPT_PARAM 7 #define IPVS_OPT_F_SEQ_DATA (1 << (IPVS_OPT_SEQ_DATA-1)) #define IPVS_OPT_F_PE_DATA (1 << (IPVS_OPT_PE_DATA-1)) #define IPVS_OPT_F_PE_NAME (1 << (IPVS_OPT_PE_NAME-1)) #define IPVS_OPT_F_PARAM (1 << (IPVS_OPT_PARAM-1)) struct ip_vs_sync_thread_data { struct net *net; struct socket *sock; char *buf; int id; }; /* Version 0 definition of packet sizes */ #define SIMPLE_CONN_SIZE (sizeof(struct ip_vs_sync_conn_v0)) #define FULL_CONN_SIZE \ (sizeof(struct ip_vs_sync_conn_v0) + sizeof(struct ip_vs_sync_conn_options)) /* The master mulitcasts messages (Datagrams) to the backup load balancers in the following format. Version 1: Note, first byte should be Zero, so ver 0 receivers will drop the packet. 
0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | 0 | SyncID | Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Count Conns | Version | Reserved, set to Zero | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | IPVS Sync Connection (1) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | . | ~ . ~ | . | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | IPVS Sync Connection (n) | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Version 0 Header 0 1 2 3 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | Count Conns | SyncID | Size | +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | IPVS Sync Connection (1) | */ #define SYNC_MESG_HEADER_LEN 4 #define MAX_CONNS_PER_SYNCBUFF 255 /* nr_conns in ip_vs_sync_mesg is 8 bit */ /* Version 0 header */ struct ip_vs_sync_mesg_v0 { __u8 nr_conns; __u8 syncid; __be16 size; /* ip_vs_sync_conn entries start here */ }; /* Version 1 header */ struct ip_vs_sync_mesg { __u8 reserved; /* must be zero */ __u8 syncid; __be16 size; __u8 nr_conns; __s8 version; /* SYNC_PROTO_VER */ __u16 spare; /* ip_vs_sync_conn entries start here */ }; struct ip_vs_sync_buff { struct list_head list; unsigned long firstuse; /* pointers for the message data */ struct ip_vs_sync_mesg *mesg; unsigned char *head; unsigned char *end; }; /* * Copy of struct ip_vs_seq * From unaligned network order to aligned host order */ static void ntoh_seq(struct ip_vs_seq *no, struct ip_vs_seq *ho) { ho->init_seq = get_unaligned_be32(&no->init_seq); ho->delta = get_unaligned_be32(&no->delta); ho->previous_delta = get_unaligned_be32(&no->previous_delta); } /* * Copy of struct ip_vs_seq * From Aligned host order to unaligned network order */ static void hton_seq(struct ip_vs_seq *ho, struct 
ip_vs_seq *no) { put_unaligned_be32(ho->init_seq, &no->init_seq); put_unaligned_be32(ho->delta, &no->delta); put_unaligned_be32(ho->previous_delta, &no->previous_delta); } static inline struct ip_vs_sync_buff * sb_dequeue(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_lock); if (list_empty(&ms->sync_queue)) { sb = NULL; __set_current_state(TASK_INTERRUPTIBLE); } else { sb = list_entry(ms->sync_queue.next, struct ip_vs_sync_buff, list); list_del(&sb->list); ms->sync_queue_len--; if (!ms->sync_queue_len) ms->sync_queue_delay = 0; } spin_unlock_bh(&ipvs->sync_lock); return sb; } /* * Create a new sync buffer for Version 1 proto. */ static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create(struct netns_ipvs *ipvs) { struct ip_vs_sync_buff *sb; if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) return NULL; sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); if (!sb->mesg) { kfree(sb); return NULL; } sb->mesg->reserved = 0; /* old nr_conns i.e. 
must be zero now */ sb->mesg->version = SYNC_PROTO_VER; sb->mesg->syncid = ipvs->master_syncid; sb->mesg->size = htons(sizeof(struct ip_vs_sync_mesg)); sb->mesg->nr_conns = 0; sb->mesg->spare = 0; sb->head = (unsigned char *)sb->mesg + sizeof(struct ip_vs_sync_mesg); sb->end = (unsigned char *)sb->mesg + ipvs->send_mesg_maxlen; sb->firstuse = jiffies; return sb; } static inline void ip_vs_sync_buff_release(struct ip_vs_sync_buff *sb) { kfree(sb->mesg); kfree(sb); } static inline void sb_queue_tail(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb = ms->sync_buff; spin_lock(&ipvs->sync_lock); if (ipvs->sync_state & IP_VS_STATE_MASTER && ms->sync_queue_len < sysctl_sync_qlen_max(ipvs)) { if (!ms->sync_queue_len) schedule_delayed_work(&ms->master_wakeup_work, max(IPVS_SYNC_SEND_DELAY, 1)); ms->sync_queue_len++; list_add_tail(&sb->list, &ms->sync_queue); if ((++ms->sync_queue_delay) == IPVS_SYNC_WAKEUP_RATE) wake_up_process(ms->master_thread); } else ip_vs_sync_buff_release(sb); spin_unlock(&ipvs->sync_lock); } /* * Get the current sync buffer if it has been created for more * than the specified time or the specified time is zero. */ static inline struct ip_vs_sync_buff * get_curr_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms, unsigned long time) { struct ip_vs_sync_buff *sb; spin_lock_bh(&ipvs->sync_buff_lock); sb = ms->sync_buff; if (sb && time_after_eq(jiffies - sb->firstuse, time)) { ms->sync_buff = NULL; __set_current_state(TASK_RUNNING); } else sb = NULL; spin_unlock_bh(&ipvs->sync_buff_lock); return sb; } static inline int select_master_thread_id(struct netns_ipvs *ipvs, struct ip_vs_conn *cp) { return ((long) cp >> (1 + ilog2(sizeof(*cp)))) & ipvs->threads_mask; } /* * Create a new sync buffer for Version 0 proto. 
*/ static inline struct ip_vs_sync_buff * ip_vs_sync_buff_create_v0(struct netns_ipvs *ipvs) { struct ip_vs_sync_buff *sb; struct ip_vs_sync_mesg_v0 *mesg; if (!(sb=kmalloc(sizeof(struct ip_vs_sync_buff), GFP_ATOMIC))) return NULL; sb->mesg = kmalloc(ipvs->send_mesg_maxlen, GFP_ATOMIC); if (!sb->mesg) { kfree(sb); return NULL; } mesg = (struct ip_vs_sync_mesg_v0 *)sb->mesg; mesg->nr_conns = 0; mesg->syncid = ipvs->master_syncid; mesg->size = htons(sizeof(struct ip_vs_sync_mesg_v0)); sb->head = (unsigned char *)mesg + sizeof(struct ip_vs_sync_mesg_v0); sb->end = (unsigned char *)mesg + ipvs->send_mesg_maxlen; sb->firstuse = jiffies; return sb; } /* Check if conn should be synced. * pkts: conn packets, use sysctl_sync_threshold to avoid packet check * - (1) sync_refresh_period: reduce sync rate. Additionally, retry * sync_retries times with period of sync_refresh_period/8 * - (2) if both sync_refresh_period and sync_period are 0 send sync only * for state changes or only once when pkts matches sync_threshold * - (3) templates: rate can be reduced only with sync_refresh_period or * with (2) */ static int ip_vs_sync_conn_needed(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, int pkts) { unsigned long orig = ACCESS_ONCE(cp->sync_endtime); unsigned long now = jiffies; unsigned long n = (now + cp->timeout) & ~3UL; unsigned int sync_refresh_period; int sync_period; int force; /* Check if we sync in current state */ if (unlikely(cp->flags & IP_VS_CONN_F_TEMPLATE)) force = 0; else if (likely(cp->protocol == IPPROTO_TCP)) { if (!((1 << cp->state) & ((1 << IP_VS_TCP_S_ESTABLISHED) | (1 << IP_VS_TCP_S_FIN_WAIT) | (1 << IP_VS_TCP_S_CLOSE) | (1 << IP_VS_TCP_S_CLOSE_WAIT) | (1 << IP_VS_TCP_S_TIME_WAIT)))) return 0; force = cp->state != cp->old_state; if (force && cp->state != IP_VS_TCP_S_ESTABLISHED) goto set; } else if (unlikely(cp->protocol == IPPROTO_SCTP)) { if (!((1 << cp->state) & ((1 << IP_VS_SCTP_S_ESTABLISHED) | (1 << IP_VS_SCTP_S_CLOSED) | (1 << 
IP_VS_SCTP_S_SHUT_ACK_CLI) | (1 << IP_VS_SCTP_S_SHUT_ACK_SER)))) return 0; force = cp->state != cp->old_state; if (force && cp->state != IP_VS_SCTP_S_ESTABLISHED) goto set; } else { /* UDP or another protocol with single state */ force = 0; } sync_refresh_period = sysctl_sync_refresh_period(ipvs); if (sync_refresh_period > 0) { long diff = n - orig; long min_diff = max(cp->timeout >> 1, 10UL * HZ); /* Avoid sync if difference is below sync_refresh_period * and below the half timeout. */ if (abs(diff) < min_t(long, sync_refresh_period, min_diff)) { int retries = orig & 3; if (retries >= sysctl_sync_retries(ipvs)) return 0; if (time_before(now, orig - cp->timeout + (sync_refresh_period >> 3))) return 0; n |= retries + 1; } } sync_period = sysctl_sync_period(ipvs); if (sync_period > 0) { if (!(cp->flags & IP_VS_CONN_F_TEMPLATE) && pkts % sync_period != sysctl_sync_threshold(ipvs)) return 0; } else if (sync_refresh_period <= 0 && pkts != sysctl_sync_threshold(ipvs)) return 0; set: cp->old_state = cp->state; n = cmpxchg(&cp->sync_endtime, orig, n); return n == orig || force; } /* * Version 0 , could be switched in by sys_ctl. * Add an ip_vs_conn information into the current sync_buff. 
*/ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp, int pkts) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg_v0 *m; struct ip_vs_sync_conn_v0 *s; struct ip_vs_sync_buff *buff; struct ipvs_master_sync_state *ms; int id; int len; if (unlikely(cp->af != AF_INET)) return; /* Do not sync ONE PACKET */ if (cp->flags & IP_VS_CONN_F_ONE_PACKET) return; if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) return; spin_lock_bh(&ipvs->sync_buff_lock); if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { spin_unlock_bh(&ipvs->sync_buff_lock); return; } id = select_master_thread_id(ipvs, cp); ms = &ipvs->ms[id]; buff = ms->sync_buff; if (buff) { m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; /* Send buffer if it is for v1 */ if (!m->nr_conns) { sb_queue_tail(ipvs, ms); ms->sync_buff = NULL; buff = NULL; } } if (!buff) { buff = ip_vs_sync_buff_create_v0(ipvs); if (!buff) { spin_unlock_bh(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } ms->sync_buff = buff; } len = (cp->flags & IP_VS_CONN_F_SEQ_MASK) ? 
FULL_CONN_SIZE : SIMPLE_CONN_SIZE; m = (struct ip_vs_sync_mesg_v0 *) buff->mesg; s = (struct ip_vs_sync_conn_v0 *) buff->head; /* copy members */ s->reserved = 0; s->protocol = cp->protocol; s->cport = cp->cport; s->vport = cp->vport; s->dport = cp->dport; s->caddr = cp->caddr.ip; s->vaddr = cp->vaddr.ip; s->daddr = cp->daddr.ip; s->flags = htons(cp->flags & ~IP_VS_CONN_F_HASHED); s->state = htons(cp->state); if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { struct ip_vs_sync_conn_options *opt = (struct ip_vs_sync_conn_options *)&s[1]; memcpy(opt, &cp->in_seq, sizeof(*opt)); } m->nr_conns++; m->size = htons(ntohs(m->size) + len); buff->head += len; /* check if there is a space for next one */ if (buff->head + FULL_CONN_SIZE > buff->end) { sb_queue_tail(ipvs, ms); ms->sync_buff = NULL; } spin_unlock_bh(&ipvs->sync_buff_lock); /* synchronize its controller if it has */ cp = cp->control; if (cp) { if (cp->flags & IP_VS_CONN_F_TEMPLATE) pkts = atomic_add_return(1, &cp->in_pkts); else pkts = sysctl_sync_threshold(ipvs); ip_vs_sync_conn(net, cp->control, pkts); } } /* * Add an ip_vs_conn information into the current sync_buff. * Called by ip_vs_in. 
* Sending Version 1 messages */ void ip_vs_sync_conn(struct net *net, struct ip_vs_conn *cp, int pkts) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg *m; union ip_vs_sync_conn *s; struct ip_vs_sync_buff *buff; struct ipvs_master_sync_state *ms; int id; __u8 *p; unsigned int len, pe_name_len, pad; /* Handle old version of the protocol */ if (sysctl_sync_ver(ipvs) == 0) { ip_vs_sync_conn_v0(net, cp, pkts); return; } /* Do not sync ONE PACKET */ if (cp->flags & IP_VS_CONN_F_ONE_PACKET) goto control; sloop: if (!ip_vs_sync_conn_needed(ipvs, cp, pkts)) goto control; /* Sanity checks */ pe_name_len = 0; if (cp->pe_data_len) { if (!cp->pe_data || !cp->dest) { IP_VS_ERR_RL("SYNC, connection pe_data invalid\n"); return; } pe_name_len = strnlen(cp->pe->name, IP_VS_PENAME_MAXLEN); } spin_lock_bh(&ipvs->sync_buff_lock); if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { spin_unlock_bh(&ipvs->sync_buff_lock); return; } id = select_master_thread_id(ipvs, cp); ms = &ipvs->ms[id]; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) len = sizeof(struct ip_vs_sync_v6); else #endif len = sizeof(struct ip_vs_sync_v4); if (cp->flags & IP_VS_CONN_F_SEQ_MASK) len += sizeof(struct ip_vs_sync_conn_options) + 2; if (cp->pe_data_len) len += cp->pe_data_len + 2; /* + Param hdr field */ if (pe_name_len) len += pe_name_len + 2; /* check if there is a space for this one */ pad = 0; buff = ms->sync_buff; if (buff) { m = buff->mesg; pad = (4 - (size_t) buff->head) & 3; /* Send buffer if it is for v0 */ if (buff->head + len + pad > buff->end || m->reserved) { sb_queue_tail(ipvs, ms); ms->sync_buff = NULL; buff = NULL; pad = 0; } } if (!buff) { buff = ip_vs_sync_buff_create(ipvs); if (!buff) { spin_unlock_bh(&ipvs->sync_buff_lock); pr_err("ip_vs_sync_buff_create failed.\n"); return; } ms->sync_buff = buff; m = buff->mesg; } p = buff->head; buff->head += pad + len; m->size = htons(ntohs(m->size) + pad + len); /* Add ev. padding from prev. 
sync_conn */ while (pad--) *(p++) = 0; s = (union ip_vs_sync_conn *)p; /* Set message type & copy members */ s->v4.type = (cp->af == AF_INET6 ? STYPE_F_INET6 : 0); s->v4.ver_size = htons(len & SVER_MASK); /* Version 0 */ s->v4.flags = htonl(cp->flags & ~IP_VS_CONN_F_HASHED); s->v4.state = htons(cp->state); s->v4.protocol = cp->protocol; s->v4.cport = cp->cport; s->v4.vport = cp->vport; s->v4.dport = cp->dport; s->v4.fwmark = htonl(cp->fwmark); s->v4.timeout = htonl(cp->timeout / HZ); m->nr_conns++; #ifdef CONFIG_IP_VS_IPV6 if (cp->af == AF_INET6) { p += sizeof(struct ip_vs_sync_v6); s->v6.caddr = cp->caddr.in6; s->v6.vaddr = cp->vaddr.in6; s->v6.daddr = cp->daddr.in6; } else #endif { p += sizeof(struct ip_vs_sync_v4); /* options ptr */ s->v4.caddr = cp->caddr.ip; s->v4.vaddr = cp->vaddr.ip; s->v4.daddr = cp->daddr.ip; } if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { *(p++) = IPVS_OPT_SEQ_DATA; *(p++) = sizeof(struct ip_vs_sync_conn_options); hton_seq((struct ip_vs_seq *)p, &cp->in_seq); p += sizeof(struct ip_vs_seq); hton_seq((struct ip_vs_seq *)p, &cp->out_seq); p += sizeof(struct ip_vs_seq); } /* Handle pe data */ if (cp->pe_data_len && cp->pe_data) { *(p++) = IPVS_OPT_PE_DATA; *(p++) = cp->pe_data_len; memcpy(p, cp->pe_data, cp->pe_data_len); p += cp->pe_data_len; if (pe_name_len) { /* Add PE_NAME */ *(p++) = IPVS_OPT_PE_NAME; *(p++) = pe_name_len; memcpy(p, cp->pe->name, pe_name_len); p += pe_name_len; } } spin_unlock_bh(&ipvs->sync_buff_lock); control: /* synchronize its controller if it has */ cp = cp->control; if (!cp) return; if (cp->flags & IP_VS_CONN_F_TEMPLATE) pkts = atomic_add_return(1, &cp->in_pkts); else pkts = sysctl_sync_threshold(ipvs); goto sloop; } /* * fill_param used by version 1 */ static inline int ip_vs_conn_fill_param_sync(struct net *net, int af, union ip_vs_sync_conn *sc, struct ip_vs_conn_param *p, __u8 *pe_data, unsigned int pe_data_len, __u8 *pe_name, unsigned int pe_name_len) { #ifdef CONFIG_IP_VS_IPV6 if (af == AF_INET6) 
ip_vs_conn_fill_param(net, af, sc->v6.protocol, (const union nf_inet_addr *)&sc->v6.caddr, sc->v6.cport, (const union nf_inet_addr *)&sc->v6.vaddr, sc->v6.vport, p); else #endif ip_vs_conn_fill_param(net, af, sc->v4.protocol, (const union nf_inet_addr *)&sc->v4.caddr, sc->v4.cport, (const union nf_inet_addr *)&sc->v4.vaddr, sc->v4.vport, p); /* Handle pe data */ if (pe_data_len) { if (pe_name_len) { char buff[IP_VS_PENAME_MAXLEN+1]; memcpy(buff, pe_name, pe_name_len); buff[pe_name_len]=0; p->pe = __ip_vs_pe_getbyname(buff); if (!p->pe) { IP_VS_DBG(3, "BACKUP, no %s engine found/loaded\n", buff); return 1; } } else { IP_VS_ERR_RL("BACKUP, Invalid PE parameters\n"); return 1; } p->pe_data = kmemdup(pe_data, pe_data_len, GFP_ATOMIC); if (!p->pe_data) { if (p->pe->module) module_put(p->pe->module); return -ENOMEM; } p->pe_data_len = pe_data_len; } return 0; } /* * Connection Add / Update. * Common for version 0 and 1 reception of backup sync_conns. * Param: ... * timeout is in sec. */ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param, unsigned int flags, unsigned int state, unsigned int protocol, unsigned int type, const union nf_inet_addr *daddr, __be16 dport, unsigned long timeout, __u32 fwmark, struct ip_vs_sync_conn_options *opt) { struct ip_vs_dest *dest; struct ip_vs_conn *cp; struct netns_ipvs *ipvs = net_ipvs(net); if (!(flags & IP_VS_CONN_F_TEMPLATE)) cp = ip_vs_conn_in_get(param); else cp = ip_vs_ct_in_get(param); if (cp) { /* Free pe_data */ kfree(param->pe_data); dest = cp->dest; spin_lock_bh(&cp->lock); if ((cp->flags ^ flags) & IP_VS_CONN_F_INACTIVE && !(flags & IP_VS_CONN_F_TEMPLATE) && dest) { if (flags & IP_VS_CONN_F_INACTIVE) { atomic_dec(&dest->activeconns); atomic_inc(&dest->inactconns); } else { atomic_inc(&dest->activeconns); atomic_dec(&dest->inactconns); } } flags &= IP_VS_CONN_F_BACKUP_UPD_MASK; flags |= cp->flags & ~IP_VS_CONN_F_BACKUP_UPD_MASK; cp->flags = flags; spin_unlock_bh(&cp->lock); if (!dest) 
ip_vs_try_bind_dest(cp); } else { /* * Find the appropriate destination for the connection. * If it is not found the connection will remain unbound * but still handled. */ rcu_read_lock(); dest = ip_vs_find_dest(net, type, daddr, dport, param->vaddr, param->vport, protocol, fwmark, flags); cp = ip_vs_conn_new(param, daddr, dport, flags, dest, fwmark); rcu_read_unlock(); if (!cp) { if (param->pe_data) kfree(param->pe_data); IP_VS_DBG(2, "BACKUP, add new conn. failed\n"); return; } } if (opt) memcpy(&cp->in_seq, opt, sizeof(*opt)); atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs)); cp->state = state; cp->old_state = cp->state; /* * For Ver 0 messages style * - Not possible to recover the right timeout for templates * - can not find the right fwmark * virtual service. If needed, we can do it for * non-fwmark persistent services. * Ver 1 messages style. * - No problem. */ if (timeout) { if (timeout > MAX_SCHEDULE_TIMEOUT / HZ) timeout = MAX_SCHEDULE_TIMEOUT / HZ; cp->timeout = timeout*HZ; } else { struct ip_vs_proto_data *pd; pd = ip_vs_proto_data_get(net, protocol); if (!(flags & IP_VS_CONN_F_TEMPLATE) && pd && pd->timeout_table) cp->timeout = pd->timeout_table[state]; else cp->timeout = (3*60*HZ); } ip_vs_conn_put(cp); } /* * Process received multicast message for Version 0 */ static void ip_vs_process_message_v0(struct net *net, const char *buffer, const size_t buflen) { struct ip_vs_sync_mesg_v0 *m = (struct ip_vs_sync_mesg_v0 *)buffer; struct ip_vs_sync_conn_v0 *s; struct ip_vs_sync_conn_options *opt; struct ip_vs_protocol *pp; struct ip_vs_conn_param param; char *p; int i; p = (char *)buffer + sizeof(struct ip_vs_sync_mesg_v0); for (i=0; i<m->nr_conns; i++) { unsigned int flags, state; if (p + SIMPLE_CONN_SIZE > buffer+buflen) { IP_VS_ERR_RL("BACKUP v0, bogus conn\n"); return; } s = (struct ip_vs_sync_conn_v0 *) p; flags = ntohs(s->flags) | IP_VS_CONN_F_SYNC; flags &= ~IP_VS_CONN_F_HASHED; if (flags & IP_VS_CONN_F_SEQ_MASK) { opt = (struct 
ip_vs_sync_conn_options *)&s[1]; p += FULL_CONN_SIZE; if (p > buffer+buflen) { IP_VS_ERR_RL("BACKUP v0, Dropping buffer bogus conn options\n"); return; } } else { opt = NULL; p += SIMPLE_CONN_SIZE; } state = ntohs(s->state); if (!(flags & IP_VS_CONN_F_TEMPLATE)) { pp = ip_vs_proto_get(s->protocol); if (!pp) { IP_VS_DBG(2, "BACKUP v0, Unsupported protocol %u\n", s->protocol); continue; } if (state >= pp->num_states) { IP_VS_DBG(2, "BACKUP v0, Invalid %s state %u\n", pp->name, state); continue; } } else { /* protocol in templates is not used for state/timeout */ if (state > 0) { IP_VS_DBG(2, "BACKUP v0, Invalid template state %u\n", state); state = 0; } } ip_vs_conn_fill_param(net, AF_INET, s->protocol, (const union nf_inet_addr *)&s->caddr, s->cport, (const union nf_inet_addr *)&s->vaddr, s->vport, &param); /* Send timeout as Zero */ ip_vs_proc_conn(net, &param, flags, state, s->protocol, AF_INET, (union nf_inet_addr *)&s->daddr, s->dport, 0, 0, opt); } } /* * Handle options */ static inline int ip_vs_proc_seqopt(__u8 *p, unsigned int plen, __u32 *opt_flags, struct ip_vs_sync_conn_options *opt) { struct ip_vs_sync_conn_options *topt; topt = (struct ip_vs_sync_conn_options *)p; if (plen != sizeof(struct ip_vs_sync_conn_options)) { IP_VS_DBG(2, "BACKUP, bogus conn options length\n"); return -EINVAL; } if (*opt_flags & IPVS_OPT_F_SEQ_DATA) { IP_VS_DBG(2, "BACKUP, conn options found twice\n"); return -EINVAL; } ntoh_seq(&topt->in_seq, &opt->in_seq); ntoh_seq(&topt->out_seq, &opt->out_seq); *opt_flags |= IPVS_OPT_F_SEQ_DATA; return 0; } static int ip_vs_proc_str(__u8 *p, unsigned int plen, unsigned int *data_len, __u8 **data, unsigned int maxlen, __u32 *opt_flags, __u32 flag) { if (plen > maxlen) { IP_VS_DBG(2, "BACKUP, bogus par.data len > %d\n", maxlen); return -EINVAL; } if (*opt_flags & flag) { IP_VS_DBG(2, "BACKUP, Par.data found twice 0x%x\n", flag); return -EINVAL; } *data_len = plen; *data = p; *opt_flags |= flag; return 0; } /* * Process a Version 1 sync. 
connection */ static inline int ip_vs_proc_sync_conn(struct net *net, __u8 *p, __u8 *msg_end) { struct ip_vs_sync_conn_options opt; union ip_vs_sync_conn *s; struct ip_vs_protocol *pp; struct ip_vs_conn_param param; __u32 flags; unsigned int af, state, pe_data_len=0, pe_name_len=0; __u8 *pe_data=NULL, *pe_name=NULL; __u32 opt_flags=0; int retc=0; s = (union ip_vs_sync_conn *) p; if (s->v6.type & STYPE_F_INET6) { #ifdef CONFIG_IP_VS_IPV6 af = AF_INET6; p += sizeof(struct ip_vs_sync_v6); #else IP_VS_DBG(3,"BACKUP, IPv6 msg received, and IPVS is not compiled for IPv6\n"); retc = 10; goto out; #endif } else if (!s->v4.type) { af = AF_INET; p += sizeof(struct ip_vs_sync_v4); } else { return -10; } if (p > msg_end) return -20; /* Process optional params check Type & Len. */ while (p < msg_end) { int ptype; int plen; if (p+2 > msg_end) return -30; ptype = *(p++); plen = *(p++); if (!plen || ((p + plen) > msg_end)) return -40; /* Handle seq option p = param data */ switch (ptype & ~IPVS_OPT_F_PARAM) { case IPVS_OPT_SEQ_DATA: if (ip_vs_proc_seqopt(p, plen, &opt_flags, &opt)) return -50; break; case IPVS_OPT_PE_DATA: if (ip_vs_proc_str(p, plen, &pe_data_len, &pe_data, IP_VS_PEDATA_MAXLEN, &opt_flags, IPVS_OPT_F_PE_DATA)) return -60; break; case IPVS_OPT_PE_NAME: if (ip_vs_proc_str(p, plen,&pe_name_len, &pe_name, IP_VS_PENAME_MAXLEN, &opt_flags, IPVS_OPT_F_PE_NAME)) return -70; break; default: /* Param data mandatory ? 
*/ if (!(ptype & IPVS_OPT_F_PARAM)) { IP_VS_DBG(3, "BACKUP, Unknown mandatory param %d found\n", ptype & ~IPVS_OPT_F_PARAM); retc = 20; goto out; } } p += plen; /* Next option */ } /* Get flags and Mask off unsupported */ flags = ntohl(s->v4.flags) & IP_VS_CONN_F_BACKUP_MASK; flags |= IP_VS_CONN_F_SYNC; state = ntohs(s->v4.state); if (!(flags & IP_VS_CONN_F_TEMPLATE)) { pp = ip_vs_proto_get(s->v4.protocol); if (!pp) { IP_VS_DBG(3,"BACKUP, Unsupported protocol %u\n", s->v4.protocol); retc = 30; goto out; } if (state >= pp->num_states) { IP_VS_DBG(3, "BACKUP, Invalid %s state %u\n", pp->name, state); retc = 40; goto out; } } else { /* protocol in templates is not used for state/timeout */ if (state > 0) { IP_VS_DBG(3, "BACKUP, Invalid template state %u\n", state); state = 0; } } if (ip_vs_conn_fill_param_sync(net, af, s, &param, pe_data, pe_data_len, pe_name, pe_name_len)) { retc = 50; goto out; } /* If only IPv4, just silent skip IPv6 */ if (af == AF_INET) ip_vs_proc_conn(net, &param, flags, state, s->v4.protocol, af, (union nf_inet_addr *)&s->v4.daddr, s->v4.dport, ntohl(s->v4.timeout), ntohl(s->v4.fwmark), (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) ); #ifdef CONFIG_IP_VS_IPV6 else ip_vs_proc_conn(net, &param, flags, state, s->v6.protocol, af, (union nf_inet_addr *)&s->v6.daddr, s->v6.dport, ntohl(s->v6.timeout), ntohl(s->v6.fwmark), (opt_flags & IPVS_OPT_F_SEQ_DATA ? &opt : NULL) ); #endif return 0; /* Error exit */ out: IP_VS_DBG(2, "BACKUP, Single msg dropped err:%d\n", retc); return retc; } /* * Process received multicast message and create the corresponding * ip_vs_conn entries. 
* Handles Version 0 & 1 */ static void ip_vs_process_message(struct net *net, __u8 *buffer, const size_t buflen) { struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_sync_mesg *m2 = (struct ip_vs_sync_mesg *)buffer; __u8 *p, *msg_end; int i, nr_conns; if (buflen < sizeof(struct ip_vs_sync_mesg_v0)) { IP_VS_DBG(2, "BACKUP, message header too short\n"); return; } if (buflen != ntohs(m2->size)) { IP_VS_DBG(2, "BACKUP, bogus message size\n"); return; } /* SyncID sanity check */ if (ipvs->backup_syncid != 0 && m2->syncid != ipvs->backup_syncid) { IP_VS_DBG(7, "BACKUP, Ignoring syncid = %d\n", m2->syncid); return; } /* Handle version 1 message */ if ((m2->version == SYNC_PROTO_VER) && (m2->reserved == 0) && (m2->spare == 0)) { msg_end = buffer + sizeof(struct ip_vs_sync_mesg); nr_conns = m2->nr_conns; for (i=0; i<nr_conns; i++) { union ip_vs_sync_conn *s; unsigned int size; int retc; p = msg_end; if (p + sizeof(s->v4) > buffer+buflen) { IP_VS_ERR_RL("BACKUP, Dropping buffer, to small\n"); return; } s = (union ip_vs_sync_conn *)p; size = ntohs(s->v4.ver_size) & SVER_MASK; msg_end = p + size; /* Basic sanity checks */ if (msg_end > buffer+buflen) { IP_VS_ERR_RL("BACKUP, Dropping buffer, msg > buffer\n"); return; } if (ntohs(s->v4.ver_size) >> SVER_SHIFT) { IP_VS_ERR_RL("BACKUP, Dropping buffer, Unknown version %d\n", ntohs(s->v4.ver_size) >> SVER_SHIFT); return; } /* Process a single sync_conn */ retc = ip_vs_proc_sync_conn(net, p, msg_end); if (retc < 0) { IP_VS_ERR_RL("BACKUP, Dropping buffer, Err: %d in decoding\n", retc); return; } /* Make sure we have 32 bit alignment */ msg_end = p + ((size + 3) & ~3); } } else { /* Old type of message */ ip_vs_process_message_v0(net, buffer, buflen); return; } } /* * Setup sndbuf (mode=1) or rcvbuf (mode=0) */ static void set_sock_size(struct sock *sk, int mode, int val) { /* setsockopt(sock, SOL_SOCKET, SO_SNDBUF, &val, sizeof(val)); */ /* setsockopt(sock, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)); */ lock_sock(sk); if (mode) { 
/* NOTE(review): this chunk opens mid-way through set_sock_size(); the
 * statements below are the tail of that function, inside its "if (mode)"
 * branch (mode != 0 means size the send buffer, else the receive buffer).
 */
		/* SO_SNDBUF-style clamp: the kernel stores twice the
		 * requested size to account for sk_buff bookkeeping
		 * overhead, and SOCK_SNDBUF_LOCK pins the value against
		 * later auto-tuning.
		 */
		val = clamp_t(int, val, (SOCK_MIN_SNDBUF + 1) / 2,
			      sysctl_wmem_max);
		sk->sk_sndbuf = val * 2;
		sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
	} else {
		/* same treatment for the receive direction (SO_RCVBUF) */
		val = clamp_t(int, val, (SOCK_MIN_RCVBUF + 1) / 2,
			      sysctl_rmem_max);
		sk->sk_rcvbuf = val * 2;
		sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
	}
	release_sock(sk);
}

/*
 * Setup loopback of outgoing multicasts on a sending socket.
 * Equivalent to the user-space IP_MULTICAST_LOOP socket option;
 * the sync master disables loopback so it does not receive its
 * own sync datagrams.
 */
static void set_mcast_loop(struct sock *sk, u_char loop)
{
	struct inet_sock *inet = inet_sk(sk);

	/* setsockopt(sock, SOL_IP, IP_MULTICAST_LOOP, &loop, sizeof(loop)); */
	lock_sock(sk);
	inet->mc_loop = loop ? 1 : 0;
	release_sock(sk);
}

/*
 * Specify TTL for outgoing multicasts on a sending socket.
 * Equivalent to the user-space IP_MULTICAST_TTL socket option.
 */
static void set_mcast_ttl(struct sock *sk, u_char ttl)
{
	struct inet_sock *inet = inet_sk(sk);

	/* setsockopt(sock, SOL_IP, IP_MULTICAST_TTL, &ttl, sizeof(ttl)); */
	lock_sock(sk);
	inet->mc_ttl = ttl;
	release_sock(sk);
}

/*
 * Specify default interface for outgoing multicasts
 * (the kernel-side analogue of IP_MULTICAST_IF).
 *
 * Returns 0 on success, -ENODEV if @ifname does not exist, or
 * -EINVAL if the socket is already bound to a different device.
 *
 * NOTE(review): called with RTNL presumably held by the caller, since
 * __dev_get_by_name() is the lockless-lookup variant — confirm.
 */
static int set_mcast_if(struct sock *sk, char *ifname)
{
	struct net_device *dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);

	dev = __dev_get_by_name(net, ifname);
	if (!dev)
		return -ENODEV;

	/* refuse to override an explicit SO_BINDTODEVICE binding */
	if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if)
		return -EINVAL;

	lock_sock(sk);
	inet->mc_index = dev->ifindex;
	/* inet->mc_addr = 0; */
	release_sock(sk);

	return 0;
}

/*
 * Set the maximum length of sync message according to the
 * specified interface's MTU.
*/
/*
 * Size the sync messages from the MTU of the multicast interface
 * configured for the given daemon role.
 *
 * For the MASTER role, compute how many version-0-sized connection
 * entries fit into one UDP datagram on the master interface and cap
 * the send buffer accordingly (ipvs->send_mesg_maxlen).  For the
 * BACKUP role, the receive buffer only needs to hold one full UDP
 * payload (ipvs->recv_mesg_maxlen).
 *
 * Returns 0 on success or -ENODEV if the configured interface
 * does not exist.
 */
static int set_sync_mesg_maxlen(struct net *net, int sync_state)
{
	struct netns_ipvs *ipvs = net_ipvs(net);
	struct net_device *dev;
	int num;

	if (sync_state == IP_VS_STATE_MASTER) {
		dev = __dev_get_by_name(net, ipvs->master_mcast_ifn);
		if (!dev)
			return -ENODEV;

		/* entries per datagram; the extra 20 bytes of headroom
		 * presumably leave room for IP options —
		 * NOTE(review): confirm.
		 */
		num = (dev->mtu - sizeof(struct iphdr) -
		       sizeof(struct udphdr) -
		       SYNC_MESG_HEADER_LEN - 20) / SIMPLE_CONN_SIZE;
		ipvs->send_mesg_maxlen = SYNC_MESG_HEADER_LEN +
			SIMPLE_CONN_SIZE * min(num, MAX_CONNS_PER_SYNCBUFF);
		IP_VS_DBG(7, "setting the maximum length of sync sending "
			  "message %d.\n", ipvs->send_mesg_maxlen);
	} else if (sync_state == IP_VS_STATE_BACKUP) {
		dev = __dev_get_by_name(net, ipvs->backup_mcast_ifn);
		if (!dev)
			return -ENODEV;

		/* receive side just needs the full UDP payload size */
		ipvs->recv_mesg_maxlen = dev->mtu -
			sizeof(struct iphdr) - sizeof(struct udphdr);
		IP_VS_DBG(7, "setting the maximum length of sync receiving "
			  "message %d.\n", ipvs->recv_mesg_maxlen);
	}
	return 0;
}

/*
 * Join a multicast group.
 * the group is specified by a class D multicast address 224.0.0.0/8
 * in the in_addr structure passed in as a parameter.
*/ static int join_mcast_group(struct sock *sk, struct in_addr *addr, char *ifname) { struct net *net = sock_net(sk); struct ip_mreqn mreq; struct net_device *dev; int ret; memset(&mreq, 0, sizeof(mreq)); memcpy(&mreq.imr_multiaddr, addr, sizeof(struct in_addr)); dev = __dev_get_by_name(net, ifname); if (!dev) return -ENODEV; if (sk->sk_bound_dev_if && dev->ifindex != sk->sk_bound_dev_if) return -EINVAL; mreq.imr_ifindex = dev->ifindex; lock_sock(sk); ret = ip_mc_join_group(sk, &mreq); release_sock(sk); return ret; } static int bind_mcastif_addr(struct socket *sock, char *ifname) { struct net *net = sock_net(sock->sk); struct net_device *dev; __be32 addr; struct sockaddr_in sin; dev = __dev_get_by_name(net, ifname); if (!dev) return -ENODEV; addr = inet_select_addr(dev, 0, RT_SCOPE_UNIVERSE); if (!addr) pr_err("You probably need to specify IP address on " "multicast interface.\n"); IP_VS_DBG(7, "binding socket with (%s) %pI4\n", ifname, &addr); /* Now bind the socket with the address of multicast interface */ sin.sin_family = AF_INET; sin.sin_addr.s_addr = addr; sin.sin_port = 0; return sock->ops->bind(sock, (struct sockaddr*)&sin, sizeof(sin)); } /* * Set up sending multicast socket over UDP */ static struct socket *make_send_sock(struct net *net, int id) { struct netns_ipvs *ipvs = net_ipvs(net); /* multicast addr */ struct sockaddr_in mcast_addr = { .sin_family = AF_INET, .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), }; struct socket *sock; int result; /* First create a socket move it to right name space later */ result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); } /* * Kernel sockets that are a part of a namespace, should not * hold a reference to a namespace in order to allow to stop it. * After sk_change_net should be released using sk_release_kernel. 
*/ sk_change_net(sock->sk, net); result = set_mcast_if(sock->sk, ipvs->master_mcast_ifn); if (result < 0) { pr_err("Error setting outbound mcast interface\n"); goto error; } set_mcast_loop(sock->sk, 0); set_mcast_ttl(sock->sk, 1); result = sysctl_sync_sock_size(ipvs); if (result > 0) set_sock_size(sock->sk, 1, result); result = bind_mcastif_addr(sock, ipvs->master_mcast_ifn); if (result < 0) { pr_err("Error binding address of the mcast interface\n"); goto error; } result = sock->ops->connect(sock, (struct sockaddr *) &mcast_addr, sizeof(struct sockaddr), 0); if (result < 0) { pr_err("Error connecting to the multicast addr\n"); goto error; } return sock; error: sk_release_kernel(sock->sk); return ERR_PTR(result); } /* * Set up receiving multicast socket over UDP */ static struct socket *make_receive_sock(struct net *net, int id) { struct netns_ipvs *ipvs = net_ipvs(net); /* multicast addr */ struct sockaddr_in mcast_addr = { .sin_family = AF_INET, .sin_port = cpu_to_be16(IP_VS_SYNC_PORT + id), .sin_addr.s_addr = cpu_to_be32(IP_VS_SYNC_GROUP), }; struct socket *sock; int result; /* First create a socket */ result = sock_create_kern(PF_INET, SOCK_DGRAM, IPPROTO_UDP, &sock); if (result < 0) { pr_err("Error during creation of socket; terminating\n"); return ERR_PTR(result); } /* * Kernel sockets that are a part of a namespace, should not * hold a reference to a namespace in order to allow to stop it. * After sk_change_net should be released using sk_release_kernel. 
*/
	/* NOTE(review): this chunk opens mid-way through
	 * make_receive_sock(); the statements below are the tail of that
	 * function: move the socket into the target netns, size it, bind
	 * it to the sync multicast address and join the group.
	 */
	sk_change_net(sock->sk, net);
	/* it is equivalent to the REUSEADDR option in user-space */
	sock->sk->sk_reuse = SK_CAN_REUSE;
	result = sysctl_sync_sock_size(ipvs);
	if (result > 0)
		set_sock_size(sock->sk, 0, result);	/* 0 => rcvbuf */

	result = sock->ops->bind(sock, (struct sockaddr *) &mcast_addr,
			sizeof(struct sockaddr));
	if (result < 0) {
		pr_err("Error binding to the multicast addr\n");
		goto error;
	}

	/* join the multicast group */
	result = join_mcast_group(sock->sk,
			(struct in_addr *) &mcast_addr.sin_addr,
			ipvs->backup_mcast_ifn);
	if (result < 0) {
		pr_err("Error joining to the multicast group\n");
		goto error;
	}

	return sock;

error:
	sk_release_kernel(sock->sk);
	return ERR_PTR(result);
}

/*
 * Send @length bytes of @buffer on @sock without blocking.
 * Returns the number of bytes sent, or a negative errno from
 * kernel_sendmsg() (-EAGAIN when the send queue is full).
 */
static int
ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length)
{
	struct msghdr	msg = {.msg_flags = MSG_DONTWAIT|MSG_NOSIGNAL};
	struct kvec	iov;
	int		len;

	EnterFunction(7);
	iov.iov_base     = (void *)buffer;
	iov.iov_len      = length;

	len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length));

	LeaveFunction(7);
	return len;
}

/*
 * Send one sync message; its on-wire size is taken from the
 * network-order msg->size header field.
 * Returns bytes sent, -EAGAIN (caller retries when the socket becomes
 * writable again), or 0 after logging any other send error.
 */
static int
ip_vs_send_sync_msg(struct socket *sock, struct ip_vs_sync_mesg *msg)
{
	int msize;
	int ret;

	msize = ntohs(msg->size);

	ret = ip_vs_send_async(sock, (char *)msg, msize);
	if (ret >= 0 || ret == -EAGAIN)
		return ret;
	pr_err("ip_vs_send_async error %d\n", ret);
	return 0;
}

/*
 * Non-blocking receive of up to @buflen bytes into @buffer.
 * Returns the datagram length or a negative errno
 * (-EAGAIN when no data is queued).
 */
static int
ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen)
{
	struct msghdr		msg = {NULL,};
	struct kvec		iov;
	int			len;

	EnterFunction(7);

	/* Receive a packet */
	iov.iov_base     = buffer;
	iov.iov_len      = (size_t)buflen;

	len = kernel_recvmsg(sock, &msg, &iov, 1, buflen, MSG_DONTWAIT);

	if (len < 0)
		return len;

	LeaveFunction(7);
	return len;
}

/* Wakeup the master thread for sending */
static void master_wakeup_work_handler(struct work_struct *work)
{
	struct ipvs_master_sync_state *ms =
		container_of(work, struct ipvs_master_sync_state,
			     master_wakeup_work.work);
	struct netns_ipvs *ipvs = ms->ipvs;

	/* sync_lock guards the sync queue state shared with the
	 * enqueue path (the body of this handler continues in the
	 * next chunk).
	 */
	spin_lock_bh(&ipvs->sync_lock);
	if (ms->sync_queue_len &&
	    ms->sync_queue_delay <
IPVS_SYNC_WAKEUP_RATE) { ms->sync_queue_delay = IPVS_SYNC_WAKEUP_RATE; wake_up_process(ms->master_thread); } spin_unlock_bh(&ipvs->sync_lock); } /* Get next buffer to send */ static inline struct ip_vs_sync_buff * next_sync_buff(struct netns_ipvs *ipvs, struct ipvs_master_sync_state *ms) { struct ip_vs_sync_buff *sb; sb = sb_dequeue(ipvs, ms); if (sb) return sb; /* Do not delay entries in buffer for more than 2 seconds */ return get_curr_sync_buff(ipvs, ms, IPVS_SYNC_FLUSH_TIME); } static int sync_thread_master(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = net_ipvs(tinfo->net); struct ipvs_master_sync_state *ms = &ipvs->ms[tinfo->id]; struct sock *sk = tinfo->sock->sk; struct ip_vs_sync_buff *sb; pr_info("sync thread started: state = MASTER, mcast_ifn = %s, " "syncid = %d, id = %d\n", ipvs->master_mcast_ifn, ipvs->master_syncid, tinfo->id); for (;;) { sb = next_sync_buff(ipvs, ms); if (unlikely(kthread_should_stop())) break; if (!sb) { schedule_timeout(IPVS_SYNC_CHECK_PERIOD); continue; } while (ip_vs_send_sync_msg(tinfo->sock, sb->mesg) < 0) { int ret = 0; __wait_event_interruptible(*sk_sleep(sk), sock_writeable(sk) || kthread_should_stop(), ret); if (unlikely(kthread_should_stop())) goto done; } ip_vs_sync_buff_release(sb); } done: __set_current_state(TASK_RUNNING); if (sb) ip_vs_sync_buff_release(sb); /* clean up the sync_buff queue */ while ((sb = sb_dequeue(ipvs, ms))) ip_vs_sync_buff_release(sb); __set_current_state(TASK_RUNNING); /* clean up the current sync_buff */ sb = get_curr_sync_buff(ipvs, ms, 0); if (sb) ip_vs_sync_buff_release(sb); /* release the sending multicast socket */ sk_release_kernel(tinfo->sock->sk); kfree(tinfo); return 0; } static int sync_thread_backup(void *data) { struct ip_vs_sync_thread_data *tinfo = data; struct netns_ipvs *ipvs = net_ipvs(tinfo->net); int len; pr_info("sync thread started: state = BACKUP, mcast_ifn = %s, " "syncid = %d, id = %d\n", ipvs->backup_mcast_ifn, ipvs->backup_syncid, 
tinfo->id); while (!kthread_should_stop()) { wait_event_interruptible(*sk_sleep(tinfo->sock->sk), !skb_queue_empty(&tinfo->sock->sk->sk_receive_queue) || kthread_should_stop()); /* do we have data now? */ while (!skb_queue_empty(&(tinfo->sock->sk->sk_receive_queue))) { len = ip_vs_receive(tinfo->sock, tinfo->buf, ipvs->recv_mesg_maxlen); if (len <= 0) { if (len != -EAGAIN) pr_err("receiving message error\n"); break; } ip_vs_process_message(tinfo->net, tinfo->buf, len); } } /* release the sending multicast socket */ sk_release_kernel(tinfo->sock->sk); kfree(tinfo->buf); kfree(tinfo); return 0; } int start_sync_thread(struct net *net, int state, char *mcast_ifn, __u8 syncid) { struct ip_vs_sync_thread_data *tinfo; struct task_struct **array = NULL, *task; struct socket *sock; struct netns_ipvs *ipvs = net_ipvs(net); char *name; int (*threadfn)(void *data); int id, count; int result = -ENOMEM; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); IP_VS_DBG(7, "Each ip_vs_sync_conn entry needs %Zd bytes\n", sizeof(struct ip_vs_sync_conn_v0)); if (!ipvs->sync_state) { count = clamp(sysctl_sync_ports(ipvs), 1, IPVS_SYNC_PORTS_MAX); ipvs->threads_mask = count - 1; } else count = ipvs->threads_mask + 1; if (state == IP_VS_STATE_MASTER) { if (ipvs->ms) return -EEXIST; strlcpy(ipvs->master_mcast_ifn, mcast_ifn, sizeof(ipvs->master_mcast_ifn)); ipvs->master_syncid = syncid; name = "ipvs-m:%d:%d"; threadfn = sync_thread_master; } else if (state == IP_VS_STATE_BACKUP) { if (ipvs->backup_threads) return -EEXIST; strlcpy(ipvs->backup_mcast_ifn, mcast_ifn, sizeof(ipvs->backup_mcast_ifn)); ipvs->backup_syncid = syncid; name = "ipvs-b:%d:%d"; threadfn = sync_thread_backup; } else { return -EINVAL; } if (state == IP_VS_STATE_MASTER) { struct ipvs_master_sync_state *ms; ipvs->ms = kzalloc(count * sizeof(ipvs->ms[0]), GFP_KERNEL); if (!ipvs->ms) goto out; ms = ipvs->ms; for (id = 0; id < count; id++, ms++) { INIT_LIST_HEAD(&ms->sync_queue); ms->sync_queue_len = 0; 
ms->sync_queue_delay = 0; INIT_DELAYED_WORK(&ms->master_wakeup_work, master_wakeup_work_handler); ms->ipvs = ipvs; } } else { array = kzalloc(count * sizeof(struct task_struct *), GFP_KERNEL); if (!array) goto out; } set_sync_mesg_maxlen(net, state); tinfo = NULL; for (id = 0; id < count; id++) { if (state == IP_VS_STATE_MASTER) sock = make_send_sock(net, id); else sock = make_receive_sock(net, id); if (IS_ERR(sock)) { result = PTR_ERR(sock); goto outtinfo; } tinfo = kmalloc(sizeof(*tinfo), GFP_KERNEL); if (!tinfo) goto outsocket; tinfo->net = net; tinfo->sock = sock; if (state == IP_VS_STATE_BACKUP) { tinfo->buf = kmalloc(ipvs->recv_mesg_maxlen, GFP_KERNEL); if (!tinfo->buf) goto outtinfo; } else { tinfo->buf = NULL; } tinfo->id = id; task = kthread_run(threadfn, tinfo, name, ipvs->gen, id); if (IS_ERR(task)) { result = PTR_ERR(task); goto outtinfo; } tinfo = NULL; if (state == IP_VS_STATE_MASTER) ipvs->ms[id].master_thread = task; else array[id] = task; } /* mark as active */ if (state == IP_VS_STATE_BACKUP) ipvs->backup_threads = array; spin_lock_bh(&ipvs->sync_buff_lock); ipvs->sync_state |= state; spin_unlock_bh(&ipvs->sync_buff_lock); /* increase the module use count */ ip_vs_use_count_inc(); return 0; outsocket: sk_release_kernel(sock->sk); outtinfo: if (tinfo) { sk_release_kernel(tinfo->sock->sk); kfree(tinfo->buf); kfree(tinfo); } count = id; while (count-- > 0) { if (state == IP_VS_STATE_MASTER) kthread_stop(ipvs->ms[count].master_thread); else kthread_stop(array[count]); } kfree(array); out: if (!(ipvs->sync_state & IP_VS_STATE_MASTER)) { kfree(ipvs->ms); ipvs->ms = NULL; } return result; } int stop_sync_thread(struct net *net, int state) { struct netns_ipvs *ipvs = net_ipvs(net); struct task_struct **array; int id; int retc = -EINVAL; IP_VS_DBG(7, "%s(): pid %d\n", __func__, task_pid_nr(current)); if (state == IP_VS_STATE_MASTER) { if (!ipvs->ms) return -ESRCH; /* * The lock synchronizes with sb_queue_tail(), so that we don't * add sync buffers to the 
queue, when we are already in * progress of stopping the master sync daemon. */ spin_lock_bh(&ipvs->sync_buff_lock); spin_lock(&ipvs->sync_lock); ipvs->sync_state &= ~IP_VS_STATE_MASTER; spin_unlock(&ipvs->sync_lock); spin_unlock_bh(&ipvs->sync_buff_lock); retc = 0; for (id = ipvs->threads_mask; id >= 0; id--) { struct ipvs_master_sync_state *ms = &ipvs->ms[id]; int ret; pr_info("stopping master sync thread %d ...\n", task_pid_nr(ms->master_thread)); cancel_delayed_work_sync(&ms->master_wakeup_work); ret = kthread_stop(ms->master_thread); if (retc >= 0) retc = ret; } kfree(ipvs->ms); ipvs->ms = NULL; } else if (state == IP_VS_STATE_BACKUP) { if (!ipvs->backup_threads) return -ESRCH; ipvs->sync_state &= ~IP_VS_STATE_BACKUP; array = ipvs->backup_threads; retc = 0; for (id = ipvs->threads_mask; id >= 0; id--) { int ret; pr_info("stopping backup sync thread %d ...\n", task_pid_nr(array[id])); ret = kthread_stop(array[id]); if (retc >= 0) retc = ret; } kfree(array); ipvs->backup_threads = NULL; } /* decrease the module use count */ ip_vs_use_count_dec(); return retc; } /* * Initialize data struct for each netns */ int __net_init ip_vs_sync_net_init(struct net *net) { struct netns_ipvs *ipvs = net_ipvs(net); __mutex_init(&ipvs->sync_mutex, "ipvs->sync_mutex", &__ipvs_sync_key); spin_lock_init(&ipvs->sync_lock); spin_lock_init(&ipvs->sync_buff_lock); return 0; } void ip_vs_sync_net_cleanup(struct net *net) { int retc; struct netns_ipvs *ipvs = net_ipvs(net); mutex_lock(&ipvs->sync_mutex); retc = stop_sync_thread(net, IP_VS_STATE_MASTER); if (retc && retc != -ESRCH) pr_err("Failed to stop Master Daemon\n"); retc = stop_sync_thread(net, IP_VS_STATE_BACKUP); if (retc && retc != -ESRCH) pr_err("Failed to stop Backup Daemon\n"); mutex_unlock(&ipvs->sync_mutex); }
gpl-2.0
NStep/nx_bullhead
drivers/mtd/nand/gpmi-nand/gpmi-nand.c
1739
46851
/* * Freescale GPMI NAND Flash Driver * * Copyright (C) 2010-2011 Freescale Semiconductor, Inc. * Copyright (C) 2008 Embedded Alley Solutions, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/clk.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/mtd/partitions.h> #include <linux/pinctrl/consumer.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/of_mtd.h> #include "gpmi-nand.h" /* Resource names for the GPMI NAND driver. */ #define GPMI_NAND_GPMI_REGS_ADDR_RES_NAME "gpmi-nand" #define GPMI_NAND_BCH_REGS_ADDR_RES_NAME "bch" #define GPMI_NAND_BCH_INTERRUPT_RES_NAME "bch" /* add our owner bbt descriptor */ static uint8_t scan_ff_pattern[] = { 0xff }; static struct nand_bbt_descr gpmi_bbt_descr = { .options = 0, .offs = 0, .len = 1, .pattern = scan_ff_pattern }; /* We will use all the (page + OOB). */ static struct nand_ecclayout gpmi_hw_ecclayout = { .eccbytes = 0, .eccpos = { 0, }, .oobfree = { {.offset = 0, .length = 0} } }; static irqreturn_t bch_irq(int irq, void *cookie) { struct gpmi_nand_data *this = cookie; gpmi_clear_bch(this); complete(&this->bch_done); return IRQ_HANDLED; } /* * Calculate the ECC strength by hand: * E : The ECC strength. * G : the length of Galois Field. 
* N : The chunk count of per page.
 * O : the oobsize of the NAND chip.
 * M : the metasize of per page.
 *
 * The formula is :
 *	E * G * N <= (O - M) * 8
 *
 * So, we get E by:
 *	     (O - M) * 8
 *	E <= -------------
 *	        G * N
 */
static inline int get_ecc_strength(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info	*mtd = &this->mtd;
	int ecc_strength;

	ecc_strength = ((mtd->oobsize - geo->metadata_size) * 8)
			/ (geo->gf_len * geo->ecc_chunk_count);

	/* We need the minor even number. */
	return round_down(ecc_strength, 2);
}

/*
 * Sanity-check the computed BCH geometry against the limits of the
 * SoC's ECC engine.  Returns true when the geometry is usable.
 */
static inline bool gpmi_check_ecc(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;

	/* Do the sanity check. */
	if (GPMI_IS_MX23(this) || GPMI_IS_MX28(this)) {
		/* The mx23/mx28 only support the GF13. */
		if (geo->gf_len == 14)
			return false;

		if (geo->ecc_strength > MXS_ECC_STRENGTH_MAX)
			return false;
	} else if (GPMI_IS_MX6Q(this)) {
		if (geo->ecc_strength > MX6_ECC_STRENGTH_MAX)
			return false;
	}
	return true;
}

/*
 * Derive the BCH geometry (metadata size, Galois-field length, chunk
 * size/count, ECC strength, buffer layout and the block-mark offsets
 * for bad-block-mark swapping) from the chip's page and OOB sizes.
 * Returns 0 on success, -EINVAL if the required ECC strength exceeds
 * the hardware's capability.  (The function body continues in the
 * next chunk.)
 */
int common_nfc_set_geometry(struct gpmi_nand_data *this)
{
	struct bch_geometry *geo = &this->bch_geometry;
	struct mtd_info *mtd = &this->mtd;
	unsigned int metadata_size;
	unsigned int status_size;
	unsigned int block_mark_bit_offset;

	/*
	 * The size of the metadata can be changed, though we set it to 10
	 * bytes now. But it can't be too large, because we have to save
	 * enough space for BCH.
	 */
	geo->metadata_size = 10;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk_size = 512;
	while (geo->ecc_chunk_size < mtd->oobsize) {
		geo->ecc_chunk_size *= 2; /* keep C >= O */
		geo->gf_len = 14;	  /* larger chunks need GF(2^14) */
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunk_size;

	/* We use the same ECC strength for all chunks. */
	geo->ecc_strength = get_ecc_strength(this);
	if (!gpmi_check_ecc(this)) {
		dev_err(this->dev, "We can not support this nand chip."
" Its required ecc strength(%d) is beyond our" " capability(%d).\n", geo->ecc_strength, (GPMI_IS_MX6Q(this) ? MX6_ECC_STRENGTH_MAX : MXS_ECC_STRENGTH_MAX)); return -EINVAL; } geo->page_size = mtd->writesize + mtd->oobsize; geo->payload_size = mtd->writesize; /* * The auxiliary buffer contains the metadata and the ECC status. The * metadata is padded to the nearest 32-bit boundary. The ECC status * contains one byte for every ECC chunk, and is also padded to the * nearest 32-bit boundary. */ metadata_size = ALIGN(geo->metadata_size, 4); status_size = ALIGN(geo->ecc_chunk_count, 4); geo->auxiliary_size = metadata_size + status_size; geo->auxiliary_status_offset = metadata_size; if (!this->swap_block_mark) return 0; /* * We need to compute the byte and bit offsets of * the physical block mark within the ECC-based view of the page. * * NAND chip with 2K page shows below: * (Block Mark) * | | * | D | * |<---->| * V V * +---+----------+-+----------+-+----------+-+----------+-+ * | M | data |E| data |E| data |E| data |E| * +---+----------+-+----------+-+----------+-+----------+-+ * * The position of block mark moves forward in the ECC-based view * of page, and the delta is: * * E * G * (N - 1) * D = (---------------- + M) * 8 * * With the formula to compute the ECC strength, and the condition * : C >= O (C is the ecc chunk size) * * It's easy to deduce to the following result: * * E * G (O - M) C - M C - M * ----------- <= ------- <= -------- < --------- * 8 N N (N - 1) * * So, we get: * * E * G * (N - 1) * D = (---------------- + M) < C * 8 * * The above inequality means the position of block mark * within the ECC-based view of the page is still in the data chunk, * and it's NOT in the ECC bits of the chunk. 
* * Use the following to compute the bit position of the * physical block mark within the ECC-based view of the page: * (page_size - D) * 8 * * --Huang Shijie */ block_mark_bit_offset = mtd->writesize * 8 - (geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1) + geo->metadata_size * 8); geo->block_mark_byte_offset = block_mark_bit_offset / 8; geo->block_mark_bit_offset = block_mark_bit_offset % 8; return 0; } struct dma_chan *get_dma_chan(struct gpmi_nand_data *this) { int chipnr = this->current_chip; return this->dma_chans[chipnr]; } /* Can we use the upper's buffer directly for DMA? */ void prepare_data_dma(struct gpmi_nand_data *this, enum dma_data_direction dr) { struct scatterlist *sgl = &this->data_sgl; int ret; this->direct_dma_map_ok = true; /* first try to map the upper buffer directly */ sg_init_one(sgl, this->upper_buf, this->upper_len); ret = dma_map_sg(this->dev, sgl, 1, dr); if (ret == 0) { /* We have to use our own DMA buffer. */ sg_init_one(sgl, this->data_buffer_dma, PAGE_SIZE); if (dr == DMA_TO_DEVICE) memcpy(this->data_buffer_dma, this->upper_buf, this->upper_len); ret = dma_map_sg(this->dev, sgl, 1, dr); if (ret == 0) pr_err("DMA mapping failed.\n"); this->direct_dma_map_ok = false; } } /* This will be called after the DMA operation is finished. */ static void dma_irq_callback(void *param) { struct gpmi_nand_data *this = param; struct completion *dma_c = &this->dma_done; switch (this->dma_type) { case DMA_FOR_COMMAND: dma_unmap_sg(this->dev, &this->cmd_sgl, 1, DMA_TO_DEVICE); break; case DMA_FOR_READ_DATA: dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_FROM_DEVICE); if (this->direct_dma_map_ok == false) memcpy(this->upper_buf, this->data_buffer_dma, this->upper_len); break; case DMA_FOR_WRITE_DATA: dma_unmap_sg(this->dev, &this->data_sgl, 1, DMA_TO_DEVICE); break; case DMA_FOR_READ_ECC_PAGE: case DMA_FOR_WRITE_ECC_PAGE: /* We have to wait the BCH interrupt to finish. 
*/ break; default: pr_err("in wrong DMA operation.\n"); } complete(dma_c); } int start_dma_without_bch_irq(struct gpmi_nand_data *this, struct dma_async_tx_descriptor *desc) { struct completion *dma_c = &this->dma_done; int err; init_completion(dma_c); desc->callback = dma_irq_callback; desc->callback_param = this; dmaengine_submit(desc); dma_async_issue_pending(get_dma_chan(this)); /* Wait for the interrupt from the DMA block. */ err = wait_for_completion_timeout(dma_c, msecs_to_jiffies(1000)); if (!err) { pr_err("DMA timeout, last DMA :%d\n", this->last_dma_type); gpmi_dump_info(this); return -ETIMEDOUT; } return 0; } /* * This function is used in BCH reading or BCH writing pages. * It will wait for the BCH interrupt as long as ONE second. * Actually, we must wait for two interrupts : * [1] firstly the DMA interrupt and * [2] secondly the BCH interrupt. */ int start_dma_with_bch_irq(struct gpmi_nand_data *this, struct dma_async_tx_descriptor *desc) { struct completion *bch_c = &this->bch_done; int err; /* Prepare to receive an interrupt from the BCH block. */ init_completion(bch_c); /* start the DMA */ start_dma_without_bch_irq(this, desc); /* Wait for the interrupt from the BCH block. 
*/ err = wait_for_completion_timeout(bch_c, msecs_to_jiffies(1000)); if (!err) { pr_err("BCH timeout, last DMA :%d\n", this->last_dma_type); gpmi_dump_info(this); return -ETIMEDOUT; } return 0; } static int acquire_register_block(struct gpmi_nand_data *this, const char *res_name) { struct platform_device *pdev = this->pdev; struct resources *res = &this->resources; struct resource *r; void __iomem *p; r = platform_get_resource_byname(pdev, IORESOURCE_MEM, res_name); if (!r) { pr_err("Can't get resource for %s\n", res_name); return -ENXIO; } p = ioremap(r->start, resource_size(r)); if (!p) { pr_err("Can't remap %s\n", res_name); return -ENOMEM; } if (!strcmp(res_name, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME)) res->gpmi_regs = p; else if (!strcmp(res_name, GPMI_NAND_BCH_REGS_ADDR_RES_NAME)) res->bch_regs = p; else pr_err("unknown resource name : %s\n", res_name); return 0; } static void release_register_block(struct gpmi_nand_data *this) { struct resources *res = &this->resources; if (res->gpmi_regs) iounmap(res->gpmi_regs); if (res->bch_regs) iounmap(res->bch_regs); res->gpmi_regs = NULL; res->bch_regs = NULL; } static int acquire_bch_irq(struct gpmi_nand_data *this, irq_handler_t irq_h) { struct platform_device *pdev = this->pdev; struct resources *res = &this->resources; const char *res_name = GPMI_NAND_BCH_INTERRUPT_RES_NAME; struct resource *r; int err; r = platform_get_resource_byname(pdev, IORESOURCE_IRQ, res_name); if (!r) { pr_err("Can't get resource for %s\n", res_name); return -ENXIO; } err = request_irq(r->start, irq_h, 0, res_name, this); if (err) { pr_err("Can't own %s\n", res_name); return err; } res->bch_low_interrupt = r->start; res->bch_high_interrupt = r->end; return 0; } static void release_bch_irq(struct gpmi_nand_data *this) { struct resources *res = &this->resources; int i = res->bch_low_interrupt; for (; i <= res->bch_high_interrupt; i++) free_irq(i, this); } static void release_dma_channels(struct gpmi_nand_data *this) { unsigned int i; for (i = 0; 
i < DMA_CHANS; i++) if (this->dma_chans[i]) { dma_release_channel(this->dma_chans[i]); this->dma_chans[i] = NULL; } } static int acquire_dma_channels(struct gpmi_nand_data *this) { struct platform_device *pdev = this->pdev; struct dma_chan *dma_chan; /* request dma channel */ dma_chan = dma_request_slave_channel(&pdev->dev, "rx-tx"); if (!dma_chan) { pr_err("Failed to request DMA channel.\n"); goto acquire_err; } this->dma_chans[0] = dma_chan; return 0; acquire_err: release_dma_channels(this); return -EINVAL; } static void gpmi_put_clks(struct gpmi_nand_data *this) { struct resources *r = &this->resources; struct clk *clk; int i; for (i = 0; i < GPMI_CLK_MAX; i++) { clk = r->clock[i]; if (clk) { clk_put(clk); r->clock[i] = NULL; } } } static char *extra_clks_for_mx6q[GPMI_CLK_MAX] = { "gpmi_apb", "gpmi_bch", "gpmi_bch_apb", "per1_bch", }; static int gpmi_get_clks(struct gpmi_nand_data *this) { struct resources *r = &this->resources; char **extra_clks = NULL; struct clk *clk; int i; /* The main clock is stored in the first. */ r->clock[0] = clk_get(this->dev, "gpmi_io"); if (IS_ERR(r->clock[0])) goto err_clock; /* Get extra clocks */ if (GPMI_IS_MX6Q(this)) extra_clks = extra_clks_for_mx6q; if (!extra_clks) return 0; for (i = 1; i < GPMI_CLK_MAX; i++) { if (extra_clks[i - 1] == NULL) break; clk = clk_get(this->dev, extra_clks[i - 1]); if (IS_ERR(clk)) goto err_clock; r->clock[i] = clk; } if (GPMI_IS_MX6Q(this)) /* * Set the default value for the gpmi clock in mx6q: * * If you want to use the ONFI nand which is in the * Synchronous Mode, you should change the clock as you need. 
*/ clk_set_rate(r->clock[0], 22000000); return 0; err_clock: dev_dbg(this->dev, "failed in finding the clocks.\n"); gpmi_put_clks(this); return -ENOMEM; } static int acquire_resources(struct gpmi_nand_data *this) { struct pinctrl *pinctrl; int ret; ret = acquire_register_block(this, GPMI_NAND_GPMI_REGS_ADDR_RES_NAME); if (ret) goto exit_regs; ret = acquire_register_block(this, GPMI_NAND_BCH_REGS_ADDR_RES_NAME); if (ret) goto exit_regs; ret = acquire_bch_irq(this, bch_irq); if (ret) goto exit_regs; ret = acquire_dma_channels(this); if (ret) goto exit_dma_channels; pinctrl = devm_pinctrl_get_select_default(&this->pdev->dev); if (IS_ERR(pinctrl)) { ret = PTR_ERR(pinctrl); goto exit_pin; } ret = gpmi_get_clks(this); if (ret) goto exit_clock; return 0; exit_clock: exit_pin: release_dma_channels(this); exit_dma_channels: release_bch_irq(this); exit_regs: release_register_block(this); return ret; } static void release_resources(struct gpmi_nand_data *this) { gpmi_put_clks(this); release_register_block(this); release_bch_irq(this); release_dma_channels(this); } static int init_hardware(struct gpmi_nand_data *this) { int ret; /* * This structure contains the "safe" GPMI timing that should succeed * with any NAND Flash device * (although, with less-than-optimal performance). */ struct nand_timing safe_timing = { .data_setup_in_ns = 80, .data_hold_in_ns = 60, .address_setup_in_ns = 25, .gpmi_sample_delay_in_ns = 6, .tREA_in_ns = -1, .tRLOH_in_ns = -1, .tRHOH_in_ns = -1, }; /* Initialize the hardwares. 
*/ ret = gpmi_init(this); if (ret) return ret; this->timing = safe_timing; return 0; } static int read_page_prepare(struct gpmi_nand_data *this, void *destination, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, void **use_virt, dma_addr_t *use_phys) { struct device *dev = this->dev; if (virt_addr_valid(destination)) { dma_addr_t dest_phys; dest_phys = dma_map_single(dev, destination, length, DMA_FROM_DEVICE); if (dma_mapping_error(dev, dest_phys)) { if (alt_size < length) { pr_err("%s, Alternate buffer is too small\n", __func__); return -ENOMEM; } goto map_failed; } *use_virt = destination; *use_phys = dest_phys; this->direct_dma_map_ok = true; return 0; } map_failed: *use_virt = alt_virt; *use_phys = alt_phys; this->direct_dma_map_ok = false; return 0; } static inline void read_page_end(struct gpmi_nand_data *this, void *destination, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, void *used_virt, dma_addr_t used_phys) { if (this->direct_dma_map_ok) dma_unmap_single(this->dev, used_phys, length, DMA_FROM_DEVICE); } static inline void read_page_swap_end(struct gpmi_nand_data *this, void *destination, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, void *used_virt, dma_addr_t used_phys) { if (!this->direct_dma_map_ok) memcpy(destination, alt_virt, length); } static int send_page_prepare(struct gpmi_nand_data *this, const void *source, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, const void **use_virt, dma_addr_t *use_phys) { struct device *dev = this->dev; if (virt_addr_valid(source)) { dma_addr_t source_phys; source_phys = dma_map_single(dev, (void *)source, length, DMA_TO_DEVICE); if (dma_mapping_error(dev, source_phys)) { if (alt_size < length) { pr_err("%s, Alternate buffer is too small\n", __func__); return -ENOMEM; } goto map_failed; } *use_virt = source; *use_phys = source_phys; return 0; } map_failed: /* * Copy the content of the source buffer into the 
alternate * buffer and set up the return values accordingly. */ memcpy(alt_virt, source, length); *use_virt = alt_virt; *use_phys = alt_phys; return 0; } static void send_page_end(struct gpmi_nand_data *this, const void *source, unsigned length, void *alt_virt, dma_addr_t alt_phys, unsigned alt_size, const void *used_virt, dma_addr_t used_phys) { struct device *dev = this->dev; if (used_virt == source) dma_unmap_single(dev, used_phys, length, DMA_TO_DEVICE); } static void gpmi_free_dma_buffer(struct gpmi_nand_data *this) { struct device *dev = this->dev; if (this->page_buffer_virt && virt_addr_valid(this->page_buffer_virt)) dma_free_coherent(dev, this->page_buffer_size, this->page_buffer_virt, this->page_buffer_phys); kfree(this->cmd_buffer); kfree(this->data_buffer_dma); this->cmd_buffer = NULL; this->data_buffer_dma = NULL; this->page_buffer_virt = NULL; this->page_buffer_size = 0; } /* Allocate the DMA buffers */ static int gpmi_alloc_dma_buffer(struct gpmi_nand_data *this) { struct bch_geometry *geo = &this->bch_geometry; struct device *dev = this->dev; /* [1] Allocate a command buffer. PAGE_SIZE is enough. */ this->cmd_buffer = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL); if (this->cmd_buffer == NULL) goto error_alloc; /* [2] Allocate a read/write data buffer. PAGE_SIZE is enough. */ this->data_buffer_dma = kzalloc(PAGE_SIZE, GFP_DMA | GFP_KERNEL); if (this->data_buffer_dma == NULL) goto error_alloc; /* * [3] Allocate the page buffer. * * Both the payload buffer and the auxiliary buffer must appear on * 32-bit boundaries. We presume the size of the payload buffer is a * power of two and is much larger than four, which guarantees the * auxiliary buffer will appear on a 32-bit boundary. */ this->page_buffer_size = geo->payload_size + geo->auxiliary_size; this->page_buffer_virt = dma_alloc_coherent(dev, this->page_buffer_size, &this->page_buffer_phys, GFP_DMA); if (!this->page_buffer_virt) goto error_alloc; /* Slice up the page buffer. 
*/ this->payload_virt = this->page_buffer_virt; this->payload_phys = this->page_buffer_phys; this->auxiliary_virt = this->payload_virt + geo->payload_size; this->auxiliary_phys = this->payload_phys + geo->payload_size; return 0; error_alloc: gpmi_free_dma_buffer(this); pr_err("Error allocating DMA buffers!\n"); return -ENOMEM; } static void gpmi_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; int ret; /* * Every operation begins with a command byte and a series of zero or * more address bytes. These are distinguished by either the Address * Latch Enable (ALE) or Command Latch Enable (CLE) signals being * asserted. When MTD is ready to execute the command, it will deassert * both latch enables. * * Rather than run a separate DMA operation for every single byte, we * queue them up and run a single DMA operation for the entire series * of command and data bytes. NAND_CMD_NONE means the END of the queue. 
*/ if ((ctrl & (NAND_ALE | NAND_CLE))) { if (data != NAND_CMD_NONE) this->cmd_buffer[this->command_length++] = data; return; } if (!this->command_length) return; ret = gpmi_send_command(this); if (ret) pr_err("Chip: %u, Error %d\n", this->current_chip, ret); this->command_length = 0; } static int gpmi_dev_ready(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; return gpmi_is_ready(this, this->current_chip); } static void gpmi_select_chip(struct mtd_info *mtd, int chipnr) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; if ((this->current_chip < 0) && (chipnr >= 0)) gpmi_begin(this); else if ((this->current_chip >= 0) && (chipnr < 0)) gpmi_end(this); this->current_chip = chipnr; } static void gpmi_read_buf(struct mtd_info *mtd, uint8_t *buf, int len) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; pr_debug("len is %d\n", len); this->upper_buf = buf; this->upper_len = len; gpmi_read_data(this); } static void gpmi_write_buf(struct mtd_info *mtd, const uint8_t *buf, int len) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; pr_debug("len is %d\n", len); this->upper_buf = (uint8_t *)buf; this->upper_len = len; gpmi_send_data(this); } static uint8_t gpmi_read_byte(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; uint8_t *buf = this->data_buffer_dma; gpmi_read_buf(mtd, buf, 1); return buf[0]; } /* * Handles block mark swapping. * It can be called in swapping the block mark, or swapping it back, * because the the operations are the same. 
*/ static void block_mark_swapping(struct gpmi_nand_data *this, void *payload, void *auxiliary) { struct bch_geometry *nfc_geo = &this->bch_geometry; unsigned char *p; unsigned char *a; unsigned int bit; unsigned char mask; unsigned char from_data; unsigned char from_oob; if (!this->swap_block_mark) return; /* * If control arrives here, we're swapping. Make some convenience * variables. */ bit = nfc_geo->block_mark_bit_offset; p = payload + nfc_geo->block_mark_byte_offset; a = auxiliary; /* * Get the byte from the data area that overlays the block mark. Since * the ECC engine applies its own view to the bits in the page, the * physical block mark won't (in general) appear on a byte boundary in * the data. */ from_data = (p[0] >> bit) | (p[1] << (8 - bit)); /* Get the byte from the OOB. */ from_oob = a[0]; /* Swap them. */ a[0] = from_data; mask = (0x1 << bit) - 1; p[0] = (p[0] & mask) | (from_oob << bit); mask = ~0 << bit; p[1] = (p[1] & mask) | (from_oob >> (8 - bit)); } static int gpmi_ecc_read_page(struct mtd_info *mtd, struct nand_chip *chip, uint8_t *buf, int oob_required, int page) { struct gpmi_nand_data *this = chip->priv; struct bch_geometry *nfc_geo = &this->bch_geometry; void *payload_virt; dma_addr_t payload_phys; void *auxiliary_virt; dma_addr_t auxiliary_phys; unsigned int i; unsigned char *status; unsigned int max_bitflips = 0; int ret; pr_debug("page number is : %d\n", page); ret = read_page_prepare(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, &payload_virt, &payload_phys); if (ret) { pr_err("Inadequate DMA buffer\n"); ret = -ENOMEM; return ret; } auxiliary_virt = this->auxiliary_virt; auxiliary_phys = this->auxiliary_phys; /* go! 
*/ ret = gpmi_read_page(this, payload_phys, auxiliary_phys); read_page_end(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, payload_virt, payload_phys); if (ret) { pr_err("Error in ECC-based read: %d\n", ret); return ret; } /* handle the block mark swapping */ block_mark_swapping(this, payload_virt, auxiliary_virt); /* Loop over status bytes, accumulating ECC status. */ status = auxiliary_virt + nfc_geo->auxiliary_status_offset; for (i = 0; i < nfc_geo->ecc_chunk_count; i++, status++) { if ((*status == STATUS_GOOD) || (*status == STATUS_ERASED)) continue; if (*status == STATUS_UNCORRECTABLE) { mtd->ecc_stats.failed++; continue; } mtd->ecc_stats.corrected += *status; max_bitflips = max_t(unsigned int, max_bitflips, *status); } if (oob_required) { /* * It's time to deliver the OOB bytes. See gpmi_ecc_read_oob() * for details about our policy for delivering the OOB. * * We fill the caller's buffer with set bits, and then copy the * block mark to th caller's buffer. Note that, if block mark * swapping was necessary, it has already been done, so we can * rely on the first byte of the auxiliary buffer to contain * the block mark. */ memset(chip->oob_poi, ~0, mtd->oobsize); chip->oob_poi[0] = ((uint8_t *) auxiliary_virt)[0]; } read_page_swap_end(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, payload_virt, payload_phys); return max_bitflips; } static int gpmi_ecc_write_page(struct mtd_info *mtd, struct nand_chip *chip, const uint8_t *buf, int oob_required) { struct gpmi_nand_data *this = chip->priv; struct bch_geometry *nfc_geo = &this->bch_geometry; const void *payload_virt; dma_addr_t payload_phys; const void *auxiliary_virt; dma_addr_t auxiliary_phys; int ret; pr_debug("ecc write page.\n"); if (this->swap_block_mark) { /* * If control arrives here, we're doing block mark swapping. * Since we can't modify the caller's buffers, we must copy them * into our own. 
*/ memcpy(this->payload_virt, buf, mtd->writesize); payload_virt = this->payload_virt; payload_phys = this->payload_phys; memcpy(this->auxiliary_virt, chip->oob_poi, nfc_geo->auxiliary_size); auxiliary_virt = this->auxiliary_virt; auxiliary_phys = this->auxiliary_phys; /* Handle block mark swapping. */ block_mark_swapping(this, (void *) payload_virt, (void *) auxiliary_virt); } else { /* * If control arrives here, we're not doing block mark swapping, * so we can to try and use the caller's buffers. */ ret = send_page_prepare(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, &payload_virt, &payload_phys); if (ret) { pr_err("Inadequate payload DMA buffer\n"); return 0; } ret = send_page_prepare(this, chip->oob_poi, mtd->oobsize, this->auxiliary_virt, this->auxiliary_phys, nfc_geo->auxiliary_size, &auxiliary_virt, &auxiliary_phys); if (ret) { pr_err("Inadequate auxiliary DMA buffer\n"); goto exit_auxiliary; } } /* Ask the NFC. */ ret = gpmi_send_page(this, payload_phys, auxiliary_phys); if (ret) pr_err("Error in ECC-based write: %d\n", ret); if (!this->swap_block_mark) { send_page_end(this, chip->oob_poi, mtd->oobsize, this->auxiliary_virt, this->auxiliary_phys, nfc_geo->auxiliary_size, auxiliary_virt, auxiliary_phys); exit_auxiliary: send_page_end(this, buf, mtd->writesize, this->payload_virt, this->payload_phys, nfc_geo->payload_size, payload_virt, payload_phys); } return 0; } /* * There are several places in this driver where we have to handle the OOB and * block marks. This is the function where things are the most complicated, so * this is where we try to explain it all. All the other places refer back to * here. * * These are the rules, in order of decreasing importance: * * 1) Nothing the caller does can be allowed to imperil the block mark. * * 2) In read operations, the first byte of the OOB we return must reflect the * true state of the block mark, no matter where that block mark appears in * the physical page. 
* * 3) ECC-based read operations return an OOB full of set bits (since we never * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads * return). * * 4) "Raw" read operations return a direct view of the physical bytes in the * page, using the conventional definition of which bytes are data and which * are OOB. This gives the caller a way to see the actual, physical bytes * in the page, without the distortions applied by our ECC engine. * * * What we do for this specific read operation depends on two questions: * * 1) Are we doing a "raw" read, or an ECC-based read? * * 2) Are we using block mark swapping or transcription? * * There are four cases, illustrated by the following Karnaugh map: * * | Raw | ECC-based | * -------------+-------------------------+-------------------------+ * | Read the conventional | | * | OOB at the end of the | | * Swapping | page and return it. It | | * | contains exactly what | | * | we want. | Read the block mark and | * -------------+-------------------------+ return it in a buffer | * | Read the conventional | full of set bits. | * | OOB at the end of the | | * | page and also the block | | * Transcribing | mark in the metadata. | | * | Copy the block mark | | * | into the first byte of | | * | the OOB. | | * -------------+-------------------------+-------------------------+ * * Note that we break rule #4 in the Transcribing/Raw case because we're not * giving an accurate view of the actual, physical bytes in the page (we're * overwriting the block mark). That's OK because it's more important to follow * rule #2. * * It turns out that knowing whether we want an "ECC-based" or "raw" read is not * easy. When reading a page, for example, the NAND Flash MTD code calls our * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an * ECC-based or raw view of the page is implicit in which function it calls * (there is a similar pair of ECC-based/raw functions for writing). 
* * FIXME: The following paragraph is incorrect, now that there exist * ecc.read_oob_raw and ecc.write_oob_raw functions. * * Since MTD assumes the OOB is not covered by ECC, there is no pair of * ECC-based/raw functions for reading or or writing the OOB. The fact that the * caller wants an ECC-based or raw view of the page is not propagated down to * this driver. */ static int gpmi_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) { struct gpmi_nand_data *this = chip->priv; pr_debug("page number is %d\n", page); /* clear the OOB buffer */ memset(chip->oob_poi, ~0, mtd->oobsize); /* Read out the conventional OOB. */ chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); chip->read_buf(mtd, chip->oob_poi, mtd->oobsize); /* * Now, we want to make sure the block mark is correct. In the * Swapping/Raw case, we already have it. Otherwise, we need to * explicitly read it. */ if (!this->swap_block_mark) { /* Read the block mark into the first byte of the OOB buffer. */ chip->cmdfunc(mtd, NAND_CMD_READ0, 0, page); chip->oob_poi[0] = chip->read_byte(mtd); } return 0; } static int gpmi_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, int page) { /* * The BCH will use all the (page + oob). * Our gpmi_hw_ecclayout can only prohibit the JFFS2 to write the oob. * But it can not stop some ioctls such MEMWRITEOOB which uses * MTD_OPS_PLACE_OOB. So We have to implement this function to prohibit * these ioctls too. */ return -EPERM; } static int gpmi_block_markbad(struct mtd_info *mtd, loff_t ofs) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; int block, ret = 0; uint8_t *block_mark; int column, page, status, chipnr; /* Get block number */ block = (int)(ofs >> chip->bbt_erase_shift); if (chip->bbt) chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); /* Do we have a flash based bad block table ? 
*/ if (chip->bbt_options & NAND_BBT_USE_FLASH) ret = nand_update_bbt(mtd, ofs); else { chipnr = (int)(ofs >> chip->chip_shift); chip->select_chip(mtd, chipnr); column = this->swap_block_mark ? mtd->writesize : 0; /* Write the block mark. */ block_mark = this->data_buffer_dma; block_mark[0] = 0; /* bad block marker */ /* Shift to get page */ page = (int)(ofs >> chip->page_shift); chip->cmdfunc(mtd, NAND_CMD_SEQIN, column, page); chip->write_buf(mtd, block_mark, 1); chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); status = chip->waitfunc(mtd, chip); if (status & NAND_STATUS_FAIL) ret = -EIO; chip->select_chip(mtd, -1); } if (!ret) mtd->ecc_stats.badblocks++; return ret; } static int nand_boot_set_geometry(struct gpmi_nand_data *this) { struct boot_rom_geometry *geometry = &this->rom_geometry; /* * Set the boot block stride size. * * In principle, we should be reading this from the OTP bits, since * that's where the ROM is going to get it. In fact, we don't have any * way to read the OTP bits, so we go with the default and hope for the * best. */ geometry->stride_size_in_pages = 64; /* * Set the search area stride exponent. * * In principle, we should be reading this from the OTP bits, since * that's where the ROM is going to get it. In fact, we don't have any * way to read the OTP bits, so we go with the default and hope for the * best. */ geometry->search_area_stride_exponent = 2; return 0; } static const char *fingerprint = "STMP"; static int mx23_check_transcription_stamp(struct gpmi_nand_data *this) { struct boot_rom_geometry *rom_geo = &this->rom_geometry; struct device *dev = this->dev; struct mtd_info *mtd = &this->mtd; struct nand_chip *chip = &this->nand; unsigned int search_area_size_in_strides; unsigned int stride; unsigned int page; uint8_t *buffer = chip->buffers->databuf; int saved_chip_number; int found_an_ncb_fingerprint = false; /* Compute the number of strides in a search area. 
*/ search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; saved_chip_number = this->current_chip; chip->select_chip(mtd, 0); /* * Loop through the first search area, looking for the NCB fingerprint. */ dev_dbg(dev, "Scanning for an NCB fingerprint...\n"); for (stride = 0; stride < search_area_size_in_strides; stride++) { /* Compute the page addresses. */ page = stride * rom_geo->stride_size_in_pages; dev_dbg(dev, "Looking for a fingerprint in page 0x%x\n", page); /* * Read the NCB fingerprint. The fingerprint is four bytes long * and starts in the 12th byte of the page. */ chip->cmdfunc(mtd, NAND_CMD_READ0, 12, page); chip->read_buf(mtd, buffer, strlen(fingerprint)); /* Look for the fingerprint. */ if (!memcmp(buffer, fingerprint, strlen(fingerprint))) { found_an_ncb_fingerprint = true; break; } } chip->select_chip(mtd, saved_chip_number); if (found_an_ncb_fingerprint) dev_dbg(dev, "\tFound a fingerprint\n"); else dev_dbg(dev, "\tNo fingerprint found\n"); return found_an_ncb_fingerprint; } /* Writes a transcription stamp. */ static int mx23_write_transcription_stamp(struct gpmi_nand_data *this) { struct device *dev = this->dev; struct boot_rom_geometry *rom_geo = &this->rom_geometry; struct mtd_info *mtd = &this->mtd; struct nand_chip *chip = &this->nand; unsigned int block_size_in_pages; unsigned int search_area_size_in_strides; unsigned int search_area_size_in_pages; unsigned int search_area_size_in_blocks; unsigned int block; unsigned int stride; unsigned int page; uint8_t *buffer = chip->buffers->databuf; int saved_chip_number; int status; /* Compute the search area geometry. 
*/ block_size_in_pages = mtd->erasesize / mtd->writesize; search_area_size_in_strides = 1 << rom_geo->search_area_stride_exponent; search_area_size_in_pages = search_area_size_in_strides * rom_geo->stride_size_in_pages; search_area_size_in_blocks = (search_area_size_in_pages + (block_size_in_pages - 1)) / block_size_in_pages; dev_dbg(dev, "Search Area Geometry :\n"); dev_dbg(dev, "\tin Blocks : %u\n", search_area_size_in_blocks); dev_dbg(dev, "\tin Strides: %u\n", search_area_size_in_strides); dev_dbg(dev, "\tin Pages : %u\n", search_area_size_in_pages); /* Select chip 0. */ saved_chip_number = this->current_chip; chip->select_chip(mtd, 0); /* Loop over blocks in the first search area, erasing them. */ dev_dbg(dev, "Erasing the search area...\n"); for (block = 0; block < search_area_size_in_blocks; block++) { /* Compute the page address. */ page = block * block_size_in_pages; /* Erase this block. */ dev_dbg(dev, "\tErasing block 0x%x\n", block); chip->cmdfunc(mtd, NAND_CMD_ERASE1, -1, page); chip->cmdfunc(mtd, NAND_CMD_ERASE2, -1, -1); /* Wait for the erase to finish. */ status = chip->waitfunc(mtd, chip); if (status & NAND_STATUS_FAIL) dev_err(dev, "[%s] Erase failed.\n", __func__); } /* Write the NCB fingerprint into the page buffer. */ memset(buffer, ~0, mtd->writesize); memset(chip->oob_poi, ~0, mtd->oobsize); memcpy(buffer + 12, fingerprint, strlen(fingerprint)); /* Loop through the first search area, writing NCB fingerprints. */ dev_dbg(dev, "Writing NCB fingerprints...\n"); for (stride = 0; stride < search_area_size_in_strides; stride++) { /* Compute the page addresses. */ page = stride * rom_geo->stride_size_in_pages; /* Write the first page of the current stride. */ dev_dbg(dev, "Writing an NCB fingerprint in page 0x%x\n", page); chip->cmdfunc(mtd, NAND_CMD_SEQIN, 0x00, page); chip->ecc.write_page_raw(mtd, chip, buffer, 0); chip->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); /* Wait for the write to finish. 
*/ status = chip->waitfunc(mtd, chip); if (status & NAND_STATUS_FAIL) dev_err(dev, "[%s] Write failed.\n", __func__); } /* Deselect chip 0. */ chip->select_chip(mtd, saved_chip_number); return 0; } static int mx23_boot_init(struct gpmi_nand_data *this) { struct device *dev = this->dev; struct nand_chip *chip = &this->nand; struct mtd_info *mtd = &this->mtd; unsigned int block_count; unsigned int block; int chipnr; int page; loff_t byte; uint8_t block_mark; int ret = 0; /* * If control arrives here, we can't use block mark swapping, which * means we're forced to use transcription. First, scan for the * transcription stamp. If we find it, then we don't have to do * anything -- the block marks are already transcribed. */ if (mx23_check_transcription_stamp(this)) return 0; /* * If control arrives here, we couldn't find a transcription stamp, so * so we presume the block marks are in the conventional location. */ dev_dbg(dev, "Transcribing bad block marks...\n"); /* Compute the number of blocks in the entire medium. */ block_count = chip->chipsize >> chip->phys_erase_shift; /* * Loop over all the blocks in the medium, transcribing block marks as * we go. */ for (block = 0; block < block_count; block++) { /* * Compute the chip, page and byte addresses for this block's * conventional mark. */ chipnr = block >> (chip->chip_shift - chip->phys_erase_shift); page = block << (chip->phys_erase_shift - chip->page_shift); byte = block << chip->phys_erase_shift; /* Send the command to read the conventional block mark. */ chip->select_chip(mtd, chipnr); chip->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); block_mark = chip->read_byte(mtd); chip->select_chip(mtd, -1); /* * Check if the block is marked bad. If so, we need to mark it * again, but this time the result will be a mark in the * location where we transcribe block marks. 
*/ if (block_mark != 0xff) { dev_dbg(dev, "Transcribing mark in block %u\n", block); ret = chip->block_markbad(mtd, byte); if (ret) dev_err(dev, "Failed to mark block bad with " "ret %d\n", ret); } } /* Write the stamp that indicates we've transcribed the block marks. */ mx23_write_transcription_stamp(this); return 0; } static int nand_boot_init(struct gpmi_nand_data *this) { nand_boot_set_geometry(this); /* This is ROM arch-specific initilization before the BBT scanning. */ if (GPMI_IS_MX23(this)) return mx23_boot_init(this); return 0; } static int gpmi_set_geometry(struct gpmi_nand_data *this) { int ret; /* Free the temporary DMA memory for reading ID. */ gpmi_free_dma_buffer(this); /* Set up the NFC geometry which is used by BCH. */ ret = bch_set_geometry(this); if (ret) { pr_err("Error setting BCH geometry : %d\n", ret); return ret; } /* Alloc the new DMA buffers according to the pagesize and oobsize */ return gpmi_alloc_dma_buffer(this); } static int gpmi_pre_bbt_scan(struct gpmi_nand_data *this) { int ret; /* Set up swap_block_mark, must be set before the gpmi_set_geometry() */ if (GPMI_IS_MX23(this)) this->swap_block_mark = false; else this->swap_block_mark = true; /* Set up the medium geometry */ ret = gpmi_set_geometry(this); if (ret) return ret; /* Adjust the ECC strength according to the chip. */ this->nand.ecc.strength = this->bch_geometry.ecc_strength; this->mtd.ecc_strength = this->bch_geometry.ecc_strength; this->mtd.bitflip_threshold = this->bch_geometry.ecc_strength; /* NAND boot init, depends on the gpmi_set_geometry(). */ return nand_boot_init(this); } static int gpmi_scan_bbt(struct mtd_info *mtd) { struct nand_chip *chip = mtd->priv; struct gpmi_nand_data *this = chip->priv; int ret; /* Prepare for the BBT scan. */ ret = gpmi_pre_bbt_scan(this); if (ret) return ret; /* * Can we enable the extra features? such as EDO or Sync mode. * * We do not check the return value now. 
That's means if we fail in * enable the extra features, we still can run in the normal way. */ gpmi_extra_init(this); /* use the default BBT implementation */ return nand_default_bbt(mtd); } static void gpmi_nfc_exit(struct gpmi_nand_data *this) { nand_release(&this->mtd); gpmi_free_dma_buffer(this); } static int gpmi_nfc_init(struct gpmi_nand_data *this) { struct mtd_info *mtd = &this->mtd; struct nand_chip *chip = &this->nand; struct mtd_part_parser_data ppdata = {}; int ret; /* init current chip */ this->current_chip = -1; /* init the MTD data structures */ mtd->priv = chip; mtd->name = "gpmi-nand"; mtd->owner = THIS_MODULE; /* init the nand_chip{}, we don't support a 16-bit NAND Flash bus. */ chip->priv = this; chip->select_chip = gpmi_select_chip; chip->cmd_ctrl = gpmi_cmd_ctrl; chip->dev_ready = gpmi_dev_ready; chip->read_byte = gpmi_read_byte; chip->read_buf = gpmi_read_buf; chip->write_buf = gpmi_write_buf; chip->ecc.read_page = gpmi_ecc_read_page; chip->ecc.write_page = gpmi_ecc_write_page; chip->ecc.read_oob = gpmi_ecc_read_oob; chip->ecc.write_oob = gpmi_ecc_write_oob; chip->scan_bbt = gpmi_scan_bbt; chip->badblock_pattern = &gpmi_bbt_descr; chip->block_markbad = gpmi_block_markbad; chip->options |= NAND_NO_SUBPAGE_WRITE; chip->ecc.mode = NAND_ECC_HW; chip->ecc.size = 1; chip->ecc.strength = 8; chip->ecc.layout = &gpmi_hw_ecclayout; if (of_get_nand_on_flash_bbt(this->dev->of_node)) chip->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; /* Allocate a temporary DMA buffer for reading ID in the nand_scan() */ this->bch_geometry.payload_size = 1024; this->bch_geometry.auxiliary_size = 128; ret = gpmi_alloc_dma_buffer(this); if (ret) goto err_out; ret = nand_scan(mtd, 1); if (ret) { pr_err("Chip scan failed\n"); goto err_out; } ppdata.of_node = this->pdev->dev.of_node; ret = mtd_device_parse_register(mtd, NULL, &ppdata, NULL, 0); if (ret) goto err_out; return 0; err_out: gpmi_nfc_exit(this); return ret; } static const struct platform_device_id gpmi_ids[] 
= { { .name = "imx23-gpmi-nand", .driver_data = IS_MX23, }, { .name = "imx28-gpmi-nand", .driver_data = IS_MX28, }, { .name = "imx6q-gpmi-nand", .driver_data = IS_MX6Q, }, {}, }; static const struct of_device_id gpmi_nand_id_table[] = { { .compatible = "fsl,imx23-gpmi-nand", .data = (void *)&gpmi_ids[IS_MX23] }, { .compatible = "fsl,imx28-gpmi-nand", .data = (void *)&gpmi_ids[IS_MX28] }, { .compatible = "fsl,imx6q-gpmi-nand", .data = (void *)&gpmi_ids[IS_MX6Q] }, {} }; MODULE_DEVICE_TABLE(of, gpmi_nand_id_table); static int gpmi_nand_probe(struct platform_device *pdev) { struct gpmi_nand_data *this; const struct of_device_id *of_id; int ret; of_id = of_match_device(gpmi_nand_id_table, &pdev->dev); if (of_id) { pdev->id_entry = of_id->data; } else { pr_err("Failed to find the right device id.\n"); return -ENOMEM; } this = kzalloc(sizeof(*this), GFP_KERNEL); if (!this) { pr_err("Failed to allocate per-device memory\n"); return -ENOMEM; } platform_set_drvdata(pdev, this); this->pdev = pdev; this->dev = &pdev->dev; ret = acquire_resources(this); if (ret) goto exit_acquire_resources; ret = init_hardware(this); if (ret) goto exit_nfc_init; ret = gpmi_nfc_init(this); if (ret) goto exit_nfc_init; dev_info(this->dev, "driver registered.\n"); return 0; exit_nfc_init: release_resources(this); exit_acquire_resources: platform_set_drvdata(pdev, NULL); dev_err(this->dev, "driver registration failed: %d\n", ret); kfree(this); return ret; } static int gpmi_nand_remove(struct platform_device *pdev) { struct gpmi_nand_data *this = platform_get_drvdata(pdev); gpmi_nfc_exit(this); release_resources(this); platform_set_drvdata(pdev, NULL); kfree(this); return 0; } static struct platform_driver gpmi_nand_driver = { .driver = { .name = "gpmi-nand", .of_match_table = gpmi_nand_id_table, }, .probe = gpmi_nand_probe, .remove = gpmi_nand_remove, .id_table = gpmi_ids, }; module_platform_driver(gpmi_nand_driver); MODULE_AUTHOR("Freescale Semiconductor, Inc."); MODULE_DESCRIPTION("i.MX GPMI 
NAND Flash Controller Driver"); MODULE_LICENSE("GPL");
gpl-2.0
omegamoon/rockchip-rk3188-mk908
drivers/usb/gadget/fusb300_udc.c
2507
42556
/* * Fusb300 UDC (USB gadget) * * Copyright (C) 2010 Faraday Technology Corp. * * Author : Yuan-hsin Chen <yhchen@faraday-tech.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include "fusb300_udc.h" MODULE_DESCRIPTION("FUSB300 USB gadget driver"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Yuan Hsin Chen <yhchen@faraday-tech.com>"); MODULE_ALIAS("platform:fusb300_udc"); #define DRIVER_VERSION "20 October 2010" static const char udc_name[] = "fusb300_udc"; static const char * const fusb300_ep_name[] = { "ep0", "ep1", "ep2", "ep3", "ep4", "ep5", "ep6", "ep7", "ep8", "ep9", "ep10", "ep11", "ep12", "ep13", "ep14", "ep15" }; static void done(struct fusb300_ep *ep, struct fusb300_request *req, int status); static void fusb300_enable_bit(struct fusb300 *fusb300, u32 offset, u32 value) { u32 reg = ioread32(fusb300->reg + offset); reg |= value; iowrite32(reg, fusb300->reg + offset); } static void fusb300_disable_bit(struct fusb300 *fusb300, u32 offset, u32 value) { u32 reg = ioread32(fusb300->reg + offset); reg &= ~value; iowrite32(reg, fusb300->reg + offset); } static void fusb300_ep_setting(struct fusb300_ep *ep, struct fusb300_ep_info info) { ep->epnum = info.epnum; ep->type = 
info.type; } static int fusb300_ep_release(struct fusb300_ep *ep) { if (!ep->epnum) return 0; ep->epnum = 0; ep->stall = 0; ep->wedged = 0; return 0; } static void fusb300_set_fifo_entry(struct fusb300 *fusb300, u32 ep) { u32 val = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); val &= ~FUSB300_EPSET1_FIFOENTRY_MSK; val |= FUSB300_EPSET1_FIFOENTRY(FUSB300_FIFO_ENTRY_NUM); iowrite32(val, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); } static void fusb300_set_start_entry(struct fusb300 *fusb300, u8 ep) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); u32 start_entry = fusb300->fifo_entry_num * FUSB300_FIFO_ENTRY_NUM; reg &= ~FUSB300_EPSET1_START_ENTRY_MSK ; reg |= FUSB300_EPSET1_START_ENTRY(start_entry); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); if (fusb300->fifo_entry_num == FUSB300_MAX_FIFO_ENTRY) { fusb300->fifo_entry_num = 0; fusb300->addrofs = 0; pr_err("fifo entry is over the maximum number!\n"); } else fusb300->fifo_entry_num++; } /* set fusb300_set_start_entry first before fusb300_set_epaddrofs */ static void fusb300_set_epaddrofs(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); reg &= ~FUSB300_EPSET2_ADDROFS_MSK; reg |= FUSB300_EPSET2_ADDROFS(fusb300->addrofs); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); fusb300->addrofs += (info.maxpacket + 7) / 8 * FUSB300_FIFO_ENTRY_NUM; } static void ep_fifo_setting(struct fusb300 *fusb300, struct fusb300_ep_info info) { fusb300_set_fifo_entry(fusb300, info.epnum); fusb300_set_start_entry(fusb300, info.epnum); fusb300_set_epaddrofs(fusb300, info); } static void fusb300_set_eptype(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_TYPE_MSK; reg |= FUSB300_EPSET1_TYPE(info.type); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void fusb300_set_epdir(struct fusb300 
*fusb300, struct fusb300_ep_info info) { u32 reg; if (!info.dir_in) return; reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_DIR_MSK; reg |= FUSB300_EPSET1_DIRIN; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void fusb300_set_ep_active(struct fusb300 *fusb300, u8 ep) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); reg |= FUSB300_EPSET1_ACTEN; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(ep)); } static void fusb300_set_epmps(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); reg &= ~FUSB300_EPSET2_MPS_MSK; reg |= FUSB300_EPSET2_MPS(info.maxpacket); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET2(info.epnum)); } static void fusb300_set_interval(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_INTERVAL(0x7); reg |= FUSB300_EPSET1_INTERVAL(info.interval); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void fusb300_set_bwnum(struct fusb300 *fusb300, struct fusb300_ep_info info) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); reg &= ~FUSB300_EPSET1_BWNUM(0x3); reg |= FUSB300_EPSET1_BWNUM(info.bw_num); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET1(info.epnum)); } static void set_ep_reg(struct fusb300 *fusb300, struct fusb300_ep_info info) { fusb300_set_eptype(fusb300, info); fusb300_set_epdir(fusb300, info); fusb300_set_epmps(fusb300, info); if (info.interval) fusb300_set_interval(fusb300, info); if (info.bw_num) fusb300_set_bwnum(fusb300, info); fusb300_set_ep_active(fusb300, info.epnum); } static int config_ep(struct fusb300_ep *ep, const struct usb_endpoint_descriptor *desc) { struct fusb300 *fusb300 = ep->fusb300; struct fusb300_ep_info info; ep->desc = desc; info.interval = 0; info.addrofs = 0; info.bw_num = 0; info.type = desc->bmAttributes & 
USB_ENDPOINT_XFERTYPE_MASK; info.dir_in = (desc->bEndpointAddress & USB_ENDPOINT_DIR_MASK) ? 1 : 0; info.maxpacket = le16_to_cpu(desc->wMaxPacketSize); info.epnum = desc->bEndpointAddress & USB_ENDPOINT_NUMBER_MASK; if ((info.type == USB_ENDPOINT_XFER_INT) || (info.type == USB_ENDPOINT_XFER_ISOC)) { info.interval = desc->bInterval; if (info.type == USB_ENDPOINT_XFER_ISOC) info.bw_num = ((desc->wMaxPacketSize & 0x1800) >> 11); } ep_fifo_setting(fusb300, info); set_ep_reg(fusb300, info); fusb300_ep_setting(ep, info); fusb300->ep[info.epnum] = ep; return 0; } static int fusb300_enable(struct usb_ep *_ep, const struct usb_endpoint_descriptor *desc) { struct fusb300_ep *ep; ep = container_of(_ep, struct fusb300_ep, ep); if (ep->fusb300->reenum) { ep->fusb300->fifo_entry_num = 0; ep->fusb300->addrofs = 0; ep->fusb300->reenum = 0; } return config_ep(ep, desc); } static int fusb300_disable(struct usb_ep *_ep) { struct fusb300_ep *ep; struct fusb300_request *req; unsigned long flags; ep = container_of(_ep, struct fusb300_ep, ep); BUG_ON(!ep); while (!list_empty(&ep->queue)) { req = list_entry(ep->queue.next, struct fusb300_request, queue); spin_lock_irqsave(&ep->fusb300->lock, flags); done(ep, req, -ECONNRESET); spin_unlock_irqrestore(&ep->fusb300->lock, flags); } return fusb300_ep_release(ep); } static struct usb_request *fusb300_alloc_request(struct usb_ep *_ep, gfp_t gfp_flags) { struct fusb300_request *req; req = kzalloc(sizeof(struct fusb300_request), gfp_flags); if (!req) return NULL; INIT_LIST_HEAD(&req->queue); return &req->req; } static void fusb300_free_request(struct usb_ep *_ep, struct usb_request *_req) { struct fusb300_request *req; req = container_of(_req, struct fusb300_request, req); kfree(req); } static int enable_fifo_int(struct fusb300_ep *ep) { struct fusb300 *fusb300 = ep->fusb300; if (ep->epnum) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_IGER0, FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum)); } else { pr_err("can't enable_fifo_int ep0\n"); return -EINVAL; } 
return 0; } static int disable_fifo_int(struct fusb300_ep *ep) { struct fusb300 *fusb300 = ep->fusb300; if (ep->epnum) { fusb300_disable_bit(fusb300, FUSB300_OFFSET_IGER0, FUSB300_IGER0_EEPn_FIFO_INT(ep->epnum)); } else { pr_err("can't disable_fifo_int ep0\n"); return -EINVAL; } return 0; } static void fusb300_set_cxlen(struct fusb300 *fusb300, u32 length) { u32 reg; reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR); reg &= ~FUSB300_CSR_LEN_MSK; reg |= FUSB300_CSR_LEN(length); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_CSR); } /* write data to cx fifo */ static void fusb300_wrcxf(struct fusb300_ep *ep, struct fusb300_request *req) { int i = 0; u8 *tmp; u32 data; struct fusb300 *fusb300 = ep->fusb300; u32 length = req->req.length - req->req.actual; tmp = req->req.buf + req->req.actual; if (length > SS_CTL_MAX_PACKET_SIZE) { fusb300_set_cxlen(fusb300, SS_CTL_MAX_PACKET_SIZE); for (i = (SS_CTL_MAX_PACKET_SIZE >> 2); i > 0; i--) { data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | *(tmp + 3) << 24; iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); tmp += 4; } req->req.actual += SS_CTL_MAX_PACKET_SIZE; } else { /* length is less than max packet size */ fusb300_set_cxlen(fusb300, length); for (i = length >> 2; i > 0; i--) { data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | *(tmp + 3) << 24; printk(KERN_DEBUG " 0x%x\n", data); iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); tmp = tmp + 4; } switch (length % 4) { case 1: data = *tmp; printk(KERN_DEBUG " 0x%x\n", data); iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); break; case 2: data = *tmp | *(tmp + 1) << 8; printk(KERN_DEBUG " 0x%x\n", data); iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); break; case 3: data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16; printk(KERN_DEBUG " 0x%x\n", data); iowrite32(data, fusb300->reg + FUSB300_OFFSET_CXPORT); break; default: break; } req->req.actual += length; } } static void fusb300_set_epnstall(struct fusb300 *fusb300, u8 ep) { fusb300_enable_bit(fusb300, 
FUSB300_OFFSET_EPSET0(ep), FUSB300_EPSET0_STL); } static void fusb300_clear_epnstall(struct fusb300 *fusb300, u8 ep) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); if (reg & FUSB300_EPSET0_STL) { printk(KERN_DEBUG "EP%d stall... Clear!!\n", ep); reg &= ~FUSB300_EPSET0_STL; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); } } static void ep0_queue(struct fusb300_ep *ep, struct fusb300_request *req) { if (ep->fusb300->ep0_dir) { /* if IN */ if (req->req.length) { fusb300_wrcxf(ep, req); } else printk(KERN_DEBUG "%s : req->req.length = 0x%x\n", __func__, req->req.length); if ((req->req.length == req->req.actual) || (req->req.actual < ep->ep.maxpacket)) done(ep, req, 0); } else { /* OUT */ if (!req->req.length) done(ep, req, 0); else fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER1, FUSB300_IGER1_CX_OUT_INT); } } static int fusb300_queue(struct usb_ep *_ep, struct usb_request *_req, gfp_t gfp_flags) { struct fusb300_ep *ep; struct fusb300_request *req; unsigned long flags; int request = 0; ep = container_of(_ep, struct fusb300_ep, ep); req = container_of(_req, struct fusb300_request, req); if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN) return -ESHUTDOWN; spin_lock_irqsave(&ep->fusb300->lock, flags); if (list_empty(&ep->queue)) request = 1; list_add_tail(&req->queue, &ep->queue); req->req.actual = 0; req->req.status = -EINPROGRESS; if (ep->desc == NULL) /* ep0 */ ep0_queue(ep, req); else if (request && !ep->stall) enable_fifo_int(ep); spin_unlock_irqrestore(&ep->fusb300->lock, flags); return 0; } static int fusb300_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct fusb300_ep *ep; struct fusb300_request *req; unsigned long flags; ep = container_of(_ep, struct fusb300_ep, ep); req = container_of(_req, struct fusb300_request, req); spin_lock_irqsave(&ep->fusb300->lock, flags); if (!list_empty(&ep->queue)) done(ep, req, -ECONNRESET); spin_unlock_irqrestore(&ep->fusb300->lock, flags); return 0; } static int 
fusb300_set_halt_and_wedge(struct usb_ep *_ep, int value, int wedge) { struct fusb300_ep *ep; struct fusb300 *fusb300; unsigned long flags; int ret = 0; ep = container_of(_ep, struct fusb300_ep, ep); fusb300 = ep->fusb300; spin_lock_irqsave(&ep->fusb300->lock, flags); if (!list_empty(&ep->queue)) { ret = -EAGAIN; goto out; } if (value) { fusb300_set_epnstall(fusb300, ep->epnum); ep->stall = 1; if (wedge) ep->wedged = 1; } else { fusb300_clear_epnstall(fusb300, ep->epnum); ep->stall = 0; ep->wedged = 0; } out: spin_unlock_irqrestore(&ep->fusb300->lock, flags); return ret; } static int fusb300_set_halt(struct usb_ep *_ep, int value) { return fusb300_set_halt_and_wedge(_ep, value, 0); } static int fusb300_set_wedge(struct usb_ep *_ep) { return fusb300_set_halt_and_wedge(_ep, 1, 1); } static void fusb300_fifo_flush(struct usb_ep *_ep) { } static struct usb_ep_ops fusb300_ep_ops = { .enable = fusb300_enable, .disable = fusb300_disable, .alloc_request = fusb300_alloc_request, .free_request = fusb300_free_request, .queue = fusb300_queue, .dequeue = fusb300_dequeue, .set_halt = fusb300_set_halt, .fifo_flush = fusb300_fifo_flush, .set_wedge = fusb300_set_wedge, }; /*****************************************************************************/ static void fusb300_clear_int(struct fusb300 *fusb300, u32 offset, u32 value) { iowrite32(value, fusb300->reg + offset); } static void fusb300_reset(void) { } static void fusb300_set_cxstall(struct fusb300 *fusb300) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR, FUSB300_CSR_STL); } static void fusb300_set_cxdone(struct fusb300 *fusb300) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_CSR, FUSB300_CSR_DONE); } /* read data from cx fifo */ void fusb300_rdcxf(struct fusb300 *fusb300, u8 *buffer, u32 length) { int i = 0; u8 *tmp; u32 data; tmp = buffer; for (i = (length >> 2); i > 0; i--) { data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); printk(KERN_DEBUG " 0x%x\n", data); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; *(tmp + 
2) = (data >> 16) & 0xFF; *(tmp + 3) = (data >> 24) & 0xFF; tmp = tmp + 4; } switch (length % 4) { case 1: data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); printk(KERN_DEBUG " 0x%x\n", data); *tmp = data & 0xFF; break; case 2: data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); printk(KERN_DEBUG " 0x%x\n", data); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; break; case 3: data = ioread32(fusb300->reg + FUSB300_OFFSET_CXPORT); printk(KERN_DEBUG " 0x%x\n", data); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; *(tmp + 2) = (data >> 16) & 0xFF; break; default: break; } } #if 0 static void fusb300_dbg_fifo(struct fusb300_ep *ep, u8 entry, u16 length) { u32 reg; u32 i = 0; u32 j = 0; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM); reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) | FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG); reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) | FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG); iowrite32(reg, ep->fusb300->reg + FUSB300_OFFSET_GTM); for (i = 0; i < (length >> 2); i++) { if (i * 4 == 1024) break; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i * 4); printk(KERN_DEBUG" 0x%-8x", reg); j++; if ((j % 4) == 0) printk(KERN_DEBUG "\n"); } if (length % 4) { reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i * 4); printk(KERN_DEBUG " 0x%x\n", reg); } if ((j % 4) != 0) printk(KERN_DEBUG "\n"); fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM, FUSB300_GTM_TST_FIFO_DEG); } static void fusb300_cmp_dbg_fifo(struct fusb300_ep *ep, u8 entry, u16 length, u8 *golden) { u32 reg; u32 i = 0; u32 golden_value; u8 *tmp; tmp = golden; printk(KERN_DEBUG "fusb300_cmp_dbg_fifo (entry %d) : start\n", entry); reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_GTM); reg &= ~(FUSB300_GTM_TST_EP_ENTRY(0xF) | FUSB300_GTM_TST_EP_NUM(0xF) | FUSB300_GTM_TST_FIFO_DEG); reg |= (FUSB300_GTM_TST_EP_ENTRY(entry) | FUSB300_GTM_TST_EP_NUM(ep->epnum) | FUSB300_GTM_TST_FIFO_DEG); iowrite32(reg, 
ep->fusb300->reg + FUSB300_OFFSET_GTM); for (i = 0; i < (length >> 2); i++) { if (i * 4 == 1024) break; golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | *(tmp + 3) << 24; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4); if (reg != golden_value) { printk(KERN_DEBUG "0x%x : ", (u32)(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4)); printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n", golden_value, reg); } tmp += 4; } switch (length % 4) { case 1: golden_value = *tmp; case 2: golden_value = *tmp | *(tmp + 1) << 8; case 3: golden_value = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16; default: break; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4); if (reg != golden_value) { printk(KERN_DEBUG "0x%x:", (u32)(ep->fusb300->reg + FUSB300_OFFSET_BUFDBG_START + i*4)); printk(KERN_DEBUG " golden = 0x%x, reg = 0x%x\n", golden_value, reg); } } printk(KERN_DEBUG "fusb300_cmp_dbg_fifo : end\n"); fusb300_disable_bit(ep->fusb300, FUSB300_OFFSET_GTM, FUSB300_GTM_TST_FIFO_DEG); } #endif static void fusb300_rdfifo(struct fusb300_ep *ep, struct fusb300_request *req, u32 length) { int i = 0; u8 *tmp; u32 data, reg; struct fusb300 *fusb300 = ep->fusb300; tmp = req->req.buf + req->req.actual; req->req.actual += length; if (req->req.actual > req->req.length) printk(KERN_DEBUG "req->req.actual > req->req.length\n"); for (i = (length >> 2); i > 0; i--) { data = ioread32(fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; *(tmp + 2) = (data >> 16) & 0xFF; *(tmp + 3) = (data >> 24) & 0xFF; tmp = tmp + 4; } switch (length % 4) { case 1: data = ioread32(fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); *tmp = data & 0xFF; break; case 2: data = ioread32(fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; break; case 3: data = ioread32(fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); *tmp = data & 0xFF; *(tmp + 1) = (data >> 8) & 0xFF; *(tmp + 2) 
= (data >> 16) & 0xFF; break; default: break; } do { reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1); reg &= FUSB300_IGR1_SYNF0_EMPTY_INT; if (i) printk(KERN_INFO "sync fifo is not empty!\n"); i++; } while (!reg); } /* write data to fifo */ static void fusb300_wrfifo(struct fusb300_ep *ep, struct fusb300_request *req) { int i = 0; u8 *tmp; u32 data, reg; struct fusb300 *fusb300 = ep->fusb300; tmp = req->req.buf; req->req.actual = req->req.length; for (i = (req->req.length >> 2); i > 0; i--) { data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16 | *(tmp + 3) << 24; iowrite32(data, fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); tmp += 4; } switch (req->req.length % 4) { case 1: data = *tmp; iowrite32(data, fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); break; case 2: data = *tmp | *(tmp + 1) << 8; iowrite32(data, fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); break; case 3: data = *tmp | *(tmp + 1) << 8 | *(tmp + 2) << 16; iowrite32(data, fusb300->reg + FUSB300_OFFSET_EPPORT(ep->epnum)); break; default: break; } do { reg = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1); reg &= FUSB300_IGR1_SYNF0_EMPTY_INT; if (i) printk(KERN_INFO"sync fifo is not empty!\n"); i++; } while (!reg); } static u8 fusb300_get_epnstall(struct fusb300 *fusb300, u8 ep) { u8 value; u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET0(ep)); value = reg & FUSB300_EPSET0_STL; return value; } static u8 fusb300_get_cxstall(struct fusb300 *fusb300) { u8 value; u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_CSR); value = (reg & FUSB300_CSR_STL) >> 1; return value; } static void request_error(struct fusb300 *fusb300) { fusb300_set_cxstall(fusb300); printk(KERN_DEBUG "request error!!\n"); } static void get_status(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) __releases(fusb300->lock) __acquires(fusb300->lock) { u8 ep; u16 status = 0; u16 w_index = ctrl->wIndex; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: status = 1 << USB_DEVICE_SELF_POWERED; break; case 
USB_RECIP_INTERFACE: status = 0; break; case USB_RECIP_ENDPOINT: ep = w_index & USB_ENDPOINT_NUMBER_MASK; if (ep) { if (fusb300_get_epnstall(fusb300, ep)) status = 1 << USB_ENDPOINT_HALT; } else { if (fusb300_get_cxstall(fusb300)) status = 0; } break; default: request_error(fusb300); return; /* exit */ } fusb300->ep0_data = cpu_to_le16(status); fusb300->ep0_req->buf = &fusb300->ep0_data; fusb300->ep0_req->length = 2; spin_unlock(&fusb300->lock); fusb300_queue(fusb300->gadget.ep0, fusb300->ep0_req, GFP_KERNEL); spin_lock(&fusb300->lock); } static void set_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) { u8 ep; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: fusb300_set_cxdone(fusb300); break; case USB_RECIP_INTERFACE: fusb300_set_cxdone(fusb300); break; case USB_RECIP_ENDPOINT: { u16 w_index = le16_to_cpu(ctrl->wIndex); ep = w_index & USB_ENDPOINT_NUMBER_MASK; if (ep) fusb300_set_epnstall(fusb300, ep); else fusb300_set_cxstall(fusb300); fusb300_set_cxdone(fusb300); } break; default: request_error(fusb300); break; } } static void fusb300_clear_seqnum(struct fusb300 *fusb300, u8 ep) { fusb300_enable_bit(fusb300, FUSB300_OFFSET_EPSET0(ep), FUSB300_EPSET0_CLRSEQNUM); } static void clear_feature(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) { struct fusb300_ep *ep = fusb300->ep[ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK]; switch (ctrl->bRequestType & USB_RECIP_MASK) { case USB_RECIP_DEVICE: fusb300_set_cxdone(fusb300); break; case USB_RECIP_INTERFACE: fusb300_set_cxdone(fusb300); break; case USB_RECIP_ENDPOINT: if (ctrl->wIndex & USB_ENDPOINT_NUMBER_MASK) { if (ep->wedged) { fusb300_set_cxdone(fusb300); break; } if (ep->stall) { ep->stall = 0; fusb300_clear_seqnum(fusb300, ep->epnum); fusb300_clear_epnstall(fusb300, ep->epnum); if (!list_empty(&ep->queue)) enable_fifo_int(ep); } } fusb300_set_cxdone(fusb300); break; default: request_error(fusb300); break; } } static void fusb300_set_dev_addr(struct fusb300 *fusb300, u16 addr) { 
u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_DAR); reg &= ~FUSB300_DAR_DRVADDR_MSK; reg |= FUSB300_DAR_DRVADDR(addr); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_DAR); } static void set_address(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) { if (ctrl->wValue >= 0x0100) request_error(fusb300); else { fusb300_set_dev_addr(fusb300, ctrl->wValue); fusb300_set_cxdone(fusb300); } } #define UVC_COPY_DESCRIPTORS(mem, src) \ do { \ const struct usb_descriptor_header * const *__src; \ for (__src = src; *__src; ++__src) { \ memcpy(mem, *__src, (*__src)->bLength); \ mem += (*__src)->bLength; \ } \ } while (0) static void fusb300_ep0_complete(struct usb_ep *ep, struct usb_request *req) { } static int setup_packet(struct fusb300 *fusb300, struct usb_ctrlrequest *ctrl) { u8 *p = (u8 *)ctrl; u8 ret = 0; u8 i = 0; fusb300_rdcxf(fusb300, p, 8); fusb300->ep0_dir = ctrl->bRequestType & USB_DIR_IN; fusb300->ep0_length = ctrl->wLength; /* check request */ if ((ctrl->bRequestType & USB_TYPE_MASK) == USB_TYPE_STANDARD) { switch (ctrl->bRequest) { case USB_REQ_GET_STATUS: get_status(fusb300, ctrl); break; case USB_REQ_CLEAR_FEATURE: clear_feature(fusb300, ctrl); break; case USB_REQ_SET_FEATURE: set_feature(fusb300, ctrl); break; case USB_REQ_SET_ADDRESS: set_address(fusb300, ctrl); break; case USB_REQ_SET_CONFIGURATION: fusb300_enable_bit(fusb300, FUSB300_OFFSET_DAR, FUSB300_DAR_SETCONFG); /* clear sequence number */ for (i = 1; i <= FUSB300_MAX_NUM_EP; i++) fusb300_clear_seqnum(fusb300, i); fusb300->reenum = 1; ret = 1; break; default: ret = 1; break; } } else ret = 1; return ret; } static void fusb300_set_ep_bycnt(struct fusb300_ep *ep, u32 bycnt) { struct fusb300 *fusb300 = ep->fusb300; u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum)); reg &= ~FUSB300_FFR_BYCNT; reg |= bycnt & FUSB300_FFR_BYCNT; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum)); } static void done(struct fusb300_ep *ep, struct fusb300_request *req, int status) { 
list_del_init(&req->queue); /* don't modify queue heads during completion callback */ if (ep->fusb300->gadget.speed == USB_SPEED_UNKNOWN) req->req.status = -ESHUTDOWN; else req->req.status = status; spin_unlock(&ep->fusb300->lock); req->req.complete(&ep->ep, &req->req); spin_lock(&ep->fusb300->lock); if (ep->epnum) { disable_fifo_int(ep); if (!list_empty(&ep->queue)) enable_fifo_int(ep); } else fusb300_set_cxdone(ep->fusb300); } void fusb300_fill_idma_prdtbl(struct fusb300_ep *ep, struct fusb300_request *req) { u32 value; u32 reg; /* wait SW owner */ do { reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum)); reg &= FUSB300_EPPRD0_H; } while (reg); iowrite32((u32) req->req.buf, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W1(ep->epnum)); value = FUSB300_EPPRD0_BTC(req->req.length) | FUSB300_EPPRD0_H | FUSB300_EPPRD0_F | FUSB300_EPPRD0_L | FUSB300_EPPRD0_I; iowrite32(value, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W0(ep->epnum)); iowrite32(0x0, ep->fusb300->reg + FUSB300_OFFSET_EPPRD_W2(ep->epnum)); fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_EPPRDRDY, FUSB300_EPPRDR_EP_PRD_RDY(ep->epnum)); } static void fusb300_wait_idma_finished(struct fusb300_ep *ep) { u32 reg; do { reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR1); if ((reg & FUSB300_IGR1_VBUS_CHG_INT) || (reg & FUSB300_IGR1_WARM_RST_INT) || (reg & FUSB300_IGR1_HOT_RST_INT) || (reg & FUSB300_IGR1_USBRST_INT) ) goto IDMA_RESET; reg = ioread32(ep->fusb300->reg + FUSB300_OFFSET_IGR0); reg &= FUSB300_IGR0_EPn_PRD_INT(ep->epnum); } while (!reg); fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGR0, FUSB300_IGR0_EPn_PRD_INT(ep->epnum)); IDMA_RESET: fusb300_clear_int(ep->fusb300, FUSB300_OFFSET_IGER0, FUSB300_IGER0_EEPn_PRD_INT(ep->epnum)); } static void fusb300_set_idma(struct fusb300_ep *ep, struct fusb300_request *req) { dma_addr_t d; u8 *tmp = NULL; d = dma_map_single(NULL, req->req.buf, req->req.length, DMA_TO_DEVICE); if (dma_mapping_error(NULL, d)) { kfree(req->req.buf); printk(KERN_DEBUG 
"dma_mapping_error\n"); } dma_sync_single_for_device(NULL, d, req->req.length, DMA_TO_DEVICE); fusb300_enable_bit(ep->fusb300, FUSB300_OFFSET_IGER0, FUSB300_IGER0_EEPn_PRD_INT(ep->epnum)); tmp = req->req.buf; req->req.buf = (u8 *)d; fusb300_fill_idma_prdtbl(ep, req); /* check idma is done */ fusb300_wait_idma_finished(ep); req->req.buf = tmp; if (d) dma_unmap_single(NULL, d, req->req.length, DMA_TO_DEVICE); } static void in_ep_fifo_handler(struct fusb300_ep *ep) { struct fusb300_request *req = list_entry(ep->queue.next, struct fusb300_request, queue); if (req->req.length) { #if 0 fusb300_set_ep_bycnt(ep, req->req.length); fusb300_wrfifo(ep, req); #else fusb300_set_idma(ep, req); #endif } done(ep, req, 0); } static void out_ep_fifo_handler(struct fusb300_ep *ep) { struct fusb300 *fusb300 = ep->fusb300; struct fusb300_request *req = list_entry(ep->queue.next, struct fusb300_request, queue); u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPFFR(ep->epnum)); u32 length = reg & FUSB300_FFR_BYCNT; fusb300_rdfifo(ep, req, length); /* finish out transfer */ if ((req->req.length == req->req.actual) || (length < ep->ep.maxpacket)) done(ep, req, 0); } static void check_device_mode(struct fusb300 *fusb300) { u32 reg = ioread32(fusb300->reg + FUSB300_OFFSET_GCR); switch (reg & FUSB300_GCR_DEVEN_MSK) { case FUSB300_GCR_DEVEN_SS: fusb300->gadget.speed = USB_SPEED_SUPER; break; case FUSB300_GCR_DEVEN_HS: fusb300->gadget.speed = USB_SPEED_HIGH; break; case FUSB300_GCR_DEVEN_FS: fusb300->gadget.speed = USB_SPEED_FULL; break; default: fusb300->gadget.speed = USB_SPEED_UNKNOWN; break; } printk(KERN_INFO "dev_mode = %d\n", (reg & FUSB300_GCR_DEVEN_MSK)); } static void fusb300_ep0out(struct fusb300 *fusb300) { struct fusb300_ep *ep = fusb300->ep[0]; u32 reg; if (!list_empty(&ep->queue)) { struct fusb300_request *req; req = list_first_entry(&ep->queue, struct fusb300_request, queue); if (req->req.length) fusb300_rdcxf(ep->fusb300, req->req.buf, req->req.length); done(ep, req, 0); reg = 
ioread32(fusb300->reg + FUSB300_OFFSET_IGER1); reg &= ~FUSB300_IGER1_CX_OUT_INT; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_IGER1); } else pr_err("%s : empty queue\n", __func__); } static void fusb300_ep0in(struct fusb300 *fusb300) { struct fusb300_request *req; struct fusb300_ep *ep = fusb300->ep[0]; if ((!list_empty(&ep->queue)) && (fusb300->ep0_dir)) { req = list_entry(ep->queue.next, struct fusb300_request, queue); if (req->req.length) fusb300_wrcxf(ep, req); if ((req->req.length - req->req.actual) < ep->ep.maxpacket) done(ep, req, 0); } else fusb300_set_cxdone(fusb300); } static void fusb300_grp2_handler(void) { } static void fusb300_grp3_handler(void) { } static void fusb300_grp4_handler(void) { } static void fusb300_grp5_handler(void) { } static irqreturn_t fusb300_irq(int irq, void *_fusb300) { struct fusb300 *fusb300 = _fusb300; u32 int_grp1 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR1); u32 int_grp1_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER1); u32 int_grp0 = ioread32(fusb300->reg + FUSB300_OFFSET_IGR0); u32 int_grp0_en = ioread32(fusb300->reg + FUSB300_OFFSET_IGER0); struct usb_ctrlrequest ctrl; u8 in; u32 reg; int i; spin_lock(&fusb300->lock); int_grp1 &= int_grp1_en; int_grp0 &= int_grp0_en; if (int_grp1 & FUSB300_IGR1_WARM_RST_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_WARM_RST_INT); printk(KERN_INFO"fusb300_warmreset\n"); fusb300_reset(); } if (int_grp1 & FUSB300_IGR1_HOT_RST_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_HOT_RST_INT); printk(KERN_INFO"fusb300_hotreset\n"); fusb300_reset(); } if (int_grp1 & FUSB300_IGR1_USBRST_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_USBRST_INT); fusb300_reset(); } /* COMABT_INT has a highest priority */ if (int_grp1 & FUSB300_IGR1_CX_COMABT_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_CX_COMABT_INT); printk(KERN_INFO"fusb300_ep0abt\n"); } if (int_grp1 & FUSB300_IGR1_VBUS_CHG_INT) { fusb300_clear_int(fusb300, 
FUSB300_OFFSET_IGR1, FUSB300_IGR1_VBUS_CHG_INT); printk(KERN_INFO"fusb300_vbus_change\n"); } if (int_grp1 & FUSB300_IGR1_U3_EXIT_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U3_EXIT_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U2_EXIT_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U2_EXIT_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U1_EXIT_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U1_EXIT_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U2_ENTRY_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U2_ENTRY_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U1_ENTRY_FAIL_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U1_ENTRY_FAIL_INT); } if (int_grp1 & FUSB300_IGR1_U3_EXIT_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U3_EXIT_INT); printk(KERN_INFO "FUSB300_IGR1_U3_EXIT_INT\n"); } if (int_grp1 & FUSB300_IGR1_U2_EXIT_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U2_EXIT_INT); printk(KERN_INFO "FUSB300_IGR1_U2_EXIT_INT\n"); } if (int_grp1 & FUSB300_IGR1_U1_EXIT_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U1_EXIT_INT); printk(KERN_INFO "FUSB300_IGR1_U1_EXIT_INT\n"); } if (int_grp1 & FUSB300_IGR1_U3_ENTRY_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U3_ENTRY_INT); printk(KERN_INFO "FUSB300_IGR1_U3_ENTRY_INT\n"); fusb300_enable_bit(fusb300, FUSB300_OFFSET_SSCR1, FUSB300_SSCR1_GO_U3_DONE); } if (int_grp1 & FUSB300_IGR1_U2_ENTRY_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U2_ENTRY_INT); printk(KERN_INFO "FUSB300_IGR1_U2_ENTRY_INT\n"); } if (int_grp1 & FUSB300_IGR1_U1_ENTRY_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_U1_ENTRY_INT); printk(KERN_INFO "FUSB300_IGR1_U1_ENTRY_INT\n"); } if (int_grp1 & FUSB300_IGR1_RESM_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_RESM_INT); printk(KERN_INFO "fusb300_resume\n"); } if 
(int_grp1 & FUSB300_IGR1_SUSP_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_SUSP_INT); printk(KERN_INFO "fusb300_suspend\n"); } if (int_grp1 & FUSB300_IGR1_HS_LPM_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_HS_LPM_INT); printk(KERN_INFO "fusb300_HS_LPM_INT\n"); } if (int_grp1 & FUSB300_IGR1_DEV_MODE_CHG_INT) { fusb300_clear_int(fusb300, FUSB300_OFFSET_IGR1, FUSB300_IGR1_DEV_MODE_CHG_INT); check_device_mode(fusb300); } if (int_grp1 & FUSB300_IGR1_CX_COMFAIL_INT) { fusb300_set_cxstall(fusb300); printk(KERN_INFO "fusb300_ep0fail\n"); } if (int_grp1 & FUSB300_IGR1_CX_SETUP_INT) { printk(KERN_INFO "fusb300_ep0setup\n"); if (setup_packet(fusb300, &ctrl)) { spin_unlock(&fusb300->lock); if (fusb300->driver->setup(&fusb300->gadget, &ctrl) < 0) fusb300_set_cxstall(fusb300); spin_lock(&fusb300->lock); } } if (int_grp1 & FUSB300_IGR1_CX_CMDEND_INT) printk(KERN_INFO "fusb300_cmdend\n"); if (int_grp1 & FUSB300_IGR1_CX_OUT_INT) { printk(KERN_INFO "fusb300_cxout\n"); fusb300_ep0out(fusb300); } if (int_grp1 & FUSB300_IGR1_CX_IN_INT) { printk(KERN_INFO "fusb300_cxin\n"); fusb300_ep0in(fusb300); } if (int_grp1 & FUSB300_IGR1_INTGRP5) fusb300_grp5_handler(); if (int_grp1 & FUSB300_IGR1_INTGRP4) fusb300_grp4_handler(); if (int_grp1 & FUSB300_IGR1_INTGRP3) fusb300_grp3_handler(); if (int_grp1 & FUSB300_IGR1_INTGRP2) fusb300_grp2_handler(); if (int_grp0) { for (i = 1; i < FUSB300_MAX_NUM_EP; i++) { if (int_grp0 & FUSB300_IGR0_EPn_FIFO_INT(i)) { reg = ioread32(fusb300->reg + FUSB300_OFFSET_EPSET1(i)); in = (reg & FUSB300_EPSET1_DIRIN) ? 
1 : 0; if (in) in_ep_fifo_handler(fusb300->ep[i]); else out_ep_fifo_handler(fusb300->ep[i]); } } } spin_unlock(&fusb300->lock); return IRQ_HANDLED; } static void fusb300_set_u2_timeout(struct fusb300 *fusb300, u32 time) { u32 reg; reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT); reg &= ~0xff; reg |= FUSB300_SSCR2_U2TIMEOUT(time); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT); } static void fusb300_set_u1_timeout(struct fusb300 *fusb300, u32 time) { u32 reg; reg = ioread32(fusb300->reg + FUSB300_OFFSET_TT); reg &= ~(0xff << 8); reg |= FUSB300_SSCR2_U1TIMEOUT(time); iowrite32(reg, fusb300->reg + FUSB300_OFFSET_TT); } static void init_controller(struct fusb300 *fusb300) { u32 reg; u32 mask = 0; u32 val = 0; /* split on */ mask = val = FUSB300_AHBBCR_S0_SPLIT_ON | FUSB300_AHBBCR_S1_SPLIT_ON; reg = ioread32(fusb300->reg + FUSB300_OFFSET_AHBCR); reg &= ~mask; reg |= val; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_AHBCR); /* enable high-speed LPM */ mask = val = FUSB300_HSCR_HS_LPM_PERMIT; reg = ioread32(fusb300->reg + FUSB300_OFFSET_HSCR); reg &= ~mask; reg |= val; iowrite32(reg, fusb300->reg + FUSB300_OFFSET_HSCR); /*set u1 u2 timmer*/ fusb300_set_u2_timeout(fusb300, 0xff); fusb300_set_u1_timeout(fusb300, 0xff); /* enable all grp1 interrupt */ iowrite32(0xcfffff9f, fusb300->reg + FUSB300_OFFSET_IGER1); } /*------------------------------------------------------------------------*/ static struct fusb300 *the_controller; int usb_gadget_probe_driver(struct usb_gadget_driver *driver, int (*bind)(struct usb_gadget *)) { struct fusb300 *fusb300 = the_controller; int retval; if (!driver || driver->speed < USB_SPEED_FULL || !bind || !driver->setup) return -EINVAL; if (!fusb300) return -ENODEV; if (fusb300->driver) return -EBUSY; /* hook up the driver */ driver->driver.bus = NULL; fusb300->driver = driver; fusb300->gadget.dev.driver = &driver->driver; retval = device_add(&fusb300->gadget.dev); if (retval) { pr_err("device_add error (%d)\n", retval); goto error; } retval = 
bind(&fusb300->gadget); if (retval) { pr_err("bind to driver error (%d)\n", retval); device_del(&fusb300->gadget.dev); goto error; } return 0; error: fusb300->driver = NULL; fusb300->gadget.dev.driver = NULL; return retval; } EXPORT_SYMBOL(usb_gadget_probe_driver); int usb_gadget_unregister_driver(struct usb_gadget_driver *driver) { struct fusb300 *fusb300 = the_controller; if (driver != fusb300->driver || !driver->unbind) return -EINVAL; driver->unbind(&fusb300->gadget); fusb300->gadget.dev.driver = NULL; init_controller(fusb300); device_del(&fusb300->gadget.dev); fusb300->driver = NULL; return 0; } EXPORT_SYMBOL(usb_gadget_unregister_driver); /*--------------------------------------------------------------------------*/ static int fusb300_udc_pullup(struct usb_gadget *_gadget, int is_active) { return 0; } static struct usb_gadget_ops fusb300_gadget_ops = { .pullup = fusb300_udc_pullup, }; static int __exit fusb300_remove(struct platform_device *pdev) { struct fusb300 *fusb300 = dev_get_drvdata(&pdev->dev); iounmap(fusb300->reg); free_irq(platform_get_irq(pdev, 0), fusb300); fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); kfree(fusb300); return 0; } static int __init fusb300_probe(struct platform_device *pdev) { struct resource *res, *ires, *ires1; void __iomem *reg = NULL; struct fusb300 *fusb300 = NULL; struct fusb300_ep *_ep[FUSB300_MAX_NUM_EP]; int ret = 0; int i; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { ret = -ENODEV; pr_err("platform_get_resource error.\n"); goto clean_up; } ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!ires) { ret = -ENODEV; dev_err(&pdev->dev, "platform_get_resource IORESOURCE_IRQ error.\n"); goto clean_up; } ires1 = platform_get_resource(pdev, IORESOURCE_IRQ, 1); if (!ires1) { ret = -ENODEV; dev_err(&pdev->dev, "platform_get_resource IORESOURCE_IRQ 1 error.\n"); goto clean_up; } reg = ioremap(res->start, resource_size(res)); if (reg == NULL) { ret = -ENOMEM; pr_err("ioremap error.\n"); 
goto clean_up; } /* initialize udc */ fusb300 = kzalloc(sizeof(struct fusb300), GFP_KERNEL); if (fusb300 == NULL) { pr_err("kzalloc error\n"); goto clean_up; } for (i = 0; i < FUSB300_MAX_NUM_EP; i++) { _ep[i] = kzalloc(sizeof(struct fusb300_ep), GFP_KERNEL); if (_ep[i] == NULL) { pr_err("_ep kzalloc error\n"); goto clean_up; } fusb300->ep[i] = _ep[i]; } spin_lock_init(&fusb300->lock); dev_set_drvdata(&pdev->dev, fusb300); fusb300->gadget.ops = &fusb300_gadget_ops; device_initialize(&fusb300->gadget.dev); dev_set_name(&fusb300->gadget.dev, "gadget"); fusb300->gadget.is_dualspeed = 1; fusb300->gadget.dev.parent = &pdev->dev; fusb300->gadget.dev.dma_mask = pdev->dev.dma_mask; fusb300->gadget.dev.release = pdev->dev.release; fusb300->gadget.name = udc_name; fusb300->reg = reg; ret = request_irq(ires->start, fusb300_irq, IRQF_DISABLED | IRQF_SHARED, udc_name, fusb300); if (ret < 0) { pr_err("request_irq error (%d)\n", ret); goto clean_up; } ret = request_irq(ires1->start, fusb300_irq, IRQF_DISABLED | IRQF_SHARED, udc_name, fusb300); if (ret < 0) { pr_err("request_irq1 error (%d)\n", ret); goto clean_up; } INIT_LIST_HEAD(&fusb300->gadget.ep_list); for (i = 0; i < FUSB300_MAX_NUM_EP ; i++) { struct fusb300_ep *ep = fusb300->ep[i]; if (i != 0) { INIT_LIST_HEAD(&fusb300->ep[i]->ep.ep_list); list_add_tail(&fusb300->ep[i]->ep.ep_list, &fusb300->gadget.ep_list); } ep->fusb300 = fusb300; INIT_LIST_HEAD(&ep->queue); ep->ep.name = fusb300_ep_name[i]; ep->ep.ops = &fusb300_ep_ops; ep->ep.maxpacket = HS_BULK_MAX_PACKET_SIZE; } fusb300->ep[0]->ep.maxpacket = HS_CTL_MAX_PACKET_SIZE; fusb300->ep[0]->epnum = 0; fusb300->gadget.ep0 = &fusb300->ep[0]->ep; INIT_LIST_HEAD(&fusb300->gadget.ep0->ep_list); the_controller = fusb300; fusb300->ep0_req = fusb300_alloc_request(&fusb300->ep[0]->ep, GFP_KERNEL); if (fusb300->ep0_req == NULL) goto clean_up3; init_controller(fusb300); dev_info(&pdev->dev, "version %s\n", DRIVER_VERSION); return 0; clean_up3: free_irq(ires->start, fusb300); clean_up: 
if (fusb300) { if (fusb300->ep0_req) fusb300_free_request(&fusb300->ep[0]->ep, fusb300->ep0_req); kfree(fusb300); } if (reg) iounmap(reg); return ret; } static struct platform_driver fusb300_driver = { .remove = __exit_p(fusb300_remove), .driver = { .name = (char *) udc_name, .owner = THIS_MODULE, }, }; static int __init fusb300_udc_init(void) { return platform_driver_probe(&fusb300_driver, fusb300_probe); } module_init(fusb300_udc_init); static void __exit fusb300_udc_cleanup(void) { platform_driver_unregister(&fusb300_driver); } module_exit(fusb300_udc_cleanup);
gpl-2.0
mialwe/mnics2
sound/soc/codecs/tpa6130a2.c
2763
11814
/* * ALSA SoC Texas Instruments TPA6130A2 headset stereo amplifier driver * * Copyright (C) Nokia Corporation * * Author: Peter Ujfalusi <peter.ujfalusi@ti.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/module.h> #include <linux/errno.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/regulator/consumer.h> #include <linux/slab.h> #include <sound/tpa6130a2-plat.h> #include <sound/soc.h> #include <sound/tlv.h> #include "tpa6130a2.h" static struct i2c_client *tpa6130a2_client; /* This struct is used to save the context */ struct tpa6130a2_data { struct mutex mutex; unsigned char regs[TPA6130A2_CACHEREGNUM]; struct regulator *supply; int power_gpio; u8 power_state:1; enum tpa_model id; }; static int tpa6130a2_i2c_read(int reg) { struct tpa6130a2_data *data; int val; BUG_ON(tpa6130a2_client == NULL); data = i2c_get_clientdata(tpa6130a2_client); /* If powered off, return the cached value */ if (data->power_state) { val = i2c_smbus_read_byte_data(tpa6130a2_client, reg); if (val < 0) dev_err(&tpa6130a2_client->dev, "Read failed\n"); else data->regs[reg] = val; } else { val = data->regs[reg]; } return val; } static int tpa6130a2_i2c_write(int reg, u8 value) { struct tpa6130a2_data *data; int val = 0; BUG_ON(tpa6130a2_client == NULL); data = i2c_get_clientdata(tpa6130a2_client); if (data->power_state) { val = 
i2c_smbus_write_byte_data(tpa6130a2_client, reg, value); if (val < 0) { dev_err(&tpa6130a2_client->dev, "Write failed\n"); return val; } } /* Either powered on or off, we save the context */ data->regs[reg] = value; return val; } static u8 tpa6130a2_read(int reg) { struct tpa6130a2_data *data; BUG_ON(tpa6130a2_client == NULL); data = i2c_get_clientdata(tpa6130a2_client); return data->regs[reg]; } static int tpa6130a2_initialize(void) { struct tpa6130a2_data *data; int i, ret = 0; BUG_ON(tpa6130a2_client == NULL); data = i2c_get_clientdata(tpa6130a2_client); for (i = 1; i < TPA6130A2_REG_VERSION; i++) { ret = tpa6130a2_i2c_write(i, data->regs[i]); if (ret < 0) break; } return ret; } static int tpa6130a2_power(u8 power) { struct tpa6130a2_data *data; u8 val; int ret = 0; BUG_ON(tpa6130a2_client == NULL); data = i2c_get_clientdata(tpa6130a2_client); mutex_lock(&data->mutex); if (power == data->power_state) goto exit; if (power) { ret = regulator_enable(data->supply); if (ret != 0) { dev_err(&tpa6130a2_client->dev, "Failed to enable supply: %d\n", ret); goto exit; } /* Power on */ if (data->power_gpio >= 0) gpio_set_value(data->power_gpio, 1); data->power_state = 1; ret = tpa6130a2_initialize(); if (ret < 0) { dev_err(&tpa6130a2_client->dev, "Failed to initialize chip\n"); if (data->power_gpio >= 0) gpio_set_value(data->power_gpio, 0); regulator_disable(data->supply); data->power_state = 0; goto exit; } } else { /* set SWS */ val = tpa6130a2_read(TPA6130A2_REG_CONTROL); val |= TPA6130A2_SWS; tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val); /* Power off */ if (data->power_gpio >= 0) gpio_set_value(data->power_gpio, 0); ret = regulator_disable(data->supply); if (ret != 0) { dev_err(&tpa6130a2_client->dev, "Failed to disable supply: %d\n", ret); goto exit; } data->power_state = 0; } exit: mutex_unlock(&data->mutex); return ret; } static int tpa6130a2_get_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct 
soc_mixer_control *)kcontrol->private_value; struct tpa6130a2_data *data; unsigned int reg = mc->reg; unsigned int shift = mc->shift; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; BUG_ON(tpa6130a2_client == NULL); data = i2c_get_clientdata(tpa6130a2_client); mutex_lock(&data->mutex); ucontrol->value.integer.value[0] = (tpa6130a2_read(reg) >> shift) & mask; if (invert) ucontrol->value.integer.value[0] = max - ucontrol->value.integer.value[0]; mutex_unlock(&data->mutex); return 0; } static int tpa6130a2_put_volsw(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; struct tpa6130a2_data *data; unsigned int reg = mc->reg; unsigned int shift = mc->shift; int max = mc->max; unsigned int mask = (1 << fls(max)) - 1; unsigned int invert = mc->invert; unsigned int val = (ucontrol->value.integer.value[0] & mask); unsigned int val_reg; BUG_ON(tpa6130a2_client == NULL); data = i2c_get_clientdata(tpa6130a2_client); if (invert) val = max - val; mutex_lock(&data->mutex); val_reg = tpa6130a2_read(reg); if (((val_reg >> shift) & mask) == val) { mutex_unlock(&data->mutex); return 0; } val_reg &= ~(mask << shift); val_reg |= val << shift; tpa6130a2_i2c_write(reg, val_reg); mutex_unlock(&data->mutex); return 1; } /* * TPA6130 volume. From -59.5 to 4 dB with increasing step size when going * down in gain. 
*/ static const unsigned int tpa6130_tlv[] = { TLV_DB_RANGE_HEAD(10), 0, 1, TLV_DB_SCALE_ITEM(-5950, 600, 0), 2, 3, TLV_DB_SCALE_ITEM(-5000, 250, 0), 4, 5, TLV_DB_SCALE_ITEM(-4550, 160, 0), 6, 7, TLV_DB_SCALE_ITEM(-4140, 190, 0), 8, 9, TLV_DB_SCALE_ITEM(-3650, 120, 0), 10, 11, TLV_DB_SCALE_ITEM(-3330, 160, 0), 12, 13, TLV_DB_SCALE_ITEM(-3040, 180, 0), 14, 20, TLV_DB_SCALE_ITEM(-2710, 110, 0), 21, 37, TLV_DB_SCALE_ITEM(-1960, 74, 0), 38, 63, TLV_DB_SCALE_ITEM(-720, 45, 0), }; static const struct snd_kcontrol_new tpa6130a2_controls[] = { SOC_SINGLE_EXT_TLV("TPA6130A2 Headphone Playback Volume", TPA6130A2_REG_VOL_MUTE, 0, 0x3f, 0, tpa6130a2_get_volsw, tpa6130a2_put_volsw, tpa6130_tlv), }; static const unsigned int tpa6140_tlv[] = { TLV_DB_RANGE_HEAD(3), 0, 8, TLV_DB_SCALE_ITEM(-5900, 400, 0), 9, 16, TLV_DB_SCALE_ITEM(-2500, 200, 0), 17, 31, TLV_DB_SCALE_ITEM(-1000, 100, 0), }; static const struct snd_kcontrol_new tpa6140a2_controls[] = { SOC_SINGLE_EXT_TLV("TPA6140A2 Headphone Playback Volume", TPA6130A2_REG_VOL_MUTE, 1, 0x1f, 0, tpa6130a2_get_volsw, tpa6130a2_put_volsw, tpa6140_tlv), }; /* * Enable or disable channel (left or right) * The bit number for mute and amplifier are the same per channel: * bit 6: Right channel * bit 7: Left channel * in both registers. 
*/ static void tpa6130a2_channel_enable(u8 channel, int enable) { u8 val; if (enable) { /* Enable channel */ /* Enable amplifier */ val = tpa6130a2_read(TPA6130A2_REG_CONTROL); val |= channel; val &= ~TPA6130A2_SWS; tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val); /* Unmute channel */ val = tpa6130a2_read(TPA6130A2_REG_VOL_MUTE); val &= ~channel; tpa6130a2_i2c_write(TPA6130A2_REG_VOL_MUTE, val); } else { /* Disable channel */ /* Mute channel */ val = tpa6130a2_read(TPA6130A2_REG_VOL_MUTE); val |= channel; tpa6130a2_i2c_write(TPA6130A2_REG_VOL_MUTE, val); /* Disable amplifier */ val = tpa6130a2_read(TPA6130A2_REG_CONTROL); val &= ~channel; tpa6130a2_i2c_write(TPA6130A2_REG_CONTROL, val); } } int tpa6130a2_stereo_enable(struct snd_soc_codec *codec, int enable) { int ret = 0; if (enable) { ret = tpa6130a2_power(1); if (ret < 0) return ret; tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L, 1); } else { tpa6130a2_channel_enable(TPA6130A2_HP_EN_R | TPA6130A2_HP_EN_L, 0); ret = tpa6130a2_power(0); } return ret; } EXPORT_SYMBOL_GPL(tpa6130a2_stereo_enable); int tpa6130a2_add_controls(struct snd_soc_codec *codec) { struct tpa6130a2_data *data; if (tpa6130a2_client == NULL) return -ENODEV; data = i2c_get_clientdata(tpa6130a2_client); if (data->id == TPA6140A2) return snd_soc_add_controls(codec, tpa6140a2_controls, ARRAY_SIZE(tpa6140a2_controls)); else return snd_soc_add_controls(codec, tpa6130a2_controls, ARRAY_SIZE(tpa6130a2_controls)); } EXPORT_SYMBOL_GPL(tpa6130a2_add_controls); static int __devinit tpa6130a2_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct device *dev; struct tpa6130a2_data *data; struct tpa6130a2_platform_data *pdata; const char *regulator; int ret; dev = &client->dev; if (client->dev.platform_data == NULL) { dev_err(dev, "Platform data not set\n"); dump_stack(); return -ENODEV; } data = kzalloc(sizeof(*data), GFP_KERNEL); if (data == NULL) { dev_err(dev, "Can not allocate memory\n"); return -ENOMEM; } 
tpa6130a2_client = client; i2c_set_clientdata(tpa6130a2_client, data); pdata = client->dev.platform_data; data->power_gpio = pdata->power_gpio; data->id = pdata->id; mutex_init(&data->mutex); /* Set default register values */ data->regs[TPA6130A2_REG_CONTROL] = TPA6130A2_SWS; data->regs[TPA6130A2_REG_VOL_MUTE] = TPA6130A2_MUTE_R | TPA6130A2_MUTE_L; if (data->power_gpio >= 0) { ret = gpio_request(data->power_gpio, "tpa6130a2 enable"); if (ret < 0) { dev_err(dev, "Failed to request power GPIO (%d)\n", data->power_gpio); goto err_gpio; } gpio_direction_output(data->power_gpio, 0); } switch (data->id) { default: dev_warn(dev, "Unknown TPA model (%d). Assuming 6130A2\n", pdata->id); case TPA6130A2: regulator = "Vdd"; break; case TPA6140A2: regulator = "AVdd"; break; } data->supply = regulator_get(dev, regulator); if (IS_ERR(data->supply)) { ret = PTR_ERR(data->supply); dev_err(dev, "Failed to request supply: %d\n", ret); goto err_regulator; } ret = tpa6130a2_power(1); if (ret != 0) goto err_power; /* Read version */ ret = tpa6130a2_i2c_read(TPA6130A2_REG_VERSION) & TPA6130A2_VERSION_MASK; if ((ret != 1) && (ret != 2)) dev_warn(dev, "UNTESTED version detected (%d)\n", ret); /* Disable the chip */ ret = tpa6130a2_power(0); if (ret != 0) goto err_power; return 0; err_power: regulator_put(data->supply); err_regulator: if (data->power_gpio >= 0) gpio_free(data->power_gpio); err_gpio: kfree(data); i2c_set_clientdata(tpa6130a2_client, NULL); tpa6130a2_client = NULL; return ret; } static int __devexit tpa6130a2_remove(struct i2c_client *client) { struct tpa6130a2_data *data = i2c_get_clientdata(client); tpa6130a2_power(0); if (data->power_gpio >= 0) gpio_free(data->power_gpio); regulator_put(data->supply); kfree(data); tpa6130a2_client = NULL; return 0; } static const struct i2c_device_id tpa6130a2_id[] = { { "tpa6130a2", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tpa6130a2_id); static struct i2c_driver tpa6130a2_i2c_driver = { .driver = { .name = "tpa6130a2", .owner = THIS_MODULE, 
}, .probe = tpa6130a2_probe, .remove = __devexit_p(tpa6130a2_remove), .id_table = tpa6130a2_id, }; static int __init tpa6130a2_init(void) { return i2c_add_driver(&tpa6130a2_i2c_driver); } static void __exit tpa6130a2_exit(void) { i2c_del_driver(&tpa6130a2_i2c_driver); } MODULE_AUTHOR("Peter Ujfalusi <peter.ujfalusi@ti.com>"); MODULE_DESCRIPTION("TPA6130A2 Headphone amplifier driver"); MODULE_LICENSE("GPL"); module_init(tpa6130a2_init); module_exit(tpa6130a2_exit);
gpl-2.0
Xmister/linux-sunxi
drivers/infiniband/ulp/ipoib/ipoib_ib.c
2763
27791
/* * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved. * Copyright (c) 2005 Sun Microsystems, Inc. All rights reserved. * Copyright (c) 2005 Mellanox Technologies. All rights reserved. * Copyright (c) 2004, 2005 Voltaire, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/ip.h> #include <linux/tcp.h> #include "ipoib.h" #ifdef CONFIG_INFINIBAND_IPOIB_DEBUG_DATA static int data_debug_level; module_param(data_debug_level, int, 0644); MODULE_PARM_DESC(data_debug_level, "Enable data path debug tracing if > 0"); #endif static DEFINE_MUTEX(pkey_mutex); struct ipoib_ah *ipoib_create_ah(struct net_device *dev, struct ib_pd *pd, struct ib_ah_attr *attr) { struct ipoib_ah *ah; ah = kmalloc(sizeof *ah, GFP_KERNEL); if (!ah) return NULL; ah->dev = dev; ah->last_send = 0; kref_init(&ah->ref); ah->ah = ib_create_ah(pd, attr); if (IS_ERR(ah->ah)) { kfree(ah); ah = NULL; } else ipoib_dbg(netdev_priv(dev), "Created ah %p\n", ah->ah); return ah; } void ipoib_free_ah(struct kref *kref) { struct ipoib_ah *ah = container_of(kref, struct ipoib_ah, ref); struct ipoib_dev_priv *priv = netdev_priv(ah->dev); unsigned long flags; spin_lock_irqsave(&priv->lock, flags); list_add_tail(&ah->list, &priv->dead_ahs); spin_unlock_irqrestore(&priv->lock, flags); } static void ipoib_ud_dma_unmap_rx(struct ipoib_dev_priv *priv, u64 mapping[IPOIB_UD_RX_SG]) { if (ipoib_ud_need_sg(priv->max_ib_mtu)) { ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_HEAD_SIZE, DMA_FROM_DEVICE); ib_dma_unmap_page(priv->ca, mapping[1], PAGE_SIZE, DMA_FROM_DEVICE); } else ib_dma_unmap_single(priv->ca, mapping[0], IPOIB_UD_BUF_SIZE(priv->max_ib_mtu), DMA_FROM_DEVICE); } static void ipoib_ud_skb_put_frags(struct ipoib_dev_priv *priv, struct sk_buff *skb, unsigned int length) { if (ipoib_ud_need_sg(priv->max_ib_mtu)) { skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; unsigned int size; /* * There is only two buffers needed for max_payload = 4K, * first buf size is IPOIB_UD_HEAD_SIZE */ skb->tail += IPOIB_UD_HEAD_SIZE; skb->len += length; size = length - IPOIB_UD_HEAD_SIZE; frag->size = size; skb->data_len += size; skb->truesize += size; } else skb_put(skb, length); } static int 
ipoib_ib_post_receive(struct net_device *dev, int id) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_recv_wr *bad_wr; int ret; priv->rx_wr.wr_id = id | IPOIB_OP_RECV; priv->rx_sge[0].addr = priv->rx_ring[id].mapping[0]; priv->rx_sge[1].addr = priv->rx_ring[id].mapping[1]; ret = ib_post_recv(priv->qp, &priv->rx_wr, &bad_wr); if (unlikely(ret)) { ipoib_warn(priv, "receive failed for buf %d (%d)\n", id, ret); ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[id].mapping); dev_kfree_skb_any(priv->rx_ring[id].skb); priv->rx_ring[id].skb = NULL; } return ret; } static struct sk_buff *ipoib_alloc_rx_skb(struct net_device *dev, int id) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct sk_buff *skb; int buf_size; u64 *mapping; if (ipoib_ud_need_sg(priv->max_ib_mtu)) buf_size = IPOIB_UD_HEAD_SIZE; else buf_size = IPOIB_UD_BUF_SIZE(priv->max_ib_mtu); skb = dev_alloc_skb(buf_size + 4); if (unlikely(!skb)) return NULL; /* * IB will leave a 40 byte gap for a GRH and IPoIB adds a 4 byte * header. So we need 4 more bytes to get to 48 and align the * IP header to a multiple of 16. 
*/ skb_reserve(skb, 4); mapping = priv->rx_ring[id].mapping; mapping[0] = ib_dma_map_single(priv->ca, skb->data, buf_size, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, mapping[0]))) goto error; if (ipoib_ud_need_sg(priv->max_ib_mtu)) { struct page *page = alloc_page(GFP_ATOMIC); if (!page) goto partial_error; skb_fill_page_desc(skb, 0, page, 0, PAGE_SIZE); mapping[1] = ib_dma_map_page(priv->ca, skb_shinfo(skb)->frags[0].page, 0, PAGE_SIZE, DMA_FROM_DEVICE); if (unlikely(ib_dma_mapping_error(priv->ca, mapping[1]))) goto partial_error; } priv->rx_ring[id].skb = skb; return skb; partial_error: ib_dma_unmap_single(priv->ca, mapping[0], buf_size, DMA_FROM_DEVICE); error: dev_kfree_skb_any(skb); return NULL; } static int ipoib_ib_post_receives(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int i; for (i = 0; i < ipoib_recvq_size; ++i) { if (!ipoib_alloc_rx_skb(dev, i)) { ipoib_warn(priv, "failed to allocate receive buffer %d\n", i); return -ENOMEM; } if (ipoib_ib_post_receive(dev, i)) { ipoib_warn(priv, "ipoib_ib_post_receive failed for buf %d\n", i); return -EIO; } } return 0; } static void ipoib_ib_handle_rx_wc(struct net_device *dev, struct ib_wc *wc) { struct ipoib_dev_priv *priv = netdev_priv(dev); unsigned int wr_id = wc->wr_id & ~IPOIB_OP_RECV; struct sk_buff *skb; u64 mapping[IPOIB_UD_RX_SG]; union ib_gid *dgid; ipoib_dbg_data(priv, "recv completion: id %d, status: %d\n", wr_id, wc->status); if (unlikely(wr_id >= ipoib_recvq_size)) { ipoib_warn(priv, "recv completion event with wrid %d (> %d)\n", wr_id, ipoib_recvq_size); return; } skb = priv->rx_ring[wr_id].skb; if (unlikely(wc->status != IB_WC_SUCCESS)) { if (wc->status != IB_WC_WR_FLUSH_ERR) ipoib_warn(priv, "failed recv event " "(status=%d, wrid=%d vend_err %x)\n", wc->status, wr_id, wc->vendor_err); ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[wr_id].mapping); dev_kfree_skb_any(skb); priv->rx_ring[wr_id].skb = NULL; return; } /* * Drop packets that this interface 
sent, ie multicast packets * that the HCA has replicated. */ if (wc->slid == priv->local_lid && wc->src_qp == priv->qp->qp_num) goto repost; memcpy(mapping, priv->rx_ring[wr_id].mapping, IPOIB_UD_RX_SG * sizeof *mapping); /* * If we can't allocate a new RX buffer, dump * this packet and reuse the old buffer. */ if (unlikely(!ipoib_alloc_rx_skb(dev, wr_id))) { ++dev->stats.rx_dropped; goto repost; } ipoib_dbg_data(priv, "received %d bytes, SLID 0x%04x\n", wc->byte_len, wc->slid); ipoib_ud_dma_unmap_rx(priv, mapping); ipoib_ud_skb_put_frags(priv, skb, wc->byte_len); /* First byte of dgid signals multicast when 0xff */ dgid = &((struct ib_grh *)skb->data)->dgid; if (!(wc->wc_flags & IB_WC_GRH) || dgid->raw[0] != 0xff) skb->pkt_type = PACKET_HOST; else if (memcmp(dgid, dev->broadcast + 4, sizeof(union ib_gid)) == 0) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; skb_pull(skb, IB_GRH_BYTES); skb->protocol = ((struct ipoib_header *) skb->data)->proto; skb_reset_mac_header(skb); skb_pull(skb, IPOIB_ENCAP_LEN); ++dev->stats.rx_packets; dev->stats.rx_bytes += skb->len; skb->dev = dev; if ((dev->features & NETIF_F_RXCSUM) && likely(wc->csum_ok)) skb->ip_summed = CHECKSUM_UNNECESSARY; napi_gro_receive(&priv->napi, skb); repost: if (unlikely(ipoib_ib_post_receive(dev, wr_id))) ipoib_warn(priv, "ipoib_ib_post_receive failed " "for buf %d\n", wr_id); } static int ipoib_dma_map_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; int i; int off; if (skb_headlen(skb)) { mapping[0] = ib_dma_map_single(ca, skb->data, skb_headlen(skb), DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(ca, mapping[0]))) return -EIO; off = 1; } else off = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; mapping[i + off] = ib_dma_map_page(ca, frag->page, frag->page_offset, frag->size, DMA_TO_DEVICE); if (unlikely(ib_dma_mapping_error(ca, mapping[i + off]))) 
goto partial_error; } return 0; partial_error: for (; i > 0; --i) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1]; ib_dma_unmap_page(ca, mapping[i - !off], frag->size, DMA_TO_DEVICE); } if (off) ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); return -EIO; } static void ipoib_dma_unmap_tx(struct ib_device *ca, struct ipoib_tx_buf *tx_req) { struct sk_buff *skb = tx_req->skb; u64 *mapping = tx_req->mapping; int i; int off; if (skb_headlen(skb)) { ib_dma_unmap_single(ca, mapping[0], skb_headlen(skb), DMA_TO_DEVICE); off = 1; } else off = 0; for (i = 0; i < skb_shinfo(skb)->nr_frags; ++i) { skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ib_dma_unmap_page(ca, mapping[i + off], frag->size, DMA_TO_DEVICE); } } static void ipoib_ib_handle_tx_wc(struct net_device *dev, struct ib_wc *wc) { struct ipoib_dev_priv *priv = netdev_priv(dev); unsigned int wr_id = wc->wr_id; struct ipoib_tx_buf *tx_req; ipoib_dbg_data(priv, "send completion: id %d, status: %d\n", wr_id, wc->status); if (unlikely(wr_id >= ipoib_sendq_size)) { ipoib_warn(priv, "send completion event with wrid %d (> %d)\n", wr_id, ipoib_sendq_size); return; } tx_req = &priv->tx_ring[wr_id]; ipoib_dma_unmap_tx(priv->ca, tx_req); ++dev->stats.tx_packets; dev->stats.tx_bytes += tx_req->skb->len; dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; if (unlikely(--priv->tx_outstanding == ipoib_sendq_size >> 1) && netif_queue_stopped(dev) && test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) netif_wake_queue(dev); if (wc->status != IB_WC_SUCCESS && wc->status != IB_WC_WR_FLUSH_ERR) ipoib_warn(priv, "failed send event " "(status=%d, wrid=%d vend_err %x)\n", wc->status, wr_id, wc->vendor_err); } static int poll_tx(struct ipoib_dev_priv *priv) { int n, i; n = ib_poll_cq(priv->send_cq, MAX_SEND_CQE, priv->send_wc); for (i = 0; i < n; ++i) ipoib_ib_handle_tx_wc(priv->dev, priv->send_wc + i); return n == MAX_SEND_CQE; } int ipoib_poll(struct napi_struct *napi, int budget) { struct ipoib_dev_priv *priv = 
container_of(napi, struct ipoib_dev_priv, napi); struct net_device *dev = priv->dev; int done; int t; int n, i; done = 0; poll_more: while (done < budget) { int max = (budget - done); t = min(IPOIB_NUM_WC, max); n = ib_poll_cq(priv->recv_cq, t, priv->ibwc); for (i = 0; i < n; i++) { struct ib_wc *wc = priv->ibwc + i; if (wc->wr_id & IPOIB_OP_RECV) { ++done; if (wc->wr_id & IPOIB_OP_CM) ipoib_cm_handle_rx_wc(dev, wc); else ipoib_ib_handle_rx_wc(dev, wc); } else ipoib_cm_handle_tx_wc(priv->dev, wc); } if (n != t) break; } if (done < budget) { napi_complete(napi); if (unlikely(ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP | IB_CQ_REPORT_MISSED_EVENTS)) && napi_reschedule(napi)) goto poll_more; } return done; } void ipoib_ib_completion(struct ib_cq *cq, void *dev_ptr) { struct net_device *dev = dev_ptr; struct ipoib_dev_priv *priv = netdev_priv(dev); napi_schedule(&priv->napi); } static void drain_tx_cq(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); netif_tx_lock(dev); while (poll_tx(priv)) ; /* nothing */ if (netif_queue_stopped(dev)) mod_timer(&priv->poll_timer, jiffies + 1); netif_tx_unlock(dev); } void ipoib_send_comp_handler(struct ib_cq *cq, void *dev_ptr) { struct ipoib_dev_priv *priv = netdev_priv(dev_ptr); mod_timer(&priv->poll_timer, jiffies); } static inline int post_send(struct ipoib_dev_priv *priv, unsigned int wr_id, struct ib_ah *address, u32 qpn, struct ipoib_tx_buf *tx_req, void *head, int hlen) { struct ib_send_wr *bad_wr; int i, off; struct sk_buff *skb = tx_req->skb; skb_frag_t *frags = skb_shinfo(skb)->frags; int nr_frags = skb_shinfo(skb)->nr_frags; u64 *mapping = tx_req->mapping; if (skb_headlen(skb)) { priv->tx_sge[0].addr = mapping[0]; priv->tx_sge[0].length = skb_headlen(skb); off = 1; } else off = 0; for (i = 0; i < nr_frags; ++i) { priv->tx_sge[i + off].addr = mapping[i + off]; priv->tx_sge[i + off].length = frags[i].size; } priv->tx_wr.num_sge = nr_frags + off; priv->tx_wr.wr_id = wr_id; 
priv->tx_wr.wr.ud.remote_qpn = qpn; priv->tx_wr.wr.ud.ah = address; if (head) { priv->tx_wr.wr.ud.mss = skb_shinfo(skb)->gso_size; priv->tx_wr.wr.ud.header = head; priv->tx_wr.wr.ud.hlen = hlen; priv->tx_wr.opcode = IB_WR_LSO; } else priv->tx_wr.opcode = IB_WR_SEND; return ib_post_send(priv->qp, &priv->tx_wr, &bad_wr); } void ipoib_send(struct net_device *dev, struct sk_buff *skb, struct ipoib_ah *address, u32 qpn) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_tx_buf *tx_req; int hlen, rc; void *phead; if (skb_is_gso(skb)) { hlen = skb_transport_offset(skb) + tcp_hdrlen(skb); phead = skb->data; if (unlikely(!skb_pull(skb, hlen))) { ipoib_warn(priv, "linear data too small\n"); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } } else { if (unlikely(skb->len > priv->mcast_mtu + IPOIB_ENCAP_LEN)) { ipoib_warn(priv, "packet len %d (> %d) too long to send, dropping\n", skb->len, priv->mcast_mtu + IPOIB_ENCAP_LEN); ++dev->stats.tx_dropped; ++dev->stats.tx_errors; ipoib_cm_skb_too_long(dev, skb, priv->mcast_mtu); return; } phead = NULL; hlen = 0; } ipoib_dbg_data(priv, "sending packet, length=%d address=%p qpn=0x%06x\n", skb->len, address, qpn); /* * We put the skb into the tx_ring _before_ we call post_send() * because it's entirely possible that the completion handler will * run before we execute anything after the post_send(). That * means we have to make sure everything is properly recorded and * our state is consistent before we call post_send(). 
*/ tx_req = &priv->tx_ring[priv->tx_head & (ipoib_sendq_size - 1)]; tx_req->skb = skb; if (unlikely(ipoib_dma_map_tx(priv->ca, tx_req))) { ++dev->stats.tx_errors; dev_kfree_skb_any(skb); return; } if (skb->ip_summed == CHECKSUM_PARTIAL) priv->tx_wr.send_flags |= IB_SEND_IP_CSUM; else priv->tx_wr.send_flags &= ~IB_SEND_IP_CSUM; if (++priv->tx_outstanding == ipoib_sendq_size) { ipoib_dbg(priv, "TX ring full, stopping kernel net queue\n"); if (ib_req_notify_cq(priv->send_cq, IB_CQ_NEXT_COMP)) ipoib_warn(priv, "request notify on send CQ failed\n"); netif_stop_queue(dev); } rc = post_send(priv, priv->tx_head & (ipoib_sendq_size - 1), address->ah, qpn, tx_req, phead, hlen); if (unlikely(rc)) { ipoib_warn(priv, "post_send failed, error %d\n", rc); ++dev->stats.tx_errors; --priv->tx_outstanding; ipoib_dma_unmap_tx(priv->ca, tx_req); dev_kfree_skb_any(skb); if (netif_queue_stopped(dev)) netif_wake_queue(dev); } else { dev->trans_start = jiffies; address->last_send = priv->tx_head; ++priv->tx_head; skb_orphan(skb); } if (unlikely(priv->tx_outstanding > MAX_SEND_CQE)) while (poll_tx(priv)) ; /* nothing */ } static void __ipoib_reap_ah(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ipoib_ah *ah, *tah; LIST_HEAD(remove_list); unsigned long flags; netif_tx_lock_bh(dev); spin_lock_irqsave(&priv->lock, flags); list_for_each_entry_safe(ah, tah, &priv->dead_ahs, list) if ((int) priv->tx_tail - (int) ah->last_send >= 0) { list_del(&ah->list); ib_destroy_ah(ah->ah); kfree(ah); } spin_unlock_irqrestore(&priv->lock, flags); netif_tx_unlock_bh(dev); } void ipoib_reap_ah(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, ah_reap_task.work); struct net_device *dev = priv->dev; __ipoib_reap_ah(dev); if (!test_bit(IPOIB_STOP_REAPER, &priv->flags)) queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, round_jiffies_relative(HZ)); } static void ipoib_ib_tx_timer_func(unsigned long ctx) { drain_tx_cq((struct 
net_device *)ctx); } int ipoib_ib_dev_open(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int ret; if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &priv->pkey_index)) { ipoib_warn(priv, "P_Key 0x%04x not found\n", priv->pkey); clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); return -1; } set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); ret = ipoib_init_qp(dev); if (ret) { ipoib_warn(priv, "ipoib_init_qp returned %d\n", ret); return -1; } ret = ipoib_ib_post_receives(dev); if (ret) { ipoib_warn(priv, "ipoib_ib_post_receives returned %d\n", ret); ipoib_ib_dev_stop(dev, 1); return -1; } ret = ipoib_cm_dev_open(dev); if (ret) { ipoib_warn(priv, "ipoib_cm_dev_open returned %d\n", ret); ipoib_ib_dev_stop(dev, 1); return -1; } clear_bit(IPOIB_STOP_REAPER, &priv->flags); queue_delayed_work(ipoib_workqueue, &priv->ah_reap_task, round_jiffies_relative(HZ)); if (!test_and_set_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_enable(&priv->napi); return 0; } static void ipoib_pkey_dev_check_presence(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); u16 pkey_index = 0; if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &pkey_index)) clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); else set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); } int ipoib_ib_dev_up(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_pkey_dev_check_presence(dev); if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { ipoib_dbg(priv, "PKEY is not assigned.\n"); return 0; } set_bit(IPOIB_FLAG_OPER_UP, &priv->flags); return ipoib_mcast_start_thread(dev); } int ipoib_ib_dev_down(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg(priv, "downing ib_dev\n"); clear_bit(IPOIB_FLAG_OPER_UP, &priv->flags); netif_carrier_off(dev); /* Shutdown the P_Key thread if still active */ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { mutex_lock(&pkey_mutex); set_bit(IPOIB_PKEY_STOP, &priv->flags); 
cancel_delayed_work(&priv->pkey_poll_task); mutex_unlock(&pkey_mutex); if (flush) flush_workqueue(ipoib_workqueue); } ipoib_mcast_stop_thread(dev, flush); ipoib_mcast_dev_flush(dev); ipoib_flush_paths(dev); return 0; } static int recvs_pending(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int pending = 0; int i; for (i = 0; i < ipoib_recvq_size; ++i) if (priv->rx_ring[i].skb) ++pending; return pending; } void ipoib_drain_cq(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); int i, n; /* * We call completion handling routines that expect to be * called from the BH-disabled NAPI poll context, so disable * BHs here too. */ local_bh_disable(); do { n = ib_poll_cq(priv->recv_cq, IPOIB_NUM_WC, priv->ibwc); for (i = 0; i < n; ++i) { /* * Convert any successful completions to flush * errors to avoid passing packets up the * stack after bringing the device down. */ if (priv->ibwc[i].status == IB_WC_SUCCESS) priv->ibwc[i].status = IB_WC_WR_FLUSH_ERR; if (priv->ibwc[i].wr_id & IPOIB_OP_RECV) { if (priv->ibwc[i].wr_id & IPOIB_OP_CM) ipoib_cm_handle_rx_wc(dev, priv->ibwc + i); else ipoib_ib_handle_rx_wc(dev, priv->ibwc + i); } else ipoib_cm_handle_tx_wc(dev, priv->ibwc + i); } } while (n == IPOIB_NUM_WC); while (poll_tx(priv)) ; /* nothing */ local_bh_enable(); } int ipoib_ib_dev_stop(struct net_device *dev, int flush) { struct ipoib_dev_priv *priv = netdev_priv(dev); struct ib_qp_attr qp_attr; unsigned long begin; struct ipoib_tx_buf *tx_req; int i; if (test_and_clear_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) napi_disable(&priv->napi); ipoib_cm_dev_stop(dev); /* * Move our QP to the error state and then reinitialize in * when all work requests have completed or have been flushed. 
*/ qp_attr.qp_state = IB_QPS_ERR; if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_warn(priv, "Failed to modify QP to ERROR state\n"); /* Wait for all sends and receives to complete */ begin = jiffies; while (priv->tx_head != priv->tx_tail || recvs_pending(dev)) { if (time_after(jiffies, begin + 5 * HZ)) { ipoib_warn(priv, "timing out; %d sends %d receives not completed\n", priv->tx_head - priv->tx_tail, recvs_pending(dev)); /* * assume the HW is wedged and just free up * all our pending work requests. */ while ((int) priv->tx_tail - (int) priv->tx_head < 0) { tx_req = &priv->tx_ring[priv->tx_tail & (ipoib_sendq_size - 1)]; ipoib_dma_unmap_tx(priv->ca, tx_req); dev_kfree_skb_any(tx_req->skb); ++priv->tx_tail; --priv->tx_outstanding; } for (i = 0; i < ipoib_recvq_size; ++i) { struct ipoib_rx_buf *rx_req; rx_req = &priv->rx_ring[i]; if (!rx_req->skb) continue; ipoib_ud_dma_unmap_rx(priv, priv->rx_ring[i].mapping); dev_kfree_skb_any(rx_req->skb); rx_req->skb = NULL; } goto timeout; } ipoib_drain_cq(dev); msleep(1); } ipoib_dbg(priv, "All sends and receives done.\n"); timeout: del_timer_sync(&priv->poll_timer); qp_attr.qp_state = IB_QPS_RESET; if (ib_modify_qp(priv->qp, &qp_attr, IB_QP_STATE)) ipoib_warn(priv, "Failed to modify QP to RESET state\n"); /* Wait for all AHs to be reaped */ set_bit(IPOIB_STOP_REAPER, &priv->flags); cancel_delayed_work(&priv->ah_reap_task); if (flush) flush_workqueue(ipoib_workqueue); begin = jiffies; while (!list_empty(&priv->dead_ahs)) { __ipoib_reap_ah(dev); if (time_after(jiffies, begin + HZ)) { ipoib_warn(priv, "timing out; will leak address handles\n"); break; } msleep(1); } ib_req_notify_cq(priv->recv_cq, IB_CQ_NEXT_COMP); return 0; } int ipoib_ib_dev_init(struct net_device *dev, struct ib_device *ca, int port) { struct ipoib_dev_priv *priv = netdev_priv(dev); priv->ca = ca; priv->port = port; priv->qp = NULL; if (ipoib_transport_dev_init(dev, ca)) { printk(KERN_WARNING "%s: ipoib_transport_dev_init failed\n", ca->name); 
return -ENODEV; } setup_timer(&priv->poll_timer, ipoib_ib_tx_timer_func, (unsigned long) dev); if (dev->flags & IFF_UP) { if (ipoib_ib_dev_open(dev)) { ipoib_transport_dev_cleanup(dev); return -ENODEV; } } return 0; } static void __ipoib_ib_dev_flush(struct ipoib_dev_priv *priv, enum ipoib_flush_level level) { struct ipoib_dev_priv *cpriv; struct net_device *dev = priv->dev; u16 new_index; mutex_lock(&priv->vlan_mutex); /* * Flush any child interfaces too -- they might be up even if * the parent is down. */ list_for_each_entry(cpriv, &priv->child_intfs, list) __ipoib_ib_dev_flush(cpriv, level); mutex_unlock(&priv->vlan_mutex); if (!test_bit(IPOIB_FLAG_INITIALIZED, &priv->flags)) { ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_INITIALIZED not set.\n"); return; } if (!test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { ipoib_dbg(priv, "Not flushing - IPOIB_FLAG_ADMIN_UP not set.\n"); return; } if (level == IPOIB_FLUSH_HEAVY) { if (ib_find_pkey(priv->ca, priv->port, priv->pkey, &new_index)) { clear_bit(IPOIB_PKEY_ASSIGNED, &priv->flags); ipoib_ib_dev_down(dev, 0); ipoib_ib_dev_stop(dev, 0); if (ipoib_pkey_dev_delay_open(dev)) return; } /* restart QP only if P_Key index is changed */ if (test_and_set_bit(IPOIB_PKEY_ASSIGNED, &priv->flags) && new_index == priv->pkey_index) { ipoib_dbg(priv, "Not flushing - P_Key index not changed.\n"); return; } priv->pkey_index = new_index; } if (level == IPOIB_FLUSH_LIGHT) { ipoib_mark_paths_invalid(dev); ipoib_mcast_dev_flush(dev); } if (level >= IPOIB_FLUSH_NORMAL) ipoib_ib_dev_down(dev, 0); if (level == IPOIB_FLUSH_HEAVY) { ipoib_ib_dev_stop(dev, 0); ipoib_ib_dev_open(dev); } /* * The device could have been brought down between the start and when * we get here, don't bring it back up if it's not configured up */ if (test_bit(IPOIB_FLAG_ADMIN_UP, &priv->flags)) { if (level >= IPOIB_FLUSH_NORMAL) ipoib_ib_dev_up(dev); ipoib_mcast_restart_task(&priv->restart_task); } } void ipoib_ib_dev_flush_light(struct work_struct *work) { struct 
ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_light); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_LIGHT); } void ipoib_ib_dev_flush_normal(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_normal); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_NORMAL); } void ipoib_ib_dev_flush_heavy(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, flush_heavy); __ipoib_ib_dev_flush(priv, IPOIB_FLUSH_HEAVY); } void ipoib_ib_dev_cleanup(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); ipoib_dbg(priv, "cleaning up ib_dev\n"); ipoib_mcast_stop_thread(dev, 1); ipoib_mcast_dev_flush(dev); ipoib_transport_dev_cleanup(dev); } /* * Delayed P_Key Assigment Interim Support * * The following is initial implementation of delayed P_Key assigment * mechanism. It is using the same approach implemented for the multicast * group join. The single goal of this implementation is to quickly address * Bug #2507. This implementation will probably be removed when the P_Key * change async notification is available. 
*/ void ipoib_pkey_poll(struct work_struct *work) { struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, pkey_poll_task.work); struct net_device *dev = priv->dev; ipoib_pkey_dev_check_presence(dev); if (test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) ipoib_open(dev); else { mutex_lock(&pkey_mutex); if (!test_bit(IPOIB_PKEY_STOP, &priv->flags)) queue_delayed_work(ipoib_workqueue, &priv->pkey_poll_task, HZ); mutex_unlock(&pkey_mutex); } } int ipoib_pkey_dev_delay_open(struct net_device *dev) { struct ipoib_dev_priv *priv = netdev_priv(dev); /* Look for the interface pkey value in the IB Port P_Key table and */ /* set the interface pkey assigment flag */ ipoib_pkey_dev_check_presence(dev); /* P_Key value not assigned yet - start polling */ if (!test_bit(IPOIB_PKEY_ASSIGNED, &priv->flags)) { mutex_lock(&pkey_mutex); clear_bit(IPOIB_PKEY_STOP, &priv->flags); queue_delayed_work(ipoib_workqueue, &priv->pkey_poll_task, HZ); mutex_unlock(&pkey_mutex); return 1; } return 0; }
gpl-2.0
droidroidz/USCC_R970_kernel
drivers/staging/rtl8192e/rtllib_rx.c
4811
81041
/* * Original code based Host AP (software wireless LAN access point) driver * for Intersil Prism2/2.5/3 - hostap.o module, common routines * * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * <jkmaline@cc.hut.fi> * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright (c) 2004, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by Andrea Merello <andreamrl@tiscali.it> A special thanks goes to Realtek for their support ! ******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <linux/uaccess.h> #include <linux/ctype.h> #include "rtllib.h" #include "dot11d.h" static inline void rtllib_monitor_rx(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_status, size_t hdr_length) { skb->dev = ieee->dev; skb_reset_mac_header(skb); skb_pull(skb, hdr_length); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(ETH_P_80211_RAW); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } /* Called only as a tasklet (software IRQ) */ static struct rtllib_frag_entry * rtllib_frag_cache_find(struct rtllib_device *ieee, unsigned int seq, unsigned int frag, u8 tid, u8 *src, u8 *dst) { struct rtllib_frag_entry *entry; int i; for (i 
= 0; i < RTLLIB_FRAG_CACHE_LEN; i++) { entry = &ieee->frag_cache[tid][i]; if (entry->skb != NULL && time_after(jiffies, entry->first_frag_time + 2 * HZ)) { RTLLIB_DEBUG_FRAG( "expiring fragment cache entry " "seq=%u last_frag=%u\n", entry->seq, entry->last_frag); dev_kfree_skb_any(entry->skb); entry->skb = NULL; } if (entry->skb != NULL && entry->seq == seq && (entry->last_frag + 1 == frag || frag == -1) && memcmp(entry->src_addr, src, ETH_ALEN) == 0 && memcmp(entry->dst_addr, dst, ETH_ALEN) == 0) return entry; } return NULL; } /* Called only as a tasklet (software IRQ) */ static struct sk_buff * rtllib_frag_cache_get(struct rtllib_device *ieee, struct rtllib_hdr_4addr *hdr) { struct sk_buff *skb = NULL; u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int frag = WLAN_GET_SEQ_FRAG(sc); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct rtllib_frag_entry *entry; struct rtllib_hdr_3addrqos *hdr_3addrqos; struct rtllib_hdr_4addrqos *hdr_4addrqos; u8 tid; if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) && RTLLIB_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else if (RTLLIB_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else { tid = 0; } if (frag == 0) { /* Reserve enough space to fit maximum frame length */ skb = dev_alloc_skb(ieee->dev->mtu + sizeof(struct rtllib_hdr_4addr) + 8 /* LLC */ + 2 /* alignment */ + 8 /* WEP */ + ETH_ALEN /* WDS */ + (RTLLIB_QOS_HAS_SEQ(fc) ? 
2 : 0) /* QOS Control */); if (skb == NULL) return NULL; entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]]; ieee->frag_next_idx[tid]++; if (ieee->frag_next_idx[tid] >= RTLLIB_FRAG_CACHE_LEN) ieee->frag_next_idx[tid] = 0; if (entry->skb != NULL) dev_kfree_skb_any(entry->skb); entry->first_frag_time = jiffies; entry->seq = seq; entry->last_frag = frag; entry->skb = skb; memcpy(entry->src_addr, hdr->addr2, ETH_ALEN); memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN); } else { /* received a fragment of a frame for which the head fragment * should have already been received */ entry = rtllib_frag_cache_find(ieee, seq, frag, tid, hdr->addr2, hdr->addr1); if (entry != NULL) { entry->last_frag = frag; skb = entry->skb; } } return skb; } /* Called only as a tasklet (software IRQ) */ static int rtllib_frag_cache_invalidate(struct rtllib_device *ieee, struct rtllib_hdr_4addr *hdr) { u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct rtllib_frag_entry *entry; struct rtllib_hdr_3addrqos *hdr_3addrqos; struct rtllib_hdr_4addrqos *hdr_4addrqos; u8 tid; if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) && RTLLIB_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else if (RTLLIB_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else { tid = 0; } entry = rtllib_frag_cache_find(ieee, seq, -1, tid, hdr->addr2, hdr->addr1); if (entry == NULL) { RTLLIB_DEBUG_FRAG( "could not invalidate fragment cache " "entry (seq=%u)\n", seq); return -1; } entry->skb = NULL; return 0; } /* rtllib_rx_frame_mgtmt * * Responsible for handling management control frames * * Called by rtllib_rx */ static inline int rtllib_rx_frame_mgmt(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats, u16 
type, u16 stype) { /* On the struct stats definition there is written that * this is not mandatory.... but seems that the probe * response parser uses it */ struct rtllib_hdr_3addr * hdr = (struct rtllib_hdr_3addr *)skb->data; rx_stats->len = skb->len; rtllib_rx_mgt(ieee, skb, rx_stats); if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN))) { dev_kfree_skb_any(skb); return 0; } rtllib_rx_frame_softmac(ieee, skb, rx_stats, type, stype); dev_kfree_skb_any(skb); return 0; } /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static unsigned char bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; /* No encapsulation header if EtherType < 0x600 (=length) */ /* Called by rtllib_rx_frame_decrypt */ static int rtllib_is_eapol_frame(struct rtllib_device *ieee, struct sk_buff *skb, size_t hdrlen) { struct net_device *dev = ieee->dev; u16 fc, ethertype; struct rtllib_hdr_4addr *hdr; u8 *pos; if (skb->len < 24) return 0; hdr = (struct rtllib_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); /* check that the frame is unicast frame to us */ if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) == RTLLIB_FCTL_TODS && memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 && memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) { /* ToDS frame with own addr BSSID and DA */ } else if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) == RTLLIB_FCTL_FROMDS && memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) { /* FromDS frame with own addr as DA */ } else return 0; if (skb->len < 24 + 8) return 0; /* check for port access entity Ethernet type */ pos = skb->data + hdrlen; ethertype = (pos[6] << 8) | pos[7]; if (ethertype == ETH_P_PAE) return 1; return 0; } /* Called only as a tasklet (software IRQ), by rtllib_rx */ static inline int 
rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb, struct lib80211_crypt_data *crypt) { struct rtllib_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; if (ieee->hwsec_active) { struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->bHwSec = 1; if (ieee->need_sw_enc) tcb_desc->bHwSec = 0; } hdr = (struct rtllib_hdr_4addr *) skb->data; hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { RTLLIB_DEBUG_DROP( "decryption failed (SA= %pM" ") res=%d\n", hdr->addr2, res); if (res == -2) RTLLIB_DEBUG_DROP("Decryption failed ICV " "mismatch (key %d)\n", skb->data[hdrlen + 3] >> 6); ieee->ieee_stats.rx_discards_undecryptable++; return -1; } return res; } /* Called only as a tasklet (software IRQ), by rtllib_rx */ static inline int rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb, int keyidx, struct lib80211_crypt_data *crypt) { struct rtllib_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; if (ieee->hwsec_active) { struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->bHwSec = 1; if (ieee->need_sw_enc) tcb_desc->bHwSec = 0; } hdr = (struct rtllib_hdr_4addr *) skb->data; hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed" " (SA= %pM keyidx=%d)\n", ieee->dev->name, hdr->addr2, keyidx); return -1; } return 0; } /* this function is stolen from ipw2200 driver*/ #define IEEE_PACKET_RETRY_TIME (5*HZ) static int is_duplicate_packet(struct rtllib_device *ieee, struct rtllib_hdr_4addr *header) { u16 fc = le16_to_cpu(header->frame_ctl); u16 sc = 
le16_to_cpu(header->seq_ctl); u16 seq = WLAN_GET_SEQ_SEQ(sc); u16 frag = WLAN_GET_SEQ_FRAG(sc); u16 *last_seq, *last_frag; unsigned long *last_time; struct rtllib_hdr_3addrqos *hdr_3addrqos; struct rtllib_hdr_4addrqos *hdr_4addrqos; u8 tid; if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) && RTLLIB_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct rtllib_hdr_4addrqos *)header; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else if (RTLLIB_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct rtllib_hdr_3addrqos *)header; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else { tid = 0; } switch (ieee->iw_mode) { case IW_MODE_ADHOC: { struct list_head *p; struct ieee_ibss_seq *entry = NULL; u8 *mac = header->addr2; int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE; list_for_each(p, &ieee->ibss_mac_hash[index]) { entry = list_entry(p, struct ieee_ibss_seq, list); if (!memcmp(entry->mac, mac, ETH_ALEN)) break; } if (p == &ieee->ibss_mac_hash[index]) { entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC); if (!entry) { printk(KERN_WARNING "Cannot malloc new mac entry\n"); return 0; } memcpy(entry->mac, mac, ETH_ALEN); entry->seq_num[tid] = seq; entry->frag_num[tid] = frag; entry->packet_time[tid] = jiffies; list_add(&entry->list, &ieee->ibss_mac_hash[index]); return 0; } last_seq = &entry->seq_num[tid]; last_frag = &entry->frag_num[tid]; last_time = &entry->packet_time[tid]; break; } case IW_MODE_INFRA: last_seq = &ieee->last_rxseq_num[tid]; last_frag = &ieee->last_rxfrag_num[tid]; last_time = &ieee->last_packet_time[tid]; break; default: return 0; } if ((*last_seq == seq) && time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) { if (*last_frag == frag) goto drop; if (*last_frag + 1 != frag) /* out-of-order fragment */ goto drop; } else *last_seq = seq; *last_frag = frag; *last_time = jiffies; return 0; drop: return 1; } static bool AddReorderEntry(struct rx_ts_record *pTS, struct rx_reorder_entry 
*pReorderEntry) { struct list_head *pList = &pTS->RxPendingPktList; while (pList->next != &pTS->RxPendingPktList) { if (SN_LESS(pReorderEntry->SeqNum, ((struct rx_reorder_entry *) list_entry(pList->next, struct rx_reorder_entry, List))->SeqNum)) pList = pList->next; else if (SN_EQUAL(pReorderEntry->SeqNum, ((struct rx_reorder_entry *)list_entry(pList->next, struct rx_reorder_entry, List))->SeqNum)) return false; else break; } pReorderEntry->List.next = pList->next; pReorderEntry->List.next->prev = &pReorderEntry->List; pReorderEntry->List.prev = pList; pList->next = &pReorderEntry->List; return true; } void rtllib_indicate_packets(struct rtllib_device *ieee, struct rtllib_rxb **prxbIndicateArray, u8 index) { struct net_device_stats *stats = &ieee->stats; u8 i = 0 , j = 0; u16 ethertype; for (j = 0; j < index; j++) { struct rtllib_rxb *prxb = prxbIndicateArray[j]; for (i = 0; i < prxb->nr_subframes; i++) { struct sk_buff *sub_skb = prxb->subframes[i]; /* convert hdr + possible LLC headers into Ethernet header */ ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7]; if (sub_skb->len >= 8 && ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) { /* remove RFC1042 or Bridge-Tunnel encapsulation * and replace EtherType */ skb_pull(sub_skb, SNAP_SIZE); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN); } else { u16 len; /* Leave Ethernet header part of hdr and full payload */ len = htons(sub_skb->len); memcpy(skb_push(sub_skb, 2), &len, 2); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN); } /* Indicat the packets to upper layer */ if (sub_skb) { stats->rx_packets++; stats->rx_bytes += sub_skb->len; memset(sub_skb->cb, 0, sizeof(sub_skb->cb)); sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev); sub_skb->dev = 
ieee->dev; sub_skb->dev->stats.rx_packets++; sub_skb->dev->stats.rx_bytes += sub_skb->len; sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ ieee->last_rx_ps_time = jiffies; netif_rx(sub_skb); } } kfree(prxb); prxb = NULL; } } void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee, struct rx_ts_record *pTS) { struct rx_reorder_entry *pRxReorderEntry; u8 RfdCnt = 0; del_timer_sync(&pTS->RxPktPendingTimer); while (!list_empty(&pTS->RxPendingPktList)) { if (RfdCnt >= REORDER_WIN_SIZE) { printk(KERN_INFO "-------------->%s() error! RfdCnt >= REORDER_WIN_SIZE\n", __func__); break; } pRxReorderEntry = (struct rx_reorder_entry *)list_entry(pTS->RxPendingPktList.prev, struct rx_reorder_entry, List); RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate SeqNum %d!\n", __func__, pRxReorderEntry->SeqNum); list_del_init(&pRxReorderEntry->List); ieee->RfdArray[RfdCnt] = pRxReorderEntry->prxb; RfdCnt = RfdCnt + 1; list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List); } rtllib_indicate_packets(ieee, ieee->RfdArray, RfdCnt); pTS->RxIndicateSeq = 0xffff; } static void RxReorderIndicatePacket(struct rtllib_device *ieee, struct rtllib_rxb *prxb, struct rx_ts_record *pTS, u16 SeqNum) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; struct rx_reorder_entry *pReorderEntry = NULL; u8 WinSize = pHTInfo->RxReorderWinSize; u16 WinEnd = 0; u8 index = 0; bool bMatchWinStart = false, bPktInBuf = false; unsigned long flags; RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Seq is %d, pTS->RxIndicateSeq" " is %d, WinSize is %d\n", __func__, SeqNum, pTS->RxIndicateSeq, WinSize); spin_lock_irqsave(&(ieee->reorder_spinlock), flags); WinEnd = (pTS->RxIndicateSeq + WinSize - 1) % 4096; /* Rx Reorder initialize condition.*/ if (pTS->RxIndicateSeq == 0xffff) pTS->RxIndicateSeq = SeqNum; /* Drop out the packet which SeqNum is smaller than WinStart */ if (SN_LESS(SeqNum, pTS->RxIndicateSeq)) { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Packet Drop! 
IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); pHTInfo->RxReorderDropCounter++; { int i; for (i = 0; i < prxb->nr_subframes; i++) dev_kfree_skb(prxb->subframes[i]); kfree(prxb); prxb = NULL; } spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); return; } /* * Sliding window manipulation. Conditions includes: * 1. Incoming SeqNum is equal to WinStart =>Window shift 1 * 2. Incoming SeqNum is larger than the WinEnd => Window shift N */ if (SN_EQUAL(SeqNum, pTS->RxIndicateSeq)) { pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096; bMatchWinStart = true; } else if (SN_LESS(WinEnd, SeqNum)) { if (SeqNum >= (WinSize - 1)) pTS->RxIndicateSeq = SeqNum + 1 - WinSize; else pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum + 1)) + 1; RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Window Shift! IndicateSeq: %d," " NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); } /* * Indication process. * After Packet dropping and Sliding Window shifting as above, we can * now just indicate the packets with the SeqNum smaller than latest * WinStart and struct buffer other packets. */ /* For Rx Reorder condition: * 1. All packets with SeqNum smaller than WinStart => Indicate * 2. All packets with SeqNum larger than or equal to * WinStart => Buffer it. */ if (bMatchWinStart) { /* Current packet is going to be indicated.*/ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Packets indication!! " "IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); ieee->prxbIndicateArray[0] = prxb; index = 1; } else { /* Current packet is going to be inserted into pending list.*/ if (!list_empty(&ieee->RxReorder_Unused_List)) { pReorderEntry = (struct rx_reorder_entry *) list_entry(ieee->RxReorder_Unused_List.next, struct rx_reorder_entry, List); list_del_init(&pReorderEntry->List); /* Make a reorder entry and insert into a the packet list.*/ pReorderEntry->SeqNum = SeqNum; pReorderEntry->prxb = prxb; if (!AddReorderEntry(pTS, pReorderEntry)) { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Duplicate packet is " "dropped!! 
IndicateSeq: %d, " "NewSeq: %d\n", __func__, pTS->RxIndicateSeq, SeqNum); list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List); { int i; for (i = 0; i < prxb->nr_subframes; i++) dev_kfree_skb(prxb->subframes[i]); kfree(prxb); prxb = NULL; } } else { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Pkt insert into struct buffer!! " "IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); } } else { /* * Packets are dropped if there are not enough reorder * entries. This part should be modified!! We can just * indicate all the packets in struct buffer and get * reorder entries. */ RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():" " There is no reorder entry!! Packet is " "dropped!!\n"); { int i; for (i = 0; i < prxb->nr_subframes; i++) dev_kfree_skb(prxb->subframes[i]); kfree(prxb); prxb = NULL; } } } /* Check if there is any packet need indicate.*/ while (!list_empty(&pTS->RxPendingPktList)) { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): start RREORDER indicate\n", __func__); pReorderEntry = (struct rx_reorder_entry *)list_entry(pTS->RxPendingPktList.prev, struct rx_reorder_entry, List); if (SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) || SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) { /* This protect struct buffer from overflow. */ if (index >= REORDER_WIN_SIZE) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicate" "Packet(): Buffer overflow!!\n"); bPktInBuf = true; break; } list_del_init(&pReorderEntry->List); if (SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096; ieee->prxbIndicateArray[index] = pReorderEntry->prxb; RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate SeqNum" " %d!\n", __func__, pReorderEntry->SeqNum); index++; list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List); } else { bPktInBuf = true; break; } } /* Handling pending timer. 
Set this timer to prevent from long time * Rx buffering.*/ if (index > 0) { if (timer_pending(&pTS->RxPktPendingTimer)) del_timer_sync(&pTS->RxPktPendingTimer); pTS->RxTimeoutIndicateSeq = 0xffff; if (index > REORDER_WIN_SIZE) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():" " Rx Reorer struct buffer full!!\n"); spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); return; } rtllib_indicate_packets(ieee, ieee->prxbIndicateArray, index); bPktInBuf = false; } if (bPktInBuf && pTS->RxTimeoutIndicateSeq == 0xffff) { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): SET rx timeout timer\n", __func__); pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq; mod_timer(&pTS->RxPktPendingTimer, jiffies + MSECS(pHTInfo->RxReorderPendingTime)); } spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); } static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats, struct rtllib_rxb *rxb, u8 *src, u8 *dst) { struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); u16 LLCOffset = sizeof(struct rtllib_hdr_3addr); u16 ChkLength; bool bIsAggregateFrame = false; u16 nSubframe_Length; u8 nPadding_Length = 0; u16 SeqNum = 0; struct sk_buff *sub_skb; u8 *data_ptr; /* just for debug purpose */ SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl)); if ((RTLLIB_QOS_HAS_SEQ(fc)) && (((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved)) bIsAggregateFrame = true; if (RTLLIB_QOS_HAS_SEQ(fc)) LLCOffset += 2; if (rx_stats->bContainHTC) LLCOffset += sHTCLng; ChkLength = LLCOffset; if (skb->len <= ChkLength) return 0; skb_pull(skb, LLCOffset); ieee->bIsAggregateFrame = bIsAggregateFrame; if (!bIsAggregateFrame) { rxb->nr_subframes = 1; /* altered by clark 3/30/2010 * The struct buffer size of the skb indicated to upper layer * must be less than 5000, or the defraged IP datagram * in the IP layer will exceed "ipfrag_high_tresh" and be * discarded. 
so there must not use the function * "skb_copy" and "skb_clone" for "skb". */ /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(RTLLIB_SKBBUFFER_SIZE); skb_reserve(sub_skb, 12); data_ptr = (u8 *)skb_put(sub_skb, skb->len); memcpy(data_ptr, skb->data, skb->len); sub_skb->dev = ieee->dev; rxb->subframes[0] = sub_skb; memcpy(rxb->src, src, ETH_ALEN); memcpy(rxb->dst, dst, ETH_ALEN); rxb->subframes[0]->dev = ieee->dev; return 1; } else { rxb->nr_subframes = 0; memcpy(rxb->src, src, ETH_ALEN); memcpy(rxb->dst, dst, ETH_ALEN); while (skb->len > ETHERNET_HEADER_SIZE) { /* Offset 12 denote 2 mac address */ nSubframe_Length = *((u16 *)(skb->data + 12)); nSubframe_Length = (nSubframe_Length >> 8) + (nSubframe_Length << 8); if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) { printk(KERN_INFO "%s: A-MSDU parse error!! " "pRfd->nTotalSubframe : %d\n",\ __func__, rxb->nr_subframes); printk(KERN_INFO "%s: A-MSDU parse error!! " "Subframe Length: %d\n", __func__, nSubframe_Length); printk(KERN_INFO "nRemain_Length is %d and " "nSubframe_Length is : %d\n", skb->len, nSubframe_Length); printk(KERN_INFO "The Packet SeqNum is %d\n", SeqNum); return 0; } /* move the data point to data content */ skb_pull(skb, ETHERNET_HEADER_SIZE); /* altered by clark 3/30/2010 * The struct buffer size of the skb indicated to upper layer * must be less than 5000, or the defraged IP datagram * in the IP layer will exceed "ipfrag_high_tresh" and be * discarded. so there must not use the function * "skb_copy" and "skb_clone" for "skb". */ /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); skb_reserve(sub_skb, 12); data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length); memcpy(data_ptr, skb->data, nSubframe_Length); sub_skb->dev = ieee->dev; rxb->subframes[rxb->nr_subframes++] = sub_skb; if (rxb->nr_subframes >= MAX_SUBFRAME_COUNT) { RTLLIB_DEBUG_RX("ParseSubframe(): Too many " "Subframes! 
Packets dropped!\n"); break; } skb_pull(skb, nSubframe_Length); if (skb->len != 0) { nPadding_Length = 4 - ((nSubframe_Length + ETHERNET_HEADER_SIZE) % 4); if (nPadding_Length == 4) nPadding_Length = 0; if (skb->len < nPadding_Length) return 0; skb_pull(skb, nPadding_Length); } } return rxb->nr_subframes; } } static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats) { struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); size_t hdrlen = 0; hdrlen = rtllib_get_hdrlen(fc); if (HTCCheck(ieee, skb->data)) { if (net_ratelimit()) printk(KERN_INFO "%s: find HTCControl!\n", __func__); hdrlen += 4; rx_stats->bContainHTC = 1; } if (RTLLIB_QOS_HAS_SEQ(fc)) rx_stats->bIsQosData = 1; return hdrlen; } static int rtllib_rx_check_duplicate(struct rtllib_device *ieee, struct sk_buff *skb, u8 multicast) { struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data; u16 fc, sc; u8 frag, type, stype; fc = le16_to_cpu(hdr->frame_ctl); type = WLAN_FC_GET_TYPE(fc); stype = WLAN_FC_GET_STYPE(fc); sc = le16_to_cpu(hdr->seq_ctl); frag = WLAN_GET_SEQ_FRAG(sc); if ((ieee->pHTInfo->bCurRxReorderEnable == false) || !ieee->current_network.qos_data.active || !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) { if (!((type == RTLLIB_FTYPE_MGMT) && (stype == RTLLIB_STYPE_BEACON))) { if (is_duplicate_packet(ieee, hdr)) return -1; } } else { struct rx_ts_record *pRxTS = NULL; if (GetTs(ieee, (struct ts_common_info **) &pRxTS, hdr->addr2, (u8)Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) { if ((fc & (1<<11)) && (frag == pRxTS->RxLastFragNum) && (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum)) { return -1; } else { pRxTS->RxLastFragNum = frag; pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc); } } else { RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR!!%s(): No TS!! 
Skip" " the check!!\n", __func__); return -1; } } return 0; } static void rtllib_rx_extract_addr(struct rtllib_device *ieee, struct rtllib_hdr_4addr *hdr, u8 *dst, u8 *src, u8 *bssid) { u16 fc = le16_to_cpu(hdr->frame_ctl); switch (fc & (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) { case RTLLIB_FCTL_FROMDS: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr3, ETH_ALEN); memcpy(bssid, hdr->addr2, ETH_ALEN); break; case RTLLIB_FCTL_TODS: memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); memcpy(bssid, hdr->addr1, ETH_ALEN); break; case RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS: memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr4, ETH_ALEN); memcpy(bssid, ieee->current_network.bssid, ETH_ALEN); break; case 0: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); memcpy(bssid, hdr->addr3, ETH_ALEN); break; } } static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc, u8 *dst, u8 *src, u8 *bssid, u8 *addr2) { u8 zero_addr[ETH_ALEN] = {0}; u8 type, stype; type = WLAN_FC_GET_TYPE(fc); stype = WLAN_FC_GET_STYPE(fc); /* Filter frames from different BSS */ if (((fc & RTLLIB_FCTL_DSTODS) != RTLLIB_FCTL_DSTODS) && (compare_ether_addr(ieee->current_network.bssid, bssid) != 0) && memcmp(ieee->current_network.bssid, zero_addr, ETH_ALEN)) { return -1; } /* Filter packets sent by an STA that will be forwarded by AP */ if (ieee->IntelPromiscuousModeInfo.bPromiscuousOn && ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame) { if ((fc & RTLLIB_FCTL_TODS) && !(fc & RTLLIB_FCTL_FROMDS) && (compare_ether_addr(dst, ieee->current_network.bssid) != 0) && (compare_ether_addr(bssid, ieee->current_network.bssid) == 0)) { return -1; } } /* Nullfunc frames may have PS-bit set, so they must be passed to * hostap_handle_sta_rx() before being dropped here. 
	 */
	if (!ieee->IntelPromiscuousModeInfo.bPromiscuousOn) {
		/* Drop data-type frames whose subtype carries no payload we
		 * handle; NULLFUNC is excluded from the debug print since it
		 * legitimately has no data. */
		if (stype != RTLLIB_STYPE_DATA &&
		    stype != RTLLIB_STYPE_DATA_CFACK &&
		    stype != RTLLIB_STYPE_DATA_CFPOLL &&
		    stype != RTLLIB_STYPE_DATA_CFACKPOLL &&
		    stype != RTLLIB_STYPE_QOS_DATA) {
			if (stype != RTLLIB_STYPE_NULLFUNC)
				RTLLIB_DEBUG_DROP(
					"RX: dropped data frame "
					"with no data (type=0x%02x, "
					"subtype=0x%02x)\n",
					type, stype);
			return -1;
		}
	}

	if (ieee->iw_mode != IW_MODE_MESH) {
		/* packets from our adapter are dropped (echo) */
		if (!memcmp(src, ieee->dev->dev_addr, ETH_ALEN))
			return -1;

		/* {broad,multi}cast packets to our BSS go through */
		if (is_multicast_ether_addr(dst) ||
		    is_broadcast_ether_addr(dst)) {
			if (memcmp(bssid, ieee->current_network.bssid,
				   ETH_ALEN))
				return -1;
		}
	}
	return 0;
}

/* Look up the host-decryption context for a received frame.
 * On success *crypt is either a usable context or NULL (no decryption
 * needed/possible). Returns -1 when the frame is WEP-protected but no
 * key is configured, so the caller must drop it. */
static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb,
			       struct lib80211_crypt_data **crypt,
			       size_t hdrlen)
{
	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
	u16 fc = le16_to_cpu(hdr->frame_ctl);
	int idx = 0;

	if (ieee->host_decrypt) {
		/* Key index lives in the top two bits of the 4th IV octet */
		if (skb->len >= hdrlen + 3)
			idx = skb->data[hdrlen + 3] >> 6;

		*crypt = ieee->crypt_info.crypt[idx];
		/* allow NULL decrypt to indicate an station specific override
		 * for default encryption */
		if (*crypt && ((*crypt)->ops == NULL ||
		    (*crypt)->ops->decrypt_mpdu == NULL))
			*crypt = NULL;

		if (!*crypt && (fc & RTLLIB_FCTL_WEP)) {
			/* This seems to be triggered by some (multicast?)
			 * frames from other than current BSS, so just drop the
			 * frames silently instead of filling system log with
			 * these reports.
			 */
			RTLLIB_DEBUG_DROP("Decryption failed (not set)"
					  " (SA= %pM)\n", hdr->addr2);
			ieee->ieee_stats.rx_discards_undecryptable++;
			return -1;
		}
	}

	return 0;
}

/* Decrypt a data frame in place and, if it is a fragment, reassemble it
 * through the fragment cache.
 * Returns 0 on success (skb now holds the full plaintext MSDU),
 * -1 to drop the frame, -2 when more fragments are still expected
 * (the skb has been consumed into the fragment cache). */
static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
			     struct rtllib_rx_stats *rx_stats,
			     struct lib80211_crypt_data *crypt, size_t hdrlen)
{
	struct rtllib_hdr_4addr *hdr;
	int keyidx = 0;
	u16 fc, sc;
	u8 frag;

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	fc = le16_to_cpu(hdr->frame_ctl);
	sc = le16_to_cpu(hdr->seq_ctl);
	frag = WLAN_GET_SEQ_FRAG(sc);

	/* Tell the HW layer whether software decryption is still required */
	if ((!rx_stats->Decrypted))
		ieee->need_sw_enc = 1;
	else
		ieee->need_sw_enc = 0;

	keyidx = rtllib_rx_frame_decrypt(ieee, skb, crypt);
	if (ieee->host_decrypt && (fc & RTLLIB_FCTL_WEP) && (keyidx < 0)) {
		printk(KERN_INFO "%s: decrypt frame error\n", __func__);
		return -1;
	}

	hdr = (struct rtllib_hdr_4addr *) skb->data;
	if ((frag != 0 || (fc & RTLLIB_FCTL_MOREFRAGS))) {
		int flen;
		struct sk_buff *frag_skb = rtllib_frag_cache_get(ieee, hdr);

		RTLLIB_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);
		if (!frag_skb) {
			RTLLIB_DEBUG(RTLLIB_DL_RX | RTLLIB_DL_FRAG,
				     "Rx cannot get skb from fragment "
				     "cache (morefrag=%d seq=%u frag=%u)\n",
				     (fc & RTLLIB_FCTL_MOREFRAGS) != 0,
				     WLAN_GET_SEQ_SEQ(sc), frag);
			return -1;
		}
		flen = skb->len;
		/* Only the first fragment keeps its 802.11 header */
		if (frag != 0)
			flen -= hdrlen;

		if (frag_skb->tail + flen > frag_skb->end) {
			printk(KERN_WARNING "%s: host decrypted and "
			       "reassembled frame did not fit skb\n",
			       __func__);
			rtllib_frag_cache_invalidate(ieee, hdr);
			return -1;
		}

		if (frag == 0) {
			/* copy first fragment (including full headers) into
			 * beginning of the fragment cache skb */
			memcpy(skb_put(frag_skb, flen), skb->data, flen);
		} else {
			/* append frame payload to the end of the fragment
			 * cache skb */
			memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
			       flen);
		}
		dev_kfree_skb_any(skb);
		skb = NULL;

		if (fc & RTLLIB_FCTL_MOREFRAGS) {
			/* more fragments expected - leave the skb in fragment
			 * cache for now; it will be delivered to upper layers
			 * after all fragments have been received */
			return -2;
		}

		/* this was the last fragment and the frame will be
		 * delivered, so remove skb from fragment cache */
		skb = frag_skb;
		hdr = (struct rtllib_hdr_4addr *) skb->data;
		rtllib_frag_cache_invalidate(ieee, hdr);
	}

	/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
	 * encrypted/authenticated */
	if (ieee->host_decrypt && (fc & RTLLIB_FCTL_WEP) &&
	    rtllib_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
		printk(KERN_INFO "%s: ==>decrypt msdu error\n", __func__);
		return -1;
	}

	hdr = (struct rtllib_hdr_4addr *) skb->data;
	if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep) {
		if (/*ieee->ieee802_1x &&*/
		    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
			/* pass unencrypted EAPOL frames even if encryption is
			 * configured */
			struct eapol *eap = (struct eapol *)(skb->data + 24);

			RTLLIB_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
					 eap_get_type(eap->type));
		} else {
			RTLLIB_DEBUG_DROP(
				"encryption configured, but RX "
				"frame not encrypted (SA= %pM)\n",
				hdr->addr2);
			return -1;
		}
	}

	if (crypt && !(fc & RTLLIB_FCTL_WEP) &&
	    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
		struct eapol *eap = (struct eapol *)(skb->data + 24);

		RTLLIB_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
				 eap_get_type(eap->type));
	}

	if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep &&
	    !rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
		RTLLIB_DEBUG_DROP(
			"dropped unencrypted RX data "
			"frame from %pM"
			" (drop_unencrypted=1)\n",
			hdr->addr2);
		return -1;
	}

	if (rtllib_is_eapol_frame(ieee, skb, hdrlen))
		printk(KERN_WARNING "RX: IEEE802.1X EAPOL frame!\n");

	return 0;
}

/* Leave leisure power-save mode when enough unicast traffic has been
 * seen in the current period; always records the RX timestamp. */
static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast,
				      u8 nr_subframes)
{
	if (unicast) {
		if ((ieee->state == RTLLIB_LINKED)) {
			if (((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod +
			    ieee->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
			    (ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) {
				if (ieee->LeisurePSLeave)
					ieee->LeisurePSLeave(ieee->dev);
			}
		}
	}
	ieee->last_rx_ps_time = jiffies;
}

/* Convert each subframe in @rxb to an Ethernet frame and hand it to the
 * network stack via netif_rx(); frees @rxb (but not the subframe skbs,
 * which are consumed by netif_rx). */
static void rtllib_rx_indicate_pkt_legacy(struct rtllib_device *ieee,
		struct rtllib_rx_stats *rx_stats,
		struct rtllib_rxb *rxb,
		u8 *dst,
		u8 *src)
{
	struct net_device *dev = ieee->dev;
	u16 ethertype;
	int i = 0;

	if (rxb == NULL) {
		printk(KERN_INFO "%s: rxb is NULL!!\n", __func__);
		return ;
	}

	for (i = 0; i < rxb->nr_subframes; i++) {
		struct sk_buff *sub_skb = rxb->subframes[i];

		if (sub_skb) {
			/* convert hdr + possible LLC headers into Ethernet
			 * header */
			ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
			if (sub_skb->len >= 8 &&
			    ((memcmp(sub_skb->data, rfc1042_header,
				     SNAP_SIZE) == 0 &&
			    ethertype != ETH_P_AARP &&
			    ethertype != ETH_P_IPX) ||
			    memcmp(sub_skb->data, bridge_tunnel_header,
				   SNAP_SIZE) == 0)) {
				/* remove RFC1042 or Bridge-Tunnel
				 * encapsulation and replace EtherType */
				skb_pull(sub_skb, SNAP_SIZE);
				memcpy(skb_push(sub_skb, ETH_ALEN), src,
				       ETH_ALEN);
				memcpy(skb_push(sub_skb, ETH_ALEN), dst,
				       ETH_ALEN);
			} else {
				u16 len;
				/* Leave Ethernet header part of hdr and full
				 * payload */
				len = htons(sub_skb->len);
				memcpy(skb_push(sub_skb, 2), &len, 2);
				memcpy(skb_push(sub_skb, ETH_ALEN), src,
				       ETH_ALEN);
				memcpy(skb_push(sub_skb, ETH_ALEN), dst,
				       ETH_ALEN);
			}

			ieee->stats.rx_packets++;
			ieee->stats.rx_bytes += sub_skb->len;

			if (is_multicast_ether_addr(dst))
				ieee->stats.multicast++;

			/* Indicate the packets to upper layer */
			memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
			sub_skb->protocol = eth_type_trans(sub_skb, dev);
			sub_skb->dev = dev;
			sub_skb->dev->stats.rx_packets++;
			sub_skb->dev->stats.rx_bytes += sub_skb->len;
			/* 802.11 crc not sufficient */
			sub_skb->ip_summed = CHECKSUM_NONE;
			netif_rx(sub_skb);
		}
	}
	kfree(rxb);
	rxb = NULL;
}

/* Receive path for infrastructure/ad-hoc modes: filters, decrypts,
 * reassembles and indicates a frame. Returns 1 when the skb was
 * consumed, 0 when the caller keeps ownership (dropped). */
static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee,
		 struct sk_buff *skb,
		 struct rtllib_rx_stats *rx_stats)
{
	struct net_device *dev = ieee->dev;
	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
	struct lib80211_crypt_data *crypt = NULL;
	struct rtllib_rxb *rxb = NULL;
	struct rx_ts_record *pTS = NULL;
	u16 fc, sc, SeqNum = 0;
	u8 type,
	    stype, multicast = 0, unicast = 0, nr_subframes = 0, TID = 0;
	u8 dst[ETH_ALEN], src[ETH_ALEN], bssid[ETH_ALEN] = {0}, *payload;
	size_t hdrlen = 0;
	bool bToOtherSTA = false;
	int ret = 0, i = 0;

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	fc = le16_to_cpu(hdr->frame_ctl);
	type = WLAN_FC_GET_TYPE(fc);
	stype = WLAN_FC_GET_STYPE(fc);
	sc = le16_to_cpu(hdr->seq_ctl);

	/*Filter pkt not to me*/
	multicast = is_multicast_ether_addr(hdr->addr1)|is_broadcast_ether_addr(hdr->addr1);
	unicast = !multicast;
	if (unicast && (compare_ether_addr(dev->dev_addr, hdr->addr1) != 0)) {
		/* in promiscuous mode still process frames for other STAs */
		if (ieee->bNetPromiscuousMode)
			bToOtherSTA = true;
		else
			goto rx_dropped;
	}

	/*Filter pkt has too small length */
	hdrlen = rtllib_rx_get_hdrlen(ieee, skb, rx_stats);
	if (skb->len < hdrlen) {
		printk(KERN_INFO "%s():ERR!!! skb->len is smaller than hdrlen\n",
		       __func__);
		goto rx_dropped;
	}

	/* Filter Duplicate pkt */
	ret = rtllib_rx_check_duplicate(ieee, skb, multicast);
	if (ret < 0)
		goto rx_dropped;

	/* Filter CTRL Frame */
	if (type == RTLLIB_FTYPE_CTL)
		goto rx_dropped;

	/* Filter MGNT Frame */
	if (type == RTLLIB_FTYPE_MGMT) {
		if (bToOtherSTA)
			goto rx_dropped;
		if (rtllib_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
			goto rx_dropped;
		else
			goto rx_exit;
	}

	/* Filter WAPI DATA Frame */

	/* Update statstics for AP roaming */
	if (!bToOtherSTA) {
		ieee->LinkDetectInfo.NumRecvDataInPeriod++;
		ieee->LinkDetectInfo.NumRxOkInPeriod++;
	}
	dev->last_rx = jiffies;

	/* Data frame - extract src/dst addresses */
	rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);

	/* Filter Data frames */
	ret = rtllib_rx_data_filter(ieee, fc, dst, src, bssid, hdr->addr2);
	if (ret < 0)
		goto rx_dropped;

	if (skb->len == hdrlen)
		goto rx_dropped;

	/* Send pspoll based on moredata */
	if ((ieee->iw_mode == IW_MODE_INFRA) &&
	    (ieee->sta_sleep == LPS_IS_SLEEP) &&
	    (ieee->polling) && (!bToOtherSTA)) {
		if (WLAN_FC_MORE_DATA(fc)) {
			/* more data bit is set, let's request a new frame
			 * from the AP */
			rtllib_sta_ps_send_pspoll_frame(ieee);
		} else {
			ieee->polling = false;
		}
	}

	/* Get crypt if encrypted */
	ret = rtllib_rx_get_crypt(ieee, skb, &crypt, hdrlen);
	if (ret == -1)
		goto rx_dropped;

	/* Decrypt data frame (including reassemble) */
	ret = rtllib_rx_decrypt(ieee, skb, rx_stats, crypt, hdrlen);
	if (ret == -1)
		goto rx_dropped;
	else if (ret == -2)
		/* skb consumed into the fragment cache */
		goto rx_exit;

	/* Get TS for Rx Reorder */
	hdr = (struct rtllib_hdr_4addr *) skb->data;
	if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
	    && !is_multicast_ether_addr(hdr->addr1) &&
	    !is_broadcast_ether_addr(hdr->addr1) && (!bToOtherSTA)) {
		TID = Frame_QoSTID(skb->data);
		SeqNum = WLAN_GET_SEQ_SEQ(sc);
		GetTs(ieee, (struct ts_common_info **) &pTS, hdr->addr2, TID,
		      RX_DIR, true);
		if (TID != 0 && TID != 3)
			ieee->bis_any_nonbepkts = true;
	}

	/* Parse rx data frame (For AMSDU) */
	/* skb: hdr + (possible reassembled) full plaintext payload */
	payload = skb->data + hdrlen;
	rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC);
	if (rxb == NULL) {
		RTLLIB_DEBUG(RTLLIB_DL_ERR,
			     "%s(): kmalloc rxb error\n", __func__);
		goto rx_dropped;
	}
	/* to parse amsdu packets */
	/* qos data packets & reserved bit is 1 */
	if (parse_subframe(ieee, skb, rx_stats, rxb, src, dst) == 0) {
		/* only to free rxb, and not submit the packets to upper
		 * layer */
		for (i = 0; i < rxb->nr_subframes; i++)
			dev_kfree_skb(rxb->subframes[i]);
		kfree(rxb);
		rxb = NULL;
		goto rx_dropped;
	}

	/* Update WAPI PN */

	/* Check if leave LPS */
	if (!bToOtherSTA) {
		if (ieee->bIsAggregateFrame)
			nr_subframes = rxb->nr_subframes;
		else
			nr_subframes = 1;
		if (unicast)
			ieee->LinkDetectInfo.NumRxUnicastOkInPeriod += nr_subframes;
		rtllib_rx_check_leave_lps(ieee, unicast, nr_subframes);
	}

	/* Indicate packets to upper layer or Rx Reorder */
	if (ieee->pHTInfo->bCurRxReorderEnable == false || pTS == NULL ||
	    bToOtherSTA)
		rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
	else
		RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);

	dev_kfree_skb(skb);

rx_exit:
	return 1;

rx_dropped:
	if (rxb != NULL) {
		kfree(rxb);
		rxb = NULL;
	}
	ieee->stats.rx_dropped++;

	/* Returning 0 indicates to caller that we have not handled the SKB--
	 * so it is still allocated and can be used again by underlying
	 * hardware as a DMA target */
	return 0;
}

/* Master/repeater RX path: not implemented, frame is left to the caller. */
static int rtllib_rx_Master(struct rtllib_device *ieee, struct sk_buff *skb,
		 struct rtllib_rx_stats *rx_stats)
{
	return 0;
}

/* Monitor-mode RX path: sanity-check the header, account for an HT
 * control field, then hand the raw frame to rtllib_monitor_rx(). */
static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb,
		 struct rtllib_rx_stats *rx_stats)
{
	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
	u16 fc = le16_to_cpu(hdr->frame_ctl);
	size_t hdrlen = rtllib_get_hdrlen(fc);

	if (skb->len < hdrlen) {
		printk(KERN_INFO "%s():ERR!!! skb->len is smaller than hdrlen\n",
		       __func__);
		return 0;
	}

	if (HTCCheck(ieee, skb->data)) {
		if (net_ratelimit())
			printk(KERN_INFO "%s: Find HTCControl!\n", __func__);
		hdrlen += 4;
	}

	rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen);
	ieee->stats.rx_packets++;
	ieee->stats.rx_bytes += skb->len;

	return 1;
}

/* Mesh RX path: not implemented, frame is left to the caller. */
static int rtllib_rx_Mesh(struct rtllib_device *ieee, struct sk_buff *skb,
		 struct rtllib_rx_stats *rx_stats)
{
	return 0;
}

/* All received frames are sent to this function. @skb contains the frame in
 * IEEE 802.11 format, i.e., in the format it was sent over air.
 * This function is called only as a tasklet (software IRQ).
 */
int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb,
		 struct rtllib_rx_stats *rx_stats)
{
	int ret = 0;

	if ((NULL == ieee) || (NULL == skb) || (NULL == rx_stats)) {
		printk(KERN_INFO "%s: Input parameters NULL!\n", __func__);
		goto rx_dropped;
	}
	if (skb->len < 10) {
		printk(KERN_INFO "%s: SKB length < 10\n", __func__);
		goto rx_dropped;
	}

	/* Dispatch on the interface operating mode */
	switch (ieee->iw_mode) {
	case IW_MODE_ADHOC:
	case IW_MODE_INFRA:
		ret = rtllib_rx_InfraAdhoc(ieee, skb, rx_stats);
		break;
	case IW_MODE_MASTER:
	case IW_MODE_REPEAT:
		ret = rtllib_rx_Master(ieee, skb, rx_stats);
		break;
	case IW_MODE_MONITOR:
		ret = rtllib_rx_Monitor(ieee, skb, rx_stats);
		break;
	case IW_MODE_MESH:
		ret = rtllib_rx_Mesh(ieee, skb, rx_stats);
		break;
	default:
		printk(KERN_INFO "%s: ERR iw mode!!!\n", __func__);
		break;
	}

	return ret;

rx_dropped:
	if (ieee)
		ieee->stats.rx_dropped++;
	return 0;
}
EXPORT_SYMBOL(rtllib_rx);

/* WMM/WME OUI (Microsoft) used to validate QoS information elements */
static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 };

/*
 * Make sure the structure we read from the beacon packet has
 * the right values
 */
static int rtllib_verify_qos_info(struct rtllib_qos_information_element
				  *info_element, int sub_type)
{
	if (info_element->qui_subtype != sub_type)
		return -1;
	if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN))
		return -1;
	if (info_element->qui_type != QOS_OUI_TYPE)
		return -1;
	if (info_element->version != QOS_VERSION_1)
		return -1;

	return 0;
}

/*
 * Parse a QoS parameter element
 */
static int rtllib_read_qos_param_element(struct rtllib_qos_parameter_info
					 *element_param,
					 struct rtllib_info_element
					 *info_element)
{
	int ret = 0;
	u16 size = sizeof(struct rtllib_qos_parameter_info) - 2;

	if ((info_element == NULL) || (element_param == NULL))
		return -1;

	if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) {
		memcpy(element_param->info_element.qui, info_element->data,
		       info_element->len);
		element_param->info_element.elementID = info_element->id;
		element_param->info_element.length = info_element->len;
	} else
		ret = -1;
	if (ret == 0)
		ret = rtllib_verify_qos_info(&element_param->info_element,
					     QOS_OUI_PARAM_SUB_TYPE);
	return ret;
}

/*
 * Parse a QoS information element
 */
static int rtllib_read_qos_info_element(struct rtllib_qos_information_element
					*element_info,
					struct rtllib_info_element
					*info_element)
{
	int ret = 0;
	u16 size = sizeof(struct rtllib_qos_information_element) - 2;

	if (element_info == NULL)
		return -1;
	if (info_element == NULL)
		return -1;

	if ((info_element->id == QOS_ELEMENT_ID) &&
	    (info_element->len == size)) {
		memcpy(element_info->qui, info_element->data,
		       info_element->len);
		element_info->elementID = info_element->id;
		element_info->length = info_element->len;
	} else
		ret = -1;

	if (ret == 0)
		ret = rtllib_verify_qos_info(element_info,
					     QOS_OUI_INFO_SUB_TYPE);
	return ret;
}

/*
 * Write QoS parameters from the ac parameters.
 */
static int rtllib_qos_convert_ac_to_parameters(struct rtllib_qos_parameter_info
					       *param_elm,
					       struct rtllib_qos_data *qos_data)
{
	struct rtllib_qos_ac_parameter *ac_params;
	struct rtllib_qos_parameters *qos_param = &(qos_data->parameters);
	int rc = 0;
	int i;
	u8 aci;
	u8 acm;

	qos_data->wmm_acm = 0;
	for (i = 0; i < QOS_QUEUE_NUM; i++) {
		ac_params = &(param_elm->ac_params_record[i]);

		aci = (ac_params->aci_aifsn & 0x60) >> 5;
		acm = (ac_params->aci_aifsn & 0x10) >> 4;

		if (aci >= QOS_QUEUE_NUM)
			continue;
		/* Map the AC index to a pair of admission-control bits */
		switch (aci) {
		case 1:
			/* BIT(0) | BIT(3) */
			if (acm)
				qos_data->wmm_acm |= (0x01<<0)|(0x01<<3);
			break;
		case 2:
			/* BIT(4) | BIT(5) */
			if (acm)
				qos_data->wmm_acm |= (0x01<<4)|(0x01<<5);
			break;
		case 3:
			/* BIT(6) | BIT(7) */
			if (acm)
				qos_data->wmm_acm |= (0x01<<6)|(0x01<<7);
			break;
		case 0:
		default:
			/* BIT(1) | BIT(2) */
			if (acm)
				qos_data->wmm_acm |= (0x01<<1)|(0x01<<2);
			break;
		}

		qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;

		/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
		qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ?
				       2 : qos_param->aifs[aci];

		qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;

		qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;

		qos_param->flag[aci] =
		    (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
		qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
	}
	return rc;
}

/*
 * we have a generic data element which it may contain QoS information or
 * parameters element. check the information element length to decide
 * which type to read
 */
static int rtllib_parse_qos_info_param_IE(struct rtllib_info_element
					  *info_element,
					  struct rtllib_network *network)
{
	int rc = 0;
	struct rtllib_qos_information_element qos_info_element;

	rc = rtllib_read_qos_info_element(&qos_info_element, info_element);

	if (rc == 0) {
		network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
		network->flags |= NETWORK_HAS_QOS_INFORMATION;
	} else {
		struct rtllib_qos_parameter_info param_element;

		rc = rtllib_read_qos_param_element(&param_element,
						   info_element);
		if (rc == 0) {
			rtllib_qos_convert_ac_to_parameters(&param_element,
							    &(network->qos_data));
			network->flags |= NETWORK_HAS_QOS_PARAMETERS;
			network->qos_data.param_count =
			    param_element.info_element.ac_info & 0x0F;
		}
	}

	if (rc == 0) {
		RTLLIB_DEBUG_QOS("QoS is supported\n");
		network->qos_data.supported = 1;
	}
	return rc;
}

#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x

/* Return a human-readable name for an information-element id (debug). */
static const char *get_info_element_string(u16 id)
{
	switch (id) {
		MFIE_STRING(SSID);
		MFIE_STRING(RATES);
		MFIE_STRING(FH_SET);
		MFIE_STRING(DS_SET);
		MFIE_STRING(CF_SET);
		MFIE_STRING(TIM);
		MFIE_STRING(IBSS_SET);
		MFIE_STRING(COUNTRY);
		MFIE_STRING(HOP_PARAMS);
		MFIE_STRING(HOP_TABLE);
		MFIE_STRING(REQUEST);
		MFIE_STRING(CHALLENGE);
		MFIE_STRING(POWER_CONSTRAINT);
		MFIE_STRING(POWER_CAPABILITY);
		MFIE_STRING(TPC_REQUEST);
		MFIE_STRING(TPC_REPORT);
		MFIE_STRING(SUPP_CHANNELS);
		MFIE_STRING(CSA);
		MFIE_STRING(MEASURE_REQUEST);
		MFIE_STRING(MEASURE_REPORT);
		MFIE_STRING(QUIET);
		MFIE_STRING(IBSS_DFS);
		MFIE_STRING(RSN);
		MFIE_STRING(RATES_EX);
		MFIE_STRING(GENERIC);
MFIE_STRING(QOS_PARAMETER); default: return "UNKNOWN"; } } static inline void rtllib_extract_country_ie( struct rtllib_device *ieee, struct rtllib_info_element *info_element, struct rtllib_network *network, u8 *addr2) { if (IS_DOT11D_ENABLE(ieee)) { if (info_element->len != 0) { memcpy(network->CountryIeBuf, info_element->data, info_element->len); network->CountryIeLen = info_element->len; if (!IS_COUNTRY_IE_VALID(ieee)) { if ((rtllib_act_scanning(ieee, false) == true) && (ieee->FirstIe_InScan == 1)) printk(KERN_INFO "Received beacon ContryIE, SSID: <%s>\n", network->ssid); Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data); } } if (IS_EQUAL_CIE_SRC(ieee, addr2)) UPDATE_CIE_WATCHDOG(ieee); } } int rtllib_parse_info_param(struct rtllib_device *ieee, struct rtllib_info_element *info_element, u16 length, struct rtllib_network *network, struct rtllib_rx_stats *stats) { u8 i; short offset; u16 tmp_htcap_len = 0; u16 tmp_htinfo_len = 0; u16 ht_realtek_agg_len = 0; u8 ht_realtek_agg_buf[MAX_IE_LEN]; char rates_str[64]; char *p; while (length >= sizeof(*info_element)) { if (sizeof(*info_element) + info_element->len > length) { RTLLIB_DEBUG_MGMT("Info elem: parse failed: " "info_element->len + 2 > left : " "info_element->len+2=%zd left=%d, id=%d.\n", info_element->len + sizeof(*info_element), length, info_element->id); /* We stop processing but don't return an error here * because some misbehaviour APs break this rule. ie. * Orinoco AP1000. 
*/ break; } switch (info_element->id) { case MFIE_TYPE_SSID: if (rtllib_is_empty_essid(info_element->data, info_element->len)) { network->flags |= NETWORK_EMPTY_ESSID; break; } network->ssid_len = min(info_element->len, (u8) IW_ESSID_MAX_SIZE); memcpy(network->ssid, info_element->data, network->ssid_len); if (network->ssid_len < IW_ESSID_MAX_SIZE) memset(network->ssid + network->ssid_len, 0, IW_ESSID_MAX_SIZE - network->ssid_len); RTLLIB_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n", network->ssid, network->ssid_len); break; case MFIE_TYPE_RATES: p = rates_str; network->rates_len = min(info_element->len, MAX_RATES_LENGTH); for (i = 0; i < network->rates_len; i++) { network->rates[i] = info_element->data[i]; p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); if (rtllib_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & RTLLIB_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } if (rtllib_is_cck_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_CCK; } } RTLLIB_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n", rates_str, network->rates_len); break; case MFIE_TYPE_RATES_EX: p = rates_str; network->rates_ex_len = min(info_element->len, MAX_RATES_EX_LENGTH); for (i = 0; i < network->rates_ex_len; i++) { network->rates_ex[i] = info_element->data[i]; p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); if (rtllib_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & RTLLIB_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } } RTLLIB_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n", rates_str, network->rates_ex_len); break; case MFIE_TYPE_DS_SET: RTLLIB_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n", info_element->data[0]); network->channel = info_element->data[0]; break; case MFIE_TYPE_FH_SET: RTLLIB_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n"); break; case MFIE_TYPE_CF_SET: RTLLIB_DEBUG_MGMT("MFIE_TYPE_CF_SET: 
ignored\n"); break; case MFIE_TYPE_TIM: if (info_element->len < 4) break; network->tim.tim_count = info_element->data[0]; network->tim.tim_period = info_element->data[1]; network->dtim_period = info_element->data[1]; if (ieee->state != RTLLIB_LINKED) break; network->last_dtim_sta_time = jiffies; network->dtim_data = RTLLIB_DTIM_VALID; if (info_element->data[2] & 1) network->dtim_data |= RTLLIB_DTIM_MBCAST; offset = (info_element->data[2] >> 1)*2; if (ieee->assoc_id < 8*offset || ieee->assoc_id > 8*(offset + info_element->len - 3)) break; offset = (ieee->assoc_id / 8) - offset; if (info_element->data[3 + offset] & (1 << (ieee->assoc_id % 8))) network->dtim_data |= RTLLIB_DTIM_UCAST; network->listen_interval = network->dtim_period; break; case MFIE_TYPE_ERP: network->erp_value = info_element->data[0]; network->flags |= NETWORK_HAS_ERP_VALUE; RTLLIB_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", network->erp_value); break; case MFIE_TYPE_IBSS_SET: network->atim_window = info_element->data[0]; RTLLIB_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n", network->atim_window); break; case MFIE_TYPE_CHALLENGE: RTLLIB_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n"); break; case MFIE_TYPE_GENERIC: RTLLIB_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len); if (!rtllib_parse_qos_info_param_IE(info_element, network)) break; if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && info_element->data[2] == 0xf2 && info_element->data[3] == 0x01) { network->wpa_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->wpa_ie, info_element, network->wpa_ie_len); break; } if (info_element->len == 7 && info_element->data[0] == 0x00 && info_element->data[1] == 0xe0 && info_element->data[2] == 0x4c && info_element->data[3] == 0x01 && info_element->data[4] == 0x02) network->Turbo_Enable = 1; if (tmp_htcap_len == 0) { if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x90 && info_element->data[2] == 0x4c && 
info_element->data[3] == 0x033) { tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN); if (tmp_htcap_len != 0) { network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ? sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len; memcpy(network->bssht.bdHTCapBuf, info_element->data, network->bssht.bdHTCapLen); } } if (tmp_htcap_len != 0) { network->bssht.bdSupportHT = true; network->bssht.bdHT1R = ((((struct ht_capab_ele *)(network->bssht.bdHTCapBuf))->MCS[1]) == 0); } else { network->bssht.bdSupportHT = false; network->bssht.bdHT1R = false; } } if (tmp_htinfo_len == 0) { if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x90 && info_element->data[2] == 0x4c && info_element->data[3] == 0x034) { tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN); if (tmp_htinfo_len != 0) { network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; if (tmp_htinfo_len) { network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ? 
sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len; memcpy(network->bssht.bdHTInfoBuf, info_element->data, network->bssht.bdHTInfoLen); } } } } if (ieee->aggregation) { if (network->bssht.bdSupportHT) { if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0xe0 && info_element->data[2] == 0x4c && info_element->data[3] == 0x02) { ht_realtek_agg_len = min(info_element->len, (u8)MAX_IE_LEN); memcpy(ht_realtek_agg_buf, info_element->data, info_element->len); } if (ht_realtek_agg_len >= 5) { network->realtek_cap_exit = true; network->bssht.bdRT2RTAggregation = true; if ((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02)) network->bssht.bdRT2RTLongSlotTime = true; if ((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & RT_HT_CAP_USE_92SE)) network->bssht.RT2RT_HT_Mode |= RT_HT_CAP_USE_92SE; } } if (ht_realtek_agg_len >= 5) { if ((ht_realtek_agg_buf[5] & RT_HT_CAP_USE_SOFTAP)) network->bssht.RT2RT_HT_Mode |= RT_HT_CAP_USE_SOFTAP; } } if ((info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x05 && info_element->data[2] == 0xb5) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0a && info_element->data[2] == 0xf7) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x10 && info_element->data[2] == 0x18)) { network->broadcom_cap_exist = true; } if (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0c && info_element->data[2] == 0x43) network->ralink_cap_exist = true; if ((info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x03 && info_element->data[2] == 0x7f) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x13 && info_element->data[2] == 0x74)) network->atheros_cap_exist = true; if ((info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && info_element->data[2] == 0x43)) 
network->marvell_cap_exist = true; if (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96) network->cisco_cap_exist = true; if (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0a && info_element->data[2] == 0xf5) network->airgo_cap_exist = true; if (info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x01) { if (info_element->len == 6) { memcpy(network->CcxRmState, &info_element[4], 2); if (network->CcxRmState[0] != 0) network->bCcxRmEnable = true; else network->bCcxRmEnable = false; network->MBssidMask = network->CcxRmState[1] & 0x07; if (network->MBssidMask != 0) { network->bMBssidValid = true; network->MBssidMask = 0xff << (network->MBssidMask); memcpy(network->MBssid, network->bssid, ETH_ALEN); network->MBssid[5] &= network->MBssidMask; } else { network->bMBssidValid = false; } } else { network->bCcxRmEnable = false; } } if (info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x03) { if (info_element->len == 5) { network->bWithCcxVerNum = true; network->BssCcxVerNumber = info_element->data[4]; } else { network->bWithCcxVerNum = false; network->BssCcxVerNumber = 0; } } if (info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && info_element->data[2] == 0xf2 && info_element->data[3] == 0x04) { RTLLIB_DEBUG_MGMT("MFIE_TYPE_WZC: %d bytes\n", info_element->len); network->wzc_ie_len = min(info_element->len+2, MAX_WZC_IE_LEN); memcpy(network->wzc_ie, info_element, network->wzc_ie_len); } break; case MFIE_TYPE_RSN: RTLLIB_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n", info_element->len); network->rsn_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->rsn_ie, info_element, network->rsn_ie_len); break; case MFIE_TYPE_HT_CAP: 
RTLLIB_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n", info_element->len); tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN); if (tmp_htcap_len != 0) { network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ? sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len; memcpy(network->bssht.bdHTCapBuf, info_element->data, network->bssht.bdHTCapLen); network->bssht.bdSupportHT = true; network->bssht.bdHT1R = ((((struct ht_capab_ele *) network->bssht.bdHTCapBuf))->MCS[1]) == 0; network->bssht.bdBandWidth = (enum ht_channel_width) (((struct ht_capab_ele *) (network->bssht.bdHTCapBuf))->ChlWidth); } else { network->bssht.bdSupportHT = false; network->bssht.bdHT1R = false; network->bssht.bdBandWidth = HT_CHANNEL_WIDTH_20; } break; case MFIE_TYPE_HT_INFO: RTLLIB_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n", info_element->len); tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN); if (tmp_htinfo_len) { network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE; network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ? 
sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len; memcpy(network->bssht.bdHTInfoBuf, info_element->data, network->bssht.bdHTInfoLen); } break; case MFIE_TYPE_AIRONET: RTLLIB_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n", info_element->len); if (info_element->len > IE_CISCO_FLAG_POSITION) { network->bWithAironetIE = true; if ((info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_MIC) || (info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_PK)) network->bCkipSupported = true; else network->bCkipSupported = false; } else { network->bWithAironetIE = false; network->bCkipSupported = false; } break; case MFIE_TYPE_QOS_PARAMETER: printk(KERN_ERR "QoS Error need to parse QOS_PARAMETER IE\n"); break; case MFIE_TYPE_COUNTRY: RTLLIB_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n", info_element->len); rtllib_extract_country_ie(ieee, info_element, network, network->bssid); break; /* TODO */ default: RTLLIB_DEBUG_MGMT ("Unsupported info element: %s (%d)\n", get_info_element_string(info_element->id), info_element->id); break; } length -= sizeof(*info_element) + info_element->len; info_element = (struct rtllib_info_element *)&info_element-> data[info_element->len]; } if (!network->atheros_cap_exist && !network->broadcom_cap_exist && !network->cisco_cap_exist && !network->ralink_cap_exist && !network->bssht.bdRT2RTAggregation) network->unknown_cap_exist = true; else network->unknown_cap_exist = false; return 0; } static inline u8 rtllib_SignalStrengthTranslate(u8 CurrSS) { u8 RetSS; if (CurrSS >= 71 && CurrSS <= 100) RetSS = 90 + ((CurrSS - 70) / 3); else if (CurrSS >= 41 && CurrSS <= 70) RetSS = 78 + ((CurrSS - 40) / 3); else if (CurrSS >= 31 && CurrSS <= 40) RetSS = 66 + (CurrSS - 30); else if (CurrSS >= 21 && CurrSS <= 30) RetSS = 54 + (CurrSS - 20); else if (CurrSS >= 5 && CurrSS <= 20) RetSS = 42 + (((CurrSS - 5) * 2) / 3); else if (CurrSS == 4) RetSS = 36; else if (CurrSS == 3) RetSS = 27; else if (CurrSS == 2) RetSS = 18; else if (CurrSS == 1) RetSS = 9; else RetSS = 
CurrSS; return RetSS; } static long rtllib_translate_todbm(u8 signal_strength_index) { long signal_power; signal_power = (long)((signal_strength_index + 1) >> 1); signal_power -= 95; return signal_power; } static inline int rtllib_network_init( struct rtllib_device *ieee, struct rtllib_probe_response *beacon, struct rtllib_network *network, struct rtllib_rx_stats *stats) { /* network->qos_data.active = 0; network->qos_data.supported = 0; network->qos_data.param_count = 0; network->qos_data.old_param_count = 0; */ memset(&network->qos_data, 0, sizeof(struct rtllib_qos_data)); /* Pull out fixed field data */ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); network->capability = le16_to_cpu(beacon->capability); network->last_scanned = jiffies; network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); network->beacon_interval = le32_to_cpu(beacon->beacon_interval); /* Where to pull this? beacon->listen_interval;*/ network->listen_interval = 0x0A; network->rates_len = network->rates_ex_len = 0; network->last_associate = 0; network->ssid_len = 0; network->hidden_ssid_len = 0; memset(network->hidden_ssid, 0, sizeof(network->hidden_ssid)); network->flags = 0; network->atim_window = 0; network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 
0x3 : 0x0; network->berp_info_valid = false; network->broadcom_cap_exist = false; network->ralink_cap_exist = false; network->atheros_cap_exist = false; network->cisco_cap_exist = false; network->unknown_cap_exist = false; network->realtek_cap_exit = false; network->marvell_cap_exist = false; network->airgo_cap_exist = false; network->Turbo_Enable = 0; network->SignalStrength = stats->SignalStrength; network->RSSI = stats->SignalStrength; network->CountryIeLen = 0; memset(network->CountryIeBuf, 0, MAX_IE_LEN); HTInitializeBssDesc(&network->bssht); if (stats->freq == RTLLIB_52GHZ_BAND) { /* for A band (No DS info) */ network->channel = stats->received_channel; } else network->flags |= NETWORK_HAS_CCK; network->wpa_ie_len = 0; network->rsn_ie_len = 0; network->wzc_ie_len = 0; if (rtllib_parse_info_param(ieee, beacon->info_element, (stats->len - sizeof(*beacon)), network, stats)) return 1; network->mode = 0; if (stats->freq == RTLLIB_52GHZ_BAND) network->mode = IEEE_A; else { if (network->flags & NETWORK_HAS_OFDM) network->mode |= IEEE_G; if (network->flags & NETWORK_HAS_CCK) network->mode |= IEEE_B; } if (network->mode == 0) { RTLLIB_DEBUG_SCAN("Filtered out '%s (%pM)' " "network.\n", escape_essid(network->ssid, network->ssid_len), network->bssid); return 1; } if (network->bssht.bdSupportHT) { if (network->mode == IEEE_A) network->mode = IEEE_N_5G; else if (network->mode & (IEEE_G | IEEE_B)) network->mode = IEEE_N_24G; } if (rtllib_is_empty_essid(network->ssid, network->ssid_len)) network->flags |= NETWORK_EMPTY_ESSID; stats->signal = 30 + (stats->SignalStrength * 70) / 100; stats->noise = rtllib_translate_todbm((u8)(100-stats->signal)) - 25; memcpy(&network->stats, stats, sizeof(network->stats)); return 0; } static inline int is_same_network(struct rtllib_network *src, struct rtllib_network *dst, u8 ssidbroad) { /* A network is only a duplicate if the channel, BSSID, ESSID * and the capability field (in particular IBSS and BSS) all match. 
* We treat all <hidden> with the same BSSID and channel * as one network */ return (((src->ssid_len == dst->ssid_len) || (!ssidbroad)) && (src->channel == dst->channel) && !memcmp(src->bssid, dst->bssid, ETH_ALEN) && (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (!ssidbroad)) && ((src->capability & WLAN_CAPABILITY_IBSS) == (dst->capability & WLAN_CAPABILITY_IBSS)) && ((src->capability & WLAN_CAPABILITY_ESS) == (dst->capability & WLAN_CAPABILITY_ESS))); } static inline void update_ibss_network(struct rtllib_network *dst, struct rtllib_network *src) { memcpy(&dst->stats, &src->stats, sizeof(struct rtllib_rx_stats)); dst->last_scanned = jiffies; } static inline void update_network(struct rtllib_network *dst, struct rtllib_network *src) { int qos_active; u8 old_param; memcpy(&dst->stats, &src->stats, sizeof(struct rtllib_rx_stats)); dst->capability = src->capability; memcpy(dst->rates, src->rates, src->rates_len); dst->rates_len = src->rates_len; memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len); dst->rates_ex_len = src->rates_ex_len; if (src->ssid_len > 0) { if (dst->ssid_len == 0) { memset(dst->hidden_ssid, 0, sizeof(dst->hidden_ssid)); dst->hidden_ssid_len = src->ssid_len; memcpy(dst->hidden_ssid, src->ssid, src->ssid_len); } else { memset(dst->ssid, 0, dst->ssid_len); dst->ssid_len = src->ssid_len; memcpy(dst->ssid, src->ssid, src->ssid_len); } } dst->mode = src->mode; dst->flags = src->flags; dst->time_stamp[0] = src->time_stamp[0]; dst->time_stamp[1] = src->time_stamp[1]; if (src->flags & NETWORK_HAS_ERP_VALUE) { dst->erp_value = src->erp_value; dst->berp_info_valid = src->berp_info_valid = true; } dst->beacon_interval = src->beacon_interval; dst->listen_interval = src->listen_interval; dst->atim_window = src->atim_window; dst->dtim_period = src->dtim_period; dst->dtim_data = src->dtim_data; dst->last_dtim_sta_time = src->last_dtim_sta_time; memcpy(&dst->tim, &src->tim, sizeof(struct rtllib_tim_parameters)); dst->bssht.bdSupportHT = 
src->bssht.bdSupportHT; dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation; dst->bssht.bdHTCapLen = src->bssht.bdHTCapLen; memcpy(dst->bssht.bdHTCapBuf, src->bssht.bdHTCapBuf, src->bssht.bdHTCapLen); dst->bssht.bdHTInfoLen = src->bssht.bdHTInfoLen; memcpy(dst->bssht.bdHTInfoBuf, src->bssht.bdHTInfoBuf, src->bssht.bdHTInfoLen); dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer; dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime; dst->broadcom_cap_exist = src->broadcom_cap_exist; dst->ralink_cap_exist = src->ralink_cap_exist; dst->atheros_cap_exist = src->atheros_cap_exist; dst->realtek_cap_exit = src->realtek_cap_exit; dst->marvell_cap_exist = src->marvell_cap_exist; dst->cisco_cap_exist = src->cisco_cap_exist; dst->airgo_cap_exist = src->airgo_cap_exist; dst->unknown_cap_exist = src->unknown_cap_exist; memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); dst->wpa_ie_len = src->wpa_ie_len; memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len); dst->rsn_ie_len = src->rsn_ie_len; memcpy(dst->wzc_ie, src->wzc_ie, src->wzc_ie_len); dst->wzc_ie_len = src->wzc_ie_len; dst->last_scanned = jiffies; /* qos related parameters */ qos_active = dst->qos_data.active; old_param = dst->qos_data.param_count; dst->qos_data.supported = src->qos_data.supported; if (dst->flags & NETWORK_HAS_QOS_PARAMETERS) memcpy(&dst->qos_data, &src->qos_data, sizeof(struct rtllib_qos_data)); if (dst->qos_data.supported == 1) { if (dst->ssid_len) RTLLIB_DEBUG_QOS ("QoS the network %s is QoS supported\n", dst->ssid); else RTLLIB_DEBUG_QOS ("QoS the network is QoS supported\n"); } dst->qos_data.active = qos_active; dst->qos_data.old_param_count = old_param; /* dst->last_associate is not overwritten */ dst->wmm_info = src->wmm_info; if (src->wmm_param[0].ac_aci_acm_aifsn || src->wmm_param[1].ac_aci_acm_aifsn || src->wmm_param[2].ac_aci_acm_aifsn || src->wmm_param[3].ac_aci_acm_aifsn) memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN); dst->SignalStrength = src->SignalStrength; 
dst->RSSI = src->RSSI; dst->Turbo_Enable = src->Turbo_Enable; dst->CountryIeLen = src->CountryIeLen; memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen); dst->bWithAironetIE = src->bWithAironetIE; dst->bCkipSupported = src->bCkipSupported; memcpy(dst->CcxRmState, src->CcxRmState, 2); dst->bCcxRmEnable = src->bCcxRmEnable; dst->MBssidMask = src->MBssidMask; dst->bMBssidValid = src->bMBssidValid; memcpy(dst->MBssid, src->MBssid, 6); dst->bWithCcxVerNum = src->bWithCcxVerNum; dst->BssCcxVerNumber = src->BssCcxVerNumber; } static inline int is_beacon(__le16 fc) { return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == RTLLIB_STYPE_BEACON); } static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel) { if (MAX_CHANNEL_NUMBER < channel) { printk(KERN_INFO "%s(): Invalid Channel\n", __func__); return 0; } if (rtllib->active_channel_map[channel] == 2) return 1; return 0; } int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel) { if (MAX_CHANNEL_NUMBER < channel) { printk(KERN_INFO "%s(): Invalid Channel\n", __func__); return 0; } if (rtllib->active_channel_map[channel] > 0) return 1; return 0; } EXPORT_SYMBOL(rtllib_legal_channel); static inline void rtllib_process_probe_response( struct rtllib_device *ieee, struct rtllib_probe_response *beacon, struct rtllib_rx_stats *stats) { struct rtllib_network *target; struct rtllib_network *oldest = NULL; struct rtllib_info_element *info_element = &beacon->info_element[0]; unsigned long flags; short renew; struct rtllib_network *network = kzalloc(sizeof(struct rtllib_network), GFP_ATOMIC); if (!network) return; RTLLIB_DEBUG_SCAN( "'%s' ( %pM ): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, (beacon->capability & (1<<0xf)) ? '1' : '0', (beacon->capability & (1<<0xe)) ? '1' : '0', (beacon->capability & (1<<0xd)) ? '1' : '0', (beacon->capability & (1<<0xc)) ? '1' : '0', (beacon->capability & (1<<0xb)) ? 
'1' : '0', (beacon->capability & (1<<0xa)) ? '1' : '0', (beacon->capability & (1<<0x9)) ? '1' : '0', (beacon->capability & (1<<0x8)) ? '1' : '0', (beacon->capability & (1<<0x7)) ? '1' : '0', (beacon->capability & (1<<0x6)) ? '1' : '0', (beacon->capability & (1<<0x5)) ? '1' : '0', (beacon->capability & (1<<0x4)) ? '1' : '0', (beacon->capability & (1<<0x3)) ? '1' : '0', (beacon->capability & (1<<0x2)) ? '1' : '0', (beacon->capability & (1<<0x1)) ? '1' : '0', (beacon->capability & (1<<0x0)) ? '1' : '0'); if (rtllib_network_init(ieee, beacon, network, stats)) { RTLLIB_DEBUG_SCAN("Dropped '%s' ( %pM) via %s.\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == RTLLIB_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); goto free_network; } if (!rtllib_legal_channel(ieee, network->channel)) goto free_network; if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == RTLLIB_STYPE_PROBE_RESP) { if (IsPassiveChannel(ieee, network->channel)) { printk(KERN_INFO "GetScanInfo(): For Global Domain, " "filter probe response at channel(%d).\n", network->channel); goto free_network; } } /* The network parsed correctly -- so now we scan our known networks * to see if we can find it in our list. * * NOTE: This search is definitely not optimized. Once its doing * the "right thing" we'll optimize it for efficiency if * necessary */ /* Search for this entry in the list and update it if it is * already there. */ spin_lock_irqsave(&ieee->lock, flags); if (is_same_network(&ieee->current_network, network, (network->ssid_len ? 
1 : 0))) { update_network(&ieee->current_network, network); if ((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G) && ieee->current_network.berp_info_valid) { if (ieee->current_network.erp_value & ERP_UseProtection) ieee->current_network.buseprotection = true; else ieee->current_network.buseprotection = false; } if (is_beacon(beacon->header.frame_ctl)) { if (ieee->state >= RTLLIB_LINKED) ieee->LinkDetectInfo.NumRecvBcnInPeriod++; } } list_for_each_entry(target, &ieee->network_list, list) { if (is_same_network(target, network, (target->ssid_len ? 1 : 0))) break; if ((oldest == NULL) || (target->last_scanned < oldest->last_scanned)) oldest = target; } /* If we didn't find a match, then get a new network slot to initialize * with this beacon's information */ if (&target->list == &ieee->network_list) { if (list_empty(&ieee->network_free_list)) { /* If there are no more slots, expire the oldest */ list_del(&oldest->list); target = oldest; RTLLIB_DEBUG_SCAN("Expired '%s' ( %pM) from " "network list.\n", escape_essid(target->ssid, target->ssid_len), target->bssid); } else { /* Otherwise just pull from the free list */ target = list_entry(ieee->network_free_list.next, struct rtllib_network, list); list_del(ieee->network_free_list.next); } RTLLIB_DEBUG_SCAN("Adding '%s' ( %pM) via %s.\n", escape_essid(network->ssid, network->ssid_len), network->bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == RTLLIB_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); memcpy(target, network, sizeof(*target)); list_add_tail(&target->list, &ieee->network_list); if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) rtllib_softmac_new_net(ieee, network); } else { RTLLIB_DEBUG_SCAN("Updating '%s' ( %pM) via %s.\n", escape_essid(target->ssid, target->ssid_len), target->bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == RTLLIB_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); /* we have an entry and we are going to update it. 
But this * entry may be already expired. In this case we do the same * as we found a new net and call the new_net handler */ renew = !time_after(target->last_scanned + ieee->scan_age, jiffies); if ((!target->ssid_len) && (((network->ssid_len > 0) && (target->hidden_ssid_len == 0)) || ((ieee->current_network.ssid_len == network->ssid_len) && (strncmp(ieee->current_network.ssid, network->ssid, network->ssid_len) == 0) && (ieee->state == RTLLIB_NOLINK)))) renew = 1; update_network(target, network); if (renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)) rtllib_softmac_new_net(ieee, network); } spin_unlock_irqrestore(&ieee->lock, flags); if (is_beacon(beacon->header.frame_ctl) && is_same_network(&ieee->current_network, network, (network->ssid_len ? 1 : 0)) && (ieee->state == RTLLIB_LINKED)) { if (ieee->handle_beacon != NULL) ieee->handle_beacon(ieee->dev, beacon, &ieee->current_network); } free_network: kfree(network); return; } void rtllib_rx_mgt(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *stats) { struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data ; if (WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_PROBE_RESP && WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_BEACON) ieee->last_rx_ps_time = jiffies; switch (WLAN_FC_GET_STYPE(header->frame_ctl)) { case RTLLIB_STYPE_BEACON: RTLLIB_DEBUG_MGMT("received BEACON (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); RTLLIB_DEBUG_SCAN("Beacon\n"); rtllib_process_probe_response( ieee, (struct rtllib_probe_response *)header, stats); if (ieee->sta_sleep || (ieee->ps != RTLLIB_PS_DISABLED && ieee->iw_mode == IW_MODE_INFRA && ieee->state == RTLLIB_LINKED)) tasklet_schedule(&ieee->ps_task); break; case RTLLIB_STYPE_PROBE_RESP: RTLLIB_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); RTLLIB_DEBUG_SCAN("Probe response\n"); rtllib_process_probe_response(ieee, (struct rtllib_probe_response *)header, stats); break; case 
RTLLIB_STYPE_PROBE_REQ: RTLLIB_DEBUG_MGMT("received PROBE RESQUEST (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); RTLLIB_DEBUG_SCAN("Probe request\n"); if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) && ((ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) && ieee->state == RTLLIB_LINKED)) rtllib_rx_probe_rq(ieee, skb); break; } }
gpl-2.0
MikeC84/jet-3.4.10-gdd05a11
drivers/scsi/qla2xxx/qla_mid.c
4811
21287
/* * QLogic Fibre Channel HBA Driver * Copyright (c) 2003-2011 QLogic Corporation * * See LICENSE.qla2xxx for copyright and licensing details. */ #include "qla_def.h" #include "qla_gbl.h" #include <linux/moduleparam.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/list.h> #include <scsi/scsi_tcq.h> #include <scsi/scsicam.h> #include <linux/delay.h> void qla2x00_vp_stop_timer(scsi_qla_host_t *vha) { if (vha->vp_idx && vha->timer_active) { del_timer_sync(&vha->timer); vha->timer_active = 0; } } static uint32_t qla24xx_allocate_vp_id(scsi_qla_host_t *vha) { uint32_t vp_id; struct qla_hw_data *ha = vha->hw; unsigned long flags; /* Find an empty slot and assign an vp_id */ mutex_lock(&ha->vport_lock); vp_id = find_first_zero_bit(ha->vp_idx_map, ha->max_npiv_vports + 1); if (vp_id > ha->max_npiv_vports) { ql_dbg(ql_dbg_vport, vha, 0xa000, "vp_id %d is bigger than max-supported %d.\n", vp_id, ha->max_npiv_vports); mutex_unlock(&ha->vport_lock); return vp_id; } set_bit(vp_id, ha->vp_idx_map); ha->num_vhosts++; vha->vp_idx = vp_id; spin_lock_irqsave(&ha->vport_slock, flags); list_add_tail(&vha->list, &ha->vp_list); spin_unlock_irqrestore(&ha->vport_slock, flags); mutex_unlock(&ha->vport_lock); return vp_id; } void qla24xx_deallocate_vp_id(scsi_qla_host_t *vha) { uint16_t vp_id; struct qla_hw_data *ha = vha->hw; unsigned long flags = 0; mutex_lock(&ha->vport_lock); /* * Wait for all pending activities to finish before removing vport from * the list. 
* Lock needs to be held for safe removal from the list (it * ensures no active vp_list traversal while the vport is removed * from the queue) */ spin_lock_irqsave(&ha->vport_slock, flags); while (atomic_read(&vha->vref_count)) { spin_unlock_irqrestore(&ha->vport_slock, flags); msleep(500); spin_lock_irqsave(&ha->vport_slock, flags); } list_del(&vha->list); spin_unlock_irqrestore(&ha->vport_slock, flags); vp_id = vha->vp_idx; ha->num_vhosts--; clear_bit(vp_id, ha->vp_idx_map); mutex_unlock(&ha->vport_lock); } static scsi_qla_host_t * qla24xx_find_vhost_by_name(struct qla_hw_data *ha, uint8_t *port_name) { scsi_qla_host_t *vha; struct scsi_qla_host *tvha; unsigned long flags; spin_lock_irqsave(&ha->vport_slock, flags); /* Locate matching device in database. */ list_for_each_entry_safe(vha, tvha, &ha->vp_list, list) { if (!memcmp(port_name, vha->port_name, WWN_SIZE)) { spin_unlock_irqrestore(&ha->vport_slock, flags); return vha; } } spin_unlock_irqrestore(&ha->vport_slock, flags); return NULL; } /* * qla2x00_mark_vp_devices_dead * Updates fcport state when device goes offline. * * Input: * ha = adapter block pointer. * fcport = port structure pointer. * * Return: * None. * * Context: */ static void qla2x00_mark_vp_devices_dead(scsi_qla_host_t *vha) { /* * !!! NOTE !!! * This function, if called in contexts other than vp create, disable * or delete, please make sure this is synchronized with the * delete thread. 
*/ fc_port_t *fcport; list_for_each_entry(fcport, &vha->vp_fcports, list) { ql_dbg(ql_dbg_vport, vha, 0xa001, "Marking port dead, loop_id=0x%04x : %x.\n", fcport->loop_id, fcport->vp_idx); qla2x00_mark_device_lost(vha, fcport, 0, 0); qla2x00_set_fcport_state(fcport, FCS_UNCONFIGURED); } } int qla24xx_disable_vp(scsi_qla_host_t *vha) { int ret; ret = qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_mark_vp_devices_dead(vha); atomic_set(&vha->vp_state, VP_FAILED); vha->flags.management_server_logged_in = 0; if (ret == QLA_SUCCESS) { fc_vport_set_state(vha->fc_vport, FC_VPORT_DISABLED); } else { fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); return -1; } return 0; } int qla24xx_enable_vp(scsi_qla_host_t *vha) { int ret; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *base_vha = pci_get_drvdata(ha->pdev); /* Check if physical ha port is Up */ if (atomic_read(&base_vha->loop_state) == LOOP_DOWN || atomic_read(&base_vha->loop_state) == LOOP_DEAD || !(ha->current_topology & ISP_CFG_F)) { vha->vp_err_state = VP_ERR_PORTDWN; fc_vport_set_state(vha->fc_vport, FC_VPORT_LINKDOWN); goto enable_failed; } /* Initialize the new vport unless it is a persistent port */ mutex_lock(&ha->vport_lock); ret = qla24xx_modify_vp_config(vha); mutex_unlock(&ha->vport_lock); if (ret != QLA_SUCCESS) { fc_vport_set_state(vha->fc_vport, FC_VPORT_FAILED); goto enable_failed; } ql_dbg(ql_dbg_taskm, vha, 0x801a, "Virtual port with id: %d - Enabled.\n", vha->vp_idx); return 0; enable_failed: ql_dbg(ql_dbg_taskm, vha, 0x801b, "Virtual port with id: %d - Disabled.\n", vha->vp_idx); return 1; } static void qla24xx_configure_vp(scsi_qla_host_t *vha) { struct fc_vport *fc_vport; int ret; fc_vport = vha->fc_vport; ql_dbg(ql_dbg_vport, vha, 0xa002, "%s: change request #3.\n", __func__); ret = qla2x00_send_change_request(vha, 0x3, vha->vp_idx); if (ret != QLA_SUCCESS) { ql_dbg(ql_dbg_vport, 
vha, 0xa003, "Failed to enable " "receiving of RSCN requests: 0x%x.\n", ret); return; } else { /* Corresponds to SCR enabled */ clear_bit(VP_SCR_NEEDED, &vha->vp_flags); } vha->flags.online = 1; if (qla24xx_configure_vhba(vha)) return; atomic_set(&vha->vp_state, VP_ACTIVE); fc_vport_set_state(fc_vport, FC_VPORT_ACTIVE); } void qla2x00_alert_all_vps(struct rsp_que *rsp, uint16_t *mb) { scsi_qla_host_t *vha; struct qla_hw_data *ha = rsp->hw; int i = 0; unsigned long flags; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vha, &ha->vp_list, list) { if (vha->vp_idx) { atomic_inc(&vha->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); switch (mb[0]) { case MBA_LIP_OCCURRED: case MBA_LOOP_UP: case MBA_LOOP_DOWN: case MBA_LIP_RESET: case MBA_POINT_TO_POINT: case MBA_CHG_IN_CONNECTION: case MBA_PORT_UPDATE: case MBA_RSCN_UPDATE: ql_dbg(ql_dbg_async, vha, 0x5024, "Async_event for VP[%d], mb=0x%x vha=%p.\n", i, *mb, vha); qla2x00_async_event(vha, rsp, mb); break; } spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vha->vref_count); } i++; } spin_unlock_irqrestore(&ha->vport_slock, flags); } int qla2x00_vp_abort_isp(scsi_qla_host_t *vha) { /* * Physical port will do most of the abort and recovery work. We can * just treat it as a loop down */ if (atomic_read(&vha->loop_state) != LOOP_DOWN) { atomic_set(&vha->loop_state, LOOP_DOWN); qla2x00_mark_all_devices_lost(vha, 0); } else { if (!atomic_read(&vha->loop_down_timer)) atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); } /* * To exclusively reset vport, we need to log it out first. Note: this * control_vp can fail if ISP reset is already issued, this is * expected, as the vp would be already logged out due to ISP reset. 
*/ if (!test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags)) qla24xx_control_vp(vha, VCE_COMMAND_DISABLE_VPS_LOGO_ALL); ql_dbg(ql_dbg_taskm, vha, 0x801d, "Scheduling enable of Vport %d.\n", vha->vp_idx); return qla24xx_enable_vp(vha); } static int qla2x00_do_dpc_vp(scsi_qla_host_t *vha) { ql_dbg(ql_dbg_dpc, vha, 0x4012, "Entering %s.\n", __func__); ql_dbg(ql_dbg_dpc, vha, 0x4013, "vp_flags: 0x%lx.\n", vha->vp_flags); qla2x00_do_work(vha); if (test_and_clear_bit(VP_IDX_ACQUIRED, &vha->vp_flags)) { /* VP acquired. complete port configuration */ ql_dbg(ql_dbg_dpc, vha, 0x4014, "Configure VP scheduled.\n"); qla24xx_configure_vp(vha); ql_dbg(ql_dbg_dpc, vha, 0x4015, "Configure VP end.\n"); return 0; } if (test_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags)) { ql_dbg(ql_dbg_dpc, vha, 0x4016, "FCPort update scheduled.\n"); qla2x00_update_fcports(vha); clear_bit(FCPORT_UPDATE_NEEDED, &vha->dpc_flags); ql_dbg(ql_dbg_dpc, vha, 0x4017, "FCPort update end.\n"); } if ((test_and_clear_bit(RELOGIN_NEEDED, &vha->dpc_flags)) && !test_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags) && atomic_read(&vha->loop_state) != LOOP_DOWN) { ql_dbg(ql_dbg_dpc, vha, 0x4018, "Relogin needed scheduled.\n"); qla2x00_relogin(vha); ql_dbg(ql_dbg_dpc, vha, 0x4019, "Relogin needed end.\n"); } if (test_and_clear_bit(RESET_MARKER_NEEDED, &vha->dpc_flags) && (!(test_and_set_bit(RESET_ACTIVE, &vha->dpc_flags)))) { clear_bit(RESET_ACTIVE, &vha->dpc_flags); } if (test_and_clear_bit(LOOP_RESYNC_NEEDED, &vha->dpc_flags)) { if (!(test_and_set_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags))) { ql_dbg(ql_dbg_dpc, vha, 0x401a, "Loop resync scheduled.\n"); qla2x00_loop_resync(vha); clear_bit(LOOP_RESYNC_ACTIVE, &vha->dpc_flags); ql_dbg(ql_dbg_dpc, vha, 0x401b, "Loop resync end.\n"); } } ql_dbg(ql_dbg_dpc, vha, 0x401c, "Exiting %s.\n", __func__); return 0; } void qla2x00_do_dpc_all_vps(scsi_qla_host_t *vha) { int ret; struct qla_hw_data *ha = vha->hw; scsi_qla_host_t *vp; unsigned long flags = 0; if (vha->vp_idx) return; if 
(list_empty(&ha->vp_list)) return; clear_bit(VP_DPC_NEEDED, &vha->dpc_flags); if (!(ha->current_topology & ISP_CFG_F)) return; spin_lock_irqsave(&ha->vport_slock, flags); list_for_each_entry(vp, &ha->vp_list, list) { if (vp->vp_idx) { atomic_inc(&vp->vref_count); spin_unlock_irqrestore(&ha->vport_slock, flags); ret = qla2x00_do_dpc_vp(vp); spin_lock_irqsave(&ha->vport_slock, flags); atomic_dec(&vp->vref_count); } } spin_unlock_irqrestore(&ha->vport_slock, flags); } int qla24xx_vport_create_req_sanity_check(struct fc_vport *fc_vport) { scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); struct qla_hw_data *ha = base_vha->hw; scsi_qla_host_t *vha; uint8_t port_name[WWN_SIZE]; if (fc_vport->roles != FC_PORT_ROLE_FCP_INITIATOR) return VPCERR_UNSUPPORTED; /* Check up the F/W and H/W support NPIV */ if (!ha->flags.npiv_supported) return VPCERR_UNSUPPORTED; /* Check up whether npiv supported switch presented */ if (!(ha->switch_cap & FLOGI_MID_SUPPORT)) return VPCERR_NO_FABRIC_SUPP; /* Check up unique WWPN */ u64_to_wwn(fc_vport->port_name, port_name); if (!memcmp(port_name, base_vha->port_name, WWN_SIZE)) return VPCERR_BAD_WWN; vha = qla24xx_find_vhost_by_name(ha, port_name); if (vha) return VPCERR_BAD_WWN; /* Check up max-npiv-supports */ if (ha->num_vhosts > ha->max_npiv_vports) { ql_dbg(ql_dbg_vport, vha, 0xa004, "num_vhosts %ud is bigger " "than max_npiv_vports %ud.\n", ha->num_vhosts, ha->max_npiv_vports); return VPCERR_UNSUPPORTED; } return 0; } scsi_qla_host_t * qla24xx_create_vhost(struct fc_vport *fc_vport) { scsi_qla_host_t *base_vha = shost_priv(fc_vport->shost); struct qla_hw_data *ha = base_vha->hw; scsi_qla_host_t *vha; struct scsi_host_template *sht = &qla2xxx_driver_template; struct Scsi_Host *host; vha = qla2x00_create_host(sht, ha); if (!vha) { ql_log(ql_log_warn, vha, 0xa005, "scsi_host_alloc() failed for vport.\n"); return(NULL); } host = vha->host; fc_vport->dd_data = vha; /* New host info */ u64_to_wwn(fc_vport->node_name, vha->node_name); 
u64_to_wwn(fc_vport->port_name, vha->port_name); vha->fc_vport = fc_vport; vha->device_flags = 0; vha->vp_idx = qla24xx_allocate_vp_id(vha); if (vha->vp_idx > ha->max_npiv_vports) { ql_dbg(ql_dbg_vport, vha, 0xa006, "Couldn't allocate vp_id.\n"); goto create_vhost_failed; } vha->mgmt_svr_loop_id = 10 + vha->vp_idx; vha->dpc_flags = 0L; /* * To fix the issue of processing a parent's RSCN for the vport before * its SCR is complete. */ set_bit(VP_SCR_NEEDED, &vha->vp_flags); atomic_set(&vha->loop_state, LOOP_DOWN); atomic_set(&vha->loop_down_timer, LOOP_DOWN_TIME); qla2x00_start_timer(vha, qla2x00_timer, WATCH_INTERVAL); vha->req = base_vha->req; host->can_queue = base_vha->req->length + 128; host->this_id = 255; host->cmd_per_lun = 3; if (IS_T10_PI_CAPABLE(ha) && ql2xenabledif) host->max_cmd_len = 32; else host->max_cmd_len = MAX_CMDSZ; host->max_channel = MAX_BUSES - 1; host->max_lun = ql2xmaxlun; host->unique_id = host->host_no; host->max_id = ha->max_fibre_devices; host->transportt = qla2xxx_transport_vport_template; ql_dbg(ql_dbg_vport, vha, 0xa007, "Detect vport hba %ld at address = %p.\n", vha->host_no, vha); vha->flags.init_done = 1; mutex_lock(&ha->vport_lock); set_bit(vha->vp_idx, ha->vp_idx_map); ha->cur_vport_count++; mutex_unlock(&ha->vport_lock); return vha; create_vhost_failed: return NULL; } static void qla25xx_free_req_que(struct scsi_qla_host *vha, struct req_que *req) { struct qla_hw_data *ha = vha->hw; uint16_t que_id = req->id; dma_free_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), req->ring, req->dma); req->ring = NULL; req->dma = 0; if (que_id) { ha->req_q_map[que_id] = NULL; mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->req_qid_map); mutex_unlock(&ha->vport_lock); } kfree(req); req = NULL; } static void qla25xx_free_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { struct qla_hw_data *ha = vha->hw; uint16_t que_id = rsp->id; if (rsp->msix && rsp->msix->have_irq) { free_irq(rsp->msix->vector, rsp); 
rsp->msix->have_irq = 0; rsp->msix->rsp = NULL; } dma_free_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), rsp->ring, rsp->dma); rsp->ring = NULL; rsp->dma = 0; if (que_id) { ha->rsp_q_map[que_id] = NULL; mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->rsp_qid_map); mutex_unlock(&ha->vport_lock); } kfree(rsp); rsp = NULL; } int qla25xx_delete_req_que(struct scsi_qla_host *vha, struct req_que *req) { int ret = -1; if (req) { req->options |= BIT_0; ret = qla25xx_init_req_que(vha, req); } if (ret == QLA_SUCCESS) qla25xx_free_req_que(vha, req); return ret; } static int qla25xx_delete_rsp_que(struct scsi_qla_host *vha, struct rsp_que *rsp) { int ret = -1; if (rsp) { rsp->options |= BIT_0; ret = qla25xx_init_rsp_que(vha, rsp); } if (ret == QLA_SUCCESS) qla25xx_free_rsp_que(vha, rsp); return ret; } /* Delete all queues for a given vhost */ int qla25xx_delete_queues(struct scsi_qla_host *vha) { int cnt, ret = 0; struct req_que *req = NULL; struct rsp_que *rsp = NULL; struct qla_hw_data *ha = vha->hw; /* Delete request queues */ for (cnt = 1; cnt < ha->max_req_queues; cnt++) { req = ha->req_q_map[cnt]; if (req) { ret = qla25xx_delete_req_que(vha, req); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00ea, "Couldn't delete req que %d.\n", req->id); return ret; } } } /* Delete response queues */ for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) { rsp = ha->rsp_q_map[cnt]; if (rsp) { ret = qla25xx_delete_rsp_que(vha, rsp); if (ret != QLA_SUCCESS) { ql_log(ql_log_warn, vha, 0x00eb, "Couldn't delete rsp que %d.\n", rsp->id); return ret; } } } return ret; } int qla25xx_create_req_que(struct qla_hw_data *ha, uint16_t options, uint8_t vp_idx, uint16_t rid, int rsp_que, uint8_t qos) { int ret = 0; struct req_que *req = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t __iomem *reg; uint32_t cnt; req = kzalloc(sizeof(struct req_que), GFP_KERNEL); if (req == NULL) { ql_log(ql_log_fatal, base_vha, 0x00d9, "Failed 
to allocate memory for request queue.\n"); goto failed; } req->length = REQUEST_ENTRY_CNT_24XX; req->ring = dma_alloc_coherent(&ha->pdev->dev, (req->length + 1) * sizeof(request_t), &req->dma, GFP_KERNEL); if (req->ring == NULL) { ql_log(ql_log_fatal, base_vha, 0x00da, "Failed to allocte memory for request_ring.\n"); goto que_failed; } mutex_lock(&ha->vport_lock); que_id = find_first_zero_bit(ha->req_qid_map, ha->max_req_queues); if (que_id >= ha->max_req_queues) { mutex_unlock(&ha->vport_lock); ql_log(ql_log_warn, base_vha, 0x00db, "No resources to create additional request queue.\n"); goto que_failed; } set_bit(que_id, ha->req_qid_map); ha->req_q_map[que_id] = req; req->rid = rid; req->vp_idx = vp_idx; req->qos = qos; ql_dbg(ql_dbg_multiq, base_vha, 0xc002, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", que_id, req->rid, req->vp_idx, req->qos); ql_dbg(ql_dbg_init, base_vha, 0x00dc, "queue_id=%d rid=%d vp_idx=%d qos=%d.\n", que_id, req->rid, req->vp_idx, req->qos); if (rsp_que < 0) req->rsp = NULL; else req->rsp = ha->rsp_q_map[rsp_que]; /* Use alternate PCI bus number */ if (MSB(req->rid)) options |= BIT_4; /* Use alternate PCI devfn */ if (LSB(req->rid)) options |= BIT_5; req->options = options; ql_dbg(ql_dbg_multiq, base_vha, 0xc003, "options=0x%x.\n", req->options); ql_dbg(ql_dbg_init, base_vha, 0x00dd, "options=0x%x.\n", req->options); for (cnt = 1; cnt < MAX_OUTSTANDING_COMMANDS; cnt++) req->outstanding_cmds[cnt] = NULL; req->current_outstanding_cmd = 1; req->ring_ptr = req->ring; req->ring_index = 0; req->cnt = req->length; req->id = que_id; reg = ISP_QUE_REG(ha, que_id); req->max_q_depth = ha->req_q_map[0]->max_q_depth; mutex_unlock(&ha->vport_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc004, "ring_ptr=%p ring_index=%d, " "cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, req->ring_index, req->cnt, req->id, req->max_q_depth); ql_dbg(ql_dbg_init, base_vha, 0x00de, "ring_ptr=%p ring_index=%d, " "cnt=%d id=%d max_q_depth=%d.\n", req->ring_ptr, req->ring_index, 
req->cnt, req->id, req->max_q_depth); ret = qla25xx_init_req_que(base_vha, req); if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x00df, "%s failed.\n", __func__); mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->req_qid_map); mutex_unlock(&ha->vport_lock); goto que_failed; } return req->id; que_failed: qla25xx_free_req_que(base_vha, req); failed: return 0; } static void qla_do_work(struct work_struct *work) { unsigned long flags; struct rsp_que *rsp = container_of(work, struct rsp_que, q_work); struct scsi_qla_host *vha; struct qla_hw_data *ha = rsp->hw; spin_lock_irqsave(&rsp->hw->hardware_lock, flags); vha = pci_get_drvdata(ha->pdev); qla24xx_process_response_queue(vha, rsp); spin_unlock_irqrestore(&rsp->hw->hardware_lock, flags); } /* create response queue */ int qla25xx_create_rsp_que(struct qla_hw_data *ha, uint16_t options, uint8_t vp_idx, uint16_t rid, int req) { int ret = 0; struct rsp_que *rsp = NULL; struct scsi_qla_host *base_vha = pci_get_drvdata(ha->pdev); uint16_t que_id = 0; device_reg_t __iomem *reg; rsp = kzalloc(sizeof(struct rsp_que), GFP_KERNEL); if (rsp == NULL) { ql_log(ql_log_warn, base_vha, 0x0066, "Failed to allocate memory for response queue.\n"); goto failed; } rsp->length = RESPONSE_ENTRY_CNT_MQ; rsp->ring = dma_alloc_coherent(&ha->pdev->dev, (rsp->length + 1) * sizeof(response_t), &rsp->dma, GFP_KERNEL); if (rsp->ring == NULL) { ql_log(ql_log_warn, base_vha, 0x00e1, "Failed to allocate memory for response ring.\n"); goto que_failed; } mutex_lock(&ha->vport_lock); que_id = find_first_zero_bit(ha->rsp_qid_map, ha->max_rsp_queues); if (que_id >= ha->max_rsp_queues) { mutex_unlock(&ha->vport_lock); ql_log(ql_log_warn, base_vha, 0x00e2, "No resources to create additional request queue.\n"); goto que_failed; } set_bit(que_id, ha->rsp_qid_map); if (ha->flags.msix_enabled) rsp->msix = &ha->msix_entries[que_id + 1]; else ql_log(ql_log_warn, base_vha, 0x00e3, "MSIX not enalbled.\n"); ha->rsp_q_map[que_id] = rsp; rsp->rid = rid; 
rsp->vp_idx = vp_idx; rsp->hw = ha; ql_dbg(ql_dbg_init, base_vha, 0x00e4, "queue_id=%d rid=%d vp_idx=%d hw=%p.\n", que_id, rsp->rid, rsp->vp_idx, rsp->hw); /* Use alternate PCI bus number */ if (MSB(rsp->rid)) options |= BIT_4; /* Use alternate PCI devfn */ if (LSB(rsp->rid)) options |= BIT_5; /* Enable MSIX handshake mode on for uncapable adapters */ if (!IS_MSIX_NACK_CAPABLE(ha)) options |= BIT_6; rsp->options = options; rsp->id = que_id; reg = ISP_QUE_REG(ha, que_id); rsp->rsp_q_in = &reg->isp25mq.rsp_q_in; rsp->rsp_q_out = &reg->isp25mq.rsp_q_out; mutex_unlock(&ha->vport_lock); ql_dbg(ql_dbg_multiq, base_vha, 0xc00b, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ql_dbg(ql_dbg_init, base_vha, 0x00e5, "options=%x id=%d rsp_q_in=%p rsp_q_out=%p", rsp->options, rsp->id, rsp->rsp_q_in, rsp->rsp_q_out); ret = qla25xx_request_irq(rsp); if (ret) goto que_failed; ret = qla25xx_init_rsp_que(base_vha, rsp); if (ret != QLA_SUCCESS) { ql_log(ql_log_fatal, base_vha, 0x00e7, "%s failed.\n", __func__); mutex_lock(&ha->vport_lock); clear_bit(que_id, ha->rsp_qid_map); mutex_unlock(&ha->vport_lock); goto que_failed; } if (req >= 0) rsp->req = ha->req_q_map[req]; else rsp->req = NULL; qla2x00_init_response_q_entries(rsp); if (rsp->hw->wq) INIT_WORK(&rsp->q_work, qla_do_work); return rsp->id; que_failed: qla25xx_free_rsp_que(base_vha, rsp); failed: return 0; }
gpl-2.0
coderzstas/android_kernel_asus_grouper
drivers/staging/rtl8192e/rtllib_rx.c
4811
81041
/* * Original code based Host AP (software wireless LAN access point) driver * for Intersil Prism2/2.5/3 - hostap.o module, common routines * * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * <jkmaline@cc.hut.fi> * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright (c) 2004, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by Andrea Merello <andreamrl@tiscali.it> A special thanks goes to Realtek for their support ! ******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <linux/uaccess.h> #include <linux/ctype.h> #include "rtllib.h" #include "dot11d.h" static inline void rtllib_monitor_rx(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_status, size_t hdr_length) { skb->dev = ieee->dev; skb_reset_mac_header(skb); skb_pull(skb, hdr_length); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = __constant_htons(ETH_P_80211_RAW); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } /* Called only as a tasklet (software IRQ) */ static struct rtllib_frag_entry * rtllib_frag_cache_find(struct rtllib_device *ieee, unsigned int seq, unsigned int frag, u8 tid, u8 *src, u8 *dst) { struct rtllib_frag_entry *entry; int i; for (i 
= 0; i < RTLLIB_FRAG_CACHE_LEN; i++) { entry = &ieee->frag_cache[tid][i]; if (entry->skb != NULL && time_after(jiffies, entry->first_frag_time + 2 * HZ)) { RTLLIB_DEBUG_FRAG( "expiring fragment cache entry " "seq=%u last_frag=%u\n", entry->seq, entry->last_frag); dev_kfree_skb_any(entry->skb); entry->skb = NULL; } if (entry->skb != NULL && entry->seq == seq && (entry->last_frag + 1 == frag || frag == -1) && memcmp(entry->src_addr, src, ETH_ALEN) == 0 && memcmp(entry->dst_addr, dst, ETH_ALEN) == 0) return entry; } return NULL; } /* Called only as a tasklet (software IRQ) */ static struct sk_buff * rtllib_frag_cache_get(struct rtllib_device *ieee, struct rtllib_hdr_4addr *hdr) { struct sk_buff *skb = NULL; u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int frag = WLAN_GET_SEQ_FRAG(sc); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct rtllib_frag_entry *entry; struct rtllib_hdr_3addrqos *hdr_3addrqos; struct rtllib_hdr_4addrqos *hdr_4addrqos; u8 tid; if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) && RTLLIB_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else if (RTLLIB_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else { tid = 0; } if (frag == 0) { /* Reserve enough space to fit maximum frame length */ skb = dev_alloc_skb(ieee->dev->mtu + sizeof(struct rtllib_hdr_4addr) + 8 /* LLC */ + 2 /* alignment */ + 8 /* WEP */ + ETH_ALEN /* WDS */ + (RTLLIB_QOS_HAS_SEQ(fc) ? 
2 : 0) /* QOS Control */); if (skb == NULL) return NULL; entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]]; ieee->frag_next_idx[tid]++; if (ieee->frag_next_idx[tid] >= RTLLIB_FRAG_CACHE_LEN) ieee->frag_next_idx[tid] = 0; if (entry->skb != NULL) dev_kfree_skb_any(entry->skb); entry->first_frag_time = jiffies; entry->seq = seq; entry->last_frag = frag; entry->skb = skb; memcpy(entry->src_addr, hdr->addr2, ETH_ALEN); memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN); } else { /* received a fragment of a frame for which the head fragment * should have already been received */ entry = rtllib_frag_cache_find(ieee, seq, frag, tid, hdr->addr2, hdr->addr1); if (entry != NULL) { entry->last_frag = frag; skb = entry->skb; } } return skb; } /* Called only as a tasklet (software IRQ) */ static int rtllib_frag_cache_invalidate(struct rtllib_device *ieee, struct rtllib_hdr_4addr *hdr) { u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct rtllib_frag_entry *entry; struct rtllib_hdr_3addrqos *hdr_3addrqos; struct rtllib_hdr_4addrqos *hdr_4addrqos; u8 tid; if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) && RTLLIB_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct rtllib_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else if (RTLLIB_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct rtllib_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else { tid = 0; } entry = rtllib_frag_cache_find(ieee, seq, -1, tid, hdr->addr2, hdr->addr1); if (entry == NULL) { RTLLIB_DEBUG_FRAG( "could not invalidate fragment cache " "entry (seq=%u)\n", seq); return -1; } entry->skb = NULL; return 0; } /* rtllib_rx_frame_mgtmt * * Responsible for handling management control frames * * Called by rtllib_rx */ static inline int rtllib_rx_frame_mgmt(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats, u16 
type, u16 stype) { /* On the struct stats definition there is written that * this is not mandatory.... but seems that the probe * response parser uses it */ struct rtllib_hdr_3addr * hdr = (struct rtllib_hdr_3addr *)skb->data; rx_stats->len = skb->len; rtllib_rx_mgt(ieee, skb, rx_stats); if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN))) { dev_kfree_skb_any(skb); return 0; } rtllib_rx_frame_softmac(ieee, skb, rx_stats, type, stype); dev_kfree_skb_any(skb); return 0; } /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; /* Bridge-Tunnel header (for EtherTypes ETH_P_AARP and ETH_P_IPX) */ static unsigned char bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; /* No encapsulation header if EtherType < 0x600 (=length) */ /* Called by rtllib_rx_frame_decrypt */ static int rtllib_is_eapol_frame(struct rtllib_device *ieee, struct sk_buff *skb, size_t hdrlen) { struct net_device *dev = ieee->dev; u16 fc, ethertype; struct rtllib_hdr_4addr *hdr; u8 *pos; if (skb->len < 24) return 0; hdr = (struct rtllib_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); /* check that the frame is unicast frame to us */ if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) == RTLLIB_FCTL_TODS && memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 && memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) { /* ToDS frame with own addr BSSID and DA */ } else if ((fc & (RTLLIB_FCTL_TODS | RTLLIB_FCTL_FROMDS)) == RTLLIB_FCTL_FROMDS && memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) { /* FromDS frame with own addr as DA */ } else return 0; if (skb->len < 24 + 8) return 0; /* check for port access entity Ethernet type */ pos = skb->data + hdrlen; ethertype = (pos[6] << 8) | pos[7]; if (ethertype == ETH_P_PAE) return 1; return 0; } /* Called only as a tasklet (software IRQ), by rtllib_rx */ static inline int 
rtllib_rx_frame_decrypt(struct rtllib_device *ieee, struct sk_buff *skb, struct lib80211_crypt_data *crypt) { struct rtllib_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; if (ieee->hwsec_active) { struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->bHwSec = 1; if (ieee->need_sw_enc) tcb_desc->bHwSec = 0; } hdr = (struct rtllib_hdr_4addr *) skb->data; hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { RTLLIB_DEBUG_DROP( "decryption failed (SA= %pM" ") res=%d\n", hdr->addr2, res); if (res == -2) RTLLIB_DEBUG_DROP("Decryption failed ICV " "mismatch (key %d)\n", skb->data[hdrlen + 3] >> 6); ieee->ieee_stats.rx_discards_undecryptable++; return -1; } return res; } /* Called only as a tasklet (software IRQ), by rtllib_rx */ static inline int rtllib_rx_frame_decrypt_msdu(struct rtllib_device *ieee, struct sk_buff *skb, int keyidx, struct lib80211_crypt_data *crypt) { struct rtllib_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; if (ieee->hwsec_active) { struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->bHwSec = 1; if (ieee->need_sw_enc) tcb_desc->bHwSec = 0; } hdr = (struct rtllib_hdr_4addr *) skb->data; hdrlen = rtllib_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed" " (SA= %pM keyidx=%d)\n", ieee->dev->name, hdr->addr2, keyidx); return -1; } return 0; } /* this function is stolen from ipw2200 driver*/ #define IEEE_PACKET_RETRY_TIME (5*HZ) static int is_duplicate_packet(struct rtllib_device *ieee, struct rtllib_hdr_4addr *header) { u16 fc = le16_to_cpu(header->frame_ctl); u16 sc = 
le16_to_cpu(header->seq_ctl); u16 seq = WLAN_GET_SEQ_SEQ(sc); u16 frag = WLAN_GET_SEQ_FRAG(sc); u16 *last_seq, *last_frag; unsigned long *last_time; struct rtllib_hdr_3addrqos *hdr_3addrqos; struct rtllib_hdr_4addrqos *hdr_4addrqos; u8 tid; if (((fc & RTLLIB_FCTL_DSTODS) == RTLLIB_FCTL_DSTODS) && RTLLIB_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct rtllib_hdr_4addrqos *)header; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else if (RTLLIB_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct rtllib_hdr_3addrqos *)header; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & RTLLIB_QCTL_TID; tid = UP2AC(tid); tid++; } else { tid = 0; } switch (ieee->iw_mode) { case IW_MODE_ADHOC: { struct list_head *p; struct ieee_ibss_seq *entry = NULL; u8 *mac = header->addr2; int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE; list_for_each(p, &ieee->ibss_mac_hash[index]) { entry = list_entry(p, struct ieee_ibss_seq, list); if (!memcmp(entry->mac, mac, ETH_ALEN)) break; } if (p == &ieee->ibss_mac_hash[index]) { entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC); if (!entry) { printk(KERN_WARNING "Cannot malloc new mac entry\n"); return 0; } memcpy(entry->mac, mac, ETH_ALEN); entry->seq_num[tid] = seq; entry->frag_num[tid] = frag; entry->packet_time[tid] = jiffies; list_add(&entry->list, &ieee->ibss_mac_hash[index]); return 0; } last_seq = &entry->seq_num[tid]; last_frag = &entry->frag_num[tid]; last_time = &entry->packet_time[tid]; break; } case IW_MODE_INFRA: last_seq = &ieee->last_rxseq_num[tid]; last_frag = &ieee->last_rxfrag_num[tid]; last_time = &ieee->last_packet_time[tid]; break; default: return 0; } if ((*last_seq == seq) && time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) { if (*last_frag == frag) goto drop; if (*last_frag + 1 != frag) /* out-of-order fragment */ goto drop; } else *last_seq = seq; *last_frag = frag; *last_time = jiffies; return 0; drop: return 1; } static bool AddReorderEntry(struct rx_ts_record *pTS, struct rx_reorder_entry 
*pReorderEntry) { struct list_head *pList = &pTS->RxPendingPktList; while (pList->next != &pTS->RxPendingPktList) { if (SN_LESS(pReorderEntry->SeqNum, ((struct rx_reorder_entry *) list_entry(pList->next, struct rx_reorder_entry, List))->SeqNum)) pList = pList->next; else if (SN_EQUAL(pReorderEntry->SeqNum, ((struct rx_reorder_entry *)list_entry(pList->next, struct rx_reorder_entry, List))->SeqNum)) return false; else break; } pReorderEntry->List.next = pList->next; pReorderEntry->List.next->prev = &pReorderEntry->List; pReorderEntry->List.prev = pList; pList->next = &pReorderEntry->List; return true; } void rtllib_indicate_packets(struct rtllib_device *ieee, struct rtllib_rxb **prxbIndicateArray, u8 index) { struct net_device_stats *stats = &ieee->stats; u8 i = 0 , j = 0; u16 ethertype; for (j = 0; j < index; j++) { struct rtllib_rxb *prxb = prxbIndicateArray[j]; for (i = 0; i < prxb->nr_subframes; i++) { struct sk_buff *sub_skb = prxb->subframes[i]; /* convert hdr + possible LLC headers into Ethernet header */ ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7]; if (sub_skb->len >= 8 && ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) { /* remove RFC1042 or Bridge-Tunnel encapsulation * and replace EtherType */ skb_pull(sub_skb, SNAP_SIZE); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN); } else { u16 len; /* Leave Ethernet header part of hdr and full payload */ len = htons(sub_skb->len); memcpy(skb_push(sub_skb, 2), &len, 2); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN); } /* Indicat the packets to upper layer */ if (sub_skb) { stats->rx_packets++; stats->rx_bytes += sub_skb->len; memset(sub_skb->cb, 0, sizeof(sub_skb->cb)); sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev); sub_skb->dev = 
ieee->dev; sub_skb->dev->stats.rx_packets++; sub_skb->dev->stats.rx_bytes += sub_skb->len; sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ ieee->last_rx_ps_time = jiffies; netif_rx(sub_skb); } } kfree(prxb); prxb = NULL; } } void rtllib_FlushRxTsPendingPkts(struct rtllib_device *ieee, struct rx_ts_record *pTS) { struct rx_reorder_entry *pRxReorderEntry; u8 RfdCnt = 0; del_timer_sync(&pTS->RxPktPendingTimer); while (!list_empty(&pTS->RxPendingPktList)) { if (RfdCnt >= REORDER_WIN_SIZE) { printk(KERN_INFO "-------------->%s() error! RfdCnt >= REORDER_WIN_SIZE\n", __func__); break; } pRxReorderEntry = (struct rx_reorder_entry *)list_entry(pTS->RxPendingPktList.prev, struct rx_reorder_entry, List); RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate SeqNum %d!\n", __func__, pRxReorderEntry->SeqNum); list_del_init(&pRxReorderEntry->List); ieee->RfdArray[RfdCnt] = pRxReorderEntry->prxb; RfdCnt = RfdCnt + 1; list_add_tail(&pRxReorderEntry->List, &ieee->RxReorder_Unused_List); } rtllib_indicate_packets(ieee, ieee->RfdArray, RfdCnt); pTS->RxIndicateSeq = 0xffff; } static void RxReorderIndicatePacket(struct rtllib_device *ieee, struct rtllib_rxb *prxb, struct rx_ts_record *pTS, u16 SeqNum) { struct rt_hi_throughput *pHTInfo = ieee->pHTInfo; struct rx_reorder_entry *pReorderEntry = NULL; u8 WinSize = pHTInfo->RxReorderWinSize; u16 WinEnd = 0; u8 index = 0; bool bMatchWinStart = false, bPktInBuf = false; unsigned long flags; RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Seq is %d, pTS->RxIndicateSeq" " is %d, WinSize is %d\n", __func__, SeqNum, pTS->RxIndicateSeq, WinSize); spin_lock_irqsave(&(ieee->reorder_spinlock), flags); WinEnd = (pTS->RxIndicateSeq + WinSize - 1) % 4096; /* Rx Reorder initialize condition.*/ if (pTS->RxIndicateSeq == 0xffff) pTS->RxIndicateSeq = SeqNum; /* Drop out the packet which SeqNum is smaller than WinStart */ if (SN_LESS(SeqNum, pTS->RxIndicateSeq)) { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Packet Drop! 
IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); pHTInfo->RxReorderDropCounter++; { int i; for (i = 0; i < prxb->nr_subframes; i++) dev_kfree_skb(prxb->subframes[i]); kfree(prxb); prxb = NULL; } spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); return; } /* * Sliding window manipulation. Conditions includes: * 1. Incoming SeqNum is equal to WinStart =>Window shift 1 * 2. Incoming SeqNum is larger than the WinEnd => Window shift N */ if (SN_EQUAL(SeqNum, pTS->RxIndicateSeq)) { pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096; bMatchWinStart = true; } else if (SN_LESS(WinEnd, SeqNum)) { if (SeqNum >= (WinSize - 1)) pTS->RxIndicateSeq = SeqNum + 1 - WinSize; else pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum + 1)) + 1; RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Window Shift! IndicateSeq: %d," " NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); } /* * Indication process. * After Packet dropping and Sliding Window shifting as above, we can * now just indicate the packets with the SeqNum smaller than latest * WinStart and struct buffer other packets. */ /* For Rx Reorder condition: * 1. All packets with SeqNum smaller than WinStart => Indicate * 2. All packets with SeqNum larger than or equal to * WinStart => Buffer it. */ if (bMatchWinStart) { /* Current packet is going to be indicated.*/ RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Packets indication!! " "IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); ieee->prxbIndicateArray[0] = prxb; index = 1; } else { /* Current packet is going to be inserted into pending list.*/ if (!list_empty(&ieee->RxReorder_Unused_List)) { pReorderEntry = (struct rx_reorder_entry *) list_entry(ieee->RxReorder_Unused_List.next, struct rx_reorder_entry, List); list_del_init(&pReorderEntry->List); /* Make a reorder entry and insert into a the packet list.*/ pReorderEntry->SeqNum = SeqNum; pReorderEntry->prxb = prxb; if (!AddReorderEntry(pTS, pReorderEntry)) { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Duplicate packet is " "dropped!! 
IndicateSeq: %d, " "NewSeq: %d\n", __func__, pTS->RxIndicateSeq, SeqNum); list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List); { int i; for (i = 0; i < prxb->nr_subframes; i++) dev_kfree_skb(prxb->subframes[i]); kfree(prxb); prxb = NULL; } } else { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "Pkt insert into struct buffer!! " "IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); } } else { /* * Packets are dropped if there are not enough reorder * entries. This part should be modified!! We can just * indicate all the packets in struct buffer and get * reorder entries. */ RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():" " There is no reorder entry!! Packet is " "dropped!!\n"); { int i; for (i = 0; i < prxb->nr_subframes; i++) dev_kfree_skb(prxb->subframes[i]); kfree(prxb); prxb = NULL; } } } /* Check if there is any packet need indicate.*/ while (!list_empty(&pTS->RxPendingPktList)) { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): start RREORDER indicate\n", __func__); pReorderEntry = (struct rx_reorder_entry *)list_entry(pTS->RxPendingPktList.prev, struct rx_reorder_entry, List); if (SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) || SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) { /* This protect struct buffer from overflow. */ if (index >= REORDER_WIN_SIZE) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicate" "Packet(): Buffer overflow!!\n"); bPktInBuf = true; break; } list_del_init(&pReorderEntry->List); if (SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096; ieee->prxbIndicateArray[index] = pReorderEntry->prxb; RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): Indicate SeqNum" " %d!\n", __func__, pReorderEntry->SeqNum); index++; list_add_tail(&pReorderEntry->List, &ieee->RxReorder_Unused_List); } else { bPktInBuf = true; break; } } /* Handling pending timer. 
Set this timer to prevent from long time * Rx buffering.*/ if (index > 0) { if (timer_pending(&pTS->RxPktPendingTimer)) del_timer_sync(&pTS->RxPktPendingTimer); pTS->RxTimeoutIndicateSeq = 0xffff; if (index > REORDER_WIN_SIZE) { RTLLIB_DEBUG(RTLLIB_DL_ERR, "RxReorderIndicatePacket():" " Rx Reorer struct buffer full!!\n"); spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); return; } rtllib_indicate_packets(ieee, ieee->prxbIndicateArray, index); bPktInBuf = false; } if (bPktInBuf && pTS->RxTimeoutIndicateSeq == 0xffff) { RTLLIB_DEBUG(RTLLIB_DL_REORDER, "%s(): SET rx timeout timer\n", __func__); pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq; mod_timer(&pTS->RxPktPendingTimer, jiffies + MSECS(pHTInfo->RxReorderPendingTime)); } spin_unlock_irqrestore(&(ieee->reorder_spinlock), flags); } static u8 parse_subframe(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats, struct rtllib_rxb *rxb, u8 *src, u8 *dst) { struct rtllib_hdr_3addr *hdr = (struct rtllib_hdr_3addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); u16 LLCOffset = sizeof(struct rtllib_hdr_3addr); u16 ChkLength; bool bIsAggregateFrame = false; u16 nSubframe_Length; u8 nPadding_Length = 0; u16 SeqNum = 0; struct sk_buff *sub_skb; u8 *data_ptr; /* just for debug purpose */ SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl)); if ((RTLLIB_QOS_HAS_SEQ(fc)) && (((union frameqos *)(skb->data + RTLLIB_3ADDR_LEN))->field.reserved)) bIsAggregateFrame = true; if (RTLLIB_QOS_HAS_SEQ(fc)) LLCOffset += 2; if (rx_stats->bContainHTC) LLCOffset += sHTCLng; ChkLength = LLCOffset; if (skb->len <= ChkLength) return 0; skb_pull(skb, LLCOffset); ieee->bIsAggregateFrame = bIsAggregateFrame; if (!bIsAggregateFrame) { rxb->nr_subframes = 1; /* altered by clark 3/30/2010 * The struct buffer size of the skb indicated to upper layer * must be less than 5000, or the defraged IP datagram * in the IP layer will exceed "ipfrag_high_tresh" and be * discarded. 
so there must not use the function * "skb_copy" and "skb_clone" for "skb". */ /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(RTLLIB_SKBBUFFER_SIZE); skb_reserve(sub_skb, 12); data_ptr = (u8 *)skb_put(sub_skb, skb->len); memcpy(data_ptr, skb->data, skb->len); sub_skb->dev = ieee->dev; rxb->subframes[0] = sub_skb; memcpy(rxb->src, src, ETH_ALEN); memcpy(rxb->dst, dst, ETH_ALEN); rxb->subframes[0]->dev = ieee->dev; return 1; } else { rxb->nr_subframes = 0; memcpy(rxb->src, src, ETH_ALEN); memcpy(rxb->dst, dst, ETH_ALEN); while (skb->len > ETHERNET_HEADER_SIZE) { /* Offset 12 denote 2 mac address */ nSubframe_Length = *((u16 *)(skb->data + 12)); nSubframe_Length = (nSubframe_Length >> 8) + (nSubframe_Length << 8); if (skb->len < (ETHERNET_HEADER_SIZE + nSubframe_Length)) { printk(KERN_INFO "%s: A-MSDU parse error!! " "pRfd->nTotalSubframe : %d\n",\ __func__, rxb->nr_subframes); printk(KERN_INFO "%s: A-MSDU parse error!! " "Subframe Length: %d\n", __func__, nSubframe_Length); printk(KERN_INFO "nRemain_Length is %d and " "nSubframe_Length is : %d\n", skb->len, nSubframe_Length); printk(KERN_INFO "The Packet SeqNum is %d\n", SeqNum); return 0; } /* move the data point to data content */ skb_pull(skb, ETHERNET_HEADER_SIZE); /* altered by clark 3/30/2010 * The struct buffer size of the skb indicated to upper layer * must be less than 5000, or the defraged IP datagram * in the IP layer will exceed "ipfrag_high_tresh" and be * discarded. so there must not use the function * "skb_copy" and "skb_clone" for "skb". */ /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); skb_reserve(sub_skb, 12); data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length); memcpy(data_ptr, skb->data, nSubframe_Length); sub_skb->dev = ieee->dev; rxb->subframes[rxb->nr_subframes++] = sub_skb; if (rxb->nr_subframes >= MAX_SUBFRAME_COUNT) { RTLLIB_DEBUG_RX("ParseSubframe(): Too many " "Subframes! 
Packets dropped!\n"); break; } skb_pull(skb, nSubframe_Length); if (skb->len != 0) { nPadding_Length = 4 - ((nSubframe_Length + ETHERNET_HEADER_SIZE) % 4); if (nPadding_Length == 4) nPadding_Length = 0; if (skb->len < nPadding_Length) return 0; skb_pull(skb, nPadding_Length); } } return rxb->nr_subframes; } } static size_t rtllib_rx_get_hdrlen(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats) { struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); size_t hdrlen = 0; hdrlen = rtllib_get_hdrlen(fc); if (HTCCheck(ieee, skb->data)) { if (net_ratelimit()) printk(KERN_INFO "%s: find HTCControl!\n", __func__); hdrlen += 4; rx_stats->bContainHTC = 1; } if (RTLLIB_QOS_HAS_SEQ(fc)) rx_stats->bIsQosData = 1; return hdrlen; } static int rtllib_rx_check_duplicate(struct rtllib_device *ieee, struct sk_buff *skb, u8 multicast) { struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data; u16 fc, sc; u8 frag, type, stype; fc = le16_to_cpu(hdr->frame_ctl); type = WLAN_FC_GET_TYPE(fc); stype = WLAN_FC_GET_STYPE(fc); sc = le16_to_cpu(hdr->seq_ctl); frag = WLAN_GET_SEQ_FRAG(sc); if ((ieee->pHTInfo->bCurRxReorderEnable == false) || !ieee->current_network.qos_data.active || !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) { if (!((type == RTLLIB_FTYPE_MGMT) && (stype == RTLLIB_STYPE_BEACON))) { if (is_duplicate_packet(ieee, hdr)) return -1; } } else { struct rx_ts_record *pRxTS = NULL; if (GetTs(ieee, (struct ts_common_info **) &pRxTS, hdr->addr2, (u8)Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) { if ((fc & (1<<11)) && (frag == pRxTS->RxLastFragNum) && (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum)) { return -1; } else { pRxTS->RxLastFragNum = frag; pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc); } } else { RTLLIB_DEBUG(RTLLIB_DL_ERR, "ERR!!%s(): No TS!! 
Skip" " the check!!\n", __func__); return -1; } } return 0; } static void rtllib_rx_extract_addr(struct rtllib_device *ieee, struct rtllib_hdr_4addr *hdr, u8 *dst, u8 *src, u8 *bssid) { u16 fc = le16_to_cpu(hdr->frame_ctl); switch (fc & (RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS)) { case RTLLIB_FCTL_FROMDS: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr3, ETH_ALEN); memcpy(bssid, hdr->addr2, ETH_ALEN); break; case RTLLIB_FCTL_TODS: memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); memcpy(bssid, hdr->addr1, ETH_ALEN); break; case RTLLIB_FCTL_FROMDS | RTLLIB_FCTL_TODS: memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr4, ETH_ALEN); memcpy(bssid, ieee->current_network.bssid, ETH_ALEN); break; case 0: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); memcpy(bssid, hdr->addr3, ETH_ALEN); break; } } static int rtllib_rx_data_filter(struct rtllib_device *ieee, u16 fc, u8 *dst, u8 *src, u8 *bssid, u8 *addr2) { u8 zero_addr[ETH_ALEN] = {0}; u8 type, stype; type = WLAN_FC_GET_TYPE(fc); stype = WLAN_FC_GET_STYPE(fc); /* Filter frames from different BSS */ if (((fc & RTLLIB_FCTL_DSTODS) != RTLLIB_FCTL_DSTODS) && (compare_ether_addr(ieee->current_network.bssid, bssid) != 0) && memcmp(ieee->current_network.bssid, zero_addr, ETH_ALEN)) { return -1; } /* Filter packets sent by an STA that will be forwarded by AP */ if (ieee->IntelPromiscuousModeInfo.bPromiscuousOn && ieee->IntelPromiscuousModeInfo.bFilterSourceStationFrame) { if ((fc & RTLLIB_FCTL_TODS) && !(fc & RTLLIB_FCTL_FROMDS) && (compare_ether_addr(dst, ieee->current_network.bssid) != 0) && (compare_ether_addr(bssid, ieee->current_network.bssid) == 0)) { return -1; } } /* Nullfunc frames may have PS-bit set, so they must be passed to * hostap_handle_sta_rx() before being dropped here. 
*/ if (!ieee->IntelPromiscuousModeInfo.bPromiscuousOn) { if (stype != RTLLIB_STYPE_DATA && stype != RTLLIB_STYPE_DATA_CFACK && stype != RTLLIB_STYPE_DATA_CFPOLL && stype != RTLLIB_STYPE_DATA_CFACKPOLL && stype != RTLLIB_STYPE_QOS_DATA) { if (stype != RTLLIB_STYPE_NULLFUNC) RTLLIB_DEBUG_DROP( "RX: dropped data frame " "with no data (type=0x%02x, " "subtype=0x%02x)\n", type, stype); return -1; } } if (ieee->iw_mode != IW_MODE_MESH) { /* packets from our adapter are dropped (echo) */ if (!memcmp(src, ieee->dev->dev_addr, ETH_ALEN)) return -1; /* {broad,multi}cast packets to our BSS go through */ if (is_multicast_ether_addr(dst) || is_broadcast_ether_addr(dst)) { if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN)) return -1; } } return 0; } static int rtllib_rx_get_crypt(struct rtllib_device *ieee, struct sk_buff *skb, struct lib80211_crypt_data **crypt, size_t hdrlen) { struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); int idx = 0; if (ieee->host_decrypt) { if (skb->len >= hdrlen + 3) idx = skb->data[hdrlen + 3] >> 6; *crypt = ieee->crypt_info.crypt[idx]; /* allow NULL decrypt to indicate an station specific override * for default encryption */ if (*crypt && ((*crypt)->ops == NULL || (*crypt)->ops->decrypt_mpdu == NULL)) *crypt = NULL; if (!*crypt && (fc & RTLLIB_FCTL_WEP)) { /* This seems to be triggered by some (multicast?) * frames from other than current BSS, so just drop the * frames silently instead of filling system log with * these reports. 
 */
		RTLLIB_DEBUG_DROP("Decryption failed (not set)"
				  " (SA= %pM)\n", hdr->addr2);
		ieee->ieee_stats.rx_discards_undecryptable++;
		return -1;
	}
	}

	return 0;
}

/*
 * rtllib_rx_decrypt - software-decrypt and, if fragmented, reassemble one
 * received data frame.
 *
 * Returns 0 when the skb holds a complete plaintext MSDU, -1 when the
 * frame must be dropped, and -2 when the fragment was stored in the
 * fragment cache and more fragments are still expected.
 *
 * NOTE(review): on the last-fragment path this function frees @skb and
 * continues on the cached skb through a *local* pointer only; the caller's
 * skb pointer is not updated.  Verify against the callers that the old
 * buffer is not used again after a reassembled frame is returned.
 */
static int rtllib_rx_decrypt(struct rtllib_device *ieee, struct sk_buff *skb,
			     struct rtllib_rx_stats *rx_stats,
			     struct lib80211_crypt_data *crypt, size_t hdrlen)
{
	struct rtllib_hdr_4addr *hdr;
	int keyidx = 0;
	u16 fc, sc;
	u8 frag;

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	fc = le16_to_cpu(hdr->frame_ctl);
	sc = le16_to_cpu(hdr->seq_ctl);
	frag = WLAN_GET_SEQ_FRAG(sc);

	/* Request software decryption when the hardware did not decrypt. */
	if ((!rx_stats->Decrypted))
		ieee->need_sw_enc = 1;
	else
		ieee->need_sw_enc = 0;

	keyidx = rtllib_rx_frame_decrypt(ieee, skb, crypt);
	if (ieee->host_decrypt && (fc & RTLLIB_FCTL_WEP) && (keyidx < 0)) {
		printk(KERN_INFO "%s: decrypt frame error\n", __func__);
		return -1;
	}

	hdr = (struct rtllib_hdr_4addr *) skb->data;
	if ((frag != 0 || (fc & RTLLIB_FCTL_MOREFRAGS))) {
		int flen;
		struct sk_buff *frag_skb = rtllib_frag_cache_get(ieee, hdr);

		RTLLIB_DEBUG_FRAG("Rx Fragment received (%u)\n", frag);

		if (!frag_skb) {
			RTLLIB_DEBUG(RTLLIB_DL_RX | RTLLIB_DL_FRAG,
				     "Rx cannot get skb from fragment "
				     "cache (morefrag=%d seq=%u frag=%u)\n",
				     (fc & RTLLIB_FCTL_MOREFRAGS) != 0,
				     WLAN_GET_SEQ_SEQ(sc), frag);
			return -1;
		}
		flen = skb->len;
		/* Only the first fragment contributes its 802.11 header. */
		if (frag != 0)
			flen -= hdrlen;

		if (frag_skb->tail + flen > frag_skb->end) {
			printk(KERN_WARNING "%s: host decrypted and "
			       "reassembled frame did not fit skb\n",
			       __func__);
			rtllib_frag_cache_invalidate(ieee, hdr);
			return -1;
		}

		if (frag == 0) {
			/* copy first fragment (including full headers) into
			 * beginning of the fragment cache skb */
			memcpy(skb_put(frag_skb, flen), skb->data, flen);
		} else {
			/* append frame payload to the end of the fragment
			 * cache skb */
			memcpy(skb_put(frag_skb, flen), skb->data + hdrlen,
			       flen);
		}
		dev_kfree_skb_any(skb);
		skb = NULL;

		if (fc & RTLLIB_FCTL_MOREFRAGS) {
			/* more fragments expected - leave the skb in fragment
			 * cache for now; it will be delivered to upper layers
			 * after all fragments have been received */
			return -2;
		}

		/* this was the last fragment and the frame will be
		 * delivered, so remove skb from fragment cache */
		skb = frag_skb;
		hdr = (struct rtllib_hdr_4addr *) skb->data;
		rtllib_frag_cache_invalidate(ieee, hdr);
	}

	/* skb: hdr + (possible reassembled) full MSDU payload; possibly still
	 * encrypted/authenticated */
	if (ieee->host_decrypt && (fc & RTLLIB_FCTL_WEP) &&
	    rtllib_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) {
		printk(KERN_INFO "%s: ==>decrypt msdu error\n", __func__);
		return -1;
	}

	hdr = (struct rtllib_hdr_4addr *) skb->data;
	if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep) {
		if (/*ieee->ieee802_1x &&*/
		    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
			/* pass unencrypted EAPOL frames even if encryption is
			 * configured */
			struct eapol *eap = (struct eapol *)(skb->data + 24);
			RTLLIB_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
					 eap_get_type(eap->type));
		} else {
			RTLLIB_DEBUG_DROP(
				"encryption configured, but RX "
				"frame not encrypted (SA= %pM)\n",
				hdr->addr2);
			return -1;
		}
	}

	/* Log EAPOL frames that arrived while encryption is active. */
	if (crypt && !(fc & RTLLIB_FCTL_WEP) &&
	    rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
		struct eapol *eap = (struct eapol *)(skb->data + 24);
		RTLLIB_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n",
				 eap_get_type(eap->type));
	}

	/* Drop plaintext data frames when encryption is mandatory. */
	if (crypt && !(fc & RTLLIB_FCTL_WEP) && !ieee->open_wep &&
	    !rtllib_is_eapol_frame(ieee, skb, hdrlen)) {
		RTLLIB_DEBUG_DROP(
			"dropped unencrypted RX data "
			"frame from %pM"
			" (drop_unencrypted=1)\n",
			hdr->addr2);
		return -1;
	}

	if (rtllib_is_eapol_frame(ieee, skb, hdrlen))
		printk(KERN_WARNING "RX: IEEE802.1X EAPOL frame!\n");

	return 0;
}

/*
 * Leave leisure power-save when enough unicast traffic was seen in the
 * current link-detection period; always refreshes the last-rx timestamp.
 */
static void rtllib_rx_check_leave_lps(struct rtllib_device *ieee, u8 unicast,
				      u8 nr_subframes)
{
	if (unicast) {
		if ((ieee->state == RTLLIB_LINKED)) {
			if (((ieee->LinkDetectInfo.NumRxUnicastOkInPeriod +
			    ieee->LinkDetectInfo.NumTxOkInPeriod) > 8) ||
			    (ieee->LinkDetectInfo.NumRxUnicastOkInPeriod > 2)) {
				if (ieee->LeisurePSLeave)
					ieee->LeisurePSLeave(ieee->dev);
			}
		}
	}
	ieee->last_rx_ps_time = jiffies;
}

static void
rtllib_rx_indicate_pkt_legacy(struct rtllib_device *ieee,
			      struct rtllib_rx_stats *rx_stats,
			      struct rtllib_rxb *rxb, u8 *dst, u8 *src)
{
	/* Convert every subframe in @rxb into an Ethernet frame and hand it
	 * to the network stack via netif_rx(); consumes (kfrees) @rxb. */
	struct net_device *dev = ieee->dev;
	u16 ethertype;
	int i = 0;

	if (rxb == NULL) {
		printk(KERN_INFO "%s: rxb is NULL!!\n", __func__);
		return ;
	}

	for (i = 0; i < rxb->nr_subframes; i++) {
		struct sk_buff *sub_skb = rxb->subframes[i];

		if (sub_skb) {
			/* convert hdr + possible LLC headers into Ethernet
			 * header */
			ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7];
			if (sub_skb->len >= 8 &&
			    ((memcmp(sub_skb->data, rfc1042_header,
				     SNAP_SIZE) == 0 &&
			      ethertype != ETH_P_AARP &&
			      ethertype != ETH_P_IPX) ||
			     memcmp(sub_skb->data, bridge_tunnel_header,
				    SNAP_SIZE) == 0)) {
				/* remove RFC1042 or Bridge-Tunnel
				 * encapsulation and replace EtherType */
				skb_pull(sub_skb, SNAP_SIZE);
				memcpy(skb_push(sub_skb, ETH_ALEN), src,
				       ETH_ALEN);
				memcpy(skb_push(sub_skb, ETH_ALEN), dst,
				       ETH_ALEN);
			} else {
				u16 len;
				/* Leave Ethernet header part of hdr and full
				 * payload */
				len = htons(sub_skb->len);
				memcpy(skb_push(sub_skb, 2), &len, 2);
				memcpy(skb_push(sub_skb, ETH_ALEN), src,
				       ETH_ALEN);
				memcpy(skb_push(sub_skb, ETH_ALEN), dst,
				       ETH_ALEN);
			}

			ieee->stats.rx_packets++;
			ieee->stats.rx_bytes += sub_skb->len;

			if (is_multicast_ether_addr(dst))
				ieee->stats.multicast++;

			/* Indicate the packets to upper layer */
			memset(sub_skb->cb, 0, sizeof(sub_skb->cb));
			sub_skb->protocol = eth_type_trans(sub_skb, dev);
			sub_skb->dev = dev;
			sub_skb->dev->stats.rx_packets++;
			sub_skb->dev->stats.rx_bytes += sub_skb->len;
			/* 802.11 crc not sufficient */
			sub_skb->ip_summed = CHECKSUM_NONE;
			netif_rx(sub_skb);
		}
	}
	kfree(rxb);
	rxb = NULL;
}

/*
 * Main receive path for infrastructure/ad-hoc modes: filters, decrypts and
 * reassembles one frame, then indicates it directly or through the RX
 * reorder queue.  Returns 1 when the skb was consumed, 0 when the caller
 * keeps ownership of it.
 */
static int rtllib_rx_InfraAdhoc(struct rtllib_device *ieee, struct sk_buff *skb,
		 struct rtllib_rx_stats *rx_stats)
{
	struct net_device *dev = ieee->dev;
	struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data;
	struct lib80211_crypt_data *crypt = NULL;
	struct rtllib_rxb *rxb = NULL;
	struct rx_ts_record *pTS = NULL;
	u16 fc, sc, SeqNum = 0;
	u8 type, stype, multicast = 0, unicast = 0, nr_subframes = 0, TID = 0;
	u8 dst[ETH_ALEN], src[ETH_ALEN], bssid[ETH_ALEN] = {0}, *payload;
	size_t hdrlen = 0;
	bool bToOtherSTA = false;
	int ret = 0, i = 0;

	hdr = (struct rtllib_hdr_4addr *)skb->data;
	fc = le16_to_cpu(hdr->frame_ctl);
	type = WLAN_FC_GET_TYPE(fc);
	stype = WLAN_FC_GET_STYPE(fc);
	sc = le16_to_cpu(hdr->seq_ctl);

	/*Filter pkt not to me*/
	multicast = is_multicast_ether_addr(hdr->addr1)|is_broadcast_ether_addr(hdr->addr1);
	unicast = !multicast;
	if (unicast && (compare_ether_addr(dev->dev_addr, hdr->addr1) != 0)) {
		/* In promiscuous mode frames for other stations are kept and
		 * only marked; otherwise they are dropped. */
		if (ieee->bNetPromiscuousMode)
			bToOtherSTA = true;
		else
			goto rx_dropped;
	}

	/*Filter pkt has too small length */
	hdrlen = rtllib_rx_get_hdrlen(ieee, skb, rx_stats);
	if (skb->len < hdrlen) {
		printk(KERN_INFO "%s():ERR!!! skb->len is smaller than hdrlen\n",
		       __func__);
		goto rx_dropped;
	}

	/* Filter Duplicate pkt */
	ret = rtllib_rx_check_duplicate(ieee, skb, multicast);
	if (ret < 0)
		goto rx_dropped;

	/* Filter CTRL Frame */
	if (type == RTLLIB_FTYPE_CTL)
		goto rx_dropped;

	/* Filter MGNT Frame */
	if (type == RTLLIB_FTYPE_MGMT) {
		if (bToOtherSTA)
			goto rx_dropped;
		if (rtllib_rx_frame_mgmt(ieee, skb, rx_stats, type, stype))
			goto rx_dropped;
		else
			goto rx_exit;
	}

	/* Filter WAPI DATA Frame */

	/* Update statstics for AP roaming */
	if (!bToOtherSTA) {
		ieee->LinkDetectInfo.NumRecvDataInPeriod++;
		ieee->LinkDetectInfo.NumRxOkInPeriod++;
	}
	dev->last_rx = jiffies;

	/* Data frame - extract src/dst addresses */
	rtllib_rx_extract_addr(ieee, hdr, dst, src, bssid);

	/* Filter Data frames */
	ret = rtllib_rx_data_filter(ieee, fc, dst, src, bssid, hdr->addr2);
	if (ret < 0)
		goto rx_dropped;

	/* Header-only data frames carry no payload worth indicating. */
	if (skb->len == hdrlen)
		goto rx_dropped;

	/* Send pspoll based on moredata */
	if ((ieee->iw_mode == IW_MODE_INFRA) &&
	    (ieee->sta_sleep == LPS_IS_SLEEP) &&
	    (ieee->polling) && (!bToOtherSTA)) {
		if (WLAN_FC_MORE_DATA(fc)) {
			/* more data bit is set, let's request a new frame
			 * from the AP */
			rtllib_sta_ps_send_pspoll_frame(ieee);
		} else {
			ieee->polling = false;
		}
	}

	/* Get crypt if encrypted */
	ret = rtllib_rx_get_crypt(ieee, skb, &crypt, hdrlen);
	if (ret == -1)
		goto rx_dropped;

	/* Decrypt data frame (including reassemble) */
	ret = rtllib_rx_decrypt(ieee, skb, rx_stats, crypt, hdrlen);
	if (ret == -1)
		goto rx_dropped;
	else if (ret == -2)
		/* Fragment cached; more fragments expected. */
		goto rx_exit;

	/* Get TS for Rx Reorder */
	hdr = (struct rtllib_hdr_4addr *) skb->data;
	if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data)
		&& !is_multicast_ether_addr(hdr->addr1)
		&& !is_broadcast_ether_addr(hdr->addr1)
		&& (!bToOtherSTA)) {
		TID = Frame_QoSTID(skb->data);
		SeqNum = WLAN_GET_SEQ_SEQ(sc);
		GetTs(ieee, (struct ts_common_info **) &pTS, hdr->addr2, TID,
		      RX_DIR, true);
		if (TID != 0 && TID != 3)
			ieee->bis_any_nonbepkts = true;
	}

	/* Parse rx data frame (For AMSDU) */
	/* skb: hdr + (possible reassembled) full plaintext payload */
	payload = skb->data + hdrlen;
	rxb = kmalloc(sizeof(struct rtllib_rxb), GFP_ATOMIC);
	if (rxb == NULL) {
		RTLLIB_DEBUG(RTLLIB_DL_ERR,
			     "%s(): kmalloc rxb error\n", __func__);
		goto rx_dropped;
	}
	/* to parse amsdu packets */
	/* qos data packets & reserved bit is 1 */
	if (parse_subframe(ieee, skb, rx_stats, rxb, src, dst) == 0) {
		/* only to free rxb, and not submit the packets to upper
		 * layer */
		for (i = 0; i < rxb->nr_subframes; i++)
			dev_kfree_skb(rxb->subframes[i]);
		kfree(rxb);
		rxb = NULL;
		goto rx_dropped;
	}

	/* Update WAPI PN */

	/* Check if leave LPS */
	if (!bToOtherSTA) {
		if (ieee->bIsAggregateFrame)
			nr_subframes = rxb->nr_subframes;
		else
			nr_subframes = 1;
		if (unicast)
			ieee->LinkDetectInfo.NumRxUnicastOkInPeriod += nr_subframes;
		rtllib_rx_check_leave_lps(ieee, unicast, nr_subframes);
	}

	/* Indicate packets to upper layer or Rx Reorder */
	if (ieee->pHTInfo->bCurRxReorderEnable == false || pTS == NULL ||
	    bToOtherSTA)
		rtllib_rx_indicate_pkt_legacy(ieee, rx_stats, rxb, dst, src);
	else
		RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum);

	/* NOTE(review): when rtllib_rx_decrypt() reassembled fragments it
	 * already freed this skb internally while this local pointer was not
	 * updated -- confirm the fragmented-frame path before relying on
	 * this dev_kfree_skb(). */
	dev_kfree_skb(skb);

rx_exit:
	return 1;

rx_dropped:
	if (rxb != NULL) {
		kfree(rxb);
		rxb = NULL;
	}
	ieee->stats.rx_dropped++;
/* Returning 0 indicates to caller that we have not handled the SKB-- * so it is still allocated and can be used again by underlying * hardware as a DMA target */ return 0; } static int rtllib_rx_Master(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats) { return 0; } static int rtllib_rx_Monitor(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats) { struct rtllib_hdr_4addr *hdr = (struct rtllib_hdr_4addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); size_t hdrlen = rtllib_get_hdrlen(fc); if (skb->len < hdrlen) { printk(KERN_INFO "%s():ERR!!! skb->len is smaller than hdrlen\n", __func__); return 0; } if (HTCCheck(ieee, skb->data)) { if (net_ratelimit()) printk(KERN_INFO "%s: Find HTCControl!\n", __func__); hdrlen += 4; } rtllib_monitor_rx(ieee, skb, rx_stats, hdrlen); ieee->stats.rx_packets++; ieee->stats.rx_bytes += skb->len; return 1; } static int rtllib_rx_Mesh(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats) { return 0; } /* All received frames are sent to this function. @skb contains the frame in * IEEE 802.11 format, i.e., in the format it was sent over air. * This function is called only as a tasklet (software IRQ). 
*/ int rtllib_rx(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *rx_stats) { int ret = 0; if ((NULL == ieee) || (NULL == skb) || (NULL == rx_stats)) { printk(KERN_INFO "%s: Input parameters NULL!\n", __func__); goto rx_dropped; } if (skb->len < 10) { printk(KERN_INFO "%s: SKB length < 10\n", __func__); goto rx_dropped; } switch (ieee->iw_mode) { case IW_MODE_ADHOC: case IW_MODE_INFRA: ret = rtllib_rx_InfraAdhoc(ieee, skb, rx_stats); break; case IW_MODE_MASTER: case IW_MODE_REPEAT: ret = rtllib_rx_Master(ieee, skb, rx_stats); break; case IW_MODE_MONITOR: ret = rtllib_rx_Monitor(ieee, skb, rx_stats); break; case IW_MODE_MESH: ret = rtllib_rx_Mesh(ieee, skb, rx_stats); break; default: printk(KERN_INFO"%s: ERR iw mode!!!\n", __func__); break; } return ret; rx_dropped: ieee->stats.rx_dropped++; return 0; } EXPORT_SYMBOL(rtllib_rx); static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; /* * Make ther structure we read from the beacon packet has * the right values */ static int rtllib_verify_qos_info(struct rtllib_qos_information_element *info_element, int sub_type) { if (info_element->qui_subtype != sub_type) return -1; if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN)) return -1; if (info_element->qui_type != QOS_OUI_TYPE) return -1; if (info_element->version != QOS_VERSION_1) return -1; return 0; } /* * Parse a QoS parameter element */ static int rtllib_read_qos_param_element(struct rtllib_qos_parameter_info *element_param, struct rtllib_info_element *info_element) { int ret = 0; u16 size = sizeof(struct rtllib_qos_parameter_info) - 2; if ((info_element == NULL) || (element_param == NULL)) return -1; if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) { memcpy(element_param->info_element.qui, info_element->data, info_element->len); element_param->info_element.elementID = info_element->id; element_param->info_element.length = info_element->len; } else ret = -1; if (ret == 0) ret = 
		rtllib_verify_qos_info(&element_param->info_element,
				       QOS_OUI_PARAM_SUB_TYPE);
	return ret;
}

/*
 * Parse a QoS information element
 */
static int rtllib_read_qos_info_element(struct rtllib_qos_information_element
					*element_info,
					struct rtllib_info_element
					*info_element)
{
	int ret = 0;
	u16 size = sizeof(struct rtllib_qos_information_element) - 2;

	if (element_info == NULL)
		return -1;
	if (info_element == NULL)
		return -1;

	if ((info_element->id == QOS_ELEMENT_ID) &&
	    (info_element->len == size)) {
		memcpy(element_info->qui, info_element->data,
		       info_element->len);
		element_info->elementID = info_element->id;
		element_info->length = info_element->len;
	} else
		ret = -1;

	if (ret == 0)
		ret = rtllib_verify_qos_info(element_info,
					     QOS_OUI_INFO_SUB_TYPE);
	return ret;
}

/*
 * Write QoS parameters from the ac parameters.
 */
static int rtllib_qos_convert_ac_to_parameters(struct rtllib_qos_parameter_info
					       *param_elm,
					       struct rtllib_qos_data *qos_data)
{
	struct rtllib_qos_ac_parameter *ac_params;
	struct rtllib_qos_parameters *qos_param = &(qos_data->parameters);
	int rc = 0;
	int i;
	u8 aci;
	u8 acm;

	qos_data->wmm_acm = 0;
	for (i = 0; i < QOS_QUEUE_NUM; i++) {
		ac_params = &(param_elm->ac_params_record[i]);
		/* ACI is bits 5-6, ACM (admission control mandatory) bit 4. */
		aci = (ac_params->aci_aifsn & 0x60) >> 5;
		acm = (ac_params->aci_aifsn & 0x10) >> 4;

		if (aci >= QOS_QUEUE_NUM)
			continue;
		switch (aci) {
		case 1:
			/* BIT(0) | BIT(3) */
			if (acm)
				qos_data->wmm_acm |= (0x01<<0)|(0x01<<3);
			break;
		case 2:
			/* BIT(4) | BIT(5) */
			if (acm)
				qos_data->wmm_acm |= (0x01<<4)|(0x01<<5);
			break;
		case 3:
			/* BIT(6) | BIT(7) */
			if (acm)
				qos_data->wmm_acm |= (0x01<<6)|(0x01<<7);
			break;
		case 0:
		default:
			/* BIT(1) | BIT(2) */
			if (acm)
				qos_data->wmm_acm |= (0x01<<1)|(0x01<<2);
			break;
		}

		qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f;

		/* WMM spec P.11: The minimum value for AIFSN shall be 2 */
		qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ?
			2 : qos_param->aifs[aci];

		qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F;
		qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4;

		qos_param->flag[aci] =
		    (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00;
		qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit);
	}
	return rc;
}

/*
 * we have a generic data element which it may contain QoS information or
 * parameters element. check the information element length to decide
 * which type to read
 */
static int rtllib_parse_qos_info_param_IE(struct rtllib_info_element
					  *info_element,
					  struct rtllib_network *network)
{
	int rc = 0;
	struct rtllib_qos_information_element qos_info_element;

	rc = rtllib_read_qos_info_element(&qos_info_element, info_element);

	if (rc == 0) {
		network->qos_data.param_count = qos_info_element.ac_info & 0x0F;
		network->flags |= NETWORK_HAS_QOS_INFORMATION;
	} else {
		/* Not an info element; try the (longer) parameter element. */
		struct rtllib_qos_parameter_info param_element;

		rc = rtllib_read_qos_param_element(&param_element,
						   info_element);
		if (rc == 0) {
			rtllib_qos_convert_ac_to_parameters(&param_element,
							    &(network->qos_data));
			network->flags |= NETWORK_HAS_QOS_PARAMETERS;
			network->qos_data.param_count =
			    param_element.info_element.ac_info & 0x0F;
		}
	}

	if (rc == 0) {
		RTLLIB_DEBUG_QOS("QoS is supported\n");
		network->qos_data.supported = 1;
	}
	return rc;
}

/* Map an information-element id to its name for debug output. */
#define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x

static const char *get_info_element_string(u16 id)
{
	switch (id) {
	MFIE_STRING(SSID);
	MFIE_STRING(RATES);
	MFIE_STRING(FH_SET);
	MFIE_STRING(DS_SET);
	MFIE_STRING(CF_SET);
	MFIE_STRING(TIM);
	MFIE_STRING(IBSS_SET);
	MFIE_STRING(COUNTRY);
	MFIE_STRING(HOP_PARAMS);
	MFIE_STRING(HOP_TABLE);
	MFIE_STRING(REQUEST);
	MFIE_STRING(CHALLENGE);
	MFIE_STRING(POWER_CONSTRAINT);
	MFIE_STRING(POWER_CAPABILITY);
	MFIE_STRING(TPC_REQUEST);
	MFIE_STRING(TPC_REPORT);
	MFIE_STRING(SUPP_CHANNELS);
	MFIE_STRING(CSA);
	MFIE_STRING(MEASURE_REQUEST);
	MFIE_STRING(MEASURE_REPORT);
	MFIE_STRING(QUIET);
	MFIE_STRING(IBSS_DFS);
	MFIE_STRING(RSN);
	MFIE_STRING(RATES_EX);
	MFIE_STRING(GENERIC);
MFIE_STRING(QOS_PARAMETER); default: return "UNKNOWN"; } } static inline void rtllib_extract_country_ie( struct rtllib_device *ieee, struct rtllib_info_element *info_element, struct rtllib_network *network, u8 *addr2) { if (IS_DOT11D_ENABLE(ieee)) { if (info_element->len != 0) { memcpy(network->CountryIeBuf, info_element->data, info_element->len); network->CountryIeLen = info_element->len; if (!IS_COUNTRY_IE_VALID(ieee)) { if ((rtllib_act_scanning(ieee, false) == true) && (ieee->FirstIe_InScan == 1)) printk(KERN_INFO "Received beacon ContryIE, SSID: <%s>\n", network->ssid); Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data); } } if (IS_EQUAL_CIE_SRC(ieee, addr2)) UPDATE_CIE_WATCHDOG(ieee); } } int rtllib_parse_info_param(struct rtllib_device *ieee, struct rtllib_info_element *info_element, u16 length, struct rtllib_network *network, struct rtllib_rx_stats *stats) { u8 i; short offset; u16 tmp_htcap_len = 0; u16 tmp_htinfo_len = 0; u16 ht_realtek_agg_len = 0; u8 ht_realtek_agg_buf[MAX_IE_LEN]; char rates_str[64]; char *p; while (length >= sizeof(*info_element)) { if (sizeof(*info_element) + info_element->len > length) { RTLLIB_DEBUG_MGMT("Info elem: parse failed: " "info_element->len + 2 > left : " "info_element->len+2=%zd left=%d, id=%d.\n", info_element->len + sizeof(*info_element), length, info_element->id); /* We stop processing but don't return an error here * because some misbehaviour APs break this rule. ie. * Orinoco AP1000. 
*/ break; } switch (info_element->id) { case MFIE_TYPE_SSID: if (rtllib_is_empty_essid(info_element->data, info_element->len)) { network->flags |= NETWORK_EMPTY_ESSID; break; } network->ssid_len = min(info_element->len, (u8) IW_ESSID_MAX_SIZE); memcpy(network->ssid, info_element->data, network->ssid_len); if (network->ssid_len < IW_ESSID_MAX_SIZE) memset(network->ssid + network->ssid_len, 0, IW_ESSID_MAX_SIZE - network->ssid_len); RTLLIB_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n", network->ssid, network->ssid_len); break; case MFIE_TYPE_RATES: p = rates_str; network->rates_len = min(info_element->len, MAX_RATES_LENGTH); for (i = 0; i < network->rates_len; i++) { network->rates[i] = info_element->data[i]; p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); if (rtllib_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & RTLLIB_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } if (rtllib_is_cck_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_CCK; } } RTLLIB_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n", rates_str, network->rates_len); break; case MFIE_TYPE_RATES_EX: p = rates_str; network->rates_ex_len = min(info_element->len, MAX_RATES_EX_LENGTH); for (i = 0; i < network->rates_ex_len; i++) { network->rates_ex[i] = info_element->data[i]; p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); if (rtllib_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & RTLLIB_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } } RTLLIB_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n", rates_str, network->rates_ex_len); break; case MFIE_TYPE_DS_SET: RTLLIB_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n", info_element->data[0]); network->channel = info_element->data[0]; break; case MFIE_TYPE_FH_SET: RTLLIB_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n"); break; case MFIE_TYPE_CF_SET: RTLLIB_DEBUG_MGMT("MFIE_TYPE_CF_SET: 
ignored\n"); break; case MFIE_TYPE_TIM: if (info_element->len < 4) break; network->tim.tim_count = info_element->data[0]; network->tim.tim_period = info_element->data[1]; network->dtim_period = info_element->data[1]; if (ieee->state != RTLLIB_LINKED) break; network->last_dtim_sta_time = jiffies; network->dtim_data = RTLLIB_DTIM_VALID; if (info_element->data[2] & 1) network->dtim_data |= RTLLIB_DTIM_MBCAST; offset = (info_element->data[2] >> 1)*2; if (ieee->assoc_id < 8*offset || ieee->assoc_id > 8*(offset + info_element->len - 3)) break; offset = (ieee->assoc_id / 8) - offset; if (info_element->data[3 + offset] & (1 << (ieee->assoc_id % 8))) network->dtim_data |= RTLLIB_DTIM_UCAST; network->listen_interval = network->dtim_period; break; case MFIE_TYPE_ERP: network->erp_value = info_element->data[0]; network->flags |= NETWORK_HAS_ERP_VALUE; RTLLIB_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", network->erp_value); break; case MFIE_TYPE_IBSS_SET: network->atim_window = info_element->data[0]; RTLLIB_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n", network->atim_window); break; case MFIE_TYPE_CHALLENGE: RTLLIB_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n"); break; case MFIE_TYPE_GENERIC: RTLLIB_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len); if (!rtllib_parse_qos_info_param_IE(info_element, network)) break; if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && info_element->data[2] == 0xf2 && info_element->data[3] == 0x01) { network->wpa_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->wpa_ie, info_element, network->wpa_ie_len); break; } if (info_element->len == 7 && info_element->data[0] == 0x00 && info_element->data[1] == 0xe0 && info_element->data[2] == 0x4c && info_element->data[3] == 0x01 && info_element->data[4] == 0x02) network->Turbo_Enable = 1; if (tmp_htcap_len == 0) { if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x90 && info_element->data[2] == 0x4c && 
				    info_element->data[3] == 0x033) {
					/* Pre-11n EWC HT capability carried
					 * in a vendor IE (OUI 00:90:4c). */
					tmp_htcap_len = min(info_element->len,
							    (u8)MAX_IE_LEN);
					if (tmp_htcap_len != 0) {
						network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
						network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ?
							sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
						memcpy(network->bssht.bdHTCapBuf,
						       info_element->data,
						       network->bssht.bdHTCapLen);
					}
				}
				if (tmp_htcap_len != 0) {
					network->bssht.bdSupportHT = true;
					/* Single-RX-chain peer if the second
					 * MCS bitmap byte is all zero. */
					network->bssht.bdHT1R = ((((struct ht_capab_ele *)(network->bssht.bdHTCapBuf))->MCS[1]) == 0);
				} else {
					network->bssht.bdSupportHT = false;
					network->bssht.bdHT1R = false;
				}
			}

			if (tmp_htinfo_len == 0) {
				if (info_element->len >= 4 &&
				    info_element->data[0] == 0x00 &&
				    info_element->data[1] == 0x90 &&
				    info_element->data[2] == 0x4c &&
				    info_element->data[3] == 0x034) {
					/* EWC HT information element. */
					tmp_htinfo_len = min(info_element->len,
							     (u8)MAX_IE_LEN);
					if (tmp_htinfo_len != 0) {
						network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
						if (tmp_htinfo_len) {
							network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ?
								sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len;
							memcpy(network->bssht.bdHTInfoBuf,
							       info_element->data,
							       network->bssht.bdHTInfoLen);
						}
					}
				}
			}

			/* Realtek proprietary aggregation capability IE. */
			if (ieee->aggregation) {
				if (network->bssht.bdSupportHT) {
					if (info_element->len >= 4 &&
					    info_element->data[0] == 0x00 &&
					    info_element->data[1] == 0xe0 &&
					    info_element->data[2] == 0x4c &&
					    info_element->data[3] == 0x02) {
						ht_realtek_agg_len = min(info_element->len,
									 (u8)MAX_IE_LEN);
						memcpy(ht_realtek_agg_buf,
						       info_element->data,
						       info_element->len);
					}
					if (ht_realtek_agg_len >= 5) {
						network->realtek_cap_exit = true;
						network->bssht.bdRT2RTAggregation = true;
						if ((ht_realtek_agg_buf[4] == 1) &&
						    (ht_realtek_agg_buf[5] & 0x02))
							network->bssht.bdRT2RTLongSlotTime = true;
						if ((ht_realtek_agg_buf[4] == 1) &&
						    (ht_realtek_agg_buf[5] & RT_HT_CAP_USE_92SE))
							network->bssht.RT2RT_HT_Mode |= RT_HT_CAP_USE_92SE;
					}
				}
				if (ht_realtek_agg_len >= 5) {
					if ((ht_realtek_agg_buf[5] & RT_HT_CAP_USE_SOFTAP))
						network->bssht.RT2RT_HT_Mode |= RT_HT_CAP_USE_SOFTAP;
				}
			}

			/* Vendor-OUI fingerprinting used for interop
			 * workarounds elsewhere in the driver. */
			if ((info_element->len >= 3 &&
			     info_element->data[0] == 0x00 &&
			     info_element->data[1] == 0x05 &&
			     info_element->data[2] == 0xb5) ||
			    (info_element->len >= 3 &&
			     info_element->data[0] == 0x00 &&
			     info_element->data[1] == 0x0a &&
			     info_element->data[2] == 0xf7) ||
			    (info_element->len >= 3 &&
			     info_element->data[0] == 0x00 &&
			     info_element->data[1] == 0x10 &&
			     info_element->data[2] == 0x18)) {
				network->broadcom_cap_exist = true;
			}
			if (info_element->len >= 3 &&
			    info_element->data[0] == 0x00 &&
			    info_element->data[1] == 0x0c &&
			    info_element->data[2] == 0x43)
				network->ralink_cap_exist = true;
			if ((info_element->len >= 3 &&
			     info_element->data[0] == 0x00 &&
			     info_element->data[1] == 0x03 &&
			     info_element->data[2] == 0x7f) ||
			    (info_element->len >= 3 &&
			     info_element->data[0] == 0x00 &&
			     info_element->data[1] == 0x13 &&
			     info_element->data[2] == 0x74))
				network->atheros_cap_exist = true;

			if ((info_element->len >= 3 &&
			     info_element->data[0] == 0x00 &&
			     info_element->data[1] == 0x50 &&
			     info_element->data[2] == 0x43))
network->marvell_cap_exist = true; if (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96) network->cisco_cap_exist = true; if (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0a && info_element->data[2] == 0xf5) network->airgo_cap_exist = true; if (info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x01) { if (info_element->len == 6) { memcpy(network->CcxRmState, &info_element[4], 2); if (network->CcxRmState[0] != 0) network->bCcxRmEnable = true; else network->bCcxRmEnable = false; network->MBssidMask = network->CcxRmState[1] & 0x07; if (network->MBssidMask != 0) { network->bMBssidValid = true; network->MBssidMask = 0xff << (network->MBssidMask); memcpy(network->MBssid, network->bssid, ETH_ALEN); network->MBssid[5] &= network->MBssidMask; } else { network->bMBssidValid = false; } } else { network->bCcxRmEnable = false; } } if (info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x03) { if (info_element->len == 5) { network->bWithCcxVerNum = true; network->BssCcxVerNumber = info_element->data[4]; } else { network->bWithCcxVerNum = false; network->BssCcxVerNumber = 0; } } if (info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && info_element->data[2] == 0xf2 && info_element->data[3] == 0x04) { RTLLIB_DEBUG_MGMT("MFIE_TYPE_WZC: %d bytes\n", info_element->len); network->wzc_ie_len = min(info_element->len+2, MAX_WZC_IE_LEN); memcpy(network->wzc_ie, info_element, network->wzc_ie_len); } break; case MFIE_TYPE_RSN: RTLLIB_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n", info_element->len); network->rsn_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->rsn_ie, info_element, network->rsn_ie_len); break; case MFIE_TYPE_HT_CAP: 
			RTLLIB_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n",
					  info_element->len);
			tmp_htcap_len = min(info_element->len, (u8)MAX_IE_LEN);
			if (tmp_htcap_len != 0) {
				network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC;
				network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf) ?
					sizeof(network->bssht.bdHTCapBuf) : tmp_htcap_len;
				memcpy(network->bssht.bdHTCapBuf,
				       info_element->data,
				       network->bssht.bdHTCapLen);

				network->bssht.bdSupportHT = true;
				/* Single-RX-chain peer if the second MCS
				 * bitmap byte is zero. */
				network->bssht.bdHT1R = ((((struct ht_capab_ele *)
					network->bssht.bdHTCapBuf))->MCS[1]) == 0;

				network->bssht.bdBandWidth = (enum ht_channel_width)
					(((struct ht_capab_ele *)
					(network->bssht.bdHTCapBuf))->ChlWidth);
			} else {
				network->bssht.bdSupportHT = false;
				network->bssht.bdHT1R = false;
				network->bssht.bdBandWidth = HT_CHANNEL_WIDTH_20;
			}
			break;

		case MFIE_TYPE_HT_INFO:
			RTLLIB_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n",
					  info_element->len);
			tmp_htinfo_len = min(info_element->len, (u8)MAX_IE_LEN);
			if (tmp_htinfo_len) {
				network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE;
				network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf) ?
					sizeof(network->bssht.bdHTInfoBuf) : tmp_htinfo_len;
				memcpy(network->bssht.bdHTInfoBuf,
				       info_element->data,
				       network->bssht.bdHTInfoLen);
			}
			break;

		case MFIE_TYPE_AIRONET:
			RTLLIB_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n",
					  info_element->len);
			if (info_element->len > IE_CISCO_FLAG_POSITION) {
				network->bWithAironetIE = true;
				/* CKIP support is signalled by flag bits
				 * inside the Aironet IE. */
				if ((info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_MIC) ||
				    (info_element->data[IE_CISCO_FLAG_POSITION] & SUPPORT_CKIP_PK))
					network->bCkipSupported = true;
				else
					network->bCkipSupported = false;
			} else {
				network->bWithAironetIE = false;
				network->bCkipSupported = false;
			}
			break;

		case MFIE_TYPE_QOS_PARAMETER:
			printk(KERN_ERR "QoS Error need to parse QOS_PARAMETER IE\n");
			break;

		case MFIE_TYPE_COUNTRY:
			RTLLIB_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n",
					  info_element->len);
			rtllib_extract_country_ie(ieee, info_element, network,
						  network->bssid);
			break;
/* TODO */
		default:
			RTLLIB_DEBUG_MGMT
			    ("Unsupported info element: %s (%d)\n",
			     get_info_element_string(info_element->id),
			     info_element->id);
			break;
		}

		/* Advance to the next IE: 2-byte header plus payload. */
		length -= sizeof(*info_element) + info_element->len;
		info_element =
		    (struct rtllib_info_element *)&info_element->
		    data[info_element->len];
	}

	/* An AP advertising none of the known vendor OUIs is "unknown". */
	if (!network->atheros_cap_exist && !network->broadcom_cap_exist &&
	    !network->cisco_cap_exist && !network->ralink_cap_exist &&
	    !network->bssht.bdRT2RTAggregation)
		network->unknown_cap_exist = true;
	else
		network->unknown_cap_exist = false;

	return 0;
}

/* Piecewise-linear rescale of a 0-100 signal strength reading. */
static inline u8 rtllib_SignalStrengthTranslate(u8 CurrSS)
{
	u8 RetSS;

	if (CurrSS >= 71 && CurrSS <= 100)
		RetSS = 90 + ((CurrSS - 70) / 3);
	else if (CurrSS >= 41 && CurrSS <= 70)
		RetSS = 78 + ((CurrSS - 40) / 3);
	else if (CurrSS >= 31 && CurrSS <= 40)
		RetSS = 66 + (CurrSS - 30);
	else if (CurrSS >= 21 && CurrSS <= 30)
		RetSS = 54 + (CurrSS - 20);
	else if (CurrSS >= 5 && CurrSS <= 20)
		RetSS = 42 + (((CurrSS - 5) * 2) / 3);
	else if (CurrSS == 4)
		RetSS = 36;
	else if (CurrSS == 3)
		RetSS = 27;
	else if (CurrSS == 2)
		RetSS = 18;
	else if (CurrSS == 1)
		RetSS = 9;
	else
		RetSS =
CurrSS; return RetSS; } static long rtllib_translate_todbm(u8 signal_strength_index) { long signal_power; signal_power = (long)((signal_strength_index + 1) >> 1); signal_power -= 95; return signal_power; } static inline int rtllib_network_init( struct rtllib_device *ieee, struct rtllib_probe_response *beacon, struct rtllib_network *network, struct rtllib_rx_stats *stats) { /* network->qos_data.active = 0; network->qos_data.supported = 0; network->qos_data.param_count = 0; network->qos_data.old_param_count = 0; */ memset(&network->qos_data, 0, sizeof(struct rtllib_qos_data)); /* Pull out fixed field data */ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); network->capability = le16_to_cpu(beacon->capability); network->last_scanned = jiffies; network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); network->beacon_interval = le32_to_cpu(beacon->beacon_interval); /* Where to pull this? beacon->listen_interval;*/ network->listen_interval = 0x0A; network->rates_len = network->rates_ex_len = 0; network->last_associate = 0; network->ssid_len = 0; network->hidden_ssid_len = 0; memset(network->hidden_ssid, 0, sizeof(network->hidden_ssid)); network->flags = 0; network->atim_window = 0; network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 
0x3 : 0x0; network->berp_info_valid = false; network->broadcom_cap_exist = false; network->ralink_cap_exist = false; network->atheros_cap_exist = false; network->cisco_cap_exist = false; network->unknown_cap_exist = false; network->realtek_cap_exit = false; network->marvell_cap_exist = false; network->airgo_cap_exist = false; network->Turbo_Enable = 0; network->SignalStrength = stats->SignalStrength; network->RSSI = stats->SignalStrength; network->CountryIeLen = 0; memset(network->CountryIeBuf, 0, MAX_IE_LEN); HTInitializeBssDesc(&network->bssht); if (stats->freq == RTLLIB_52GHZ_BAND) { /* for A band (No DS info) */ network->channel = stats->received_channel; } else network->flags |= NETWORK_HAS_CCK; network->wpa_ie_len = 0; network->rsn_ie_len = 0; network->wzc_ie_len = 0; if (rtllib_parse_info_param(ieee, beacon->info_element, (stats->len - sizeof(*beacon)), network, stats)) return 1; network->mode = 0; if (stats->freq == RTLLIB_52GHZ_BAND) network->mode = IEEE_A; else { if (network->flags & NETWORK_HAS_OFDM) network->mode |= IEEE_G; if (network->flags & NETWORK_HAS_CCK) network->mode |= IEEE_B; } if (network->mode == 0) { RTLLIB_DEBUG_SCAN("Filtered out '%s (%pM)' " "network.\n", escape_essid(network->ssid, network->ssid_len), network->bssid); return 1; } if (network->bssht.bdSupportHT) { if (network->mode == IEEE_A) network->mode = IEEE_N_5G; else if (network->mode & (IEEE_G | IEEE_B)) network->mode = IEEE_N_24G; } if (rtllib_is_empty_essid(network->ssid, network->ssid_len)) network->flags |= NETWORK_EMPTY_ESSID; stats->signal = 30 + (stats->SignalStrength * 70) / 100; stats->noise = rtllib_translate_todbm((u8)(100-stats->signal)) - 25; memcpy(&network->stats, stats, sizeof(network->stats)); return 0; } static inline int is_same_network(struct rtllib_network *src, struct rtllib_network *dst, u8 ssidbroad) { /* A network is only a duplicate if the channel, BSSID, ESSID * and the capability field (in particular IBSS and BSS) all match. 
	 * We treat all <hidden> with the same BSSID and channel
	 * as one network */
	return (((src->ssid_len == dst->ssid_len) || (!ssidbroad)) &&
		(src->channel == dst->channel) &&
		!memcmp(src->bssid, dst->bssid, ETH_ALEN) &&
		(!memcmp(src->ssid, dst->ssid, src->ssid_len) ||
		(!ssidbroad)) &&
		((src->capability & WLAN_CAPABILITY_IBSS) ==
		(dst->capability & WLAN_CAPABILITY_IBSS)) &&
		((src->capability & WLAN_CAPABILITY_ESS) ==
		(dst->capability & WLAN_CAPABILITY_ESS)));
}

/* Refresh only stats and scan time for an already-known IBSS network. */
static inline void update_ibss_network(struct rtllib_network *dst,
				       struct rtllib_network *src)
{
	memcpy(&dst->stats, &src->stats, sizeof(struct rtllib_rx_stats));
	dst->last_scanned = jiffies;
}

/*
 * Copy the freshly parsed @src over the cached @dst entry, preserving the
 * fields that must survive a rescan (hidden SSID, QoS active/param-count
 * state, last_associate).
 */
static inline void update_network(struct rtllib_network *dst,
				  struct rtllib_network *src)
{
	int qos_active;
	u8 old_param;

	memcpy(&dst->stats, &src->stats, sizeof(struct rtllib_rx_stats));
	dst->capability = src->capability;
	memcpy(dst->rates, src->rates, src->rates_len);
	dst->rates_len = src->rates_len;
	memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len);
	dst->rates_ex_len = src->rates_ex_len;
	if (src->ssid_len > 0) {
		if (dst->ssid_len == 0) {
			/* dst was learned from a hidden-SSID beacon; keep the
			 * real name in the separate hidden_ssid field. */
			memset(dst->hidden_ssid, 0, sizeof(dst->hidden_ssid));
			dst->hidden_ssid_len = src->ssid_len;
			memcpy(dst->hidden_ssid, src->ssid, src->ssid_len);
		} else {
			memset(dst->ssid, 0, dst->ssid_len);
			dst->ssid_len = src->ssid_len;
			memcpy(dst->ssid, src->ssid, src->ssid_len);
		}
	}
	dst->mode = src->mode;
	dst->flags = src->flags;
	dst->time_stamp[0] = src->time_stamp[0];
	dst->time_stamp[1] = src->time_stamp[1];
	if (src->flags & NETWORK_HAS_ERP_VALUE) {
		dst->erp_value = src->erp_value;
		dst->berp_info_valid = src->berp_info_valid = true;
	}
	dst->beacon_interval = src->beacon_interval;
	dst->listen_interval = src->listen_interval;
	dst->atim_window = src->atim_window;
	dst->dtim_period = src->dtim_period;
	dst->dtim_data = src->dtim_data;
	dst->last_dtim_sta_time = src->last_dtim_sta_time;
	memcpy(&dst->tim, &src->tim, sizeof(struct rtllib_tim_parameters));

	dst->bssht.bdSupportHT = src->bssht.bdSupportHT;
	dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation;
	dst->bssht.bdHTCapLen = src->bssht.bdHTCapLen;
	memcpy(dst->bssht.bdHTCapBuf, src->bssht.bdHTCapBuf,
	       src->bssht.bdHTCapLen);
	dst->bssht.bdHTInfoLen = src->bssht.bdHTInfoLen;
	memcpy(dst->bssht.bdHTInfoBuf, src->bssht.bdHTInfoBuf,
	       src->bssht.bdHTInfoLen);
	dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer;
	dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime;
	dst->broadcom_cap_exist = src->broadcom_cap_exist;
	dst->ralink_cap_exist = src->ralink_cap_exist;
	dst->atheros_cap_exist = src->atheros_cap_exist;
	dst->realtek_cap_exit = src->realtek_cap_exit;
	dst->marvell_cap_exist = src->marvell_cap_exist;
	dst->cisco_cap_exist = src->cisco_cap_exist;
	dst->airgo_cap_exist = src->airgo_cap_exist;
	dst->unknown_cap_exist = src->unknown_cap_exist;
	memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len);
	dst->wpa_ie_len = src->wpa_ie_len;
	memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len);
	dst->rsn_ie_len = src->rsn_ie_len;
	memcpy(dst->wzc_ie, src->wzc_ie, src->wzc_ie_len);
	dst->wzc_ie_len = src->wzc_ie_len;

	dst->last_scanned = jiffies;
	/* qos related parameters */
	qos_active = dst->qos_data.active;
	old_param = dst->qos_data.param_count;
	dst->qos_data.supported = src->qos_data.supported;
	if (dst->flags & NETWORK_HAS_QOS_PARAMETERS)
		memcpy(&dst->qos_data, &src->qos_data,
		       sizeof(struct rtllib_qos_data));
	if (dst->qos_data.supported == 1) {
		if (dst->ssid_len)
			RTLLIB_DEBUG_QOS
				("QoS the network %s is QoS supported\n",
				dst->ssid);
		else
			RTLLIB_DEBUG_QOS
				("QoS the network is QoS supported\n");
	}
	dst->qos_data.active = qos_active;
	dst->qos_data.old_param_count = old_param;

	/* dst->last_associate is not overwritten */
	dst->wmm_info = src->wmm_info;
	if (src->wmm_param[0].ac_aci_acm_aifsn ||
	    src->wmm_param[1].ac_aci_acm_aifsn ||
	    src->wmm_param[2].ac_aci_acm_aifsn ||
	    src->wmm_param[3].ac_aci_acm_aifsn)
		memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN);

	dst->SignalStrength = src->SignalStrength;
	dst->RSSI = src->RSSI;
	dst->Turbo_Enable = src->Turbo_Enable;

	dst->CountryIeLen = src->CountryIeLen;
	memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen);

	dst->bWithAironetIE = src->bWithAironetIE;
	dst->bCkipSupported = src->bCkipSupported;
	memcpy(dst->CcxRmState, src->CcxRmState, 2);
	dst->bCcxRmEnable = src->bCcxRmEnable;
	dst->MBssidMask = src->MBssidMask;
	dst->bMBssidValid = src->bMBssidValid;
	memcpy(dst->MBssid, src->MBssid, 6);
	dst->bWithCcxVerNum = src->bWithCcxVerNum;
	dst->BssCcxVerNumber = src->BssCcxVerNumber;
}

/* True when @fc denotes a beacon management frame. */
static inline int is_beacon(__le16 fc)
{
	return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == RTLLIB_STYPE_BEACON);
}

/* A channel whose active-map entry is 2 is passive-scan only. */
static int IsPassiveChannel(struct rtllib_device *rtllib, u8 channel)
{
	if (MAX_CHANNEL_NUMBER < channel) {
		printk(KERN_INFO "%s(): Invalid Channel\n", __func__);
		return 0;
	}

	if (rtllib->active_channel_map[channel] == 2)
		return 1;

	return 0;
}

/* Non-zero map entry: the channel may be used in the current regulatory
 * domain. */
int rtllib_legal_channel(struct rtllib_device *rtllib, u8 channel)
{
	if (MAX_CHANNEL_NUMBER < channel) {
		printk(KERN_INFO "%s(): Invalid Channel\n", __func__);
		return 0;
	}
	if (rtllib->active_channel_map[channel] > 0)
		return 1;

	return 0;
}
EXPORT_SYMBOL(rtllib_legal_channel);

static inline void rtllib_process_probe_response(
	struct rtllib_device *ieee,
	struct rtllib_probe_response *beacon,
	struct rtllib_rx_stats *stats)
{
	struct rtllib_network *target;
	struct rtllib_network *oldest = NULL;
	struct rtllib_info_element *info_element = &beacon->info_element[0];
	unsigned long flags;
	short renew;
	struct rtllib_network *network = kzalloc(sizeof(struct rtllib_network),
						 GFP_ATOMIC);

	if (!network)
		return;

	RTLLIB_DEBUG_SCAN(
		"'%s' ( %pM ): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n",
		escape_essid(info_element->data, info_element->len),
		beacon->header.addr3,
		(beacon->capability & (1<<0xf)) ? '1' : '0',
		(beacon->capability & (1<<0xe)) ? '1' : '0',
		(beacon->capability & (1<<0xd)) ? '1' : '0',
		(beacon->capability & (1<<0xc)) ? '1' : '0',
		(beacon->capability & (1<<0xb)) ?
'1' : '0', (beacon->capability & (1<<0xa)) ? '1' : '0', (beacon->capability & (1<<0x9)) ? '1' : '0', (beacon->capability & (1<<0x8)) ? '1' : '0', (beacon->capability & (1<<0x7)) ? '1' : '0', (beacon->capability & (1<<0x6)) ? '1' : '0', (beacon->capability & (1<<0x5)) ? '1' : '0', (beacon->capability & (1<<0x4)) ? '1' : '0', (beacon->capability & (1<<0x3)) ? '1' : '0', (beacon->capability & (1<<0x2)) ? '1' : '0', (beacon->capability & (1<<0x1)) ? '1' : '0', (beacon->capability & (1<<0x0)) ? '1' : '0'); if (rtllib_network_init(ieee, beacon, network, stats)) { RTLLIB_DEBUG_SCAN("Dropped '%s' ( %pM) via %s.\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == RTLLIB_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); goto free_network; } if (!rtllib_legal_channel(ieee, network->channel)) goto free_network; if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == RTLLIB_STYPE_PROBE_RESP) { if (IsPassiveChannel(ieee, network->channel)) { printk(KERN_INFO "GetScanInfo(): For Global Domain, " "filter probe response at channel(%d).\n", network->channel); goto free_network; } } /* The network parsed correctly -- so now we scan our known networks * to see if we can find it in our list. * * NOTE: This search is definitely not optimized. Once its doing * the "right thing" we'll optimize it for efficiency if * necessary */ /* Search for this entry in the list and update it if it is * already there. */ spin_lock_irqsave(&ieee->lock, flags); if (is_same_network(&ieee->current_network, network, (network->ssid_len ? 
1 : 0))) { update_network(&ieee->current_network, network); if ((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G) && ieee->current_network.berp_info_valid) { if (ieee->current_network.erp_value & ERP_UseProtection) ieee->current_network.buseprotection = true; else ieee->current_network.buseprotection = false; } if (is_beacon(beacon->header.frame_ctl)) { if (ieee->state >= RTLLIB_LINKED) ieee->LinkDetectInfo.NumRecvBcnInPeriod++; } } list_for_each_entry(target, &ieee->network_list, list) { if (is_same_network(target, network, (target->ssid_len ? 1 : 0))) break; if ((oldest == NULL) || (target->last_scanned < oldest->last_scanned)) oldest = target; } /* If we didn't find a match, then get a new network slot to initialize * with this beacon's information */ if (&target->list == &ieee->network_list) { if (list_empty(&ieee->network_free_list)) { /* If there are no more slots, expire the oldest */ list_del(&oldest->list); target = oldest; RTLLIB_DEBUG_SCAN("Expired '%s' ( %pM) from " "network list.\n", escape_essid(target->ssid, target->ssid_len), target->bssid); } else { /* Otherwise just pull from the free list */ target = list_entry(ieee->network_free_list.next, struct rtllib_network, list); list_del(ieee->network_free_list.next); } RTLLIB_DEBUG_SCAN("Adding '%s' ( %pM) via %s.\n", escape_essid(network->ssid, network->ssid_len), network->bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == RTLLIB_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); memcpy(target, network, sizeof(*target)); list_add_tail(&target->list, &ieee->network_list); if (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) rtllib_softmac_new_net(ieee, network); } else { RTLLIB_DEBUG_SCAN("Updating '%s' ( %pM) via %s.\n", escape_essid(target->ssid, target->ssid_len), target->bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == RTLLIB_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); /* we have an entry and we are going to update it. 
But this * entry may be already expired. In this case we do the same * as we found a new net and call the new_net handler */ renew = !time_after(target->last_scanned + ieee->scan_age, jiffies); if ((!target->ssid_len) && (((network->ssid_len > 0) && (target->hidden_ssid_len == 0)) || ((ieee->current_network.ssid_len == network->ssid_len) && (strncmp(ieee->current_network.ssid, network->ssid, network->ssid_len) == 0) && (ieee->state == RTLLIB_NOLINK)))) renew = 1; update_network(target, network); if (renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)) rtllib_softmac_new_net(ieee, network); } spin_unlock_irqrestore(&ieee->lock, flags); if (is_beacon(beacon->header.frame_ctl) && is_same_network(&ieee->current_network, network, (network->ssid_len ? 1 : 0)) && (ieee->state == RTLLIB_LINKED)) { if (ieee->handle_beacon != NULL) ieee->handle_beacon(ieee->dev, beacon, &ieee->current_network); } free_network: kfree(network); return; } void rtllib_rx_mgt(struct rtllib_device *ieee, struct sk_buff *skb, struct rtllib_rx_stats *stats) { struct rtllib_hdr_4addr *header = (struct rtllib_hdr_4addr *)skb->data ; if (WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_PROBE_RESP && WLAN_FC_GET_STYPE(header->frame_ctl) != RTLLIB_STYPE_BEACON) ieee->last_rx_ps_time = jiffies; switch (WLAN_FC_GET_STYPE(header->frame_ctl)) { case RTLLIB_STYPE_BEACON: RTLLIB_DEBUG_MGMT("received BEACON (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); RTLLIB_DEBUG_SCAN("Beacon\n"); rtllib_process_probe_response( ieee, (struct rtllib_probe_response *)header, stats); if (ieee->sta_sleep || (ieee->ps != RTLLIB_PS_DISABLED && ieee->iw_mode == IW_MODE_INFRA && ieee->state == RTLLIB_LINKED)) tasklet_schedule(&ieee->ps_task); break; case RTLLIB_STYPE_PROBE_RESP: RTLLIB_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); RTLLIB_DEBUG_SCAN("Probe response\n"); rtllib_process_probe_response(ieee, (struct rtllib_probe_response *)header, stats); break; case 
RTLLIB_STYPE_PROBE_REQ: RTLLIB_DEBUG_MGMT("received PROBE RESQUEST (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); RTLLIB_DEBUG_SCAN("Probe request\n"); if ((ieee->softmac_features & IEEE_SOFTMAC_PROBERS) && ((ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) && ieee->state == RTLLIB_LINKED)) rtllib_rx_probe_rq(ieee, skb); break; } }
gpl-2.0
Guazi/kernelwip
tools/perf/builtin-probe.c
4811
11262
/* * builtin-probe.c * * Builtin probe command: Set up probe events by C expression * * Written by Masami Hiramatsu <mhiramat@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <sys/utsname.h> #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <errno.h> #include <stdio.h> #include <unistd.h> #include <stdlib.h> #include <string.h> #include "perf.h" #include "builtin.h" #include "util/util.h" #include "util/strlist.h" #include "util/strfilter.h" #include "util/symbol.h" #include "util/debug.h" #include "util/debugfs.h" #include "util/parse-options.h" #include "util/probe-finder.h" #include "util/probe-event.h" #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*" #define DEFAULT_FUNC_FILTER "!_*" /* Session management structure */ static struct { bool list_events; bool force_add; bool show_lines; bool show_vars; bool show_ext_vars; bool show_funcs; bool mod_events; int nevents; struct perf_probe_event events[MAX_PROBES]; struct strlist *dellist; struct line_range line_range; const char *target; int max_probe_points; struct strfilter *filter; } params; /* Parse an event definition. Note that any error must die. 
*/ static int parse_probe_event(const char *str) { struct perf_probe_event *pev = &params.events[params.nevents]; int ret; pr_debug("probe-definition(%d): %s\n", params.nevents, str); if (++params.nevents == MAX_PROBES) { pr_err("Too many probes (> %d) were specified.", MAX_PROBES); return -1; } /* Parse a perf-probe command into event */ ret = parse_perf_probe_command(str, pev); pr_debug("%d arguments\n", pev->nargs); return ret; } static int parse_probe_event_argv(int argc, const char **argv) { int i, len, ret; char *buf; /* Bind up rest arguments */ len = 0; for (i = 0; i < argc; i++) len += strlen(argv[i]) + 1; buf = zalloc(len + 1); if (buf == NULL) return -ENOMEM; len = 0; for (i = 0; i < argc; i++) len += sprintf(&buf[len], "%s ", argv[i]); params.mod_events = true; ret = parse_probe_event(buf); free(buf); return ret; } static int opt_add_probe_event(const struct option *opt __used, const char *str, int unset __used) { if (str) { params.mod_events = true; return parse_probe_event(str); } else return 0; } static int opt_del_probe_event(const struct option *opt __used, const char *str, int unset __used) { if (str) { params.mod_events = true; if (!params.dellist) params.dellist = strlist__new(true, NULL); strlist__add(params.dellist, str); } return 0; } #ifdef DWARF_SUPPORT static int opt_show_lines(const struct option *opt __used, const char *str, int unset __used) { int ret = 0; if (!str) return 0; if (params.show_lines) { pr_warning("Warning: more than one --line options are" " detected. 
Only the first one is valid.\n"); return 0; } params.show_lines = true; ret = parse_line_range_desc(str, &params.line_range); INIT_LIST_HEAD(&params.line_range.line_list); return ret; } static int opt_show_vars(const struct option *opt __used, const char *str, int unset __used) { struct perf_probe_event *pev = &params.events[params.nevents]; int ret; if (!str) return 0; ret = parse_probe_event(str); if (!ret && pev->nargs != 0) { pr_err(" Error: '--vars' doesn't accept arguments.\n"); return -EINVAL; } params.show_vars = true; return ret; } #endif static int opt_set_filter(const struct option *opt __used, const char *str, int unset __used) { const char *err; if (str) { pr_debug2("Set filter: %s\n", str); if (params.filter) strfilter__delete(params.filter); params.filter = strfilter__new(str, &err); if (!params.filter) { pr_err("Filter parse error at %td.\n", err - str + 1); pr_err("Source: \"%s\"\n", str); pr_err(" %*c\n", (int)(err - str + 1), '^'); return -EINVAL; } } return 0; } static const char * const probe_usage[] = { "perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]", "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]", "perf probe [<options>] --del '[GROUP:]EVENT' ...", "perf probe --list", #ifdef DWARF_SUPPORT "perf probe [<options>] --line 'LINEDESC'", "perf probe [<options>] --vars 'PROBEPOINT'", #endif NULL }; static const struct option options[] = { OPT_INCR('v', "verbose", &verbose, "be more verbose (show parsed arguments, etc)"), OPT_BOOLEAN('l', "list", &params.list_events, "list up current probe events"), OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.", opt_del_probe_event), OPT_CALLBACK('a', "add", NULL, #ifdef DWARF_SUPPORT "[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT" " [[NAME=]ARG ...]", #else "[EVENT=]FUNC[+OFF|%return] [[NAME=]ARG ...]", #endif "probe point definition, where\n" "\t\tGROUP:\tGroup name (optional)\n" "\t\tEVENT:\tEvent name\n" "\t\tFUNC:\tFunction name\n" "\t\tOFF:\tOffset from 
function entry (in byte)\n" "\t\t%return:\tPut the probe at function return\n" #ifdef DWARF_SUPPORT "\t\tSRC:\tSource code path\n" "\t\tRL:\tRelative line number from function entry.\n" "\t\tAL:\tAbsolute line number in file.\n" "\t\tPT:\tLazy expression of line code.\n" "\t\tARG:\tProbe argument (local variable name or\n" "\t\t\tkprobe-tracer argument format.)\n", #else "\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n", #endif opt_add_probe_event), OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events" " with existing name"), #ifdef DWARF_SUPPORT OPT_CALLBACK('L', "line", NULL, "FUNC[:RLN[+NUM|-RLN2]]|SRC:ALN[+NUM|-ALN2]", "Show source code lines.", opt_show_lines), OPT_CALLBACK('V', "vars", NULL, "FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT", "Show accessible variables on PROBEDEF", opt_show_vars), OPT_BOOLEAN('\0', "externs", &params.show_ext_vars, "Show external variables too (with --vars only)"), OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name, "file", "vmlinux pathname"), OPT_STRING('s', "source", &symbol_conf.source_prefix, "directory", "path to kernel source"), OPT_STRING('m', "module", &params.target, "modname|path", "target module name (for online) or path (for offline)"), #endif OPT__DRY_RUN(&probe_event_dry_run), OPT_INTEGER('\0', "max-probes", &params.max_probe_points, "Set how many probe points can be found for a probe."), OPT_BOOLEAN('F', "funcs", &params.show_funcs, "Show potential probe-able functions."), OPT_CALLBACK('\0', "filter", NULL, "[!]FILTER", "Set a filter (with --vars/funcs only)\n" "\t\t\t(default: \"" DEFAULT_VAR_FILTER "\" for --vars,\n" "\t\t\t \"" DEFAULT_FUNC_FILTER "\" for --funcs)", opt_set_filter), OPT_END() }; int cmd_probe(int argc, const char **argv, const char *prefix __used) { int ret; argc = parse_options(argc, argv, options, probe_usage, PARSE_OPT_STOP_AT_NON_OPTION); if (argc > 0) { if (strcmp(argv[0], "-") == 0) { pr_warning(" Error: '-' is not supported.\n"); 
usage_with_options(probe_usage, options); } ret = parse_probe_event_argv(argc, argv); if (ret < 0) { pr_err(" Error: Parse Error. (%d)\n", ret); return ret; } } if (params.max_probe_points == 0) params.max_probe_points = MAX_PROBES; if ((!params.nevents && !params.dellist && !params.list_events && !params.show_lines && !params.show_funcs)) usage_with_options(probe_usage, options); /* * Only consider the user's kernel image path if given. */ symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); if (params.list_events) { if (params.mod_events) { pr_err(" Error: Don't use --list with --add/--del.\n"); usage_with_options(probe_usage, options); } if (params.show_lines) { pr_err(" Error: Don't use --list with --line.\n"); usage_with_options(probe_usage, options); } if (params.show_vars) { pr_err(" Error: Don't use --list with --vars.\n"); usage_with_options(probe_usage, options); } if (params.show_funcs) { pr_err(" Error: Don't use --list with --funcs.\n"); usage_with_options(probe_usage, options); } ret = show_perf_probe_events(); if (ret < 0) pr_err(" Error: Failed to show event list. (%d)\n", ret); return ret; } if (params.show_funcs) { if (params.nevents != 0 || params.dellist) { pr_err(" Error: Don't use --funcs with" " --add/--del.\n"); usage_with_options(probe_usage, options); } if (params.show_lines) { pr_err(" Error: Don't use --funcs with --line.\n"); usage_with_options(probe_usage, options); } if (params.show_vars) { pr_err(" Error: Don't use --funcs with --vars.\n"); usage_with_options(probe_usage, options); } if (!params.filter) params.filter = strfilter__new(DEFAULT_FUNC_FILTER, NULL); ret = show_available_funcs(params.target, params.filter); strfilter__delete(params.filter); if (ret < 0) pr_err(" Error: Failed to show functions." 
" (%d)\n", ret); return ret; } #ifdef DWARF_SUPPORT if (params.show_lines) { if (params.mod_events) { pr_err(" Error: Don't use --line with" " --add/--del.\n"); usage_with_options(probe_usage, options); } if (params.show_vars) { pr_err(" Error: Don't use --line with --vars.\n"); usage_with_options(probe_usage, options); } ret = show_line_range(&params.line_range, params.target); if (ret < 0) pr_err(" Error: Failed to show lines. (%d)\n", ret); return ret; } if (params.show_vars) { if (params.mod_events) { pr_err(" Error: Don't use --vars with" " --add/--del.\n"); usage_with_options(probe_usage, options); } if (!params.filter) params.filter = strfilter__new(DEFAULT_VAR_FILTER, NULL); ret = show_available_vars(params.events, params.nevents, params.max_probe_points, params.target, params.filter, params.show_ext_vars); strfilter__delete(params.filter); if (ret < 0) pr_err(" Error: Failed to show vars. (%d)\n", ret); return ret; } #endif if (params.dellist) { ret = del_perf_probe_events(params.dellist); strlist__delete(params.dellist); if (ret < 0) { pr_err(" Error: Failed to delete events. (%d)\n", ret); return ret; } } if (params.nevents) { ret = add_perf_probe_events(params.events, params.nevents, params.max_probe_points, params.target, params.force_add); if (ret < 0) { pr_err(" Error: Failed to add events. (%d)\n", ret); return ret; } } return 0; }
gpl-2.0
cnexus/kernel_d2spr_tw_44
net/netfilter/ipset/ip_set_list_set.c
4811
14284
/* Copyright (C) 2008-2011 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Kernel module implementing an IP set type: the list:set type */ #include <linux/module.h> #include <linux/ip.h> #include <linux/skbuff.h> #include <linux/errno.h> #include <linux/netfilter/ipset/ip_set.h> #include <linux/netfilter/ipset/ip_set_timeout.h> #include <linux/netfilter/ipset/ip_set_list.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_DESCRIPTION("list:set type of IP sets"); MODULE_ALIAS("ip_set_list:set"); /* Member elements without and with timeout */ struct set_elem { ip_set_id_t id; }; struct set_telem { ip_set_id_t id; unsigned long timeout; }; /* Type structure */ struct list_set { size_t dsize; /* element size */ u32 size; /* size of set list array */ u32 timeout; /* timeout value */ struct timer_list gc; /* garbage collection */ struct set_elem members[0]; /* the set members */ }; static inline struct set_elem * list_set_elem(const struct list_set *map, u32 id) { return (struct set_elem *)((void *)map->members + id * map->dsize); } static inline struct set_telem * list_set_telem(const struct list_set *map, u32 id) { return (struct set_telem *)((void *)map->members + id * map->dsize); } static inline bool list_set_timeout(const struct list_set *map, u32 id) { const struct set_telem *elem = list_set_telem(map, id); return ip_set_timeout_test(elem->timeout); } static inline bool list_set_expired(const struct list_set *map, u32 id) { const struct set_telem *elem = list_set_telem(map, id); return ip_set_timeout_expired(elem->timeout); } /* Set list without and with timeout */ static int list_set_kadt(struct ip_set *set, const struct sk_buff *skb, const struct xt_action_param *par, enum ipset_adt adt, const struct ip_set_adt_opt *opt) { struct 
list_set *map = set->data; struct set_elem *elem; u32 i; int ret; for (i = 0; i < map->size; i++) { elem = list_set_elem(map, i); if (elem->id == IPSET_INVALID_ID) return 0; if (with_timeout(map->timeout) && list_set_expired(map, i)) continue; switch (adt) { case IPSET_TEST: ret = ip_set_test(elem->id, skb, par, opt); if (ret > 0) return ret; break; case IPSET_ADD: ret = ip_set_add(elem->id, skb, par, opt); if (ret == 0) return ret; break; case IPSET_DEL: ret = ip_set_del(elem->id, skb, par, opt); if (ret == 0) return ret; break; default: break; } } return -EINVAL; } static bool id_eq(const struct list_set *map, u32 i, ip_set_id_t id) { const struct set_elem *elem; if (i < map->size) { elem = list_set_elem(map, i); return elem->id == id; } return 0; } static bool id_eq_timeout(const struct list_set *map, u32 i, ip_set_id_t id) { const struct set_elem *elem; if (i < map->size) { elem = list_set_elem(map, i); return !!(elem->id == id && !(with_timeout(map->timeout) && list_set_expired(map, i))); } return 0; } static void list_elem_add(struct list_set *map, u32 i, ip_set_id_t id) { struct set_elem *e; for (; i < map->size; i++) { e = list_set_elem(map, i); swap(e->id, id); if (e->id == IPSET_INVALID_ID) break; } } static void list_elem_tadd(struct list_set *map, u32 i, ip_set_id_t id, unsigned long timeout) { struct set_telem *e; for (; i < map->size; i++) { e = list_set_telem(map, i); swap(e->id, id); swap(e->timeout, timeout); if (e->id == IPSET_INVALID_ID) break; } } static int list_set_add(struct list_set *map, u32 i, ip_set_id_t id, unsigned long timeout) { const struct set_elem *e = list_set_elem(map, i); if (i == map->size - 1 && e->id != IPSET_INVALID_ID) /* Last element replaced: e.g. 
add new,before,last */ ip_set_put_byindex(e->id); if (with_timeout(map->timeout)) list_elem_tadd(map, i, id, ip_set_timeout_set(timeout)); else list_elem_add(map, i, id); return 0; } static int list_set_del(struct list_set *map, u32 i) { struct set_elem *a = list_set_elem(map, i), *b; ip_set_put_byindex(a->id); for (; i < map->size - 1; i++) { b = list_set_elem(map, i + 1); a->id = b->id; if (with_timeout(map->timeout)) ((struct set_telem *)a)->timeout = ((struct set_telem *)b)->timeout; a = b; if (a->id == IPSET_INVALID_ID) break; } /* Last element */ a->id = IPSET_INVALID_ID; return 0; } static void cleanup_entries(struct list_set *map) { struct set_telem *e; u32 i; for (i = 0; i < map->size; i++) { e = list_set_telem(map, i); if (e->id != IPSET_INVALID_ID && list_set_expired(map, i)) list_set_del(map, i); } } static int list_set_uadt(struct ip_set *set, struct nlattr *tb[], enum ipset_adt adt, u32 *lineno, u32 flags, bool retried) { struct list_set *map = set->data; bool with_timeout = with_timeout(map->timeout); bool flag_exist = flags & IPSET_FLAG_EXIST; int before = 0; u32 timeout = map->timeout; ip_set_id_t id, refid = IPSET_INVALID_ID; const struct set_elem *elem; struct ip_set *s; u32 i; int ret = 0; if (unlikely(!tb[IPSET_ATTR_NAME] || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT) || !ip_set_optattr_netorder(tb, IPSET_ATTR_CADT_FLAGS))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_LINENO]) *lineno = nla_get_u32(tb[IPSET_ATTR_LINENO]); id = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAME]), &s); if (id == IPSET_INVALID_ID) return -IPSET_ERR_NAME; /* "Loop detection" */ if (s->type->features & IPSET_TYPE_NAME) { ret = -IPSET_ERR_LOOP; goto finish; } if (tb[IPSET_ATTR_CADT_FLAGS]) { u32 f = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]); before = f & IPSET_FLAG_BEFORE; } if (before && !tb[IPSET_ATTR_NAMEREF]) { ret = -IPSET_ERR_BEFORE; goto finish; } if (tb[IPSET_ATTR_NAMEREF]) { refid = ip_set_get_byname(nla_data(tb[IPSET_ATTR_NAMEREF]), &s); if (refid == 
IPSET_INVALID_ID) { ret = -IPSET_ERR_NAMEREF; goto finish; } if (!before) before = -1; } if (tb[IPSET_ATTR_TIMEOUT]) { if (!with_timeout) { ret = -IPSET_ERR_TIMEOUT; goto finish; } timeout = ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]); } if (with_timeout && adt != IPSET_TEST) cleanup_entries(map); switch (adt) { case IPSET_TEST: for (i = 0; i < map->size && !ret; i++) { elem = list_set_elem(map, i); if (elem->id == IPSET_INVALID_ID || (before != 0 && i + 1 >= map->size)) break; else if (with_timeout && list_set_expired(map, i)) continue; else if (before > 0 && elem->id == id) ret = id_eq_timeout(map, i + 1, refid); else if (before < 0 && elem->id == refid) ret = id_eq_timeout(map, i + 1, id); else if (before == 0 && elem->id == id) ret = 1; } break; case IPSET_ADD: for (i = 0; i < map->size; i++) { elem = list_set_elem(map, i); if (elem->id != id) continue; if (!(with_timeout && flag_exist)) { ret = -IPSET_ERR_EXIST; goto finish; } else { struct set_telem *e = list_set_telem(map, i); if ((before > 1 && !id_eq(map, i + 1, refid)) || (before < 0 && (i == 0 || !id_eq(map, i - 1, refid)))) { ret = -IPSET_ERR_EXIST; goto finish; } e->timeout = ip_set_timeout_set(timeout); ip_set_put_byindex(id); ret = 0; goto finish; } } ret = -IPSET_ERR_LIST_FULL; for (i = 0; i < map->size && ret == -IPSET_ERR_LIST_FULL; i++) { elem = list_set_elem(map, i); if (elem->id == IPSET_INVALID_ID) ret = before != 0 ? -IPSET_ERR_REF_EXIST : list_set_add(map, i, id, timeout); else if (elem->id != refid) continue; else if (before > 0) ret = list_set_add(map, i, id, timeout); else if (i + 1 < map->size) ret = list_set_add(map, i + 1, id, timeout); } break; case IPSET_DEL: ret = -IPSET_ERR_EXIST; for (i = 0; i < map->size && ret == -IPSET_ERR_EXIST; i++) { elem = list_set_elem(map, i); if (elem->id == IPSET_INVALID_ID) { ret = before != 0 ? 
-IPSET_ERR_REF_EXIST : -IPSET_ERR_EXIST; break; } else if (elem->id == id && (before == 0 || (before > 0 && id_eq(map, i + 1, refid)))) ret = list_set_del(map, i); else if (elem->id == refid && before < 0 && id_eq(map, i + 1, id)) ret = list_set_del(map, i + 1); } break; default: break; } finish: if (refid != IPSET_INVALID_ID) ip_set_put_byindex(refid); if (adt != IPSET_ADD || ret) ip_set_put_byindex(id); return ip_set_eexist(ret, flags) ? 0 : ret; } static void list_set_flush(struct ip_set *set) { struct list_set *map = set->data; struct set_elem *elem; u32 i; for (i = 0; i < map->size; i++) { elem = list_set_elem(map, i); if (elem->id != IPSET_INVALID_ID) { ip_set_put_byindex(elem->id); elem->id = IPSET_INVALID_ID; } } } static void list_set_destroy(struct ip_set *set) { struct list_set *map = set->data; if (with_timeout(map->timeout)) del_timer_sync(&map->gc); list_set_flush(set); kfree(map); set->data = NULL; } static int list_set_head(struct ip_set *set, struct sk_buff *skb) { const struct list_set *map = set->data; struct nlattr *nested; nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) goto nla_put_failure; NLA_PUT_NET32(skb, IPSET_ATTR_SIZE, htonl(map->size)); if (with_timeout(map->timeout)) NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(map->timeout)); NLA_PUT_NET32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)); NLA_PUT_NET32(skb, IPSET_ATTR_MEMSIZE, htonl(sizeof(*map) + map->size * map->dsize)); ipset_nest_end(skb, nested); return 0; nla_put_failure: return -EMSGSIZE; } static int list_set_list(const struct ip_set *set, struct sk_buff *skb, struct netlink_callback *cb) { const struct list_set *map = set->data; struct nlattr *atd, *nested; u32 i, first = cb->args[2]; const struct set_elem *e; atd = ipset_nest_start(skb, IPSET_ATTR_ADT); if (!atd) return -EMSGSIZE; for (; cb->args[2] < map->size; cb->args[2]++) { i = cb->args[2]; e = list_set_elem(map, i); if (e->id == IPSET_INVALID_ID) goto finish; if (with_timeout(map->timeout) && 
list_set_expired(map, i)) continue; nested = ipset_nest_start(skb, IPSET_ATTR_DATA); if (!nested) { if (i == first) { nla_nest_cancel(skb, atd); return -EMSGSIZE; } else goto nla_put_failure; } NLA_PUT_STRING(skb, IPSET_ATTR_NAME, ip_set_name_byindex(e->id)); if (with_timeout(map->timeout)) { const struct set_telem *te = (const struct set_telem *) e; NLA_PUT_NET32(skb, IPSET_ATTR_TIMEOUT, htonl(ip_set_timeout_get(te->timeout))); } ipset_nest_end(skb, nested); } finish: ipset_nest_end(skb, atd); /* Set listing finished */ cb->args[2] = 0; return 0; nla_put_failure: nla_nest_cancel(skb, nested); ipset_nest_end(skb, atd); if (unlikely(i == first)) { cb->args[2] = 0; return -EMSGSIZE; } return 0; } static bool list_set_same_set(const struct ip_set *a, const struct ip_set *b) { const struct list_set *x = a->data; const struct list_set *y = b->data; return x->size == y->size && x->timeout == y->timeout; } static const struct ip_set_type_variant list_set = { .kadt = list_set_kadt, .uadt = list_set_uadt, .destroy = list_set_destroy, .flush = list_set_flush, .head = list_set_head, .list = list_set_list, .same_set = list_set_same_set, }; static void list_set_gc(unsigned long ul_set) { struct ip_set *set = (struct ip_set *) ul_set; struct list_set *map = set->data; write_lock_bh(&set->lock); cleanup_entries(map); write_unlock_bh(&set->lock); map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; add_timer(&map->gc); } static void list_set_gc_init(struct ip_set *set) { struct list_set *map = set->data; init_timer(&map->gc); map->gc.data = (unsigned long) set; map->gc.function = list_set_gc; map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ; add_timer(&map->gc); } /* Create list:set type of sets */ static bool init_list_set(struct ip_set *set, u32 size, size_t dsize, unsigned long timeout) { struct list_set *map; struct set_elem *e; u32 i; map = kzalloc(sizeof(*map) + size * dsize, GFP_KERNEL); if (!map) return false; map->size = size; map->dsize = dsize; 
map->timeout = timeout; set->data = map; for (i = 0; i < size; i++) { e = list_set_elem(map, i); e->id = IPSET_INVALID_ID; } return true; } static int list_set_create(struct ip_set *set, struct nlattr *tb[], u32 flags) { u32 size = IP_SET_LIST_DEFAULT_SIZE; if (unlikely(!ip_set_optattr_netorder(tb, IPSET_ATTR_SIZE) || !ip_set_optattr_netorder(tb, IPSET_ATTR_TIMEOUT))) return -IPSET_ERR_PROTOCOL; if (tb[IPSET_ATTR_SIZE]) size = ip_set_get_h32(tb[IPSET_ATTR_SIZE]); if (size < IP_SET_LIST_MIN_SIZE) size = IP_SET_LIST_MIN_SIZE; if (tb[IPSET_ATTR_TIMEOUT]) { if (!init_list_set(set, size, sizeof(struct set_telem), ip_set_timeout_uget(tb[IPSET_ATTR_TIMEOUT]))) return -ENOMEM; list_set_gc_init(set); } else { if (!init_list_set(set, size, sizeof(struct set_elem), IPSET_NO_TIMEOUT)) return -ENOMEM; } set->variant = &list_set; return 0; } static struct ip_set_type list_set_type __read_mostly = { .name = "list:set", .protocol = IPSET_PROTOCOL, .features = IPSET_TYPE_NAME | IPSET_DUMP_LAST, .dimension = IPSET_DIM_ONE, .family = NFPROTO_UNSPEC, .revision_min = 0, .revision_max = 0, .create = list_set_create, .create_policy = { [IPSET_ATTR_SIZE] = { .type = NLA_U32 }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, }, .adt_policy = { [IPSET_ATTR_NAME] = { .type = NLA_STRING, .len = IPSET_MAXNAMELEN }, [IPSET_ATTR_NAMEREF] = { .type = NLA_STRING, .len = IPSET_MAXNAMELEN }, [IPSET_ATTR_TIMEOUT] = { .type = NLA_U32 }, [IPSET_ATTR_LINENO] = { .type = NLA_U32 }, [IPSET_ATTR_CADT_FLAGS] = { .type = NLA_U32 }, }, .me = THIS_MODULE, }; static int __init list_set_init(void) { return ip_set_type_register(&list_set_type); } static void __exit list_set_fini(void) { ip_set_type_unregister(&list_set_type); } module_init(list_set_init); module_exit(list_set_fini);
gpl-2.0
willizambrano01/evolution_CM14.1
drivers/video/via/viafbdev.c
4811
61265
/* * Copyright 1998-2009 VIA Technologies, Inc. All Rights Reserved. * Copyright 2001-2008 S3 Graphics, Inc. All Rights Reserved. * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public * License as published by the Free Software Foundation; * either version 2, or (at your option) any later version. * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTIES OR REPRESENTATIONS; without even * the implied warranty of MERCHANTABILITY or FITNESS FOR * A PARTICULAR PURPOSE.See the GNU General Public License * for more details. * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/stat.h> #include <linux/via-core.h> #include <linux/via_i2c.h> #include <asm/olpc.h> #define _MASTER_FILE #include "global.h" static char *viafb_name = "Via"; static u32 pseudo_pal[17]; /* video mode */ static char *viafb_mode; static char *viafb_mode1; static int viafb_bpp = 32; static int viafb_bpp1 = 32; static unsigned int viafb_second_offset; static int viafb_second_size; static int viafb_accel = 1; /* Added for specifying active devices.*/ static char *viafb_active_dev; /*Added for specify lcd output port*/ static char *viafb_lcd_port = ""; static char *viafb_dvi_port = ""; static void retrieve_device_setting(struct viafb_ioctl_setting *setting_info); static int viafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static struct fb_ops viafb_ops; /* supported output devices on each IGP * only CX700, VX800, VX855, VX900 were documented * VIA_CRT should be everywhere * VIA_6C can be onle pre-CX700 (probably only on CLE266) as 6C is used for PLL * source selection on CX700 and later * K400 seems to support VIA_96, VIA_DVP1, 
VIA_LVDS{1,2} as in viamode.c */ static const u32 supported_odev_map[] = { [UNICHROME_CLE266] = VIA_CRT | VIA_LDVP0 | VIA_LDVP1, [UNICHROME_K400] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_K800] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_PM800] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_CN700] = VIA_CRT | VIA_DVP0 | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_CX700] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_CN750] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_K8M890] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_P4M890] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_P4M900] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_VX800] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_VX855] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, [UNICHROME_VX900] = VIA_CRT | VIA_DVP1 | VIA_LVDS1 | VIA_LVDS2, }; static void viafb_fill_var_color_info(struct fb_var_screeninfo *var, u8 depth) { var->grayscale = 0; var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.offset = 0; var->transp.length = 0; var->transp.msb_right = 0; var->nonstd = 0; switch (depth) { case 8: var->bits_per_pixel = 8; var->red.offset = 0; var->green.offset = 0; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; break; case 15: var->bits_per_pixel = 16; var->red.offset = 10; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 5; var->blue.length = 5; break; case 16: var->bits_per_pixel = 16; var->red.offset = 11; var->green.offset = 5; var->blue.offset = 0; var->red.length = 5; var->green.length = 6; var->blue.length = 5; break; case 24: var->bits_per_pixel = 32; var->red.offset = 16; var->green.offset = 8; var->blue.offset = 0; var->red.length = 8; var->green.length = 8; var->blue.length = 8; break; case 30: var->bits_per_pixel = 32; var->red.offset = 20; 
var->green.offset = 10; var->blue.offset = 0; var->red.length = 10; var->green.length = 10; var->blue.length = 10; break; } } static void viafb_update_fix(struct fb_info *info) { u32 bpp = info->var.bits_per_pixel; info->fix.visual = bpp == 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_TRUECOLOR; info->fix.line_length = ALIGN(info->var.xres_virtual * bpp / 8, VIA_PITCH_SIZE); } static void viafb_setup_fixinfo(struct fb_fix_screeninfo *fix, struct viafb_par *viaparinfo) { memset(fix, 0, sizeof(struct fb_fix_screeninfo)); strcpy(fix->id, viafb_name); fix->smem_start = viaparinfo->fbmem; fix->smem_len = viaparinfo->fbmem_free; fix->type = FB_TYPE_PACKED_PIXELS; fix->type_aux = 0; fix->visual = FB_VISUAL_TRUECOLOR; fix->xpanstep = fix->ywrapstep = 0; fix->ypanstep = 1; /* Just tell the accel name */ viafbinfo->fix.accel = FB_ACCEL_VIA_UNICHROME; } static int viafb_open(struct fb_info *info, int user) { DEBUG_MSG(KERN_INFO "viafb_open!\n"); return 0; } static int viafb_release(struct fb_info *info, int user) { DEBUG_MSG(KERN_INFO "viafb_release!\n"); return 0; } static inline int get_var_refresh(struct fb_var_screeninfo *var) { u32 htotal, vtotal; htotal = var->left_margin + var->xres + var->right_margin + var->hsync_len; vtotal = var->upper_margin + var->yres + var->lower_margin + var->vsync_len; return PICOS2KHZ(var->pixclock) * 1000 / (htotal * vtotal); } static int viafb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { int depth, refresh; struct viafb_par *ppar = info->par; u32 line; DEBUG_MSG(KERN_INFO "viafb_check_var!\n"); /* Sanity check */ /* HW neither support interlacte nor double-scaned mode */ if (var->vmode & FB_VMODE_INTERLACED || var->vmode & FB_VMODE_DOUBLE) return -EINVAL; /* the refresh rate is not important here, as we only want to know * whether the resolution exists */ if (!viafb_get_best_mode(var->xres, var->yres, 60)) { DEBUG_MSG(KERN_INFO "viafb: Mode %dx%dx%d not supported!!\n", var->xres, var->yres, var->bits_per_pixel); return 
-EINVAL; } depth = fb_get_color_depth(var, &info->fix); if (!depth) depth = var->bits_per_pixel; if (depth < 0 || depth > 32) return -EINVAL; else if (!depth) depth = 24; else if (depth == 15 && viafb_dual_fb && ppar->iga_path == IGA1) depth = 15; else if (depth == 30) depth = 30; else if (depth <= 8) depth = 8; else if (depth <= 16) depth = 16; else depth = 24; viafb_fill_var_color_info(var, depth); if (var->xres_virtual < var->xres) var->xres_virtual = var->xres; line = ALIGN(var->xres_virtual * var->bits_per_pixel / 8, VIA_PITCH_SIZE); if (line > VIA_PITCH_MAX || line * var->yres_virtual > ppar->memsize) return -EINVAL; /* Based on var passed in to calculate the refresh, * because our driver use some modes special. */ refresh = viafb_get_refresh(var->xres, var->yres, get_var_refresh(var)); /* Adjust var according to our driver's own table */ viafb_fill_var_timing_info(var, viafb_get_best_mode(var->xres, var->yres, refresh)); if (var->accel_flags & FB_ACCELF_TEXT && !ppar->shared->vdev->engine_mmio) var->accel_flags = 0; return 0; } static int viafb_set_par(struct fb_info *info) { struct viafb_par *viapar = info->par; int refresh; DEBUG_MSG(KERN_INFO "viafb_set_par!\n"); viafb_update_fix(info); viapar->depth = fb_get_color_depth(&info->var, &info->fix); viafb_update_device_setting(viafbinfo->var.xres, viafbinfo->var.yres, viafbinfo->var.bits_per_pixel, 0); if (viafb_dual_fb) { viafb_update_device_setting(viafbinfo1->var.xres, viafbinfo1->var.yres, viafbinfo1->var.bits_per_pixel, 1); } else if (viafb_SAMM_ON == 1) { DEBUG_MSG(KERN_INFO "viafb_second_xres = %d, viafb_second_yres = %d, bpp = %d\n", viafb_second_xres, viafb_second_yres, viafb_bpp1); viafb_update_device_setting(viafb_second_xres, viafb_second_yres, viafb_bpp1, 1); } refresh = get_var_refresh(&info->var); if (viafb_dual_fb && viapar->iga_path == IGA2) { viafb_bpp1 = info->var.bits_per_pixel; viafb_refresh1 = refresh; } else { viafb_bpp = info->var.bits_per_pixel; viafb_refresh = refresh; } if 
(info->var.accel_flags & FB_ACCELF_TEXT) info->flags &= ~FBINFO_HWACCEL_DISABLED; else info->flags |= FBINFO_HWACCEL_DISABLED; viafb_setmode(); viafb_pan_display(&info->var, info); return 0; } /* Set one color register */ static int viafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { struct viafb_par *viapar = info->par; u32 r, g, b; if (info->fix.visual == FB_VISUAL_PSEUDOCOLOR) { if (regno > 255) return -EINVAL; if (!viafb_dual_fb || viapar->iga_path == IGA1) viafb_set_primary_color_register(regno, red >> 8, green >> 8, blue >> 8); if (!viafb_dual_fb || viapar->iga_path == IGA2) viafb_set_secondary_color_register(regno, red >> 8, green >> 8, blue >> 8); } else { if (regno > 15) return -EINVAL; r = (red >> (16 - info->var.red.length)) << info->var.red.offset; b = (blue >> (16 - info->var.blue.length)) << info->var.blue.offset; g = (green >> (16 - info->var.green.length)) << info->var.green.offset; ((u32 *) info->pseudo_palette)[regno] = r | g | b; } return 0; } static int viafb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info) { struct viafb_par *viapar = info->par; u32 vram_addr = viapar->vram_addr + var->yoffset * info->fix.line_length + var->xoffset * info->var.bits_per_pixel / 8; DEBUG_MSG(KERN_DEBUG "viafb_pan_display, address = %d\n", vram_addr); if (!viafb_dual_fb) { via_set_primary_address(vram_addr); via_set_secondary_address(vram_addr); } else if (viapar->iga_path == IGA1) via_set_primary_address(vram_addr); else via_set_secondary_address(vram_addr); return 0; } static int viafb_blank(int blank_mode, struct fb_info *info) { DEBUG_MSG(KERN_INFO "viafb_blank!\n"); /* clear DPMS setting */ switch (blank_mode) { case FB_BLANK_UNBLANK: /* Screen: On, HSync: On, VSync: On */ /* control CRT monitor power management */ via_set_state(VIA_CRT, VIA_STATE_ON); break; case FB_BLANK_HSYNC_SUSPEND: /* Screen: Off, HSync: Off, VSync: On */ /* control CRT monitor power management */ 
via_set_state(VIA_CRT, VIA_STATE_STANDBY); break; case FB_BLANK_VSYNC_SUSPEND: /* Screen: Off, HSync: On, VSync: Off */ /* control CRT monitor power management */ via_set_state(VIA_CRT, VIA_STATE_SUSPEND); break; case FB_BLANK_POWERDOWN: /* Screen: Off, HSync: Off, VSync: Off */ /* control CRT monitor power management */ via_set_state(VIA_CRT, VIA_STATE_OFF); break; } return 0; } static int viafb_ioctl(struct fb_info *info, u_int cmd, u_long arg) { union { struct viafb_ioctl_mode viamode; struct viafb_ioctl_samm viasamm; struct viafb_driver_version driver_version; struct fb_var_screeninfo sec_var; struct _panel_size_pos_info panel_pos_size_para; struct viafb_ioctl_setting viafb_setting; struct device_t active_dev; } u; u32 state_info = 0; u32 *viafb_gamma_table; char driver_name[] = "viafb"; u32 __user *argp = (u32 __user *) arg; u32 gpu32; DEBUG_MSG(KERN_INFO "viafb_ioctl: 0x%X !!\n", cmd); printk(KERN_WARNING "viafb_ioctl: Please avoid this interface as it is unstable and might change or vanish at any time!\n"); memset(&u, 0, sizeof(u)); switch (cmd) { case VIAFB_GET_CHIP_INFO: if (copy_to_user(argp, viaparinfo->chip_info, sizeof(struct chip_information))) return -EFAULT; break; case VIAFB_GET_INFO_SIZE: return put_user((u32)sizeof(struct viafb_ioctl_info), argp); case VIAFB_GET_INFO: return viafb_ioctl_get_viafb_info(arg); case VIAFB_HOTPLUG: return put_user(viafb_ioctl_hotplug(info->var.xres, info->var.yres, info->var.bits_per_pixel), argp); case VIAFB_SET_HOTPLUG_FLAG: if (copy_from_user(&gpu32, argp, sizeof(gpu32))) return -EFAULT; viafb_hotplug = (gpu32) ? 1 : 0; break; case VIAFB_GET_RESOLUTION: u.viamode.xres = (u32) viafb_hotplug_Xres; u.viamode.yres = (u32) viafb_hotplug_Yres; u.viamode.refresh = (u32) viafb_hotplug_refresh; u.viamode.bpp = (u32) viafb_hotplug_bpp; if (viafb_SAMM_ON == 1) { u.viamode.xres_sec = viafb_second_xres; u.viamode.yres_sec = viafb_second_yres; u.viamode.virtual_xres_sec = viafb_dual_fb ? 
viafbinfo1->var.xres_virtual : viafbinfo->var.xres_virtual; u.viamode.virtual_yres_sec = viafb_dual_fb ? viafbinfo1->var.yres_virtual : viafbinfo->var.yres_virtual; u.viamode.refresh_sec = viafb_refresh1; u.viamode.bpp_sec = viafb_bpp1; } else { u.viamode.xres_sec = 0; u.viamode.yres_sec = 0; u.viamode.virtual_xres_sec = 0; u.viamode.virtual_yres_sec = 0; u.viamode.refresh_sec = 0; u.viamode.bpp_sec = 0; } if (copy_to_user(argp, &u.viamode, sizeof(u.viamode))) return -EFAULT; break; case VIAFB_GET_SAMM_INFO: u.viasamm.samm_status = viafb_SAMM_ON; if (viafb_SAMM_ON == 1) { if (viafb_dual_fb) { u.viasamm.size_prim = viaparinfo->fbmem_free; u.viasamm.size_sec = viaparinfo1->fbmem_free; } else { if (viafb_second_size) { u.viasamm.size_prim = viaparinfo->fbmem_free - viafb_second_size * 1024 * 1024; u.viasamm.size_sec = viafb_second_size * 1024 * 1024; } else { u.viasamm.size_prim = viaparinfo->fbmem_free >> 1; u.viasamm.size_sec = (viaparinfo->fbmem_free >> 1); } } u.viasamm.mem_base = viaparinfo->fbmem; u.viasamm.offset_sec = viafb_second_offset; } else { u.viasamm.size_prim = viaparinfo->memsize - viaparinfo->fbmem_used; u.viasamm.size_sec = 0; u.viasamm.mem_base = viaparinfo->fbmem; u.viasamm.offset_sec = 0; } if (copy_to_user(argp, &u.viasamm, sizeof(u.viasamm))) return -EFAULT; break; case VIAFB_TURN_ON_OUTPUT_DEVICE: if (copy_from_user(&gpu32, argp, sizeof(gpu32))) return -EFAULT; if (gpu32 & CRT_Device) via_set_state(VIA_CRT, VIA_STATE_ON); if (gpu32 & DVI_Device) viafb_dvi_enable(); if (gpu32 & LCD_Device) viafb_lcd_enable(); break; case VIAFB_TURN_OFF_OUTPUT_DEVICE: if (copy_from_user(&gpu32, argp, sizeof(gpu32))) return -EFAULT; if (gpu32 & CRT_Device) via_set_state(VIA_CRT, VIA_STATE_OFF); if (gpu32 & DVI_Device) viafb_dvi_disable(); if (gpu32 & LCD_Device) viafb_lcd_disable(); break; case VIAFB_GET_DEVICE: u.active_dev.crt = viafb_CRT_ON; u.active_dev.dvi = viafb_DVI_ON; u.active_dev.lcd = viafb_LCD_ON; u.active_dev.samm = viafb_SAMM_ON; 
u.active_dev.primary_dev = viafb_primary_dev; u.active_dev.lcd_dsp_cent = viafb_lcd_dsp_method; u.active_dev.lcd_panel_id = viafb_lcd_panel_id; u.active_dev.lcd_mode = viafb_lcd_mode; u.active_dev.xres = viafb_hotplug_Xres; u.active_dev.yres = viafb_hotplug_Yres; u.active_dev.xres1 = viafb_second_xres; u.active_dev.yres1 = viafb_second_yres; u.active_dev.bpp = viafb_bpp; u.active_dev.bpp1 = viafb_bpp1; u.active_dev.refresh = viafb_refresh; u.active_dev.refresh1 = viafb_refresh1; u.active_dev.epia_dvi = viafb_platform_epia_dvi; u.active_dev.lcd_dual_edge = viafb_device_lcd_dualedge; u.active_dev.bus_width = viafb_bus_width; if (copy_to_user(argp, &u.active_dev, sizeof(u.active_dev))) return -EFAULT; break; case VIAFB_GET_DRIVER_VERSION: u.driver_version.iMajorNum = VERSION_MAJOR; u.driver_version.iKernelNum = VERSION_KERNEL; u.driver_version.iOSNum = VERSION_OS; u.driver_version.iMinorNum = VERSION_MINOR; if (copy_to_user(argp, &u.driver_version, sizeof(u.driver_version))) return -EFAULT; break; case VIAFB_GET_DEVICE_INFO: retrieve_device_setting(&u.viafb_setting); if (copy_to_user(argp, &u.viafb_setting, sizeof(u.viafb_setting))) return -EFAULT; break; case VIAFB_GET_DEVICE_SUPPORT: viafb_get_device_support_state(&state_info); if (put_user(state_info, argp)) return -EFAULT; break; case VIAFB_GET_DEVICE_CONNECT: viafb_get_device_connect_state(&state_info); if (put_user(state_info, argp)) return -EFAULT; break; case VIAFB_GET_PANEL_SUPPORT_EXPAND: state_info = viafb_lcd_get_support_expand_state(info->var.xres, info->var.yres); if (put_user(state_info, argp)) return -EFAULT; break; case VIAFB_GET_DRIVER_NAME: if (copy_to_user(argp, driver_name, sizeof(driver_name))) return -EFAULT; break; case VIAFB_SET_GAMMA_LUT: viafb_gamma_table = memdup_user(argp, 256 * sizeof(u32)); if (IS_ERR(viafb_gamma_table)) return PTR_ERR(viafb_gamma_table); viafb_set_gamma_table(viafb_bpp, viafb_gamma_table); kfree(viafb_gamma_table); break; case VIAFB_GET_GAMMA_LUT: viafb_gamma_table = 
kmalloc(256 * sizeof(u32), GFP_KERNEL); if (!viafb_gamma_table) return -ENOMEM; viafb_get_gamma_table(viafb_gamma_table); if (copy_to_user(argp, viafb_gamma_table, 256 * sizeof(u32))) { kfree(viafb_gamma_table); return -EFAULT; } kfree(viafb_gamma_table); break; case VIAFB_GET_GAMMA_SUPPORT_STATE: viafb_get_gamma_support_state(viafb_bpp, &state_info); if (put_user(state_info, argp)) return -EFAULT; break; case VIAFB_SYNC_SURFACE: DEBUG_MSG(KERN_INFO "lobo VIAFB_SYNC_SURFACE\n"); break; case VIAFB_GET_DRIVER_CAPS: break; case VIAFB_GET_PANEL_MAX_SIZE: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0; if (copy_to_user(argp, &u.panel_pos_size_para, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_GET_PANEL_MAX_POSITION: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0; if (copy_to_user(argp, &u.panel_pos_size_para, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_GET_PANEL_POSITION: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0; if (copy_to_user(argp, &u.panel_pos_size_para, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_GET_PANEL_SIZE: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; u.panel_pos_size_para.x = u.panel_pos_size_para.y = 0; if (copy_to_user(argp, &u.panel_pos_size_para, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_SET_PANEL_POSITION: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; break; case VIAFB_SET_PANEL_SIZE: if (copy_from_user(&u.panel_pos_size_para, argp, sizeof(u.panel_pos_size_para))) return -EFAULT; break; default: return -EINVAL; } return 0; } static void 
viafb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct viafb_par *viapar = info->par; struct viafb_shared *shared = viapar->shared; u32 fg_color; u8 rop; if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt) { cfb_fillrect(info, rect); return; } if (!rect->width || !rect->height) return; if (info->fix.visual == FB_VISUAL_TRUECOLOR) fg_color = ((u32 *)info->pseudo_palette)[rect->color]; else fg_color = rect->color; if (rect->rop == ROP_XOR) rop = 0x5A; else rop = 0xF0; DEBUG_MSG(KERN_DEBUG "viafb 2D engine: fillrect\n"); if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_FILL, rect->width, rect->height, info->var.bits_per_pixel, viapar->vram_addr, info->fix.line_length, rect->dx, rect->dy, NULL, 0, 0, 0, 0, fg_color, 0, rop)) cfb_fillrect(info, rect); } static void viafb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct viafb_par *viapar = info->par; struct viafb_shared *shared = viapar->shared; if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt) { cfb_copyarea(info, area); return; } if (!area->width || !area->height) return; DEBUG_MSG(KERN_DEBUG "viafb 2D engine: copyarea\n"); if (shared->hw_bitblt(shared->vdev->engine_mmio, VIA_BITBLT_COLOR, area->width, area->height, info->var.bits_per_pixel, viapar->vram_addr, info->fix.line_length, area->dx, area->dy, NULL, viapar->vram_addr, info->fix.line_length, area->sx, area->sy, 0, 0, 0)) cfb_copyarea(info, area); } static void viafb_imageblit(struct fb_info *info, const struct fb_image *image) { struct viafb_par *viapar = info->par; struct viafb_shared *shared = viapar->shared; u32 fg_color = 0, bg_color = 0; u8 op; if (info->flags & FBINFO_HWACCEL_DISABLED || !shared->hw_bitblt || (image->depth != 1 && image->depth != viapar->depth)) { cfb_imageblit(info, image); return; } if (image->depth == 1) { op = VIA_BITBLT_MONO; if (info->fix.visual == FB_VISUAL_TRUECOLOR) { fg_color = ((u32 *)info->pseudo_palette)[image->fg_color]; bg_color = ((u32 
*)info->pseudo_palette)[image->bg_color]; } else { fg_color = image->fg_color; bg_color = image->bg_color; } } else op = VIA_BITBLT_COLOR; DEBUG_MSG(KERN_DEBUG "viafb 2D engine: imageblit\n"); if (shared->hw_bitblt(shared->vdev->engine_mmio, op, image->width, image->height, info->var.bits_per_pixel, viapar->vram_addr, info->fix.line_length, image->dx, image->dy, (u32 *)image->data, 0, 0, 0, 0, fg_color, bg_color, 0)) cfb_imageblit(info, image); } static int viafb_cursor(struct fb_info *info, struct fb_cursor *cursor) { struct viafb_par *viapar = info->par; void __iomem *engine = viapar->shared->vdev->engine_mmio; u32 temp, xx, yy, bg_color = 0, fg_color = 0, chip_name = viapar->shared->chip_info.gfx_chip_name; int i, j = 0, cur_size = 64; if (info->flags & FBINFO_HWACCEL_DISABLED || info != viafbinfo) return -ENODEV; /* LCD ouput does not support hw cursors (at least on VN896) */ if ((chip_name == UNICHROME_CLE266 && viapar->iga_path == IGA2) || viafb_LCD_ON) return -ENODEV; viafb_show_hw_cursor(info, HW_Cursor_OFF); if (cursor->set & FB_CUR_SETHOT) { temp = (cursor->hot.x << 16) + cursor->hot.y; writel(temp, engine + VIA_REG_CURSOR_ORG); } if (cursor->set & FB_CUR_SETPOS) { yy = cursor->image.dy - info->var.yoffset; xx = cursor->image.dx - info->var.xoffset; temp = yy & 0xFFFF; temp |= (xx << 16); writel(temp, engine + VIA_REG_CURSOR_POS); } if (cursor->image.width <= 32 && cursor->image.height <= 32) cur_size = 32; else if (cursor->image.width <= 64 && cursor->image.height <= 64) cur_size = 64; else { printk(KERN_WARNING "viafb_cursor: The cursor is too large " "%dx%d", cursor->image.width, cursor->image.height); return -ENXIO; } if (cursor->set & FB_CUR_SETSIZE) { temp = readl(engine + VIA_REG_CURSOR_MODE); if (cur_size == 32) temp |= 0x2; else temp &= ~0x2; writel(temp, engine + VIA_REG_CURSOR_MODE); } if (cursor->set & FB_CUR_SETCMAP) { fg_color = cursor->image.fg_color; bg_color = cursor->image.bg_color; if (chip_name == UNICHROME_CX700 || chip_name == 
UNICHROME_VX800 || chip_name == UNICHROME_VX855 || chip_name == UNICHROME_VX900) { fg_color = ((info->cmap.red[fg_color] & 0xFFC0) << 14) | ((info->cmap.green[fg_color] & 0xFFC0) << 4) | ((info->cmap.blue[fg_color] & 0xFFC0) >> 6); bg_color = ((info->cmap.red[bg_color] & 0xFFC0) << 14) | ((info->cmap.green[bg_color] & 0xFFC0) << 4) | ((info->cmap.blue[bg_color] & 0xFFC0) >> 6); } else { fg_color = ((info->cmap.red[fg_color] & 0xFF00) << 8) | (info->cmap.green[fg_color] & 0xFF00) | ((info->cmap.blue[fg_color] & 0xFF00) >> 8); bg_color = ((info->cmap.red[bg_color] & 0xFF00) << 8) | (info->cmap.green[bg_color] & 0xFF00) | ((info->cmap.blue[bg_color] & 0xFF00) >> 8); } writel(bg_color, engine + VIA_REG_CURSOR_BG); writel(fg_color, engine + VIA_REG_CURSOR_FG); } if (cursor->set & FB_CUR_SETSHAPE) { struct { u8 data[CURSOR_SIZE]; u32 bak[CURSOR_SIZE / 4]; } *cr_data = kzalloc(sizeof(*cr_data), GFP_ATOMIC); int size = ((cursor->image.width + 7) >> 3) * cursor->image.height; if (!cr_data) return -ENOMEM; if (cur_size == 32) { for (i = 0; i < (CURSOR_SIZE / 4); i++) { cr_data->bak[i] = 0x0; cr_data->bak[i + 1] = 0xFFFFFFFF; i += 1; } } else { for (i = 0; i < (CURSOR_SIZE / 4); i++) { cr_data->bak[i] = 0x0; cr_data->bak[i + 1] = 0x0; cr_data->bak[i + 2] = 0xFFFFFFFF; cr_data->bak[i + 3] = 0xFFFFFFFF; i += 3; } } switch (cursor->rop) { case ROP_XOR: for (i = 0; i < size; i++) cr_data->data[i] = cursor->mask[i]; break; case ROP_COPY: for (i = 0; i < size; i++) cr_data->data[i] = cursor->mask[i]; break; default: break; } if (cur_size == 32) { for (i = 0; i < size; i++) { cr_data->bak[j] = (u32) cr_data->data[i]; cr_data->bak[j + 1] = ~cr_data->bak[j]; j += 2; } } else { for (i = 0; i < size; i++) { cr_data->bak[j] = (u32) cr_data->data[i]; cr_data->bak[j + 1] = 0x0; cr_data->bak[j + 2] = ~cr_data->bak[j]; cr_data->bak[j + 3] = ~cr_data->bak[j + 1]; j += 4; } } memcpy_toio(viafbinfo->screen_base + viapar->shared-> cursor_vram_addr, cr_data->bak, CURSOR_SIZE); kfree(cr_data); } 
if (cursor->enable) viafb_show_hw_cursor(info, HW_Cursor_ON); return 0; } static int viafb_sync(struct fb_info *info) { if (!(info->flags & FBINFO_HWACCEL_DISABLED)) viafb_wait_engine_idle(info); return 0; } static int get_primary_device(void) { int primary_device = 0; /* Rule: device on iga1 path are the primary device. */ if (viafb_SAMM_ON) { if (viafb_CRT_ON) { if (viaparinfo->shared->iga1_devices & VIA_CRT) { DEBUG_MSG(KERN_INFO "CRT IGA Path:%d\n", IGA1); primary_device = CRT_Device; } } if (viafb_DVI_ON) { if (viaparinfo->tmds_setting_info->iga_path == IGA1) { DEBUG_MSG(KERN_INFO "DVI IGA Path:%d\n", viaparinfo-> tmds_setting_info->iga_path); primary_device = DVI_Device; } } if (viafb_LCD_ON) { if (viaparinfo->lvds_setting_info->iga_path == IGA1) { DEBUG_MSG(KERN_INFO "LCD IGA Path:%d\n", viaparinfo-> lvds_setting_info->iga_path); primary_device = LCD_Device; } } if (viafb_LCD2_ON) { if (viaparinfo->lvds_setting_info2->iga_path == IGA1) { DEBUG_MSG(KERN_INFO "LCD2 IGA Path:%d\n", viaparinfo-> lvds_setting_info2->iga_path); primary_device = LCD2_Device; } } } return primary_device; } static void retrieve_device_setting(struct viafb_ioctl_setting *setting_info) { /* get device status */ if (viafb_CRT_ON == 1) setting_info->device_status = CRT_Device; if (viafb_DVI_ON == 1) setting_info->device_status |= DVI_Device; if (viafb_LCD_ON == 1) setting_info->device_status |= LCD_Device; if (viafb_LCD2_ON == 1) setting_info->device_status |= LCD2_Device; setting_info->samm_status = viafb_SAMM_ON; setting_info->primary_device = get_primary_device(); setting_info->first_dev_bpp = viafb_bpp; setting_info->second_dev_bpp = viafb_bpp1; setting_info->first_dev_refresh = viafb_refresh; setting_info->second_dev_refresh = viafb_refresh1; setting_info->first_dev_hor_res = viafb_hotplug_Xres; setting_info->first_dev_ver_res = viafb_hotplug_Yres; setting_info->second_dev_hor_res = viafb_second_xres; setting_info->second_dev_ver_res = viafb_second_yres; /* Get lcd attributes */ 
setting_info->lcd_attributes.display_center = viafb_lcd_dsp_method; setting_info->lcd_attributes.panel_id = viafb_lcd_panel_id; setting_info->lcd_attributes.lcd_mode = viafb_lcd_mode; } static int __init parse_active_dev(void) { viafb_CRT_ON = STATE_OFF; viafb_DVI_ON = STATE_OFF; viafb_LCD_ON = STATE_OFF; viafb_LCD2_ON = STATE_OFF; /* 1. Modify the active status of devices. */ /* 2. Keep the order of devices, so we can set corresponding IGA path to devices in SAMM case. */ /* Note: The previous of active_dev is primary device, and the following is secondary device. */ if (!viafb_active_dev) { if (machine_is_olpc()) { /* LCD only */ viafb_LCD_ON = STATE_ON; viafb_SAMM_ON = STATE_OFF; } else { viafb_CRT_ON = STATE_ON; viafb_SAMM_ON = STATE_OFF; } } else if (!strcmp(viafb_active_dev, "CRT+DVI")) { /* CRT+DVI */ viafb_CRT_ON = STATE_ON; viafb_DVI_ON = STATE_ON; viafb_primary_dev = CRT_Device; } else if (!strcmp(viafb_active_dev, "DVI+CRT")) { /* DVI+CRT */ viafb_CRT_ON = STATE_ON; viafb_DVI_ON = STATE_ON; viafb_primary_dev = DVI_Device; } else if (!strcmp(viafb_active_dev, "CRT+LCD")) { /* CRT+LCD */ viafb_CRT_ON = STATE_ON; viafb_LCD_ON = STATE_ON; viafb_primary_dev = CRT_Device; } else if (!strcmp(viafb_active_dev, "LCD+CRT")) { /* LCD+CRT */ viafb_CRT_ON = STATE_ON; viafb_LCD_ON = STATE_ON; viafb_primary_dev = LCD_Device; } else if (!strcmp(viafb_active_dev, "DVI+LCD")) { /* DVI+LCD */ viafb_DVI_ON = STATE_ON; viafb_LCD_ON = STATE_ON; viafb_primary_dev = DVI_Device; } else if (!strcmp(viafb_active_dev, "LCD+DVI")) { /* LCD+DVI */ viafb_DVI_ON = STATE_ON; viafb_LCD_ON = STATE_ON; viafb_primary_dev = LCD_Device; } else if (!strcmp(viafb_active_dev, "LCD+LCD2")) { viafb_LCD_ON = STATE_ON; viafb_LCD2_ON = STATE_ON; viafb_primary_dev = LCD_Device; } else if (!strcmp(viafb_active_dev, "LCD2+LCD")) { viafb_LCD_ON = STATE_ON; viafb_LCD2_ON = STATE_ON; viafb_primary_dev = LCD2_Device; } else if (!strcmp(viafb_active_dev, "CRT")) { /* CRT only */ viafb_CRT_ON = STATE_ON; 
viafb_SAMM_ON = STATE_OFF; } else if (!strcmp(viafb_active_dev, "DVI")) { /* DVI only */ viafb_DVI_ON = STATE_ON; viafb_SAMM_ON = STATE_OFF; } else if (!strcmp(viafb_active_dev, "LCD")) { /* LCD only */ viafb_LCD_ON = STATE_ON; viafb_SAMM_ON = STATE_OFF; } else return -EINVAL; return 0; } static int __devinit parse_port(char *opt_str, int *output_interface) { if (!strncmp(opt_str, "DVP0", 4)) *output_interface = INTERFACE_DVP0; else if (!strncmp(opt_str, "DVP1", 4)) *output_interface = INTERFACE_DVP1; else if (!strncmp(opt_str, "DFP_HIGHLOW", 11)) *output_interface = INTERFACE_DFP; else if (!strncmp(opt_str, "DFP_HIGH", 8)) *output_interface = INTERFACE_DFP_HIGH; else if (!strncmp(opt_str, "DFP_LOW", 7)) *output_interface = INTERFACE_DFP_LOW; else *output_interface = INTERFACE_NONE; return 0; } static void __devinit parse_lcd_port(void) { parse_port(viafb_lcd_port, &viaparinfo->chip_info->lvds_chip_info. output_interface); /*Initialize to avoid unexpected behavior */ viaparinfo->chip_info->lvds_chip_info2.output_interface = INTERFACE_NONE; DEBUG_MSG(KERN_INFO "parse_lcd_port: viafb_lcd_port:%s,interface:%d\n", viafb_lcd_port, viaparinfo->chip_info->lvds_chip_info. output_interface); } static void __devinit parse_dvi_port(void) { parse_port(viafb_dvi_port, &viaparinfo->chip_info->tmds_chip_info. output_interface); DEBUG_MSG(KERN_INFO "parse_dvi_port: viafb_dvi_port:%s,interface:%d\n", viafb_dvi_port, viaparinfo->chip_info->tmds_chip_info. 
output_interface); } #ifdef CONFIG_FB_VIA_DIRECT_PROCFS /* * The proc filesystem read/write function, a simple proc implement to * get/set the value of DPA DVP0, DVP0DataDriving, DVP0ClockDriving, DVP1, * DVP1Driving, DFPHigh, DFPLow CR96, SR2A[5], SR1B[1], SR2A[4], SR1E[2], * CR9B, SR65, CR97, CR99 */ static int viafb_dvp0_proc_show(struct seq_file *m, void *v) { u8 dvp0_data_dri = 0, dvp0_clk_dri = 0, dvp0 = 0; dvp0_data_dri = (viafb_read_reg(VIASR, SR2A) & BIT5) >> 4 | (viafb_read_reg(VIASR, SR1B) & BIT1) >> 1; dvp0_clk_dri = (viafb_read_reg(VIASR, SR2A) & BIT4) >> 3 | (viafb_read_reg(VIASR, SR1E) & BIT2) >> 2; dvp0 = viafb_read_reg(VIACR, CR96) & 0x0f; seq_printf(m, "%x %x %x\n", dvp0, dvp0_data_dri, dvp0_clk_dri); return 0; } static int viafb_dvp0_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_dvp0_proc_show, NULL); } static ssize_t viafb_dvp0_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[20], *value, *pbuf; u8 reg_val = 0; unsigned long length, i; if (count < 1) return -EINVAL; length = count > 20 ? 
20 : count; if (copy_from_user(&buf[0], buffer, length)) return -EFAULT; buf[length - 1] = '\0'; /*Ensure end string */ pbuf = &buf[0]; for (i = 0; i < 3; i++) { value = strsep(&pbuf, " "); if (value != NULL) { if (kstrtou8(value, 0, &reg_val) < 0) return -EINVAL; DEBUG_MSG(KERN_INFO "DVP0:reg_val[%l]=:%x\n", i, reg_val); switch (i) { case 0: viafb_write_reg_mask(CR96, VIACR, reg_val, 0x0f); break; case 1: viafb_write_reg_mask(SR2A, VIASR, reg_val << 4, BIT5); viafb_write_reg_mask(SR1B, VIASR, reg_val << 1, BIT1); break; case 2: viafb_write_reg_mask(SR2A, VIASR, reg_val << 3, BIT4); viafb_write_reg_mask(SR1E, VIASR, reg_val << 2, BIT2); break; default: break; } } else { break; } } return count; } static const struct file_operations viafb_dvp0_proc_fops = { .owner = THIS_MODULE, .open = viafb_dvp0_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, .write = viafb_dvp0_proc_write, }; static int viafb_dvp1_proc_show(struct seq_file *m, void *v) { u8 dvp1 = 0, dvp1_data_dri = 0, dvp1_clk_dri = 0; dvp1 = viafb_read_reg(VIACR, CR9B) & 0x0f; dvp1_data_dri = (viafb_read_reg(VIASR, SR65) & 0x0c) >> 2; dvp1_clk_dri = viafb_read_reg(VIASR, SR65) & 0x03; seq_printf(m, "%x %x %x\n", dvp1, dvp1_data_dri, dvp1_clk_dri); return 0; } static int viafb_dvp1_proc_open(struct inode *inode, struct file *file) { return single_open(file, viafb_dvp1_proc_show, NULL); } static ssize_t viafb_dvp1_proc_write(struct file *file, const char __user *buffer, size_t count, loff_t *pos) { char buf[20], *value, *pbuf; u8 reg_val = 0; unsigned long length, i; if (count < 1) return -EINVAL; length = count > 20 ? 
20 : count;
	if (copy_from_user(&buf[0], buffer, length))
		return -EFAULT;
	buf[length - 1] = '\0';	/*Ensure end string */
	pbuf = &buf[0];
	/* Up to three fields: CR9B[3:0], SR65[3:2], SR65[1:0]. */
	for (i = 0; i < 3; i++) {
		value = strsep(&pbuf, " ");
		if (value != NULL) {
			if (kstrtou8(value, 0, &reg_val) < 0)
				return -EINVAL;
			switch (i) {
			case 0:
				viafb_write_reg_mask(CR9B, VIACR,
					reg_val, 0x0f);
				break;
			case 1:
				viafb_write_reg_mask(SR65, VIASR,
					reg_val << 2, 0x0c);
				break;
			case 2:
				viafb_write_reg_mask(SR65, VIASR,
					reg_val, 0x03);
				break;
			default:
				break;
			}
		} else {
			break;
		}
	}
	return count;
}

static const struct file_operations viafb_dvp1_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_dvp1_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = viafb_dvp1_proc_write,
};

/* Show DFP-high pad state: CR97[3:0] as a single hex value. */
static int viafb_dfph_proc_show(struct seq_file *m, void *v)
{
	u8 dfp_high = 0;

	dfp_high = viafb_read_reg(VIACR, CR97) & 0x0f;
	seq_printf(m, "%x\n", dfp_high);
	return 0;
}

static int viafb_dfph_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_dfph_proc_show, NULL);
}

/* Accept one value and store it into CR97[3:0]. */
static ssize_t viafb_dfph_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	char buf[20];
	u8 reg_val = 0;
	unsigned long length;

	if (count < 1)
		return -EINVAL;
	length = count > 20 ? 20 : count;
	if (copy_from_user(&buf[0], buffer, length))
		return -EFAULT;
	buf[length - 1] = '\0';	/*Ensure end string */
	if (kstrtou8(buf, 0, &reg_val) < 0)
		return -EINVAL;
	viafb_write_reg_mask(CR97, VIACR, reg_val, 0x0f);
	return count;
}

static const struct file_operations viafb_dfph_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_dfph_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = viafb_dfph_proc_write,
};

/* Show DFP-low pad state: CR99[3:0] as a single hex value. */
static int viafb_dfpl_proc_show(struct seq_file *m, void *v)
{
	u8 dfp_low = 0;

	dfp_low = viafb_read_reg(VIACR, CR99) & 0x0f;
	seq_printf(m, "%x\n", dfp_low);
	return 0;
}

static int viafb_dfpl_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_dfpl_proc_show, NULL);
}

/* Accept one value and store it into CR99[3:0]. */
static ssize_t viafb_dfpl_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	char buf[20];
	u8 reg_val = 0;
	unsigned long length;

	if (count < 1)
		return -EINVAL;
	length = count > 20 ? 20 : count;
	if (copy_from_user(&buf[0], buffer, length))
		return -EFAULT;
	buf[length - 1] = '\0';	/*Ensure end string */
	if (kstrtou8(buf, 0, &reg_val) < 0)
		return -EINVAL;
	viafb_write_reg_mask(CR99, VIACR, reg_val, 0x0f);
	return count;
}

static const struct file_operations viafb_dfpl_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_dfpl_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = viafb_dfpl_proc_write,
};

/* Dump VT1636 LVDS transmitter registers 0x08/0x09 for each LVDS channel
 * driven by a VT1636 (second channel continues on the next chunk). */
static int viafb_vt1636_proc_show(struct seq_file *m, void *v)
{
	u8 vt1636_08 = 0, vt1636_09 = 0;

	switch (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) {
	case VT1636_LVDS:
		vt1636_08 =
		    viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info,
		    &viaparinfo->chip_info->lvds_chip_info, 0x08) & 0x0f;
		vt1636_09 =
		    viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info,
		    &viaparinfo->chip_info->lvds_chip_info, 0x09) & 0x1f;
		seq_printf(m, "%x %x\n", vt1636_08, vt1636_09);
		break;
	default:
		break;
	}
	switch (viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) {
	case VT1636_LVDS:
vt1636_08 =
		    viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info2,
		    &viaparinfo->chip_info->lvds_chip_info2, 0x08) & 0x0f;
		vt1636_09 =
		    viafb_gpio_i2c_read_lvds(viaparinfo->lvds_setting_info2,
		    &viaparinfo->chip_info->lvds_chip_info2, 0x09) & 0x1f;
		seq_printf(m, " %x %x\n", vt1636_08, vt1636_09);
		break;
	default:
		break;
	}
	return 0;
}

static int viafb_vt1636_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_vt1636_proc_show, NULL);
}

/* Write VT1636 registers 0x08 (mask 0x0f) and 0x09 (mask 0x1f) from two
 * space-separated values, for each LVDS channel driven by a VT1636. */
static ssize_t viafb_vt1636_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	char buf[30], *value, *pbuf;
	struct IODATA reg_val;
	unsigned long length, i;

	if (count < 1)
		return -EINVAL;
	length = count > 30 ? 30 : count;
	if (copy_from_user(&buf[0], buffer, length))
		return -EFAULT;
	buf[length - 1] = '\0';	/*Ensure end string */
	pbuf = &buf[0];
	switch (viaparinfo->chip_info->lvds_chip_info.lvds_chip_name) {
	case VT1636_LVDS:
		for (i = 0; i < 2; i++) {
			value = strsep(&pbuf, " ");
			if (value != NULL) {
				if (kstrtou8(value, 0, &reg_val.Data) < 0)
					return -EINVAL;
				switch (i) {
				case 0:
					reg_val.Index = 0x08;
					reg_val.Mask = 0x0f;
					viafb_gpio_i2c_write_mask_lvds
					    (viaparinfo->lvds_setting_info,
					     &viaparinfo->
					     chip_info->lvds_chip_info,
					     reg_val);
					break;
				case 1:
					reg_val.Index = 0x09;
					reg_val.Mask = 0x1f;
					viafb_gpio_i2c_write_mask_lvds
					    (viaparinfo->lvds_setting_info,
					     &viaparinfo->
					     chip_info->lvds_chip_info,
					     reg_val);
					break;
				default:
					break;
				}
			} else {
				break;
			}
		}
		break;
	default:
		break;
	}
	switch (viaparinfo->chip_info->lvds_chip_info2.lvds_chip_name) {
	case VT1636_LVDS:
		for (i = 0; i < 2; i++) {
			value = strsep(&pbuf, " ");
			if (value != NULL) {
				if (kstrtou8(value, 0, &reg_val.Data) < 0)
					return -EINVAL;
				switch (i) {
				case 0:
					reg_val.Index = 0x08;
					reg_val.Mask = 0x0f;
					viafb_gpio_i2c_write_mask_lvds
					    (viaparinfo->lvds_setting_info2,
					     &viaparinfo->
					     chip_info->lvds_chip_info2,
					     reg_val);
					break;
				case 1:
					reg_val.Index = 0x09;
					reg_val.Mask = 0x1f;
					viafb_gpio_i2c_write_mask_lvds
					    (viaparinfo->lvds_setting_info2,
					     &viaparinfo->
					     chip_info->lvds_chip_info2,
					     reg_val);
					break;
				default:
					break;
				}
			} else {
				break;
			}
		}
		break;
	default:
		break;
	}
	return count;
}

static const struct file_operations viafb_vt1636_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_vt1636_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = viafb_vt1636_proc_write,
};

#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */

/* List the output devices supported by the detected graphics chip. */
static int viafb_sup_odev_proc_show(struct seq_file *m, void *v)
{
	via_odev_to_seq(m, supported_odev_map[
		viaparinfo->shared->chip_info.gfx_chip_name]);
	return 0;
}

static int viafb_sup_odev_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_sup_odev_proc_show, NULL);
}

static const struct file_operations viafb_sup_odev_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_sup_odev_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

/* Parse a device-list write ("+dev", "-dev" or "dev") and add to, remove
 * from, or replace the device bitmask in *odev accordingly. */
static ssize_t odev_update(const char __user *buffer, size_t count,
		u32 *odev)
{
	char buf[64], *ptr = buf;
	u32 devices;
	bool add, sub;

	if (count < 1 || count > 63)
		return -EINVAL;
	if (copy_from_user(&buf[0], buffer, count))
		return -EFAULT;
	buf[count] = '\0';
	add = buf[0] == '+';
	sub = buf[0] == '-';
	if (add || sub)
		ptr++;
	devices = via_parse_odev(ptr, &ptr);
	if (*ptr == '\n')
		ptr++;
	if (*ptr != 0)
		return -EINVAL;
	if (add)
		*odev |= devices;
	else if (sub)
		*odev &= ~devices;
	else
		*odev = devices;
	return count;
}

/* Show the devices currently assigned to IGA1. */
static int viafb_iga1_odev_proc_show(struct seq_file *m, void *v)
{
	via_odev_to_seq(m, viaparinfo->shared->iga1_devices);
	return 0;
}

static int viafb_iga1_odev_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_iga1_odev_proc_show, NULL);
}

/* Reassign output devices to IGA1 (body continues on the next chunk). */
static ssize_t viafb_iga1_odev_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	u32 dev_on, dev_off, dev_old, dev_new;
	ssize_t res;

	dev_old = dev_new = viaparinfo->shared->iga1_devices;
	res = odev_update(buffer, count, &dev_new);
	if (res != count)
return res;
	dev_off = dev_old & ~dev_new;
	dev_on = dev_new & ~dev_old;
	viaparinfo->shared->iga1_devices = dev_new;
	/* a device can be driven by only one IGA at a time */
	viaparinfo->shared->iga2_devices &= ~dev_new;
	via_set_state(dev_off, VIA_STATE_OFF);
	via_set_source(dev_new, IGA1);
	via_set_state(dev_on, VIA_STATE_ON);
	return res;
}

static const struct file_operations viafb_iga1_odev_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_iga1_odev_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = viafb_iga1_odev_proc_write,
};

/* Show the devices currently assigned to IGA2. */
static int viafb_iga2_odev_proc_show(struct seq_file *m, void *v)
{
	via_odev_to_seq(m, viaparinfo->shared->iga2_devices);
	return 0;
}

static int viafb_iga2_odev_proc_open(struct inode *inode, struct file *file)
{
	return single_open(file, viafb_iga2_odev_proc_show, NULL);
}

/* Reassign output devices to IGA2; mirror image of the IGA1 writer. */
static ssize_t viafb_iga2_odev_proc_write(struct file *file,
	const char __user *buffer, size_t count, loff_t *pos)
{
	u32 dev_on, dev_off, dev_old, dev_new;
	ssize_t res;

	dev_old = dev_new = viaparinfo->shared->iga2_devices;
	res = odev_update(buffer, count, &dev_new);
	if (res != count)
		return res;
	dev_off = dev_old & ~dev_new;
	dev_on = dev_new & ~dev_old;
	viaparinfo->shared->iga2_devices = dev_new;
	viaparinfo->shared->iga1_devices &= ~dev_new;
	via_set_state(dev_off, VIA_STATE_OFF);
	via_set_source(dev_new, IGA2);
	via_set_state(dev_on, VIA_STATE_ON);
	return res;
}

static const struct file_operations viafb_iga2_odev_proc_fops = {
	.owner = THIS_MODULE,
	.open = viafb_iga2_odev_proc_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = viafb_iga2_odev_proc_write,
};

#define IS_VT1636(lvds_chip)	((lvds_chip).lvds_chip_name == VT1636_LVDS)

/* Create the /proc/viafb hierarchy; the direct-procfs entries only when
 * CONFIG_FB_VIA_DIRECT_PROCFS is enabled. */
static void viafb_init_proc(struct viafb_shared *shared)
{
	struct proc_dir_entry *iga1_entry, *iga2_entry,
		*viafb_entry = proc_mkdir("viafb", NULL);

	shared->proc_entry = viafb_entry;
	if (viafb_entry) {
#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
		proc_create("dvp0", 0, viafb_entry, &viafb_dvp0_proc_fops);
		proc_create("dvp1", 0, viafb_entry,
			&viafb_dvp1_proc_fops);
		proc_create("dfph", 0, viafb_entry, &viafb_dfph_proc_fops);
		proc_create("dfpl", 0, viafb_entry, &viafb_dfpl_proc_fops);
		if (IS_VT1636(shared->chip_info.lvds_chip_info)
			|| IS_VT1636(shared->chip_info.lvds_chip_info2))
			proc_create("vt1636", 0, viafb_entry,
				&viafb_vt1636_proc_fops);
#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */
		proc_create("supported_output_devices", 0, viafb_entry,
			&viafb_sup_odev_proc_fops);
		iga1_entry = proc_mkdir("iga1", viafb_entry);
		shared->iga1_proc_entry = iga1_entry;
		proc_create("output_devices", 0, iga1_entry,
			&viafb_iga1_odev_proc_fops);
		iga2_entry = proc_mkdir("iga2", viafb_entry);
		shared->iga2_proc_entry = iga2_entry;
		proc_create("output_devices", 0, iga2_entry,
			&viafb_iga2_odev_proc_fops);
	}
}

/* Tear down everything viafb_init_proc() created, children first. */
static void viafb_remove_proc(struct viafb_shared *shared)
{
	struct proc_dir_entry *viafb_entry = shared->proc_entry,
		*iga1_entry = shared->iga1_proc_entry,
		*iga2_entry = shared->iga2_proc_entry;

	if (!viafb_entry)
		return;

	remove_proc_entry("output_devices", iga2_entry);
	remove_proc_entry("iga2", viafb_entry);
	remove_proc_entry("output_devices", iga1_entry);
	remove_proc_entry("iga1", viafb_entry);
	remove_proc_entry("supported_output_devices", viafb_entry);

#ifdef CONFIG_FB_VIA_DIRECT_PROCFS
	remove_proc_entry("dvp0", viafb_entry);/* parent dir */
	remove_proc_entry("dvp1", viafb_entry);
	remove_proc_entry("dfph", viafb_entry);
	remove_proc_entry("dfpl", viafb_entry);
	if (IS_VT1636(shared->chip_info.lvds_chip_info)
		|| IS_VT1636(shared->chip_info.lvds_chip_info2))
		remove_proc_entry("vt1636", viafb_entry);
#endif /* CONFIG_FB_VIA_DIRECT_PROCFS */

	remove_proc_entry("viafb", NULL);
}
#undef IS_VT1636

/* Parse a "WIDTHxHEIGHT" mode string; with str == NULL choose a default
 * from the monitor's preferred mode or a platform fallback (body continues
 * on the next chunk). */
static int parse_mode(const char *str, u32 devices, u32 *xres, u32 *yres)
{
	const struct fb_videomode *mode = NULL;
	char *ptr;

	if (!str) {
		if (devices == VIA_CRT)
			mode = via_aux_get_preferred_mode(
				viaparinfo->shared->i2c_26);
		else if (devices == VIA_DVP1)
			mode = via_aux_get_preferred_mode(
				viaparinfo->shared->i2c_31);

		if (mode) {
			*xres =
mode->xres;
			*yres = mode->yres;
		} else if (machine_is_olpc()) {
			/* OLPC XO panel fallback */
			*xres = 1200;
			*yres = 900;
		} else {
			*xres = 640;
			*yres = 480;
		}

		return 0;
	}

	*xres = simple_strtoul(str, &ptr, 10);
	if (ptr[0] != 'x')
		return -EINVAL;

	*yres = simple_strtoul(&ptr[1], &ptr, 10);
	if (ptr[0])
		return -EINVAL;

	return 0;
}

#ifdef CONFIG_PM
/* PM suspend hook: freeze the framebuffer and drain pending accel work. */
static int viafb_suspend(void *unused)
{
	console_lock();
	fb_set_suspend(viafbinfo, 1);
	viafb_sync(viafbinfo);
	console_unlock();

	return 0;
}

/* PM resume hook: reset the 2D engine (if mapped) and restore the mode(s). */
static int viafb_resume(void *unused)
{
	console_lock();
	if (viaparinfo->shared->vdev->engine_mmio)
		viafb_reset_engine(viaparinfo);
	viafb_set_par(viafbinfo);
	if (viafb_dual_fb)
		viafb_set_par(viafbinfo1);
	fb_set_suspend(viafbinfo, 0);
	console_unlock();
	return 0;
}

static struct viafb_pm_hooks viafb_fb_pm_hooks = {
	.suspend = viafb_suspend,
	.resume = viafb_resume
};

#endif

/* Probe the auxiliary I2C buses for attached encoders/monitors and record
 * the handles in the shared state. */
static void __devinit i2c_bus_probe(struct viafb_shared *shared)
{
	/* should be always CRT */
	printk(KERN_INFO "viafb: Probing I2C bus 0x26\n");
	shared->i2c_26 = via_aux_probe(viafb_find_i2c_adapter(VIA_PORT_26));
	/* seems to be usually DVP1 */
	printk(KERN_INFO "viafb: Probing I2C bus 0x31\n");
	shared->i2c_31 = via_aux_probe(viafb_find_i2c_adapter(VIA_PORT_31));

	/* FIXME: what is this?
 */
	if (!machine_is_olpc()) {
		printk(KERN_INFO "viafb: Probing I2C bus 0x2C\n");
		shared->i2c_2C =
			via_aux_probe(viafb_find_i2c_adapter(VIA_PORT_2C));
	}

	/* NOTE(review): this message lacks a trailing "\n" — confirm whether
	 * that is intentional before changing the log string. */
	printk(KERN_INFO "viafb: Finished I2C bus probing");
}

/* Release the via_aux bus handles allocated by i2c_bus_probe(). */
static void i2c_bus_free(struct viafb_shared *shared)
{
	via_aux_free(shared->i2c_26);
	via_aux_free(shared->i2c_31);
	via_aux_free(shared->i2c_2C);
}

/* PCI probe: allocate fb_info/par state, probe hardware and register the
 * framebuffer device(s). Continues on the following chunks. */
int __devinit via_fb_pci_probe(struct viafb_dev *vdev)
{
	u32 default_xres, default_yres;
	struct fb_var_screeninfo default_var;
	int rc;
	u32 viafb_par_length;

	DEBUG_MSG(KERN_INFO "VIAFB PCI Probe!!\n");
	memset(&default_var, 0, sizeof(default_var));
	viafb_par_length = ALIGN(sizeof(struct viafb_par), BITS_PER_LONG/8);

	/* Allocate fb_info and ***_par here, also including some other needed
	 * variables
	 */
	viafbinfo = framebuffer_alloc(viafb_par_length +
		ALIGN(sizeof(struct viafb_shared), BITS_PER_LONG/8),
		&vdev->pdev->dev);
	if (!viafbinfo) {
		printk(KERN_ERR"Could not allocate memory for viafb_info.\n");
		return -ENOMEM;
	}

	viaparinfo = (struct viafb_par *)viafbinfo->par;
	viaparinfo->shared = viafbinfo->par + viafb_par_length;
	viaparinfo->shared->vdev = vdev;
	viaparinfo->vram_addr = 0;
	viaparinfo->tmds_setting_info = &viaparinfo->shared->tmds_setting_info;
	viaparinfo->lvds_setting_info = &viaparinfo->shared->lvds_setting_info;
	viaparinfo->lvds_setting_info2 =
		&viaparinfo->shared->lvds_setting_info2;
	viaparinfo->chip_info = &viaparinfo->shared->chip_info;

	i2c_bus_probe(viaparinfo->shared);
	if (viafb_dual_fb)
		viafb_SAMM_ON = 1;
	parse_lcd_port();
	parse_dvi_port();

	viafb_init_chip_info(vdev->chip_type);
	/*
	 * The framebuffer will have been successfully mapped by
	 * the core (or we'd not be here), but we still need to
	 * set up our own accounting.
*/
	viaparinfo->fbmem = vdev->fbmem_start;
	viaparinfo->memsize = vdev->fbmem_len;
	viaparinfo->fbmem_free = viaparinfo->memsize;
	viaparinfo->fbmem_used = 0;

	viafbinfo->screen_base = vdev->fbmem;
	viafbinfo->fix.mmio_start = vdev->engine_start;
	viafbinfo->fix.mmio_len = vdev->engine_len;
	viafbinfo->node = 0;
	viafbinfo->fbops = &viafb_ops;
	viafbinfo->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN;

	viafbinfo->pseudo_palette = pseudo_pal;
	/* Enable hardware acceleration only if the engine sets up cleanly. */
	if (viafb_accel && !viafb_setup_engine(viafbinfo)) {
		viafbinfo->flags |= FBINFO_HWACCEL_COPYAREA |
			FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_IMAGEBLIT;
		default_var.accel_flags = FB_ACCELF_TEXT;
	} else {
		viafbinfo->flags |= FBINFO_HWACCEL_DISABLED;
		default_var.accel_flags = 0;
	}

	/* Carve out memory for the secondary framebuffer (MB units; any
	 * out-of-range request falls back to 8 MB). */
	if (viafb_second_size && (viafb_second_size < 8)) {
		viafb_second_offset = viaparinfo->fbmem_free -
			viafb_second_size * 1024 * 1024;
	} else {
		viafb_second_size = 8;
		viafb_second_offset = viaparinfo->fbmem_free -
			viafb_second_size * 1024 * 1024;
	}

	parse_mode(viafb_mode, viaparinfo->shared->iga1_devices,
		&default_xres, &default_yres);
	if (viafb_SAMM_ON == 1)
		parse_mode(viafb_mode1, viaparinfo->shared->iga2_devices,
			&viafb_second_xres, &viafb_second_yres);

	default_var.xres = default_xres;
	default_var.yres = default_yres;
	default_var.xres_virtual = default_xres;
	default_var.yres_virtual = default_yres;
	default_var.bits_per_pixel = viafb_bpp;
	viafb_fill_var_timing_info(&default_var, viafb_get_best_mode(
		default_var.xres, default_var.yres, viafb_refresh));
	viafb_setup_fixinfo(&viafbinfo->fix, viaparinfo);
	viafbinfo->var = default_var;

	if (viafb_dual_fb) {
		/* Clone the primary state for the second head (IGA2). */
		viafbinfo1 = framebuffer_alloc(viafb_par_length,
				&vdev->pdev->dev);
		if (!viafbinfo1) {
			printk(KERN_ERR
			"allocate the second framebuffer struct error\n");
			rc = -ENOMEM;
			goto out_fb_release;
		}
		viaparinfo1 = viafbinfo1->par;
		memcpy(viaparinfo1, viaparinfo, viafb_par_length);
		viaparinfo1->vram_addr = viafb_second_offset;
		viaparinfo1->memsize = viaparinfo->memsize -
			viafb_second_offset;
		viaparinfo->memsize = viafb_second_offset;
		viaparinfo1->fbmem = viaparinfo->fbmem + viafb_second_offset;

		viaparinfo1->fbmem_used = viaparinfo->fbmem_used;
		viaparinfo1->fbmem_free = viaparinfo1->memsize -
			viaparinfo1->fbmem_used;
		viaparinfo->fbmem_free = viaparinfo->memsize;
		viaparinfo->fbmem_used = 0;

		viaparinfo->iga_path = IGA1;
		viaparinfo1->iga_path = IGA2;
		memcpy(viafbinfo1, viafbinfo, sizeof(struct fb_info));
		viafbinfo1->par = viaparinfo1;
		viafbinfo1->screen_base = viafbinfo->screen_base +
			viafb_second_offset;

		default_var.xres = viafb_second_xres;
		default_var.yres = viafb_second_yres;
		default_var.xres_virtual = viafb_second_xres;
		default_var.yres_virtual = viafb_second_yres;
		default_var.bits_per_pixel = viafb_bpp1;
		viafb_fill_var_timing_info(&default_var, viafb_get_best_mode(
			default_var.xres, default_var.yres, viafb_refresh1));
		viafb_setup_fixinfo(&viafbinfo1->fix, viaparinfo1);
		viafb_check_var(&default_var, viafbinfo1);
		viafbinfo1->var = default_var;
		viafb_update_fix(viafbinfo1);
		viaparinfo1->depth = fb_get_color_depth(&viafbinfo1->var,
			&viafbinfo1->fix);
	}

	viafb_check_var(&viafbinfo->var, viafbinfo);
	viafb_update_fix(viafbinfo);
	viaparinfo->depth = fb_get_color_depth(&viafbinfo->var,
		&viafbinfo->fix);
	default_var.activate = FB_ACTIVATE_NOW;
	rc = fb_alloc_cmap(&viafbinfo->cmap, 256, 0);
	if (rc)
		goto out_fb1_release;

	/* On CLE266 with an LCD primary the second fb must register first;
	 * all other combinations register it after the primary. */
	if (viafb_dual_fb && (viafb_primary_dev == LCD_Device)
	    && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266)) {
		rc = register_framebuffer(viafbinfo1);
		if (rc)
			goto out_dealloc_cmap;
	}
	rc = register_framebuffer(viafbinfo);
	if (rc)
		goto out_fb1_unreg_lcd_cle266;

	if (viafb_dual_fb && ((viafb_primary_dev != LCD_Device)
			|| (viaparinfo->chip_info->gfx_chip_name !=
			UNICHROME_CLE266))) {
		rc = register_framebuffer(viafbinfo1);
		if (rc)
			goto out_fb_unreg;
	}
	DEBUG_MSG(KERN_INFO "fb%d: %s frame buffer device %dx%d-%dbpp\n",
		  viafbinfo->node, viafbinfo->fix.id, default_var.xres,
		  default_var.yres, default_var.bits_per_pixel);

	viafb_init_proc(viaparinfo->shared);
	viafb_init_dac(IGA2);

#ifdef CONFIG_PM
	viafb_pm_register(&viafb_fb_pm_hooks);
#endif
	return 0;

	/* Error unwinding: release in reverse order of acquisition. */
out_fb_unreg:
	unregister_framebuffer(viafbinfo);
out_fb1_unreg_lcd_cle266:
	if (viafb_dual_fb && (viafb_primary_dev == LCD_Device)
	    && (viaparinfo->chip_info->gfx_chip_name == UNICHROME_CLE266))
		unregister_framebuffer(viafbinfo1);
out_dealloc_cmap:
	fb_dealloc_cmap(&viafbinfo->cmap);
out_fb1_release:
	if (viafbinfo1)
		framebuffer_release(viafbinfo1);
out_fb_release:
	i2c_bus_free(viaparinfo->shared);
	framebuffer_release(viafbinfo);
	return rc;
}

/* PCI remove: unwind everything via_fb_pci_probe() set up. */
void __devexit via_fb_pci_remove(struct pci_dev *pdev)
{
	DEBUG_MSG(KERN_INFO "via_pci_remove!\n");
	fb_dealloc_cmap(&viafbinfo->cmap);
	unregister_framebuffer(viafbinfo);
	if (viafb_dual_fb)
		unregister_framebuffer(viafbinfo1);
	viafb_remove_proc(viaparinfo->shared);
	i2c_bus_free(viaparinfo->shared);
	framebuffer_release(viafbinfo);
	if (viafb_dual_fb)
		framebuffer_release(viafbinfo1);
}

#ifndef MODULE
/* Parse "viafb=..." options from the kernel command line (built-in case
 * only; body continues on the next chunk). */
static int __init viafb_setup(void)
{
	char *this_opt;
	char *options;

	DEBUG_MSG(KERN_INFO "viafb_setup!\n");

	if (fb_get_options("viafb", &options))
		return -ENODEV;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep(&options, ",")) != NULL) {
		if (!*this_opt)
			continue;

		if (!strncmp(this_opt, "viafb_mode1=", 12)) {
			viafb_mode1 = kstrdup(this_opt + 12, GFP_KERNEL);
		} else if (!strncmp(this_opt, "viafb_mode=", 11)) {
			viafb_mode = kstrdup(this_opt + 11, GFP_KERNEL);
		} else if (!strncmp(this_opt, "viafb_bpp1=", 11)) {
			if (kstrtouint(this_opt + 11, 0, &viafb_bpp1) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_bpp=", 10)) {
			if (kstrtouint(this_opt + 10, 0, &viafb_bpp) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_refresh1=", 15)) {
			if (kstrtoint(this_opt + 15, 0, &viafb_refresh1) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_refresh=", 14)) {
			if (kstrtoint(this_opt + 14, 0, &viafb_refresh) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_lcd_dsp_method=", 21)) {
			if (kstrtoint(this_opt + 21, 0,
&viafb_lcd_dsp_method) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_lcd_panel_id=", 19)) {
			if (kstrtoint(this_opt + 19, 0,
				      &viafb_lcd_panel_id) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_accel=", 12)) {
			if (kstrtoint(this_opt + 12, 0, &viafb_accel) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_SAMM_ON=", 14)) {
			if (kstrtoint(this_opt + 14, 0, &viafb_SAMM_ON) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_active_dev=", 17)) {
			viafb_active_dev = kstrdup(this_opt + 17, GFP_KERNEL);
		} else if (!strncmp(this_opt,
			"viafb_display_hardware_layout=", 30)) {
			if (kstrtoint(this_opt + 30, 0,
				      &viafb_display_hardware_layout) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_second_size=", 18)) {
			if (kstrtoint(this_opt + 18, 0,
				      &viafb_second_size) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt,
			"viafb_platform_epia_dvi=", 24)) {
			if (kstrtoint(this_opt + 24, 0,
				      &viafb_platform_epia_dvi) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt,
			"viafb_device_lcd_dualedge=", 26)) {
			if (kstrtoint(this_opt + 26, 0,
				      &viafb_device_lcd_dualedge) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_bus_width=", 16)) {
			if (kstrtoint(this_opt + 16, 0,
				      &viafb_bus_width) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_lcd_mode=", 15)) {
			if (kstrtoint(this_opt + 15, 0, &viafb_lcd_mode) < 0)
				return -EINVAL;
		} else if (!strncmp(this_opt, "viafb_lcd_port=", 15)) {
			viafb_lcd_port = kstrdup(this_opt + 15, GFP_KERNEL);
		} else if (!strncmp(this_opt, "viafb_dvi_port=", 15)) {
			viafb_dvi_port = kstrdup(this_opt + 15, GFP_KERNEL);
		}
	}
	return 0;
}
#endif

/*
 * These are called out of via-core for now.
 */
/* Module init: apply platform quirks, parse options, validate the
 * requested modes/depths before the PCI probe runs. */
int __init viafb_init(void)
{
	u32 dummy_x, dummy_y;
	int r = 0;

	if (machine_is_olpc())
		/* Apply XO-1.5-specific configuration. */
		viafb_lcd_panel_id = 23;

#ifndef MODULE
	r = viafb_setup();
	if (r < 0)
		return r;
#endif
	if (parse_mode(viafb_mode, 0, &dummy_x, &dummy_y)
		|| !viafb_get_best_mode(dummy_x, dummy_y, viafb_refresh)
		|| parse_mode(viafb_mode1, 0, &dummy_x, &dummy_y)
		|| !viafb_get_best_mode(dummy_x, dummy_y, viafb_refresh1)
		|| viafb_bpp < 0 || viafb_bpp > 32
		|| viafb_bpp1 < 0 || viafb_bpp1 > 32
		|| parse_active_dev())
		return -EINVAL;

	printk(KERN_INFO
	"VIA Graphics Integration Chipset framebuffer %d.%d initializing\n",
		VERSION_MAJOR, VERSION_MINOR);
	return r;
}

void __exit viafb_exit(void)
{
	DEBUG_MSG(KERN_INFO "viafb_exit!\n");
}

/* fb_ops dispatch table wiring the fbdev core to this driver. */
static struct fb_ops viafb_ops = {
	.owner = THIS_MODULE,
	.fb_open = viafb_open,
	.fb_release = viafb_release,
	.fb_check_var = viafb_check_var,
	.fb_set_par = viafb_set_par,
	.fb_setcolreg = viafb_setcolreg,
	.fb_pan_display = viafb_pan_display,
	.fb_blank = viafb_blank,
	.fb_fillrect = viafb_fillrect,
	.fb_copyarea = viafb_copyarea,
	.fb_imageblit = viafb_imageblit,
	.fb_cursor = viafb_cursor,
	.fb_ioctl = viafb_ioctl,
	.fb_sync = viafb_sync,
};

#ifdef MODULE
module_param(viafb_mode, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_mode, "Set resolution (default=640x480)");
module_param(viafb_mode1, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_mode1, "Set resolution (default=640x480)");
module_param(viafb_bpp, int, S_IRUSR);
MODULE_PARM_DESC(viafb_bpp, "Set color depth (default=32bpp)");
module_param(viafb_bpp1, int, S_IRUSR);
MODULE_PARM_DESC(viafb_bpp1, "Set color depth (default=32bpp)");
module_param(viafb_refresh, int, S_IRUSR);
MODULE_PARM_DESC(viafb_refresh,
	"Set CRT viafb_refresh rate (default = 60)");
module_param(viafb_refresh1, int, S_IRUSR);
MODULE_PARM_DESC(viafb_refresh1,
	"Set CRT refresh rate (default = 60)");
module_param(viafb_lcd_panel_id, int, S_IRUSR);
MODULE_PARM_DESC(viafb_lcd_panel_id,
	"Set Flat Panel type(Default=1024x768)");
module_param(viafb_lcd_dsp_method, int, S_IRUSR);
MODULE_PARM_DESC(viafb_lcd_dsp_method,
	"Set Flat Panel display scaling method.(Default=Expandsion)");
module_param(viafb_SAMM_ON, int, S_IRUSR);
MODULE_PARM_DESC(viafb_SAMM_ON,
	"Turn on/off flag of SAMM(Default=OFF)");
module_param(viafb_accel, int, S_IRUSR);
MODULE_PARM_DESC(viafb_accel,
	"Set 2D Hardware Acceleration: 0 = OFF, 1 = ON (default)");
module_param(viafb_active_dev, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_active_dev, "Specify active devices.");
module_param(viafb_display_hardware_layout, int, S_IRUSR);
MODULE_PARM_DESC(viafb_display_hardware_layout,
	"Display Hardware Layout (LCD Only, DVI Only...,etc)");
module_param(viafb_second_size, int, S_IRUSR);
MODULE_PARM_DESC(viafb_second_size, "Set secondary device memory size");
module_param(viafb_dual_fb, int, S_IRUSR);
MODULE_PARM_DESC(viafb_dual_fb,
	"Turn on/off flag of dual framebuffer devices.(Default = OFF)");
module_param(viafb_platform_epia_dvi, int, S_IRUSR);
MODULE_PARM_DESC(viafb_platform_epia_dvi,
	"Turn on/off flag of DVI devices on EPIA board.(Default = OFF)");
module_param(viafb_device_lcd_dualedge, int, S_IRUSR);
MODULE_PARM_DESC(viafb_device_lcd_dualedge,
	"Turn on/off flag of dual edge panel.(Default = OFF)");
module_param(viafb_bus_width, int, S_IRUSR);
MODULE_PARM_DESC(viafb_bus_width,
	"Set bus width of panel.(Default = 12)");
module_param(viafb_lcd_mode, int, S_IRUSR);
MODULE_PARM_DESC(viafb_lcd_mode,
	"Set Flat Panel mode(Default=OPENLDI)");
module_param(viafb_lcd_port, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_lcd_port, "Specify LCD output port.");
module_param(viafb_dvi_port, charp, S_IRUSR);
MODULE_PARM_DESC(viafb_dvi_port, "Specify DVI output port.");

MODULE_LICENSE("GPL");
#endif
gpl-2.0
khanfrd/Red-Kernel-Old
drivers/net/ethernet/packetengines/yellowfin.c
4811
45757
/* yellowfin.c: A Packet Engines G-NIC ethernet driver for linux. */ /* Written 1997-2001 by Donald Becker. This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on or derived from this code fall under the GPL and must retain the authorship, copyright and license notice. This file is not a complete program and may only be used when the entire operating system is licensed under the GPL. This driver is for the Packet Engines G-NIC PCI Gigabit Ethernet adapter. It also supports the Symbios Logic version of the same chip core. The author may be reached as becker@scyld.com, or C/O Scyld Computing Corporation 410 Severn Ave., Suite 210 Annapolis MD 21403 Support and updates available at http://www.scyld.com/network/yellowfin.html [link no longer provides useful info -jgarzik] */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "yellowfin" #define DRV_VERSION "2.1" #define DRV_RELDATE "Sep 11, 2006" /* The user-configurable values. These may be modified when a driver module is loaded.*/ static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */ /* Maximum events (Rx packets, etc.) to handle at each interrupt. */ static int max_interrupt_work = 20; static int mtu; #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ /* System-wide count of bogus-rx frames. */ static int bogus_rx; static int dma_ctrl = 0x004A0263; /* Constrained by errata */ static int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ #elif defined(YF_NEW) /* A future perfect board :->. */ static int dma_ctrl = 0x00CAC277; /* Override when loading module! */ static int fifo_cfg = 0x0028; #else static const int dma_ctrl = 0x004A0263; /* Constrained by errata */ static const int fifo_cfg = 0x0020; /* Bypass external Tx FIFO. */ #endif /* Set the copy breakpoint for the copy-only-tiny-frames scheme. Setting to > 1514 effectively disables this feature. 
*/ static int rx_copybreak; /* Used to pass the media type, etc. No media types are currently defined. These exist for driver interoperability. */ #define MAX_UNITS 8 /* More are supported, limit only on options */ static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1}; /* Do ugly workaround for GX server chipset errata. */ static int gx_fix; /* Operational parameters that are set at compile time. */ /* Keep the ring sizes a power of two for efficiency. Making the Tx ring too long decreases the effectiveness of channel bonding and packet priority. There are no ill effects from too-large receive rings. */ #define TX_RING_SIZE 16 #define TX_QUEUE_SIZE 12 /* Must be > 4 && <= TX_RING_SIZE */ #define RX_RING_SIZE 64 #define STATUS_TOTAL_SIZE TX_RING_SIZE*sizeof(struct tx_status_words) #define TX_TOTAL_SIZE 2*TX_RING_SIZE*sizeof(struct yellowfin_desc) #define RX_TOTAL_SIZE RX_RING_SIZE*sizeof(struct yellowfin_desc) /* Operational parameters that usually are not changed. */ /* Time in jiffies before concluding the transmitter is hung. */ #define TX_TIMEOUT (2*HZ) #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer.*/ #define yellowfin_debug debug #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/errno.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/mii.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <asm/uaccess.h> #include <asm/processor.h> /* Processor type for cache alignment. */ #include <asm/unaligned.h> #include <asm/io.h> /* These identify the driver base version and may not be removed. 
*/ static const char version[] __devinitconst = KERN_INFO DRV_NAME ".c:v1.05 1/09/2001 Written by Donald Becker <becker@scyld.com>\n" " (unofficial 2.4.x port, " DRV_VERSION ", " DRV_RELDATE ")\n"; MODULE_AUTHOR("Donald Becker <becker@scyld.com>"); MODULE_DESCRIPTION("Packet Engines Yellowfin G-NIC Gigabit Ethernet driver"); MODULE_LICENSE("GPL"); module_param(max_interrupt_work, int, 0); module_param(mtu, int, 0); module_param(debug, int, 0); module_param(rx_copybreak, int, 0); module_param_array(options, int, NULL, 0); module_param_array(full_duplex, int, NULL, 0); module_param(gx_fix, int, 0); MODULE_PARM_DESC(max_interrupt_work, "G-NIC maximum events handled per interrupt"); MODULE_PARM_DESC(mtu, "G-NIC MTU (all boards)"); MODULE_PARM_DESC(debug, "G-NIC debug level (0-7)"); MODULE_PARM_DESC(rx_copybreak, "G-NIC copy breakpoint for copy-only-tiny-frames"); MODULE_PARM_DESC(options, "G-NIC: Bits 0-3: media type, bit 17: full duplex"); MODULE_PARM_DESC(full_duplex, "G-NIC full duplex setting(s) (1)"); MODULE_PARM_DESC(gx_fix, "G-NIC: enable GX server chipset bug workaround (0-1)"); /* Theory of Operation I. Board Compatibility This device driver is designed for the Packet Engines "Yellowfin" Gigabit Ethernet adapter. The G-NIC 64-bit PCI card is supported, as well as the Symbios 53C885E dual function chip. II. Board-specific settings PCI bus devices are configured by the system at boot time, so no jumpers need to be set on the board. The system BIOS preferably should assign the PCI INTA signal to an otherwise unused system IRQ line. Note: Kernel versions earlier than 1.3.73 do not support shared PCI interrupt lines. III. Driver operation IIIa. Ring buffers The Yellowfin uses the Descriptor Based DMA Architecture specified by Apple. This is a descriptor list scheme similar to that used by the EEPro100 and Tulip. This driver uses two statically allocated fixed-size descriptor lists formed into rings by a branch from the final descriptor to the beginning of the list. 
The ring sizes are set at compile time by RX/TX_RING_SIZE. The driver allocates full frame size skbuffs for the Rx ring buffers at open() time and passes the skb->data field to the Yellowfin as receive data buffers. When an incoming frame is less than RX_COPYBREAK bytes long, a fresh skbuff is allocated and the frame is copied to the new skbuff. When the incoming frame is larger, the skbuff is passed directly up the protocol stack and replaced by a newly allocated skbuff. The RX_COPYBREAK value is chosen to trade-off the memory wasted by using a full-sized skbuff for small frames vs. the copying costs of larger frames. For small frames the copying cost is negligible (esp. considering that we are pre-loading the cache with immediately useful header information). For large frames the copying cost is non-trivial, and the larger copy might flush the cache of useful data. IIIC. Synchronization The driver runs as two independent, single-threaded flows of control. One is the send-packet routine, which enforces single-threaded use by the dev->tbusy flag. The other thread is the interrupt handler, which is single threaded by the hardware and other software. The send packet thread has partial control over the Tx ring and 'dev->tbusy' flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next queue slot is empty, it clears the tbusy flag when finished otherwise it sets the 'yp->tx_full' flag. The interrupt handler has exclusive control over the Rx ring and records stats from the Tx ring. After reaping the stats, it marks the Tx queue entry as empty by incrementing the dirty_tx mark. Iff the 'yp->tx_full' flag is set, it clears both the tx_full and tbusy flags. IV. Notes Thanks to Kim Stearns of Packet Engines for providing a pair of G-NIC boards. Thanks to Bruce Faust of Digitalscape for providing both their SYM53C885 board and an AlphaStation to verifty the Alpha port! IVb. 
References Yellowfin Engineering Design Specification, 4/23/97 Preliminary/Confidential Symbios SYM53C885 PCI-SCSI/Fast Ethernet Multifunction Controller Preliminary Data Manual v3.0 http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html IVc. Errata See Packet Engines confidential appendix (prototype chips only). */ enum capability_flags { HasMII=1, FullTxStatus=2, IsGigabit=4, HasMulticastBug=8, FullRxStatus=16, HasMACAddrBug=32, /* Only on early revs. */ DontUseEeprom=64, /* Don't read the MAC from the EEPROm. */ }; /* The PCI I/O space extent. */ enum { YELLOWFIN_SIZE = 0x100, }; struct pci_id_info { const char *name; struct match_info { int pci, pci_mask, subsystem, subsystem_mask; int revision, revision_mask; /* Only 8 bits. */ } id; int drv_flags; /* Driver use, intended as capability flags. */ }; static const struct pci_id_info pci_id_tbl[] = { {"Yellowfin G-NIC Gigabit Ethernet", { 0x07021000, 0xffffffff}, FullTxStatus | IsGigabit | HasMulticastBug | HasMACAddrBug | DontUseEeprom}, {"Symbios SYM83C885", { 0x07011000, 0xffffffff}, HasMII | DontUseEeprom }, { } }; static DEFINE_PCI_DEVICE_TABLE(yellowfin_pci_tbl) = { { 0x1000, 0x0702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }, { 0x1000, 0x0701, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 }, { } }; MODULE_DEVICE_TABLE (pci, yellowfin_pci_tbl); /* Offsets to the Yellowfin registers. Various sizes and alignments. 
*/ enum yellowfin_offsets { TxCtrl=0x00, TxStatus=0x04, TxPtr=0x0C, TxIntrSel=0x10, TxBranchSel=0x14, TxWaitSel=0x18, RxCtrl=0x40, RxStatus=0x44, RxPtr=0x4C, RxIntrSel=0x50, RxBranchSel=0x54, RxWaitSel=0x58, EventStatus=0x80, IntrEnb=0x82, IntrClear=0x84, IntrStatus=0x86, ChipRev=0x8C, DMACtrl=0x90, TxThreshold=0x94, Cnfg=0xA0, FrameGap0=0xA2, FrameGap1=0xA4, MII_Cmd=0xA6, MII_Addr=0xA8, MII_Wr_Data=0xAA, MII_Rd_Data=0xAC, MII_Status=0xAE, RxDepth=0xB8, FlowCtrl=0xBC, AddrMode=0xD0, StnAddr=0xD2, HashTbl=0xD8, FIFOcfg=0xF8, EEStatus=0xF0, EECtrl=0xF1, EEAddr=0xF2, EERead=0xF3, EEWrite=0xF4, EEFeature=0xF5, }; /* The Yellowfin Rx and Tx buffer descriptors. Elements are written as 32 bit for endian portability. */ struct yellowfin_desc { __le32 dbdma_cmd; __le32 addr; __le32 branch_addr; __le32 result_status; }; struct tx_status_words { #ifdef __BIG_ENDIAN u16 tx_errs; u16 tx_cnt; u16 paused; u16 total_tx_cnt; #else /* Little endian chips. */ u16 tx_cnt; u16 tx_errs; u16 total_tx_cnt; u16 paused; #endif /* __BIG_ENDIAN */ }; /* Bits in yellowfin_desc.cmd */ enum desc_cmd_bits { CMD_TX_PKT=0x10000000, CMD_RX_BUF=0x20000000, CMD_TXSTATUS=0x30000000, CMD_NOP=0x60000000, CMD_STOP=0x70000000, BRANCH_ALWAYS=0x0C0000, INTR_ALWAYS=0x300000, WAIT_ALWAYS=0x030000, BRANCH_IFTRUE=0x040000, }; /* Bits in yellowfin_desc.status */ enum desc_status_bits { RX_EOP=0x0040, }; /* Bits in the interrupt status/mask registers. */ enum intr_status_bits { IntrRxDone=0x01, IntrRxInvalid=0x02, IntrRxPCIFault=0x04,IntrRxPCIErr=0x08, IntrTxDone=0x10, IntrTxInvalid=0x20, IntrTxPCIFault=0x40,IntrTxPCIErr=0x80, IntrEarlyRx=0x100, IntrWakeup=0x200, }; #define PRIV_ALIGN 31 /* Required alignment mask */ #define MII_CNT 4 struct yellowfin_private { /* Descriptor rings first for alignment. Tx requires a second descriptor for status. 
*/ struct yellowfin_desc *rx_ring; struct yellowfin_desc *tx_ring; struct sk_buff* rx_skbuff[RX_RING_SIZE]; struct sk_buff* tx_skbuff[TX_RING_SIZE]; dma_addr_t rx_ring_dma; dma_addr_t tx_ring_dma; struct tx_status_words *tx_status; dma_addr_t tx_status_dma; struct timer_list timer; /* Media selection timer. */ /* Frequently used and paired value: keep adjacent for cache effect. */ int chip_id, drv_flags; struct pci_dev *pci_dev; unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */ unsigned int rx_buf_sz; /* Based on MTU+slack. */ struct tx_status_words *tx_tail_desc; unsigned int cur_tx, dirty_tx; int tx_threshold; unsigned int tx_full:1; /* The Tx queue is full. */ unsigned int full_duplex:1; /* Full-duplex operation requested. */ unsigned int duplex_lock:1; unsigned int medialock:1; /* Do not sense media. */ unsigned int default_port:4; /* Last dev->if_port value. */ /* MII transceiver section. */ int mii_cnt; /* MII device addresses. */ u16 advertising; /* NWay media advertisement */ unsigned char phys[MII_CNT]; /* MII device addresses, only first one used */ spinlock_t lock; void __iomem *base; }; static int read_eeprom(void __iomem *ioaddr, int location); static int mdio_read(void __iomem *ioaddr, int phy_id, int location); static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value); static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); static int yellowfin_open(struct net_device *dev); static void yellowfin_timer(unsigned long data); static void yellowfin_tx_timeout(struct net_device *dev); static int yellowfin_init_ring(struct net_device *dev); static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev); static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance); static int yellowfin_rx(struct net_device *dev); static void yellowfin_error(struct net_device *dev, int intr_status); static int yellowfin_close(struct net_device *dev); static void set_rx_mode(struct 
net_device *dev); static const struct ethtool_ops ethtool_ops; static const struct net_device_ops netdev_ops = { .ndo_open = yellowfin_open, .ndo_stop = yellowfin_close, .ndo_start_xmit = yellowfin_start_xmit, .ndo_set_rx_mode = set_rx_mode, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_do_ioctl = netdev_ioctl, .ndo_tx_timeout = yellowfin_tx_timeout, }; static int __devinit yellowfin_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct net_device *dev; struct yellowfin_private *np; int irq; int chip_idx = ent->driver_data; static int find_cnt; void __iomem *ioaddr; int i, option = find_cnt < MAX_UNITS ? options[find_cnt] : 0; int drv_flags = pci_id_tbl[chip_idx].drv_flags; void *ring_space; dma_addr_t ring_dma; #ifdef USE_IO_OPS int bar = 0; #else int bar = 1; #endif /* when built into the kernel, we only print version if device is found */ #ifndef MODULE static int printed_version; if (!printed_version++) printk(version); #endif i = pci_enable_device(pdev); if (i) return i; dev = alloc_etherdev(sizeof(*np)); if (!dev) return -ENOMEM; SET_NETDEV_DEV(dev, &pdev->dev); np = netdev_priv(dev); if (pci_request_regions(pdev, DRV_NAME)) goto err_out_free_netdev; pci_set_master (pdev); ioaddr = pci_iomap(pdev, bar, YELLOWFIN_SIZE); if (!ioaddr) goto err_out_free_res; irq = pdev->irq; if (drv_flags & DontUseEeprom) for (i = 0; i < 6; i++) dev->dev_addr[i] = ioread8(ioaddr + StnAddr + i); else { int ee_offset = (read_eeprom(ioaddr, 6) == 0xff ? 0x100 : 0); for (i = 0; i < 6; i++) dev->dev_addr[i] = read_eeprom(ioaddr, ee_offset + i); } /* Reset the chip. 
*/ iowrite32(0x80000000, ioaddr + DMACtrl); dev->base_addr = (unsigned long)ioaddr; dev->irq = irq; pci_set_drvdata(pdev, dev); spin_lock_init(&np->lock); np->pci_dev = pdev; np->chip_id = chip_idx; np->drv_flags = drv_flags; np->base = ioaddr; ring_space = pci_alloc_consistent(pdev, TX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_cleardev; np->tx_ring = ring_space; np->tx_ring_dma = ring_dma; ring_space = pci_alloc_consistent(pdev, RX_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_unmap_tx; np->rx_ring = ring_space; np->rx_ring_dma = ring_dma; ring_space = pci_alloc_consistent(pdev, STATUS_TOTAL_SIZE, &ring_dma); if (!ring_space) goto err_out_unmap_rx; np->tx_status = ring_space; np->tx_status_dma = ring_dma; if (dev->mem_start) option = dev->mem_start; /* The lower four bits are the media type. */ if (option > 0) { if (option & 0x200) np->full_duplex = 1; np->default_port = option & 15; if (np->default_port) np->medialock = 1; } if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0) np->full_duplex = 1; if (np->full_duplex) np->duplex_lock = 1; /* The Yellowfin-specific entries in the device structure. 
*/ dev->netdev_ops = &netdev_ops; SET_ETHTOOL_OPS(dev, &ethtool_ops); dev->watchdog_timeo = TX_TIMEOUT; if (mtu) dev->mtu = mtu; i = register_netdev(dev); if (i) goto err_out_unmap_status; netdev_info(dev, "%s type %8x at %p, %pM, IRQ %d\n", pci_id_tbl[chip_idx].name, ioread32(ioaddr + ChipRev), ioaddr, dev->dev_addr, irq); if (np->drv_flags & HasMII) { int phy, phy_idx = 0; for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) { int mii_status = mdio_read(ioaddr, phy, 1); if (mii_status != 0xffff && mii_status != 0x0000) { np->phys[phy_idx++] = phy; np->advertising = mdio_read(ioaddr, phy, 4); netdev_info(dev, "MII PHY found at address %d, status 0x%04x advertising %04x\n", phy, mii_status, np->advertising); } } np->mii_cnt = phy_idx; } find_cnt++; return 0; err_out_unmap_status: pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, np->tx_status_dma); err_out_unmap_rx: pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); err_out_unmap_tx: pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); err_out_cleardev: pci_set_drvdata(pdev, NULL); pci_iounmap(pdev, ioaddr); err_out_free_res: pci_release_regions(pdev); err_out_free_netdev: free_netdev (dev); return -ENODEV; } static int __devinit read_eeprom(void __iomem *ioaddr, int location) { int bogus_cnt = 10000; /* Typical 33Mhz: 1050 ticks */ iowrite8(location, ioaddr + EEAddr); iowrite8(0x30 | ((location >> 8) & 7), ioaddr + EECtrl); while ((ioread8(ioaddr + EEStatus) & 0x80) && --bogus_cnt > 0) ; return ioread8(ioaddr + EERead); } /* MII Managemen Data I/O accesses. These routines assume the MDIO controller is idle, and do not exit until the command is finished. 
*/ static int mdio_read(void __iomem *ioaddr, int phy_id, int location) { int i; iowrite16((phy_id<<8) + location, ioaddr + MII_Addr); iowrite16(1, ioaddr + MII_Cmd); for (i = 10000; i >= 0; i--) if ((ioread16(ioaddr + MII_Status) & 1) == 0) break; return ioread16(ioaddr + MII_Rd_Data); } static void mdio_write(void __iomem *ioaddr, int phy_id, int location, int value) { int i; iowrite16((phy_id<<8) + location, ioaddr + MII_Addr); iowrite16(value, ioaddr + MII_Wr_Data); /* Wait for the command to finish. */ for (i = 10000; i >= 0; i--) if ((ioread16(ioaddr + MII_Status) & 1) == 0) break; } static int yellowfin_open(struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); void __iomem *ioaddr = yp->base; int i, ret; /* Reset the chip. */ iowrite32(0x80000000, ioaddr + DMACtrl); ret = request_irq(dev->irq, yellowfin_interrupt, IRQF_SHARED, dev->name, dev); if (ret) return ret; if (yellowfin_debug > 1) netdev_printk(KERN_DEBUG, dev, "%s() irq %d\n", __func__, dev->irq); ret = yellowfin_init_ring(dev); if (ret) { free_irq(dev->irq, dev); return ret; } iowrite32(yp->rx_ring_dma, ioaddr + RxPtr); iowrite32(yp->tx_ring_dma, ioaddr + TxPtr); for (i = 0; i < 6; i++) iowrite8(dev->dev_addr[i], ioaddr + StnAddr + i); /* Set up various condition 'select' registers. There are no options here. */ iowrite32(0x00800080, ioaddr + TxIntrSel); /* Interrupt on Tx abort */ iowrite32(0x00800080, ioaddr + TxBranchSel); /* Branch on Tx abort */ iowrite32(0x00400040, ioaddr + TxWaitSel); /* Wait on Tx status */ iowrite32(0x00400040, ioaddr + RxIntrSel); /* Interrupt on Rx done */ iowrite32(0x00400040, ioaddr + RxBranchSel); /* Branch on Rx error */ iowrite32(0x00400040, ioaddr + RxWaitSel); /* Wait on Rx done */ /* Initialize other registers: with so many this eventually this will converted to an offset/value list. */ iowrite32(dma_ctrl, ioaddr + DMACtrl); iowrite16(fifo_cfg, ioaddr + FIFOcfg); /* Enable automatic generation of flow control frames, period 0xffff. 
*/ iowrite32(0x0030FFFF, ioaddr + FlowCtrl); yp->tx_threshold = 32; iowrite32(yp->tx_threshold, ioaddr + TxThreshold); if (dev->if_port == 0) dev->if_port = yp->default_port; netif_start_queue(dev); /* Setting the Rx mode will start the Rx process. */ if (yp->drv_flags & IsGigabit) { /* We are always in full-duplex mode with gigabit! */ yp->full_duplex = 1; iowrite16(0x01CF, ioaddr + Cnfg); } else { iowrite16(0x0018, ioaddr + FrameGap0); /* 0060/4060 for non-MII 10baseT */ iowrite16(0x1018, ioaddr + FrameGap1); iowrite16(0x101C | (yp->full_duplex ? 2 : 0), ioaddr + Cnfg); } set_rx_mode(dev); /* Enable interrupts by setting the interrupt mask. */ iowrite16(0x81ff, ioaddr + IntrEnb); /* See enum intr_status_bits */ iowrite16(0x0000, ioaddr + EventStatus); /* Clear non-interrupting events */ iowrite32(0x80008000, ioaddr + RxCtrl); /* Start Rx and Tx channels. */ iowrite32(0x80008000, ioaddr + TxCtrl); if (yellowfin_debug > 2) { netdev_printk(KERN_DEBUG, dev, "Done %s()\n", __func__); } /* Set the timer to check for link beat. */ init_timer(&yp->timer); yp->timer.expires = jiffies + 3*HZ; yp->timer.data = (unsigned long)dev; yp->timer.function = yellowfin_timer; /* timer handler */ add_timer(&yp->timer); return 0; } static void yellowfin_timer(unsigned long data) { struct net_device *dev = (struct net_device *)data; struct yellowfin_private *yp = netdev_priv(dev); void __iomem *ioaddr = yp->base; int next_tick = 60*HZ; if (yellowfin_debug > 3) { netdev_printk(KERN_DEBUG, dev, "Yellowfin timer tick, status %08x\n", ioread16(ioaddr + IntrStatus)); } if (yp->mii_cnt) { int bmsr = mdio_read(ioaddr, yp->phys[0], MII_BMSR); int lpa = mdio_read(ioaddr, yp->phys[0], MII_LPA); int negotiated = lpa & yp->advertising; if (yellowfin_debug > 1) netdev_printk(KERN_DEBUG, dev, "MII #%d status register is %04x, link partner capability %04x\n", yp->phys[0], bmsr, lpa); yp->full_duplex = mii_duplex(yp->duplex_lock, negotiated); iowrite16(0x101C | (yp->full_duplex ? 
2 : 0), ioaddr + Cnfg); if (bmsr & BMSR_LSTATUS) next_tick = 60*HZ; else next_tick = 3*HZ; } yp->timer.expires = jiffies + next_tick; add_timer(&yp->timer); } static void yellowfin_tx_timeout(struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); void __iomem *ioaddr = yp->base; netdev_warn(dev, "Yellowfin transmit timed out at %d/%d Tx status %04x, Rx status %04x, resetting...\n", yp->cur_tx, yp->dirty_tx, ioread32(ioaddr + TxStatus), ioread32(ioaddr + RxStatus)); /* Note: these should be KERN_DEBUG. */ if (yellowfin_debug) { int i; pr_warning(" Rx ring %p: ", yp->rx_ring); for (i = 0; i < RX_RING_SIZE; i++) pr_cont(" %08x", yp->rx_ring[i].result_status); pr_cont("\n"); pr_warning(" Tx ring %p: ", yp->tx_ring); for (i = 0; i < TX_RING_SIZE; i++) pr_cont(" %04x /%08x", yp->tx_status[i].tx_errs, yp->tx_ring[i].result_status); pr_cont("\n"); } /* If the hardware is found to hang regularly, we will update the code to reinitialize the chip here. */ dev->if_port = 0; /* Wake the potentially-idle transmit channel. */ iowrite32(0x10001000, yp->base + TxCtrl); if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) netif_wake_queue (dev); /* Typical path */ dev->trans_start = jiffies; /* prevent tx timeout */ dev->stats.tx_errors++; } /* Initialize the Rx and Tx rings, along with various 'dev' bits. */ static int yellowfin_init_ring(struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); int i, j; yp->tx_full = 0; yp->cur_rx = yp->cur_tx = 0; yp->dirty_tx = 0; yp->rx_buf_sz = (dev->mtu <= 1500 ? 
PKT_BUF_SZ : dev->mtu + 32); for (i = 0; i < RX_RING_SIZE; i++) { yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz); yp->rx_ring[i].branch_addr = cpu_to_le32(yp->rx_ring_dma + ((i+1)%RX_RING_SIZE)*sizeof(struct yellowfin_desc)); } for (i = 0; i < RX_RING_SIZE; i++) { struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2); yp->rx_skbuff[i] = skb; if (skb == NULL) break; skb_reserve(skb, 2); /* 16 byte align the IP header. */ yp->rx_ring[i].addr = cpu_to_le32(pci_map_single(yp->pci_dev, skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); } if (i != RX_RING_SIZE) { for (j = 0; j < i; j++) dev_kfree_skb(yp->rx_skbuff[j]); return -ENOMEM; } yp->rx_ring[i-1].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->dirty_rx = (unsigned int)(i - RX_RING_SIZE); #define NO_TXSTATS #ifdef NO_TXSTATS /* In this mode the Tx ring needs only a single descriptor. */ for (i = 0; i < TX_RING_SIZE; i++) { yp->tx_skbuff[i] = NULL; yp->tx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[i].branch_addr = cpu_to_le32(yp->tx_ring_dma + ((i+1)%TX_RING_SIZE)*sizeof(struct yellowfin_desc)); } /* Wrap ring */ yp->tx_ring[--i].dbdma_cmd = cpu_to_le32(CMD_STOP | BRANCH_ALWAYS); #else { /* Tx ring needs a pair of descriptors, the second for the status. */ for (i = 0; i < TX_RING_SIZE; i++) { j = 2*i; yp->tx_skbuff[i] = 0; /* Branch on Tx error. */ yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + (j+1)*sizeof(struct yellowfin_desc)); j++; if (yp->flags & FullTxStatus) { yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_TXSTATUS | sizeof(*yp->tx_status)); yp->tx_ring[j].request_cnt = sizeof(*yp->tx_status); yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + i*sizeof(struct tx_status_words)); } else { /* Symbios chips write only tx_errs word. */ yp->tx_ring[j].dbdma_cmd = cpu_to_le32(CMD_TXSTATUS | INTR_ALWAYS | 2); yp->tx_ring[j].request_cnt = 2; /* Om pade ummmmm... 
*/ yp->tx_ring[j].addr = cpu_to_le32(yp->tx_status_dma + i*sizeof(struct tx_status_words) + &(yp->tx_status[0].tx_errs) - &(yp->tx_status[0])); } yp->tx_ring[j].branch_addr = cpu_to_le32(yp->tx_ring_dma + ((j+1)%(2*TX_RING_SIZE))*sizeof(struct yellowfin_desc)); } /* Wrap ring */ yp->tx_ring[++j].dbdma_cmd |= cpu_to_le32(BRANCH_ALWAYS | INTR_ALWAYS); } #endif yp->tx_tail_desc = &yp->tx_status[0]; return 0; } static netdev_tx_t yellowfin_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); unsigned entry; int len = skb->len; netif_stop_queue (dev); /* Note: Ordering is important here, set the field with the "ownership" bit last, and only then increment cur_tx. */ /* Calculate the next Tx descriptor entry. */ entry = yp->cur_tx % TX_RING_SIZE; if (gx_fix) { /* Note: only works for paddable protocols e.g. IP. */ int cacheline_end = ((unsigned long)skb->data + skb->len) % 32; /* Fix GX chipset errata. */ if (cacheline_end > 24 || cacheline_end == 0) { len = skb->len + 32 - cacheline_end + 1; if (skb_padto(skb, len)) { yp->tx_skbuff[entry] = NULL; netif_wake_queue(dev); return NETDEV_TX_OK; } } } yp->tx_skbuff[entry] = skb; #ifdef NO_TXSTATS yp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, skb->data, len, PCI_DMA_TODEVICE)); yp->tx_ring[entry].result_status = 0; if (entry >= TX_RING_SIZE-1) { /* New stop command. */ yp->tx_ring[0].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[TX_RING_SIZE-1].dbdma_cmd = cpu_to_le32(CMD_TX_PKT|BRANCH_ALWAYS | len); } else { yp->tx_ring[entry+1].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->tx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_TX_PKT | BRANCH_IFTRUE | len); } yp->cur_tx++; #else yp->tx_ring[entry<<1].request_cnt = len; yp->tx_ring[entry<<1].addr = cpu_to_le32(pci_map_single(yp->pci_dev, skb->data, len, PCI_DMA_TODEVICE)); /* The input_last (status-write) command is constant, but we must rewrite the subsequent 'stop' command. 
*/ yp->cur_tx++; { unsigned next_entry = yp->cur_tx % TX_RING_SIZE; yp->tx_ring[next_entry<<1].dbdma_cmd = cpu_to_le32(CMD_STOP); } /* Final step -- overwrite the old 'stop' command. */ yp->tx_ring[entry<<1].dbdma_cmd = cpu_to_le32( ((entry % 6) == 0 ? CMD_TX_PKT|INTR_ALWAYS|BRANCH_IFTRUE : CMD_TX_PKT | BRANCH_IFTRUE) | len); #endif /* Non-x86 Todo: explicitly flush cache lines here. */ /* Wake the potentially-idle transmit channel. */ iowrite32(0x10001000, yp->base + TxCtrl); if (yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE) netif_start_queue (dev); /* Typical path */ else yp->tx_full = 1; if (yellowfin_debug > 4) { netdev_printk(KERN_DEBUG, dev, "Yellowfin transmit frame #%d queued in slot %d\n", yp->cur_tx, entry); } return NETDEV_TX_OK; } /* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */ static irqreturn_t yellowfin_interrupt(int irq, void *dev_instance) { struct net_device *dev = dev_instance; struct yellowfin_private *yp; void __iomem *ioaddr; int boguscnt = max_interrupt_work; unsigned int handled = 0; yp = netdev_priv(dev); ioaddr = yp->base; spin_lock (&yp->lock); do { u16 intr_status = ioread16(ioaddr + IntrClear); if (yellowfin_debug > 4) netdev_printk(KERN_DEBUG, dev, "Yellowfin interrupt, status %04x\n", intr_status); if (intr_status == 0) break; handled = 1; if (intr_status & (IntrRxDone | IntrEarlyRx)) { yellowfin_rx(dev); iowrite32(0x10001000, ioaddr + RxCtrl); /* Wake Rx engine. */ } #ifdef NO_TXSTATS for (; yp->cur_tx - yp->dirty_tx > 0; yp->dirty_tx++) { int entry = yp->dirty_tx % TX_RING_SIZE; struct sk_buff *skb; if (yp->tx_ring[entry].result_status == 0) break; skb = yp->tx_skbuff[entry]; dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; /* Free the original skb. 
*/ pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(skb); yp->tx_skbuff[entry] = NULL; } if (yp->tx_full && yp->cur_tx - yp->dirty_tx < TX_QUEUE_SIZE - 4) { /* The ring is no longer full, clear tbusy. */ yp->tx_full = 0; netif_wake_queue(dev); } #else if ((intr_status & IntrTxDone) || (yp->tx_tail_desc->tx_errs)) { unsigned dirty_tx = yp->dirty_tx; for (dirty_tx = yp->dirty_tx; yp->cur_tx - dirty_tx > 0; dirty_tx++) { /* Todo: optimize this. */ int entry = dirty_tx % TX_RING_SIZE; u16 tx_errs = yp->tx_status[entry].tx_errs; struct sk_buff *skb; #ifndef final_version if (yellowfin_debug > 5) netdev_printk(KERN_DEBUG, dev, "Tx queue %d check, Tx status %04x %04x %04x %04x\n", entry, yp->tx_status[entry].tx_cnt, yp->tx_status[entry].tx_errs, yp->tx_status[entry].total_tx_cnt, yp->tx_status[entry].paused); #endif if (tx_errs == 0) break; /* It still hasn't been Txed */ skb = yp->tx_skbuff[entry]; if (tx_errs & 0xF810) { /* There was an major error, log it. */ #ifndef final_version if (yellowfin_debug > 1) netdev_printk(KERN_DEBUG, dev, "Transmit error, Tx status %04x\n", tx_errs); #endif dev->stats.tx_errors++; if (tx_errs & 0xF800) dev->stats.tx_aborted_errors++; if (tx_errs & 0x0800) dev->stats.tx_carrier_errors++; if (tx_errs & 0x2000) dev->stats.tx_window_errors++; if (tx_errs & 0x8000) dev->stats.tx_fifo_errors++; } else { #ifndef final_version if (yellowfin_debug > 4) netdev_printk(KERN_DEBUG, dev, "Normal transmit, Tx status %04x\n", tx_errs); #endif dev->stats.tx_bytes += skb->len; dev->stats.collisions += tx_errs & 15; dev->stats.tx_packets++; } /* Free the original skb. */ pci_unmap_single(yp->pci_dev, yp->tx_ring[entry<<1].addr, skb->len, PCI_DMA_TODEVICE); dev_kfree_skb_irq(skb); yp->tx_skbuff[entry] = 0; /* Mark status as empty. */ yp->tx_status[entry].tx_errs = 0; } #ifndef final_version if (yp->cur_tx - dirty_tx > TX_RING_SIZE) { netdev_err(dev, "Out-of-sync dirty pointer, %d vs. 
%d, full=%d\n", dirty_tx, yp->cur_tx, yp->tx_full); dirty_tx += TX_RING_SIZE; } #endif if (yp->tx_full && yp->cur_tx - dirty_tx < TX_QUEUE_SIZE - 2) { /* The ring is no longer full, clear tbusy. */ yp->tx_full = 0; netif_wake_queue(dev); } yp->dirty_tx = dirty_tx; yp->tx_tail_desc = &yp->tx_status[dirty_tx % TX_RING_SIZE]; } #endif /* Log errors and other uncommon events. */ if (intr_status & 0x2ee) /* Abnormal error summary. */ yellowfin_error(dev, intr_status); if (--boguscnt < 0) { netdev_warn(dev, "Too much work at interrupt, status=%#04x\n", intr_status); break; } } while (1); if (yellowfin_debug > 3) netdev_printk(KERN_DEBUG, dev, "exiting interrupt, status=%#04x\n", ioread16(ioaddr + IntrStatus)); spin_unlock (&yp->lock); return IRQ_RETVAL(handled); } /* This routine is logically part of the interrupt handler, but separated for clarity and better register allocation. */ static int yellowfin_rx(struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); int entry = yp->cur_rx % RX_RING_SIZE; int boguscnt = yp->dirty_rx + RX_RING_SIZE - yp->cur_rx; if (yellowfin_debug > 4) { printk(KERN_DEBUG " In yellowfin_rx(), entry %d status %08x\n", entry, yp->rx_ring[entry].result_status); printk(KERN_DEBUG " #%d desc. %08x %08x %08x\n", entry, yp->rx_ring[entry].dbdma_cmd, yp->rx_ring[entry].addr, yp->rx_ring[entry].result_status); } /* If EOP is set on the next entry, it's a new packet. Send it up. 
*/ while (1) { struct yellowfin_desc *desc = &yp->rx_ring[entry]; struct sk_buff *rx_skb = yp->rx_skbuff[entry]; s16 frame_status; u16 desc_status; int data_size; u8 *buf_addr; if(!desc->result_status) break; pci_dma_sync_single_for_cpu(yp->pci_dev, le32_to_cpu(desc->addr), yp->rx_buf_sz, PCI_DMA_FROMDEVICE); desc_status = le32_to_cpu(desc->result_status) >> 16; buf_addr = rx_skb->data; data_size = (le32_to_cpu(desc->dbdma_cmd) - le32_to_cpu(desc->result_status)) & 0xffff; frame_status = get_unaligned_le16(&(buf_addr[data_size - 2])); if (yellowfin_debug > 4) printk(KERN_DEBUG " %s() status was %04x\n", __func__, frame_status); if (--boguscnt < 0) break; if ( ! (desc_status & RX_EOP)) { if (data_size != 0) netdev_warn(dev, "Oversized Ethernet frame spanned multiple buffers, status %04x, data_size %d!\n", desc_status, data_size); dev->stats.rx_length_errors++; } else if ((yp->drv_flags & IsGigabit) && (frame_status & 0x0038)) { /* There was a error. */ if (yellowfin_debug > 3) printk(KERN_DEBUG " %s() Rx error was %04x\n", __func__, frame_status); dev->stats.rx_errors++; if (frame_status & 0x0060) dev->stats.rx_length_errors++; if (frame_status & 0x0008) dev->stats.rx_frame_errors++; if (frame_status & 0x0010) dev->stats.rx_crc_errors++; if (frame_status < 0) dev->stats.rx_dropped++; } else if ( !(yp->drv_flags & IsGigabit) && ((buf_addr[data_size-1] & 0x85) || buf_addr[data_size-2] & 0xC0)) { u8 status1 = buf_addr[data_size-2]; u8 status2 = buf_addr[data_size-1]; dev->stats.rx_errors++; if (status1 & 0xC0) dev->stats.rx_length_errors++; if (status2 & 0x03) dev->stats.rx_frame_errors++; if (status2 & 0x04) dev->stats.rx_crc_errors++; if (status2 & 0x80) dev->stats.rx_dropped++; #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. 
*/ } else if ((yp->flags & HasMACAddrBug) && memcmp(le32_to_cpu(yp->rx_ring_dma + entry*sizeof(struct yellowfin_desc)), dev->dev_addr, 6) != 0 && memcmp(le32_to_cpu(yp->rx_ring_dma + entry*sizeof(struct yellowfin_desc)), "\377\377\377\377\377\377", 6) != 0) { if (bogus_rx++ == 0) netdev_warn(dev, "Bad frame to %pM\n", buf_addr); #endif } else { struct sk_buff *skb; int pkt_len = data_size - (yp->chip_id ? 7 : 8 + buf_addr[data_size - 8]); /* To verify: Yellowfin Length should omit the CRC! */ #ifndef final_version if (yellowfin_debug > 4) printk(KERN_DEBUG " %s() normal Rx pkt length %d of %d, bogus_cnt %d\n", __func__, pkt_len, data_size, boguscnt); #endif /* Check if the packet is long enough to just pass up the skbuff without copying to a properly sized skbuff. */ if (pkt_len > rx_copybreak) { skb_put(skb = rx_skb, pkt_len); pci_unmap_single(yp->pci_dev, le32_to_cpu(yp->rx_ring[entry].addr), yp->rx_buf_sz, PCI_DMA_FROMDEVICE); yp->rx_skbuff[entry] = NULL; } else { skb = netdev_alloc_skb(dev, pkt_len + 2); if (skb == NULL) break; skb_reserve(skb, 2); /* 16 byte align the IP header */ skb_copy_to_linear_data(skb, rx_skb->data, pkt_len); skb_put(skb, pkt_len); pci_dma_sync_single_for_device(yp->pci_dev, le32_to_cpu(desc->addr), yp->rx_buf_sz, PCI_DMA_FROMDEVICE); } skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); dev->stats.rx_packets++; dev->stats.rx_bytes += pkt_len; } entry = (++yp->cur_rx) % RX_RING_SIZE; } /* Refill the Rx ring buffers. */ for (; yp->cur_rx - yp->dirty_rx > 0; yp->dirty_rx++) { entry = yp->dirty_rx % RX_RING_SIZE; if (yp->rx_skbuff[entry] == NULL) { struct sk_buff *skb = netdev_alloc_skb(dev, yp->rx_buf_sz + 2); if (skb == NULL) break; /* Better luck next round. 
*/ yp->rx_skbuff[entry] = skb; skb_reserve(skb, 2); /* Align IP on 16 byte boundaries */ yp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(yp->pci_dev, skb->data, yp->rx_buf_sz, PCI_DMA_FROMDEVICE)); } yp->rx_ring[entry].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->rx_ring[entry].result_status = 0; /* Clear complete bit. */ if (entry != 0) yp->rx_ring[entry - 1].dbdma_cmd = cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | yp->rx_buf_sz); else yp->rx_ring[RX_RING_SIZE - 1].dbdma_cmd = cpu_to_le32(CMD_RX_BUF | INTR_ALWAYS | BRANCH_ALWAYS | yp->rx_buf_sz); } return 0; } static void yellowfin_error(struct net_device *dev, int intr_status) { netdev_err(dev, "Something Wicked happened! %04x\n", intr_status); /* Hmmmmm, it's not clear what to do here. */ if (intr_status & (IntrTxPCIErr | IntrTxPCIFault)) dev->stats.tx_errors++; if (intr_status & (IntrRxPCIErr | IntrRxPCIFault)) dev->stats.rx_errors++; } static int yellowfin_close(struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); void __iomem *ioaddr = yp->base; int i; netif_stop_queue (dev); if (yellowfin_debug > 1) { netdev_printk(KERN_DEBUG, dev, "Shutting down ethercard, status was Tx %04x Rx %04x Int %02x\n", ioread16(ioaddr + TxStatus), ioread16(ioaddr + RxStatus), ioread16(ioaddr + IntrStatus)); netdev_printk(KERN_DEBUG, dev, "Queue pointers were Tx %d / %d, Rx %d / %d\n", yp->cur_tx, yp->dirty_tx, yp->cur_rx, yp->dirty_rx); } /* Disable interrupts by clearing the interrupt mask. */ iowrite16(0x0000, ioaddr + IntrEnb); /* Stop the chip's Tx and Rx processes. */ iowrite32(0x80000000, ioaddr + RxCtrl); iowrite32(0x80000000, ioaddr + TxCtrl); del_timer(&yp->timer); #if defined(__i386__) if (yellowfin_debug > 2) { printk(KERN_DEBUG " Tx ring at %08llx:\n", (unsigned long long)yp->tx_ring_dma); for (i = 0; i < TX_RING_SIZE*2; i++) printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x %08x\n", ioread32(ioaddr + TxPtr) == (long)&yp->tx_ring[i] ? 
'>' : ' ', i, yp->tx_ring[i].dbdma_cmd, yp->tx_ring[i].addr, yp->tx_ring[i].branch_addr, yp->tx_ring[i].result_status); printk(KERN_DEBUG " Tx status %p:\n", yp->tx_status); for (i = 0; i < TX_RING_SIZE; i++) printk(KERN_DEBUG " #%d status %04x %04x %04x %04x\n", i, yp->tx_status[i].tx_cnt, yp->tx_status[i].tx_errs, yp->tx_status[i].total_tx_cnt, yp->tx_status[i].paused); printk(KERN_DEBUG " Rx ring %08llx:\n", (unsigned long long)yp->rx_ring_dma); for (i = 0; i < RX_RING_SIZE; i++) { printk(KERN_DEBUG " %c #%d desc. %08x %08x %08x\n", ioread32(ioaddr + RxPtr) == (long)&yp->rx_ring[i] ? '>' : ' ', i, yp->rx_ring[i].dbdma_cmd, yp->rx_ring[i].addr, yp->rx_ring[i].result_status); if (yellowfin_debug > 6) { if (get_unaligned((u8*)yp->rx_ring[i].addr) != 0x69) { int j; printk(KERN_DEBUG); for (j = 0; j < 0x50; j++) pr_cont(" %04x", get_unaligned(((u16*)yp->rx_ring[i].addr) + j)); pr_cont("\n"); } } } } #endif /* __i386__ debugging only */ free_irq(dev->irq, dev); /* Free all the skbuffs in the Rx queue. */ for (i = 0; i < RX_RING_SIZE; i++) { yp->rx_ring[i].dbdma_cmd = cpu_to_le32(CMD_STOP); yp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */ if (yp->rx_skbuff[i]) { dev_kfree_skb(yp->rx_skbuff[i]); } yp->rx_skbuff[i] = NULL; } for (i = 0; i < TX_RING_SIZE; i++) { if (yp->tx_skbuff[i]) dev_kfree_skb(yp->tx_skbuff[i]); yp->tx_skbuff[i] = NULL; } #ifdef YF_PROTOTYPE /* Support for prototype hardware errata. */ if (yellowfin_debug > 0) { netdev_printk(KERN_DEBUG, dev, "Received %d frames that we should not have\n", bogus_rx); } #endif return 0; } /* Set or clear the multicast filter for this adaptor. */ static void set_rx_mode(struct net_device *dev) { struct yellowfin_private *yp = netdev_priv(dev); void __iomem *ioaddr = yp->base; u16 cfg_value = ioread16(ioaddr + Cnfg); /* Stop the Rx process to change any value. */ iowrite16(cfg_value & ~0x1000, ioaddr + Cnfg); if (dev->flags & IFF_PROMISC) { /* Set promiscuous. 
*/ iowrite16(0x000F, ioaddr + AddrMode); } else if ((netdev_mc_count(dev) > 64) || (dev->flags & IFF_ALLMULTI)) { /* Too many to filter well, or accept all multicasts. */ iowrite16(0x000B, ioaddr + AddrMode); } else if (!netdev_mc_empty(dev)) { /* Must use the multicast hash table. */ struct netdev_hw_addr *ha; u16 hash_table[4]; int i; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, dev) { unsigned int bit; /* Due to a bug in the early chip versions, multiple filter slots must be set for each address. */ if (yp->drv_flags & HasMulticastBug) { bit = (ether_crc_le(3, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); bit = (ether_crc_le(4, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); bit = (ether_crc_le(5, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); } bit = (ether_crc_le(6, ha->addr) >> 3) & 0x3f; hash_table[bit >> 4] |= (1 << bit); } /* Copy the hash table to the chip. */ for (i = 0; i < 4; i++) iowrite16(hash_table[i], ioaddr + HashTbl + i*2); iowrite16(0x0003, ioaddr + AddrMode); } else { /* Normal, unicast/broadcast-only mode. */ iowrite16(0x0001, ioaddr + AddrMode); } /* Restart the Rx process. */ iowrite16(cfg_value | 0x1000, ioaddr + Cnfg); } static void yellowfin_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct yellowfin_private *np = netdev_priv(dev); strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); strcpy(info->bus_info, pci_name(np->pci_dev)); } static const struct ethtool_ops ethtool_ops = { .get_drvinfo = yellowfin_get_drvinfo }; static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) { struct yellowfin_private *np = netdev_priv(dev); void __iomem *ioaddr = np->base; struct mii_ioctl_data *data = if_mii(rq); switch(cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = np->phys[0] & 0x1f; /* Fall Through */ case SIOCGMIIREG: /* Read MII PHY register. 
*/ data->val_out = mdio_read(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f); return 0; case SIOCSMIIREG: /* Write MII PHY register. */ if (data->phy_id == np->phys[0]) { u16 value = data->val_in; switch (data->reg_num) { case 0: /* Check for autonegotiation on or reset. */ np->medialock = (value & 0x9000) ? 0 : 1; if (np->medialock) np->full_duplex = (value & 0x0100) ? 1 : 0; break; case 4: np->advertising = value; break; } /* Perhaps check_duplex(dev), depending on chip semantics. */ } mdio_write(ioaddr, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in); return 0; default: return -EOPNOTSUPP; } } static void __devexit yellowfin_remove_one (struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct yellowfin_private *np; BUG_ON(!dev); np = netdev_priv(dev); pci_free_consistent(pdev, STATUS_TOTAL_SIZE, np->tx_status, np->tx_status_dma); pci_free_consistent(pdev, RX_TOTAL_SIZE, np->rx_ring, np->rx_ring_dma); pci_free_consistent(pdev, TX_TOTAL_SIZE, np->tx_ring, np->tx_ring_dma); unregister_netdev (dev); pci_iounmap(pdev, np->base); pci_release_regions (pdev); free_netdev (dev); pci_set_drvdata(pdev, NULL); } static struct pci_driver yellowfin_driver = { .name = DRV_NAME, .id_table = yellowfin_pci_tbl, .probe = yellowfin_init_one, .remove = __devexit_p(yellowfin_remove_one), }; static int __init yellowfin_init (void) { /* when a module, this is printed whether or not devices are found in probe */ #ifdef MODULE printk(version); #endif return pci_register_driver(&yellowfin_driver); } static void __exit yellowfin_cleanup (void) { pci_unregister_driver (&yellowfin_driver); } module_init(yellowfin_init); module_exit(yellowfin_cleanup);
gpl-2.0
sub77/SM-T530_KK_Opensource
drivers/net/ethernet/qlogic/netxen/netxen_nic_ctx.c
4811
24113
/*
 * Copyright (C) 2003 - 2009 NetXen, Inc.
 * Copyright (C) 2009 - QLogic Corporation.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston,
 * MA  02111-1307, USA.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called "COPYING".
 *
 */

#include "netxen_nic_hw.h"
#include "netxen_nic.h"

#define NXHAL_VERSION	1

/* Poll the CDRP response CRB until the firmware posts a response or
 * NX_OS_CRB_RETRY_COUNT milliseconds elapse. */
static u32
netxen_poll_rsp(struct netxen_adapter *adapter)
{
	u32 rsp = NX_CDRP_RSP_OK;
	int timeout = 0;

	do {
		/* give at least 1ms for firmware to respond */
		msleep(1);

		if (++timeout > NX_OS_CRB_RETRY_COUNT)
			return NX_CDRP_RSP_TIMEOUT;

		rsp = NXRD32(adapter, NX_CDRP_CRB_OFFSET);
	} while (!NX_CDRP_IS_RSP(rsp));

	return rsp;
}

/* Issue one CDRP command to the firmware under the API semaphore.
 * On entry, non-zero cmd->rsp.argN fields act as flags requesting that
 * the corresponding response CRB be read back into cmd->rsp.argN.
 * Returns NX_RCODE_SUCCESS, NX_RCODE_TIMEOUT, or the firmware's failure
 * code. */
static u32
netxen_issue_cmd(struct netxen_adapter *adapter, struct netxen_cmd_args *cmd)
{
	u32 rsp;
	u32 signature = 0;
	u32 rcode = NX_RCODE_SUCCESS;

	signature = NX_CDRP_SIGNATURE_MAKE(adapter->ahw.pci_func,
						NXHAL_VERSION);
	/* Acquire semaphore before accessing CRB */
	if (netxen_api_lock(adapter))
		return NX_RCODE_TIMEOUT;

	NXWR32(adapter, NX_SIGN_CRB_OFFSET, signature);
	NXWR32(adapter, NX_ARG1_CRB_OFFSET, cmd->req.arg1);
	NXWR32(adapter, NX_ARG2_CRB_OFFSET, cmd->req.arg2);
	NXWR32(adapter, NX_ARG3_CRB_OFFSET, cmd->req.arg3);
	NXWR32(adapter, NX_CDRP_CRB_OFFSET, NX_CDRP_FORM_CMD(cmd->req.cmd));

	rsp = netxen_poll_rsp(adapter);

	if (rsp == NX_CDRP_RSP_TIMEOUT) {
		printk(KERN_ERR "%s: card response timeout.\n",
				netxen_nic_driver_name);
		rcode = NX_RCODE_TIMEOUT;
	} else if (rsp == NX_CDRP_RSP_FAIL) {
		/* On failure ARG1 carries the firmware error code. */
		rcode = NXRD32(adapter, NX_ARG1_CRB_OFFSET);
		printk(KERN_ERR "%s: failed card response code:0x%x\n",
				netxen_nic_driver_name, rcode);
	} else if (rsp == NX_CDRP_RSP_OK) {
		cmd->rsp.cmd = NX_RCODE_SUCCESS;
		if (cmd->rsp.arg2)
			cmd->rsp.arg2 = NXRD32(adapter, NX_ARG2_CRB_OFFSET);
		if (cmd->rsp.arg3)
			cmd->rsp.arg3 = NXRD32(adapter, NX_ARG3_CRB_OFFSET);
	}

	if (cmd->rsp.arg1)
		cmd->rsp.arg1 = NXRD32(adapter, NX_ARG1_CRB_OFFSET);

	/* Release semaphore */
	netxen_api_unlock(adapter);

	return rcode;
}

/* Ask firmware for the minidump template size/version and cache them in
 * adapter->mdump.  Returns 0 or -EIO. */
static int
netxen_get_minidump_template_size(struct netxen_adapter *adapter)
{
	struct netxen_cmd_args cmd;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = NX_CDRP_CMD_TEMP_SIZE;
	/* Non-zero rsp fields request read-back of ARG1..ARG3. */
	memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
	netxen_issue_cmd(adapter, &cmd);
	if (cmd.rsp.cmd != NX_RCODE_SUCCESS) {
		dev_info(&adapter->pdev->dev,
			"Can't get template size %d\n", cmd.rsp.cmd);
		return -EIO;
	}
	adapter->mdump.md_template_size = cmd.rsp.arg2;
	adapter->mdump.md_template_ver = cmd.rsp.arg3;
	return 0;
}

/* Fetch the minidump template from firmware via a DMA-able bounce buffer
 * and copy it into adapter->mdump.md_template.
 * Fix: previously this returned 0 even when the firmware command failed,
 * leaving the caller to proceed with an uninitialized template buffer;
 * it now returns -EIO on that path. */
static int
netxen_get_minidump_template(struct netxen_adapter *adapter)
{
	dma_addr_t md_template_addr;
	void *addr;
	u32 size;
	int err = 0;
	struct netxen_cmd_args cmd;
	size = adapter->mdump.md_template_size;

	if (size == 0) {
		dev_err(&adapter->pdev->dev, "Can not capture Minidump "
			"template. Invalid template size.\n");
		return NX_RCODE_INVALID_ARGS;
	}

	addr = pci_alloc_consistent(adapter->pdev, size, &md_template_addr);

	if (!addr) {
		dev_err(&adapter->pdev->dev, "Unable to allocate dmable memory for template.\n");
		return -ENOMEM;
	}

	memset(addr, 0, size);
	memset(&cmd, 0, sizeof(cmd));
	memset(&cmd.rsp, 1, sizeof(struct _cdrp_cmd));
	cmd.req.cmd = NX_CDRP_CMD_GET_TEMP_HDR;
	cmd.req.arg1 = LSD(md_template_addr);
	cmd.req.arg2 = MSD(md_template_addr);
	cmd.req.arg3 |= size;
	netxen_issue_cmd(adapter, &cmd);

	if ((cmd.rsp.cmd == NX_RCODE_SUCCESS) && (size == cmd.rsp.arg2)) {
		memcpy(adapter->mdump.md_template, addr, size);
	} else {
		dev_err(&adapter->pdev->dev, "Failed to get minidump template, "
			"err_code : %d, requested_size : %d, actual_size : %d\n ",
			cmd.rsp.cmd, size, cmd.rsp.arg2);
		err = -EIO;
	}
	pci_free_consistent(adapter->pdev, size, addr, md_template_addr);
	return err;
}

/* Fold the template words into a 32-bit one's-complement checksum;
 * a valid template sums to 0 (so this returns 0 on success). */
static u32
netxen_check_template_checksum(struct netxen_adapter *adapter)
{
	u64 sum = 0;
	u32 *buff = adapter->mdump.md_template;
	int count = adapter->mdump.md_template_size/sizeof(uint32_t);

	while (count-- > 0)
		sum += *buff++;
	/* Fold carries back into the low 32 bits. */
	while (sum >> 32)
		sum = (sum & 0xFFFFFFFF) + (sum >> 32);

	return ~sum;
}

/* Query, download and validate the firmware minidump template, then
 * byte-swap it in place and enable minidump support. */
int
netxen_setup_minidump(struct netxen_adapter *adapter)
{
	int err = 0, i;
	u32 *template, *tmp_buf;
	struct netxen_minidump_template_hdr *hdr;
	err = netxen_get_minidump_template_size(adapter);
	if (err) {
		adapter->mdump.fw_supports_md = 0;
		/* NOTE(review): err is -EIO here while NX_RCODE_* codes are
		 * compared below — this check matches the original code but
		 * looks dead; confirm against firmware error conventions. */
		if ((err == NX_RCODE_CMD_INVALID) ||
			(err == NX_RCODE_CMD_NOT_IMPL)) {
			dev_info(&adapter->pdev->dev,
				"Flashed firmware version does not support minidump, "
				"minimum version required is [ %u.%u.%u ].\n ",
				NX_MD_SUPPORT_MAJOR, NX_MD_SUPPORT_MINOR,
				NX_MD_SUPPORT_SUBVERSION);
		}
		return err;
	}

	if (!adapter->mdump.md_template_size) {
		dev_err(&adapter->pdev->dev, "Error : Invalid template size "
		",should be non-zero.\n");
		return -EIO;
	}
	adapter->mdump.md_template =
		kmalloc(adapter->mdump.md_template_size, GFP_KERNEL);

	if (!adapter->mdump.md_template) {
		dev_err(&adapter->pdev->dev, "Unable to allocate memory "
			"for minidump template.\n");
		return -ENOMEM;
	}

	err = netxen_get_minidump_template(adapter);
	if (err) {
		if (err == NX_RCODE_CMD_NOT_IMPL)
			adapter->mdump.fw_supports_md = 0;
		goto free_template;
	}

	if (netxen_check_template_checksum(adapter)) {
		dev_err(&adapter->pdev->dev, "Minidump template checksum Error\n");
		err = -EIO;
		goto free_template;
	}

	adapter->mdump.md_capture_mask = NX_DUMP_MASK_DEF;
	/* Convert the template from little-endian to host order in place. */
	tmp_buf = (u32 *) adapter->mdump.md_template;
	template = (u32 *) adapter->mdump.md_template;
	for (i = 0; i < adapter->mdump.md_template_size/sizeof(u32); i++)
		*template++ = __le32_to_cpu(*tmp_buf++);
	hdr = (struct netxen_minidump_template_hdr *)
				adapter->mdump.md_template;
	adapter->mdump.md_capture_buff = NULL;
	adapter->mdump.fw_supports_md = 1;
	adapter->mdump.md_enabled = 1;

	return err;

free_template:
	kfree(adapter->mdump.md_template);
	adapter->mdump.md_template = NULL;
	return err;
}

/* Tell firmware the new MTU for the active Rx context.
 * Fix: the result of netxen_issue_cmd() was previously discarded, so
 * rcode stayed NX_RCODE_SUCCESS and the error check below was dead;
 * the return value is now captured. */
int
nx_fw_cmd_set_mtu(struct netxen_adapter *adapter, int mtu)
{
	u32 rcode = NX_RCODE_SUCCESS;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = NX_CDRP_CMD_SET_MTU;
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = mtu;
	cmd.req.arg3 = 0;

	if (recv_ctx->state == NX_HOST_CTX_STATE_ACTIVE)
		rcode = netxen_issue_cmd(adapter, &cmd);

	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}

/* Configure speed/duplex/autoneg on a GbE port via firmware. */
int
nx_fw_cmd_set_gbe_port(struct netxen_adapter *adapter,
			u32 speed, u32 duplex, u32 autoneg)
{
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.cmd = NX_CDRP_CMD_CONFIG_GBE_PORT;
	cmd.req.arg1 = speed;
	cmd.req.arg2 = duplex;
	cmd.req.arg3 = autoneg;
	return netxen_issue_cmd(adapter, &cmd);
}

/* Build the host Rx-context request (RDS + SDS ring descriptors) in DMA
 * memory, hand it to firmware, and map the producer/consumer/interrupt
 * CRBs returned in the card's response. */
static int
nx_fw_cmd_create_rx_ctx(struct netxen_adapter *adapter)
{
	void *addr;
	nx_hostrq_rx_ctx_t *prq;
	nx_cardrsp_rx_ctx_t *prsp;
	nx_hostrq_rds_ring_t *prq_rds;
	nx_hostrq_sds_ring_t *prq_sds;
	nx_cardrsp_rds_ring_t *prsp_rds;
	nx_cardrsp_sds_ring_t *prsp_sds;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct netxen_cmd_args cmd;

	dma_addr_t hostrq_phys_addr, cardrsp_phys_addr;
	u64 phys_addr;

	int i, nrds_rings, nsds_rings;
	size_t rq_size, rsp_size;
	u32 cap, reg, val;

	int err;

	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;

	nrds_rings = adapter->max_rds_rings;
	nsds_rings = adapter->max_sds_rings;

	rq_size =
		SIZEOF_HOSTRQ_RX(nx_hostrq_rx_ctx_t, nrds_rings, nsds_rings);
	rsp_size =
		SIZEOF_CARDRSP_RX(nx_cardrsp_rx_ctx_t, nrds_rings, nsds_rings);

	addr = pci_alloc_consistent(adapter->pdev,
				rq_size, &hostrq_phys_addr);
	if (addr == NULL)
		return -ENOMEM;
	prq = addr;

	addr = pci_alloc_consistent(adapter->pdev,
			rsp_size, &cardrsp_phys_addr);
	if (addr == NULL) {
		err = -ENOMEM;
		goto out_free_rq;
	}
	prsp = addr;

	prq->host_rsp_dma_addr = cpu_to_le64(cardrsp_phys_addr);

	cap = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN);
	cap |= (NX_CAP0_JUMBO_CONTIGUOUS | NX_CAP0_LRO_CONTIGUOUS);

	prq->capabilities[0] = cpu_to_le32(cap);
	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);
	prq->host_rds_crb_mode =
		cpu_to_le32(NX_HOST_RDS_CRB_MODE_UNIQUE);

	prq->num_rds_rings = cpu_to_le16(nrds_rings);
	prq->num_sds_rings = cpu_to_le16(nsds_rings);
	prq->rds_ring_offset = cpu_to_le32(0);

	/* SDS descriptors follow the RDS descriptors in prq->data. */
	val = le32_to_cpu(prq->rds_ring_offset) +
		(sizeof(nx_hostrq_rds_ring_t) * nrds_rings);
	prq->sds_ring_offset = cpu_to_le32(val);

	prq_rds = (nx_hostrq_rds_ring_t *)(prq->data +
			le32_to_cpu(prq->rds_ring_offset));

	for (i = 0; i < nrds_rings; i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		prq_rds[i].host_phys_addr = cpu_to_le64(rds_ring->phys_addr);
		prq_rds[i].ring_size = cpu_to_le32(rds_ring->num_desc);
		prq_rds[i].ring_kind = cpu_to_le32(i);
		prq_rds[i].buff_size = cpu_to_le64(rds_ring->dma_size);
	}

	prq_sds = (nx_hostrq_sds_ring_t *)(prq->data +
			le32_to_cpu(prq->sds_ring_offset));

	for (i = 0; i < nsds_rings; i++) {
		sds_ring = &recv_ctx->sds_rings[i];
		prq_sds[i].host_phys_addr = cpu_to_le64(sds_ring->phys_addr);
		prq_sds[i].ring_size = cpu_to_le32(sds_ring->num_desc);
		prq_sds[i].msi_index = cpu_to_le16(i);
	}

	phys_addr = hostrq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = (u32)(phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = NX_CDRP_CMD_CREATE_RX_CTX;
	err = netxen_issue_cmd(adapter, &cmd);
	if (err) {
		printk(KERN_WARNING
			"Failed to create rx ctx in firmware%d\n", err);
		goto out_free_rsp;
	}

	/* The firmware returns CRB offsets; translate them to ioaddrs.
	 * The 0x200 bias converts the absolute CRB offset into the
	 * NETXEN_NIC_REG() register window. */
	prsp_rds = ((nx_cardrsp_rds_ring_t *)
			 &prsp->data[le32_to_cpu(prsp->rds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_rds_rings); i++) {
		rds_ring = &recv_ctx->rds_rings[i];

		reg = le32_to_cpu(prsp_rds[i].host_producer_crb);
		rds_ring->crb_rcv_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	prsp_sds = ((nx_cardrsp_sds_ring_t *)
			&prsp->data[le32_to_cpu(prsp->sds_ring_offset)]);

	for (i = 0; i < le16_to_cpu(prsp->num_sds_rings); i++) {
		sds_ring = &recv_ctx->sds_rings[i];

		reg = le32_to_cpu(prsp_sds[i].host_consumer_crb);
		sds_ring->crb_sts_consumer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));

		reg = le32_to_cpu(prsp_sds[i].interrupt_crb);
		sds_ring->crb_intr_mask = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(reg - 0x200));
	}

	recv_ctx->state = le32_to_cpu(prsp->host_ctx_state);
	recv_ctx->context_id = le16_to_cpu(prsp->context_id);
	recv_ctx->virt_port = prsp->virt_port;

out_free_rsp:
	pci_free_consistent(adapter->pdev, rsp_size, prsp, cardrsp_phys_addr);
out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, prq, hostrq_phys_addr);
	return err;
}

/* Ask firmware to tear down the Rx context (reset semantics). */
static void
nx_fw_cmd_destroy_rx_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = recv_ctx->context_id;
	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_DESTROY_RX_CTX;

	if (netxen_issue_cmd(adapter, &cmd)) {
		printk(KERN_WARNING
			"%s: Failed to destroy rx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}

/* Build the host Tx-context request, issue it to firmware, and record
 * the command producer CRB and context id from the response. */
static int
nx_fw_cmd_create_tx_ctx(struct netxen_adapter *adapter)
{
	nx_hostrq_tx_ctx_t	*prq;
	nx_hostrq_cds_ring_t	*prq_cds;
	nx_cardrsp_tx_ctx_t	*prsp;
	void	*rq_addr, *rsp_addr;
	size_t	rq_size, rsp_size;
	u32	temp;
	int	err = 0;
	u64	offset, phys_addr;
	dma_addr_t	rq_phys_addr, rsp_phys_addr;
	struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
	struct netxen_recv_context *recv_ctx = &adapter->recv_ctx;
	struct netxen_cmd_args cmd;

	rq_size = SIZEOF_HOSTRQ_TX(nx_hostrq_tx_ctx_t);
	rq_addr = pci_alloc_consistent(adapter->pdev,
		rq_size, &rq_phys_addr);
	if (!rq_addr)
		return -ENOMEM;

	rsp_size = SIZEOF_CARDRSP_TX(nx_cardrsp_tx_ctx_t);
	rsp_addr = pci_alloc_consistent(adapter->pdev,
		rsp_size, &rsp_phys_addr);
	if (!rsp_addr) {
		err = -ENOMEM;
		goto out_free_rq;
	}

	memset(rq_addr, 0, rq_size);
	prq = rq_addr;

	memset(rsp_addr, 0, rsp_size);
	prsp = rsp_addr;

	prq->host_rsp_dma_addr = cpu_to_le64(rsp_phys_addr);

	temp = (NX_CAP0_LEGACY_CONTEXT | NX_CAP0_LEGACY_MN | NX_CAP0_LSO);
	prq->capabilities[0] = cpu_to_le32(temp);

	prq->host_int_crb_mode =
		cpu_to_le32(NX_HOST_INT_CRB_MODE_SHARED);

	prq->interrupt_ctl = 0;
	prq->msi_index = 0;

	prq->dummy_dma_addr = cpu_to_le64(adapter->dummy_dma.phys_addr);

	/* The Tx consumer index lives right behind the Rx hw context. */
	offset = recv_ctx->phys_addr + sizeof(struct netxen_ring_ctx);
	prq->cmd_cons_dma_addr = cpu_to_le64(offset);

	prq_cds = &prq->cds_ring;

	prq_cds->host_phys_addr = cpu_to_le64(tx_ring->phys_addr);
	prq_cds->ring_size = cpu_to_le32(tx_ring->num_desc);

	phys_addr = rq_phys_addr;
	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = (u32)(phys_addr >> 32);
	cmd.req.arg2 = ((u32)phys_addr & 0xffffffff);
	cmd.req.arg3 = rq_size;
	cmd.req.cmd = NX_CDRP_CMD_CREATE_TX_CTX;
	err = netxen_issue_cmd(adapter, &cmd);

	if (err == NX_RCODE_SUCCESS) {
		temp = le32_to_cpu(prsp->cds_ring.host_producer_crb);
		tx_ring->crb_cmd_producer = netxen_get_ioaddr(adapter,
				NETXEN_NIC_REG(temp - 0x200));
#if 0
		adapter->tx_state = le32_to_cpu(prsp->host_ctx_state);
#endif
		adapter->tx_context_id =
			le16_to_cpu(prsp->context_id);
	} else {
		printk(KERN_WARNING
			"Failed to create tx ctx in firmware%d\n", err);
		err = -EIO;
	}

	pci_free_consistent(adapter->pdev, rsp_size, rsp_addr, rsp_phys_addr);

out_free_rq:
	pci_free_consistent(adapter->pdev, rq_size, rq_addr, rq_phys_addr);

	return err;
}

/* Ask firmware to tear down the Tx context (reset semantics). */
static void
nx_fw_cmd_destroy_tx_ctx(struct netxen_adapter *adapter)
{
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = adapter->tx_context_id;
	cmd.req.arg2 = NX_DESTROY_CTX_RESET;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_DESTROY_TX_CTX;

	if (netxen_issue_cmd(adapter, &cmd)) {
		printk(KERN_WARNING
			"%s: Failed to destroy tx ctx in firmware\n",
			netxen_nic_driver_name);
	}
}

/* Read a PHY register through firmware; *val receives the value. */
int
nx_fw_cmd_query_phy(struct netxen_adapter *adapter, u32 reg, u32 *val)
{
	u32 rcode;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = reg;
	cmd.req.arg2 = 0;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_READ_PHY;
	cmd.rsp.arg1 = 1;	/* request read-back of ARG1 */
	rcode = netxen_issue_cmd(adapter, &cmd);
	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	if (val == NULL)
		return -EIO;

	*val = cmd.rsp.arg1;
	return 0;
}

/* Write a PHY register through firmware. */
int
nx_fw_cmd_set_phy(struct netxen_adapter *adapter, u32 reg, u32 val)
{
	u32 rcode;
	struct netxen_cmd_args cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.req.arg1 = reg;
	cmd.req.arg2 = val;
	cmd.req.arg3 = 0;
	cmd.req.cmd = NX_CDRP_CMD_WRITE_PHY;
	rcode = netxen_issue_cmd(adapter, &cmd);
	if (rcode != NX_RCODE_SUCCESS)
		return -EIO;

	return 0;
}

/* Per-PCI-function CRB registers used by the legacy (P2) context setup:
 * [0] = context address low, [1] = signature, [2] = context address high. */
static u64 ctx_addr_sig_regs[][3] = {
	{NETXEN_NIC_REG(0x188), NETXEN_NIC_REG(0x18c), NETXEN_NIC_REG(0x1c0)},
	{NETXEN_NIC_REG(0x190), NETXEN_NIC_REG(0x194), NETXEN_NIC_REG(0x1c4)},
	{NETXEN_NIC_REG(0x198), NETXEN_NIC_REG(0x19c), NETXEN_NIC_REG(0x1c8)},
	{NETXEN_NIC_REG(0x1a0), NETXEN_NIC_REG(0x1a4), NETXEN_NIC_REG(0x1cc)}
};

#define CRB_CTX_ADDR_REG_LO(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][0])
#define CRB_CTX_ADDR_REG_HI(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][2])
#define CRB_CTX_SIGNATURE_REG(FUNC_ID)	(ctx_addr_sig_regs[FUNC_ID][1])

#define lower32(x)	((u32)((x) & 0xffffffff))
#define upper32(x)	((u32)(((u64)(x) >> 32) & 0xffffffff))

/* Legacy (P2) per-instance receive CRB register map. */
static struct netxen_recv_crb recv_crb_registers[] = {
	/* Instance 0 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x100),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x110),
			/* LRO */
			NETXEN_NIC_REG(0x120)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x138),
			NETXEN_NIC_REG_2(0x000),
			NETXEN_NIC_REG_2(0x004),
			NETXEN_NIC_REG_2(0x008),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_0,
			NETXEN_NIC_REG_2(0x044),
			NETXEN_NIC_REG_2(0x048),
			NETXEN_NIC_REG_2(0x04c),
		},
	},
	/* Instance 1 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x144),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x154),
			/* LRO */
			NETXEN_NIC_REG(0x164)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x17c),
			NETXEN_NIC_REG_2(0x020),
			NETXEN_NIC_REG_2(0x024),
			NETXEN_NIC_REG_2(0x028),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_1,
			NETXEN_NIC_REG_2(0x064),
			NETXEN_NIC_REG_2(0x068),
			NETXEN_NIC_REG_2(0x06c),
		},
	},
	/* Instance 2 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x1d8),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x1f8),
			/* LRO */
			NETXEN_NIC_REG(0x208)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x220),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_2,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
	/* Instance 3 */
	{
		/* crb_rcv_producer: */
		{
			NETXEN_NIC_REG(0x22c),
			/* Jumbo frames */
			NETXEN_NIC_REG(0x23c),
			/* LRO */
			NETXEN_NIC_REG(0x24c)
		},
		/* crb_sts_consumer: */
		{
			NETXEN_NIC_REG(0x264),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
		/* sw_int_mask */
		{
			CRB_SW_INT_MASK_3,
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
			NETXEN_NIC_REG_2(0x03c),
		},
	},
};

/* Legacy (P2) context setup: fill the shared hw context structure and
 * publish its DMA address + signature through the per-function CRBs. */
static int
netxen_init_old_ctx(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;
	int port = adapter->portnum;
	struct netxen_ring_ctx *hwctx;
	u32 signature;

	tx_ring = adapter->tx_ring;
	recv_ctx = &adapter->recv_ctx;
	hwctx = recv_ctx->hwctx;

	hwctx->cmd_ring_addr = cpu_to_le64(tx_ring->phys_addr);
	hwctx->cmd_ring_size = cpu_to_le32(tx_ring->num_desc);

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		hwctx->rcv_rings[ring].addr =
			cpu_to_le64(rds_ring->phys_addr);
		hwctx->rcv_rings[ring].size =
			cpu_to_le32(rds_ring->num_desc);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (ring == 0) {
			hwctx->sts_ring_addr = cpu_to_le64(sds_ring->phys_addr);
			hwctx->sts_ring_size = cpu_to_le32(sds_ring->num_desc);
		}
		hwctx->sts_rings[ring].addr = cpu_to_le64(sds_ring->phys_addr);
		hwctx->sts_rings[ring].size = cpu_to_le32(sds_ring->num_desc);
		hwctx->sts_rings[ring].msi_index = cpu_to_le16(ring);
	}
	hwctx->sts_ring_count = cpu_to_le32(adapter->max_sds_rings);

	signature = (adapter->max_sds_rings > 1) ?
		NETXEN_CTX_SIGNATURE_V2 : NETXEN_CTX_SIGNATURE;

	NXWR32(adapter, CRB_CTX_ADDR_REG_LO(port),
			lower32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_ADDR_REG_HI(port),
			upper32(recv_ctx->phys_addr));
	NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
			signature | port);
	return 0;
}

/* Allocate all DMA descriptor memory (hw context, Tx ring, RDS and SDS
 * rings) and create the firmware contexts (P3) or the legacy context
 * (P2).  On any failure everything already allocated is freed. */
int netxen_alloc_hw_resources(struct netxen_adapter *adapter)
{
	void *addr;
	int err = 0;
	int ring;
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	struct pci_dev *pdev = adapter->pdev;
	struct net_device *netdev = adapter->netdev;
	int port = adapter->portnum;

	recv_ctx = &adapter->recv_ctx;
	tx_ring = adapter->tx_ring;

	/* Extra uint32_t behind the context holds the Tx hw consumer. */
	addr = pci_alloc_consistent(pdev,
			sizeof(struct netxen_ring_ctx) + sizeof(uint32_t),
			&recv_ctx->phys_addr);
	if (addr == NULL) {
		dev_err(&pdev->dev, "failed to allocate hw context\n");
		return -ENOMEM;
	}

	memset(addr, 0, sizeof(struct netxen_ring_ctx));
	recv_ctx->hwctx = addr;
	recv_ctx->hwctx->ctx_id = cpu_to_le32(port);
	recv_ctx->hwctx->cmd_consumer_offset =
		cpu_to_le64(recv_ctx->phys_addr +
			sizeof(struct netxen_ring_ctx));
	tx_ring->hw_consumer =
		(__le32 *)(((char *)addr) + sizeof(struct netxen_ring_ctx));

	/* cmd desc ring */
	addr = pci_alloc_consistent(pdev, TX_DESC_RINGSIZE(tx_ring),
			&tx_ring->phys_addr);

	if (addr == NULL) {
		dev_err(&pdev->dev, "%s: failed to allocate tx desc ring\n",
				netdev->name);
		err = -ENOMEM;
		goto err_out_free;
	}

	tx_ring->desc_head = addr;

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];
		addr = pci_alloc_consistent(adapter->pdev,
				RCV_DESC_RINGSIZE(rds_ring),
				&rds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate rds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		rds_ring->desc_head = addr;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
			rds_ring->crb_rcv_producer =
				netxen_get_ioaddr(adapter,
			recv_crb_registers[port].crb_rcv_producer[ring]);
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		addr = pci_alloc_consistent(adapter->pdev,
				STATUS_DESC_RINGSIZE(sds_ring),
				&sds_ring->phys_addr);
		if (addr == NULL) {
			dev_err(&pdev->dev,
				"%s: failed to allocate sds ring [%d]\n",
				netdev->name, ring);
			err = -ENOMEM;
			goto err_out_free;
		}
		sds_ring->desc_head = addr;

		if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
			sds_ring->crb_sts_consumer =
				netxen_get_ioaddr(adapter,
				recv_crb_registers[port].crb_sts_consumer[ring]);

			sds_ring->crb_intr_mask =
				netxen_get_ioaddr(adapter,
				recv_crb_registers[port].sw_int_mask[ring]);
		}
	}


	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		/* Contexts survive across netdev open/close; only create
		 * them the first time. */
		if (test_and_set_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;
		err = nx_fw_cmd_create_rx_ctx(adapter);
		if (err)
			goto err_out_free;
		err = nx_fw_cmd_create_tx_ctx(adapter);
		if (err)
			goto err_out_free;
	} else {
		err = netxen_init_old_ctx(adapter);
		if (err)
			goto err_out_free;
	}

done:
	return 0;

err_out_free:
	netxen_free_hw_resources(adapter);
	return err;
}

/* Destroy the firmware/legacy contexts and free every descriptor ring
 * allocated by netxen_alloc_hw_resources(). */
void netxen_free_hw_resources(struct netxen_adapter *adapter)
{
	struct netxen_recv_context *recv_ctx;
	struct nx_host_rds_ring *rds_ring;
	struct nx_host_sds_ring *sds_ring;
	struct nx_host_tx_ring *tx_ring;
	int ring;

	int port = adapter->portnum;

	if (!NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
		if (!test_and_clear_bit(__NX_FW_ATTACHED, &adapter->state))
			goto done;

		nx_fw_cmd_destroy_rx_ctx(adapter);
		nx_fw_cmd_destroy_tx_ctx(adapter);
	} else {
		netxen_api_lock(adapter);
		NXWR32(adapter, CRB_CTX_SIGNATURE_REG(port),
				NETXEN_CTX_D3_RESET | port);
		netxen_api_unlock(adapter);
	}

	/* Allow dma queues to drain after context reset */
	msleep(20);

done:
	recv_ctx = &adapter->recv_ctx;

	if (recv_ctx->hwctx != NULL) {
		pci_free_consistent(adapter->pdev,
				sizeof(struct netxen_ring_ctx) +
				sizeof(uint32_t),
				recv_ctx->hwctx,
				recv_ctx->phys_addr);
		recv_ctx->hwctx = NULL;
	}

	tx_ring = adapter->tx_ring;
	if (tx_ring->desc_head != NULL) {
		pci_free_consistent(adapter->pdev,
				TX_DESC_RINGSIZE(tx_ring),
				tx_ring->desc_head, tx_ring->phys_addr);
		tx_ring->desc_head = NULL;
	}

	for (ring = 0; ring < adapter->max_rds_rings; ring++) {
		rds_ring = &recv_ctx->rds_rings[ring];

		if (rds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
					RCV_DESC_RINGSIZE(rds_ring),
					rds_ring->desc_head,
					rds_ring->phys_addr);
			rds_ring->desc_head = NULL;
		}
	}

	for (ring = 0; ring < adapter->max_sds_rings; ring++) {
		sds_ring = &recv_ctx->sds_rings[ring];

		if (sds_ring->desc_head != NULL) {
			pci_free_consistent(adapter->pdev,
				STATUS_DESC_RINGSIZE(sds_ring),
				sds_ring->desc_head,
				sds_ring->phys_addr);
			sds_ring->desc_head = NULL;
		}
	}
}
gpl-2.0