repo_name
string
path
string
copies
string
size
string
content
string
license
string
shutt1e/lge_kernel_msm7x30
sound/usb/usx2y/usbusx2y.c
4127
13768
/* * usbusy2y.c - ALSA USB US-428 Driver * 2005-04-14 Karsten Wiese Version 0.8.7.2: Call snd_card_free() instead of snd_card_free_in_thread() to prevent oops with dead keyboard symptom. Tested ok with kernel 2.6.12-rc2. 2004-12-14 Karsten Wiese Version 0.8.7.1: snd_pcm_open for rawusb pcm-devices now returns -EBUSY if called without rawusb's hwdep device being open. 2004-12-02 Karsten Wiese Version 0.8.7: Use macro usb_maxpacket() for portability. 2004-10-26 Karsten Wiese Version 0.8.6: wake_up() process waiting in usX2Y_urbs_start() on error. 2004-10-21 Karsten Wiese Version 0.8.5: nrpacks is runtime or compiletime configurable now with tested values from 1 to 4. 2004-10-03 Karsten Wiese Version 0.8.2: Avoid any possible racing while in prepare callback. 2004-09-30 Karsten Wiese Version 0.8.0: Simplified things and made ohci work again. 2004-09-20 Karsten Wiese Version 0.7.3: Use usb_kill_urb() instead of deprecated (kernel 2.6.9) usb_unlink_urb(). 2004-07-13 Karsten Wiese Version 0.7.1: Don't sleep in START/STOP callbacks anymore. us428 channels C/D not handled just for this version, sorry. 2004-06-21 Karsten Wiese Version 0.6.4: Temporarely suspend midi input to sanely call usb_set_interface() when setting format. 2004-06-12 Karsten Wiese Version 0.6.3: Made it thus the following rule is enforced: "All pcm substreams of one usX2Y have to operate at the same rate & format." 2004-04-06 Karsten Wiese Version 0.6.0: Runs on 2.6.5 kernel without any "--with-debug=" things. us224 reported running. 2004-01-14 Karsten Wiese Version 0.5.1: Runs with 2.6.1 kernel. 2003-12-30 Karsten Wiese Version 0.4.1: Fix 24Bit 4Channel capturing for the us428. 2003-11-27 Karsten Wiese, Martin Langer Version 0.4: us122 support. us224 could be tested by uncommenting the sections containing USB_ID_US224 2003-11-03 Karsten Wiese Version 0.3: 24Bit support. "arecord -D hw:1 -c 2 -r 48000 -M -f S24_3LE|aplay -D hw:1 -c 2 -r 48000 -M -f S24_3LE" works. 
2003-08-22 Karsten Wiese Version 0.0.8: Removed EZUSB Firmware. First Stage Firmwaredownload is now done by tascam-firmware downloader. See: http://usb-midi-fw.sourceforge.net/tascam-firmware.tar.gz 2003-06-18 Karsten Wiese Version 0.0.5: changed to compile with kernel 2.4.21 and alsa 0.9.4 2002-10-16 Karsten Wiese Version 0.0.4: compiles again with alsa-current. USB_ISO_ASAP not used anymore (most of the time), instead urb->start_frame is calculated here now, some calls inside usb-driver don't need to happen anymore. To get the best out of this: Disable APM-support in the kernel as APM-BIOS calls (once each second) hard disable interrupt for many precious milliseconds. This helped me much on my slowish PII 400 & PIII 500. ACPI yet untested but might cause the same bad behaviour. Use a kernel with lowlatency and preemptiv patches applied. To autoload snd-usb-midi append a line post-install snd-usb-us428 modprobe snd-usb-midi to /etc/modules.conf. known problems: sliders, knobs, lights not yet handled except MASTER Volume slider. "pcm -c 2" doesn't work. "pcm -c 2 -m direct_interleaved" does. KDE3: "Enable full duplex operation" deadlocks. 2002-08-31 Karsten Wiese Version 0.0.3: audio also simplex; simplifying: iso urbs only 1 packet, melted structs. ASYNC_UNLINK not used anymore: no more crashes so far..... for alsa 0.9 rc3. 2002-08-09 Karsten Wiese Version 0.0.2: midi works with snd-usb-midi, audio (only fullduplex now) with i.e. bristol. The firmware has been sniffed from win2k us-428 driver 3.09. * Copyright (c) 2002 - 2004 Karsten Wiese * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. 
* * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/usb.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/rawmidi.h> #include "usx2y.h" #include "usbusx2y.h" #include "usX2Yhwdep.h" MODULE_AUTHOR("Karsten Wiese <annabellesgarden@yahoo.de>"); MODULE_DESCRIPTION("TASCAM "NAME_ALLCAPS" Version 0.8.7.2"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{TASCAM(0x1604), "NAME_ALLCAPS"(0x8001)(0x8005)(0x8007) }}"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-max */ static char* id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* Id for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* Enable this card */ module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for "NAME_ALLCAPS"."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for "NAME_ALLCAPS"."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable "NAME_ALLCAPS"."); static int snd_usX2Y_card_used[SNDRV_CARDS]; static void usX2Y_usb_disconnect(struct usb_device* usb_device, void* ptr); static void snd_usX2Y_card_private_free(struct snd_card *card); /* * pipe 4 is used for switching the lamps, setting samplerate, volumes .... 
*/ static void i_usX2Y_Out04Int(struct urb *urb) { #ifdef CONFIG_SND_DEBUG if (urb->status) { int i; struct usX2Ydev *usX2Y = urb->context; for (i = 0; i < 10 && usX2Y->AS04.urb[i] != urb; i++); snd_printdd("i_usX2Y_Out04Int() urb %i status=%i\n", i, urb->status); } #endif } static void i_usX2Y_In04Int(struct urb *urb) { int err = 0; struct usX2Ydev *usX2Y = urb->context; struct us428ctls_sharedmem *us428ctls = usX2Y->us428ctls_sharedmem; usX2Y->In04IntCalls++; if (urb->status) { snd_printdd("Interrupt Pipe 4 came back with status=%i\n", urb->status); return; } // printk("%i:0x%02X ", 8, (int)((unsigned char*)usX2Y->In04Buf)[8]); Master volume shows 0 here if fader is at max during boot ?!? if (us428ctls) { int diff = -1; if (-2 == us428ctls->CtlSnapShotLast) { diff = 0; memcpy(usX2Y->In04Last, usX2Y->In04Buf, sizeof(usX2Y->In04Last)); us428ctls->CtlSnapShotLast = -1; } else { int i; for (i = 0; i < 21; i++) { if (usX2Y->In04Last[i] != ((char*)usX2Y->In04Buf)[i]) { if (diff < 0) diff = i; usX2Y->In04Last[i] = ((char*)usX2Y->In04Buf)[i]; } } } if (0 <= diff) { int n = us428ctls->CtlSnapShotLast + 1; if (n >= N_us428_ctl_BUFS || n < 0) n = 0; memcpy(us428ctls->CtlSnapShot + n, usX2Y->In04Buf, sizeof(us428ctls->CtlSnapShot[0])); us428ctls->CtlSnapShotDiffersAt[n] = diff; us428ctls->CtlSnapShotLast = n; wake_up(&usX2Y->us428ctls_wait_queue_head); } } if (usX2Y->US04) { if (0 == usX2Y->US04->submitted) do { err = usb_submit_urb(usX2Y->US04->urb[usX2Y->US04->submitted++], GFP_ATOMIC); } while (!err && usX2Y->US04->submitted < usX2Y->US04->len); } else if (us428ctls && us428ctls->p4outLast >= 0 && us428ctls->p4outLast < N_us428_p4out_BUFS) { if (us428ctls->p4outLast != us428ctls->p4outSent) { int j, send = us428ctls->p4outSent + 1; if (send >= N_us428_p4out_BUFS) send = 0; for (j = 0; j < URBS_AsyncSeq && !err; ++j) if (0 == usX2Y->AS04.urb[j]->status) { struct us428_p4out *p4out = us428ctls->p4out + send; // FIXME if more than 1 p4out is new, 1 gets lost. 
usb_fill_bulk_urb(usX2Y->AS04.urb[j], usX2Y->dev, usb_sndbulkpipe(usX2Y->dev, 0x04), &p4out->val.vol, p4out->type == eLT_Light ? sizeof(struct us428_lights) : 5, i_usX2Y_Out04Int, usX2Y); err = usb_submit_urb(usX2Y->AS04.urb[j], GFP_ATOMIC); us428ctls->p4outSent = send; break; } } } if (err) snd_printk(KERN_ERR "In04Int() usb_submit_urb err=%i\n", err); urb->dev = usX2Y->dev; usb_submit_urb(urb, GFP_ATOMIC); } /* * Prepare some urbs */ int usX2Y_AsyncSeq04_init(struct usX2Ydev *usX2Y) { int err = 0, i; if (NULL == (usX2Y->AS04.buffer = kmalloc(URB_DataLen_AsyncSeq*URBS_AsyncSeq, GFP_KERNEL))) { err = -ENOMEM; } else for (i = 0; i < URBS_AsyncSeq; ++i) { if (NULL == (usX2Y->AS04.urb[i] = usb_alloc_urb(0, GFP_KERNEL))) { err = -ENOMEM; break; } usb_fill_bulk_urb( usX2Y->AS04.urb[i], usX2Y->dev, usb_sndbulkpipe(usX2Y->dev, 0x04), usX2Y->AS04.buffer + URB_DataLen_AsyncSeq*i, 0, i_usX2Y_Out04Int, usX2Y ); } return err; } int usX2Y_In04_init(struct usX2Ydev *usX2Y) { if (! (usX2Y->In04urb = usb_alloc_urb(0, GFP_KERNEL))) return -ENOMEM; if (! 
(usX2Y->In04Buf = kmalloc(21, GFP_KERNEL))) { usb_free_urb(usX2Y->In04urb); return -ENOMEM; } init_waitqueue_head(&usX2Y->In04WaitQueue); usb_fill_int_urb(usX2Y->In04urb, usX2Y->dev, usb_rcvintpipe(usX2Y->dev, 0x4), usX2Y->In04Buf, 21, i_usX2Y_In04Int, usX2Y, 10); return usb_submit_urb(usX2Y->In04urb, GFP_KERNEL); } static void usX2Y_unlinkSeq(struct snd_usX2Y_AsyncSeq *S) { int i; for (i = 0; i < URBS_AsyncSeq; ++i) { if (S[i].urb) { usb_kill_urb(S->urb[i]); usb_free_urb(S->urb[i]); S->urb[i] = NULL; } } kfree(S->buffer); } static struct usb_device_id snd_usX2Y_usb_id_table[] = { { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US428 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US122 }, { .match_flags = USB_DEVICE_ID_MATCH_DEVICE, .idVendor = 0x1604, .idProduct = USB_ID_US224 }, { /* terminator */ } }; static int usX2Y_create_card(struct usb_device *device, struct snd_card **cardp) { int dev; struct snd_card * card; int err; for (dev = 0; dev < SNDRV_CARDS; ++dev) if (enable[dev] && !snd_usX2Y_card_used[dev]) break; if (dev >= SNDRV_CARDS) return -ENODEV; err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct usX2Ydev), &card); if (err < 0) return err; snd_usX2Y_card_used[usX2Y(card)->card_index = dev] = 1; card->private_free = snd_usX2Y_card_private_free; usX2Y(card)->dev = device; init_waitqueue_head(&usX2Y(card)->prepare_wait_queue); mutex_init(&usX2Y(card)->prepare_mutex); INIT_LIST_HEAD(&usX2Y(card)->midi_list); strcpy(card->driver, "USB "NAME_ALLCAPS""); sprintf(card->shortname, "TASCAM "NAME_ALLCAPS""); sprintf(card->longname, "%s (%x:%x if %d at %03d/%03d)", card->shortname, le16_to_cpu(device->descriptor.idVendor), le16_to_cpu(device->descriptor.idProduct), 0,//us428(card)->usbmidi.ifnum, usX2Y(card)->dev->bus->busnum, usX2Y(card)->dev->devnum ); *cardp = card; return 0; } static int usX2Y_usb_probe(struct usb_device *device, struct usb_interface *intf, const 
struct usb_device_id *device_id, struct snd_card **cardp) { int err; struct snd_card * card; *cardp = NULL; if (le16_to_cpu(device->descriptor.idVendor) != 0x1604 || (le16_to_cpu(device->descriptor.idProduct) != USB_ID_US122 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US224 && le16_to_cpu(device->descriptor.idProduct) != USB_ID_US428)) return -EINVAL; err = usX2Y_create_card(device, &card); if (err < 0) return err; snd_card_set_dev(card, &intf->dev); if ((err = usX2Y_hwdep_new(card, device)) < 0 || (err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } *cardp = card; return 0; } /* * new 2.5 USB kernel API */ static int snd_usX2Y_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct snd_card *card; int err; err = usX2Y_usb_probe(interface_to_usbdev(intf), intf, id, &card); if (err < 0) return err; dev_set_drvdata(&intf->dev, card); return 0; } static void snd_usX2Y_disconnect(struct usb_interface *intf) { usX2Y_usb_disconnect(interface_to_usbdev(intf), usb_get_intfdata(intf)); } MODULE_DEVICE_TABLE(usb, snd_usX2Y_usb_id_table); static struct usb_driver snd_usX2Y_usb_driver = { .name = "snd-usb-usx2y", .probe = snd_usX2Y_probe, .disconnect = snd_usX2Y_disconnect, .id_table = snd_usX2Y_usb_id_table, }; static void snd_usX2Y_card_private_free(struct snd_card *card) { kfree(usX2Y(card)->In04Buf); usb_free_urb(usX2Y(card)->In04urb); if (usX2Y(card)->us428ctls_sharedmem) snd_free_pages(usX2Y(card)->us428ctls_sharedmem, sizeof(*usX2Y(card)->us428ctls_sharedmem)); if (usX2Y(card)->card_index >= 0 && usX2Y(card)->card_index < SNDRV_CARDS) snd_usX2Y_card_used[usX2Y(card)->card_index] = 0; } /* * Frees the device. 
*/ static void usX2Y_usb_disconnect(struct usb_device *device, void* ptr) { if (ptr) { struct snd_card *card = ptr; struct usX2Ydev *usX2Y = usX2Y(card); struct list_head *p; usX2Y->chip_status = USX2Y_STAT_CHIP_HUP; usX2Y_unlinkSeq(&usX2Y->AS04); usb_kill_urb(usX2Y->In04urb); snd_card_disconnect(card); /* release the midi resources */ list_for_each(p, &usX2Y->midi_list) { snd_usbmidi_disconnect(p); } if (usX2Y->us428ctls_sharedmem) wake_up(&usX2Y->us428ctls_wait_queue_head); snd_card_free(card); } } static int __init snd_usX2Y_module_init(void) { return usb_register(&snd_usX2Y_usb_driver); } static void __exit snd_usX2Y_module_exit(void) { usb_deregister(&snd_usX2Y_usb_driver); } module_init(snd_usX2Y_module_init) module_exit(snd_usX2Y_module_exit)
gpl-2.0
iconia-dev/android_kernel_acer_t20-common
sound/synth/emux/emux_seq.c
4639
8931
/* * Midi Sequencer interface routines. * * Copyright (C) 1999 Steve Ratcliffe * Copyright (c) 1999-2000 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "emux_voice.h" #include <linux/slab.h> /* Prototypes for static functions */ static void free_port(void *private); static void snd_emux_init_port(struct snd_emux_port *p); static int snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info); static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info); /* * MIDI emulation operators */ static struct snd_midi_op emux_ops = { snd_emux_note_on, snd_emux_note_off, snd_emux_key_press, snd_emux_terminate_note, snd_emux_control, snd_emux_nrpn, snd_emux_sysex, }; /* * number of MIDI channels */ #define MIDI_CHANNELS 16 /* * type flags for MIDI sequencer port */ #define DEFAULT_MIDI_TYPE (SNDRV_SEQ_PORT_TYPE_MIDI_GENERIC |\ SNDRV_SEQ_PORT_TYPE_MIDI_GM |\ SNDRV_SEQ_PORT_TYPE_MIDI_GS |\ SNDRV_SEQ_PORT_TYPE_MIDI_XG |\ SNDRV_SEQ_PORT_TYPE_HARDWARE |\ SNDRV_SEQ_PORT_TYPE_SYNTHESIZER) /* * Initialise the EMUX Synth by creating a client and registering * a series of ports. * Each of the ports will contain the 16 midi channels. Applications * can connect to these ports to play midi data. 
*/ int snd_emux_init_seq(struct snd_emux *emu, struct snd_card *card, int index) { int i; struct snd_seq_port_callback pinfo; char tmpname[64]; emu->client = snd_seq_create_kernel_client(card, index, "%s WaveTable", emu->name); if (emu->client < 0) { snd_printk(KERN_ERR "can't create client\n"); return -ENODEV; } if (emu->num_ports < 0) { snd_printk(KERN_WARNING "seqports must be greater than zero\n"); emu->num_ports = 1; } else if (emu->num_ports >= SNDRV_EMUX_MAX_PORTS) { snd_printk(KERN_WARNING "too many ports." "limited max. ports %d\n", SNDRV_EMUX_MAX_PORTS); emu->num_ports = SNDRV_EMUX_MAX_PORTS; } memset(&pinfo, 0, sizeof(pinfo)); pinfo.owner = THIS_MODULE; pinfo.use = snd_emux_use; pinfo.unuse = snd_emux_unuse; pinfo.event_input = snd_emux_event_input; for (i = 0; i < emu->num_ports; i++) { struct snd_emux_port *p; sprintf(tmpname, "%s Port %d", emu->name, i); p = snd_emux_create_port(emu, tmpname, MIDI_CHANNELS, 0, &pinfo); if (p == NULL) { snd_printk(KERN_ERR "can't create port\n"); return -ENOMEM; } p->port_mode = SNDRV_EMUX_PORT_MODE_MIDI; snd_emux_init_port(p); emu->ports[i] = p->chset.port; emu->portptrs[i] = p; } return 0; } /* * Detach from the ports that were set up for this synthesizer and * destroy the kernel client. 
*/ void snd_emux_detach_seq(struct snd_emux *emu) { if (emu->voices) snd_emux_terminate_all(emu); mutex_lock(&emu->register_mutex); if (emu->client >= 0) { snd_seq_delete_kernel_client(emu->client); emu->client = -1; } mutex_unlock(&emu->register_mutex); } /* * create a sequencer port and channel_set */ struct snd_emux_port * snd_emux_create_port(struct snd_emux *emu, char *name, int max_channels, int oss_port, struct snd_seq_port_callback *callback) { struct snd_emux_port *p; int i, type, cap; /* Allocate structures for this channel */ if ((p = kzalloc(sizeof(*p), GFP_KERNEL)) == NULL) { snd_printk(KERN_ERR "no memory\n"); return NULL; } p->chset.channels = kcalloc(max_channels, sizeof(struct snd_midi_channel), GFP_KERNEL); if (p->chset.channels == NULL) { snd_printk(KERN_ERR "no memory\n"); kfree(p); return NULL; } for (i = 0; i < max_channels; i++) p->chset.channels[i].number = i; p->chset.private_data = p; p->chset.max_channels = max_channels; p->emu = emu; p->chset.client = emu->client; #ifdef SNDRV_EMUX_USE_RAW_EFFECT snd_emux_create_effect(p); #endif callback->private_free = free_port; callback->private_data = p; cap = SNDRV_SEQ_PORT_CAP_WRITE; if (oss_port) { type = SNDRV_SEQ_PORT_TYPE_SPECIFIC; } else { type = DEFAULT_MIDI_TYPE; cap |= SNDRV_SEQ_PORT_CAP_SUBS_WRITE; } p->chset.port = snd_seq_event_port_attach(emu->client, callback, cap, type, max_channels, emu->max_voices, name); return p; } /* * release memory block for port */ static void free_port(void *private_data) { struct snd_emux_port *p; p = private_data; if (p) { #ifdef SNDRV_EMUX_USE_RAW_EFFECT snd_emux_delete_effect(p); #endif kfree(p->chset.channels); kfree(p); } } #define DEFAULT_DRUM_FLAGS (1<<9) /* * initialize the port specific parameters */ static void snd_emux_init_port(struct snd_emux_port *p) { p->drum_flags = DEFAULT_DRUM_FLAGS; p->volume_atten = 0; snd_emux_reset_port(p); } /* * reset port */ void snd_emux_reset_port(struct snd_emux_port *port) { int i; /* stop all sounds */ 
snd_emux_sounds_off_all(port); snd_midi_channel_set_clear(&port->chset); #ifdef SNDRV_EMUX_USE_RAW_EFFECT snd_emux_clear_effect(port); #endif /* set port specific control parameters */ port->ctrls[EMUX_MD_DEF_BANK] = 0; port->ctrls[EMUX_MD_DEF_DRUM] = 0; port->ctrls[EMUX_MD_REALTIME_PAN] = 1; for (i = 0; i < port->chset.max_channels; i++) { struct snd_midi_channel *chan = port->chset.channels + i; chan->drum_channel = ((port->drum_flags >> i) & 1) ? 1 : 0; } } /* * input sequencer event */ int snd_emux_event_input(struct snd_seq_event *ev, int direct, void *private_data, int atomic, int hop) { struct snd_emux_port *port; port = private_data; if (snd_BUG_ON(!port || !ev)) return -EINVAL; snd_midi_process_event(&emux_ops, ev, &port->chset); return 0; } /* * increment usage count */ int snd_emux_inc_count(struct snd_emux *emu) { emu->used++; if (!try_module_get(emu->ops.owner)) goto __error; if (!try_module_get(emu->card->module)) { module_put(emu->ops.owner); __error: emu->used--; return 0; } return 1; } /* * decrease usage count */ void snd_emux_dec_count(struct snd_emux *emu) { module_put(emu->card->module); emu->used--; if (emu->used <= 0) snd_emux_terminate_all(emu); module_put(emu->ops.owner); } /* * Routine that is called upon a first use of a particular port */ static int snd_emux_use(void *private_data, struct snd_seq_port_subscribe *info) { struct snd_emux_port *p; struct snd_emux *emu; p = private_data; if (snd_BUG_ON(!p)) return -EINVAL; emu = p->emu; if (snd_BUG_ON(!emu)) return -EINVAL; mutex_lock(&emu->register_mutex); snd_emux_init_port(p); snd_emux_inc_count(emu); mutex_unlock(&emu->register_mutex); return 0; } /* * Routine that is called upon the last unuse() of a particular port. 
*/ static int snd_emux_unuse(void *private_data, struct snd_seq_port_subscribe *info) { struct snd_emux_port *p; struct snd_emux *emu; p = private_data; if (snd_BUG_ON(!p)) return -EINVAL; emu = p->emu; if (snd_BUG_ON(!emu)) return -EINVAL; mutex_lock(&emu->register_mutex); snd_emux_sounds_off_all(p); snd_emux_dec_count(emu); mutex_unlock(&emu->register_mutex); return 0; } /* * attach virtual rawmidi devices */ int snd_emux_init_virmidi(struct snd_emux *emu, struct snd_card *card) { int i; emu->vmidi = NULL; if (emu->midi_ports <= 0) return 0; emu->vmidi = kcalloc(emu->midi_ports, sizeof(struct snd_rawmidi *), GFP_KERNEL); if (emu->vmidi == NULL) return -ENOMEM; for (i = 0; i < emu->midi_ports; i++) { struct snd_rawmidi *rmidi; struct snd_virmidi_dev *rdev; if (snd_virmidi_new(card, emu->midi_devidx + i, &rmidi) < 0) goto __error; rdev = rmidi->private_data; sprintf(rmidi->name, "%s Synth MIDI", emu->name); rdev->seq_mode = SNDRV_VIRMIDI_SEQ_ATTACH; rdev->client = emu->client; rdev->port = emu->ports[i]; if (snd_device_register(card, rmidi) < 0) { snd_device_free(card, rmidi); goto __error; } emu->vmidi[i] = rmidi; /* snd_printk(KERN_DEBUG "virmidi %d ok\n", i); */ } return 0; __error: /* snd_printk(KERN_DEBUG "error init..\n"); */ snd_emux_delete_virmidi(emu); return -ENOMEM; } int snd_emux_delete_virmidi(struct snd_emux *emu) { int i; if (emu->vmidi == NULL) return 0; for (i = 0; i < emu->midi_ports; i++) { if (emu->vmidi[i]) snd_device_free(emu->card, emu->vmidi[i]); } kfree(emu->vmidi); emu->vmidi = NULL; return 0; }
gpl-2.0
sandeshghimire/xlnx-3.17
drivers/scsi/isci/remote_node_context.c
4639
25675
/* * This file is provided under a dual BSD/GPLv2 license. When using or * redistributing this file, you may do so under either license. * * GPL LICENSE SUMMARY * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. * The full GNU General Public License is included in this distribution * in the file called LICENSE.GPL. * * BSD LICENSE * * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in * the documentation and/or other materials provided with the * distribution. * * Neither the name of Intel Corporation nor the names of its * contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <scsi/sas_ata.h> #include "host.h" #include "isci.h" #include "remote_device.h" #include "remote_node_context.h" #include "scu_event_codes.h" #include "scu_task_context.h" #undef C #define C(a) (#a) const char *rnc_state_name(enum scis_sds_remote_node_context_states state) { static const char * const strings[] = RNC_STATES; return strings[state]; } #undef C /** * * @sci_rnc: The state of the remote node context object to check. * * This method will return true if the remote node context is in a READY state * otherwise it will return false bool true if the remote node context is in * the ready state. false if the remote node context is not in the ready state. 
*/ bool sci_remote_node_context_is_ready( struct sci_remote_node_context *sci_rnc) { u32 current_state = sci_rnc->sm.current_state_id; if (current_state == SCI_RNC_READY) { return true; } return false; } bool sci_remote_node_context_is_suspended(struct sci_remote_node_context *sci_rnc) { u32 current_state = sci_rnc->sm.current_state_id; if (current_state == SCI_RNC_TX_RX_SUSPENDED) return true; return false; } static union scu_remote_node_context *sci_rnc_by_id(struct isci_host *ihost, u16 id) { if (id < ihost->remote_node_entries && ihost->device_table[id]) return &ihost->remote_node_context_table[id]; return NULL; } static void sci_remote_node_context_construct_buffer(struct sci_remote_node_context *sci_rnc) { struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; int rni = sci_rnc->remote_node_index; union scu_remote_node_context *rnc; struct isci_host *ihost; __le64 sas_addr; ihost = idev->owning_port->owning_controller; rnc = sci_rnc_by_id(ihost, rni); memset(rnc, 0, sizeof(union scu_remote_node_context) * sci_remote_device_node_count(idev)); rnc->ssp.remote_node_index = rni; rnc->ssp.remote_node_port_width = idev->device_port_width; rnc->ssp.logical_port_index = idev->owning_port->physical_port_index; /* sas address is __be64, context ram format is __le64 */ sas_addr = cpu_to_le64(SAS_ADDR(dev->sas_addr)); rnc->ssp.remote_sas_address_hi = upper_32_bits(sas_addr); rnc->ssp.remote_sas_address_lo = lower_32_bits(sas_addr); rnc->ssp.nexus_loss_timer_enable = true; rnc->ssp.check_bit = false; rnc->ssp.is_valid = false; rnc->ssp.is_remote_node_context = true; rnc->ssp.function_number = 0; rnc->ssp.arbitration_wait_time = 0; if (dev_is_sata(dev)) { rnc->ssp.connection_occupancy_timeout = ihost->user_parameters.stp_max_occupancy_timeout; rnc->ssp.connection_inactivity_timeout = ihost->user_parameters.stp_inactivity_timeout; } else { rnc->ssp.connection_occupancy_timeout = ihost->user_parameters.ssp_max_occupancy_timeout; 
rnc->ssp.connection_inactivity_timeout = ihost->user_parameters.ssp_inactivity_timeout; } rnc->ssp.initial_arbitration_wait_time = 0; /* Open Address Frame Parameters */ rnc->ssp.oaf_connection_rate = idev->connection_rate; rnc->ssp.oaf_features = 0; rnc->ssp.oaf_source_zone_group = 0; rnc->ssp.oaf_more_compatibility_features = 0; } /** * * @sci_rnc: * @callback: * @callback_parameter: * * This method will setup the remote node context object so it will transition * to its ready state. If the remote node context is already setup to * transition to its final state then this function does nothing. none */ static void sci_remote_node_context_setup_to_resume( struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter, enum sci_remote_node_context_destination_state dest_param) { if (sci_rnc->destination_state != RNC_DEST_FINAL) { sci_rnc->destination_state = dest_param; if (callback != NULL) { sci_rnc->user_callback = callback; sci_rnc->user_cookie = callback_parameter; } } } static void sci_remote_node_context_setup_to_destroy( struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback callback, void *callback_parameter) { struct isci_host *ihost = idev_to_ihost(rnc_to_dev(sci_rnc)); sci_rnc->destination_state = RNC_DEST_FINAL; sci_rnc->user_callback = callback; sci_rnc->user_cookie = callback_parameter; wake_up(&ihost->eventq); } /** * * * This method just calls the user callback function and then resets the * callback. */ static void sci_remote_node_context_notify_user( struct sci_remote_node_context *rnc) { if (rnc->user_callback != NULL) { (*rnc->user_callback)(rnc->user_cookie); rnc->user_callback = NULL; rnc->user_cookie = NULL; } } static void sci_remote_node_context_continue_state_transitions(struct sci_remote_node_context *rnc) { switch (rnc->destination_state) { case RNC_DEST_READY: case RNC_DEST_SUSPENDED_RESUME: rnc->destination_state = RNC_DEST_READY; /* Fall through... 
*/ case RNC_DEST_FINAL: sci_remote_node_context_resume(rnc, rnc->user_callback, rnc->user_cookie); break; default: rnc->destination_state = RNC_DEST_UNSPECIFIED; break; } } static void sci_remote_node_context_validate_context_buffer(struct sci_remote_node_context *sci_rnc) { union scu_remote_node_context *rnc_buffer; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct domain_device *dev = idev->domain_dev; struct isci_host *ihost = idev->owning_port->owning_controller; rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); rnc_buffer->ssp.is_valid = true; if (dev_is_sata(dev) && dev->parent) { sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_96); } else { sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_32); if (!dev->parent) sci_port_setup_transports(idev->owning_port, sci_rnc->remote_node_index); } } static void sci_remote_node_context_invalidate_context_buffer(struct sci_remote_node_context *sci_rnc) { union scu_remote_node_context *rnc_buffer; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); struct isci_host *ihost = idev->owning_port->owning_controller; rnc_buffer = sci_rnc_by_id(ihost, sci_rnc->remote_node_index); rnc_buffer->ssp.is_valid = false; sci_remote_device_post_request(rnc_to_dev(sci_rnc), SCU_CONTEXT_COMMAND_POST_RNC_INVALIDATE); } static void sci_remote_node_context_initial_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev = rnc_to_dev(rnc); struct isci_host *ihost = idev->owning_port->owning_controller; /* Check to see if we have gotten back to the initial state because * someone requested to destroy the remote node context object. 
*/ if (sm->previous_state_id == SCI_RNC_INVALIDATING) { rnc->destination_state = RNC_DEST_UNSPECIFIED; sci_remote_node_context_notify_user(rnc); smp_wmb(); wake_up(&ihost->eventq); } } static void sci_remote_node_context_posting_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *sci_rnc = container_of(sm, typeof(*sci_rnc), sm); sci_remote_node_context_validate_context_buffer(sci_rnc); } static void sci_remote_node_context_invalidating_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); /* Terminate all outstanding requests. */ sci_remote_device_terminate_requests(rnc_to_dev(rnc)); sci_remote_node_context_invalidate_context_buffer(rnc); } static void sci_remote_node_context_resuming_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev; struct domain_device *dev; idev = rnc_to_dev(rnc); dev = idev->domain_dev; /* * For direct attached SATA devices we need to clear the TLCR * NCQ to TCi tag mapping on the phy and in cases where we * resume because of a target reset we also need to update * the STPTLDARNI register with the RNi of the device */ if (dev_is_sata(dev) && !dev->parent) sci_port_setup_transports(idev->owning_port, rnc->remote_node_index); sci_remote_device_post_request(idev, SCU_CONTEXT_COMMAND_POST_RNC_RESUME); } static void sci_remote_node_context_ready_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); enum sci_remote_node_context_destination_state dest_select; int tell_user = 1; dest_select = rnc->destination_state; rnc->destination_state = RNC_DEST_UNSPECIFIED; if ((dest_select == RNC_DEST_SUSPENDED) || (dest_select == RNC_DEST_SUSPENDED_RESUME)) { sci_remote_node_context_suspend( rnc, rnc->suspend_reason, SCI_SOFTWARE_SUSPEND_EXPECTED_EVENT); if (dest_select == 
RNC_DEST_SUSPENDED_RESUME) tell_user = 0; /* Wait until ready again. */ } if (tell_user) sci_remote_node_context_notify_user(rnc); } static void sci_remote_node_context_tx_suspended_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); sci_remote_node_context_continue_state_transitions(rnc); } static void sci_remote_node_context_tx_rx_suspended_state_enter(struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev = rnc_to_dev(rnc); struct isci_host *ihost = idev->owning_port->owning_controller; u32 new_count = rnc->suspend_count + 1; if (new_count == 0) rnc->suspend_count = 1; else rnc->suspend_count = new_count; smp_wmb(); /* Terminate outstanding requests pending abort. */ sci_remote_device_abort_requests_pending_abort(idev); wake_up(&ihost->eventq); sci_remote_node_context_continue_state_transitions(rnc); } static void sci_remote_node_context_await_suspend_state_exit( struct sci_base_state_machine *sm) { struct sci_remote_node_context *rnc = container_of(sm, typeof(*rnc), sm); struct isci_remote_device *idev = rnc_to_dev(rnc); if (dev_is_sata(idev->domain_dev)) isci_dev_set_hang_detection_timeout(idev, 0); } static const struct sci_base_state sci_remote_node_context_state_table[] = { [SCI_RNC_INITIAL] = { .enter_state = sci_remote_node_context_initial_state_enter, }, [SCI_RNC_POSTING] = { .enter_state = sci_remote_node_context_posting_state_enter, }, [SCI_RNC_INVALIDATING] = { .enter_state = sci_remote_node_context_invalidating_state_enter, }, [SCI_RNC_RESUMING] = { .enter_state = sci_remote_node_context_resuming_state_enter, }, [SCI_RNC_READY] = { .enter_state = sci_remote_node_context_ready_state_enter, }, [SCI_RNC_TX_SUSPENDED] = { .enter_state = sci_remote_node_context_tx_suspended_state_enter, }, [SCI_RNC_TX_RX_SUSPENDED] = { .enter_state = sci_remote_node_context_tx_rx_suspended_state_enter, }, 
[SCI_RNC_AWAIT_SUSPENSION] = { .exit_state = sci_remote_node_context_await_suspend_state_exit, }, }; void sci_remote_node_context_construct(struct sci_remote_node_context *rnc, u16 remote_node_index) { memset(rnc, 0, sizeof(struct sci_remote_node_context)); rnc->remote_node_index = remote_node_index; rnc->destination_state = RNC_DEST_UNSPECIFIED; sci_init_sm(&rnc->sm, sci_remote_node_context_state_table, SCI_RNC_INITIAL); } enum sci_status sci_remote_node_context_event_handler(struct sci_remote_node_context *sci_rnc, u32 event_code) { enum scis_sds_remote_node_context_states state; u32 next_state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_POSTING: switch (scu_get_event_code(event_code)) { case SCU_EVENT_POST_RNC_COMPLETE: sci_change_state(&sci_rnc->sm, SCI_RNC_READY); break; default: goto out; } break; case SCI_RNC_INVALIDATING: if (scu_get_event_code(event_code) == SCU_EVENT_POST_RNC_INVALIDATE_COMPLETE) { if (sci_rnc->destination_state == RNC_DEST_FINAL) next_state = SCI_RNC_INITIAL; else next_state = SCI_RNC_POSTING; sci_change_state(&sci_rnc->sm, next_state); } else { switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: /* We really dont care if the hardware is going to suspend * the device since it's being invalidated anyway */ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was " "suspeneded by hardware while being " "invalidated.\n", __func__, sci_rnc); break; default: goto out; } } break; case SCI_RNC_RESUMING: if (scu_get_event_code(event_code) == SCU_EVENT_POST_RCN_RELEASE) { sci_change_state(&sci_rnc->sm, SCI_RNC_READY); } else { switch (scu_get_event_type(event_code)) { case SCU_EVENT_TYPE_RNC_SUSPEND_TX: case SCU_EVENT_TYPE_RNC_SUSPEND_TX_RX: /* We really dont care if the hardware is going to suspend * the device since it's being resumed anyway */ dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: SCIC Remote Node Context 0x%p was 
" "suspeneded by hardware while being resumed.\n", __func__, sci_rnc); break; default: goto out; } } break; case SCI_RNC_READY: switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: sci_change_state(&sci_rnc->sm, SCI_RNC_TX_SUSPENDED); sci_rnc->suspend_type = scu_get_event_type(event_code); break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: sci_change_state(&sci_rnc->sm, SCI_RNC_TX_RX_SUSPENDED); sci_rnc->suspend_type = scu_get_event_type(event_code); break; default: goto out; } break; case SCI_RNC_AWAIT_SUSPENSION: switch (scu_get_event_type(event_code)) { case SCU_EVENT_TL_RNC_SUSPEND_TX: next_state = SCI_RNC_TX_SUSPENDED; break; case SCU_EVENT_TL_RNC_SUSPEND_TX_RX: next_state = SCI_RNC_TX_RX_SUSPENDED; break; default: goto out; } if (sci_rnc->suspend_type == scu_get_event_type(event_code)) sci_change_state(&sci_rnc->sm, next_state); break; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state: %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } return SCI_SUCCESS; out: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: code: %#x state: %s\n", __func__, event_code, rnc_state_name(state)); return SCI_FAILURE; } enum sci_status sci_remote_node_context_destruct(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_INVALIDATING: sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; case SCI_RNC_POSTING: case SCI_RNC_RESUMING: case SCI_RNC_READY: case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); return SCI_SUCCESS; case SCI_RNC_AWAIT_SUSPENSION: sci_remote_node_context_setup_to_destroy(sci_rnc, cb_fn, cb_p); return SCI_SUCCESS; case SCI_RNC_INITIAL: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), 
"%s: invalid state: %s\n", __func__, rnc_state_name(state)); /* We have decided that the destruct request on the remote node context * can not fail since it is either in the initial/destroyed state or is * can be destroyed. */ return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_remote_node_context_suspend( struct sci_remote_node_context *sci_rnc, enum sci_remote_node_suspension_reasons suspend_reason, u32 suspend_type) { enum scis_sds_remote_node_context_states state = sci_rnc->sm.current_state_id; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); enum sci_status status = SCI_FAILURE_INVALID_STATE; enum sci_remote_node_context_destination_state dest_param = RNC_DEST_UNSPECIFIED; dev_dbg(scirdev_to_dev(idev), "%s: current state %s, current suspend_type %x dest state %d," " arg suspend_reason %d, arg suspend_type %x", __func__, rnc_state_name(state), sci_rnc->suspend_type, sci_rnc->destination_state, suspend_reason, suspend_type); /* Disable automatic state continuations if explicitly suspending. */ if ((suspend_reason == SCI_HW_SUSPEND) || (sci_rnc->destination_state == RNC_DEST_FINAL)) dest_param = sci_rnc->destination_state; switch (state) { case SCI_RNC_READY: break; case SCI_RNC_INVALIDATING: if (sci_rnc->destination_state == RNC_DEST_FINAL) { dev_warn(scirdev_to_dev(idev), "%s: already destroying %p\n", __func__, sci_rnc); return SCI_FAILURE_INVALID_STATE; } /* Fall through and handle like SCI_RNC_POSTING */ case SCI_RNC_RESUMING: /* Fall through and handle like SCI_RNC_POSTING */ case SCI_RNC_POSTING: /* Set the destination state to AWAIT - this signals the * entry into the SCI_RNC_READY state that a suspension * needs to be done immediately. 
*/ if (sci_rnc->destination_state != RNC_DEST_FINAL) sci_rnc->destination_state = RNC_DEST_SUSPENDED; sci_rnc->suspend_type = suspend_type; sci_rnc->suspend_reason = suspend_reason; return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX) status = SCI_SUCCESS; break; case SCI_RNC_TX_RX_SUSPENDED: if (suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) status = SCI_SUCCESS; break; case SCI_RNC_AWAIT_SUSPENSION: if ((sci_rnc->suspend_type == SCU_EVENT_TL_RNC_SUSPEND_TX_RX) || (suspend_type == sci_rnc->suspend_type)) return SCI_SUCCESS; break; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } sci_rnc->destination_state = dest_param; sci_rnc->suspend_type = suspend_type; sci_rnc->suspend_reason = suspend_reason; if (status == SCI_SUCCESS) { /* Already in the destination state? */ struct isci_host *ihost = idev->owning_port->owning_controller; wake_up_all(&ihost->eventq); /* Let observers look. */ return SCI_SUCCESS; } if ((suspend_reason == SCI_SW_SUSPEND_NORMAL) || (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT)) { if (suspend_reason == SCI_SW_SUSPEND_LINKHANG_DETECT) isci_dev_set_hang_detection_timeout(idev, 0x00000001); sci_remote_device_post_request( idev, SCI_SOFTWARE_SUSPEND_CMD); } if (state != SCI_RNC_AWAIT_SUSPENSION) sci_change_state(&sci_rnc->sm, SCI_RNC_AWAIT_SUSPENSION); return SCI_SUCCESS; } enum sci_status sci_remote_node_context_resume(struct sci_remote_node_context *sci_rnc, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum scis_sds_remote_node_context_states state; struct isci_remote_device *idev = rnc_to_dev(sci_rnc); state = sci_rnc->sm.current_state_id; dev_dbg(scirdev_to_dev(idev), "%s: state %s, cb_fn = %p, cb_p = %p; dest_state = %d; " "dev resume path %s\n", __func__, rnc_state_name(state), cb_fn, cb_p, sci_rnc->destination_state, test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags) ? 
"<abort active>" : "<normal>"); switch (state) { case SCI_RNC_INITIAL: if (sci_rnc->remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX) return SCI_FAILURE_INVALID_STATE; sci_remote_node_context_setup_to_resume(sci_rnc, cb_fn, cb_p, RNC_DEST_READY); if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { sci_remote_node_context_construct_buffer(sci_rnc); sci_change_state(&sci_rnc->sm, SCI_RNC_POSTING); } return SCI_SUCCESS; case SCI_RNC_POSTING: case SCI_RNC_INVALIDATING: case SCI_RNC_RESUMING: /* We are still waiting to post when a resume was * requested. */ switch (sci_rnc->destination_state) { case RNC_DEST_SUSPENDED: case RNC_DEST_SUSPENDED_RESUME: /* Previously waiting to suspend after posting. * Now continue onto resumption. */ sci_remote_node_context_setup_to_resume( sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME); break; default: sci_remote_node_context_setup_to_resume( sci_rnc, cb_fn, cb_p, RNC_DEST_READY); break; } return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: { struct domain_device *dev = idev->domain_dev; /* If this is an expander attached SATA device we must * invalidate and repost the RNC since this is the only * way to clear the TCi to NCQ tag mapping table for * the RNi. All other device types we can just resume. 
*/ sci_remote_node_context_setup_to_resume( sci_rnc, cb_fn, cb_p, RNC_DEST_READY); if (!test_bit(IDEV_ABORT_PATH_ACTIVE, &idev->flags)) { if ((dev_is_sata(dev) && dev->parent) || (sci_rnc->destination_state == RNC_DEST_FINAL)) sci_change_state(&sci_rnc->sm, SCI_RNC_INVALIDATING); else sci_change_state(&sci_rnc->sm, SCI_RNC_RESUMING); } } return SCI_SUCCESS; case SCI_RNC_AWAIT_SUSPENSION: sci_remote_node_context_setup_to_resume( sci_rnc, cb_fn, cb_p, RNC_DEST_SUSPENDED_RESUME); return SCI_SUCCESS; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_remote_node_context_start_io(struct sci_remote_node_context *sci_rnc, struct isci_request *ireq) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_READY: return SCI_SUCCESS; case SCI_RNC_TX_SUSPENDED: case SCI_RNC_TX_RX_SUSPENDED: case SCI_RNC_AWAIT_SUSPENSION: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED; default: dev_dbg(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %s\n", __func__, rnc_state_name(state)); return SCI_FAILURE_INVALID_STATE; } } enum sci_status sci_remote_node_context_start_task( struct sci_remote_node_context *sci_rnc, struct isci_request *ireq, scics_sds_remote_node_context_callback cb_fn, void *cb_p) { enum sci_status status = sci_remote_node_context_resume(sci_rnc, cb_fn, cb_p); if (status != SCI_SUCCESS) dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: resume failed: %d\n", __func__, status); return status; } int sci_remote_node_context_is_safe_to_abort( struct sci_remote_node_context *sci_rnc) { enum scis_sds_remote_node_context_states state; state = sci_rnc->sm.current_state_id; switch (state) { case SCI_RNC_INVALIDATING: case SCI_RNC_TX_RX_SUSPENDED: return 1; case SCI_RNC_POSTING: case 
SCI_RNC_RESUMING: case SCI_RNC_READY: case SCI_RNC_TX_SUSPENDED: case SCI_RNC_AWAIT_SUSPENSION: case SCI_RNC_INITIAL: return 0; default: dev_warn(scirdev_to_dev(rnc_to_dev(sci_rnc)), "%s: invalid state %d\n", __func__, state); return 0; } }
gpl-2.0
PatrikKT/android_kernel_huawei_y536a1
drivers/staging/comedi/drivers/ke_counter.c
4895
8308
/*
 * comedi/drivers/ke_counter.c
 * Comedi driver for Kolter-Electronic PCI Counter 1 Card
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
Driver: ke_counter
Description: Driver for Kolter Electronic Counter Card
Devices: [Kolter Electronic] PCI Counter Card (ke_counter)
Author: Michael Hillmann
Updated: Mon, 14 Apr 2008 15:42:42 +0100
Status: tested

Configuration Options:
  [0] - PCI bus of device (optional)
  [1] - PCI slot of device (optional)
  If bus/slot is not specified, the first supported
  PCI device found will be used.

This driver is a simple driver to read the counter values from
Kolter Electronic PCI Counter Card.
*/

#include "../comedidev.h"

#include "comedi_pci.h"

#define CNT_DRIVER_NAME "ke_counter"
#define PCI_VENDOR_ID_KOLTER 0x1001
#define CNT_CARD_DEVICE_ID 0x0014

/*-- function prototypes ----------------------------------------------------*/

static int cnt_attach(struct comedi_device *dev, struct comedi_devconfig *it);
static int cnt_detach(struct comedi_device *dev);

static DEFINE_PCI_DEVICE_TABLE(cnt_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_KOLTER, CNT_CARD_DEVICE_ID) },
	{0}
};

MODULE_DEVICE_TABLE(pci, cnt_pci_table);

/*-- board specification structure ------------------------------------------*/

struct cnt_board_struct {
	const char *name;	/* device name as exposed by comedi */
	int device_id;		/* PCI device id to match */
	int cnt_channel_nbr;	/* number of counter channels */
	int cnt_bits;		/* counter width in bits */
};

static const struct cnt_board_struct cnt_boards[] = {
	{
	 .name = CNT_DRIVER_NAME,
	 .device_id = CNT_CARD_DEVICE_ID,
	 .cnt_channel_nbr = 3,
	 .cnt_bits = 24}
};

#define cnt_board_nbr (sizeof(cnt_boards)/sizeof(struct cnt_board_struct))

/*-- device private structure -----------------------------------------------*/

struct cnt_device_private {
	struct pci_dev *pcidev;	/* PCI device claimed in cnt_attach() */
};

#define devpriv ((struct cnt_device_private *)dev->private)

static struct comedi_driver cnt_driver = {
	.driver_name = CNT_DRIVER_NAME,
	.module = THIS_MODULE,
	.attach = cnt_attach,
	.detach = cnt_detach,
};

/* PCI hotplug path: hand the device to the comedi auto-config core. */
static int __devinit cnt_driver_pci_probe(struct pci_dev *dev,
					  const struct pci_device_id *ent)
{
	return comedi_pci_auto_config(dev, cnt_driver.driver_name);
}

static void __devexit cnt_driver_pci_remove(struct pci_dev *dev)
{
	comedi_pci_auto_unconfig(dev);
}

static struct pci_driver cnt_driver_pci_driver = {
	.id_table = cnt_pci_table,
	.probe = &cnt_driver_pci_probe,
	.remove = __devexit_p(&cnt_driver_pci_remove)
};

/*
 * Register the comedi driver first, then the PCI driver; on PCI
 * registration failure the comedi driver is intentionally left
 * registered (legacy manual attach still works).
 */
static int __init cnt_driver_init_module(void)
{
	int retval;

	retval = comedi_driver_register(&cnt_driver);
	if (retval < 0)
		return retval;

	cnt_driver_pci_driver.name = (char *)cnt_driver.driver_name;
	return pci_register_driver(&cnt_driver_pci_driver);
}

static void __exit cnt_driver_cleanup_module(void)
{
	pci_unregister_driver(&cnt_driver_pci_driver);
	comedi_driver_unregister(&cnt_driver);
}

module_init(cnt_driver_init_module);
module_exit(cnt_driver_cleanup_module);

/*-- counter write ----------------------------------------------------------*/

/* This should be used only for resetting the counters;
   maybe it is better to make a special command 'reset'. */
/*
 * Write one 32-bit value to counter channel CR_CHAN(insn->chanspec),
 * one byte at a time via the channel's 0x20-byte register window.
 * NOTE(review): the byte-to-offset mapping (bits 31..24 -> +0x10,
 * 23..16 -> +0x0c, 15..8 -> +0x08, 7..0 -> +0x04) is assumed from this
 * code only — confirm against the card's register documentation.
 */
static int cnt_winsn(struct comedi_device *dev,
		     struct comedi_subdevice *s, struct comedi_insn *insn,
		     unsigned int *data)
{
	int chan = CR_CHAN(insn->chanspec);

	outb((unsigned char)((data[0] >> 24) & 0xff),
	     dev->iobase + chan * 0x20 + 0x10);
	outb((unsigned char)((data[0] >> 16) & 0xff),
	     dev->iobase + chan * 0x20 + 0x0c);
	outb((unsigned char)((data[0] >> 8) & 0xff),
	     dev->iobase + chan * 0x20 + 0x08);
	outb((unsigned char)((data[0] >> 0) & 0xff),
	     dev->iobase + chan * 0x20 + 0x04);

	/* return the number of samples written */
	return 1;
}

/*-- counter read -----------------------------------------------------------*/

/*
 * Read one 24-bit counter value, assembling it from three byte registers
 * (a1 = LSB .. a3 = MSB) and converting to a signed result when the sign
 * register a4 is non-zero.
 * NOTE(review): a0 (offset 0x00) is read but never used — presumably the
 * read latches the counter; confirm against hardware docs before removing.
 * NOTE(review): two's-complement conversion of a 24-bit value would
 * normally subtract maxdata + 1 (0x1000000), not maxdata (0xffffff);
 * the "- s->maxdata" here looks like an off-by-one — verify against the
 * card's actual sign-register semantics before changing it.
 */
static int cnt_rinsn(struct comedi_device *dev,
		     struct comedi_subdevice *s, struct comedi_insn *insn,
		     unsigned int *data)
{
	unsigned char a0, a1, a2, a3, a4;
	int chan = CR_CHAN(insn->chanspec);
	int result;

	a0 = inb(dev->iobase + chan * 0x20);
	a1 = inb(dev->iobase + chan * 0x20 + 0x04);
	a2 = inb(dev->iobase + chan * 0x20 + 0x08);
	a3 = inb(dev->iobase + chan * 0x20 + 0x0c);
	a4 = inb(dev->iobase + chan * 0x20 + 0x10);

	result = (a1 + (a2 * 256) + (a3 * 65536));
	if (a4 > 0)
		result = result - s->maxdata;

	*data = (unsigned int)result;

	/* return the number of samples read */
	return 1;
}

/*-- attach -----------------------------------------------------------------*/

/*
 * Legacy manual attach: scan all PCI devices for a supported Kolter
 * board (optionally constrained to options[0]=bus / options[1]=slot),
 * enable it, map its first BAR as an I/O port base, create the single
 * counter subdevice, then select the 20 MHz clock and reset the counters.
 */
static int cnt_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct comedi_subdevice *subdevice;
	struct pci_dev *pci_device = NULL;
	struct cnt_board_struct *board;
	unsigned long io_base;
	int error, i;

	/* allocate device private structure */
	error = alloc_private(dev, sizeof(struct cnt_device_private));
	if (error < 0)
		return error;

	/* Probe the device to determine what device in the series it is. */
	for_each_pci_dev(pci_device) {
		if (pci_device->vendor == PCI_VENDOR_ID_KOLTER) {
			for (i = 0; i < cnt_board_nbr; i++) {
				if (cnt_boards[i].device_id ==
				    pci_device->device) {
					/* was a particular bus/slot requested? */
					if ((it->options[0] != 0) ||
					    (it->options[1] != 0)) {
						/* are we on the wrong bus/slot? */
						if (pci_device->bus->number !=
						    it->options[0] ||
						    PCI_SLOT(pci_device->devfn)
						    != it->options[1]) {
							continue;
						}
					}
					dev->board_ptr = cnt_boards + i;
					board = (struct cnt_board_struct *)
						dev->board_ptr;
					goto found;
				}
			}
		}
	}
	printk(KERN_WARNING
	       "comedi%d: no supported board found! (req. bus/slot: %d/%d)\n",
	       dev->minor, it->options[0], it->options[1]);
	return -EIO;

found:
	printk(KERN_INFO
	       "comedi%d: found %s at PCI bus %d, slot %d\n", dev->minor,
	       board->name, pci_device->bus->number,
	       PCI_SLOT(pci_device->devfn));
	devpriv->pcidev = pci_device;
	dev->board_name = board->name;

	/* enable PCI device and request regions */
	error = comedi_pci_enable(pci_device, CNT_DRIVER_NAME);
	if (error < 0) {
		printk(KERN_WARNING "comedi%d: "
		       "failed to enable PCI device and request regions!\n",
		       dev->minor);
		return error;
	}

	/* read register base address [PCI_BASE_ADDRESS #0] */
	io_base = pci_resource_start(pci_device, 0);
	dev->iobase = io_base;

	/* allocate the subdevice structures */
	error = alloc_subdevices(dev, 1);
	if (error < 0)
		return error;

	subdevice = dev->subdevices + 0;
	dev->read_subdev = subdevice;

	subdevice->type = COMEDI_SUBD_COUNTER;
	subdevice->subdev_flags = SDF_READABLE /* | SDF_COMMON */ ;
	subdevice->n_chan = board->cnt_channel_nbr;
	subdevice->maxdata = (1 << board->cnt_bits) - 1;
	subdevice->insn_read = cnt_rinsn;
	subdevice->insn_write = cnt_winsn;

	/* select 20MHz clock */
	outb(3, dev->iobase + 248);

	/* reset all counters */
	outb(0, dev->iobase);
	outb(0, dev->iobase + 0x20);
	outb(0, dev->iobase + 0x40);

	printk(KERN_INFO "comedi%d: " CNT_DRIVER_NAME " attached.\n",
	       dev->minor);
	return 0;
}

/*-- detach -----------------------------------------------------------------*/

/*
 * Tear down the legacy attach: disable the PCI device only if the I/O
 * base was actually mapped, then drop the reference taken during the
 * for_each_pci_dev() scan.
 */
static int cnt_detach(struct comedi_device *dev)
{
	if (devpriv && devpriv->pcidev) {
		if (dev->iobase)
			comedi_pci_disable(devpriv->pcidev);
		pci_dev_put(devpriv->pcidev);
	}
	printk(KERN_INFO "comedi%d: " CNT_DRIVER_NAME " remove\n",
	       dev->minor);
	return 0;
}

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
GearCM/android_kernel_samsung_exynos5410
drivers/staging/comedi/drivers/contec_pci_dio.c
4895
6847
/*
 * comedi/drivers/contec_pci_dio.c
 *
 * COMEDI - Linux Control and Measurement Device Interface
 * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
/*
Driver: contec_pci_dio
Description: Contec PIO1616L digital I/O board
Devices: [Contec] PIO1616L (contec_pci_dio)
Author: Stefano Rivoir <s.rivoir@gts.it>
Updated: Wed, 27 Jun 2007 13:00:06 +0100
Status: works

Configuration Options:
  [0] - PCI bus of device (optional)
  [1] - PCI slot of device (optional)
  If bus/slot is not specified, the first supported
  PCI device found will be used.
*/

#include "../comedidev.h"

#include "comedi_pci.h"

enum contec_model {
	PIO1616L = 0,
};

struct contec_board {
	const char *name;	/* board name as exposed by comedi */
	int model;
	int in_ports;		/* number of input channels */
	int out_ports;		/* number of output channels */
	int in_offs;		/* input register offset from iobase */
	int out_offs;		/* output register offset from iobase */
	int out_boffs;
};

static const struct contec_board contec_boards[] = {
	{"PIO1616L", PIO1616L, 16, 16, 0, 2, 10},
};

#define PCI_DEVICE_ID_PIO1616L 0x8172
static DEFINE_PCI_DEVICE_TABLE(contec_pci_table) = {
	{ PCI_DEVICE(PCI_VENDOR_ID_CONTEC, PCI_DEVICE_ID_PIO1616L),
		.driver_data = PIO1616L },
	{0}
};

MODULE_DEVICE_TABLE(pci, contec_pci_table);

#define thisboard ((const struct contec_board *)dev->board_ptr)

struct contec_private {
	int data;		/* NOTE(review): appears unused in this file */
	struct pci_dev *pci_dev;	/* PCI device claimed in contec_attach() */
};

#define devpriv ((struct contec_private *)dev->private)

static int contec_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it);
static int contec_detach(struct comedi_device *dev);

static struct comedi_driver driver_contec = {
	.driver_name = "contec_pci_dio",
	.module = THIS_MODULE,
	.attach = contec_attach,
	.detach = contec_detach,
};

/* Classic digital IO */
static int contec_di_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data);
static int contec_do_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data);

#if 0
static int contec_cmdtest(struct comedi_device *dev,
			  struct comedi_subdevice *s, struct comedi_cmd *cmd);

static int contec_ns_to_timer(unsigned int *ns, int round);
#endif

/*
 * Legacy manual attach: scan the PCI bus for a PIO1616L (optionally
 * constrained to options[0]=bus / options[1]=slot), enable it, map BAR 0
 * as the I/O base, and set up one DI and one DO subdevice of 16
 * single-bit channels each.
 * NOTE(review): dev->board_name is read through thisboard (dev->board_ptr)
 * before board_ptr is assigned later in this function — confirm that the
 * comedi core pre-initializes board_ptr for drivers without a board list,
 * otherwise this dereference looks unsafe.
 */
static int contec_attach(struct comedi_device *dev,
			 struct comedi_devconfig *it)
{
	struct pci_dev *pcidev = NULL;
	struct comedi_subdevice *s;

	printk("comedi%d: contec: ", dev->minor);
	dev->board_name = thisboard->name;
	if (alloc_private(dev, sizeof(struct contec_private)) < 0)
		return -ENOMEM;
	if (alloc_subdevices(dev, 2) < 0)
		return -ENOMEM;
	for_each_pci_dev(pcidev) {
		if (pcidev->vendor == PCI_VENDOR_ID_CONTEC &&
		    pcidev->device == PCI_DEVICE_ID_PIO1616L) {
			if (it->options[0] || it->options[1]) {
				/* Check bus and slot. */
				if (it->options[0] != pcidev->bus->number ||
				    it->options[1] != PCI_SLOT(pcidev->devfn)) {
					continue;
				}
			}
			devpriv->pci_dev = pcidev;
			if (comedi_pci_enable(pcidev, "contec_pci_dio")) {
				printk
				    ("error enabling PCI device and request regions!\n");
				return -EIO;
			}
			dev->iobase = pci_resource_start(pcidev, 0);
			printk(" base addr %lx ", dev->iobase);

			dev->board_ptr = contec_boards + 0;

			/* subdevice 0: 16-channel digital input */
			s = dev->subdevices + 0;
			s->type = COMEDI_SUBD_DI;
			s->subdev_flags = SDF_READABLE;
			s->n_chan = 16;
			s->maxdata = 1;
			s->range_table = &range_digital;
			s->insn_bits = contec_di_insn_bits;

			/* subdevice 1: 16-channel digital output */
			s = dev->subdevices + 1;
			s->type = COMEDI_SUBD_DO;
			s->subdev_flags = SDF_WRITABLE;
			s->n_chan = 16;
			s->maxdata = 1;
			s->range_table = &range_digital;
			s->insn_bits = contec_do_insn_bits;

			printk("attached\n");
			return 1;
		}
	}

	printk("card not present!\n");

	return -EIO;
}

/*
 * Tear down the legacy attach: disable the PCI device only if the I/O
 * base was actually mapped, then drop the device reference taken during
 * the for_each_pci_dev() scan.
 */
static int contec_detach(struct comedi_device *dev)
{
	printk("comedi%d: contec: remove\n", dev->minor);

	if (devpriv && devpriv->pci_dev) {
		if (dev->iobase)
			comedi_pci_disable(devpriv->pci_dev);
		pci_dev_put(devpriv->pci_dev);
	}

	return 0;
}

#if 0
static int contec_cmdtest(struct comedi_device *dev,
			  struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	printk("contec_cmdtest called\n");
	return 0;
}

static int contec_ns_to_timer(unsigned int *ns, int round)
{
	return *ns;
}
#endif

/*
 * DO insn_bits handler: data[0] is the mask of channels to update,
 * data[1] the new bit values; writes the cached state to the output
 * register when any bit changes are requested.
 */
static int contec_do_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	dev_dbg(dev->hw_dev, "contec_do_insn_bits called\n");
	dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);

	if (insn->n != 2)
		return -EINVAL;

	if (data[0]) {
		s->state &= ~data[0];
		s->state |= data[0] & data[1];
		dev_dbg(dev->hw_dev, "out: %d on %lx\n", s->state,
			dev->iobase + thisboard->out_offs);
		outw(s->state, dev->iobase + thisboard->out_offs);
	}
	return 2;
}

/*
 * DI insn_bits handler: returns the current state of all 16 input
 * channels in data[1].
 */
static int contec_di_insn_bits(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	dev_dbg(dev->hw_dev, "contec_di_insn_bits called\n");
	dev_dbg(dev->hw_dev, "data: %d %d\n", data[0], data[1]);

	if (insn->n != 2)
		return -EINVAL;

	data[1] = inw(dev->iobase + thisboard->in_offs);

	return 2;
}

/* PCI hotplug path: hand the device to the comedi auto-config core. */
static int __devinit driver_contec_pci_probe(struct pci_dev *dev,
					     const struct pci_device_id *ent)
{
	return comedi_pci_auto_config(dev, driver_contec.driver_name);
}

static void __devexit driver_contec_pci_remove(struct pci_dev *dev)
{
	comedi_pci_auto_unconfig(dev);
}

static struct pci_driver driver_contec_pci_driver = {
	.id_table = contec_pci_table,
	.probe = &driver_contec_pci_probe,
	.remove = __devexit_p(&driver_contec_pci_remove)
};

/*
 * Register the comedi driver first, then the PCI driver; on PCI
 * registration failure the comedi driver is intentionally left
 * registered (legacy manual attach still works).
 */
static int __init driver_contec_init_module(void)
{
	int retval;

	retval = comedi_driver_register(&driver_contec);
	if (retval < 0)
		return retval;

	driver_contec_pci_driver.name = (char *)driver_contec.driver_name;
	return pci_register_driver(&driver_contec_pci_driver);
}

static void __exit driver_contec_cleanup_module(void)
{
	pci_unregister_driver(&driver_contec_pci_driver);
	comedi_driver_unregister(&driver_contec);
}

module_init(driver_contec_init_module);
module_exit(driver_contec_cleanup_module);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
gpl-2.0
bbedward/ZenKernel_Shamu
drivers/macintosh/mac_hid.c
10527
6194
/*
 * drivers/macintosh/mac_hid.c
 *
 * HID support stuff for Macintosh computers.
 *
 * Copyright (C) 2000 Franz Sirl.
 *
 * This file will soon be removed in favor of an uinput userspace tool.
 */

#include <linux/init.h>
#include <linux/proc_fs.h>
#include <linux/sysctl.h>
#include <linux/input.h>
#include <linux/module.h>
#include <linux/slab.h>

MODULE_LICENSE("GPL");

/* sysctl-controlled state: 1 enables mouse-button emulation (see below) */
static int mouse_emulate_buttons;
static int mouse_button2_keycode = KEY_RIGHTCTRL;	/* right control key */
static int mouse_button3_keycode = KEY_RIGHTALT;	/* right option key */

/* virtual mouse device that receives the emulated button events */
static struct input_dev *mac_hid_emumouse_dev;
/* serializes start/stop of emulation against the sysctl handler */
static DEFINE_MUTEX(mac_hid_emumouse_mutex);

/*
 * Allocate and register the virtual 3-button mouse. The private lockdep
 * classes are needed because this input device is fed from within
 * another input device's event path (see mac_hid_emumouse_filter),
 * which would otherwise look like recursive locking.
 * Returns 0 on success or a negative errno.
 */
static int mac_hid_create_emumouse(void)
{
	static struct lock_class_key mac_hid_emumouse_dev_event_class;
	static struct lock_class_key mac_hid_emumouse_dev_mutex_class;
	int err;

	mac_hid_emumouse_dev = input_allocate_device();
	if (!mac_hid_emumouse_dev)
		return -ENOMEM;

	lockdep_set_class(&mac_hid_emumouse_dev->event_lock,
			  &mac_hid_emumouse_dev_event_class);
	lockdep_set_class(&mac_hid_emumouse_dev->mutex,
			  &mac_hid_emumouse_dev_mutex_class);

	mac_hid_emumouse_dev->name = "Macintosh mouse button emulation";
	mac_hid_emumouse_dev->id.bustype = BUS_ADB;
	mac_hid_emumouse_dev->id.vendor = 0x0001;
	mac_hid_emumouse_dev->id.product = 0x0001;
	mac_hid_emumouse_dev->id.version = 0x0100;

	mac_hid_emumouse_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_REL);
	mac_hid_emumouse_dev->keybit[BIT_WORD(BTN_MOUSE)] =
		BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_MIDDLE) |
		BIT_MASK(BTN_RIGHT);
	mac_hid_emumouse_dev->relbit[0] = BIT_MASK(REL_X) | BIT_MASK(REL_Y);

	err = input_register_device(mac_hid_emumouse_dev);
	if (err) {
		input_free_device(mac_hid_emumouse_dev);
		mac_hid_emumouse_dev = NULL;
		return err;
	}

	return 0;
}

static void mac_hid_destroy_emumouse(void)
{
	input_unregister_device(mac_hid_emumouse_dev);
	mac_hid_emumouse_dev = NULL;
}

/*
 * Input event filter: translate the configured keyboard keycodes into
 * middle/right mouse button events on the virtual device. Returning
 * true swallows the original key event so it is not delivered further.
 */
static bool mac_hid_emumouse_filter(struct input_handle *handle,
				    unsigned int type, unsigned int code,
				    int value)
{
	unsigned int btn;

	if (type != EV_KEY)
		return false;

	if (code == mouse_button2_keycode)
		btn = BTN_MIDDLE;
	else if (code == mouse_button3_keycode)
		btn = BTN_RIGHT;
	else
		return false;

	input_report_key(mac_hid_emumouse_dev, btn, value);
	input_sync(mac_hid_emumouse_dev);

	return true;
}

/*
 * Attach the button-emulation handler to a newly matched input device
 * (any device that reports EV_KEY, per mac_hid_emumouse_ids), skipping
 * the virtual mouse itself to avoid a feedback loop.
 */
static int mac_hid_emumouse_connect(struct input_handler *handler,
				    struct input_dev *dev,
				    const struct input_device_id *id)
{
	struct input_handle *handle;
	int error;

	/* Don't bind to ourselves */
	if (dev == mac_hid_emumouse_dev)
		return -ENODEV;

	handle = kzalloc(sizeof(struct input_handle), GFP_KERNEL);
	if (!handle)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = "mac-button-emul";

	error = input_register_handle(handle);
	if (error) {
		printk(KERN_ERR
			"mac_hid: Failed to register button emulation handle, "
			"error %d\n", error);
		goto err_free;
	}

	error = input_open_device(handle);
	if (error) {
		printk(KERN_ERR
			"mac_hid: Failed to open input device, error %d\n",
			error);
		goto err_unregister;
	}

	return 0;

 err_unregister:
	input_unregister_handle(handle);
 err_free:
	kfree(handle);
	return error;
}

static void mac_hid_emumouse_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}

/* Match every input device that can emit key events. */
static const struct input_device_id mac_hid_emumouse_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_KEY) },
	},
	{ },
};

MODULE_DEVICE_TABLE(input, mac_hid_emumouse_ids);

static struct input_handler mac_hid_emumouse_handler = {
	.filter		= mac_hid_emumouse_filter,
	.connect	= mac_hid_emumouse_connect,
	.disconnect	= mac_hid_emumouse_disconnect,
	.name		= "mac-button-emul",
	.id_table	= mac_hid_emumouse_ids,
};

/*
 * Create the virtual mouse, then register the filter handler; on
 * handler-registration failure the virtual mouse is torn down again.
 * Called with mac_hid_emumouse_mutex held (from the sysctl handler).
 */
static int mac_hid_start_emulation(void)
{
	int err;

	err = mac_hid_create_emumouse();
	if (err)
		return err;

	err = input_register_handler(&mac_hid_emumouse_handler);
	if (err) {
		mac_hid_destroy_emumouse();
		return err;
	}

	return 0;
}

static void mac_hid_stop_emulation(void)
{
	input_unregister_handler(&mac_hid_emumouse_handler);
	mac_hid_destroy_emumouse();
}

/*
 * sysctl handler for dev/mac_hid/mouse_button_emulation: only the
 * values 0 (stop) and 1 (start) are accepted; on any failure the
 * previous value is restored so the sysctl always reflects reality.
 */
static int mac_hid_toggle_emumouse(ctl_table *table, int write,
				   void __user *buffer, size_t *lenp,
				   loff_t *ppos)
{
	int *valp = table->data;
	int old_val = *valp;
	int rc;

	rc = mutex_lock_killable(&mac_hid_emumouse_mutex);
	if (rc)
		return rc;

	rc = proc_dointvec(table, write, buffer, lenp, ppos);

	if (rc == 0 && write && *valp != old_val) {
		if (*valp == 1)
			rc = mac_hid_start_emulation();
		else if (*valp == 0)
			mac_hid_stop_emulation();
		else
			rc = -EINVAL;
	}

	/* Restore the old value in case of error */
	if (rc)
		*valp = old_val;

	mutex_unlock(&mac_hid_emumouse_mutex);

	return rc;
}

/* file(s) in /proc/sys/dev/mac_hid */
static ctl_table mac_hid_files[] = {
	{
		.procname	= "mouse_button_emulation",
		.data		= &mouse_emulate_buttons,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= mac_hid_toggle_emumouse,
	},
	{
		.procname	= "mouse_button2_keycode",
		.data		= &mouse_button2_keycode,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{
		.procname	= "mouse_button3_keycode",
		.data		= &mouse_button3_keycode,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec,
	},
	{ }
};

/* dir in /proc/sys/dev */
static ctl_table mac_hid_dir[] = {
	{
		.procname	= "mac_hid",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= mac_hid_files,
	},
	{ }
};

/* /proc/sys/dev itself, in case that is not there yet */
static ctl_table mac_hid_root_dir[] = {
	{
		.procname	= "dev",
		.maxlen		= 0,
		.mode		= 0555,
		.child		= mac_hid_dir,
	},
	{ }
};

static struct ctl_table_header *mac_hid_sysctl_header;

static int __init mac_hid_init(void)
{
	mac_hid_sysctl_header = register_sysctl_table(mac_hid_root_dir);
	if (!mac_hid_sysctl_header)
		return -ENOMEM;

	return 0;
}
module_init(mac_hid_init);

static void __exit mac_hid_exit(void)
{
	unregister_sysctl_table(mac_hid_sysctl_header);

	/* emulation may still be running; shut it down before unload */
	if (mouse_emulate_buttons)
		mac_hid_stop_emulation();
}
module_exit(mac_hid_exit);
gpl-2.0
TheTypoMaster/kernel_condor
drivers/staging/rtl8712/rtl8712_io.c
11295
4928
/****************************************************************************** * rtl8712_io.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com>. 
* Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _RTL8712_IO_C_ #include "osdep_service.h" #include "drv_types.h" #include "rtl871x_io.h" #include "osdep_intf.h" #include "usb_ops.h" u8 r8712_read8(struct _adapter *adapter, u32 addr) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl *pintfhdl = &(pio_queue->intf); u8 (*_read8)(struct intf_hdl *pintfhdl, u32 addr); u8 r_val; _read8 = pintfhdl->io_ops._read8; r_val = _read8(pintfhdl, addr); return r_val; } u16 r8712_read16(struct _adapter *adapter, u32 addr) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl *pintfhdl = &(pio_queue->intf); u16 (*_read16)(struct intf_hdl *pintfhdl, u32 addr); u16 r_val; _read16 = pintfhdl->io_ops._read16; r_val = _read16(pintfhdl, addr); return r_val; } u32 r8712_read32(struct _adapter *adapter, u32 addr) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl *pintfhdl = &(pio_queue->intf); u32 (*_read32)(struct intf_hdl *pintfhdl, u32 addr); u32 r_val; _read32 = pintfhdl->io_ops._read32; r_val = _read32(pintfhdl, addr); return r_val; } void r8712_write8(struct _adapter *adapter, u32 addr, u8 val) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl *pintfhdl = &(pio_queue->intf); void (*_write8)(struct intf_hdl *pintfhdl, u32 addr, u8 val); _write8 = pintfhdl->io_ops._write8; _write8(pintfhdl, addr, val); } void r8712_write16(struct _adapter *adapter, u32 addr, u16 val) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl *pintfhdl = &(pio_queue->intf); void (*_write16)(struct intf_hdl *pintfhdl, u32 addr, u16 val); _write16 = pintfhdl->io_ops._write16; _write16(pintfhdl, addr, val); } void r8712_write32(struct _adapter *adapter, u32 addr, u32 val) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl 
*pintfhdl = (struct intf_hdl *)(&(pio_queue->intf)); void (*_write32)(struct intf_hdl *pintfhdl, u32 addr, u32 val); _write32 = pintfhdl->io_ops._write32; _write32(pintfhdl, addr, val); } void r8712_read_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl *pintfhdl = &(pio_queue->intf); void (*_read_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem); if ((adapter->bDriverStopped == true) || (adapter->bSurpriseRemoved == true)) return; _read_mem = pintfhdl->io_ops._read_mem; _read_mem(pintfhdl, addr, cnt, pmem); } void r8712_write_mem(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl *pintfhdl = &(pio_queue->intf); void (*_write_mem)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem); _write_mem = pintfhdl->io_ops._write_mem; _write_mem(pintfhdl, addr, cnt, pmem); } void r8712_read_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl *pintfhdl = &(pio_queue->intf); u32 (*_read_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem); if ((adapter->bDriverStopped == true) || (adapter->bSurpriseRemoved == true)) return; _read_port = pintfhdl->io_ops._read_port; _read_port(pintfhdl, addr, cnt, pmem); } void r8712_write_port(struct _adapter *adapter, u32 addr, u32 cnt, u8 *pmem) { struct io_queue *pio_queue = (struct io_queue *)adapter->pio_queue; struct intf_hdl *pintfhdl = &(pio_queue->intf); u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt, u8 *pmem); _write_port = pintfhdl->io_ops._write_port; _write_port(pintfhdl, addr, cnt, pmem); }
gpl-2.0
keeeener/nicki
kernel/arch/arm/common/sharpsl_param.c
12319
1821
/* * Hardware parameter area specific to Sharp SL series devices * * Copyright (c) 2005 Richard Purdie * * Based on Sharp's 2.4 kernel patches * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <asm/mach/sharpsl_param.h> /* * Certain hardware parameters determined at the time of device manufacture, * typically including LCD parameters are loaded by the bootloader at the * address PARAM_BASE. As the kernel will overwrite them, we need to store * them early in the boot process, then pass them to the appropriate drivers. * Not all devices use all parameters but the format is common to all. */ #ifdef CONFIG_ARCH_SA1100 #define PARAM_BASE 0xe8ffc000 #else #define PARAM_BASE 0xa0000a00 #endif #define MAGIC_CHG(a,b,c,d) ( ( d << 24 ) | ( c << 16 ) | ( b << 8 ) | a ) #define COMADJ_MAGIC MAGIC_CHG('C','M','A','D') #define UUID_MAGIC MAGIC_CHG('U','U','I','D') #define TOUCH_MAGIC MAGIC_CHG('T','U','C','H') #define AD_MAGIC MAGIC_CHG('B','V','A','D') #define PHAD_MAGIC MAGIC_CHG('P','H','A','D') struct sharpsl_param_info sharpsl_param; EXPORT_SYMBOL(sharpsl_param); void sharpsl_save_param(void) { memcpy(&sharpsl_param, (void *)PARAM_BASE, sizeof(struct sharpsl_param_info)); if (sharpsl_param.comadj_keyword != COMADJ_MAGIC) sharpsl_param.comadj=-1; if (sharpsl_param.phad_keyword != PHAD_MAGIC) sharpsl_param.phadadj=-1; if (sharpsl_param.uuid_keyword != UUID_MAGIC) sharpsl_param.uuid[0]=-1; if (sharpsl_param.touch_keyword != TOUCH_MAGIC) sharpsl_param.touch_xp=-1; if (sharpsl_param.adadj_keyword != AD_MAGIC) sharpsl_param.adadj=-1; }
gpl-2.0
foxsat-hdr/linux-kernel
arch/um/drivers/hostaudio_kern.c
32
7992
/*
 * Copyright (C) 2002 Steve Schmidtke
 * Licensed under the GPL
 *
 * Relays /dev/dsp and /dev/mixer accesses from a UML guest to the
 * corresponding OSS devices on the host.
 */

#include "linux/config.h"
#include "linux/module.h"
#include "linux/init.h"
#include "linux/slab.h"
#include "linux/fs.h"
#include "linux/sound.h"
#include "linux/soundcard.h"
#include "asm/uaccess.h"
#include "kern_util.h"
#include "init.h"
#include "os.h"

/* Per-open state: just the host file descriptor being relayed. */
struct hostaudio_state {
	int fd;
};

struct hostmixer_state {
	int fd;
};

#define HOSTAUDIO_DEV_DSP "/dev/sound/dsp"
#define HOSTAUDIO_DEV_MIXER "/dev/sound/mixer"

/* Only changed from linux_main at boot time */
char *dsp = HOSTAUDIO_DEV_DSP;
char *mixer = HOSTAUDIO_DEV_MIXER;

#define DSP_HELP \
" This is used to specify the host dsp device to the hostaudio driver.\n" \
" The default is \"" HOSTAUDIO_DEV_DSP "\".\n\n"

#define MIXER_HELP \
" This is used to specify the host mixer device to the hostaudio driver.\n"\
" The default is \"" HOSTAUDIO_DEV_MIXER "\".\n\n"

#ifndef MODULE
static int set_dsp(char *name, int *add)
{
	dsp = name;
	return 0;
}

__uml_setup("dsp=", set_dsp, "dsp=<dsp device>\n" DSP_HELP);

static int set_mixer(char *name, int *add)
{
	mixer = name;
	return 0;
}

__uml_setup("mixer=", set_mixer, "mixer=<mixer device>\n" MIXER_HELP);

#else /*MODULE*/

MODULE_PARM(dsp, "s");
MODULE_PARM_DESC(dsp, DSP_HELP);

MODULE_PARM(mixer, "s");
MODULE_PARM_DESC(mixer, MIXER_HELP);

#endif

/* /dev/dsp file operations */

static ssize_t hostaudio_read(struct file *file, char *buffer, size_t count,
			      loff_t *ppos)
{
	struct hostaudio_state *state = file->private_data;
	void *kbuf;
	int err;

#ifdef DEBUG
	printk("hostaudio: read called, count = %d\n", count);
#endif

	/* Bounce through a kernel buffer: os_read_file() cannot write
	 * into guest userspace directly. */
	kbuf = kmalloc(count, GFP_KERNEL);
	if (kbuf == NULL)
		return -ENOMEM;

	err = os_read_file(state->fd, kbuf, count);
	if (err < 0)
		goto out;

	if (copy_to_user(buffer, kbuf, err))
		err = -EFAULT;

 out:
	kfree(kbuf);
	return err;
}

static ssize_t hostaudio_write(struct file *file, const char *buffer,
			       size_t count, loff_t *ppos)
{
	struct hostaudio_state *state = file->private_data;
	void *kbuf;
	int err;

#ifdef DEBUG
	printk("hostaudio: write called, count = %d\n", count);
#endif

	kbuf = kmalloc(count, GFP_KERNEL);
	if (kbuf == NULL)
		return -ENOMEM;

	err = -EFAULT;
	if (copy_from_user(kbuf, buffer, count))
		goto out;

	err = os_write_file(state->fd, kbuf, count);
	if (err < 0)
		goto out;
	*ppos += err;

 out:
	kfree(kbuf);
	return err;
}

static unsigned int hostaudio_poll(struct file *file,
				   struct poll_table_struct *wait)
{
	unsigned int mask = 0;

#ifdef DEBUG
	printk("hostaudio: poll called (unimplemented)\n");
#endif

	return mask;
}

static int hostaudio_ioctl(struct inode *inode, struct file *file,
			   unsigned int cmd, unsigned long arg)
{
	struct hostaudio_state *state = file->private_data;
	unsigned long data = 0;
	int err;

#ifdef DEBUG
	printk("hostaudio: ioctl called, cmd = %u\n", cmd);
#endif

	/* These OSS ioctls pass an int by reference; fetch it so the
	 * host-side ioctl operates on a kernel copy. */
	switch (cmd) {
	case SNDCTL_DSP_SPEED:
	case SNDCTL_DSP_STEREO:
	case SNDCTL_DSP_GETBLKSIZE:
	case SNDCTL_DSP_CHANNELS:
	case SNDCTL_DSP_SUBDIVIDE:
	case SNDCTL_DSP_SETFRAGMENT:
		if (get_user(data, (int *) arg))
			return -EFAULT;
		break;
	default:
		break;
	}

	err = os_ioctl_generic(state->fd, cmd, (unsigned long) &data);

	/* ...and copy the (possibly updated) value back out. */
	switch (cmd) {
	case SNDCTL_DSP_SPEED:
	case SNDCTL_DSP_STEREO:
	case SNDCTL_DSP_GETBLKSIZE:
	case SNDCTL_DSP_CHANNELS:
	case SNDCTL_DSP_SUBDIVIDE:
	case SNDCTL_DSP_SETFRAGMENT:
		if (put_user(data, (int *) arg))
			return -EFAULT;
		break;
	default:
		break;
	}

	return err;
}

static int hostaudio_open(struct inode *inode, struct file *file)
{
	struct hostaudio_state *state;
	int r = 0, w = 0;
	int ret;

#ifdef DEBUG
	printk("hostaudio: open called (host: %s)\n", dsp);
#endif

	state = kmalloc(sizeof(struct hostaudio_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;

	if (file->f_mode & FMODE_READ)
		r = 1;
	if (file->f_mode & FMODE_WRITE)
		w = 1;

	ret = os_open_file(dsp, of_set_rw(OPENFLAGS(), r, w), 0);
	if (ret < 0) {
		kfree(state);
		return ret;
	}

	state->fd = ret;
	file->private_data = state;
	return 0;
}

static int hostaudio_release(struct inode *inode, struct file *file)
{
	struct hostaudio_state *state = file->private_data;

#ifdef DEBUG
	printk("hostaudio: release called\n");
#endif

	os_close_file(state->fd);
	kfree(state);

	return 0;
}

/* /dev/mixer file operations */

static int hostmixer_ioctl_mixdev(struct inode *inode, struct file *file,
				  unsigned int cmd, unsigned long arg)
{
	struct hostmixer_state *state = file->private_data;

#ifdef DEBUG
	printk("hostmixer: ioctl called\n");
#endif

	return os_ioctl_generic(state->fd, cmd, arg);
}

static int hostmixer_open_mixdev(struct inode *inode, struct file *file)
{
	struct hostmixer_state *state;
	int r = 0, w = 0;
	int ret;

#ifdef DEBUG
	printk("hostmixer: open called (host: %s)\n", mixer);
#endif

	state = kmalloc(sizeof(struct hostmixer_state), GFP_KERNEL);
	if (state == NULL)
		return -ENOMEM;

	if (file->f_mode & FMODE_READ)
		r = 1;
	if (file->f_mode & FMODE_WRITE)
		w = 1;

	ret = os_open_file(mixer, of_set_rw(OPENFLAGS(), r, w), 0);
	if (ret < 0) {
		/* Fix: report the mixer device we actually tried to open,
		 * not the dsp path. */
		printk("hostaudio_open_mixdev failed to open '%s', err = %d\n",
		       mixer, -ret);
		kfree(state);
		return ret;
	}

	/* Fix: remember the host fd.  It was never stored, leaving
	 * state->fd uninitialized for hostmixer_ioctl_mixdev() and
	 * hostmixer_release(). */
	state->fd = ret;

	file->private_data = state;
	return 0;
}

static int hostmixer_release(struct inode *inode, struct file *file)
{
	struct hostmixer_state *state = file->private_data;

#ifdef DEBUG
	printk("hostmixer: release called\n");
#endif

	os_close_file(state->fd);
	kfree(state);

	return 0;
}

/* kernel module operations */

static struct file_operations hostaudio_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= hostaudio_read,
	.write		= hostaudio_write,
	.poll		= hostaudio_poll,
	.ioctl		= hostaudio_ioctl,
	.mmap		= NULL,
	.open		= hostaudio_open,
	.release	= hostaudio_release,
};

static struct file_operations hostmixer_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.ioctl		= hostmixer_ioctl_mixdev,
	.open		= hostmixer_open_mixdev,
	.release	= hostmixer_release,
};

struct {
	int dev_audio;
	int dev_mixer;
} module_data;

MODULE_AUTHOR("Steve Schmidtke");
MODULE_DESCRIPTION("UML Audio Relay");
MODULE_LICENSE("GPL");

static int __init hostaudio_init_module(void)
{
	printk(KERN_INFO "UML Audio Relay (host dsp = %s, host mixer = %s)\n",
	       dsp, mixer);

	module_data.dev_audio = register_sound_dsp(&hostaudio_fops, -1);
	if (module_data.dev_audio < 0) {
		printk(KERN_ERR "hostaudio: couldn't register DSP device!\n");
		return -ENODEV;
	}

	module_data.dev_mixer = register_sound_mixer(&hostmixer_fops, -1);
	if (module_data.dev_mixer < 0) {
		printk(KERN_ERR "hostmixer: couldn't register mixer "
		       "device!\n");
		/* Unwind the dsp registration so we fail cleanly. */
		unregister_sound_dsp(module_data.dev_audio);
		return -ENODEV;
	}

	return 0;
}

static void __exit hostaudio_cleanup_module(void)
{
	unregister_sound_mixer(module_data.dev_mixer);
	unregister_sound_dsp(module_data.dev_audio);
}

module_init(hostaudio_init_module);
module_exit(hostaudio_cleanup_module);
gpl-2.0
iamroot9C-arm/linux
drivers/edac/edac_mc.c
32
32529
/* * edac_mc kernel module * (C) 2005, 2006 Linux Networx (http://lnxi.com) * This file may be distributed under the terms of the * GNU General Public License. * * Written by Thayne Harbaugh * Based on work by Dan Hollis <goemon at anime dot net> and others. * http://www.anime.net/~goemon/linux-ecc/ * * Modified by Dave Peterson and Doug Thompson * */ #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/smp.h> #include <linux/init.h> #include <linux/sysctl.h> #include <linux/highmem.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/jiffies.h> #include <linux/spinlock.h> #include <linux/list.h> #include <linux/ctype.h> #include <linux/edac.h> #include <linux/bitops.h> #include <asm/uaccess.h> #include <asm/page.h> #include <asm/edac.h> #include "edac_core.h" #include "edac_module.h" #define CREATE_TRACE_POINTS #define TRACE_INCLUDE_PATH ../../include/ras #include <ras/ras_event.h> /* lock to memory controller's control array */ static DEFINE_MUTEX(mem_ctls_mutex); static LIST_HEAD(mc_devices); unsigned edac_dimm_info_location(struct dimm_info *dimm, char *buf, unsigned len) { struct mem_ctl_info *mci = dimm->mci; int i, n, count = 0; char *p = buf; for (i = 0; i < mci->n_layers; i++) { n = snprintf(p, len, "%s %d ", edac_layer_name[mci->layers[i].type], dimm->location[i]); p += n; len -= n; count += n; if (!len) break; } return count; } #ifdef CONFIG_EDAC_DEBUG static void edac_mc_dump_channel(struct rank_info *chan) { edac_dbg(4, " channel->chan_idx = %d\n", chan->chan_idx); edac_dbg(4, " channel = %p\n", chan); edac_dbg(4, " channel->csrow = %p\n", chan->csrow); edac_dbg(4, " channel->dimm = %p\n", chan->dimm); } static void edac_mc_dump_dimm(struct dimm_info *dimm, int number) { char location[80]; edac_dimm_info_location(dimm, location, sizeof(location)); edac_dbg(4, "%s%i: %smapped as virtual row %d, chan %d\n", dimm->mci->mem_is_per_rank ? 
"rank" : "dimm", number, location, dimm->csrow, dimm->cschannel); edac_dbg(4, " dimm = %p\n", dimm); edac_dbg(4, " dimm->label = '%s'\n", dimm->label); edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); edac_dbg(4, " dimm->grain = %d\n", dimm->grain); edac_dbg(4, " dimm->nr_pages = 0x%x\n", dimm->nr_pages); } static void edac_mc_dump_csrow(struct csrow_info *csrow) { edac_dbg(4, "csrow->csrow_idx = %d\n", csrow->csrow_idx); edac_dbg(4, " csrow = %p\n", csrow); edac_dbg(4, " csrow->first_page = 0x%lx\n", csrow->first_page); edac_dbg(4, " csrow->last_page = 0x%lx\n", csrow->last_page); edac_dbg(4, " csrow->page_mask = 0x%lx\n", csrow->page_mask); edac_dbg(4, " csrow->nr_channels = %d\n", csrow->nr_channels); edac_dbg(4, " csrow->channels = %p\n", csrow->channels); edac_dbg(4, " csrow->mci = %p\n", csrow->mci); } static void edac_mc_dump_mci(struct mem_ctl_info *mci) { edac_dbg(3, "\tmci = %p\n", mci); edac_dbg(3, "\tmci->mtype_cap = %lx\n", mci->mtype_cap); edac_dbg(3, "\tmci->edac_ctl_cap = %lx\n", mci->edac_ctl_cap); edac_dbg(3, "\tmci->edac_cap = %lx\n", mci->edac_cap); edac_dbg(4, "\tmci->edac_check = %p\n", mci->edac_check); edac_dbg(3, "\tmci->nr_csrows = %d, csrows = %p\n", mci->nr_csrows, mci->csrows); edac_dbg(3, "\tmci->nr_dimms = %d, dimms = %p\n", mci->tot_dimms, mci->dimms); edac_dbg(3, "\tdev = %p\n", mci->pdev); edac_dbg(3, "\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name); edac_dbg(3, "\tpvt_info = %p\n\n", mci->pvt_info); } #endif /* CONFIG_EDAC_DEBUG */ /* * keep those in sync with the enum mem_type */ const char *edac_mem_types[] = { "Empty csrow", "Reserved csrow type", "Unknown csrow type", "Fast page mode RAM", "Extended data out RAM", "Burst Extended data out RAM", "Single data rate SDRAM", "Registered single data rate SDRAM", "Double data rate SDRAM", "Registered Double data rate SDRAM", "Rambus DRAM", "Unbuffered DDR2 RAM", "Fully buffered DDR2", "Registered DDR2 RAM", "Rambus XDR", "Unbuffered DDR3 RAM", "Registered DDR3 
RAM", }; EXPORT_SYMBOL_GPL(edac_mem_types); /** * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation * @p: pointer to a pointer with the memory offset to be used. At * return, this will be incremented to point to the next offset * @size: Size of the data structure to be reserved * @n_elems: Number of elements that should be reserved * * If 'size' is a constant, the compiler will optimize this whole function * down to either a no-op or the addition of a constant to the value of '*p'. * * The 'p' pointer is absolutely needed to keep the proper advancing * further in memory to the proper offsets when allocating the struct along * with its embedded structs, as edac_device_alloc_ctl_info() does it * above, for example. * * At return, the pointer 'p' will be incremented to be used on a next call * to this function. */ void *edac_align_ptr(void **p, unsigned size, int n_elems) { unsigned align, r; void *ptr = *p; *p += size * n_elems; /* * 'p' can possibly be an unaligned item X such that sizeof(X) is * 'size'. Adjust 'p' so that its alignment is at least as * stringent as what the compiler would provide for X and return * the aligned result. * Here we assume that the alignment of a "long long" is the most * stringent alignment that the compiler will ever provide by default. * As far as I know, this is a reasonable assumption. 
*/ if (size > sizeof(long)) align = sizeof(long long); else if (size > sizeof(int)) align = sizeof(long); else if (size > sizeof(short)) align = sizeof(int); else if (size > sizeof(char)) align = sizeof(short); else return (char *)ptr; r = (unsigned long)p % align; if (r == 0) return (char *)ptr; *p += align - r; return (void *)(((unsigned long)ptr) + align - r); } /** * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure * @mc_num: Memory controller number * @n_layers: Number of MC hierarchy layers * layers: Describes each layer as seen by the Memory Controller * @size_pvt: size of private storage needed * * * Everything is kmalloc'ed as one big chunk - more efficient. * Only can be used if all structures have the same lifetime - otherwise * you have to allocate and initialize your own structures. * * Use edac_mc_free() to free mc structures allocated by this function. * * NOTE: drivers handle multi-rank memories in different ways: in some * drivers, one multi-rank memory stick is mapped as one entry, while, in * others, a single multi-rank memory stick would be mapped into several * entries. Currently, this function will allocate multiple struct dimm_info * on such scenarios, as grouping the multiple ranks require drivers change. 
* * Returns: * On failure: NULL * On success: struct mem_ctl_info pointer */ struct mem_ctl_info *edac_mc_alloc(unsigned mc_num, unsigned n_layers, struct edac_mc_layer *layers, unsigned sz_pvt) { struct mem_ctl_info *mci; struct edac_mc_layer *layer; struct csrow_info *csr; struct rank_info *chan; struct dimm_info *dimm; u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS]; unsigned pos[EDAC_MAX_LAYERS]; unsigned size, tot_dimms = 1, count = 1; unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0; void *pvt, *p, *ptr = NULL; int i, j, row, chn, n, len, off; bool per_rank = false; BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0); /* * Calculate the total amount of dimms and csrows/cschannels while * in the old API emulation mode */ for (i = 0; i < n_layers; i++) { tot_dimms *= layers[i].size; if (layers[i].is_virt_csrow) tot_csrows *= layers[i].size; else tot_channels *= layers[i].size; if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT) per_rank = true; } /* Figure out the offsets of the various items from the start of an mc * structure. We want the alignment of each item to be at least as * stringent as what the compiler would provide if we could simply * hardcode everything into a single struct. */ mci = edac_align_ptr(&ptr, sizeof(*mci), 1); layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers); for (i = 0; i < n_layers; i++) { count *= layers[i].size; edac_dbg(4, "errcount layer %d size %d\n", i, count); ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count); tot_errcount += 2 * count; } edac_dbg(4, "allocating %d error counters\n", tot_errcount); pvt = edac_align_ptr(&ptr, sz_pvt, 1); size = ((unsigned long)pvt) + sz_pvt; edac_dbg(1, "allocating %u bytes for mci data (%d %s, %d csrows/channels)\n", size, tot_dimms, per_rank ? 
"ranks" : "dimms", tot_csrows * tot_channels); mci = kzalloc(size, GFP_KERNEL); if (mci == NULL) return NULL; /* Adjust pointers so they point within the memory we just allocated * rather than an imaginary chunk of memory located at address 0. */ layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer)); for (i = 0; i < n_layers; i++) { mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i])); mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i])); } pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; /* setup index and various internal pointers */ mci->mc_idx = mc_num; mci->tot_dimms = tot_dimms; mci->pvt_info = pvt; mci->n_layers = n_layers; mci->layers = layer; memcpy(mci->layers, layers, sizeof(*layer) * n_layers); mci->nr_csrows = tot_csrows; mci->num_cschannel = tot_channels; mci->mem_is_per_rank = per_rank; /* * Alocate and fill the csrow/channels structs */ mci->csrows = kcalloc(sizeof(*mci->csrows), tot_csrows, GFP_KERNEL); if (!mci->csrows) goto error; for (row = 0; row < tot_csrows; row++) { csr = kzalloc(sizeof(**mci->csrows), GFP_KERNEL); if (!csr) goto error; mci->csrows[row] = csr; csr->csrow_idx = row; csr->mci = mci; csr->nr_channels = tot_channels; csr->channels = kcalloc(sizeof(*csr->channels), tot_channels, GFP_KERNEL); if (!csr->channels) goto error; for (chn = 0; chn < tot_channels; chn++) { chan = kzalloc(sizeof(**csr->channels), GFP_KERNEL); if (!chan) goto error; csr->channels[chn] = chan; chan->chan_idx = chn; chan->csrow = csr; } } /* * Allocate and fill the dimm structs */ mci->dimms = kcalloc(sizeof(*mci->dimms), tot_dimms, GFP_KERNEL); if (!mci->dimms) goto error; memset(&pos, 0, sizeof(pos)); row = 0; chn = 0; for (i = 0; i < tot_dimms; i++) { chan = mci->csrows[row]->channels[chn]; off = EDAC_DIMM_OFF(layer, n_layers, pos[0], pos[1], pos[2]); if (off < 0 || off >= tot_dimms) { edac_mc_printk(mci, KERN_ERR, "EDAC core bug: EDAC_DIMM_OFF is trying to do an illegal 
data access\n"); goto error; } dimm = kzalloc(sizeof(**mci->dimms), GFP_KERNEL); if (!dimm) goto error; mci->dimms[off] = dimm; dimm->mci = mci; /* * Copy DIMM location and initialize it. */ len = sizeof(dimm->label); p = dimm->label; n = snprintf(p, len, "mc#%u", mc_num); p += n; len -= n; for (j = 0; j < n_layers; j++) { n = snprintf(p, len, "%s#%u", edac_layer_name[layers[j].type], pos[j]); p += n; len -= n; dimm->location[j] = pos[j]; if (len <= 0) break; } /* Link it to the csrows old API data */ chan->dimm = dimm; dimm->csrow = row; dimm->cschannel = chn; /* Increment csrow location */ row++; if (row == tot_csrows) { row = 0; chn++; } /* Increment dimm location */ for (j = n_layers - 1; j >= 0; j--) { pos[j]++; if (pos[j] < layers[j].size) break; pos[j] = 0; } } mci->op_state = OP_ALLOC; /* at this point, the root kobj is valid, and in order to * 'free' the object, then the function: * edac_mc_unregister_sysfs_main_kobj() must be called * which will perform kobj unregistration and the actual free * will occur during the kobject callback operation */ return mci; error: if (mci->dimms) { for (i = 0; i < tot_dimms; i++) kfree(mci->dimms[i]); kfree(mci->dimms); } if (mci->csrows) { for (chn = 0; chn < tot_channels; chn++) { csr = mci->csrows[chn]; if (csr) { for (chn = 0; chn < tot_channels; chn++) kfree(csr->channels[chn]); kfree(csr); } kfree(mci->csrows[i]); } kfree(mci->csrows); } kfree(mci); return NULL; } EXPORT_SYMBOL_GPL(edac_mc_alloc); /** * edac_mc_free * 'Free' a previously allocated 'mci' structure * @mci: pointer to a struct mem_ctl_info structure */ void edac_mc_free(struct mem_ctl_info *mci) { edac_dbg(1, "\n"); /* the mci instance is freed here, when the sysfs object is dropped */ edac_unregister_sysfs(mci); } EXPORT_SYMBOL_GPL(edac_mc_free); /** * find_mci_by_dev * * scan list of controllers looking for the one that manages * the 'dev' device * @dev: pointer to a struct device related with the MCI */ struct mem_ctl_info *find_mci_by_dev(struct 
device *dev) { struct mem_ctl_info *mci; struct list_head *item; edac_dbg(3, "\n"); list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); if (mci->pdev == dev) return mci; } return NULL; } EXPORT_SYMBOL_GPL(find_mci_by_dev); /* * handler for EDAC to check if NMI type handler has asserted interrupt */ static int edac_mc_assert_error_check_and_clear(void) { int old_state; if (edac_op_state == EDAC_OPSTATE_POLL) return 1; old_state = edac_err_assert; edac_err_assert = 0; return old_state; } /* * edac_mc_workq_function * performs the operation scheduled by a workq request */ static void edac_mc_workq_function(struct work_struct *work_req) { struct delayed_work *d_work = to_delayed_work(work_req); struct mem_ctl_info *mci = to_edac_mem_ctl_work(d_work); mutex_lock(&mem_ctls_mutex); /* if this control struct has movd to offline state, we are done */ if (mci->op_state == OP_OFFLINE) { mutex_unlock(&mem_ctls_mutex); return; } /* Only poll controllers that are running polled and have a check */ if (edac_mc_assert_error_check_and_clear() && (mci->edac_check != NULL)) mci->edac_check(mci); mutex_unlock(&mem_ctls_mutex); /* Reschedule */ queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(edac_mc_get_poll_msec())); } /* * edac_mc_workq_setup * initialize a workq item for this mci * passing in the new delay period in msec * * locking model: * * called with the mem_ctls_mutex held */ static void edac_mc_workq_setup(struct mem_ctl_info *mci, unsigned msec) { edac_dbg(0, "\n"); /* if this instance is not in the POLL state, then simply return */ if (mci->op_state != OP_RUNNING_POLL) return; INIT_DELAYED_WORK(&mci->work, edac_mc_workq_function); queue_delayed_work(edac_workqueue, &mci->work, msecs_to_jiffies(msec)); } /* * edac_mc_workq_teardown * stop the workq processing on this mci * * locking model: * * called WITHOUT lock held */ static void edac_mc_workq_teardown(struct mem_ctl_info *mci) { int status; if (mci->op_state != 
OP_RUNNING_POLL) return; status = cancel_delayed_work(&mci->work); if (status == 0) { edac_dbg(0, "not canceled, flush the queue\n"); /* workq instance might be running, wait for it */ flush_workqueue(edac_workqueue); } } /* * edac_mc_reset_delay_period(unsigned long value) * * user space has updated our poll period value, need to * reset our workq delays */ void edac_mc_reset_delay_period(int value) { struct mem_ctl_info *mci; struct list_head *item; mutex_lock(&mem_ctls_mutex); /* scan the list and turn off all workq timers, doing so under lock */ list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); if (mci->op_state == OP_RUNNING_POLL) cancel_delayed_work(&mci->work); } mutex_unlock(&mem_ctls_mutex); /* re-walk the list, and reset the poll delay */ mutex_lock(&mem_ctls_mutex); list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); edac_mc_workq_setup(mci, (unsigned long) value); } mutex_unlock(&mem_ctls_mutex); } /* Return 0 on success, 1 on failure. * Before calling this function, caller must * assign a unique value to mci->mc_idx. 
* * locking model: * * called with the mem_ctls_mutex lock held */ static int add_mc_to_global_list(struct mem_ctl_info *mci) { struct list_head *item, *insert_before; struct mem_ctl_info *p; insert_before = &mc_devices; p = find_mci_by_dev(mci->pdev); if (unlikely(p != NULL)) goto fail0; list_for_each(item, &mc_devices) { p = list_entry(item, struct mem_ctl_info, link); if (p->mc_idx >= mci->mc_idx) { if (unlikely(p->mc_idx == mci->mc_idx)) goto fail1; insert_before = item; break; } } list_add_tail_rcu(&mci->link, insert_before); atomic_inc(&edac_handlers); return 0; fail0: edac_printk(KERN_WARNING, EDAC_MC, "%s (%s) %s %s already assigned %d\n", dev_name(p->pdev), edac_dev_name(mci), p->mod_name, p->ctl_name, p->mc_idx); return 1; fail1: edac_printk(KERN_WARNING, EDAC_MC, "bug in low-level driver: attempt to assign\n" " duplicate mc_idx %d in %s()\n", p->mc_idx, __func__); return 1; } static void del_mc_from_global_list(struct mem_ctl_info *mci) { atomic_dec(&edac_handlers); list_del_rcu(&mci->link); /* these are for safe removal of devices from global list while * NMI handlers may be traversing list */ synchronize_rcu(); INIT_LIST_HEAD(&mci->link); } /** * edac_mc_find: Search for a mem_ctl_info structure whose index is 'idx'. * * If found, return a pointer to the structure. * Else return NULL. * * Caller must hold mem_ctls_mutex. */ struct mem_ctl_info *edac_mc_find(int idx) { struct list_head *item; struct mem_ctl_info *mci; list_for_each(item, &mc_devices) { mci = list_entry(item, struct mem_ctl_info, link); if (mci->mc_idx >= idx) { if (mci->mc_idx == idx) return mci; break; } } return NULL; } EXPORT_SYMBOL(edac_mc_find); /** * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and * create sysfs entries associated with mci structure * @mci: pointer to the mci structure to be added to the list * * Return: * 0 Success * !0 Failure */ /* FIXME - should a warning be printed if no error detection? correction? 
*/ int edac_mc_add_mc(struct mem_ctl_info *mci) { edac_dbg(0, "\n"); #ifdef CONFIG_EDAC_DEBUG if (edac_debug_level >= 3) edac_mc_dump_mci(mci); if (edac_debug_level >= 4) { int i; for (i = 0; i < mci->nr_csrows; i++) { struct csrow_info *csrow = mci->csrows[i]; u32 nr_pages = 0; int j; for (j = 0; j < csrow->nr_channels; j++) nr_pages += csrow->channels[j]->dimm->nr_pages; if (!nr_pages) continue; edac_mc_dump_csrow(csrow); for (j = 0; j < csrow->nr_channels; j++) if (csrow->channels[j]->dimm->nr_pages) edac_mc_dump_channel(csrow->channels[j]); } for (i = 0; i < mci->tot_dimms; i++) if (mci->dimms[i]->nr_pages) edac_mc_dump_dimm(mci->dimms[i], i); } #endif mutex_lock(&mem_ctls_mutex); if (add_mc_to_global_list(mci)) goto fail0; /* set load time so that error rate can be tracked */ mci->start_time = jiffies; if (edac_create_sysfs_mci_device(mci)) { edac_mc_printk(mci, KERN_WARNING, "failed to create sysfs device\n"); goto fail1; } /* If there IS a check routine, then we are running POLLED */ if (mci->edac_check != NULL) { /* This instance is NOW RUNNING */ mci->op_state = OP_RUNNING_POLL; edac_mc_workq_setup(mci, edac_mc_get_poll_msec()); } else { mci->op_state = OP_RUNNING_INTERRUPT; } /* Report action taken */ edac_mc_printk(mci, KERN_INFO, "Giving out device to '%s' '%s':" " DEV %s\n", mci->mod_name, mci->ctl_name, edac_dev_name(mci)); mutex_unlock(&mem_ctls_mutex); return 0; fail1: del_mc_from_global_list(mci); fail0: mutex_unlock(&mem_ctls_mutex); return 1; } EXPORT_SYMBOL_GPL(edac_mc_add_mc); /** * edac_mc_del_mc: Remove sysfs entries for specified mci structure and * remove mci structure from global list * @pdev: Pointer to 'struct device' representing mci structure to remove. * * Return pointer to removed mci structure, or NULL if device not found. 
*/ struct mem_ctl_info *edac_mc_del_mc(struct device *dev) { struct mem_ctl_info *mci; edac_dbg(0, "\n"); mutex_lock(&mem_ctls_mutex); /* find the requested mci struct in the global list */ mci = find_mci_by_dev(dev); if (mci == NULL) { mutex_unlock(&mem_ctls_mutex); return NULL; } del_mc_from_global_list(mci); mutex_unlock(&mem_ctls_mutex); /* flush workq processes */ edac_mc_workq_teardown(mci); /* marking MCI offline */ mci->op_state = OP_OFFLINE; /* remove from sysfs */ edac_remove_sysfs_mci_device(mci); edac_printk(KERN_INFO, EDAC_MC, "Removed device %d for %s %s: DEV %s\n", mci->mc_idx, mci->mod_name, mci->ctl_name, edac_dev_name(mci)); return mci; } EXPORT_SYMBOL_GPL(edac_mc_del_mc); static void edac_mc_scrub_block(unsigned long page, unsigned long offset, u32 size) { struct page *pg; void *virt_addr; unsigned long flags = 0; edac_dbg(3, "\n"); /* ECC error page was not in our memory. Ignore it. */ if (!pfn_valid(page)) return; /* Find the actual page structure then map it and fix */ pg = pfn_to_page(page); if (PageHighMem(pg)) local_irq_save(flags); virt_addr = kmap_atomic(pg); /* Perform architecture specific atomic scrub operation */ atomic_scrub(virt_addr + offset, size); /* Unmap and complete */ kunmap_atomic(virt_addr); if (PageHighMem(pg)) local_irq_restore(flags); } /* FIXME - should return -1 */ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) { struct csrow_info **csrows = mci->csrows; int row, i, j, n; edac_dbg(1, "MC%d: 0x%lx\n", mci->mc_idx, page); row = -1; for (i = 0; i < mci->nr_csrows; i++) { struct csrow_info *csrow = csrows[i]; n = 0; for (j = 0; j < csrow->nr_channels; j++) { struct dimm_info *dimm = csrow->channels[j]->dimm; n += dimm->nr_pages; } if (n == 0) continue; edac_dbg(3, "MC%d: first(0x%lx) page(0x%lx) last(0x%lx) mask(0x%lx)\n", mci->mc_idx, csrow->first_page, page, csrow->last_page, csrow->page_mask); if ((page >= csrow->first_page) && (page <= csrow->last_page) && ((page & csrow->page_mask) == 
(csrow->first_page & csrow->page_mask))) { row = i; break; } } if (row == -1) edac_mc_printk(mci, KERN_ERR, "could not look up page error address %lx\n", (unsigned long)page); return row; } EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); const char *edac_layer_name[] = { [EDAC_MC_LAYER_BRANCH] = "branch", [EDAC_MC_LAYER_CHANNEL] = "channel", [EDAC_MC_LAYER_SLOT] = "slot", [EDAC_MC_LAYER_CHIP_SELECT] = "csrow", }; EXPORT_SYMBOL_GPL(edac_layer_name); static void edac_inc_ce_error(struct mem_ctl_info *mci, bool enable_per_layer_report, const int pos[EDAC_MAX_LAYERS], const u16 count) { int i, index = 0; mci->ce_mc += count; if (!enable_per_layer_report) { mci->ce_noinfo_count += count; return; } for (i = 0; i < mci->n_layers; i++) { if (pos[i] < 0) break; index += pos[i]; mci->ce_per_layer[i][index] += count; if (i < mci->n_layers - 1) index *= mci->layers[i + 1].size; } } static void edac_inc_ue_error(struct mem_ctl_info *mci, bool enable_per_layer_report, const int pos[EDAC_MAX_LAYERS], const u16 count) { int i, index = 0; mci->ue_mc += count; if (!enable_per_layer_report) { mci->ce_noinfo_count += count; return; } for (i = 0; i < mci->n_layers; i++) { if (pos[i] < 0) break; index += pos[i]; mci->ue_per_layer[i][index] += count; if (i < mci->n_layers - 1) index *= mci->layers[i + 1].size; } } static void edac_ce_error(struct mem_ctl_info *mci, const u16 error_count, const int pos[EDAC_MAX_LAYERS], const char *msg, const char *location, const char *label, const char *detail, const char *other_detail, const bool enable_per_layer_report, const unsigned long page_frame_number, const unsigned long offset_in_page, long grain) { unsigned long remapped_page; if (edac_mc_get_log_ce()) { if (other_detail && *other_detail) edac_mc_printk(mci, KERN_WARNING, "%d CE %s on %s (%s %s - %s)\n", error_count, msg, label, location, detail, other_detail); else edac_mc_printk(mci, KERN_WARNING, "%d CE %s on %s (%s %s)\n", error_count, msg, label, location, detail); } 
edac_inc_ce_error(mci, enable_per_layer_report, pos, error_count); if (mci->scrub_mode & SCRUB_SW_SRC) { /* * Some memory controllers (called MCs below) can remap * memory so that it is still available at a different * address when PCI devices map into memory. * MC's that can't do this, lose the memory where PCI * devices are mapped. This mapping is MC-dependent * and so we call back into the MC driver for it to * map the MC page to a physical (CPU) page which can * then be mapped to a virtual page - which can then * be scrubbed. */ remapped_page = mci->ctl_page_to_phys ? mci->ctl_page_to_phys(mci, page_frame_number) : page_frame_number; edac_mc_scrub_block(remapped_page, offset_in_page, grain); } } static void edac_ue_error(struct mem_ctl_info *mci, const u16 error_count, const int pos[EDAC_MAX_LAYERS], const char *msg, const char *location, const char *label, const char *detail, const char *other_detail, const bool enable_per_layer_report) { if (edac_mc_get_log_ue()) { if (other_detail && *other_detail) edac_mc_printk(mci, KERN_WARNING, "%d UE %s on %s (%s %s - %s)\n", error_count, msg, label, location, detail, other_detail); else edac_mc_printk(mci, KERN_WARNING, "%d UE %s on %s (%s %s)\n", error_count, msg, label, location, detail); } if (edac_mc_get_panic_on_ue()) { if (other_detail && *other_detail) panic("UE %s on %s (%s%s - %s)\n", msg, label, location, detail, other_detail); else panic("UE %s on %s (%s%s)\n", msg, label, location, detail); } edac_inc_ue_error(mci, enable_per_layer_report, pos, error_count); } #define OTHER_LABEL " or " /** * edac_mc_handle_error - reports a memory event to userspace * * @type: severity of the error (CE/UE/Fatal) * @mci: a struct mem_ctl_info pointer * @error_count: Number of errors of the same type * @page_frame_number: mem page where the error occurred * @offset_in_page: offset of the error inside the page * @syndrome: ECC syndrome * @top_layer: Memory layer[0] position * @mid_layer: Memory layer[1] position * @low_layer: 
Memory layer[2] position * @msg: Message meaningful to the end users that * explains the event * @other_detail: Technical details about the event that * may help hardware manufacturers and * EDAC developers to analyse the event */ void edac_mc_handle_error(const enum hw_event_mc_err_type type, struct mem_ctl_info *mci, const u16 error_count, const unsigned long page_frame_number, const unsigned long offset_in_page, const unsigned long syndrome, const int top_layer, const int mid_layer, const int low_layer, const char *msg, const char *other_detail) { /* FIXME: too much for stack: move it to some pre-alocated area */ char detail[80], location[80]; char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms]; char *p; int row = -1, chan = -1; int pos[EDAC_MAX_LAYERS] = { top_layer, mid_layer, low_layer }; int i; long grain; bool enable_per_layer_report = false; u8 grain_bits; edac_dbg(3, "MC%d\n", mci->mc_idx); /* * Check if the event report is consistent and if the memory * location is known. If it is known, enable_per_layer_report will be * true, the DIMM(s) label info will be filled and the per-layer * error counters will be incremented. */ for (i = 0; i < mci->n_layers; i++) { if (pos[i] >= (int)mci->layers[i].size) { if (type == HW_EVENT_ERR_CORRECTED) p = "CE"; else p = "UE"; edac_mc_printk(mci, KERN_ERR, "INTERNAL ERROR: %s value is out of range (%d >= %d)\n", edac_layer_name[mci->layers[i].type], pos[i], mci->layers[i].size); /* * Instead of just returning it, let's use what's * known about the error. The increment routines and * the DIMM filter logic will do the right thing by * pointing the likely damaged DIMMs. */ pos[i] = -1; } if (pos[i] >= 0) enable_per_layer_report = true; } /* * Get the dimm label/grain that applies to the match criteria. * As the error algorithm may not be able to point to just one memory * stick, the logic here will get all possible labels that could * pottentially be affected by the error. 
* On FB-DIMM memory controllers, for uncorrected errors, it is common * to have only the MC channel and the MC dimm (also called "branch") * but the channel is not known, as the memory is arranged in pairs, * where each memory belongs to a separate channel within the same * branch. */ grain = 0; p = label; *p = '\0'; for (i = 0; i < mci->tot_dimms; i++) { struct dimm_info *dimm = mci->dimms[i]; if (top_layer >= 0 && top_layer != dimm->location[0]) continue; if (mid_layer >= 0 && mid_layer != dimm->location[1]) continue; if (low_layer >= 0 && low_layer != dimm->location[2]) continue; /* get the max grain, over the error match range */ if (dimm->grain > grain) grain = dimm->grain; /* * If the error is memory-controller wide, there's no need to * seek for the affected DIMMs because the whole * channel/memory controller/... may be affected. * Also, don't show errors for empty DIMM slots. */ if (enable_per_layer_report && dimm->nr_pages) { if (p != label) { strcpy(p, OTHER_LABEL); p += strlen(OTHER_LABEL); } strcpy(p, dimm->label); p += strlen(p); *p = '\0'; /* * get csrow/channel of the DIMM, in order to allow * incrementing the compat API counters */ edac_dbg(4, "%s csrows map: (%d,%d)\n", mci->mem_is_per_rank ? 
"rank" : "dimm", dimm->csrow, dimm->cschannel); if (row == -1) row = dimm->csrow; else if (row >= 0 && row != dimm->csrow) row = -2; if (chan == -1) chan = dimm->cschannel; else if (chan >= 0 && chan != dimm->cschannel) chan = -2; } } if (!enable_per_layer_report) { strcpy(label, "any memory"); } else { edac_dbg(4, "csrow/channel to increment: (%d,%d)\n", row, chan); if (p == label) strcpy(label, "unknown memory"); if (type == HW_EVENT_ERR_CORRECTED) { if (row >= 0) { mci->csrows[row]->ce_count += error_count; if (chan >= 0) mci->csrows[row]->channels[chan]->ce_count += error_count; } } else if (row >= 0) mci->csrows[row]->ue_count += error_count; } /* Fill the RAM location data */ p = location; for (i = 0; i < mci->n_layers; i++) { if (pos[i] < 0) continue; p += sprintf(p, "%s:%d ", edac_layer_name[mci->layers[i].type], pos[i]); } if (p > location) *(p - 1) = '\0'; /* Report the error via the trace interface */ grain_bits = fls_long(grain) + 1; trace_mc_event(type, msg, label, error_count, mci->mc_idx, top_layer, mid_layer, low_layer, PAGES_TO_MiB(page_frame_number) | offset_in_page, grain_bits, syndrome, other_detail); /* Memory type dependent details about the error */ if (type == HW_EVENT_ERR_CORRECTED) { snprintf(detail, sizeof(detail), "page:0x%lx offset:0x%lx grain:%ld syndrome:0x%lx", page_frame_number, offset_in_page, grain, syndrome); edac_ce_error(mci, error_count, pos, msg, location, label, detail, other_detail, enable_per_layer_report, page_frame_number, offset_in_page, grain); } else { snprintf(detail, sizeof(detail), "page:0x%lx offset:0x%lx grain:%ld", page_frame_number, offset_in_page, grain); edac_ue_error(mci, error_count, pos, msg, location, label, detail, other_detail, enable_per_layer_report); } } EXPORT_SYMBOL_GPL(edac_mc_handle_error);
gpl-2.0
pengdonglin137/linux-3-14-y
drivers/clk/clk-si570.c
288
13540
/* * Driver for Silicon Labs Si570/Si571 Programmable XO/VCXO * * Copyright (C) 2010, 2011 Ericsson AB. * Copyright (C) 2011 Guenter Roeck. * Copyright (C) 2011 - 2013 Xilinx Inc. * * Author: Guenter Roeck <guenter.roeck@ericsson.com> * Sören Brinkmann <soren.brinkmann@xilinx.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/clk-provider.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/i2c.h> #include <linux/regmap.h> #include <linux/slab.h> /* Si570 registers */ #define SI570_REG_HS_N1 7 #define SI570_REG_N1_RFREQ0 8 #define SI570_REG_RFREQ1 9 #define SI570_REG_RFREQ2 10 #define SI570_REG_RFREQ3 11 #define SI570_REG_RFREQ4 12 #define SI570_REG_CONTROL 135 #define SI570_REG_FREEZE_DCO 137 #define SI570_DIV_OFFSET_7PPM 6 #define HS_DIV_SHIFT 5 #define HS_DIV_MASK 0xe0 #define HS_DIV_OFFSET 4 #define N1_6_2_MASK 0x1f #define N1_1_0_MASK 0xc0 #define RFREQ_37_32_MASK 0x3f #define SI570_MIN_FREQ 10000000L #define SI570_MAX_FREQ 1417500000L #define SI598_MAX_FREQ 525000000L #define FDCO_MIN 4850000000LL #define FDCO_MAX 5670000000LL #define SI570_CNTRL_RECALL (1 << 0) #define SI570_CNTRL_FREEZE_M (1 << 5) #define SI570_CNTRL_NEWFREQ (1 << 6) #define SI570_FREEZE_DCO (1 << 4) /** * struct clk_si570: * @hw: Clock hw struct * @regmap: Device's regmap * @div_offset: Rgister offset for dividers * @max_freq: Maximum frequency for this device * @fxtal: Factory xtal frequency * @n1: Clock divider N1 * @hs_div: Clock divider HSDIV * @rfreq: Clock multiplier RFREQ * @frequency: Current output 
frequency * @i2c_client: I2C client pointer */ struct clk_si570 { struct clk_hw hw; struct regmap *regmap; unsigned int div_offset; u64 max_freq; u64 fxtal; unsigned int n1; unsigned int hs_div; u64 rfreq; u64 frequency; struct i2c_client *i2c_client; }; #define to_clk_si570(_hw) container_of(_hw, struct clk_si570, hw) enum clk_si570_variant { si57x, si59x }; /** * si570_get_divs() - Read clock dividers from HW * @data: Pointer to struct clk_si570 * @rfreq: Fractional multiplier (output) * @n1: Divider N1 (output) * @hs_div: Divider HSDIV (output) * Returns 0 on success, negative errno otherwise. * * Retrieve clock dividers and multipliers from the HW. */ static int si570_get_divs(struct clk_si570 *data, u64 *rfreq, unsigned int *n1, unsigned int *hs_div) { int err; u8 reg[6]; u64 tmp; err = regmap_bulk_read(data->regmap, SI570_REG_HS_N1 + data->div_offset, reg, ARRAY_SIZE(reg)); if (err) return err; *hs_div = ((reg[0] & HS_DIV_MASK) >> HS_DIV_SHIFT) + HS_DIV_OFFSET; *n1 = ((reg[0] & N1_6_2_MASK) << 2) + ((reg[1] & N1_1_0_MASK) >> 6) + 1; /* Handle invalid cases */ if (*n1 > 1) *n1 &= ~1; tmp = reg[1] & RFREQ_37_32_MASK; tmp = (tmp << 8) + reg[2]; tmp = (tmp << 8) + reg[3]; tmp = (tmp << 8) + reg[4]; tmp = (tmp << 8) + reg[5]; *rfreq = tmp; return 0; } /** * si570_get_defaults() - Get default values * @data: Driver data structure * @fout: Factory frequency output * Returns 0 on success, negative errno otherwise. */ static int si570_get_defaults(struct clk_si570 *data, u64 fout) { int err; u64 fdco; regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_RECALL); err = si570_get_divs(data, &data->rfreq, &data->n1, &data->hs_div); if (err) return err; /* * Accept optional precision loss to avoid arithmetic overflows. * Acceptable per Silicon Labs Application Note AN334. 
*/ fdco = fout * data->n1 * data->hs_div; if (fdco >= (1LL << 36)) data->fxtal = div64_u64(fdco << 24, data->rfreq >> 4); else data->fxtal = div64_u64(fdco << 28, data->rfreq); data->frequency = fout; return 0; } /** * si570_update_rfreq() - Update clock multiplier * @data: Driver data structure * Passes on regmap_bulk_write() return value. */ static int si570_update_rfreq(struct clk_si570 *data) { u8 reg[5]; reg[0] = ((data->n1 - 1) << 6) | ((data->rfreq >> 32) & RFREQ_37_32_MASK); reg[1] = (data->rfreq >> 24) & 0xff; reg[2] = (data->rfreq >> 16) & 0xff; reg[3] = (data->rfreq >> 8) & 0xff; reg[4] = data->rfreq & 0xff; return regmap_bulk_write(data->regmap, SI570_REG_N1_RFREQ0 + data->div_offset, reg, ARRAY_SIZE(reg)); } /** * si570_calc_divs() - Caluclate clock dividers * @frequency: Target frequency * @data: Driver data structure * @out_rfreq: RFREG fractional multiplier (output) * @out_n1: Clock divider N1 (output) * @out_hs_div: Clock divider HSDIV (output) * Returns 0 on success, negative errno otherwise. * * Calculate the clock dividers (@out_hs_div, @out_n1) and clock multiplier * (@out_rfreq) for a given target @frequency. */ static int si570_calc_divs(unsigned long frequency, struct clk_si570 *data, u64 *out_rfreq, unsigned int *out_n1, unsigned int *out_hs_div) { int i; unsigned int n1, hs_div; u64 fdco, best_fdco = ULLONG_MAX; static const uint8_t si570_hs_div_values[] = { 11, 9, 7, 6, 5, 4 }; for (i = 0; i < ARRAY_SIZE(si570_hs_div_values); i++) { hs_div = si570_hs_div_values[i]; /* Calculate lowest possible value for n1 */ n1 = div_u64(div_u64(FDCO_MIN, hs_div), frequency); if (!n1 || (n1 & 1)) n1++; while (n1 <= 128) { fdco = (u64)frequency * (u64)hs_div * (u64)n1; if (fdco > FDCO_MAX) break; if (fdco >= FDCO_MIN && fdco < best_fdco) { *out_n1 = n1; *out_hs_div = hs_div; *out_rfreq = div64_u64(fdco << 28, data->fxtal); best_fdco = fdco; } n1 += (n1 == 1 ? 
1 : 2); } } if (best_fdco == ULLONG_MAX) return -EINVAL; return 0; } static unsigned long si570_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { int err; u64 rfreq, rate; unsigned int n1, hs_div; struct clk_si570 *data = to_clk_si570(hw); err = si570_get_divs(data, &rfreq, &n1, &hs_div); if (err) { dev_err(&data->i2c_client->dev, "unable to recalc rate\n"); return data->frequency; } rfreq = div_u64(rfreq, hs_div * n1); rate = (data->fxtal * rfreq) >> 28; return rate; } static long si570_round_rate(struct clk_hw *hw, unsigned long rate, unsigned long *parent_rate) { int err; u64 rfreq; unsigned int n1, hs_div; struct clk_si570 *data = to_clk_si570(hw); if (!rate) return 0; if (div64_u64(abs(rate - data->frequency) * 10000LL, data->frequency) < 35) { rfreq = div64_u64((data->rfreq * rate) + div64_u64(data->frequency, 2), data->frequency); n1 = data->n1; hs_div = data->hs_div; } else { err = si570_calc_divs(rate, data, &rfreq, &n1, &hs_div); if (err) { dev_err(&data->i2c_client->dev, "unable to round rate\n"); return 0; } } return rate; } /** * si570_set_frequency() - Adjust output frequency * @data: Driver data structure * @frequency: Target frequency * Returns 0 on success. * * Update output frequency for big frequency changes (> 3,500 ppm). 
*/ static int si570_set_frequency(struct clk_si570 *data, unsigned long frequency) { int err; err = si570_calc_divs(frequency, data, &data->rfreq, &data->n1, &data->hs_div); if (err) return err; /* * The DCO reg should be accessed with a read-modify-write operation * per AN334 */ regmap_write(data->regmap, SI570_REG_FREEZE_DCO, SI570_FREEZE_DCO); regmap_write(data->regmap, SI570_REG_HS_N1 + data->div_offset, ((data->hs_div - HS_DIV_OFFSET) << HS_DIV_SHIFT) | (((data->n1 - 1) >> 2) & N1_6_2_MASK)); si570_update_rfreq(data); regmap_write(data->regmap, SI570_REG_FREEZE_DCO, 0); regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_NEWFREQ); /* Applying a new frequency can take up to 10ms */ usleep_range(10000, 12000); return 0; } /** * si570_set_frequency_small() - Adjust output frequency * @data: Driver data structure * @frequency: Target frequency * Returns 0 on success. * * Update output frequency for small frequency changes (< 3,500 ppm). */ static int si570_set_frequency_small(struct clk_si570 *data, unsigned long frequency) { /* * This is a re-implementation of DIV_ROUND_CLOSEST * using the div64_u64 function lieu of letting the compiler * insert EABI calls */ data->rfreq = div64_u64((data->rfreq * frequency) + div_u64(data->frequency, 2), data->frequency); regmap_write(data->regmap, SI570_REG_CONTROL, SI570_CNTRL_FREEZE_M); si570_update_rfreq(data); regmap_write(data->regmap, SI570_REG_CONTROL, 0); /* Applying a new frequency (small change) can take up to 100us */ usleep_range(100, 200); return 0; } static int si570_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_si570 *data = to_clk_si570(hw); struct i2c_client *client = data->i2c_client; int err; if (rate < SI570_MIN_FREQ || rate > data->max_freq) { dev_err(&client->dev, "requested frequency %lu Hz is out of range\n", rate); return -EINVAL; } if (div64_u64(abs(rate - data->frequency) * 10000LL, data->frequency) < 35) err = si570_set_frequency_small(data, rate); else 
err = si570_set_frequency(data, rate); if (err) return err; data->frequency = rate; return 0; } static const struct clk_ops si570_clk_ops = { .recalc_rate = si570_recalc_rate, .round_rate = si570_round_rate, .set_rate = si570_set_rate, }; static bool si570_regmap_is_volatile(struct device *dev, unsigned int reg) { switch (reg) { case SI570_REG_CONTROL: return true; default: return false; } } static bool si570_regmap_is_writeable(struct device *dev, unsigned int reg) { switch (reg) { case SI570_REG_HS_N1 ... (SI570_REG_RFREQ4 + SI570_DIV_OFFSET_7PPM): case SI570_REG_CONTROL: case SI570_REG_FREEZE_DCO: return true; default: return false; } } static struct regmap_config si570_regmap_config = { .reg_bits = 8, .val_bits = 8, .cache_type = REGCACHE_RBTREE, .max_register = 137, .writeable_reg = si570_regmap_is_writeable, .volatile_reg = si570_regmap_is_volatile, }; static int si570_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct clk_si570 *data; struct clk_init_data init; struct clk *clk; u32 initial_fout, factory_fout, stability; int err; enum clk_si570_variant variant = id->driver_data; data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; init.ops = &si570_clk_ops; init.flags = CLK_IS_ROOT; init.num_parents = 0; data->hw.init = &init; data->i2c_client = client; if (variant == si57x) { err = of_property_read_u32(client->dev.of_node, "temperature-stability", &stability); if (err) { dev_err(&client->dev, "'temperature-stability' property missing\n"); return err; } /* adjust register offsets for 7ppm devices */ if (stability == 7) data->div_offset = SI570_DIV_OFFSET_7PPM; data->max_freq = SI570_MAX_FREQ; } else { data->max_freq = SI598_MAX_FREQ; } if (of_property_read_string(client->dev.of_node, "clock-output-names", &init.name)) init.name = client->dev.of_node->name; err = of_property_read_u32(client->dev.of_node, "factory-fout", &factory_fout); if (err) { dev_err(&client->dev, "'factory-fout' property 
missing\n"); return err; } data->regmap = devm_regmap_init_i2c(client, &si570_regmap_config); if (IS_ERR(data->regmap)) { dev_err(&client->dev, "failed to allocate register map\n"); return PTR_ERR(data->regmap); } i2c_set_clientdata(client, data); err = si570_get_defaults(data, factory_fout); if (err) return err; clk = devm_clk_register(&client->dev, &data->hw); if (IS_ERR(clk)) { dev_err(&client->dev, "clock registration failed\n"); return PTR_ERR(clk); } err = of_clk_add_provider(client->dev.of_node, of_clk_src_simple_get, clk); if (err) { dev_err(&client->dev, "unable to add clk provider\n"); return err; } /* Read the requested initial output frequency from device tree */ if (!of_property_read_u32(client->dev.of_node, "clock-frequency", &initial_fout)) { err = clk_set_rate(clk, initial_fout); if (err) { of_clk_del_provider(client->dev.of_node); return err; } } /* Display a message indicating that we've successfully registered */ dev_info(&client->dev, "registered, current frequency %llu Hz\n", data->frequency); return 0; } static int si570_remove(struct i2c_client *client) { of_clk_del_provider(client->dev.of_node); return 0; } static const struct i2c_device_id si570_id[] = { { "si570", si57x }, { "si571", si57x }, { "si598", si59x }, { "si599", si59x }, { } }; MODULE_DEVICE_TABLE(i2c, si570_id); static const struct of_device_id clk_si570_of_match[] = { { .compatible = "silabs,si570" }, { .compatible = "silabs,si571" }, { .compatible = "silabs,si598" }, { .compatible = "silabs,si599" }, { }, }; MODULE_DEVICE_TABLE(of, clk_si570_of_match); static struct i2c_driver si570_driver = { .driver = { .name = "si570", .of_match_table = clk_si570_of_match, }, .probe = si570_probe, .remove = si570_remove, .id_table = si570_id, }; module_i2c_driver(si570_driver); MODULE_AUTHOR("Guenter Roeck <guenter.roeck@ericsson.com>"); MODULE_AUTHOR("Soeren Brinkmann <soren.brinkmann@xilinx.com"); MODULE_DESCRIPTION("Si570 driver"); MODULE_LICENSE("GPL");
gpl-2.0
srfarias/cexstel-kernel
drivers/media/video/msm_vidc/msm_venc.c
288
36982
/* Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/slab.h> #include "msm_vidc_internal.h" #include "msm_vidc_common.h" #include "vidc_hal_api.h" #include "msm_smem.h" #define MSM_VENC_DVC_NAME "msm_venc_8974" #define DEFAULT_HEIGHT 720 #define DEFAULT_WIDTH 1280 #define MIN_NUM_OUTPUT_BUFFERS 2 #define MAX_NUM_OUTPUT_BUFFERS 8 #define MIN_BIT_RATE 64 #define MAX_BIT_RATE 20000 #define DEFAULT_BIT_RATE 64 #define BIT_RATE_STEP 1 #define MIN_FRAME_RATE 1 #define MAX_FRAME_RATE 120 #define DEFAULT_FRAME_RATE 30 #define MAX_SLICE_BYTE_SIZE 1024 #define MIN_SLICE_BYTE_SIZE 1024 #define MAX_SLICE_MB_SIZE 300 #define I_FRAME_QP 26 #define P_FRAME_QP 28 #define B_FRAME_QP 30 #define MAX_INTRA_REFRESH_MBS 300 #define L_MODE V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED_AT_SLICE_BOUNDARY #define CODING V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_CODING_EFFICIENCY static const char *const mpeg_video_rate_control[] = { "No Rate Control", "VBR VFR", "VBR CFR", "CBR VFR", "CBR CFR", NULL }; static const char *const mpeg_video_rotation[] = { "No Rotation", "90 Degree Rotation", "180 Degree Rotation", "270 Degree Rotation", NULL }; static const char *const h264_video_entropy_cabac_model[] = { "Model 0", "Model 1", "Model 2", NULL }; static const struct msm_vidc_ctrl msm_venc_ctrls[] = { { .id = V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE, .name = "Frame Rate", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = MIN_FRAME_RATE, .maximum = MAX_FRAME_RATE, .default_value = DEFAULT_FRAME_RATE, .step = 1, .menu_skip_mask = 0, .qmenu = 
NULL, }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD, .name = "IDR Period", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 0, .maximum = 10*MAX_FRAME_RATE, .default_value = DEFAULT_FRAME_RATE, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES, .name = "Intra Period for P frames", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 0, .maximum = 10*DEFAULT_FRAME_RATE, .default_value = 2*DEFAULT_FRAME_RATE-1, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES, .name = "Intra Period for B frames", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 0, .maximum = 10*DEFAULT_FRAME_RATE, .default_value = 0, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME, .name = "Request I Frame", .type = V4L2_CTRL_TYPE_BOOLEAN, .minimum = 0, .maximum = 1, .default_value = 0, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL, .name = "Rate Control", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF, .maximum = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR, .default_value = V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF, .step = 0, .menu_skip_mask = ~( (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_OFF) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_VFR) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_VBR_CFR) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_VFR) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL_CBR_CFR) ), .qmenu = mpeg_video_rate_control, }, { .id = V4L2_CID_MPEG_VIDEO_BITRATE, .name = "Bit Rate", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = MIN_BIT_RATE, .maximum = MAX_BIT_RATE, .default_value = DEFAULT_BIT_RATE, .step = BIT_RATE_STEP, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE, .name = "Entropy Mode", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC, .maximum = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC, 
.default_value = V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC, .step = 0, .menu_skip_mask = ~( (1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CAVLC) | (1 << V4L2_MPEG_VIDEO_H264_ENTROPY_MODE_CABAC) ), }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL, .name = "CABAC Model", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0, .maximum = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_1, .default_value = V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0, .step = 0, .menu_skip_mask = ~( (1 << V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_0) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_1) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL_2) ), .qmenu = h264_video_entropy_cabac_model, }, { .id = V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE, .name = "MPEG4 Profile", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE, .maximum = CODING, .default_value = V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE, .step = 1, .menu_skip_mask = 0, }, { .id = V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL, .name = "MPEG4 Level", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0, .maximum = V4L2_MPEG_VIDEO_MPEG4_LEVEL_5, .default_value = V4L2_MPEG_VIDEO_MPEG4_LEVEL_0, .step = 1, .menu_skip_mask = 0, }, { .id = V4L2_CID_MPEG_VIDEO_H264_PROFILE, .name = "H264 Profile", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, .maximum = V4L2_MPEG_VIDEO_H264_PROFILE_MULTIVIEW_HIGH, .default_value = V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE, .step = 1, .menu_skip_mask = 0, }, { .id = V4L2_CID_MPEG_VIDEO_H264_LEVEL, .name = "H264 Level", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_MPEG_VIDEO_H264_LEVEL_1_0, .maximum = V4L2_MPEG_VIDEO_H264_LEVEL_5_1, .default_value = V4L2_MPEG_VIDEO_H264_LEVEL_1_0, .step = 1, .menu_skip_mask = 0, }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION, .name = "Rotation", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE, .maximum = V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270, .default_value = 
V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE, .step = 0, .menu_skip_mask = ~( (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_NONE) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_180) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270) ), .qmenu = mpeg_video_rotation, }, { .id = V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP, .name = "I Frame Quantization", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 1, .maximum = 51, .default_value = I_FRAME_QP, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP, .name = "P Frame Quantization", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 1, .maximum = 51, .default_value = P_FRAME_QP, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP, .name = "B Frame Quantization", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 1, .maximum = 51, .default_value = B_FRAME_QP, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE, .name = "Slice Mode", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE, .maximum = V4L2_MPEG_VIDEO_MULTI_SICE_MODE_MAX_BYTES, .default_value = V4L2_MPEG_VIDEO_MULTI_SLICE_MODE_SINGLE, .step = 1, .menu_skip_mask = 0, }, { .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES, .name = "Slice Byte Size", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = MIN_SLICE_BYTE_SIZE, .maximum = MAX_SLICE_BYTE_SIZE, .default_value = 0, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB, .name = "Slice MB Size", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 1, .maximum = MAX_SLICE_MB_SIZE, .default_value = 0, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE, .name = "Intra Refresh Mode", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE, .maximum = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM, .default_value = V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE, .step = 
0, .menu_skip_mask = ~( (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_NONE) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_ADAPTIVE) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_CYCLIC_ADAPTIVE) | (1 << V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_RANDOM) ), }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS, .name = "Intra Refresh AIR MBS", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 0, .maximum = MAX_INTRA_REFRESH_MBS, .default_value = 0, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF, .name = "Intra Refresh AIR REF", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 0, .maximum = MAX_INTRA_REFRESH_MBS, .default_value = 0, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS, .name = "Intra Refresh CIR MBS", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = 0, .maximum = MAX_INTRA_REFRESH_MBS, .default_value = 0, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA, .name = "H.264 Loop Filter Alpha Offset", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = -6, .maximum = 6, .default_value = 0, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA, .name = "H.264 Loop Filter Beta Offset", .type = V4L2_CTRL_TYPE_INTEGER, .minimum = -6, .maximum = 6, .default_value = 0, .step = 1, .menu_skip_mask = 0, .qmenu = NULL, }, { .id = V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE, .name = "H.264 Loop Filter Mode", .type = V4L2_CTRL_TYPE_MENU, .minimum = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED, .maximum = L_MODE, .default_value = V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED, .step = 1, .menu_skip_mask = ~( (1 << V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_ENABLED) | (1 << V4L2_MPEG_VIDEO_H264_LOOP_FILTER_MODE_DISABLED) | (1 << L_MODE) ), }, }; #define NUM_CTRLS ARRAY_SIZE(msm_venc_ctrls) static u32 get_frame_size_nv12(int plane, u32 height, u32 width) { return ((height + 31) & (~31)) 
* ((width + 31) & (~31)) * 3/2; } static u32 get_frame_size_nv21(int plane, u32 height, u32 width) { return height * width * 2; } static u32 get_frame_size_compressed(int plane, u32 height, u32 width) { int sz = ((height + 31) & (~31)) * ((width + 31) & (~31)) * 3/2; sz = (sz + 4095) & (~4095); return sz; } static struct hal_quantization venc_quantization = {I_FRAME_QP, P_FRAME_QP, B_FRAME_QP}; static struct hal_intra_period venc_intra_period = {2*DEFAULT_FRAME_RATE-1 , 0}; static struct hal_profile_level venc_h264_profile_level = {HAL_H264_PROFILE_BASELINE, HAL_H264_LEVEL_1}; static struct hal_profile_level venc_mpeg4_profile_level = {HAL_H264_PROFILE_BASELINE, HAL_H264_LEVEL_1}; static struct hal_h264_entropy_control venc_h264_entropy_control = {HAL_H264_ENTROPY_CAVLC, HAL_H264_CABAC_MODEL_0}; static struct hal_multi_slice_control venc_multi_slice_control = {HAL_MULTI_SLICE_OFF , 0}; static const struct msm_vidc_format venc_formats[] = { { .name = "YCbCr Semiplanar 4:2:0", .description = "Y/CbCr 4:2:0", .fourcc = V4L2_PIX_FMT_NV12, .num_planes = 1, .get_frame_size = get_frame_size_nv12, .type = OUTPUT_PORT, }, { .name = "Mpeg4", .description = "Mpeg4 compressed format", .fourcc = V4L2_PIX_FMT_MPEG4, .num_planes = 1, .get_frame_size = get_frame_size_compressed, .type = CAPTURE_PORT, }, { .name = "H263", .description = "H263 compressed format", .fourcc = V4L2_PIX_FMT_H263, .num_planes = 1, .get_frame_size = get_frame_size_compressed, .type = CAPTURE_PORT, }, { .name = "H264", .description = "H264 compressed format", .fourcc = V4L2_PIX_FMT_H264, .num_planes = 1, .get_frame_size = get_frame_size_compressed, .type = CAPTURE_PORT, }, { .name = "YCrCb Semiplanar 4:2:0", .description = "Y/CrCb 4:2:0", .fourcc = V4L2_PIX_FMT_NV21, .num_planes = 1, .get_frame_size = get_frame_size_nv21, .type = OUTPUT_PORT, }, }; static int msm_venc_queue_setup(struct vb2_queue *q, const struct v4l2_format *fmt, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], 
void *alloc_ctxs[]) { int i, rc = 0; struct msm_vidc_inst *inst; struct hal_frame_size frame_sz; unsigned long flags; if (!q || !q->drv_priv) { pr_err("Invalid input, q = %p\n", q); return -EINVAL; } inst = q->drv_priv; switch (q->type) { case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: *num_planes = 1; if (*num_buffers < MIN_NUM_OUTPUT_BUFFERS || *num_buffers > MAX_NUM_OUTPUT_BUFFERS) *num_buffers = MIN_NUM_OUTPUT_BUFFERS; for (i = 0; i < *num_planes; i++) { sizes[i] = inst->fmts[OUTPUT_PORT]->get_frame_size( i, inst->height, inst->width); } break; case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE); if (rc) { pr_err("Failed to open instance\n"); break; } frame_sz.buffer_type = HAL_BUFFER_INPUT; frame_sz.width = inst->width; frame_sz.height = inst->height; pr_debug("width = %d, height = %d\n", frame_sz.width, frame_sz.height); rc = vidc_hal_session_set_property((void *)inst->session, HAL_PARAM_FRAME_SIZE, &frame_sz); if (rc) { pr_err("Failed to set framesize for Output port\n"); break; } frame_sz.buffer_type = HAL_BUFFER_OUTPUT; rc = vidc_hal_session_set_property((void *)inst->session, HAL_PARAM_FRAME_SIZE, &frame_sz); if (rc) { pr_err("Failed to set framesize for Capture port\n"); break; } rc = msm_comm_try_get_bufreqs(inst); if (rc) { pr_err("Failed to get buffer requirements: %d\n", rc); break; } *num_planes = 1; spin_lock_irqsave(&inst->lock, flags); *num_buffers = inst->buff_req.buffer[0].buffer_count_actual; spin_unlock_irqrestore(&inst->lock, flags); pr_debug("size = %d, alignment = %d, count = %d\n", inst->buff_req.buffer[0].buffer_size, inst->buff_req.buffer[0].buffer_alignment, inst->buff_req.buffer[0].buffer_count_actual); for (i = 0; i < *num_planes; i++) { sizes[i] = inst->fmts[CAPTURE_PORT]->get_frame_size( i, inst->height, inst->width); } break; default: pr_err("Invalid q type = %d\n", q->type); rc = -EINVAL; break; } return rc; } static inline int start_streaming(struct msm_vidc_inst *inst) { int rc = 0; unsigned long 
flags; struct vb2_buf_entry *temp; struct list_head *ptr, *next; rc = msm_comm_set_scratch_buffers(inst); if (rc) { pr_err("Failed to set scratch buffers: %d\n", rc); goto fail_start; } rc = msm_comm_try_state(inst, MSM_VIDC_START_DONE); if (rc) { pr_err("Failed to move inst: %p to start done state\n", inst); goto fail_start; } spin_lock_irqsave(&inst->lock, flags); if (!list_empty(&inst->pendingq)) { list_for_each_safe(ptr, next, &inst->pendingq) { temp = list_entry(ptr, struct vb2_buf_entry, list); rc = msm_comm_qbuf(temp->vb); if (rc) { pr_err("Failed to qbuf to hardware\n"); break; } list_del(&temp->list); kfree(temp); } } spin_unlock_irqrestore(&inst->lock, flags); return rc; fail_start: return rc; } static int msm_venc_start_streaming(struct vb2_queue *q, unsigned int count) { struct msm_vidc_inst *inst; int rc = 0; if (!q || !q->drv_priv) { pr_err("Invalid input, q = %p\n", q); return -EINVAL; } inst = q->drv_priv; pr_debug("Streamon called on: %d capability\n", q->type); switch (q->type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: if (inst->vb2_bufq[CAPTURE_PORT].streaming) rc = start_streaming(inst); break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: if (inst->vb2_bufq[OUTPUT_PORT].streaming) rc = start_streaming(inst); break; default: pr_err("Q-type is not supported: %d\n", q->type); rc = -EINVAL; break; } return rc; } static int msm_venc_stop_streaming(struct vb2_queue *q) { struct msm_vidc_inst *inst; int rc = 0; if (!q || !q->drv_priv) { pr_err("Invalid input, q = %p\n", q); return -EINVAL; } inst = q->drv_priv; pr_debug("Streamoff called on: %d capability\n", q->type); switch (q->type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: rc = msm_comm_try_state(inst, MSM_VIDC_CLOSE_DONE); break; default: pr_err("Q-type is not supported: %d\n", q->type); rc = -EINVAL; break; } if (rc) pr_err("Failed to move inst: %p, cap = %d to state: %d\n", inst, q->type, MSM_VIDC_CLOSE_DONE); return rc; } static void 
msm_venc_buf_queue(struct vb2_buffer *vb) { int rc; rc = msm_comm_qbuf(vb); if (rc) pr_err("Failed to queue buffer: %d\n", rc); } static const struct vb2_ops msm_venc_vb2q_ops = { .queue_setup = msm_venc_queue_setup, .start_streaming = msm_venc_start_streaming, .buf_queue = msm_venc_buf_queue, .stop_streaming = msm_venc_stop_streaming, }; const struct vb2_ops *msm_venc_get_vb2q_ops(void) { return &msm_venc_vb2q_ops; } static int msm_venc_op_s_ctrl(struct v4l2_ctrl *ctrl) { int rc = 0; struct v4l2_control control; struct hal_frame_rate frame_rate; struct hal_request_iframe request_iframe; struct hal_bitrate bitrate; struct hal_profile_level profile_level; struct hal_h264_entropy_control h264_entropy_control; struct hal_quantization quantization; struct hal_intra_period intra_period; struct hal_idr_period idr_period; struct hal_operations operations; struct hal_intra_refresh intra_refresh; struct hal_multi_slice_control multi_slice_control; struct hal_h264_db_control h264_db_control; u32 control_idx = 0; u32 property_id = 0; u32 property_val = 0; void *pdata; struct msm_vidc_inst *inst = container_of(ctrl->handler, struct msm_vidc_inst, ctrl_handler); control.id = ctrl->id; control.value = ctrl->val; switch (control.id) { case V4L2_CID_MPEG_VIDC_VIDEO_FRAME_RATE: property_id = HAL_CONFIG_FRAME_RATE; frame_rate.frame_rate = control.value; frame_rate.buffer_type = HAL_BUFFER_OUTPUT; pdata = &frame_rate; break; case V4L2_CID_MPEG_VIDC_VIDEO_IDR_PERIOD: property_id = HAL_CONFIG_VENC_IDR_PERIOD; idr_period.idr_period = control.value; pdata = &idr_period; break; case V4L2_CID_MPEG_VIDC_VIDEO_NUM_P_FRAMES: property_id = HAL_CONFIG_VENC_INTRA_PERIOD; intra_period.pframes = control.value; venc_intra_period.pframes = control.value; intra_period.bframes = venc_intra_period.bframes; pdata = &intra_period; break; case V4L2_CID_MPEG_VIDC_VIDEO_NUM_B_FRAMES: property_id = HAL_CONFIG_VENC_INTRA_PERIOD; intra_period.bframes = control.value; venc_intra_period.bframes = control.value; 
intra_period.pframes = venc_intra_period.pframes; pdata = &intra_period; break; case V4L2_CID_MPEG_VIDC_VIDEO_REQUEST_IFRAME: property_id = HAL_CONFIG_VENC_REQUEST_IFRAME; request_iframe.enable = control.value; pdata = &request_iframe; break; case V4L2_CID_MPEG_VIDC_VIDEO_RATE_CONTROL: property_id = HAL_PARAM_VENC_RATE_CONTROL; property_val = control.value; pdata = &property_val; break; case V4L2_CID_MPEG_VIDEO_BITRATE: property_id = HAL_CONFIG_VENC_TARGET_BITRATE; bitrate.bit_rate = control.value; pdata = &bitrate; break; case V4L2_CID_MPEG_VIDEO_H264_ENTROPY_MODE: property_id = HAL_PARAM_VENC_H264_ENTROPY_CONTROL; h264_entropy_control.entropy_mode = control.value; venc_h264_entropy_control.entropy_mode = control.value; h264_entropy_control.cabac_model = venc_h264_entropy_control.cabac_model; pdata = &h264_entropy_control; break; case V4L2_CID_MPEG_VIDC_VIDEO_H264_CABAC_MODEL: property_id = HAL_PARAM_VENC_H264_ENTROPY_CONTROL; h264_entropy_control.cabac_model = control.value; venc_h264_entropy_control.cabac_model = control.value; h264_entropy_control.entropy_mode = venc_h264_entropy_control.entropy_mode; pdata = &h264_entropy_control; break; case V4L2_CID_MPEG_VIDEO_MPEG4_PROFILE: property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT; switch (control.value) { case V4L2_MPEG_VIDEO_MPEG4_PROFILE_SIMPLE: control.value = HAL_MPEG4_PROFILE_SIMPLE; break; case V4L2_MPEG_VIDEO_MPEG4_PROFILE_ADVANCED_SIMPLE: control.value = HAL_MPEG4_PROFILE_ADVANCEDSIMPLE; break; default: break; } profile_level.profile = control.value; venc_mpeg4_profile_level.profile = control.value; profile_level.level = venc_mpeg4_profile_level.level; pdata = &profile_level; break; case V4L2_CID_MPEG_VIDEO_MPEG4_LEVEL: property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT; switch (control.value) { case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0: control.value = HAL_MPEG4_LEVEL_0; break; case V4L2_MPEG_VIDEO_MPEG4_LEVEL_0B: control.value = HAL_MPEG4_LEVEL_0b; break; case V4L2_MPEG_VIDEO_MPEG4_LEVEL_1: control.value = 
HAL_MPEG4_LEVEL_1; break; case V4L2_MPEG_VIDEO_MPEG4_LEVEL_2: control.value = HAL_MPEG4_LEVEL_2; break; case V4L2_MPEG_VIDEO_MPEG4_LEVEL_3: control.value = HAL_MPEG4_LEVEL_3; break; case V4L2_MPEG_VIDEO_MPEG4_LEVEL_4: control.value = HAL_MPEG4_LEVEL_4; break; case V4L2_MPEG_VIDEO_MPEG4_LEVEL_5: control.value = HAL_MPEG4_LEVEL_5; break; default: break; } profile_level.level = control.value; venc_mpeg4_profile_level.level = control.value; profile_level.profile = venc_mpeg4_profile_level.profile; pdata = &profile_level; break; case V4L2_CID_MPEG_VIDEO_H264_PROFILE: property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT; switch (control.value) { case V4L2_MPEG_VIDEO_H264_PROFILE_BASELINE: control.value = HAL_H264_PROFILE_BASELINE; break; case V4L2_MPEG_VIDEO_H264_PROFILE_MAIN: control.value = HAL_H264_PROFILE_MAIN; break; case V4L2_MPEG_VIDEO_H264_PROFILE_EXTENDED: control.value = HAL_H264_PROFILE_EXTENDED; break; case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH: control.value = HAL_H264_PROFILE_HIGH; break; case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_10: control.value = HAL_H264_PROFILE_HIGH10; break; case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_422: control.value = HAL_H264_PROFILE_HIGH422; break; case V4L2_MPEG_VIDEO_H264_PROFILE_HIGH_444_PREDICTIVE: control.value = HAL_H264_PROFILE_HIGH444; break; default: break; } profile_level.profile = control.value; venc_h264_profile_level.profile = control.value; profile_level.level = venc_h264_profile_level.level; pdata = &profile_level; pr_debug("\nprofile: %d\n", profile_level.profile); break; case V4L2_CID_MPEG_VIDEO_H264_LEVEL: property_id = HAL_PARAM_PROFILE_LEVEL_CURRENT; switch (control.value) { case V4L2_MPEG_VIDEO_H264_LEVEL_1_0: control.value = HAL_H264_LEVEL_1; break; case V4L2_MPEG_VIDEO_H264_LEVEL_1B: control.value = HAL_H264_LEVEL_1b; break; case V4L2_MPEG_VIDEO_H264_LEVEL_1_1: control.value = HAL_H264_LEVEL_11; break; case V4L2_MPEG_VIDEO_H264_LEVEL_1_2: control.value = HAL_H264_LEVEL_12; break; case V4L2_MPEG_VIDEO_H264_LEVEL_1_3: 
control.value = HAL_H264_LEVEL_13; break; case V4L2_MPEG_VIDEO_H264_LEVEL_2_0: control.value = HAL_H264_LEVEL_2; break; case V4L2_MPEG_VIDEO_H264_LEVEL_2_1: control.value = HAL_H264_LEVEL_21; break; case V4L2_MPEG_VIDEO_H264_LEVEL_2_2: control.value = HAL_H264_LEVEL_22; break; case V4L2_MPEG_VIDEO_H264_LEVEL_3_0: control.value = HAL_H264_LEVEL_3; break; case V4L2_MPEG_VIDEO_H264_LEVEL_3_1: control.value = HAL_H264_LEVEL_31; break; case V4L2_MPEG_VIDEO_H264_LEVEL_3_2: control.value = HAL_H264_LEVEL_32; break; case V4L2_MPEG_VIDEO_H264_LEVEL_4_0: control.value = HAL_H264_LEVEL_4; break; case V4L2_MPEG_VIDEO_H264_LEVEL_4_1: control.value = HAL_H264_LEVEL_41; break; case V4L2_MPEG_VIDEO_H264_LEVEL_4_2: control.value = HAL_H264_LEVEL_42; break; case V4L2_MPEG_VIDEO_H264_LEVEL_5_0: control.value = HAL_H264_LEVEL_3; break; case V4L2_MPEG_VIDEO_H264_LEVEL_5_1: control.value = HAL_H264_LEVEL_51; break; default: break; } profile_level.level = control.value; venc_h264_profile_level.level = control.value; profile_level.profile = venc_h264_profile_level.profile; pdata = &profile_level; pdata = &profile_level; pr_debug("\nLevel: %d\n", profile_level.level); break; case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION: property_id = HAL_CONFIG_VPE_OPERATIONS; operations.rotate = control.value; pdata = &operations; break; case V4L2_CID_MPEG_VIDEO_H264_I_FRAME_QP: property_id = HAL_PARAM_VENC_SESSION_QP; quantization.qpi = control.value; venc_quantization.qpi = control.value; quantization.qpp = venc_quantization.qpp; quantization.qpb = venc_quantization.qpb; pdata = &quantization; break; case V4L2_CID_MPEG_VIDEO_H264_P_FRAME_QP: property_id = HAL_PARAM_VENC_SESSION_QP; quantization.qpp = control.value; venc_quantization.qpp = control.value; quantization.qpi = venc_quantization.qpi; quantization.qpb = venc_quantization.qpb; pdata = &quantization; break; case V4L2_CID_MPEG_VIDEO_H264_B_FRAME_QP: property_id = HAL_PARAM_VENC_SESSION_QP; quantization.qpb = control.value; venc_quantization.qpb = 
control.value; quantization.qpi = venc_quantization.qpi; quantization.qpp = venc_quantization.qpp; pdata = &quantization; break; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: property_id = HAL_PARAM_VENC_MULTI_SLICE_CONTROL; multi_slice_control.multi_slice = control.value; venc_multi_slice_control.multi_slice = control.value; multi_slice_control.slice_size = venc_multi_slice_control.slice_size; pdata = &multi_slice_control; break; case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES: case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_MB: property_id = HAL_PARAM_VENC_MULTI_SLICE_CONTROL; multi_slice_control.multi_slice = venc_multi_slice_control.multi_slice; multi_slice_control.slice_size = control.value; venc_multi_slice_control.slice_size = control.value; pdata = &multi_slice_control; break; case V4L2_CID_MPEG_VIDC_VIDEO_INTRA_REFRESH_MODE: property_id = HAL_PARAM_VENC_INTRA_REFRESH; intra_refresh.mode = control.value; pdata = &intra_refresh; break; case V4L2_CID_MPEG_VIDC_VIDEO_AIR_MBS: property_id = HAL_PARAM_VENC_INTRA_REFRESH; intra_refresh.air_mbs = control.value; pdata = &intra_refresh; break; case V4L2_CID_MPEG_VIDC_VIDEO_AIR_REF: property_id = HAL_PARAM_VENC_INTRA_REFRESH; intra_refresh.air_ref = control.value; pdata = &intra_refresh; break; case V4L2_CID_MPEG_VIDC_VIDEO_CIR_MBS: property_id = HAL_PARAM_VENC_INTRA_REFRESH; intra_refresh.cir_mbs = control.value; pdata = &intra_refresh; break; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_MODE: property_id = HAL_PARAM_VENC_H264_DEBLOCK_CONTROL; h264_db_control.mode = control.value; pdata = &h264_db_control; break; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_ALPHA: property_id = HAL_PARAM_VENC_H264_DEBLOCK_CONTROL; h264_db_control.slice_alpha_offset = control.value; pdata = &h264_db_control; case V4L2_CID_MPEG_VIDEO_H264_LOOP_FILTER_BETA: property_id = HAL_PARAM_VENC_H264_DEBLOCK_CONTROL; h264_db_control.slice_beta_offset = control.value; pdata = &h264_db_control; default: break; } if (property_id) { pr_debug("Control: HAL 
property=%d,ctrl_id=%d,ctrl_value=%d\n", property_id, msm_venc_ctrls[control_idx].id, control.value); rc = vidc_hal_session_set_property((void *)inst->session, property_id, pdata); } if (rc) pr_err("Failed to set hal property for framesize\n"); return rc; } static int msm_venc_op_g_volatile_ctrl(struct v4l2_ctrl *ctrl) { return 0; } static const struct v4l2_ctrl_ops msm_venc_ctrl_ops = { .s_ctrl = msm_venc_op_s_ctrl, .g_volatile_ctrl = msm_venc_op_g_volatile_ctrl, }; const struct v4l2_ctrl_ops *msm_venc_get_ctrl_ops(void) { return &msm_venc_ctrl_ops; } int msm_venc_inst_init(struct msm_vidc_inst *inst) { int rc = 0; if (!inst) { pr_err("Invalid input = %p\n", inst); return -EINVAL; } inst->fmts[CAPTURE_PORT] = &venc_formats[1]; inst->fmts[OUTPUT_PORT] = &venc_formats[0]; inst->height = DEFAULT_HEIGHT; inst->width = DEFAULT_WIDTH; return rc; } int msm_venc_s_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl) { return v4l2_s_ctrl(NULL, &inst->ctrl_handler, ctrl); } int msm_venc_g_ctrl(struct msm_vidc_inst *inst, struct v4l2_control *ctrl) { return v4l2_g_ctrl(&inst->ctrl_handler, ctrl); } int msm_venc_querycap(struct msm_vidc_inst *inst, struct v4l2_capability *cap) { if (!inst || !cap) { pr_err("Invalid input, inst = %p, cap = %p\n", inst, cap); return -EINVAL; } strlcpy(cap->driver, MSM_VIDC_DRV_NAME, sizeof(cap->driver)); strlcpy(cap->card, MSM_VENC_DVC_NAME, sizeof(cap->card)); cap->bus_info[0] = 0; cap->version = MSM_VIDC_VERSION; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE_MPLANE | V4L2_CAP_VIDEO_OUTPUT_MPLANE | V4L2_CAP_STREAMING; memset(cap->reserved, 0, sizeof(cap->reserved)); return 0; } int msm_venc_enum_fmt(struct msm_vidc_inst *inst, struct v4l2_fmtdesc *f) { const struct msm_vidc_format *fmt = NULL; int rc = 0; if (!inst || !f) { pr_err("Invalid input, inst = %p, f = %p\n", inst, f); return -EINVAL; } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { fmt = msm_comm_get_pixel_fmt_index(venc_formats, ARRAY_SIZE(venc_formats), f->index, 
CAPTURE_PORT); } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { fmt = msm_comm_get_pixel_fmt_index(venc_formats, ARRAY_SIZE(venc_formats), f->index, OUTPUT_PORT); f->flags = V4L2_FMT_FLAG_COMPRESSED; } memset(f->reserved, 0 , sizeof(f->reserved)); if (fmt) { strlcpy(f->description, fmt->description, sizeof(f->description)); f->pixelformat = fmt->fourcc; } else { pr_err("No more formats found\n"); rc = -EINVAL; } return rc; } int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) { const struct msm_vidc_format *fmt = NULL; int rc = 0; int i; if (!inst || !f) { pr_err("Invalid input, inst = %p, format = %p\n", inst, f); return -EINVAL; } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats, ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat, CAPTURE_PORT); if (fmt && fmt->type != CAPTURE_PORT) { pr_err("Format: %d not supported on CAPTURE port\n", f->fmt.pix_mp.pixelformat); rc = -EINVAL; goto exit; } } else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) { inst->width = f->fmt.pix_mp.width; inst->height = f->fmt.pix_mp.height; fmt = msm_comm_get_pixel_fmt_fourcc(venc_formats, ARRAY_SIZE(venc_formats), f->fmt.pix_mp.pixelformat, OUTPUT_PORT); if (fmt && fmt->type != OUTPUT_PORT) { pr_err("Format: %d not supported on OUTPUT port\n", f->fmt.pix_mp.pixelformat); rc = -EINVAL; goto exit; } } if (fmt) { for (i = 0; i < fmt->num_planes; ++i) { f->fmt.pix_mp.plane_fmt[i].sizeimage = fmt->get_frame_size(i, f->fmt.pix_mp.height, f->fmt.pix_mp.width); } inst->fmts[fmt->type] = fmt; if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) { rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE); if (rc) { pr_err("Failed to open instance\n"); goto exit; } } } else { pr_err("Buf type not recognized, type = %d\n", f->type); rc = -EINVAL; } exit: return rc; } int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f) { const struct msm_vidc_format *fmt = NULL; int rc = 0; int i; if (!inst || !f) { 
pr_err("Invalid input, inst = %p, format = %p\n", inst, f); return -EINVAL; } if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) fmt = inst->fmts[CAPTURE_PORT]; else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) fmt = inst->fmts[OUTPUT_PORT]; if (fmt) { f->fmt.pix_mp.pixelformat = fmt->fourcc; f->fmt.pix_mp.height = inst->height; f->fmt.pix_mp.width = inst->width; for (i = 0; i < fmt->num_planes; ++i) { f->fmt.pix_mp.plane_fmt[i].sizeimage = fmt->get_frame_size(i, inst->height, inst->width); } } else { pr_err("Buf type not recognized, type = %d\n", f->type); rc = -EINVAL; } return rc; } int msm_venc_reqbufs(struct msm_vidc_inst *inst, struct v4l2_requestbuffers *b) { struct vb2_queue *q = NULL; int rc = 0; if (!inst || !b) { pr_err("Invalid input, inst = %p, buffer = %p\n", inst, b); return -EINVAL; } q = msm_comm_get_vb2q(inst, b->type); if (!q) { pr_err("Failed to find buffer queue for type = %d\n", b->type); return -EINVAL; } rc = vb2_reqbufs(q, b); if (rc) pr_err("Failed to get reqbufs, %d\n", rc); return rc; } int msm_venc_prepare_buf(struct msm_vidc_inst *inst, struct v4l2_buffer *b) { int rc = 0; int i; struct vidc_buffer_addr_info buffer_info; switch (b->type) { case V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE: break; case V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE: for (i = 0; i < b->length; i++) { pr_debug("device_addr = %ld, size = %d\n", b->m.planes[i].m.userptr, b->m.planes[i].length); buffer_info.buffer_size = b->m.planes[i].length; buffer_info.buffer_type = HAL_BUFFER_OUTPUT; buffer_info.num_buffers = 1; buffer_info.align_device_addr = b->m.planes[i].m.userptr; buffer_info.extradata_size = 0; buffer_info.extradata_addr = 0; rc = vidc_hal_session_set_buffers((void *)inst->session, &buffer_info); if (rc) pr_err("vidc_hal_session_set_buffers failed"); } break; default: pr_err("Buffer type not recognized: %d\n", b->type); break; } return rc; } int msm_venc_qbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b) { struct vb2_queue *q = NULL; int rc = 0; q = 
msm_comm_get_vb2q(inst, b->type); if (!q) { pr_err("Failed to find buffer queue for type = %d\n", b->type); return -EINVAL; } rc = vb2_qbuf(q, b); if (rc) pr_err("Failed to qbuf, %d\n", rc); return rc; } int msm_venc_dqbuf(struct msm_vidc_inst *inst, struct v4l2_buffer *b) { struct vb2_queue *q = NULL; int rc = 0; q = msm_comm_get_vb2q(inst, b->type); if (!q) { pr_err("Failed to find buffer queue for type = %d\n", b->type); return -EINVAL; } rc = vb2_dqbuf(q, b, true); if (rc) pr_err("Failed to dqbuf, %d\n", rc); return rc; } int msm_venc_streamon(struct msm_vidc_inst *inst, enum v4l2_buf_type i) { int rc = 0; struct vb2_queue *q; q = msm_comm_get_vb2q(inst, i); if (!q) { pr_err("Failed to find buffer queue for type = %d\n", i); return -EINVAL; } pr_debug("Calling streamon\n"); rc = vb2_streamon(q, i); if (rc) pr_err("streamon failed on port: %d\n", i); return rc; } int msm_venc_streamoff(struct msm_vidc_inst *inst, enum v4l2_buf_type i) { int rc = 0; struct vb2_queue *q; q = msm_comm_get_vb2q(inst, i); if (!q) { pr_err("Failed to find buffer queue for type = %d\n", i); return -EINVAL; } pr_debug("Calling streamoff\n"); rc = vb2_streamoff(q, i); if (rc) pr_err("streamoff failed on port: %d\n", i); return rc; } int msm_venc_ctrl_init(struct msm_vidc_inst *inst) { int idx = 0; struct v4l2_ctrl_config ctrl_cfg; int ret_val = 0; ret_val = v4l2_ctrl_handler_init(&inst->ctrl_handler, NUM_CTRLS); if (ret_val) { pr_err("CTRL ERR: Control handler init failed, %d\n", inst->ctrl_handler.error); return ret_val; } for (; idx < NUM_CTRLS; idx++) { if (IS_PRIV_CTRL(msm_venc_ctrls[idx].id)) { ctrl_cfg.def = msm_venc_ctrls[idx].default_value; ctrl_cfg.flags = 0; ctrl_cfg.id = msm_venc_ctrls[idx].id; ctrl_cfg.max = msm_venc_ctrls[idx].maximum; ctrl_cfg.min = msm_venc_ctrls[idx].minimum; ctrl_cfg.menu_skip_mask = msm_venc_ctrls[idx].menu_skip_mask; ctrl_cfg.name = msm_venc_ctrls[idx].name; ctrl_cfg.ops = &msm_venc_ctrl_ops; ctrl_cfg.step = msm_venc_ctrls[idx].step; ctrl_cfg.type = 
msm_venc_ctrls[idx].type; ctrl_cfg.qmenu = msm_venc_ctrls[idx].qmenu; v4l2_ctrl_new_custom(&inst->ctrl_handler, &ctrl_cfg, NULL); } else { if (msm_venc_ctrls[idx].type == V4L2_CTRL_TYPE_MENU) { v4l2_ctrl_new_std_menu(&inst->ctrl_handler, &msm_venc_ctrl_ops, msm_venc_ctrls[idx].id, msm_venc_ctrls[idx].maximum, msm_venc_ctrls[idx].menu_skip_mask, msm_venc_ctrls[idx].default_value); } else { v4l2_ctrl_new_std(&inst->ctrl_handler, &msm_venc_ctrl_ops, msm_venc_ctrls[idx].id, msm_venc_ctrls[idx].minimum, msm_venc_ctrls[idx].maximum, msm_venc_ctrls[idx].step, msm_venc_ctrls[idx].default_value); } } } ret_val = inst->ctrl_handler.error; if (ret_val) pr_err("CTRL ERR: Error adding ctrls to ctrl handle, %d\n", inst->ctrl_handler.error); return ret_val; }
gpl-2.0
gunine/boeffla-kernel-jb-lte
mm/swap.c
288
21421
/* * linux/mm/swap.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ /* * This file contains the default values for the operation of the * Linux VM subsystem. Fine-tuning documentation can be found in * Documentation/sysctl/vm.txt. * Started 18.12.91 * Swap aging added 23.2.95, Stephen Tweedie. * Buffermem limits added 12.3.98, Rik van Riel. */ #include <linux/mm.h> #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mm_inline.h> #include <linux/buffer_head.h> /* for try_to_release_page() */ #include <linux/percpu_counter.h> #include <linux/percpu.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/backing-dev.h> #include <linux/memcontrol.h> #include <linux/gfp.h> #include "internal.h" /* How many pages do we try to swap or page in/out together? */ int page_cluster; static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); /* * This path almost never happens for VM activity - pages are normally * freed via pagevecs. But it gets used by networking. 
*/ static void __page_cache_release(struct page *page) { if (PageLRU(page)) { unsigned long flags; struct zone *zone = page_zone(page); spin_lock_irqsave(&zone->lru_lock, flags); VM_BUG_ON(!PageLRU(page)); __ClearPageLRU(page); del_page_from_lru(zone, page); spin_unlock_irqrestore(&zone->lru_lock, flags); } } static void __put_single_page(struct page *page) { __page_cache_release(page); free_hot_cold_page(page, 0); } static void __put_compound_page(struct page *page) { compound_page_dtor *dtor; __page_cache_release(page); dtor = get_compound_page_dtor(page); (*dtor)(page); } static void put_compound_page(struct page *page) { if (unlikely(PageTail(page))) { /* __split_huge_page_refcount can run under us */ struct page *page_head = compound_trans_head(page); if (likely(page != page_head && get_page_unless_zero(page_head))) { unsigned long flags; /* * page_head wasn't a dangling pointer but it * may not be a head page anymore by the time * we obtain the lock. That is ok as long as it * can't be freed from under us. */ flags = compound_lock_irqsave(page_head); if (unlikely(!PageTail(page))) { /* __split_huge_page_refcount run before us */ compound_unlock_irqrestore(page_head, flags); VM_BUG_ON(PageHead(page_head)); if (put_page_testzero(page_head)) __put_single_page(page_head); out_put_single: if (put_page_testzero(page)) __put_single_page(page); return; } VM_BUG_ON(page_head != page->first_page); /* * We can release the refcount taken by * get_page_unless_zero() now that * __split_huge_page_refcount() is blocked on * the compound_lock. 
*/ if (put_page_testzero(page_head)) VM_BUG_ON(1); /* __split_huge_page_refcount will wait now */ VM_BUG_ON(page_mapcount(page) <= 0); atomic_dec(&page->_mapcount); VM_BUG_ON(atomic_read(&page_head->_count) <= 0); VM_BUG_ON(atomic_read(&page->_count) != 0); compound_unlock_irqrestore(page_head, flags); if (put_page_testzero(page_head)) { if (PageHead(page_head)) __put_compound_page(page_head); else __put_single_page(page_head); } } else { /* page_head is a dangling pointer */ VM_BUG_ON(PageTail(page)); goto out_put_single; } } else if (put_page_testzero(page)) { if (PageHead(page)) __put_compound_page(page); else __put_single_page(page); } } void put_page(struct page *page) { if (unlikely(PageCompound(page))) put_compound_page(page); else if (put_page_testzero(page)) __put_single_page(page); } EXPORT_SYMBOL(put_page); /* * This function is exported but must not be called by anything other * than get_page(). It implements the slow path of get_page(). */ bool __get_page_tail(struct page *page) { /* * This takes care of get_page() if run on a tail page * returned by one of the get_user_pages/follow_page variants. * get_user_pages/follow_page itself doesn't need the compound * lock because it runs __get_page_tail_foll() under the * proper PT lock that already serializes against * split_huge_page(). */ unsigned long flags; bool got = false; struct page *page_head = compound_trans_head(page); if (likely(page != page_head && get_page_unless_zero(page_head))) { /* * page_head wasn't a dangling pointer but it * may not be a head page anymore by the time * we obtain the lock. That is ok as long as it * can't be freed from under us. 
*/ flags = compound_lock_irqsave(page_head); /* here __split_huge_page_refcount won't run anymore */ if (likely(PageTail(page))) { __get_page_tail_foll(page, false); got = true; } compound_unlock_irqrestore(page_head, flags); if (unlikely(!got)) put_page(page_head); } return got; } EXPORT_SYMBOL(__get_page_tail); /** * put_pages_list() - release a list of pages * @pages: list of pages threaded on page->lru * * Release a list of pages which are strung together on page.lru. Currently * used by read_cache_pages() and related error recovery code. */ void put_pages_list(struct list_head *pages) { while (!list_empty(pages)) { struct page *victim; victim = list_entry(pages->prev, struct page, lru); list_del(&victim->lru); page_cache_release(victim); } } EXPORT_SYMBOL(put_pages_list); static void pagevec_lru_move_fn(struct pagevec *pvec, void (*move_fn)(struct page *page, void *arg), void *arg) { int i; struct zone *zone = NULL; unsigned long flags = 0; for (i = 0; i < pagevec_count(pvec); i++) { struct page *page = pvec->pages[i]; struct zone *pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); zone = pagezone; spin_lock_irqsave(&zone->lru_lock, flags); } (*move_fn)(page, arg); } if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); release_pages(pvec->pages, pvec->nr, pvec->cold); pagevec_reinit(pvec); } static void pagevec_move_tail_fn(struct page *page, void *arg) { int *pgmoved = arg; struct zone *zone = page_zone(page); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { enum lru_list lru = page_lru_base_type(page); list_move_tail(&page->lru, &zone->lru[lru].list); mem_cgroup_rotate_reclaimable_page(page); (*pgmoved)++; } } /* * pagevec_move_tail() must be called with IRQ disabled. * Otherwise this may cause nasty races. 
*/ static void pagevec_move_tail(struct pagevec *pvec) { int pgmoved = 0; pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved); __count_vm_events(PGROTATED, pgmoved); } /* * Writeback is about to end against a page which has been marked for immediate * reclaim. If it still appears to be reclaimable, move it to the tail of the * inactive list. */ void rotate_reclaimable_page(struct page *page) { if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) && !PageUnevictable(page) && PageLRU(page)) { struct pagevec *pvec; unsigned long flags; page_cache_get(page); local_irq_save(flags); pvec = &__get_cpu_var(lru_rotate_pvecs); if (!pagevec_add(pvec, page)) pagevec_move_tail(pvec); local_irq_restore(flags); } } static void update_page_reclaim_stat(struct zone *zone, struct page *page, int file, int rotated) { struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat; struct zone_reclaim_stat *memcg_reclaim_stat; memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page); reclaim_stat->recent_scanned[file]++; if (rotated) reclaim_stat->recent_rotated[file]++; if (!memcg_reclaim_stat) return; memcg_reclaim_stat->recent_scanned[file]++; if (rotated) memcg_reclaim_stat->recent_rotated[file]++; } static void __activate_page(struct page *page, void *arg) { struct zone *zone = page_zone(page); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { int file = page_is_file_cache(page); int lru = page_lru_base_type(page); del_page_from_lru_list(zone, page, lru); SetPageActive(page); lru += LRU_ACTIVE; add_page_to_lru_list(zone, page, lru); __count_vm_event(PGACTIVATE); update_page_reclaim_stat(zone, page, file, 1); } } #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); static void activate_page_drain(int cpu) { struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu); if (pagevec_count(pvec)) pagevec_lru_move_fn(pvec, __activate_page, NULL); } void activate_page(struct page *page) { if (PageLRU(page) && !PageActive(page) 
&& !PageUnevictable(page)) { struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); page_cache_get(page); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, __activate_page, NULL); put_cpu_var(activate_page_pvecs); } } #else static inline void activate_page_drain(int cpu) { } void activate_page(struct page *page) { struct zone *zone = page_zone(page); spin_lock_irq(&zone->lru_lock); __activate_page(page, NULL); spin_unlock_irq(&zone->lru_lock); } #endif /* * Mark a page as having seen activity. * * inactive,unreferenced -> inactive,referenced * inactive,referenced -> active,unreferenced * active,unreferenced -> active,referenced */ void mark_page_accessed(struct page *page) { if (!PageActive(page) && !PageUnevictable(page) && PageReferenced(page) && PageLRU(page)) { activate_page(page); ClearPageReferenced(page); } else if (!PageReferenced(page)) { SetPageReferenced(page); } } EXPORT_SYMBOL(mark_page_accessed); void __lru_cache_add(struct page *page, enum lru_list lru) { #ifndef CONFIG_DMA_CMA struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru]; page_cache_get(page); if (!pagevec_add(pvec, page)) ____pagevec_lru_add(pvec, lru); put_cpu_var(lru_add_pvecs); #else struct pagevec *pvec; int is_cma; /* FIXME: too slow */ is_cma = is_cma_pageblock(page); pvec = &get_cpu_var(lru_add_pvecs)[lru]; page_cache_get(page); if (!pagevec_add(pvec, page) || is_cma) ____pagevec_lru_add(pvec, lru); put_cpu_var(lru_add_pvecs); #endif } EXPORT_SYMBOL(__lru_cache_add); /** * lru_cache_add_lru - add a page to a page list * @page: the page to be added to the LRU. * @lru: the LRU list to which the page is added. 
*/ void lru_cache_add_lru(struct page *page, enum lru_list lru) { if (PageActive(page)) { VM_BUG_ON(PageUnevictable(page)); ClearPageActive(page); } else if (PageUnevictable(page)) { VM_BUG_ON(PageActive(page)); ClearPageUnevictable(page); } VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page)); __lru_cache_add(page, lru); } /** * add_page_to_unevictable_list - add a page to the unevictable list * @page: the page to be added to the unevictable list * * Add page directly to its zone's unevictable list. To avoid races with * tasks that might be making the page evictable, through eg. munlock, * munmap or exit, while it's not on the lru, we want to add the page * while it's locked or otherwise "invisible" to other tasks. This is * difficult to do when using the pagevec cache, so bypass that. */ void add_page_to_unevictable_list(struct page *page) { struct zone *zone = page_zone(page); spin_lock_irq(&zone->lru_lock); SetPageUnevictable(page); SetPageLRU(page); add_page_to_lru_list(zone, page, LRU_UNEVICTABLE); spin_unlock_irq(&zone->lru_lock); } /* * If the page can not be invalidated, it is moved to the * inactive list to speed up its reclaim. It is moved to the * head of the list, rather than the tail, to give the flusher * threads some time to write it out, as this is much more * effective than the single-page writeout from reclaim. * * If the page isn't page_mapped and dirty/writeback, the page * could reclaim asap using PG_reclaim. * * 1. active, mapped page -> none * 2. active, dirty/writeback page -> inactive, head, PG_reclaim * 3. inactive, mapped page -> none * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim * 5. inactive, clean -> inactive, tail * 6. Others -> none * * In 4, why it moves inactive's head, the VM expects the page would * be write it out by flusher threads as this is much more effective * than the single-page writeout from reclaim. 
*/ static void lru_deactivate_fn(struct page *page, void *arg) { int lru, file; bool active; struct zone *zone = page_zone(page); if (!PageLRU(page)) return; if (PageUnevictable(page)) return; /* Some processes are using the page */ if (page_mapped(page)) return; active = PageActive(page); file = page_is_file_cache(page); lru = page_lru_base_type(page); del_page_from_lru_list(zone, page, lru + active); ClearPageActive(page); ClearPageReferenced(page); add_page_to_lru_list(zone, page, lru); if (PageWriteback(page) || PageDirty(page)) { /* * PG_reclaim could be raced with end_page_writeback * It can make readahead confusing. But race window * is _really_ small and it's non-critical problem. */ SetPageReclaim(page); } else { /* * The page's writeback ends up during pagevec * We moves tha page into tail of inactive. */ list_move_tail(&page->lru, &zone->lru[lru].list); mem_cgroup_rotate_reclaimable_page(page); __count_vm_event(PGROTATED); } if (active) __count_vm_event(PGDEACTIVATE); update_page_reclaim_stat(zone, page, file, 0); } /* * Drain pages out of the cpu's pagevecs. * Either "cpu" is the current CPU, and preemption has already been * disabled; or "cpu" is being hot-unplugged, and is already dead. 
*/ static void drain_cpu_pagevecs(int cpu) { struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu); struct pagevec *pvec; int lru; for_each_lru(lru) { pvec = &pvecs[lru - LRU_BASE]; if (pagevec_count(pvec)) ____pagevec_lru_add(pvec, lru); } pvec = &per_cpu(lru_rotate_pvecs, cpu); if (pagevec_count(pvec)) { unsigned long flags; /* No harm done if a racing interrupt already did this */ local_irq_save(flags); pagevec_move_tail(pvec); local_irq_restore(flags); } pvec = &per_cpu(lru_deactivate_pvecs, cpu); if (pagevec_count(pvec)) pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); activate_page_drain(cpu); } /** * deactivate_page - forcefully deactivate a page * @page: page to deactivate * * This function hints the VM that @page is a good reclaim candidate, * for example if its invalidation fails due to the page being dirty * or under writeback. */ void deactivate_page(struct page *page) { /* * In a workload with many unevictable page such as mprotect, unevictable * page deactivation for accelerating reclaim is pointless. */ if (PageUnevictable(page)) return; if (likely(get_page_unless_zero(page))) { struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); put_cpu_var(lru_deactivate_pvecs); } } void lru_add_drain(void) { drain_cpu_pagevecs(get_cpu()); put_cpu(); } static void lru_add_drain_per_cpu(struct work_struct *dummy) { lru_add_drain(); } /* * Returns 0 for success */ int lru_add_drain_all(void) { return schedule_on_each_cpu(lru_add_drain_per_cpu); } /* * Batched page_cache_release(). Decrement the reference count on all the * passed pages. If it fell to zero then remove the page from the LRU and * free it. * * Avoid taking zone->lru_lock if possible, but if it is taken, retain it * for the remainder of the operation. 
* * The locking in this function is against shrink_inactive_list(): we recheck * the page count inside the lock to see whether shrink_inactive_list() * grabbed the page via the LRU. If it did, give up: shrink_inactive_list() * will free it. */ void release_pages(struct page **pages, int nr, int cold) { int i; struct pagevec pages_to_free; struct zone *zone = NULL; unsigned long uninitialized_var(flags); pagevec_init(&pages_to_free, cold); for (i = 0; i < nr; i++) { struct page *page = pages[i]; if (unlikely(PageCompound(page))) { if (zone) { spin_unlock_irqrestore(&zone->lru_lock, flags); zone = NULL; } put_compound_page(page); continue; } if (!put_page_testzero(page)) continue; if (PageLRU(page)) { struct zone *pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); zone = pagezone; spin_lock_irqsave(&zone->lru_lock, flags); } VM_BUG_ON(!PageLRU(page)); __ClearPageLRU(page); del_page_from_lru(zone, page); } if (!pagevec_add(&pages_to_free, page)) { if (zone) { spin_unlock_irqrestore(&zone->lru_lock, flags); zone = NULL; } __pagevec_free(&pages_to_free); pagevec_reinit(&pages_to_free); } } if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); pagevec_free(&pages_to_free); } EXPORT_SYMBOL(release_pages); /* * The pages which we're about to release may be in the deferred lru-addition * queues. That would prevent them from really being freed right now. That's * OK from a correctness point of view but is inefficient - those pages may be * cache-warm and we want to give them back to the page allocator ASAP. * * So __pagevec_release() will drain those queues here. __pagevec_lru_add() * and __pagevec_lru_add_active() call release_pages() directly to avoid * mutual recursion. 
*/ void __pagevec_release(struct pagevec *pvec) { lru_add_drain(); release_pages(pvec->pages, pagevec_count(pvec), pvec->cold); pagevec_reinit(pvec); } EXPORT_SYMBOL(__pagevec_release); /* used by __split_huge_page_refcount() */ void lru_add_page_tail(struct zone* zone, struct page *page, struct page *page_tail) { int active; enum lru_list lru; const int file = 0; struct list_head *head; VM_BUG_ON(!PageHead(page)); VM_BUG_ON(PageCompound(page_tail)); VM_BUG_ON(PageLRU(page_tail)); VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock)); SetPageLRU(page_tail); if (page_evictable(page_tail, NULL)) { if (PageActive(page)) { SetPageActive(page_tail); active = 1; lru = LRU_ACTIVE_ANON; } else { active = 0; lru = LRU_INACTIVE_ANON; } update_page_reclaim_stat(zone, page_tail, file, active); if (likely(PageLRU(page))) head = page->lru.prev; else head = &zone->lru[lru].list; __add_page_to_lru_list(zone, page_tail, lru, head); } else { SetPageUnevictable(page_tail); add_page_to_lru_list(zone, page_tail, LRU_UNEVICTABLE); } } static void ____pagevec_lru_add_fn(struct page *page, void *arg) { enum lru_list lru = (enum lru_list)arg; struct zone *zone = page_zone(page); int file = is_file_lru(lru); int active = is_active_lru(lru); VM_BUG_ON(PageActive(page)); VM_BUG_ON(PageUnevictable(page)); VM_BUG_ON(PageLRU(page)); SetPageLRU(page); if (active) SetPageActive(page); update_page_reclaim_stat(zone, page, file, active); add_page_to_lru_list(zone, page, lru); } /* * Add the passed pages to the LRU, then drop the caller's refcount * on them. Reinitialises the caller's pagevec. 
*/ void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) { VM_BUG_ON(is_unevictable_lru(lru)); pagevec_lru_move_fn(pvec, ____pagevec_lru_add_fn, (void *)lru); } EXPORT_SYMBOL(____pagevec_lru_add); /* * Try to drop buffers from the pages in a pagevec */ void pagevec_strip(struct pagevec *pvec) { int i; for (i = 0; i < pagevec_count(pvec); i++) { struct page *page = pvec->pages[i]; if (page_has_private(page) && trylock_page(page)) { if (page_has_private(page)) try_to_release_page(page, 0); unlock_page(page); } } } /** * pagevec_lookup - gang pagecache lookup * @pvec: Where the resulting pages are placed * @mapping: The address_space to search * @start: The starting page index * @nr_pages: The maximum number of pages * * pagevec_lookup() will search for and return a group of up to @nr_pages pages * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a * reference against the pages in @pvec. * * The search returns a group of mapping-contiguous pages with ascending * indexes. There may be holes in the indices due to not-present pages. * * pagevec_lookup() returns the number of pages which were found. 
*/ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages) { pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages); return pagevec_count(pvec); } EXPORT_SYMBOL(pagevec_lookup); unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, int tag, unsigned nr_pages) { pvec->nr = find_get_pages_tag(mapping, index, tag, nr_pages, pvec->pages); return pagevec_count(pvec); } EXPORT_SYMBOL(pagevec_lookup_tag); /* * Perform any setup for the swap system */ void __init swap_setup(void) { unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); #ifdef CONFIG_SWAP bdi_init(swapper_space.backing_dev_info); #endif /* Use a smaller cluster for small-memory machines */ if (megs < 16) page_cluster = 2; else page_cluster = 3; /* * Right now other parts of the system means that we * _really_ don't want to cluster much more */ }
gpl-2.0
jiezeng/git
diffcore-order.c
288
2492
/* * Copyright (C) 2005 Junio C Hamano */ #include "cache.h" #include "diff.h" #include "diffcore.h" static char **order; static int order_cnt; static void prepare_order(const char *orderfile) { int cnt, pass; struct strbuf sb = STRBUF_INIT; void *map; char *cp, *endp; ssize_t sz; if (order) return; sz = strbuf_read_file(&sb, orderfile, 0); if (sz < 0) die_errno(_("failed to read orderfile '%s'"), orderfile); map = strbuf_detach(&sb, NULL); endp = (char *) map + sz; for (pass = 0; pass < 2; pass++) { cnt = 0; cp = map; while (cp < endp) { char *ep; for (ep = cp; ep < endp && *ep != '\n'; ep++) ; /* cp to ep has one line */ if (*cp == '\n' || *cp == '#') ; /* comment */ else if (pass == 0) cnt++; else { if (*ep == '\n') { *ep = 0; order[cnt] = cp; } else { order[cnt] = xmemdupz(cp, ep - cp); } cnt++; } if (ep < endp) ep++; cp = ep; } if (pass == 0) { order_cnt = cnt; order = xmalloc(sizeof(*order) * cnt); } } } static int match_order(const char *path) { int i; static struct strbuf p = STRBUF_INIT; for (i = 0; i < order_cnt; i++) { strbuf_reset(&p); strbuf_addstr(&p, path); while (p.buf[0]) { char *cp; if (!wildmatch(order[i], p.buf, 0, NULL)) return i; cp = strrchr(p.buf, '/'); if (!cp) break; *cp = 0; } } return order_cnt; } static int compare_objs_order(const void *a_, const void *b_) { struct obj_order const *a, *b; a = (struct obj_order const *)a_; b = (struct obj_order const *)b_; if (a->order != b->order) return a->order - b->order; return a->orig_order - b->orig_order; } void order_objects(const char *orderfile, obj_path_fn_t obj_path, struct obj_order *objs, int nr) { int i; if (!nr) return; prepare_order(orderfile); for (i = 0; i < nr; i++) { objs[i].orig_order = i; objs[i].order = match_order(obj_path(objs[i].obj)); } qsort(objs, nr, sizeof(*objs), compare_objs_order); } static const char *pair_pathtwo(void *obj) { struct diff_filepair *pair = (struct diff_filepair *)obj; return pair->two->path; } void diffcore_order(const char *orderfile) { struct 
diff_queue_struct *q = &diff_queued_diff; struct obj_order *o; int i; if (!q->nr) return; o = xmalloc(sizeof(*o) * q->nr); for (i = 0; i < q->nr; i++) o[i].obj = q->queue[i]; order_objects(orderfile, pair_pathtwo, o, q->nr); for (i = 0; i < q->nr; i++) q->queue[i] = o[i].obj; free(o); return; }
gpl-2.0
yajnab/linux_raspberryPi
drivers/phy/phy-omap-usb2.c
288
9487
/* * omap-usb2.c - USB PHY, talking to musb controller in OMAP. * * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Author: Kishon Vijay Abraham I <kishon@ti.com> * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/io.h> #include <linux/phy/omap_usb.h> #include <linux/usb/phy_companion.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/pm_runtime.h> #include <linux/delay.h> #include <linux/phy/omap_control_phy.h> #include <linux/phy/phy.h> #include <linux/of_platform.h> #define USB2PHY_DISCON_BYP_LATCH (1 << 31) #define USB2PHY_ANA_CONFIG1 0x4c /** * omap_usb2_set_comparator - links the comparator present in the sytem with * this phy * @comparator - the companion phy(comparator) for this phy * * The phy companion driver should call this API passing the phy_companion * filled with set_vbus and start_srp to be used by usb phy. 
* * For use by phy companion driver */ int omap_usb2_set_comparator(struct phy_companion *comparator) { struct omap_usb *phy; struct usb_phy *x = usb_get_phy(USB_PHY_TYPE_USB2); if (IS_ERR(x)) return -ENODEV; phy = phy_to_omapusb(x); phy->comparator = comparator; return 0; } EXPORT_SYMBOL_GPL(omap_usb2_set_comparator); static int omap_usb_set_vbus(struct usb_otg *otg, bool enabled) { struct omap_usb *phy = phy_to_omapusb(otg->phy); if (!phy->comparator) return -ENODEV; return phy->comparator->set_vbus(phy->comparator, enabled); } static int omap_usb_start_srp(struct usb_otg *otg) { struct omap_usb *phy = phy_to_omapusb(otg->phy); if (!phy->comparator) return -ENODEV; return phy->comparator->start_srp(phy->comparator); } static int omap_usb_set_host(struct usb_otg *otg, struct usb_bus *host) { struct usb_phy *phy = otg->phy; otg->host = host; if (!host) phy->state = OTG_STATE_UNDEFINED; return 0; } static int omap_usb_set_peripheral(struct usb_otg *otg, struct usb_gadget *gadget) { struct usb_phy *phy = otg->phy; otg->gadget = gadget; if (!gadget) phy->state = OTG_STATE_UNDEFINED; return 0; } static int omap_usb_power_off(struct phy *x) { struct omap_usb *phy = phy_get_drvdata(x); omap_control_phy_power(phy->control_dev, 0); return 0; } static int omap_usb_power_on(struct phy *x) { struct omap_usb *phy = phy_get_drvdata(x); omap_control_phy_power(phy->control_dev, 1); return 0; } static int omap_usb_init(struct phy *x) { struct omap_usb *phy = phy_get_drvdata(x); u32 val; if (phy->flags & OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) { /* * * Reduce the sensitivity of internal PHY by enabling the * DISCON_BYP_LATCH of the USB2PHY_ANA_CONFIG1 register. This * resolves issues with certain devices which can otherwise * be prone to false disconnects. 
* */ val = omap_usb_readl(phy->phy_base, USB2PHY_ANA_CONFIG1); val |= USB2PHY_DISCON_BYP_LATCH; omap_usb_writel(phy->phy_base, USB2PHY_ANA_CONFIG1, val); } return 0; } static struct phy_ops ops = { .init = omap_usb_init, .power_on = omap_usb_power_on, .power_off = omap_usb_power_off, .owner = THIS_MODULE, }; #ifdef CONFIG_OF static const struct usb_phy_data omap_usb2_data = { .label = "omap_usb2", .flags = OMAP_USB2_HAS_START_SRP | OMAP_USB2_HAS_SET_VBUS, }; static const struct usb_phy_data omap5_usb2_data = { .label = "omap5_usb2", .flags = 0, }; static const struct usb_phy_data dra7x_usb2_data = { .label = "dra7x_usb2", .flags = OMAP_USB2_CALIBRATE_FALSE_DISCONNECT, }; static const struct usb_phy_data am437x_usb2_data = { .label = "am437x_usb2", .flags = 0, }; static const struct of_device_id omap_usb2_id_table[] = { { .compatible = "ti,omap-usb2", .data = &omap_usb2_data, }, { .compatible = "ti,omap5-usb2", .data = &omap5_usb2_data, }, { .compatible = "ti,dra7x-usb2", .data = &dra7x_usb2_data, }, { .compatible = "ti,am437x-usb2", .data = &am437x_usb2_data, }, {}, }; MODULE_DEVICE_TABLE(of, omap_usb2_id_table); #endif static int omap_usb2_probe(struct platform_device *pdev) { struct omap_usb *phy; struct phy *generic_phy; struct resource *res; struct phy_provider *phy_provider; struct usb_otg *otg; struct device_node *node = pdev->dev.of_node; struct device_node *control_node; struct platform_device *control_pdev; const struct of_device_id *of_id; struct usb_phy_data *phy_data; of_id = of_match_device(of_match_ptr(omap_usb2_id_table), &pdev->dev); if (!of_id) return -EINVAL; phy_data = (struct usb_phy_data *)of_id->data; phy = devm_kzalloc(&pdev->dev, sizeof(*phy), GFP_KERNEL); if (!phy) return -ENOMEM; otg = devm_kzalloc(&pdev->dev, sizeof(*otg), GFP_KERNEL); if (!otg) return -ENOMEM; phy->dev = &pdev->dev; phy->phy.dev = phy->dev; phy->phy.label = phy_data->label; phy->phy.otg = otg; phy->phy.type = USB_PHY_TYPE_USB2; if (phy_data->flags & 
OMAP_USB2_CALIBRATE_FALSE_DISCONNECT) { res = platform_get_resource(pdev, IORESOURCE_MEM, 0); phy->phy_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(phy->phy_base)) return PTR_ERR(phy->phy_base); phy->flags |= OMAP_USB2_CALIBRATE_FALSE_DISCONNECT; } control_node = of_parse_phandle(node, "ctrl-module", 0); if (!control_node) { dev_err(&pdev->dev, "Failed to get control device phandle\n"); return -EINVAL; } control_pdev = of_find_device_by_node(control_node); if (!control_pdev) { dev_err(&pdev->dev, "Failed to get control device\n"); return -EINVAL; } phy->control_dev = &control_pdev->dev; omap_control_phy_power(phy->control_dev, 0); otg->set_host = omap_usb_set_host; otg->set_peripheral = omap_usb_set_peripheral; if (phy_data->flags & OMAP_USB2_HAS_SET_VBUS) otg->set_vbus = omap_usb_set_vbus; if (phy_data->flags & OMAP_USB2_HAS_START_SRP) otg->start_srp = omap_usb_start_srp; otg->phy = &phy->phy; platform_set_drvdata(pdev, phy); pm_runtime_enable(phy->dev); generic_phy = devm_phy_create(phy->dev, NULL, &ops, NULL); if (IS_ERR(generic_phy)) { pm_runtime_disable(phy->dev); return PTR_ERR(generic_phy); } phy_set_drvdata(generic_phy, phy); phy_provider = devm_of_phy_provider_register(phy->dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) { pm_runtime_disable(phy->dev); return PTR_ERR(phy_provider); } phy->wkupclk = devm_clk_get(phy->dev, "wkupclk"); if (IS_ERR(phy->wkupclk)) { dev_warn(&pdev->dev, "unable to get wkupclk, trying old name\n"); phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k"); if (IS_ERR(phy->wkupclk)) { dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); return PTR_ERR(phy->wkupclk); } else { dev_warn(&pdev->dev, "found usb_phy_cm_clk32k, please fix DTS\n"); } } clk_prepare(phy->wkupclk); phy->optclk = devm_clk_get(phy->dev, "refclk"); if (IS_ERR(phy->optclk)) { dev_dbg(&pdev->dev, "unable to get refclk, trying old name\n"); phy->optclk = devm_clk_get(phy->dev, "usb_otg_ss_refclk960m"); if (IS_ERR(phy->optclk)) { 
dev_dbg(&pdev->dev, "unable to get usb_otg_ss_refclk960m\n"); } else { dev_warn(&pdev->dev, "found usb_otg_ss_refclk960m, please fix DTS\n"); } } else { clk_prepare(phy->optclk); } usb_add_phy_dev(&phy->phy); return 0; } static int omap_usb2_remove(struct platform_device *pdev) { struct omap_usb *phy = platform_get_drvdata(pdev); clk_unprepare(phy->wkupclk); if (!IS_ERR(phy->optclk)) clk_unprepare(phy->optclk); usb_remove_phy(&phy->phy); pm_runtime_disable(phy->dev); return 0; } #ifdef CONFIG_PM_RUNTIME static int omap_usb2_runtime_suspend(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct omap_usb *phy = platform_get_drvdata(pdev); clk_disable(phy->wkupclk); if (!IS_ERR(phy->optclk)) clk_disable(phy->optclk); return 0; } static int omap_usb2_runtime_resume(struct device *dev) { struct platform_device *pdev = to_platform_device(dev); struct omap_usb *phy = platform_get_drvdata(pdev); int ret; ret = clk_enable(phy->wkupclk); if (ret < 0) { dev_err(phy->dev, "Failed to enable wkupclk %d\n", ret); goto err0; } if (!IS_ERR(phy->optclk)) { ret = clk_enable(phy->optclk); if (ret < 0) { dev_err(phy->dev, "Failed to enable optclk %d\n", ret); goto err1; } } return 0; err1: clk_disable(phy->wkupclk); err0: return ret; } static const struct dev_pm_ops omap_usb2_pm_ops = { SET_RUNTIME_PM_OPS(omap_usb2_runtime_suspend, omap_usb2_runtime_resume, NULL) }; #define DEV_PM_OPS (&omap_usb2_pm_ops) #else #define DEV_PM_OPS NULL #endif static struct platform_driver omap_usb2_driver = { .probe = omap_usb2_probe, .remove = omap_usb2_remove, .driver = { .name = "omap-usb2", .pm = DEV_PM_OPS, .of_match_table = of_match_ptr(omap_usb2_id_table), }, }; module_platform_driver(omap_usb2_driver); MODULE_ALIAS("platform: omap_usb2"); MODULE_AUTHOR("Texas Instruments Inc."); MODULE_DESCRIPTION("OMAP USB2 phy driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
NieNs/IM-A840S-kernel-1
drivers/net/wireless/bcmdhd_org/wl_cfgp2p.c
544
43211
/* * Linux cfgp2p driver * * Copyright (C) 1999-2011, Broadcom Corporation * * Unless you and Broadcom execute a separate written software license * agreement governing use of this software, this software is licensed to you * under the terms of the GNU General Public License version 2 (the "GPL"), * available at http://www.broadcom.com/licenses/GPLv2.php, with the * following added to such license: * * As a special exception, the copyright holders of this software give you * permission to link this software with independent modules, and to copy and * distribute the resulting executable under terms of your choice, provided that * you also meet, for each linked independent module, the terms and conditions of * the license of that module. An independent module is a module which is not * derived from this software. The special exception does not apply to any * modifications of the software. * * Notwithstanding the above, under no circumstances may you combine this * software in any way with any other Broadcom software provided under a license * other than the GPL, without Broadcom's express prior written consent. 
* * $Id: wl_cfgp2p.c,v 1.1.4.1.2.14 2011-02-09 01:40:07 $ * */ #include <typedefs.h> #include <linuxver.h> #include <osl.h> #include <linux/kernel.h> #include <linux/kthread.h> #include <linux/netdevice.h> #include <linux/types.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/if_arp.h> #include <asm/uaccess.h> #include <bcmutils.h> #include <bcmendian.h> #include <proto/ethernet.h> #include <dngl_stats.h> #include <dhd.h> #include <dhdioctl.h> #include <wlioctl.h> #include <wl_cfg80211.h> #include <wl_cfgp2p.h> #include <wldev_common.h> static s8 ioctlbuf[WLC_IOCTL_MAXLEN]; static s8 scanparambuf[WLC_IOCTL_SMLEN]; static s8 *smbuf = ioctlbuf; static bool wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type); static s32 wl_cfgp2p_vndr_ie(struct net_device *ndev, s32 bssidx, s32 pktflag, s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete); /* * Initialize variables related to P2P * */ s32 wl_cfgp2p_init_priv(struct wl_priv *wl) { if (!(wl->p2p = kzalloc(sizeof(struct p2p_info), GFP_KERNEL))) { CFGP2P_ERR(("struct p2p_info allocation failed\n")); return -ENOMEM; } #define INIT_IE(IE_TYPE, BSS_TYPE) \ do { \ memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \ sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \ wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \ } while (0); INIT_IE(probe_req, P2PAPI_BSSCFG_PRIMARY); INIT_IE(probe_res, P2PAPI_BSSCFG_PRIMARY); INIT_IE(assoc_req, P2PAPI_BSSCFG_PRIMARY); INIT_IE(assoc_res, P2PAPI_BSSCFG_PRIMARY); INIT_IE(beacon, P2PAPI_BSSCFG_PRIMARY); INIT_IE(probe_req, P2PAPI_BSSCFG_DEVICE); INIT_IE(probe_res, P2PAPI_BSSCFG_DEVICE); INIT_IE(assoc_req, P2PAPI_BSSCFG_DEVICE); INIT_IE(assoc_res, P2PAPI_BSSCFG_DEVICE); INIT_IE(beacon, P2PAPI_BSSCFG_DEVICE); INIT_IE(probe_req, P2PAPI_BSSCFG_CONNECTION); INIT_IE(probe_res, P2PAPI_BSSCFG_CONNECTION); INIT_IE(assoc_req, P2PAPI_BSSCFG_CONNECTION); INIT_IE(assoc_res, 
P2PAPI_BSSCFG_CONNECTION); INIT_IE(beacon, P2PAPI_BSSCFG_CONNECTION); #undef INIT_IE wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY) = wl_to_prmry_ndev(wl); wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY) = 0; wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL; wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0; wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION) = NULL; wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_CONNECTION) = 0; spin_lock_init(&wl->p2p->timer_lock); return BCME_OK; } /* * Deinitialize variables related to P2P * */ void wl_cfgp2p_deinit_priv(struct wl_priv *wl) { if (wl->p2p) { kfree(wl->p2p); wl->p2p = NULL; } wl->p2p_supported = 0; } /* * Set P2P functions into firmware */ s32 wl_cfgp2p_set_firm_p2p(struct wl_priv *wl) { struct net_device *ndev = wl_to_prmry_ndev(wl); struct ether_addr null_eth_addr = { { 0, 0, 0, 0, 0, 0 } }; s32 ret = BCME_OK; s32 val = 0; /* Do we have to check whether APSTA is enabled or not ? */ wldev_iovar_getint(ndev, "apsta", &val); if (val == 0) { val = 1; wldev_ioctl(ndev, WLC_DOWN, &val, sizeof(s32), true); wldev_iovar_setint(ndev, "apsta", val); wldev_ioctl(ndev, WLC_UP, &val, sizeof(s32), true); } val = 1; /* Disable firmware roaming for P2P */ wldev_iovar_setint(ndev, "roam_off", val); /* In case of COB type, firmware has default mac address * After Initializing firmware, we have to set current mac address to * firmware for P2P device address */ ret = wldev_iovar_setbuf_bsscfg(ndev, "p2p_da_override", &null_eth_addr, sizeof(null_eth_addr), ioctlbuf, sizeof(ioctlbuf), 0); if (ret && ret != BCME_UNSUPPORTED) { CFGP2P_ERR(("failed to update device address\n")); } return ret; } /* Create a new P2P BSS. * Parameters: * @mac : MAC address of the BSS to create * @if_type : interface type: WL_P2P_IF_GO or WL_P2P_IF_CLIENT * @chspec : chspec to use if creating a GO BSS. * Returns 0 if success. 
*/ s32 wl_cfgp2p_ifadd(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, chanspec_t chspec) { wl_p2p_if_t ifreq; s32 err; struct net_device *ndev = wl_to_prmry_ndev(wl); ifreq.type = if_type; ifreq.chspec = chspec; memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet)); CFGP2P_INFO(("---wl p2p_ifadd %02x:%02x:%02x:%02x:%02x:%02x %s %u\n", ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2], ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5], (if_type == WL_P2P_IF_GO) ? "go" : "client", (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT)); err = wldev_iovar_setbuf(ndev, "p2p_ifadd", &ifreq, sizeof(ifreq), ioctlbuf, sizeof(ioctlbuf)); return err; } /* Delete a P2P BSS. * Parameters: * @mac : MAC address of the BSS to create * Returns 0 if success. */ s32 wl_cfgp2p_ifdel(struct wl_priv *wl, struct ether_addr *mac) { s32 ret; struct net_device *netdev = wl_to_prmry_ndev(wl); CFGP2P_INFO(("------primary idx %d : wl p2p_ifdel %02x:%02x:%02x:%02x:%02x:%02x\n", netdev->ifindex, mac->octet[0], mac->octet[1], mac->octet[2], mac->octet[3], mac->octet[4], mac->octet[5])); ret = wldev_iovar_setbuf(netdev, "p2p_ifdel", mac, sizeof(*mac), ioctlbuf, sizeof(ioctlbuf)); if (unlikely(ret < 0)) { printk("'wl p2p_ifdel' error %d\n", ret); } return ret; } /* Change a P2P Role. * Parameters: * @mac : MAC address of the BSS to change a role * Returns 0 if success. */ s32 wl_cfgp2p_ifchange(struct wl_priv *wl, struct ether_addr *mac, u8 if_type, chanspec_t chspec) { wl_p2p_if_t ifreq; s32 err; struct net_device *netdev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION); ifreq.type = if_type; ifreq.chspec = chspec; memcpy(ifreq.addr.octet, mac->octet, sizeof(ifreq.addr.octet)); CFGP2P_INFO(("---wl p2p_ifchange %02x:%02x:%02x:%02x:%02x:%02x %s %u\n", ifreq.addr.octet[0], ifreq.addr.octet[1], ifreq.addr.octet[2], ifreq.addr.octet[3], ifreq.addr.octet[4], ifreq.addr.octet[5], (if_type == WL_P2P_IF_GO) ? 
"go" : "client", (chspec & WL_CHANSPEC_CHAN_MASK) >> WL_CHANSPEC_CHAN_SHIFT)); err = wldev_iovar_setbuf(netdev, "p2p_ifupd", &ifreq, sizeof(ifreq), ioctlbuf, sizeof(ioctlbuf)); if (unlikely(err < 0)) { printk("'wl p2p_ifupd' error %d\n", err); } return err; } /* Get the index of a created P2P BSS. * Parameters: * @mac : MAC address of the created BSS * @index : output: index of created BSS * Returns 0 if success. */ s32 wl_cfgp2p_ifidx(struct wl_priv *wl, struct ether_addr *mac, s32 *index) { s32 ret; u8 getbuf[64]; struct net_device *dev = wl_to_prmry_ndev(wl); CFGP2P_INFO(("---wl p2p_if %02x:%02x:%02x:%02x:%02x:%02x\n", mac->octet[0], mac->octet[1], mac->octet[2], mac->octet[3], mac->octet[4], mac->octet[5])); ret = wldev_iovar_getbuf_bsscfg(dev, "p2p_if", mac, sizeof(*mac), getbuf, sizeof(getbuf), wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_PRIMARY)); if (ret == 0) { memcpy(index, getbuf, sizeof(index)); CFGP2P_INFO(("---wl p2p_if ==> %d\n", *index)); } return ret; } s32 wl_cfgp2p_set_discovery(struct wl_priv *wl, s32 on) { s32 ret = BCME_OK; struct net_device *ndev = wl_to_prmry_ndev(wl); CFGP2P_DBG(("enter\n")); ret = wldev_iovar_setint(ndev, "p2p_disc", on); if (unlikely(ret < 0)) { CFGP2P_ERR(("p2p_disc %d error %d\n", on, ret)); } return ret; } /* Set the WL driver's P2P mode. * Parameters : * @mode : is one of WL_P2P_DISC_ST_{SCAN,LISTEN,SEARCH}. 
* @channel : the channel to listen * @listen_ms : the time (milli seconds) to wait * @bssidx : bss index for BSSCFG * Returns 0 if success */ s32 wl_cfgp2p_set_p2p_mode(struct wl_priv *wl, u8 mode, u32 channel, u16 listen_ms, int bssidx) { wl_p2p_disc_st_t discovery_mode; s32 ret; struct net_device *dev; CFGP2P_DBG(("enter\n")); if (unlikely(bssidx >= P2PAPI_BSSCFG_MAX)) { CFGP2P_ERR((" %d index out of range\n", bssidx)); return -1; } dev = wl_to_p2p_bss_ndev(wl, bssidx); if (unlikely(dev == NULL)) { CFGP2P_ERR(("bssidx %d is not assigned\n", bssidx)); return BCME_NOTFOUND; } /* Put the WL driver into P2P Listen Mode to respond to P2P probe reqs */ discovery_mode.state = mode; discovery_mode.chspec = CH20MHZ_CHSPEC(channel); discovery_mode.dwell = listen_ms; ret = wldev_iovar_setbuf_bsscfg(dev, "p2p_state", &discovery_mode, sizeof(discovery_mode), ioctlbuf, sizeof(ioctlbuf), bssidx); return ret; } /* Get the index of the P2P Discovery BSS */ s32 wl_cfgp2p_get_disc_idx(struct wl_priv *wl, s32 *index) { s32 ret; struct net_device *dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY); ret = wldev_iovar_getint(dev, "p2p_dev", index); CFGP2P_INFO(("p2p_dev bsscfg_idx=%d ret=%d\n", *index, ret)); if (unlikely(ret < 0)) { CFGP2P_ERR(("'p2p_dev' error %d\n", ret)); return ret; } return ret; } s32 wl_cfgp2p_init_discovery(struct wl_priv *wl) { s32 index = 0; s32 ret = BCME_OK; CFGP2P_DBG(("enter\n")); if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) != 0) { CFGP2P_ERR(("do nothing, already initialized\n")); return ret; } ret = wl_cfgp2p_set_discovery(wl, 1); if (ret < 0) { CFGP2P_ERR(("set discover error\n")); return ret; } /* Enable P2P Discovery in the WL Driver */ ret = wl_cfgp2p_get_disc_idx(wl, &index); if (ret < 0) { return ret; } wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY); wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = index; /* Set the initial discovery state to SCAN */ ret = wl_cfgp2p_set_p2p_mode(wl, 
WL_P2P_DISC_ST_SCAN, 0, 0, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); if (unlikely(ret != 0)) { CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n")); wl_cfgp2p_set_discovery(wl, 0); wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0; wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL; return 0; } return ret; } /* Deinitialize P2P Discovery * Parameters : * @wl : wl_private data * Returns 0 if succes */ s32 wl_cfgp2p_deinit_discovery(struct wl_priv *wl) { s32 ret = BCME_OK; CFGP2P_DBG(("enter\n")); if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) { CFGP2P_ERR(("do nothing, not initialized\n")); return -1; } /* Set the discovery state to SCAN */ ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); /* Disable P2P discovery in the WL driver (deletes the discovery BSSCFG) */ ret = wl_cfgp2p_set_discovery(wl, 0); /* Clear our saved WPS and P2P IEs for the discovery BSS. The driver * deleted these IEs when wl_cfgp2p_set_discovery() deleted the discovery * BSS. */ /* Clear the saved bsscfg index of the discovery BSSCFG to indicate we * have no discovery BSS. */ wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) = 0; wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE) = NULL; return ret; } /* Enable P2P Discovery * Parameters: * @wl : wl_private data * @ie : probe request ie (WPS IE + P2P IE) * @ie_len : probe request ie length * Returns 0 if success. 
*/ s32 wl_cfgp2p_enable_discovery(struct wl_priv *wl, struct net_device *dev, const u8 *ie, u32 ie_len) { s32 ret = BCME_OK; if (wl_get_p2p_status(wl, DISCOVERY_ON)) { CFGP2P_INFO((" DISCOVERY is already initialized, we have nothing to do\n")); goto set_ie; } wl_set_p2p_status(wl, DISCOVERY_ON); CFGP2P_DBG(("enter\n")); ret = wl_cfgp2p_init_discovery(wl); if (unlikely(ret < 0)) { CFGP2P_ERR((" init discovery error %d\n", ret)); goto exit; } /* Set wsec to any non-zero value in the discovery bsscfg to ensure our * P2P probe responses have the privacy bit set in the 802.11 WPA IE. * Some peer devices may not initiate WPS with us if this bit is not set. */ ret = wldev_iovar_setint_bsscfg(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), "wsec", AES_ENABLED, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); if (unlikely(ret < 0)) { CFGP2P_ERR((" wsec error %d\n", ret)); } set_ie: ret = wl_cfgp2p_set_management_ie(wl, dev, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE), VNDR_IE_PRBREQ_FLAG, ie, ie_len); if (unlikely(ret < 0)) { CFGP2P_ERR(("set probreq ie occurs error %d\n", ret)); goto exit; } exit: return ret; } /* Disable P2P Discovery * Parameters: * @wl : wl_private_data * Returns 0 if success. */ s32 wl_cfgp2p_disable_discovery(struct wl_priv *wl) { s32 ret = BCME_OK; CFGP2P_DBG((" enter\n")); wl_clr_p2p_status(wl, DISCOVERY_ON); if (wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE) == 0) { CFGP2P_ERR((" do nothing, not initialized\n")); goto exit; } ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); if (unlikely(ret < 0)) { CFGP2P_ERR(("unable to set WL_P2P_DISC_ST_SCAN\n")); } /* Do a scan abort to stop the driver's scan engine in case it is still * waiting out an action frame tx dwell time. 
*/
#ifdef NOT_YET
	if (wl_get_p2p_status(wl, SCANNING)) {
		p2pwlu_scan_abort(hdl, FALSE);
	}
#endif
	wl_clr_p2p_status(wl, DISCOVERY_ON);
	ret = wl_cfgp2p_deinit_discovery(wl);

exit:
	return ret;
}

/* Build and fire a P2P escan ("p2p_scan" iovar) on the given channel list.
 * Parameters:
 * @active       : non-zero for an active scan, 0 for passive
 * @num_chans    : number of entries in @channels
 * @channels     : channels to scan (3 channels => social-channel dwell time)
 * @search_state : WL_P2P_DISC_ST_SEARCH or WL_P2P_DISC_ST_SCAN
 * @action       : escan action (start/continue/abort)
 * @bssidx       : bss index for the discovery BSSCFG
 * Returns 0 if success.
 */
s32
wl_cfgp2p_escan(struct wl_priv *wl, struct net_device *dev, u16 active,
	u32 num_chans, u16 *channels, s32 search_state, u16 action, u32 bssidx)
{
	s32 ret = BCME_OK;
	s32 memsize;
	s32 eparams_size;
	u32 i;
	s8 *memblk;
	wl_p2p_scan_t *p2p_params;
	wl_escan_params_t *eparams;
	wlc_ssid_t ssid;
	/* Scan parameters */
#define P2PAPI_SCAN_NPROBES 4
#define P2PAPI_SCAN_DWELL_TIME_MS 80
#define P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS 100
#define P2PAPI_SCAN_HOME_TIME_MS 10
	struct net_device *pri_dev = wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_PRIMARY);
	wl_set_p2p_status(wl, SCANNING);
	/* Allocate scan params which need space for 3 channels and 0 ssids */
	eparams_size = (WL_SCAN_PARAMS_FIXED_SIZE +
		OFFSETOF(wl_escan_params_t, params)) +
		num_chans * sizeof(eparams->params.channel_list[0]);
	memsize = sizeof(wl_p2p_scan_t) + eparams_size;
	memblk = scanparambuf;
	if (memsize > sizeof(scanparambuf)) {
		/* NOTE(review): "%u" with a sizeof() (size_t) argument is a
		 * printk format mismatch on 64-bit builds — confirm / use %zu */
		CFGP2P_ERR((" scanpar buf too small (%u > %u)\n",
			memsize, sizeof(scanparambuf)));
		return -1;
	}
	memset(memblk, 0, memsize);
	memset(ioctlbuf, 0, sizeof(ioctlbuf));
	if (search_state == WL_P2P_DISC_ST_SEARCH) {
		/*
		 * If we in SEARCH STATE, we don't need to set SSID explictly
		 * because dongle use P2P WILDCARD internally by default
		 */
		wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SEARCH, 0, 0, bssidx);
		ssid.SSID_len = htod32(0);
	} else if (search_state == WL_P2P_DISC_ST_SCAN) {
		/* SCAN STATE 802.11 SCAN
		 * WFD Supplicant has p2p_find command with (type=progressive, type= full)
		 * So if P2P_find command with type=progressive,
		 * we have to set ssid to P2P WILDCARD because
		 * we just do broadcast scan unless setting SSID
		 */
		strcpy(ssid.SSID, WL_P2P_WILDCARD_SSID);
		ssid.SSID_len = htod32(WL_P2P_WILDCARD_SSID_LEN);
		wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, bssidx);
	}
	/* Fill in the P2P scan structure at the start of the iovar param block */
	p2p_params = (wl_p2p_scan_t*) memblk;
	p2p_params->type = 'E';
	/* Fill in the Scan structure that follows the P2P scan structure */
	eparams = (wl_escan_params_t*) (p2p_params + 1);
	eparams->params.bss_type = DOT11_BSSTYPE_ANY;
	if (active)
		eparams->params.scan_type = DOT11_SCANTYPE_ACTIVE;
	else
		eparams->params.scan_type = DOT11_SCANTYPE_PASSIVE;
	memcpy(&eparams->params.bssid, &ether_bcast, ETHER_ADDR_LEN);
	if (ssid.SSID_len)
		memcpy(&eparams->params.ssid, &ssid, sizeof(wlc_ssid_t));
	eparams->params.nprobes = htod32(P2PAPI_SCAN_NPROBES);
	eparams->params.home_time = htod32(P2PAPI_SCAN_HOME_TIME_MS);
	/* while associated, let the firmware pick the dwell time (-1);
	 * 3 channels means a social-channel search => longer dwell */
	if (wl_get_drv_status(wl, CONNECTED))
		eparams->params.active_time = htod32(-1);
	else if (num_chans == 3)
		eparams->params.active_time = htod32(P2PAPI_SCAN_SOCIAL_DWELL_TIME_MS);
	else
		eparams->params.active_time = htod32(P2PAPI_SCAN_DWELL_TIME_MS);
	eparams->params.passive_time = htod32(-1);
	eparams->params.channel_num = htod32((0 << WL_SCAN_PARAMS_NSSID_SHIFT) |
		(num_chans & WL_SCAN_PARAMS_COUNT_MASK));
	for (i = 0; i < num_chans; i++) {
		eparams->params.channel_list[i] = htodchanspec(channels[i]);
	}
	eparams->version = htod32(ESCAN_REQ_VERSION);
	eparams->action = htod16(action);
	eparams->sync_id = htod16(0x1234);
	CFGP2P_INFO(("SCAN CHANNELS : "));
	for (i = 0; i < num_chans; i++) {
		if (i == 0)
			CFGP2P_INFO(("%d", channels[i]));
		else
			CFGP2P_INFO((",%d", channels[i]));
	}
	CFGP2P_INFO(("\n"));
	/* NOTE(review): the response buffer passed is 'smbuf' but the size
	 * argument is sizeof(ioctlbuf) — a mismatched buffer/size pair unless
	 * both are the same length. Other calls in this file consistently use
	 * ioctlbuf/sizeof(ioctlbuf); confirm against smbuf's declaration. */
	ret = wldev_iovar_setbuf_bsscfg(pri_dev, "p2p_scan",
		memblk, memsize, smbuf, sizeof(ioctlbuf), bssidx);
	return ret;
}
/* Check whether pointed-to IE looks like WPA. */
#define wl_cfgp2p_is_wpa_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
	(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPA_OUI_TYPE)
/* Check whether pointed-to IE looks like WPS. */
#define wl_cfgp2p_is_wps_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \
	(const uint8 *)WPS_OUI, WPS_OUI_LEN, WPS_OUI_TYPE)
/* Check whether the given IE looks like WFA P2P IE.
*/ #define wl_cfgp2p_is_p2p_ie(ie, tlvs, len) wl_cfgp2p_has_ie(ie, tlvs, len, \ (const uint8 *)WFA_OUI, WFA_OUI_LEN, WFA_OUI_TYPE_P2P) /* Delete and Set a management vndr ie to firmware * Parameters: * @wl : wl_private data * @ndev : net device for bssidx * @bssidx : bssidx for BSS * @pktflag : packet flag for IE (VNDR_IE_PRBREQ_FLAG,VNDR_IE_PRBRSP_FLAG, VNDR_IE_ASSOCRSP_FLAG, * VNDR_IE_ASSOCREQ_FLAG) * @ie : VNDR IE (such as P2P IE , WPS IE) * @ie_len : VNDR IE Length * Returns 0 if success. */ s32 wl_cfgp2p_set_management_ie(struct wl_priv *wl, struct net_device *ndev, s32 bssidx, s32 pktflag, const u8 *vndr_ie, u32 vndr_ie_len) { /* Vendor-specific Information Element ID */ #define VNDR_SPEC_ELEMENT_ID 0xdd s32 ret = BCME_OK; u32 pos; u8 *ie_buf; u8 *mgmt_ie_buf = NULL; u32 mgmt_ie_buf_len = 0; u32 *mgmt_ie_len = 0; u8 ie_id, ie_len; u8 delete = 0; #define IE_TYPE(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie) #define IE_TYPE_LEN(type, bsstype) (wl_to_p2p_bss_saved_ie(wl, bsstype).p2p_ ## type ## _ie_len) if (wl->p2p_supported && p2p_on(wl) && bssidx != -1) { if (bssidx == P2PAPI_BSSCFG_PRIMARY) bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE); switch (pktflag) { case VNDR_IE_PRBREQ_FLAG : mgmt_ie_buf = IE_TYPE(probe_req, bssidx); mgmt_ie_len = &IE_TYPE_LEN(probe_req, bssidx); mgmt_ie_buf_len = sizeof(IE_TYPE(probe_req, bssidx)); break; case VNDR_IE_PRBRSP_FLAG : mgmt_ie_buf = IE_TYPE(probe_res, bssidx); mgmt_ie_len = &IE_TYPE_LEN(probe_res, bssidx); mgmt_ie_buf_len = sizeof(IE_TYPE(probe_res, bssidx)); break; case VNDR_IE_ASSOCREQ_FLAG : mgmt_ie_buf = IE_TYPE(assoc_req, bssidx); mgmt_ie_len = &IE_TYPE_LEN(assoc_req, bssidx); mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_req, bssidx)); break; case VNDR_IE_ASSOCRSP_FLAG : mgmt_ie_buf = IE_TYPE(assoc_res, bssidx); mgmt_ie_len = &IE_TYPE_LEN(assoc_res, bssidx); mgmt_ie_buf_len = sizeof(IE_TYPE(assoc_res, bssidx)); break; case VNDR_IE_BEACON_FLAG : mgmt_ie_buf = IE_TYPE(beacon, bssidx); 
mgmt_ie_len = &IE_TYPE_LEN(beacon, bssidx); mgmt_ie_buf_len = sizeof(IE_TYPE(beacon, bssidx)); break; default: mgmt_ie_buf = NULL; mgmt_ie_len = NULL; CFGP2P_ERR(("not suitable type\n")); return -1; } } else if (get_mode_by_netdev(wl, ndev) == WL_MODE_AP) { switch (pktflag) { case VNDR_IE_PRBRSP_FLAG : mgmt_ie_buf = wl->ap_info->probe_res_ie; mgmt_ie_len = &wl->ap_info->probe_res_ie_len; mgmt_ie_buf_len = sizeof(wl->ap_info->probe_res_ie); break; case VNDR_IE_BEACON_FLAG : mgmt_ie_buf = wl->ap_info->beacon_ie; mgmt_ie_len = &wl->ap_info->beacon_ie_len; mgmt_ie_buf_len = sizeof(wl->ap_info->beacon_ie); break; default: mgmt_ie_buf = NULL; mgmt_ie_len = NULL; CFGP2P_ERR(("not suitable type\n")); return -1; } bssidx = 0; } else if (bssidx == -1 && get_mode_by_netdev(wl, ndev) == WL_MODE_BSS) { switch (pktflag) { case VNDR_IE_PRBREQ_FLAG : mgmt_ie_buf = wl->sta_info->probe_req_ie; mgmt_ie_len = &wl->sta_info->probe_req_ie_len; mgmt_ie_buf_len = sizeof(wl->sta_info->probe_req_ie); break; case VNDR_IE_ASSOCREQ_FLAG : mgmt_ie_buf = wl->sta_info->assoc_req_ie; mgmt_ie_len = &wl->sta_info->assoc_req_ie_len; mgmt_ie_buf_len = sizeof(wl->sta_info->assoc_req_ie); break; default: mgmt_ie_buf = NULL; mgmt_ie_len = NULL; CFGP2P_ERR(("not suitable type\n")); return -1; } bssidx = 0; } else { CFGP2P_ERR(("not suitable type\n")); return -1; } if (vndr_ie_len > mgmt_ie_buf_len) { CFGP2P_ERR(("extra IE size too big\n")); ret = -ENOMEM; } else { if (mgmt_ie_buf != NULL) { if (vndr_ie_len && (vndr_ie_len == *mgmt_ie_len) && (memcmp(mgmt_ie_buf, vndr_ie, vndr_ie_len) == 0)) { CFGP2P_INFO(("Previous mgmt IE is equals to current IE")); goto exit; } pos = 0; delete = 1; ie_buf = (u8 *) mgmt_ie_buf; while (pos < *mgmt_ie_len) { ie_id = ie_buf[pos++]; ie_len = ie_buf[pos++]; if ((ie_id == DOT11_MNG_VS_ID) && (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) || wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0))) { CFGP2P_INFO(("DELELED ID : %d, Len : %d , OUI :" "%02x:%02x:%02x\n", ie_id, ie_len, 
ie_buf[pos], ie_buf[pos+1], ie_buf[pos+2])); ret = wl_cfgp2p_vndr_ie(ndev, bssidx, pktflag, ie_buf+pos, VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3, ie_len-3, delete); } pos += ie_len; } } *mgmt_ie_len = 0; /* Add if there is any extra IE */ if (vndr_ie && vndr_ie_len) { /* save the current IE in wl struct */ memcpy(mgmt_ie_buf, vndr_ie, vndr_ie_len); *mgmt_ie_len = vndr_ie_len; pos = 0; ie_buf = (u8 *) vndr_ie; delete = 0; while (pos < vndr_ie_len) { ie_id = ie_buf[pos++]; ie_len = ie_buf[pos++]; if ((ie_id == DOT11_MNG_VS_ID) && (wl_cfgp2p_is_wps_ie(&ie_buf[pos-2], NULL, 0) || wl_cfgp2p_is_p2p_ie(&ie_buf[pos-2], NULL, 0))) { CFGP2P_INFO(("ADDED ID : %d, Len : %d , OUI :" "%02x:%02x:%02x\n", ie_id, ie_len, ie_buf[pos], ie_buf[pos+1], ie_buf[pos+2])); ret = wl_cfgp2p_vndr_ie(ndev, bssidx, pktflag, ie_buf+pos, VNDR_SPEC_ELEMENT_ID, ie_buf+pos+3, ie_len-3, delete); } pos += ie_len; } } } #undef IE_TYPE #undef IE_TYPE_LEN exit: return ret; } /* Clear the manament IE buffer of BSSCFG * Parameters: * @wl : wl_private data * @bssidx : bssidx for BSS * * Returns 0 if success. */ s32 wl_cfgp2p_clear_management_ie(struct wl_priv *wl, s32 bssidx) { #define INIT_IE(IE_TYPE, BSS_TYPE) \ do { \ memset(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie, 0, \ sizeof(wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie)); \ wl_to_p2p_bss_saved_ie(wl, BSS_TYPE).p2p_ ## IE_TYPE ## _ie_len = 0; \ } while (0); if (bssidx < 0) { CFGP2P_ERR(("invalid bssidx\n")); return BCME_BADARG; } INIT_IE(probe_req, bssidx); INIT_IE(probe_res, bssidx); INIT_IE(assoc_req, bssidx); INIT_IE(assoc_res, bssidx); INIT_IE(beacon, bssidx); return BCME_OK; } /* Is any of the tlvs the expected entry? If * not update the tlvs buffer pointer/length. 
*/ static bool wl_cfgp2p_has_ie(u8 *ie, u8 **tlvs, u32 *tlvs_len, const u8 *oui, u32 oui_len, u8 type) { /* If the contents match the OUI and the type */ if (ie[TLV_LEN_OFF] >= oui_len + 1 && !bcmp(&ie[TLV_BODY_OFF], oui, oui_len) && type == ie[TLV_BODY_OFF + oui_len]) { return TRUE; } if (tlvs == NULL) return FALSE; /* point to the next ie */ ie += ie[TLV_LEN_OFF] + TLV_HDR_LEN; /* calculate the length of the rest of the buffer */ *tlvs_len -= (int)(ie - *tlvs); /* update the pointer to the start of the buffer */ *tlvs = ie; return FALSE; } wpa_ie_fixed_t * wl_cfgp2p_find_wpaie(u8 *parse, u32 len) { bcm_tlv_t *ie; while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) { if (wl_cfgp2p_is_wpa_ie((u8*)ie, &parse, &len)) { return (wpa_ie_fixed_t *)ie; } } return NULL; } wpa_ie_fixed_t * wl_cfgp2p_find_wpsie(u8 *parse, u32 len) { bcm_tlv_t *ie; while ((ie = bcm_parse_tlvs(parse, (u32)len, DOT11_MNG_VS_ID))) { if (wl_cfgp2p_is_wps_ie((u8*)ie, &parse, &len)) { return (wpa_ie_fixed_t *)ie; } } return NULL; } wifi_p2p_ie_t * wl_cfgp2p_find_p2pie(u8 *parse, u32 len) { bcm_tlv_t *ie; while ((ie = bcm_parse_tlvs(parse, (int)len, DOT11_MNG_VS_ID))) { if (wl_cfgp2p_is_p2p_ie((uint8*)ie, &parse, &len)) { return (wifi_p2p_ie_t *)ie; } } return NULL; } static s32 wl_cfgp2p_vndr_ie(struct net_device *ndev, s32 bssidx, s32 pktflag, s8 *oui, s32 ie_id, s8 *data, s32 data_len, s32 delete) { s32 err = BCME_OK; s32 buf_len; s32 iecount; vndr_ie_setbuf_t *ie_setbuf; /* Validate the pktflag parameter */ if ((pktflag & ~(VNDR_IE_BEACON_FLAG | VNDR_IE_PRBRSP_FLAG | VNDR_IE_ASSOCRSP_FLAG | VNDR_IE_AUTHRSP_FLAG | VNDR_IE_PRBREQ_FLAG | VNDR_IE_ASSOCREQ_FLAG))) { CFGP2P_ERR(("p2pwl_vndr_ie: Invalid packet flag 0x%x\n", pktflag)); return -1; } buf_len = sizeof(vndr_ie_setbuf_t) + data_len - 1; ie_setbuf = (vndr_ie_setbuf_t *) kzalloc(buf_len, GFP_KERNEL); CFGP2P_INFO((" ie_id : %02x, data length : %d\n", ie_id, data_len)); if (!ie_setbuf) { CFGP2P_ERR(("Error allocating buffer for 
IE\n")); return -ENOMEM; } if (delete) strcpy(ie_setbuf->cmd, "del"); else strcpy(ie_setbuf->cmd, "add"); /* Buffer contains only 1 IE */ iecount = htod32(1); memcpy((void *)&ie_setbuf->vndr_ie_buffer.iecount, &iecount, sizeof(int)); pktflag = htod32(pktflag); memcpy((void *)&ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].pktflag, &pktflag, sizeof(uint32)); ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.id = ie_id; ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.len = (uchar)(data_len + VNDR_IE_MIN_LEN); memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.oui, oui, 3); memcpy(ie_setbuf->vndr_ie_buffer.vndr_ie_list[0].vndr_ie_data.data, data, data_len); err = wldev_iovar_setbuf_bsscfg(ndev, "vndr_ie", ie_setbuf, buf_len, ioctlbuf, sizeof(ioctlbuf), bssidx); CFGP2P_INFO(("vndr_ie iovar returns %d\n", err)); kfree(ie_setbuf); return err; } /* * Search the bssidx based on dev argument * Parameters: * @wl : wl_private data * @ndev : net device to search bssidx * Returns bssidx for ndev */ s32 wl_cfgp2p_find_idx(struct wl_priv *wl, struct net_device *ndev) { u32 i; s32 index = -1; if (ndev == NULL) { CFGP2P_ERR((" ndev is NULL\n")); goto exit; } if (!wl->p2p_supported) { return P2PAPI_BSSCFG_PRIMARY; } for (i = 0; i < P2PAPI_BSSCFG_MAX; i++) { if (ndev == wl_to_p2p_bss_ndev(wl, i)) { index = wl_to_p2p_bss_bssidx(wl, i); break; } } if (index == -1) return P2PAPI_BSSCFG_PRIMARY; exit: return index; } /* * Callback function for WLC_E_P2P_DISC_LISTEN_COMPLETE */ s32 wl_cfgp2p_listen_complete(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data) { s32 ret = BCME_OK; CFGP2P_DBG((" Enter\n")); if (wl_get_p2p_status(wl, LISTEN_EXPIRED) == 0) { wl_set_p2p_status(wl, LISTEN_EXPIRED); if (timer_pending(&wl->p2p->listen_timer)) { spin_lock_bh(&wl->p2p->timer_lock); del_timer_sync(&wl->p2p->listen_timer); spin_unlock_bh(&wl->p2p->timer_lock); } cfg80211_remain_on_channel_expired(ndev, wl->cache_cookie, &wl->remain_on_chan, 
wl->remain_on_chan_type, GFP_KERNEL); } else wl_clr_p2p_status(wl, LISTEN_EXPIRED); return ret; } /* * Timer expire callback function for LISTEN * We can't report cfg80211_remain_on_channel_expired from Timer ISR context, * so lets do it from thread context. */ static void wl_cfgp2p_listen_expired(unsigned long data) { wl_event_msg_t msg; struct wl_priv *wl = (struct wl_priv *) data; CFGP2P_DBG((" Enter\n")); msg.event_type = hton32(WLC_E_P2P_DISC_LISTEN_COMPLETE); wl_cfg80211_event(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_DEVICE), &msg, NULL); } /* * Do a P2P Listen on the given channel for the given duration. * A listen consists of sitting idle and responding to P2P probe requests * with a P2P probe response. * * This fn assumes dongle p2p device discovery is already enabled. * Parameters : * @wl : wl_private data * @channel : channel to listen * @duration_ms : the time (milli seconds) to wait */ s32 wl_cfgp2p_discover_listen(struct wl_priv *wl, s32 channel, u32 duration_ms) { #define INIT_TIMER(timer, func, duration, extra_delay) \ do { \ init_timer(timer); \ timer->function = func; \ timer->expires = jiffies + msecs_to_jiffies(duration + extra_delay); \ timer->data = (unsigned long) wl; \ add_timer(timer); \ } while (0); s32 ret = BCME_OK; struct timer_list *_timer; CFGP2P_DBG((" Enter Channel : %d, Duration : %d\n", channel, duration_ms)); if (unlikely(wl_get_p2p_status(wl, DISCOVERY_ON) == 0)) { CFGP2P_ERR((" Discovery is not set, so we have noting to do\n")); ret = BCME_NOTREADY; goto exit; } if (timer_pending(&wl->p2p->listen_timer)) { CFGP2P_DBG(("previous LISTEN is not completed yet\n")); goto exit; } else wl_clr_p2p_status(wl, LISTEN_EXPIRED); wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_LISTEN, channel, (u16) duration_ms, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); _timer = &wl->p2p->listen_timer; /* We will wait to receive WLC_E_P2P_DISC_LISTEN_COMPLETE from dongle , * otherwise we will wait up to duration_ms + 200ms */ INIT_TIMER(_timer, 
wl_cfgp2p_listen_expired, duration_ms, 200); #undef INIT_TIMER exit: return ret; } s32 wl_cfgp2p_discover_enable_search(struct wl_priv *wl, u8 enable) { s32 ret = BCME_OK; CFGP2P_DBG((" Enter\n")); if (!wl_get_p2p_status(wl, DISCOVERY_ON)) { CFGP2P_DBG((" do nothing, discovery is off\n")); return ret; } if (wl_get_p2p_status(wl, SEARCH_ENABLED) == enable) { CFGP2P_DBG(("already : %d\n", enable)); return ret; } wl_chg_p2p_status(wl, SEARCH_ENABLED); /* When disabling Search, reset the WL driver's p2p discovery state to * WL_P2P_DISC_ST_SCAN. */ if (!enable) { wl_clr_p2p_status(wl, SCANNING); ret = wl_cfgp2p_set_p2p_mode(wl, WL_P2P_DISC_ST_SCAN, 0, 0, wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE)); } return ret; } /* * Callback function for WLC_E_ACTION_FRAME_COMPLETE, WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE */ s32 wl_cfgp2p_action_tx_complete(struct wl_priv *wl, struct net_device *ndev, const wl_event_msg_t *e, void *data) { s32 ret = BCME_OK; u32 event_type = ntoh32(e->event_type); u32 status = ntoh32(e->status); CFGP2P_DBG((" Enter\n")); if (event_type == WLC_E_ACTION_FRAME_COMPLETE) { CFGP2P_INFO((" WLC_E_ACTION_FRAME_COMPLETE is received : %d\n", status)); if (status == WLC_E_STATUS_SUCCESS) { wl_set_p2p_status(wl, ACTION_TX_COMPLETED); } else { wl_set_p2p_status(wl, ACTION_TX_NOACK); CFGP2P_ERR(("WLC_E_ACTION_FRAME_COMPLETE : NO ACK\n")); } wake_up_interruptible(&wl->dongle_event_wait); } else { CFGP2P_INFO((" WLC_E_ACTION_FRAME_OFFCHAN_COMPLETE is received," "status : %d\n", status)); } return ret; } /* Send an action frame immediately without doing channel synchronization. * * This function does not wait for a completion event before returning. * The WLC_E_ACTION_FRAME_COMPLETE event will be received when the action * frame is transmitted. * The WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE event will be received when an * 802.11 ack has been received for the sent action frame. 
*/ s32 wl_cfgp2p_tx_action_frame(struct wl_priv *wl, struct net_device *dev, wl_af_params_t *af_params, s32 bssidx) { s32 ret = BCME_OK; s32 timeout = 0; CFGP2P_INFO(("\n")); CFGP2P_INFO(("channel : %u , dwell time : %u\n", af_params->channel, af_params->dwell_time)); wl_clr_p2p_status(wl, ACTION_TX_COMPLETED); wl_clr_p2p_status(wl, ACTION_TX_NOACK); #define MAX_WAIT_TIME 2000 if (bssidx == P2PAPI_BSSCFG_PRIMARY) bssidx = wl_to_p2p_bss_bssidx(wl, P2PAPI_BSSCFG_DEVICE); ret = wldev_iovar_setbuf_bsscfg(dev, "actframe", af_params, sizeof(*af_params), ioctlbuf, sizeof(ioctlbuf), bssidx); if (ret < 0) { CFGP2P_ERR((" sending action frame is failed\n")); goto exit; } timeout = wait_event_interruptible_timeout(wl->dongle_event_wait, (wl_get_p2p_status(wl, ACTION_TX_COMPLETED) || wl_get_p2p_status(wl, ACTION_TX_NOACK)), msecs_to_jiffies(MAX_WAIT_TIME)); if (timeout > 0 && wl_get_p2p_status(wl, ACTION_TX_COMPLETED)) { CFGP2P_INFO(("tx action frame operation is completed\n")); ret = BCME_OK; } else { ret = BCME_ERROR; CFGP2P_INFO(("tx action frame operation is failed\n")); } exit: CFGP2P_INFO((" via act frame iovar : status = %d\n", ret)); #undef MAX_WAIT_TIME return ret; } /* Generate our P2P Device Address and P2P Interface Address from our primary * MAC address. */ void wl_cfgp2p_generate_bss_mac(struct ether_addr *primary_addr, struct ether_addr *out_dev_addr, struct ether_addr *out_int_addr) { memset(out_dev_addr, 0, sizeof(*out_dev_addr)); memset(out_int_addr, 0, sizeof(*out_int_addr)); /* Generate the P2P Device Address. This consists of the device's * primary MAC address with the locally administered bit set. */ memcpy(out_dev_addr, primary_addr, sizeof(*out_dev_addr)); out_dev_addr->octet[0] |= 0x02; /* Generate the P2P Interface Address. If the discovery and connection * BSSCFGs need to simultaneously co-exist, then this address must be * different from the P2P Device Address. 
*/ memcpy(out_int_addr, out_dev_addr, sizeof(*out_int_addr)); out_int_addr->octet[4] ^= 0x80; } /* P2P IF Address change to Virtual Interface MAC Address */ void wl_cfg80211_change_ifaddr(u8* buf, struct ether_addr *p2p_int_addr, u8 element_id) { wifi_p2p_ie_t *ie = (wifi_p2p_ie_t*) buf; u16 len = ie->len; u8 *subel; u8 subelt_id; u16 subelt_len; CFGP2P_DBG((" Enter\n")); /* Point subel to the P2P IE's subelt field. * Subtract the preceding fields (id, len, OUI, oui_type) from the length. */ subel = ie->subelts; len -= 4; /* exclude OUI + OUI_TYPE */ while (len >= 3) { /* attribute id */ subelt_id = *subel; subel += 1; len -= 1; /* 2-byte little endian */ subelt_len = *subel++; subelt_len |= *subel++ << 8; len -= 2; len -= subelt_len; /* for the remaining subelt fields */ if (subelt_id == element_id) { if (subelt_id == P2P_SEID_INTINTADDR) { memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); CFGP2P_INFO(("Intended P2P Interface Address ATTR FOUND\n")); } else if (subelt_id == P2P_SEID_DEV_ID) { memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); CFGP2P_INFO(("Device ID ATTR FOUND\n")); } else if (subelt_id == P2P_SEID_DEV_INFO) { memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); CFGP2P_INFO(("Device INFO ATTR FOUND\n")); } else if (subelt_id == P2P_SEID_GROUP_ID) { memcpy(subel, p2p_int_addr->octet, ETHER_ADDR_LEN); CFGP2P_INFO(("GROUP ID ATTR FOUND\n")); } return; } else { CFGP2P_DBG(("OTHER id : %d\n", subelt_id)); } subel += subelt_len; } } /* * Check if a BSS is up. * This is a common implementation called by most OSL implementations of * p2posl_bss_isup(). DO NOT call this function directly from the * common code -- call p2posl_bss_isup() instead to allow the OSL to * override the common implementation if necessary. 
*/ bool wl_cfgp2p_bss_isup(struct net_device *ndev, int bsscfg_idx) { s32 result, val; bool isup = false; s8 getbuf[64]; /* Check if the BSS is up */ *(int*)getbuf = -1; result = wldev_iovar_getbuf_bsscfg(ndev, "bss", &bsscfg_idx, sizeof(bsscfg_idx), getbuf, sizeof(getbuf), 0); if (result != 0) { CFGP2P_ERR(("'wl bss -C %d' failed: %d\n", bsscfg_idx, result)); CFGP2P_ERR(("NOTE: this ioctl error is normal " "when the BSS has not been created yet.\n")); } else { val = *(int*)getbuf; val = dtoh32(val); CFGP2P_INFO(("---wl bss -C %d ==> %d\n", bsscfg_idx, val)); isup = (val ? TRUE : FALSE); } return isup; } /* Bring up or down a BSS */ s32 wl_cfgp2p_bss(struct net_device *ndev, s32 bsscfg_idx, s32 up) { s32 ret = BCME_OK; s32 val = up ? 1 : 0; struct { s32 cfg; s32 val; } bss_setbuf; bss_setbuf.cfg = htod32(bsscfg_idx); bss_setbuf.val = htod32(val); CFGP2P_INFO(("---wl bss -C %d %s\n", bsscfg_idx, up ? "up" : "down")); ret = wldev_iovar_setbuf(ndev, "bss", &bss_setbuf, sizeof(bss_setbuf), ioctlbuf, sizeof(ioctlbuf)); if (ret != 0) { CFGP2P_ERR(("'bss %d' failed with %d\n", up, ret)); } return ret; } /* Check if 'p2p' is supported in the driver */ s32 wl_cfgp2p_supported(struct wl_priv *wl, struct net_device *ndev) { s32 ret = BCME_OK; s32 p2p_supported = 0; ret = wldev_iovar_getint(ndev, "p2p", &p2p_supported); if (ret < 0) { CFGP2P_ERR(("wl p2p error %d\n", ret)); return 0; } if (p2p_supported == 1) { CFGP2P_INFO(("p2p is supported\n")); } else { CFGP2P_INFO(("p2p is unsupported\n")); p2p_supported = 0; } return p2p_supported; } /* Cleanup P2P resources */ s32 wl_cfgp2p_down(struct wl_priv *wl) { if (timer_pending(&wl->p2p->listen_timer)) del_timer_sync(&wl->p2p->listen_timer); wl_cfgp2p_deinit_priv(wl); return 0; } s32 wl_cfgp2p_set_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int len) { s32 ret = -1; int count, start, duration; wl_p2p_sched_t dongle_noa; CFGP2P_DBG((" Enter\n")); memset(&dongle_noa, 0, sizeof(dongle_noa)); if (wl->p2p && 
wl->p2p->vif_created) { wl->p2p->noa.desc[0].start = 0; sscanf(buf, "%d %d %d", &count, &start, &duration); CFGP2P_DBG(("set_p2p_noa count %d start %d duration %d\n", count, start, duration)); if (count != -1) wl->p2p->noa.desc[0].count = count; /* supplicant gives interval as start */ if (start != -1) wl->p2p->noa.desc[0].interval = start; if (duration != -1) wl->p2p->noa.desc[0].duration = duration; if (wl->p2p->noa.desc[0].count != 255) { wl->p2p->noa.desc[0].start = 200; dongle_noa.type = WL_P2P_SCHED_TYPE_REQ_ABS; dongle_noa.action = WL_P2P_SCHED_ACTION_GOOFF; dongle_noa.option = WL_P2P_SCHED_OPTION_TSFOFS; } else { /* Continuous NoA interval. */ dongle_noa.action = WL_P2P_SCHED_ACTION_NONE; dongle_noa.type = WL_P2P_SCHED_TYPE_ABS; if ((wl->p2p->noa.desc[0].interval == 102) || (wl->p2p->noa.desc[0].interval == 100)) { wl->p2p->noa.desc[0].start = 100 - wl->p2p->noa.desc[0].duration; dongle_noa.option = WL_P2P_SCHED_OPTION_BCNPCT; } else { dongle_noa.option = WL_P2P_SCHED_OPTION_NORMAL; } } /* Put the noa descriptor in dongle format for dongle */ dongle_noa.desc[0].count = htod32(wl->p2p->noa.desc[0].count); if (dongle_noa.option == WL_P2P_SCHED_OPTION_BCNPCT) { dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start); dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration); } else { dongle_noa.desc[0].start = htod32(wl->p2p->noa.desc[0].start*1000); dongle_noa.desc[0].duration = htod32(wl->p2p->noa.desc[0].duration*1000); } dongle_noa.desc[0].interval = htod32(wl->p2p->noa.desc[0].interval*1000); ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION), "p2p_noa", &dongle_noa, sizeof(dongle_noa), ioctlbuf, sizeof(ioctlbuf)); if (ret < 0) { CFGP2P_ERR(("fw set p2p_noa failed %d\n", ret)); } } else { CFGP2P_ERR(("ERROR: set_noa in non-p2p mode\n")); } return ret; } s32 wl_cfgp2p_get_p2p_noa(struct wl_priv *wl, struct net_device *ndev, char* buf, int buf_len) { wifi_p2p_noa_desc_t *noa_desc; int len = 0, i; char _buf[200]; 
CFGP2P_DBG((" Enter\n")); buf[0] = '\0'; if (wl->p2p && wl->p2p->vif_created) { if (wl->p2p->noa.desc[0].count || wl->p2p->ops.ops) { _buf[0] = 1; /* noa index */ _buf[1] = (wl->p2p->ops.ops ? 0x80: 0) | (wl->p2p->ops.ctw & 0x7f); /* ops + ctw */ len += 2; if (wl->p2p->noa.desc[0].count) { noa_desc = (wifi_p2p_noa_desc_t*)&_buf[len]; noa_desc->cnt_type = wl->p2p->noa.desc[0].count; noa_desc->duration = wl->p2p->noa.desc[0].duration; noa_desc->interval = wl->p2p->noa.desc[0].interval; noa_desc->start = wl->p2p->noa.desc[0].start; len += sizeof(wifi_p2p_noa_desc_t); } if (buf_len <= len * 2) { CFGP2P_ERR(("ERROR: buf_len %d in not enough for" "returning noa in string format\n", buf_len)); return -1; } /* We have to convert the buffer data into ASCII strings */ for (i = 0; i < len; i++) { sprintf(buf, "%02x", _buf[i]); buf += 2; } buf[i*2] = '\0'; } } else { CFGP2P_ERR(("ERROR: get_noa in non-p2p mode\n")); return -1; } return len * 2; } s32 wl_cfgp2p_set_p2p_ps(struct wl_priv *wl, struct net_device *ndev, char* buf, int len) { int ps, ctw; int ret = -1; s32 legacy_ps; CFGP2P_DBG((" Enter\n")); if (wl->p2p && wl->p2p->vif_created) { sscanf(buf, "%d %d %d", &legacy_ps, &ps, &ctw); CFGP2P_DBG((" Enter legacy_ps %d ps %d ctw %d\n", legacy_ps, ps, ctw)); if (ctw != -1) { wl->p2p->ops.ctw = ctw; ret = 0; } if (ps != -1) { wl->p2p->ops.ops = ps; ret = wldev_iovar_setbuf(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION), "p2p_ops", &wl->p2p->ops, sizeof(wl->p2p->ops), ioctlbuf, sizeof(ioctlbuf)); if (ret < 0) { CFGP2P_ERR(("fw set p2p_ops failed %d\n", ret)); } } if (legacy_ps != -1) { s32 pm = legacy_ps ? PM_MAX : PM_OFF; ret = wldev_ioctl(wl_to_p2p_bss_ndev(wl, P2PAPI_BSSCFG_CONNECTION), WLC_SET_PM, &pm, sizeof(pm), true); if (unlikely(ret)) { CFGP2P_ERR(("error (%d)\n", ret)); } } } else { CFGP2P_ERR(("ERROR: set_p2p_ps in non-p2p mode\n")); ret = -1; } return ret; }
gpl-2.0
BrateloSlava/kernel_apq8064
drivers/video/backlight/atmel-pwm-bl.c
800
6353
/*
 * Copyright (C) 2008 Atmel Corporation
 *
 * Backlight driver using Atmel PWM peripheral.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 */
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/fb.h>
#include <linux/clk.h>
#include <linux/gpio.h>
#include <linux/backlight.h>
#include <linux/atmel_pwm.h>
#include <linux/atmel-pwm-bl.h>
#include <linux/slab.h>

/* Per-device driver state, allocated in probe and freed in remove. */
struct atmel_pwm_bl {
	/* board-supplied configuration (duty range, polarity, GPIO) */
	const struct atmel_pwm_bl_platform_data *pdata;
	struct backlight_device *bldev;
	struct platform_device *pdev;
	/* the PWM hardware channel driving the backlight */
	struct pwm_channel pwmc;
	/* backlight-enable GPIO number, or -1 when the board has none */
	int gpio_on;
};

/*
 * Push the brightness currently requested in bd->props into the hardware.
 * A blanked framebuffer or powered-down backlight is treated as intensity 0.
 * Always returns 0.
 */
static int atmel_pwm_bl_set_intensity(struct backlight_device *bd)
{
	struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
	int intensity = bd->props.brightness;
	int pwm_duty;

	if (bd->props.power != FB_BLANK_UNBLANK)
		intensity = 0;
	if (bd->props.fb_blank != FB_BLANK_UNBLANK)
		intensity = 0;

	/*
	 * Map intensity onto the duty-cycle range: for an active-low PWM a
	 * larger duty means brighter, so offset from duty_min; otherwise
	 * subtract from duty_max.
	 */
	if (pwmbl->pdata->pwm_active_low)
		pwm_duty = pwmbl->pdata->pwm_duty_min + intensity;
	else
		pwm_duty = pwmbl->pdata->pwm_duty_max - intensity;

	/* Clamp to the board's supported duty window. */
	if (pwm_duty > pwmbl->pdata->pwm_duty_max)
		pwm_duty = pwmbl->pdata->pwm_duty_max;
	if (pwm_duty < pwmbl->pdata->pwm_duty_min)
		pwm_duty = pwmbl->pdata->pwm_duty_min;

	if (!intensity) {
		/* Drop the enable GPIO first, then park and stop the PWM. */
		if (pwmbl->gpio_on != -1) {
			gpio_set_value(pwmbl->gpio_on,
					0 ^ pwmbl->pdata->on_active_low);
		}
		pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
		pwm_channel_disable(&pwmbl->pwmc);
	} else {
		/*
		 * Enable before writing CUPD — the duty update register
		 * presumably only latches while the channel runs (TODO
		 * confirm against the SoC PWM documentation) — then raise
		 * the enable GPIO last.
		 */
		pwm_channel_enable(&pwmbl->pwmc);
		pwm_channel_writel(&pwmbl->pwmc, PWM_CUPD, pwm_duty);
		if (pwmbl->gpio_on != -1) {
			gpio_set_value(pwmbl->gpio_on,
					1 ^ pwmbl->pdata->on_active_low);
		}
	}

	return 0;
}

/*
 * Read back the current intensity by inverting the duty-cycle mapping used
 * in atmel_pwm_bl_set_intensity() on the live PWM_CDTY register value.
 */
static int atmel_pwm_bl_get_intensity(struct backlight_device *bd)
{
	struct atmel_pwm_bl *pwmbl = bl_get_data(bd);
	u32 intensity;

	if (pwmbl->pdata->pwm_active_low) {
		intensity = pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY) -
			pwmbl->pdata->pwm_duty_min;
	} else {
		intensity = pwmbl->pdata->pwm_duty_max -
			pwm_channel_readl(&pwmbl->pwmc, PWM_CDTY);
	}

	/* Truncate to 16 bits to match the register's field width. */
	return intensity & 0xffff;
}

/*
 * One-time PWM channel setup: derive the clock prescaler from the requested
 * frequency, program mode/duty/period, then start the channel.
 * Returns the result of pwm_channel_enable().
 */
static int atmel_pwm_bl_init_pwm(struct atmel_pwm_bl *pwmbl)
{
	unsigned long pwm_rate = pwmbl->pwmc.mck;
	unsigned long prescale = DIV_ROUND_UP(pwm_rate,
			(pwmbl->pdata->pwm_frequency *
			pwmbl->pdata->pwm_compare_max)) - 1;

	/*
	 * Prescale must be power of two and maximum 0xf in size because of
	 * hardware limit. PWM speed will be:
	 * PWM module clock speed / (2 ^ prescale).
	 */
	prescale = fls(prescale);
	if (prescale > 0xf)
		prescale = 0xf;

	pwm_channel_writel(&pwmbl->pwmc, PWM_CMR, prescale);
	pwm_channel_writel(&pwmbl->pwmc, PWM_CDTY,
			pwmbl->pdata->pwm_duty_min +
			pwmbl->bldev->props.brightness);
	pwm_channel_writel(&pwmbl->pwmc, PWM_CPRD,
			pwmbl->pdata->pwm_compare_max);

	dev_info(&pwmbl->pdev->dev, "Atmel PWM backlight driver "
			"(%lu Hz)\n", pwmbl->pwmc.mck /
			pwmbl->pdata->pwm_compare_max /
			(1 << prescale));

	return pwm_channel_enable(&pwmbl->pwmc);
}

static const struct backlight_ops atmel_pwm_bl_ops = {
	.get_brightness = atmel_pwm_bl_get_intensity,
	.update_status  = atmel_pwm_bl_set_intensity,
};

/*
 * Probe: validate platform data, claim the PWM channel and optional enable
 * GPIO, register the backlight device, and light it at half brightness.
 * All failure paths unwind through the goto ladder in acquisition order.
 */
static int atmel_pwm_bl_probe(struct platform_device *pdev)
{
	struct backlight_properties props;
	const struct atmel_pwm_bl_platform_data *pdata;
	struct backlight_device *bldev;
	struct atmel_pwm_bl *pwmbl;
	int retval;

	pwmbl = kzalloc(sizeof(struct atmel_pwm_bl), GFP_KERNEL);
	if (!pwmbl)
		return -ENOMEM;

	pwmbl->pdev = pdev;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		retval = -ENODEV;
		goto err_free_mem;
	}

	/* Reject inconsistent duty/period/frequency configurations. */
	if (pdata->pwm_compare_max < pdata->pwm_duty_max ||
			pdata->pwm_duty_min > pdata->pwm_duty_max ||
			pdata->pwm_frequency == 0) {
		retval = -EINVAL;
		goto err_free_mem;
	}

	pwmbl->pdata = pdata;
	pwmbl->gpio_on = pdata->gpio_on;

	retval = pwm_channel_alloc(pdata->pwm_channel, &pwmbl->pwmc);
	if (retval)
		goto err_free_mem;

	if (pwmbl->gpio_on != -1) {
		retval = gpio_request(pwmbl->gpio_on, "gpio_atmel_pwm_bl");
		if (retval) {
			/*
			 * Mark the GPIO unused so later unwind/remove paths
			 * don't free a GPIO we never owned.
			 */
			pwmbl->gpio_on = -1;
			goto err_free_pwm;
		}

		/* Turn display off by default. */
		retval = gpio_direction_output(pwmbl->gpio_on,
				0 ^ pdata->on_active_low);
		if (retval)
			goto err_free_gpio;
	}

	memset(&props, 0, sizeof(struct backlight_properties));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = pdata->pwm_duty_max - pdata->pwm_duty_min;
	bldev = backlight_device_register("atmel-pwm-bl", &pdev->dev, pwmbl,
			&atmel_pwm_bl_ops, &props);
	if (IS_ERR(bldev)) {
		retval = PTR_ERR(bldev);
		goto err_free_gpio;
	}

	pwmbl->bldev = bldev;

	platform_set_drvdata(pdev, pwmbl);

	/* Power up the backlight by default at middle intensity. */
	bldev->props.power = FB_BLANK_UNBLANK;
	bldev->props.brightness = bldev->props.max_brightness / 2;

	retval = atmel_pwm_bl_init_pwm(pwmbl);
	if (retval)
		goto err_free_bl_dev;

	atmel_pwm_bl_set_intensity(bldev);

	return 0;

err_free_bl_dev:
	platform_set_drvdata(pdev, NULL);
	backlight_device_unregister(bldev);
err_free_gpio:
	if (pwmbl->gpio_on != -1)
		gpio_free(pwmbl->gpio_on);
err_free_pwm:
	pwm_channel_free(&pwmbl->pwmc);
err_free_mem:
	kfree(pwmbl);
	return retval;
}

/*
 * Remove: switch the display off, then release resources in the reverse
 * order of probe.
 */
static int __exit atmel_pwm_bl_remove(struct platform_device *pdev)
{
	struct atmel_pwm_bl *pwmbl = platform_get_drvdata(pdev);

	if (pwmbl->gpio_on != -1) {
		gpio_set_value(pwmbl->gpio_on,
				0 ^ pwmbl->pdata->on_active_low);
		gpio_free(pwmbl->gpio_on);
	}
	pwm_channel_disable(&pwmbl->pwmc);
	pwm_channel_free(&pwmbl->pwmc);
	backlight_device_unregister(pwmbl->bldev);
	platform_set_drvdata(pdev, NULL);
	kfree(pwmbl);

	return 0;
}

/*
 * No .probe here: the driver is bound once via platform_driver_probe(),
 * which is why the probe function can live in init memory.
 */
static struct platform_driver atmel_pwm_bl_driver = {
	.driver = {
		.name = "atmel-pwm-bl",
	},
	/* REVISIT add suspend() and resume() */
	.remove = __exit_p(atmel_pwm_bl_remove),
};

static int __init atmel_pwm_bl_init(void)
{
	return platform_driver_probe(&atmel_pwm_bl_driver,
			atmel_pwm_bl_probe);
}
module_init(atmel_pwm_bl_init);

static void __exit atmel_pwm_bl_exit(void)
{
	platform_driver_unregister(&atmel_pwm_bl_driver);
}
module_exit(atmel_pwm_bl_exit);

MODULE_AUTHOR("Hans-Christian egtvedt <hans-christian.egtvedt@atmel.com>");
MODULE_DESCRIPTION("Atmel PWM backlight driver");
MODULE_LICENSE("GPL");
gpl-2.0
IntelBUAP/Repo-Linux-RT
drivers/regulator/twl-regulator.c
1056
33305
/*
 * twl-regulator.c -- support regulators in twl4030/twl6030 family chips
 *
 * Copyright (C) 2008 David Brownell
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/string.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/i2c/twl.h>

/*
 * The TWL4030/TW5030/TPS659x0/TWL6030 family chips include power management, a
 * USB OTG transceiver, an RTC, ADC, PWM, and lots more. Some versions
 * include an audio codec, battery charger, and more voltage regulators.
 * These chips are often used in OMAP-based systems.
 *
 * This driver implements software-based resource control for various
 * voltage regulators. This is usually augmented with state machine
 * based control.
 */

/* Per-regulator descriptor; one instance per supported rail. */
struct twlreg_info {
	/* start of regulator's PM_RECEIVER control register bank */
	u8 base;
	/* twl resource ID, for resource control state machine */
	u8 id;
	/* voltage in mV = table[VSEL]; table_len must be a power-of-two */
	u8 table_len;
	const u16 *table;

	/* State REMAP default configuration */
	u8 remap;

	/* chip constraints on regulator behavior */
	u16 min_mV;
	u16 max_mV;

	/* SMPS voltage-decoding mode bits (SMPS_OFFSET_EN etc.) */
	u8 flags;

	/* used by regulator core */
	struct regulator_desc desc;

	/* chip specific features */
	unsigned long features;

	/*
	 * optional override functions for voltage set/get
	 * these are currently only used for SMPS regulators
	 */
	int (*get_voltage)(void *data);
	int (*set_voltage)(void *data, int target_uV);

	/* data passed from board for external get/set voltage */
	void *data;
};

/* LDO control registers ... offset is from the base of its register bank.
 * The first three registers of all power resource banks help hardware to
 * manage the various resource groups.
 */
/* Common offset in TWL4030/6030 */
#define VREG_GRP		0
/* TWL4030 register offsets */
#define VREG_TYPE		1
#define VREG_REMAP		2
#define VREG_DEDICATED		3	/* LDO control */
#define VREG_VOLTAGE_SMPS_4030	9
/* TWL6030 register offsets */
#define VREG_TRANS		1
#define VREG_STATE		2
#define VREG_VOLTAGE		3
#define VREG_VOLTAGE_SMPS	4
/* TWL6030 Misc register offsets */
#define VREG_BC_ALL		1
#define VREG_BC_REF		2
#define VREG_BC_PROC		3
#define VREG_BC_CLK_RST		4

/* TWL6030 LDO register values for CFG_STATE */
#define TWL6030_CFG_STATE_OFF	0x00
#define TWL6030_CFG_STATE_ON	0x01
#define TWL6030_CFG_STATE_OFF2	0x02
#define TWL6030_CFG_STATE_SLEEP	0x03
#define TWL6030_CFG_STATE_GRP_SHIFT	5
#define TWL6030_CFG_STATE_APP_SHIFT	2
#define TWL6030_CFG_STATE_APP_MASK	(0x03 << TWL6030_CFG_STATE_APP_SHIFT)
#define TWL6030_CFG_STATE_APP(v)	(((v) & TWL6030_CFG_STATE_APP_MASK) >>\
						TWL6030_CFG_STATE_APP_SHIFT)

/* Flags for SMPS Voltage reading */
#define SMPS_OFFSET_EN		BIT(0)
#define SMPS_EXTENDED_EN	BIT(1)

/* twl6032 SMPS EPROM values */
#define TWL6030_SMPS_OFFSET		0xB0
#define TWL6030_SMPS_MULT		0xB3
#define SMPS_MULTOFFSET_SMPS4	BIT(0)
#define SMPS_MULTOFFSET_VIO	BIT(1)
#define SMPS_MULTOFFSET_SMPS3	BIT(6)

/*
 * Read one register from this regulator's bank over i2c.
 * Returns the register value (>= 0) or a negative i2c error code.
 */
static inline int
twlreg_read(struct twlreg_info *info, unsigned slave_subgp, unsigned offset)
{
	u8 value;
	int status;

	status = twl_i2c_read_u8(slave_subgp, &value, info->base + offset);
	return (status < 0) ? status : value;
}

/* Write one register in this regulator's bank; returns the i2c status. */
static inline int
twlreg_write(struct twlreg_info *info, unsigned slave_subgp, unsigned offset,
						 u8 value)
{
	return twl_i2c_write_u8(slave_subgp, value, info->base + offset);
}

/*----------------------------------------------------------------------*/

/* generic power resource operations, which work on all regulators */

/* Fetch the resource-group (DEV_GRP) register for this regulator. */
static int twlreg_grp(struct regulator_dev *rdev)
{
	return twlreg_read(rdev_get_drvdata(rdev), TWL_MODULE_PM_RECEIVER,
								 VREG_GRP);
}

/*
 * Enable/disable regulators by joining/leaving the P1 (processor) group.
 * We assume nobody else is updating the DEV_GRP registers.
 */

/* definition for 4030 family */
#define P3_GRP_4030	BIT(7)		/* "peripherals" */
#define P2_GRP_4030	BIT(6)		/* secondary processor, modem, etc */
#define P1_GRP_4030	BIT(5)		/* CPU/Linux */
/* definition for 6030 family */
#define P3_GRP_6030	BIT(2)		/* secondary processor, modem, etc */
#define P2_GRP_6030	BIT(1)		/* "peripherals" */
#define P1_GRP_6030	BIT(0)		/* CPU/Linux */

/* A 4030 regulator is enabled iff it belongs to the P1 group. */
static int twl4030reg_is_enabled(struct regulator_dev *rdev)
{
	int state = twlreg_grp(rdev);

	if (state < 0)
		return state;

	return state & P1_GRP_4030;
}

/*
 * A 6030 regulator is enabled when it is in the P1 group (skipped on
 * twl6032, which has no group concept) AND its CFG_STATE says ON.
 */
static int twl6030reg_is_enabled(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int grp = 0, val;

	if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS))) {
		grp = twlreg_grp(rdev);
		if (grp < 0)
			return grp;
		grp &= P1_GRP_6030;
	} else {
		grp = 1;
	}

	val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);
	val = TWL6030_CFG_STATE_APP(val);

	return grp && (val == TWL6030_CFG_STATE_ON);
}

/* Enable a 4030 regulator by joining the P1 group. */
static int twl4030reg_enable(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int grp;
	int ret;

	grp = twlreg_grp(rdev);
	if (grp < 0)
		return grp;

	grp |= P1_GRP_4030;

	ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);

	return ret;
}

/* Enable a 6030 regulator by writing CFG_STATE = ON for its group(s). */
static int twl6030reg_enable(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int grp = 0;
	int ret;

	if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
		grp = twlreg_grp(rdev);
	if (grp < 0)
		return grp;

	ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
			grp << TWL6030_CFG_STATE_GRP_SHIFT |
			TWL6030_CFG_STATE_ON);

	return ret;
}

/* Disable a 4030 regulator by leaving all processor groups. */
static int twl4030reg_disable(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int grp;
	int ret;

	grp = twlreg_grp(rdev);
	if (grp < 0)
		return grp;

	grp &= ~(P1_GRP_4030 | P2_GRP_4030 | P3_GRP_4030);

	ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_GRP, grp);

	return ret;
}

/* Disable a 6030 regulator by writing CFG_STATE = OFF. */
static int twl6030reg_disable(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int grp = 0;
	int ret;

	if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
		grp = P1_GRP_6030 | P2_GRP_6030 | P3_GRP_6030;

	/* For 6030, set the off state for all grps enabled */
	ret = twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE,
			(grp) << TWL6030_CFG_STATE_GRP_SHIFT |
			TWL6030_CFG_STATE_OFF);

	return ret;
}

/* Map the 4030 group/state register onto regulator-core status codes. */
static int twl4030reg_get_status(struct regulator_dev *rdev)
{
	int state = twlreg_grp(rdev);

	if (state < 0)
		return state;
	state &= 0x0f;

	/* assume state != WARM_RESET; we'd not be running...  */
	if (!state)
		return REGULATOR_STATUS_OFF;
	return (state & BIT(3))
		? REGULATOR_STATUS_NORMAL
		: REGULATOR_STATUS_STANDBY;
}

/* Map the 6030 CFG_STATE register onto regulator-core status codes. */
static int twl6030reg_get_status(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int val;

	val = twlreg_grp(rdev);
	if (val < 0)
		return val;

	val = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_STATE);

	switch (TWL6030_CFG_STATE_APP(val)) {
	case TWL6030_CFG_STATE_ON:
		return REGULATOR_STATUS_NORMAL;

	case TWL6030_CFG_STATE_SLEEP:
		return REGULATOR_STATUS_STANDBY;

	case TWL6030_CFG_STATE_OFF:
	case TWL6030_CFG_STATE_OFF2:
	default:
		break;
	}

	return REGULATOR_STATUS_OFF;
}

/*
 * Set NORMAL/STANDBY on a 4030 regulator by sending a singular power-bus
 * message for this resource; requires the resource to be in some group.
 */
static int twl4030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	unsigned message;
	int status;

	/* We can only set the mode through state machine commands... */
	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		message = MSG_SINGULAR(DEV_GRP_P1, info->id, RES_STATE_ACTIVE);
		break;
	case REGULATOR_MODE_STANDBY:
		message = MSG_SINGULAR(DEV_GRP_P1, info->id, RES_STATE_SLEEP);
		break;
	default:
		return -EINVAL;
	}

	/* Ensure the resource is associated with some group */
	status = twlreg_grp(rdev);
	if (status < 0)
		return status;
	if (!(status & (P3_GRP_4030 | P2_GRP_4030 | P1_GRP_4030)))
		return -EACCES;

	/* The 16-bit message is delivered MSB first, then LSB. */
	status = twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
			message >> 8, TWL4030_PM_MASTER_PB_WORD_MSB);
	if (status < 0)
		return status;

	return twl_i2c_write_u8(TWL_MODULE_PM_MASTER,
			message & 0xff, TWL4030_PM_MASTER_PB_WORD_LSB);
}

/* Set NORMAL/STANDBY on a 6030 regulator via its CFG_STATE register. */
static int twl6030reg_set_mode(struct regulator_dev *rdev, unsigned mode)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int grp = 0;
	int val;

	if (!(twl_class_is_6030() && (info->features & TWL6032_SUBCLASS)))
		grp = twlreg_grp(rdev);

	if (grp < 0)
		return grp;

	/* Compose the state register settings */
	val = grp << TWL6030_CFG_STATE_GRP_SHIFT;
	/* We can only set the mode through state machine commands... */
	switch (mode) {
	case REGULATOR_MODE_NORMAL:
		val |= TWL6030_CFG_STATE_ON;
		break;
	case REGULATOR_MODE_STANDBY:
		val |= TWL6030_CFG_STATE_SLEEP;
		break;

	default:
		return -EINVAL;
	}

	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_STATE, val);
}

/*----------------------------------------------------------------------*/

/*
 * Support for adjustable-voltage LDOs uses a four bit (or less) voltage
 * select field in its control register. We use tables indexed by VSEL
 * to record voltages in milliVolts. (Accuracy is about three percent.)
 *
 * Note that VSEL values for VAUX2 changed in twl5030 and newer silicon;
 * currently handled by listing two slightly different VAUX2 regulators,
 * only one of which will be configured.
 *
 * VSEL values documented as "TI cannot support these values" are flagged
 * in these tables as UNSUP() values; we normally won't assign them.
 *
 * VAUX3 at 3V is incorrectly listed in some TI manuals as unsupported.
 * TI are revising the twl5030/tps659x0 specs to support that 3.0V setting.
 */
#define UNSUP_MASK	0x8000

#define UNSUP(x)		(UNSUP_MASK | (x))
#define IS_UNSUP(info, x)			\
	((UNSUP_MASK & (x)) &&			\
	 !((info)->features & TWL4030_ALLOW_UNSUPPORTED))
#define LDO_MV(x)		(~UNSUP_MASK & (x))

static const u16 VAUX1_VSEL_table[] = {
	UNSUP(1500), UNSUP(1800), 2500, 2800,
	3000, 3000, 3000, 3000,
};
static const u16 VAUX2_4030_VSEL_table[] = {
	UNSUP(1000), UNSUP(1000), UNSUP(1200), 1300,
	1500, 1800, UNSUP(1850), 2500,
	UNSUP(2600), 2800, UNSUP(2850), UNSUP(3000),
	UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150),
};
static const u16 VAUX2_VSEL_table[] = {
	1700, 1700, 1900, 1300,
	1500, 1800, 2000, 2500,
	2100, 2800, 2200, 2300,
	2400, 2400, 2400, 2400,
};
static const u16 VAUX3_VSEL_table[] = {
	1500, 1800, 2500, 2800,
	3000, 3000, 3000, 3000,
};
static const u16 VAUX4_VSEL_table[] = {
	700, 1000, 1200, UNSUP(1300),
	1500, 1800, UNSUP(1850), 2500,
	UNSUP(2600), 2800, UNSUP(2850), UNSUP(3000),
	UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150),
};
static const u16 VMMC1_VSEL_table[] = {
	1850, 2850, 3000, 3150,
};
static const u16 VMMC2_VSEL_table[] = {
	UNSUP(1000), UNSUP(1000), UNSUP(1200), UNSUP(1300),
	UNSUP(1500), UNSUP(1800), 1850, UNSUP(2500),
	2600, 2800, 2850, 3000,
	3150, 3150, 3150, 3150,
};
static const u16 VPLL1_VSEL_table[] = {
	1000, 1200, 1300, 1800,
	UNSUP(2800), UNSUP(3000), UNSUP(3000), UNSUP(3000),
};
static const u16 VPLL2_VSEL_table[] = {
	700, 1000, 1200, 1300,
	UNSUP(1500), 1800, UNSUP(1850), UNSUP(2500),
	UNSUP(2600), UNSUP(2800), UNSUP(2850), UNSUP(3000),
	UNSUP(3150), UNSUP(3150), UNSUP(3150), UNSUP(3150),
};
static const u16 VSIM_VSEL_table[] = {
	UNSUP(1000), UNSUP(1200), UNSUP(1300), 1800,
	2800, 3000, 3000, 3000,
};
static const u16 VDAC_VSEL_table[] = {
	1200, 1300, 1800, 1800,
};
static const u16 VIO_VSEL_table[] = {
	1800, 1850,
};
static const u16 VINTANA2_VSEL_table[] = {
	2500, 2750,
};

/*
 * List a 4030 LDO's selector voltage in uV; unsupported selectors report
 * 0 so the regulator core skips them.
 */
static int twl4030ldo_list_voltage(struct regulator_dev *rdev, unsigned index)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int mV = info->table[index];

	return IS_UNSUP(info, mV) ? 0 : (LDO_MV(mV) * 1000);
}

static int
twl4030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE,
			    selector);
}

static int twl4030ldo_get_voltage_sel(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);

	if (vsel < 0)
		return vsel;

	/* table_len is a power of two, so this masks to a valid index. */
	vsel &= info->table_len - 1;
	return vsel;
}

static struct regulator_ops twl4030ldo_ops = {
	.list_voltage	= twl4030ldo_list_voltage,

	.set_voltage_sel = twl4030ldo_set_voltage_sel,
	.get_voltage_sel = twl4030ldo_get_voltage_sel,

	.enable		= twl4030reg_enable,
	.disable	= twl4030reg_disable,
	.is_enabled	= twl4030reg_is_enabled,

	.set_mode	= twl4030reg_set_mode,

	.get_status	= twl4030reg_get_status,
};

/*
 * Set a 4030 SMPS voltage: board override if provided, else program the
 * VSEL register directly (600 mV base, 12.5 mV per step).
 * NOTE(review): *selector is never written back and vsel is not range
 * checked here — presumably the regulator core constraints cover that;
 * verify before relying on it.
 */
static int
twl4030smps_set_voltage(struct regulator_dev *rdev, int min_uV, int max_uV,
			unsigned *selector)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int vsel = DIV_ROUND_UP(min_uV - 600000, 12500);

	if (info->set_voltage) {
		return info->set_voltage(info->data, min_uV);
	} else {
		twlreg_write(info, TWL_MODULE_PM_RECEIVER,
			VREG_VOLTAGE_SMPS_4030, vsel);
	}

	return 0;
}

/* Read back a 4030 SMPS voltage, inverting the 600 mV + 12.5 mV/step map. */
static int twl4030smps_get_voltage(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int vsel;

	if (info->get_voltage)
		return info->get_voltage(info->data);

	vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER,
		VREG_VOLTAGE_SMPS_4030);

	return vsel * 12500 + 600000;
}

static struct regulator_ops twl4030smps_ops = {
	.set_voltage = twl4030smps_set_voltage,
	.get_voltage = twl4030smps_get_voltage,
};

/*
 * Core SMPS rails on 6030 are only controllable through board-provided
 * callbacks; without them these operations report -ENODEV.
 */
static int twl6030coresmps_set_voltage(struct regulator_dev *rdev, int min_uV,
	int max_uV, unsigned *selector)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	if (info->set_voltage)
		return info->set_voltage(info->data, min_uV);

	return -ENODEV;
}

static int twl6030coresmps_get_voltage(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	if (info->get_voltage)
		return info->get_voltage(info->data);

	return -ENODEV;
}

static struct regulator_ops twl6030coresmps_ops = {
	.set_voltage	= twl6030coresmps_set_voltage,
	.get_voltage	= twl6030coresmps_get_voltage,
};

/*
 * Decode a 6030 LDO selector: 0 = off, 1..24 map linearly from min_mV in
 * 100 mV steps, 31 is a fixed 2.75 V bypass value, the rest are reserved.
 */
static int twl6030ldo_list_voltage(struct regulator_dev *rdev, unsigned sel)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	switch (sel) {
	case 0:
		return 0;
	case 1 ... 24:
		/* Linear mapping from 00000001 to 00011000:
		 * Absolute voltage value = 1.0 V + 0.1 V * (sel - 00000001)
		 */
		return (info->min_mV + 100 * (sel - 1)) * 1000;
	case 25 ... 30:
		return -EINVAL;
	case 31:
		return 2750000;
	default:
		return -EINVAL;
	}
}

static int
twl6030ldo_set_voltage_sel(struct regulator_dev *rdev, unsigned selector)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE,
			    selector);
}

static int twl6030ldo_get_voltage_sel(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int vsel = twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE);

	return vsel;
}

static struct regulator_ops twl6030ldo_ops = {
	.list_voltage	= twl6030ldo_list_voltage,

	.set_voltage_sel = twl6030ldo_set_voltage_sel,
	.get_voltage_sel = twl6030ldo_get_voltage_sel,

	.enable		= twl6030reg_enable,
	.disable	= twl6030reg_disable,
	.is_enabled	= twl6030reg_is_enabled,

	.set_mode	= twl6030reg_set_mode,

	.get_status	= twl6030reg_get_status,
};

/*----------------------------------------------------------------------*/

/* Fixed-voltage rails: voltage comes from desc.min_uV, nothing settable. */
static struct regulator_ops twl4030fixed_ops = {
	.list_voltage	= regulator_list_voltage_linear,

	.enable		= twl4030reg_enable,
	.disable	= twl4030reg_disable,
	.is_enabled	= twl4030reg_is_enabled,

	.set_mode	= twl4030reg_set_mode,

	.get_status	= twl4030reg_get_status,
};

static struct regulator_ops twl6030fixed_ops = {
	.list_voltage	= regulator_list_voltage_linear,

	.enable		= twl6030reg_enable,
	.disable	= twl6030reg_disable,
	.is_enabled	= twl6030reg_is_enabled,

	.set_mode	= twl6030reg_set_mode,

	.get_status	= twl6030reg_get_status,
};

/*
 * SMPS status and control
 */

/*
 * Decode a twl6032 SMPS selector into uV. The decoding depends on the
 * EPROM-configured offset/extended mode bits cached in info->flags:
 * selectors 1..57 are linear, 58..62 are fixed "calibration" points, 0 is
 * off. In SMPS_OFFSET_EN the 100 mV offset is added by falling through
 * into the base case.
 */
static int
twl6030smps_list_voltage(struct regulator_dev *rdev, unsigned index)
{
	struct twlreg_info	*info = rdev_get_drvdata(rdev);
	int			voltage = 0;

	switch (info->flags) {
	case SMPS_OFFSET_EN:
		voltage = 100000;
		/* fall through */
	case 0:
		switch (index) {
		case 0:
			voltage = 0;
			break;
		case 58:
			voltage = 1350 * 1000;
			break;
		case 59:
			voltage = 1500 * 1000;
			break;
		case 60:
			voltage = 1800 * 1000;
			break;
		case 61:
			voltage = 1900 * 1000;
			break;
		case 62:
			voltage = 2100 * 1000;
			break;
		default:
			voltage += (600000 + (12500 * (index - 1)));
		}
		break;
	case SMPS_EXTENDED_EN:
		switch (index) {
		case 0:
			voltage = 0;
			break;
		case 58:
			voltage = 2084 * 1000;
			break;
		case 59:
			voltage = 2315 * 1000;
			break;
		case 60:
			voltage = 2778 * 1000;
			break;
		case 61:
			voltage = 2932 * 1000;
			break;
		case 62:
			voltage = 3241 * 1000;
			break;
		default:
			voltage = (1852000 + (38600 * (index - 1)));
		}
		break;
	case SMPS_OFFSET_EN | SMPS_EXTENDED_EN:
		switch (index) {
		case 0:
			voltage = 0;
			break;
		case 58:
			voltage = 4167 * 1000;
			break;
		case 59:
			voltage = 2315 * 1000;
			break;
		case 60:
			voltage = 2778 * 1000;
			break;
		case 61:
			voltage = 2932 * 1000;
			break;
		case 62:
			voltage = 3241 * 1000;
			break;
		default:
			voltage = (2161000 + (38600 * (index - 1)));
		}
		break;
	}

	return voltage;
}

/*
 * Inverse of twl6030smps_list_voltage(): pick the selector for a requested
 * voltage range, again keyed off the offset/extended mode bits.
 */
static int twl6030smps_map_voltage(struct regulator_dev *rdev, int min_uV,
				   int max_uV)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);
	int vsel = 0;

	switch (info->flags) {
	case 0:
		if (min_uV == 0)
			vsel = 0;
		else if ((min_uV >= 600000) && (min_uV <= 1300000)) {
			vsel = DIV_ROUND_UP(min_uV - 600000, 12500);
			vsel++;
		}
		/* Values 1..57 for vsel are linear and can be calculated
		 * values 58..62 are non linear.
		 */
		else if ((min_uV > 1900000) && (min_uV <= 2100000))
			vsel = 62;
		else if ((min_uV > 1800000) && (min_uV <= 1900000))
			vsel = 61;
		else if ((min_uV > 1500000) && (min_uV <= 1800000))
			vsel = 60;
		else if ((min_uV > 1350000) && (min_uV <= 1500000))
			vsel = 59;
		else if ((min_uV > 1300000) && (min_uV <= 1350000))
			vsel = 58;
		else
			return -EINVAL;
		break;
	case SMPS_OFFSET_EN:
		if (min_uV == 0)
			vsel = 0;
		else if ((min_uV >= 700000) && (min_uV <= 1420000)) {
			vsel = DIV_ROUND_UP(min_uV - 700000, 12500);
			vsel++;
		}
		/* Values 1..57 for vsel are linear and can be calculated
		 * values 58..62 are non linear.
		 */
		else if ((min_uV > 1900000) && (min_uV <= 2100000))
			vsel = 62;
		else if ((min_uV > 1800000) && (min_uV <= 1900000))
			vsel = 61;
		/*
		 * NOTE(review): this branch covers 1350000 < min_uV <=
		 * 1800000, which makes the vsel = 59 branch below
		 * unreachable (compare the 'case 0' ladder, where the
		 * vsel = 60 bound is 1500000). Looks like the bound here
		 * should be 1500000 — confirm against the 6032 datasheet.
		 */
		else if ((min_uV > 1350000) && (min_uV <= 1800000))
			vsel = 60;
		else if ((min_uV > 1350000) && (min_uV <= 1500000))
			vsel = 59;
		else if ((min_uV > 1300000) && (min_uV <= 1350000))
			vsel = 58;
		else
			return -EINVAL;
		break;
	case SMPS_EXTENDED_EN:
		/*
		 * NOTE(review): unlike the other cases this compares
		 * max_uV for the upper bound — verify this asymmetry is
		 * intentional.
		 */
		if (min_uV == 0) {
			vsel = 0;
		} else if ((min_uV >= 1852000) && (max_uV <= 4013600)) {
			vsel = DIV_ROUND_UP(min_uV - 1852000, 38600);
			vsel++;
		}
		break;
	case SMPS_OFFSET_EN|SMPS_EXTENDED_EN:
		if (min_uV == 0) {
			vsel = 0;
		} else if ((min_uV >= 2161000) && (min_uV <= 4321000)) {
			vsel = DIV_ROUND_UP(min_uV - 2161000, 38600);
			vsel++;
		}
		break;
	}

	return vsel;
}

static int twl6030smps_set_voltage_sel(struct regulator_dev *rdev,
				       unsigned int selector)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	return twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS,
			    selector);
}

static int twl6030smps_get_voltage_sel(struct regulator_dev *rdev)
{
	struct twlreg_info *info = rdev_get_drvdata(rdev);

	return twlreg_read(info, TWL_MODULE_PM_RECEIVER, VREG_VOLTAGE_SMPS);
}

static struct regulator_ops twlsmps_ops = {
	.list_voltage		= twl6030smps_list_voltage,
	.map_voltage		= twl6030smps_map_voltage,

	.set_voltage_sel	= twl6030smps_set_voltage_sel,
	.get_voltage_sel	= twl6030smps_get_voltage_sel,

	.enable			= twl6030reg_enable,
	.disable		= twl6030reg_disable,
	.is_enabled		= twl6030reg_is_enabled,

	.set_mode		= twl6030reg_set_mode,

	.get_status		= twl6030reg_get_status,
};

/*----------------------------------------------------------------------*/

/*
 * The macros below stamp out one 'static const struct twlreg_info' per
 * rail; the OF match table and twlreg_probe() look instances up by name.
 */
#define TWL4030_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
			remap_conf) \
		TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, \
			remap_conf, TWL4030, twl4030fixed_ops)
#define TWL6030_FIXED_LDO(label, offset, mVolts, turnon_delay) \
		TWL_FIXED_LDO(label, offset, mVolts, 0x0, turnon_delay, \
			0x0, TWL6030, twl6030fixed_ops)

#define TWL4030_ADJUSTABLE_LDO(label, offset, num, turnon_delay, remap_conf) \
static const struct twlreg_info TWL4030_INFO_##label = { \
	.base = offset, \
	.id = num, \
	.table_len = ARRAY_SIZE(label##_VSEL_table), \
	.table = label##_VSEL_table, \
	.remap = remap_conf, \
	.desc = { \
		.name = #label, \
		.id = TWL4030_REG_##label, \
		.n_voltages = ARRAY_SIZE(label##_VSEL_table), \
		.ops = &twl4030ldo_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		.enable_time = turnon_delay, \
		}, \
	}

#define TWL4030_ADJUSTABLE_SMPS(label, offset, num, turnon_delay, remap_conf) \
static const struct twlreg_info TWL4030_INFO_##label = { \
	.base = offset, \
	.id = num, \
	.remap = remap_conf, \
	.desc = { \
		.name = #label, \
		.id = TWL4030_REG_##label, \
		.ops = &twl4030smps_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		.enable_time = turnon_delay, \
		}, \
	}

#define TWL6030_ADJUSTABLE_SMPS(label) \
static const struct twlreg_info TWL6030_INFO_##label = { \
	.desc = { \
		.name = #label, \
		.id = TWL6030_REG_##label, \
		.ops = &twl6030coresmps_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		}, \
	}

#define TWL6030_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) \
static const struct twlreg_info TWL6030_INFO_##label = { \
	.base = offset, \
	.min_mV = min_mVolts, \
	.max_mV = max_mVolts, \
	.desc = { \
		.name = #label, \
		.id = TWL6030_REG_##label, \
		.n_voltages = 32, \
		.ops = &twl6030ldo_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		}, \
	}

#define TWL6032_ADJUSTABLE_LDO(label, offset, min_mVolts, max_mVolts) \
static const struct twlreg_info TWL6032_INFO_##label = { \
	.base = offset, \
	.min_mV = min_mVolts, \
	.max_mV = max_mVolts, \
	.desc = { \
		.name = #label, \
		.id = TWL6032_REG_##label, \
		.n_voltages = 32, \
		.ops = &twl6030ldo_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		}, \
	}

#define TWL_FIXED_LDO(label, offset, mVolts, num, turnon_delay, remap_conf, \
		family, operations) \
static const struct twlreg_info TWLFIXED_INFO_##label = { \
	.base = offset, \
	.id = num, \
	.min_mV = mVolts, \
	.remap = remap_conf, \
	.desc = { \
		.name = #label, \
		.id = family##_REG_##label, \
		.n_voltages = 1, \
		.ops = &operations, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		.min_uV = mVolts * 1000, \
		.enable_time = turnon_delay, \
		}, \
	}

#define TWL6032_ADJUSTABLE_SMPS(label, offset) \
static const struct twlreg_info TWLSMPS_INFO_##label = { \
	.base = offset, \
	.min_mV = 600, \
	.max_mV = 2100, \
	.desc = { \
		.name = #label, \
		.id = TWL6032_REG_##label, \
		.n_voltages = 63, \
		.ops = &twlsmps_ops, \
		.type = REGULATOR_VOLTAGE, \
		.owner = THIS_MODULE, \
		}, \
	}

/*
 * We list regulators here if systems need some level of
 * software control over them after boot.
 */
TWL4030_ADJUSTABLE_LDO(VAUX1, 0x17, 1, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VAUX2_4030, 0x1b, 2, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VAUX2, 0x1b, 2, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VAUX3, 0x1f, 3, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VAUX4, 0x23, 4, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VMMC1, 0x27, 5, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VMMC2, 0x2b, 6, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VPLL1, 0x2f, 7, 100, 0x00);
TWL4030_ADJUSTABLE_LDO(VPLL2, 0x33, 8, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VSIM, 0x37, 9, 100, 0x00);
TWL4030_ADJUSTABLE_LDO(VDAC, 0x3b, 10, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VINTANA2, 0x43, 12, 100, 0x08);
TWL4030_ADJUSTABLE_LDO(VIO, 0x4b, 14, 1000, 0x08);
TWL4030_ADJUSTABLE_SMPS(VDD1, 0x55, 15, 1000, 0x08);
TWL4030_ADJUSTABLE_SMPS(VDD2, 0x63, 16, 1000, 0x08);
/* VUSBCP is managed *only* by the USB subchip */
/* 6030 REG with base as PMC Slave Misc : 0x0030 */
/* Turnon-delay and remap configuration values for 6030 are not
   verified since the specification is not public */
TWL6030_ADJUSTABLE_SMPS(VDD1);
TWL6030_ADJUSTABLE_SMPS(VDD2);
TWL6030_ADJUSTABLE_SMPS(VDD3);
TWL6030_ADJUSTABLE_LDO(VAUX1_6030, 0x54, 1000, 3300);
TWL6030_ADJUSTABLE_LDO(VAUX2_6030, 0x58, 1000, 3300);
TWL6030_ADJUSTABLE_LDO(VAUX3_6030, 0x5c, 1000, 3300);
TWL6030_ADJUSTABLE_LDO(VMMC, 0x68, 1000, 3300);
TWL6030_ADJUSTABLE_LDO(VPP, 0x6c, 1000, 3300);
TWL6030_ADJUSTABLE_LDO(VUSIM, 0x74, 1000, 3300);
/* 6025 are renamed compared to 6030 versions */
TWL6032_ADJUSTABLE_LDO(LDO2, 0x54, 1000, 3300);
TWL6032_ADJUSTABLE_LDO(LDO4, 0x58, 1000, 3300);
TWL6032_ADJUSTABLE_LDO(LDO3, 0x5c, 1000, 3300);
TWL6032_ADJUSTABLE_LDO(LDO5, 0x68, 1000, 3300);
TWL6032_ADJUSTABLE_LDO(LDO1, 0x6c, 1000, 3300);
TWL6032_ADJUSTABLE_LDO(LDO7, 0x74, 1000, 3300);
TWL6032_ADJUSTABLE_LDO(LDO6, 0x60, 1000, 3300);
TWL6032_ADJUSTABLE_LDO(LDOLN, 0x64, 1000, 3300);
TWL6032_ADJUSTABLE_LDO(LDOUSB, 0x70, 1000, 3300);
TWL4030_FIXED_LDO(VINTANA1, 0x3f, 1500, 11, 100, 0x08);
TWL4030_FIXED_LDO(VINTDIG, 0x47, 1500, 13, 100, 0x08);
TWL4030_FIXED_LDO(VUSB1V5, 0x71, 1500, 17, 100, 0x08); TWL4030_FIXED_LDO(VUSB1V8, 0x74, 1800, 18, 100, 0x08); TWL4030_FIXED_LDO(VUSB3V1, 0x77, 3100, 19, 150, 0x08); TWL6030_FIXED_LDO(VANA, 0x50, 2100, 0); TWL6030_FIXED_LDO(VCXIO, 0x60, 1800, 0); TWL6030_FIXED_LDO(VDAC, 0x64, 1800, 0); TWL6030_FIXED_LDO(VUSB, 0x70, 3300, 0); TWL6030_FIXED_LDO(V1V8, 0x16, 1800, 0); TWL6030_FIXED_LDO(V2V1, 0x1c, 2100, 0); TWL6032_ADJUSTABLE_SMPS(SMPS3, 0x34); TWL6032_ADJUSTABLE_SMPS(SMPS4, 0x10); TWL6032_ADJUSTABLE_SMPS(VIO, 0x16); static u8 twl_get_smps_offset(void) { u8 value; twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value, TWL6030_SMPS_OFFSET); return value; } static u8 twl_get_smps_mult(void) { u8 value; twl_i2c_read_u8(TWL_MODULE_PM_RECEIVER, &value, TWL6030_SMPS_MULT); return value; } #define TWL_OF_MATCH(comp, family, label) \ { \ .compatible = comp, \ .data = &family##_INFO_##label, \ } #define TWL4030_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL4030, label) #define TWL6030_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6030, label) #define TWL6032_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWL6032, label) #define TWLFIXED_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLFIXED, label) #define TWLSMPS_OF_MATCH(comp, label) TWL_OF_MATCH(comp, TWLSMPS, label) static const struct of_device_id twl_of_match[] = { TWL4030_OF_MATCH("ti,twl4030-vaux1", VAUX1), TWL4030_OF_MATCH("ti,twl4030-vaux2", VAUX2_4030), TWL4030_OF_MATCH("ti,twl5030-vaux2", VAUX2), TWL4030_OF_MATCH("ti,twl4030-vaux3", VAUX3), TWL4030_OF_MATCH("ti,twl4030-vaux4", VAUX4), TWL4030_OF_MATCH("ti,twl4030-vmmc1", VMMC1), TWL4030_OF_MATCH("ti,twl4030-vmmc2", VMMC2), TWL4030_OF_MATCH("ti,twl4030-vpll1", VPLL1), TWL4030_OF_MATCH("ti,twl4030-vpll2", VPLL2), TWL4030_OF_MATCH("ti,twl4030-vsim", VSIM), TWL4030_OF_MATCH("ti,twl4030-vdac", VDAC), TWL4030_OF_MATCH("ti,twl4030-vintana2", VINTANA2), TWL4030_OF_MATCH("ti,twl4030-vio", VIO), TWL4030_OF_MATCH("ti,twl4030-vdd1", VDD1), TWL4030_OF_MATCH("ti,twl4030-vdd2", VDD2), 
TWL6030_OF_MATCH("ti,twl6030-vdd1", VDD1), TWL6030_OF_MATCH("ti,twl6030-vdd2", VDD2), TWL6030_OF_MATCH("ti,twl6030-vdd3", VDD3), TWL6030_OF_MATCH("ti,twl6030-vaux1", VAUX1_6030), TWL6030_OF_MATCH("ti,twl6030-vaux2", VAUX2_6030), TWL6030_OF_MATCH("ti,twl6030-vaux3", VAUX3_6030), TWL6030_OF_MATCH("ti,twl6030-vmmc", VMMC), TWL6030_OF_MATCH("ti,twl6030-vpp", VPP), TWL6030_OF_MATCH("ti,twl6030-vusim", VUSIM), TWL6032_OF_MATCH("ti,twl6032-ldo2", LDO2), TWL6032_OF_MATCH("ti,twl6032-ldo4", LDO4), TWL6032_OF_MATCH("ti,twl6032-ldo3", LDO3), TWL6032_OF_MATCH("ti,twl6032-ldo5", LDO5), TWL6032_OF_MATCH("ti,twl6032-ldo1", LDO1), TWL6032_OF_MATCH("ti,twl6032-ldo7", LDO7), TWL6032_OF_MATCH("ti,twl6032-ldo6", LDO6), TWL6032_OF_MATCH("ti,twl6032-ldoln", LDOLN), TWL6032_OF_MATCH("ti,twl6032-ldousb", LDOUSB), TWLFIXED_OF_MATCH("ti,twl4030-vintana1", VINTANA1), TWLFIXED_OF_MATCH("ti,twl4030-vintdig", VINTDIG), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v5", VUSB1V5), TWLFIXED_OF_MATCH("ti,twl4030-vusb1v8", VUSB1V8), TWLFIXED_OF_MATCH("ti,twl4030-vusb3v1", VUSB3V1), TWLFIXED_OF_MATCH("ti,twl6030-vana", VANA), TWLFIXED_OF_MATCH("ti,twl6030-vcxio", VCXIO), TWLFIXED_OF_MATCH("ti,twl6030-vdac", VDAC), TWLFIXED_OF_MATCH("ti,twl6030-vusb", VUSB), TWLFIXED_OF_MATCH("ti,twl6030-v1v8", V1V8), TWLFIXED_OF_MATCH("ti,twl6030-v2v1", V2V1), TWLSMPS_OF_MATCH("ti,twl6032-smps3", SMPS3), TWLSMPS_OF_MATCH("ti,twl6032-smps4", SMPS4), TWLSMPS_OF_MATCH("ti,twl6032-vio", VIO), {}, }; MODULE_DEVICE_TABLE(of, twl_of_match); static int twlreg_probe(struct platform_device *pdev) { int i, id; struct twlreg_info *info; const struct twlreg_info *template; struct regulator_init_data *initdata; struct regulation_constraints *c; struct regulator_dev *rdev; struct twl_regulator_driver_data *drvdata; const struct of_device_id *match; struct regulator_config config = { }; match = of_match_device(twl_of_match, &pdev->dev); if (match) { template = match->data; id = template->desc.id; initdata = 
of_get_regulator_init_data(&pdev->dev, pdev->dev.of_node, &template->desc); drvdata = NULL; } else { id = pdev->id; initdata = dev_get_platdata(&pdev->dev); for (i = 0, template = NULL; i < ARRAY_SIZE(twl_of_match); i++) { template = twl_of_match[i].data; if (template && template->desc.id == id) break; } if (i == ARRAY_SIZE(twl_of_match)) return -ENODEV; drvdata = initdata->driver_data; if (!drvdata) return -EINVAL; } if (!template) return -ENODEV; if (!initdata) return -EINVAL; info = devm_kmemdup(&pdev->dev, template, sizeof(*info), GFP_KERNEL); if (!info) return -ENOMEM; if (drvdata) { /* copy the driver data into regulator data */ info->features = drvdata->features; info->data = drvdata->data; info->set_voltage = drvdata->set_voltage; info->get_voltage = drvdata->get_voltage; } /* Constrain board-specific capabilities according to what * this driver and the chip itself can actually do. */ c = &initdata->constraints; c->valid_modes_mask &= REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY; c->valid_ops_mask &= REGULATOR_CHANGE_VOLTAGE | REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS; switch (id) { case TWL4030_REG_VIO: case TWL4030_REG_VDD1: case TWL4030_REG_VDD2: case TWL4030_REG_VPLL1: case TWL4030_REG_VINTANA1: case TWL4030_REG_VINTANA2: case TWL4030_REG_VINTDIG: c->always_on = true; break; default: break; } switch (id) { case TWL6032_REG_SMPS3: if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS3) info->flags |= SMPS_EXTENDED_EN; if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS3) info->flags |= SMPS_OFFSET_EN; break; case TWL6032_REG_SMPS4: if (twl_get_smps_mult() & SMPS_MULTOFFSET_SMPS4) info->flags |= SMPS_EXTENDED_EN; if (twl_get_smps_offset() & SMPS_MULTOFFSET_SMPS4) info->flags |= SMPS_OFFSET_EN; break; case TWL6032_REG_VIO: if (twl_get_smps_mult() & SMPS_MULTOFFSET_VIO) info->flags |= SMPS_EXTENDED_EN; if (twl_get_smps_offset() & SMPS_MULTOFFSET_VIO) info->flags |= SMPS_OFFSET_EN; break; } config.dev = &pdev->dev; config.init_data = initdata; 
	/* (tail of twlreg_probe) Hand the per-regulator twlreg_info to the
	 * regulator core as driver data and register the regulator.
	 */
	config.driver_data = info;
	config.of_node = pdev->dev.of_node;

	rdev = devm_regulator_register(&pdev->dev, &info->desc, &config);
	if (IS_ERR(rdev)) {
		dev_err(&pdev->dev, "can't register %s, %ld\n",
			info->desc.name, PTR_ERR(rdev));
		return PTR_ERR(rdev);
	}
	platform_set_drvdata(pdev, rdev);

	/* Only 4030-class chips have the REMAP register; program the
	 * sleep/off remap state carried in the static template.
	 */
	if (twl_class_is_4030())
		twlreg_write(info, TWL_MODULE_PM_RECEIVER, VREG_REMAP,
			     info->remap);

	/* NOTE: many regulators support short-circuit IRQs (presentable
	 * as REGULATOR_OVER_CURRENT notifications?) configured via:
	 *  - SC_CONFIG
	 *  - SC_DETECT1 (vintana2, vmmc1/2, vaux1/2/3/4)
	 *  - SC_DETECT2 (vusb, vdac, vio, vdd1/2, vpll2)
	 *  - IT_CONFIG
	 */

	return 0;
}

MODULE_ALIAS("platform:twl_reg");

static struct platform_driver twlreg_driver = {
	.probe		= twlreg_probe,
	/* NOTE: short name, to work around driver model truncation of
	 * "twl_regulator.12" (and friends) to "twl_regulator.1".
	 */
	.driver  = {
		.name  = "twl_reg",
		.of_match_table = of_match_ptr(twl_of_match),
	},
};

/* Registered at subsys_initcall time so regulators exist before the
 * drivers that consume them probe.
 */
static int __init twlreg_init(void)
{
	return platform_driver_register(&twlreg_driver);
}
subsys_initcall(twlreg_init);

static void __exit twlreg_exit(void)
{
	platform_driver_unregister(&twlreg_driver);
}
module_exit(twlreg_exit)

MODULE_DESCRIPTION("TWL regulator driver");
MODULE_LICENSE("GPL");
gpl-2.0
lbule/ALPS.L0.MP6.V3.18_LCSH6795_LWT_L_KERNEL
drivers/net/wireless/ipw2x00/ipw2100.c
2080
230710
/****************************************************************************** Copyright(c) 2003 - 2006 Intel Corporation. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of version 2 of the GNU General Public License as published by the Free Software Foundation. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. The full GNU General Public License is included in this distribution in the file called LICENSE. Contact Information: Intel Linux Wireless <ilw@linux.intel.com> Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 Portions of this file are based on the sample_* files provided by Wireless Extensions 0.26 package and copyright (c) 1997-2003 Jean Tourrilhes <jt@hpl.hp.com> Portions of this file are based on the Host AP project, Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen <j@w1.fi> Copyright (c) 2002-2003, Jouni Malinen <j@w1.fi> Portions of ipw2100_mod_firmware_load, ipw2100_do_mod_firmware_load, and ipw2100_fw_load are loosely based on drivers/sound/sound_firmware.c available in the 2.4.25 kernel sources, and are copyright (c) Alan Cox ******************************************************************************/ /* Initial driver on which this is based was developed by Janusz Gorycki, Maciej Urbaniak, and Maciej Sosnowski. Promiscuous mode support added by Jacek Wysoczynski and Maciej Urbaniak. 
Theory of Operation

Tx - Commands and Data

Firmware and host share a circular queue of Transmit Buffer Descriptors (TBDs)
Each TBD contains a pointer to the physical (dma_addr_t) address of data being
sent to the firmware as well as the length of the data.

The host writes to the TBD queue at the WRITE index.  The WRITE index points
to the _next_ packet to be written and is advanced after the TBD has been
filled.

The firmware pulls from the TBD queue at the READ index.  The READ index
points to the entry currently being read, and is advanced once the firmware
is done with a packet.

When data is sent to the firmware, the first TBD is used to indicate to the
firmware if a Command or Data is being sent.  If it is Command, all of the
command information is contained within the physical address referred to by
the TBD.  If it is Data, the first TBD indicates the type of data packet,
number of fragments, etc.  The next TBD then refers to the actual packet
location.

The Tx flow cycle is as follows:

1) ipw2100_tx() is called by kernel with SKB to transmit
2) Packet is moved from the tx_free_list and appended to the transmit pending
   list (tx_pend_list)
3) work is scheduled to move pending packets into the shared circular queue.
4) when placing packet in the circular queue, the incoming SKB is DMA mapped
   to a physical address.  That address is entered into a TBD.  Two TBDs are
   filled out.  The first indicating a data packet, the second referring to
   the actual payload data.
5) the packet is removed from tx_pend_list and placed on the end of the
   firmware pending list (fw_pend_list)
6) firmware is notified that the WRITE index has been updated
7) Once the firmware has processed the TBD, INTA is triggered.
8) For each Tx interrupt received from the firmware, the READ index is checked
   to see which TBDs are done being processed.
9) For each TBD that has been processed, the ISR pulls the oldest packet from
   the fw_pend_list.
10)The packet structure contained in the fw_pend_list is then used to unmap the DMA address and to free the SKB originally passed to the driver from the kernel. 11)The packet structure is placed onto the tx_free_list The above steps are the same for commands, only the msg_free_list/msg_pend_list are used instead of tx_free_list/tx_pend_list ... Critical Sections / Locking : There are two locks utilized. The first is the low level lock (priv->low_lock) that protects the following: - Access to the Tx/Rx queue lists via priv->low_lock. The lists are as follows: tx_free_list : Holds pre-allocated Tx buffers. TAIL modified in __ipw2100_tx_process() HEAD modified in ipw2100_tx() tx_pend_list : Holds used Tx buffers waiting to go into the TBD ring TAIL modified ipw2100_tx() HEAD modified by ipw2100_tx_send_data() msg_free_list : Holds pre-allocated Msg (Command) buffers TAIL modified in __ipw2100_tx_process() HEAD modified in ipw2100_hw_send_command() msg_pend_list : Holds used Msg buffers waiting to go into the TBD ring TAIL modified in ipw2100_hw_send_command() HEAD modified in ipw2100_tx_send_commands() The flow of data on the TX side is as follows: MSG_FREE_LIST + COMMAND => MSG_PEND_LIST => TBD => MSG_FREE_LIST TX_FREE_LIST + DATA => TX_PEND_LIST => TBD => TX_FREE_LIST The methods that work on the TBD ring are protected via priv->low_lock. - The internal data state of the device itself - Access to the firmware read/write indexes for the BD queues and associated logic All external entry functions are locked with the priv->action_lock to ensure that only one external action is invoked at a time. 
*/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/kmod.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/ethtool.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <asm/uaccess.h> #include <asm/io.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/unistd.h> #include <linux/stringify.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/time.h> #include <linux/firmware.h> #include <linux/acpi.h> #include <linux/ctype.h> #include <linux/pm_qos.h> #include <net/lib80211.h> #include "ipw2100.h" #include "ipw.h" #define IPW2100_VERSION "git-1.2.2" #define DRV_NAME "ipw2100" #define DRV_VERSION IPW2100_VERSION #define DRV_DESCRIPTION "Intel(R) PRO/Wireless 2100 Network Driver" #define DRV_COPYRIGHT "Copyright(c) 2003-2006 Intel Corporation" static struct pm_qos_request ipw2100_pm_qos_req; /* Debugging stuff */ #ifdef CONFIG_IPW2100_DEBUG #define IPW2100_RX_DEBUG /* Reception debugging */ #endif MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_VERSION(DRV_VERSION); MODULE_AUTHOR(DRV_COPYRIGHT); MODULE_LICENSE("GPL"); static int debug = 0; static int network_mode = 0; static int channel = 0; static int associate = 0; static int disable = 0; #ifdef CONFIG_PM static struct ipw2100_fw ipw2100_firmware; #endif #include <linux/moduleparam.h> module_param(debug, int, 0444); module_param_named(mode, network_mode, int, 0444); module_param(channel, int, 0444); module_param(associate, int, 0444); module_param(disable, int, 0444); MODULE_PARM_DESC(debug, "debug level"); MODULE_PARM_DESC(mode, "network mode (0=BSS,1=IBSS,2=Monitor)"); MODULE_PARM_DESC(channel, "channel"); MODULE_PARM_DESC(associate, "auto associate when scanning (default off)"); MODULE_PARM_DESC(disable, "manually disable the radio (default 0 
[radio on])"); static u32 ipw2100_debug_level = IPW_DL_NONE; #ifdef CONFIG_IPW2100_DEBUG #define IPW_DEBUG(level, message...) \ do { \ if (ipw2100_debug_level & (level)) { \ printk(KERN_DEBUG "ipw2100: %c %s ", \ in_interrupt() ? 'I' : 'U', __func__); \ printk(message); \ } \ } while (0) #else #define IPW_DEBUG(level, message...) do {} while (0) #endif /* CONFIG_IPW2100_DEBUG */ #ifdef CONFIG_IPW2100_DEBUG static const char *command_types[] = { "undefined", "unused", /* HOST_ATTENTION */ "HOST_COMPLETE", "unused", /* SLEEP */ "unused", /* HOST_POWER_DOWN */ "unused", "SYSTEM_CONFIG", "unused", /* SET_IMR */ "SSID", "MANDATORY_BSSID", "AUTHENTICATION_TYPE", "ADAPTER_ADDRESS", "PORT_TYPE", "INTERNATIONAL_MODE", "CHANNEL", "RTS_THRESHOLD", "FRAG_THRESHOLD", "POWER_MODE", "TX_RATES", "BASIC_TX_RATES", "WEP_KEY_INFO", "unused", "unused", "unused", "unused", "WEP_KEY_INDEX", "WEP_FLAGS", "ADD_MULTICAST", "CLEAR_ALL_MULTICAST", "BEACON_INTERVAL", "ATIM_WINDOW", "CLEAR_STATISTICS", "undefined", "undefined", "undefined", "undefined", "TX_POWER_INDEX", "undefined", "undefined", "undefined", "undefined", "undefined", "undefined", "BROADCAST_SCAN", "CARD_DISABLE", "PREFERRED_BSSID", "SET_SCAN_OPTIONS", "SCAN_DWELL_TIME", "SWEEP_TABLE", "AP_OR_STATION_TABLE", "GROUP_ORDINALS", "SHORT_RETRY_LIMIT", "LONG_RETRY_LIMIT", "unused", /* SAVE_CALIBRATION */ "unused", /* RESTORE_CALIBRATION */ "undefined", "undefined", "undefined", "HOST_PRE_POWER_DOWN", "unused", /* HOST_INTERRUPT_COALESCING */ "undefined", "CARD_DISABLE_PHY_OFF", "MSDU_TX_RATES", "undefined", "SET_STATION_STAT_BITS", "CLEAR_STATIONS_STAT_BITS", "LEAP_ROGUE_MODE", "SET_SECURITY_INFORMATION", "DISASSOCIATION_BSSID", "SET_WPA_ASS_IE" }; #endif static const long ipw2100_frequencies[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; #define FREQ_COUNT ARRAY_SIZE(ipw2100_frequencies) static struct ieee80211_rate ipw2100_bg_rates[] = { { .bitrate = 10 }, { .bitrate = 20, .flags = 
IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 55, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, { .bitrate = 110, .flags = IEEE80211_RATE_SHORT_PREAMBLE }, }; #define RATE_COUNT ARRAY_SIZE(ipw2100_bg_rates) /* Pre-decl until we get the code solid and then we can clean it up */ static void ipw2100_tx_send_commands(struct ipw2100_priv *priv); static void ipw2100_tx_send_data(struct ipw2100_priv *priv); static int ipw2100_adapter_setup(struct ipw2100_priv *priv); static void ipw2100_queues_initialize(struct ipw2100_priv *priv); static void ipw2100_queues_free(struct ipw2100_priv *priv); static int ipw2100_queues_allocate(struct ipw2100_priv *priv); static int ipw2100_fw_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static int ipw2100_get_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, size_t max); static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, size_t max); static void ipw2100_release_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static int ipw2100_ucode_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw); static void ipw2100_wx_event_work(struct work_struct *work); static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev); static struct iw_handler_def ipw2100_wx_handler_def; static inline void read_register(struct net_device *dev, u32 reg, u32 * val) { struct ipw2100_priv *priv = libipw_priv(dev); *val = ioread32(priv->ioaddr + reg); IPW_DEBUG_IO("r: 0x%08X => 0x%08X\n", reg, *val); } static inline void write_register(struct net_device *dev, u32 reg, u32 val) { struct ipw2100_priv *priv = libipw_priv(dev); iowrite32(val, priv->ioaddr + reg); IPW_DEBUG_IO("w: 0x%08X <= 0x%08X\n", reg, val); } static inline void read_register_word(struct net_device *dev, u32 reg, u16 * val) { struct ipw2100_priv *priv = libipw_priv(dev); *val = ioread16(priv->ioaddr + reg); IPW_DEBUG_IO("r: 0x%08X => %04X\n", reg, *val); } 
/*
 * Direct MMIO accessors (byte/word) and indirect "NIC" memory accessors.
 *
 * Direct accessors touch the mapped BAR at priv->ioaddr + reg.  Indirect
 * accessors first latch the target address into
 * IPW_REG_INDIRECT_ACCESS_ADDRESS (masked by IPW_REG_INDIRECT_ADDR_MASK)
 * and then move the data through IPW_REG_INDIRECT_ACCESS_DATA.
 */
static inline void read_register_byte(struct net_device *dev, u32 reg, u8 * val)
{
	struct ipw2100_priv *priv = libipw_priv(dev);

	*val = ioread8(priv->ioaddr + reg);
	IPW_DEBUG_IO("r: 0x%08X => %02X\n", reg, *val);
}

static inline void write_register_word(struct net_device *dev, u32 reg, u16 val)
{
	struct ipw2100_priv *priv = libipw_priv(dev);

	iowrite16(val, priv->ioaddr + reg);
	IPW_DEBUG_IO("w: 0x%08X <= %04X\n", reg, val);
}

static inline void write_register_byte(struct net_device *dev, u32 reg, u8 val)
{
	struct ipw2100_priv *priv = libipw_priv(dev);

	iowrite8(val, priv->ioaddr + reg);
	/* FIX: debug format said "=<"; every other write accessor logs "<=". */
	IPW_DEBUG_IO("w: 0x%08X <= %02X\n", reg, val);
}

/* Indirect dword read: latch address, then fetch through the data port. */
static inline void read_nic_dword(struct net_device *dev, u32 addr, u32 * val)
{
	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
		       addr & IPW_REG_INDIRECT_ADDR_MASK);
	read_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
}

static inline void write_nic_dword(struct net_device *dev, u32 addr, u32 val)
{
	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
		       addr & IPW_REG_INDIRECT_ADDR_MASK);
	write_register(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
}

static inline void read_nic_word(struct net_device *dev, u32 addr, u16 * val)
{
	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
		       addr & IPW_REG_INDIRECT_ADDR_MASK);
	read_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
}

static inline void write_nic_word(struct net_device *dev, u32 addr, u16 val)
{
	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
		       addr & IPW_REG_INDIRECT_ADDR_MASK);
	write_register_word(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
}

static inline void read_nic_byte(struct net_device *dev, u32 addr, u8 * val)
{
	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
		       addr & IPW_REG_INDIRECT_ADDR_MASK);
	read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA, val);
}

static inline void write_nic_byte(struct net_device *dev, u32 addr, u8 val)
{
	write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS,
		       addr & IPW_REG_INDIRECT_ADDR_MASK);
	write_register_byte(dev,
IPW_REG_INDIRECT_ACCESS_DATA, val); } static inline void write_nic_auto_inc_address(struct net_device *dev, u32 addr) { write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS, addr & IPW_REG_INDIRECT_ADDR_MASK); } static inline void write_nic_dword_auto_inc(struct net_device *dev, u32 val) { write_register(dev, IPW_REG_AUTOINCREMENT_DATA, val); } static void write_nic_memory(struct net_device *dev, u32 addr, u32 len, const u8 * buf) { u32 aligned_addr; u32 aligned_len; u32 dif_len; u32 i; /* read first nibble byte by byte */ aligned_addr = addr & (~0x3); dif_len = addr - aligned_addr; if (dif_len) { /* Start reading at aligned_addr + dif_len */ write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr); for (i = dif_len; i < 4; i++, buf++) write_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, *buf); len -= dif_len; aligned_addr += 4; } /* read DWs through autoincrement registers */ write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS, aligned_addr); aligned_len = len & (~0x3); for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4) write_register(dev, IPW_REG_AUTOINCREMENT_DATA, *(u32 *) buf); /* copy the last nibble */ dif_len = len - aligned_len; write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr); for (i = 0; i < dif_len; i++, buf++) write_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, *buf); } static void read_nic_memory(struct net_device *dev, u32 addr, u32 len, u8 * buf) { u32 aligned_addr; u32 aligned_len; u32 dif_len; u32 i; /* read first nibble byte by byte */ aligned_addr = addr & (~0x3); dif_len = addr - aligned_addr; if (dif_len) { /* Start reading at aligned_addr + dif_len */ write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr); for (i = dif_len; i < 4; i++, buf++) read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf); len -= dif_len; aligned_addr += 4; } /* read DWs through autoincrement registers */ write_register(dev, IPW_REG_AUTOINCREMENT_ADDRESS, aligned_addr); aligned_len = len & (~0x3); 
for (i = 0; i < aligned_len; i += 4, buf += 4, aligned_addr += 4) read_register(dev, IPW_REG_AUTOINCREMENT_DATA, (u32 *) buf); /* copy the last nibble */ dif_len = len - aligned_len; write_register(dev, IPW_REG_INDIRECT_ACCESS_ADDRESS, aligned_addr); for (i = 0; i < dif_len; i++, buf++) read_register_byte(dev, IPW_REG_INDIRECT_ACCESS_DATA + i, buf); } static bool ipw2100_hw_is_adapter_in_system(struct net_device *dev) { u32 dbg; read_register(dev, IPW_REG_DOA_DEBUG_AREA_START, &dbg); return dbg == IPW_DATA_DOA_DEBUG_VALUE; } static int ipw2100_get_ordinal(struct ipw2100_priv *priv, u32 ord, void *val, u32 * len) { struct ipw2100_ordinals *ordinals = &priv->ordinals; u32 addr; u32 field_info; u16 field_len; u16 field_count; u32 total_length; if (ordinals->table1_addr == 0) { printk(KERN_WARNING DRV_NAME ": attempt to use fw ordinals " "before they have been loaded.\n"); return -EINVAL; } if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) { if (*len < IPW_ORD_TAB_1_ENTRY_SIZE) { *len = IPW_ORD_TAB_1_ENTRY_SIZE; printk(KERN_WARNING DRV_NAME ": ordinal buffer length too small, need %zd\n", IPW_ORD_TAB_1_ENTRY_SIZE); return -EINVAL; } read_nic_dword(priv->net_dev, ordinals->table1_addr + (ord << 2), &addr); read_nic_dword(priv->net_dev, addr, val); *len = IPW_ORD_TAB_1_ENTRY_SIZE; return 0; } if (IS_ORDINAL_TABLE_TWO(ordinals, ord)) { ord -= IPW_START_ORD_TAB_2; /* get the address of statistic */ read_nic_dword(priv->net_dev, ordinals->table2_addr + (ord << 3), &addr); /* get the second DW of statistics ; * two 16-bit words - first is length, second is count */ read_nic_dword(priv->net_dev, ordinals->table2_addr + (ord << 3) + sizeof(u32), &field_info); /* get each entry length */ field_len = *((u16 *) & field_info); /* get number of entries */ field_count = *(((u16 *) & field_info) + 1); /* abort if no enough memory */ total_length = field_len * field_count; if (total_length > *len) { *len = total_length; return -EINVAL; } *len = total_length; if (!total_length) return 0; /* 
read the ordinal data from the SRAM */ read_nic_memory(priv->net_dev, addr, total_length, val); return 0; } printk(KERN_WARNING DRV_NAME ": ordinal %d neither in table 1 nor " "in table 2\n", ord); return -EINVAL; } static int ipw2100_set_ordinal(struct ipw2100_priv *priv, u32 ord, u32 * val, u32 * len) { struct ipw2100_ordinals *ordinals = &priv->ordinals; u32 addr; if (IS_ORDINAL_TABLE_ONE(ordinals, ord)) { if (*len != IPW_ORD_TAB_1_ENTRY_SIZE) { *len = IPW_ORD_TAB_1_ENTRY_SIZE; IPW_DEBUG_INFO("wrong size\n"); return -EINVAL; } read_nic_dword(priv->net_dev, ordinals->table1_addr + (ord << 2), &addr); write_nic_dword(priv->net_dev, addr, *val); *len = IPW_ORD_TAB_1_ENTRY_SIZE; return 0; } IPW_DEBUG_INFO("wrong table\n"); if (IS_ORDINAL_TABLE_TWO(ordinals, ord)) return -EINVAL; return -EINVAL; } static char *snprint_line(char *buf, size_t count, const u8 * data, u32 len, u32 ofs) { int out, i, j, l; char c; out = snprintf(buf, count, "%08X", ofs); for (l = 0, i = 0; i < 2; i++) { out += snprintf(buf + out, count - out, " "); for (j = 0; j < 8 && l < len; j++, l++) out += snprintf(buf + out, count - out, "%02X ", data[(i * 8 + j)]); for (; j < 8; j++) out += snprintf(buf + out, count - out, " "); } out += snprintf(buf + out, count - out, " "); for (l = 0, i = 0; i < 2; i++) { out += snprintf(buf + out, count - out, " "); for (j = 0; j < 8 && l < len; j++, l++) { c = data[(i * 8 + j)]; if (!isascii(c) || !isprint(c)) c = '.'; out += snprintf(buf + out, count - out, "%c", c); } for (; j < 8; j++) out += snprintf(buf + out, count - out, " "); } return buf; } static void printk_buf(int level, const u8 * data, u32 len) { char line[81]; u32 ofs = 0; if (!(ipw2100_debug_level & level)) return; while (len) { printk(KERN_DEBUG "%s\n", snprint_line(line, sizeof(line), &data[ofs], min(len, 16U), ofs)); ofs += 16; len -= min(len, 16U); } } #define MAX_RESET_BACKOFF 10 static void schedule_reset(struct ipw2100_priv *priv) { unsigned long now = get_seconds(); /* If we haven't 
received a reset request within the backoff period, * then we can reset the backoff interval so this reset occurs * immediately */ if (priv->reset_backoff && (now - priv->last_reset > priv->reset_backoff)) priv->reset_backoff = 0; priv->last_reset = get_seconds(); if (!(priv->status & STATUS_RESET_PENDING)) { IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n", priv->net_dev->name, priv->reset_backoff); netif_carrier_off(priv->net_dev); netif_stop_queue(priv->net_dev); priv->status |= STATUS_RESET_PENDING; if (priv->reset_backoff) schedule_delayed_work(&priv->reset_work, priv->reset_backoff * HZ); else schedule_delayed_work(&priv->reset_work, 0); if (priv->reset_backoff < MAX_RESET_BACKOFF) priv->reset_backoff++; wake_up_interruptible(&priv->wait_command_queue); } else IPW_DEBUG_INFO("%s: Firmware restart already in progress.\n", priv->net_dev->name); } #define HOST_COMPLETE_TIMEOUT (2 * HZ) static int ipw2100_hw_send_command(struct ipw2100_priv *priv, struct host_command *cmd) { struct list_head *element; struct ipw2100_tx_packet *packet; unsigned long flags; int err = 0; IPW_DEBUG_HC("Sending %s command (#%d), %d bytes\n", command_types[cmd->host_command], cmd->host_command, cmd->host_command_length); printk_buf(IPW_DL_HC, (u8 *) cmd->host_command_parameters, cmd->host_command_length); spin_lock_irqsave(&priv->low_lock, flags); if (priv->fatal_error) { IPW_DEBUG_INFO ("Attempt to send command while hardware in fatal error condition.\n"); err = -EIO; goto fail_unlock; } if (!(priv->status & STATUS_RUNNING)) { IPW_DEBUG_INFO ("Attempt to send command while hardware is not running.\n"); err = -EIO; goto fail_unlock; } if (priv->status & STATUS_CMD_ACTIVE) { IPW_DEBUG_INFO ("Attempt to send command while another command is pending.\n"); err = -EBUSY; goto fail_unlock; } if (list_empty(&priv->msg_free_list)) { IPW_DEBUG_INFO("no available msg buffers\n"); goto fail_unlock; } priv->status |= STATUS_CMD_ACTIVE; priv->messages_sent++; element = 
priv->msg_free_list.next; packet = list_entry(element, struct ipw2100_tx_packet, list); packet->jiffy_start = jiffies; /* initialize the firmware command packet */ packet->info.c_struct.cmd->host_command_reg = cmd->host_command; packet->info.c_struct.cmd->host_command_reg1 = cmd->host_command1; packet->info.c_struct.cmd->host_command_len_reg = cmd->host_command_length; packet->info.c_struct.cmd->sequence = cmd->host_command_sequence; memcpy(packet->info.c_struct.cmd->host_command_params_reg, cmd->host_command_parameters, sizeof(packet->info.c_struct.cmd->host_command_params_reg)); list_del(element); DEC_STAT(&priv->msg_free_stat); list_add_tail(element, &priv->msg_pend_list); INC_STAT(&priv->msg_pend_stat); ipw2100_tx_send_commands(priv); ipw2100_tx_send_data(priv); spin_unlock_irqrestore(&priv->low_lock, flags); /* * We must wait for this command to complete before another * command can be sent... but if we wait more than 3 seconds * then there is a problem. */ err = wait_event_interruptible_timeout(priv->wait_command_queue, !(priv-> status & STATUS_CMD_ACTIVE), HOST_COMPLETE_TIMEOUT); if (err == 0) { IPW_DEBUG_INFO("Command completion failed out after %dms.\n", 1000 * (HOST_COMPLETE_TIMEOUT / HZ)); priv->fatal_error = IPW2100_ERR_MSG_TIMEOUT; priv->status &= ~STATUS_CMD_ACTIVE; schedule_reset(priv); return -EIO; } if (priv->fatal_error) { printk(KERN_WARNING DRV_NAME ": %s: firmware fatal error\n", priv->net_dev->name); return -EIO; } /* !!!!! HACK TEST !!!!! * When lots of debug trace statements are enabled, the driver * doesn't seem to have as many firmware restart cycles... * * As a test, we're sticking in a 1/100s delay here */ schedule_timeout_uninterruptible(msecs_to_jiffies(10)); return 0; fail_unlock: spin_unlock_irqrestore(&priv->low_lock, flags); return err; } /* * Verify the values and data access of the hardware * No locks needed or used. No functions called. 
*/ static int ipw2100_verify(struct ipw2100_priv *priv) { u32 data1, data2; u32 address; u32 val1 = 0x76543210; u32 val2 = 0xFEDCBA98; /* Domain 0 check - all values should be DOA_DEBUG */ for (address = IPW_REG_DOA_DEBUG_AREA_START; address < IPW_REG_DOA_DEBUG_AREA_END; address += sizeof(u32)) { read_register(priv->net_dev, address, &data1); if (data1 != IPW_DATA_DOA_DEBUG_VALUE) return -EIO; } /* Domain 1 check - use arbitrary read/write compare */ for (address = 0; address < 5; address++) { /* The memory area is not used now */ write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32, val1); write_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36, val2); read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x32, &data1); read_register(priv->net_dev, IPW_REG_DOMAIN_1_OFFSET + 0x36, &data2); if (val1 == data1 && val2 == data2) return 0; } return -EIO; } /* * * Loop until the CARD_DISABLED bit is the same value as the * supplied parameter * * TODO: See if it would be more efficient to do a wait/wake * cycle and have the completion event trigger the wakeup * */ #define IPW_CARD_DISABLE_COMPLETE_WAIT 100 // 100 milli static int ipw2100_wait_for_card_state(struct ipw2100_priv *priv, int state) { int i; u32 card_state; u32 len = sizeof(card_state); int err; for (i = 0; i <= IPW_CARD_DISABLE_COMPLETE_WAIT * 1000; i += 50) { err = ipw2100_get_ordinal(priv, IPW_ORD_CARD_DISABLED, &card_state, &len); if (err) { IPW_DEBUG_INFO("Query of CARD_DISABLED ordinal " "failed.\n"); return 0; } /* We'll break out if either the HW state says it is * in the state we want, or if HOST_COMPLETE command * finishes */ if ((card_state == state) || ((priv->status & STATUS_ENABLED) ? IPW_HW_STATE_ENABLED : IPW_HW_STATE_DISABLED) == state) { if (state == IPW_HW_STATE_ENABLED) priv->status |= STATUS_ENABLED; else priv->status &= ~STATUS_ENABLED; return 0; } udelay(50); } IPW_DEBUG_INFO("ipw2100_wait_for_card_state to %s state timed out\n", state ? 
"DISABLED" : "ENABLED"); return -EIO; } /********************************************************************* Procedure : sw_reset_and_clock Purpose : Asserts s/w reset, asserts clock initialization and waits for clock stabilization ********************************************************************/ static int sw_reset_and_clock(struct ipw2100_priv *priv) { int i; u32 r; // assert s/w reset write_register(priv->net_dev, IPW_REG_RESET_REG, IPW_AUX_HOST_RESET_REG_SW_RESET); // wait for clock stabilization for (i = 0; i < 1000; i++) { udelay(IPW_WAIT_RESET_ARC_COMPLETE_DELAY); // check clock ready bit read_register(priv->net_dev, IPW_REG_RESET_REG, &r); if (r & IPW_AUX_HOST_RESET_REG_PRINCETON_RESET) break; } if (i == 1000) return -EIO; // TODO: better error value /* set "initialization complete" bit to move adapter to * D0 state */ write_register(priv->net_dev, IPW_REG_GP_CNTRL, IPW_AUX_HOST_GP_CNTRL_BIT_INIT_DONE); /* wait for clock stabilization */ for (i = 0; i < 10000; i++) { udelay(IPW_WAIT_CLOCK_STABILIZATION_DELAY * 4); /* check clock ready bit */ read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r); if (r & IPW_AUX_HOST_GP_CNTRL_BIT_CLOCK_READY) break; } if (i == 10000) return -EIO; /* TODO: better error value */ /* set D0 standby bit */ read_register(priv->net_dev, IPW_REG_GP_CNTRL, &r); write_register(priv->net_dev, IPW_REG_GP_CNTRL, r | IPW_AUX_HOST_GP_CNTRL_BIT_HOST_ALLOWS_STANDBY); return 0; } /********************************************************************* Procedure : ipw2100_download_firmware Purpose : Initiaze adapter after power on. The sequence is: 1. assert s/w reset first! 2. awake clocks & wait for clock stabilization 3. hold ARC (don't ask me why...) 4. load Dino ucode and reset/clock init again 5. zero-out shared mem 6. 
                          download f/w
 *******************************************************************/
static int ipw2100_download_firmware(struct ipw2100_priv *priv)
{
	u32 address;
	int err;

#ifndef CONFIG_PM
	/* Fetch the firmware and microcode */
	struct ipw2100_fw ipw2100_firmware;
#endif

	/* After a fatal error the firmware state is unknown; refuse to
	 * download until the interface has been fully brought down. */
	if (priv->fatal_error) {
		IPW_DEBUG_ERROR("%s: ipw2100_download_firmware called after "
				"fatal error %d. Interface must be brought down.\n",
				priv->net_dev->name, priv->fatal_error);
		return -EINVAL;
	}
#ifdef CONFIG_PM
	/* With PM the firmware image is cached globally across suspend/resume
	 * and only (re)fetched when not yet loaded. */
	if (!ipw2100_firmware.version) {
		err = ipw2100_get_firmware(priv, &ipw2100_firmware);
		if (err) {
			IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
					priv->net_dev->name, err);
			priv->fatal_error = IPW2100_ERR_FW_LOAD;
			goto fail;
		}
	}
#else
	err = ipw2100_get_firmware(priv, &ipw2100_firmware);
	if (err) {
		IPW_DEBUG_ERROR("%s: ipw2100_get_firmware failed: %d\n",
				priv->net_dev->name, err);
		priv->fatal_error = IPW2100_ERR_FW_LOAD;
		goto fail;
	}
#endif
	priv->firmware_version = ipw2100_firmware.version;

	/* s/w reset and clock stabilization */
	err = sw_reset_and_clock(priv);
	if (err) {
		IPW_DEBUG_ERROR("%s: sw_reset_and_clock failed: %d\n",
				priv->net_dev->name, err);
		goto fail;
	}

	err = ipw2100_verify(priv);
	if (err) {
		IPW_DEBUG_ERROR("%s: ipw2100_verify failed: %d\n",
				priv->net_dev->name, err);
		goto fail;
	}

	/* Hold ARC */
	write_nic_dword(priv->net_dev,
			IPW_INTERNAL_REGISTER_HALT_AND_RESET, 0x80000000);

	/* allow ARC to run */
	write_register(priv->net_dev, IPW_REG_RESET_REG, 0);

	/* load microcode */
	err = ipw2100_ucode_download(priv, &ipw2100_firmware);
	if (err) {
		printk(KERN_ERR DRV_NAME ": %s: Error loading microcode: %d\n",
		       priv->net_dev->name, err);
		goto fail;
	}

	/* release ARC */
	write_nic_dword(priv->net_dev,
			IPW_INTERNAL_REGISTER_HALT_AND_RESET, 0x00000000);

	/* s/w reset and clock stabilization (again!!!) */
	err = sw_reset_and_clock(priv);
	if (err) {
		printk(KERN_ERR DRV_NAME
		       ": %s: sw_reset_and_clock failed: %d\n",
		       priv->net_dev->name, err);
		goto fail;
	}

	/* load f/w */
	err = ipw2100_fw_download(priv, &ipw2100_firmware);
	if (err) {
		IPW_DEBUG_ERROR("%s: Error loading firmware: %d\n",
				priv->net_dev->name, err);
		goto fail;
	}

#ifndef CONFIG_PM
	/*
	 * When the .resume method of the driver is called, the other
	 * part of the system, i.e. the ide driver could still stay in
	 * the suspend stage. This prevents us from loading the firmware
	 * from the disk. --YZ
	 */

	/* free any storage allocated for firmware image */
	ipw2100_release_firmware(priv, &ipw2100_firmware);
#endif

	/* zero out Domain 1 area indirectly (Si requirement) */
	for (address = IPW_HOST_FW_SHARED_AREA0;
	     address < IPW_HOST_FW_SHARED_AREA0_END; address += 4)
		write_nic_dword(priv->net_dev, address, 0);
	for (address = IPW_HOST_FW_SHARED_AREA1;
	     address < IPW_HOST_FW_SHARED_AREA1_END; address += 4)
		write_nic_dword(priv->net_dev, address, 0);
	for (address = IPW_HOST_FW_SHARED_AREA2;
	     address < IPW_HOST_FW_SHARED_AREA2_END; address += 4)
		write_nic_dword(priv->net_dev, address, 0);
	for (address = IPW_HOST_FW_SHARED_AREA3;
	     address < IPW_HOST_FW_SHARED_AREA3_END; address += 4)
		write_nic_dword(priv->net_dev, address, 0);
	for (address = IPW_HOST_FW_INTERRUPT_AREA;
	     address < IPW_HOST_FW_INTERRUPT_AREA_END; address += 4)
		write_nic_dword(priv->net_dev, address, 0);

	return 0;

fail:
	ipw2100_release_firmware(priv, &ipw2100_firmware);
	return err;
}

/* Unmask the adapter's interrupt sources (idempotent via STATUS_INT_ENABLED) */
static inline void ipw2100_enable_interrupts(struct ipw2100_priv *priv)
{
	if (priv->status & STATUS_INT_ENABLED)
		return;
	priv->status |= STATUS_INT_ENABLED;
	write_register(priv->net_dev, IPW_REG_INTA_MASK, IPW_INTERRUPT_MASK);
}

/* Mask all adapter interrupt sources (idempotent via STATUS_INT_ENABLED) */
static inline void ipw2100_disable_interrupts(struct ipw2100_priv *priv)
{
	if (!(priv->status & STATUS_INT_ENABLED))
		return;
	priv->status &= ~STATUS_INT_ENABLED;
	write_register(priv->net_dev, IPW_REG_INTA_MASK, 0x0);
}

/* Read the firmware's ordinal table addresses/sizes into priv->ordinals */
static void ipw2100_initialize_ordinals(struct
					ipw2100_priv *priv)
{
	struct ipw2100_ordinals *ord = &priv->ordinals;

	IPW_DEBUG_INFO("enter\n");

	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_1,
		      &ord->table1_addr);

	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_ORDINALS_TABLE_2,
		      &ord->table2_addr);

	read_nic_dword(priv->net_dev, ord->table1_addr, &ord->table1_size);
	read_nic_dword(priv->net_dev, ord->table2_addr, &ord->table2_size);

	/* only the low 16 bits of the table 2 size word are the size */
	ord->table2_size &= 0x0000FFFF;

	IPW_DEBUG_INFO("table 1 size: %d\n", ord->table1_size);
	IPW_DEBUG_INFO("table 2 size: %d\n", ord->table2_size);
	IPW_DEBUG_INFO("exit\n");
}

static inline void ipw2100_hw_set_gpio(struct ipw2100_priv *priv)
{
	u32 reg = 0;
	/*
	 * Set GPIO 3 writable by FW; GPIO 1 writable
	 * by driver and enable clock
	 */
	reg = (IPW_BIT_GPIO_GPIO3_MASK | IPW_BIT_GPIO_GPIO1_ENABLE |
	       IPW_BIT_GPIO_LED_OFF);
	write_register(priv->net_dev, IPW_REG_GPIO, reg);
}

/*
 * Sample the RF-kill GPIO several times and report whether the hardware
 * kill switch is engaged.  Updates STATUS_RF_KILL_HW and the wiphy
 * rfkill state as a side effect.
 *
 * Returns non-zero when the switch is active (radio must stay off).
 */
static int rf_kill_active(struct ipw2100_priv *priv)
{
#define MAX_RF_KILL_CHECKS 5
#define RF_KILL_CHECK_DELAY 40

	unsigned short value = 0;
	u32 reg = 0;
	int i;

	/* No RF-kill hardware: always report "not active" */
	if (!(priv->hw_features & HW_FEATURE_RFKILL)) {
		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
		priv->status &= ~STATUS_RF_KILL_HW;
		return 0;
	}

	/* Shift each GPIO sample into 'value'; all-zero means every
	 * sample saw the switch engaged. */
	for (i = 0; i < MAX_RF_KILL_CHECKS; i++) {
		udelay(RF_KILL_CHECK_DELAY);
		read_register(priv->net_dev, IPW_REG_GPIO, &reg);
		value = (value << 1) | ((reg & IPW_BIT_GPIO_RF_KILL) ? 0 : 1);
	}

	if (value == 0) {
		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
		priv->status |= STATUS_RF_KILL_HW;
	} else {
		wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, false);
		priv->status &= ~STATUS_RF_KILL_HW;
	}

	return (value == 0);
}

/* Query EEPROM-backed capabilities (EEPROM version, HW RF-kill support) */
static int ipw2100_get_hw_features(struct ipw2100_priv *priv)
{
	u32 addr, len;
	u32 val;

	/*
	 * EEPROM_SRAM_DB_START_ADDRESS using ordinal in ordinal table 1
	 */
	len = sizeof(addr);
	if (ipw2100_get_ordinal
	    (priv, IPW_ORD_EEPROM_SRAM_DB_BLOCK_START_ADDRESS, &addr, &len)) {
		IPW_DEBUG_INFO("failed querying ordinals at line %d\n",
			       __LINE__);
		return -EIO;
	}

	IPW_DEBUG_INFO("EEPROM address: %08X\n", addr);

	/*
	 * EEPROM version is the byte at offset 0xfd in firmware
	 * We read 4 bytes, then shift out the byte we actually want
	 */
	read_nic_dword(priv->net_dev, addr + 0xFC, &val);
	priv->eeprom_version = (val >> 24) & 0xFF;
	IPW_DEBUG_INFO("EEPROM version: %d\n", priv->eeprom_version);

	/*
	 * HW RF Kill enable is bit 0 in byte at offset 0x21 in firmware
	 *
	 * notice that the EEPROM bit is reverse polarity, i.e.
	 *      bit = 0  signifies HW RF kill switch is supported
	 *      bit = 1  signifies HW RF kill switch is NOT supported
	 */
	read_nic_dword(priv->net_dev, addr + 0x20, &val);
	if (!((val >> 24) & 0x01))
		priv->hw_features |= HW_FEATURE_RFKILL;

	IPW_DEBUG_INFO("HW RF Kill: %ssupported.\n",
		       (priv->hw_features & HW_FEATURE_RFKILL) ? "" : "not ");

	return 0;
}

/*
 * Start firmware execution after power on and initialization
 * The sequence is:
 *  1. Release ARC
 *  2. Wait for f/w initialization completes;
 */
static int ipw2100_start_adapter(struct ipw2100_priv *priv)
{
	int i;
	u32 inta, inta_mask, gpio;

	IPW_DEBUG_INFO("enter\n");
	if (priv->status & STATUS_RUNNING)
		return 0;

	/*
	 * Initialize the hw - drive adapter to DO state by setting
	 * init_done bit.
	 * Wait for clk_ready bit and Download
	 * fw & dino ucode
	 */
	if (ipw2100_download_firmware(priv)) {
		printk(KERN_ERR DRV_NAME
		       ": %s: Failed to power on the adapter.\n",
		       priv->net_dev->name);
		return -EIO;
	}

	/* Clear the Tx, Rx and Msg queues and the r/w indexes
	 * in the firmware RBD and TBD ring queue */
	ipw2100_queues_initialize(priv);

	ipw2100_hw_set_gpio(priv);

	/* TODO -- Look at disabling interrupts here to make sure none
	 * get fired during FW initialization */

	/* Release ARC - clear reset bit */
	write_register(priv->net_dev, IPW_REG_RESET_REG, 0);

	/* wait for f/w initialization complete */
	IPW_DEBUG_FW("Waiting for f/w initialization to complete...\n");
	i = 5000;
	do {
		schedule_timeout_uninterruptible(msecs_to_jiffies(40));
		/* Todo... wait for sync command ... */

		read_register(priv->net_dev, IPW_REG_INTA, &inta);

		/* check "init done" bit */
		if (inta & IPW2100_INTA_FW_INIT_DONE) {
			/* reset "init done" bit */
			write_register(priv->net_dev, IPW_REG_INTA,
				       IPW2100_INTA_FW_INIT_DONE);
			break;
		}

		/* check error conditions : we check these after the firmware
		 * check so that if there is an error, the interrupt handler
		 * will see it and the adapter will be reset */
		if (inta &
		    (IPW2100_INTA_FATAL_ERROR | IPW2100_INTA_PARITY_ERROR)) {
			/* clear error conditions */
			write_register(priv->net_dev, IPW_REG_INTA,
				       IPW2100_INTA_FATAL_ERROR |
				       IPW2100_INTA_PARITY_ERROR);
		}
	} while (--i);

	/* Clear out any pending INTAs since we aren't supposed to have
	 * interrupts enabled at this point... */
	read_register(priv->net_dev, IPW_REG_INTA, &inta);
	read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
	inta &= IPW_INTERRUPT_MASK;
	/* Clear out any pending interrupts */
	if (inta & inta_mask)
		write_register(priv->net_dev, IPW_REG_INTA, inta);

	IPW_DEBUG_FW("f/w initialization complete: %s\n",
		     i ? "SUCCESS" : "FAILED");

	if (!i) {
		printk(KERN_WARNING DRV_NAME
		       ": %s: Firmware did not initialize.\n",
		       priv->net_dev->name);
		return -EIO;
	}

	/* allow firmware to write to GPIO1 & GPIO3 */
	read_register(priv->net_dev, IPW_REG_GPIO, &gpio);
	gpio |= (IPW_BIT_GPIO_GPIO1_MASK | IPW_BIT_GPIO_GPIO3_MASK);
	write_register(priv->net_dev, IPW_REG_GPIO, gpio);

	/* Ready to receive commands */
	priv->status |= STATUS_RUNNING;

	/* The adapter has been reset; we are not associated */
	priv->status &= ~(STATUS_ASSOCIATING | STATUS_ASSOCIATED);

	IPW_DEBUG_INFO("exit\n");

	return 0;
}

/* Archive the current fatal error into the ring of recent fatal errors
 * and clear it so a new one can be latched. */
static inline void ipw2100_reset_fatalerror(struct ipw2100_priv *priv)
{
	if (!priv->fatal_error)
		return;

	priv->fatal_errors[priv->fatal_index++] = priv->fatal_error;
	priv->fatal_index %= IPW2100_ERROR_QUEUE;
	priv->fatal_error = 0;
}

/* NOTE: Our interrupt is disabled when this method is called */
static int ipw2100_power_cycle_adapter(struct ipw2100_priv *priv)
{
	u32 reg;
	int i;

	IPW_DEBUG_INFO("Power cycling the hardware.\n");

	ipw2100_hw_set_gpio(priv);

	/* Step 1. Stop Master Assert */
	write_register(priv->net_dev, IPW_REG_RESET_REG,
		       IPW_AUX_HOST_RESET_REG_STOP_MASTER);

	/* Step 2.
Wait for stop Master Assert * (not more than 50us, otherwise ret error */ i = 5; do { udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY); read_register(priv->net_dev, IPW_REG_RESET_REG, &reg); if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED) break; } while (--i); priv->status &= ~STATUS_RESET_PENDING; if (!i) { IPW_DEBUG_INFO ("exit - waited too long for master assert stop\n"); return -EIO; } write_register(priv->net_dev, IPW_REG_RESET_REG, IPW_AUX_HOST_RESET_REG_SW_RESET); /* Reset any fatal_error conditions */ ipw2100_reset_fatalerror(priv); /* At this point, the adapter is now stopped and disabled */ priv->status &= ~(STATUS_RUNNING | STATUS_ASSOCIATING | STATUS_ASSOCIATED | STATUS_ENABLED); return 0; } /* * Send the CARD_DISABLE_PHY_OFF command to the card to disable it * * After disabling, if the card was associated, a STATUS_ASSN_LOST will be sent. * * STATUS_CARD_DISABLE_NOTIFICATION will be sent regardless of * if STATUS_ASSN_LOST is sent. */ static int ipw2100_hw_phy_off(struct ipw2100_priv *priv) { #define HW_PHY_OFF_LOOP_DELAY (HZ / 5000) struct host_command cmd = { .host_command = CARD_DISABLE_PHY_OFF, .host_command_sequence = 0, .host_command_length = 0, }; int err, i; u32 val1, val2; IPW_DEBUG_HC("CARD_DISABLE_PHY_OFF\n"); /* Turn off the radio */ err = ipw2100_hw_send_command(priv, &cmd); if (err) return err; for (i = 0; i < 2500; i++) { read_nic_dword(priv->net_dev, IPW2100_CONTROL_REG, &val1); read_nic_dword(priv->net_dev, IPW2100_COMMAND, &val2); if ((val1 & IPW2100_CONTROL_PHY_OFF) && (val2 & IPW2100_COMMAND_PHY_OFF)) return 0; schedule_timeout_uninterruptible(HW_PHY_OFF_LOOP_DELAY); } return -EIO; } static int ipw2100_enable_adapter(struct ipw2100_priv *priv) { struct host_command cmd = { .host_command = HOST_COMPLETE, .host_command_sequence = 0, .host_command_length = 0 }; int err = 0; IPW_DEBUG_HC("HOST_COMPLETE\n"); if (priv->status & STATUS_ENABLED) return 0; mutex_lock(&priv->adapter_mutex); if (rf_kill_active(priv)) { 
IPW_DEBUG_HC("Command aborted due to RF kill active.\n"); goto fail_up; } err = ipw2100_hw_send_command(priv, &cmd); if (err) { IPW_DEBUG_INFO("Failed to send HOST_COMPLETE command\n"); goto fail_up; } err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_ENABLED); if (err) { IPW_DEBUG_INFO("%s: card not responding to init command.\n", priv->net_dev->name); goto fail_up; } if (priv->stop_hang_check) { priv->stop_hang_check = 0; schedule_delayed_work(&priv->hang_check, HZ / 2); } fail_up: mutex_unlock(&priv->adapter_mutex); return err; } static int ipw2100_hw_stop_adapter(struct ipw2100_priv *priv) { #define HW_POWER_DOWN_DELAY (msecs_to_jiffies(100)) struct host_command cmd = { .host_command = HOST_PRE_POWER_DOWN, .host_command_sequence = 0, .host_command_length = 0, }; int err, i; u32 reg; if (!(priv->status & STATUS_RUNNING)) return 0; priv->status |= STATUS_STOPPING; /* We can only shut down the card if the firmware is operational. So, * if we haven't reset since a fatal_error, then we can not send the * shutdown commands. */ if (!priv->fatal_error) { /* First, make sure the adapter is enabled so that the PHY_OFF * command can shut it down */ ipw2100_enable_adapter(priv); err = ipw2100_hw_phy_off(priv); if (err) printk(KERN_WARNING DRV_NAME ": Error disabling radio %d\n", err); /* * If in D0-standby mode going directly to D3 may cause a * PCI bus violation. Therefore we must change out of the D0 * state. * * Sending the PREPARE_FOR_POWER_DOWN will restrict the * hardware from going into standby mode and will transition * out of D0-standby if it is already in that state. * * STATUS_PREPARE_POWER_DOWN_COMPLETE will be sent by the * driver upon completion. Once received, the driver can * proceed to the D3 state. * * Prepare for power down command to fw. This command would * take HW out of D0-standby and prepare it for D3 state. * * Currently FW does not support event notification for this * event. Therefore, skip waiting for it. 
Just wait a fixed * 100ms */ IPW_DEBUG_HC("HOST_PRE_POWER_DOWN\n"); err = ipw2100_hw_send_command(priv, &cmd); if (err) printk(KERN_WARNING DRV_NAME ": " "%s: Power down command failed: Error %d\n", priv->net_dev->name, err); else schedule_timeout_uninterruptible(HW_POWER_DOWN_DELAY); } priv->status &= ~STATUS_ENABLED; /* * Set GPIO 3 writable by FW; GPIO 1 writable * by driver and enable clock */ ipw2100_hw_set_gpio(priv); /* * Power down adapter. Sequence: * 1. Stop master assert (RESET_REG[9]=1) * 2. Wait for stop master (RESET_REG[8]==1) * 3. S/w reset assert (RESET_REG[7] = 1) */ /* Stop master assert */ write_register(priv->net_dev, IPW_REG_RESET_REG, IPW_AUX_HOST_RESET_REG_STOP_MASTER); /* wait stop master not more than 50 usec. * Otherwise return error. */ for (i = 5; i > 0; i--) { udelay(10); /* Check master stop bit */ read_register(priv->net_dev, IPW_REG_RESET_REG, &reg); if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED) break; } if (i == 0) printk(KERN_WARNING DRV_NAME ": %s: Could now power down adapter.\n", priv->net_dev->name); /* assert s/w reset */ write_register(priv->net_dev, IPW_REG_RESET_REG, IPW_AUX_HOST_RESET_REG_SW_RESET); priv->status &= ~(STATUS_RUNNING | STATUS_STOPPING); return 0; } static int ipw2100_disable_adapter(struct ipw2100_priv *priv) { struct host_command cmd = { .host_command = CARD_DISABLE, .host_command_sequence = 0, .host_command_length = 0 }; int err = 0; IPW_DEBUG_HC("CARD_DISABLE\n"); if (!(priv->status & STATUS_ENABLED)) return 0; /* Make sure we clear the associated state */ priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); if (!priv->stop_hang_check) { priv->stop_hang_check = 1; cancel_delayed_work(&priv->hang_check); } mutex_lock(&priv->adapter_mutex); err = ipw2100_hw_send_command(priv, &cmd); if (err) { printk(KERN_WARNING DRV_NAME ": exit - failed to send CARD_DISABLE command\n"); goto fail_up; } err = ipw2100_wait_for_card_state(priv, IPW_HW_STATE_DISABLED); if (err) { printk(KERN_WARNING DRV_NAME ": 
exit - card failed to change to DISABLED\n"); goto fail_up; } IPW_DEBUG_INFO("TODO: implement scan state machine\n"); fail_up: mutex_unlock(&priv->adapter_mutex); return err; } static int ipw2100_set_scan_options(struct ipw2100_priv *priv) { struct host_command cmd = { .host_command = SET_SCAN_OPTIONS, .host_command_sequence = 0, .host_command_length = 8 }; int err; IPW_DEBUG_INFO("enter\n"); IPW_DEBUG_SCAN("setting scan options\n"); cmd.host_command_parameters[0] = 0; if (!(priv->config & CFG_ASSOCIATE)) cmd.host_command_parameters[0] |= IPW_SCAN_NOASSOCIATE; if ((priv->ieee->sec.flags & SEC_ENABLED) && priv->ieee->sec.enabled) cmd.host_command_parameters[0] |= IPW_SCAN_MIXED_CELL; if (priv->config & CFG_PASSIVE_SCAN) cmd.host_command_parameters[0] |= IPW_SCAN_PASSIVE; cmd.host_command_parameters[1] = priv->channel_mask; err = ipw2100_hw_send_command(priv, &cmd); IPW_DEBUG_HC("SET_SCAN_OPTIONS 0x%04X\n", cmd.host_command_parameters[0]); return err; } static int ipw2100_start_scan(struct ipw2100_priv *priv) { struct host_command cmd = { .host_command = BROADCAST_SCAN, .host_command_sequence = 0, .host_command_length = 4 }; int err; IPW_DEBUG_HC("START_SCAN\n"); cmd.host_command_parameters[0] = 0; /* No scanning if in monitor mode */ if (priv->ieee->iw_mode == IW_MODE_MONITOR) return 1; if (priv->status & STATUS_SCANNING) { IPW_DEBUG_SCAN("Scan requested while already in scan...\n"); return 0; } IPW_DEBUG_INFO("enter\n"); /* Not clearing here; doing so makes iwlist always return nothing... * * We should modify the table logic to use aging tables vs. clearing * the table on each scan start. 
	 */
	IPW_DEBUG_SCAN("starting scan\n");

	priv->status |= STATUS_SCANNING;
	err = ipw2100_hw_send_command(priv, &cmd);
	if (err)
		priv->status &= ~STATUS_SCANNING;

	IPW_DEBUG_INFO("exit\n");

	return err;
}

/* Single restricted 2.4 GHz geography: channels 1-14 */
static const struct libipw_geo ipw_geos[] = {
	{			/* Restricted */
	 "---",
	 .bg_channels = 14,
	 .bg = {{2412, 1}, {2417, 2}, {2422, 3},
		{2427, 4}, {2432, 5}, {2437, 6},
		{2442, 7}, {2447, 8}, {2452, 9},
		{2457, 10}, {2462, 11}, {2467, 12},
		{2472, 13}, {2484, 14}},
	 },
};

/*
 * Bring the adapter fully up: power cycle (if needed), firmware load,
 * ordinal/feature discovery, geo setup, pre-HOST_COMPLETE configuration,
 * then enable and start scanning.  With 'deferred' set (or RF-kill
 * active) the final enable/scan steps are skipped.
 *
 * Returns 0 on success, 1 on any failure.
 */
static int ipw2100_up(struct ipw2100_priv *priv, int deferred)
{
	unsigned long flags;
	int rc = 0;
	u32 lock;
	u32 ord_len = sizeof(lock);

	/* Age scan list entries found before suspend */
	if (priv->suspend_time) {
		libipw_networks_age(priv->ieee, priv->suspend_time);
		priv->suspend_time = 0;
	}

	/* Quiet if manually disabled. */
	if (priv->status & STATUS_RF_KILL_SW) {
		IPW_DEBUG_INFO("%s: Radio is disabled by Manual Disable "
			       "switch\n", priv->net_dev->name);
		return 0;
	}

	/* the ipw2100 hardware really doesn't want power management delays
	 * longer than 175usec
	 */
	pm_qos_update_request(&ipw2100_pm_qos_req, 175);

	/* If the interrupt is enabled, turn it off... */
	spin_lock_irqsave(&priv->low_lock, flags);
	ipw2100_disable_interrupts(priv);

	/* Reset any fatal_error conditions */
	ipw2100_reset_fatalerror(priv);
	spin_unlock_irqrestore(&priv->low_lock, flags);

	if (priv->status & STATUS_POWERED ||
	    (priv->status & STATUS_RESET_PENDING)) {
		/* Power cycle the card ... */
		if (ipw2100_power_cycle_adapter(priv)) {
			printk(KERN_WARNING DRV_NAME
			       ": %s: Could not cycle adapter.\n",
			       priv->net_dev->name);
			rc = 1;
			goto exit;
		}
	} else
		priv->status |= STATUS_POWERED;

	/* Load the firmware, start the clocks, etc. */
	if (ipw2100_start_adapter(priv)) {
		printk(KERN_ERR DRV_NAME
		       ": %s: Failed to start the firmware.\n",
		       priv->net_dev->name);
		rc = 1;
		goto exit;
	}

	ipw2100_initialize_ordinals(priv);

	/* Determine capabilities of this particular HW configuration */
	if (ipw2100_get_hw_features(priv)) {
		printk(KERN_ERR DRV_NAME
		       ": %s: Failed to determine HW features.\n",
		       priv->net_dev->name);
		rc = 1;
		goto exit;
	}

	/* Initialize the geo */
	libipw_set_geo(priv->ieee, &ipw_geos[0]);
	priv->ieee->freq_band = LIBIPW_24GHZ_BAND;

	lock = LOCK_NONE;
	if (ipw2100_set_ordinal(priv, IPW_ORD_PERS_DB_LOCK, &lock, &ord_len)) {
		printk(KERN_ERR DRV_NAME
		       ": %s: Failed to clear ordinal lock.\n",
		       priv->net_dev->name);
		rc = 1;
		goto exit;
	}

	priv->status &= ~STATUS_SCANNING;

	if (rf_kill_active(priv)) {
		printk(KERN_INFO "%s: Radio is disabled by RF switch.\n",
		       priv->net_dev->name);

		if (priv->stop_rf_kill) {
			priv->stop_rf_kill = 0;
			schedule_delayed_work(&priv->rf_kill,
					      round_jiffies_relative(HZ));
		}

		deferred = 1;
	}

	/* Turn on the interrupt so that commands can be processed */
	ipw2100_enable_interrupts(priv);

	/* Send all of the commands that must be sent prior to
	 * HOST_COMPLETE */
	if (ipw2100_adapter_setup(priv)) {
		printk(KERN_ERR DRV_NAME ": %s: Failed to start the card.\n",
		       priv->net_dev->name);
		rc = 1;
		goto exit;
	}

	if (!deferred) {
		/* Enable the adapter - sends HOST_COMPLETE */
		if (ipw2100_enable_adapter(priv)) {
			printk(KERN_ERR DRV_NAME ": "
			       "%s: failed in call to enable adapter.\n",
			       priv->net_dev->name);
			ipw2100_hw_stop_adapter(priv);
			rc = 1;
			goto exit;
		}

		/* Start a scan . . .
		 */
		ipw2100_set_scan_options(priv);
		ipw2100_start_scan(priv);
	}

exit:
	return rc;
}

/*
 * Bring the interface down: cancel workers, stop the adapter (with
 * interrupts enabled so firmware commands still complete), then mask
 * interrupts, restore PM QoS, notify the supplicant and stop the queue.
 */
static void ipw2100_down(struct ipw2100_priv *priv)
{
	unsigned long flags;
	union iwreq_data wrqu = {
		.ap_addr = {
			    .sa_family = ARPHRD_ETHER}
	};
	int associated = priv->status & STATUS_ASSOCIATED;

	/* Kill the RF switch timer */
	if (!priv->stop_rf_kill) {
		priv->stop_rf_kill = 1;
		cancel_delayed_work(&priv->rf_kill);
	}

	/* Kill the firmware hang check timer */
	if (!priv->stop_hang_check) {
		priv->stop_hang_check = 1;
		cancel_delayed_work(&priv->hang_check);
	}

	/* Kill any pending resets */
	if (priv->status & STATUS_RESET_PENDING)
		cancel_delayed_work(&priv->reset_work);

	/* Make sure the interrupt is on so that FW commands will be
	 * processed correctly */
	spin_lock_irqsave(&priv->low_lock, flags);
	ipw2100_enable_interrupts(priv);
	spin_unlock_irqrestore(&priv->low_lock, flags);

	if (ipw2100_hw_stop_adapter(priv))
		printk(KERN_ERR DRV_NAME ": %s: Error stopping adapter.\n",
		       priv->net_dev->name);

	/* Do not disable the interrupt until _after_ we disable
	 * the adaptor. Otherwise the CARD_DISABLE command will never
	 * be ack'd by the firmware */
	spin_lock_irqsave(&priv->low_lock, flags);
	ipw2100_disable_interrupts(priv);
	spin_unlock_irqrestore(&priv->low_lock, flags);

	pm_qos_update_request(&ipw2100_pm_qos_req, PM_QOS_DEFAULT_VALUE);

	/* We have to signal any supplicant if we are disassociating */
	if (associated)
		wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL);

	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);
	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);
}

/*
 * Register the cfg80211 wireless_dev: copy the permanent MAC, translate
 * the libipw geo into an ieee80211 band (allocated here, 2.4 GHz only),
 * set cipher suites, and register the wiphy.
 *
 * Returns 0, -ENOMEM on channel allocation failure (after tearing the
 * interface down), or -EIO if wiphy_register() fails.
 */
static int ipw2100_wdev_init(struct net_device *dev)
{
	struct ipw2100_priv *priv = libipw_priv(dev);
	const struct libipw_geo *geo = libipw_get_geo(priv->ieee);
	struct wireless_dev *wdev = &priv->ieee->wdev;
	int i;

	memcpy(wdev->wiphy->perm_addr, priv->mac_addr, ETH_ALEN);

	/* fill-out priv->ieee->bg_band */
	if (geo->bg_channels) {
		struct ieee80211_supported_band *bg_band = &priv->ieee->bg_band;

		bg_band->band = IEEE80211_BAND_2GHZ;
		bg_band->n_channels = geo->bg_channels;
		bg_band->channels = kcalloc(geo->bg_channels,
					    sizeof(struct ieee80211_channel),
					    GFP_KERNEL);
		if (!bg_band->channels) {
			ipw2100_down(priv);
			return -ENOMEM;
		}
		/* translate geo->bg to bg_band.channels */
		for (i = 0; i < geo->bg_channels; i++) {
			bg_band->channels[i].band = IEEE80211_BAND_2GHZ;
			bg_band->channels[i].center_freq = geo->bg[i].freq;
			bg_band->channels[i].hw_value = geo->bg[i].channel;
			bg_band->channels[i].max_power = geo->bg[i].max_power;
			if (geo->bg[i].flags & LIBIPW_CH_PASSIVE_ONLY)
				bg_band->channels[i].flags |=
				    IEEE80211_CHAN_PASSIVE_SCAN;
			if (geo->bg[i].flags & LIBIPW_CH_NO_IBSS)
				bg_band->channels[i].flags |=
				    IEEE80211_CHAN_NO_IBSS;
			if (geo->bg[i].flags & LIBIPW_CH_RADAR_DETECT)
				bg_band->channels[i].flags |=
				    IEEE80211_CHAN_RADAR;
			/* No equivalent for LIBIPW_CH_80211H_RULES,
			   LIBIPW_CH_UNIFORM_SPREADING, or
			   LIBIPW_CH_B_ONLY...
*/ } /* point at bitrate info */ bg_band->bitrates = ipw2100_bg_rates; bg_band->n_bitrates = RATE_COUNT; wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = bg_band; } wdev->wiphy->cipher_suites = ipw_cipher_suites; wdev->wiphy->n_cipher_suites = ARRAY_SIZE(ipw_cipher_suites); set_wiphy_dev(wdev->wiphy, &priv->pci_dev->dev); if (wiphy_register(wdev->wiphy)) return -EIO; return 0; } static void ipw2100_reset_adapter(struct work_struct *work) { struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv, reset_work.work); unsigned long flags; union iwreq_data wrqu = { .ap_addr = { .sa_family = ARPHRD_ETHER} }; int associated = priv->status & STATUS_ASSOCIATED; spin_lock_irqsave(&priv->low_lock, flags); IPW_DEBUG_INFO(": %s: Restarting adapter.\n", priv->net_dev->name); priv->resets++; priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING); priv->status |= STATUS_SECURITY_UPDATED; /* Force a power cycle even if interface hasn't been opened * yet */ cancel_delayed_work(&priv->reset_work); priv->status |= STATUS_RESET_PENDING; spin_unlock_irqrestore(&priv->low_lock, flags); mutex_lock(&priv->action_mutex); /* stop timed checks so that they don't interfere with reset */ priv->stop_hang_check = 1; cancel_delayed_work(&priv->hang_check); /* We have to signal any supplicant if we are disassociating */ if (associated) wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); ipw2100_up(priv, 0); mutex_unlock(&priv->action_mutex); } static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) { #define MAC_ASSOCIATION_READ_DELAY (HZ) int ret; unsigned int len, essid_len; char essid[IW_ESSID_MAX_SIZE]; u32 txrate; u32 chan; char *txratename; u8 bssid[ETH_ALEN]; DECLARE_SSID_BUF(ssid); /* * TBD: BSSID is usually 00:00:00:00:00:00 here and not * an actual MAC of the AP. Seems like FW sets this * address too late. 
Read it later and expose through * /proc or schedule a later task to query and update */ essid_len = IW_ESSID_MAX_SIZE; ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID, essid, &essid_len); if (ret) { IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); return; } len = sizeof(u32); ret = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &txrate, &len); if (ret) { IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); return; } len = sizeof(u32); ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &len); if (ret) { IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); return; } len = ETH_ALEN; ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid, &len); if (ret) { IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); return; } memcpy(priv->ieee->bssid, bssid, ETH_ALEN); switch (txrate) { case TX_RATE_1_MBIT: txratename = "1Mbps"; break; case TX_RATE_2_MBIT: txratename = "2Mbsp"; break; case TX_RATE_5_5_MBIT: txratename = "5.5Mbps"; break; case TX_RATE_11_MBIT: txratename = "11Mbps"; break; default: IPW_DEBUG_INFO("Unknown rate: %d\n", txrate); txratename = "unknown rate"; break; } IPW_DEBUG_INFO("%s: Associated with '%s' at %s, channel %d (BSSID=%pM)\n", priv->net_dev->name, print_ssid(ssid, essid, essid_len), txratename, chan, bssid); /* now we copy read ssid into dev */ if (!(priv->config & CFG_STATIC_ESSID)) { priv->essid_len = min((u8) essid_len, (u8) IW_ESSID_MAX_SIZE); memcpy(priv->essid, essid, priv->essid_len); } priv->channel = chan; memcpy(priv->bssid, bssid, ETH_ALEN); priv->status |= STATUS_ASSOCIATING; priv->connect_start = get_seconds(); schedule_delayed_work(&priv->wx_event_work, HZ / 10); } static int ipw2100_set_essid(struct ipw2100_priv *priv, char *essid, int length, int batch_mode) { int ssid_len = min(length, IW_ESSID_MAX_SIZE); struct host_command cmd = { .host_command = SSID, .host_command_sequence = 0, .host_command_length = ssid_len }; int err; 
	DECLARE_SSID_BUF(ssid);

	IPW_DEBUG_HC("SSID: '%s'\n", print_ssid(ssid, essid, ssid_len));

	if (ssid_len)
		memcpy(cmd.host_command_parameters, essid, ssid_len);

	if (!batch_mode) {
		err = ipw2100_disable_adapter(priv);
		if (err)
			return err;
	}

	/* Bug in FW currently doesn't honor bit 0 in SET_SCAN_OPTIONS to
	 * disable auto association -- so we cheat by setting a bogus SSID */
	if (!ssid_len && !(priv->config & CFG_ASSOCIATE)) {
		int i;
		u8 *bogus = (u8 *) cmd.host_command_parameters;
		for (i = 0; i < IW_ESSID_MAX_SIZE; i++)
			bogus[i] = 0x18 + i;
		cmd.host_command_length = IW_ESSID_MAX_SIZE;
	}

	/* NOTE: We always send the SSID command even if the provided ESSID is
	 * the same as what we currently think is set. */

	err = ipw2100_hw_send_command(priv, &cmd);
	if (!err) {
		memset(priv->essid + ssid_len, 0, IW_ESSID_MAX_SIZE - ssid_len);
		memcpy(priv->essid, essid, ssid_len);
		priv->essid_len = ssid_len;
	}

	if (!batch_mode) {
		if (ipw2100_enable_adapter(priv))
			err = -EIO;
	}

	return err;
}

/*
 * Status-change handler for IPW_STATE_ASSN_LOST: clear association
 * state, stop TX, and schedule the deferred security / wx-event work
 * (unless the card is shutting itself down).
 */
static void isr_indicate_association_lost(struct ipw2100_priv *priv, u32 status)
{
	DECLARE_SSID_BUF(ssid);

	IPW_DEBUG(IPW_DL_NOTIF | IPW_DL_STATE | IPW_DL_ASSOC,
		  "disassociated: '%s' %pM\n",
		  print_ssid(ssid, priv->essid, priv->essid_len),
		  priv->bssid);

	priv->status &= ~(STATUS_ASSOCIATED | STATUS_ASSOCIATING);

	if (priv->status & STATUS_STOPPING) {
		IPW_DEBUG_INFO("Card is stopping itself, discard ASSN_LOST.\n");
		return;
	}

	memset(priv->bssid, 0, ETH_ALEN);
	memset(priv->ieee->bssid, 0, ETH_ALEN);

	netif_carrier_off(priv->net_dev);
	netif_stop_queue(priv->net_dev);

	if (!(priv->status & STATUS_RUNNING))
		return;

	if (priv->status & STATUS_SECURITY_UPDATED)
		schedule_delayed_work(&priv->security_work, 0);

	schedule_delayed_work(&priv->wx_event_work, 0);
}

/* Status-change handler for IPW_STATE_RF_KILL: latch HW RF-kill and
 * (re)start the RF-kill polling worker. */
static void isr_indicate_rf_kill(struct ipw2100_priv *priv, u32 status)
{
	IPW_DEBUG_INFO("%s: RF Kill state changed to radio OFF.\n",
		       priv->net_dev->name);

	/* RF_KILL is now enabled (else we wouldn't be here) */
	wiphy_rfkill_set_hw_state(priv->ieee->wdev.wiphy, true);
	priv->status |= STATUS_RF_KILL_HW;

	/* Make sure the RF Kill check timer is running */
	priv->stop_rf_kill = 0;
	mod_delayed_work(system_wq, &priv->rf_kill,
			 round_jiffies_relative(HZ));
}

/* Delayed-work handler: emit an empty SIOCGIWSCAN event to userspace */
static void ipw2100_scan_event(struct work_struct *work)
{
	struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv,
						 scan_event.work);
	union iwreq_data wrqu;

	wrqu.data.length = 0;
	wrqu.data.flags = 0;
	wireless_send_event(priv->net_dev, SIOCGIWSCAN, &wrqu, NULL);
}

/* Status-change handler for IPW_STATE_SCAN_COMPLETE */
static void isr_scan_complete(struct ipw2100_priv *priv, u32 status)
{
	IPW_DEBUG_SCAN("scan complete\n");
	/* Age the scan results... */
	priv->ieee->scans++;
	priv->status &= ~STATUS_SCANNING;

	/* Only userspace-requested scan completion events go out immediately */
	if (!priv->user_requested_scan) {
		/* Unrequested (background) scan: delay the event to batch
		 * rapid completions together. */
		schedule_delayed_work(&priv->scan_event,
				      round_jiffies_relative
				      (msecs_to_jiffies(4000)));
	} else {
		priv->user_requested_scan = 0;
		mod_delayed_work(system_wq, &priv->scan_event, 0);
	}
}

#ifdef CONFIG_IPW2100_DEBUG
#define IPW2100_HANDLER(v, f) { v, f, # v }
struct ipw2100_status_indicator {
	int status;
	void (*cb) (struct ipw2100_priv * priv, u32 status);
	char *name;
};
#else
#define IPW2100_HANDLER(v, f) { v, f }
struct ipw2100_status_indicator {
	int status;
	void (*cb) (struct ipw2100_priv * priv, u32 status);
};
#endif				/* CONFIG_IPW2100_DEBUG */

/* Status-change handler for IPW_STATE_SCANNING */
static void isr_indicate_scanning(struct ipw2100_priv *priv, u32 status)
{
	IPW_DEBUG_SCAN("Scanning...\n");
	priv->status |= STATUS_SCANNING;
}

/* Dispatch table for firmware status-change notifications; terminated
 * by a -1 sentinel entry. */
static const struct ipw2100_status_indicator status_handlers[] = {
	IPW2100_HANDLER(IPW_STATE_INITIALIZED, NULL),
	IPW2100_HANDLER(IPW_STATE_COUNTRY_FOUND, NULL),
	IPW2100_HANDLER(IPW_STATE_ASSOCIATED, isr_indicate_associated),
	IPW2100_HANDLER(IPW_STATE_ASSN_LOST, isr_indicate_association_lost),
	IPW2100_HANDLER(IPW_STATE_ASSN_CHANGED, NULL),
	IPW2100_HANDLER(IPW_STATE_SCAN_COMPLETE, isr_scan_complete),
	IPW2100_HANDLER(IPW_STATE_ENTERED_PSP, NULL),
	IPW2100_HANDLER(IPW_STATE_LEFT_PSP, NULL),
	IPW2100_HANDLER(IPW_STATE_RF_KILL,
isr_indicate_rf_kill), IPW2100_HANDLER(IPW_STATE_DISABLED, NULL), IPW2100_HANDLER(IPW_STATE_POWER_DOWN, NULL), IPW2100_HANDLER(IPW_STATE_SCANNING, isr_indicate_scanning), IPW2100_HANDLER(-1, NULL) }; static void isr_status_change(struct ipw2100_priv *priv, int status) { int i; if (status == IPW_STATE_SCANNING && priv->status & STATUS_ASSOCIATED && !(priv->status & STATUS_SCANNING)) { IPW_DEBUG_INFO("Scan detected while associated, with " "no scan request. Restarting firmware.\n"); /* Wake up any sleeping jobs */ schedule_reset(priv); } for (i = 0; status_handlers[i].status != -1; i++) { if (status == status_handlers[i].status) { IPW_DEBUG_NOTIF("Status change: %s\n", status_handlers[i].name); if (status_handlers[i].cb) status_handlers[i].cb(priv, status); priv->wstats.status = status; return; } } IPW_DEBUG_NOTIF("unknown status received: %04x\n", status); } static void isr_rx_complete_command(struct ipw2100_priv *priv, struct ipw2100_cmd_header *cmd) { #ifdef CONFIG_IPW2100_DEBUG if (cmd->host_command_reg < ARRAY_SIZE(command_types)) { IPW_DEBUG_HC("Command completed '%s (%d)'\n", command_types[cmd->host_command_reg], cmd->host_command_reg); } #endif if (cmd->host_command_reg == HOST_COMPLETE) priv->status |= STATUS_ENABLED; if (cmd->host_command_reg == CARD_DISABLE) priv->status &= ~STATUS_ENABLED; priv->status &= ~STATUS_CMD_ACTIVE; wake_up_interruptible(&priv->wait_command_queue); } #ifdef CONFIG_IPW2100_DEBUG static const char *frame_types[] = { "COMMAND_STATUS_VAL", "STATUS_CHANGE_VAL", "P80211_DATA_VAL", "P8023_DATA_VAL", "HOST_NOTIFICATION_VAL" }; #endif static int ipw2100_alloc_skb(struct ipw2100_priv *priv, struct ipw2100_rx_packet *packet) { packet->skb = dev_alloc_skb(sizeof(struct ipw2100_rx)); if (!packet->skb) return -ENOMEM; packet->rxp = (struct ipw2100_rx *)packet->skb->data; packet->dma_addr = pci_map_single(priv->pci_dev, packet->skb->data, sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); /* NOTE: pci_map_single does not return an error code, and 
	   0 is a valid
	 * dma_addr */

	return 0;
}

/* Return codes / modes for ipw2100_match_buf().  SEARCH_SUCCESS is a
 * threshold: any value below it is a real match offset. */
#define SEARCH_ERROR   0xffffffff
#define SEARCH_FAIL    0xfffffffe
#define SEARCH_SUCCESS 0xfffffff0
#define SEARCH_DISCARD 0
#define SEARCH_SNAPSHOT 1

/* Map a NIC offset into the 4 KiB snapshot page that holds it. */
#define SNAPSHOT_ADDR(ofs) (priv->snapshot[((ofs) >> 12) & 0xff] + ((ofs) & 0xfff))

/* Free all 0x30 snapshot pages; snapshot[0] doubles as the "allocated" flag. */
static void ipw2100_snapshot_free(struct ipw2100_priv *priv)
{
	int i;
	if (!priv->snapshot[0])
		return;
	for (i = 0; i < 0x30; i++)
		kfree(priv->snapshot[i]);
	priv->snapshot[0] = NULL;
}

#ifdef IPW2100_DEBUG_C3
/* Allocate 0x30 pages of 0x1000 bytes to snapshot the first 0x30000 bytes
 * of NIC memory.  Returns 1 on success, 0 on allocation failure (all
 * partially allocated pages are released). */
static int ipw2100_snapshot_alloc(struct ipw2100_priv *priv)
{
	int i;
	if (priv->snapshot[0])
		return 1;
	for (i = 0; i < 0x30; i++) {
		priv->snapshot[i] = kmalloc(0x1000, GFP_ATOMIC);
		if (!priv->snapshot[i]) {
			IPW_DEBUG_INFO("%s: Error allocating snapshot "
				       "buffer %d\n", priv->net_dev->name, i);
			while (i > 0)
				kfree(priv->snapshot[--i]);
			priv->snapshot[0] = NULL;
			return 0;
		}
	}
	return 1;
}

/* Scan the first 0x30000 bytes of NIC memory for the byte pattern in
 * in_buf[0..len).  In SEARCH_SNAPSHOT mode each dword read is also stored
 * into the snapshot pages (falls back to SEARCH_DISCARD if the snapshot
 * can't be allocated).  Returns the NIC offset where the match begins, or
 * SEARCH_FAIL if no match was found.
 * NOTE(review): on mismatch the inner loop resets s but still advances d
 * via the next iteration only when bytes matched — the matcher only finds
 * patterns whose start is re-tried from the current dword; presumably good
 * enough for debug use. */
static u32 ipw2100_match_buf(struct ipw2100_priv *priv, u8 * in_buf,
			     size_t len, int mode)
{
	u32 i, j;
	u32 tmp;
	u8 *s, *d;
	u32 ret;

	s = in_buf;
	if (mode == SEARCH_SNAPSHOT) {
		if (!ipw2100_snapshot_alloc(priv))
			mode = SEARCH_DISCARD;
	}

	for (ret = SEARCH_FAIL, i = 0; i < 0x30000; i += 4) {
		read_nic_dword(priv->net_dev, i, &tmp);
		if (mode == SEARCH_SNAPSHOT)
			*(u32 *) SNAPSHOT_ADDR(i) = tmp;
		if (ret == SEARCH_FAIL) {
			d = (u8 *) & tmp;
			for (j = 0; j < 4; j++) {
				if (*s != *d) {
					s = in_buf;
					continue;
				}

				s++;
				d++;

				if ((s - in_buf) == len)
					ret = (i + j) - len + 1;
			}
		} else if (mode == SEARCH_DISCARD)
			return ret;
	}

	return ret;
}
#endif

/*
 *
 * 0) Disconnect the SKB from the firmware (just unmap)
 * 1) Pack the ETH header into the SKB
 * 2) Pass the SKB to the network stack
 *
 * When packet is provided by the firmware, it contains the following:
 *
 * .  libipw_hdr
 * .
libipw_snap_hdr * * The size of the constructed ethernet * */ #ifdef IPW2100_RX_DEBUG static u8 packet_data[IPW_RX_NIC_BUFFER_LENGTH]; #endif static void ipw2100_corruption_detected(struct ipw2100_priv *priv, int i) { #ifdef IPW2100_DEBUG_C3 struct ipw2100_status *status = &priv->status_queue.drv[i]; u32 match, reg; int j; #endif IPW_DEBUG_INFO(": PCI latency error detected at 0x%04zX.\n", i * sizeof(struct ipw2100_status)); #ifdef IPW2100_DEBUG_C3 /* Halt the firmware so we can get a good image */ write_register(priv->net_dev, IPW_REG_RESET_REG, IPW_AUX_HOST_RESET_REG_STOP_MASTER); j = 5; do { udelay(IPW_WAIT_RESET_MASTER_ASSERT_COMPLETE_DELAY); read_register(priv->net_dev, IPW_REG_RESET_REG, &reg); if (reg & IPW_AUX_HOST_RESET_REG_MASTER_DISABLED) break; } while (j--); match = ipw2100_match_buf(priv, (u8 *) status, sizeof(struct ipw2100_status), SEARCH_SNAPSHOT); if (match < SEARCH_SUCCESS) IPW_DEBUG_INFO("%s: DMA status match in Firmware at " "offset 0x%06X, length %d:\n", priv->net_dev->name, match, sizeof(struct ipw2100_status)); else IPW_DEBUG_INFO("%s: No DMA status match in " "Firmware.\n", priv->net_dev->name); printk_buf((u8 *) priv->status_queue.drv, sizeof(struct ipw2100_status) * RX_QUEUE_LENGTH); #endif priv->fatal_error = IPW2100_ERR_C3_CORRUPTION; priv->net_dev->stats.rx_errors++; schedule_reset(priv); } static void isr_rx(struct ipw2100_priv *priv, int i, struct libipw_rx_stats *stats) { struct net_device *dev = priv->net_dev; struct ipw2100_status *status = &priv->status_queue.drv[i]; struct ipw2100_rx_packet *packet = &priv->rx_buffers[i]; IPW_DEBUG_RX("Handler...\n"); if (unlikely(status->frame_size > skb_tailroom(packet->skb))) { IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!" 
" Dropping.\n", dev->name, status->frame_size, skb_tailroom(packet->skb)); dev->stats.rx_errors++; return; } if (unlikely(!netif_running(dev))) { dev->stats.rx_errors++; priv->wstats.discard.misc++; IPW_DEBUG_DROP("Dropping packet while interface is not up.\n"); return; } if (unlikely(priv->ieee->iw_mode != IW_MODE_MONITOR && !(priv->status & STATUS_ASSOCIATED))) { IPW_DEBUG_DROP("Dropping packet while not associated.\n"); priv->wstats.discard.misc++; return; } pci_unmap_single(priv->pci_dev, packet->dma_addr, sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); skb_put(packet->skb, status->frame_size); #ifdef IPW2100_RX_DEBUG /* Make a copy of the frame so we can dump it to the logs if * libipw_rx fails */ skb_copy_from_linear_data(packet->skb, packet_data, min_t(u32, status->frame_size, IPW_RX_NIC_BUFFER_LENGTH)); #endif if (!libipw_rx(priv->ieee, packet->skb, stats)) { #ifdef IPW2100_RX_DEBUG IPW_DEBUG_DROP("%s: Non consumed packet:\n", dev->name); printk_buf(IPW_DL_DROP, packet_data, status->frame_size); #endif dev->stats.rx_errors++; /* libipw_rx failed, so it didn't free the SKB */ dev_kfree_skb_any(packet->skb); packet->skb = NULL; } /* We need to allocate a new SKB and attach it to the RDB. 
	 */
	if (unlikely(ipw2100_alloc_skb(priv, packet))) {
		printk(KERN_WARNING DRV_NAME ": "
		       "%s: Unable to allocate SKB onto RBD ring - disabling "
		       "adapter.\n", dev->name);
		/* TODO: schedule adapter shutdown */
		IPW_DEBUG_INFO("TODO: Shutdown adapter...\n");
	}

	/* Update the RDB entry */
	priv->rx_queue.drv[i].host_addr = packet->dma_addr;
}

#ifdef CONFIG_IPW2100_MONITOR

/* Monitor-mode receive path for RX ring slot i: prepends a minimal
 * radiotap header (version + DBM_ANTSIGNAL) in front of the raw 802.11
 * frame before handing it to libipw_rx(), then re-arms the ring slot. */
static void isr_rx_monitor(struct ipw2100_priv *priv, int i,
			   struct libipw_rx_stats *stats)
{
	struct net_device *dev = priv->net_dev;
	struct ipw2100_status *status = &priv->status_queue.drv[i];
	struct ipw2100_rx_packet *packet = &priv->rx_buffers[i];

	/* Magic struct that slots into the radiotap header -- no reason
	 * to build this manually element by element, we can write it much
	 * more efficiently than we can parse it. ORDER MATTERS HERE */
	struct ipw_rt_hdr {
		struct ieee80211_radiotap_header rt_hdr;
		s8 rt_dbmsignal;	/* signal in dbM, kluged to signed */
	} *ipw_rt;

	IPW_DEBUG_RX("Handler...\n");

	/* frame plus the radiotap header must fit in the skb */
	if (unlikely(status->frame_size > skb_tailroom(packet->skb) -
		     sizeof(struct ipw_rt_hdr))) {
		IPW_DEBUG_INFO("%s: frame_size (%u) > skb_tailroom (%u)!"
			       " Dropping.\n",
			       dev->name,
			       status->frame_size, skb_tailroom(packet->skb));
		dev->stats.rx_errors++;
		return;
	}

	if (unlikely(!netif_running(dev))) {
		dev->stats.rx_errors++;
		priv->wstats.discard.misc++;
		IPW_DEBUG_DROP("Dropping packet while interface is not up.\n");
		return;
	}

	/* optionally drop frames the hardware flagged with a CRC error */
	if (unlikely(priv->config & CFG_CRC_CHECK &&
		     status->flags & IPW_STATUS_FLAG_CRC_ERROR)) {
		IPW_DEBUG_RX("CRC error in packet.  Dropping.\n");
		dev->stats.rx_errors++;
		return;
	}

	pci_unmap_single(priv->pci_dev, packet->dma_addr,
			 sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE);
	/* shift the frame up to make room for the radiotap header */
	memmove(packet->skb->data + sizeof(struct ipw_rt_hdr),
		packet->skb->data, status->frame_size);

	ipw_rt = (struct ipw_rt_hdr *) packet->skb->data;

	ipw_rt->rt_hdr.it_version = PKTHDR_RADIOTAP_VERSION;
	ipw_rt->rt_hdr.it_pad = 0;	/* always good to zero */
	ipw_rt->rt_hdr.it_len = cpu_to_le16(sizeof(struct ipw_rt_hdr));	/* total hdr+data */
	ipw_rt->rt_hdr.it_present = cpu_to_le32(1 << IEEE80211_RADIOTAP_DBM_ANTSIGNAL);

	ipw_rt->rt_dbmsignal = status->rssi + IPW2100_RSSI_TO_DBM;

	skb_put(packet->skb, status->frame_size + sizeof(struct ipw_rt_hdr));

	if (!libipw_rx(priv->ieee, packet->skb, stats)) {
		dev->stats.rx_errors++;

		/* libipw_rx failed, so it didn't free the SKB */
		dev_kfree_skb_any(packet->skb);
		packet->skb = NULL;
	}

	/* We need to allocate a new SKB and attach it to the RDB. */
	if (unlikely(ipw2100_alloc_skb(priv, packet))) {
		IPW_DEBUG_WARNING(
			"%s: Unable to allocate SKB onto RBD ring - disabling "
			"adapter.\n", dev->name);
		/* TODO: schedule adapter shutdown */
		IPW_DEBUG_INFO("TODO: Shutdown adapter...\n");
	}

	/* Update the RDB entry */
	priv->rx_queue.drv[i].host_addr = packet->dma_addr;
}

#endif

/* Sanity-check RX status entry i against the payload it claims to carry.
 * Returns non-zero (corrupt) when the reported frame_size is inconsistent
 * with the frame type; returns 0 when the entry looks valid. */
static int ipw2100_corruption_check(struct ipw2100_priv *priv, int i)
{
	struct ipw2100_status *status = &priv->status_queue.drv[i];
	struct ipw2100_rx *u = priv->rx_buffers[i].rxp;
	u16 frame_type = status->status_fields & STATUS_TYPE_MASK;

	switch (frame_type) {
	case COMMAND_STATUS_VAL:
		return (status->frame_size != sizeof(u->rx_data.command));
	case STATUS_CHANGE_VAL:
		return (status->frame_size != sizeof(u->rx_data.status));
	case HOST_NOTIFICATION_VAL:
		return (status->frame_size < sizeof(u->rx_data.notification));
	case P80211_DATA_VAL:
	case P8023_DATA_VAL:
#ifdef CONFIG_IPW2100_MONITOR
		return 0;
#else
		switch (WLAN_FC_GET_TYPE(le16_to_cpu(u->rx_data.header.frame_ctl))) {
		case IEEE80211_FTYPE_MGMT:
		case IEEE80211_FTYPE_CTL:
			return 0;
		case IEEE80211_FTYPE_DATA:
			return (status->frame_size >
				IPW_MAX_802_11_PAYLOAD_LENGTH);
		}
#endif
	}

	/* unknown frame type: treat as corrupt */
	return 1;
}

/*
 * ipw2100 interrupts are disabled at this point, and the ISR
 * is the only code that calls this method.  So, we do not need
 * to play with any locks.
 *
 * RX Queue works as follows:
 *
 * Read index - firmware places packet in entry identified by the
 *              Read index and advances Read index.  In this manner,
 *              Read index will always point to the next packet to
 *              be filled--but not yet valid.
 *
 * Write index - driver fills this entry with an unused RBD entry.
 *               This entry has not filled by the firmware yet.
 *
 * In between the W and R indexes are the RBDs that have been received
 * but not yet processed.
 *
 * The process of handling packets will start at WRITE + 1 and advance
 * until it reaches the READ index.
 *
 * The WRITE index is cached in the variable 'priv->rx_queue.next'.
 *
 */
static void __ipw2100_rx_process(struct ipw2100_priv *priv)
{
	struct ipw2100_bd_queue *rxq = &priv->rx_queue;
	struct ipw2100_status_queue *sq = &priv->status_queue;
	struct ipw2100_rx_packet *packet;
	u16 frame_type;
	u32 r, w, i, s;
	struct ipw2100_rx *u;
	struct libipw_rx_stats stats = {
		.mac_time = jiffies,
	};

	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_READ_INDEX, &r);
	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, &w);

	if (r >= rxq->entries) {
		IPW_DEBUG_RX("exit - bad read index\n");
		return;
	}

	/* start one past the cached write index, walk up to the read index */
	i = (rxq->next + 1) % rxq->entries;
	s = i;
	while (i != r) {
		/* IPW_DEBUG_RX("r = %d : w = %d : processing = %d\n",
		   r, rxq->next, i); */

		packet = &priv->rx_buffers[i];

		/* Sync the DMA for the RX buffer so CPU is sure to get
		 * the correct values */
		pci_dma_sync_single_for_cpu(priv->pci_dev, packet->dma_addr,
					    sizeof(struct ipw2100_rx),
					    PCI_DMA_FROMDEVICE);

		if (unlikely(ipw2100_corruption_check(priv, i))) {
			ipw2100_corruption_detected(priv, i);
			goto increment;
		}

		u = packet->rxp;
		frame_type = sq->drv[i].status_fields & STATUS_TYPE_MASK;
		stats.rssi = sq->drv[i].rssi +
		    IPW2100_RSSI_TO_DBM;
		stats.len = sq->drv[i].frame_size;

		stats.mask = 0;
		if (stats.rssi != 0)
			stats.mask |= LIBIPW_STATMASK_RSSI;
		stats.freq = LIBIPW_24GHZ_BAND;

		IPW_DEBUG_RX("%s: '%s' frame type received (%d).\n",
			     priv->net_dev->name, frame_types[frame_type],
			     stats.len);

		/* dispatch on the frame type reported in the status queue */
		switch (frame_type) {
		case COMMAND_STATUS_VAL:
			/* Reset Rx watchdog */
			isr_rx_complete_command(priv, &u->rx_data.command);
			break;

		case STATUS_CHANGE_VAL:
			isr_status_change(priv, u->rx_data.status);
			break;

		case P80211_DATA_VAL:
		case P8023_DATA_VAL:
#ifdef CONFIG_IPW2100_MONITOR
			if (priv->ieee->iw_mode == IW_MODE_MONITOR) {
				isr_rx_monitor(priv, i, &stats);
				break;
			}
#endif
			/* too short to hold even a 3-address 802.11 header */
			if (stats.len < sizeof(struct libipw_hdr_3addr))
				break;
			switch (WLAN_FC_GET_TYPE(le16_to_cpu(u->rx_data.header.frame_ctl))) {
			case IEEE80211_FTYPE_MGMT:
				libipw_rx_mgt(priv->ieee,
					      &u->rx_data.header, &stats);
				break;

			case IEEE80211_FTYPE_CTL:
				break;

			case IEEE80211_FTYPE_DATA:
				isr_rx(priv, i, &stats);
				break;

			}
			break;
		}

	      increment:
		/* clear status field associated with this RBD */
		rxq->drv[i].status.info.field = 0;

		i = (i + 1) % rxq->entries;
	}

	if (i != s) {
		/* backtrack one entry, wrapping to end if at 0 */
		rxq->next = (i ? i : rxq->entries) - 1;

		write_register(priv->net_dev,
			       IPW_MEM_HOST_SHARED_RX_WRITE_INDEX, rxq->next);
	}
}

/*
 * __ipw2100_tx_process
 *
 * This routine will determine whether the next packet on
 * the fw_pend_list has been processed by the firmware yet.
 *
 * If not, then it does nothing and returns.
 *
 * If so, then it removes the item from the fw_pend_list, frees
 * any associated storage, and places the item back on the
 * free list of its source (either msg_free_list or tx_free_list)
 *
 * TX Queue works as follows:
 *
 * Read index - points to the next TBD that the firmware will
 *              process.  The firmware will read the data, and once
 *              done processing, it will advance the Read index.
 *
 * Write index - driver fills this entry with an constructed TBD
 *               entry.  The Write index is not advanced until the
 *               packet has been configured.
 *
 * In between the W and R indexes are the TBDs that have NOT been
 * processed.  Lagging behind the R index are packets that have
 * been processed but have not been freed by the driver.
 *
 * In order to free old storage, an internal index will be maintained
 * that points to the next packet to be freed.  When all used
 * packets have been freed, the oldest index will be the same as the
 * firmware's read index.
 *
 * The OLDEST index is cached in the variable 'priv->tx_queue.oldest'
 *
 * Because the TBD structure can not contain arbitrary data, the
 * driver must keep an internal queue of cached allocations such that
 * it can put that data back into the tx_free_list and msg_free_list
 * for use by future command and data packets.
 *
 */
static int __ipw2100_tx_process(struct ipw2100_priv *priv)
{
	struct ipw2100_bd_queue *txq = &priv->tx_queue;
	struct ipw2100_bd *tbd;
	struct list_head *element;
	struct ipw2100_tx_packet *packet;
	int descriptors_used;
	int e, i;
	u32 r, w, frag_num = 0;

	if (list_empty(&priv->fw_pend_list))
		return 0;

	element = priv->fw_pend_list.next;

	packet = list_entry(element, struct ipw2100_tx_packet, list);
	tbd = &txq->drv[packet->index];

	/* Determine how many TBD entries must be finished... */
	switch (packet->type) {
	case COMMAND:
		/* COMMAND uses only one slot; don't advance */
		descriptors_used = 1;
		e = txq->oldest;
		break;

	case DATA:
		/* DATA uses two slots; advance and loop position. */
		descriptors_used = tbd->num_fragments;
		frag_num = tbd->num_fragments - 1;
		/* e is the index of the last fragment's TBD */
		e = txq->oldest + frag_num;
		e %= txq->entries;
		break;

	default:
		printk(KERN_WARNING DRV_NAME ": %s: Bad fw_pend_list entry!\n",
		       priv->net_dev->name);
		return 0;
	}

	/* if the last TBD is not done by NIC yet, then packet is
	 * not ready to be released.
	 *
	 */
	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX,
		      &r);
	read_register(priv->net_dev, IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
		      &w);
	if (w != txq->next)
		printk(KERN_WARNING DRV_NAME ": %s: write index mismatch\n",
		       priv->net_dev->name);

	/*
	 * txq->next is the index of the last packet written txq->oldest is
	 * the index of the r is the index of the next packet to be read by
	 * firmware
	 */

	/*
	 * Quick graphic to help you visualize the following
	 * if / else statement
	 *
	 * ===>|                     s---->|===============
	 *                               e>|
	 * | a | b | c | d | e | f | g | h | i | j | k | l
	 *       r---->|
	 *              w
	 *
	 * w - updated by driver
	 * r - updated by firmware
	 * s - start of oldest BD entry (txq->oldest)
	 * e - end of oldest BD entry
	 *
	 */
	/* bail out unless the firmware's read index has passed entry e */
	if (!((r <= w && (e < r || e >= w)) || (e < r && e >= w))) {
		IPW_DEBUG_TX("exit - no processed packets ready to release.\n");
		return 0;
	}

	list_del(element);
	DEC_STAT(&priv->fw_pend_stat);

#ifdef CONFIG_IPW2100_DEBUG
	{
		i = txq->oldest;
		IPW_DEBUG_TX("TX%d V=%p P=%04X T=%04X L=%d\n", i, &txq->drv[i],
			     (u32) (txq->nic + i * sizeof(struct ipw2100_bd)),
			     txq->drv[i].host_addr, txq->drv[i].buf_length);

		if (packet->type == DATA) {
			i = (i + 1) % txq->entries;

			IPW_DEBUG_TX("TX%d V=%p P=%04X T=%04X L=%d\n", i,
				     &txq->drv[i],
				     (u32) (txq->nic + i *
					    sizeof(struct ipw2100_bd)),
				     (u32) txq->drv[i].host_addr,
				     txq->drv[i].buf_length);
		}
	}
#endif

	switch (packet->type) {
	case DATA:
		if (txq->drv[txq->oldest].status.info.fields.txType != 0)
			printk(KERN_WARNING DRV_NAME ": %s: Queue mismatch.  "
			       "Expecting DATA TBD but pulled "
			       "something else: ids %d=%d.\n",
			       priv->net_dev->name, txq->oldest, packet->index);

		/* DATA packet; we have to unmap and free the SKB */
		for (i = 0; i < frag_num; i++) {
			tbd = &txq->drv[(packet->index + 1 + i) % txq->entries];

			IPW_DEBUG_TX("TX%d P=%08x L=%d\n",
				     (packet->index + 1 + i) % txq->entries,
				     tbd->host_addr, tbd->buf_length);

			pci_unmap_single(priv->pci_dev,
					 tbd->host_addr,
					 tbd->buf_length, PCI_DMA_TODEVICE);
		}

		libipw_txb_free(packet->info.d_struct.txb);
		packet->info.d_struct.txb = NULL;

		list_add_tail(element, &priv->tx_free_list);
		INC_STAT(&priv->tx_free_stat);

		/* We have a free slot in the Tx queue, so wake up the
		 * transmit layer if it is stopped. */
		if (priv->status & STATUS_ASSOCIATED)
			netif_wake_queue(priv->net_dev);

		/* A packet was processed by the hardware, so update the
		 * watchdog */
		priv->net_dev->trans_start = jiffies;

		break;

	case COMMAND:
		if (txq->drv[txq->oldest].status.info.fields.txType != 1)
			printk(KERN_WARNING DRV_NAME ": %s: Queue mismatch.  "
			       "Expecting COMMAND TBD but pulled "
			       "something else: ids %d=%d.\n",
			       priv->net_dev->name, txq->oldest, packet->index);

#ifdef CONFIG_IPW2100_DEBUG
		if (packet->info.c_struct.cmd->host_command_reg <
		    ARRAY_SIZE(command_types))
			IPW_DEBUG_TX("Command '%s (%d)' processed: %d.\n",
				     command_types[packet->info.c_struct.cmd->
						   host_command_reg],
				     packet->info.c_struct.cmd->
				     host_command_reg,
				     packet->info.c_struct.cmd->cmd_status_reg);
#endif

		list_add_tail(element, &priv->msg_free_list);
		INC_STAT(&priv->msg_free_stat);
		break;
	}

	/* advance oldest used TBD pointer to start of next entry */
	txq->oldest = (e + 1) % txq->entries;
	/* increase available TBDs number */
	txq->available += descriptors_used;
	SET_STAT(&priv->txq_stat, txq->available);

	IPW_DEBUG_TX("packet latency (send to process) %ld jiffies\n",
		     jiffies - packet->jiffy_start);

	return (!list_empty(&priv->fw_pend_list));
}

/* Drain all firmware-completed TX packets; the 200-iteration cap guards
 * against livelock and logs a warning when it is hit. */
static inline void __ipw2100_tx_complete(struct ipw2100_priv *priv)
{
	int i = 0;

	while (__ipw2100_tx_process(priv) && i < 200)
		i++;

	if (i == 200) {
		printk(KERN_WARNING DRV_NAME ": "
		       "%s: Driver is running slow (%d iters).\n",
		       priv->net_dev->name, i);
	}
}

/* Move queued command packets from msg_pend_list into the TX descriptor
 * ring (one TBD per command) and notify the firmware by advancing the
 * shared write index.  Caller holds priv->low_lock. */
static void ipw2100_tx_send_commands(struct ipw2100_priv *priv)
{
	struct list_head *element;
	struct ipw2100_tx_packet *packet;
	struct ipw2100_bd_queue *txq = &priv->tx_queue;
	struct ipw2100_bd *tbd;
	int next = txq->next;

	while (!list_empty(&priv->msg_pend_list)) {
		/* if there isn't enough space in TBD queue, then
		 * don't stuff a new one in.
		 * NOTE: 3 are needed as a command will take one,
		 *       and there is a minimum of 2 that must be
		 *       maintained between the r and w indexes
		 */
		if (txq->available <= 3) {
			IPW_DEBUG_TX("no room in tx_queue\n");
			break;
		}

		element = priv->msg_pend_list.next;
		list_del(element);
		DEC_STAT(&priv->msg_pend_stat);

		packet = list_entry(element, struct ipw2100_tx_packet, list);

		IPW_DEBUG_TX("using TBD at virt=%p, phys=%04X\n",
			     &txq->drv[txq->next],
			     (u32) (txq->nic + txq->next *
				    sizeof(struct ipw2100_bd)));

		packet->index = txq->next;

		tbd = &txq->drv[txq->next];

		/* initialize TBD */
		tbd->host_addr = packet->info.c_struct.cmd_phys;
		tbd->buf_length = sizeof(struct ipw2100_cmd_header);
		/* not marking number of fragments causes problems
		 * with f/w debug version */
		tbd->num_fragments = 1;
		tbd->status.info.field =
		    IPW_BD_STATUS_TX_FRAME_COMMAND |
		    IPW_BD_STATUS_TX_INTERRUPT_ENABLE;

		/* update TBD queue counters */
		txq->next++;
		txq->next %= txq->entries;
		txq->available--;
		DEC_STAT(&priv->txq_stat);

		list_add_tail(element, &priv->fw_pend_list);
		INC_STAT(&priv->fw_pend_stat);
	}

	if (txq->next != next) {
		/* kick off the DMA by notifying firmware the
		 * write index has moved; make sure TBD stores are sync'd */
		wmb();
		write_register(priv->net_dev,
			       IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
			       txq->next);
	}
}

/*
 * ipw2100_tx_send_data
 *
 * Move queued data packets from tx_pend_list into the TX descriptor ring:
 * one header TBD plus one TBD per skb fragment, then advance the shared
 * write index.  Caller holds priv->low_lock.
 */
static void ipw2100_tx_send_data(struct ipw2100_priv *priv)
{
	struct list_head *element;
	struct ipw2100_tx_packet *packet;
	struct ipw2100_bd_queue *txq = &priv->tx_queue;
	struct ipw2100_bd *tbd;
	int next = txq->next;
	int i = 0;
	struct ipw2100_data_header *ipw_hdr;
	struct libipw_hdr_3addr *hdr;

	while (!list_empty(&priv->tx_pend_list)) {
		/* if there isn't enough space in TBD queue, then
		 * don't stuff a new one in.
		 * NOTE: 4 are needed as a data will take two,
		 *       and there is a minimum of 2 that must be
		 *       maintained between the r and w indexes
		 */
		element = priv->tx_pend_list.next;
		packet = list_entry(element, struct ipw2100_tx_packet, list);

		if (unlikely(1 + packet->info.d_struct.txb->nr_frags >
			     IPW_MAX_BDS)) {
			/* TODO: Support merging buffers if more than
			 * IPW_MAX_BDS are used */
			IPW_DEBUG_INFO("%s: Maximum BD threshold exceeded.  "
				       "Increase fragmentation level.\n",
				       priv->net_dev->name);
		}

		/* need one TBD for the header plus one per fragment */
		if (txq->available <= 3 + packet->info.d_struct.txb->nr_frags) {
			IPW_DEBUG_TX("no room in tx_queue\n");
			break;
		}

		list_del(element);
		DEC_STAT(&priv->tx_pend_stat);

		tbd = &txq->drv[txq->next];

		packet->index = txq->next;

		ipw_hdr = packet->info.d_struct.data;
		hdr = (struct libipw_hdr_3addr *)packet->info.d_struct.txb->
		    fragments[0]->data;

		if (priv->ieee->iw_mode == IW_MODE_INFRA) {
			/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
			memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
			memcpy(ipw_hdr->dst_addr, hdr->addr3, ETH_ALEN);
		} else if (priv->ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			 * Addr3 = BSSID */
			memcpy(ipw_hdr->src_addr, hdr->addr2, ETH_ALEN);
			memcpy(ipw_hdr->dst_addr, hdr->addr1, ETH_ALEN);
		}

		ipw_hdr->host_command_reg = SEND;
		ipw_hdr->host_command_reg1 = 0;

		/* For now we only support host based encryption */
		ipw_hdr->needs_encryption = 0;
		ipw_hdr->encrypted = packet->info.d_struct.txb->encrypted;
		if (packet->info.d_struct.txb->nr_frags > 1)
			ipw_hdr->fragment_size =
			    packet->info.d_struct.txb->frag_size -
			    LIBIPW_3ADDR_LEN;
		else
			ipw_hdr->fragment_size = 0;

		/* first TBD carries the ipw2100 data header itself */
		tbd->host_addr = packet->info.d_struct.data_phys;
		tbd->buf_length = sizeof(struct ipw2100_data_header);
		tbd->num_fragments = 1 + packet->info.d_struct.txb->nr_frags;
		tbd->status.info.field =
		    IPW_BD_STATUS_TX_FRAME_802_3 |
		    IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;
		txq->next++;
		txq->next %= txq->entries;

		IPW_DEBUG_TX("data header tbd TX%d P=%08x L=%d\n",
			     packet->index, tbd->host_addr, tbd->buf_length);
#ifdef CONFIG_IPW2100_DEBUG
		if (packet->info.d_struct.txb->nr_frags > 1)
			IPW_DEBUG_FRAG("fragment Tx: %d frames\n",
				       packet->info.d_struct.txb->nr_frags);
#endif

		/* one TBD per skb fragment; the 802.11 3-addr header is
		 * stripped (the device gets payload past LIBIPW_3ADDR_LEN) */
		for (i = 0; i < packet->info.d_struct.txb->nr_frags; i++) {
			tbd = &txq->drv[txq->next];
			if (i == packet->info.d_struct.txb->nr_frags - 1)
				tbd->status.info.field =
				    IPW_BD_STATUS_TX_FRAME_802_3 |
				    IPW_BD_STATUS_TX_INTERRUPT_ENABLE;
			else
				tbd->status.info.field =
				    IPW_BD_STATUS_TX_FRAME_802_3 |
				    IPW_BD_STATUS_TX_FRAME_NOT_LAST_FRAGMENT;

			tbd->buf_length = packet->info.d_struct.txb->
			    fragments[i]->len - LIBIPW_3ADDR_LEN;

			tbd->host_addr = pci_map_single(priv->pci_dev,
							packet->info.d_struct.
							txb->fragments[i]->
							data +
							LIBIPW_3ADDR_LEN,
							tbd->buf_length,
							PCI_DMA_TODEVICE);

			IPW_DEBUG_TX("data frag tbd TX%d P=%08x L=%d\n",
				     txq->next, tbd->host_addr,
				     tbd->buf_length);

			pci_dma_sync_single_for_device(priv->pci_dev,
						       tbd->host_addr,
						       tbd->buf_length,
						       PCI_DMA_TODEVICE);

			txq->next++;
			txq->next %= txq->entries;
		}

		txq->available -= 1 + packet->info.d_struct.txb->nr_frags;
		SET_STAT(&priv->txq_stat, txq->available);

		list_add_tail(element, &priv->fw_pend_list);
		INC_STAT(&priv->fw_pend_stat);
	}

	if (txq->next != next) {
		/* kick off the DMA by notifying firmware the
		 * write index has moved; make sure TBD stores are sync'd */
		write_register(priv->net_dev,
			       IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX,
			       txq->next);
	}
}

/* Bottom half of the interrupt path: ack every pending INTA cause and run
 * the corresponding RX/TX processing with low_lock held and hardware
 * interrupts masked for the duration. */
static void ipw2100_irq_tasklet(struct ipw2100_priv *priv)
{
	struct net_device *dev = priv->net_dev;
	unsigned long flags;
	u32 inta, tmp;

	spin_lock_irqsave(&priv->low_lock, flags);
	ipw2100_disable_interrupts(priv);

	read_register(dev, IPW_REG_INTA, &inta);

	IPW_DEBUG_ISR("enter - INTA: 0x%08lX\n",
		      (unsigned long)inta & IPW_INTERRUPT_MASK);

	priv->in_isr++;
	priv->interrupts++;

	/* We do not loop and keep polling for more interrupts as this
	 * is frowned upon and doesn't play nicely with other potentially
	 * chained IRQs */
	IPW_DEBUG_ISR("INTA: 0x%08lX\n",
		      (unsigned long)inta & IPW_INTERRUPT_MASK);

	if (inta & IPW2100_INTA_FATAL_ERROR) {
		printk(KERN_WARNING DRV_NAME
		       ": Fatal interrupt. Scheduling firmware restart.\n");
		priv->inta_other++;
		write_register(dev, IPW_REG_INTA, IPW2100_INTA_FATAL_ERROR);

		read_nic_dword(dev, IPW_NIC_FATAL_ERROR, &priv->fatal_error);
		IPW_DEBUG_INFO("%s: Fatal error value: 0x%08X\n",
			       priv->net_dev->name, priv->fatal_error);

		read_nic_dword(dev, IPW_ERROR_ADDR(priv->fatal_error), &tmp);
		IPW_DEBUG_INFO("%s: Fatal error address value: 0x%08X\n",
			       priv->net_dev->name, tmp);

		/* Wake up any sleeping jobs */
		schedule_reset(priv);
	}

	if (inta & IPW2100_INTA_PARITY_ERROR) {
		printk(KERN_ERR DRV_NAME
		       ": ***** PARITY ERROR INTERRUPT !!!!\n");
		priv->inta_other++;
		write_register(dev, IPW_REG_INTA, IPW2100_INTA_PARITY_ERROR);
	}

	if (inta & IPW2100_INTA_RX_TRANSFER) {
		IPW_DEBUG_ISR("RX interrupt\n");

		priv->rx_interrupts++;

		write_register(dev, IPW_REG_INTA, IPW2100_INTA_RX_TRANSFER);

		__ipw2100_rx_process(priv);
		__ipw2100_tx_complete(priv);
	}

	if (inta & IPW2100_INTA_TX_TRANSFER) {
		IPW_DEBUG_ISR("TX interrupt\n");

		priv->tx_interrupts++;

		write_register(dev, IPW_REG_INTA, IPW2100_INTA_TX_TRANSFER);

		__ipw2100_tx_complete(priv);
		ipw2100_tx_send_commands(priv);
		ipw2100_tx_send_data(priv);
	}

	if (inta & IPW2100_INTA_TX_COMPLETE) {
		IPW_DEBUG_ISR("TX complete\n");
		priv->inta_other++;
		write_register(dev, IPW_REG_INTA, IPW2100_INTA_TX_COMPLETE);

		__ipw2100_tx_complete(priv);
	}

	if (inta & IPW2100_INTA_EVENT_INTERRUPT) {
		/* ipw2100_handle_event(dev); */
		priv->inta_other++;
		write_register(dev, IPW_REG_INTA, IPW2100_INTA_EVENT_INTERRUPT);
	}

	if (inta & IPW2100_INTA_FW_INIT_DONE) {
		IPW_DEBUG_ISR("FW init done interrupt\n");
		priv->inta_other++;

		/* clear any fatal/parity causes latched during init */
		read_register(dev, IPW_REG_INTA, &tmp);
		if (tmp & (IPW2100_INTA_FATAL_ERROR |
			   IPW2100_INTA_PARITY_ERROR)) {
			write_register(dev, IPW_REG_INTA,
				       IPW2100_INTA_FATAL_ERROR |
				       IPW2100_INTA_PARITY_ERROR);
		}

		write_register(dev, IPW_REG_INTA, IPW2100_INTA_FW_INIT_DONE);
	}

	if (inta & IPW2100_INTA_STATUS_CHANGE) {
		IPW_DEBUG_ISR("Status change interrupt\n");
		priv->inta_other++;
		write_register(dev, IPW_REG_INTA, IPW2100_INTA_STATUS_CHANGE);
	}

	if (inta & IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE) {
		IPW_DEBUG_ISR("slave host mode interrupt\n");
		priv->inta_other++;
		write_register(dev, IPW_REG_INTA,
			       IPW2100_INTA_SLAVE_MODE_HOST_COMMAND_DONE);
	}

	priv->in_isr--;
	ipw2100_enable_interrupts(priv);

	spin_unlock_irqrestore(&priv->low_lock, flags);

	IPW_DEBUG_ISR("exit\n");
}

/* Hard IRQ handler (possibly on a shared line): returns IRQ_NONE unless a
 * cause bit we own is pending, otherwise masks the hardware interrupt and
 * defers the real work to ipw2100_irq_tasklet. */
static irqreturn_t ipw2100_interrupt(int irq, void *data)
{
	struct ipw2100_priv *priv = data;
	u32 inta, inta_mask;

	if (!data)
		return IRQ_NONE;

	spin_lock(&priv->low_lock);

	/* We check to see if we should be ignoring interrupts before
	 * we touch the hardware.  During ucode load if we try and handle
	 * an interrupt we can cause keyboard problems as well as cause
	 * the ucode to fail to initialize */
	if (!(priv->status & STATUS_INT_ENABLED)) {
		/* Shared IRQ */
		goto none;
	}

	read_register(priv->net_dev, IPW_REG_INTA_MASK, &inta_mask);
	read_register(priv->net_dev, IPW_REG_INTA, &inta);

	if (inta == 0xFFFFFFFF) {
		/* Hardware disappeared */
		printk(KERN_WARNING DRV_NAME ": IRQ INTA == 0xFFFFFFFF\n");
		goto none;
	}

	inta &= IPW_INTERRUPT_MASK;

	if (!(inta & inta_mask)) {
		/* Shared interrupt */
		goto none;
	}

	/* We disable the hardware interrupt here just to prevent unneeded
	 * calls to be made.
	 * We disable this again within the actual
	 * work tasklet, so if another part of the code re-enables the
	 * interrupt, that is fine */
	ipw2100_disable_interrupts(priv);

	tasklet_schedule(&priv->irq_tasklet);
	spin_unlock(&priv->low_lock);

	return IRQ_HANDLED;
      none:
	spin_unlock(&priv->low_lock);
	return IRQ_NONE;
}

/* libipw hard-start transmit hook: queue txb on tx_pend_list and try to
 * push it into the TX ring.  Returns NETDEV_TX_BUSY (and stops the queue)
 * when not associated or when no free packet slot is available. */
static netdev_tx_t ipw2100_tx(struct libipw_txb *txb,
			      struct net_device *dev, int pri)
{
	struct ipw2100_priv *priv = libipw_priv(dev);
	struct list_head *element;
	struct ipw2100_tx_packet *packet;
	unsigned long flags;

	spin_lock_irqsave(&priv->low_lock, flags);

	if (!(priv->status & STATUS_ASSOCIATED)) {
		IPW_DEBUG_INFO("Can not transmit when not connected.\n");
		priv->net_dev->stats.tx_carrier_errors++;
		netif_stop_queue(dev);
		goto fail_unlock;
	}

	if (list_empty(&priv->tx_free_list))
		goto fail_unlock;

	element = priv->tx_free_list.next;
	packet = list_entry(element, struct ipw2100_tx_packet, list);

	packet->info.d_struct.txb = txb;

	IPW_DEBUG_TX("Sending fragment (%d bytes):\n", txb->fragments[0]->len);
	printk_buf(IPW_DL_TX, txb->fragments[0]->data, txb->fragments[0]->len);

	/* timestamp for the send-to-process latency debug statistic */
	packet->jiffy_start = jiffies;

	list_del(element);
	DEC_STAT(&priv->tx_free_stat);

	list_add_tail(element, &priv->tx_pend_list);
	INC_STAT(&priv->tx_pend_stat);

	ipw2100_tx_send_data(priv);

	spin_unlock_irqrestore(&priv->low_lock, flags);
	return NETDEV_TX_OK;

      fail_unlock:
	netif_stop_queue(dev);
	spin_unlock_irqrestore(&priv->low_lock, flags);
	return NETDEV_TX_BUSY;
}

/* Allocate the pool of IPW_COMMAND_POOL_SIZE command packets, each with a
 * PCI-consistent ipw2100_cmd_header.  On partial failure every buffer
 * allocated so far is released.  Returns 0 on success or a negative errno. */
static int ipw2100_msg_allocate(struct ipw2100_priv *priv)
{
	int i, j, err = -EINVAL;
	void *v;
	dma_addr_t p;

	priv->msg_buffers =
	    kmalloc(IPW_COMMAND_POOL_SIZE * sizeof(struct ipw2100_tx_packet),
		    GFP_KERNEL);
	if (!priv->msg_buffers)
		return -ENOMEM;

	for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
		v = pci_alloc_consistent(priv->pci_dev,
					 sizeof(struct ipw2100_cmd_header), &p);
		if (!v) {
			printk(KERN_ERR DRV_NAME ": "
			       "%s: PCI alloc failed for msg "
			       "buffers.\n", priv->net_dev->name);
			err = -ENOMEM;
			break;
		}

		memset(v, 0, sizeof(struct
			       ipw2100_cmd_header));

		priv->msg_buffers[i].type = COMMAND;
		priv->msg_buffers[i].info.c_struct.cmd =
		    (struct ipw2100_cmd_header *)v;
		priv->msg_buffers[i].info.c_struct.cmd_phys = p;
	}

	if (i == IPW_COMMAND_POOL_SIZE)
		return 0;

	/* partial failure: undo the i buffers that were allocated */
	for (j = 0; j < i; j++) {
		pci_free_consistent(priv->pci_dev,
				    sizeof(struct ipw2100_cmd_header),
				    priv->msg_buffers[j].info.c_struct.cmd,
				    priv->msg_buffers[j].info.c_struct.
				    cmd_phys);
	}

	kfree(priv->msg_buffers);
	priv->msg_buffers = NULL;

	return err;
}

/* Put every command packet of the pool onto msg_free_list and reset the
 * free/pending list bookkeeping.  Always returns 0. */
static int ipw2100_msg_initialize(struct ipw2100_priv *priv)
{
	int i;

	INIT_LIST_HEAD(&priv->msg_free_list);
	INIT_LIST_HEAD(&priv->msg_pend_list);

	for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++)
		list_add_tail(&priv->msg_buffers[i].list, &priv->msg_free_list);

	SET_STAT(&priv->msg_free_stat, i);

	return 0;
}

/* Release the command packet pool allocated by ipw2100_msg_allocate().
 * Safe to call when the pool was never allocated. */
static void ipw2100_msg_free(struct ipw2100_priv *priv)
{
	int i;

	if (!priv->msg_buffers)
		return;

	for (i = 0; i < IPW_COMMAND_POOL_SIZE; i++) {
		pci_free_consistent(priv->pci_dev,
				    sizeof(struct ipw2100_cmd_header),
				    priv->msg_buffers[i].info.c_struct.cmd,
				    priv->msg_buffers[i].info.c_struct.
				    cmd_phys);
	}

	kfree(priv->msg_buffers);
	priv->msg_buffers = NULL;
}

/* sysfs 'pci' attribute: dump the 256-byte PCI config space as 16 rows of
 * 16 bytes (read as dwords).  Output is bounded well below PAGE_SIZE. */
static ssize_t show_pci(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct pci_dev *pci_dev = container_of(d, struct pci_dev, dev);
	char *out = buf;
	int i, j;
	u32 val;

	for (i = 0; i < 16; i++) {
		out += sprintf(out, "[%08X] ", i * 16);
		for (j = 0; j < 16; j += 4) {
			pci_read_config_dword(pci_dev, i * 16 + j, &val);
			out += sprintf(out, "%08X ", val);
		}
		out += sprintf(out, "\n");
	}

	return out - buf;
}

static DEVICE_ATTR(pci, S_IRUGO, show_pci, NULL);

/* sysfs 'cfg' attribute: current priv->config flag word. */
static ssize_t show_cfg(struct device *d, struct device_attribute *attr,
			char *buf)
{
	struct ipw2100_priv *p = dev_get_drvdata(d);
	return sprintf(buf, "0x%08x\n", (int)p->config);
}

static DEVICE_ATTR(cfg, S_IRUGO, show_cfg, NULL);

/* sysfs 'status' attribute: current priv->status flag word. */
static ssize_t show_status(struct device *d, struct device_attribute *attr,
			   char *buf)
{
	struct ipw2100_priv *p = dev_get_drvdata(d);
	return sprintf(buf, "0x%08x\n", (int)p->status);
}

static DEVICE_ATTR(status, S_IRUGO, show_status, NULL);

/* sysfs 'capability' attribute: current priv->capability flag word. */
static ssize_t show_capability(struct device *d, struct device_attribute *attr,
			       char *buf)
{
	struct ipw2100_priv *p = dev_get_drvdata(d);
	return sprintf(buf, "0x%08x\n", (int)p->capability);
}

static DEVICE_ATTR(capability, S_IRUGO, show_capability, NULL);

/* Register-address/name pairs used by the debug/register dump code. */
#define IPW2100_REG(x) { IPW_ ##x, #x }
static const struct {
	u32 addr;
	const char *name;
} hw_data[] = {
	IPW2100_REG(REG_GP_CNTRL),
	IPW2100_REG(REG_GPIO),
	IPW2100_REG(REG_INTA),
	IPW2100_REG(REG_INTA_MASK),
	IPW2100_REG(REG_RESET_REG),
};

/* NIC-memory address/name/size triples for the debug dump code. */
#define IPW2100_NIC(x, s) { x, #x, s }
static const struct {
	u32 addr;
	const char *name;
	size_t size;
} nic_data[] = {
	IPW2100_NIC(IPW2100_CONTROL_REG, 2),
	IPW2100_NIC(0x210014, 1),
	IPW2100_NIC(0x210000, 1),
};

/* Ordinal-table index/name/description triples (firmware statistics). */
#define IPW2100_ORD(x, d) { IPW_ORD_ ##x, #x, d }
static const struct {
	u8 index;
	const char *name;
	const char *desc;
} ord_data[] = {
	IPW2100_ORD(STAT_TX_HOST_REQUESTS, "requested Host Tx's (MSDU)"),
	IPW2100_ORD(STAT_TX_HOST_COMPLETE, "successful Host Tx's (MSDU)"),
IPW2100_ORD(STAT_TX_DIR_DATA, "successful Directed Tx's (MSDU)"), IPW2100_ORD(STAT_TX_DIR_DATA1, "successful Directed Tx's (MSDU) @ 1MB"), IPW2100_ORD(STAT_TX_DIR_DATA2, "successful Directed Tx's (MSDU) @ 2MB"), IPW2100_ORD(STAT_TX_DIR_DATA5_5, "successful Directed Tx's (MSDU) @ 5_5MB"), IPW2100_ORD(STAT_TX_DIR_DATA11, "successful Directed Tx's (MSDU) @ 11MB"), IPW2100_ORD(STAT_TX_NODIR_DATA1, "successful Non_Directed Tx's (MSDU) @ 1MB"), IPW2100_ORD(STAT_TX_NODIR_DATA2, "successful Non_Directed Tx's (MSDU) @ 2MB"), IPW2100_ORD(STAT_TX_NODIR_DATA5_5, "successful Non_Directed Tx's (MSDU) @ 5.5MB"), IPW2100_ORD(STAT_TX_NODIR_DATA11, "successful Non_Directed Tx's (MSDU) @ 11MB"), IPW2100_ORD(STAT_NULL_DATA, "successful NULL data Tx's"), IPW2100_ORD(STAT_TX_RTS, "successful Tx RTS"), IPW2100_ORD(STAT_TX_CTS, "successful Tx CTS"), IPW2100_ORD(STAT_TX_ACK, "successful Tx ACK"), IPW2100_ORD(STAT_TX_ASSN, "successful Association Tx's"), IPW2100_ORD(STAT_TX_ASSN_RESP, "successful Association response Tx's"), IPW2100_ORD(STAT_TX_REASSN, "successful Reassociation Tx's"), IPW2100_ORD(STAT_TX_REASSN_RESP, "successful Reassociation response Tx's"), IPW2100_ORD(STAT_TX_PROBE, "probes successfully transmitted"), IPW2100_ORD(STAT_TX_PROBE_RESP, "probe responses successfully transmitted"), IPW2100_ORD(STAT_TX_BEACON, "tx beacon"), IPW2100_ORD(STAT_TX_ATIM, "Tx ATIM"), IPW2100_ORD(STAT_TX_DISASSN, "successful Disassociation TX"), IPW2100_ORD(STAT_TX_AUTH, "successful Authentication Tx"), IPW2100_ORD(STAT_TX_DEAUTH, "successful Deauthentication TX"), IPW2100_ORD(STAT_TX_TOTAL_BYTES, "Total successful Tx data bytes"), IPW2100_ORD(STAT_TX_RETRIES, "Tx retries"), IPW2100_ORD(STAT_TX_RETRY1, "Tx retries at 1MBPS"), IPW2100_ORD(STAT_TX_RETRY2, "Tx retries at 2MBPS"), IPW2100_ORD(STAT_TX_RETRY5_5, "Tx retries at 5.5MBPS"), IPW2100_ORD(STAT_TX_RETRY11, "Tx retries at 11MBPS"), IPW2100_ORD(STAT_TX_FAILURES, "Tx Failures"), IPW2100_ORD(STAT_TX_MAX_TRIES_IN_HOP, "times max tries in a hop 
failed"), IPW2100_ORD(STAT_TX_DISASSN_FAIL, "times disassociation failed"), IPW2100_ORD(STAT_TX_ERR_CTS, "missed/bad CTS frames"), IPW2100_ORD(STAT_TX_ERR_ACK, "tx err due to acks"), IPW2100_ORD(STAT_RX_HOST, "packets passed to host"), IPW2100_ORD(STAT_RX_DIR_DATA, "directed packets"), IPW2100_ORD(STAT_RX_DIR_DATA1, "directed packets at 1MB"), IPW2100_ORD(STAT_RX_DIR_DATA2, "directed packets at 2MB"), IPW2100_ORD(STAT_RX_DIR_DATA5_5, "directed packets at 5.5MB"), IPW2100_ORD(STAT_RX_DIR_DATA11, "directed packets at 11MB"), IPW2100_ORD(STAT_RX_NODIR_DATA, "nondirected packets"), IPW2100_ORD(STAT_RX_NODIR_DATA1, "nondirected packets at 1MB"), IPW2100_ORD(STAT_RX_NODIR_DATA2, "nondirected packets at 2MB"), IPW2100_ORD(STAT_RX_NODIR_DATA5_5, "nondirected packets at 5.5MB"), IPW2100_ORD(STAT_RX_NODIR_DATA11, "nondirected packets at 11MB"), IPW2100_ORD(STAT_RX_NULL_DATA, "null data rx's"), IPW2100_ORD(STAT_RX_RTS, "Rx RTS"), IPW2100_ORD(STAT_RX_CTS, "Rx CTS"), IPW2100_ORD(STAT_RX_ACK, "Rx ACK"), IPW2100_ORD(STAT_RX_CFEND, "Rx CF End"), IPW2100_ORD(STAT_RX_CFEND_ACK, "Rx CF End + CF Ack"), IPW2100_ORD(STAT_RX_ASSN, "Association Rx's"), IPW2100_ORD(STAT_RX_ASSN_RESP, "Association response Rx's"), IPW2100_ORD(STAT_RX_REASSN, "Reassociation Rx's"), IPW2100_ORD(STAT_RX_REASSN_RESP, "Reassociation response Rx's"), IPW2100_ORD(STAT_RX_PROBE, "probe Rx's"), IPW2100_ORD(STAT_RX_PROBE_RESP, "probe response Rx's"), IPW2100_ORD(STAT_RX_BEACON, "Rx beacon"), IPW2100_ORD(STAT_RX_ATIM, "Rx ATIM"), IPW2100_ORD(STAT_RX_DISASSN, "disassociation Rx"), IPW2100_ORD(STAT_RX_AUTH, "authentication Rx"), IPW2100_ORD(STAT_RX_DEAUTH, "deauthentication Rx"), IPW2100_ORD(STAT_RX_TOTAL_BYTES, "Total rx data bytes received"), IPW2100_ORD(STAT_RX_ERR_CRC, "packets with Rx CRC error"), IPW2100_ORD(STAT_RX_ERR_CRC1, "Rx CRC errors at 1MB"), IPW2100_ORD(STAT_RX_ERR_CRC2, "Rx CRC errors at 2MB"), IPW2100_ORD(STAT_RX_ERR_CRC5_5, "Rx CRC errors at 5.5MB"), IPW2100_ORD(STAT_RX_ERR_CRC11, "Rx CRC errors at 
11MB"), IPW2100_ORD(STAT_RX_DUPLICATE1, "duplicate rx packets at 1MB"), IPW2100_ORD(STAT_RX_DUPLICATE2, "duplicate rx packets at 2MB"), IPW2100_ORD(STAT_RX_DUPLICATE5_5, "duplicate rx packets at 5.5MB"), IPW2100_ORD(STAT_RX_DUPLICATE11, "duplicate rx packets at 11MB"), IPW2100_ORD(STAT_RX_DUPLICATE, "duplicate rx packets"), IPW2100_ORD(PERS_DB_LOCK, "locking fw permanent db"), IPW2100_ORD(PERS_DB_SIZE, "size of fw permanent db"), IPW2100_ORD(PERS_DB_ADDR, "address of fw permanent db"), IPW2100_ORD(STAT_RX_INVALID_PROTOCOL, "rx frames with invalid protocol"), IPW2100_ORD(SYS_BOOT_TIME, "Boot time"), IPW2100_ORD(STAT_RX_NO_BUFFER, "rx frames rejected due to no buffer"), IPW2100_ORD(STAT_RX_MISSING_FRAG, "rx frames dropped due to missing fragment"), IPW2100_ORD(STAT_RX_ORPHAN_FRAG, "rx frames dropped due to non-sequential fragment"), IPW2100_ORD(STAT_RX_ORPHAN_FRAME, "rx frames dropped due to unmatched 1st frame"), IPW2100_ORD(STAT_RX_FRAG_AGEOUT, "rx frames dropped due to uncompleted frame"), IPW2100_ORD(STAT_RX_ICV_ERRORS, "ICV errors during decryption"), IPW2100_ORD(STAT_PSP_SUSPENSION, "times adapter suspended"), IPW2100_ORD(STAT_PSP_BCN_TIMEOUT, "beacon timeout"), IPW2100_ORD(STAT_PSP_POLL_TIMEOUT, "poll response timeouts"), IPW2100_ORD(STAT_PSP_NONDIR_TIMEOUT, "timeouts waiting for last {broad,multi}cast pkt"), IPW2100_ORD(STAT_PSP_RX_DTIMS, "PSP DTIMs received"), IPW2100_ORD(STAT_PSP_RX_TIMS, "PSP TIMs received"), IPW2100_ORD(STAT_PSP_STATION_ID, "PSP Station ID"), IPW2100_ORD(LAST_ASSN_TIME, "RTC time of last association"), IPW2100_ORD(STAT_PERCENT_MISSED_BCNS, "current calculation of % missed beacons"), IPW2100_ORD(STAT_PERCENT_RETRIES, "current calculation of % missed tx retries"), IPW2100_ORD(ASSOCIATED_AP_PTR, "0 if not associated, else pointer to AP table entry"), IPW2100_ORD(AVAILABLE_AP_CNT, "AP's decsribed in the AP table"), IPW2100_ORD(AP_LIST_PTR, "Ptr to list of available APs"), IPW2100_ORD(STAT_AP_ASSNS, "associations"), IPW2100_ORD(STAT_ASSN_FAIL, 
"association failures"), IPW2100_ORD(STAT_ASSN_RESP_FAIL, "failures due to response fail"), IPW2100_ORD(STAT_FULL_SCANS, "full scans"), IPW2100_ORD(CARD_DISABLED, "Card Disabled"), IPW2100_ORD(STAT_ROAM_INHIBIT, "times roaming was inhibited due to activity"), IPW2100_ORD(RSSI_AT_ASSN, "RSSI of associated AP at time of association"), IPW2100_ORD(STAT_ASSN_CAUSE1, "reassociation: no probe response or TX on hop"), IPW2100_ORD(STAT_ASSN_CAUSE2, "reassociation: poor tx/rx quality"), IPW2100_ORD(STAT_ASSN_CAUSE3, "reassociation: tx/rx quality (excessive AP load"), IPW2100_ORD(STAT_ASSN_CAUSE4, "reassociation: AP RSSI level"), IPW2100_ORD(STAT_ASSN_CAUSE5, "reassociations due to load leveling"), IPW2100_ORD(STAT_AUTH_FAIL, "times authentication failed"), IPW2100_ORD(STAT_AUTH_RESP_FAIL, "times authentication response failed"), IPW2100_ORD(STATION_TABLE_CNT, "entries in association table"), IPW2100_ORD(RSSI_AVG_CURR, "Current avg RSSI"), IPW2100_ORD(POWER_MGMT_MODE, "Power mode - 0=CAM, 1=PSP"), IPW2100_ORD(COUNTRY_CODE, "IEEE country code as recv'd from beacon"), IPW2100_ORD(COUNTRY_CHANNELS, "channels supported by country"), IPW2100_ORD(RESET_CNT, "adapter resets (warm)"), IPW2100_ORD(BEACON_INTERVAL, "Beacon interval"), IPW2100_ORD(ANTENNA_DIVERSITY, "TRUE if antenna diversity is disabled"), IPW2100_ORD(DTIM_PERIOD, "beacon intervals between DTIMs"), IPW2100_ORD(OUR_FREQ, "current radio freq lower digits - channel ID"), IPW2100_ORD(RTC_TIME, "current RTC time"), IPW2100_ORD(PORT_TYPE, "operating mode"), IPW2100_ORD(CURRENT_TX_RATE, "current tx rate"), IPW2100_ORD(SUPPORTED_RATES, "supported tx rates"), IPW2100_ORD(ATIM_WINDOW, "current ATIM Window"), IPW2100_ORD(BASIC_RATES, "basic tx rates"), IPW2100_ORD(NIC_HIGHEST_RATE, "NIC highest tx rate"), IPW2100_ORD(AP_HIGHEST_RATE, "AP highest tx rate"), IPW2100_ORD(CAPABILITIES, "Management frame capability field"), IPW2100_ORD(AUTH_TYPE, "Type of authentication"), IPW2100_ORD(RADIO_TYPE, "Adapter card platform type"), 
IPW2100_ORD(RTS_THRESHOLD, "Min packet length for RTS handshaking"), IPW2100_ORD(INT_MODE, "International mode"), IPW2100_ORD(FRAGMENTATION_THRESHOLD, "protocol frag threshold"), IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_START_ADDRESS, "EEPROM offset in SRAM"), IPW2100_ORD(EEPROM_SRAM_DB_BLOCK_SIZE, "EEPROM size in SRAM"), IPW2100_ORD(EEPROM_SKU_CAPABILITY, "EEPROM SKU Capability"), IPW2100_ORD(EEPROM_IBSS_11B_CHANNELS, "EEPROM IBSS 11b channel set"), IPW2100_ORD(MAC_VERSION, "MAC Version"), IPW2100_ORD(MAC_REVISION, "MAC Revision"), IPW2100_ORD(RADIO_VERSION, "Radio Version"), IPW2100_ORD(NIC_MANF_DATE_TIME, "MANF Date/Time STAMP"), IPW2100_ORD(UCODE_VERSION, "Ucode Version"),}; static ssize_t show_registers(struct device *d, struct device_attribute *attr, char *buf) { int i; struct ipw2100_priv *priv = dev_get_drvdata(d); struct net_device *dev = priv->net_dev; char *out = buf; u32 val = 0; out += sprintf(out, "%30s [Address ] : Hex\n", "Register"); for (i = 0; i < ARRAY_SIZE(hw_data); i++) { read_register(dev, hw_data[i].addr, &val); out += sprintf(out, "%30s [%08X] : %08X\n", hw_data[i].name, hw_data[i].addr, val); } return out - buf; } static DEVICE_ATTR(registers, S_IRUGO, show_registers, NULL); static ssize_t show_hardware(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); struct net_device *dev = priv->net_dev; char *out = buf; int i; out += sprintf(out, "%30s [Address ] : Hex\n", "NIC entry"); for (i = 0; i < ARRAY_SIZE(nic_data); i++) { u8 tmp8; u16 tmp16; u32 tmp32; switch (nic_data[i].size) { case 1: read_nic_byte(dev, nic_data[i].addr, &tmp8); out += sprintf(out, "%30s [%08X] : %02X\n", nic_data[i].name, nic_data[i].addr, tmp8); break; case 2: read_nic_word(dev, nic_data[i].addr, &tmp16); out += sprintf(out, "%30s [%08X] : %04X\n", nic_data[i].name, nic_data[i].addr, tmp16); break; case 4: read_nic_dword(dev, nic_data[i].addr, &tmp32); out += sprintf(out, "%30s [%08X] : %08X\n", nic_data[i].name, 
nic_data[i].addr, tmp32); break; } } return out - buf; } static DEVICE_ATTR(hardware, S_IRUGO, show_hardware, NULL); static ssize_t show_memory(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); struct net_device *dev = priv->net_dev; static unsigned long loop = 0; int len = 0; u32 buffer[4]; int i; char line[81]; if (loop >= 0x30000) loop = 0; /* sysfs provides us PAGE_SIZE buffer */ while (len < PAGE_SIZE - 128 && loop < 0x30000) { if (priv->snapshot[0]) for (i = 0; i < 4; i++) buffer[i] = *(u32 *) SNAPSHOT_ADDR(loop + i * 4); else for (i = 0; i < 4; i++) read_nic_dword(dev, loop + i * 4, &buffer[i]); if (priv->dump_raw) len += sprintf(buf + len, "%c%c%c%c" "%c%c%c%c" "%c%c%c%c" "%c%c%c%c", ((u8 *) buffer)[0x0], ((u8 *) buffer)[0x1], ((u8 *) buffer)[0x2], ((u8 *) buffer)[0x3], ((u8 *) buffer)[0x4], ((u8 *) buffer)[0x5], ((u8 *) buffer)[0x6], ((u8 *) buffer)[0x7], ((u8 *) buffer)[0x8], ((u8 *) buffer)[0x9], ((u8 *) buffer)[0xa], ((u8 *) buffer)[0xb], ((u8 *) buffer)[0xc], ((u8 *) buffer)[0xd], ((u8 *) buffer)[0xe], ((u8 *) buffer)[0xf]); else len += sprintf(buf + len, "%s\n", snprint_line(line, sizeof(line), (u8 *) buffer, 16, loop)); loop += 16; } return len; } static ssize_t store_memory(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw2100_priv *priv = dev_get_drvdata(d); struct net_device *dev = priv->net_dev; const char *p = buf; (void)dev; /* kill unused-var warning for debug-only code */ if (count < 1) return count; if (p[0] == '1' || (count >= 2 && tolower(p[0]) == 'o' && tolower(p[1]) == 'n')) { IPW_DEBUG_INFO("%s: Setting memory dump to RAW mode.\n", dev->name); priv->dump_raw = 1; } else if (p[0] == '0' || (count >= 2 && tolower(p[0]) == 'o' && tolower(p[1]) == 'f')) { IPW_DEBUG_INFO("%s: Setting memory dump to HEX mode.\n", dev->name); priv->dump_raw = 0; } else if (tolower(p[0]) == 'r') { IPW_DEBUG_INFO("%s: Resetting firmware snapshot.\n", 
dev->name); ipw2100_snapshot_free(priv); } else IPW_DEBUG_INFO("%s: Usage: 0|on = HEX, 1|off = RAW, " "reset = clear memory snapshot\n", dev->name); return count; } static DEVICE_ATTR(memory, S_IWUSR | S_IRUGO, show_memory, store_memory); static ssize_t show_ordinals(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); u32 val = 0; int len = 0; u32 val_len; static int loop = 0; if (priv->status & STATUS_RF_KILL_MASK) return 0; if (loop >= ARRAY_SIZE(ord_data)) loop = 0; /* sysfs provides us PAGE_SIZE buffer */ while (len < PAGE_SIZE - 128 && loop < ARRAY_SIZE(ord_data)) { val_len = sizeof(u32); if (ipw2100_get_ordinal(priv, ord_data[loop].index, &val, &val_len)) len += sprintf(buf + len, "[0x%02X] = ERROR %s\n", ord_data[loop].index, ord_data[loop].desc); else len += sprintf(buf + len, "[0x%02X] = 0x%08X %s\n", ord_data[loop].index, val, ord_data[loop].desc); loop++; } return len; } static DEVICE_ATTR(ordinals, S_IRUGO, show_ordinals, NULL); static ssize_t show_stats(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); char *out = buf; out += sprintf(out, "interrupts: %d {tx: %d, rx: %d, other: %d}\n", priv->interrupts, priv->tx_interrupts, priv->rx_interrupts, priv->inta_other); out += sprintf(out, "firmware resets: %d\n", priv->resets); out += sprintf(out, "firmware hangs: %d\n", priv->hangs); #ifdef CONFIG_IPW2100_DEBUG out += sprintf(out, "packet mismatch image: %s\n", priv->snapshot[0] ? 
"YES" : "NO"); #endif return out - buf; } static DEVICE_ATTR(stats, S_IRUGO, show_stats, NULL); static int ipw2100_switch_mode(struct ipw2100_priv *priv, u32 mode) { int err; if (mode == priv->ieee->iw_mode) return 0; err = ipw2100_disable_adapter(priv); if (err) { printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n", priv->net_dev->name, err); return err; } switch (mode) { case IW_MODE_INFRA: priv->net_dev->type = ARPHRD_ETHER; break; case IW_MODE_ADHOC: priv->net_dev->type = ARPHRD_ETHER; break; #ifdef CONFIG_IPW2100_MONITOR case IW_MODE_MONITOR: priv->last_mode = priv->ieee->iw_mode; priv->net_dev->type = ARPHRD_IEEE80211_RADIOTAP; break; #endif /* CONFIG_IPW2100_MONITOR */ } priv->ieee->iw_mode = mode; #ifdef CONFIG_PM /* Indicate ipw2100_download_firmware download firmware * from disk instead of memory. */ ipw2100_firmware.version = 0; #endif printk(KERN_INFO "%s: Resetting on mode change.\n", priv->net_dev->name); priv->reset_backoff = 0; schedule_reset(priv); return 0; } static ssize_t show_internals(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); int len = 0; #define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" y "\n", priv-> x) if (priv->status & STATUS_ASSOCIATED) len += sprintf(buf + len, "connected: %lu\n", get_seconds() - priv->connect_start); else len += sprintf(buf + len, "not connected\n"); DUMP_VAR(ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx], "p"); DUMP_VAR(status, "08lx"); DUMP_VAR(config, "08lx"); DUMP_VAR(capability, "08lx"); len += sprintf(buf + len, "last_rtc: %lu\n", (unsigned long)priv->last_rtc); DUMP_VAR(fatal_error, "d"); DUMP_VAR(stop_hang_check, "d"); DUMP_VAR(stop_rf_kill, "d"); DUMP_VAR(messages_sent, "d"); DUMP_VAR(tx_pend_stat.value, "d"); DUMP_VAR(tx_pend_stat.hi, "d"); DUMP_VAR(tx_free_stat.value, "d"); DUMP_VAR(tx_free_stat.lo, "d"); DUMP_VAR(msg_free_stat.value, "d"); DUMP_VAR(msg_free_stat.lo, "d"); DUMP_VAR(msg_pend_stat.value, "d"); 
DUMP_VAR(msg_pend_stat.hi, "d"); DUMP_VAR(fw_pend_stat.value, "d"); DUMP_VAR(fw_pend_stat.hi, "d"); DUMP_VAR(txq_stat.value, "d"); DUMP_VAR(txq_stat.lo, "d"); DUMP_VAR(ieee->scans, "d"); DUMP_VAR(reset_backoff, "d"); return len; } static DEVICE_ATTR(internals, S_IRUGO, show_internals, NULL); static ssize_t show_bssinfo(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); char essid[IW_ESSID_MAX_SIZE + 1]; u8 bssid[ETH_ALEN]; u32 chan = 0; char *out = buf; unsigned int length; int ret; if (priv->status & STATUS_RF_KILL_MASK) return 0; memset(essid, 0, sizeof(essid)); memset(bssid, 0, sizeof(bssid)); length = IW_ESSID_MAX_SIZE; ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_SSID, essid, &length); if (ret) IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); length = sizeof(bssid); ret = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, bssid, &length); if (ret) IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); length = sizeof(u32); ret = ipw2100_get_ordinal(priv, IPW_ORD_OUR_FREQ, &chan, &length); if (ret) IPW_DEBUG_INFO("failed querying ordinals at line %d\n", __LINE__); out += sprintf(out, "ESSID: %s\n", essid); out += sprintf(out, "BSSID: %pM\n", bssid); out += sprintf(out, "Channel: %d\n", chan); return out - buf; } static DEVICE_ATTR(bssinfo, S_IRUGO, show_bssinfo, NULL); #ifdef CONFIG_IPW2100_DEBUG static ssize_t show_debug_level(struct device_driver *d, char *buf) { return sprintf(buf, "0x%08X\n", ipw2100_debug_level); } static ssize_t store_debug_level(struct device_driver *d, const char *buf, size_t count) { u32 val; int ret; ret = kstrtou32(buf, 0, &val); if (ret) IPW_DEBUG_INFO(": %s is not in hex or decimal form.\n", buf); else ipw2100_debug_level = val; return strnlen(buf, count); } static DRIVER_ATTR(debug_level, S_IWUSR | S_IRUGO, show_debug_level, store_debug_level); #endif /* CONFIG_IPW2100_DEBUG */ static ssize_t show_fatal_error(struct device *d, struct 
device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); char *out = buf; int i; if (priv->fatal_error) out += sprintf(out, "0x%08X\n", priv->fatal_error); else out += sprintf(out, "0\n"); for (i = 1; i <= IPW2100_ERROR_QUEUE; i++) { if (!priv->fatal_errors[(priv->fatal_index - i) % IPW2100_ERROR_QUEUE]) continue; out += sprintf(out, "%d. 0x%08X\n", i, priv->fatal_errors[(priv->fatal_index - i) % IPW2100_ERROR_QUEUE]); } return out - buf; } static ssize_t store_fatal_error(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw2100_priv *priv = dev_get_drvdata(d); schedule_reset(priv); return count; } static DEVICE_ATTR(fatal_error, S_IWUSR | S_IRUGO, show_fatal_error, store_fatal_error); static ssize_t show_scan_age(struct device *d, struct device_attribute *attr, char *buf) { struct ipw2100_priv *priv = dev_get_drvdata(d); return sprintf(buf, "%d\n", priv->ieee->scan_age); } static ssize_t store_scan_age(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw2100_priv *priv = dev_get_drvdata(d); struct net_device *dev = priv->net_dev; unsigned long val; int ret; (void)dev; /* kill unused-var warning for debug-only code */ IPW_DEBUG_INFO("enter\n"); ret = kstrtoul(buf, 0, &val); if (ret) { IPW_DEBUG_INFO("%s: user supplied invalid value.\n", dev->name); } else { priv->ieee->scan_age = val; IPW_DEBUG_INFO("set scan_age = %u\n", priv->ieee->scan_age); } IPW_DEBUG_INFO("exit\n"); return strnlen(buf, count); } static DEVICE_ATTR(scan_age, S_IWUSR | S_IRUGO, show_scan_age, store_scan_age); static ssize_t show_rf_kill(struct device *d, struct device_attribute *attr, char *buf) { /* 0 - RF kill not enabled 1 - SW based RF kill active (sysfs) 2 - HW based RF kill active 3 - Both HW and SW baed RF kill active */ struct ipw2100_priv *priv = dev_get_drvdata(d); int val = ((priv->status & STATUS_RF_KILL_SW) ? 0x1 : 0x0) | (rf_kill_active(priv) ? 
0x2 : 0x0); return sprintf(buf, "%i\n", val); } static int ipw_radio_kill_sw(struct ipw2100_priv *priv, int disable_radio) { if ((disable_radio ? 1 : 0) == (priv->status & STATUS_RF_KILL_SW ? 1 : 0)) return 0; IPW_DEBUG_RF_KILL("Manual SW RF Kill set to: RADIO %s\n", disable_radio ? "OFF" : "ON"); mutex_lock(&priv->action_mutex); if (disable_radio) { priv->status |= STATUS_RF_KILL_SW; ipw2100_down(priv); } else { priv->status &= ~STATUS_RF_KILL_SW; if (rf_kill_active(priv)) { IPW_DEBUG_RF_KILL("Can not turn radio back on - " "disabled by HW switch\n"); /* Make sure the RF_KILL check timer is running */ priv->stop_rf_kill = 0; mod_delayed_work(system_wq, &priv->rf_kill, round_jiffies_relative(HZ)); } else schedule_reset(priv); } mutex_unlock(&priv->action_mutex); return 1; } static ssize_t store_rf_kill(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct ipw2100_priv *priv = dev_get_drvdata(d); ipw_radio_kill_sw(priv, buf[0] == '1'); return count; } static DEVICE_ATTR(rf_kill, S_IWUSR | S_IRUGO, show_rf_kill, store_rf_kill); static struct attribute *ipw2100_sysfs_entries[] = { &dev_attr_hardware.attr, &dev_attr_registers.attr, &dev_attr_ordinals.attr, &dev_attr_pci.attr, &dev_attr_stats.attr, &dev_attr_internals.attr, &dev_attr_bssinfo.attr, &dev_attr_memory.attr, &dev_attr_scan_age.attr, &dev_attr_fatal_error.attr, &dev_attr_rf_kill.attr, &dev_attr_cfg.attr, &dev_attr_status.attr, &dev_attr_capability.attr, NULL, }; static struct attribute_group ipw2100_attribute_group = { .attrs = ipw2100_sysfs_entries, }; static int status_queue_allocate(struct ipw2100_priv *priv, int entries) { struct ipw2100_status_queue *q = &priv->status_queue; IPW_DEBUG_INFO("enter\n"); q->size = entries * sizeof(struct ipw2100_status); q->drv = (struct ipw2100_status *)pci_alloc_consistent(priv->pci_dev, q->size, &q->nic); if (!q->drv) { IPW_DEBUG_WARNING("Can not allocate status queue.\n"); return -ENOMEM; } memset(q->drv, 0, q->size); 
IPW_DEBUG_INFO("exit\n"); return 0; } static void status_queue_free(struct ipw2100_priv *priv) { IPW_DEBUG_INFO("enter\n"); if (priv->status_queue.drv) { pci_free_consistent(priv->pci_dev, priv->status_queue.size, priv->status_queue.drv, priv->status_queue.nic); priv->status_queue.drv = NULL; } IPW_DEBUG_INFO("exit\n"); } static int bd_queue_allocate(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q, int entries) { IPW_DEBUG_INFO("enter\n"); memset(q, 0, sizeof(struct ipw2100_bd_queue)); q->entries = entries; q->size = entries * sizeof(struct ipw2100_bd); q->drv = pci_alloc_consistent(priv->pci_dev, q->size, &q->nic); if (!q->drv) { IPW_DEBUG_INFO ("can't allocate shared memory for buffer descriptors\n"); return -ENOMEM; } memset(q->drv, 0, q->size); IPW_DEBUG_INFO("exit\n"); return 0; } static void bd_queue_free(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q) { IPW_DEBUG_INFO("enter\n"); if (!q) return; if (q->drv) { pci_free_consistent(priv->pci_dev, q->size, q->drv, q->nic); q->drv = NULL; } IPW_DEBUG_INFO("exit\n"); } static void bd_queue_initialize(struct ipw2100_priv *priv, struct ipw2100_bd_queue *q, u32 base, u32 size, u32 r, u32 w) { IPW_DEBUG_INFO("enter\n"); IPW_DEBUG_INFO("initializing bd queue at virt=%p, phys=%08x\n", q->drv, (u32) q->nic); write_register(priv->net_dev, base, q->nic); write_register(priv->net_dev, size, q->entries); write_register(priv->net_dev, r, q->oldest); write_register(priv->net_dev, w, q->next); IPW_DEBUG_INFO("exit\n"); } static void ipw2100_kill_works(struct ipw2100_priv *priv) { priv->stop_rf_kill = 1; priv->stop_hang_check = 1; cancel_delayed_work_sync(&priv->reset_work); cancel_delayed_work_sync(&priv->security_work); cancel_delayed_work_sync(&priv->wx_event_work); cancel_delayed_work_sync(&priv->hang_check); cancel_delayed_work_sync(&priv->rf_kill); cancel_delayed_work_sync(&priv->scan_event); } static int ipw2100_tx_allocate(struct ipw2100_priv *priv) { int i, j, err = -EINVAL; void *v; dma_addr_t p; 
IPW_DEBUG_INFO("enter\n"); err = bd_queue_allocate(priv, &priv->tx_queue, TX_QUEUE_LENGTH); if (err) { IPW_DEBUG_ERROR("%s: failed bd_queue_allocate\n", priv->net_dev->name); return err; } priv->tx_buffers = kmalloc_array(TX_PENDED_QUEUE_LENGTH, sizeof(struct ipw2100_tx_packet), GFP_ATOMIC); if (!priv->tx_buffers) { bd_queue_free(priv, &priv->tx_queue); return -ENOMEM; } for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) { v = pci_alloc_consistent(priv->pci_dev, sizeof(struct ipw2100_data_header), &p); if (!v) { printk(KERN_ERR DRV_NAME ": %s: PCI alloc failed for tx " "buffers.\n", priv->net_dev->name); err = -ENOMEM; break; } priv->tx_buffers[i].type = DATA; priv->tx_buffers[i].info.d_struct.data = (struct ipw2100_data_header *)v; priv->tx_buffers[i].info.d_struct.data_phys = p; priv->tx_buffers[i].info.d_struct.txb = NULL; } if (i == TX_PENDED_QUEUE_LENGTH) return 0; for (j = 0; j < i; j++) { pci_free_consistent(priv->pci_dev, sizeof(struct ipw2100_data_header), priv->tx_buffers[j].info.d_struct.data, priv->tx_buffers[j].info.d_struct. data_phys); } kfree(priv->tx_buffers); priv->tx_buffers = NULL; return err; } static void ipw2100_tx_initialize(struct ipw2100_priv *priv) { int i; IPW_DEBUG_INFO("enter\n"); /* * reinitialize packet info lists */ INIT_LIST_HEAD(&priv->fw_pend_list); INIT_STAT(&priv->fw_pend_stat); /* * reinitialize lists */ INIT_LIST_HEAD(&priv->tx_pend_list); INIT_LIST_HEAD(&priv->tx_free_list); INIT_STAT(&priv->tx_pend_stat); INIT_STAT(&priv->tx_free_stat); for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) { /* We simply drop any SKBs that have been queued for * transmit */ if (priv->tx_buffers[i].info.d_struct.txb) { libipw_txb_free(priv->tx_buffers[i].info.d_struct. 
txb); priv->tx_buffers[i].info.d_struct.txb = NULL; } list_add_tail(&priv->tx_buffers[i].list, &priv->tx_free_list); } SET_STAT(&priv->tx_free_stat, i); priv->tx_queue.oldest = 0; priv->tx_queue.available = priv->tx_queue.entries; priv->tx_queue.next = 0; INIT_STAT(&priv->txq_stat); SET_STAT(&priv->txq_stat, priv->tx_queue.available); bd_queue_initialize(priv, &priv->tx_queue, IPW_MEM_HOST_SHARED_TX_QUEUE_BD_BASE, IPW_MEM_HOST_SHARED_TX_QUEUE_BD_SIZE, IPW_MEM_HOST_SHARED_TX_QUEUE_READ_INDEX, IPW_MEM_HOST_SHARED_TX_QUEUE_WRITE_INDEX); IPW_DEBUG_INFO("exit\n"); } static void ipw2100_tx_free(struct ipw2100_priv *priv) { int i; IPW_DEBUG_INFO("enter\n"); bd_queue_free(priv, &priv->tx_queue); if (!priv->tx_buffers) return; for (i = 0; i < TX_PENDED_QUEUE_LENGTH; i++) { if (priv->tx_buffers[i].info.d_struct.txb) { libipw_txb_free(priv->tx_buffers[i].info.d_struct. txb); priv->tx_buffers[i].info.d_struct.txb = NULL; } if (priv->tx_buffers[i].info.d_struct.data) pci_free_consistent(priv->pci_dev, sizeof(struct ipw2100_data_header), priv->tx_buffers[i].info.d_struct. data, priv->tx_buffers[i].info.d_struct. 
data_phys); } kfree(priv->tx_buffers); priv->tx_buffers = NULL; IPW_DEBUG_INFO("exit\n"); } static int ipw2100_rx_allocate(struct ipw2100_priv *priv) { int i, j, err = -EINVAL; IPW_DEBUG_INFO("enter\n"); err = bd_queue_allocate(priv, &priv->rx_queue, RX_QUEUE_LENGTH); if (err) { IPW_DEBUG_INFO("failed bd_queue_allocate\n"); return err; } err = status_queue_allocate(priv, RX_QUEUE_LENGTH); if (err) { IPW_DEBUG_INFO("failed status_queue_allocate\n"); bd_queue_free(priv, &priv->rx_queue); return err; } /* * allocate packets */ priv->rx_buffers = kmalloc(RX_QUEUE_LENGTH * sizeof(struct ipw2100_rx_packet), GFP_KERNEL); if (!priv->rx_buffers) { IPW_DEBUG_INFO("can't allocate rx packet buffer table\n"); bd_queue_free(priv, &priv->rx_queue); status_queue_free(priv); return -ENOMEM; } for (i = 0; i < RX_QUEUE_LENGTH; i++) { struct ipw2100_rx_packet *packet = &priv->rx_buffers[i]; err = ipw2100_alloc_skb(priv, packet); if (unlikely(err)) { err = -ENOMEM; break; } /* The BD holds the cache aligned address */ priv->rx_queue.drv[i].host_addr = packet->dma_addr; priv->rx_queue.drv[i].buf_length = IPW_RX_NIC_BUFFER_LENGTH; priv->status_queue.drv[i].status_fields = 0; } if (i == RX_QUEUE_LENGTH) return 0; for (j = 0; j < i; j++) { pci_unmap_single(priv->pci_dev, priv->rx_buffers[j].dma_addr, sizeof(struct ipw2100_rx_packet), PCI_DMA_FROMDEVICE); dev_kfree_skb(priv->rx_buffers[j].skb); } kfree(priv->rx_buffers); priv->rx_buffers = NULL; bd_queue_free(priv, &priv->rx_queue); status_queue_free(priv); return err; } static void ipw2100_rx_initialize(struct ipw2100_priv *priv) { IPW_DEBUG_INFO("enter\n"); priv->rx_queue.oldest = 0; priv->rx_queue.available = priv->rx_queue.entries - 1; priv->rx_queue.next = priv->rx_queue.entries - 1; INIT_STAT(&priv->rxq_stat); SET_STAT(&priv->rxq_stat, priv->rx_queue.available); bd_queue_initialize(priv, &priv->rx_queue, IPW_MEM_HOST_SHARED_RX_BD_BASE, IPW_MEM_HOST_SHARED_RX_BD_SIZE, IPW_MEM_HOST_SHARED_RX_READ_INDEX, 
IPW_MEM_HOST_SHARED_RX_WRITE_INDEX); /* set up the status queue */ write_register(priv->net_dev, IPW_MEM_HOST_SHARED_RX_STATUS_BASE, priv->status_queue.nic); IPW_DEBUG_INFO("exit\n"); } static void ipw2100_rx_free(struct ipw2100_priv *priv) { int i; IPW_DEBUG_INFO("enter\n"); bd_queue_free(priv, &priv->rx_queue); status_queue_free(priv); if (!priv->rx_buffers) return; for (i = 0; i < RX_QUEUE_LENGTH; i++) { if (priv->rx_buffers[i].rxp) { pci_unmap_single(priv->pci_dev, priv->rx_buffers[i].dma_addr, sizeof(struct ipw2100_rx), PCI_DMA_FROMDEVICE); dev_kfree_skb(priv->rx_buffers[i].skb); } } kfree(priv->rx_buffers); priv->rx_buffers = NULL; IPW_DEBUG_INFO("exit\n"); } static int ipw2100_read_mac_address(struct ipw2100_priv *priv) { u32 length = ETH_ALEN; u8 addr[ETH_ALEN]; int err; err = ipw2100_get_ordinal(priv, IPW_ORD_STAT_ADAPTER_MAC, addr, &length); if (err) { IPW_DEBUG_INFO("MAC address read failed\n"); return -EIO; } memcpy(priv->net_dev->dev_addr, addr, ETH_ALEN); IPW_DEBUG_INFO("card MAC is %pM\n", priv->net_dev->dev_addr); return 0; } /******************************************************************** * * Firmware Commands * ********************************************************************/ static int ipw2100_set_mac_address(struct ipw2100_priv *priv, int batch_mode) { struct host_command cmd = { .host_command = ADAPTER_ADDRESS, .host_command_sequence = 0, .host_command_length = ETH_ALEN }; int err; IPW_DEBUG_HC("SET_MAC_ADDRESS\n"); IPW_DEBUG_INFO("enter\n"); if (priv->config & CFG_CUSTOM_MAC) { memcpy(cmd.host_command_parameters, priv->mac_addr, ETH_ALEN); memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); } else memcpy(cmd.host_command_parameters, priv->net_dev->dev_addr, ETH_ALEN); err = ipw2100_hw_send_command(priv, &cmd); IPW_DEBUG_INFO("exit\n"); return err; } static int ipw2100_set_port_type(struct ipw2100_priv *priv, u32 port_type, int batch_mode) { struct host_command cmd = { .host_command = PORT_TYPE, .host_command_sequence = 0, 
.host_command_length = sizeof(u32) }; int err; switch (port_type) { case IW_MODE_INFRA: cmd.host_command_parameters[0] = IPW_BSS; break; case IW_MODE_ADHOC: cmd.host_command_parameters[0] = IPW_IBSS; break; } IPW_DEBUG_HC("PORT_TYPE: %s\n", port_type == IPW_IBSS ? "Ad-Hoc" : "Managed"); if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) { printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n", priv->net_dev->name, err); return err; } } /* send cmd to firmware */ err = ipw2100_hw_send_command(priv, &cmd); if (!batch_mode) ipw2100_enable_adapter(priv); return err; } static int ipw2100_set_channel(struct ipw2100_priv *priv, u32 channel, int batch_mode) { struct host_command cmd = { .host_command = CHANNEL, .host_command_sequence = 0, .host_command_length = sizeof(u32) }; int err; cmd.host_command_parameters[0] = channel; IPW_DEBUG_HC("CHANNEL: %d\n", channel); /* If BSS then we don't support channel selection */ if (priv->ieee->iw_mode == IW_MODE_INFRA) return 0; if ((channel != 0) && ((channel < REG_MIN_CHANNEL) || (channel > REG_MAX_CHANNEL))) return -EINVAL; if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) return err; } err = ipw2100_hw_send_command(priv, &cmd); if (err) { IPW_DEBUG_INFO("Failed to set channel to %d", channel); return err; } if (channel) priv->config |= CFG_STATIC_CHANNEL; else priv->config &= ~CFG_STATIC_CHANNEL; priv->channel = channel; if (!batch_mode) { err = ipw2100_enable_adapter(priv); if (err) return err; } return 0; } static int ipw2100_system_config(struct ipw2100_priv *priv, int batch_mode) { struct host_command cmd = { .host_command = SYSTEM_CONFIG, .host_command_sequence = 0, .host_command_length = 12, }; u32 ibss_mask, len = sizeof(u32); int err; /* Set system configuration */ if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) return err; } if (priv->ieee->iw_mode == IW_MODE_ADHOC) cmd.host_command_parameters[0] |= IPW_CFG_IBSS_AUTO_START; cmd.host_command_parameters[0] |= 
IPW_CFG_IBSS_MASK | IPW_CFG_BSS_MASK | IPW_CFG_802_1x_ENABLE; if (!(priv->config & CFG_LONG_PREAMBLE)) cmd.host_command_parameters[0] |= IPW_CFG_PREAMBLE_AUTO; err = ipw2100_get_ordinal(priv, IPW_ORD_EEPROM_IBSS_11B_CHANNELS, &ibss_mask, &len); if (err) ibss_mask = IPW_IBSS_11B_DEFAULT_MASK; cmd.host_command_parameters[1] = REG_CHANNEL_MASK; cmd.host_command_parameters[2] = REG_CHANNEL_MASK & ibss_mask; /* 11b only */ /*cmd.host_command_parameters[0] |= DIVERSITY_ANTENNA_A; */ err = ipw2100_hw_send_command(priv, &cmd); if (err) return err; /* If IPv6 is configured in the kernel then we don't want to filter out all * of the multicast packets as IPv6 needs some. */ #if !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) cmd.host_command = ADD_MULTICAST; cmd.host_command_sequence = 0; cmd.host_command_length = 0; ipw2100_hw_send_command(priv, &cmd); #endif if (!batch_mode) { err = ipw2100_enable_adapter(priv); if (err) return err; } return 0; } static int ipw2100_set_tx_rates(struct ipw2100_priv *priv, u32 rate, int batch_mode) { struct host_command cmd = { .host_command = BASIC_TX_RATES, .host_command_sequence = 0, .host_command_length = 4 }; int err; cmd.host_command_parameters[0] = rate & TX_RATE_MASK; if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) return err; } /* Set BASIC TX Rate first */ ipw2100_hw_send_command(priv, &cmd); /* Set TX Rate */ cmd.host_command = TX_RATES; ipw2100_hw_send_command(priv, &cmd); /* Set MSDU TX Rate */ cmd.host_command = MSDU_TX_RATES; ipw2100_hw_send_command(priv, &cmd); if (!batch_mode) { err = ipw2100_enable_adapter(priv); if (err) return err; } priv->tx_rates = rate; return 0; } static int ipw2100_set_power_mode(struct ipw2100_priv *priv, int power_level) { struct host_command cmd = { .host_command = POWER_MODE, .host_command_sequence = 0, .host_command_length = 4 }; int err; cmd.host_command_parameters[0] = power_level; err = ipw2100_hw_send_command(priv, &cmd); if (err) return err; if (power_level == 
	    IPW_POWER_MODE_CAM)
		priv->power_mode = IPW_POWER_LEVEL(priv->power_mode);
	else
		priv->power_mode = IPW_POWER_ENABLED | power_level;

#ifdef IPW2100_TX_POWER
	if (priv->port_type == IBSS && priv->adhoc_power != DFTL_IBSS_TX_POWER) {
		/* Set beacon interval */
		cmd.host_command = TX_POWER_INDEX;
		cmd.host_command_parameters[0] = (u32) priv->adhoc_power;

		err = ipw2100_hw_send_command(priv, &cmd);
		if (err)
			return err;
	}
#endif

	return 0;
}

/* Send the RTS_THRESHOLD host command.  A 'threshold' with the
 * RTS_DISABLED flag set is sent as MAX_RTS_THRESHOLD (RTS effectively
 * off); the caller's raw value, flag included, is what gets cached in
 * priv->rts_threshold. */
static int ipw2100_set_rts_threshold(struct ipw2100_priv *priv, u32 threshold)
{
	struct host_command cmd = {
		.host_command = RTS_THRESHOLD,
		.host_command_sequence = 0,
		.host_command_length = 4
	};
	int err;

	if (threshold & RTS_DISABLED)
		cmd.host_command_parameters[0] = MAX_RTS_THRESHOLD;
	else
		cmd.host_command_parameters[0] = threshold & ~RTS_DISABLED;

	err = ipw2100_hw_send_command(priv, &cmd);
	if (err)
		return err;

	priv->rts_threshold = threshold;

	return 0;
}

#if 0
/* Compiled-out: FRAG_THRESHOLD host command with the value clamped to
 * [MIN_FRAG_THRESHOLD, MAX_FRAG_THRESHOLD] (0 selects the default). */
int ipw2100_set_fragmentation_threshold(struct ipw2100_priv *priv,
					u32 threshold, int batch_mode)
{
	struct host_command cmd = {
		.host_command = FRAG_THRESHOLD,
		.host_command_sequence = 0,
		.host_command_length = 4,
		.host_command_parameters[0] = 0,
	};
	int err;

	if (!batch_mode) {
		err = ipw2100_disable_adapter(priv);
		if (err)
			return err;
	}

	if (threshold == 0)
		threshold = DEFAULT_FRAG_THRESHOLD;
	else {
		threshold = max(threshold, MIN_FRAG_THRESHOLD);
		threshold = min(threshold, MAX_FRAG_THRESHOLD);
	}

	cmd.host_command_parameters[0] = threshold;

	IPW_DEBUG_HC("FRAG_THRESHOLD: %u\n", threshold);

	err = ipw2100_hw_send_command(priv, &cmd);

	if (!batch_mode)
		ipw2100_enable_adapter(priv);

	if (!err)
		priv->frag_threshold = threshold;

	return err;
}
#endif

/* Send the SHORT_RETRY_LIMIT host command and cache the new limit. */
static int ipw2100_set_short_retry(struct ipw2100_priv *priv, u32 retry)
{
	struct host_command cmd = {
		.host_command = SHORT_RETRY_LIMIT,
		.host_command_sequence = 0,
		.host_command_length = 4
	};
	int err;

	cmd.host_command_parameters[0] = retry;

	err = ipw2100_hw_send_command(priv, &cmd);
	if (err)
		return err;

	priv->short_retry_limit = retry;

	return 0;
}
static int ipw2100_set_long_retry(struct ipw2100_priv *priv, u32 retry) { struct host_command cmd = { .host_command = LONG_RETRY_LIMIT, .host_command_sequence = 0, .host_command_length = 4 }; int err; cmd.host_command_parameters[0] = retry; err = ipw2100_hw_send_command(priv, &cmd); if (err) return err; priv->long_retry_limit = retry; return 0; } static int ipw2100_set_mandatory_bssid(struct ipw2100_priv *priv, u8 * bssid, int batch_mode) { struct host_command cmd = { .host_command = MANDATORY_BSSID, .host_command_sequence = 0, .host_command_length = (bssid == NULL) ? 0 : ETH_ALEN }; int err; #ifdef CONFIG_IPW2100_DEBUG if (bssid != NULL) IPW_DEBUG_HC("MANDATORY_BSSID: %pM\n", bssid); else IPW_DEBUG_HC("MANDATORY_BSSID: <clear>\n"); #endif /* if BSSID is empty then we disable mandatory bssid mode */ if (bssid != NULL) memcpy(cmd.host_command_parameters, bssid, ETH_ALEN); if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) return err; } err = ipw2100_hw_send_command(priv, &cmd); if (!batch_mode) ipw2100_enable_adapter(priv); return err; } static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv) { struct host_command cmd = { .host_command = DISASSOCIATION_BSSID, .host_command_sequence = 0, .host_command_length = ETH_ALEN }; int err; int len; IPW_DEBUG_HC("DISASSOCIATION_BSSID\n"); len = ETH_ALEN; /* The Firmware currently ignores the BSSID and just disassociates from * the currently associated AP -- but in the off chance that a future * firmware does use the BSSID provided here, we go ahead and try and * set it to the currently associated AP's BSSID */ memcpy(cmd.host_command_parameters, priv->bssid, ETH_ALEN); err = ipw2100_hw_send_command(priv, &cmd); return err; } static int ipw2100_set_wpa_ie(struct ipw2100_priv *, struct ipw2100_wpa_assoc_frame *, int) __attribute__ ((unused)); static int ipw2100_set_wpa_ie(struct ipw2100_priv *priv, struct ipw2100_wpa_assoc_frame *wpa_frame, int batch_mode) { struct host_command cmd = { .host_command = 
SET_WPA_IE, .host_command_sequence = 0, .host_command_length = sizeof(struct ipw2100_wpa_assoc_frame), }; int err; IPW_DEBUG_HC("SET_WPA_IE\n"); if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) return err; } memcpy(cmd.host_command_parameters, wpa_frame, sizeof(struct ipw2100_wpa_assoc_frame)); err = ipw2100_hw_send_command(priv, &cmd); if (!batch_mode) { if (ipw2100_enable_adapter(priv)) err = -EIO; } return err; } struct security_info_params { u32 allowed_ciphers; u16 version; u8 auth_mode; u8 replay_counters_number; u8 unicast_using_group; } __packed; static int ipw2100_set_security_information(struct ipw2100_priv *priv, int auth_mode, int security_level, int unicast_using_group, int batch_mode) { struct host_command cmd = { .host_command = SET_SECURITY_INFORMATION, .host_command_sequence = 0, .host_command_length = sizeof(struct security_info_params) }; struct security_info_params *security = (struct security_info_params *)&cmd.host_command_parameters; int err; memset(security, 0, sizeof(*security)); /* If shared key AP authentication is turned on, then we need to * configure the firmware to try and use it. * * Actual data encryption/decryption is handled by the host. 
*/ security->auth_mode = auth_mode; security->unicast_using_group = unicast_using_group; switch (security_level) { default: case SEC_LEVEL_0: security->allowed_ciphers = IPW_NONE_CIPHER; break; case SEC_LEVEL_1: security->allowed_ciphers = IPW_WEP40_CIPHER | IPW_WEP104_CIPHER; break; case SEC_LEVEL_2: security->allowed_ciphers = IPW_WEP40_CIPHER | IPW_WEP104_CIPHER | IPW_TKIP_CIPHER; break; case SEC_LEVEL_2_CKIP: security->allowed_ciphers = IPW_WEP40_CIPHER | IPW_WEP104_CIPHER | IPW_CKIP_CIPHER; break; case SEC_LEVEL_3: security->allowed_ciphers = IPW_WEP40_CIPHER | IPW_WEP104_CIPHER | IPW_TKIP_CIPHER | IPW_CCMP_CIPHER; break; } IPW_DEBUG_HC ("SET_SECURITY_INFORMATION: auth:%d cipher:0x%02X (level %d)\n", security->auth_mode, security->allowed_ciphers, security_level); security->replay_counters_number = 0; if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) return err; } err = ipw2100_hw_send_command(priv, &cmd); if (!batch_mode) ipw2100_enable_adapter(priv); return err; } static int ipw2100_set_tx_power(struct ipw2100_priv *priv, u32 tx_power) { struct host_command cmd = { .host_command = TX_POWER_INDEX, .host_command_sequence = 0, .host_command_length = 4 }; int err = 0; u32 tmp = tx_power; if (tx_power != IPW_TX_POWER_DEFAULT) tmp = (tx_power - IPW_TX_POWER_MIN_DBM) * 16 / (IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM); cmd.host_command_parameters[0] = tmp; if (priv->ieee->iw_mode == IW_MODE_ADHOC) err = ipw2100_hw_send_command(priv, &cmd); if (!err) priv->tx_power = tx_power; return 0; } static int ipw2100_set_ibss_beacon_interval(struct ipw2100_priv *priv, u32 interval, int batch_mode) { struct host_command cmd = { .host_command = BEACON_INTERVAL, .host_command_sequence = 0, .host_command_length = 4 }; int err; cmd.host_command_parameters[0] = interval; IPW_DEBUG_INFO("enter\n"); if (priv->ieee->iw_mode == IW_MODE_ADHOC) { if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) return err; } ipw2100_hw_send_command(priv, &cmd); if 
(!batch_mode) { err = ipw2100_enable_adapter(priv); if (err) return err; } } IPW_DEBUG_INFO("exit\n"); return 0; } static void ipw2100_queues_initialize(struct ipw2100_priv *priv) { ipw2100_tx_initialize(priv); ipw2100_rx_initialize(priv); ipw2100_msg_initialize(priv); } static void ipw2100_queues_free(struct ipw2100_priv *priv) { ipw2100_tx_free(priv); ipw2100_rx_free(priv); ipw2100_msg_free(priv); } static int ipw2100_queues_allocate(struct ipw2100_priv *priv) { if (ipw2100_tx_allocate(priv) || ipw2100_rx_allocate(priv) || ipw2100_msg_allocate(priv)) goto fail; return 0; fail: ipw2100_tx_free(priv); ipw2100_rx_free(priv); ipw2100_msg_free(priv); return -ENOMEM; } #define IPW_PRIVACY_CAPABLE 0x0008 static int ipw2100_set_wep_flags(struct ipw2100_priv *priv, u32 flags, int batch_mode) { struct host_command cmd = { .host_command = WEP_FLAGS, .host_command_sequence = 0, .host_command_length = 4 }; int err; cmd.host_command_parameters[0] = flags; IPW_DEBUG_HC("WEP_FLAGS: flags = 0x%08X\n", flags); if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) { printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n", priv->net_dev->name, err); return err; } } /* send cmd to firmware */ err = ipw2100_hw_send_command(priv, &cmd); if (!batch_mode) ipw2100_enable_adapter(priv); return err; } struct ipw2100_wep_key { u8 idx; u8 len; u8 key[13]; }; /* Macros to ease up priting WEP keys */ #define WEP_FMT_64 "%02X%02X%02X%02X-%02X" #define WEP_FMT_128 "%02X%02X%02X%02X-%02X%02X%02X%02X-%02X%02X%02X" #define WEP_STR_64(x) x[0],x[1],x[2],x[3],x[4] #define WEP_STR_128(x) x[0],x[1],x[2],x[3],x[4],x[5],x[6],x[7],x[8],x[9],x[10] /** * Set a the wep key * * @priv: struct to work on * @idx: index of the key we want to set * @key: ptr to the key data to set * @len: length of the buffer at @key * @batch_mode: FIXME perform the operation in batch mode, not * disabling the device. * * @returns 0 if OK, < 0 errno code on error. 
* * Fill out a command structure with the new wep key, length an * index and send it down the wire. */ static int ipw2100_set_key(struct ipw2100_priv *priv, int idx, char *key, int len, int batch_mode) { int keylen = len ? (len <= 5 ? 5 : 13) : 0; struct host_command cmd = { .host_command = WEP_KEY_INFO, .host_command_sequence = 0, .host_command_length = sizeof(struct ipw2100_wep_key), }; struct ipw2100_wep_key *wep_key = (void *)cmd.host_command_parameters; int err; IPW_DEBUG_HC("WEP_KEY_INFO: index = %d, len = %d/%d\n", idx, keylen, len); /* NOTE: We don't check cached values in case the firmware was reset * or some other problem is occurring. If the user is setting the key, * then we push the change */ wep_key->idx = idx; wep_key->len = keylen; if (keylen) { memcpy(wep_key->key, key, len); memset(wep_key->key + len, 0, keylen - len); } /* Will be optimized out on debug not being configured in */ if (keylen == 0) IPW_DEBUG_WEP("%s: Clearing key %d\n", priv->net_dev->name, wep_key->idx); else if (keylen == 5) IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_64 "\n", priv->net_dev->name, wep_key->idx, wep_key->len, WEP_STR_64(wep_key->key)); else IPW_DEBUG_WEP("%s: idx: %d, len: %d key: " WEP_FMT_128 "\n", priv->net_dev->name, wep_key->idx, wep_key->len, WEP_STR_128(wep_key->key)); if (!batch_mode) { err = ipw2100_disable_adapter(priv); /* FIXME: IPG: shouldn't this prink be in _disable_adapter()? 
*/ if (err) { printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n", priv->net_dev->name, err); return err; } } /* send cmd to firmware */ err = ipw2100_hw_send_command(priv, &cmd); if (!batch_mode) { int err2 = ipw2100_enable_adapter(priv); if (err == 0) err = err2; } return err; } static int ipw2100_set_key_index(struct ipw2100_priv *priv, int idx, int batch_mode) { struct host_command cmd = { .host_command = WEP_KEY_INDEX, .host_command_sequence = 0, .host_command_length = 4, .host_command_parameters = {idx}, }; int err; IPW_DEBUG_HC("WEP_KEY_INDEX: index = %d\n", idx); if (idx < 0 || idx > 3) return -EINVAL; if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) { printk(KERN_ERR DRV_NAME ": %s: Could not disable adapter %d\n", priv->net_dev->name, err); return err; } } /* send cmd to firmware */ err = ipw2100_hw_send_command(priv, &cmd); if (!batch_mode) ipw2100_enable_adapter(priv); return err; } static int ipw2100_configure_security(struct ipw2100_priv *priv, int batch_mode) { int i, err, auth_mode, sec_level, use_group; if (!(priv->status & STATUS_RUNNING)) return 0; if (!batch_mode) { err = ipw2100_disable_adapter(priv); if (err) return err; } if (!priv->ieee->sec.enabled) { err = ipw2100_set_security_information(priv, IPW_AUTH_OPEN, SEC_LEVEL_0, 0, 1); } else { auth_mode = IPW_AUTH_OPEN; if (priv->ieee->sec.flags & SEC_AUTH_MODE) { if (priv->ieee->sec.auth_mode == WLAN_AUTH_SHARED_KEY) auth_mode = IPW_AUTH_SHARED; else if (priv->ieee->sec.auth_mode == WLAN_AUTH_LEAP) auth_mode = IPW_AUTH_LEAP_CISCO_ID; } sec_level = SEC_LEVEL_0; if (priv->ieee->sec.flags & SEC_LEVEL) sec_level = priv->ieee->sec.level; use_group = 0; if (priv->ieee->sec.flags & SEC_UNICAST_GROUP) use_group = priv->ieee->sec.unicast_uses_group; err = ipw2100_set_security_information(priv, auth_mode, sec_level, use_group, 1); } if (err) goto exit; if (priv->ieee->sec.enabled) { for (i = 0; i < 4; i++) { if (!(priv->ieee->sec.flags & (1 << i))) { 
memset(priv->ieee->sec.keys[i], 0, WEP_KEY_LEN); priv->ieee->sec.key_sizes[i] = 0; } else { err = ipw2100_set_key(priv, i, priv->ieee->sec.keys[i], priv->ieee->sec. key_sizes[i], 1); if (err) goto exit; } } ipw2100_set_key_index(priv, priv->ieee->crypt_info.tx_keyidx, 1); } /* Always enable privacy so the Host can filter WEP packets if * encrypted data is sent up */ err = ipw2100_set_wep_flags(priv, priv->ieee->sec. enabled ? IPW_PRIVACY_CAPABLE : 0, 1); if (err) goto exit; priv->status &= ~STATUS_SECURITY_UPDATED; exit: if (!batch_mode) ipw2100_enable_adapter(priv); return err; } static void ipw2100_security_work(struct work_struct *work) { struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv, security_work.work); /* If we happen to have reconnected before we get a chance to * process this, then update the security settings--which causes * a disassociation to occur */ if (!(priv->status & STATUS_ASSOCIATED) && priv->status & STATUS_SECURITY_UPDATED) ipw2100_configure_security(priv, 0); } static void shim__set_security(struct net_device *dev, struct libipw_security *sec) { struct ipw2100_priv *priv = libipw_priv(dev); int i, force_update = 0; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) goto done; for (i = 0; i < 4; i++) { if (sec->flags & (1 << i)) { priv->ieee->sec.key_sizes[i] = sec->key_sizes[i]; if (sec->key_sizes[i] == 0) priv->ieee->sec.flags &= ~(1 << i); else memcpy(priv->ieee->sec.keys[i], sec->keys[i], sec->key_sizes[i]); if (sec->level == SEC_LEVEL_1) { priv->ieee->sec.flags |= (1 << i); priv->status |= STATUS_SECURITY_UPDATED; } else priv->ieee->sec.flags &= ~(1 << i); } } if ((sec->flags & SEC_ACTIVE_KEY) && priv->ieee->sec.active_key != sec->active_key) { if (sec->active_key <= 3) { priv->ieee->sec.active_key = sec->active_key; priv->ieee->sec.flags |= SEC_ACTIVE_KEY; } else priv->ieee->sec.flags &= ~SEC_ACTIVE_KEY; priv->status |= STATUS_SECURITY_UPDATED; } if ((sec->flags & SEC_AUTH_MODE) && 
(priv->ieee->sec.auth_mode != sec->auth_mode)) { priv->ieee->sec.auth_mode = sec->auth_mode; priv->ieee->sec.flags |= SEC_AUTH_MODE; priv->status |= STATUS_SECURITY_UPDATED; } if (sec->flags & SEC_ENABLED && priv->ieee->sec.enabled != sec->enabled) { priv->ieee->sec.flags |= SEC_ENABLED; priv->ieee->sec.enabled = sec->enabled; priv->status |= STATUS_SECURITY_UPDATED; force_update = 1; } if (sec->flags & SEC_ENCRYPT) priv->ieee->sec.encrypt = sec->encrypt; if (sec->flags & SEC_LEVEL && priv->ieee->sec.level != sec->level) { priv->ieee->sec.level = sec->level; priv->ieee->sec.flags |= SEC_LEVEL; priv->status |= STATUS_SECURITY_UPDATED; } IPW_DEBUG_WEP("Security flags: %c %c%c%c%c %c%c%c%c\n", priv->ieee->sec.flags & (1 << 8) ? '1' : '0', priv->ieee->sec.flags & (1 << 7) ? '1' : '0', priv->ieee->sec.flags & (1 << 6) ? '1' : '0', priv->ieee->sec.flags & (1 << 5) ? '1' : '0', priv->ieee->sec.flags & (1 << 4) ? '1' : '0', priv->ieee->sec.flags & (1 << 3) ? '1' : '0', priv->ieee->sec.flags & (1 << 2) ? '1' : '0', priv->ieee->sec.flags & (1 << 1) ? '1' : '0', priv->ieee->sec.flags & (1 << 0) ? '1' : '0'); /* As a temporary work around to enable WPA until we figure out why * wpa_supplicant toggles the security capability of the driver, which * forces a disassocation with force_update... 
* * if (force_update || !(priv->status & STATUS_ASSOCIATED))*/ if (!(priv->status & (STATUS_ASSOCIATED | STATUS_ASSOCIATING))) ipw2100_configure_security(priv, 0); done: mutex_unlock(&priv->action_mutex); } static int ipw2100_adapter_setup(struct ipw2100_priv *priv) { int err; int batch_mode = 1; u8 *bssid; IPW_DEBUG_INFO("enter\n"); err = ipw2100_disable_adapter(priv); if (err) return err; #ifdef CONFIG_IPW2100_MONITOR if (priv->ieee->iw_mode == IW_MODE_MONITOR) { err = ipw2100_set_channel(priv, priv->channel, batch_mode); if (err) return err; IPW_DEBUG_INFO("exit\n"); return 0; } #endif /* CONFIG_IPW2100_MONITOR */ err = ipw2100_read_mac_address(priv); if (err) return -EIO; err = ipw2100_set_mac_address(priv, batch_mode); if (err) return err; err = ipw2100_set_port_type(priv, priv->ieee->iw_mode, batch_mode); if (err) return err; if (priv->ieee->iw_mode == IW_MODE_ADHOC) { err = ipw2100_set_channel(priv, priv->channel, batch_mode); if (err) return err; } err = ipw2100_system_config(priv, batch_mode); if (err) return err; err = ipw2100_set_tx_rates(priv, priv->tx_rates, batch_mode); if (err) return err; /* Default to power mode OFF */ err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM); if (err) return err; err = ipw2100_set_rts_threshold(priv, priv->rts_threshold); if (err) return err; if (priv->config & CFG_STATIC_BSSID) bssid = priv->bssid; else bssid = NULL; err = ipw2100_set_mandatory_bssid(priv, bssid, batch_mode); if (err) return err; if (priv->config & CFG_STATIC_ESSID) err = ipw2100_set_essid(priv, priv->essid, priv->essid_len, batch_mode); else err = ipw2100_set_essid(priv, NULL, 0, batch_mode); if (err) return err; err = ipw2100_configure_security(priv, batch_mode); if (err) return err; if (priv->ieee->iw_mode == IW_MODE_ADHOC) { err = ipw2100_set_ibss_beacon_interval(priv, priv->beacon_interval, batch_mode); if (err) return err; err = ipw2100_set_tx_power(priv, priv->tx_power); if (err) return err; } /* err = ipw2100_set_fragmentation_threshold( 
priv, priv->frag_threshold, batch_mode); if (err) return err; */ IPW_DEBUG_INFO("exit\n"); return 0; } /************************************************************************* * * EXTERNALLY CALLED METHODS * *************************************************************************/ /* This method is called by the network layer -- not to be confused with * ipw2100_set_mac_address() declared above called by this driver (and this * method as well) to talk to the firmware */ static int ipw2100_set_address(struct net_device *dev, void *p) { struct ipw2100_priv *priv = libipw_priv(dev); struct sockaddr *addr = p; int err = 0; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; mutex_lock(&priv->action_mutex); priv->config |= CFG_CUSTOM_MAC; memcpy(priv->mac_addr, addr->sa_data, ETH_ALEN); err = ipw2100_set_mac_address(priv, 0); if (err) goto done; priv->reset_backoff = 0; mutex_unlock(&priv->action_mutex); ipw2100_reset_adapter(&priv->reset_work.work); return 0; done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_open(struct net_device *dev) { struct ipw2100_priv *priv = libipw_priv(dev); unsigned long flags; IPW_DEBUG_INFO("dev->open\n"); spin_lock_irqsave(&priv->low_lock, flags); if (priv->status & STATUS_ASSOCIATED) { netif_carrier_on(dev); netif_start_queue(dev); } spin_unlock_irqrestore(&priv->low_lock, flags); return 0; } static int ipw2100_close(struct net_device *dev) { struct ipw2100_priv *priv = libipw_priv(dev); unsigned long flags; struct list_head *element; struct ipw2100_tx_packet *packet; IPW_DEBUG_INFO("enter\n"); spin_lock_irqsave(&priv->low_lock, flags); if (priv->status & STATUS_ASSOCIATED) netif_carrier_off(dev); netif_stop_queue(dev); /* Flush the TX queue ... 
*/ while (!list_empty(&priv->tx_pend_list)) { element = priv->tx_pend_list.next; packet = list_entry(element, struct ipw2100_tx_packet, list); list_del(element); DEC_STAT(&priv->tx_pend_stat); libipw_txb_free(packet->info.d_struct.txb); packet->info.d_struct.txb = NULL; list_add_tail(element, &priv->tx_free_list); INC_STAT(&priv->tx_free_stat); } spin_unlock_irqrestore(&priv->low_lock, flags); IPW_DEBUG_INFO("exit\n"); return 0; } /* * TODO: Fix this function... its just wrong */ static void ipw2100_tx_timeout(struct net_device *dev) { struct ipw2100_priv *priv = libipw_priv(dev); dev->stats.tx_errors++; #ifdef CONFIG_IPW2100_MONITOR if (priv->ieee->iw_mode == IW_MODE_MONITOR) return; #endif IPW_DEBUG_INFO("%s: TX timed out. Scheduling firmware restart.\n", dev->name); schedule_reset(priv); } static int ipw2100_wpa_enable(struct ipw2100_priv *priv, int value) { /* This is called when wpa_supplicant loads and closes the driver * interface. */ priv->ieee->wpa_enabled = value; return 0; } static int ipw2100_wpa_set_auth_algs(struct ipw2100_priv *priv, int value) { struct libipw_device *ieee = priv->ieee; struct libipw_security sec = { .flags = SEC_AUTH_MODE, }; int ret = 0; if (value & IW_AUTH_ALG_SHARED_KEY) { sec.auth_mode = WLAN_AUTH_SHARED_KEY; ieee->open_wep = 0; } else if (value & IW_AUTH_ALG_OPEN_SYSTEM) { sec.auth_mode = WLAN_AUTH_OPEN; ieee->open_wep = 1; } else if (value & IW_AUTH_ALG_LEAP) { sec.auth_mode = WLAN_AUTH_LEAP; ieee->open_wep = 1; } else return -EINVAL; if (ieee->set_security) ieee->set_security(ieee->dev, &sec); else ret = -EOPNOTSUPP; return ret; } static void ipw2100_wpa_assoc_frame(struct ipw2100_priv *priv, char *wpa_ie, int wpa_ie_len) { struct ipw2100_wpa_assoc_frame frame; frame.fixed_ie_mask = 0; /* copy WPA IE */ memcpy(frame.var_ie, wpa_ie, wpa_ie_len); frame.var_ie_len = wpa_ie_len; /* make sure WPA is enabled */ ipw2100_wpa_enable(priv, 1); ipw2100_set_wpa_ie(priv, &frame, 0); } static void ipw_ethtool_get_drvinfo(struct net_device 
*dev, struct ethtool_drvinfo *info) { struct ipw2100_priv *priv = libipw_priv(dev); char fw_ver[64], ucode_ver[64]; strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); ipw2100_get_fwversion(priv, fw_ver, sizeof(fw_ver)); ipw2100_get_ucodeversion(priv, ucode_ver, sizeof(ucode_ver)); snprintf(info->fw_version, sizeof(info->fw_version), "%s:%d:%s", fw_ver, priv->eeprom_version, ucode_ver); strlcpy(info->bus_info, pci_name(priv->pci_dev), sizeof(info->bus_info)); } static u32 ipw2100_ethtool_get_link(struct net_device *dev) { struct ipw2100_priv *priv = libipw_priv(dev); return (priv->status & STATUS_ASSOCIATED) ? 1 : 0; } static const struct ethtool_ops ipw2100_ethtool_ops = { .get_link = ipw2100_ethtool_get_link, .get_drvinfo = ipw_ethtool_get_drvinfo, }; static void ipw2100_hang_check(struct work_struct *work) { struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv, hang_check.work); unsigned long flags; u32 rtc = 0xa5a5a5a5; u32 len = sizeof(rtc); int restart = 0; spin_lock_irqsave(&priv->low_lock, flags); if (priv->fatal_error != 0) { /* If fatal_error is set then we need to restart */ IPW_DEBUG_INFO("%s: Hardware fatal error detected.\n", priv->net_dev->name); restart = 1; } else if (ipw2100_get_ordinal(priv, IPW_ORD_RTC_TIME, &rtc, &len) || (rtc == priv->last_rtc)) { /* Check if firmware is hung */ IPW_DEBUG_INFO("%s: Firmware RTC stalled.\n", priv->net_dev->name); restart = 1; } if (restart) { /* Kill timer */ priv->stop_hang_check = 1; priv->hangs++; /* Restart the NIC */ schedule_reset(priv); } priv->last_rtc = rtc; if (!priv->stop_hang_check) schedule_delayed_work(&priv->hang_check, HZ / 2); spin_unlock_irqrestore(&priv->low_lock, flags); } static void ipw2100_rf_kill(struct work_struct *work) { struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv, rf_kill.work); unsigned long flags; spin_lock_irqsave(&priv->low_lock, flags); if (rf_kill_active(priv)) { 
IPW_DEBUG_RF_KILL("RF Kill active, rescheduling GPIO check\n"); if (!priv->stop_rf_kill) schedule_delayed_work(&priv->rf_kill, round_jiffies_relative(HZ)); goto exit_unlock; } /* RF Kill is now disabled, so bring the device back up */ if (!(priv->status & STATUS_RF_KILL_MASK)) { IPW_DEBUG_RF_KILL("HW RF Kill no longer active, restarting " "device\n"); schedule_reset(priv); } else IPW_DEBUG_RF_KILL("HW RF Kill deactivated. SW RF Kill still " "enabled\n"); exit_unlock: spin_unlock_irqrestore(&priv->low_lock, flags); } static void ipw2100_irq_tasklet(struct ipw2100_priv *priv); static const struct net_device_ops ipw2100_netdev_ops = { .ndo_open = ipw2100_open, .ndo_stop = ipw2100_close, .ndo_start_xmit = libipw_xmit, .ndo_change_mtu = libipw_change_mtu, .ndo_tx_timeout = ipw2100_tx_timeout, .ndo_set_mac_address = ipw2100_set_address, .ndo_validate_addr = eth_validate_addr, }; /* Look into using netdev destructor to shutdown libipw? */ static struct net_device *ipw2100_alloc_device(struct pci_dev *pci_dev, void __iomem * ioaddr) { struct ipw2100_priv *priv; struct net_device *dev; dev = alloc_libipw(sizeof(struct ipw2100_priv), 0); if (!dev) return NULL; priv = libipw_priv(dev); priv->ieee = netdev_priv(dev); priv->pci_dev = pci_dev; priv->net_dev = dev; priv->ioaddr = ioaddr; priv->ieee->hard_start_xmit = ipw2100_tx; priv->ieee->set_security = shim__set_security; priv->ieee->perfect_rssi = -20; priv->ieee->worst_rssi = -85; dev->netdev_ops = &ipw2100_netdev_ops; dev->ethtool_ops = &ipw2100_ethtool_ops; dev->wireless_handlers = &ipw2100_wx_handler_def; priv->wireless_data.libipw = priv->ieee; dev->wireless_data = &priv->wireless_data; dev->watchdog_timeo = 3 * HZ; dev->irq = 0; /* NOTE: We don't use the wireless_handlers hook * in dev as the system will start throwing WX requests * to us before we're actually initialized and it just * ends up causing problems. 
So, we just handle * the WX extensions through the ipw2100_ioctl interface */ /* memset() puts everything to 0, so we only have explicitly set * those values that need to be something else */ /* If power management is turned on, default to AUTO mode */ priv->power_mode = IPW_POWER_AUTO; #ifdef CONFIG_IPW2100_MONITOR priv->config |= CFG_CRC_CHECK; #endif priv->ieee->wpa_enabled = 0; priv->ieee->drop_unencrypted = 0; priv->ieee->privacy_invoked = 0; priv->ieee->ieee802_1x = 1; /* Set module parameters */ switch (network_mode) { case 1: priv->ieee->iw_mode = IW_MODE_ADHOC; break; #ifdef CONFIG_IPW2100_MONITOR case 2: priv->ieee->iw_mode = IW_MODE_MONITOR; break; #endif default: case 0: priv->ieee->iw_mode = IW_MODE_INFRA; break; } if (disable == 1) priv->status |= STATUS_RF_KILL_SW; if (channel != 0 && ((channel >= REG_MIN_CHANNEL) && (channel <= REG_MAX_CHANNEL))) { priv->config |= CFG_STATIC_CHANNEL; priv->channel = channel; } if (associate) priv->config |= CFG_ASSOCIATE; priv->beacon_interval = DEFAULT_BEACON_INTERVAL; priv->short_retry_limit = DEFAULT_SHORT_RETRY_LIMIT; priv->long_retry_limit = DEFAULT_LONG_RETRY_LIMIT; priv->rts_threshold = DEFAULT_RTS_THRESHOLD | RTS_DISABLED; priv->frag_threshold = DEFAULT_FTS | FRAG_DISABLED; priv->tx_power = IPW_TX_POWER_DEFAULT; priv->tx_rates = DEFAULT_TX_RATES; strcpy(priv->nick, "ipw2100"); spin_lock_init(&priv->low_lock); mutex_init(&priv->action_mutex); mutex_init(&priv->adapter_mutex); init_waitqueue_head(&priv->wait_command_queue); netif_carrier_off(dev); INIT_LIST_HEAD(&priv->msg_free_list); INIT_LIST_HEAD(&priv->msg_pend_list); INIT_STAT(&priv->msg_free_stat); INIT_STAT(&priv->msg_pend_stat); INIT_LIST_HEAD(&priv->tx_free_list); INIT_LIST_HEAD(&priv->tx_pend_list); INIT_STAT(&priv->tx_free_stat); INIT_STAT(&priv->tx_pend_stat); INIT_LIST_HEAD(&priv->fw_pend_list); INIT_STAT(&priv->fw_pend_stat); INIT_DELAYED_WORK(&priv->reset_work, ipw2100_reset_adapter); INIT_DELAYED_WORK(&priv->security_work, 
ipw2100_security_work); INIT_DELAYED_WORK(&priv->wx_event_work, ipw2100_wx_event_work); INIT_DELAYED_WORK(&priv->hang_check, ipw2100_hang_check); INIT_DELAYED_WORK(&priv->rf_kill, ipw2100_rf_kill); INIT_DELAYED_WORK(&priv->scan_event, ipw2100_scan_event); tasklet_init(&priv->irq_tasklet, (void (*)(unsigned long)) ipw2100_irq_tasklet, (unsigned long)priv); /* NOTE: We do not start the deferred work for status checks yet */ priv->stop_rf_kill = 1; priv->stop_hang_check = 1; return dev; } static int ipw2100_pci_init_one(struct pci_dev *pci_dev, const struct pci_device_id *ent) { void __iomem *ioaddr; struct net_device *dev = NULL; struct ipw2100_priv *priv = NULL; int err = 0; int registered = 0; u32 val; IPW_DEBUG_INFO("enter\n"); if (!(pci_resource_flags(pci_dev, 0) & IORESOURCE_MEM)) { IPW_DEBUG_INFO("weird - resource type is not memory\n"); err = -ENODEV; goto out; } ioaddr = pci_iomap(pci_dev, 0, 0); if (!ioaddr) { printk(KERN_WARNING DRV_NAME "Error calling ioremap_nocache.\n"); err = -EIO; goto fail; } /* allocate and initialize our net_device */ dev = ipw2100_alloc_device(pci_dev, ioaddr); if (!dev) { printk(KERN_WARNING DRV_NAME "Error calling ipw2100_alloc_device.\n"); err = -ENOMEM; goto fail; } /* set up PCI mappings for device */ err = pci_enable_device(pci_dev); if (err) { printk(KERN_WARNING DRV_NAME "Error calling pci_enable_device.\n"); return err; } priv = libipw_priv(dev); pci_set_master(pci_dev); pci_set_drvdata(pci_dev, priv); err = pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32)); if (err) { printk(KERN_WARNING DRV_NAME "Error calling pci_set_dma_mask.\n"); pci_disable_device(pci_dev); return err; } err = pci_request_regions(pci_dev, DRV_NAME); if (err) { printk(KERN_WARNING DRV_NAME "Error calling pci_request_regions.\n"); pci_disable_device(pci_dev); return err; } /* We disable the RETRY_TIMEOUT register (0x41) to keep * PCI Tx retries from interfering with C3 CPU state */ pci_read_config_dword(pci_dev, 0x40, &val); if ((val & 0x0000ff00) != 0) 
pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff); pci_set_power_state(pci_dev, PCI_D0); if (!ipw2100_hw_is_adapter_in_system(dev)) { printk(KERN_WARNING DRV_NAME "Device not found via register read.\n"); err = -ENODEV; goto fail; } SET_NETDEV_DEV(dev, &pci_dev->dev); /* Force interrupts to be shut off on the device */ priv->status |= STATUS_INT_ENABLED; ipw2100_disable_interrupts(priv); /* Allocate and initialize the Tx/Rx queues and lists */ if (ipw2100_queues_allocate(priv)) { printk(KERN_WARNING DRV_NAME "Error calling ipw2100_queues_allocate.\n"); err = -ENOMEM; goto fail; } ipw2100_queues_initialize(priv); err = request_irq(pci_dev->irq, ipw2100_interrupt, IRQF_SHARED, dev->name, priv); if (err) { printk(KERN_WARNING DRV_NAME "Error calling request_irq: %d.\n", pci_dev->irq); goto fail; } dev->irq = pci_dev->irq; IPW_DEBUG_INFO("Attempting to register device...\n"); printk(KERN_INFO DRV_NAME ": Detected Intel PRO/Wireless 2100 Network Connection\n"); err = ipw2100_up(priv, 1); if (err) goto fail; err = ipw2100_wdev_init(dev); if (err) goto fail; registered = 1; /* Bring up the interface. Pre 0.46, after we registered the * network device we would call ipw2100_up. This introduced a race * condition with newer hotplug configurations (network was coming * up and making calls before the device was initialized). 
*/ err = register_netdev(dev); if (err) { printk(KERN_WARNING DRV_NAME "Error calling register_netdev.\n"); goto fail; } registered = 2; mutex_lock(&priv->action_mutex); IPW_DEBUG_INFO("%s: Bound to %s\n", dev->name, pci_name(pci_dev)); /* perform this after register_netdev so that dev->name is set */ err = sysfs_create_group(&pci_dev->dev.kobj, &ipw2100_attribute_group); if (err) goto fail_unlock; /* If the RF Kill switch is disabled, go ahead and complete the * startup sequence */ if (!(priv->status & STATUS_RF_KILL_MASK)) { /* Enable the adapter - sends HOST_COMPLETE */ if (ipw2100_enable_adapter(priv)) { printk(KERN_WARNING DRV_NAME ": %s: failed in call to enable adapter.\n", priv->net_dev->name); ipw2100_hw_stop_adapter(priv); err = -EIO; goto fail_unlock; } /* Start a scan . . . */ ipw2100_set_scan_options(priv); ipw2100_start_scan(priv); } IPW_DEBUG_INFO("exit\n"); priv->status |= STATUS_INITIALIZED; mutex_unlock(&priv->action_mutex); out: return err; fail_unlock: mutex_unlock(&priv->action_mutex); fail: if (dev) { if (registered >= 2) unregister_netdev(dev); if (registered) { wiphy_unregister(priv->ieee->wdev.wiphy); kfree(priv->ieee->bg_band.channels); } ipw2100_hw_stop_adapter(priv); ipw2100_disable_interrupts(priv); if (dev->irq) free_irq(dev->irq, priv); ipw2100_kill_works(priv); /* These are safe to call even if they weren't allocated */ ipw2100_queues_free(priv); sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group); free_libipw(dev, 0); pci_set_drvdata(pci_dev, NULL); } pci_iounmap(pci_dev, ioaddr); pci_release_regions(pci_dev); pci_disable_device(pci_dev); goto out; } static void ipw2100_pci_remove_one(struct pci_dev *pci_dev) { struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); struct net_device *dev = priv->net_dev; mutex_lock(&priv->action_mutex); priv->status &= ~STATUS_INITIALIZED; sysfs_remove_group(&pci_dev->dev.kobj, &ipw2100_attribute_group); #ifdef CONFIG_PM if (ipw2100_firmware.version) ipw2100_release_firmware(priv, 
&ipw2100_firmware); #endif /* Take down the hardware */ ipw2100_down(priv); /* Release the mutex so that the network subsystem can * complete any needed calls into the driver... */ mutex_unlock(&priv->action_mutex); /* Unregister the device first - this results in close() * being called if the device is open. If we free storage * first, then close() will crash. * FIXME: remove the comment above. */ unregister_netdev(dev); ipw2100_kill_works(priv); ipw2100_queues_free(priv); /* Free potential debugging firmware snapshot */ ipw2100_snapshot_free(priv); free_irq(dev->irq, priv); pci_iounmap(pci_dev, priv->ioaddr); /* wiphy_unregister needs to be here, before free_libipw */ wiphy_unregister(priv->ieee->wdev.wiphy); kfree(priv->ieee->bg_band.channels); free_libipw(dev, 0); pci_release_regions(pci_dev); pci_disable_device(pci_dev); IPW_DEBUG_INFO("exit\n"); } #ifdef CONFIG_PM static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) { struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); struct net_device *dev = priv->net_dev; IPW_DEBUG_INFO("%s: Going into suspend...\n", dev->name); mutex_lock(&priv->action_mutex); if (priv->status & STATUS_INITIALIZED) { /* Take down the device; powers it off, etc. 
*/ ipw2100_down(priv); } /* Remove the PRESENT state of the device */ netif_device_detach(dev); pci_save_state(pci_dev); pci_disable_device(pci_dev); pci_set_power_state(pci_dev, PCI_D3hot); priv->suspend_at = get_seconds(); mutex_unlock(&priv->action_mutex); return 0; } static int ipw2100_resume(struct pci_dev *pci_dev) { struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); struct net_device *dev = priv->net_dev; int err; u32 val; if (IPW2100_PM_DISABLED) return 0; mutex_lock(&priv->action_mutex); IPW_DEBUG_INFO("%s: Coming out of suspend...\n", dev->name); pci_set_power_state(pci_dev, PCI_D0); err = pci_enable_device(pci_dev); if (err) { printk(KERN_ERR "%s: pci_enable_device failed on resume\n", dev->name); mutex_unlock(&priv->action_mutex); return err; } pci_restore_state(pci_dev); /* * Suspend/Resume resets the PCI configuration space, so we have to * re-disable the RETRY_TIMEOUT register (0x41) to keep PCI Tx retries * from interfering with C3 CPU state. pci_restore_state won't help * here since it only restores the first 64 bytes pci config header. */ pci_read_config_dword(pci_dev, 0x40, &val); if ((val & 0x0000ff00) != 0) pci_write_config_dword(pci_dev, 0x40, val & 0xffff00ff); /* Set the device back into the PRESENT state; this will also wake * the queue of needed */ netif_device_attach(dev); priv->suspend_time = get_seconds() - priv->suspend_at; /* Bring the device back up */ if (!(priv->status & STATUS_RF_KILL_SW)) ipw2100_up(priv, 0); mutex_unlock(&priv->action_mutex); return 0; } #endif static void ipw2100_shutdown(struct pci_dev *pci_dev) { struct ipw2100_priv *priv = pci_get_drvdata(pci_dev); /* Take down the device; powers it off, etc. 
*/
	ipw2100_down(priv);

	pci_disable_device(pci_dev);
}

/* All known 2100 devices share Intel's vendor/subvendor IDs; only the
 * subdevice ID varies per OEM board. */
#define IPW2100_DEV_ID(x) { PCI_VENDOR_ID_INTEL, 0x1043, 0x8086, x }

static DEFINE_PCI_DEVICE_TABLE(ipw2100_pci_id_table) = {
	IPW2100_DEV_ID(0x2520),	/* IN 2100A mPCI 3A */
	IPW2100_DEV_ID(0x2521),	/* IN 2100A mPCI 3B */
	IPW2100_DEV_ID(0x2524),	/* IN 2100A mPCI 3B */
	IPW2100_DEV_ID(0x2525),	/* IN 2100A mPCI 3B */
	IPW2100_DEV_ID(0x2526),	/* IN 2100A mPCI Gen A3 */
	IPW2100_DEV_ID(0x2522),	/* IN 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2523),	/* IN 2100 mPCI 3A */
	IPW2100_DEV_ID(0x2527),	/* IN 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2528),	/* IN 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2529),	/* IN 2100 mPCI 3B */
	IPW2100_DEV_ID(0x252B),	/* IN 2100 mPCI 3A */
	IPW2100_DEV_ID(0x252C),	/* IN 2100 mPCI 3A */
	IPW2100_DEV_ID(0x252D),	/* IN 2100 mPCI 3A */

	IPW2100_DEV_ID(0x2550),	/* IB 2100A mPCI 3B */
	IPW2100_DEV_ID(0x2551),	/* IB 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2553),	/* IB 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2554),	/* IB 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2555),	/* IB 2100 mPCI 3B */

	IPW2100_DEV_ID(0x2560),	/* DE 2100A mPCI 3A */
	IPW2100_DEV_ID(0x2562),	/* DE 2100A mPCI 3A */
	IPW2100_DEV_ID(0x2563),	/* DE 2100A mPCI 3A */
	IPW2100_DEV_ID(0x2561),	/* DE 2100 mPCI 3A */
	IPW2100_DEV_ID(0x2565),	/* DE 2100 mPCI 3A */
	IPW2100_DEV_ID(0x2566),	/* DE 2100 mPCI 3A */
	IPW2100_DEV_ID(0x2567),	/* DE 2100 mPCI 3A */

	IPW2100_DEV_ID(0x2570),	/* GA 2100 mPCI 3B */

	IPW2100_DEV_ID(0x2580),	/* TO 2100A mPCI 3B */
	IPW2100_DEV_ID(0x2582),	/* TO 2100A mPCI 3B */
	IPW2100_DEV_ID(0x2583),	/* TO 2100A mPCI 3B */
	IPW2100_DEV_ID(0x2581),	/* TO 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2585),	/* TO 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2586),	/* TO 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2587),	/* TO 2100 mPCI 3B */

	IPW2100_DEV_ID(0x2590),	/* SO 2100A mPCI 3B */
	IPW2100_DEV_ID(0x2592),	/* SO 2100A mPCI 3B */
	IPW2100_DEV_ID(0x2591),	/* SO 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2593),	/* SO 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2596),	/* SO 2100 mPCI 3B */
	IPW2100_DEV_ID(0x2598),	/* SO 2100 mPCI 3B */

	IPW2100_DEV_ID(0x25A0),	/* HP 2100 mPCI 3B */
	{0,},
};

MODULE_DEVICE_TABLE(pci, ipw2100_pci_id_table);

static struct pci_driver ipw2100_pci_driver = {
	.name = DRV_NAME,
	.id_table = ipw2100_pci_id_table,
	.probe = ipw2100_pci_init_one,
	.remove = ipw2100_pci_remove_one,
#ifdef CONFIG_PM
	.suspend = ipw2100_suspend,
	.resume = ipw2100_resume,
#endif
	.shutdown = ipw2100_shutdown,
};

/**
 * Initialize the ipw2100 driver/module
 *
 * @returns 0 if ok, < 0 errno on error.
 *
 * Note: we cannot init the /proc stuff until the PCI driver is there,
 * or we risk an unlikely race condition on someone accessing
 * uninitialized data in the PCI dev struct through /proc.
 */
static int __init ipw2100_init(void)
{
	int ret;

	printk(KERN_INFO DRV_NAME ": %s, %s\n", DRV_DESCRIPTION, DRV_VERSION);
	printk(KERN_INFO DRV_NAME ": %s\n", DRV_COPYRIGHT);

	/* Keep CPUs out of deep C-states while the driver is loaded. */
	pm_qos_add_request(&ipw2100_pm_qos_req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);

	ret = pci_register_driver(&ipw2100_pci_driver);
	if (ret)
		goto out;

#ifdef CONFIG_IPW2100_DEBUG
	ipw2100_debug_level = debug;
	ret = driver_create_file(&ipw2100_pci_driver.driver,
				 &driver_attr_debug_level);
#endif

out:
	return ret;
}

/**
 * Cleanup ipw2100 driver registration
 */
static void __exit ipw2100_exit(void)
{
	/* FIXME: IPG: check that we have no instances of the devices open */
#ifdef CONFIG_IPW2100_DEBUG
	driver_remove_file(&ipw2100_pci_driver.driver,
			   &driver_attr_debug_level);
#endif
	pci_unregister_driver(&ipw2100_pci_driver);
	pm_qos_remove_request(&ipw2100_pm_qos_req);
}

module_init(ipw2100_init);
module_exit(ipw2100_exit);

/* SIOCGIWNAME: report "IEEE 802.11b" when associated, else "unassociated". */
static int ipw2100_wx_get_name(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	/*
	 * This can be called at any time.
No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); if (!(priv->status & STATUS_ASSOCIATED)) strcpy(wrqu->name, "unassociated"); else snprintf(wrqu->name, IFNAMSIZ, "IEEE 802.11b"); IPW_DEBUG_WX("Name: %s\n", wrqu->name); return 0; } static int ipw2100_wx_set_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); struct iw_freq *fwrq = &wrqu->freq; int err = 0; if (priv->ieee->iw_mode == IW_MODE_INFRA) return -EOPNOTSUPP; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } /* if setting by freq convert to channel */ if (fwrq->e == 1) { if ((fwrq->m >= (int)2.412e8 && fwrq->m <= (int)2.487e8)) { int f = fwrq->m / 100000; int c = 0; while ((c < REG_MAX_CHANNEL) && (f != ipw2100_frequencies[c])) c++; /* hack to fall through */ fwrq->e = 0; fwrq->m = c + 1; } } if (fwrq->e > 0 || fwrq->m > 1000) { err = -EOPNOTSUPP; goto done; } else { /* Set the channel */ IPW_DEBUG_WX("SET Freq/Channel -> %d\n", fwrq->m); err = ipw2100_set_channel(priv, fwrq->m, 0); } done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_freq(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. 
No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); wrqu->freq.e = 0; /* If we are associated, trying to associate, or have a statically * configured CHANNEL then return that; otherwise return ANY */ if (priv->config & CFG_STATIC_CHANNEL || priv->status & STATUS_ASSOCIATED) wrqu->freq.m = priv->channel; else wrqu->freq.m = 0; IPW_DEBUG_WX("GET Freq/Channel -> %d\n", priv->channel); return 0; } static int ipw2100_wx_set_mode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int err = 0; IPW_DEBUG_WX("SET Mode -> %d\n", wrqu->mode); if (wrqu->mode == priv->ieee->iw_mode) return 0; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } switch (wrqu->mode) { #ifdef CONFIG_IPW2100_MONITOR case IW_MODE_MONITOR: err = ipw2100_switch_mode(priv, IW_MODE_MONITOR); break; #endif /* CONFIG_IPW2100_MONITOR */ case IW_MODE_ADHOC: err = ipw2100_switch_mode(priv, IW_MODE_ADHOC); break; case IW_MODE_INFRA: case IW_MODE_AUTO: default: err = ipw2100_switch_mode(priv, IW_MODE_INFRA); break; } done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_mode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); wrqu->mode = priv->ieee->iw_mode; IPW_DEBUG_WX("GET Mode -> %d\n", wrqu->mode); return 0; } #define POWER_MODES 5 /* Values are in microsecond */ static const s32 timeout_duration[POWER_MODES] = { 350000, 250000, 75000, 37000, 25000, }; static const s32 period_duration[POWER_MODES] = { 400000, 700000, 1000000, 1000000, 1000000 }; static int ipw2100_wx_get_range(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. 
No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); struct iw_range *range = (struct iw_range *)extra; u16 val; int i, level; wrqu->data.length = sizeof(*range); memset(range, 0, sizeof(*range)); /* Let's try to keep this struct in the same order as in * linux/include/wireless.h */ /* TODO: See what values we can set, and remove the ones we can't * set, or fill them with some default data. */ /* ~5 Mb/s real (802.11b) */ range->throughput = 5 * 1000 * 1000; // range->sensitivity; /* signal level threshold range */ range->max_qual.qual = 100; /* TODO: Find real max RSSI and stick here */ range->max_qual.level = 0; range->max_qual.noise = 0; range->max_qual.updated = 7; /* Updated all three */ range->avg_qual.qual = 70; /* > 8% missed beacons is 'bad' */ /* TODO: Find real 'good' to 'bad' threshold value for RSSI */ range->avg_qual.level = 20 + IPW2100_RSSI_TO_DBM; range->avg_qual.noise = 0; range->avg_qual.updated = 7; /* Updated all three */ range->num_bitrates = RATE_COUNT; for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) { range->bitrate[i] = ipw2100_bg_rates[i].bitrate * 100 * 1000; } range->min_rts = MIN_RTS_THRESHOLD; range->max_rts = MAX_RTS_THRESHOLD; range->min_frag = MIN_FRAG_THRESHOLD; range->max_frag = MAX_FRAG_THRESHOLD; range->min_pmp = period_duration[0]; /* Minimal PM period */ range->max_pmp = period_duration[POWER_MODES - 1]; /* Maximal PM period */ range->min_pmt = timeout_duration[POWER_MODES - 1]; /* Minimal PM timeout */ range->max_pmt = timeout_duration[0]; /* Maximal PM timeout */ /* How to decode max/min PM period */ range->pmp_flags = IW_POWER_PERIOD; /* How to decode max/min PM period */ range->pmt_flags = IW_POWER_TIMEOUT; /* What PM options are supported */ range->pm_capa = IW_POWER_TIMEOUT | IW_POWER_PERIOD; range->encoding_size[0] = 5; range->encoding_size[1] = 13; /* Different token sizes */ range->num_encoding_sizes = 2; /* Number of entry in the list */ range->max_encoding_tokens = WEP_KEYS; /* Max number 
of tokens */ // range->encoding_login_index; /* token index for login token */ if (priv->ieee->iw_mode == IW_MODE_ADHOC) { range->txpower_capa = IW_TXPOW_DBM; range->num_txpower = IW_MAX_TXPOWER; for (i = 0, level = (IPW_TX_POWER_MAX_DBM * 16); i < IW_MAX_TXPOWER; i++, level -= ((IPW_TX_POWER_MAX_DBM - IPW_TX_POWER_MIN_DBM) * 16) / (IW_MAX_TXPOWER - 1)) range->txpower[i] = level / 16; } else { range->txpower_capa = 0; range->num_txpower = 0; } /* Set the Wireless Extension versions */ range->we_version_compiled = WIRELESS_EXT; range->we_version_source = 18; // range->retry_capa; /* What retry options are supported */ // range->retry_flags; /* How to decode max/min retry limit */ // range->r_time_flags; /* How to decode max/min retry life */ // range->min_retry; /* Minimal number of retries */ // range->max_retry; /* Maximal number of retries */ // range->min_r_time; /* Minimal retry lifetime */ // range->max_r_time; /* Maximal retry lifetime */ range->num_channels = FREQ_COUNT; val = 0; for (i = 0; i < FREQ_COUNT; i++) { // TODO: Include only legal frequencies for some countries // if (local->channel_mask & (1 << i)) { range->freq[val].i = i + 1; range->freq[val].m = ipw2100_frequencies[i] * 100000; range->freq[val].e = 1; val++; // } if (val == IW_MAX_FREQUENCIES) break; } range->num_frequency = val; /* Event capability (kernel + driver) */ range->event_capa[0] = (IW_EVENT_CAPA_K_0 | IW_EVENT_CAPA_MASK(SIOCGIWAP)); range->event_capa[1] = IW_EVENT_CAPA_K_1; range->enc_capa = IW_ENC_CAPA_WPA | IW_ENC_CAPA_WPA2 | IW_ENC_CAPA_CIPHER_TKIP | IW_ENC_CAPA_CIPHER_CCMP; IPW_DEBUG_WX("GET Range\n"); return 0; } static int ipw2100_wx_set_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int err = 0; // sanity checks if (wrqu->ap_addr.sa_family != ARPHRD_ETHER) return -EINVAL; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } if 
(is_broadcast_ether_addr(wrqu->ap_addr.sa_data) || is_zero_ether_addr(wrqu->ap_addr.sa_data)) { /* we disable mandatory BSSID association */ IPW_DEBUG_WX("exit - disable mandatory BSSID\n"); priv->config &= ~CFG_STATIC_BSSID; err = ipw2100_set_mandatory_bssid(priv, NULL, 0); goto done; } priv->config |= CFG_STATIC_BSSID; memcpy(priv->mandatory_bssid_mac, wrqu->ap_addr.sa_data, ETH_ALEN); err = ipw2100_set_mandatory_bssid(priv, wrqu->ap_addr.sa_data, 0); IPW_DEBUG_WX("SET BSSID -> %pM\n", wrqu->ap_addr.sa_data); done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_wap(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); /* If we are associated, trying to associate, or have a statically * configured BSSID then return that; otherwise return ANY */ if (priv->config & CFG_STATIC_BSSID || priv->status & STATUS_ASSOCIATED) { wrqu->ap_addr.sa_family = ARPHRD_ETHER; memcpy(wrqu->ap_addr.sa_data, priv->bssid, ETH_ALEN); } else memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); IPW_DEBUG_WX("Getting WAP BSSID: %pM\n", wrqu->ap_addr.sa_data); return 0; } static int ipw2100_wx_set_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); char *essid = ""; /* ANY */ int length = 0; int err = 0; DECLARE_SSID_BUF(ssid); mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } if (wrqu->essid.flags && wrqu->essid.length) { length = wrqu->essid.length; essid = extra; } if (length == 0) { IPW_DEBUG_WX("Setting ESSID to ANY\n"); priv->config &= ~CFG_STATIC_ESSID; err = ipw2100_set_essid(priv, NULL, 0, 0); goto done; } length = min(length, IW_ESSID_MAX_SIZE); priv->config |= CFG_STATIC_ESSID; if (priv->essid_len == length && !memcmp(priv->essid, extra, length)) { IPW_DEBUG_WX("ESSID set 
to current ESSID.\n"); err = 0; goto done; } IPW_DEBUG_WX("Setting ESSID: '%s' (%d)\n", print_ssid(ssid, essid, length), length); priv->essid_len = length; memcpy(priv->essid, essid, priv->essid_len); err = ipw2100_set_essid(priv, essid, length, 0); done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_essid(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); DECLARE_SSID_BUF(ssid); /* If we are associated, trying to associate, or have a statically * configured ESSID then return that; otherwise return ANY */ if (priv->config & CFG_STATIC_ESSID || priv->status & STATUS_ASSOCIATED) { IPW_DEBUG_WX("Getting essid: '%s'\n", print_ssid(ssid, priv->essid, priv->essid_len)); memcpy(extra, priv->essid, priv->essid_len); wrqu->essid.length = priv->essid_len; wrqu->essid.flags = 1; /* active */ } else { IPW_DEBUG_WX("Getting essid: ANY\n"); wrqu->essid.length = 0; wrqu->essid.flags = 0; /* active */ } return 0; } static int ipw2100_wx_set_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); if (wrqu->data.length > IW_ESSID_MAX_SIZE) return -E2BIG; wrqu->data.length = min((size_t) wrqu->data.length, sizeof(priv->nick)); memset(priv->nick, 0, sizeof(priv->nick)); memcpy(priv->nick, extra, wrqu->data.length); IPW_DEBUG_WX("SET Nickname -> %s\n", priv->nick); return 0; } static int ipw2100_wx_get_nick(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. 
No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); wrqu->data.length = strlen(priv->nick); memcpy(extra, priv->nick, wrqu->data.length); wrqu->data.flags = 1; /* active */ IPW_DEBUG_WX("GET Nickname -> %s\n", extra); return 0; } static int ipw2100_wx_set_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); u32 target_rate = wrqu->bitrate.value; u32 rate; int err = 0; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } rate = 0; if (target_rate == 1000000 || (!wrqu->bitrate.fixed && target_rate > 1000000)) rate |= TX_RATE_1_MBIT; if (target_rate == 2000000 || (!wrqu->bitrate.fixed && target_rate > 2000000)) rate |= TX_RATE_2_MBIT; if (target_rate == 5500000 || (!wrqu->bitrate.fixed && target_rate > 5500000)) rate |= TX_RATE_5_5_MBIT; if (target_rate == 11000000 || (!wrqu->bitrate.fixed && target_rate > 11000000)) rate |= TX_RATE_11_MBIT; if (rate == 0) rate = DEFAULT_TX_RATES; err = ipw2100_set_tx_rates(priv, rate, 0); IPW_DEBUG_WX("SET Rate -> %04X\n", rate); done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_rate(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int val; unsigned int len = sizeof(val); int err = 0; if (!(priv->status & STATUS_ENABLED) || priv->status & STATUS_RF_KILL_MASK || !(priv->status & STATUS_ASSOCIATED)) { wrqu->bitrate.value = 0; return 0; } mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } err = ipw2100_get_ordinal(priv, IPW_ORD_CURRENT_TX_RATE, &val, &len); if (err) { IPW_DEBUG_WX("failed querying ordinals.\n"); goto done; } switch (val & TX_RATE_MASK) { case TX_RATE_1_MBIT: wrqu->bitrate.value = 1000000; break; case TX_RATE_2_MBIT: wrqu->bitrate.value = 2000000; break; case TX_RATE_5_5_MBIT: 
wrqu->bitrate.value = 5500000; break; case TX_RATE_11_MBIT: wrqu->bitrate.value = 11000000; break; default: wrqu->bitrate.value = 0; } IPW_DEBUG_WX("GET Rate -> %d\n", wrqu->bitrate.value); done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_set_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int value, err; /* Auto RTS not yet supported */ if (wrqu->rts.fixed == 0) return -EINVAL; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } if (wrqu->rts.disabled) value = priv->rts_threshold | RTS_DISABLED; else { if (wrqu->rts.value < 1 || wrqu->rts.value > 2304) { err = -EINVAL; goto done; } value = wrqu->rts.value; } err = ipw2100_set_rts_threshold(priv, value); IPW_DEBUG_WX("SET RTS Threshold -> 0x%08X\n", value); done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_rts(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); wrqu->rts.value = priv->rts_threshold & ~RTS_DISABLED; wrqu->rts.fixed = 1; /* no auto select */ /* If RTS is set to the default value, then it is disabled */ wrqu->rts.disabled = (priv->rts_threshold & RTS_DISABLED) ? 
1 : 0; IPW_DEBUG_WX("GET RTS Threshold -> 0x%08X\n", wrqu->rts.value); return 0; } static int ipw2100_wx_set_txpow(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int err = 0, value; if (ipw_radio_kill_sw(priv, wrqu->txpower.disabled)) return -EINPROGRESS; if (priv->ieee->iw_mode != IW_MODE_ADHOC) return 0; if ((wrqu->txpower.flags & IW_TXPOW_TYPE) != IW_TXPOW_DBM) return -EINVAL; if (wrqu->txpower.fixed == 0) value = IPW_TX_POWER_DEFAULT; else { if (wrqu->txpower.value < IPW_TX_POWER_MIN_DBM || wrqu->txpower.value > IPW_TX_POWER_MAX_DBM) return -EINVAL; value = wrqu->txpower.value; } mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } err = ipw2100_set_tx_power(priv, value); IPW_DEBUG_WX("SET TX Power -> %d\n", value); done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_txpow(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); wrqu->txpower.disabled = (priv->status & STATUS_RF_KILL_MASK) ? 1 : 0; if (priv->tx_power == IPW_TX_POWER_DEFAULT) { wrqu->txpower.fixed = 0; wrqu->txpower.value = IPW_TX_POWER_MAX_DBM; } else { wrqu->txpower.fixed = 1; wrqu->txpower.value = priv->tx_power; } wrqu->txpower.flags = IW_TXPOW_DBM; IPW_DEBUG_WX("GET TX Power -> %d\n", wrqu->txpower.value); return 0; } static int ipw2100_wx_set_frag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. 
No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); if (!wrqu->frag.fixed) return -EINVAL; if (wrqu->frag.disabled) { priv->frag_threshold |= FRAG_DISABLED; priv->ieee->fts = DEFAULT_FTS; } else { if (wrqu->frag.value < MIN_FRAG_THRESHOLD || wrqu->frag.value > MAX_FRAG_THRESHOLD) return -EINVAL; priv->ieee->fts = wrqu->frag.value & ~0x1; priv->frag_threshold = priv->ieee->fts; } IPW_DEBUG_WX("SET Frag Threshold -> %d\n", priv->ieee->fts); return 0; } static int ipw2100_wx_get_frag(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); wrqu->frag.value = priv->frag_threshold & ~FRAG_DISABLED; wrqu->frag.fixed = 0; /* no auto select */ wrqu->frag.disabled = (priv->frag_threshold & FRAG_DISABLED) ? 1 : 0; IPW_DEBUG_WX("GET Frag Threshold -> %d\n", wrqu->frag.value); return 0; } static int ipw2100_wx_set_retry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int err = 0; if (wrqu->retry.flags & IW_RETRY_LIFETIME || wrqu->retry.disabled) return -EINVAL; if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) return 0; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } if (wrqu->retry.flags & IW_RETRY_SHORT) { err = ipw2100_set_short_retry(priv, wrqu->retry.value); IPW_DEBUG_WX("SET Short Retry Limit -> %d\n", wrqu->retry.value); goto done; } if (wrqu->retry.flags & IW_RETRY_LONG) { err = ipw2100_set_long_retry(priv, wrqu->retry.value); IPW_DEBUG_WX("SET Long Retry Limit -> %d\n", wrqu->retry.value); goto done; } err = ipw2100_set_short_retry(priv, wrqu->retry.value); if (!err) err = ipw2100_set_long_retry(priv, wrqu->retry.value); IPW_DEBUG_WX("SET Both Retry Limits -> %d\n", wrqu->retry.value); done: mutex_unlock(&priv->action_mutex); return err; } static int 
ipw2100_wx_get_retry(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); wrqu->retry.disabled = 0; /* can't be disabled */ if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME) return -EINVAL; if (wrqu->retry.flags & IW_RETRY_LONG) { wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_LONG; wrqu->retry.value = priv->long_retry_limit; } else { wrqu->retry.flags = (priv->short_retry_limit != priv->long_retry_limit) ? IW_RETRY_LIMIT | IW_RETRY_SHORT : IW_RETRY_LIMIT; wrqu->retry.value = priv->short_retry_limit; } IPW_DEBUG_WX("GET Retry -> %d\n", wrqu->retry.value); return 0; } static int ipw2100_wx_set_scan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int err = 0; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } IPW_DEBUG_WX("Initiating scan...\n"); priv->user_requested_scan = 1; if (ipw2100_set_scan_options(priv) || ipw2100_start_scan(priv)) { IPW_DEBUG_WX("Start scan failed.\n"); /* TODO: Mark a scan as pending so when hardware initialized * a scan starts */ } done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_scan(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. 
No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); return libipw_wx_get_scan(priv->ieee, info, wrqu, extra); } /* * Implementation based on code in hostap-driver v0.1.3 hostap_ioctl.c */ static int ipw2100_wx_set_encode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *key) { /* * No check of STATUS_INITIALIZED required */ struct ipw2100_priv *priv = libipw_priv(dev); return libipw_wx_set_encode(priv->ieee, info, wrqu, key); } static int ipw2100_wx_get_encode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *key) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); return libipw_wx_get_encode(priv->ieee, info, wrqu, key); } static int ipw2100_wx_set_power(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int err = 0; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } if (wrqu->power.disabled) { priv->power_mode = IPW_POWER_LEVEL(priv->power_mode); err = ipw2100_set_power_mode(priv, IPW_POWER_MODE_CAM); IPW_DEBUG_WX("SET Power Management Mode -> off\n"); goto done; } switch (wrqu->power.flags & IW_POWER_MODE) { case IW_POWER_ON: /* If not specified */ case IW_POWER_MODE: /* If set all mask */ case IW_POWER_ALL_R: /* If explicitly state all */ break; default: /* Otherwise we don't support it */ IPW_DEBUG_WX("SET PM Mode: %X not supported.\n", wrqu->power.flags); err = -EOPNOTSUPP; goto done; } /* If the user hasn't specified a power management mode yet, default * to BATTERY */ priv->power_mode = IPW_POWER_ENABLED | priv->power_mode; err = ipw2100_set_power_mode(priv, IPW_POWER_LEVEL(priv->power_mode)); IPW_DEBUG_WX("SET Power Management Mode -> 0x%02X\n", priv->power_mode); done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_power(struct net_device *dev, 
struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); if (!(priv->power_mode & IPW_POWER_ENABLED)) wrqu->power.disabled = 1; else { wrqu->power.disabled = 0; wrqu->power.flags = 0; } IPW_DEBUG_WX("GET Power Management Mode -> %02X\n", priv->power_mode); return 0; } /* * WE-18 WPA support */ /* SIOCSIWGENIE */ static int ipw2100_wx_set_genie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); struct libipw_device *ieee = priv->ieee; u8 *buf; if (!ieee->wpa_enabled) return -EOPNOTSUPP; if (wrqu->data.length > MAX_WPA_IE_LEN || (wrqu->data.length && extra == NULL)) return -EINVAL; if (wrqu->data.length) { buf = kmemdup(extra, wrqu->data.length, GFP_KERNEL); if (buf == NULL) return -ENOMEM; kfree(ieee->wpa_ie); ieee->wpa_ie = buf; ieee->wpa_ie_len = wrqu->data.length; } else { kfree(ieee->wpa_ie); ieee->wpa_ie = NULL; ieee->wpa_ie_len = 0; } ipw2100_wpa_assoc_frame(priv, ieee->wpa_ie, ieee->wpa_ie_len); return 0; } /* SIOCGIWGENIE */ static int ipw2100_wx_get_genie(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); struct libipw_device *ieee = priv->ieee; if (ieee->wpa_ie_len == 0 || ieee->wpa_ie == NULL) { wrqu->data.length = 0; return 0; } if (wrqu->data.length < ieee->wpa_ie_len) return -E2BIG; wrqu->data.length = ieee->wpa_ie_len; memcpy(extra, ieee->wpa_ie, ieee->wpa_ie_len); return 0; } /* SIOCSIWAUTH */ static int ipw2100_wx_set_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); struct libipw_device *ieee = priv->ieee; struct iw_param *param = &wrqu->param; struct lib80211_crypt_data *crypt; unsigned long flags; int ret = 0; switch (param->flags & IW_AUTH_INDEX) { case 
IW_AUTH_WPA_VERSION: case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_KEY_MGMT: /* * ipw2200 does not use these parameters */ break; case IW_AUTH_TKIP_COUNTERMEASURES: crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; if (!crypt || !crypt->ops->set_flags || !crypt->ops->get_flags) break; flags = crypt->ops->get_flags(crypt->priv); if (param->value) flags |= IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; else flags &= ~IEEE80211_CRYPTO_TKIP_COUNTERMEASURES; crypt->ops->set_flags(flags, crypt->priv); break; case IW_AUTH_DROP_UNENCRYPTED:{ /* HACK: * * wpa_supplicant calls set_wpa_enabled when the driver * is loaded and unloaded, regardless of if WPA is being * used. No other calls are made which can be used to * determine if encryption will be used or not prior to * association being expected. If encryption is not being * used, drop_unencrypted is set to false, else true -- we * can use this to determine if the CAP_PRIVACY_ON bit should * be set. */ struct libipw_security sec = { .flags = SEC_ENABLED, .enabled = param->value, }; priv->ieee->drop_unencrypted = param->value; /* We only change SEC_LEVEL for open mode. Others * are set by ipw_wpa_set_encryption. 
*/ if (!param->value) { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_0; } else { sec.flags |= SEC_LEVEL; sec.level = SEC_LEVEL_1; } if (priv->ieee->set_security) priv->ieee->set_security(priv->ieee->dev, &sec); break; } case IW_AUTH_80211_AUTH_ALG: ret = ipw2100_wpa_set_auth_algs(priv, param->value); break; case IW_AUTH_WPA_ENABLED: ret = ipw2100_wpa_enable(priv, param->value); break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: ieee->ieee802_1x = param->value; break; //case IW_AUTH_ROAMING_CONTROL: case IW_AUTH_PRIVACY_INVOKED: ieee->privacy_invoked = param->value; break; default: return -EOPNOTSUPP; } return ret; } /* SIOCGIWAUTH */ static int ipw2100_wx_get_auth(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); struct libipw_device *ieee = priv->ieee; struct lib80211_crypt_data *crypt; struct iw_param *param = &wrqu->param; int ret = 0; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: case IW_AUTH_CIPHER_PAIRWISE: case IW_AUTH_CIPHER_GROUP: case IW_AUTH_KEY_MGMT: /* * wpa_supplicant will control these internally */ ret = -EOPNOTSUPP; break; case IW_AUTH_TKIP_COUNTERMEASURES: crypt = priv->ieee->crypt_info.crypt[priv->ieee->crypt_info.tx_keyidx]; if (!crypt || !crypt->ops->get_flags) { IPW_DEBUG_WARNING("Can't get TKIP countermeasures: " "crypt not set!\n"); break; } param->value = (crypt->ops->get_flags(crypt->priv) & IEEE80211_CRYPTO_TKIP_COUNTERMEASURES) ? 
1 : 0; break; case IW_AUTH_DROP_UNENCRYPTED: param->value = ieee->drop_unencrypted; break; case IW_AUTH_80211_AUTH_ALG: param->value = priv->ieee->sec.auth_mode; break; case IW_AUTH_WPA_ENABLED: param->value = ieee->wpa_enabled; break; case IW_AUTH_RX_UNENCRYPTED_EAPOL: param->value = ieee->ieee802_1x; break; case IW_AUTH_ROAMING_CONTROL: case IW_AUTH_PRIVACY_INVOKED: param->value = ieee->privacy_invoked; break; default: return -EOPNOTSUPP; } return 0; } /* SIOCSIWENCODEEXT */ static int ipw2100_wx_set_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); return libipw_wx_set_encodeext(priv->ieee, info, wrqu, extra); } /* SIOCGIWENCODEEXT */ static int ipw2100_wx_get_encodeext(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); return libipw_wx_get_encodeext(priv->ieee, info, wrqu, extra); } /* SIOCSIWMLME */ static int ipw2100_wx_set_mlme(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); struct iw_mlme *mlme = (struct iw_mlme *)extra; __le16 reason; reason = cpu_to_le16(mlme->reason_code); switch (mlme->cmd) { case IW_MLME_DEAUTH: // silently ignore break; case IW_MLME_DISASSOC: ipw2100_disassociate_bssid(priv); break; default: return -EOPNOTSUPP; } return 0; } /* * * IWPRIV handlers * */ #ifdef CONFIG_IPW2100_MONITOR static int ipw2100_wx_set_promisc(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int *parms = (int *)extra; int enable = (parms[0] > 0); int err = 0; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } if (enable) { if (priv->ieee->iw_mode == IW_MODE_MONITOR) { err = ipw2100_set_channel(priv, parms[1], 0); goto done; } priv->channel = parms[1]; 
err = ipw2100_switch_mode(priv, IW_MODE_MONITOR); } else { if (priv->ieee->iw_mode == IW_MODE_MONITOR) err = ipw2100_switch_mode(priv, priv->last_mode); } done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_reset(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); if (priv->status & STATUS_INITIALIZED) schedule_reset(priv); return 0; } #endif static int ipw2100_wx_set_powermode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int err = 0, mode = *(int *)extra; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } if ((mode < 0) || (mode > POWER_MODES)) mode = IPW_POWER_AUTO; if (IPW_POWER_LEVEL(priv->power_mode) != mode) err = ipw2100_set_power_mode(priv, mode); done: mutex_unlock(&priv->action_mutex); return err; } #define MAX_POWER_STRING 80 static int ipw2100_wx_get_powermode(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. 
No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); int level = IPW_POWER_LEVEL(priv->power_mode); s32 timeout, period; if (!(priv->power_mode & IPW_POWER_ENABLED)) { snprintf(extra, MAX_POWER_STRING, "Power save level: %d (Off)", level); } else { switch (level) { case IPW_POWER_MODE_CAM: snprintf(extra, MAX_POWER_STRING, "Power save level: %d (None)", level); break; case IPW_POWER_AUTO: snprintf(extra, MAX_POWER_STRING, "Power save level: %d (Auto)", level); break; default: timeout = timeout_duration[level - 1] / 1000; period = period_duration[level - 1] / 1000; snprintf(extra, MAX_POWER_STRING, "Power save level: %d " "(Timeout %dms, Period %dms)", level, timeout, period); } } wrqu->data.length = strlen(extra) + 1; return 0; } static int ipw2100_wx_set_preamble(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int err, mode = *(int *)extra; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } if (mode == 1) priv->config |= CFG_LONG_PREAMBLE; else if (mode == 0) priv->config &= ~CFG_LONG_PREAMBLE; else { err = -EINVAL; goto done; } err = ipw2100_system_config(priv, 0); done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_preamble(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. 
No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); if (priv->config & CFG_LONG_PREAMBLE) snprintf(wrqu->name, IFNAMSIZ, "long (1)"); else snprintf(wrqu->name, IFNAMSIZ, "auto (0)"); return 0; } #ifdef CONFIG_IPW2100_MONITOR static int ipw2100_wx_set_crc_check(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { struct ipw2100_priv *priv = libipw_priv(dev); int err, mode = *(int *)extra; mutex_lock(&priv->action_mutex); if (!(priv->status & STATUS_INITIALIZED)) { err = -EIO; goto done; } if (mode == 1) priv->config |= CFG_CRC_CHECK; else if (mode == 0) priv->config &= ~CFG_CRC_CHECK; else { err = -EINVAL; goto done; } err = 0; done: mutex_unlock(&priv->action_mutex); return err; } static int ipw2100_wx_get_crc_check(struct net_device *dev, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { /* * This can be called at any time. No action lock required */ struct ipw2100_priv *priv = libipw_priv(dev); if (priv->config & CFG_CRC_CHECK) snprintf(wrqu->name, IFNAMSIZ, "CRC checked (1)"); else snprintf(wrqu->name, IFNAMSIZ, "CRC ignored (0)"); return 0; } #endif /* CONFIG_IPW2100_MONITOR */ static iw_handler ipw2100_wx_handlers[] = { IW_HANDLER(SIOCGIWNAME, ipw2100_wx_get_name), IW_HANDLER(SIOCSIWFREQ, ipw2100_wx_set_freq), IW_HANDLER(SIOCGIWFREQ, ipw2100_wx_get_freq), IW_HANDLER(SIOCSIWMODE, ipw2100_wx_set_mode), IW_HANDLER(SIOCGIWMODE, ipw2100_wx_get_mode), IW_HANDLER(SIOCGIWRANGE, ipw2100_wx_get_range), IW_HANDLER(SIOCSIWAP, ipw2100_wx_set_wap), IW_HANDLER(SIOCGIWAP, ipw2100_wx_get_wap), IW_HANDLER(SIOCSIWMLME, ipw2100_wx_set_mlme), IW_HANDLER(SIOCSIWSCAN, ipw2100_wx_set_scan), IW_HANDLER(SIOCGIWSCAN, ipw2100_wx_get_scan), IW_HANDLER(SIOCSIWESSID, ipw2100_wx_set_essid), IW_HANDLER(SIOCGIWESSID, ipw2100_wx_get_essid), IW_HANDLER(SIOCSIWNICKN, ipw2100_wx_set_nick), IW_HANDLER(SIOCGIWNICKN, ipw2100_wx_get_nick), IW_HANDLER(SIOCSIWRATE, ipw2100_wx_set_rate), IW_HANDLER(SIOCGIWRATE, 
ipw2100_wx_get_rate), IW_HANDLER(SIOCSIWRTS, ipw2100_wx_set_rts), IW_HANDLER(SIOCGIWRTS, ipw2100_wx_get_rts), IW_HANDLER(SIOCSIWFRAG, ipw2100_wx_set_frag), IW_HANDLER(SIOCGIWFRAG, ipw2100_wx_get_frag), IW_HANDLER(SIOCSIWTXPOW, ipw2100_wx_set_txpow), IW_HANDLER(SIOCGIWTXPOW, ipw2100_wx_get_txpow), IW_HANDLER(SIOCSIWRETRY, ipw2100_wx_set_retry), IW_HANDLER(SIOCGIWRETRY, ipw2100_wx_get_retry), IW_HANDLER(SIOCSIWENCODE, ipw2100_wx_set_encode), IW_HANDLER(SIOCGIWENCODE, ipw2100_wx_get_encode), IW_HANDLER(SIOCSIWPOWER, ipw2100_wx_set_power), IW_HANDLER(SIOCGIWPOWER, ipw2100_wx_get_power), IW_HANDLER(SIOCSIWGENIE, ipw2100_wx_set_genie), IW_HANDLER(SIOCGIWGENIE, ipw2100_wx_get_genie), IW_HANDLER(SIOCSIWAUTH, ipw2100_wx_set_auth), IW_HANDLER(SIOCGIWAUTH, ipw2100_wx_get_auth), IW_HANDLER(SIOCSIWENCODEEXT, ipw2100_wx_set_encodeext), IW_HANDLER(SIOCGIWENCODEEXT, ipw2100_wx_get_encodeext), }; #define IPW2100_PRIV_SET_MONITOR SIOCIWFIRSTPRIV #define IPW2100_PRIV_RESET SIOCIWFIRSTPRIV+1 #define IPW2100_PRIV_SET_POWER SIOCIWFIRSTPRIV+2 #define IPW2100_PRIV_GET_POWER SIOCIWFIRSTPRIV+3 #define IPW2100_PRIV_SET_LONGPREAMBLE SIOCIWFIRSTPRIV+4 #define IPW2100_PRIV_GET_LONGPREAMBLE SIOCIWFIRSTPRIV+5 #define IPW2100_PRIV_SET_CRC_CHECK SIOCIWFIRSTPRIV+6 #define IPW2100_PRIV_GET_CRC_CHECK SIOCIWFIRSTPRIV+7 static const struct iw_priv_args ipw2100_private_args[] = { #ifdef CONFIG_IPW2100_MONITOR { IPW2100_PRIV_SET_MONITOR, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 2, 0, "monitor"}, { IPW2100_PRIV_RESET, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 0, 0, "reset"}, #endif /* CONFIG_IPW2100_MONITOR */ { IPW2100_PRIV_SET_POWER, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_power"}, { IPW2100_PRIV_GET_POWER, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | MAX_POWER_STRING, "get_power"}, { IPW2100_PRIV_SET_LONGPREAMBLE, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_preamble"}, { IPW2100_PRIV_GET_LONGPREAMBLE, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "get_preamble"}, #ifdef 
CONFIG_IPW2100_MONITOR { IPW2100_PRIV_SET_CRC_CHECK, IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "set_crc_check"}, { IPW2100_PRIV_GET_CRC_CHECK, 0, IW_PRIV_TYPE_CHAR | IW_PRIV_SIZE_FIXED | IFNAMSIZ, "get_crc_check"}, #endif /* CONFIG_IPW2100_MONITOR */ }; static iw_handler ipw2100_private_handler[] = { #ifdef CONFIG_IPW2100_MONITOR ipw2100_wx_set_promisc, ipw2100_wx_reset, #else /* CONFIG_IPW2100_MONITOR */ NULL, NULL, #endif /* CONFIG_IPW2100_MONITOR */ ipw2100_wx_set_powermode, ipw2100_wx_get_powermode, ipw2100_wx_set_preamble, ipw2100_wx_get_preamble, #ifdef CONFIG_IPW2100_MONITOR ipw2100_wx_set_crc_check, ipw2100_wx_get_crc_check, #else /* CONFIG_IPW2100_MONITOR */ NULL, NULL, #endif /* CONFIG_IPW2100_MONITOR */ }; /* * Get wireless statistics. * Called by /proc/net/wireless * Also called by SIOCGIWSTATS */ static struct iw_statistics *ipw2100_wx_wireless_stats(struct net_device *dev) { enum { POOR = 30, FAIR = 60, GOOD = 80, VERY_GOOD = 90, EXCELLENT = 95, PERFECT = 100 }; int rssi_qual; int tx_qual; int beacon_qual; int quality; struct ipw2100_priv *priv = libipw_priv(dev); struct iw_statistics *wstats; u32 rssi, tx_retries, missed_beacons, tx_failures; u32 ord_len = sizeof(u32); if (!priv) return (struct iw_statistics *)NULL; wstats = &priv->wstats; /* if hw is disabled, then ipw2100_get_ordinal() can't be called. * ipw2100_wx_wireless_stats seems to be called before fw is * initialized. 
STATUS_ASSOCIATED will only be set if the hw is up * and associated; if not associcated, the values are all meaningless * anyway, so set them all to NULL and INVALID */ if (!(priv->status & STATUS_ASSOCIATED)) { wstats->miss.beacon = 0; wstats->discard.retries = 0; wstats->qual.qual = 0; wstats->qual.level = 0; wstats->qual.noise = 0; wstats->qual.updated = 7; wstats->qual.updated |= IW_QUAL_NOISE_INVALID | IW_QUAL_QUAL_INVALID | IW_QUAL_LEVEL_INVALID; return wstats; } if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_MISSED_BCNS, &missed_beacons, &ord_len)) goto fail_get_ordinal; /* If we don't have a connection the quality and level is 0 */ if (!(priv->status & STATUS_ASSOCIATED)) { wstats->qual.qual = 0; wstats->qual.level = 0; } else { if (ipw2100_get_ordinal(priv, IPW_ORD_RSSI_AVG_CURR, &rssi, &ord_len)) goto fail_get_ordinal; wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM; if (rssi < 10) rssi_qual = rssi * POOR / 10; else if (rssi < 15) rssi_qual = (rssi - 10) * (FAIR - POOR) / 5 + POOR; else if (rssi < 20) rssi_qual = (rssi - 15) * (GOOD - FAIR) / 5 + FAIR; else if (rssi < 30) rssi_qual = (rssi - 20) * (VERY_GOOD - GOOD) / 10 + GOOD; else rssi_qual = (rssi - 30) * (PERFECT - VERY_GOOD) / 10 + VERY_GOOD; if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_PERCENT_RETRIES, &tx_retries, &ord_len)) goto fail_get_ordinal; if (tx_retries > 75) tx_qual = (90 - tx_retries) * POOR / 15; else if (tx_retries > 70) tx_qual = (75 - tx_retries) * (FAIR - POOR) / 5 + POOR; else if (tx_retries > 65) tx_qual = (70 - tx_retries) * (GOOD - FAIR) / 5 + FAIR; else if (tx_retries > 50) tx_qual = (65 - tx_retries) * (VERY_GOOD - GOOD) / 15 + GOOD; else tx_qual = (50 - tx_retries) * (PERFECT - VERY_GOOD) / 50 + VERY_GOOD; if (missed_beacons > 50) beacon_qual = (60 - missed_beacons) * POOR / 10; else if (missed_beacons > 40) beacon_qual = (50 - missed_beacons) * (FAIR - POOR) / 10 + POOR; else if (missed_beacons > 32) beacon_qual = (40 - missed_beacons) * (GOOD - FAIR) / 18 + FAIR; else if 
(missed_beacons > 20) beacon_qual = (32 - missed_beacons) * (VERY_GOOD - GOOD) / 20 + GOOD; else beacon_qual = (20 - missed_beacons) * (PERFECT - VERY_GOOD) / 20 + VERY_GOOD; quality = min(tx_qual, rssi_qual); quality = min(beacon_qual, quality); #ifdef CONFIG_IPW2100_DEBUG if (beacon_qual == quality) IPW_DEBUG_WX("Quality clamped by Missed Beacons\n"); else if (tx_qual == quality) IPW_DEBUG_WX("Quality clamped by Tx Retries\n"); else if (quality != 100) IPW_DEBUG_WX("Quality clamped by Signal Strength\n"); else IPW_DEBUG_WX("Quality not clamped.\n"); #endif wstats->qual.qual = quality; wstats->qual.level = rssi + IPW2100_RSSI_TO_DBM; } wstats->qual.noise = 0; wstats->qual.updated = 7; wstats->qual.updated |= IW_QUAL_NOISE_INVALID; /* FIXME: this is percent and not a # */ wstats->miss.beacon = missed_beacons; if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_TX_FAILURES, &tx_failures, &ord_len)) goto fail_get_ordinal; wstats->discard.retries = tx_failures; return wstats; fail_get_ordinal: IPW_DEBUG_WX("failed querying ordinals.\n"); return (struct iw_statistics *)NULL; } static struct iw_handler_def ipw2100_wx_handler_def = { .standard = ipw2100_wx_handlers, .num_standard = ARRAY_SIZE(ipw2100_wx_handlers), .num_private = ARRAY_SIZE(ipw2100_private_handler), .num_private_args = ARRAY_SIZE(ipw2100_private_args), .private = (iw_handler *) ipw2100_private_handler, .private_args = (struct iw_priv_args *)ipw2100_private_args, .get_wireless_stats = ipw2100_wx_wireless_stats, }; static void ipw2100_wx_event_work(struct work_struct *work) { struct ipw2100_priv *priv = container_of(work, struct ipw2100_priv, wx_event_work.work); union iwreq_data wrqu; unsigned int len = ETH_ALEN; if (priv->status & STATUS_STOPPING) return; mutex_lock(&priv->action_mutex); IPW_DEBUG_WX("enter\n"); mutex_unlock(&priv->action_mutex); wrqu.ap_addr.sa_family = ARPHRD_ETHER; /* Fetch BSSID from the hardware */ if (!(priv->status & (STATUS_ASSOCIATING | STATUS_ASSOCIATED)) || priv->status & 
STATUS_RF_KILL_MASK || ipw2100_get_ordinal(priv, IPW_ORD_STAT_ASSN_AP_BSSID, &priv->bssid, &len)) { memset(wrqu.ap_addr.sa_data, 0, ETH_ALEN); } else { /* We now have the BSSID, so can finish setting to the full * associated state */ memcpy(wrqu.ap_addr.sa_data, priv->bssid, ETH_ALEN); memcpy(priv->ieee->bssid, priv->bssid, ETH_ALEN); priv->status &= ~STATUS_ASSOCIATING; priv->status |= STATUS_ASSOCIATED; netif_carrier_on(priv->net_dev); netif_wake_queue(priv->net_dev); } if (!(priv->status & STATUS_ASSOCIATED)) { IPW_DEBUG_WX("Configuring ESSID\n"); mutex_lock(&priv->action_mutex); /* This is a disassociation event, so kick the firmware to * look for another AP */ if (priv->config & CFG_STATIC_ESSID) ipw2100_set_essid(priv, priv->essid, priv->essid_len, 0); else ipw2100_set_essid(priv, NULL, 0, 0); mutex_unlock(&priv->action_mutex); } wireless_send_event(priv->net_dev, SIOCGIWAP, &wrqu, NULL); } #define IPW2100_FW_MAJOR_VERSION 1 #define IPW2100_FW_MINOR_VERSION 3 #define IPW2100_FW_MINOR(x) ((x & 0xff) >> 8) #define IPW2100_FW_MAJOR(x) (x & 0xff) #define IPW2100_FW_VERSION ((IPW2100_FW_MINOR_VERSION << 8) | \ IPW2100_FW_MAJOR_VERSION) #define IPW2100_FW_PREFIX "ipw2100-" __stringify(IPW2100_FW_MAJOR_VERSION) \ "." __stringify(IPW2100_FW_MINOR_VERSION) #define IPW2100_FW_NAME(x) IPW2100_FW_PREFIX "" x ".fw" /* BINARY FIRMWARE HEADER FORMAT offset length desc 0 2 version 2 2 mode == 0:BSS,1:IBSS,2:MONITOR 4 4 fw_len 8 4 uc_len C fw_len firmware data 12 + fw_len uc_len microcode data */ struct ipw2100_fw_header { short version; short mode; unsigned int fw_size; unsigned int uc_size; } __packed; static int ipw2100_mod_firmware_load(struct ipw2100_fw *fw) { struct ipw2100_fw_header *h = (struct ipw2100_fw_header *)fw->fw_entry->data; if (IPW2100_FW_MAJOR(h->version) != IPW2100_FW_MAJOR_VERSION) { printk(KERN_WARNING DRV_NAME ": Firmware image not compatible " "(detected version id of %u). 
" "See Documentation/networking/README.ipw2100\n", h->version); return 1; } fw->version = h->version; fw->fw.data = fw->fw_entry->data + sizeof(struct ipw2100_fw_header); fw->fw.size = h->fw_size; fw->uc.data = fw->fw.data + h->fw_size; fw->uc.size = h->uc_size; return 0; } static int ipw2100_get_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw) { char *fw_name; int rc; IPW_DEBUG_INFO("%s: Using hotplug firmware load.\n", priv->net_dev->name); switch (priv->ieee->iw_mode) { case IW_MODE_ADHOC: fw_name = IPW2100_FW_NAME("-i"); break; #ifdef CONFIG_IPW2100_MONITOR case IW_MODE_MONITOR: fw_name = IPW2100_FW_NAME("-p"); break; #endif case IW_MODE_INFRA: default: fw_name = IPW2100_FW_NAME(""); break; } rc = request_firmware(&fw->fw_entry, fw_name, &priv->pci_dev->dev); if (rc < 0) { printk(KERN_ERR DRV_NAME ": " "%s: Firmware '%s' not available or load failed.\n", priv->net_dev->name, fw_name); return rc; } IPW_DEBUG_INFO("firmware data %p size %zd\n", fw->fw_entry->data, fw->fw_entry->size); ipw2100_mod_firmware_load(fw); return 0; } MODULE_FIRMWARE(IPW2100_FW_NAME("-i")); #ifdef CONFIG_IPW2100_MONITOR MODULE_FIRMWARE(IPW2100_FW_NAME("-p")); #endif MODULE_FIRMWARE(IPW2100_FW_NAME("")); static void ipw2100_release_firmware(struct ipw2100_priv *priv, struct ipw2100_fw *fw) { fw->version = 0; release_firmware(fw->fw_entry); fw->fw_entry = NULL; } static int ipw2100_get_fwversion(struct ipw2100_priv *priv, char *buf, size_t max) { char ver[MAX_FW_VERSION_LEN]; u32 len = MAX_FW_VERSION_LEN; u32 tmp; int i; /* firmware version is an ascii string (max len of 14) */ if (ipw2100_get_ordinal(priv, IPW_ORD_STAT_FW_VER_NUM, ver, &len)) return -EIO; tmp = max; if (len >= max) len = max - 1; for (i = 0; i < len; i++) buf[i] = ver[i]; buf[i] = '\0'; return tmp; } static int ipw2100_get_ucodeversion(struct ipw2100_priv *priv, char *buf, size_t max) { u32 ver; u32 len = sizeof(ver); /* microcode version is a 32 bit integer */ if (ipw2100_get_ordinal(priv, 
IPW_ORD_UCODE_VERSION, &ver, &len)) return -EIO; return snprintf(buf, max, "%08X", ver); } /* * On exit, the firmware will have been freed from the fw list */ static int ipw2100_fw_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw) { /* firmware is constructed of N contiguous entries, each entry is * structured as: * * offset sie desc * 0 4 address to write to * 4 2 length of data run * 6 length data */ unsigned int addr; unsigned short len; const unsigned char *firmware_data = fw->fw.data; unsigned int firmware_data_left = fw->fw.size; while (firmware_data_left > 0) { addr = *(u32 *) (firmware_data); firmware_data += 4; firmware_data_left -= 4; len = *(u16 *) (firmware_data); firmware_data += 2; firmware_data_left -= 2; if (len > 32) { printk(KERN_ERR DRV_NAME ": " "Invalid firmware run-length of %d bytes\n", len); return -EINVAL; } write_nic_memory(priv->net_dev, addr, len, firmware_data); firmware_data += len; firmware_data_left -= len; } return 0; } struct symbol_alive_response { u8 cmd_id; u8 seq_num; u8 ucode_rev; u8 eeprom_valid; u16 valid_flags; u8 IEEE_addr[6]; u16 flags; u16 pcb_rev; u16 clock_settle_time; // 1us LSB u16 powerup_settle_time; // 1us LSB u16 hop_settle_time; // 1us LSB u8 date[3]; // month, day, year u8 time[2]; // hours, minutes u8 ucode_valid; }; static int ipw2100_ucode_download(struct ipw2100_priv *priv, struct ipw2100_fw *fw) { struct net_device *dev = priv->net_dev; const unsigned char *microcode_data = fw->uc.data; unsigned int microcode_data_left = fw->uc.size; void __iomem *reg = priv->ioaddr; struct symbol_alive_response response; int i, j; u8 data; /* Symbol control */ write_nic_word(dev, IPW2100_CONTROL_REG, 0x703); readl(reg); write_nic_word(dev, IPW2100_CONTROL_REG, 0x707); readl(reg); /* HW config */ write_nic_byte(dev, 0x210014, 0x72); /* fifo width =16 */ readl(reg); write_nic_byte(dev, 0x210014, 0x72); /* fifo width =16 */ readl(reg); /* EN_CS_ACCESS bit to reset control store pointer */ write_nic_byte(dev, 
0x210000, 0x40); readl(reg); write_nic_byte(dev, 0x210000, 0x0); readl(reg); write_nic_byte(dev, 0x210000, 0x40); readl(reg); /* copy microcode from buffer into Symbol */ while (microcode_data_left > 0) { write_nic_byte(dev, 0x210010, *microcode_data++); write_nic_byte(dev, 0x210010, *microcode_data++); microcode_data_left -= 2; } /* EN_CS_ACCESS bit to reset the control store pointer */ write_nic_byte(dev, 0x210000, 0x0); readl(reg); /* Enable System (Reg 0) * first enable causes garbage in RX FIFO */ write_nic_byte(dev, 0x210000, 0x0); readl(reg); write_nic_byte(dev, 0x210000, 0x80); readl(reg); /* Reset External Baseband Reg */ write_nic_word(dev, IPW2100_CONTROL_REG, 0x703); readl(reg); write_nic_word(dev, IPW2100_CONTROL_REG, 0x707); readl(reg); /* HW Config (Reg 5) */ write_nic_byte(dev, 0x210014, 0x72); // fifo width =16 readl(reg); write_nic_byte(dev, 0x210014, 0x72); // fifo width =16 readl(reg); /* Enable System (Reg 0) * second enable should be OK */ write_nic_byte(dev, 0x210000, 0x00); // clear enable system readl(reg); write_nic_byte(dev, 0x210000, 0x80); // set enable system /* check Symbol is enabled - upped this from 5 as it wasn't always * catching the update */ for (i = 0; i < 10; i++) { udelay(10); /* check Dino is enabled bit */ read_nic_byte(dev, 0x210000, &data); if (data & 0x1) break; } if (i == 10) { printk(KERN_ERR DRV_NAME ": %s: Error initializing Symbol\n", dev->name); return -EIO; } /* Get Symbol alive response */ for (i = 0; i < 30; i++) { /* Read alive response structure */ for (j = 0; j < (sizeof(struct symbol_alive_response) >> 1); j++) read_nic_word(dev, 0x210004, ((u16 *) & response) + j); if ((response.cmd_id == 1) && (response.ucode_valid == 0x1)) break; udelay(10); } if (i == 30) { printk(KERN_ERR DRV_NAME ": %s: No response from Symbol - hw not alive\n", dev->name); printk_buf(IPW_DL_ERROR, (u8 *) & response, sizeof(response)); return -EIO; } return 0; }
gpl-2.0
libertyjin/arm-linux-kernel
drivers/acpi/acpica/hwacpi.c
2080
5654
/****************************************************************************** * * Module Name: hwacpi - ACPI Hardware Initialization/Mode Interface * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #define _COMPONENT ACPI_HARDWARE ACPI_MODULE_NAME("hwacpi") #if (!ACPI_REDUCED_HARDWARE) /* Entire module */ /****************************************************************************** * * FUNCTION: acpi_hw_set_mode * * PARAMETERS: mode - SYS_MODE_ACPI or SYS_MODE_LEGACY * * RETURN: Status * * DESCRIPTION: Transitions the system into the requested mode. * ******************************************************************************/ acpi_status acpi_hw_set_mode(u32 mode) { acpi_status status; ACPI_FUNCTION_TRACE(hw_set_mode); /* If the Hardware Reduced flag is set, machine is always in acpi mode */ if (acpi_gbl_reduced_hardware) { return_ACPI_STATUS(AE_OK); } /* * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, * system does not support mode transition. */ if (!acpi_gbl_FADT.smi_command) { ACPI_ERROR((AE_INFO, "No SMI_CMD in FADT, mode transition failed")); return_ACPI_STATUS(AE_NO_HARDWARE_RESPONSE); } /* * ACPI 2.0 clarified the meaning of ACPI_ENABLE and ACPI_DISABLE * in FADT: If it is zero, enabling or disabling is not supported. * As old systems may have used zero for mode transition, * we make sure both the numbers are zero to determine these * transitions are not supported. 
*/ if (!acpi_gbl_FADT.acpi_enable && !acpi_gbl_FADT.acpi_disable) { ACPI_ERROR((AE_INFO, "No ACPI mode transition supported in this system " "(enable/disable both zero)")); return_ACPI_STATUS(AE_OK); } switch (mode) { case ACPI_SYS_MODE_ACPI: /* BIOS should have disabled ALL fixed and GP events */ status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, (u32) acpi_gbl_FADT.acpi_enable, 8); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Attempting to enable ACPI mode\n")); break; case ACPI_SYS_MODE_LEGACY: /* * BIOS should clear all fixed status bits and restore fixed event * enable bits to default */ status = acpi_hw_write_port(acpi_gbl_FADT.smi_command, (u32)acpi_gbl_FADT.acpi_disable, 8); ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Attempting to enable Legacy (non-ACPI) mode\n")); break; default: return_ACPI_STATUS(AE_BAD_PARAMETER); } if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not write ACPI mode change")); return_ACPI_STATUS(status); } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_hw_get_mode * * PARAMETERS: none * * RETURN: SYS_MODE_ACPI or SYS_MODE_LEGACY * * DESCRIPTION: Return current operating state of system. Determined by * querying the SCI_EN bit. * ******************************************************************************/ u32 acpi_hw_get_mode(void) { acpi_status status; u32 value; ACPI_FUNCTION_TRACE(hw_get_mode); /* If the Hardware Reduced flag is set, machine is always in acpi mode */ if (acpi_gbl_reduced_hardware) { return_UINT32(ACPI_SYS_MODE_ACPI); } /* * ACPI 2.0 clarified that if SMI_CMD in FADT is zero, * system does not support mode transition. 
*/ if (!acpi_gbl_FADT.smi_command) { return_UINT32(ACPI_SYS_MODE_ACPI); } status = acpi_read_bit_register(ACPI_BITREG_SCI_ENABLE, &value); if (ACPI_FAILURE(status)) { return_UINT32(ACPI_SYS_MODE_LEGACY); } if (value) { return_UINT32(ACPI_SYS_MODE_ACPI); } else { return_UINT32(ACPI_SYS_MODE_LEGACY); } } #endif /* !ACPI_REDUCED_HARDWARE */
gpl-2.0
animania260/android_ani-kernel_galaxy_reverb
arch/m68k/platform/68360/ints.c
2336
4371
/* * linux/arch/$(ARCH)/platform/$(PLATFORM)/ints.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * * Copyright (c) 2000 Michael Leslie <mleslie@lineo.com> * Copyright (c) 1996 Roman Zippel * Copyright (c) 1999 D. Jeff Dionne <jeff@uclinux.org> */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/traps.h> #include <asm/machdep.h> #include <asm/m68360.h> /* from quicc/commproc.c: */ extern QUICC *pquicc; extern void cpm_interrupt_init(void); #define INTERNAL_IRQS (96) /* assembler routines */ asmlinkage void system_call(void); asmlinkage void buserr(void); asmlinkage void trap(void); asmlinkage void bad_interrupt(void); asmlinkage void inthandler(void); extern void *_ramvec[]; /* The number of spurious interrupts */ volatile unsigned int num_spurious; static void intc_irq_unmask(struct irq_data *d) { pquicc->intr_cimr |= (1 << d->irq); } static void intc_irq_mask(struct irq_data *d) { pquicc->intr_cimr &= ~(1 << d->irq); } static void intc_irq_ack(struct irq_data *d) { pquicc->intr_cisr = (1 << d->irq); } static struct irq_chip intc_irq_chip = { .name = "M68K-INTC", .irq_mask = intc_irq_mask, .irq_unmask = intc_irq_unmask, .irq_ack = intc_irq_ack, }; /* * This function should be called during kernel startup to initialize * the vector table. 
*/ void init_IRQ(void) { int i; int vba = (CPM_VECTOR_BASE<<4); /* set up the vectors */ _ramvec[2] = buserr; _ramvec[3] = trap; _ramvec[4] = trap; _ramvec[5] = trap; _ramvec[6] = trap; _ramvec[7] = trap; _ramvec[8] = trap; _ramvec[9] = trap; _ramvec[10] = trap; _ramvec[11] = trap; _ramvec[12] = trap; _ramvec[13] = trap; _ramvec[14] = trap; _ramvec[15] = trap; _ramvec[32] = system_call; _ramvec[33] = trap; cpm_interrupt_init(); /* set up CICR for vector base address and irq level */ /* irl = 4, hp = 1f - see MC68360UM p 7-377 */ pquicc->intr_cicr = 0x00e49f00 | vba; /* CPM interrupt vectors: (p 7-376) */ _ramvec[vba+CPMVEC_ERROR] = bad_interrupt; /* Error */ _ramvec[vba+CPMVEC_PIO_PC11] = inthandler; /* pio - pc11 */ _ramvec[vba+CPMVEC_PIO_PC10] = inthandler; /* pio - pc10 */ _ramvec[vba+CPMVEC_SMC2] = inthandler; /* smc2/pip */ _ramvec[vba+CPMVEC_SMC1] = inthandler; /* smc1 */ _ramvec[vba+CPMVEC_SPI] = inthandler; /* spi */ _ramvec[vba+CPMVEC_PIO_PC9] = inthandler; /* pio - pc9 */ _ramvec[vba+CPMVEC_TIMER4] = inthandler; /* timer 4 */ _ramvec[vba+CPMVEC_RESERVED1] = inthandler; /* reserved */ _ramvec[vba+CPMVEC_PIO_PC8] = inthandler; /* pio - pc8 */ _ramvec[vba+CPMVEC_PIO_PC7] = inthandler; /* pio - pc7 */ _ramvec[vba+CPMVEC_PIO_PC6] = inthandler; /* pio - pc6 */ _ramvec[vba+CPMVEC_TIMER3] = inthandler; /* timer 3 */ _ramvec[vba+CPMVEC_PIO_PC5] = inthandler; /* pio - pc5 */ _ramvec[vba+CPMVEC_PIO_PC4] = inthandler; /* pio - pc4 */ _ramvec[vba+CPMVEC_RESERVED2] = inthandler; /* reserved */ _ramvec[vba+CPMVEC_RISCTIMER] = inthandler; /* timer table */ _ramvec[vba+CPMVEC_TIMER2] = inthandler; /* timer 2 */ _ramvec[vba+CPMVEC_RESERVED3] = inthandler; /* reserved */ _ramvec[vba+CPMVEC_IDMA2] = inthandler; /* idma 2 */ _ramvec[vba+CPMVEC_IDMA1] = inthandler; /* idma 1 */ _ramvec[vba+CPMVEC_SDMA_CB_ERR] = inthandler; /* sdma channel bus error */ _ramvec[vba+CPMVEC_PIO_PC3] = inthandler; /* pio - pc3 */ _ramvec[vba+CPMVEC_PIO_PC2] = inthandler; /* pio - pc2 */ /* 
_ramvec[vba+CPMVEC_TIMER1] = cpm_isr_timer1; */ /* timer 1 */ _ramvec[vba+CPMVEC_TIMER1] = inthandler; /* timer 1 */ _ramvec[vba+CPMVEC_PIO_PC1] = inthandler; /* pio - pc1 */ _ramvec[vba+CPMVEC_SCC4] = inthandler; /* scc 4 */ _ramvec[vba+CPMVEC_SCC3] = inthandler; /* scc 3 */ _ramvec[vba+CPMVEC_SCC2] = inthandler; /* scc 2 */ _ramvec[vba+CPMVEC_SCC1] = inthandler; /* scc 1 */ _ramvec[vba+CPMVEC_PIO_PC0] = inthandler; /* pio - pc0 */ /* turn off all CPM interrupts */ pquicc->intr_cimr = 0x00000000; for (i = 0; (i < NR_IRQS); i++) { irq_set_chip(i, &intc_irq_chip); irq_set_handler(i, handle_level_irq); } }
gpl-2.0
cybermx/linux-2.6-imx
arch/blackfin/mach-bf537/boards/tcm_bf537.c
2336
18434
/* * Copyright 2004-2009 Analog Devices Inc. * 2008-2009 Bluetechnix * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/etherdevice.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include <linux/usb/isp1362.h> #endif #include <linux/ata_platform.h> #include <linux/irq.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> #include <asm/dpmc.h> #include <linux/spi/mmc_spi.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "Bluetechnix TCM BF537"; #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) /* all SPI peripherals info goes here */ #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00020000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = 0xe0000, .offset = 0x20000 }, { .name = "file system(spi)", .size = 0x700000, .offset = 0x00100000, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p64", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ .bits_per_word = 8, }; #endif #if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE) /* SPI ADC chip */ static struct bfin5xx_spi_chip spi_adc_chip_info = { .enable_dma = 1, /* use dma transfer with this chip*/ .bits_per_word = 16, }; #endif #if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) static struct bfin5xx_spi_chip 
ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, }; #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, .bits_per_word = 8, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE) { .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. 
*/ .platform_data = NULL, /* No spi_driver specific config */ .controller_data = &spi_adc_chip_info, }, #endif #if defined(CONFIG_SND_BF5XX_SOC_AD183X) || defined(CONFIG_SND_BF5XX_SOC_AD183X_MODULE) { .modalias = "ad183x", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 4, .controller_data = &ad1836_spi_chip_info, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_3, }, #endif }; /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) static struct platform_device hitachi_fb_device = { .name = "hitachi-tx09", }; #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) #include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource 
smc91x_resources[] = { { .start = 0x20200300, .end = 0x20200300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF14, .end = IRQ_PF14, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) static struct resource isp1362_hcd_resources[] = { { .start = 0x20308000, .end = 0x20308000, .flags = IORESOURCE_MEM, }, { .start = 0x20308004, .end = 0x20308004, .flags = IORESOURCE_MEM, }, { .start = IRQ_PG15, .end = IRQ_PG15, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE, }, }; static struct isp1362_platform_data isp1362_priv = { .sel15Kres = 1, .clknotstop = 0, .oc_enable = 0, .int_act_high = 0, .int_edge_triggered = 0, .remote_wakeup_connected = 0, .no_power_switching = 1, .power_switching_mode = 0, }; static struct platform_device isp1362_hcd_device = { .name = "isp1362-hcd", .id = 0, .dev = { .platform_data = &isp1362_priv, }, .num_resources = ARRAY_SIZE(isp1362_hcd_resources), .resource = isp1362_hcd_resources, }; #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) static struct resource net2272_bfin_resources[] = { { .start = 0x20300000, .end = 0x20300000 + 0x100, .flags = IORESOURCE_MEM, }, { .start = IRQ_PG13, .end = IRQ_PG13, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device net2272_bfin_device = { .name = "net2272", .id = -1, .num_resources = ARRAY_SIZE(net2272_bfin_resources), .resource = net2272_bfin_resources, }; #endif #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) static struct mtd_partition cm_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux kernel(nor)", .size = 0x100000, .offset = MTDPART_OFS_APPEND, }, { .name = "file system(nor)", .size 
= MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct physmap_flash_data cm_flash_data = { .width = 2, .parts = cm_partitions, .nr_parts = ARRAY_SIZE(cm_partitions), }; static unsigned cm_flash_gpios[] = { GPIO_PF4, GPIO_PF5 }; static struct resource cm_flash_resource[] = { { .name = "cfi_probe", .start = 0x20000000, .end = 0x201fffff, .flags = IORESOURCE_MEM, }, { .start = (unsigned long)cm_flash_gpios, .end = ARRAY_SIZE(cm_flash_gpios), .flags = IORESOURCE_IRQ, } }; static struct platform_device cm_flash_device = { .name = "gpio-addr-flash", .id = 0, .dev = { .platform_data = &cm_flash_data, }, .num_resources = ARRAY_SIZE(cm_flash_resource), .resource = cm_flash_resource, }; #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 static struct resource bfin_uart0_resources[] = { { .start = UART0_THR, .end = UART0_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART0_ERROR, .end = IRQ_UART0_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_TX, .end = CH_UART0_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART0_RX, .end = CH_UART0_RX, .flags = IORESOURCE_DMA, }, }; static unsigned short bfin_uart0_peripherals[] = { P_UART0_TX, P_UART0_RX, 0 }; static struct platform_device bfin_uart0_device = { .name = "bfin-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_uart0_resources), .resource = bfin_uart0_resources, .dev = { .platform_data = &bfin_uart0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_UART1 static struct resource bfin_uart1_resources[] = { { .start = UART1_THR, .end = UART1_GCTL+2, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_UART1_ERROR, .end = IRQ_UART1_ERROR, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_TX, .end = CH_UART1_TX, .flags = IORESOURCE_DMA, }, { .start = CH_UART1_RX, .end = CH_UART1_RX, .flags 
= IORESOURCE_DMA, }, }; static unsigned short bfin_uart1_peripherals[] = { P_UART1_TX, P_UART1_RX, 0 }; static struct platform_device bfin_uart1_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart1_resources), .resource = bfin_uart1_resources, .dev = { .platform_data = &bfin_uart1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #ifdef CONFIG_BFIN_SIR1 static struct resource bfin_sir1_resources[] = { { .start = 0xFFC02000, .end = 0xFFC020FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART1_RX, .end = IRQ_UART1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART1_RX, .end = CH_UART1_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir1_device = { .name = "bfin_sir", .id = 1, .num_resources = ARRAY_SIZE(bfin_sir1_resources), .resource = bfin_sir1_resources, }; #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) static struct resource bfin_twi0_resource[] = { [0] = { .start = TWI0_REGBASE, .end = TWI0_REGBASE, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_TWI, .end = IRQ_TWI, .flags = IORESOURCE_IRQ, }, }; static struct platform_device i2c_bfin_twi_device = { .name = "i2c-bfin-twi", .id = 0, .num_resources = ARRAY_SIZE(bfin_twi0_resource), .resource = bfin_twi0_resource, }; #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART static struct resource 
bfin_sport0_uart_resources[] = { { .start = SPORT0_TCR1, .end = SPORT0_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT0_RX, .end = IRQ_SPORT0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT0_ERROR, .end = IRQ_SPORT0_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport0_peripherals[] = { P_SPORT0_TFS, P_SPORT0_DTPRI, P_SPORT0_TSCLK, P_SPORT0_RFS, P_SPORT0_DRPRI, P_SPORT0_RSCLK, 0 }; static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, .num_resources = ARRAY_SIZE(bfin_sport0_uart_resources), .resource = bfin_sport0_uart_resources, .dev = { .platform_data = &bfin_sport0_peripherals, /* Passed to driver */ }, }; #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART static struct resource bfin_sport1_uart_resources[] = { { .start = SPORT1_TCR1, .end = SPORT1_MRCS3+4, .flags = IORESOURCE_MEM, }, { .start = IRQ_SPORT1_RX, .end = IRQ_SPORT1_RX+1, .flags = IORESOURCE_IRQ, }, { .start = IRQ_SPORT1_ERROR, .end = IRQ_SPORT1_ERROR, .flags = IORESOURCE_IRQ, }, }; static unsigned short bfin_sport1_peripherals[] = { P_SPORT1_TFS, P_SPORT1_DTPRI, P_SPORT1_TSCLK, P_SPORT1_RFS, P_SPORT1_DRPRI, P_SPORT1_RSCLK, 0 }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_sport1_uart_resources), .resource = bfin_sport1_uart_resources, .dev = { .platform_data = &bfin_sport1_peripherals, /* Passed to driver */ }, }; #endif #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) #include <linux/bfin_mac.h> static const unsigned short bfin_mac_peripherals[] = P_MII0; static struct bfin_phydev_platform_data bfin_phydev_data[] = { { .addr = 1, .irq = IRQ_MAC_PHYINT, }, }; static struct bfin_mii_bus_platform_data bfin_mii_bus_data = { .phydev_number = 1, .phydev_data = bfin_phydev_data, .phy_mode = PHY_INTERFACE_MODE_MII, .mac_peripherals = bfin_mac_peripherals, }; static struct platform_device bfin_mii_bus = { .name = "bfin_mii_bus", .dev = { 
.platform_data = &bfin_mii_bus_data, } }; static struct platform_device bfin_mac_device = { .name = "bfin_mac", .dev = { .platform_data = &bfin_mii_bus, } }; #endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) #define PATA_INT IRQ_PF14 static struct pata_platform_info bfin_pata_platform_data = { .ioport_shift = 2, .irq_type = IRQF_TRIGGER_HIGH | IRQF_DISABLED, }; static struct resource bfin_pata_resources[] = { { .start = 0x2030C000, .end = 0x2030C01F, .flags = IORESOURCE_MEM, }, { .start = 0x2030D018, .end = 0x2030D01B, .flags = IORESOURCE_MEM, }, { .start = PATA_INT, .end = PATA_INT, .flags = IORESOURCE_IRQ, }, }; static struct platform_device bfin_pata_device = { .name = "pata_platform", .id = -1, .num_resources = ARRAY_SIZE(bfin_pata_resources), .resource = bfin_pata_resources, .dev = { .platform_data = &bfin_pata_platform_data, } }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_085, 250000000), VRPAIR(VLEV_090, 376000000), VRPAIR(VLEV_095, 426000000), VRPAIR(VLEV_100, 426000000), VRPAIR(VLEV_105, 476000000), VRPAIR(VLEV_110, 476000000), VRPAIR(VLEV_115, 476000000), VRPAIR(VLEV_120, 500000000), VRPAIR(VLEV_125, 533000000), VRPAIR(VLEV_130, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; static struct platform_device *cm_bf537_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_FB_HITACHI_TX09) || defined(CONFIG_FB_HITACHI_TX09_MODULE) &hitachi_fb_device, #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if 
defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #ifdef CONFIG_BFIN_SIR1 &bfin_sir1_device, #endif #endif #if defined(CONFIG_I2C_BLACKFIN_TWI) || defined(CONFIG_I2C_BLACKFIN_TWI_MODULE) &i2c_bfin_twi_device, #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) &isp1362_hcd_device, #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif #if defined(CONFIG_BFIN_MAC) || defined(CONFIG_BFIN_MAC_MODULE) &bfin_mii_bus, &bfin_mac_device, #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) &net2272_bfin_device, #endif #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) &bfin_pata_device, #endif #if defined(CONFIG_MTD_GPIO_ADDR) || defined(CONFIG_MTD_GPIO_ADDR_MODULE) &cm_flash_device, #endif }; static int __init tcm_bf537_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); platform_add_devices(cm_bf537_devices, ARRAY_SIZE(cm_bf537_devices)); #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); #endif #if defined(CONFIG_PATA_PLATFORM) || defined(CONFIG_PATA_PLATFORM_MODULE) irq_set_status_flags(PATA_INT, IRQ_NOAUTOEN); #endif return 0; } arch_initcall(tcm_bf537_init); static struct platform_device *cm_bf537_early_devices[] __initdata = { #if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK) #ifdef CONFIG_SERIAL_BFIN_UART0 &bfin_uart0_device, #endif #ifdef CONFIG_SERIAL_BFIN_UART1 &bfin_uart1_device, #endif #endif #if 
defined(CONFIG_SERIAL_BFIN_SPORT_CONSOLE) #ifdef CONFIG_SERIAL_BFIN_SPORT0_UART &bfin_sport0_uart_device, #endif #ifdef CONFIG_SERIAL_BFIN_SPORT1_UART &bfin_sport1_uart_device, #endif #endif }; void __init native_machine_early_platform_add_devices(void) { printk(KERN_INFO "register early platform devices\n"); early_platform_add_devices(cm_bf537_early_devices, ARRAY_SIZE(cm_bf537_early_devices)); } void bfin_get_ether_addr(char *addr) { random_ether_addr(addr); printk(KERN_WARNING "%s:%s: Setting Ethernet MAC to a random one\n", __FILE__, __func__); } EXPORT_SYMBOL(bfin_get_ether_addr);
gpl-2.0
kirananto/RAZOR
drivers/net/ethernet/stmicro/stmmac/chain_mode.c
2336
4937
/******************************************************************************* Specialised functions for managing Chained mode Copyright(C) 2011 STMicroelectronics Ltd It defines all the functions used to handle the normal/enhanced descriptors in case of the DMA is configured to work in chained or in ring mode. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". 
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com> *******************************************************************************/ #include "stmmac.h" static unsigned int stmmac_jumbo_frm(void *p, struct sk_buff *skb, int csum) { struct stmmac_priv *priv = (struct stmmac_priv *)p; unsigned int txsize = priv->dma_tx_size; unsigned int entry = priv->cur_tx % txsize; struct dma_desc *desc = priv->dma_tx + entry; unsigned int nopaged_len = skb_headlen(skb); unsigned int bmax; unsigned int i = 1, len; if (priv->plat->enh_desc) bmax = BUF_SIZE_8KiB; else bmax = BUF_SIZE_2KiB; len = nopaged_len - bmax; desc->des2 = dma_map_single(priv->device, skb->data, bmax, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry] = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 1, bmax, csum, STMMAC_CHAIN_MODE); while (len != 0) { entry = (++priv->cur_tx) % txsize; desc = priv->dma_tx + entry; if (len > bmax) { desc->des2 = dma_map_single(priv->device, (skb->data + bmax * i), bmax, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry] = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 0, bmax, csum, STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); priv->tx_skbuff[entry] = NULL; len -= bmax; i++; } else { desc->des2 = dma_map_single(priv->device, (skb->data + bmax * i), len, DMA_TO_DEVICE); priv->tx_skbuff_dma[entry] = desc->des2; priv->hw->desc->prepare_tx_desc(desc, 0, len, csum, STMMAC_CHAIN_MODE); priv->hw->desc->set_tx_owner(desc); priv->tx_skbuff[entry] = NULL; len = 0; } } return entry; } static unsigned int stmmac_is_jumbo_frm(int len, int enh_desc) { unsigned int ret = 0; if ((enh_desc && (len > BUF_SIZE_8KiB)) || (!enh_desc && (len > BUF_SIZE_2KiB))) { ret = 1; } return ret; } static void stmmac_init_dma_chain(void *des, dma_addr_t phy_addr, unsigned int size, unsigned int extend_desc) { /* * In chained mode the des3 points to the next element in the ring. * The latest element has to point to the head. 
*/ int i; dma_addr_t dma_phy = phy_addr; if (extend_desc) { struct dma_extended_desc *p = (struct dma_extended_desc *)des; for (i = 0; i < (size - 1); i++) { dma_phy += sizeof(struct dma_extended_desc); p->basic.des3 = (unsigned int)dma_phy; p++; } p->basic.des3 = (unsigned int)phy_addr; } else { struct dma_desc *p = (struct dma_desc *)des; for (i = 0; i < (size - 1); i++) { dma_phy += sizeof(struct dma_desc); p->des3 = (unsigned int)dma_phy; p++; } p->des3 = (unsigned int)phy_addr; } } static void stmmac_refill_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; if (priv->hwts_rx_en && !priv->extend_desc) /* NOTE: Device will overwrite des3 with timestamp value if * 1588-2002 time stamping is enabled, hence reinitialize it * to keep explicit chaining in the descriptor. */ p->des3 = (unsigned int)(priv->dma_rx_phy + (((priv->dirty_rx) + 1) % priv->dma_rx_size) * sizeof(struct dma_desc)); } static void stmmac_clean_desc3(void *priv_ptr, struct dma_desc *p) { struct stmmac_priv *priv = (struct stmmac_priv *)priv_ptr; if (priv->hw->desc->get_tx_ls(p) && !priv->extend_desc) /* NOTE: Device will overwrite des3 with timestamp value if * 1588-2002 time stamping is enabled, hence reinitialize it * to keep explicit chaining in the descriptor. */ p->des3 = (unsigned int)(priv->dma_tx_phy + (((priv->dirty_tx + 1) % priv->dma_tx_size) * sizeof(struct dma_desc))); } const struct stmmac_chain_mode_ops chain_mode_ops = { .init = stmmac_init_dma_chain, .is_jumbo_frm = stmmac_is_jumbo_frm, .jumbo_frm = stmmac_jumbo_frm, .refill_desc3 = stmmac_refill_desc3, .clean_desc3 = stmmac_clean_desc3, };
gpl-2.0
arter97/odroid
net/bridge/br_stp_timer.c
2336
4610
/* * Spanning tree protocol; timer-related code * Linux ethernet bridge * * Authors: * Lennert Buytenhek <buytenh@gnu.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/kernel.h> #include <linux/times.h> #include "br_private.h" #include "br_private_stp.h" /* called under bridge lock */ static int br_is_designated_for_some_port(const struct net_bridge *br) { struct net_bridge_port *p; list_for_each_entry(p, &br->port_list, list) { if (p->state != BR_STATE_DISABLED && !memcmp(&p->designated_bridge, &br->bridge_id, 8)) return 1; } return 0; } static void br_hello_timer_expired(unsigned long arg) { struct net_bridge *br = (struct net_bridge *)arg; br_debug(br, "hello timer expired\n"); spin_lock(&br->lock); if (br->dev->flags & IFF_UP) { br_config_bpdu_generation(br); mod_timer(&br->hello_timer, round_jiffies(jiffies + br->hello_time)); } spin_unlock(&br->lock); } static void br_message_age_timer_expired(unsigned long arg) { struct net_bridge_port *p = (struct net_bridge_port *) arg; struct net_bridge *br = p->br; const bridge_id *id = &p->designated_bridge; int was_root; if (p->state == BR_STATE_DISABLED) return; br_info(br, "port %u(%s) neighbor %.2x%.2x.%pM lost\n", (unsigned int) p->port_no, p->dev->name, id->prio[0], id->prio[1], &id->addr); /* * According to the spec, the message age timer cannot be * running when we are the root bridge. So.. this was_root * check is redundant. I'm leaving it in for now, though. 
*/ spin_lock(&br->lock); if (p->state == BR_STATE_DISABLED) goto unlock; was_root = br_is_root_bridge(br); br_become_designated_port(p); br_configuration_update(br); br_port_state_selection(br); if (br_is_root_bridge(br) && !was_root) br_become_root_bridge(br); unlock: spin_unlock(&br->lock); } static void br_forward_delay_timer_expired(unsigned long arg) { struct net_bridge_port *p = (struct net_bridge_port *) arg; struct net_bridge *br = p->br; br_debug(br, "port %u(%s) forward delay timer\n", (unsigned int) p->port_no, p->dev->name); spin_lock(&br->lock); if (p->state == BR_STATE_LISTENING) { p->state = BR_STATE_LEARNING; mod_timer(&p->forward_delay_timer, jiffies + br->forward_delay); } else if (p->state == BR_STATE_LEARNING) { p->state = BR_STATE_FORWARDING; if (br_is_designated_for_some_port(br)) br_topology_change_detection(br); netif_carrier_on(br->dev); } br_log_state(p); br_ifinfo_notify(RTM_NEWLINK, p); spin_unlock(&br->lock); } static void br_tcn_timer_expired(unsigned long arg) { struct net_bridge *br = (struct net_bridge *) arg; br_debug(br, "tcn timer expired\n"); spin_lock(&br->lock); if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) { br_transmit_tcn(br); mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time); } spin_unlock(&br->lock); } static void br_topology_change_timer_expired(unsigned long arg) { struct net_bridge *br = (struct net_bridge *) arg; br_debug(br, "topo change timer expired\n"); spin_lock(&br->lock); br->topology_change_detected = 0; br->topology_change = 0; spin_unlock(&br->lock); } static void br_hold_timer_expired(unsigned long arg) { struct net_bridge_port *p = (struct net_bridge_port *) arg; br_debug(p->br, "port %u(%s) hold timer expired\n", (unsigned int) p->port_no, p->dev->name); spin_lock(&p->br->lock); if (p->config_pending) br_transmit_config(p); spin_unlock(&p->br->lock); } void br_stp_timer_init(struct net_bridge *br) { setup_timer(&br->hello_timer, br_hello_timer_expired, (unsigned long) br); 
setup_timer(&br->tcn_timer, br_tcn_timer_expired, (unsigned long) br); setup_timer(&br->topology_change_timer, br_topology_change_timer_expired, (unsigned long) br); setup_timer(&br->gc_timer, br_fdb_cleanup, (unsigned long) br); } void br_stp_port_timer_init(struct net_bridge_port *p) { setup_timer(&p->message_age_timer, br_message_age_timer_expired, (unsigned long) p); setup_timer(&p->forward_delay_timer, br_forward_delay_timer_expired, (unsigned long) p); setup_timer(&p->hold_timer, br_hold_timer_expired, (unsigned long) p); } /* Report ticks left (in USER_HZ) used for API */ unsigned long br_timer_value(const struct timer_list *timer) { return timer_pending(timer) ? jiffies_delta_to_clock_t(timer->expires - jiffies) : 0; }
gpl-2.0
JoinTheRealms/TF700-dualboot-stockbased
drivers/media/dvb/frontends/stv0297.c
3104
17757
/* Driver for STV0297 demodulator Copyright (C) 2004 Andrew de Quincey <adq_dvb@lidskialf.net> Copyright (C) 2003-2004 Dennis Noermann <dennis.noermann@noernet.de> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/jiffies.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "stv0297.h" struct stv0297_state { struct i2c_adapter *i2c; const struct stv0297_config *config; struct dvb_frontend frontend; unsigned long last_ber; unsigned long base_freq; }; #if 1 #define dprintk(x...) printk(x) #else #define dprintk(x...) #endif #define STV0297_CLOCK_KHZ 28900 static int stv0297_writereg(struct stv0297_state *state, u8 reg, u8 data) { int ret; u8 buf[] = { reg, data }; struct i2c_msg msg = {.addr = state->config->demod_address,.flags = 0,.buf = buf,.len = 2 }; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) dprintk("%s: writereg error (reg == 0x%02x, val == 0x%02x, " "ret == %i)\n", __func__, reg, data, ret); return (ret != 1) ? 
-1 : 0; } static int stv0297_readreg(struct stv0297_state *state, u8 reg) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { {.addr = state->config->demod_address,.flags = 0,.buf = b0,.len = 1}, {.addr = state->config->demod_address,.flags = I2C_M_RD,.buf = b1,.len = 1} }; // this device needs a STOP between the register and data if (state->config->stop_during_read) { if ((ret = i2c_transfer(state->i2c, &msg[0], 1)) != 1) { dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n", __func__, reg, ret); return -1; } if ((ret = i2c_transfer(state->i2c, &msg[1], 1)) != 1) { dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n", __func__, reg, ret); return -1; } } else { if ((ret = i2c_transfer(state->i2c, msg, 2)) != 2) { dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n", __func__, reg, ret); return -1; } } return b1[0]; } static int stv0297_writereg_mask(struct stv0297_state *state, u8 reg, u8 mask, u8 data) { int val; val = stv0297_readreg(state, reg); val &= ~mask; val |= (data & mask); stv0297_writereg(state, reg, val); return 0; } static int stv0297_readregs(struct stv0297_state *state, u8 reg1, u8 * b, u8 len) { int ret; struct i2c_msg msg[] = { {.addr = state->config->demod_address,.flags = 0,.buf = &reg1,.len = 1}, {.addr = state->config->demod_address,.flags = I2C_M_RD,.buf = b,.len = len} }; // this device needs a STOP between the register and data if (state->config->stop_during_read) { if ((ret = i2c_transfer(state->i2c, &msg[0], 1)) != 1) { dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n", __func__, reg1, ret); return -1; } if ((ret = i2c_transfer(state->i2c, &msg[1], 1)) != 1) { dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n", __func__, reg1, ret); return -1; } } else { if ((ret = i2c_transfer(state->i2c, msg, 2)) != 2) { dprintk("%s: readreg error (reg == 0x%02x, ret == %i)\n", __func__, reg1, ret); return -1; } } return 0; } static u32 stv0297_get_symbolrate(struct stv0297_state *state) { u64 tmp; tmp = 
stv0297_readreg(state, 0x55); tmp |= stv0297_readreg(state, 0x56) << 8; tmp |= stv0297_readreg(state, 0x57) << 16; tmp |= stv0297_readreg(state, 0x58) << 24; tmp *= STV0297_CLOCK_KHZ; tmp >>= 32; return (u32) tmp; } static void stv0297_set_symbolrate(struct stv0297_state *state, u32 srate) { long tmp; tmp = 131072L * srate; /* 131072 = 2^17 */ tmp = tmp / (STV0297_CLOCK_KHZ / 4); /* 1/4 = 2^-2 */ tmp = tmp * 8192L; /* 8192 = 2^13 */ stv0297_writereg(state, 0x55, (unsigned char) (tmp & 0xFF)); stv0297_writereg(state, 0x56, (unsigned char) (tmp >> 8)); stv0297_writereg(state, 0x57, (unsigned char) (tmp >> 16)); stv0297_writereg(state, 0x58, (unsigned char) (tmp >> 24)); } static void stv0297_set_sweeprate(struct stv0297_state *state, short fshift, long symrate) { long tmp; tmp = (long) fshift *262144L; /* 262144 = 2*18 */ tmp /= symrate; tmp *= 1024; /* 1024 = 2*10 */ // adjust if (tmp >= 0) { tmp += 500000; } else { tmp -= 500000; } tmp /= 1000000; stv0297_writereg(state, 0x60, tmp & 0xFF); stv0297_writereg_mask(state, 0x69, 0xF0, (tmp >> 4) & 0xf0); } static void stv0297_set_carrieroffset(struct stv0297_state *state, long offset) { long tmp; /* symrate is hardcoded to 10000 */ tmp = offset * 26844L; /* (2**28)/10000 */ if (tmp < 0) tmp += 0x10000000; tmp &= 0x0FFFFFFF; stv0297_writereg(state, 0x66, (unsigned char) (tmp & 0xFF)); stv0297_writereg(state, 0x67, (unsigned char) (tmp >> 8)); stv0297_writereg(state, 0x68, (unsigned char) (tmp >> 16)); stv0297_writereg_mask(state, 0x69, 0x0F, (tmp >> 24) & 0x0f); } /* static long stv0297_get_carrieroffset(struct stv0297_state *state) { s64 tmp; stv0297_writereg(state, 0x6B, 0x00); tmp = stv0297_readreg(state, 0x66); tmp |= (stv0297_readreg(state, 0x67) << 8); tmp |= (stv0297_readreg(state, 0x68) << 16); tmp |= (stv0297_readreg(state, 0x69) & 0x0F) << 24; tmp *= stv0297_get_symbolrate(state); tmp >>= 28; return (s32) tmp; } */ static void stv0297_set_initialdemodfreq(struct stv0297_state *state, long freq) { s32 tmp; if 
(freq > 10000) freq -= STV0297_CLOCK_KHZ; tmp = (STV0297_CLOCK_KHZ * 1000) / (1 << 16); tmp = (freq * 1000) / tmp; if (tmp > 0xffff) tmp = 0xffff; stv0297_writereg_mask(state, 0x25, 0x80, 0x80); stv0297_writereg(state, 0x21, tmp >> 8); stv0297_writereg(state, 0x20, tmp); } static int stv0297_set_qam(struct stv0297_state *state, fe_modulation_t modulation) { int val = 0; switch (modulation) { case QAM_16: val = 0; break; case QAM_32: val = 1; break; case QAM_64: val = 4; break; case QAM_128: val = 2; break; case QAM_256: val = 3; break; default: return -EINVAL; } stv0297_writereg_mask(state, 0x00, 0x70, val << 4); return 0; } static int stv0297_set_inversion(struct stv0297_state *state, fe_spectral_inversion_t inversion) { int val = 0; switch (inversion) { case INVERSION_OFF: val = 0; break; case INVERSION_ON: val = 1; break; default: return -EINVAL; } stv0297_writereg_mask(state, 0x83, 0x08, val << 3); return 0; } static int stv0297_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct stv0297_state *state = fe->demodulator_priv; if (enable) { stv0297_writereg(state, 0x87, 0x78); stv0297_writereg(state, 0x86, 0xc8); } return 0; } static int stv0297_init(struct dvb_frontend *fe) { struct stv0297_state *state = fe->demodulator_priv; int i; /* load init table */ for (i=0; !(state->config->inittab[i] == 0xff && state->config->inittab[i+1] == 0xff); i+=2) stv0297_writereg(state, state->config->inittab[i], state->config->inittab[i+1]); msleep(200); state->last_ber = 0; return 0; } static int stv0297_sleep(struct dvb_frontend *fe) { struct stv0297_state *state = fe->demodulator_priv; stv0297_writereg_mask(state, 0x80, 1, 1); return 0; } static int stv0297_read_status(struct dvb_frontend *fe, fe_status_t * status) { struct stv0297_state *state = fe->demodulator_priv; u8 sync = stv0297_readreg(state, 0xDF); *status = 0; if (sync & 0x80) *status |= FE_HAS_SYNC | FE_HAS_SIGNAL | FE_HAS_CARRIER | FE_HAS_VITERBI | FE_HAS_LOCK; return 0; } static int 
stv0297_read_ber(struct dvb_frontend *fe, u32 * ber) { struct stv0297_state *state = fe->demodulator_priv; u8 BER[3]; stv0297_readregs(state, 0xA0, BER, 3); if (!(BER[0] & 0x80)) { state->last_ber = BER[2] << 8 | BER[1]; stv0297_writereg_mask(state, 0xA0, 0x80, 0x80); } *ber = state->last_ber; return 0; } static int stv0297_read_signal_strength(struct dvb_frontend *fe, u16 * strength) { struct stv0297_state *state = fe->demodulator_priv; u8 STRENGTH[3]; u16 tmp; stv0297_readregs(state, 0x41, STRENGTH, 3); tmp = (STRENGTH[1] & 0x03) << 8 | STRENGTH[0]; if (STRENGTH[2] & 0x20) { if (tmp < 0x200) tmp = 0; else tmp = tmp - 0x200; } else { if (tmp > 0x1ff) tmp = 0; else tmp = 0x1ff - tmp; } *strength = (tmp << 7) | (tmp >> 2); return 0; } static int stv0297_read_snr(struct dvb_frontend *fe, u16 * snr) { struct stv0297_state *state = fe->demodulator_priv; u8 SNR[2]; stv0297_readregs(state, 0x07, SNR, 2); *snr = SNR[1] << 8 | SNR[0]; return 0; } static int stv0297_read_ucblocks(struct dvb_frontend *fe, u32 * ucblocks) { struct stv0297_state *state = fe->demodulator_priv; stv0297_writereg_mask(state, 0xDF, 0x03, 0x03); /* freeze the counters */ *ucblocks = (stv0297_readreg(state, 0xD5) << 8) | stv0297_readreg(state, 0xD4); stv0297_writereg_mask(state, 0xDF, 0x03, 0x02); /* clear the counters */ stv0297_writereg_mask(state, 0xDF, 0x03, 0x01); /* re-enable the counters */ return 0; } static int stv0297_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct stv0297_state *state = fe->demodulator_priv; int u_threshold; int initial_u; int blind_u; int delay; int sweeprate; int carrieroffset; unsigned long starttime; unsigned long timeout; fe_spectral_inversion_t inversion; switch (p->u.qam.modulation) { case QAM_16: case QAM_32: case QAM_64: delay = 100; sweeprate = 1000; break; case QAM_128: case QAM_256: delay = 200; sweeprate = 500; break; default: return -EINVAL; } // determine inversion dependent parameters inversion = p->inversion; if 
(state->config->invert) inversion = (inversion == INVERSION_ON) ? INVERSION_OFF : INVERSION_ON; carrieroffset = -330; switch (inversion) { case INVERSION_OFF: break; case INVERSION_ON: sweeprate = -sweeprate; carrieroffset = -carrieroffset; break; default: return -EINVAL; } stv0297_init(fe); if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe, p); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } /* clear software interrupts */ stv0297_writereg(state, 0x82, 0x0); /* set initial demodulation frequency */ stv0297_set_initialdemodfreq(state, 7250); /* setup AGC */ stv0297_writereg_mask(state, 0x43, 0x10, 0x00); stv0297_writereg(state, 0x41, 0x00); stv0297_writereg_mask(state, 0x42, 0x03, 0x01); stv0297_writereg_mask(state, 0x36, 0x60, 0x00); stv0297_writereg_mask(state, 0x36, 0x18, 0x00); stv0297_writereg_mask(state, 0x71, 0x80, 0x80); stv0297_writereg(state, 0x72, 0x00); stv0297_writereg(state, 0x73, 0x00); stv0297_writereg_mask(state, 0x74, 0x0F, 0x00); stv0297_writereg_mask(state, 0x43, 0x08, 0x00); stv0297_writereg_mask(state, 0x71, 0x80, 0x00); /* setup STL */ stv0297_writereg_mask(state, 0x5a, 0x20, 0x20); stv0297_writereg_mask(state, 0x5b, 0x02, 0x02); stv0297_writereg_mask(state, 0x5b, 0x02, 0x00); stv0297_writereg_mask(state, 0x5b, 0x01, 0x00); stv0297_writereg_mask(state, 0x5a, 0x40, 0x40); /* disable frequency sweep */ stv0297_writereg_mask(state, 0x6a, 0x01, 0x00); /* reset deinterleaver */ stv0297_writereg_mask(state, 0x81, 0x01, 0x01); stv0297_writereg_mask(state, 0x81, 0x01, 0x00); /* ??? 
*/ stv0297_writereg_mask(state, 0x83, 0x20, 0x20); stv0297_writereg_mask(state, 0x83, 0x20, 0x00); /* reset equaliser */ u_threshold = stv0297_readreg(state, 0x00) & 0xf; initial_u = stv0297_readreg(state, 0x01) >> 4; blind_u = stv0297_readreg(state, 0x01) & 0xf; stv0297_writereg_mask(state, 0x84, 0x01, 0x01); stv0297_writereg_mask(state, 0x84, 0x01, 0x00); stv0297_writereg_mask(state, 0x00, 0x0f, u_threshold); stv0297_writereg_mask(state, 0x01, 0xf0, initial_u << 4); stv0297_writereg_mask(state, 0x01, 0x0f, blind_u); /* data comes from internal A/D */ stv0297_writereg_mask(state, 0x87, 0x80, 0x00); /* clear phase registers */ stv0297_writereg(state, 0x63, 0x00); stv0297_writereg(state, 0x64, 0x00); stv0297_writereg(state, 0x65, 0x00); stv0297_writereg(state, 0x66, 0x00); stv0297_writereg(state, 0x67, 0x00); stv0297_writereg(state, 0x68, 0x00); stv0297_writereg_mask(state, 0x69, 0x0f, 0x00); /* set parameters */ stv0297_set_qam(state, p->u.qam.modulation); stv0297_set_symbolrate(state, p->u.qam.symbol_rate / 1000); stv0297_set_sweeprate(state, sweeprate, p->u.qam.symbol_rate / 1000); stv0297_set_carrieroffset(state, carrieroffset); stv0297_set_inversion(state, inversion); /* kick off lock */ /* Disable corner detection for higher QAMs */ if (p->u.qam.modulation == QAM_128 || p->u.qam.modulation == QAM_256) stv0297_writereg_mask(state, 0x88, 0x08, 0x00); else stv0297_writereg_mask(state, 0x88, 0x08, 0x08); stv0297_writereg_mask(state, 0x5a, 0x20, 0x00); stv0297_writereg_mask(state, 0x6a, 0x01, 0x01); stv0297_writereg_mask(state, 0x43, 0x40, 0x40); stv0297_writereg_mask(state, 0x5b, 0x30, 0x00); stv0297_writereg_mask(state, 0x03, 0x0c, 0x0c); stv0297_writereg_mask(state, 0x03, 0x03, 0x03); stv0297_writereg_mask(state, 0x43, 0x10, 0x10); /* wait for WGAGC lock */ starttime = jiffies; timeout = jiffies + msecs_to_jiffies(2000); while (time_before(jiffies, timeout)) { msleep(10); if (stv0297_readreg(state, 0x43) & 0x08) break; } if (time_after(jiffies, timeout)) { goto 
timeout; } msleep(20); /* wait for equaliser partial convergence */ timeout = jiffies + msecs_to_jiffies(500); while (time_before(jiffies, timeout)) { msleep(10); if (stv0297_readreg(state, 0x82) & 0x04) { break; } } if (time_after(jiffies, timeout)) { goto timeout; } /* wait for equaliser full convergence */ timeout = jiffies + msecs_to_jiffies(delay); while (time_before(jiffies, timeout)) { msleep(10); if (stv0297_readreg(state, 0x82) & 0x08) { break; } } if (time_after(jiffies, timeout)) { goto timeout; } /* disable sweep */ stv0297_writereg_mask(state, 0x6a, 1, 0); stv0297_writereg_mask(state, 0x88, 8, 0); /* wait for main lock */ timeout = jiffies + msecs_to_jiffies(20); while (time_before(jiffies, timeout)) { msleep(10); if (stv0297_readreg(state, 0xDF) & 0x80) { break; } } if (time_after(jiffies, timeout)) { goto timeout; } msleep(100); /* is it still locked after that delay? */ if (!(stv0297_readreg(state, 0xDF) & 0x80)) { goto timeout; } /* success!! */ stv0297_writereg_mask(state, 0x5a, 0x40, 0x00); state->base_freq = p->frequency; return 0; timeout: stv0297_writereg_mask(state, 0x6a, 0x01, 0x00); return 0; } static int stv0297_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct stv0297_state *state = fe->demodulator_priv; int reg_00, reg_83; reg_00 = stv0297_readreg(state, 0x00); reg_83 = stv0297_readreg(state, 0x83); p->frequency = state->base_freq; p->inversion = (reg_83 & 0x08) ? INVERSION_ON : INVERSION_OFF; if (state->config->invert) p->inversion = (p->inversion == INVERSION_ON) ? 
INVERSION_OFF : INVERSION_ON; p->u.qam.symbol_rate = stv0297_get_symbolrate(state) * 1000; p->u.qam.fec_inner = FEC_NONE; switch ((reg_00 >> 4) & 0x7) { case 0: p->u.qam.modulation = QAM_16; break; case 1: p->u.qam.modulation = QAM_32; break; case 2: p->u.qam.modulation = QAM_128; break; case 3: p->u.qam.modulation = QAM_256; break; case 4: p->u.qam.modulation = QAM_64; break; } return 0; } static void stv0297_release(struct dvb_frontend *fe) { struct stv0297_state *state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops stv0297_ops; struct dvb_frontend *stv0297_attach(const struct stv0297_config *config, struct i2c_adapter *i2c) { struct stv0297_state *state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct stv0297_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; state->last_ber = 0; state->base_freq = 0; /* check if the demod is there */ if ((stv0297_readreg(state, 0x80) & 0x70) != 0x20) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &stv0297_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } static struct dvb_frontend_ops stv0297_ops = { .info = { .name = "ST STV0297 DVB-C", .type = FE_QAM, .frequency_min = 47000000, .frequency_max = 862000000, .frequency_stepsize = 62500, .symbol_rate_min = 870000, .symbol_rate_max = 11700000, .caps = FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 | FE_CAN_QAM_128 | FE_CAN_QAM_256 | FE_CAN_FEC_AUTO}, .release = stv0297_release, .init = stv0297_init, .sleep = stv0297_sleep, .i2c_gate_ctrl = stv0297_i2c_gate_ctrl, .set_frontend = stv0297_set_frontend, .get_frontend = stv0297_get_frontend, .read_status = stv0297_read_status, .read_ber = stv0297_read_ber, .read_signal_strength = stv0297_read_signal_strength, .read_snr = stv0297_read_snr, .read_ucblocks = stv0297_read_ucblocks, }; MODULE_DESCRIPTION("ST 
STV0297 DVB-C Demodulator driver"); MODULE_AUTHOR("Dennis Noermann and Andrew de Quincey"); MODULE_LICENSE("GPL"); EXPORT_SYMBOL(stv0297_attach);
gpl-2.0
drowningchild/kernel_JB_I9100ZSLS6
drivers/media/dvb/frontends/mt312.c
3104
18520
/* Driver for Zarlink VP310/MT312/ZL10313 Satellite Channel Decoder Copyright (C) 2003 Andreas Oberritter <obi@linuxtv.org> Copyright (C) 2008 Matthias Schwarzott <zzam@gentoo.org> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. References: http://products.zarlink.com/product_profiles/MT312.htm http://products.zarlink.com/product_profiles/SL1935.htm */ #include <linux/delay.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "mt312_priv.h" #include "mt312.h" struct mt312_state { struct i2c_adapter *i2c; /* configuration settings */ const struct mt312_config *config; struct dvb_frontend frontend; u8 id; unsigned long xtal; u8 freq_mult; }; static int debug; #define dprintk(args...) 
\ do { \ if (debug) \ printk(KERN_DEBUG "mt312: " args); \ } while (0) #define MT312_PLL_CLK 10000000UL /* 10 MHz */ #define MT312_PLL_CLK_10_111 10111000UL /* 10.111 MHz */ static int mt312_read(struct mt312_state *state, const enum mt312_reg_addr reg, u8 *buf, const size_t count) { int ret; struct i2c_msg msg[2]; u8 regbuf[1] = { reg }; msg[0].addr = state->config->demod_address; msg[0].flags = 0; msg[0].buf = regbuf; msg[0].len = 1; msg[1].addr = state->config->demod_address; msg[1].flags = I2C_M_RD; msg[1].buf = buf; msg[1].len = count; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) { printk(KERN_DEBUG "%s: ret == %d\n", __func__, ret); return -EREMOTEIO; } if (debug) { int i; dprintk("R(%d):", reg & 0x7f); for (i = 0; i < count; i++) printk(KERN_CONT " %02x", buf[i]); printk("\n"); } return 0; } static int mt312_write(struct mt312_state *state, const enum mt312_reg_addr reg, const u8 *src, const size_t count) { int ret; u8 buf[count + 1]; struct i2c_msg msg; if (debug) { int i; dprintk("W(%d):", reg & 0x7f); for (i = 0; i < count; i++) printk(KERN_CONT " %02x", src[i]); printk("\n"); } buf[0] = reg; memcpy(&buf[1], src, count); msg.addr = state->config->demod_address; msg.flags = 0; msg.buf = buf; msg.len = count + 1; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) { dprintk("%s: ret == %d\n", __func__, ret); return -EREMOTEIO; } return 0; } static inline int mt312_readreg(struct mt312_state *state, const enum mt312_reg_addr reg, u8 *val) { return mt312_read(state, reg, val, 1); } static inline int mt312_writereg(struct mt312_state *state, const enum mt312_reg_addr reg, const u8 val) { return mt312_write(state, reg, &val, 1); } static inline u32 mt312_div(u32 a, u32 b) { return (a + (b / 2)) / b; } static int mt312_reset(struct mt312_state *state, const u8 full) { return mt312_writereg(state, RESET, full ? 
0x80 : 0x40); } static int mt312_get_inversion(struct mt312_state *state, fe_spectral_inversion_t *i) { int ret; u8 vit_mode; ret = mt312_readreg(state, VIT_MODE, &vit_mode); if (ret < 0) return ret; if (vit_mode & 0x80) /* auto inversion was used */ *i = (vit_mode & 0x40) ? INVERSION_ON : INVERSION_OFF; return 0; } static int mt312_get_symbol_rate(struct mt312_state *state, u32 *sr) { int ret; u8 sym_rate_h; u8 dec_ratio; u16 sym_rat_op; u16 monitor; u8 buf[2]; ret = mt312_readreg(state, SYM_RATE_H, &sym_rate_h); if (ret < 0) return ret; if (sym_rate_h & 0x80) { /* symbol rate search was used */ ret = mt312_writereg(state, MON_CTRL, 0x03); if (ret < 0) return ret; ret = mt312_read(state, MONITOR_H, buf, sizeof(buf)); if (ret < 0) return ret; monitor = (buf[0] << 8) | buf[1]; dprintk("sr(auto) = %u\n", mt312_div(monitor * 15625, 4)); } else { ret = mt312_writereg(state, MON_CTRL, 0x05); if (ret < 0) return ret; ret = mt312_read(state, MONITOR_H, buf, sizeof(buf)); if (ret < 0) return ret; dec_ratio = ((buf[0] >> 5) & 0x07) * 32; ret = mt312_read(state, SYM_RAT_OP_H, buf, sizeof(buf)); if (ret < 0) return ret; sym_rat_op = (buf[0] << 8) | buf[1]; dprintk("sym_rat_op=%d dec_ratio=%d\n", sym_rat_op, dec_ratio); dprintk("*sr(manual) = %lu\n", (((state->xtal * 8192) / (sym_rat_op + 8192)) * 2) - dec_ratio); } return 0; } static int mt312_get_code_rate(struct mt312_state *state, fe_code_rate_t *cr) { const fe_code_rate_t fec_tab[8] = { FEC_1_2, FEC_2_3, FEC_3_4, FEC_5_6, FEC_6_7, FEC_7_8, FEC_AUTO, FEC_AUTO }; int ret; u8 fec_status; ret = mt312_readreg(state, FEC_STATUS, &fec_status); if (ret < 0) return ret; *cr = fec_tab[(fec_status >> 4) & 0x07]; return 0; } static int mt312_initfe(struct dvb_frontend *fe) { struct mt312_state *state = fe->demodulator_priv; int ret; u8 buf[2]; /* wake up */ ret = mt312_writereg(state, CONFIG, (state->freq_mult == 6 ? 
0x88 : 0x8c)); if (ret < 0) return ret; /* wait at least 150 usec */ udelay(150); /* full reset */ ret = mt312_reset(state, 1); if (ret < 0) return ret; /* Per datasheet, write correct values. 09/28/03 ACCJr. * If we don't do this, we won't get FE_HAS_VITERBI in the VP310. */ { u8 buf_def[8] = { 0x14, 0x12, 0x03, 0x02, 0x01, 0x00, 0x00, 0x00 }; ret = mt312_write(state, VIT_SETUP, buf_def, sizeof(buf_def)); if (ret < 0) return ret; } switch (state->id) { case ID_ZL10313: /* enable ADC */ ret = mt312_writereg(state, GPP_CTRL, 0x80); if (ret < 0) return ret; /* configure ZL10313 for optimal ADC performance */ buf[0] = 0x80; buf[1] = 0xB0; ret = mt312_write(state, HW_CTRL, buf, 2); if (ret < 0) return ret; /* enable MPEG output and ADCs */ ret = mt312_writereg(state, HW_CTRL, 0x00); if (ret < 0) return ret; ret = mt312_writereg(state, MPEG_CTRL, 0x00); if (ret < 0) return ret; break; } /* SYS_CLK */ buf[0] = mt312_div(state->xtal * state->freq_mult * 2, 1000000); /* DISEQC_RATIO */ buf[1] = mt312_div(state->xtal, 22000 * 4); ret = mt312_write(state, SYS_CLK, buf, sizeof(buf)); if (ret < 0) return ret; ret = mt312_writereg(state, SNR_THS_HIGH, 0x32); if (ret < 0) return ret; /* different MOCLK polarity */ switch (state->id) { case ID_ZL10313: buf[0] = 0x33; break; default: buf[0] = 0x53; break; } ret = mt312_writereg(state, OP_CTRL, buf[0]); if (ret < 0) return ret; /* TS_SW_LIM */ buf[0] = 0x8c; buf[1] = 0x98; ret = mt312_write(state, TS_SW_LIM_L, buf, sizeof(buf)); if (ret < 0) return ret; ret = mt312_writereg(state, CS_SW_LIM, 0x69); if (ret < 0) return ret; return 0; } static int mt312_send_master_cmd(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *c) { struct mt312_state *state = fe->demodulator_priv; int ret; u8 diseqc_mode; if ((c->msg_len == 0) || (c->msg_len > sizeof(c->msg))) return -EINVAL; ret = mt312_readreg(state, DISEQC_MODE, &diseqc_mode); if (ret < 0) return ret; ret = mt312_write(state, (0x80 | DISEQC_INSTR), c->msg, c->msg_len); if (ret < 0) 
return ret; ret = mt312_writereg(state, DISEQC_MODE, (diseqc_mode & 0x40) | ((c->msg_len - 1) << 3) | 0x04); if (ret < 0) return ret; /* is there a better way to wait for message to be transmitted */ msleep(100); /* set DISEQC_MODE[2:0] to zero if a return message is expected */ if (c->msg[0] & 0x02) { ret = mt312_writereg(state, DISEQC_MODE, (diseqc_mode & 0x40)); if (ret < 0) return ret; } return 0; } static int mt312_send_burst(struct dvb_frontend *fe, const fe_sec_mini_cmd_t c) { struct mt312_state *state = fe->demodulator_priv; const u8 mini_tab[2] = { 0x02, 0x03 }; int ret; u8 diseqc_mode; if (c > SEC_MINI_B) return -EINVAL; ret = mt312_readreg(state, DISEQC_MODE, &diseqc_mode); if (ret < 0) return ret; ret = mt312_writereg(state, DISEQC_MODE, (diseqc_mode & 0x40) | mini_tab[c]); if (ret < 0) return ret; return 0; } static int mt312_set_tone(struct dvb_frontend *fe, const fe_sec_tone_mode_t t) { struct mt312_state *state = fe->demodulator_priv; const u8 tone_tab[2] = { 0x01, 0x00 }; int ret; u8 diseqc_mode; if (t > SEC_TONE_OFF) return -EINVAL; ret = mt312_readreg(state, DISEQC_MODE, &diseqc_mode); if (ret < 0) return ret; ret = mt312_writereg(state, DISEQC_MODE, (diseqc_mode & 0x40) | tone_tab[t]); if (ret < 0) return ret; return 0; } static int mt312_set_voltage(struct dvb_frontend *fe, const fe_sec_voltage_t v) { struct mt312_state *state = fe->demodulator_priv; const u8 volt_tab[3] = { 0x00, 0x40, 0x00 }; u8 val; if (v > SEC_VOLTAGE_OFF) return -EINVAL; val = volt_tab[v]; if (state->config->voltage_inverted) val ^= 0x40; return mt312_writereg(state, DISEQC_MODE, val); } static int mt312_read_status(struct dvb_frontend *fe, fe_status_t *s) { struct mt312_state *state = fe->demodulator_priv; int ret; u8 status[3]; *s = 0; ret = mt312_read(state, QPSK_STAT_H, status, sizeof(status)); if (ret < 0) return ret; dprintk("QPSK_STAT_H: 0x%02x, QPSK_STAT_L: 0x%02x," " FEC_STATUS: 0x%02x\n", status[0], status[1], status[2]); if (status[0] & 0xc0) *s |= 
FE_HAS_SIGNAL; /* signal noise ratio */ if (status[0] & 0x04) *s |= FE_HAS_CARRIER; /* qpsk carrier lock */ if (status[2] & 0x02) *s |= FE_HAS_VITERBI; /* viterbi lock */ if (status[2] & 0x04) *s |= FE_HAS_SYNC; /* byte align lock */ if (status[0] & 0x01) *s |= FE_HAS_LOCK; /* qpsk lock */ return 0; } static int mt312_read_ber(struct dvb_frontend *fe, u32 *ber) { struct mt312_state *state = fe->demodulator_priv; int ret; u8 buf[3]; ret = mt312_read(state, RS_BERCNT_H, buf, 3); if (ret < 0) return ret; *ber = ((buf[0] << 16) | (buf[1] << 8) | buf[2]) * 64; return 0; } static int mt312_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { struct mt312_state *state = fe->demodulator_priv; int ret; u8 buf[3]; u16 agc; s16 err_db; ret = mt312_read(state, AGC_H, buf, sizeof(buf)); if (ret < 0) return ret; agc = (buf[0] << 6) | (buf[1] >> 2); err_db = (s16) (((buf[1] & 0x03) << 14) | buf[2] << 6) >> 6; *signal_strength = agc; dprintk("agc=%08x err_db=%hd\n", agc, err_db); return 0; } static int mt312_read_snr(struct dvb_frontend *fe, u16 *snr) { struct mt312_state *state = fe->demodulator_priv; int ret; u8 buf[2]; ret = mt312_read(state, M_SNR_H, buf, sizeof(buf)); if (ret < 0) return ret; *snr = 0xFFFF - ((((buf[0] & 0x7f) << 8) | buf[1]) << 1); return 0; } static int mt312_read_ucblocks(struct dvb_frontend *fe, u32 *ubc) { struct mt312_state *state = fe->demodulator_priv; int ret; u8 buf[2]; ret = mt312_read(state, RS_UBC_H, buf, sizeof(buf)); if (ret < 0) return ret; *ubc = (buf[0] << 8) | buf[1]; return 0; } static int mt312_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct mt312_state *state = fe->demodulator_priv; int ret; u8 buf[5], config_val; u16 sr; const u8 fec_tab[10] = { 0x00, 0x01, 0x02, 0x04, 0x3f, 0x08, 0x10, 0x20, 0x3f, 0x3f }; const u8 inv_tab[3] = { 0x00, 0x40, 0x80 }; dprintk("%s: Freq %d\n", __func__, p->frequency); if ((p->frequency < fe->ops.info.frequency_min) || (p->frequency > 
fe->ops.info.frequency_max)) return -EINVAL; if ((p->inversion < INVERSION_OFF) || (p->inversion > INVERSION_ON)) return -EINVAL; if ((p->u.qpsk.symbol_rate < fe->ops.info.symbol_rate_min) || (p->u.qpsk.symbol_rate > fe->ops.info.symbol_rate_max)) return -EINVAL; if ((p->u.qpsk.fec_inner < FEC_NONE) || (p->u.qpsk.fec_inner > FEC_AUTO)) return -EINVAL; if ((p->u.qpsk.fec_inner == FEC_4_5) || (p->u.qpsk.fec_inner == FEC_8_9)) return -EINVAL; switch (state->id) { case ID_VP310: /* For now we will do this only for the VP310. * It should be better for the mt312 as well, * but tuning will be slower. ACCJr 09/29/03 */ ret = mt312_readreg(state, CONFIG, &config_val); if (ret < 0) return ret; if (p->u.qpsk.symbol_rate >= 30000000) { /* Note that 30MS/s should use 90MHz */ if (state->freq_mult == 6) { /* We are running 60MHz */ state->freq_mult = 9; ret = mt312_initfe(fe); if (ret < 0) return ret; } } else { if (state->freq_mult == 9) { /* We are running 90MHz */ state->freq_mult = 6; ret = mt312_initfe(fe); if (ret < 0) return ret; } } break; case ID_MT312: case ID_ZL10313: break; default: return -EINVAL; } if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe, p); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } /* sr = (u16)(sr * 256.0 / 1000000.0) */ sr = mt312_div(p->u.qpsk.symbol_rate * 4, 15625); /* SYM_RATE */ buf[0] = (sr >> 8) & 0x3f; buf[1] = (sr >> 0) & 0xff; /* VIT_MODE */ buf[2] = inv_tab[p->inversion] | fec_tab[p->u.qpsk.fec_inner]; /* QPSK_CTRL */ buf[3] = 0x40; /* swap I and Q before QPSK demodulation */ if (p->u.qpsk.symbol_rate < 10000000) buf[3] |= 0x04; /* use afc mode */ /* GO */ buf[4] = 0x01; ret = mt312_write(state, SYM_RATE_H, buf, sizeof(buf)); if (ret < 0) return ret; mt312_reset(state, 0); return 0; } static int mt312_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *p) { struct mt312_state *state = fe->demodulator_priv; int ret; ret = mt312_get_inversion(state, &p->inversion); if (ret < 0) return ret; 
ret = mt312_get_symbol_rate(state, &p->u.qpsk.symbol_rate); if (ret < 0) return ret; ret = mt312_get_code_rate(state, &p->u.qpsk.fec_inner); if (ret < 0) return ret; return 0; } static int mt312_i2c_gate_ctrl(struct dvb_frontend *fe, int enable) { struct mt312_state *state = fe->demodulator_priv; u8 val = 0x00; int ret; switch (state->id) { case ID_ZL10313: ret = mt312_readreg(state, GPP_CTRL, &val); if (ret < 0) goto error; /* preserve this bit to not accidentally shutdown ADC */ val &= 0x80; break; } if (enable) val |= 0x40; else val &= ~0x40; ret = mt312_writereg(state, GPP_CTRL, val); error: return ret; } static int mt312_sleep(struct dvb_frontend *fe) { struct mt312_state *state = fe->demodulator_priv; int ret; u8 config; /* reset all registers to defaults */ ret = mt312_reset(state, 1); if (ret < 0) return ret; if (state->id == ID_ZL10313) { /* reset ADC */ ret = mt312_writereg(state, GPP_CTRL, 0x00); if (ret < 0) return ret; /* full shutdown of ADCs, mpeg bus tristated */ ret = mt312_writereg(state, HW_CTRL, 0x0d); if (ret < 0) return ret; } ret = mt312_readreg(state, CONFIG, &config); if (ret < 0) return ret; /* enter standby */ ret = mt312_writereg(state, CONFIG, config & 0x7f); if (ret < 0) return ret; return 0; } static int mt312_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *fesettings) { fesettings->min_delay_ms = 50; fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static void mt312_release(struct dvb_frontend *fe) { struct mt312_state *state = fe->demodulator_priv; kfree(state); } #define MT312_SYS_CLK 90000000UL /* 90 MHz */ static struct dvb_frontend_ops mt312_ops = { .info = { .name = "Zarlink ???? 
DVB-S", .type = FE_QPSK, .frequency_min = 950000, .frequency_max = 2150000, /* FIXME: adjust freq to real used xtal */ .frequency_stepsize = (MT312_PLL_CLK / 1000) / 128, .symbol_rate_min = MT312_SYS_CLK / 128, /* FIXME as above */ .symbol_rate_max = MT312_SYS_CLK / 2, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_MUTE_TS | FE_CAN_RECOVER }, .release = mt312_release, .init = mt312_initfe, .sleep = mt312_sleep, .i2c_gate_ctrl = mt312_i2c_gate_ctrl, .set_frontend = mt312_set_frontend, .get_frontend = mt312_get_frontend, .get_tune_settings = mt312_get_tune_settings, .read_status = mt312_read_status, .read_ber = mt312_read_ber, .read_signal_strength = mt312_read_signal_strength, .read_snr = mt312_read_snr, .read_ucblocks = mt312_read_ucblocks, .diseqc_send_master_cmd = mt312_send_master_cmd, .diseqc_send_burst = mt312_send_burst, .set_tone = mt312_set_tone, .set_voltage = mt312_set_voltage, }; struct dvb_frontend *mt312_attach(const struct mt312_config *config, struct i2c_adapter *i2c) { struct mt312_state *state = NULL; /* allocate memory for the internal state */ state = kzalloc(sizeof(struct mt312_state), GFP_KERNEL); if (state == NULL) goto error; /* setup the state */ state->config = config; state->i2c = i2c; /* check if the demod is there */ if (mt312_readreg(state, ID, &state->id) < 0) goto error; /* create dvb_frontend */ memcpy(&state->frontend.ops, &mt312_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; switch (state->id) { case ID_VP310: strcpy(state->frontend.ops.info.name, "Zarlink VP310 DVB-S"); state->xtal = MT312_PLL_CLK; state->freq_mult = 9; break; case ID_MT312: strcpy(state->frontend.ops.info.name, "Zarlink MT312 DVB-S"); state->xtal = MT312_PLL_CLK; state->freq_mult = 6; break; case ID_ZL10313: strcpy(state->frontend.ops.info.name, "Zarlink ZL10313 DVB-S"); state->xtal = MT312_PLL_CLK_10_111; state->freq_mult = 9; break; default: 
printk(KERN_WARNING "Only Zarlink VP310/MT312/ZL10313" " are supported chips.\n"); goto error; } return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(mt312_attach); module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("Zarlink VP310/MT312/ZL10313 DVB-S Demodulator driver"); MODULE_AUTHOR("Andreas Oberritter <obi@linuxtv.org>"); MODULE_AUTHOR("Matthias Schwarzott <zzam@gentoo.org>"); MODULE_LICENSE("GPL");
gpl-2.0
Asderdd/android_kernel_google_msm8952
drivers/scsi/aic7xxx/aic79xx_core.c
3360
299699
/* * Core routines and tables shareable across OS platforms. * * Copyright (c) 1994-2002 Justin T. Gibbs. * Copyright (c) 2000-2003 Adaptec Inc. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. 
* * $Id: //depot/aic7xxx/aic7xxx/aic79xx.c#250 $ */ #ifdef __linux__ #include "aic79xx_osm.h" #include "aic79xx_inline.h" #include "aicasm/aicasm_insformat.h" #else #include <dev/aic7xxx/aic79xx_osm.h> #include <dev/aic7xxx/aic79xx_inline.h> #include <dev/aic7xxx/aicasm/aicasm_insformat.h> #endif /***************************** Lookup Tables **********************************/ static const char *const ahd_chip_names[] = { "NONE", "aic7901", "aic7902", "aic7901A" }; static const u_int num_chip_names = ARRAY_SIZE(ahd_chip_names); /* * Hardware error codes. */ struct ahd_hard_error_entry { uint8_t errno; const char *errmesg; }; static const struct ahd_hard_error_entry ahd_hard_errors[] = { { DSCTMOUT, "Discard Timer has timed out" }, { ILLOPCODE, "Illegal Opcode in sequencer program" }, { SQPARERR, "Sequencer Parity Error" }, { DPARERR, "Data-path Parity Error" }, { MPARERR, "Scratch or SCB Memory Parity Error" }, { CIOPARERR, "CIOBUS Parity Error" }, }; static const u_int num_errors = ARRAY_SIZE(ahd_hard_errors); static const struct ahd_phase_table_entry ahd_phase_table[] = { { P_DATAOUT, MSG_NOOP, "in Data-out phase" }, { P_DATAIN, MSG_INITIATOR_DET_ERR, "in Data-in phase" }, { P_DATAOUT_DT, MSG_NOOP, "in DT Data-out phase" }, { P_DATAIN_DT, MSG_INITIATOR_DET_ERR, "in DT Data-in phase" }, { P_COMMAND, MSG_NOOP, "in Command phase" }, { P_MESGOUT, MSG_NOOP, "in Message-out phase" }, { P_STATUS, MSG_INITIATOR_DET_ERR, "in Status phase" }, { P_MESGIN, MSG_PARITY_ERROR, "in Message-in phase" }, { P_BUSFREE, MSG_NOOP, "while idle" }, { 0, MSG_NOOP, "in unknown phase" } }; /* * In most cases we only wish to itterate over real phases, so * exclude the last element from the count. 
*/ static const u_int num_phases = ARRAY_SIZE(ahd_phase_table) - 1; /* Our Sequencer Program */ #include "aic79xx_seq.h" /**************************** Function Declarations ***************************/ static void ahd_handle_transmission_error(struct ahd_softc *ahd); static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1); static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime); static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd); static void ahd_handle_proto_violation(struct ahd_softc *ahd); static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static struct ahd_tmode_tstate* ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel); #ifdef AHD_TARGET_MODE static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force); #endif static void ahd_devlimited_syncrate(struct ahd_softc *ahd, struct ahd_initiator_tinfo *, u_int *period, u_int *ppr_options, role_t role); static void ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_transinfo *tinfo); static void ahd_update_pending_scbs(struct ahd_softc *ahd); static void ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); static void ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset); static void ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int bus_width); static void ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int period, u_int offset, u_int bus_width, u_int ppr_options); static void ahd_clear_msg_state(struct ahd_softc *ahd); static void 
ahd_handle_message_phase(struct ahd_softc *ahd); typedef enum { AHDMSG_1B, AHDMSG_2B, AHDMSG_EXT } ahd_msgtype; static int ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full); static int ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static int ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo); static void ahd_reinitialize_dataptrs(struct ahd_softc *ahd); static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level); #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb); #endif static u_int ahd_sglist_size(struct ahd_softc *ahd); static u_int ahd_sglist_allocsize(struct ahd_softc *ahd); static bus_dmamap_callback_t ahd_dmamap_cb; static void ahd_initialize_hscbs(struct ahd_softc *ahd); static int ahd_init_scbdata(struct ahd_softc *ahd); static void ahd_fini_scbdata(struct ahd_softc *ahd); static void ahd_setup_iocell_workaround(struct ahd_softc *ahd); static void ahd_iocell_first_selection(struct ahd_softc *ahd); static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx); static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb); static void ahd_chip_init(struct ahd_softc *ahd); static void ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb, struct scb *scb); static int ahd_qinfifo_count(struct ahd_softc *ahd); static int ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status, ahd_search_action action, u_int *list_head, u_int *list_tail, u_int tid); static void ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev, u_int tid_cur, u_int tid_next); static void ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid); static u_int 
ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid, u_int prev, u_int next, u_int tid); static void ahd_reset_current_bus(struct ahd_softc *ahd); static ahd_callback_t ahd_stat_timer; #ifdef AHD_DUMP_SEQ static void ahd_dumpseq(struct ahd_softc *ahd); #endif static void ahd_loadseq(struct ahd_softc *ahd); static int ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch, u_int start_instr, u_int *skip_addr); static u_int ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address); static void ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts); static int ahd_probe_stack_size(struct ahd_softc *ahd); static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb); static void ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb); #ifdef AHD_TARGET_MODE static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg); static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask); static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd); #endif static int ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel, int lun, u_int tag, role_t role, uint32_t status); static void ahd_alloc_scbs(struct ahd_softc *ahd); static void ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid); static void ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb); static void ahd_clear_critical_section(struct ahd_softc *ahd); static void ahd_clear_intstat(struct ahd_softc *ahd); static void ahd_enable_coalescing(struct ahd_softc *ahd, int enable); static u_int ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl); static void ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb); static void ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb); static const struct ahd_phase_table_entry* ahd_lookup_phase_entry(int phase); static void ahd_shutdown(void *arg); static void 
ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer, u_int maxcmds, u_int mincmds); static int ahd_verify_vpd_cksum(struct vpd_config *vpd); static int ahd_wait_seeprom(struct ahd_softc *ahd); static int ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target, char channel, int lun, u_int tag, role_t role); static void ahd_reset_cmds_pending(struct ahd_softc *ahd); /*************************** Interrupt Services *******************************/ static void ahd_run_qoutfifo(struct ahd_softc *ahd); #ifdef AHD_TARGET_MODE static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused); #endif static void ahd_handle_hwerrint(struct ahd_softc *ahd); static void ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat); static void ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat); /************************ Sequencer Execution Control *************************/ void ahd_set_modes(struct ahd_softc *ahd, ahd_mode src, ahd_mode dst) { if (ahd->src_mode == src && ahd->dst_mode == dst) return; #ifdef AHD_DEBUG if (ahd->src_mode == AHD_MODE_UNKNOWN || ahd->dst_mode == AHD_MODE_UNKNOWN) panic("Setting mode prior to saving it.\n"); if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printk("%s: Setting mode 0x%x\n", ahd_name(ahd), ahd_build_mode_state(ahd, src, dst)); #endif ahd_outb(ahd, MODE_PTR, ahd_build_mode_state(ahd, src, dst)); ahd->src_mode = src; ahd->dst_mode = dst; } static void ahd_update_modes(struct ahd_softc *ahd) { ahd_mode_state mode_ptr; ahd_mode src; ahd_mode dst; mode_ptr = ahd_inb(ahd, MODE_PTR); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MODEPTR) != 0) printk("Reading mode 0x%x\n", mode_ptr); #endif ahd_extract_mode_state(ahd, mode_ptr, &src, &dst); ahd_known_modes(ahd, src, dst); } static void ahd_assert_modes(struct ahd_softc *ahd, ahd_mode srcmode, ahd_mode dstmode, const char *file, int line) { #ifdef AHD_DEBUG if ((srcmode & AHD_MK_MSK(ahd->src_mode)) == 0 || (dstmode & AHD_MK_MSK(ahd->dst_mode)) == 0) { panic("%s:%s:%d: Mode assertion 
failed.\n", ahd_name(ahd), file, line); } #endif } #define AHD_ASSERT_MODES(ahd, source, dest) \ ahd_assert_modes(ahd, source, dest, __FILE__, __LINE__); ahd_mode_state ahd_save_modes(struct ahd_softc *ahd) { if (ahd->src_mode == AHD_MODE_UNKNOWN || ahd->dst_mode == AHD_MODE_UNKNOWN) ahd_update_modes(ahd); return (ahd_build_mode_state(ahd, ahd->src_mode, ahd->dst_mode)); } void ahd_restore_modes(struct ahd_softc *ahd, ahd_mode_state state) { ahd_mode src; ahd_mode dst; ahd_extract_mode_state(ahd, state, &src, &dst); ahd_set_modes(ahd, src, dst); } /* * Determine whether the sequencer has halted code execution. * Returns non-zero status if the sequencer is stopped. */ int ahd_is_paused(struct ahd_softc *ahd) { return ((ahd_inb(ahd, HCNTRL) & PAUSE) != 0); } /* * Request that the sequencer stop and wait, indefinitely, for it * to stop. The sequencer will only acknowledge that it is paused * once it has reached an instruction boundary and PAUSEDIS is * cleared in the SEQCTL register. The sequencer may use PAUSEDIS * for critical sections. */ void ahd_pause(struct ahd_softc *ahd) { ahd_outb(ahd, HCNTRL, ahd->pause); /* * Since the sequencer can disable pausing in a critical section, we * must loop until it actually stops. */ while (ahd_is_paused(ahd) == 0) ; } /* * Allow the sequencer to continue program execution. * We check here to ensure that no additional interrupt * sources that would cause the sequencer to halt have been * asserted. If, for example, a SCSI bus reset is detected * while we are fielding a different, pausing, interrupt type, * we don't want to release the sequencer before going back * into our interrupt handler and dealing with this new * condition. */ void ahd_unpause(struct ahd_softc *ahd) { /* * Automatically restore our modes to those saved * prior to the first change of the mode. 
*/ if (ahd->saved_src_mode != AHD_MODE_UNKNOWN && ahd->saved_dst_mode != AHD_MODE_UNKNOWN) { if ((ahd->flags & AHD_UPDATE_PEND_CMDS) != 0) ahd_reset_cmds_pending(ahd); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); } if ((ahd_inb(ahd, INTSTAT) & ~CMDCMPLT) == 0) ahd_outb(ahd, HCNTRL, ahd->unpause); ahd_known_modes(ahd, AHD_MODE_UNKNOWN, AHD_MODE_UNKNOWN); } /*********************** Scatter Gather List Handling *************************/ void * ahd_sg_setup(struct ahd_softc *ahd, struct scb *scb, void *sgptr, dma_addr_t addr, bus_size_t len, int last) { scb->sg_count++; if (sizeof(dma_addr_t) > 4 && (ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)sgptr; sg->addr = ahd_htole64(addr); sg->len = ahd_htole32(len | (last ? AHD_DMA_LAST_SEG : 0)); return (sg + 1); } else { struct ahd_dma_seg *sg; sg = (struct ahd_dma_seg *)sgptr; sg->addr = ahd_htole32(addr & 0xFFFFFFFF); sg->len = ahd_htole32(len | ((addr >> 8) & 0x7F000000) | (last ? AHD_DMA_LAST_SEG : 0)); return (sg + 1); } } static void ahd_setup_scb_common(struct ahd_softc *ahd, struct scb *scb) { /* XXX Handle target mode SCBs. */ scb->crc_retry_count = 0; if ((scb->flags & SCB_PACKETIZED) != 0) { /* XXX what about ACA?? It is type 4, but TAG_TYPE == 0x3. */ scb->hscb->task_attribute = scb->hscb->control & SCB_TAG_TYPE; } else { if (ahd_get_transfer_length(scb) & 0x01) scb->hscb->task_attribute = SCB_XFERLEN_ODD; else scb->hscb->task_attribute = 0; } if (scb->hscb->cdb_len <= MAX_CDB_LEN_WITH_SENSE_ADDR || (scb->hscb->cdb_len & SCB_CDB_LEN_PTR) != 0) scb->hscb->shared_data.idata.cdb_plus_saddr.sense_addr = ahd_htole32(scb->sense_busaddr); } static void ahd_setup_data_scb(struct ahd_softc *ahd, struct scb *scb) { /* * Copy the first SG into the "current" data ponter area. 
*/ if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = (struct ahd_dma64_seg *)scb->sg_list; scb->hscb->dataptr = sg->addr; scb->hscb->datacnt = sg->len; } else { struct ahd_dma_seg *sg; uint32_t *dataptr_words; sg = (struct ahd_dma_seg *)scb->sg_list; dataptr_words = (uint32_t*)&scb->hscb->dataptr; dataptr_words[0] = sg->addr; dataptr_words[1] = 0; if ((ahd->flags & AHD_39BIT_ADDRESSING) != 0) { uint64_t high_addr; high_addr = ahd_le32toh(sg->len) & 0x7F000000; scb->hscb->dataptr |= ahd_htole64(high_addr << 8); } scb->hscb->datacnt = sg->len; } /* * Note where to find the SG entries in bus space. * We also set the full residual flag which the * sequencer will clear as soon as a data transfer * occurs. */ scb->hscb->sgptr = ahd_htole32(scb->sg_list_busaddr|SG_FULL_RESID); } static void ahd_setup_noxfer_scb(struct ahd_softc *ahd, struct scb *scb) { scb->hscb->sgptr = ahd_htole32(SG_LIST_NULL); scb->hscb->dataptr = 0; scb->hscb->datacnt = 0; } /************************** Memory mapping routines ***************************/ static void * ahd_sg_bus_to_virt(struct ahd_softc *ahd, struct scb *scb, uint32_t sg_busaddr) { dma_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = sg_busaddr - (scb->sg_list_busaddr - ahd_sg_size(ahd)); return ((uint8_t *)scb->sg_list + sg_offset); } static uint32_t ahd_sg_virt_to_bus(struct ahd_softc *ahd, struct scb *scb, void *sg) { dma_addr_t sg_offset; /* sg_list_phys points to entry 1, not 0 */ sg_offset = ((uint8_t *)sg - (uint8_t *)scb->sg_list) - ahd_sg_size(ahd); return (scb->sg_list_busaddr + sg_offset); } static void ahd_sync_scb(struct ahd_softc *ahd, struct scb *scb, int op) { ahd_dmamap_sync(ahd, ahd->scb_data.hscb_dmat, scb->hscb_map->dmamap, /*offset*/(uint8_t*)scb->hscb - scb->hscb_map->vaddr, /*len*/sizeof(*scb->hscb), op); } void ahd_sync_sglist(struct ahd_softc *ahd, struct scb *scb, int op) { if (scb->sg_count == 0) return; ahd_dmamap_sync(ahd, ahd->scb_data.sg_dmat, 
scb->sg_map->dmamap, /*offset*/scb->sg_list_busaddr - ahd_sg_size(ahd), /*len*/ahd_sg_size(ahd) * scb->sg_count, op); } static void ahd_sync_sense(struct ahd_softc *ahd, struct scb *scb, int op) { ahd_dmamap_sync(ahd, ahd->scb_data.sense_dmat, scb->sense_map->dmamap, /*offset*/scb->sense_busaddr, /*len*/AHD_SENSE_BUFSIZE, op); } #ifdef AHD_TARGET_MODE static uint32_t ahd_targetcmd_offset(struct ahd_softc *ahd, u_int index) { return (((uint8_t *)&ahd->targetcmds[index]) - (uint8_t *)ahd->qoutfifo); } #endif /*********************** Miscellaneous Support Functions ***********************/ /* * Return pointers to the transfer negotiation information * for the specified our_id/remote_id pair. */ struct ahd_initiator_tinfo * ahd_fetch_transinfo(struct ahd_softc *ahd, char channel, u_int our_id, u_int remote_id, struct ahd_tmode_tstate **tstate) { /* * Transfer data structures are stored from the perspective * of the target role. Since the parameters for a connection * in the initiator role to a given target are the same as * when the roles are reversed, we pretend we are the target. */ if (channel == 'B') our_id += 8; *tstate = ahd->enabled_targets[our_id]; return (&(*tstate)->transinfo[remote_id]); } uint16_t ahd_inw(struct ahd_softc *ahd, u_int port) { /* * Read high byte first as some registers increment * or have other side effects when the low byte is * read. */ uint16_t r = ahd_inb(ahd, port+1) << 8; return r | ahd_inb(ahd, port); } void ahd_outw(struct ahd_softc *ahd, u_int port, u_int value) { /* * Write low byte first to accommodate registers * such as PRGMCNT where the order maters. 
*/ ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); } uint32_t ahd_inl(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24)); } void ahd_outl(struct ahd_softc *ahd, u_int port, uint32_t value) { ahd_outb(ahd, port, (value) & 0xFF); ahd_outb(ahd, port+1, ((value) >> 8) & 0xFF); ahd_outb(ahd, port+2, ((value) >> 16) & 0xFF); ahd_outb(ahd, port+3, ((value) >> 24) & 0xFF); } uint64_t ahd_inq(struct ahd_softc *ahd, u_int port) { return ((ahd_inb(ahd, port)) | (ahd_inb(ahd, port+1) << 8) | (ahd_inb(ahd, port+2) << 16) | (ahd_inb(ahd, port+3) << 24) | (((uint64_t)ahd_inb(ahd, port+4)) << 32) | (((uint64_t)ahd_inb(ahd, port+5)) << 40) | (((uint64_t)ahd_inb(ahd, port+6)) << 48) | (((uint64_t)ahd_inb(ahd, port+7)) << 56)); } void ahd_outq(struct ahd_softc *ahd, u_int port, uint64_t value) { ahd_outb(ahd, port, value & 0xFF); ahd_outb(ahd, port+1, (value >> 8) & 0xFF); ahd_outb(ahd, port+2, (value >> 16) & 0xFF); ahd_outb(ahd, port+3, (value >> 24) & 0xFF); ahd_outb(ahd, port+4, (value >> 32) & 0xFF); ahd_outb(ahd, port+5, (value >> 40) & 0xFF); ahd_outb(ahd, port+6, (value >> 48) & 0xFF); ahd_outb(ahd, port+7, (value >> 56) & 0xFF); } u_int ahd_get_scbptr(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); return (ahd_inb(ahd, SCBPTR) | (ahd_inb(ahd, SCBPTR + 1) << 8)); } void ahd_set_scbptr(struct ahd_softc *ahd, u_int scbptr) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); ahd_outb(ahd, SCBPTR, scbptr & 0xFF); ahd_outb(ahd, SCBPTR+1, (scbptr >> 8) & 0xFF); } #if 0 /* unused */ static u_int ahd_get_hnscb_qoff(struct ahd_softc *ahd) { return (ahd_inw_atomic(ahd, HNSCB_QOFF)); } #endif static void ahd_set_hnscb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outw_atomic(ahd, HNSCB_QOFF, value); } #if 0 /* 
unused */ static u_int ahd_get_hescb_qoff(struct ahd_softc *ahd) { return (ahd_inb(ahd, HESCB_QOFF)); } #endif static void ahd_set_hescb_qoff(struct ahd_softc *ahd, u_int value) { ahd_outb(ahd, HESCB_QOFF, value); } static u_int ahd_get_snscb_qoff(struct ahd_softc *ahd) { u_int oldvalue; AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); oldvalue = ahd_inw(ahd, SNSCB_QOFF); ahd_outw(ahd, SNSCB_QOFF, oldvalue); return (oldvalue); } static void ahd_set_snscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outw(ahd, SNSCB_QOFF, value); } #if 0 /* unused */ static u_int ahd_get_sescb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SESCB_QOFF)); } #endif static void ahd_set_sescb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SESCB_QOFF, value); } #if 0 /* unused */ static u_int ahd_get_sdscb_qoff(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); return (ahd_inb(ahd, SDSCB_QOFF) | (ahd_inb(ahd, SDSCB_QOFF + 1) << 8)); } #endif static void ahd_set_sdscb_qoff(struct ahd_softc *ahd, u_int value) { AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK); ahd_outb(ahd, SDSCB_QOFF, value & 0xFF); ahd_outb(ahd, SDSCB_QOFF+1, (value >> 8) & 0xFF); } u_int ahd_inb_scbram(struct ahd_softc *ahd, u_int offset) { u_int value; /* * Workaround PCI-X Rev A. hardware bug. * After a host read of SCB memory, the chip * may become confused into thinking prefetch * was required. This starts the discard timer * running and can cause an unexpected discard * timer interrupt. The work around is to read * a normal register prior to the exhaustion of * the discard timer. The mode pointer register * has no side effects and so serves well for * this purpose. 
* * Razor #528 */ value = ahd_inb(ahd, offset); if ((ahd->bugs & AHD_PCIX_SCBRAM_RD_BUG) != 0) ahd_inb(ahd, MODE_PTR); return (value); } u_int ahd_inw_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inb_scbram(ahd, offset) | (ahd_inb_scbram(ahd, offset+1) << 8)); } static uint32_t ahd_inl_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inw_scbram(ahd, offset) | (ahd_inw_scbram(ahd, offset+2) << 16)); } static uint64_t ahd_inq_scbram(struct ahd_softc *ahd, u_int offset) { return (ahd_inl_scbram(ahd, offset) | ((uint64_t)ahd_inl_scbram(ahd, offset+4)) << 32); } struct scb * ahd_lookup_scb(struct ahd_softc *ahd, u_int tag) { struct scb* scb; if (tag >= AHD_SCB_MAX) return (NULL); scb = ahd->scb_data.scbindex[tag]; if (scb != NULL) ahd_sync_scb(ahd, scb, BUS_DMASYNC_POSTREAD|BUS_DMASYNC_POSTWRITE); return (scb); } static void ahd_swap_with_next_hscb(struct ahd_softc *ahd, struct scb *scb) { struct hardware_scb *q_hscb; struct map_node *q_hscb_map; uint32_t saved_hscb_busaddr; /* * Our queuing method is a bit tricky. The card * knows in advance which HSCB (by address) to download, * and we can't disappoint it. To achieve this, the next * HSCB to download is saved off in ahd->next_queued_hscb. * When we are called to queue "an arbitrary scb", * we copy the contents of the incoming HSCB to the one * the sequencer knows about, swap HSCB pointers and * finally assign the SCB to the tag indexed location * in the scb_array. This makes sure that we can still * locate the correct SCB by SCB_TAG. */ q_hscb = ahd->next_queued_hscb; q_hscb_map = ahd->next_queued_hscb_map; saved_hscb_busaddr = q_hscb->hscb_busaddr; memcpy(q_hscb, scb->hscb, sizeof(*scb->hscb)); q_hscb->hscb_busaddr = saved_hscb_busaddr; q_hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr; /* Now swap HSCB pointers. 
*/ ahd->next_queued_hscb = scb->hscb; ahd->next_queued_hscb_map = scb->hscb_map; scb->hscb = q_hscb; scb->hscb_map = q_hscb_map; /* Now define the mapping from tag to SCB in the scbindex */ ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = scb; } /* * Tell the sequencer about a new transaction to execute. */ void ahd_queue_scb(struct ahd_softc *ahd, struct scb *scb) { ahd_swap_with_next_hscb(ahd, scb); if (SCBID_IS_NULL(SCB_GET_TAG(scb))) panic("Attempt to queue invalid SCB tag %x\n", SCB_GET_TAG(scb)); /* * Keep a history of SCBs we've downloaded in the qinfifo. */ ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb); ahd->qinfifonext++; if (scb->sg_count != 0) ahd_setup_data_scb(ahd, scb); else ahd_setup_noxfer_scb(ahd, scb); ahd_setup_scb_common(ahd, scb); /* * Make sure our data is consistent from the * perspective of the adapter. */ ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_QUEUE) != 0) { uint64_t host_dataptr; host_dataptr = ahd_le64toh(scb->hscb->dataptr); printk("%s: Queueing SCB %d:0x%x bus addr 0x%x - 0x%x%x/0x%x\n", ahd_name(ahd), SCB_GET_TAG(scb), scb->hscb->scsiid, ahd_le32toh(scb->hscb->hscb_busaddr), (u_int)((host_dataptr >> 32) & 0xFFFFFFFF), (u_int)(host_dataptr & 0xFFFFFFFF), ahd_le32toh(scb->hscb->datacnt)); } #endif /* Tell the adapter about the newly queued SCB */ ahd_set_hnscb_qoff(ahd, ahd->qinfifonext); } /************************** Interrupt Processing ******************************/ static void ahd_sync_qoutfifo(struct ahd_softc *ahd, int op) { ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, /*offset*/0, /*len*/AHD_SCB_MAX * sizeof(struct ahd_completion), op); } static void ahd_sync_tqinfifo(struct ahd_softc *ahd, int op) { #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0) { ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, 0), sizeof(struct target_cmd) * AHD_TMODE_CMDS, op); } #endif 
} /* * See if the firmware has posted any completed commands * into our in-core command complete fifos. */ #define AHD_RUN_QOUTFIFO 0x1 #define AHD_RUN_TQINFIFO 0x2 static u_int ahd_check_cmdcmpltqueues(struct ahd_softc *ahd) { u_int retval; retval = 0; ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, /*offset*/ahd->qoutfifonext * sizeof(*ahd->qoutfifo), /*len*/sizeof(*ahd->qoutfifo), BUS_DMASYNC_POSTREAD); if (ahd->qoutfifo[ahd->qoutfifonext].valid_tag == ahd->qoutfifonext_valid_tag) retval |= AHD_RUN_QOUTFIFO; #ifdef AHD_TARGET_MODE if ((ahd->flags & AHD_TARGETROLE) != 0 && (ahd->flags & AHD_TQINFIFO_BLOCKED) == 0) { ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifofnext), /*len*/sizeof(struct target_cmd), BUS_DMASYNC_POSTREAD); if (ahd->targetcmds[ahd->tqinfifonext].cmd_valid != 0) retval |= AHD_RUN_TQINFIFO; } #endif return (retval); } /* * Catch an interrupt from the adapter */ int ahd_intr(struct ahd_softc *ahd) { u_int intstat; if ((ahd->pause & INTEN) == 0) { /* * Our interrupt is not enabled on the chip * and may be disabled for re-entrancy reasons, * so just return. This is likely just a shared * interrupt. */ return (0); } /* * Instead of directly reading the interrupt status register, * infer the cause of the interrupt by checking our in-core * completion queues. This avoids a costly PCI bus read in * most cases. */ if ((ahd->flags & AHD_ALL_INTERRUPTS) == 0 && (ahd_check_cmdcmpltqueues(ahd) != 0)) intstat = CMDCMPLT; else intstat = ahd_inb(ahd, INTSTAT); if ((intstat & INT_PEND) == 0) return (0); if (intstat & CMDCMPLT) { ahd_outb(ahd, CLRINT, CLRCMDINT); /* * Ensure that the chip sees that we've cleared * this interrupt before we walk the output fifo. * Otherwise, we may, due to posted bus writes, * clear the interrupt after we finish the scan, * and after the sequencer has added new entries * and asserted the interrupt again. 
	 */
	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
		if (ahd_is_paused(ahd)) {
			/*
			 * Potentially lost SEQINT.
			 * If SEQINTCODE is non-zero,
			 * simulate the SEQINT.
			 */
			if (ahd_inb(ahd, SEQINTCODE) != NO_SEQINT)
				intstat |= SEQINT;
		}
	} else {
		ahd_flush_device_writes(ahd);
	}
	ahd_run_qoutfifo(ahd);
	ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket]++;
	ahd->cmdcmplt_total++;
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0)
		ahd_run_tqinfifo(ahd, /*paused*/FALSE);
#endif
	}

	/*
	 * Handle statuses that may invalidate our cached
	 * copy of INTSTAT separately.
	 */
	if (intstat == 0xFF && (ahd->features & AHD_REMOVABLE) != 0) {
		/* Hot eject.  Do nothing */
	} else if (intstat & HWERRINT) {
		ahd_handle_hwerrint(ahd);
	} else if ((intstat & (PCIINT|SPLTINT)) != 0) {
		ahd->bus_intr(ahd);
	} else {
		if ((intstat & SEQINT) != 0)
			ahd_handle_seqint(ahd, intstat);

		if ((intstat & SCSIINT) != 0)
			ahd_handle_scsiint(ahd, intstat);
	}
	return (1);
}

/******************************** Private Inlines *****************************/
/* Raise ATN on the SCSI bus to request a message-out phase. */
static inline void
ahd_assert_atn(struct ahd_softc *ahd)
{
	ahd_outb(ahd, SCSISIGO, ATNO);
}

/*
 * Determine if the current connection has a packetized
 * agreement.  This does not necessarily mean that we
 * are currently in a packetized transfer.  We could
 * just as easily be sending or receiving a message.
 */
static int
ahd_currently_packetized(struct ahd_softc *ahd)
{
	ahd_mode_state	 saved_modes;
	int		 packetized;

	saved_modes = ahd_save_modes(ahd);
	if ((ahd->bugs & AHD_PKTIZED_STATUS_BUG) != 0) {
		/*
		 * The packetized bit refers to the last
		 * connection, not the current one.  Check
		 * for non-zero LQISTATE instead.
		 */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		packetized = ahd_inb(ahd, LQISTATE) != 0;
	} else {
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		packetized = ahd_inb(ahd, LQISTAT2) & PACKETIZED;
	}
	ahd_restore_modes(ahd, saved_modes);
	return (packetized);
}

/*
 * Switch the mode pointers to whichever data FIFO the chip reports
 * as current.  Returns 1 if a FIFO (0 or 1) is active, 0 otherwise.
 * Caller must already be in SCSI mode (asserted below).
 */
static inline int
ahd_set_active_fifo(struct ahd_softc *ahd)
{
	u_int active_fifo;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	active_fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	switch (active_fifo) {
	case 0:
	case 1:
		ahd_set_modes(ahd, active_fifo, active_fifo);
		return (1);
	default:
		return (0);
	}
}

/* Mark a target/channel/lun (tcl) as no longer busy in the busy table. */
static inline void
ahd_unbusy_tcl(struct ahd_softc *ahd, u_int tcl)
{
	ahd_busy_tcl(ahd, tcl, SCB_LIST_NULL);
}

/*
 * Determine whether the sequencer reported a residual
 * for this SCB/transaction.
 */
static inline void
ahd_update_residual(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_calc_residual(ahd, scb);
}

/*
 * Complete an SCB: route it through status handling if the sequencer
 * flagged valid status/residual information, otherwise finish it
 * directly via ahd_done().
 */
static inline void
ahd_complete_scb(struct ahd_softc *ahd, struct scb *scb)
{
	uint32_t sgptr;

	sgptr = ahd_le32toh(scb->hscb->sgptr);
	if ((sgptr & SG_STATUS_VALID) != 0)
		ahd_handle_scb_status(ahd, scb);
	else
		ahd_done(ahd, scb);
}

/************************* Sequencer Execution Control ************************/
/*
 * Restart the sequencer program from address zero
 */
static void
ahd_restart(struct ahd_softc *ahd)
{

	ahd_pause(ahd);

	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/* No more pending messages */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SCSISIGO, 0);		/* De-assert BSY */
	ahd_outb(ahd, MSG_OUT, MSG_NOOP);	/* No message to send */
	ahd_outb(ahd, SXFRCTL1, ahd_inb(ahd, SXFRCTL1) & ~BITBUCKET);
	ahd_outb(ahd, SEQINTCTL, 0);
	ahd_outb(ahd, LASTPHASE, P_BUSFREE);
	ahd_outb(ahd, SEQ_FLAGS, 0);
	ahd_outb(ahd, SAVED_SCSIID, 0xFF);
	ahd_outb(ahd, SAVED_LUN, 0xFF);

	/*
	 * Ensure that the sequencer's idea of TQINPOS
	 * matches our own.  The sequencer increments TQINPOS
	 * only after it sees a DMA complete and a reset could
	 * occur before the increment leaving the kernel to believe
	 * the command arrived but the sequencer to not.
	 */
	ahd_outb(ahd, TQINPOS, ahd->tqinfifonext);

	/* Always allow reselection */
	ahd_outb(ahd, SCSISEQ1,
		 ahd_inb(ahd, SCSISEQ_TEMPLATE) & (ENSELI|ENRSELI|ENAUTOATNP));
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Clear any pending sequencer interrupt.  It is no
	 * longer relevant since we're resetting the Program
	 * Counter.
	 */
	ahd_outb(ahd, CLRINT, CLRSEQINT);

	ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
	ahd_unpause(ahd);
}

/*
 * Reset one data FIFO channel: abort any in-progress S/G fetch,
 * invalidate the FIFO's firmware handler, and clear its S/G state.
 * Preserves the caller's mode pointers.
 */
static void
ahd_clear_fifo(struct ahd_softc *ahd, u_int fifo)
{
	ahd_mode_state	 saved_modes;

#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_FIFOS) != 0)
		printk("%s: Clearing FIFO %d\n", ahd_name(ahd), fifo);
#endif
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, fifo, fifo);
	ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
		ahd_outb(ahd, CCSGCTL, CCSGRESET);
	ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
	ahd_outb(ahd, SG_STATE, 0);
	ahd_restore_modes(ahd, saved_modes);
}

/************************* Input/Output Queues ********************************/
/*
 * Flush and completed commands that are sitting in the command
 * complete queues down on the chip but have yet to be dma'ed back up.
 */
static void
ahd_flush_qoutfifo(struct ahd_softc *ahd)
{
	struct		scb *scb;
	ahd_mode_state	saved_modes;
	u_int		saved_scbptr;
	u_int		ccscbctl;
	u_int		scbid;
	u_int		next_scbid;

	saved_modes = ahd_save_modes(ahd);

	/*
	 * Flush the good status FIFO for completed packetized commands.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	saved_scbptr = ahd_get_scbptr(ahd);
	while ((ahd_inb(ahd, LQISTAT2) & LQIGSAVAIL) != 0) {
		u_int fifo_mode;
		u_int i;

		scbid = ahd_inw(ahd, GSFIFO);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: Warning - GSFIFO SCB %d invalid\n",
			       ahd_name(ahd), scbid);
			/* Safe here: the loop condition re-reads LQISTAT2
			 * and the next iteration pops a fresh GSFIFO entry. */
			continue;
		}
		/*
		 * Determine if this transaction is still active in
		 * any FIFO.  If it is, we must flush that FIFO to
		 * the host before completing the  command.
		 */
		fifo_mode = 0;
rescan_fifos:
		for (i = 0; i < 2; i++) {
			/* Toggle to the other mode. */
			fifo_mode ^= 1;
			ahd_set_modes(ahd, fifo_mode, fifo_mode);

			if (ahd_scb_active_in_fifo(ahd, scb) == 0)
				continue;

			ahd_run_data_fifo(ahd, scb);

			/*
			 * Running this FIFO may cause a CFG4DATA for
			 * this same transaction to assert in the other
			 * FIFO or a new snapshot SAVEPTRS interrupt
			 * in this FIFO.  Even running a FIFO may not
			 * clear the transaction if we are still waiting
			 * for data to drain to the host. We must loop
			 * until the transaction is not active in either
			 * FIFO just to be sure.  Reset our loop counter
			 * so we will visit both FIFOs again before
			 * declaring this transaction finished.  We
			 * also delay a bit so that status has a chance
			 * to change before we look at this FIFO again.
			 */
			ahd_delay(200);
			goto rescan_fifos;
		}
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_set_scbptr(ahd, scbid);
		if ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_LIST_NULL) == 0
		 && ((ahd_inb_scbram(ahd, SCB_SGPTR) & SG_FULL_RESID) != 0
		  || (ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR)
		      & SG_LIST_NULL) != 0)) {
			u_int comp_head;

			/*
			 * The transfer completed with a residual.
			 * Place this SCB on the complete DMA list
			 * so that we update our in-core copy of the
			 * SCB before completing the command.
			 */
			ahd_outb(ahd, SCB_SCSI_STATUS, 0);
			ahd_outb(ahd, SCB_SGPTR,
				 ahd_inb_scbram(ahd, SCB_SGPTR)
				 | SG_STATUS_VALID);
			ahd_outw(ahd, SCB_TAG, scbid);
			ahd_outw(ahd, SCB_NEXT_COMPLETE, SCB_LIST_NULL);
			comp_head = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
			if (SCBID_IS_NULL(comp_head)) {
				/* List was empty - this SCB becomes both
				 * head and tail. */
				ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, scbid);
				ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
			} else {
				u_int tail;

				/* Append to the existing tail. */
				tail = ahd_inw(ahd, COMPLETE_DMA_SCB_TAIL);
				ahd_set_scbptr(ahd, tail);
				ahd_outw(ahd, SCB_NEXT_COMPLETE, scbid);
				ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, scbid);
				ahd_set_scbptr(ahd, scbid);
			}
		} else
			ahd_complete_scb(ahd, scb);
	}
	ahd_set_scbptr(ahd, saved_scbptr);

	/*
	 * Setup for command channel portion of flush.
	 */
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Wait for any inprogress DMA to complete and clear DMA state
	 * if this if for an SCB in the qinfifo.
	 */
	while (((ccscbctl = ahd_inb(ahd, CCSCBCTL)) & (CCARREN|CCSCBEN)) != 0) {

		if ((ccscbctl & (CCSCBDIR|CCARREN)) == (CCSCBDIR|CCARREN)) {
			if ((ccscbctl & ARRDONE) != 0)
				break;
		} else if ((ccscbctl & CCSCBDONE) != 0)
			break;
		ahd_delay(200);
	}
	/*
	 * We leave the sequencer to cleanup in the case of DMA's to
	 * update the qoutfifo.  In all other cases (DMA's to the
	 * chip or a push of an SCB from the COMPLETE_DMA_SCB list),
	 * we disable the DMA engine so that the sequencer will not
	 * attempt to handle the DMA completion.
	 */
	if ((ccscbctl & CCSCBDIR) != 0 || (ccscbctl & ARRDONE) != 0)
		ahd_outb(ahd, CCSCBCTL, ccscbctl & ~(CCARREN|CCSCBEN));

	/*
	 * Complete any SCBs that just finished
	 * being DMA'ed into the qoutfifo.
	 */
	ahd_run_qoutfifo(ahd);

	saved_scbptr = ahd_get_scbptr(ahd);
	/*
	 * Manually update/complete any completed SCBs that are waiting to be
	 * DMA'ed back up to the host.
*/ scbid = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { uint8_t *hscb_ptr; u_int i; ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - DMA-up and complete " "SCB %d invalid\n", ahd_name(ahd), scbid); continue; } hscb_ptr = (uint8_t *)scb->hscb; for (i = 0; i < sizeof(struct hardware_scb); i++) *hscb_ptr++ = ahd_inb_scbram(ahd, SCB_BASE + i); ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL); ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - Complete Qfrz SCB %d invalid\n", ahd_name(ahd), scbid); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL); scbid = ahd_inw(ahd, COMPLETE_SCB_HEAD); while (!SCBID_IS_NULL(scbid)) { ahd_set_scbptr(ahd, scbid); next_scbid = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Warning - Complete SCB %d invalid\n", ahd_name(ahd), scbid); continue; } ahd_complete_scb(ahd, scb); scbid = next_scbid; } ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL); /* * Restore state. */ ahd_set_scbptr(ahd, saved_scbptr); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_UPDATE_PEND_CMDS; } /* * Determine if an SCB for a packetized transaction * is active in a FIFO. */ static int ahd_scb_active_in_fifo(struct ahd_softc *ahd, struct scb *scb) { /* * The FIFO is only active for our transaction if * the SCBPTR matches the SCB's ID and the firmware * has installed a handler for the FIFO or we have * a pending SAVEPTRS or CFG4DATA interrupt. 
	 */
	if (ahd_get_scbptr(ahd) != SCB_GET_TAG(scb)
	 || ((ahd_inb(ahd, LONGJMP_ADDR+1) & INVALID_ADDR) != 0
	  && (ahd_inb(ahd, SEQINTSRC) & (CFG4DATA|SAVEPTRS)) == 0))
		return (0);

	return (1);
}

/*
 * Run a data fifo to completion for a transaction we know
 * has completed across the SCSI bus (good status has been
 * received).  We are already set to the correct FIFO mode
 * on entry to this routine.
 *
 * This function attempts to operate exactly as the firmware
 * would when running this FIFO.  Care must be taken to update
 * this routine any time the firmware's FIFO algorithm is
 * changed.
 */
static void
ahd_run_data_fifo(struct ahd_softc *ahd, struct scb *scb)
{
	u_int seqintsrc;

	seqintsrc = ahd_inb(ahd, SEQINTSRC);
	if ((seqintsrc & CFG4DATA) != 0) {
		uint32_t datacnt;
		uint32_t sgptr;

		/*
		 * Clear full residual flag.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_SGPTR) & ~SG_FULL_RESID;
		ahd_outb(ahd, SCB_SGPTR, sgptr);

		/*
		 * Load datacnt and address.
		 */
		datacnt = ahd_inl_scbram(ahd, SCB_DATACNT);
		if ((datacnt & AHD_DMA_LAST_SEG) != 0) {
			sgptr |= LAST_SEG;
			ahd_outb(ahd, SG_STATE, 0);
		} else
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);

		ahd_outq(ahd, HADDR, ahd_inq_scbram(ahd, SCB_DATAPTR));
		ahd_outl(ahd, HCNT, datacnt & AHD_SG_LEN_MASK);
		ahd_outb(ahd, SG_CACHE_PRE, sgptr);
		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);

		/*
		 * Initialize Residual Fields.
		 */
		ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3, datacnt >> 24);
		ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr & SG_PTR_MASK);

		/*
		 * Mark the SCB as having a FIFO in use.
		 */
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) + 1);

		/*
		 * Install a "fake" handler for this FIFO.
		 */
		ahd_outw(ahd, LONGJMP_ADDR, 0);

		/*
		 * Notify the hardware that we have satisfied
		 * this sequencer interrupt.
		 */
		ahd_outb(ahd, CLRSEQINTSRC, CLRCFG4DATA);
	} else if ((seqintsrc & SAVEPTRS) != 0) {
		uint32_t sgptr;
		uint32_t resid;

		if ((ahd_inb(ahd, LONGJMP_ADDR+1)&INVALID_ADDR) != 0) {
			/*
			 * Snapshot Save Pointers.  All that
			 * is necessary to clear the snapshot
			 * is a CLRCHN.
			 */
			goto clrchn;
		}

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0)
			ahd_outb(ahd, CCSGCTL, 0);
		ahd_outb(ahd, SG_STATE, 0);

		/*
		 * Flush the data FIFO.  Strickly only
		 * necessary for Rev A parts.
		 */
		ahd_outb(ahd, DFCNTRL, ahd_inb(ahd, DFCNTRL) | FIFOFLUSH);

		/*
		 * Calculate residual.
		 */
		sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
		resid = ahd_inl(ahd, SHCNT);
		resid |= ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT+3) << 24;
		ahd_outl(ahd, SCB_RESIDUAL_DATACNT, resid);
		if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG) == 0) {
			/*
			 * Must back up to the correct S/G element.
			 * Typically this just means resetting our
			 * low byte to the offset in the SG_CACHE,
			 * but if we wrapped, we have to correct
			 * the other bytes of the sgptr too.
			 */
			if ((ahd_inb(ahd, SG_CACHE_SHADOW) & 0x80) != 0
			 && (sgptr & 0x80) == 0)
				sgptr -= 0x100;
			sgptr &= ~0xFF;
			sgptr |= ahd_inb(ahd, SG_CACHE_SHADOW)
			       & SG_ADDR_MASK;
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT + 3, 0);
		} else if ((resid & AHD_SG_LEN_MASK) == 0) {
			/* Transfer ended exactly on the last segment. */
			ahd_outb(ahd, SCB_RESIDUAL_SGPTR,
				 sgptr | SG_LIST_NULL);
		}
		/*
		 * Save Pointers.
		 */
		ahd_outq(ahd, SCB_DATAPTR, ahd_inq(ahd, SHADDR));
		ahd_outl(ahd, SCB_DATACNT, resid);
		ahd_outl(ahd, SCB_SGPTR, sgptr);
		ahd_outb(ahd, CLRSEQINTSRC, CLRSAVEPTRS);
		ahd_outb(ahd, SEQIMODE,
			 ahd_inb(ahd, SEQIMODE) | ENSAVEPTRS);
		/*
		 * If the data is to the SCSI bus, we are
		 * done, otherwise wait for FIFOEMP.
		 */
		if ((ahd_inb(ahd, DFCNTRL) & DIRECTION) != 0)
			goto clrchn;
	} else if ((ahd_inb(ahd, SG_STATE) & LOADING_NEEDED) != 0) {
		uint32_t sgptr;
		uint64_t data_addr;
		uint32_t data_len;
		u_int	 dfcntrl;

		/*
		 * Disable S/G fetch so the DMA engine
		 * is available to future users.  We won't
		 * be using the DMA engine to load segments.
		 */
		if ((ahd_inb(ahd, SG_STATE) & FETCH_INPROG) != 0) {
			ahd_outb(ahd, CCSGCTL, 0);
			ahd_outb(ahd, SG_STATE, LOADING_NEEDED);
		}

		/*
		 * Wait for the DMA engine to notice that the
		 * host transfer is enabled and that there is
		 * space in the S/G FIFO for new segments before
		 * loading more segments.
		 */
		if ((ahd_inb(ahd, DFSTATUS) & PRELOAD_AVAIL) != 0
		 && (ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0) {

			/*
			 * Determine the offset of the next S/G
			 * element to load.
			 */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			} else {
				struct	ahd_dma_seg *sg;

				/* 39-bit addressing: high address bits are
				 * packed into the top byte of sg->len. */
				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);
				data_addr = sg->len & AHD_SG_HIGH_ADDR_MASK;
				data_addr <<= 8;
				data_addr |= sg->addr;
				data_len = sg->len;
				sgptr += sizeof(*sg);
			}

			/*
			 * Update residual information.
			 */
			ahd_outb(ahd, SCB_RESIDUAL_DATACNT+3,
				 data_len >> 24);
			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);

			/*
			 * Load the S/G.
			 */
			if (data_len & AHD_DMA_LAST_SEG) {
				sgptr |= LAST_SEG;
				ahd_outb(ahd, SG_STATE, 0);
			}
			ahd_outq(ahd, HADDR, data_addr);
			ahd_outl(ahd, HCNT, data_len & AHD_SG_LEN_MASK);
			ahd_outb(ahd, SG_CACHE_PRE, sgptr & 0xFF);

			/*
			 * Advertise the segment to the hardware.
			 */
			dfcntrl = ahd_inb(ahd, DFCNTRL)|PRELOADEN|HDMAEN;
			if ((ahd->features & AHD_NEW_DFCNTRL_OPTS) != 0) {
				/*
				 * Use SCSIENWRDIS so that SCSIEN
				 * is never modified by this
				 * operation.
				 */
				dfcntrl |= SCSIENWRDIS;
			}
			ahd_outb(ahd, DFCNTRL, dfcntrl);
		}
	} else if ((ahd_inb(ahd, SG_CACHE_SHADOW) & LAST_SEG_DONE) != 0) {

		/*
		 * Transfer completed to the end of SG list
		 * and has flushed to the host.
		 */
		ahd_outb(ahd, SCB_SGPTR,
			 ahd_inb_scbram(ahd, SCB_SGPTR) | SG_LIST_NULL);
		goto clrchn;
	} else if ((ahd_inb(ahd, DFSTATUS) & FIFOEMP) != 0) {
clrchn:
		/*
		 * Clear any handler for this FIFO, decrement
		 * the FIFO use count for the SCB, and release
		 * the FIFO.
		 */
		ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR);
		ahd_outb(ahd, SCB_FIFO_USE_COUNT,
			 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT) - 1);
		ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
	}
}

/*
 * Look for entries in the QoutFIFO that have completed.
 * The valid_tag completion field indicates the validity
 * of the entry - the valid value toggles each time through
 * the queue. We use the sg_status field in the completion
 * entry to avoid referencing the hscb if the completion
 * occurred with no errors and no residual.  sg_status is
 * a copy of the first byte (little endian) of the sgptr
 * hscb field.
 */
static void
ahd_run_qoutfifo(struct ahd_softc *ahd)
{
	struct ahd_completion *completion;
	struct scb *scb;
	u_int  scb_index;

	/* Guard against re-entry; this routine is not re-entrant. */
	if ((ahd->flags & AHD_RUNNING_QOUTFIFO) != 0)
		panic("ahd_run_qoutfifo recursion");
	ahd->flags |= AHD_RUNNING_QOUTFIFO;
	ahd_sync_qoutfifo(ahd, BUS_DMASYNC_POSTREAD);
	for (;;) {
		completion = &ahd->qoutfifo[ahd->qoutfifonext];

		if (completion->valid_tag != ahd->qoutfifonext_valid_tag)
			break;

		scb_index = ahd_le16toh(completion->tag);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			printk("%s: WARNING no command for scb %d "
			       "(cmdcmplt)\nQOUTPOS = %d\n",
			       ahd_name(ahd), scb_index,
			       ahd->qoutfifonext);
			ahd_dump_card_state(ahd);
		} else if ((completion->sg_status & SG_STATUS_VALID) != 0) {
			ahd_handle_scb_status(ahd, scb);
		} else {
			ahd_done(ahd, scb);
		}

		ahd->qoutfifonext = (ahd->qoutfifonext+1) & (AHD_QOUT_SIZE-1);
		if (ahd->qoutfifonext == 0)
			ahd->qoutfifonext_valid_tag ^= QOUTFIFO_ENTRY_VALID;
	}
	ahd->flags &= ~AHD_RUNNING_QOUTFIFO;
}

/************************* Interrupt Handling *********************************/
static void
ahd_handle_hwerrint(struct ahd_softc *ahd)
{
	/*
	 * Some catastrophic hardware error has occurred.
	 * Print it for the user and disable the controller.
	 */
	int i;
	int error;

	error = ahd_inb(ahd, ERROR);
	for (i = 0; i < num_errors; i++) {
		if ((error & ahd_hard_errors[i].errno) != 0)
			printk("%s: hwerrint, %s\n",
			       ahd_name(ahd), ahd_hard_errors[i].errmesg);
	}

	ahd_dump_card_state(ahd);
	panic("BRKADRINT");

	/*
	 * NOTE(review): panic() does not return, so the teardown below
	 * is unreachable as written - confirm whether this is intentional
	 * dead code kept for documentation purposes.
	 */
	/* Tell everyone that this HBA is no longer available */
	ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
		       CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_UNKNOWN,
		       CAM_NO_HBA);

	/* Tell the system that this controller has gone away. */
	ahd_free(ahd);
}

#ifdef AHD_DEBUG
/* Debug helper: print every S/G element attached to an SCB. */
static void
ahd_dump_sglist(struct scb *scb)
{
	int i;

	if (scb->sg_count > 0) {
		if ((scb->ahd_softc->flags & AHD_64BIT_ADDRESSING) != 0) {
			struct ahd_dma64_seg *sg_list;

			sg_list = (struct ahd_dma64_seg*)scb->sg_list;
			for (i = 0; i < scb->sg_count; i++) {
				uint64_t addr;
				uint32_t len;

				addr = ahd_le64toh(sg_list[i].addr);
				len = ahd_le32toh(sg_list[i].len);
				printk("sg[%d] - Addr 0x%x%x : Length %d%s\n",
				       i,
				       (uint32_t)((addr >> 32) & 0xFFFFFFFF),
				       (uint32_t)(addr & 0xFFFFFFFF),
				       sg_list[i].len & AHD_SG_LEN_MASK,
				       (sg_list[i].len & AHD_DMA_LAST_SEG)
				     ? " Last" : "");
			}
		} else {
			struct ahd_dma_seg *sg_list;

			sg_list = (struct ahd_dma_seg*)scb->sg_list;
			for (i = 0; i < scb->sg_count; i++) {
				uint32_t len;

				len = ahd_le32toh(sg_list[i].len);
				printk("sg[%d] - Addr 0x%x%x : Length %d%s\n",
				       i,
				       (len & AHD_SG_HIGH_ADDR_MASK) >> 24,
				       ahd_le32toh(sg_list[i].addr),
				       len & AHD_SG_LEN_MASK,
				       len & AHD_DMA_LAST_SEG ? " Last" : "");
			}
		}
	}
}
#endif  /* AHD_DEBUG */

/* Service a sequencer interrupt (SEQINT); dispatches on SEQINTCODE. */
static void
ahd_handle_seqint(struct ahd_softc *ahd, u_int intstat)
{
	u_int seqintcode;

	/*
	 * Save the sequencer interrupt code and clear the SEQINT
	 * bit.  We will unpause the sequencer, if appropriate,
	 * after servicing the request.
	 */
	seqintcode = ahd_inb(ahd, SEQINTCODE);
	ahd_outb(ahd, CLRINT, CLRSEQINT);
	if ((ahd->bugs & AHD_INTCOLLISION_BUG) != 0) {
		/*
		 * Unpause the sequencer and let it clear
		 * SEQINT by writing NO_SEQINT to it.  This
		 * will cause the sequencer to be paused again,
		 * which is the expected state of this routine.
		 */
		ahd_unpause(ahd);
		while (!ahd_is_paused(ahd))
			;
		ahd_outb(ahd, CLRINT, CLRSEQINT);
	}
	ahd_update_modes(ahd);
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MISC) != 0)
		printk("%s: Handle Seqint Called for code %d\n",
		       ahd_name(ahd), seqintcode);
#endif
	switch (seqintcode) {
	case ENTERING_NONPACK:
	{
		struct	scb *scb;
		u_int	scbid;

		AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
				 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			/*
			 * Somehow need to know if this
			 * is from a selection or reselection.
			 * From that, we can determine target
			 * ID so we at least have an I_T nexus.
			 */
		} else {
			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
			ahd_outb(ahd, SAVED_LUN, scb->hscb->lun);
			ahd_outb(ahd, SEQ_FLAGS, 0x0);
		}
		if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0
		 && (ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
			/*
			 * Phase change after read stream with
			 * CRC error with P0 asserted on last
			 * packet.
			 */
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
				printk("%s: Assuming LQIPHASE_NLQ with "
				       "P0 assertion\n", ahd_name(ahd));
#endif
		}
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
			printk("%s: Entering NONPACK\n", ahd_name(ahd));
#endif
		break;
	}
	case INVALID_SEQINT:
		printk("%s: Invalid Sequencer interrupt occurred, "
		       "resetting channel.\n",
		       ahd_name(ahd));
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0)
			ahd_dump_card_state(ahd);
#endif
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		break;
	case STATUS_OVERRUN:
	{
		struct	scb *scb;
		u_int	scbid;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL)
			ahd_print_path(ahd, scb);
		else
			printk("%s: ", ahd_name(ahd));
		printk("SCB %d Packetized Status Overrun", scbid);
		ahd_dump_card_state(ahd);
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		break;
	}
	case CFG4ISTAT_INTR:
	{
		struct	scb *scb;
		u_int	scbid;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			ahd_dump_card_state(ahd);
			printk("CFG4ISTAT: Free SCB %d referenced", scbid);
			panic("For safety");
		}
		/* Point the DMA engine at this SCB's sense buffer. */
		ahd_outq(ahd, HADDR, scb->sense_busaddr);
		ahd_outw(ahd, HCNT, AHD_SENSE_BUFSIZE);
		ahd_outb(ahd, HCNT + 2, 0);
		ahd_outb(ahd, SG_CACHE_PRE, SG_LAST_SEG);
		ahd_outb(ahd, DFCNTRL, PRELOADEN|SCSIEN|HDMAEN);
		break;
	}
	case ILLEGAL_PHASE:
	{
		u_int bus_phase;

		bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
		printk("%s: ILLEGAL_PHASE 0x%x\n",
		       ahd_name(ahd), bus_phase);

		switch (bus_phase) {
		case P_DATAOUT:
		case P_DATAIN:
		case P_DATAOUT_DT:
		case P_DATAIN_DT:
		case P_MESGOUT:
		case P_STATUS:
		case P_MESGIN:
			ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
			printk("%s: Issued Bus Reset.\n", ahd_name(ahd));
			break;
		case P_COMMAND:
		{
			struct	ahd_devinfo devinfo;
			struct	scb *scb;
			struct	ahd_initiator_tinfo *targ_info;
			struct	ahd_tmode_tstate *tstate;
			struct	ahd_transinfo *tinfo;
			u_int	scbid;

			/*
			 * If a target takes us into the command phase
			 * assume that it has been externally reset and
			 * has thus lost our previous packetized negotiation
			 * agreement.  Since we have not sent an identify
			 * message and may not have fully qualified the
			 * connection, we change our command to TUR, assert
			 * ATN and ABORT the task when we go to message in
			 * phase.  The OSM will see the REQUEUE_REQUEST
			 * status and retry the command.
			 */
			scbid = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scbid);
			if (scb == NULL) {
				printk("Invalid phase with no valid SCB.  "
				       "Resetting bus.\n");
				ahd_reset_channel(ahd, 'A',
						  /*Initiate Reset*/TRUE);
				break;
			}
			ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
					    SCB_GET_TARGET(ahd, scb),
					    SCB_GET_LUN(scb),
					    SCB_GET_CHANNEL(ahd, scb),
					    ROLE_INITIATOR);
			targ_info = ahd_fetch_transinfo(ahd,
							devinfo.channel,
							devinfo.our_scsiid,
							devinfo.target,
							&tstate);
			tinfo = &targ_info->curr;
			/* Drop back to narrow/async until we renegotiate. */
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_ACTIVE, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_ACTIVE, /*paused*/TRUE);
			/* Hand-craft TUR command */
			ahd_outb(ahd, SCB_CDB_STORE, 0);
			ahd_outb(ahd, SCB_CDB_STORE+1, 0);
			ahd_outb(ahd, SCB_CDB_STORE+2, 0);
			ahd_outb(ahd, SCB_CDB_STORE+3, 0);
			ahd_outb(ahd, SCB_CDB_STORE+4, 0);
			ahd_outb(ahd, SCB_CDB_STORE+5, 0);
			ahd_outb(ahd, SCB_CDB_LEN, 6);
			scb->hscb->control &= ~(TAG_ENB|SCB_TAG_TYPE);
			scb->hscb->control |= MK_MESSAGE;
			ahd_outb(ahd, SCB_CONTROL, scb->hscb->control);
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd_outb(ahd, SAVED_SCSIID, scb->hscb->scsiid);
			/*
			 * The lun is 0, regardless of the SCB's lun
			 * as we have not sent an identify message.
			 */
			ahd_outb(ahd, SAVED_LUN, 0);
			ahd_outb(ahd, SEQ_FLAGS, 0);
			ahd_assert_atn(ahd);
			scb->flags &= ~SCB_PACKETIZED;
			scb->flags |= SCB_ABORT|SCB_EXTERNAL_RESET;
			ahd_freeze_devq(ahd, scb);
			ahd_set_transaction_status(scb, CAM_REQUEUE_REQ);
			ahd_freeze_scb(scb);

			/* Notify XPT */
			ahd_send_async(ahd, devinfo.channel, devinfo.target,
				       CAM_LUN_WILDCARD, AC_SENT_BDR);

			/*
			 * Allow the sequencer to continue with
			 * non-pack processing.
			 */
			ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
			ahd_outb(ahd, CLRLQOINT1, CLRLQOPHACHGINPKT);
			if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) {
				ahd_outb(ahd, CLRLQOINT1, 0);
			}
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
				ahd_print_path(ahd, scb);
				printk("Unexpected command phase from "
				       "packetized target\n");
			}
#endif
			break;
		}
		}
		break;
	}
	case CFG4OVERRUN:
	{
		struct	scb *scb;
		u_int	scb_index;

#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			printk("%s: CFG4OVERRUN mode = %x\n", ahd_name(ahd),
			       ahd_inb(ahd, MODE_PTR));
		}
#endif
		scb_index = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scb_index);
		if (scb == NULL) {
			/*
			 * Attempt to transfer to an SCB that is
			 * not outstanding.
			 */
			ahd_assert_atn(ahd);
			ahd_outb(ahd, MSG_OUT, HOST_MSG);
			ahd->msgout_buf[0] = MSG_ABORT_TASK;
			ahd->msgout_len = 1;
			ahd->msgout_index = 0;
			ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
			/*
			 * Clear status received flag to prevent any
			 * attempt to complete this bogus SCB.
			 */
			ahd_outb(ahd, SCB_CONTROL,
				 ahd_inb_scbram(ahd, SCB_CONTROL)
				 & ~STATUS_RCVD);
		}
		break;
	}
	case DUMP_CARD_STATE:
	{
		ahd_dump_card_state(ahd);
		break;
	}
	case PDATA_REINIT:
	{
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			printk("%s: PDATA_REINIT - DFCNTRL = 0x%x "
			       "SG_CACHE_SHADOW = 0x%x\n",
			       ahd_name(ahd), ahd_inb(ahd, DFCNTRL),
			       ahd_inb(ahd, SG_CACHE_SHADOW));
		}
#endif
		ahd_reinitialize_dataptrs(ahd);
		break;
	}
	case HOST_MSG_LOOP:
	{
		struct ahd_devinfo devinfo;

		/*
		 * The sequencer has encountered a message phase
		 * that requires host assistance for completion.
		 * While handling the message phase(s), we will be
		 * notified by the sequencer after each byte is
		 * transferred so we can track bus phase changes.
		 *
		 * If this is the first time we've seen a HOST_MSG_LOOP
		 * interrupt, initialize the state of the host message
		 * loop.
		 */
		ahd_fetch_devinfo(ahd, &devinfo);
		if (ahd->msg_type == MSG_TYPE_NONE) {
			struct scb *scb;
			u_int scb_index;
			u_int bus_phase;

			bus_phase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK;
			if (bus_phase != P_MESGIN
			 && bus_phase != P_MESGOUT) {
				printk("ahd_intr: HOST_MSG_LOOP bad "
				       "phase 0x%x\n", bus_phase);
				/*
				 * Probably transitioned to bus free before
				 * we got here.  Just punt the message.
				 */
				ahd_dump_card_state(ahd);
				ahd_clear_intstat(ahd);
				ahd_restart(ahd);
				return;
			}

			scb_index = ahd_get_scbptr(ahd);
			scb = ahd_lookup_scb(ahd, scb_index);
			if (devinfo.role == ROLE_INITIATOR) {
				if (bus_phase == P_MESGOUT)
					ahd_setup_initiator_msgout(ahd,
								   &devinfo,
								   scb);
				else {
					ahd->msg_type =
					    MSG_TYPE_INITIATOR_MSGIN;
					ahd->msgin_index = 0;
				}
			}
#ifdef AHD_TARGET_MODE
			else {
				if (bus_phase == P_MESGOUT) {
					ahd->msg_type =
					    MSG_TYPE_TARGET_MSGOUT;
					ahd->msgin_index = 0;
				}
				else
					ahd_setup_target_msgin(ahd,
							       &devinfo,
							       scb);
			}
#endif
		}

		ahd_handle_message_phase(ahd);
		break;
	}
	case NO_MATCH:
	{
		/* Ensure we don't leave the selection hardware on */
		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
		ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);

		printk("%s:%c:%d: no active SCB for reconnecting "
		       "target - issuing BUS DEVICE RESET\n",
		       ahd_name(ahd), 'A', ahd_inb(ahd, SELID) >> 4);
		printk("SAVED_SCSIID == 0x%x, SAVED_LUN == 0x%x, "
		       "REG0 == 0x%x ACCUM = 0x%x\n",
		       ahd_inb(ahd, SAVED_SCSIID), ahd_inb(ahd, SAVED_LUN),
		       ahd_inw(ahd, REG0), ahd_inb(ahd, ACCUM));
		printk("SEQ_FLAGS == 0x%x, SCBPTR == 0x%x, BTT == 0x%x, "
		       "SINDEX == 0x%x\n",
		       ahd_inb(ahd, SEQ_FLAGS), ahd_get_scbptr(ahd),
		       ahd_find_busy_tcl(ahd,
					 BUILD_TCL(ahd_inb(ahd, SAVED_SCSIID),
						   ahd_inb(ahd, SAVED_LUN))),
		       ahd_inw(ahd, SINDEX));
		printk("SELID == 0x%x, SCB_SCSIID == 0x%x, SCB_LUN == 0x%x, "
		       "SCB_CONTROL == 0x%x\n",
		       ahd_inb(ahd, SELID), ahd_inb_scbram(ahd, SCB_SCSIID),
		       ahd_inb_scbram(ahd, SCB_LUN),
		       ahd_inb_scbram(ahd, SCB_CONTROL));
		printk("SCSIBUS[0] == 0x%x, SCSISIGI == 0x%x\n",
		       ahd_inb(ahd, SCSIBUS), ahd_inb(ahd, SCSISIGI));
		printk("SXFRCTL0 == 0x%x\n", ahd_inb(ahd, SXFRCTL0));
		printk("SEQCTL0 == 0x%x\n", ahd_inb(ahd, SEQCTL0));
		ahd_dump_card_state(ahd);
		ahd->msgout_buf[0] = MSG_BUS_DEV_RESET;
		ahd->msgout_len = 1;
		ahd->msgout_index = 0;
		ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
		ahd_outb(ahd, MSG_OUT, HOST_MSG);
		ahd_assert_atn(ahd);
		break;
	}
	case PROTO_VIOLATION:
	{
		ahd_handle_proto_violation(ahd);
		break;
	}
	case IGN_WIDE_RES:
	{
		struct ahd_devinfo devinfo;

		ahd_fetch_devinfo(ahd, &devinfo);
		ahd_handle_ign_wide_residue(ahd, &devinfo);
		break;
	}
	case BAD_PHASE:
	{
		u_int lastphase;

		lastphase = ahd_inb(ahd, LASTPHASE);
		printk("%s:%c:%d: unknown scsi bus phase %x, "
		       "lastphase = 0x%x.  Attempting to continue\n",
		       ahd_name(ahd), 'A',
		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
		       lastphase, ahd_inb(ahd, SCSISIGI));
		break;
	}
	case MISSED_BUSFREE:
	{
		u_int lastphase;

		lastphase = ahd_inb(ahd, LASTPHASE);
		printk("%s:%c:%d: Missed busfree. "
		       "Lastphase = 0x%x, Curphase = 0x%x\n",
		       ahd_name(ahd), 'A',
		       SCSIID_TARGET(ahd, ahd_inb(ahd, SAVED_SCSIID)),
		       lastphase, ahd_inb(ahd, SCSISIGI));
		ahd_restart(ahd);
		return;
	}
	case DATA_OVERRUN:
	{
		/*
		 * When the sequencer detects an overrun, it
		 * places the controller in "BITBUCKET" mode
		 * and allows the target to complete its transfer.
		 * Unfortunately, none of the counters get updated
		 * when the controller is in this mode, so we have
		 * no way of knowing how large the overrun was.
		 */
		struct	scb *scb;
		u_int	scbindex;
#ifdef AHD_DEBUG
		u_int	lastphase;
#endif

		scbindex = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbindex);
#ifdef AHD_DEBUG
		lastphase = ahd_inb(ahd, LASTPHASE);
		if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) {
			ahd_print_path(ahd, scb);
			printk("data overrun detected %s.  Tag == 0x%x.\n",
			       ahd_lookup_phase_entry(lastphase)->phasemsg,
			       SCB_GET_TAG(scb));
			ahd_print_path(ahd, scb);
			printk("%s seen Data Phase.  Length = %ld.  "
			       "NumSGs = %d.\n",
			       ahd_inb(ahd, SEQ_FLAGS) & DPHASE
			       ? "Have" : "Haven't",
			       ahd_get_transfer_length(scb), scb->sg_count);
			ahd_dump_sglist(scb);
		}
#endif

		/*
		 * Set this and it will take effect when the
		 * target does a command complete.
		 */
		ahd_freeze_devq(ahd, scb);
		ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR);
		ahd_freeze_scb(scb);
		break;
	}
	case MKMSG_FAILED:
	{
		struct ahd_devinfo devinfo;
		struct scb *scb;
		u_int scbid;

		ahd_fetch_devinfo(ahd, &devinfo);
		printk("%s:%c:%d:%d: Attempt to issue message failed\n",
		       ahd_name(ahd), devinfo.channel, devinfo.target,
		       devinfo.lun);
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL
		 && (scb->flags & SCB_RECOVERY_SCB) != 0)
			/*
			 * Ensure that we didn't put a second instance of this
			 * SCB into the QINFIFO.
			 */
			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
					   SCB_GET_CHANNEL(ahd, scb),
					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		ahd_outb(ahd, SCB_CONTROL,
			 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
		break;
	}
	case TASKMGMT_FUNC_COMPLETE:
	{
		u_int	scbid;
		struct	scb *scb;

		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL) {
			u_int	   lun;
			u_int	   tag;
			cam_status error;

			ahd_print_path(ahd, scb);
			printk("Task Management Func 0x%x Complete\n",
			       scb->hscb->task_management);
			lun = CAM_LUN_WILDCARD;
			tag = SCB_LIST_NULL;

			switch (scb->hscb->task_management) {
			case SIU_TASKMGMT_ABORT_TASK:
				tag = SCB_GET_TAG(scb);
				/* fallthrough */
			case SIU_TASKMGMT_ABORT_TASK_SET:
			case SIU_TASKMGMT_CLEAR_TASK_SET:
				lun = scb->hscb->lun;
				error = CAM_REQ_ABORTED;
				ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb),
					       'A', lun, tag, ROLE_INITIATOR,
					       error);
				break;
			case SIU_TASKMGMT_LUN_RESET:
				lun = scb->hscb->lun;
				/* fallthrough */
			case SIU_TASKMGMT_TARGET_RESET:
			{
				struct ahd_devinfo devinfo;

				ahd_scb_devinfo(ahd, &devinfo, scb);
				error = CAM_BDR_SENT;
				ahd_handle_devreset(ahd, &devinfo, lun,
						    CAM_BDR_SENT,
						    lun != CAM_LUN_WILDCARD
						    ? "Lun Reset"
						    : "Target Reset",
						    /*verbose_level*/0);
				break;
			}
			default:
				panic("Unexpected TaskMgmt Func\n");
				break;
			}
		}
		break;
	}
	case TASKMGMT_CMD_CMPLT_OKAY:
	{
		u_int	scbid;
		struct	scb *scb;

		/*
		 * An ABORT TASK TMF failed to be delivered before
		 * the targeted command completed normally.
		 */
		scbid = ahd_get_scbptr(ahd);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb != NULL) {
			/*
			 * Remove the second instance of this SCB from
			 * the QINFIFO if it is still there.
			 */
			ahd_print_path(ahd, scb);
			printk("SCB completes before TMF\n");
			/*
			 * Handle losing the race.  Wait until any
			 * current selection completes.  We will then
			 * set the TMF back to zero in this SCB so that
			 * the sequencer doesn't bother to issue another
			 * sequencer interrupt for its completion.
			 */
			while ((ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
			    && (ahd_inb(ahd, SSTAT0) & SELDO) == 0
			    && (ahd_inb(ahd, SSTAT1) & SELTO) == 0)
				;
			ahd_outb(ahd, SCB_TASK_MANAGEMENT, 0);
			ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
					   SCB_GET_CHANNEL(ahd, scb),
					   SCB_GET_LUN(scb), SCB_GET_TAG(scb),
					   ROLE_INITIATOR, /*status*/0,
					   SEARCH_REMOVE);
		}
		break;
	}
	case TRACEPOINT0:
	case TRACEPOINT1:
	case TRACEPOINT2:
	case TRACEPOINT3:
		printk("%s: Tracepoint %d\n", ahd_name(ahd),
		       seqintcode - TRACEPOINT0);
		break;
	case NO_SEQINT:
		break;
	case SAW_HWERR:
		ahd_handle_hwerrint(ahd);
		break;
	default:
		printk("%s: Unexpected SEQINTCODE %d\n", ahd_name(ahd),
		       seqintcode);
		break;
	}
	/*
	 * The sequencer is paused immediately on
	 * a SEQINT, so we should restart it when
	 * we're done.
	 */
	ahd_unpause(ahd);
}

/* Service a SCSI interrupt (SCSIINT); dispatches on latched status. */
static void
ahd_handle_scsiint(struct ahd_softc *ahd, u_int intstat)
{
	struct scb	*scb;
	u_int		 status0;
	u_int		 status3;
	u_int		 status;
	u_int		 lqistat1;
	u_int		 lqostat0;
	u_int		 scbid;
	u_int		 busfreetime;

	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	status3 = ahd_inb(ahd, SSTAT3) & (NTRAMPERR|OSRAMPERR);
	status0 = ahd_inb(ahd, SSTAT0) & (IOERR|OVERRUN|SELDI|SELDO);
	status = ahd_inb(ahd, SSTAT1) & (SELTO|SCSIRSTI|BUSFREE|SCSIPERR);
	lqistat1 = ahd_inb(ahd, LQISTAT1);
	lqostat0 = ahd_inb(ahd, LQOSTAT0);
	busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME;

	/*
	 * Ignore external resets after a bus reset.
	 */
	if (((status & SCSIRSTI) != 0) && (ahd->flags & AHD_BUS_RESET_ACTIVE)) {
		ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);
		return;
	}

	/*
	 * Clear bus reset flag
	 */
	ahd->flags &= ~AHD_BUS_RESET_ACTIVE;

	if ((status0 & (SELDI|SELDO)) != 0) {
		u_int simode0;

		/* Only honor selection status bits that are enabled. */
		ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
		simode0 = ahd_inb(ahd, SIMODE0);
		status0 &= simode0 & (IOERR|OVERRUN|SELDI|SELDO);
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	}
	scbid = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scbid);
	if (scb != NULL
	 && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0)
		scb = NULL;

	if ((status0 & IOERR) != 0) {
		u_int now_lvd;

		now_lvd = ahd_inb(ahd, SBLKCTL) & ENAB40;
		printk("%s: Transceiver State Has Changed to %s mode\n",
		       ahd_name(ahd), now_lvd ? "LVD" : "SE");
		ahd_outb(ahd, CLRSINT0, CLRIOERR);
		/*
		 * A change in I/O mode is equivalent to a bus reset.
		 */
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
		ahd_pause(ahd);
		ahd_setup_iocell_workaround(ahd);
		ahd_unpause(ahd);
	} else if ((status0 & OVERRUN) != 0) {

		printk("%s: SCSI offset overrun detected.  Resetting bus.\n",
		       ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE);
	} else if ((status & SCSIRSTI) != 0) {

		printk("%s: Someone reset channel A\n", ahd_name(ahd));
		ahd_reset_channel(ahd, 'A', /*Initiate Reset*/FALSE);
	} else if ((status & SCSIPERR) != 0) {

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		ahd_handle_transmission_error(ahd);
	} else if (lqostat0 != 0) {

		printk("%s: lqostat0 == 0x%x!\n", ahd_name(ahd), lqostat0);
		ahd_outb(ahd, CLRLQOINT0, lqostat0);
		if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0)
			ahd_outb(ahd, CLRLQOINT1, 0);
	} else if ((status & SELTO) != 0) {
		/* Stop the selection */
		ahd_outb(ahd, SCSISEQ0, 0);

		/* Make sure the sequencer is in a safe location. */
		ahd_clear_critical_section(ahd);

		/* No more pending messages */
		ahd_clear_msg_state(ahd);

		/* Clear interrupt state */
		ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRBUSFREE|CLRSCSIPERR);

		/*
		 * Although the driver does not care about the
		 * 'Selection in Progress' status bit, the busy
		 * LED does.  SELINGO is only cleared by a successful
		 * selection, so we must manually clear it to insure
		 * the LED turns off just incase no future successful
		 * selections occur (e.g. no devices on the bus).
		 */
		ahd_outb(ahd, CLRSINT0, CLRSELINGO);

		scbid = ahd_inw(ahd, WAITING_TID_HEAD);
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: ahd_intr - referenced scb not "
			       "valid during SELTO scb(0x%x)\n",
			       ahd_name(ahd), scbid);
			ahd_dump_card_state(ahd);
		} else {
			struct ahd_devinfo devinfo;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_SELTO) != 0) {
				ahd_print_path(ahd, scb);
				printk("Saw Selection Timeout for SCB 0x%x\n",
				       scbid);
			}
#endif
			ahd_scb_devinfo(ahd, &devinfo, scb);
			ahd_set_transaction_status(scb, CAM_SEL_TIMEOUT);
			ahd_freeze_devq(ahd, scb);

			/*
			 * Cancel any pending transactions on the device
			 * now that it seems to be missing.  This will
			 * also revert us to async/narrow transfers until
			 * we can renegotiate with the device.
*/ ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, CAM_SEL_TIMEOUT, "Selection Timeout", /*verbose_level*/1); } ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_iocell_first_selection(ahd); ahd_unpause(ahd); } else if ((status0 & (SELDI|SELDO)) != 0) { ahd_iocell_first_selection(ahd); ahd_unpause(ahd); } else if (status3 != 0) { printk("%s: SCSI Cell parity error SSTAT3 == 0x%x\n", ahd_name(ahd), status3); ahd_outb(ahd, CLRSINT3, status3); } else if ((lqistat1 & (LQIPHASE_LQ|LQIPHASE_NLQ)) != 0) { /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); ahd_handle_lqiphase_error(ahd, lqistat1); } else if ((lqistat1 & LQICRCI_NLQ) != 0) { /* * This status can be delayed during some * streaming operations. The SCSIPHASE * handler has already dealt with this case * so just clear the error. */ ahd_outb(ahd, CLRLQIINT1, CLRLQICRCI_NLQ); } else if ((status & BUSFREE) != 0 || (lqistat1 & LQOBUSFREE) != 0) { u_int lqostat1; int restart; int clear_fifo; int packetized; u_int mode; /* * Clear our selection hardware as soon as possible. * We may have an entry in the waiting Q for this target, * that is affected by this busfree and we don't want to * go about selecting the target while we handle the event. */ ahd_outb(ahd, SCSISEQ0, 0); /* Make sure the sequencer is in a safe location. */ ahd_clear_critical_section(ahd); /* * Determine what we were up to at the time of * the busfree. */ mode = AHD_MODE_SCSI; busfreetime = ahd_inb(ahd, SSTAT2) & BUSFREETIME; lqostat1 = ahd_inb(ahd, LQOSTAT1); switch (busfreetime) { case BUSFREE_DFF0: case BUSFREE_DFF1: { mode = busfreetime == BUSFREE_DFF0 ? 
AHD_MODE_DFF0 : AHD_MODE_DFF1; ahd_set_modes(ahd, mode, mode); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) { printk("%s: Invalid SCB %d in DFF%d " "during unexpected busfree\n", ahd_name(ahd), scbid, mode); packetized = 0; } else packetized = (scb->flags & SCB_PACKETIZED) != 0; clear_fifo = 1; break; } case BUSFREE_LQO: clear_fifo = 0; packetized = 1; break; default: clear_fifo = 0; packetized = (lqostat1 & LQOBUSFREE) != 0; if (!packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE && (ahd_inb(ahd, SSTAT0) & SELDI) == 0 && ((ahd_inb(ahd, SSTAT0) & SELDO) == 0 || (ahd_inb(ahd, SCSISEQ0) & ENSELO) == 0)) /* * Assume packetized if we are not * on the bus in a non-packetized * capacity and any pending selection * was a packetized selection. */ packetized = 1; break; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("Saw Busfree. Busfreetime = 0x%x.\n", busfreetime); #endif /* * Busfrees that occur in non-packetized phases are * handled by the nonpkt_busfree handler. */ if (packetized && ahd_inb(ahd, LASTPHASE) == P_BUSFREE) { restart = ahd_handle_pkt_busfree(ahd, busfreetime); } else { packetized = 0; restart = ahd_handle_nonpkt_busfree(ahd); } /* * Clear the busfree interrupt status. The setting of * the interrupt is a pulse, so in a perfect world, we * would not need to muck with the ENBUSFREE logic. This * would ensure that if the bus moves on to another * connection, busfree protection is still in force. If * BUSFREEREV is broken, however, we must manually clear * the ENBUSFREE if the busfree occurred during a non-pack * connection so that we don't get false positives during * future, packetized, connections. 
*/ ahd_outb(ahd, CLRSINT1, CLRBUSFREE); if (packetized == 0 && (ahd->bugs & AHD_BUSFREEREV_BUG) != 0) ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENBUSFREE); if (clear_fifo) ahd_clear_fifo(ahd, mode); ahd_clear_msg_state(ahd); ahd_outb(ahd, CLRINT, CLRSCSIINT); if (restart) { ahd_restart(ahd); } else { ahd_unpause(ahd); } } else { printk("%s: Missing case in ahd_handle_scsiint. status = %x\n", ahd_name(ahd), status); ahd_dump_card_state(ahd); ahd_clear_intstat(ahd); ahd_unpause(ahd); } } static void ahd_handle_transmission_error(struct ahd_softc *ahd) { struct scb *scb; u_int scbid; u_int lqistat1; u_int lqistat2; u_int msg_out; u_int curphase; u_int lastphase; u_int perrdiag; u_int cur_col; int silent; scb = NULL; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); lqistat1 = ahd_inb(ahd, LQISTAT1) & ~(LQIPHASE_LQ|LQIPHASE_NLQ); lqistat2 = ahd_inb(ahd, LQISTAT2); if ((lqistat1 & (LQICRCI_NLQ|LQICRCI_LQ)) == 0 && (ahd->bugs & AHD_NLQICRC_DELAYED_BUG) != 0) { u_int lqistate; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); lqistate = ahd_inb(ahd, LQISTATE); if ((lqistate >= 0x1E && lqistate <= 0x24) || (lqistate == 0x29)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_RECOVERY) != 0) { printk("%s: NLQCRC found via LQISTATE\n", ahd_name(ahd)); } #endif lqistat1 |= LQICRCI_NLQ; } ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); } ahd_outb(ahd, CLRLQIINT1, lqistat1); lastphase = ahd_inb(ahd, LASTPHASE); curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; perrdiag = ahd_inb(ahd, PERRDIAG); msg_out = MSG_INITIATOR_DET_ERR; ahd_outb(ahd, CLRSINT1, CLRSCSIPERR); /* * Try to find the SCB associated with this error. 
*/ silent = FALSE; if (lqistat1 == 0 || (lqistat1 & LQICRCI_NLQ) != 0) { if ((lqistat1 & (LQICRCI_NLQ|LQIOVERI_NLQ)) != 0) ahd_set_active_fifo(ahd); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && SCB_IS_SILENT(scb)) silent = TRUE; } cur_col = 0; if (silent == FALSE) { printk("%s: Transmission error detected\n", ahd_name(ahd)); ahd_lqistat1_print(lqistat1, &cur_col, 50); ahd_lastphase_print(lastphase, &cur_col, 50); ahd_scsisigi_print(curphase, &cur_col, 50); ahd_perrdiag_print(perrdiag, &cur_col, 50); printk("\n"); ahd_dump_card_state(ahd); } if ((lqistat1 & (LQIOVERI_LQ|LQIOVERI_NLQ)) != 0) { if (silent == FALSE) { printk("%s: Gross protocol error during incoming " "packet. lqistat1 == 0x%x. Resetting bus.\n", ahd_name(ahd), lqistat1); } ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } else if ((lqistat1 & LQICRCI_LQ) != 0) { /* * A CRC error has been detected on an incoming LQ. * The bus is currently hung on the last ACK. * Hit LQIRETRY to release the last ack, and * wait for the sequencer to determine that ATNO * is asserted while in message out to take us * to our host message loop. No NONPACKREQ or * LQIPHASE type errors will occur in this * scenario. After this first LQIRETRY, the LQI * manager will be in ISELO where it will * happily sit until another packet phase begins. * Unexpected bus free detection is enabled * through any phases that occur after we release * this last ack until the LQI manager sees a * packet phase. This implies we may have to * ignore a perfectly valid "unexected busfree" * after our "initiator detected error" message is * sent. A busfree is the expected response after * we tell the target that it's L_Q was corrupted. * (SPI4R09 10.7.3.3.3) */ ahd_outb(ahd, LQCTL2, LQIRETRY); printk("LQIRetry for LQICRCI_LQ to release ACK\n"); } else if ((lqistat1 & LQICRCI_NLQ) != 0) { /* * We detected a CRC error in a NON-LQ packet. 
* The hardware has varying behavior in this situation * depending on whether this packet was part of a * stream or not. * * PKT by PKT mode: * The hardware has already acked the complete packet. * If the target honors our outstanding ATN condition, * we should be (or soon will be) in MSGOUT phase. * This will trigger the LQIPHASE_LQ status bit as the * hardware was expecting another LQ. Unexpected * busfree detection is enabled. Once LQIPHASE_LQ is * true (first entry into host message loop is much * the same), we must clear LQIPHASE_LQ and hit * LQIRETRY so the hardware is ready to handle * a future LQ. NONPACKREQ will not be asserted again * once we hit LQIRETRY until another packet is * processed. The target may either go busfree * or start another packet in response to our message. * * Read Streaming P0 asserted: * If we raise ATN and the target completes the entire * stream (P0 asserted during the last packet), the * hardware will ack all data and return to the ISTART * state. When the target reponds to our ATN condition, * LQIPHASE_LQ will be asserted. We should respond to * this with an LQIRETRY to prepare for any future * packets. NONPACKREQ will not be asserted again * once we hit LQIRETRY until another packet is * processed. The target may either go busfree or * start another packet in response to our message. * Busfree detection is enabled. * * Read Streaming P0 not asserted: * If we raise ATN and the target transitions to * MSGOUT in or after a packet where P0 is not * asserted, the hardware will assert LQIPHASE_NLQ. * We should respond to the LQIPHASE_NLQ with an * LQIRETRY. Should the target stay in a non-pkt * phase after we send our message, the hardware * will assert LQIPHASE_LQ. Recovery is then just as * listed above for the read streaming with P0 asserted. * Busfree detection is enabled. */ if (silent == FALSE) printk("LQICRC_NLQ\n"); if (scb == NULL) { printk("%s: No SCB valid for LQICRC_NLQ. 
" "Resetting bus\n", ahd_name(ahd)); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } } else if ((lqistat1 & LQIBADLQI) != 0) { printk("Need to handle BADLQI!\n"); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); return; } else if ((perrdiag & (PARITYERR|PREVPHASE)) == PARITYERR) { if ((curphase & ~P_DATAIN_DT) != 0) { /* Ack the byte. So we can continue. */ if (silent == FALSE) printk("Acking %s to clear perror\n", ahd_lookup_phase_entry(curphase)->phasemsg); ahd_inb(ahd, SCSIDAT); } if (curphase == P_MESGIN) msg_out = MSG_PARITY_ERROR; } /* * We've set the hardware to assert ATN if we * get a parity error on "in" phases, so all we * need to do is stuff the message buffer with * the appropriate message. "In" phases have set * mesg_out to something other than MSG_NOP. */ ahd->send_msg_perror = msg_out; if (scb != NULL && msg_out == MSG_INITIATOR_DET_ERR) scb->flags |= SCB_TRANSMISSION_ERROR; ahd_outb(ahd, MSG_OUT, HOST_MSG); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_unpause(ahd); } static void ahd_handle_lqiphase_error(struct ahd_softc *ahd, u_int lqistat1) { /* * Clear the sources of the interrupts. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, CLRLQIINT1, lqistat1); /* * If the "illegal" phase changes were in response * to our ATN to flag a CRC error, AND we ended up * on packet boundaries, clear the error, restart the * LQI manager as appropriate, and go on our merry * way toward sending the message. Otherwise, reset * the bus to clear the error. 
*/ ahd_set_active_fifo(ahd); if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0 && (ahd_inb(ahd, MDFFSTAT) & DLZERO) != 0) { if ((lqistat1 & LQIPHASE_LQ) != 0) { printk("LQIRETRY for LQIPHASE_LQ\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } else if ((lqistat1 & LQIPHASE_NLQ) != 0) { printk("LQIRETRY for LQIPHASE_NLQ\n"); ahd_outb(ahd, LQCTL2, LQIRETRY); } else panic("ahd_handle_lqiphase_error: No phase errors\n"); ahd_dump_card_state(ahd); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_unpause(ahd); } else { printk("Resetting Channel for LQI Phase error\n"); ahd_dump_card_state(ahd); ahd_reset_channel(ahd, 'A', /*Initiate Reset*/TRUE); } } /* * Packetized unexpected or expected busfree. * Entered in mode based on busfreetime. */ static int ahd_handle_pkt_busfree(struct ahd_softc *ahd, u_int busfreetime) { u_int lqostat1; AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); lqostat1 = ahd_inb(ahd, LQOSTAT1); if ((lqostat1 & LQOBUSFREE) != 0) { struct scb *scb; u_int scbid; u_int saved_scbptr; u_int waiting_h; u_int waiting_t; u_int next; /* * The LQO manager detected an unexpected busfree * either: * * 1) During an outgoing LQ. * 2) After an outgoing LQ but before the first * REQ of the command packet. * 3) During an outgoing command packet. * * In all cases, CURRSCB is pointing to the * SCB that encountered the failure. Clean * up the queue, clear SELDO and LQOBUSFREE, * and allow the sequencer to restart the select * out at its lesure. */ ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); scbid = ahd_inw(ahd, CURRSCB); scb = ahd_lookup_scb(ahd, scbid); if (scb == NULL) panic("SCB not valid during LQOBUSFREE"); /* * Clear the status. */ ahd_outb(ahd, CLRLQOINT1, CLRLQOBUSFREE); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) ahd_outb(ahd, CLRLQOINT1, 0); ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); ahd_flush_device_writes(ahd); ahd_outb(ahd, CLRSINT0, CLRSELDO); /* * Return the LQO manager to its idle loop. 
It will * not do this automatically if the busfree occurs * after the first REQ of either the LQ or command * packet or between the LQ and command packet. */ ahd_outb(ahd, LQCTL2, ahd_inb(ahd, LQCTL2) | LQOTOIDLE); /* * Update the waiting for selection queue so * we restart on the correct SCB. */ waiting_h = ahd_inw(ahd, WAITING_TID_HEAD); saved_scbptr = ahd_get_scbptr(ahd); if (waiting_h != scbid) { ahd_outw(ahd, WAITING_TID_HEAD, scbid); waiting_t = ahd_inw(ahd, WAITING_TID_TAIL); if (waiting_t == waiting_h) { ahd_outw(ahd, WAITING_TID_TAIL, scbid); next = SCB_LIST_NULL; } else { ahd_set_scbptr(ahd, waiting_h); next = ahd_inw_scbram(ahd, SCB_NEXT2); } ahd_set_scbptr(ahd, scbid); ahd_outw(ahd, SCB_NEXT2, next); } ahd_set_scbptr(ahd, saved_scbptr); if (scb->crc_retry_count < AHD_MAX_LQ_CRC_ERRORS) { if (SCB_IS_SILENT(scb) == FALSE) { ahd_print_path(ahd, scb); printk("Probable outgoing LQ CRC error. " "Retrying command\n"); } scb->crc_retry_count++; } else { ahd_set_transaction_status(scb, CAM_UNCOR_PARITY); ahd_freeze_scb(scb); ahd_freeze_devq(ahd, scb); } /* Return unpausing the sequencer. */ return (0); } else if ((ahd_inb(ahd, PERRDIAG) & PARITYERR) != 0) { /* * Ignore what are really parity errors that * occur on the last REQ of a free running * clock prior to going busfree. Some drives * do not properly active negate just before * going busfree resulting in a parity glitch. */ ahd_outb(ahd, CLRSINT1, CLRSCSIPERR|CLRBUSFREE); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MASKED_ERRORS) != 0) printk("%s: Parity on last REQ detected " "during busfree phase.\n", ahd_name(ahd)); #endif /* Return unpausing the sequencer. 
*/ return (0); } if (ahd->src_mode != AHD_MODE_SCSI) { u_int scbid; struct scb *scb; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); ahd_print_path(ahd, scb); printk("Unexpected PKT busfree condition\n"); ahd_dump_card_state(ahd); ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), 'A', SCB_GET_LUN(scb), SCB_GET_TAG(scb), ROLE_INITIATOR, CAM_UNEXP_BUSFREE); /* Return restarting the sequencer. */ return (1); } printk("%s: Unexpected PKT busfree condition\n", ahd_name(ahd)); ahd_dump_card_state(ahd); /* Restart the sequencer. */ return (1); } /* * Non-packetized unexpected or expected busfree. */ static int ahd_handle_nonpkt_busfree(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int lastphase; u_int saved_scsiid; u_int saved_lun; u_int target; u_int initiator_role_id; u_int scbid; u_int ppr_busfree; int printerror; /* * Look at what phase we were last in. If its message out, * chances are pretty good that the busfree was in response * to one of our abort requests. */ lastphase = ahd_inb(ahd, LASTPHASE); saved_scsiid = ahd_inb(ahd, SAVED_SCSIID); saved_lun = ahd_inb(ahd, SAVED_LUN); target = SCSIID_TARGET(ahd, saved_scsiid); initiator_role_id = SCSIID_OUR_ID(saved_scsiid); ahd_compile_devinfo(&devinfo, initiator_role_id, target, saved_lun, 'A', ROLE_INITIATOR); printerror = 1; scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); if (scb != NULL && (ahd_inb(ahd, SEQ_FLAGS) & NOT_IDENTIFIED) != 0) scb = NULL; ppr_busfree = (ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0; if (lastphase == P_MESGOUT) { u_int tag; tag = SCB_LIST_NULL; if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT_TAG, TRUE) || ahd_sent_msg(ahd, AHDMSG_1B, MSG_ABORT, TRUE)) { int found; int sent_msg; if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); printk("Abort for unidentified " "connection completed.\n"); /* restart the sequencer. 
*/ return (1); } sent_msg = ahd->msgout_buf[ahd->msgout_index - 1]; ahd_print_path(ahd, scb); printk("SCB %d - Abort%s Completed.\n", SCB_GET_TAG(scb), sent_msg == MSG_ABORT_TAG ? "" : " Tag"); if (sent_msg == MSG_ABORT_TAG) tag = SCB_GET_TAG(scb); if ((scb->flags & SCB_EXTERNAL_RESET) != 0) { /* * This abort is in response to an * unexpected switch to command phase * for a packetized connection. Since * the identify message was never sent, * "saved lun" is 0. We really want to * abort only the SCB that encountered * this error, which could have a different * lun. The SCB will be retried so the OS * will see the UA after renegotiating to * packetized. */ tag = SCB_GET_TAG(scb); saved_lun = scb->hscb->lun; } found = ahd_abort_scbs(ahd, target, 'A', saved_lun, tag, ROLE_INITIATOR, CAM_REQ_ABORTED); printk("found == 0x%x\n", found); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_BUS_DEV_RESET, TRUE)) { #ifdef __FreeBSD__ /* * Don't mark the user's request for this BDR * as completing with CAM_BDR_SENT. CAM3 * specifies CAM_REQ_CMP. */ if (scb != NULL && scb->io_ctx->ccb_h.func_code== XPT_RESET_DEV && ahd_match_scb(ahd, scb, target, 'A', CAM_LUN_WILDCARD, SCB_LIST_NULL, ROLE_INITIATOR)) ahd_set_transaction_status(scb, CAM_REQ_CMP); #endif ahd_handle_devreset(ahd, &devinfo, CAM_LUN_WILDCARD, CAM_BDR_SENT, "Bus Device Reset", /*verbose_level*/0); printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, FALSE) && ppr_busfree == 0) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; /* * PPR Rejected. * * If the previous negotiation was packetized, * this could be because the device has been * reset without our knowledge. Force our * current negotiation to async and retry the * negotiation. Otherwise retry the command * with non-ppr negotiation. 
*/ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("PPR negotiation rejected busfree.\n"); #endif tinfo = ahd_fetch_transinfo(ahd, devinfo.channel, devinfo.our_scsiid, devinfo.target, &tstate); if ((tinfo->curr.ppr_options & MSG_EXT_PPR_IU_REQ)!=0) { ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); /* * The expect PPR busfree handler below * will effect the retry and necessary * abort. */ } else { tinfo->curr.transport_version = 2; tinfo->goal.transport_version = 2; tinfo->goal.ppr_options = 0; if (scb != NULL) { /* * Remove any SCBs in the waiting * for selection queue that may * also be for this target so that * command ordering is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); } printerror = 0; } } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-narrow and * retry command. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("WDTR negotiation rejected busfree.\n"); #endif ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); if (scb != NULL) { /* * Remove any SCBs in the waiting for * selection queue that may also be for * this target so that command ordering * is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); } printerror = 0; } else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, FALSE) && ppr_busfree == 0) { /* * Negotiation Rejected. Go-async and * retry command. 
*/ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("SDTR negotiation rejected busfree.\n"); #endif ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE); if (scb != NULL) { /* * Remove any SCBs in the waiting for * selection queue that may also be for * this target so that command ordering * is preserved. */ ahd_freeze_devq(ahd, scb); ahd_qinfifo_requeue_tail(ahd, scb); } printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_IDE_BUSFREE) != 0 && ahd_sent_msg(ahd, AHDMSG_1B, MSG_INITIATOR_DET_ERR, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("Expected IDE Busfree\n"); #endif printerror = 0; } else if ((ahd->msg_flags & MSG_FLAG_EXPECT_QASREJ_BUSFREE) && ahd_sent_msg(ahd, AHDMSG_1B, MSG_MESSAGE_REJECT, TRUE)) { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("Expected QAS Reject Busfree\n"); #endif printerror = 0; } } /* * The busfree required flag is honored at the end of * the message phases. We check it last in case we * had to send some other message that caused a busfree. 
*/ if (scb != NULL && printerror != 0 && (lastphase == P_MESGIN || lastphase == P_MESGOUT) && ((ahd->msg_flags & MSG_FLAG_EXPECT_PPR_BUSFREE) != 0)) { ahd_freeze_devq(ahd, scb); ahd_set_transaction_status(scb, CAM_REQUEUE_REQ); ahd_freeze_scb(scb); if ((ahd->msg_flags & MSG_FLAG_IU_REQ_CHANGED) != 0) { ahd_abort_scbs(ahd, SCB_GET_TARGET(ahd, scb), SCB_GET_CHANNEL(ahd, scb), SCB_GET_LUN(scb), SCB_LIST_NULL, ROLE_INITIATOR, CAM_REQ_ABORTED); } else { #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) printk("PPR Negotiation Busfree.\n"); #endif ahd_done(ahd, scb); } printerror = 0; } if (printerror != 0) { int aborted; aborted = 0; if (scb != NULL) { u_int tag; if ((scb->hscb->control & TAG_ENB) != 0) tag = SCB_GET_TAG(scb); else tag = SCB_LIST_NULL; ahd_print_path(ahd, scb); aborted = ahd_abort_scbs(ahd, target, 'A', SCB_GET_LUN(scb), tag, ROLE_INITIATOR, CAM_UNEXP_BUSFREE); } else { /* * We had not fully identified this connection, * so we cannot abort anything. */ printk("%s: ", ahd_name(ahd)); } printk("Unexpected busfree %s, %d SCBs aborted, " "PRGMCNT == 0x%x\n", ahd_lookup_phase_entry(lastphase)->phasemsg, aborted, ahd_inw(ahd, PRGMCNT)); ahd_dump_card_state(ahd); if (lastphase != P_BUSFREE) ahd_force_renegotiation(ahd, &devinfo); } /* Always restart the sequencer. */ return (1); } static void ahd_handle_proto_violation(struct ahd_softc *ahd) { struct ahd_devinfo devinfo; struct scb *scb; u_int scbid; u_int seq_flags; u_int curphase; u_int lastphase; int found; ahd_fetch_devinfo(ahd, &devinfo); scbid = ahd_get_scbptr(ahd); scb = ahd_lookup_scb(ahd, scbid); seq_flags = ahd_inb(ahd, SEQ_FLAGS); curphase = ahd_inb(ahd, SCSISIGI) & PHASE_MASK; lastphase = ahd_inb(ahd, LASTPHASE); if ((seq_flags & NOT_IDENTIFIED) != 0) { /* * The reconnecting target either did not send an * identify message, or did, but we didn't find an SCB * to match. */ ahd_print_devinfo(ahd, &devinfo); printk("Target did not send an IDENTIFY message. 
" "LASTPHASE = 0x%x.\n", lastphase); scb = NULL; } else if (scb == NULL) { /* * We don't seem to have an SCB active for this * transaction. Print an error and reset the bus. */ ahd_print_devinfo(ahd, &devinfo); printk("No SCB found during protocol violation\n"); goto proto_violation_reset; } else { ahd_set_transaction_status(scb, CAM_SEQUENCE_FAIL); if ((seq_flags & NO_CDB_SENT) != 0) { ahd_print_path(ahd, scb); printk("No or incomplete CDB sent to device.\n"); } else if ((ahd_inb_scbram(ahd, SCB_CONTROL) & STATUS_RCVD) == 0) { /* * The target never bothered to provide status to * us prior to completing the command. Since we don't * know the disposition of this command, we must attempt * to abort it. Assert ATN and prepare to send an abort * message. */ ahd_print_path(ahd, scb); printk("Completed command without status.\n"); } else { ahd_print_path(ahd, scb); printk("Unknown protocol violation.\n"); ahd_dump_card_state(ahd); } } if ((lastphase & ~P_DATAIN_DT) == 0 || lastphase == P_COMMAND) { proto_violation_reset: /* * Target either went directly to data * phase or didn't respond to our ATN. * The only safe thing to do is to blow * it away with a bus reset. */ found = ahd_reset_channel(ahd, 'A', TRUE); printk("%s: Issued Channel %c Bus Reset. " "%d SCBs aborted\n", ahd_name(ahd), 'A', found); } else { /* * Leave the selection hardware off in case * this abort attempt will affect yet to * be sent commands. */ ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO); ahd_assert_atn(ahd); ahd_outb(ahd, MSG_OUT, HOST_MSG); if (scb == NULL) { ahd_print_devinfo(ahd, &devinfo); ahd->msgout_buf[0] = MSG_ABORT_TASK; ahd->msgout_len = 1; ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT; } else { ahd_print_path(ahd, scb); scb->flags |= SCB_ABORT; } printk("Protocol violation %s. Attempting to abort.\n", ahd_lookup_phase_entry(curphase)->phasemsg); } } /* * Force renegotiation to occur the next time we initiate * a command to the current device. 
*/ static void ahd_force_renegotiation(struct ahd_softc *ahd, struct ahd_devinfo *devinfo) { struct ahd_initiator_tinfo *targ_info; struct ahd_tmode_tstate *tstate; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) { ahd_print_devinfo(ahd, devinfo); printk("Forcing renegotiation\n"); } #endif targ_info = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); ahd_update_neg_request(ahd, devinfo, tstate, targ_info, AHD_NEG_IF_NON_ASYNC); } #define AHD_MAX_STEPS 2000 static void ahd_clear_critical_section(struct ahd_softc *ahd) { ahd_mode_state saved_modes; int stepping; int steps; int first_instr; u_int simode0; u_int simode1; u_int simode3; u_int lqimode0; u_int lqimode1; u_int lqomode0; u_int lqomode1; if (ahd->num_critical_sections == 0) return; stepping = FALSE; steps = 0; first_instr = 0; simode0 = 0; simode1 = 0; simode3 = 0; lqimode0 = 0; lqimode1 = 0; lqomode0 = 0; lqomode1 = 0; saved_modes = ahd_save_modes(ahd); for (;;) { struct cs *cs; u_int seqaddr; u_int i; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); seqaddr = ahd_inw(ahd, CURADDR); cs = ahd->critical_sections; for (i = 0; i < ahd->num_critical_sections; i++, cs++) { if (cs->begin < seqaddr && cs->end >= seqaddr) break; } if (i == ahd->num_critical_sections) break; if (steps > AHD_MAX_STEPS) { printk("%s: Infinite loop in critical section\n" "%s: First Instruction 0x%x now 0x%x\n", ahd_name(ahd), ahd_name(ahd), first_instr, seqaddr); ahd_dump_card_state(ahd); panic("critical section loop"); } steps++; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: Single stepping at 0x%x\n", ahd_name(ahd), seqaddr); #endif if (stepping == FALSE) { first_instr = seqaddr; ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); simode0 = ahd_inb(ahd, SIMODE0); simode3 = ahd_inb(ahd, SIMODE3); lqimode0 = ahd_inb(ahd, LQIMODE0); lqimode1 = ahd_inb(ahd, LQIMODE1); lqomode0 = ahd_inb(ahd, LQOMODE0); lqomode1 = ahd_inb(ahd, LQOMODE1); ahd_outb(ahd, SIMODE0, 0); 
ahd_outb(ahd, SIMODE3, 0); ahd_outb(ahd, LQIMODE0, 0); ahd_outb(ahd, LQIMODE1, 0); ahd_outb(ahd, LQOMODE0, 0); ahd_outb(ahd, LQOMODE1, 0); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); simode1 = ahd_inb(ahd, SIMODE1); /* * We don't clear ENBUSFREE. Unfortunately * we cannot re-enable busfree detection within * the current connection, so we must leave it * on while single stepping. */ ahd_outb(ahd, SIMODE1, simode1 & ENBUSFREE); ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) | STEP); stepping = TRUE; } ahd_outb(ahd, CLRSINT1, CLRBUSFREE); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode); ahd_outb(ahd, HCNTRL, ahd->unpause); while (!ahd_is_paused(ahd)) ahd_delay(200); ahd_update_modes(ahd); } if (stepping) { ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, SIMODE0, simode0); ahd_outb(ahd, SIMODE3, simode3); ahd_outb(ahd, LQIMODE0, lqimode0); ahd_outb(ahd, LQIMODE1, lqimode1); ahd_outb(ahd, LQOMODE0, lqomode0); ahd_outb(ahd, LQOMODE1, lqomode1); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); ahd_outb(ahd, SEQCTL0, ahd_inb(ahd, SEQCTL0) & ~STEP); ahd_outb(ahd, SIMODE1, simode1); /* * SCSIINT seems to glitch occasionally when * the interrupt masks are restored. Clear SCSIINT * one more time so that only persistent errors * are seen as a real interrupt. */ ahd_outb(ahd, CLRINT, CLRSCSIINT); } ahd_restore_modes(ahd, saved_modes); } /* * Clear any pending interrupt status. 
*/ static void ahd_clear_intstat(struct ahd_softc *ahd) { AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK), ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK)); /* Clear any interrupt conditions this may have caused */ ahd_outb(ahd, CLRLQIINT0, CLRLQIATNQAS|CLRLQICRCT1|CLRLQICRCT2 |CLRLQIBADLQT|CLRLQIATNLQ|CLRLQIATNCMD); ahd_outb(ahd, CLRLQIINT1, CLRLQIPHASE_LQ|CLRLQIPHASE_NLQ|CLRLIQABORT |CLRLQICRCI_LQ|CLRLQICRCI_NLQ|CLRLQIBADLQI |CLRLQIOVERI_LQ|CLRLQIOVERI_NLQ|CLRNONPACKREQ); ahd_outb(ahd, CLRLQOINT0, CLRLQOTARGSCBPERR|CLRLQOSTOPT2|CLRLQOATNLQ |CLRLQOATNPKT|CLRLQOTCRC); ahd_outb(ahd, CLRLQOINT1, CLRLQOINITSCBPERR|CLRLQOSTOPI2|CLRLQOBADQAS |CLRLQOBUSFREE|CLRLQOPHACHGINPKT); if ((ahd->bugs & AHD_CLRLQO_AUTOCLR_BUG) != 0) { ahd_outb(ahd, CLRLQOINT0, 0); ahd_outb(ahd, CLRLQOINT1, 0); } ahd_outb(ahd, CLRSINT3, CLRNTRAMPERR|CLROSRAMPERR); ahd_outb(ahd, CLRSINT1, CLRSELTIMEO|CLRATNO|CLRSCSIRSTI |CLRBUSFREE|CLRSCSIPERR|CLRREQINIT); ahd_outb(ahd, CLRSINT0, CLRSELDO|CLRSELDI|CLRSELINGO |CLRIOERR|CLROVERRUN); ahd_outb(ahd, CLRINT, CLRSCSIINT); } /**************************** Debugging Routines ******************************/ #ifdef AHD_DEBUG uint32_t ahd_debug = AHD_DEBUG_OPTS; #endif #if 0 void ahd_print_scb(struct scb *scb) { struct hardware_scb *hscb; int i; hscb = scb->hscb; printk("scb:%p control:0x%x scsiid:0x%x lun:%d cdb_len:%d\n", (void *)scb, hscb->control, hscb->scsiid, hscb->lun, hscb->cdb_len); printk("Shared Data: "); for (i = 0; i < sizeof(hscb->shared_data.idata.cdb); i++) printk("%#02x", hscb->shared_data.idata.cdb[i]); printk(" dataptr:%#x%x datacnt:%#x sgptr:%#x tag:%#x\n", (uint32_t)((ahd_le64toh(hscb->dataptr) >> 32) & 0xFFFFFFFF), (uint32_t)(ahd_le64toh(hscb->dataptr) & 0xFFFFFFFF), ahd_le32toh(hscb->datacnt), ahd_le32toh(hscb->sgptr), SCB_GET_TAG(scb)); ahd_dump_sglist(scb); } #endif /* 0 */ /************************* Transfer Negotiation *******************************/ /* * Allocate per target mode instance (ID we respond to as a target) * 
transfer negotiation data structures. */ static struct ahd_tmode_tstate * ahd_alloc_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel) { struct ahd_tmode_tstate *master_tstate; struct ahd_tmode_tstate *tstate; int i; master_tstate = ahd->enabled_targets[ahd->our_id]; if (ahd->enabled_targets[scsi_id] != NULL && ahd->enabled_targets[scsi_id] != master_tstate) panic("%s: ahd_alloc_tstate - Target already allocated", ahd_name(ahd)); tstate = kmalloc(sizeof(*tstate), GFP_ATOMIC); if (tstate == NULL) return (NULL); /* * If we have allocated a master tstate, copy user settings from * the master tstate (taken from SRAM or the EEPROM) for this * channel, but reset our current and goal settings to async/narrow * until an initiator talks to us. */ if (master_tstate != NULL) { memcpy(tstate, master_tstate, sizeof(*tstate)); memset(tstate->enabled_luns, 0, sizeof(tstate->enabled_luns)); for (i = 0; i < 16; i++) { memset(&tstate->transinfo[i].curr, 0, sizeof(tstate->transinfo[i].curr)); memset(&tstate->transinfo[i].goal, 0, sizeof(tstate->transinfo[i].goal)); } } else memset(tstate, 0, sizeof(*tstate)); ahd->enabled_targets[scsi_id] = tstate; return (tstate); } #ifdef AHD_TARGET_MODE /* * Free per target mode instance (ID we respond to as a target) * transfer negotiation data structures. */ static void ahd_free_tstate(struct ahd_softc *ahd, u_int scsi_id, char channel, int force) { struct ahd_tmode_tstate *tstate; /* * Don't clean up our "master" tstate. * It has our default user settings. */ if (scsi_id == ahd->our_id && force == FALSE) return; tstate = ahd->enabled_targets[scsi_id]; if (tstate != NULL) kfree(tstate); ahd->enabled_targets[scsi_id] = NULL; } #endif /* * Called when we have an active connection to a target on the bus, * this function finds the nearest period to the input period limited * by the capabilities of the bus connectivity of and sync settings for * the target. 
 */
static void
ahd_devlimited_syncrate(struct ahd_softc *ahd,
                        struct ahd_initiator_tinfo *tinfo,
                        u_int *period, u_int *ppr_options, role_t role)
{
        struct ahd_transinfo *transinfo;
        u_int maxsync;

        if ((ahd_inb(ahd, SBLKCTL) & ENAB40) != 0
         && (ahd_inb(ahd, SSTAT2) & EXP_ACTIVE) == 0) {
                /* LVD bus with no expander active: paced rates are allowed. */
                maxsync = AHD_SYNCRATE_PACED;
        } else {
                maxsync = AHD_SYNCRATE_ULTRA;
                /* Can't do DT related options on an SE bus */
                *ppr_options &= MSG_EXT_PPR_QAS_REQ;
        }
        /*
         * Never allow a value higher than our current goal
         * period otherwise we may allow a target initiated
         * negotiation to go above the limit as set by the
         * user.  In the case of an initiator initiated
         * sync negotiation, we limit based on the user
         * setting.  This allows the system to still accept
         * incoming negotiations even if target initiated
         * negotiation is not performed.
         */
        if (role == ROLE_TARGET)
                transinfo = &tinfo->user;
        else
                transinfo = &tinfo->goal;
        *ppr_options &= (transinfo->ppr_options|MSG_EXT_PPR_PCOMP_EN);
        if (transinfo->width == MSG_EXT_WDTR_BUS_8_BIT) {
                /* Narrow connections cannot run DT; cap at Ultra2. */
                maxsync = max(maxsync, (u_int)AHD_SYNCRATE_ULTRA2);
                *ppr_options &= ~MSG_EXT_PPR_DT_REQ;
        }
        if (transinfo->period == 0) {
                /* Period of 0 means async: drop all PPR options too. */
                *period = 0;
                *ppr_options = 0;
        } else {
                *period = max(*period, (u_int)transinfo->period);
                ahd_find_syncrate(ahd, period, ppr_options, maxsync);
        }
}

/*
 * Look up the valid period to SCSIRATE conversion in our table.
 * Return the period and offset that should be sent to the target
 * if this was the beginning of an SDTR.
 */
void
ahd_find_syncrate(struct ahd_softc *ahd, u_int *period,
                  u_int *ppr_options, u_int maxsync)
{
        if (*period < maxsync)
                *period = maxsync;

        /* DT is only valid up to the minimum DT rate. */
        if ((*ppr_options & MSG_EXT_PPR_DT_REQ) != 0
         && *period > AHD_SYNCRATE_MIN_DT)
                *ppr_options &= ~MSG_EXT_PPR_DT_REQ;

        /* Slower than our slowest sync rate means async. */
        if (*period > AHD_SYNCRATE_MIN)
                *period = 0;

        /* Honor PPR option conformance rules. */
        if (*period > AHD_SYNCRATE_PACED)
                *ppr_options &= ~MSG_EXT_PPR_RTI;

        if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
                *ppr_options &= (MSG_EXT_PPR_DT_REQ|MSG_EXT_PPR_QAS_REQ);

        if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0)
                *ppr_options &= MSG_EXT_PPR_QAS_REQ;

        /* Skip all PACED only entries if IU is not available */
        if ((*ppr_options & MSG_EXT_PPR_IU_REQ) == 0
         && *period < AHD_SYNCRATE_DT)
                *period = AHD_SYNCRATE_DT;

        /* Skip all DT only entries if DT is not available */
        if ((*ppr_options & MSG_EXT_PPR_DT_REQ) == 0
         && *period < AHD_SYNCRATE_ULTRA2)
                *period = AHD_SYNCRATE_ULTRA2;
}

/*
 * Truncate the given synchronous offset to a value the
 * current adapter type and syncrate are capable of.
 */
static void
ahd_validate_offset(struct ahd_softc *ahd,
                    struct ahd_initiator_tinfo *tinfo,
                    u_int period, u_int *offset, int wide,
                    role_t role)
{
        u_int maxoffset;

        /* Limit offset to what we can do */
        if (period == 0)
                maxoffset = 0;
        else if (period <= AHD_SYNCRATE_PACED) {
                /* Paced offset ceiling differs on buggy (H2A4) silicon. */
                if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0)
                        maxoffset = MAX_OFFSET_PACED_BUG;
                else
                        maxoffset = MAX_OFFSET_PACED;
        } else
                maxoffset = MAX_OFFSET_NON_PACED;
        *offset = min(*offset, maxoffset);
        if (tinfo != NULL) {
                /* As target honor user limits; as initiator honor goal. */
                if (role == ROLE_TARGET)
                        *offset = min(*offset, (u_int)tinfo->user.offset);
                else
                        *offset = min(*offset, (u_int)tinfo->goal.offset);
        }
}

/*
 * Truncate the given transfer width parameter to a value the
 * current adapter type is capable of.
*/ static void ahd_validate_width(struct ahd_softc *ahd, struct ahd_initiator_tinfo *tinfo, u_int *bus_width, role_t role) { switch (*bus_width) { default: if (ahd->features & AHD_WIDE) { /* Respond Wide */ *bus_width = MSG_EXT_WDTR_BUS_16_BIT; break; } /* FALLTHROUGH */ case MSG_EXT_WDTR_BUS_8_BIT: *bus_width = MSG_EXT_WDTR_BUS_8_BIT; break; } if (tinfo != NULL) { if (role == ROLE_TARGET) *bus_width = min((u_int)tinfo->user.width, *bus_width); else *bus_width = min((u_int)tinfo->goal.width, *bus_width); } } /* * Update the bitmask of targets for which the controller should * negotiate with at the next convenient opportunity. This currently * means the next time we send the initial identify messages for * a new transaction. */ int ahd_update_neg_request(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct ahd_tmode_tstate *tstate, struct ahd_initiator_tinfo *tinfo, ahd_neg_type neg_type) { u_int auto_negotiate_orig; auto_negotiate_orig = tstate->auto_negotiate; if (neg_type == AHD_NEG_ALWAYS) { /* * Force our "current" settings to be * unknown so that unless a bus reset * occurs the need to renegotiate is * recorded persistently. */ if ((ahd->features & AHD_WIDE) != 0) tinfo->curr.width = AHD_WIDTH_UNKNOWN; tinfo->curr.period = AHD_PERIOD_UNKNOWN; tinfo->curr.offset = AHD_OFFSET_UNKNOWN; } if (tinfo->curr.period != tinfo->goal.period || tinfo->curr.width != tinfo->goal.width || tinfo->curr.offset != tinfo->goal.offset || tinfo->curr.ppr_options != tinfo->goal.ppr_options || (neg_type == AHD_NEG_IF_NON_ASYNC && (tinfo->goal.offset != 0 || tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT || tinfo->goal.ppr_options != 0))) tstate->auto_negotiate |= devinfo->target_mask; else tstate->auto_negotiate &= ~devinfo->target_mask; return (auto_negotiate_orig != tstate->auto_negotiate); } /* * Update the user/goal/curr tables of synchronous negotiation * parameters as well as, in the case of a current or active update, * any data structures on the host controller. 
In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
 */
void
ahd_set_syncrate(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
                 u_int period, u_int offset, u_int ppr_options,
                 u_int type, int paused)
{
        struct ahd_initiator_tinfo *tinfo;
        struct ahd_tmode_tstate *tstate;
        u_int old_period;
        u_int old_offset;
        u_int old_ppr;
        int   active;
        int   update_needed;

        active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE;
        update_needed = 0;

        /* Either value being 0 means async: normalize both to 0. */
        if (period == 0 || offset == 0) {
                period = 0;
                offset = 0;
        }

        tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
                                    devinfo->target, &tstate);

        if ((type & AHD_TRANS_USER) != 0) {
                tinfo->user.period = period;
                tinfo->user.offset = offset;
                tinfo->user.ppr_options = ppr_options;
        }

        if ((type & AHD_TRANS_GOAL) != 0) {
                tinfo->goal.period = period;
                tinfo->goal.offset = offset;
                tinfo->goal.ppr_options = ppr_options;
        }

        old_period = tinfo->curr.period;
        old_offset = tinfo->curr.offset;
        old_ppr    = tinfo->curr.ppr_options;

        if ((type & AHD_TRANS_CUR) != 0
         && (old_period != period
          || old_offset != offset
          || old_ppr != ppr_options)) {

                update_needed++;

                tinfo->curr.period = period;
                tinfo->curr.offset = offset;
                tinfo->curr.ppr_options = ppr_options;

                /* Notify the transport layer of the negotiation change. */
                ahd_send_async(ahd, devinfo->channel, devinfo->target,
                               CAM_LUN_WILDCARD, AC_TRANSFER_NEG);
                if (bootverbose) {
                        if (offset != 0) {
                                int options;

                                printk("%s: target %d synchronous with "
                                       "period = 0x%x, offset = 0x%x",
                                       ahd_name(ahd), devinfo->target,
                                       period, offset);
                                options = 0;
                                if ((ppr_options & MSG_EXT_PPR_RD_STRM) != 0) {
                                        printk("(RDSTRM");
                                        options++;
                                }
                                if ((ppr_options & MSG_EXT_PPR_DT_REQ) != 0) {
                                        printk("%s", options ? "|DT" : "(DT");
                                        options++;
                                }
                                if ((ppr_options & MSG_EXT_PPR_IU_REQ) != 0) {
                                        printk("%s", options ? "|IU" : "(IU");
                                        options++;
                                }
                                if ((ppr_options & MSG_EXT_PPR_RTI) != 0) {
                                        printk("%s", options ? "|RTI" : "(RTI");
                                        options++;
                                }
                                if ((ppr_options & MSG_EXT_PPR_QAS_REQ) != 0) {
                                        printk("%s", options ? "|QAS" : "(QAS");
                                        options++;
                                }
                                if (options != 0)
                                        printk(")\n");
                                else
                                        printk("\n");
                        } else {
                                printk("%s: target %d using "
                                       "asynchronous transfers%s\n",
                                       ahd_name(ahd), devinfo->target,
                                       (ppr_options & MSG_EXT_PPR_QAS_REQ) != 0
                                     ?  "(QAS)" : "");
                        }
                }
        }
        /*
         * Always refresh the neg-table to handle the case of the
         * sequencer setting the ENATNO bit for a MK_MESSAGE request.
         * We will always renegotiate in that case if this is a
         * packetized request.  Also manage the busfree expected flag
         * from this common routine so that we catch changes due to
         * WDTR or SDTR messages.
         */
        if ((type & AHD_TRANS_CUR) != 0) {
                /* The sequencer must be paused while the table is updated. */
                if (!paused)
                        ahd_pause(ahd);
                ahd_update_neg_table(ahd, devinfo, &tinfo->curr);
                if (!paused)
                        ahd_unpause(ahd);
                if (ahd->msg_type != MSG_TYPE_NONE) {
                        if ((old_ppr & MSG_EXT_PPR_IU_REQ)
                         != (ppr_options & MSG_EXT_PPR_IU_REQ)) {
#ifdef AHD_DEBUG
                                if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
                                        ahd_print_devinfo(ahd, devinfo);
                                        printk("Expecting IU Change busfree\n");
                                }
#endif
                                ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
                                               |  MSG_FLAG_IU_REQ_CHANGED;
                        }
                        if ((old_ppr & MSG_EXT_PPR_IU_REQ) != 0) {
#ifdef AHD_DEBUG
                                if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
                                        printk("PPR with IU_REQ outstanding\n");
#endif
                                ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE;
                        }
                }
        }

        update_needed += ahd_update_neg_request(ahd, devinfo, tstate,
                                                tinfo, AHD_NEG_TO_GOAL);

        if (update_needed && active)
                ahd_update_pending_scbs(ahd);
}

/*
 * Update the user/goal/curr tables of wide negotiation
 * parameters as well as, in the case of a current or active update,
 * any data structures on the host controller.  In the case of an
 * active update, the specified target is currently talking to us on
 * the bus, so the transfer parameter update must take effect
 * immediately.
*/ void ahd_set_width(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int width, u_int type, int paused) { struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; u_int oldwidth; int active; int update_needed; active = (type & AHD_TRANS_ACTIVE) == AHD_TRANS_ACTIVE; update_needed = 0; tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid, devinfo->target, &tstate); if ((type & AHD_TRANS_USER) != 0) tinfo->user.width = width; if ((type & AHD_TRANS_GOAL) != 0) tinfo->goal.width = width; oldwidth = tinfo->curr.width; if ((type & AHD_TRANS_CUR) != 0 && oldwidth != width) { update_needed++; tinfo->curr.width = width; ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_TRANSFER_NEG); if (bootverbose) { printk("%s: target %d using %dbit transfers\n", ahd_name(ahd), devinfo->target, 8 * (0x01 << width)); } } if ((type & AHD_TRANS_CUR) != 0) { if (!paused) ahd_pause(ahd); ahd_update_neg_table(ahd, devinfo, &tinfo->curr); if (!paused) ahd_unpause(ahd); } update_needed += ahd_update_neg_request(ahd, devinfo, tstate, tinfo, AHD_NEG_TO_GOAL); if (update_needed && active) ahd_update_pending_scbs(ahd); } /* * Update the current state of tagged queuing for a given target. 
 */
static void
ahd_set_tags(struct ahd_softc *ahd, struct scsi_cmnd *cmd,
             struct ahd_devinfo *devinfo, ahd_queue_alg alg)
{
        struct scsi_device *sdev = cmd->device;

        /* Delegate to the platform layer, then publish the change. */
        ahd_platform_set_tags(ahd, sdev, devinfo, alg);
        ahd_send_async(ahd, devinfo->channel, devinfo->target,
                       devinfo->lun, AC_TRANSFER_NEG);
}

/*
 * Write the "current" transfer negotiation parameters for this target
 * into the controller's per-target negotiation table, applying any
 * silicon-revision specific adjustments (pacing, precomp, slewrate,
 * slow-CRC workarounds).
 */
static void
ahd_update_neg_table(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
                     struct ahd_transinfo *tinfo)
{
        ahd_mode_state saved_modes;
        u_int period;
        u_int ppr_opts;
        u_int con_opts;
        u_int offset;
        u_int saved_negoaddr;
        uint8_t iocell_opts[sizeof(ahd->iocell_opts)];

        saved_modes = ahd_save_modes(ahd);
        ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

        saved_negoaddr = ahd_inb(ahd, NEGOADDR);
        ahd_outb(ahd, NEGOADDR, devinfo->target);
        period = tinfo->period;
        offset = tinfo->offset;
        memcpy(iocell_opts, ahd->iocell_opts, sizeof(ahd->iocell_opts));
        ppr_opts = tinfo->ppr_options & (MSG_EXT_PPR_QAS_REQ|MSG_EXT_PPR_DT_REQ
                                        |MSG_EXT_PPR_IU_REQ|MSG_EXT_PPR_RTI);
        con_opts = 0;
        if (period == 0)
                period = AHD_SYNCRATE_ASYNC;
        if (period == AHD_SYNCRATE_160) {

                if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
                        /*
                         * When the SPI4 spec was finalized, PACE transfers
                         * was not made a configurable option in the PPR
                         * message.  Instead it is assumed to be enabled for
                         * any syncrate faster than 80MHz.  Nevertheless,
                         * Harpoon2A4 allows this to be configurable.
                         *
                         * Harpoon2A4 also assumes at most 2 data bytes per
                         * negotiated REQ/ACK offset.  Paced transfers take
                         * 4, so we must adjust our offset.
                         */
                        ppr_opts |= PPROPT_PACE;
                        offset *= 2;

                        /*
                         * Harpoon2A assumed that there would be a
                         * fallback rate between 160MHz and 80MHz,
                         * so 7 is used as the period factor rather
                         * than 8 for 160MHz.
                         */
                        period = AHD_SYNCRATE_REVA_160;
                }
                if ((tinfo->ppr_options & MSG_EXT_PPR_PCOMP_EN) == 0)
                        iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
                            ~AHD_PRECOMP_MASK;
        } else {
                /*
                 * Precomp should be disabled for non-paced transfers.
                 */
                iocell_opts[AHD_PRECOMP_SLEW_INDEX] &= ~AHD_PRECOMP_MASK;

                if ((ahd->features & AHD_NEW_IOCELL_OPTS) != 0
                 && (ppr_opts & MSG_EXT_PPR_DT_REQ) != 0
                 && (ppr_opts & MSG_EXT_PPR_IU_REQ) == 0) {
                        /*
                         * Slow down our CRC interval to be
                         * compatible with non-packetized
                         * U160 devices that can't handle a
                         * CRC at full speed.
                         */
                        con_opts |= ENSLOWCRC;
                }

                if ((ahd->bugs & AHD_PACED_NEGTABLE_BUG) != 0) {
                        /*
                         * On H2A4, revert to a slower slewrate
                         * on non-paced transfers.
                         */
                        iocell_opts[AHD_PRECOMP_SLEW_INDEX] &=
                            ~AHD_SLEWRATE_MASK;
                }
        }

        ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PRECOMP_SLEW);
        ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_PRECOMP_SLEW_INDEX]);
        ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_AMPLITUDE);
        ahd_outb(ahd, ANNEXDAT, iocell_opts[AHD_AMPLITUDE_INDEX]);

        ahd_outb(ahd, NEGPERIOD, period);
        ahd_outb(ahd, NEGPPROPTS, ppr_opts);
        ahd_outb(ahd, NEGOFFSET, offset);

        if (tinfo->width == MSG_EXT_WDTR_BUS_16_BIT)
                con_opts |= WIDEXFER;

        /*
         * Slow down our CRC interval to be
         * compatible with packetized U320 devices
         * that can't handle a CRC at full speed
         */
        if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
                con_opts |= ENSLOWCRC;
        }

        /*
         * During packetized transfers, the target will
         * give us the opportunity to send command packets
         * without us asserting attention.
         */
        if ((tinfo->ppr_options & MSG_EXT_PPR_IU_REQ) == 0)
                con_opts |= ENAUTOATNO;
        ahd_outb(ahd, NEGCONOPTS, con_opts);
        ahd_outb(ahd, NEGOADDR, saved_negoaddr);
        ahd_restore_modes(ahd, saved_modes);
}

/*
 * When the transfer settings for a connection change, setup for
 * negotiation in pending SCBs to effect the change as quickly as
 * possible.  We also cancel any negotiations that are scheduled
 * for inflight SCBs that have not been started yet.
 */
static void
ahd_update_pending_scbs(struct ahd_softc *ahd)
{
        struct scb *pending_scb;
        int   pending_scb_count;
        int   paused;
        u_int saved_scbptr;
        ahd_mode_state saved_modes;

        /*
         * Traverse the pending SCB list and ensure that all of the
         * SCBs there have the proper settings.  We can only safely
         * clear the negotiation required flag (setting requires the
         * execution queue to be modified) and this is only possible
         * if we are not already attempting to select out for this
         * SCB.  For this reason, all callers only call this routine
         * if we are changing the negotiation settings for the currently
         * active transaction on the bus.
         */
        pending_scb_count = 0;
        LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
                struct ahd_devinfo devinfo;
                struct ahd_initiator_tinfo *tinfo;
                struct ahd_tmode_tstate *tstate;

                ahd_scb_devinfo(ahd, &devinfo, pending_scb);
                tinfo = ahd_fetch_transinfo(ahd, devinfo.channel,
                                            devinfo.our_scsiid,
                                            devinfo.target, &tstate);
                if ((tstate->auto_negotiate & devinfo.target_mask) == 0
                 && (pending_scb->flags & SCB_AUTO_NEGOTIATE) != 0) {
                        /* Negotiation no longer needed for this target. */
                        pending_scb->flags &= ~SCB_AUTO_NEGOTIATE;
                        pending_scb->hscb->control &= ~MK_MESSAGE;
                }
                ahd_sync_scb(ahd, pending_scb,
                             BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
                pending_scb_count++;
        }

        if (pending_scb_count == 0)
                return;

        if (ahd_is_paused(ahd)) {
                paused = 1;
        } else {
                paused = 0;
                ahd_pause(ahd);
        }

        /*
         * Force the sequencer to reinitialize the selection for
         * the command at the head of the execution queue if it
         * has already been setup.  The negotiation changes may
         * affect whether we select-out with ATN.  It is only
         * safe to clear ENSELO when the bus is not free and no
         * selection is in progress or completed.
         */
        saved_modes = ahd_save_modes(ahd);
        ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
        if ((ahd_inb(ahd, SCSISIGI) & BSYI) != 0
         && (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) == 0)
                ahd_outb(ahd, SCSISEQ0, ahd_inb(ahd, SCSISEQ0) & ~ENSELO);
        saved_scbptr = ahd_get_scbptr(ahd);
        /* Ensure that the hscbs down on the card match the new information */
        LIST_FOREACH(pending_scb, &ahd->pending_scbs, pending_links) {
                u_int scb_tag;
                u_int control;

                scb_tag = SCB_GET_TAG(pending_scb);
                ahd_set_scbptr(ahd, scb_tag);
                control = ahd_inb_scbram(ahd, SCB_CONTROL);
                control &= ~MK_MESSAGE;
                control |= pending_scb->hscb->control & MK_MESSAGE;
                ahd_outb(ahd, SCB_CONTROL, control);
        }
        ahd_set_scbptr(ahd, saved_scbptr);
        ahd_restore_modes(ahd, saved_modes);

        if (paused == 0)
                ahd_unpause(ahd);
}

/**************************** Pathing Information *****************************/
/*
 * Derive the devinfo for the current connection from the controller's
 * SAVED_SCSIID/SAVED_LUN registers and our role on the bus.
 */
static void
ahd_fetch_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
        ahd_mode_state saved_modes;
        u_int saved_scsiid;
        role_t role;
        int our_id;

        saved_modes = ahd_save_modes(ahd);
        ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

        if (ahd_inb(ahd, SSTAT0) & TARGET)
                role = ROLE_TARGET;
        else
                role = ROLE_INITIATOR;

        if (role == ROLE_TARGET
         && (ahd_inb(ahd, SEQ_FLAGS) & CMDPHASE_PENDING) != 0) {
                /* We were selected, so pull our id from TARGIDIN */
                our_id = ahd_inb(ahd, TARGIDIN) & OID;
        } else if (role == ROLE_TARGET)
                our_id = ahd_inb(ahd, TOWNID);
        else
                our_id = ahd_inb(ahd, IOWNID);

        saved_scsiid = ahd_inb(ahd, SAVED_SCSIID);
        ahd_compile_devinfo(devinfo,
                            our_id,
                            SCSIID_TARGET(ahd, saved_scsiid),
                            ahd_inb(ahd, SAVED_LUN),
                            SCSIID_CHANNEL(ahd, saved_scsiid),
                            role);
        ahd_restore_modes(ahd, saved_modes);
}

/* Print a "controller:channel:target:lun: " prefix for log messages. */
void
ahd_print_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
        printk("%s:%c:%d:%d: ", ahd_name(ahd), 'A',
               devinfo->target, devinfo->lun);
}

static const struct ahd_phase_table_entry*
ahd_lookup_phase_entry(int phase)
{
        const struct ahd_phase_table_entry *entry;
        const struct ahd_phase_table_entry *last_entry;

        /*
         * num_phases doesn't include the default entry which
         * will be returned if the phase doesn't match.
         */
        last_entry = &ahd_phase_table[num_phases];
        for (entry = ahd_phase_table; entry < last_entry; entry++) {
                if (phase == entry->phase)
                        break;
        }
        return (entry);
}

/*
 * Fill in a devinfo structure from its component parts.  Channel 'B'
 * targets occupy the upper half of the target_offset/target_mask space.
 */
void
ahd_compile_devinfo(struct ahd_devinfo *devinfo, u_int our_id, u_int target,
                    u_int lun, char channel, role_t role)
{
        devinfo->our_scsiid = our_id;
        devinfo->target = target;
        devinfo->lun = lun;
        devinfo->target_offset = target;
        devinfo->channel = channel;
        devinfo->role = role;
        if (channel == 'B')
                devinfo->target_offset += 8;
        devinfo->target_mask = (0x01 << devinfo->target_offset);
}

/* Build a devinfo describing the connection an SCB belongs to. */
static void
ahd_scb_devinfo(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
                struct scb *scb)
{
        role_t role;
        int our_id;

        our_id = SCSIID_OUR_ID(scb->hscb->scsiid);
        role = ROLE_INITIATOR;
        if ((scb->hscb->control & TARGET_SCB) != 0)
                role = ROLE_TARGET;
        ahd_compile_devinfo(devinfo, our_id, SCB_GET_TARGET(ahd, scb),
                            SCB_GET_LUN(scb), SCB_GET_CHANNEL(ahd, scb), role);
}

/************************ Message Phase Processing ****************************/
/*
 * When an initiator transaction with the MK_MESSAGE flag either reconnects
 * or enters the initial message out phase, we are interrupted.  Fill our
 * outgoing message buffer with the appropriate message and begin handling
 * the message phase(s) manually.
 */
static void
ahd_setup_initiator_msgout(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
                           struct scb *scb)
{
        /*
         * To facilitate adding multiple messages together,
         * each routine should increment the index and len
         * variables instead of setting them explicitly.
         */
        ahd->msgout_index = 0;
        ahd->msgout_len = 0;

        if (ahd_currently_packetized(ahd))
                ahd->msg_flags |= MSG_FLAG_PACKETIZED;

        if (ahd->send_msg_perror
         && ahd_inb(ahd, MSG_OUT) == HOST_MSG) {
                /* A parity-error message takes priority over everything. */
                ahd->msgout_buf[ahd->msgout_index++] = ahd->send_msg_perror;
                ahd->msgout_len++;
                ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
#ifdef AHD_DEBUG
                if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
                        printk("Setting up for Parity Error delivery\n");
#endif
                return;
        } else if (scb == NULL) {
                printk("%s: WARNING. No pending message for "
                       "I_T msgin.  Issuing NO-OP\n", ahd_name(ahd));
                ahd->msgout_buf[ahd->msgout_index++] = MSG_NOOP;
                ahd->msgout_len++;
                ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
                return;
        }

        if ((scb->flags & SCB_DEVICE_RESET) == 0
         && (scb->flags & SCB_PACKETIZED) == 0
         && ahd_inb(ahd, MSG_OUT) == MSG_IDENTIFYFLAG) {
                u_int identify_msg;

                /* Standard identify (+ optional tag) message sequence. */
                identify_msg = MSG_IDENTIFYFLAG | SCB_GET_LUN(scb);
                if ((scb->hscb->control & DISCENB) != 0)
                        identify_msg |= MSG_IDENTIFY_DISCFLAG;
                ahd->msgout_buf[ahd->msgout_index++] = identify_msg;
                ahd->msgout_len++;

                if ((scb->hscb->control & TAG_ENB) != 0) {
                        ahd->msgout_buf[ahd->msgout_index++] =
                            scb->hscb->control & (TAG_ENB|SCB_TAG_TYPE);
                        ahd->msgout_buf[ahd->msgout_index++] = SCB_GET_TAG(scb);
                        ahd->msgout_len += 2;
                }
        }

        if (scb->flags & SCB_DEVICE_RESET) {
                ahd->msgout_buf[ahd->msgout_index++] = MSG_BUS_DEV_RESET;
                ahd->msgout_len++;
                ahd_print_path(ahd, scb);
                printk("Bus Device Reset Message Sent\n");
                /*
                 * Clear our selection hardware in advance of
                 * the busfree.  We may have an entry in the waiting
                 * Q for this target, and we don't want to go about
                 * selecting while we handle the busfree and blow it
                 * away.
                 */
                ahd_outb(ahd, SCSISEQ0, 0);
        } else if ((scb->flags & SCB_ABORT) != 0) {
                if ((scb->hscb->control & TAG_ENB) != 0) {
                        ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT_TAG;
                } else {
                        ahd->msgout_buf[ahd->msgout_index++] = MSG_ABORT;
                }
                ahd->msgout_len++;
                ahd_print_path(ahd, scb);
                printk("Abort%s Message Sent\n",
                       (scb->hscb->control & TAG_ENB) != 0 ? " Tag" : "");
                /*
                 * Clear our selection hardware in advance of
                 * the busfree.  We may have an entry in the waiting
                 * Q for this target, and we don't want to go about
                 * selecting while we handle the busfree and blow it
                 * away.
                 */
                ahd_outb(ahd, SCSISEQ0, 0);
        } else if ((scb->flags & (SCB_AUTO_NEGOTIATE|SCB_NEGOTIATE)) != 0) {
                ahd_build_transfer_msg(ahd, devinfo);
                /*
                 * Clear our selection hardware in advance of potential
                 * PPR IU status change busfree.  We may have an entry in
                 * the waiting Q for this target, and we don't want to go
                 * about selecting while we handle the busfree and blow
                 * it away.
                 */
                ahd_outb(ahd, SCSISEQ0, 0);
        } else {
                printk("ahd_intr: AWAITING_MSG for an SCB that "
                       "does not have a waiting message\n");
                printk("SCSIID = %x, target_mask = %x\n", scb->hscb->scsiid,
                       devinfo->target_mask);
                panic("SCB = %d, SCB Control = %x:%x, MSG_OUT = %x "
                      "SCB flags = %x", SCB_GET_TAG(scb), scb->hscb->control,
                      ahd_inb_scbram(ahd, SCB_CONTROL), ahd_inb(ahd, MSG_OUT),
                      scb->flags);
        }

        /*
         * Clear the MK_MESSAGE flag from the SCB so we aren't
         * asked to send this message again.
         */
        ahd_outb(ahd, SCB_CONTROL,
                 ahd_inb_scbram(ahd, SCB_CONTROL) & ~MK_MESSAGE);
        scb->hscb->control &= ~MK_MESSAGE;
        ahd->msgout_index = 0;
        ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
}

/*
 * Build an appropriate transfer negotiation message for the
 * currently active target.
 */
static void
ahd_build_transfer_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
        /*
         * We need to initiate transfer negotiations.
         * If our current and goal settings are identical,
         * we want to renegotiate due to a check condition.
         */
        struct ahd_initiator_tinfo *tinfo;
        struct ahd_tmode_tstate *tstate;
        int dowide;
        int dosync;
        int doppr;
        u_int period;
        u_int ppr_options;
        u_int offset;

        tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
                                    devinfo->target, &tstate);
        /*
         * Filter our period based on the current connection.
         * If we can't perform DT transfers on this segment (not in LVD
         * mode for instance), then our decision to issue a PPR message
         * may change.
         */
        period = tinfo->goal.period;
        offset = tinfo->goal.offset;
        ppr_options = tinfo->goal.ppr_options;
        /* Target initiated PPR is not allowed in the SCSI spec */
        if (devinfo->role == ROLE_TARGET)
                ppr_options = 0;
        ahd_devlimited_syncrate(ahd, tinfo, &period,
                                &ppr_options, devinfo->role);
        dowide = tinfo->curr.width != tinfo->goal.width;
        dosync = tinfo->curr.offset != offset || tinfo->curr.period != period;
        /*
         * Only use PPR if we have options that need it, even if the device
         * claims to support it.  There might be an expander in the way
         * that doesn't.
         */
        doppr = ppr_options != 0;

        if (!dowide && !dosync && !doppr) {
                dowide = tinfo->goal.width != MSG_EXT_WDTR_BUS_8_BIT;
                dosync = tinfo->goal.offset != 0;
        }

        if (!dowide && !dosync && !doppr) {
                /*
                 * Force async with a WDTR message if we have a wide bus,
                 * or just issue an SDTR with a 0 offset.
                 */
                if ((ahd->features & AHD_WIDE) != 0)
                        dowide = 1;
                else
                        dosync = 1;
                if (bootverbose) {
                        ahd_print_devinfo(ahd, devinfo);
                        printk("Ensuring async\n");
                }
        }
        /* Target initiated PPR is not allowed in the SCSI spec */
        if (devinfo->role == ROLE_TARGET)
                doppr = 0;

        /*
         * Both the PPR message and SDTR message require the
         * goal syncrate to be limited to what the target device
         * is capable of handling (based on whether an LVD->SE
         * expander is on the bus), so combine these two cases.
         * Regardless, guarantee that if we are using WDTR and SDTR
         * messages that WDTR comes first.
         */
        if (doppr || (dosync && !dowide)) {

                offset = tinfo->goal.offset;
                ahd_validate_offset(ahd, tinfo, period, &offset,
                                    doppr ? tinfo->goal.width
                                          : tinfo->curr.width,
                                    devinfo->role);
                if (doppr) {
                        ahd_construct_ppr(ahd, devinfo, period, offset,
                                          tinfo->goal.width, ppr_options);
                } else {
                        ahd_construct_sdtr(ahd, devinfo, period, offset);
                }
        } else {
                ahd_construct_wdtr(ahd, devinfo, tinfo->goal.width);
        }
}

/*
 * Build a synchronous negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_sdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
                   u_int period, u_int offset)
{
        if (offset == 0)
                period = AHD_ASYNC_XFER_PERIOD;
        ahd->msgout_index += spi_populate_sync_msg(
                ahd->msgout_buf + ahd->msgout_index, period, offset);
        ahd->msgout_len += 5;
        if (bootverbose) {
                printk("(%s:%c:%d:%d): Sending SDTR period %x, offset %x\n",
                       ahd_name(ahd), devinfo->channel, devinfo->target,
                       devinfo->lun, period, offset);
        }
}

/*
 * Build a wide negotiation message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_wdtr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
                   u_int bus_width)
{
        ahd->msgout_index += spi_populate_width_msg(
                ahd->msgout_buf + ahd->msgout_index, bus_width);
        ahd->msgout_len += 4;
        if (bootverbose) {
                printk("(%s:%c:%d:%d): Sending WDTR %x\n",
                       ahd_name(ahd), devinfo->channel, devinfo->target,
                       devinfo->lun, bus_width);
        }
}

/*
 * Build a parallel protocol request message in our message
 * buffer based on the input parameters.
 */
static void
ahd_construct_ppr(struct ahd_softc *ahd, struct ahd_devinfo *devinfo,
                  u_int period, u_int offset, u_int bus_width,
                  u_int ppr_options)
{
        /*
         * Always request precompensation from
         * the other target if we are running
         * at paced syncrates.
         */
        if (period <= AHD_SYNCRATE_PACED)
                ppr_options |= MSG_EXT_PPR_PCOMP_EN;
        if (offset == 0)
                period = AHD_ASYNC_XFER_PERIOD;
        ahd->msgout_index += spi_populate_ppr_msg(
                ahd->msgout_buf + ahd->msgout_index, period, offset,
                bus_width, ppr_options);
        ahd->msgout_len += 8;
        if (bootverbose) {
                printk("(%s:%c:%d:%d): Sending PPR bus_width %x, period %x, "
                       "offset %x, ppr_options %x\n", ahd_name(ahd),
                       devinfo->channel, devinfo->target, devinfo->lun,
                       bus_width, period, offset, ppr_options);
        }
}

/*
 * Clear any active message state.
 */
static void
ahd_clear_msg_state(struct ahd_softc *ahd)
{
        ahd_mode_state saved_modes;

        saved_modes = ahd_save_modes(ahd);
        ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
        ahd->send_msg_perror = 0;
        ahd->msg_flags = MSG_FLAG_NONE;
        ahd->msgout_len = 0;
        ahd->msgin_index = 0;
        ahd->msg_type = MSG_TYPE_NONE;
        if ((ahd_inb(ahd, SCSISIGO) & ATNO) != 0) {
                /*
                 * The target didn't care to respond to our
                 * message request, so clear ATN.
                 */
                ahd_outb(ahd, CLRSINT1, CLRATNO);
        }
        ahd_outb(ahd, MSG_OUT, MSG_NOOP);
        ahd_outb(ahd, SEQ_FLAGS2,
                 ahd_inb(ahd, SEQ_FLAGS2) & ~TARGET_MSG_PENDING);
        ahd_restore_modes(ahd, saved_modes);
}

/*
 * Manual message loop handler.
 */
static void
ahd_handle_message_phase(struct ahd_softc *ahd)
{
        struct ahd_devinfo devinfo;
        u_int bus_phase;
        int end_session;

        ahd_fetch_devinfo(ahd, &devinfo);
        end_session = FALSE;
        bus_phase = ahd_inb(ahd, LASTPHASE);

        if ((ahd_inb(ahd, LQISTAT2) & LQIPHASE_OUTPKT) != 0) {
                printk("LQIRETRY for LQIPHASE_OUTPKT\n");
                ahd_outb(ahd, LQCTL2, LQIRETRY);
        }
reswitch:
        switch (ahd->msg_type) {
        case MSG_TYPE_INITIATOR_MSGOUT:
        {
                int lastbyte;
                int phasemis;
                int msgdone;

                if (ahd->msgout_len == 0 && ahd->send_msg_perror == 0)
                        panic("HOST_MSG_LOOP interrupt with no active message");

#ifdef AHD_DEBUG
                if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
                        ahd_print_devinfo(ahd, &devinfo);
                        printk("INITIATOR_MSG_OUT");
                }
#endif
                phasemis = bus_phase != P_MESGOUT;
                if (phasemis) {
#ifdef AHD_DEBUG
                        if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
                                printk(" PHASEMIS %s\n",
                                       ahd_lookup_phase_entry(bus_phase)
                                                             ->phasemsg);
                        }
#endif
                        if (bus_phase == P_MESGIN) {
                                /*
                                 * Change gears and see if
                                 * this message is of interest to
                                 * us or should be passed back to
                                 * the sequencer.
                                 */
                                ahd_outb(ahd, CLRSINT1, CLRATNO);
                                ahd->send_msg_perror = 0;
                                ahd->msg_type = MSG_TYPE_INITIATOR_MSGIN;
                                ahd->msgin_index = 0;
                                goto reswitch;
                        }
                        end_session = TRUE;
                        break;
                }

                if (ahd->send_msg_perror) {
                        ahd_outb(ahd, CLRSINT1, CLRATNO);
                        ahd_outb(ahd, CLRSINT1, CLRREQINIT);
#ifdef AHD_DEBUG
                        if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
                                printk(" byte 0x%x\n", ahd->send_msg_perror);
#endif
                        /*
                         * If we are notifying the target of a CRC error
                         * during packetized operations, the target is
                         * within its rights to acknowledge our message
                         * with a busfree.
                         */
                        if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0
                         && ahd->send_msg_perror == MSG_INITIATOR_DET_ERR)
                                ahd->msg_flags |= MSG_FLAG_EXPECT_IDE_BUSFREE;

                        ahd_outb(ahd, RETURN_2, ahd->send_msg_perror);
                        ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
                        break;
                }

                msgdone = ahd->msgout_index == ahd->msgout_len;
                if (msgdone) {
                        /*
                         * The target has requested a retry.
                         * Re-assert ATN, reset our message index to
                         * 0, and try again.
                         */
                        ahd->msgout_index = 0;
                        ahd_assert_atn(ahd);
                }

                lastbyte = ahd->msgout_index == (ahd->msgout_len - 1);
                if (lastbyte) {
                        /* Last byte is signified by dropping ATN */
                        ahd_outb(ahd, CLRSINT1, CLRATNO);
                }

                /*
                 * Clear our interrupt status and present
                 * the next byte on the bus.
                 */
                ahd_outb(ahd, CLRSINT1, CLRREQINIT);
#ifdef AHD_DEBUG
                if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
                        printk(" byte 0x%x\n",
                               ahd->msgout_buf[ahd->msgout_index]);
#endif
                ahd_outb(ahd, RETURN_2, ahd->msgout_buf[ahd->msgout_index++]);
                ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_WRITE);
                break;
        }
        case MSG_TYPE_INITIATOR_MSGIN:
        {
                int phasemis;
                int message_done;

#ifdef AHD_DEBUG
                if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
                        ahd_print_devinfo(ahd, &devinfo);
                        printk("INITIATOR_MSG_IN");
                }
#endif
                phasemis = bus_phase != P_MESGIN;
                if (phasemis) {
#ifdef AHD_DEBUG
                        if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
                                printk(" PHASEMIS %s\n",
                                       ahd_lookup_phase_entry(bus_phase)
                                                             ->phasemsg);
                        }
#endif
                        ahd->msgin_index = 0;
                        /* Switch to msgout if we still have bytes to send. */
                        if (bus_phase == P_MESGOUT
                         && (ahd->send_msg_perror != 0
                          || (ahd->msgout_len != 0
                           && ahd->msgout_index == 0))) {
                                ahd->msg_type = MSG_TYPE_INITIATOR_MSGOUT;
                                goto reswitch;
                        }
                        end_session = TRUE;
                        break;
                }

                /* Pull the byte in without acking it */
                ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIBUS);
#ifdef AHD_DEBUG
                if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
                        printk(" byte 0x%x\n",
                               ahd->msgin_buf[ahd->msgin_index]);
#endif

                message_done = ahd_parse_msg(ahd, &devinfo);

                if (message_done) {
                        /*
                         * Clear our incoming message buffer in case there
                         * is another message following this one.
                         */
                        ahd->msgin_index = 0;

                        /*
                         * If this message elicited a response,
                         * assert ATN so the target takes us to the
                         * message out phase.
                         */
                        if (ahd->msgout_len != 0) {
#ifdef AHD_DEBUG
                                if ((ahd_debug & AHD_SHOW_MESSAGES) != 0) {
                                        ahd_print_devinfo(ahd, &devinfo);
                                        printk("Asserting ATN for response\n");
                                }
#endif
                                ahd_assert_atn(ahd);
                        }
                } else
                        ahd->msgin_index++;

                if (message_done == MSGLOOP_TERMINATED) {
                        end_session = TRUE;
                } else {
                        /* Ack the byte */
                        ahd_outb(ahd, CLRSINT1, CLRREQINIT);
                        ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_READ);
                }
                break;
        }
        case MSG_TYPE_TARGET_MSGIN:
        {
                int msgdone;
                int msgout_request;

                /*
                 * By default, the message loop will continue.
                 */
                ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);

                if (ahd->msgout_len == 0)
                        panic("Target MSGIN with no active message");

                /*
                 * If we interrupted a mesgout session, the initiator
                 * will not know this until our first REQ.  So, we
                 * only honor mesgout requests after we've sent our
                 * first byte.
                 */
                if ((ahd_inb(ahd, SCSISIGI) & ATNI) != 0
                 && ahd->msgout_index > 0)
                        msgout_request = TRUE;
                else
                        msgout_request = FALSE;

                if (msgout_request) {

                        /*
                         * Change gears and see if
                         * this message is of interest to
                         * us or should be passed back to
                         * the sequencer.
                         */
                        ahd->msg_type = MSG_TYPE_TARGET_MSGOUT;
                        ahd_outb(ahd, SCSISIGO, P_MESGOUT | BSYO);
                        ahd->msgin_index = 0;
                        /* Dummy read to REQ for first byte */
                        ahd_inb(ahd, SCSIDAT);
                        ahd_outb(ahd, SXFRCTL0,
                                 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
                        break;
                }

                msgdone = ahd->msgout_index == ahd->msgout_len;
                if (msgdone) {
                        ahd_outb(ahd, SXFRCTL0,
                                 ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
                        end_session = TRUE;
                        break;
                }

                /*
                 * Present the next byte on the bus.
                 */
                ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) | SPIOEN);
                ahd_outb(ahd, SCSIDAT, ahd->msgout_buf[ahd->msgout_index++]);
                break;
        }
        case MSG_TYPE_TARGET_MSGOUT:
        {
                int lastbyte;
                int msgdone;

                /*
                 * By default, the message loop will continue.
                 */
                ahd_outb(ahd, RETURN_1, CONT_MSG_LOOP_TARG);

                /*
                 * The initiator signals that this is
                 * the last byte by dropping ATN.
                 */
                lastbyte = (ahd_inb(ahd, SCSISIGI) & ATNI) == 0;

                /*
                 * Read the latched byte, but turn off SPIOEN first
                 * so that we don't inadvertently cause a REQ for the
                 * next byte.
                 */
                ahd_outb(ahd, SXFRCTL0, ahd_inb(ahd, SXFRCTL0) & ~SPIOEN);
                ahd->msgin_buf[ahd->msgin_index] = ahd_inb(ahd, SCSIDAT);
                msgdone = ahd_parse_msg(ahd, &devinfo);
                if (msgdone == MSGLOOP_TERMINATED) {
                        /*
                         * The message is *really* done in that it caused
                         * us to go to bus free.  The sequencer has already
                         * been reset at this point, so pull the ejection
                         * handle.
                         */
                        return;
                }

                ahd->msgin_index++;

                /*
                 * XXX Read spec about initiator dropping ATN too soon
                 *     and use msgdone to detect it.
                 */
                if (msgdone == MSGLOOP_MSGCOMPLETE) {
                        ahd->msgin_index = 0;

                        /*
                         * If this message elicited a response, transition
                         * to the Message in phase and send it.
                         */
                        if (ahd->msgout_len != 0) {
                                ahd_outb(ahd, SCSISIGO, P_MESGIN | BSYO);
                                ahd_outb(ahd, SXFRCTL0,
                                         ahd_inb(ahd, SXFRCTL0) | SPIOEN);
                                ahd->msg_type = MSG_TYPE_TARGET_MSGIN;
                                ahd->msgin_index = 0;
                                break;
                        }
                }

                if (lastbyte)
                        end_session = TRUE;
                else {
                        /* Ask for the next byte. */
                        ahd_outb(ahd, SXFRCTL0,
                                 ahd_inb(ahd, SXFRCTL0) | SPIOEN);
                }

                break;
        }
        default:
                panic("Unknown REQINIT message type");
        }

        if (end_session) {
                if ((ahd->msg_flags & MSG_FLAG_PACKETIZED) != 0) {
                        printk("%s: Returning to Idle Loop\n",
                               ahd_name(ahd));
                        ahd_clear_msg_state(ahd);

                        /*
                         * Perform the equivalent of a clear_target_state.
                         */
                        ahd_outb(ahd, LASTPHASE, P_BUSFREE);
                        ahd_outb(ahd, SEQ_FLAGS, NOT_IDENTIFIED|NO_CDB_SENT);
                        ahd_outb(ahd, SEQCTL0, FASTMODE|SEQRESET);
                } else {
                        ahd_clear_msg_state(ahd);
                        ahd_outb(ahd, RETURN_1, EXIT_MSG_LOOP);
                }
        }
}

/*
 * See if we sent a particular extended message to the target.
 * If "full" is true, return true only if the target saw the full
 * message.  If "full" is false, return true if the target saw at
 * least the first byte of the message.
 */
static int
ahd_sent_msg(struct ahd_softc *ahd, ahd_msgtype type, u_int msgval, int full)
{
	int found;
	u_int index;

	found = FALSE;
	index = 0;

	/* Walk the outgoing message buffer, classifying each message. */
	while (index < ahd->msgout_len) {
		if (ahd->msgout_buf[index] == MSG_EXTENDED) {
			u_int end_index;

			/* msgout_buf[index+1] is the extended message length. */
			end_index = index + 1 + ahd->msgout_buf[index + 1];
			if (ahd->msgout_buf[index+2] == msgval
			 && type == AHDMSG_EXT) {
				if (full) {
					/* Sent only if we advanced past the last byte. */
					if (ahd->msgout_index > end_index)
						found = TRUE;
				} else if (ahd->msgout_index > index)
					found = TRUE;
			}
			index = end_index;
		} else if (ahd->msgout_buf[index] >= MSG_SIMPLE_TASK
			&& ahd->msgout_buf[index] <= MSG_IGN_WIDE_RESIDUE) {

			/* Skip tag type and tag id or residue param*/
			index += 2;
		} else {
			/* Single byte message */
			if (type == AHDMSG_1B
			 && ahd->msgout_index > index
			 && (ahd->msgout_buf[index] == msgval
			  || ((ahd->msgout_buf[index] & MSG_IDENTIFYFLAG) != 0
			   && msgval == MSG_IDENTIFYFLAG)))
				found = TRUE;
			index++;
		}

		if (found)
			break;
	}
	return (found);
}

/*
 * Wait for a complete incoming message, parse it, and respond accordingly.
 *
 * Returns MSGLOOP_IN_PROG while more bytes are still needed,
 * MSGLOOP_MSGCOMPLETE once a full message has been handled, or
 * MSGLOOP_TERMINATED when the sequencer handles the message itself.
 */
static int
ahd_parse_msg(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	int	reject;
	int	done;
	int	response;

	done = MSGLOOP_IN_PROG;
	response = FALSE;
	reject = FALSE;
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel, devinfo->our_scsiid,
				    devinfo->target, &tstate);

	/*
	 * Parse as much of the message as is available,
	 * rejecting it if we don't support it.  When
	 * the entire message is available and has been
	 * handled, return MSGLOOP_MSGCOMPLETE, indicating
	 * that we have parsed an entire message.
	 *
	 * In the case of extended messages, we accept the length
	 * byte outright and perform more checking once we know the
	 * extended message type.
	 */
	switch (ahd->msgin_buf[0]) {
	case MSG_DISCONNECT:
	case MSG_SAVEDATAPOINTER:
	case MSG_CMDCOMPLETE:
	case MSG_RESTOREPOINTERS:
	case MSG_IGN_WIDE_RESIDUE:
		/*
		 * End our message loop as these are messages
		 * the sequencer handles on its own.
		 */
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_MESSAGE_REJECT:
		response = ahd_handle_msg_reject(ahd, devinfo);
		/* FALLTHROUGH */
	case MSG_NOOP:
		done = MSGLOOP_MSGCOMPLETE;
		break;
	case MSG_EXTENDED:
	{
		/* Wait for enough of the message to begin validation */
		if (ahd->msgin_index < 2)
			break;
		switch (ahd->msgin_buf[2]) {
		case MSG_EXT_SDTR:
		{
			u_int	 period;
			u_int	 ppr_options;
			u_int	 offset;
			u_int	 saved_offset;

			if (ahd->msgin_buf[1] != MSG_EXT_SDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have both args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_SDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahd->msgin_index < (MSG_EXT_SDTR_LEN + 1))
				break;

			period = ahd->msgin_buf[3];
			ppr_options = 0;
			saved_offset = offset = ahd->msgin_buf[4];
			/* Clamp the request to what this controller supports. */
			ahd_devlimited_syncrate(ahd, tinfo, &period, &ppr_options,
						devinfo->role);
			ahd_validate_offset(ahd, tinfo, period, &offset,
					    tinfo->curr.width, devinfo->role);
			if (bootverbose) {
				printk("(%s:%c:%d:%d): Received "
				       "SDTR period %x, offset %x\n\t"
				       "Filtered to period %x, offset %x\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       ahd->msgin_buf[3], saved_offset,
				       period, offset);
			}
			ahd_set_syncrate(ahd, devinfo, period,
					 offset, ppr_options,
					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
					 /*paused*/TRUE);

			/*
			 * See if we initiated Sync Negotiation
			 * and didn't have to fall down to async
			 * transfers.
			 */
			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, TRUE)) {
				/* We started it */
				if (saved_offset != offset) {
					/* Went too low - force async */
					reject = TRUE;
				}
			} else {
				/*
				 * Send our own SDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printk("(%s:%c:%d:%d): Target "
					       "Initiated SDTR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_construct_sdtr(ahd, devinfo,
						   period, offset);
				ahd->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_WDTR:
		{
			u_int bus_width;
			u_int saved_width;
			u_int sending_reply;

			sending_reply = FALSE;
			if (ahd->msgin_buf[1] != MSG_EXT_WDTR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have our arg before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_WDTR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahd->msgin_index < (MSG_EXT_WDTR_LEN + 1))
				break;

			bus_width = ahd->msgin_buf[3];
			saved_width = bus_width;
			ahd_validate_width(ahd, tinfo, &bus_width,
					   devinfo->role);
			if (bootverbose) {
				printk("(%s:%c:%d:%d): Received WDTR "
				       "%x filtered to %x\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, bus_width);
			}

			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, TRUE)) {
				/*
				 * Don't send a WDTR back to the
				 * target, since we asked first.
				 * If the width went higher than our
				 * request, reject it.
				 */
				if (saved_width > bus_width) {
					reject = TRUE;
					printk("(%s:%c:%d:%d): requested %dBit "
					       "transfers. Rejecting...\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun,
					       8 * (0x01 << bus_width));
					bus_width = 0;
				}
			} else {
				/*
				 * Send our own WDTR in reply
				 */
				if (bootverbose
				 && devinfo->role == ROLE_INITIATOR) {
					printk("(%s:%c:%d:%d): Target "
					       "Initiated WDTR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				}
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_construct_wdtr(ahd, devinfo, bus_width);
				ahd->msgout_index = 0;
				response = TRUE;
				sending_reply = TRUE;
			}
			/*
			 * After a wide message, we are async, but
			 * some devices don't seem to honor this portion
			 * of the spec.  Force a renegotiation of the
			 * sync component of our transfer agreement even
			 * if our goal is async.  By updating our width
			 * after forcing the negotiation, we avoid
			 * renegotiating for width.
			 */
			ahd_update_neg_request(ahd, devinfo, tstate,
					       tinfo, AHD_NEG_ALWAYS);
			ahd_set_width(ahd, devinfo, bus_width,
				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
				      /*paused*/TRUE);
			if (sending_reply == FALSE && reject == FALSE) {
				/*
				 * We will always have an SDTR to send.
				 */
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_build_transfer_msg(ahd, devinfo);
				ahd->msgout_index = 0;
				response = TRUE;
			}
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		case MSG_EXT_PPR:
		{
			u_int	period;
			u_int	offset;
			u_int	bus_width;
			u_int	ppr_options;
			u_int	saved_width;
			u_int	saved_offset;
			u_int	saved_ppr_options;

			if (ahd->msgin_buf[1] != MSG_EXT_PPR_LEN) {
				reject = TRUE;
				break;
			}

			/*
			 * Wait until we have all args before validating
			 * and acting on this message.
			 *
			 * Add one to MSG_EXT_PPR_LEN to account for
			 * the extended message preamble.
			 */
			if (ahd->msgin_index < (MSG_EXT_PPR_LEN + 1))
				break;

			period = ahd->msgin_buf[3];
			offset = ahd->msgin_buf[5];
			bus_width = ahd->msgin_buf[6];
			saved_width = bus_width;
			ppr_options = ahd->msgin_buf[7];
			/*
			 * According to the spec, a DT only
			 * period factor with no DT option
			 * set implies async.
			 */
			if ((ppr_options & MSG_EXT_PPR_DT_REQ) == 0
			 && period <= 9)
				offset = 0;
			saved_ppr_options = ppr_options;
			saved_offset = offset;

			/*
			 * Transfer options are only available if we
			 * are negotiating wide.
			 */
			if (bus_width == 0)
				ppr_options &= MSG_EXT_PPR_QAS_REQ;

			ahd_validate_width(ahd, tinfo, &bus_width,
					   devinfo->role);
			ahd_devlimited_syncrate(ahd, tinfo, &period,
						&ppr_options, devinfo->role);
			ahd_validate_offset(ahd, tinfo, period, &offset,
					    bus_width, devinfo->role);

			if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, TRUE)) {
				/*
				 * If we are unable to do any of the
				 * requested options (we went too low),
				 * then we'll have to reject the message.
				 */
				if (saved_width > bus_width
				 || saved_offset != offset
				 || saved_ppr_options != ppr_options) {
					reject = TRUE;
					period = 0;
					offset = 0;
					bus_width = 0;
					ppr_options = 0;
				}
			} else {
				if (devinfo->role != ROLE_TARGET)
					printk("(%s:%c:%d:%d): Target "
					       "Initiated PPR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				else
					printk("(%s:%c:%d:%d): Initiator "
					       "Initiated PPR\n",
					       ahd_name(ahd), devinfo->channel,
					       devinfo->target, devinfo->lun);
				ahd->msgout_index = 0;
				ahd->msgout_len = 0;
				ahd_construct_ppr(ahd, devinfo, period, offset,
						  bus_width, ppr_options);
				ahd->msgout_index = 0;
				response = TRUE;
			}
			if (bootverbose) {
				printk("(%s:%c:%d:%d): Received PPR width %x, "
				       "period %x, offset %x,options %x\n"
				       "\tFiltered to width %x, period %x, "
				       "offset %x, options %x\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun,
				       saved_width, ahd->msgin_buf[3],
				       saved_offset, saved_ppr_options,
				       bus_width, period, offset, ppr_options);
			}
			ahd_set_width(ahd, devinfo, bus_width,
				      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
				      /*paused*/TRUE);
			ahd_set_syncrate(ahd, devinfo, period, offset,
					 ppr_options,
					 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
					 /*paused*/TRUE);
			done = MSGLOOP_MSGCOMPLETE;
			break;
		}
		default:
			/* Unknown extended message.  Reject it. */
			reject = TRUE;
			break;
		}
		break;
	}
#ifdef AHD_TARGET_MODE
	case MSG_BUS_DEV_RESET:
		ahd_handle_devreset(ahd, devinfo, CAM_LUN_WILDCARD,
				    CAM_BDR_SENT, "Bus Device Reset Received",
				    /*verbose_level*/0);
		ahd_restart(ahd);
		done = MSGLOOP_TERMINATED;
		break;
	case MSG_ABORT_TAG:
	case MSG_ABORT:
	case MSG_CLEAR_QUEUE:
	{
		int tag;

		/* Target mode messages */
		if (devinfo->role != ROLE_TARGET) {
			reject = TRUE;
			break;
		}
		tag = SCB_LIST_NULL;
		if (ahd->msgin_buf[0] == MSG_ABORT_TAG)
			tag = ahd_inb(ahd, INITIATOR_TAG);
		ahd_abort_scbs(ahd, devinfo->target, devinfo->channel,
			       devinfo->lun, tag, ROLE_TARGET,
			       CAM_REQ_ABORTED);

		tstate = ahd->enabled_targets[devinfo->our_scsiid];
		if (tstate != NULL) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[devinfo->lun];
			if (lstate != NULL) {
				ahd_queue_lstate_event(ahd, lstate,
						       devinfo->our_scsiid,
						       ahd->msgin_buf[0],
						       /*arg*/tag);
				ahd_send_lstate_events(ahd, lstate);
			}
		}
		ahd_restart(ahd);
		done = MSGLOOP_TERMINATED;
		break;
	}
#endif
	case MSG_QAS_REQUEST:
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MESSAGES) != 0)
			printk("%s: QAS request.  SCSISIGI == 0x%x\n",
			       ahd_name(ahd), ahd_inb(ahd, SCSISIGI));
#endif
		ahd->msg_flags |= MSG_FLAG_EXPECT_QASREJ_BUSFREE;
		/* FALLTHROUGH */
	case MSG_TERM_IO_PROC:
	default:
		reject = TRUE;
		break;
	}

	if (reject) {
		/*
		 * Setup to reject the message.
		 */
		ahd->msgout_index = 0;
		ahd->msgout_len = 1;
		ahd->msgout_buf[0] = MSG_MESSAGE_REJECT;
		done = MSGLOOP_MSGCOMPLETE;
		response = TRUE;
	}

	if (done != MSGLOOP_IN_PROG && !response)
		/* Clear the outgoing message buffer */
		ahd->msgout_len = 0;

	return (done);
}

/*
 * Process a message reject message.
 */
static int
ahd_handle_msg_reject(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	/*
	 * What we care about here is if we had an
	 * outstanding SDTR or WDTR message for this
	 * target.  If we did, this is a signal that
	 * the target is refusing negotiation.
	 */
	struct	scb *scb;
	struct	ahd_initiator_tinfo *tinfo;
	struct	ahd_tmode_tstate *tstate;
	u_int	scb_index;
	u_int	last_msg;
	int	response = 0;

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);
	tinfo = ahd_fetch_transinfo(ahd, devinfo->channel,
				    devinfo->our_scsiid,
				    devinfo->target, &tstate);
	/* Might be necessary */
	last_msg = ahd_inb(ahd, LAST_MSG);

	if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/FALSE)) {
		if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_PPR, /*full*/TRUE)
		 && tinfo->goal.period <= AHD_SYNCRATE_PACED) {
			/*
			 * Target may not like our SPI-4 PPR Options.
			 * Attempt to negotiate 80MHz which will turn
			 * off these options.
			 */
			if (bootverbose) {
				printk("(%s:%c:%d:%d): PPR Rejected. "
				       "Trying simple U160 PPR\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun);
			}
			tinfo->goal.period = AHD_SYNCRATE_DT;
			tinfo->goal.ppr_options &= MSG_EXT_PPR_IU_REQ
						|  MSG_EXT_PPR_QAS_REQ
						|  MSG_EXT_PPR_DT_REQ;
		} else {
			/*
			 * Target does not support the PPR message.
			 * Attempt to negotiate SPI-2 style.
			 */
			if (bootverbose) {
				printk("(%s:%c:%d:%d): PPR Rejected. "
				       "Trying WDTR/SDTR\n",
				       ahd_name(ahd), devinfo->channel,
				       devinfo->target, devinfo->lun);
			}
			tinfo->goal.ppr_options = 0;
			tinfo->curr.transport_version = 2;
			tinfo->goal.transport_version = 2;
		}
		ahd->msgout_index = 0;
		ahd->msgout_len = 0;
		ahd_build_transfer_msg(ahd, devinfo);
		ahd->msgout_index = 0;
		response = 1;
	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_WDTR, /*full*/FALSE)) {
		/* note 8bit xfers */
		printk("(%s:%c:%d:%d): refuses WIDE negotiation. Using "
		       "8bit transfers\n", ahd_name(ahd),
		       devinfo->channel, devinfo->target, devinfo->lun);
		ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
			      /*paused*/TRUE);
		/*
		 * No need to clear the sync rate.  If the target
		 * did not accept the command, our syncrate is
		 * unaffected.  If the target started the negotiation,
		 * but rejected our response, we already cleared the
		 * sync rate before sending our WDTR.
		 */
		if (tinfo->goal.offset != tinfo->curr.offset) {

			/* Start the sync negotiation */
			ahd->msgout_index = 0;
			ahd->msgout_len = 0;
			ahd_build_transfer_msg(ahd, devinfo);
			ahd->msgout_index = 0;
			response = 1;
		}
	} else if (ahd_sent_msg(ahd, AHDMSG_EXT, MSG_EXT_SDTR, /*full*/FALSE)) {
		/* note asynch xfers and clear flag */
		ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0,
				 AHD_TRANS_ACTIVE|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
		printk("(%s:%c:%d:%d): refuses synchronous negotiation. "
		       "Using asynchronous transfers\n",
		       ahd_name(ahd), devinfo->channel,
		       devinfo->target, devinfo->lun);
	} else if ((scb->hscb->control & MSG_SIMPLE_TASK) != 0) {
		int tag_type;
		int mask;

		tag_type = (scb->hscb->control & MSG_SIMPLE_TASK);

		if (tag_type == MSG_SIMPLE_TASK) {
			printk("(%s:%c:%d:%d): refuses tagged commands. "
			       "Performing non-tagged I/O\n", ahd_name(ahd),
			       devinfo->channel, devinfo->target, devinfo->lun);
			ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_NONE);
			mask = ~0x23;
		} else {
			printk("(%s:%c:%d:%d): refuses %s tagged commands. "
			       "Performing simple queue tagged I/O only\n",
			       ahd_name(ahd), devinfo->channel, devinfo->target,
			       devinfo->lun, tag_type == MSG_ORDERED_TASK
			       ? "ordered" : "head of queue");
			ahd_set_tags(ahd, scb->io_ctx, devinfo, AHD_QUEUE_BASIC);
			mask = ~0x03;
		}

		/*
		 * Resend the identify for this CCB as the target
		 * may believe that the selection is invalid otherwise.
		 */
		ahd_outb(ahd, SCB_CONTROL,
			 ahd_inb_scbram(ahd, SCB_CONTROL) & mask);
		scb->hscb->control &= mask;
		ahd_set_transaction_tag(scb, /*enabled*/FALSE,
					/*type*/MSG_SIMPLE_TASK);
		ahd_outb(ahd, MSG_OUT, MSG_IDENTIFYFLAG);
		ahd_assert_atn(ahd);
		ahd_busy_tcl(ahd, BUILD_TCL(scb->hscb->scsiid, devinfo->lun),
			     SCB_GET_TAG(scb));

		/*
		 * Requeue all tagged commands for this target
		 * currently in our possession so they can be
		 * converted to untagged commands.
		 */
		ahd_search_qinfifo(ahd, SCB_GET_TARGET(ahd, scb),
				   SCB_GET_CHANNEL(ahd, scb),
				   SCB_GET_LUN(scb), /*tag*/SCB_LIST_NULL,
				   ROLE_INITIATOR, CAM_REQUEUE_REQ,
				   SEARCH_COMPLETE);
	} else if (ahd_sent_msg(ahd, AHDMSG_1B, MSG_IDENTIFYFLAG, TRUE)) {
		/*
		 * Most likely the device believes that we had
		 * previously negotiated packetized.
		 */
		ahd->msg_flags |= MSG_FLAG_EXPECT_PPR_BUSFREE
			       |  MSG_FLAG_IU_REQ_CHANGED;

		ahd_force_renegotiation(ahd, devinfo);
		ahd->msgout_index = 0;
		ahd->msgout_len = 0;
		ahd_build_transfer_msg(ahd, devinfo);
		ahd->msgout_index = 0;
		response = 1;
	} else {
		/*
		 * Otherwise, we ignore it.
		 */
		printk("%s:%c:%d: Message reject for %x -- ignored\n",
		       ahd_name(ahd), devinfo->channel, devinfo->target,
		       last_msg);
	}
	return (response);
}

/*
 * Process an ignore wide residue message.
 */
static void
ahd_handle_ign_wide_residue(struct ahd_softc *ahd, struct ahd_devinfo *devinfo)
{
	u_int scb_index;
	struct scb *scb;

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);
	/*
	 * XXX Actually check data direction in the sequencer?
	 * Perhaps add datadir to some spare bits in the hscb?
	 */
	if ((ahd_inb(ahd, SEQ_FLAGS) & DPHASE) == 0
	 || ahd_get_transfer_dir(scb) != CAM_DIR_IN) {
		/*
		 * Ignore the message if we haven't
		 * seen an appropriate data phase yet.
		 */
	} else {
		/*
		 * If the residual occurred on the last
		 * transfer and the transfer request was
		 * expected to end on an odd count, do
		 * nothing.  Otherwise, subtract a byte
		 * and update the residual count accordingly.
		 */
		uint32_t sgptr;

		sgptr = ahd_inb_scbram(ahd, SCB_RESIDUAL_SGPTR);
		if ((sgptr & SG_LIST_NULL) != 0
		 && (ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
		     & SCB_XFERLEN_ODD) != 0) {
			/*
			 * If the residual occurred on the last
			 * transfer and the transfer request was
			 * expected to end on an odd count, do
			 * nothing.
			 */
		} else {
			uint32_t data_cnt;
			uint64_t data_addr;
			uint32_t sglen;

			/* Pull in the rest of the sgptr */
			sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR);
			data_cnt = ahd_inl_scbram(ahd, SCB_RESIDUAL_DATACNT);
			if ((sgptr & SG_LIST_NULL) != 0) {
				/*
				 * The residual data count is not updated
				 * for the command run to completion case.
				 * Explicitly zero the count.
				 */
				data_cnt &= ~AHD_SG_LEN_MASK;
			}
			data_addr = ahd_inq(ahd, SHADDR);
			/* Back the transfer up by exactly one byte. */
			data_cnt += 1;
			data_addr -= 1;
			sgptr &= SG_PTR_MASK;
			if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) {
				struct ahd_dma64_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

				/*
				 * The residual sg ptr points to the next S/G
				 * to load so we must go back one.
				 */
				sg--;
				sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
				if (sg != scb->sg_list
				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {

					/* Residue crosses an S/G boundary. */
					sg--;
					sglen = ahd_le32toh(sg->len);
					/*
					 * Preserve High Address and SG_LIST
					 * bits while setting the count to 1.
					 */
					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
					data_addr = ahd_le64toh(sg->addr)
						  + (sglen & AHD_SG_LEN_MASK)
						  - 1;

					/*
					 * Increment sg so it points to the
					 * "next" sg.
					 */
					sg++;
					sgptr = ahd_sg_virt_to_bus(ahd, scb,
								   sg);
				}
			} else {
				struct ahd_dma_seg *sg;

				sg = ahd_sg_bus_to_virt(ahd, scb, sgptr);

				/*
				 * The residual sg ptr points to the next S/G
				 * to load so we must go back one.
				 */
				sg--;
				sglen = ahd_le32toh(sg->len) & AHD_SG_LEN_MASK;
				if (sg != scb->sg_list
				 && sglen < (data_cnt & AHD_SG_LEN_MASK)) {

					/* Residue crosses an S/G boundary. */
					sg--;
					sglen = ahd_le32toh(sg->len);
					/*
					 * Preserve High Address and SG_LIST
					 * bits while setting the count to 1.
					 */
					data_cnt = 1|(sglen&(~AHD_SG_LEN_MASK));
					data_addr = ahd_le32toh(sg->addr)
						  + (sglen & AHD_SG_LEN_MASK)
						  - 1;

					/*
					 * Increment sg so it points to the
					 * "next" sg.
					 */
					sg++;
					sgptr = ahd_sg_virt_to_bus(ahd, scb,
								   sg);
				}
			}
			/*
			 * Toggle the "oddness" of the transfer length
			 * to handle this mid-transfer ignore wide
			 * residue.  This ensures that the oddness is
			 * correct for subsequent data transfers.
			 */
			ahd_outb(ahd, SCB_TASK_ATTRIBUTE,
				 ahd_inb_scbram(ahd, SCB_TASK_ATTRIBUTE)
				 ^ SCB_XFERLEN_ODD);

			ahd_outl(ahd, SCB_RESIDUAL_SGPTR, sgptr);
			ahd_outl(ahd, SCB_RESIDUAL_DATACNT, data_cnt);
			/*
			 * The FIFO's pointers will be updated if/when the
			 * sequencer re-enters a data phase.
			 */
		}
	}
}

/*
 * Reinitialize the data pointers for the active transfer
 * based on its current residual.
 */
static void
ahd_reinitialize_dataptrs(struct ahd_softc *ahd)
{
	struct	 scb *scb;
	ahd_mode_state saved_modes;
	u_int	 scb_index;
	u_int	 wait;
	uint32_t sgptr;
	uint32_t resid;
	uint64_t dataptr;

	AHD_ASSERT_MODES(ahd, AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK,
			 AHD_MODE_DFF0_MSK|AHD_MODE_DFF1_MSK);

	scb_index = ahd_get_scbptr(ahd);
	scb = ahd_lookup_scb(ahd, scb_index);

	/*
	 * Release and reacquire the FIFO so we
	 * have a clean slate.
	 */
	ahd_outb(ahd, DFFSXFRCTL, CLRCHN);
	wait = 1000;
	/* Poll (bounded) until the hardware reports the FIFO free. */
	while (--wait && !(ahd_inb(ahd, MDFFSTAT) & FIFOFREE))
		ahd_delay(100);
	if (wait == 0) {
		ahd_print_path(ahd, scb);
		printk("ahd_reinitialize_dataptrs: Forcing FIFO free.\n");
		ahd_outb(ahd, DFFSXFRCTL, RSTCHN|CLRSHCNT);
	}
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, DFFSTAT,
		 ahd_inb(ahd, DFFSTAT)
		 | (saved_modes == 0x11 ? CURRFIFO_1 : CURRFIFO_0));

	/*
	 * Determine initial values for data_addr and data_cnt
	 * for resuming the data phase.
*/ sgptr = ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR); sgptr &= SG_PTR_MASK; resid = (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 2) << 16) | (ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT + 1) << 8) | ahd_inb_scbram(ahd, SCB_RESIDUAL_DATACNT); if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) { struct ahd_dma64_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = ahd_le64toh(sg->addr) + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outl(ahd, HADDR + 4, dataptr >> 32); } else { struct ahd_dma_seg *sg; sg = ahd_sg_bus_to_virt(ahd, scb, sgptr); /* The residual sg_ptr always points to the next sg */ sg--; dataptr = ahd_le32toh(sg->addr) + (ahd_le32toh(sg->len) & AHD_SG_LEN_MASK) - resid; ahd_outb(ahd, HADDR + 4, (ahd_le32toh(sg->len) & ~AHD_SG_LEN_MASK) >> 24); } ahd_outl(ahd, HADDR, dataptr); ahd_outb(ahd, HCNT + 2, resid >> 16); ahd_outb(ahd, HCNT + 1, resid >> 8); ahd_outb(ahd, HCNT, resid); } /* * Handle the effects of issuing a bus device reset message. */ static void ahd_handle_devreset(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, u_int lun, cam_status status, char *message, int verbose_level) { #ifdef AHD_TARGET_MODE struct ahd_tmode_tstate* tstate; #endif int found; found = ahd_abort_scbs(ahd, devinfo->target, devinfo->channel, lun, SCB_LIST_NULL, devinfo->role, status); #ifdef AHD_TARGET_MODE /* * Send an immediate notify ccb to all target mord peripheral * drivers affected by this action. 
*/ tstate = ahd->enabled_targets[devinfo->our_scsiid]; if (tstate != NULL) { u_int cur_lun; u_int max_lun; if (lun != CAM_LUN_WILDCARD) { cur_lun = 0; max_lun = AHD_NUM_LUNS - 1; } else { cur_lun = lun; max_lun = lun; } for (;cur_lun <= max_lun; cur_lun++) { struct ahd_tmode_lstate* lstate; lstate = tstate->enabled_luns[cur_lun]; if (lstate == NULL) continue; ahd_queue_lstate_event(ahd, lstate, devinfo->our_scsiid, MSG_BUS_DEV_RESET, /*arg*/0); ahd_send_lstate_events(ahd, lstate); } } #endif /* * Go back to async/narrow transfers and renegotiate. */ ahd_set_width(ahd, devinfo, MSG_EXT_WDTR_BUS_8_BIT, AHD_TRANS_CUR, /*paused*/TRUE); ahd_set_syncrate(ahd, devinfo, /*period*/0, /*offset*/0, /*ppr_options*/0, AHD_TRANS_CUR, /*paused*/TRUE); if (status != CAM_SEL_TIMEOUT) ahd_send_async(ahd, devinfo->channel, devinfo->target, CAM_LUN_WILDCARD, AC_SENT_BDR); if (message != NULL && bootverbose) printk("%s: %s on %c:%d. %d SCBs aborted\n", ahd_name(ahd), message, devinfo->channel, devinfo->target, found); } #ifdef AHD_TARGET_MODE static void ahd_setup_target_msgin(struct ahd_softc *ahd, struct ahd_devinfo *devinfo, struct scb *scb) { /* * To facilitate adding multiple messages together, * each routine should increment the index and len * variables instead of setting them explicitly. */ ahd->msgout_index = 0; ahd->msgout_len = 0; if (scb != NULL && (scb->flags & SCB_AUTO_NEGOTIATE) != 0) ahd_build_transfer_msg(ahd, devinfo); else panic("ahd_intr: AWAITING target message with no message"); ahd->msgout_index = 0; ahd->msg_type = MSG_TYPE_TARGET_MSGIN; } #endif /**************************** Initialization **********************************/ static u_int ahd_sglist_size(struct ahd_softc *ahd) { bus_size_t list_size; list_size = sizeof(struct ahd_dma_seg) * AHD_NSEG; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) list_size = sizeof(struct ahd_dma64_seg) * AHD_NSEG; return (list_size); } /* * Calculate the optimum S/G List allocation size. 
S/G elements used * for a given transaction must be physically contiguous. Assume the * OS will allocate full pages to us, so it doesn't make sense to request * less than a page. */ static u_int ahd_sglist_allocsize(struct ahd_softc *ahd) { bus_size_t sg_list_increment; bus_size_t sg_list_size; bus_size_t max_list_size; bus_size_t best_list_size; /* Start out with the minimum required for AHD_NSEG. */ sg_list_increment = ahd_sglist_size(ahd); sg_list_size = sg_list_increment; /* Get us as close as possible to a page in size. */ while ((sg_list_size + sg_list_increment) <= PAGE_SIZE) sg_list_size += sg_list_increment; /* * Try to reduce the amount of wastage by allocating * multiple pages. */ best_list_size = sg_list_size; max_list_size = roundup(sg_list_increment, PAGE_SIZE); if (max_list_size < 4 * PAGE_SIZE) max_list_size = 4 * PAGE_SIZE; if (max_list_size > (AHD_SCB_MAX_ALLOC * sg_list_increment)) max_list_size = (AHD_SCB_MAX_ALLOC * sg_list_increment); while ((sg_list_size + sg_list_increment) <= max_list_size && (sg_list_size % PAGE_SIZE) != 0) { bus_size_t new_mod; bus_size_t best_mod; sg_list_size += sg_list_increment; new_mod = sg_list_size % PAGE_SIZE; best_mod = best_list_size % PAGE_SIZE; if (new_mod > best_mod || new_mod == 0) { best_list_size = sg_list_size; } } return (best_list_size); } /* * Allocate a controller structure for a new device * and perform initial initializion. 
 */
struct ahd_softc *
ahd_alloc(void *platform_arg, char *name)
{
	struct  ahd_softc *ahd;

#ifndef	__FreeBSD__
	ahd = kmalloc(sizeof(*ahd), GFP_ATOMIC);
	if (!ahd) {
		printk("aic7xxx: cannot malloc softc!\n");
		/* We own 'name' on failure paths; free it here. */
		kfree(name);
		return NULL;
	}
#else
	ahd = device_get_softc((device_t)platform_arg);
#endif
	memset(ahd, 0, sizeof(*ahd));
	ahd->seep_config = kmalloc(sizeof(*ahd->seep_config), GFP_ATOMIC);
	if (ahd->seep_config == NULL) {
#ifndef	__FreeBSD__
		kfree(ahd);
#endif
		kfree(name);
		return (NULL);
	}
	LIST_INIT(&ahd->pending_scbs);
	/* We don't know our unit number until the OSM sets it */
	ahd->name = name;
	ahd->unit = -1;
	ahd->description = NULL;
	ahd->bus_description = NULL;
	ahd->channel = 'A';
	ahd->chip = AHD_NONE;
	ahd->features = AHD_FENONE;
	ahd->bugs = AHD_BUGNONE;
	ahd->flags = AHD_SPCHK_ENB_A|AHD_RESET_BUS_A|AHD_TERM_ENB_A
		   | AHD_EXTENDED_TRANS_A|AHD_STPWLEVEL_A;
	ahd_timer_init(&ahd->reset_timer);
	ahd_timer_init(&ahd->stat_timer);
	ahd->int_coalescing_timer = AHD_INT_COALESCING_TIMER_DEFAULT;
	ahd->int_coalescing_maxcmds = AHD_INT_COALESCING_MAXCMDS_DEFAULT;
	ahd->int_coalescing_mincmds = AHD_INT_COALESCING_MINCMDS_DEFAULT;
	ahd->int_coalescing_threshold = AHD_INT_COALESCING_THRESHOLD_DEFAULT;
	ahd->int_coalescing_stop_threshold =
	    AHD_INT_COALESCING_STOP_THRESHOLD_DEFAULT;

	if (ahd_platform_alloc(ahd, platform_arg) != 0) {
		/* ahd_free() releases both the softc and seep_config. */
		ahd_free(ahd);
		ahd = NULL;
	}
#ifdef AHD_DEBUG
	if ((ahd_debug & AHD_SHOW_MEMORY) != 0) {
		printk("%s: scb size = 0x%x, hscb size = 0x%x\n",
		       ahd_name(ahd), (u_int)sizeof(struct scb),
		       (u_int)sizeof(struct hardware_scb));
	}
#endif
	return (ahd);
}

/* Set up pause/unpause HCNTRL values.  Always succeeds (returns 0). */
int
ahd_softc_init(struct ahd_softc *ahd)
{

	ahd->unpause = 0;
	ahd->pause = PAUSE;
	return (0);
}

void
ahd_set_unit(struct ahd_softc *ahd, int unit)
{
	ahd->unit = unit;
}

/* Replace the controller's name string, freeing any previous one. */
void
ahd_set_name(struct ahd_softc *ahd, char *name)
{
	if (ahd->name != NULL)
		kfree(ahd->name);
	ahd->name = name;
}

/*
 * Tear down a controller instance.  The switch falls through from the
 * highest init_level reached so only resources actually acquired are
 * released, in reverse order of acquisition.
 */
void
ahd_free(struct ahd_softc *ahd)
{
	int i;

	switch (ahd->init_level) {
	default:
	case 5:
		ahd_shutdown(ahd);
		/* FALLTHROUGH */
	case 4:
		ahd_dmamap_unload(ahd, ahd->shared_data_dmat,
				  ahd->shared_data_map.dmamap);
		/* FALLTHROUGH */
	case 3:
		ahd_dmamem_free(ahd, ahd->shared_data_dmat, ahd->qoutfifo,
				ahd->shared_data_map.dmamap);
		ahd_dmamap_destroy(ahd, ahd->shared_data_dmat,
				   ahd->shared_data_map.dmamap);
		/* FALLTHROUGH */
	case 2:
		ahd_dma_tag_destroy(ahd, ahd->shared_data_dmat);
	case 1:
#ifndef __linux__
		ahd_dma_tag_destroy(ahd, ahd->buffer_dmat);
#endif
		break;
	case 0:
		break;
	}

#ifndef __linux__
	ahd_dma_tag_destroy(ahd, ahd->parent_dmat);
#endif
	ahd_platform_free(ahd);
	ahd_fini_scbdata(ahd);
	for (i = 0; i < AHD_NUM_TARGETS; i++) {
		struct ahd_tmode_tstate *tstate;

		tstate = ahd->enabled_targets[i];
		if (tstate != NULL) {
#ifdef AHD_TARGET_MODE
			int j;

			for (j = 0; j < AHD_NUM_LUNS; j++) {
				struct ahd_tmode_lstate *lstate;

				lstate = tstate->enabled_luns[j];
				if (lstate != NULL) {
					xpt_free_path(lstate->path);
					kfree(lstate);
				}
			}
#endif
			kfree(tstate);
		}
	}
#ifdef AHD_TARGET_MODE
	if (ahd->black_hole != NULL) {
		xpt_free_path(ahd->black_hole->path);
		kfree(ahd->black_hole);
	}
#endif
	if (ahd->name != NULL)
		kfree(ahd->name);
	if (ahd->seep_config != NULL)
		kfree(ahd->seep_config);
	if (ahd->saved_stack != NULL)
		kfree(ahd->saved_stack);
#ifndef __FreeBSD__
	kfree(ahd);
#endif
	return;
}

/* Quiesce the controller: stop timers and reset the chip (no reinit). */
static void
ahd_shutdown(void *arg)
{
	struct	ahd_softc *ahd;

	ahd = (struct ahd_softc *)arg;

	/*
	 * Stop periodic timer callbacks.
	 */
	ahd_timer_stop(&ahd->reset_timer);
	ahd_timer_stop(&ahd->stat_timer);

	/* This will reset most registers to 0, but not all */
	ahd_reset(ahd, /*reinit*/FALSE);
}

/*
 * Reset the controller and record some information about it
 * that is only available just after a reset.  If "reinit" is
 * non-zero, this reset occurred after initial configuration
 * and the caller requests that the chip be fully reinitialized
 * to a runable state.  Chip interrupts are *not* enabled after
 * a reinitialization.  The caller must enable interrupts via
 * ahd_intr_enable().
 */
int
ahd_reset(struct ahd_softc *ahd, int reinit)
{
	u_int	 sxfrctl1;
	int	 wait;
	uint32_t cmd;

	/*
	 * Preserve the value of the SXFRCTL1 register for all channels.
	 * It contains settings that affect termination and we don't want
	 * to disturb the integrity of the bus.
	 */
	ahd_pause(ahd);
	ahd_update_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	sxfrctl1 = ahd_inb(ahd, SXFRCTL1);

	cmd = ahd_pci_read_config(ahd->dev_softc, PCIR_COMMAND, /*bytes*/2);
	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		uint32_t mod_cmd;

		/*
		 * A4 Razor #632
		 * During the assertion of CHIPRST, the chip
		 * does not disable its parity logic prior to
		 * the start of the reset.  This may cause a
		 * parity error to be detected and thus a
		 * spurious SERR or PERR assertion.  Disable
		 * PERR and SERR responses during the CHIPRST.
		 */
		mod_cmd = cmd & ~(PCIM_CMD_PERRESPEN|PCIM_CMD_SERRESPEN);
		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     mod_cmd, /*bytes*/2);
	}
	ahd_outb(ahd, HCNTRL, CHIPRST | ahd->pause);

	/*
	 * Ensure that the reset has finished.  We delay 1000us
	 * prior to reading the register to make sure the chip
	 * has sufficiently completed its reset to handle register
	 * accesses.
	 */
	wait = 1000;
	do {
		ahd_delay(1000);
	} while (--wait && !(ahd_inb(ahd, HCNTRL) & CHIPRSTACK));

	if (wait == 0) {
		printk("%s: WARNING - Failed chip reset!  "
		       "Trying to initialize anyway.\n", ahd_name(ahd));
	}
	ahd_outb(ahd, HCNTRL, ahd->pause);

	if ((ahd->bugs & AHD_PCIX_CHIPRST_BUG) != 0) {
		/*
		 * Clear any latched PCI error status and restore
		 * previous SERR and PERR response enables.
		 */
		ahd_pci_write_config(ahd->dev_softc, PCIR_STATUS + 1,
				     0xFF, /*bytes*/1);
		ahd_pci_write_config(ahd->dev_softc, PCIR_COMMAND,
				     cmd, /*bytes*/2);
	}

	/*
	 * Mode should be SCSI after a chip reset, but lets
	 * set it just to be safe.  We touch the MODE_PTR
	 * register directly so as to bypass the lazy update
	 * code in ahd_set_modes().
	 */
	ahd_known_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	ahd_outb(ahd, MODE_PTR,
		 ahd_build_mode_state(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI));

	/*
	 * Restore SXFRCTL1.
	 *
	 * We must always initialize STPWEN to 1 before we
	 * restore the saved values.  STPWEN is initialized
	 * to a tri-state condition which can only be cleared
	 * by turning it on.
	 */
	ahd_outb(ahd, SXFRCTL1, sxfrctl1|STPWEN);
	ahd_outb(ahd, SXFRCTL1, sxfrctl1);

	/* Determine chip configuration */
	ahd->features &= ~AHD_WIDE;
	if ((ahd_inb(ahd, SBLKCTL) & SELWIDE) != 0)
		ahd->features |= AHD_WIDE;

	/*
	 * If a recovery action has forced a chip reset,
	 * re-initialize the chip to our liking.
	 */
	if (reinit != 0)
		ahd_chip_init(ahd);

	return (0);
}

/*
 * Determine the number of SCBs available on the controller
 */
static int
ahd_probe_scbs(struct ahd_softc *ahd) {
	int i;

	AHD_ASSERT_MODES(ahd, ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK),
			 ~(AHD_MODE_UNKNOWN_MSK|AHD_MODE_CFG_MSK));
	for (i = 0; i < AHD_SCB_MAX; i++) {
		int j;

		ahd_set_scbptr(ahd, i);
		ahd_outw(ahd, SCB_BASE, i);
		for (j = 2; j < 64; j++)
			ahd_outb(ahd, SCB_BASE+j, 0);
		/* Start out life as unallocated (needing an abort) */
		ahd_outb(ahd, SCB_CONTROL, MK_MESSAGE);
		if (ahd_inw_scbram(ahd, SCB_BASE) != i)
			break;
		/*
		 * Re-check slot 0 — NOTE(review): presumably this detects
		 * SCB RAM address aliasing (slot i wrapping onto slot 0);
		 * confirm against the chip documentation.
		 */
		ahd_set_scbptr(ahd, 0);
		if (ahd_inw_scbram(ahd, SCB_BASE) != 0)
			break;
	}
	return (i);
}

/*
 * Busdma callback: record the bus address of a single-segment mapping.
 * NOTE(review): 'nseg' and 'error' are ignored — assumes the caller
 * checks the mapping status and that exactly one segment was produced.
 */
static void
ahd_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	dma_addr_t *baddr;

	baddr = (dma_addr_t *)arg;
	*baddr = segs->ds_addr;
}

/* Put every hardware SCB into a known, quiescent state. */
static void
ahd_initialize_hscbs(struct ahd_softc *ahd)
{
	int i;

	for (i = 0; i < ahd->scb_data.maxhscbs; i++) {
		ahd_set_scbptr(ahd, i);

		/* Clear the control byte.
*/ ahd_outb(ahd, SCB_CONTROL, 0); /* Set the next pointer */ ahd_outw(ahd, SCB_NEXT, SCB_LIST_NULL); } } static int ahd_init_scbdata(struct ahd_softc *ahd) { struct scb_data *scb_data; int i; scb_data = &ahd->scb_data; TAILQ_INIT(&scb_data->free_scbs); for (i = 0; i < AHD_NUM_TARGETS * AHD_NUM_LUNS_NONPKT; i++) LIST_INIT(&scb_data->free_scb_lists[i]); LIST_INIT(&scb_data->any_dev_free_scb_list); SLIST_INIT(&scb_data->hscb_maps); SLIST_INIT(&scb_data->sg_maps); SLIST_INIT(&scb_data->sense_maps); /* Determine the number of hardware SCBs and initialize them */ scb_data->maxhscbs = ahd_probe_scbs(ahd); if (scb_data->maxhscbs == 0) { printk("%s: No SCB space found\n", ahd_name(ahd)); return (ENXIO); } ahd_initialize_hscbs(ahd); /* * Create our DMA tags. These tags define the kinds of device * accessible memory allocations and memory mappings we will * need to perform during normal operation. * * Unless we need to further restrict the allocation, we rely * on the restrictions of the parent dmat, hence the common * use of MAXADDR and MAXSIZE. */ /* DMA tag for our hardware scb structures */ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->hscb_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* DMA tag for our S/G structures. 
*/ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/8, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, ahd_sglist_allocsize(ahd), /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sg_dmat) != 0) { goto error_exit; } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MEMORY) != 0) printk("%s: ahd_sglist_allocsize = 0x%x\n", ahd_name(ahd), ahd_sglist_allocsize(ahd)); #endif scb_data->init_level++; /* DMA tag for our sense buffers. We allocate in page sized chunks */ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, PAGE_SIZE, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &scb_data->sense_dmat) != 0) { goto error_exit; } scb_data->init_level++; /* Perform initial CCB allocation */ ahd_alloc_scbs(ahd); if (scb_data->numscbs == 0) { printk("%s: ahd_init_scbdata - " "Unable to allocate initial scbs\n", ahd_name(ahd)); goto error_exit; } /* * Note that we were successful */ return (0); error_exit: return (ENOMEM); } static struct scb * ahd_find_scb_by_tag(struct ahd_softc *ahd, u_int tag) { struct scb *scb; /* * Look on the pending list. */ LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { if (SCB_GET_TAG(scb) == tag) return (scb); } /* * Then on all of the collision free lists. */ TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { struct scb *list_scb; list_scb = scb; do { if (SCB_GET_TAG(list_scb) == tag) return (list_scb); list_scb = LIST_NEXT(list_scb, collision_links); } while (list_scb); } /* * And finally on the generic free list. 
*/ LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) { if (SCB_GET_TAG(scb) == tag) return (scb); } return (NULL); } static void ahd_fini_scbdata(struct ahd_softc *ahd) { struct scb_data *scb_data; scb_data = &ahd->scb_data; if (scb_data == NULL) return; switch (scb_data->init_level) { default: case 7: { struct map_node *sns_map; while ((sns_map = SLIST_FIRST(&scb_data->sense_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->sense_maps, links); ahd_dmamap_unload(ahd, scb_data->sense_dmat, sns_map->dmamap); ahd_dmamem_free(ahd, scb_data->sense_dmat, sns_map->vaddr, sns_map->dmamap); kfree(sns_map); } ahd_dma_tag_destroy(ahd, scb_data->sense_dmat); /* FALLTHROUGH */ } case 6: { struct map_node *sg_map; while ((sg_map = SLIST_FIRST(&scb_data->sg_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->sg_maps, links); ahd_dmamap_unload(ahd, scb_data->sg_dmat, sg_map->dmamap); ahd_dmamem_free(ahd, scb_data->sg_dmat, sg_map->vaddr, sg_map->dmamap); kfree(sg_map); } ahd_dma_tag_destroy(ahd, scb_data->sg_dmat); /* FALLTHROUGH */ } case 5: { struct map_node *hscb_map; while ((hscb_map = SLIST_FIRST(&scb_data->hscb_maps)) != NULL) { SLIST_REMOVE_HEAD(&scb_data->hscb_maps, links); ahd_dmamap_unload(ahd, scb_data->hscb_dmat, hscb_map->dmamap); ahd_dmamem_free(ahd, scb_data->hscb_dmat, hscb_map->vaddr, hscb_map->dmamap); kfree(hscb_map); } ahd_dma_tag_destroy(ahd, scb_data->hscb_dmat); /* FALLTHROUGH */ } case 4: case 3: case 2: case 1: case 0: break; } } /* * DSP filter Bypass must be enabled until the first selection * after a change in bus mode (Razor #491 and #493). 
*/ static void ahd_setup_iocell_workaround(struct ahd_softc *ahd) { ahd_mode_state saved_modes; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) | BYPASSENAB | RCVROFFSTDIS | XMITOFFSTDIS); ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) | (ENSELDO|ENSELDI)); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: Setting up iocell workaround\n", ahd_name(ahd)); #endif ahd_restore_modes(ahd, saved_modes); ahd->flags &= ~AHD_HAD_FIRST_SEL; } static void ahd_iocell_first_selection(struct ahd_softc *ahd) { ahd_mode_state saved_modes; u_int sblkctl; if ((ahd->flags & AHD_HAD_FIRST_SEL) != 0) return; saved_modes = ahd_save_modes(ahd); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); sblkctl = ahd_inb(ahd, SBLKCTL); ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: iocell first selection\n", ahd_name(ahd)); #endif if ((sblkctl & ENAB40) != 0) { ahd_outb(ahd, DSPDATACTL, ahd_inb(ahd, DSPDATACTL) & ~BYPASSENAB); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: BYPASS now disabled\n", ahd_name(ahd)); #endif } ahd_outb(ahd, SIMODE0, ahd_inb(ahd, SIMODE0) & ~(ENSELDO|ENSELDI)); ahd_outb(ahd, CLRINT, CLRSCSIINT); ahd_restore_modes(ahd, saved_modes); ahd->flags |= AHD_HAD_FIRST_SEL; } /*************************** SCB Management ***********************************/ static void ahd_add_col_list(struct ahd_softc *ahd, struct scb *scb, u_int col_idx) { struct scb_list *free_list; struct scb_tailq *free_tailq; struct scb *first_scb; scb->flags |= SCB_ON_COL_LIST; AHD_SET_SCB_COL_IDX(scb, col_idx); free_list = &ahd->scb_data.free_scb_lists[col_idx]; free_tailq = &ahd->scb_data.free_scbs; first_scb = LIST_FIRST(free_list); if (first_scb != NULL) { LIST_INSERT_AFTER(first_scb, scb, collision_links); } else { LIST_INSERT_HEAD(free_list, scb, collision_links); TAILQ_INSERT_TAIL(free_tailq, scb, links.tqe); } } 
static void ahd_rem_col_list(struct ahd_softc *ahd, struct scb *scb) { struct scb_list *free_list; struct scb_tailq *free_tailq; struct scb *first_scb; u_int col_idx; scb->flags &= ~SCB_ON_COL_LIST; col_idx = AHD_GET_SCB_COL_IDX(ahd, scb); free_list = &ahd->scb_data.free_scb_lists[col_idx]; free_tailq = &ahd->scb_data.free_scbs; first_scb = LIST_FIRST(free_list); if (first_scb == scb) { struct scb *next_scb; /* * Maintain order in the collision free * lists for fairness if this device has * other colliding tags active. */ next_scb = LIST_NEXT(scb, collision_links); if (next_scb != NULL) { TAILQ_INSERT_AFTER(free_tailq, scb, next_scb, links.tqe); } TAILQ_REMOVE(free_tailq, scb, links.tqe); } LIST_REMOVE(scb, collision_links); } /* * Get a free scb. If there are none, see if we can allocate a new SCB. */ struct scb * ahd_get_scb(struct ahd_softc *ahd, u_int col_idx) { struct scb *scb; int tries; tries = 0; look_again: TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) { if (AHD_GET_SCB_COL_IDX(ahd, scb) != col_idx) { ahd_rem_col_list(ahd, scb); goto found; } } if ((scb = LIST_FIRST(&ahd->scb_data.any_dev_free_scb_list)) == NULL) { if (tries++ != 0) return (NULL); ahd_alloc_scbs(ahd); goto look_again; } LIST_REMOVE(scb, links.le); if (col_idx != AHD_NEVER_COL_IDX && (scb->col_scb != NULL) && (scb->col_scb->flags & SCB_ACTIVE) == 0) { LIST_REMOVE(scb->col_scb, links.le); ahd_add_col_list(ahd, scb->col_scb, col_idx); } found: scb->flags |= SCB_ACTIVE; return (scb); } /* * Return an SCB resource to the free list. */ void ahd_free_scb(struct ahd_softc *ahd, struct scb *scb) { /* Clean up for the next user */ scb->flags = SCB_FLAG_NONE; scb->hscb->control = 0; ahd->scb_data.scbindex[SCB_GET_TAG(scb)] = NULL; if (scb->col_scb == NULL) { /* * No collision possible. Just free normally. 
*/ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); } else if ((scb->col_scb->flags & SCB_ON_COL_LIST) != 0) { /* * The SCB we might have collided with is on * a free collision list. Put both SCBs on * the generic list. */ ahd_rem_col_list(ahd, scb->col_scb); LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb->col_scb, links.le); } else if ((scb->col_scb->flags & (SCB_PACKETIZED|SCB_ACTIVE)) == SCB_ACTIVE && (scb->col_scb->hscb->control & TAG_ENB) != 0) { /* * The SCB we might collide with on the next allocation * is still active in a non-packetized, tagged, context. * Put us on the SCB collision list. */ ahd_add_col_list(ahd, scb, AHD_GET_SCB_COL_IDX(ahd, scb->col_scb)); } else { /* * The SCB we might collide with on the next allocation * is either active in a packetized context, or free. * Since we can't collide, put this SCB on the generic * free list. */ LIST_INSERT_HEAD(&ahd->scb_data.any_dev_free_scb_list, scb, links.le); } ahd_platform_scb_free(ahd, scb); } static void ahd_alloc_scbs(struct ahd_softc *ahd) { struct scb_data *scb_data; struct scb *next_scb; struct hardware_scb *hscb; struct map_node *hscb_map; struct map_node *sg_map; struct map_node *sense_map; uint8_t *segs; uint8_t *sense_data; dma_addr_t hscb_busaddr; dma_addr_t sg_busaddr; dma_addr_t sense_busaddr; int newcount; int i; scb_data = &ahd->scb_data; if (scb_data->numscbs >= AHD_SCB_MAX_ALLOC) /* Can't allocate any more */ return; if (scb_data->scbs_left != 0) { int offset; offset = (PAGE_SIZE / sizeof(*hscb)) - scb_data->scbs_left; hscb_map = SLIST_FIRST(&scb_data->hscb_maps); hscb = &((struct hardware_scb *)hscb_map->vaddr)[offset]; hscb_busaddr = hscb_map->physaddr + (offset * sizeof(*hscb)); } else { hscb_map = kmalloc(sizeof(*hscb_map), GFP_ATOMIC); if (hscb_map == NULL) return; /* Allocate the next batch of hardware SCBs */ if (ahd_dmamem_alloc(ahd, scb_data->hscb_dmat, (void 
**)&hscb_map->vaddr, BUS_DMA_NOWAIT, &hscb_map->dmamap) != 0) { kfree(hscb_map); return; } SLIST_INSERT_HEAD(&scb_data->hscb_maps, hscb_map, links); ahd_dmamap_load(ahd, scb_data->hscb_dmat, hscb_map->dmamap, hscb_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, &hscb_map->physaddr, /*flags*/0); hscb = (struct hardware_scb *)hscb_map->vaddr; hscb_busaddr = hscb_map->physaddr; scb_data->scbs_left = PAGE_SIZE / sizeof(*hscb); } if (scb_data->sgs_left != 0) { int offset; offset = ((ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd)) - scb_data->sgs_left) * ahd_sglist_size(ahd); sg_map = SLIST_FIRST(&scb_data->sg_maps); segs = sg_map->vaddr + offset; sg_busaddr = sg_map->physaddr + offset; } else { sg_map = kmalloc(sizeof(*sg_map), GFP_ATOMIC); if (sg_map == NULL) return; /* Allocate the next batch of S/G lists */ if (ahd_dmamem_alloc(ahd, scb_data->sg_dmat, (void **)&sg_map->vaddr, BUS_DMA_NOWAIT, &sg_map->dmamap) != 0) { kfree(sg_map); return; } SLIST_INSERT_HEAD(&scb_data->sg_maps, sg_map, links); ahd_dmamap_load(ahd, scb_data->sg_dmat, sg_map->dmamap, sg_map->vaddr, ahd_sglist_allocsize(ahd), ahd_dmamap_cb, &sg_map->physaddr, /*flags*/0); segs = sg_map->vaddr; sg_busaddr = sg_map->physaddr; scb_data->sgs_left = ahd_sglist_allocsize(ahd) / ahd_sglist_size(ahd); #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_MEMORY) printk("Mapped SG data\n"); #endif } if (scb_data->sense_left != 0) { int offset; offset = PAGE_SIZE - (AHD_SENSE_BUFSIZE * scb_data->sense_left); sense_map = SLIST_FIRST(&scb_data->sense_maps); sense_data = sense_map->vaddr + offset; sense_busaddr = sense_map->physaddr + offset; } else { sense_map = kmalloc(sizeof(*sense_map), GFP_ATOMIC); if (sense_map == NULL) return; /* Allocate the next batch of sense buffers */ if (ahd_dmamem_alloc(ahd, scb_data->sense_dmat, (void **)&sense_map->vaddr, BUS_DMA_NOWAIT, &sense_map->dmamap) != 0) { kfree(sense_map); return; } SLIST_INSERT_HEAD(&scb_data->sense_maps, sense_map, links); ahd_dmamap_load(ahd, scb_data->sense_dmat, 
sense_map->dmamap, sense_map->vaddr, PAGE_SIZE, ahd_dmamap_cb, &sense_map->physaddr, /*flags*/0); sense_data = sense_map->vaddr; sense_busaddr = sense_map->physaddr; scb_data->sense_left = PAGE_SIZE / AHD_SENSE_BUFSIZE; #ifdef AHD_DEBUG if (ahd_debug & AHD_SHOW_MEMORY) printk("Mapped sense data\n"); #endif } newcount = min(scb_data->sense_left, scb_data->scbs_left); newcount = min(newcount, scb_data->sgs_left); newcount = min(newcount, (AHD_SCB_MAX_ALLOC - scb_data->numscbs)); for (i = 0; i < newcount; i++) { struct scb_platform_data *pdata; u_int col_tag; #ifndef __linux__ int error; #endif next_scb = kmalloc(sizeof(*next_scb), GFP_ATOMIC); if (next_scb == NULL) break; pdata = kmalloc(sizeof(*pdata), GFP_ATOMIC); if (pdata == NULL) { kfree(next_scb); break; } next_scb->platform_data = pdata; next_scb->hscb_map = hscb_map; next_scb->sg_map = sg_map; next_scb->sense_map = sense_map; next_scb->sg_list = segs; next_scb->sense_data = sense_data; next_scb->sense_busaddr = sense_busaddr; memset(hscb, 0, sizeof(*hscb)); next_scb->hscb = hscb; hscb->hscb_busaddr = ahd_htole32(hscb_busaddr); /* * The sequencer always starts with the second entry. * The first entry is embedded in the scb. 
*/ next_scb->sg_list_busaddr = sg_busaddr; if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0) next_scb->sg_list_busaddr += sizeof(struct ahd_dma64_seg); else next_scb->sg_list_busaddr += sizeof(struct ahd_dma_seg); next_scb->ahd_softc = ahd; next_scb->flags = SCB_FLAG_NONE; #ifndef __linux__ error = ahd_dmamap_create(ahd, ahd->buffer_dmat, /*flags*/0, &next_scb->dmamap); if (error != 0) { kfree(next_scb); kfree(pdata); break; } #endif next_scb->hscb->tag = ahd_htole16(scb_data->numscbs); col_tag = scb_data->numscbs ^ 0x100; next_scb->col_scb = ahd_find_scb_by_tag(ahd, col_tag); if (next_scb->col_scb != NULL) next_scb->col_scb->col_scb = next_scb; ahd_free_scb(ahd, next_scb); hscb++; hscb_busaddr += sizeof(*hscb); segs += ahd_sglist_size(ahd); sg_busaddr += ahd_sglist_size(ahd); sense_data += AHD_SENSE_BUFSIZE; sense_busaddr += AHD_SENSE_BUFSIZE; scb_data->numscbs++; scb_data->sense_left--; scb_data->scbs_left--; scb_data->sgs_left--; } } void ahd_controller_info(struct ahd_softc *ahd, char *buf) { const char *speed; const char *type; int len; len = sprintf(buf, "%s: ", ahd_chip_names[ahd->chip & AHD_CHIPID_MASK]); buf += len; speed = "Ultra320 "; if ((ahd->features & AHD_WIDE) != 0) { type = "Wide "; } else { type = "Single "; } len = sprintf(buf, "%s%sChannel %c, SCSI Id=%d, ", speed, type, ahd->channel, ahd->our_id); buf += len; sprintf(buf, "%s, %d SCBs", ahd->bus_description, ahd->scb_data.maxhscbs); } static const char *channel_strings[] = { "Primary Low", "Primary High", "Secondary Low", "Secondary High" }; static const char *termstat_strings[] = { "Terminated Correctly", "Over Terminated", "Under Terminated", "Not Configured" }; /***************************** Timer Facilities *******************************/ #define ahd_timer_init init_timer #define ahd_timer_stop del_timer_sync typedef void ahd_linux_callback_t (u_long); static void ahd_timer_reset(ahd_timer_t *timer, int usec, ahd_callback_t *func, void *arg) { struct ahd_softc *ahd; ahd = (struct ahd_softc 
*)arg; del_timer(timer); timer->data = (u_long)arg; timer->expires = jiffies + (usec * HZ)/1000000; timer->function = (ahd_linux_callback_t*)func; add_timer(timer); } /* * Start the board, ready for normal operation */ int ahd_init(struct ahd_softc *ahd) { uint8_t *next_vaddr; dma_addr_t next_baddr; size_t driver_data_size; int i; int error; u_int warn_user; uint8_t current_sensing; uint8_t fstat; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); ahd->stack_size = ahd_probe_stack_size(ahd); ahd->saved_stack = kmalloc(ahd->stack_size * sizeof(uint16_t), GFP_ATOMIC); if (ahd->saved_stack == NULL) return (ENOMEM); /* * Verify that the compiler hasn't over-aggressively * padded important structures. */ if (sizeof(struct hardware_scb) != 64) panic("Hardware SCB size is incorrect"); #ifdef AHD_DEBUG if ((ahd_debug & AHD_DEBUG_SEQUENCER) != 0) ahd->flags |= AHD_SEQUENCER_DEBUG; #endif /* * Default to allowing initiator operations. */ ahd->flags |= AHD_INITIATORROLE; /* * Only allow target mode features if this unit has them enabled. */ if ((AHD_TMODE_ENABLE & (0x1 << ahd->unit)) == 0) ahd->features &= ~AHD_TARGETMODE; #ifndef __linux__ /* DMA tag for mapping buffers into device visible space. */ if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/ahd->flags & AHD_39BIT_ADDRESSING ? (dma_addr_t)0x7FFFFFFFFFULL : BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, /*maxsize*/(AHD_NSEG - 1) * PAGE_SIZE, /*nsegments*/AHD_NSEG, /*maxsegsz*/AHD_MAXTRANSFER_SIZE, /*flags*/BUS_DMA_ALLOCNOW, &ahd->buffer_dmat) != 0) { return (ENOMEM); } #endif ahd->init_level++; /* * DMA tag for our command fifos and other data in system memory * the card's sequencer must be able to access. For initiator * roles, we need to allocate space for the qoutfifo. When providing * for the target mode role, we must additionally provide space for * the incoming target command fifo. 
*/ driver_data_size = AHD_SCB_MAX * sizeof(*ahd->qoutfifo) + sizeof(struct hardware_scb); if ((ahd->features & AHD_TARGETMODE) != 0) driver_data_size += AHD_TMODE_CMDS * sizeof(struct target_cmd); if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) driver_data_size += PKT_OVERRUN_BUFSIZE; if (ahd_dma_tag_create(ahd, ahd->parent_dmat, /*alignment*/1, /*boundary*/BUS_SPACE_MAXADDR_32BIT + 1, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT, /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL, driver_data_size, /*nsegments*/1, /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT, /*flags*/0, &ahd->shared_data_dmat) != 0) { return (ENOMEM); } ahd->init_level++; /* Allocation of driver data */ if (ahd_dmamem_alloc(ahd, ahd->shared_data_dmat, (void **)&ahd->shared_data_map.vaddr, BUS_DMA_NOWAIT, &ahd->shared_data_map.dmamap) != 0) { return (ENOMEM); } ahd->init_level++; /* And permanently map it in */ ahd_dmamap_load(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd->shared_data_map.vaddr, driver_data_size, ahd_dmamap_cb, &ahd->shared_data_map.physaddr, /*flags*/0); ahd->qoutfifo = (struct ahd_completion *)ahd->shared_data_map.vaddr; next_vaddr = (uint8_t *)&ahd->qoutfifo[AHD_QOUT_SIZE]; next_baddr = ahd->shared_data_map.physaddr + AHD_QOUT_SIZE*sizeof(struct ahd_completion); if ((ahd->features & AHD_TARGETMODE) != 0) { ahd->targetcmds = (struct target_cmd *)next_vaddr; next_vaddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); next_baddr += AHD_TMODE_CMDS * sizeof(struct target_cmd); } if ((ahd->bugs & AHD_PKT_BITBUCKET_BUG) != 0) { ahd->overrun_buf = next_vaddr; next_vaddr += PKT_OVERRUN_BUFSIZE; next_baddr += PKT_OVERRUN_BUFSIZE; } /* * We need one SCB to serve as the "next SCB". Since the * tag identifier in this SCB will never be used, there is * no point in using a valid HSCB tag from an SCB pulled from * the standard free pool. So, we allocate this "sentinel" * specially from the DMA safe memory chunk used for the QOUTFIFO. 
*/ ahd->next_queued_hscb = (struct hardware_scb *)next_vaddr; ahd->next_queued_hscb_map = &ahd->shared_data_map; ahd->next_queued_hscb->hscb_busaddr = ahd_htole32(next_baddr); ahd->init_level++; /* Allocate SCB data now that buffer_dmat is initialized */ if (ahd_init_scbdata(ahd) != 0) return (ENOMEM); if ((ahd->flags & AHD_INITIATORROLE) == 0) ahd->flags &= ~AHD_RESET_BUS_A; /* * Before committing these settings to the chip, give * the OSM one last chance to modify our configuration. */ ahd_platform_init(ahd); /* Bring up the chip. */ ahd_chip_init(ahd); AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if ((ahd->flags & AHD_CURRENT_SENSING) == 0) goto init_done; /* * Verify termination based on current draw and * warn user if the bus is over/under terminated. */ error = ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, CURSENSE_ENB); if (error != 0) { printk("%s: current sensing timeout 1\n", ahd_name(ahd)); goto init_done; } for (i = 20, fstat = FLX_FSTAT_BUSY; (fstat & FLX_FSTAT_BUSY) != 0 && i; i--) { error = ahd_read_flexport(ahd, FLXADDR_FLEXSTAT, &fstat); if (error != 0) { printk("%s: current sensing timeout 2\n", ahd_name(ahd)); goto init_done; } } if (i == 0) { printk("%s: Timedout during current-sensing test\n", ahd_name(ahd)); goto init_done; } /* Latch Current Sensing status. */ error = ahd_read_flexport(ahd, FLXADDR_CURRENT_STAT, &current_sensing); if (error != 0) { printk("%s: current sensing timeout 3\n", ahd_name(ahd)); goto init_done; } /* Diable current sensing. 
*/ ahd_write_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, 0); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TERMCTL) != 0) { printk("%s: current_sensing == 0x%x\n", ahd_name(ahd), current_sensing); } #endif warn_user = 0; for (i = 0; i < 4; i++, current_sensing >>= FLX_CSTAT_SHIFT) { u_int term_stat; term_stat = (current_sensing & FLX_CSTAT_MASK); switch (term_stat) { case FLX_CSTAT_OVER: case FLX_CSTAT_UNDER: warn_user++; case FLX_CSTAT_INVALID: case FLX_CSTAT_OKAY: if (warn_user == 0 && bootverbose == 0) break; printk("%s: %s Channel %s\n", ahd_name(ahd), channel_strings[i], termstat_strings[term_stat]); break; } } if (warn_user) { printk("%s: WARNING. Termination is not configured correctly.\n" "%s: WARNING. SCSI bus operations may FAIL.\n", ahd_name(ahd), ahd_name(ahd)); } init_done: ahd_restart(ahd); ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US, ahd_stat_timer, ahd); return (0); } /* * (Re)initialize chip state after a chip reset. */ static void ahd_chip_init(struct ahd_softc *ahd) { uint32_t busaddr; u_int sxfrctl1; u_int scsiseq_template; u_int wait; u_int i; u_int target; ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* * Take the LED out of diagnostic mode */ ahd_outb(ahd, SBLKCTL, ahd_inb(ahd, SBLKCTL) & ~(DIAGLEDEN|DIAGLEDON)); /* * Return HS_MAILBOX to its default value. */ ahd->hs_mailbox = 0; ahd_outb(ahd, HS_MAILBOX, 0); /* Set the SCSI Id, SXFRCTL0, SXFRCTL1, and SIMODE1. */ ahd_outb(ahd, IOWNID, ahd->our_id); ahd_outb(ahd, TOWNID, ahd->our_id); sxfrctl1 = (ahd->flags & AHD_TERM_ENB_A) != 0 ? STPWEN : 0; sxfrctl1 |= (ahd->flags & AHD_SPCHK_ENB_A) != 0 ? ENSPCHK : 0; if ((ahd->bugs & AHD_LONG_SETIMO_BUG) && (ahd->seltime != STIMESEL_MIN)) { /* * The selection timer duration is twice as long * as it should be. Halve it by adding "1" to * the user specified setting. 
*/ sxfrctl1 |= ahd->seltime + STIMESEL_BUG_ADJ; } else { sxfrctl1 |= ahd->seltime; } ahd_outb(ahd, SXFRCTL0, DFON); ahd_outb(ahd, SXFRCTL1, sxfrctl1|ahd->seltime|ENSTIMER|ACTNEGEN); ahd_outb(ahd, SIMODE1, ENSELTIMO|ENSCSIRST|ENSCSIPERR); /* * Now that termination is set, wait for up * to 500ms for our transceivers to settle. If * the adapter does not have a cable attached, * the transceivers may never settle, so don't * complain if we fail here. */ for (wait = 10000; (ahd_inb(ahd, SBLKCTL) & (ENAB40|ENAB20)) == 0 && wait; wait--) ahd_delay(100); /* Clear any false bus resets due to the transceivers settling */ ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI); ahd_outb(ahd, CLRINT, CLRSCSIINT); /* Initialize mode specific S/G state. */ for (i = 0; i < 2; i++) { ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i); ahd_outb(ahd, LONGJMP_ADDR + 1, INVALID_ADDR); ahd_outb(ahd, SG_STATE, 0); ahd_outb(ahd, CLRSEQINTSRC, 0xFF); ahd_outb(ahd, SEQIMODE, ENSAVEPTRS|ENCFG4DATA|ENCFG4ISTAT |ENCFG4TSTAT|ENCFG4ICMD|ENCFG4TCMD); } ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG); ahd_outb(ahd, DSCOMMAND0, ahd_inb(ahd, DSCOMMAND0)|MPARCKEN|CACHETHEN); ahd_outb(ahd, DFF_THRSH, RD_DFTHRSH_75|WR_DFTHRSH_75); ahd_outb(ahd, SIMODE0, ENIOERR|ENOVERRUN); ahd_outb(ahd, SIMODE3, ENNTRAMPERR|ENOSRAMPERR); if ((ahd->bugs & AHD_BUSFREEREV_BUG) != 0) { ahd_outb(ahd, OPTIONMODE, AUTOACKEN|AUTO_MSGOUT_DE); } else { ahd_outb(ahd, OPTIONMODE, AUTOACKEN|BUSFREEREV|AUTO_MSGOUT_DE); } ahd_outb(ahd, SCSCHKN, CURRFIFODEF|WIDERESEN|SHVALIDSTDIS); if ((ahd->chip & AHD_BUS_MASK) == AHD_PCIX) /* * Do not issue a target abort when a split completion * error occurs. Let our PCIX interrupt handler deal * with it instead. H2A4 Razor #625 */ ahd_outb(ahd, PCIXCTL, ahd_inb(ahd, PCIXCTL) | SPLTSTADIS); if ((ahd->bugs & AHD_LQOOVERRUN_BUG) != 0) ahd_outb(ahd, LQOSCSCTL, LQONOCHKOVER); /* * Tweak IOCELL settings. 
*/ if ((ahd->flags & AHD_HP_BOARD) != 0) { for (i = 0; i < NUMDSPS; i++) { ahd_outb(ahd, DSPSELECT, i); ahd_outb(ahd, WRTBIASCTL, WRTBIASCTL_HP_DEFAULT); } #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) printk("%s: WRTBIASCTL now 0x%x\n", ahd_name(ahd), WRTBIASCTL_HP_DEFAULT); #endif } ahd_setup_iocell_workaround(ahd); /* * Enable LQI Manager interrupts. */ ahd_outb(ahd, LQIMODE1, ENLQIPHASE_LQ|ENLQIPHASE_NLQ|ENLIQABORT | ENLQICRCI_LQ|ENLQICRCI_NLQ|ENLQIBADLQI | ENLQIOVERI_LQ|ENLQIOVERI_NLQ); ahd_outb(ahd, LQOMODE0, ENLQOATNLQ|ENLQOATNPKT|ENLQOTCRC); /* * We choose to have the sequencer catch LQOPHCHGINPKT errors * manually for the command phase at the start of a packetized * selection case. ENLQOBUSFREE should be made redundant by * the BUSFREE interrupt, but it seems that some LQOBUSFREE * events fail to assert the BUSFREE interrupt so we must * also enable LQOBUSFREE interrupts. */ ahd_outb(ahd, LQOMODE1, ENLQOBUSFREE); /* * Setup sequencer interrupt handlers. */ ahd_outw(ahd, INTVEC1_ADDR, ahd_resolve_seqaddr(ahd, LABEL_seq_isr)); ahd_outw(ahd, INTVEC2_ADDR, ahd_resolve_seqaddr(ahd, LABEL_timer_isr)); /* * Setup SCB Offset registers. 
*/ if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, pkt_long_lun)); } else { ahd_outb(ahd, LUNPTR, offsetof(struct hardware_scb, lun)); } ahd_outb(ahd, CMDLENPTR, offsetof(struct hardware_scb, cdb_len)); ahd_outb(ahd, ATTRPTR, offsetof(struct hardware_scb, task_attribute)); ahd_outb(ahd, FLAGPTR, offsetof(struct hardware_scb, task_management)); ahd_outb(ahd, CMDPTR, offsetof(struct hardware_scb, shared_data.idata.cdb)); ahd_outb(ahd, QNEXTPTR, offsetof(struct hardware_scb, next_hscb_busaddr)); ahd_outb(ahd, ABRTBITPTR, MK_MESSAGE_BIT_OFFSET); ahd_outb(ahd, ABRTBYTEPTR, offsetof(struct hardware_scb, control)); if ((ahd->bugs & AHD_PKT_LUN_BUG) != 0) { ahd_outb(ahd, LUNLEN, sizeof(ahd->next_queued_hscb->pkt_long_lun) - 1); } else { ahd_outb(ahd, LUNLEN, LUNLEN_SINGLE_LEVEL_LUN); } ahd_outb(ahd, CDBLIMIT, SCB_CDB_LEN_PTR - 1); ahd_outb(ahd, MAXCMD, 0xFF); ahd_outb(ahd, SCBAUTOPTR, AUSCBPTR_EN | offsetof(struct hardware_scb, tag)); /* We haven't been enabled for target mode yet. */ ahd_outb(ahd, MULTARGID, 0); ahd_outb(ahd, MULTARGID + 1, 0); ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI); /* Initialize the negotiation table. */ if ((ahd->features & AHD_NEW_IOCELL_OPTS) == 0) { /* * Clear the spare bytes in the neg table to avoid * spurious parity errors. 
*/ for (target = 0; target < AHD_NUM_TARGETS; target++) { ahd_outb(ahd, NEGOADDR, target); ahd_outb(ahd, ANNEXCOL, AHD_ANNEXCOL_PER_DEV0); for (i = 0; i < AHD_NUM_PER_DEV_ANNEXCOLS; i++) ahd_outb(ahd, ANNEXDAT, 0); } } for (target = 0; target < AHD_NUM_TARGETS; target++) { struct ahd_devinfo devinfo; struct ahd_initiator_tinfo *tinfo; struct ahd_tmode_tstate *tstate; tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id, target, &tstate); ahd_compile_devinfo(&devinfo, ahd->our_id, target, CAM_LUN_WILDCARD, 'A', ROLE_INITIATOR); ahd_update_neg_table(ahd, &devinfo, &tinfo->curr); } ahd_outb(ahd, CLRSINT3, NTRAMPERR|OSRAMPERR); ahd_outb(ahd, CLRINT, CLRSCSIINT); #ifdef NEEDS_MORE_TESTING /* * Always enable abort on incoming L_Qs if this feature is * supported. We use this to catch invalid SCB references. */ if ((ahd->bugs & AHD_ABORT_LQI_BUG) == 0) ahd_outb(ahd, LQCTL1, ABORTPENDING); else #endif ahd_outb(ahd, LQCTL1, 0); /* All of our queues are empty */ ahd->qoutfifonext = 0; ahd->qoutfifonext_valid_tag = QOUTFIFO_ENTRY_VALID; ahd_outb(ahd, QOUTFIFO_ENTRY_VALID_TAG, QOUTFIFO_ENTRY_VALID); for (i = 0; i < AHD_QOUT_SIZE; i++) ahd->qoutfifo[i].valid_tag = 0; ahd_sync_qoutfifo(ahd, BUS_DMASYNC_PREREAD); ahd->qinfifonext = 0; for (i = 0; i < AHD_QIN_SIZE; i++) ahd->qinfifo[i] = SCB_LIST_NULL; if ((ahd->features & AHD_TARGETMODE) != 0) { /* All target command blocks start out invalid. */ for (i = 0; i < AHD_TMODE_CMDS; i++) ahd->targetcmds[i].cmd_valid = 0; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_PREREAD); ahd->tqinfifonext = 1; ahd_outb(ahd, KERNEL_TQINPOS, ahd->tqinfifonext - 1); ahd_outb(ahd, TQINPOS, ahd->tqinfifonext); } /* Initialize Scratch Ram. 
 */
	ahd_outb(ahd, SEQ_FLAGS, 0);
	ahd_outb(ahd, SEQ_FLAGS2, 0);

	/* We don't have any waiting selections */
	ahd_outw(ahd, WAITING_TID_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, WAITING_TID_TAIL, SCB_LIST_NULL);
	ahd_outw(ahd, MK_MESSAGE_SCB, SCB_LIST_NULL);
	ahd_outw(ahd, MK_MESSAGE_SCSIID, 0xFF);
	for (i = 0; i < AHD_NUM_TARGETS; i++)
		ahd_outw(ahd, WAITING_SCB_TAILS + (2 * i), SCB_LIST_NULL);

	/*
	 * Nobody is waiting to be DMAed into the QOUTFIFO.
	 */
	ahd_outw(ahd, COMPLETE_SCB_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_SCB_DMAINPROG_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_DMA_SCB_HEAD, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_DMA_SCB_TAIL, SCB_LIST_NULL);
	ahd_outw(ahd, COMPLETE_ON_QFREEZE_HEAD, SCB_LIST_NULL);

	/*
	 * The Freeze Count is 0.
	 */
	ahd->qfreeze_cnt = 0;
	ahd_outw(ahd, QFREEZE_COUNT, 0);
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, 0);

	/*
	 * Tell the sequencer where it can find our arrays in memory.
	 */
	busaddr = ahd->shared_data_map.physaddr;
	ahd_outl(ahd, SHARED_DATA_ADDR, busaddr);
	ahd_outl(ahd, QOUTFIFO_NEXT_ADDR, busaddr);

	/*
	 * Setup the allowed SCSI Sequences based on operational mode.
	 * If we are a target, we'll enable select in operations once
	 * we've had a lun enabled.
	 */
	scsiseq_template = ENAUTOATNP;
	if ((ahd->flags & AHD_INITIATORROLE) != 0)
		scsiseq_template |= ENRSELI;
	ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq_template);

	/* There are no busy SCBs yet. */
	for (target = 0; target < AHD_NUM_TARGETS; target++) {
		int lun;

		for (lun = 0; lun < AHD_NUM_LUNS_NONPKT; lun++)
			ahd_unbusy_tcl(ahd, BUILD_TCL_RAW(target, 'A', lun));
	}

	/*
	 * Initialize the group code to command length table.
	 * Vendor Unique codes are set to 0 so we only capture
	 * the first byte of the cdb.  These can be overridden
	 * when target mode is enabled.
	 */
	ahd_outb(ahd, CMDSIZE_TABLE, 5);
	ahd_outb(ahd, CMDSIZE_TABLE + 1, 9);
	ahd_outb(ahd, CMDSIZE_TABLE + 2, 9);
	ahd_outb(ahd, CMDSIZE_TABLE + 3, 0);
	ahd_outb(ahd, CMDSIZE_TABLE + 4, 15);
	ahd_outb(ahd, CMDSIZE_TABLE + 5, 11);
	ahd_outb(ahd, CMDSIZE_TABLE + 6, 0);
	ahd_outb(ahd, CMDSIZE_TABLE + 7, 0);

	/* Tell the sequencer of our initial queue positions */
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	ahd_outb(ahd, QOFF_CTLSTA, SCB_QSIZE_512);
	ahd->qinfifonext = 0;
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
	ahd_set_hescb_qoff(ahd, 0);
	ahd_set_snscb_qoff(ahd, 0);
	ahd_set_sescb_qoff(ahd, 0);
	ahd_set_sdscb_qoff(ahd, 0);

	/*
	 * Tell the sequencer which SCB will be the next one it receives.
	 */
	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
	ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);

	/*
	 * Default to coalescing disabled.
	 */
	ahd_outw(ahd, INT_COALESCING_CMDCOUNT, 0);
	ahd_outw(ahd, CMDS_PENDING, 0);
	ahd_update_coalescing_values(ahd, ahd->int_coalescing_timer,
				     ahd->int_coalescing_maxcmds,
				     ahd->int_coalescing_mincmds);
	ahd_enable_coalescing(ahd, FALSE);

	ahd_loadseq(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	if (ahd->features & AHD_AIC79XXB_SLOWCRC) {
		u_int negodat3 = ahd_inb(ahd, NEGCONOPTS);

		/*
		 * Work around Rev B silicon: force the SLOWCRC bit on
		 * and read it back to verify that the write stuck.
		 */
		negodat3 |= ENSLOWCRC;
		ahd_outb(ahd, NEGCONOPTS, negodat3);
		negodat3 = ahd_inb(ahd, NEGCONOPTS);
		if (!(negodat3 & ENSLOWCRC))
			printk("aic79xx: failed to set the SLOWCRC bit\n");
		else
			printk("aic79xx: SLOWCRC bit set\n");
	}
}

/*
 * Setup default device and controller settings.
 * This should only be called if our probe has
 * determined that no configuration data is available.
 *
 * Returns 0 on success or ENOMEM if the per-target
 * transfer-negotiation state could not be allocated.
 */
int
ahd_default_config(struct ahd_softc *ahd)
{
	int	targ;

	ahd->our_id = 7;

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
		printk("%s: unable to allocate ahd_tmode_tstate. "
		       "Failing attach\n", ahd_name(ahd));
		return (ENOMEM);
	}

	for (targ = 0; targ < AHD_NUM_TARGETS; targ++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_tmode_tstate *tstate;
		uint16_t target_mask;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    targ, &tstate);
		/*
		 * We support SPC2 and SPI4.
		 */
		tinfo->user.protocol_version = 4;
		tinfo->user.transport_version = 4;

		/* With no seeprom data, allow everything by default. */
		target_mask = 0x01 << targ;
		ahd->user_discenable |= target_mask;
		tstate->discenable |= target_mask;
		ahd->user_tagenable |= target_mask;
#ifdef AHD_FORCE_160
		tinfo->user.period = AHD_SYNCRATE_DT;
#else
		tinfo->user.period = AHD_SYNCRATE_160;
#endif
		tinfo->user.offset = MAX_OFFSET;
		tinfo->user.ppr_options = MSG_EXT_PPR_RD_STRM
					| MSG_EXT_PPR_WR_FLOW
					| MSG_EXT_PPR_HOLD_MCS
					| MSG_EXT_PPR_IU_REQ
					| MSG_EXT_PPR_QAS_REQ
					| MSG_EXT_PPR_DT_REQ;
		if ((ahd->features & AHD_RTI) != 0)
			tinfo->user.ppr_options |= MSG_EXT_PPR_RTI;

		tinfo->user.width = MSG_EXT_WDTR_BUS_16_BIT;

		/*
		 * Start out Async/Narrow/Untagged and with
		 * conservative protocol support.
		 */
		tinfo->goal.protocol_version = 2;
		tinfo->goal.transport_version = 2;
		tinfo->curr.protocol_version = 2;
		tinfo->curr.transport_version = 2;
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    targ, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		tstate->tagenable &= ~target_mask;
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0,
				 AHD_TRANS_CUR|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
	}
	return (0);
}

/*
 * Parse device configuration information.
 */
int
ahd_parse_cfgdata(struct ahd_softc *ahd, struct seeprom_config *sc)
{
	int targ;
	int max_targ;

	max_targ = sc->max_targets & CFMAXTARG;
	ahd->our_id = sc->brtime_id & CFSCSIID;

	/*
	 * Allocate a tstate to house information for our
	 * initiator presence on the bus as well as the user
	 * data for any target mode initiator.
	 */
	if (ahd_alloc_tstate(ahd, ahd->our_id, 'A') == NULL) {
		printk("%s: unable to allocate ahd_tmode_tstate. "
		       "Failing attach\n", ahd_name(ahd));
		return (ENOMEM);
	}

	for (targ = 0; targ < max_targ; targ++) {
		struct ahd_devinfo devinfo;
		struct ahd_initiator_tinfo *tinfo;
		struct ahd_transinfo *user_tinfo;
		struct ahd_tmode_tstate *tstate;
		uint16_t target_mask;

		tinfo = ahd_fetch_transinfo(ahd, 'A', ahd->our_id,
					    targ, &tstate);
		user_tinfo = &tinfo->user;

		/*
		 * We support SPC2 and SPI4.
		 */
		tinfo->user.protocol_version = 4;
		tinfo->user.transport_version = 4;

		/*
		 * Start from "nothing enabled" and turn features back
		 * on per the seeprom's device flags for this target.
		 */
		target_mask = 0x01 << targ;
		ahd->user_discenable &= ~target_mask;
		tstate->discenable &= ~target_mask;
		ahd->user_tagenable &= ~target_mask;
		if (sc->device_flags[targ] & CFDISC) {
			tstate->discenable |= target_mask;
			ahd->user_discenable |= target_mask;
			ahd->user_tagenable |= target_mask;
		} else {
			/*
			 * Cannot be packetized without disconnection.
			 */
			sc->device_flags[targ] &= ~CFPACKETIZED;
		}

		user_tinfo->ppr_options = 0;
		user_tinfo->period = (sc->device_flags[targ] & CFXFER);
		if (user_tinfo->period < CFXFER_ASYNC) {
			/* Fast enough for DT?  Request it. */
			if (user_tinfo->period <= AHD_PERIOD_10MHz)
				user_tinfo->ppr_options |= MSG_EXT_PPR_DT_REQ;
			user_tinfo->offset = MAX_OFFSET;
		} else {
			/* Async: no offset, canonical async period. */
			user_tinfo->offset = 0;
			user_tinfo->period = AHD_ASYNC_XFER_PERIOD;
		}
#ifdef AHD_FORCE_160
		if (user_tinfo->period <= AHD_SYNCRATE_160)
			user_tinfo->period = AHD_SYNCRATE_DT;
#endif

		if ((sc->device_flags[targ] & CFPACKETIZED) != 0) {
			user_tinfo->ppr_options |= MSG_EXT_PPR_RD_STRM
						|  MSG_EXT_PPR_WR_FLOW
						|  MSG_EXT_PPR_HOLD_MCS
						|  MSG_EXT_PPR_IU_REQ;
			if ((ahd->features & AHD_RTI) != 0)
				user_tinfo->ppr_options |= MSG_EXT_PPR_RTI;
		}

		if ((sc->device_flags[targ] & CFQAS) != 0)
			user_tinfo->ppr_options |= MSG_EXT_PPR_QAS_REQ;

		if ((sc->device_flags[targ] & CFWIDEB) != 0)
			user_tinfo->width = MSG_EXT_WDTR_BUS_16_BIT;
		else
			user_tinfo->width = MSG_EXT_WDTR_BUS_8_BIT;
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_MISC) != 0)
			printk("(%d): %x:%x:%x:%x\n", targ,
			       user_tinfo->width, user_tinfo->period,
			       user_tinfo->offset, user_tinfo->ppr_options);
#endif
		/*
		 * Start out Async/Narrow/Untagged and with
		 * conservative protocol support.
		 */
		tstate->tagenable &= ~target_mask;
		tinfo->goal.protocol_version = 2;
		tinfo->goal.transport_version = 2;
		tinfo->curr.protocol_version = 2;
		tinfo->curr.transport_version = 2;
		ahd_compile_devinfo(&devinfo, ahd->our_id,
				    targ, CAM_LUN_WILDCARD,
				    'A', ROLE_INITIATOR);
		ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
			      AHD_TRANS_CUR|AHD_TRANS_GOAL, /*paused*/TRUE);
		ahd_set_syncrate(ahd, &devinfo, /*period*/0, /*offset*/0,
				 /*ppr_options*/0,
				 AHD_TRANS_CUR|AHD_TRANS_GOAL,
				 /*paused*/TRUE);
	}

	/* Mirror the adapter-wide BIOS settings into our softc flags. */
	ahd->flags &= ~AHD_SPCHK_ENB_A;
	if (sc->bios_control & CFSPARITY)
		ahd->flags |= AHD_SPCHK_ENB_A;

	ahd->flags &= ~AHD_RESET_BUS_A;
	if (sc->bios_control & CFRESETB)
		ahd->flags |= AHD_RESET_BUS_A;

	ahd->flags &= ~AHD_EXTENDED_TRANS_A;
	if (sc->bios_control & CFEXTEND)
		ahd->flags |= AHD_EXTENDED_TRANS_A;

	ahd->flags &= ~AHD_BIOS_ENABLED;
	if ((sc->bios_control & CFBIOSSTATE) == CFBS_ENABLED)
		ahd->flags |= AHD_BIOS_ENABLED;

	ahd->flags &= ~AHD_STPWLEVEL_A;
	if ((sc->adapter_control & CFSTPWLEVEL) != 0)
		ahd->flags |= AHD_STPWLEVEL_A;

	return (0);
}

/*
 * Parse device configuration information.
 */
int
ahd_parse_vpddata(struct ahd_softc *ahd, struct vpd_config *vpd)
{
	int error;

	/*
	 * NOTE: ahd_verify_vpd_cksum() returns 0 when the checksum
	 * does NOT match, hence EINVAL on a zero return.
	 */
	error = ahd_verify_vpd_cksum(vpd);
	if (error == 0)
		return (EINVAL);
	if ((vpd->bios_flags & VPDBOOTHOST) != 0)
		ahd->flags |= AHD_BOOT_CHANNEL;
	return (0);
}

/*
 * Enable or disable chip interrupt delivery, keeping the cached
 * pause/unpause HCNTRL values in sync with the INTEN bit.
 */
void
ahd_intr_enable(struct ahd_softc *ahd, int enable)
{
	u_int hcntrl;

	hcntrl = ahd_inb(ahd, HCNTRL);
	hcntrl &= ~INTEN;
	ahd->pause &= ~INTEN;
	ahd->unpause &= ~INTEN;
	if (enable) {
		hcntrl |= INTEN;
		ahd->pause |= INTEN;
		ahd->unpause |= INTEN;
	}
	ahd_outb(ahd, HCNTRL, hcntrl);
}

/*
 * Clamp and program the interrupt coalescing parameters.
 * The MAXCMDS/MINCMDS registers take negated counts; presumably the
 * sequencer counts up toward zero -- TODO confirm against firmware.
 */
static void
ahd_update_coalescing_values(struct ahd_softc *ahd, u_int timer,
			     u_int maxcmds, u_int mincmds)
{
	if (timer > AHD_TIMER_MAX_US)
		timer = AHD_TIMER_MAX_US;
	ahd->int_coalescing_timer = timer;

	if (maxcmds > AHD_INT_COALESCING_MAXCMDS_MAX)
		maxcmds = AHD_INT_COALESCING_MAXCMDS_MAX;
	if (mincmds > AHD_INT_COALESCING_MINCMDS_MAX)
		mincmds = AHD_INT_COALESCING_MINCMDS_MAX;
	ahd->int_coalescing_maxcmds = maxcmds;
	ahd_outw(ahd, INT_COALESCING_TIMER, timer / AHD_TIMER_US_PER_TICK);
	ahd_outb(ahd, INT_COALESCING_MAXCMDS, -maxcmds);
	ahd_outb(ahd, INT_COALESCING_MINCMDS, -mincmds);
}

/*
 * Toggle interrupt coalescing via the host/sequencer mailbox and
 * drain any completions that raced with the change.
 */
static void
ahd_enable_coalescing(struct ahd_softc *ahd, int enable)
{
	ahd->hs_mailbox &= ~ENINT_COALESCE;
	if (enable)
		ahd->hs_mailbox |= ENINT_COALESCE;
	ahd_outb(ahd, HS_MAILBOX, ahd->hs_mailbox);
	ahd_flush_device_writes(ahd);
	ahd_run_qoutfifo(ahd);
}

/*
 * Ensure that the card is paused in a location
 * outside of all critical sections and that all
 * pending work is completed prior to returning.
 * This routine should only be called from outside
 * an interrupt context.
 */
void
ahd_pause_and_flushwork(struct ahd_softc *ahd)
{
	u_int intstat;
	u_int maxloops;

	maxloops = 1000;
	ahd->flags |= AHD_ALL_INTERRUPTS;
	ahd_pause(ahd);
	/*
	 * Freeze the outgoing selections.  We do this only
	 * until we are safely paused without further selections
	 * pending.
	 */
	ahd->qfreeze_cnt--;
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);
	ahd_outb(ahd, SEQ_FLAGS2,
		 ahd_inb(ahd, SEQ_FLAGS2) | SELECTOUT_QFROZEN);
	do {
		ahd_unpause(ahd);
		/*
		 * Give the sequencer some time to service
		 * any active selections.
		 */
		ahd_delay(500);

		ahd_intr(ahd);
		ahd_pause(ahd);
		intstat = ahd_inb(ahd, INTSTAT);
		if ((intstat & INT_PEND) == 0) {
			ahd_clear_critical_section(ahd);
			intstat = ahd_inb(ahd, INTSTAT);
		}
		/*
		 * Loop until no interrupt is pending and no selection
		 * is in progress, or we exhaust maxloops.  An INTSTAT
		 * of 0xFF on a removable controller means the card
		 * has gone away.
		 */
	} while (--maxloops
	      && (intstat != 0xFF || (ahd->features & AHD_REMOVABLE) == 0)
	      && ((intstat & INT_PEND) != 0
	       || (ahd_inb(ahd, SCSISEQ0) & ENSELO) != 0
	       || (ahd_inb(ahd, SSTAT0) & (SELDO|SELINGO)) != 0));

	if (maxloops == 0) {
		printk("Infinite interrupt loop, INTSTAT = %x",
		       ahd_inb(ahd, INTSTAT));
	}
	/* Undo the temporary qfreeze taken above. */
	ahd->qfreeze_cnt++;
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);

	ahd_flush_qoutfifo(ahd);

	ahd->flags &= ~AHD_ALL_INTERRUPTS;
}

#ifdef CONFIG_PM
/*
 * Power-management suspend: quiesce the chip.  Fails with EBUSY
 * if transactions are still outstanding.
 */
int
ahd_suspend(struct ahd_softc *ahd)
{
	ahd_pause_and_flushwork(ahd);

	if (LIST_FIRST(&ahd->pending_scbs) != NULL) {
		ahd_unpause(ahd);
		return (EBUSY);
	}
	ahd_shutdown(ahd);
	return (0);
}

/*
 * Power-management resume: full chip reset followed by restart.
 */
void
ahd_resume(struct ahd_softc *ahd)
{
	ahd_reset(ahd, /*reinit*/TRUE);
	ahd_intr_enable(ahd, TRUE);
	ahd_restart(ahd);
}
#endif

/************************** Busy Target Table *********************************/
/*
 * Set SCBPTR to the SCB that contains the busy
 * table entry for TCL.  Return the offset into
 * the SCB that contains the entry for TCL.
 * saved_scbid is dereferenced and set to the
 * scbid that should be restored once manipulation
 * of the TCL entry is complete.
 */
static inline u_int
ahd_index_busy_tcl(struct ahd_softc *ahd, u_int *saved_scbid, u_int tcl)
{
	/*
	 * Index to the SCB that contains the busy entry.
	 */
	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	*saved_scbid = ahd_get_scbptr(ahd);
	ahd_set_scbptr(ahd, TCL_LUN(tcl)
		     | ((TCL_TARGET_OFFSET(tcl) & 0xC) << 4));

	/*
	 * And now calculate the SCB offset to the entry.
	 * Each entry is 2 bytes wide, hence the
	 * multiplication by 2.
	 */
	return (((TCL_TARGET_OFFSET(tcl) & 0x3) << 1)
		+ SCB_DISCONNECTED_LISTS);
}

/*
 * Return the untagged transaction id for a given target/channel lun.
 */
static u_int
ahd_find_busy_tcl(struct ahd_softc *ahd, u_int tcl)
{
	u_int scbid;
	u_int scb_offset;
	u_int saved_scbptr;

	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
	scbid = ahd_inw_scbram(ahd, scb_offset);
	ahd_set_scbptr(ahd, saved_scbptr);
	return (scbid);
}

/*
 * Record scbid as the untagged transaction owning TCL in the
 * on-chip busy target table.  SCBPTR is preserved.
 */
static void
ahd_busy_tcl(struct ahd_softc *ahd, u_int tcl, u_int scbid)
{
	u_int scb_offset;
	u_int saved_scbptr;

	scb_offset = ahd_index_busy_tcl(ahd, &saved_scbptr, tcl);
	ahd_outw(ahd, scb_offset, scbid);
	ahd_set_scbptr(ahd, saved_scbptr);
}

/************************** SCB and SCB queue management **********************/
/*
 * Return non-zero when the given SCB matches the target/channel/lun/tag
 * description.  Wildcards (ALL_CHANNELS, CAM_TARGET_WILDCARD,
 * CAM_LUN_WILDCARD, SCB_LIST_NULL) match anything in their position.
 */
static int
ahd_match_scb(struct ahd_softc *ahd, struct scb *scb, int target,
	      char channel, int lun, u_int tag, role_t role)
{
	int targ = SCB_GET_TARGET(ahd, scb);
	char chan = SCB_GET_CHANNEL(ahd, scb);
	int slun = SCB_GET_LUN(scb);
	int match;

	match = ((chan == channel) || (channel == ALL_CHANNELS));
	if (match != 0)
		match = ((targ == target) || (target == CAM_TARGET_WILDCARD));
	if (match != 0)
		match = ((lun == slun) || (lun == CAM_LUN_WILDCARD));
	if (match != 0) {
#ifdef AHD_TARGET_MODE
		int group;

		group = XPT_FC_GROUP(scb->io_ctx->ccb_h.func_code);
		if (role == ROLE_INITIATOR) {
			match = (group != XPT_FC_GROUP_TMODE)
			      && ((tag == SCB_GET_TAG(scb))
			       || (tag == SCB_LIST_NULL));
		} else if (role == ROLE_TARGET) {
			match = (group == XPT_FC_GROUP_TMODE)
			      && ((tag == scb->io_ctx->csio.tag_id)
			       || (tag == SCB_LIST_NULL));
		}
#else /* !AHD_TARGET_MODE */
		match = ((tag == SCB_GET_TAG(scb))
		      || (tag == SCB_LIST_NULL));
#endif /* AHD_TARGET_MODE */
	}

	return match;
}

/*
 * Freeze the device queue for the device addressed by this SCB,
 * completing (with requeue status) anything for it still sitting
 * in the qinfifo.
 */
static void
ahd_freeze_devq(struct ahd_softc *ahd, struct scb *scb)
{
	int	target;
	char	channel;
	int	lun;

	target = SCB_GET_TARGET(ahd, scb);
	lun = SCB_GET_LUN(scb);
	channel = SCB_GET_CHANNEL(ahd, scb);

	ahd_search_qinfifo(ahd, target, channel, lun,
			   /*tag*/SCB_LIST_NULL, ROLE_UNKNOWN,
			   CAM_REQUEUE_REQ, SEARCH_COMPLETE);
	ahd_platform_freeze_devq(ahd, scb);
}

/*
 * Append an SCB to the tail of the qinfifo and notify the sequencer.
 */
void
ahd_qinfifo_requeue_tail(struct ahd_softc *ahd, struct scb *scb)
{
	struct scb *prev_scb;
	ahd_mode_state saved_modes;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
	prev_scb = NULL;
	if (ahd_qinfifo_count(ahd) != 0) {
		u_int prev_tag;
		u_int prev_pos;

		prev_pos = AHD_QIN_WRAP(ahd->qinfifonext - 1);
		prev_tag = ahd->qinfifo[prev_pos];
		prev_scb = ahd_lookup_scb(ahd, prev_tag);
	}
	ahd_qinfifo_requeue(ahd, prev_scb, scb);
	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);
	ahd_restore_modes(ahd, saved_modes);
}

/*
 * Link scb after prev_scb in the qinfifo.  With a NULL prev_scb the
 * SCB becomes the next one the sequencer will DMA in.
 */
static void
ahd_qinfifo_requeue(struct ahd_softc *ahd, struct scb *prev_scb,
		    struct scb *scb)
{
	if (prev_scb == NULL) {
		uint32_t busaddr;

		busaddr = ahd_le32toh(scb->hscb->hscb_busaddr);
		ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);
	} else {
		prev_scb->hscb->next_hscb_busaddr = scb->hscb->hscb_busaddr;
		ahd_sync_scb(ahd, prev_scb,
			     BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
	}
	ahd->qinfifo[AHD_QIN_WRAP(ahd->qinfifonext)] = SCB_GET_TAG(scb);
	ahd->qinfifonext++;
	scb->hscb->next_hscb_busaddr = ahd->next_queued_hscb->hscb_busaddr;
	ahd_sync_scb(ahd, scb, BUS_DMASYNC_PREREAD|BUS_DMASYNC_PREWRITE);
}

/*
 * Number of SCBs queued to the sequencer that it has not yet consumed,
 * accounting for qinfifo wrap-around.
 */
static int
ahd_qinfifo_count(struct ahd_softc *ahd)
{
	u_int qinpos;
	u_int wrap_qinpos;
	u_int wrap_qinfifonext;

	AHD_ASSERT_MODES(ahd, AHD_MODE_CCHAN_MSK, AHD_MODE_CCHAN_MSK);
	qinpos = ahd_get_snscb_qoff(ahd);
	wrap_qinpos = AHD_QIN_WRAP(qinpos);
	wrap_qinfifonext = AHD_QIN_WRAP(ahd->qinfifonext);

	if (wrap_qinfifonext >= wrap_qinpos)
		return (wrap_qinfifonext - wrap_qinpos);
	else
		return (wrap_qinfifonext
		      + ARRAY_SIZE(ahd->qinfifo) - wrap_qinpos);
}

/*
 * Recompute the CMDS_PENDING count the sequencer uses for interrupt
 * coalescing from the pending SCB list.
 */
static void
ahd_reset_cmds_pending(struct ahd_softc *ahd)
{
	struct	scb *scb;
	ahd_mode_state saved_modes;
	u_int	pending_cmds;

	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Don't count any commands as outstanding that the
	 * sequencer has already marked for completion.
	 */
	ahd_flush_qoutfifo(ahd);

	pending_cmds = 0;
	LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
		pending_cmds++;
	}
	/* SCBs still in the qinfifo are not yet "pending" to the chip. */
	ahd_outw(ahd, CMDS_PENDING, pending_cmds - ahd_qinfifo_count(ahd));
	ahd_restore_modes(ahd, saved_modes);
	ahd->flags &= ~AHD_UPDATE_PEND_CMDS;
}

/*
 * Complete an SCB with the given status, freezing it first when the
 * final status is not CAM_REQ_CMP.  An already-modified transaction
 * status is left untouched.
 */
static void
ahd_done_with_status(struct ahd_softc *ahd, struct scb *scb, uint32_t status)
{
	cam_status ostat;
	cam_status cstat;

	ostat = ahd_get_transaction_status(scb);
	if (ostat == CAM_REQ_INPROG)
		ahd_set_transaction_status(scb, status);
	cstat = ahd_get_transaction_status(scb);
	if (cstat != CAM_REQ_CMP)
		ahd_freeze_scb(scb);
	ahd_done(ahd, scb);
}

/*
 * Search the qinfifo and the waiting-for-selection lists for SCBs
 * matching target/channel/lun/tag and apply `action` (complete,
 * remove, print, or count) to each match.  Returns the match count.
 * The sequencer must be paused by the caller.
 */
int
ahd_search_qinfifo(struct ahd_softc *ahd, int target, char channel,
		   int lun, u_int tag, role_t role, uint32_t status,
		   ahd_search_action action)
{
	struct scb	*scb;
	struct scb	*mk_msg_scb;
	struct scb	*prev_scb;
	ahd_mode_state	 saved_modes;
	u_int		 qinstart;
	u_int		 qinpos;
	u_int		 qintail;
	u_int		 tid_next;
	u_int		 tid_prev;
	u_int		 scbid;
	u_int		 seq_flags2;
	u_int		 savedscbptr;
	uint32_t	 busaddr;
	int		 found;
	int		 targets;

	/* Must be in CCHAN mode */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);

	/*
	 * Halt any pending SCB DMA.  The sequencer will reinitiate
	 * this dma if the qinfifo is not empty once we unpause.
	 */
	if ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN|CCSCBDIR))
	 == (CCARREN|CCSCBEN|CCSCBDIR)) {
		ahd_outb(ahd, CCSCBCTL,
			 ahd_inb(ahd, CCSCBCTL) & ~(CCARREN|CCSCBEN));
		while ((ahd_inb(ahd, CCSCBCTL) & (CCARREN|CCSCBEN)) != 0)
			;
	}
	/* Determine sequencer's position in the qinfifo. */
	qintail = AHD_QIN_WRAP(ahd->qinfifonext);
	qinstart = ahd_get_snscb_qoff(ahd);
	qinpos = AHD_QIN_WRAP(qinstart);
	found = 0;
	prev_scb = NULL;

	if (action == SEARCH_PRINT) {
		printk("qinstart = %d qinfifonext = %d\nQINFIFO:",
		       qinstart, ahd->qinfifonext);
	}

	/*
	 * Start with an empty queue.  Entries that are not chosen
	 * for removal will be re-added to the queue as we go.
	 */
	ahd->qinfifonext = qinstart;
	busaddr = ahd_le32toh(ahd->next_queued_hscb->hscb_busaddr);
	ahd_outl(ahd, NEXT_QUEUED_SCB_ADDR, busaddr);

	while (qinpos != qintail) {
		scb = ahd_lookup_scb(ahd, ahd->qinfifo[qinpos]);
		if (scb == NULL) {
			printk("qinpos = %d, SCB index = %d\n",
			       qinpos, ahd->qinfifo[qinpos]);
			panic("Loop 1\n");
		}

		if (ahd_match_scb(ahd, scb, target, channel,
				  lun, tag, role)) {
			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
				if ((scb->flags & SCB_ACTIVE) == 0)
					printk("Inactive SCB in qinfifo\n");
				ahd_done_with_status(ahd, scb, status);
				/* FALLTHROUGH */
			case SEARCH_REMOVE:
				/* Matched entries are simply not re-added. */
				break;
			case SEARCH_PRINT:
				printk(" 0x%x", ahd->qinfifo[qinpos]);
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				ahd_qinfifo_requeue(ahd, prev_scb, scb);
				prev_scb = scb;
				break;
			}
		} else {
			/* Non-matching SCBs are put back on the queue. */
			ahd_qinfifo_requeue(ahd, prev_scb, scb);
			prev_scb = scb;
		}
		qinpos = AHD_QIN_WRAP(qinpos+1);
	}

	ahd_set_hnscb_qoff(ahd, ahd->qinfifonext);

	if (action == SEARCH_PRINT)
		printk("\nWAITING_TID_QUEUES:\n");

	/*
	 * Search waiting for selection lists.  We traverse the
	 * list of "their ids" waiting for selection and, if
	 * appropriate, traverse the SCBs of each "their id"
	 * looking for matches.
	 */
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
	seq_flags2 = ahd_inb(ahd, SEQ_FLAGS2);
	if ((seq_flags2 & PENDING_MK_MESSAGE) != 0) {
		scbid = ahd_inw(ahd, MK_MESSAGE_SCB);
		mk_msg_scb = ahd_lookup_scb(ahd, scbid);
	} else
		mk_msg_scb = NULL;
	savedscbptr = ahd_get_scbptr(ahd);
	tid_next = ahd_inw(ahd, WAITING_TID_HEAD);
	tid_prev = SCB_LIST_NULL;
	targets = 0;
	for (scbid = tid_next; !SCBID_IS_NULL(scbid); scbid = tid_next) {
		u_int tid_head;
		u_int tid_tail;

		targets++;
		if (targets > AHD_NUM_TARGETS)
			panic("TID LIST LOOP");

		if (scbid >= ahd->scb_data.numscbs) {
			printk("%s: Waiting TID List inconsistency. "
			       "SCB index == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: SCB = 0x%x Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting TID List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		tid_next = ahd_inw_scbram(ahd, SCB_NEXT2);
		if (ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN) == 0) {
			tid_prev = scbid;
			continue;
		}

		/*
		 * We found a list of scbs that needs to be searched.
		 */
		if (action == SEARCH_PRINT)
			printk(" %d ( ", SCB_GET_TARGET(ahd, scb));
		tid_head = scbid;
		found += ahd_search_scb_list(ahd, target, channel,
					     lun, tag, role, status,
					     action, &tid_head, &tid_tail,
					     SCB_GET_TARGET(ahd, scb));
		/*
		 * Check any MK_MESSAGE SCB that is still waiting to
		 * enter this target's waiting for selection queue.
		 */
		if (mk_msg_scb != NULL
		 && ahd_match_scb(ahd, mk_msg_scb, target, channel,
				  lun, tag, role)) {

			/*
			 * We found an scb that needs to be acted on.
			 */
			found++;
			switch (action) {
			case SEARCH_COMPLETE:
				if ((mk_msg_scb->flags & SCB_ACTIVE) == 0)
					printk("Inactive SCB pending MK_MSG\n");
				ahd_done_with_status(ahd, mk_msg_scb, status);
				/* FALLTHROUGH */
			case SEARCH_REMOVE:
			{
				u_int tail_offset;

				printk("Removing MK_MSG scb\n");

				/*
				 * Reset our tail to the tail of the
				 * main per-target list.
				 */
				tail_offset = WAITING_SCB_TAILS
				    + (2 * SCB_GET_TARGET(ahd, mk_msg_scb));
				ahd_outw(ahd, tail_offset, tid_tail);

				seq_flags2 &= ~PENDING_MK_MESSAGE;
				ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
				ahd_outw(ahd, CMDS_PENDING,
					 ahd_inw(ahd, CMDS_PENDING)-1);
				mk_msg_scb = NULL;
				break;
			}
			case SEARCH_PRINT:
				printk(" 0x%x", SCB_GET_TAG(scb));
				/* FALLTHROUGH */
			case SEARCH_COUNT:
				break;
			}
		}

		if (mk_msg_scb != NULL
		 && SCBID_IS_NULL(tid_head)
		 && ahd_match_scb(ahd, scb, target, channel, CAM_LUN_WILDCARD,
				  SCB_LIST_NULL, ROLE_UNKNOWN)) {

			/*
			 * When removing the last SCB for a target
			 * queue with a pending MK_MESSAGE scb, we
			 * must queue the MK_MESSAGE scb.
			 */
			printk("Queueing mk_msg_scb\n");
			tid_head = ahd_inw(ahd, MK_MESSAGE_SCB);
			seq_flags2 &= ~PENDING_MK_MESSAGE;
			ahd_outb(ahd, SEQ_FLAGS2, seq_flags2);
			mk_msg_scb = NULL;
		}
		if (tid_head != scbid)
			ahd_stitch_tid_list(ahd, tid_prev, tid_head, tid_next);
		if (!SCBID_IS_NULL(tid_head))
			tid_prev = tid_head;
		if (action == SEARCH_PRINT)
			printk(")\n");
	}

	/* Restore saved state. */
	ahd_set_scbptr(ahd, savedscbptr);
	ahd_restore_modes(ahd, saved_modes);
	return (found);
}

/*
 * Search one target's waiting-for-selection SCB list for matches,
 * applying `action` to each.  *list_head/*list_tail are updated to
 * reflect any removals.  Returns the number of matches found.
 */
static int
ahd_search_scb_list(struct ahd_softc *ahd, int target, char channel,
		    int lun, u_int tag, role_t role, uint32_t status,
		    ahd_search_action action, u_int *list_head,
		    u_int *list_tail, u_int tid)
{
	struct	scb *scb;
	u_int	scbid;
	u_int	next;
	u_int	prev;
	int	found;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	found = 0;
	prev = SCB_LIST_NULL;
	next = *list_head;
	*list_tail = SCB_LIST_NULL;
	for (scbid = next; !SCBID_IS_NULL(scbid); scbid = next) {
		if (scbid >= ahd->scb_data.numscbs) {
			printk("%s:SCB List inconsistency. "
			       "SCB == 0x%x, yet numscbs == 0x%x.",
			       ahd_name(ahd), scbid, ahd->scb_data.numscbs);
			ahd_dump_card_state(ahd);
			panic("for safety");
		}
		scb = ahd_lookup_scb(ahd, scbid);
		if (scb == NULL) {
			printk("%s: SCB = %d Not Active!\n",
			       ahd_name(ahd), scbid);
			panic("Waiting List traversal\n");
		}
		ahd_set_scbptr(ahd, scbid);
		*list_tail = scbid;
		next = ahd_inw_scbram(ahd, SCB_NEXT);
		if (ahd_match_scb(ahd, scb, target, channel,
				  lun, SCB_LIST_NULL, role) == 0) {
			prev = scbid;
			continue;
		}
		found++;
		switch (action) {
		case SEARCH_COMPLETE:
			if ((scb->flags & SCB_ACTIVE) == 0)
				printk("Inactive SCB in Waiting List\n");
			ahd_done_with_status(ahd, scb, status);
			/* FALLTHROUGH */
		case SEARCH_REMOVE:
			ahd_rem_wscb(ahd, scbid, prev, next, tid);
			*list_tail = prev;
			if (SCBID_IS_NULL(prev))
				*list_head = next;
			break;
		case SEARCH_PRINT:
			printk("0x%x ", scbid);
			/* FALLTHROUGH */
		case SEARCH_COUNT:
			prev = scbid;
			break;
		}
		if (found > AHD_SCB_MAX)
			panic("SCB LIST LOOP");
	}
	if (action == SEARCH_COMPLETE
	 || action == SEARCH_REMOVE)
		ahd_outw(ahd, CMDS_PENDING,
			 ahd_inw(ahd, CMDS_PENDING) - found);
	return (found);
}

/*
 * Splice the per-target list headed by tid_cur (or bypass it entirely
 * when tid_cur is SCB_LIST_NULL) between tid_prev and tid_next in the
 * waiting-for-selection TID list, fixing up head/tail registers.
 */
static void
ahd_stitch_tid_list(struct ahd_softc *ahd, u_int tid_prev,
		    u_int tid_cur, u_int tid_next)
{
	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);

	if (SCBID_IS_NULL(tid_cur)) {

		/* Bypass current TID list */
		if (SCBID_IS_NULL(tid_prev)) {
			ahd_outw(ahd, WAITING_TID_HEAD, tid_next);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_next);
		}
		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_prev);
	} else {

		/* Stitch through tid_cur */
		if (SCBID_IS_NULL(tid_prev)) {
			ahd_outw(ahd, WAITING_TID_HEAD, tid_cur);
		} else {
			ahd_set_scbptr(ahd, tid_prev);
			ahd_outw(ahd, SCB_NEXT2, tid_cur);
		}
		ahd_set_scbptr(ahd, tid_cur);
		ahd_outw(ahd, SCB_NEXT2, tid_next);

		if (SCBID_IS_NULL(tid_next))
			ahd_outw(ahd, WAITING_TID_TAIL, tid_cur);
	}
}

/*
 * Manipulate the waiting for selection list and return the
 * scb that follows the one that we remove.
 */
static u_int
ahd_rem_wscb(struct ahd_softc *ahd, u_int scbid,
	     u_int prev, u_int next, u_int tid)
{
	u_int tail_offset;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	if (!SCBID_IS_NULL(prev)) {
		ahd_set_scbptr(ahd, prev);
		ahd_outw(ahd, SCB_NEXT, next);
	}

	/*
	 * SCBs that have MK_MESSAGE set in them may
	 * cause the tail pointer to be updated without
	 * setting the next pointer of the previous tail.
	 * Only clear the tail if the removed SCB was
	 * the tail.
	 */
	tail_offset = WAITING_SCB_TAILS + (2 * tid);
	if (SCBID_IS_NULL(next)
	 && ahd_inw(ahd, tail_offset) == scbid)
		ahd_outw(ahd, tail_offset, prev);

	ahd_add_scb_to_free_list(ahd, scbid);
	return (next);
}

/*
 * Add the SCB as selected by SCBPTR onto the on chip list of
 * free hardware SCBs.  This list is empty/unused if we are not
 * performing SCB paging.
 */
static void
ahd_add_scb_to_free_list(struct ahd_softc *ahd, u_int scbid)
{
	/*
	 * XXX Need some other mechanism to designate "free".
	 * Intentionally a no-op today; the invalidation below was
	 * disabled and is kept inside this comment for reference:
	 *
	 * Invalidate the tag so that our abort
	 * routines don't think it's active.
	ahd_outb(ahd, SCB_TAG, SCB_LIST_NULL);
	 */
}

/******************************** Error Handling ******************************/
/*
 * Abort all SCBs that match the given description (target/channel/lun/tag),
 * setting their status to the passed in status if the status has not already
 * been modified from CAM_REQ_INPROG.  This routine assumes that the sequencer
 * is paused before it is called.
 */
static int
ahd_abort_scbs(struct ahd_softc *ahd, int target, char channel,
	       int lun, u_int tag, role_t role, uint32_t status)
{
	struct	scb *scbp;
	struct	scb *scbp_next;
	u_int	i, j;
	u_int	maxtarget;
	u_int	minlun;
	u_int	maxlun;
	int	found;
	ahd_mode_state saved_modes;

	/* restore this when we're done */
	saved_modes = ahd_save_modes(ahd);
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	found = ahd_search_qinfifo(ahd, target, channel, lun, SCB_LIST_NULL,
				   role, CAM_REQUEUE_REQ, SEARCH_COMPLETE);

	/*
	 * Clean out the busy target table for any untagged commands.
	 */
	i = 0;
	maxtarget = 16;
	if (target != CAM_TARGET_WILDCARD) {
		i = target;
		if (channel == 'B')
			i += 8;
		maxtarget = i + 1;
	}

	if (lun == CAM_LUN_WILDCARD) {
		minlun = 0;
		maxlun = AHD_NUM_LUNS_NONPKT;
	} else if (lun >= AHD_NUM_LUNS_NONPKT) {
		/* Packetized-only luns have no busy-table entries. */
		minlun = maxlun = 0;
	} else {
		minlun = lun;
		maxlun = lun + 1;
	}

	if (role != ROLE_TARGET) {
		for (; i < maxtarget; i++) {
			for (j = minlun; j < maxlun; j++) {
				u_int scbid;
				u_int tcl;

				tcl = BUILD_TCL_RAW(i, 'A', j);
				scbid = ahd_find_busy_tcl(ahd, tcl);
				scbp = ahd_lookup_scb(ahd, scbid);
				if (scbp == NULL
				 || ahd_match_scb(ahd, scbp, target, channel,
						  lun, tag, role) == 0)
					continue;
				ahd_unbusy_tcl(ahd,
					       BUILD_TCL_RAW(i, 'A', j));
			}
		}
	}

	/*
	 * Don't abort commands that have already completed,
	 * but haven't quite made it up to the host yet.
	 */
	ahd_flush_qoutfifo(ahd);

	/*
	 * Go through the pending CCB list and look for
	 * commands for this target that are still active.
	 * These are other tagged commands that were
	 * disconnected when the reset occurred.
	 */
	scbp_next = LIST_FIRST(&ahd->pending_scbs);
	while (scbp_next != NULL) {
		scbp = scbp_next;
		/* Fetch next first: ahd_done() unlinks scbp. */
		scbp_next = LIST_NEXT(scbp, pending_links);
		if (ahd_match_scb(ahd, scbp, target, channel,
				  lun, tag, role)) {
			cam_status ostat;

			ostat = ahd_get_transaction_status(scbp);
			if (ostat == CAM_REQ_INPROG)
				ahd_set_transaction_status(scbp, status);
			if (ahd_get_transaction_status(scbp) != CAM_REQ_CMP)
				ahd_freeze_scb(scbp);
			if ((scbp->flags & SCB_ACTIVE) == 0)
				printk("Inactive SCB on pending list\n");
			ahd_done(ahd, scbp);
			found++;
		}
	}
	ahd_restore_modes(ahd, saved_modes);
	ahd_platform_abort_scbs(ahd, target, channel, lun, tag, role, status);
	ahd->flags |= AHD_UPDATE_PEND_CMDS;
	return found;
}

/*
 * Assert SCSIRSTO to reset the SCSI bus we are attached to.
 * SCSI-reset interrupts are masked while we drive the reset.
 */
static void
ahd_reset_current_bus(struct ahd_softc *ahd)
{
	uint8_t scsiseq;

	AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) & ~ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ0) & ~(ENSELO|ENARBO|SCSIRSTO);
	ahd_outb(ahd, SCSISEQ0, scsiseq | SCSIRSTO);
	ahd_flush_device_writes(ahd);
	ahd_delay(AHD_BUSRESET_DELAY);
	/* Turn off the bus reset */
	ahd_outb(ahd, SCSISEQ0, scsiseq);
	ahd_flush_device_writes(ahd);
	ahd_delay(AHD_BUSRESET_DELAY);
	if ((ahd->bugs & AHD_SCSIRST_BUG) != 0) {
		/*
		 * 2A Razor #474
		 * Certain chip state is not cleared for
		 * SCSI bus resets that we initiate, so
		 * we must reset the chip.
		 */
		ahd_reset(ahd, /*reinit*/TRUE);
		ahd_intr_enable(ahd, /*enable*/TRUE);
		AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK);
	}

	ahd_clear_intstat(ahd);
}

/*
 * Reset the named channel: quiesce the chip and its DMA FIFOs,
 * optionally drive a bus reset, abort every transaction on the bus
 * with CAM_SCSI_BUS_RESET status, and renegotiate from async/narrow.
 * Returns the number of transactions that were aborted.
 */
int
ahd_reset_channel(struct ahd_softc *ahd, char channel, int initiate_reset)
{
	struct	ahd_devinfo caminfo;
	u_int	initiator;
	u_int	target;
	u_int	max_scsiid;
	int	found;
	u_int	fifo;
	u_int	next_fifo;
	uint8_t scsiseq;

	/*
	 * Check if the last bus reset is cleared
	 */
	if (ahd->flags & AHD_BUS_RESET_ACTIVE) {
		printk("%s: bus reset still active\n",
		       ahd_name(ahd));
		return 0;
	}
	ahd->flags |= AHD_BUS_RESET_ACTIVE;

	ahd->pending_device = NULL;

	ahd_compile_devinfo(&caminfo,
			    CAM_TARGET_WILDCARD,
			    CAM_TARGET_WILDCARD,
			    CAM_LUN_WILDCARD,
			    channel, ROLE_UNKNOWN);
	ahd_pause(ahd);

	/* Make sure the sequencer is in a safe location. */
	ahd_clear_critical_section(ahd);

	/*
	 * Run our command complete fifos to ensure that we perform
	 * completion processing on any commands that 'completed'
	 * before the reset occurred.
	 */
	ahd_run_qoutfifo(ahd);
#ifdef AHD_TARGET_MODE
	if ((ahd->flags & AHD_TARGETROLE) != 0) {
		ahd_run_tqinfifo(ahd, /*paused*/TRUE);
	}
#endif
	ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);

	/*
	 * Disable selections so no automatic hardware
	 * functions will modify chip state.
	 */
	ahd_outb(ahd, SCSISEQ0, 0);
	ahd_outb(ahd, SCSISEQ1, 0);

	/*
	 * Safely shut down our DMA engines.  Always start with
	 * the FIFO that is not currently active (if any are
	 * actively connected).
	 */
	next_fifo = fifo = ahd_inb(ahd, DFFSTAT) & CURRFIFO;
	if (next_fifo > CURRFIFO_1)
		/* If disconnected, arbitrarily start with FIFO1. */
		next_fifo = fifo = 0;
	do {
		next_fifo ^= CURRFIFO_1;
		ahd_set_modes(ahd, next_fifo, next_fifo);
		ahd_outb(ahd, DFCNTRL,
			 ahd_inb(ahd, DFCNTRL) & ~(SCSIEN|HDMAEN));
		/* Wait for the host DMA engine to acknowledge shutdown. */
		while ((ahd_inb(ahd, DFCNTRL) & HDMAENACK) != 0)
			ahd_delay(10);
		/*
		 * Set CURRFIFO to the now inactive channel.
		 */
		ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
		ahd_outb(ahd, DFFSTAT, next_fifo);
	} while (next_fifo != fifo);

	/*
	 * Reset the bus if we are initiating this reset
	 */
	ahd_clear_msg_state(ahd);
	ahd_outb(ahd, SIMODE1,
		 ahd_inb(ahd, SIMODE1) & ~(ENBUSFREE|ENSCSIRST));

	if (initiate_reset)
		ahd_reset_current_bus(ahd);

	ahd_clear_intstat(ahd);

	/*
	 * Clean up all the state information for the
	 * pending transactions on this bus.
	 */
	found = ahd_abort_scbs(ahd, CAM_TARGET_WILDCARD, channel,
			       CAM_LUN_WILDCARD, SCB_LIST_NULL,
			       ROLE_UNKNOWN, CAM_SCSI_BUS_RESET);

	/*
	 * Cleanup anything left in the FIFOs.
	 */
	ahd_clear_fifo(ahd, 0);
	ahd_clear_fifo(ahd, 1);

	/*
	 * Clear SCSI interrupt status
	 */
	ahd_outb(ahd, CLRSINT1, CLRSCSIRSTI);

	/*
	 * Reenable selections
	 */
	ahd_outb(ahd, SIMODE1, ahd_inb(ahd, SIMODE1) | ENSCSIRST);
	scsiseq = ahd_inb(ahd, SCSISEQ_TEMPLATE);
	ahd_outb(ahd, SCSISEQ1, scsiseq & (ENSELI|ENRSELI|ENAUTOATNP));

	max_scsiid = (ahd->features & AHD_WIDE) ? 15 : 7;
#ifdef AHD_TARGET_MODE
	/*
	 * Send an immediate notify ccb to all target more peripheral
	 * drivers affected by this action.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		struct ahd_tmode_tstate* tstate;
		u_int lun;

		tstate = ahd->enabled_targets[target];
		if (tstate == NULL)
			continue;
		for (lun = 0; lun < AHD_NUM_LUNS; lun++) {
			struct ahd_tmode_lstate* lstate;

			lstate = tstate->enabled_luns[lun];
			if (lstate == NULL)
				continue;

			ahd_queue_lstate_event(ahd, lstate,
					       CAM_TARGET_WILDCARD,
					       EVENT_TYPE_BUS_RESET,
					       /*arg*/0);
			ahd_send_lstate_events(ahd, lstate);
		}
	}
#endif
	/*
	 * Revert to async/narrow transfers until we renegotiate.
	 */
	for (target = 0; target <= max_scsiid; target++) {
		if (ahd->enabled_targets[target] == NULL)
			continue;
		for (initiator = 0; initiator <= max_scsiid; initiator++) {
			struct ahd_devinfo devinfo;

			ahd_compile_devinfo(&devinfo, target,
					    initiator, CAM_LUN_WILDCARD,
					    'A', ROLE_UNKNOWN);
			ahd_set_width(ahd, &devinfo, MSG_EXT_WDTR_BUS_8_BIT,
				      AHD_TRANS_CUR, /*paused*/TRUE);
			ahd_set_syncrate(ahd, &devinfo, /*period*/0,
					 /*offset*/0, /*ppr_options*/0,
					 AHD_TRANS_CUR, /*paused*/TRUE);
		}
	}

	/* Notify the XPT that a bus reset occurred */
	ahd_send_async(ahd, caminfo.channel, CAM_TARGET_WILDCARD,
		       CAM_LUN_WILDCARD, AC_BUS_RESET);

	ahd_restart(ahd);

	return (found);
}

/**************************** Statistics Processing ***************************/
/*
 * Periodic timer callback: enable interrupt coalescing when the
 * recent command-completion rate crosses the start threshold and
 * disable it again below the stop threshold, then age the per-bucket
 * completion counters and re-arm the timer.
 */
static void
ahd_stat_timer(void *arg)
{
	struct	ahd_softc *ahd = arg;
	u_long	s;
	int	enint_coal;

	ahd_lock(ahd, &s);

	enint_coal = ahd->hs_mailbox & ENINT_COALESCE;
	if (ahd->cmdcmplt_total > ahd->int_coalescing_threshold)
		enint_coal |= ENINT_COALESCE;
	else if (ahd->cmdcmplt_total < ahd->int_coalescing_stop_threshold)
		enint_coal &= ~ENINT_COALESCE;

	if (enint_coal != (ahd->hs_mailbox & ENINT_COALESCE)) {
		ahd_enable_coalescing(ahd, enint_coal);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_INT_COALESCING) != 0)
			printk("%s: Interrupt coalescing "
			       "now %sabled. Cmds %d\n",
			       ahd_name(ahd),
			       (enint_coal & ENINT_COALESCE) ? "en" : "dis",
			       ahd->cmdcmplt_total);
#endif
	}

	ahd->cmdcmplt_bucket = (ahd->cmdcmplt_bucket+1) & (AHD_STAT_BUCKETS-1);
	ahd->cmdcmplt_total -= ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket];
	ahd->cmdcmplt_counts[ahd->cmdcmplt_bucket] = 0;
	ahd_timer_reset(&ahd->stat_timer, AHD_STAT_UPDATE_US,
			ahd_stat_timer, ahd);
	ahd_unlock(ahd, &s);
}

/****************************** Status Processing *****************************/
/*
 * Handle a non-zero SCSI status returned by a target: packetized
 * status IUs, CHECK CONDITION autosense issue, or plain completion.
 */
static void
ahd_handle_scsi_status(struct ahd_softc *ahd, struct scb *scb)
{
	struct	hardware_scb *hscb;
	int	paused;

	/*
	 * The sequencer freezes its select-out queue
	 * anytime a SCSI status error occurs.
	 * We must handle the error and increment our qfreeze count
	 * to allow the sequencer to continue.  We don't
	 * bother clearing critical sections here since all
	 * operations are on data structures that the sequencer
	 * is not touching once the queue is frozen.
	 */
	hscb = scb->hscb;

	if (ahd_is_paused(ahd)) {
		paused = 1;
	} else {
		paused = 0;
		ahd_pause(ahd);
	}

	/* Freeze the queue until the client sees the error. */
	ahd_freeze_devq(ahd, scb);
	ahd_freeze_scb(scb);
	ahd->qfreeze_cnt++;
	ahd_outw(ahd, KERNEL_QFREEZE_COUNT, ahd->qfreeze_cnt);

	if (paused == 0)
		ahd_unpause(ahd);

	/* Don't want to clobber the original sense code */
	if ((scb->flags & SCB_SENSE) != 0) {
		/*
		 * Clear the SCB_SENSE Flag and perform
		 * a normal command completion.
		 */
		scb->flags &= ~SCB_SENSE;
		ahd_set_transaction_status(scb, CAM_AUTOSENSE_FAIL);
		ahd_done(ahd, scb);
		return;
	}
	ahd_set_transaction_status(scb, CAM_SCSI_STATUS_ERROR);
	ahd_set_scsi_status(scb, hscb->shared_data.istatus.scsi_status);
	switch (hscb->shared_data.istatus.scsi_status) {
	case STATUS_PKT_SENSE:
	{
		struct scsi_status_iu_header *siu;

		/* Packetized status: parse the status IU. */
		ahd_sync_sense(ahd, scb, BUS_DMASYNC_POSTREAD);
		siu = (struct scsi_status_iu_header *)scb->sense_data;
		ahd_set_scsi_status(scb, siu->status);
#ifdef AHD_DEBUG
		if ((ahd_debug & AHD_SHOW_SENSE) != 0) {
			ahd_print_path(ahd, scb);
			printk("SCB 0x%x Received PKT Status of 0x%x\n",
			       SCB_GET_TAG(scb), siu->status);
			printk("\tflags = 0x%x, sense len = 0x%x, "
			       "pktfail = 0x%x\n",
			       siu->flags, scsi_4btoul(siu->sense_length),
			       scsi_4btoul(siu->pkt_failures_length));
		}
#endif
		if ((siu->flags & SIU_RSPVALID) != 0) {
			ahd_print_path(ahd, scb);
			if (scsi_4btoul(siu->pkt_failures_length) < 4) {
				printk("Unable to parse pkt_failures\n");
			} else {
				switch (SIU_PKTFAIL_CODE(siu)) {
				case SIU_PFC_NONE:
					printk("No packet failure found\n");
					break;
				case SIU_PFC_CIU_FIELDS_INVALID:
					printk("Invalid Command IU Field\n");
					break;
				case SIU_PFC_TMF_NOT_SUPPORTED:
					printk("TMF not supported\n");
					break;
				case SIU_PFC_TMF_FAILED:
					printk("TMF failed\n");
					break;
				case SIU_PFC_INVALID_TYPE_CODE:
					printk("Invalid L_Q Type code\n");
					break;
				case SIU_PFC_ILLEGAL_REQUEST:
					printk("Illegal request\n");
					/* FALLTHROUGH */
				default:
					break;
				}
			}
			if (siu->status == SCSI_STATUS_OK)
				ahd_set_transaction_status(scb,
							   CAM_REQ_CMP_ERR);
		}
		if ((siu->flags & SIU_SNSVALID) != 0) {
			scb->flags |= SCB_PKT_SENSE;
#ifdef AHD_DEBUG
			if ((ahd_debug & AHD_SHOW_SENSE) != 0)
				printk("Sense data available\n");
#endif
		}
		ahd_done(ahd, scb);
		break;
	}
	case SCSI_STATUS_CMD_TERMINATED:
	case SCSI_STATUS_CHECK_COND:
	{
		struct ahd_devinfo devinfo;
		struct ahd_dma_seg *sg;
		struct scsi_sense *sc;
		struct ahd_initiator_tinfo *targ_info;
		struct ahd_tmode_tstate *tstate;
		struct ahd_transinfo *tinfo;
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_SENSE) {
			ahd_print_path(ahd, scb);
			printk("SCB %d: requests Check Status\n",
			       SCB_GET_TAG(scb));
		}
#endif

		if (ahd_perform_autosense(scb) == 0)
			break;

		/*
		 * Re-issue this SCB as an untagged REQUEST SENSE
		 * command to fetch the sense data automatically.
		 */
		ahd_compile_devinfo(&devinfo, SCB_GET_OUR_ID(scb),
				    SCB_GET_TARGET(ahd, scb),
				    SCB_GET_LUN(scb),
				    SCB_GET_CHANNEL(ahd, scb),
				    ROLE_INITIATOR);
		targ_info = ahd_fetch_transinfo(ahd,
						devinfo.channel,
						devinfo.our_scsiid,
						devinfo.target,
						&tstate);
		tinfo = &targ_info->curr;
		sg = scb->sg_list;
		sc = (struct scsi_sense *)hscb->shared_data.idata.cdb;
		/*
		 * Save off the residual if there is one.
		 */
		ahd_update_residual(ahd, scb);
#ifdef AHD_DEBUG
		if (ahd_debug & AHD_SHOW_SENSE) {
			ahd_print_path(ahd, scb);
			printk("Sending Sense\n");
		}
#endif
		scb->sg_count = 0;
		sg = ahd_sg_setup(ahd, scb, sg,
				  ahd_get_sense_bufaddr(ahd, scb),
				  ahd_get_sense_bufsize(ahd, scb),
				  /*last*/TRUE);
		sc->opcode = REQUEST_SENSE;
		sc->byte2 = 0;
		/* Pre-SCSI-3 devices encode luns 0-7 in the CDB. */
		if (tinfo->protocol_version <= SCSI_REV_2
		 && SCB_GET_LUN(scb) < 8)
			sc->byte2 = SCB_GET_LUN(scb) << 5;
		sc->unused[0] = 0;
		sc->unused[1] = 0;
		sc->length = ahd_get_sense_bufsize(ahd, scb);
		sc->control = 0;

		/*
		 * We can't allow the target to disconnect.
		 * This will be an untagged transaction and
		 * having the target disconnect will make this
		 * transaction indistinguishable from outstanding
		 * tagged transactions.
		 */
		hscb->control = 0;

		/*
		 * This request sense could be because the
		 * device lost power or in some other
		 * way has lost our transfer negotiations.
		 * Renegotiate if appropriate.  Unit attention
		 * errors will be reported before any data
		 * phases occur.
		 */
		if (ahd_get_residual(scb) == ahd_get_transfer_length(scb)) {
			ahd_update_neg_request(ahd, &devinfo,
					       tstate, targ_info,
					       AHD_NEG_IF_NON_ASYNC);
		}
		if (tstate->auto_negotiate & devinfo.target_mask) {
			hscb->control |= MK_MESSAGE;
			scb->flags &=
			    ~(SCB_NEGOTIATE|SCB_ABORT|SCB_DEVICE_RESET);
			scb->flags |= SCB_AUTO_NEGOTIATE;
		}
		hscb->cdb_len = sizeof(*sc);
		ahd_setup_data_scb(ahd, scb);
		scb->flags |= SCB_SENSE;
		ahd_queue_scb(ahd, scb);
		break;
	}
	case SCSI_STATUS_OK:
		printk("%s: Interrupted for status of 0???\n",
		       ahd_name(ahd));
		/* FALLTHROUGH */
	default:
		ahd_done(ahd, scb);
		break;
	}
}

/*
 * Complete an SCB that the sequencer has finished with: route
 * non-zero SCSI status to the error path, otherwise compute the
 * residual and finish normally.
 */
static void
ahd_handle_scb_status(struct ahd_softc *ahd, struct scb *scb)
{
	if (scb->hscb->shared_data.istatus.scsi_status != 0) {
		ahd_handle_scsi_status(ahd, scb);
	} else {
		ahd_calc_residual(ahd, scb);
		ahd_done(ahd, scb);
	}
}

/*
 * Calculate the residual for a just completed SCB.
 */
static void
ahd_calc_residual(struct ahd_softc *ahd, struct scb *scb)
{
	struct hardware_scb *hscb;
	struct initiator_status *spkt;
	uint32_t sgptr;
	uint32_t resid_sgptr;
	uint32_t resid;

	/*
	 * 5 cases.
	 * 1) No residual.
	 *    SG_STATUS_VALID clear in sgptr.
	 * 2) Transferless command
	 * 3) Never performed any transfers.
	 *    sgptr has SG_FULL_RESID set.
	 * 4) No residual but target did not
	 *    save data pointers after the
	 *    last transfer, so sgptr was
	 *    never updated.
	 * 5) We have a partial residual.
	 *    Use residual_sgptr to determine
	 *    where we are.
*/ hscb = scb->hscb; sgptr = ahd_le32toh(hscb->sgptr); if ((sgptr & SG_STATUS_VALID) == 0) /* Case 1 */ return; sgptr &= ~SG_STATUS_VALID; if ((sgptr & SG_LIST_NULL) != 0) /* Case 2 */ return; /* * Residual fields are the same in both * target and initiator status packets, * so we can always use the initiator fields * regardless of the role for this SCB. */ spkt = &hscb->shared_data.istatus; resid_sgptr = ahd_le32toh(spkt->residual_sgptr); if ((sgptr & SG_FULL_RESID) != 0) { /* Case 3 */ resid = ahd_get_transfer_length(scb); } else if ((resid_sgptr & SG_LIST_NULL) != 0) { /* Case 4 */ return; } else if ((resid_sgptr & SG_OVERRUN_RESID) != 0) { ahd_print_path(ahd, scb); printk("data overrun detected Tag == 0x%x.\n", SCB_GET_TAG(scb)); ahd_freeze_devq(ahd, scb); ahd_set_transaction_status(scb, CAM_DATA_RUN_ERR); ahd_freeze_scb(scb); return; } else if ((resid_sgptr & ~SG_PTR_MASK) != 0) { panic("Bogus resid sgptr value 0x%x\n", resid_sgptr); /* NOTREACHED */ } else { struct ahd_dma_seg *sg; /* * Remainder of the SG where the transfer * stopped. */ resid = ahd_le32toh(spkt->residual_datacnt) & AHD_SG_LEN_MASK; sg = ahd_sg_bus_to_virt(ahd, scb, resid_sgptr & SG_PTR_MASK); /* The residual sg_ptr always points to the next sg */ sg--; /* * Add up the contents of all residual * SG segments that are after the SG where * the transfer stopped. */ while ((ahd_le32toh(sg->len) & AHD_DMA_LAST_SEG) == 0) { sg++; resid += ahd_le32toh(sg->len) & AHD_SG_LEN_MASK; } } if ((scb->flags & SCB_SENSE) == 0) ahd_set_residual(scb, resid); else ahd_set_sense_residual(scb, resid); #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_MISC) != 0) { ahd_print_path(ahd, scb); printk("Handled %sResidual of %d bytes\n", (scb->flags & SCB_SENSE) ? 
"Sense " : "", resid); } #endif } /******************************* Target Mode **********************************/ #ifdef AHD_TARGET_MODE /* * Add a target mode event to this lun's queue */ static void ahd_queue_lstate_event(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate, u_int initiator_id, u_int event_type, u_int event_arg) { struct ahd_tmode_event *event; int pending; xpt_freeze_devq(lstate->path, /*count*/1); if (lstate->event_w_idx >= lstate->event_r_idx) pending = lstate->event_w_idx - lstate->event_r_idx; else pending = AHD_TMODE_EVENT_BUFFER_SIZE + 1 - (lstate->event_r_idx - lstate->event_w_idx); if (event_type == EVENT_TYPE_BUS_RESET || event_type == MSG_BUS_DEV_RESET) { /* * Any earlier events are irrelevant, so reset our buffer. * This has the effect of allowing us to deal with reset * floods (an external device holding down the reset line) * without losing the event that is really interesting. */ lstate->event_r_idx = 0; lstate->event_w_idx = 0; xpt_release_devq(lstate->path, pending, /*runqueue*/FALSE); } if (pending == AHD_TMODE_EVENT_BUFFER_SIZE) { xpt_print_path(lstate->path); printk("immediate event %x:%x lost\n", lstate->event_buffer[lstate->event_r_idx].event_type, lstate->event_buffer[lstate->event_r_idx].event_arg); lstate->event_r_idx++; if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_r_idx = 0; xpt_release_devq(lstate->path, /*count*/1, /*runqueue*/FALSE); } event = &lstate->event_buffer[lstate->event_w_idx]; event->initiator_id = initiator_id; event->event_type = event_type; event->event_arg = event_arg; lstate->event_w_idx++; if (lstate->event_w_idx == AHD_TMODE_EVENT_BUFFER_SIZE) lstate->event_w_idx = 0; } /* * Send any target mode events queued up waiting * for immediate notify resources. 
 */
void
ahd_send_lstate_events(struct ahd_softc *ahd, struct ahd_tmode_lstate *lstate)
{
    struct ccb_hdr *ccbh;
    struct ccb_immed_notify *inot;

    /* Drain queued events while immediate-notify CCBs are available. */
    while (lstate->event_r_idx != lstate->event_w_idx
        && (ccbh = SLIST_FIRST(&lstate->immed_notifies)) != NULL) {
        struct ahd_tmode_event *event;

        event = &lstate->event_buffer[lstate->event_r_idx];
        SLIST_REMOVE_HEAD(&lstate->immed_notifies, sim_links.sle);
        inot = (struct ccb_immed_notify *)ccbh;
        switch (event->event_type) {
        case EVENT_TYPE_BUS_RESET:
            ccbh->status = CAM_SCSI_BUS_RESET|CAM_DEV_QFRZN;
            break;
        default:
            ccbh->status = CAM_MESSAGE_RECV|CAM_DEV_QFRZN;
            inot->message_args[0] = event->event_type;
            inot->message_args[1] = event->event_arg;
            break;
        }
        inot->initiator_id = event->initiator_id;
        inot->sense_len = 0;
        xpt_done((union ccb *)inot);
        lstate->event_r_idx++;
        if (lstate->event_r_idx == AHD_TMODE_EVENT_BUFFER_SIZE)
            lstate->event_r_idx = 0;
    }
}
#endif

/******************** Sequencer Program Patching/Download *********************/

#ifdef AHD_DUMP_SEQ
/*
 * Debug helper: read back and print the entire sequencer program RAM,
 * one 32-bit instruction per line.
 */
void
ahd_dumpseq(struct ahd_softc* ahd)
{
    int i;
    int max_prog;

    max_prog = 2048;

    ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
    ahd_outw(ahd, PRGMCNT, 0);
    for (i = 0; i < max_prog; i++) {
        uint8_t ins_bytes[4];

        ahd_insb(ahd, SEQRAM, ins_bytes, 4);
        printk("0x%08x\n", ins_bytes[0] << 24
                 | ins_bytes[1] << 16
                 | ins_bytes[2] << 8
                 | ins_bytes[3]);
    }
}
#endif

/*
 * Download the sequencer firmware into the chip, applying conditional
 * patches for this hardware, filling in the downloadable-constant table,
 * and recording the (post-patch) critical section boundaries.
 */
static void
ahd_loadseq(struct ahd_softc *ahd)
{
    struct cs cs_table[num_critical_sections];
    u_int begin_set[num_critical_sections];
    u_int end_set[num_critical_sections];
    const struct patch *cur_patch;
    u_int cs_count;
    u_int cur_cs;
    u_int i;
    int downloaded;
    u_int skip_addr;
    u_int sg_prefetch_cnt;
    u_int sg_prefetch_cnt_limit;
    u_int sg_prefetch_align;
    u_int sg_size;
    u_int cacheline_mask;
    uint8_t download_consts[DOWNLOAD_CONST_COUNT];

    if (bootverbose)
        printk("%s: Downloading Sequencer Program...",
               ahd_name(ahd));

#if DOWNLOAD_CONST_COUNT != 8
#error "Download Const Mismatch"
#endif
    /*
     * Start out with 0 critical sections
     * that apply to this firmware load.
     */
    cs_count = 0;
    cur_cs = 0;
    memset(begin_set, 0, sizeof(begin_set));
    memset(end_set, 0, sizeof(end_set));

    /*
     * Setup downloadable constant table.
     *
     * The computation for the S/G prefetch variables is
     * a bit complicated.  We would like to always fetch
     * in terms of cachelined sized increments.  However,
     * if the cacheline is not an even multiple of the
     * SG element size or is larger than our SG RAM, using
     * just the cache size might leave us with only a portion
     * of an SG element at the tail of a prefetch.  If the
     * cacheline is larger than our S/G prefetch buffer less
     * the size of an SG element, we may round down to a cacheline
     * that doesn't contain any or all of the S/G of interest
     * within the bounds of our S/G ram.  Provide variables to
     * the sequencer that will allow it to handle these edge
     * cases.
     */
    /* Start by aligning to the nearest cacheline. */
    sg_prefetch_align = ahd->pci_cachesize;
    if (sg_prefetch_align == 0)
        sg_prefetch_align = 8;
    /* Round down to the nearest power of 2. */
    while (powerof2(sg_prefetch_align) == 0)
        sg_prefetch_align--;

    cacheline_mask = sg_prefetch_align - 1;

    /*
     * If the cacheline boundary is greater than half our prefetch RAM
     * we risk not being able to fetch even a single complete S/G
     * segment if we align to that boundary.
     */
    if (sg_prefetch_align > CCSGADDR_MAX/2)
        sg_prefetch_align = CCSGADDR_MAX/2;
    /* Start by fetching a single cacheline. */
    sg_prefetch_cnt = sg_prefetch_align;
    /*
     * Increment the prefetch count by cachelines until
     * at least one S/G element will fit.
     */
    sg_size = sizeof(struct ahd_dma_seg);
    if ((ahd->flags & AHD_64BIT_ADDRESSING) != 0)
        sg_size = sizeof(struct ahd_dma64_seg);
    while (sg_prefetch_cnt < sg_size)
        sg_prefetch_cnt += sg_prefetch_align;
    /*
     * If the cacheline is not an even multiple of
     * the S/G size, we may only get a partial S/G when
     * we align. Add a cacheline if this is the case.
     */
    if ((sg_prefetch_align % sg_size) != 0
     && (sg_prefetch_cnt < CCSGADDR_MAX))
        sg_prefetch_cnt += sg_prefetch_align;
    /*
     * Lastly, compute a value that the sequencer can use
     * to determine if the remainder of the CCSGRAM buffer
     * has a full S/G element in it.
     */
    sg_prefetch_cnt_limit = -(sg_prefetch_cnt - sg_size + 1);
    download_consts[SG_PREFETCH_CNT] = sg_prefetch_cnt;
    download_consts[SG_PREFETCH_CNT_LIMIT] = sg_prefetch_cnt_limit;
    download_consts[SG_PREFETCH_ALIGN_MASK] = ~(sg_prefetch_align - 1);
    download_consts[SG_PREFETCH_ADDR_MASK] = (sg_prefetch_align - 1);
    download_consts[SG_SIZEOF] = sg_size;
    download_consts[PKT_OVERRUN_BUFOFFSET] =
        (ahd->overrun_buf - (uint8_t *)ahd->qoutfifo) / 256;
    download_consts[SCB_TRANSFER_SIZE] = SCB_TRANSFER_SIZE_1BYTE_LUN;
    download_consts[CACHELINE_MASK] = cacheline_mask;
    cur_patch = patches;
    downloaded = 0;
    skip_addr = 0;
    /* Put the sequencer into program-load mode at address 0. */
    ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE|LOADRAM);
    ahd_outw(ahd, PRGMCNT, 0);

    for (i = 0; i < sizeof(seqprog)/4; i++) {
        if (ahd_check_patch(ahd, &cur_patch, i, &skip_addr) == 0) {
            /*
             * Don't download this instruction as it
             * is in a patch that was removed.
             */
            continue;
        }
        /*
         * Move through the CS table until we find a CS
         * that might apply to this instruction.
         */
        for (; cur_cs < num_critical_sections; cur_cs++) {
            if (critical_sections[cur_cs].end <= i) {
                if (begin_set[cs_count] == TRUE
                 && end_set[cs_count] == FALSE) {
                    cs_table[cs_count].end = downloaded;
                    end_set[cs_count] = TRUE;
                    cs_count++;
                }
                continue;
            }
            if (critical_sections[cur_cs].begin <= i
             && begin_set[cs_count] == FALSE) {
                cs_table[cs_count].begin = downloaded;
                begin_set[cs_count] = TRUE;
            }
            break;
        }
        ahd_download_instr(ahd, i, download_consts);
        downloaded++;
    }

    ahd->num_critical_sections = cs_count;
    if (cs_count != 0) {
        /* Keep a copy of the applicable (post-patch) CS table. */
        cs_count *= sizeof(struct cs);
        ahd->critical_sections = kmalloc(cs_count, GFP_ATOMIC);
        if (ahd->critical_sections == NULL)
            panic("ahd_loadseq: Could not malloc");
        memcpy(ahd->critical_sections, cs_table, cs_count);
    }
    ahd_outb(ahd, SEQCTL0, PERRORDIS|FAILDIS|FASTMODE);

    if (bootverbose) {
        printk(" %d instructions downloaded\n", downloaded);
        printk("%s: Features 0x%x, Bugs 0x%x, Flags 0x%x\n",
               ahd_name(ahd), ahd->features, ahd->bugs, ahd->flags);
    }
}

/*
 * Decide whether the instruction at start_instr survives patching.
 * Returns 1 to download the instruction, 0 to skip it; advances
 * *start_patch and updates *skip_addr as patches are consumed.
 */
static int
ahd_check_patch(struct ahd_softc *ahd, const struct patch **start_patch,
        u_int start_instr, u_int *skip_addr)
{
    const struct patch *cur_patch;
    const struct patch *last_patch;
    u_int num_patches;

    num_patches = ARRAY_SIZE(patches);
    last_patch = &patches[num_patches];
    cur_patch = *start_patch;

    while (cur_patch < last_patch && start_instr == cur_patch->begin) {
        if (cur_patch->patch_func(ahd) == 0) {
            /* Start rejecting code */
            *skip_addr = start_instr + cur_patch->skip_instr;
            cur_patch += cur_patch->skip_patch;
        } else {
            /* Accepted this patch.  Advance to the next
             * one and wait for our intruction pointer to
             * hit this point.
             */
            cur_patch++;
        }
    }

    *start_patch = cur_patch;
    if (start_instr < *skip_addr)
        /* Still skipping */
        return (0);

    return (1);
}

/*
 * Translate a pre-patch sequencer address into the address it occupies
 * after skipped patch regions are removed.
 */
static u_int
ahd_resolve_seqaddr(struct ahd_softc *ahd, u_int address)
{
    const struct patch *cur_patch;
    int address_offset;
    u_int skip_addr;
    u_int i;

    address_offset = 0;
    cur_patch = patches;
    skip_addr = 0;

    for (i = 0; i < address;) {
        ahd_check_patch(ahd, &cur_patch, i, &skip_addr);

        if (skip_addr > i) {
            int end_addr;

            /* Count skipped instructions before 'address'. */
            end_addr = min(address, skip_addr);
            address_offset += end_addr - i;
            i = skip_addr;
        } else {
            i++;
        }
    }
    return (address - address_offset);
}

/*
 * Fix up and write one firmware instruction into sequencer RAM:
 * relocate jump targets, substitute downloadable constants, and
 * recompute the instruction parity bit.
 */
static void
ahd_download_instr(struct ahd_softc *ahd, u_int instrptr, uint8_t *dconsts)
{
    union ins_formats instr;
    struct ins_format1 *fmt1_ins;
    struct ins_format3 *fmt3_ins;
    u_int opcode;

    /*
     * The firmware is always compiled into a little endian format.
     */
    instr.integer = ahd_le32toh(*(uint32_t*)&seqprog[instrptr * 4]);

    fmt1_ins = &instr.format1;
    fmt3_ins = NULL;

    /* Pull the opcode */
    opcode = instr.format1.opcode;
    switch (opcode) {
    case AIC_OP_JMP:
    case AIC_OP_JC:
    case AIC_OP_JNC:
    case AIC_OP_CALL:
    case AIC_OP_JNE:
    case AIC_OP_JNZ:
    case AIC_OP_JE:
    case AIC_OP_JZ:
    {
        /* Branch targets must be relocated past removed patches. */
        fmt3_ins = &instr.format3;
        fmt3_ins->address = ahd_resolve_seqaddr(ahd, fmt3_ins->address);
        /* FALLTHROUGH */
    }
    case AIC_OP_OR:
    case AIC_OP_AND:
    case AIC_OP_XOR:
    case AIC_OP_ADD:
    case AIC_OP_ADC:
    case AIC_OP_BMOV:
        /*
         * A set parity bit in the source image marks an immediate
         * that indexes the downloadable-constant table.
         */
        if (fmt1_ins->parity != 0) {
            fmt1_ins->immediate = dconsts[fmt1_ins->immediate];
        }
        fmt1_ins->parity = 0;
        /* FALLTHROUGH */
    case AIC_OP_ROL:
    {
        int i, count;

        /* Calculate odd parity for the instruction */
        /*
         * Only the low 31 bits are counted; bit 31 presumably holds
         * the parity flag itself -- TODO confirm against the
         * ins_formats bitfield layout.
         */
        for (i = 0, count = 0; i < 31; i++) {
            uint32_t mask;

            mask = 0x01 << i;
            if ((instr.integer & mask) != 0)
                count++;
        }
        if ((count & 0x01) == 0)
            instr.format1.parity = 1;

        /* The sequencer is a little endian cpu */
        instr.integer = ahd_htole32(instr.integer);
        ahd_outsb(ahd, SEQRAM, instr.bytes, 4);
        break;
    }
    default:
        panic("Unknown opcode encountered in seq program");
        break;
    }
}

/*
 * Empirically determine the depth of the sequencer's hardware stack by
 * pushing patterns and reading them back.  Body continues below.
 */
static int
ahd_probe_stack_size(struct ahd_softc *ahd)
{
    int last_probe;

    last_probe = 0;
    while (1) {
        int i;

        /*
         * We avoid using 0 as a pattern to avoid
         * confusion if the stack implementation
         * "back-fills" with zeros when "poping'
         * entries.
         */
        for (i = 1; i <= last_probe+1; i++) {
            ahd_outb(ahd, STACK, i & 0xFF);
            ahd_outb(ahd, STACK, (i >> 8) & 0xFF);
        }

        /* Verify */
        for (i = last_probe+1; i > 0; i--) {
            u_int stack_entry;

            stack_entry = ahd_inb(ahd, STACK)
                    |(ahd_inb(ahd, STACK) << 8);
            if (stack_entry != i)
                goto sized;
        }
        last_probe++;
    }
sized:
    return (last_probe);
}

/*
 * Pretty-print a register value, decoding set bit-fields using 'table'.
 * Tracks the output column via *cur_column (may be NULL) and wraps the
 * line at wrap_point.  Returns the number of characters printed.
 *
 * NOTE(review): the 'table == NULL' branch dereferences cur_column
 * without the NULL check that the tail of the function performs --
 * looks like a potential NULL-pointer dereference if a caller passes
 * both table == NULL and cur_column == NULL; verify against callers.
 */
int
ahd_print_register(const ahd_reg_parse_entry_t *table, u_int num_entries,
           const char *name, u_int address, u_int value,
           u_int *cur_column, u_int wrap_point)
{
    int printed;
    u_int printed_mask;

    if (cur_column != NULL && *cur_column >= wrap_point) {
        printk("\n");
        *cur_column = 0;
    }
    printed = printk("%s[0x%x]", name, value);
    if (table == NULL) {
        printed += printk(" ");
        *cur_column += printed;
        return (printed);
    }
    printed_mask = 0;
    while (printed_mask != 0xFF) {
        int entry;

        for (entry = 0; entry < num_entries; entry++) {
            /* Skip entries that don't match or were already shown. */
            if (((value & table[entry].mask) != table[entry].value)
             || ((printed_mask & table[entry].mask)
              == table[entry].mask))
                continue;

            printed += printk("%s%s",
                      printed_mask == 0 ? ":(" : "|",
                      table[entry].name);
            printed_mask |= table[entry].mask;
            break;
        }
        if (entry >= num_entries)
            break;
    }
    if (printed_mask != 0)
        printed += printk(") ");
    else
        printed += printk(" ");
    if (cur_column != NULL)
        *cur_column += printed;
    return (printed);
}

/*
 * Dump a snapshot of the controller's state to the console for
 * debugging: mode-independent registers, SCB queues and lists, both
 * data FIFOs, LQI/LQO state, and the sequencer stack.  Pauses the chip
 * if needed and restores modes/pause state before returning.
 */
void
ahd_dump_card_state(struct ahd_softc *ahd)
{
    struct scb *scb;
    ahd_mode_state saved_modes;
    u_int dffstat;
    int paused;
    u_int scb_index;
    u_int saved_scb_index;
    u_int cur_col;
    int i;

    if (ahd_is_paused(ahd)) {
        paused = 1;
    } else {
        paused = 0;
        ahd_pause(ahd);
    }
    saved_modes = ahd_save_modes(ahd);
    ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
    printk(">>>>>>>>>>>>>>>>>> Dump Card State Begins <<<<<<<<<<<<<<<<<\n"
           "%s: Dumping Card State at program address 0x%x Mode 0x%x\n",
           ahd_name(ahd),
           ahd_inw(ahd, CURADDR),
           ahd_build_mode_state(ahd, ahd->saved_src_mode,
                    ahd->saved_dst_mode));
    if (paused)
        printk("Card was paused\n");

    if (ahd_check_cmdcmpltqueues(ahd))
        printk("Completions are pending\n");

    /*
     * Mode independent registers.
     */
    cur_col = 0;
    ahd_intstat_print(ahd_inb(ahd, INTSTAT), &cur_col, 50);
    ahd_seloid_print(ahd_inb(ahd, SELOID), &cur_col, 50);
    ahd_selid_print(ahd_inb(ahd, SELID), &cur_col, 50);
    ahd_hs_mailbox_print(ahd_inb(ahd, LOCAL_HS_MAILBOX), &cur_col, 50);
    ahd_intctl_print(ahd_inb(ahd, INTCTL), &cur_col, 50);
    ahd_seqintstat_print(ahd_inb(ahd, SEQINTSTAT), &cur_col, 50);
    ahd_saved_mode_print(ahd_inb(ahd, SAVED_MODE), &cur_col, 50);
    ahd_dffstat_print(ahd_inb(ahd, DFFSTAT), &cur_col, 50);
    ahd_scsisigi_print(ahd_inb(ahd, SCSISIGI), &cur_col, 50);
    ahd_scsiphase_print(ahd_inb(ahd, SCSIPHASE), &cur_col, 50);
    ahd_scsibus_print(ahd_inb(ahd, SCSIBUS), &cur_col, 50);
    ahd_lastphase_print(ahd_inb(ahd, LASTPHASE), &cur_col, 50);
    ahd_scsiseq0_print(ahd_inb(ahd, SCSISEQ0), &cur_col, 50);
    ahd_scsiseq1_print(ahd_inb(ahd, SCSISEQ1), &cur_col, 50);
    ahd_seqctl0_print(ahd_inb(ahd, SEQCTL0), &cur_col, 50);
    ahd_seqintctl_print(ahd_inb(ahd, SEQINTCTL), &cur_col, 50);
    ahd_seq_flags_print(ahd_inb(ahd, SEQ_FLAGS), &cur_col, 50);
    ahd_seq_flags2_print(ahd_inb(ahd, SEQ_FLAGS2), &cur_col, 50);
    ahd_qfreeze_count_print(ahd_inw(ahd, QFREEZE_COUNT), &cur_col, 50);
    ahd_kernel_qfreeze_count_print(ahd_inw(ahd, KERNEL_QFREEZE_COUNT),
                       &cur_col, 50);
    ahd_mk_message_scb_print(ahd_inw(ahd, MK_MESSAGE_SCB),
                 &cur_col, 50);
    ahd_mk_message_scsiid_print(ahd_inb(ahd, MK_MESSAGE_SCSIID),
                    &cur_col, 50);
    ahd_sstat0_print(ahd_inb(ahd, SSTAT0), &cur_col, 50);
    ahd_sstat1_print(ahd_inb(ahd, SSTAT1), &cur_col, 50);
    ahd_sstat2_print(ahd_inb(ahd, SSTAT2), &cur_col, 50);
    ahd_sstat3_print(ahd_inb(ahd, SSTAT3), &cur_col, 50);
    ahd_perrdiag_print(ahd_inb(ahd, PERRDIAG), &cur_col, 50);
    ahd_simode1_print(ahd_inb(ahd, SIMODE1), &cur_col, 50);
    ahd_lqistat0_print(ahd_inb(ahd, LQISTAT0), &cur_col, 50);
    ahd_lqistat1_print(ahd_inb(ahd, LQISTAT1), &cur_col, 50);
    ahd_lqistat2_print(ahd_inb(ahd, LQISTAT2), &cur_col, 50);
    ahd_lqostat0_print(ahd_inb(ahd, LQOSTAT0), &cur_col, 50);
    ahd_lqostat1_print(ahd_inb(ahd, LQOSTAT1), &cur_col, 50);
    ahd_lqostat2_print(ahd_inb(ahd, LQOSTAT2), &cur_col, 50);
    printk("\n");
    printk("\nSCB Count = %d CMDS_PENDING = %d LASTSCB 0x%x "
           "CURRSCB 0x%x NEXTSCB 0x%x\n",
           ahd->scb_data.numscbs, ahd_inw(ahd, CMDS_PENDING),
           ahd_inw(ahd, LASTSCB), ahd_inw(ahd, CURRSCB),
           ahd_inw(ahd, NEXTSCB));
    cur_col = 0;
    /* QINFIFO */
    ahd_search_qinfifo(ahd, CAM_TARGET_WILDCARD, ALL_CHANNELS,
               CAM_LUN_WILDCARD, SCB_LIST_NULL,
               ROLE_UNKNOWN, /*status*/0, SEARCH_PRINT);
    saved_scb_index = ahd_get_scbptr(ahd);
    printk("Pending list:");
    i = 0;
    LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) {
        if (i++ > AHD_SCB_MAX)
            break;
        cur_col = printk("\n%3d FIFO_USE[0x%x] ", SCB_GET_TAG(scb),
                 ahd_inb_scbram(ahd, SCB_FIFO_USE_COUNT));
        ahd_set_scbptr(ahd, SCB_GET_TAG(scb));
        ahd_scb_control_print(ahd_inb_scbram(ahd, SCB_CONTROL),
                      &cur_col, 60);
        ahd_scb_scsiid_print(ahd_inb_scbram(ahd, SCB_SCSIID),
                     &cur_col, 60);
    }
    printk("\nTotal %d\n", i);

    printk("Kernel Free SCB list: ");
    i = 0;
    TAILQ_FOREACH(scb, &ahd->scb_data.free_scbs, links.tqe) {
        struct scb *list_scb;

        /* Walk each collision chain hanging off this free entry. */
        list_scb = scb;
        do {
            printk("%d ", SCB_GET_TAG(list_scb));
            list_scb = LIST_NEXT(list_scb, collision_links);
        } while (list_scb && i++ < AHD_SCB_MAX);
    }

    LIST_FOREACH(scb, &ahd->scb_data.any_dev_free_scb_list, links.le) {
        if (i++ > AHD_SCB_MAX)
            break;
        printk("%d ", SCB_GET_TAG(scb));
    }
    printk("\n");

    printk("Sequencer Complete DMA-inprog list: ");
    scb_index = ahd_inw(ahd, COMPLETE_SCB_DMAINPROG_HEAD);
    i = 0;
    while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
        ahd_set_scbptr(ahd, scb_index);
        printk("%d ", scb_index);
        scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
    }
    printk("\n");

    printk("Sequencer Complete list: ");
    scb_index = ahd_inw(ahd, COMPLETE_SCB_HEAD);
    i = 0;
    while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
        ahd_set_scbptr(ahd, scb_index);
        printk("%d ", scb_index);
        scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
    }
    printk("\n");

    printk("Sequencer DMA-Up and Complete list: ");
    scb_index = ahd_inw(ahd, COMPLETE_DMA_SCB_HEAD);
    i = 0;
    while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
        ahd_set_scbptr(ahd, scb_index);
        printk("%d ", scb_index);
        scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
    }
    printk("\n");
    printk("Sequencer On QFreeze and Complete list: ");
    scb_index = ahd_inw(ahd, COMPLETE_ON_QFREEZE_HEAD);
    i = 0;
    while (!SCBID_IS_NULL(scb_index) && i++ < AHD_SCB_MAX) {
        ahd_set_scbptr(ahd, scb_index);
        printk("%d ", scb_index);
        scb_index = ahd_inw_scbram(ahd, SCB_NEXT_COMPLETE);
    }
    printk("\n");
    ahd_set_scbptr(ahd, saved_scb_index);
    dffstat = ahd_inb(ahd, DFFSTAT);
    /* Dump both data FIFO engines. */
    for (i = 0; i < 2; i++) {
#ifdef AHD_DEBUG
        struct scb *fifo_scb;
#endif
        u_int fifo_scbptr;

        ahd_set_modes(ahd, AHD_MODE_DFF0 + i, AHD_MODE_DFF0 + i);
        fifo_scbptr = ahd_get_scbptr(ahd);
        printk("\n\n%s: FIFO%d %s, LONGJMP == 0x%x, SCB 0x%x\n",
               ahd_name(ahd), i,
               (dffstat & (FIFO0FREE << i)) ? "Free" : "Active",
               ahd_inw(ahd, LONGJMP_ADDR), fifo_scbptr);
        cur_col = 0;
        ahd_seqimode_print(ahd_inb(ahd, SEQIMODE), &cur_col, 50);
        ahd_seqintsrc_print(ahd_inb(ahd, SEQINTSRC), &cur_col, 50);
        ahd_dfcntrl_print(ahd_inb(ahd, DFCNTRL), &cur_col, 50);
        ahd_dfstatus_print(ahd_inb(ahd, DFSTATUS), &cur_col, 50);
        ahd_sg_cache_shadow_print(ahd_inb(ahd, SG_CACHE_SHADOW),
                      &cur_col, 50);
        ahd_sg_state_print(ahd_inb(ahd, SG_STATE), &cur_col, 50);
        ahd_dffsxfrctl_print(ahd_inb(ahd, DFFSXFRCTL), &cur_col, 50);
        ahd_soffcnt_print(ahd_inb(ahd, SOFFCNT), &cur_col, 50);
        ahd_mdffstat_print(ahd_inb(ahd, MDFFSTAT), &cur_col, 50);
        if (cur_col > 50) {
            printk("\n");
            cur_col = 0;
        }
        cur_col += printk("SHADDR = 0x%x%x, SHCNT = 0x%x ",
                  ahd_inl(ahd, SHADDR+4),
                  ahd_inl(ahd, SHADDR),
                  (ahd_inb(ahd, SHCNT)
                |  (ahd_inb(ahd, SHCNT + 1) << 8)
                |  (ahd_inb(ahd, SHCNT + 2) << 16)));
        if (cur_col > 50) {
            printk("\n");
            cur_col = 0;
        }
        cur_col += printk("HADDR = 0x%x%x, HCNT = 0x%x ",
                  ahd_inl(ahd, HADDR+4),
                  ahd_inl(ahd, HADDR),
                  (ahd_inb(ahd, HCNT)
                |  (ahd_inb(ahd, HCNT + 1) << 8)
                |  (ahd_inb(ahd, HCNT + 2) << 16)));
        ahd_ccsgctl_print(ahd_inb(ahd, CCSGCTL), &cur_col, 50);
#ifdef AHD_DEBUG
        if ((ahd_debug & AHD_SHOW_SG) != 0) {
            fifo_scb = ahd_lookup_scb(ahd, fifo_scbptr);
            if (fifo_scb != NULL)
                ahd_dump_sglist(fifo_scb);
        }
#endif
    }
    printk("\nLQIN: ");
    for (i = 0; i < 20; i++)
        printk("0x%x ", ahd_inb(ahd, LQIN + i));
    printk("\n");
    ahd_set_modes(ahd, AHD_MODE_CFG, AHD_MODE_CFG);
    printk("%s: LQISTATE = 0x%x, LQOSTATE = 0x%x, OPTIONMODE = 0x%x\n",
           ahd_name(ahd), ahd_inb(ahd, LQISTATE), ahd_inb(ahd, LQOSTATE),
           ahd_inb(ahd, OPTIONMODE));
    printk("%s: OS_SPACE_CNT = 0x%x MAXCMDCNT = 0x%x\n",
           ahd_name(ahd), ahd_inb(ahd, OS_SPACE_CNT),
           ahd_inb(ahd, MAXCMDCNT));
    printk("%s: SAVED_SCSIID = 0x%x SAVED_LUN = 0x%x\n",
           ahd_name(ahd), ahd_inb(ahd, SAVED_SCSIID),
           ahd_inb(ahd, SAVED_LUN));
    ahd_simode0_print(ahd_inb(ahd, SIMODE0), &cur_col, 50);
    printk("\n");
    ahd_set_modes(ahd, AHD_MODE_CCHAN, AHD_MODE_CCHAN);
    cur_col = 0;
    ahd_ccscbctl_print(ahd_inb(ahd, CCSCBCTL), &cur_col, 50);
    printk("\n");
    ahd_set_modes(ahd, ahd->saved_src_mode, ahd->saved_dst_mode);
    printk("%s: REG0 == 0x%x, SINDEX = 0x%x, DINDEX = 0x%x\n",
           ahd_name(ahd), ahd_inw(ahd, REG0), ahd_inw(ahd, SINDEX),
           ahd_inw(ahd, DINDEX));
    printk("%s: SCBPTR == 0x%x, SCB_NEXT == 0x%x, SCB_NEXT2 == 0x%x\n",
           ahd_name(ahd), ahd_get_scbptr(ahd),
           ahd_inw_scbram(ahd, SCB_NEXT),
           ahd_inw_scbram(ahd, SCB_NEXT2));
    printk("CDB %x %x %x %x %x %x\n",
           ahd_inb_scbram(ahd, SCB_CDB_STORE),
           ahd_inb_scbram(ahd, SCB_CDB_STORE+1),
           ahd_inb_scbram(ahd, SCB_CDB_STORE+2),
           ahd_inb_scbram(ahd, SCB_CDB_STORE+3),
           ahd_inb_scbram(ahd, SCB_CDB_STORE+4),
           ahd_inb_scbram(ahd, SCB_CDB_STORE+5));
    printk("STACK:");
    /* Reading STACK pops entries, so save them... */
    for (i = 0; i < ahd->stack_size; i++) {
        ahd->saved_stack[i] =
            ahd_inb(ahd, STACK)|(ahd_inb(ahd, STACK) << 8);
        printk(" 0x%x", ahd->saved_stack[i]);
    }
    /* ...and push them back in reverse order to restore the stack. */
    for (i = ahd->stack_size-1; i >= 0; i--) {
        ahd_outb(ahd, STACK, ahd->saved_stack[i] & 0xFF);
        ahd_outb(ahd, STACK, (ahd->saved_stack[i] >> 8) & 0xFF);
    }
    printk("\n<<<<<<<<<<<<<<<<< Dump Card State Ends >>>>>>>>>>>>>>>>>>\n");
    ahd_restore_modes(ahd, saved_modes);
    if (paused == 0)
        ahd_unpause(ahd);
}

#if 0
/*
 * Debug helper (compiled out): print the key link fields of every SCB
 * in SCB RAM.
 */
void
ahd_dump_scbs(struct ahd_softc *ahd)
{
    ahd_mode_state saved_modes;
    u_int saved_scb_index;
    int i;

    saved_modes = ahd_save_modes(ahd);
    ahd_set_modes(ahd, AHD_MODE_SCSI, AHD_MODE_SCSI);
    saved_scb_index = ahd_get_scbptr(ahd);
    for (i = 0; i < AHD_SCB_MAX; i++) {
        ahd_set_scbptr(ahd, i);
        printk("%3d", i);
        printk("(CTRL 0x%x ID 0x%x N 0x%x N2 0x%x SG 0x%x, RSG 0x%x)\n",
               ahd_inb_scbram(ahd, SCB_CONTROL),
               ahd_inb_scbram(ahd, SCB_SCSIID),
               ahd_inw_scbram(ahd, SCB_NEXT),
               ahd_inw_scbram(ahd, SCB_NEXT2),
               ahd_inl_scbram(ahd, SCB_SGPTR),
               ahd_inl_scbram(ahd, SCB_RESIDUAL_SGPTR));
    }
    printk("\n");
    ahd_set_scbptr(ahd, saved_scb_index);
    ahd_restore_modes(ahd, saved_modes);
}
#endif  /* 0 */

/**************************** Flexport Logic **********************************/
/*
 * Read count 16bit words from 16bit word address start_addr
from the * SEEPROM attached to the controller, into buf, using the controller's * SEEPROM reading state machine. Optionally treat the data as a byte * stream in terms of byte order. */ int ahd_read_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count, int bytestream) { u_int cur_addr; u_int end_addr; int error; /* * If we never make it through the loop even once, * we were passed invalid arguments. */ error = EINVAL; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); end_addr = start_addr + count; for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { ahd_outb(ahd, SEEADR, cur_addr); ahd_outb(ahd, SEECTL, SEEOP_READ | SEESTART); error = ahd_wait_seeprom(ahd); if (error) break; if (bytestream != 0) { uint8_t *bytestream_ptr; bytestream_ptr = (uint8_t *)buf; *bytestream_ptr++ = ahd_inb(ahd, SEEDAT); *bytestream_ptr = ahd_inb(ahd, SEEDAT+1); } else { /* * ahd_inw() already handles machine byte order. */ *buf = ahd_inw(ahd, SEEDAT); } buf++; } return (error); } /* * Write count 16bit words from buf, into SEEPROM attache to the * controller starting at 16bit word address start_addr, using the * controller's SEEPROM writing state machine. */ int ahd_write_seeprom(struct ahd_softc *ahd, uint16_t *buf, u_int start_addr, u_int count) { u_int cur_addr; u_int end_addr; int error; int retval; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); error = ENOENT; /* Place the chip into write-enable mode */ ahd_outb(ahd, SEEADR, SEEOP_EWEN_ADDR); ahd_outb(ahd, SEECTL, SEEOP_EWEN | SEESTART); error = ahd_wait_seeprom(ahd); if (error) return (error); /* * Write the data. If we don't get through the loop at * least once, the arguments were invalid. 
*/ retval = EINVAL; end_addr = start_addr + count; for (cur_addr = start_addr; cur_addr < end_addr; cur_addr++) { ahd_outw(ahd, SEEDAT, *buf++); ahd_outb(ahd, SEEADR, cur_addr); ahd_outb(ahd, SEECTL, SEEOP_WRITE | SEESTART); retval = ahd_wait_seeprom(ahd); if (retval) break; } /* * Disable writes. */ ahd_outb(ahd, SEEADR, SEEOP_EWDS_ADDR); ahd_outb(ahd, SEECTL, SEEOP_EWDS | SEESTART); error = ahd_wait_seeprom(ahd); if (error) return (error); return (retval); } /* * Wait ~100us for the serial eeprom to satisfy our request. */ static int ahd_wait_seeprom(struct ahd_softc *ahd) { int cnt; cnt = 5000; while ((ahd_inb(ahd, SEESTAT) & (SEEARBACK|SEEBUSY)) != 0 && --cnt) ahd_delay(5); if (cnt == 0) return (ETIMEDOUT); return (0); } /* * Validate the two checksums in the per_channel * vital product data struct. */ static int ahd_verify_vpd_cksum(struct vpd_config *vpd) { int i; int maxaddr; uint32_t checksum; uint8_t *vpdarray; vpdarray = (uint8_t *)vpd; maxaddr = offsetof(struct vpd_config, vpd_checksum); checksum = 0; for (i = offsetof(struct vpd_config, resource_type); i < maxaddr; i++) checksum = checksum + vpdarray[i]; if (checksum == 0 || (-checksum & 0xFF) != vpd->vpd_checksum) return (0); checksum = 0; maxaddr = offsetof(struct vpd_config, checksum); for (i = offsetof(struct vpd_config, default_target_flags); i < maxaddr; i++) checksum = checksum + vpdarray[i]; if (checksum == 0 || (-checksum & 0xFF) != vpd->checksum) return (0); return (1); } int ahd_verify_cksum(struct seeprom_config *sc) { int i; int maxaddr; uint32_t checksum; uint16_t *scarray; maxaddr = (sizeof(*sc)/2) - 1; checksum = 0; scarray = (uint16_t *)sc; for (i = 0; i < maxaddr; i++) checksum = checksum + scarray[i]; if (checksum == 0 || (checksum & 0xFFFF) != sc->checksum) { return (0); } else { return (1); } } int ahd_acquire_seeprom(struct ahd_softc *ahd) { /* * We should be able to determine the SEEPROM type * from the flexport logic, but unfortunately not * all implementations have this logic 
and there is * no programatic method for determining if the logic * is present. */ return (1); #if 0 uint8_t seetype; int error; error = ahd_read_flexport(ahd, FLXADDR_ROMSTAT_CURSENSECTL, &seetype); if (error != 0 || ((seetype & FLX_ROMSTAT_SEECFG) == FLX_ROMSTAT_SEE_NONE)) return (0); return (1); #endif } void ahd_release_seeprom(struct ahd_softc *ahd) { /* Currently a no-op */ } /* * Wait at most 2 seconds for flexport arbitration to succeed. */ static int ahd_wait_flexport(struct ahd_softc *ahd) { int cnt; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); cnt = 1000000 * 2 / 5; while ((ahd_inb(ahd, BRDCTL) & FLXARBACK) == 0 && --cnt) ahd_delay(5); if (cnt == 0) return (ETIMEDOUT); return (0); } int ahd_write_flexport(struct ahd_softc *ahd, u_int addr, u_int value) { int error; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (addr > 7) panic("ahd_write_flexport: address out of range"); ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); error = ahd_wait_flexport(ahd); if (error != 0) return (error); ahd_outb(ahd, BRDDAT, value); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, BRDSTB|BRDEN|(addr << 3)); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, BRDEN|(addr << 3)); ahd_flush_device_writes(ahd); ahd_outb(ahd, BRDCTL, 0); ahd_flush_device_writes(ahd); return (0); } int ahd_read_flexport(struct ahd_softc *ahd, u_int addr, uint8_t *value) { int error; AHD_ASSERT_MODES(ahd, AHD_MODE_SCSI_MSK, AHD_MODE_SCSI_MSK); if (addr > 7) panic("ahd_read_flexport: address out of range"); ahd_outb(ahd, BRDCTL, BRDRW|BRDEN|(addr << 3)); error = ahd_wait_flexport(ahd); if (error != 0) return (error); *value = ahd_inb(ahd, BRDDAT); ahd_outb(ahd, BRDCTL, 0); ahd_flush_device_writes(ahd); return (0); } /************************* Target Mode ****************************************/ #ifdef AHD_TARGET_MODE cam_status ahd_find_tmode_devs(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb, struct ahd_tmode_tstate **tstate, struct ahd_tmode_lstate 
**lstate, int notfound_failure) { if ((ahd->features & AHD_TARGETMODE) == 0) return (CAM_REQ_INVALID); /* * Handle the 'black hole' device that sucks up * requests to unattached luns on enabled targets. */ if (ccb->ccb_h.target_id == CAM_TARGET_WILDCARD && ccb->ccb_h.target_lun == CAM_LUN_WILDCARD) { *tstate = NULL; *lstate = ahd->black_hole; } else { u_int max_id; max_id = (ahd->features & AHD_WIDE) ? 16 : 8; if (ccb->ccb_h.target_id >= max_id) return (CAM_TID_INVALID); if (ccb->ccb_h.target_lun >= AHD_NUM_LUNS) return (CAM_LUN_INVALID); *tstate = ahd->enabled_targets[ccb->ccb_h.target_id]; *lstate = NULL; if (*tstate != NULL) *lstate = (*tstate)->enabled_luns[ccb->ccb_h.target_lun]; } if (notfound_failure != 0 && *lstate == NULL) return (CAM_PATH_INVALID); return (CAM_REQ_CMP); } void ahd_handle_en_lun(struct ahd_softc *ahd, struct cam_sim *sim, union ccb *ccb) { #if NOT_YET struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_en_lun *cel; cam_status status; u_int target; u_int lun; u_int target_mask; u_long s; char channel; status = ahd_find_tmode_devs(ahd, sim, ccb, &tstate, &lstate, /*notfound_failure*/FALSE); if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } if ((ahd->features & AHD_MULTIROLE) != 0) { u_int our_id; our_id = ahd->our_id; if (ccb->ccb_h.target_id != our_id) { if ((ahd->features & AHD_MULTI_TID) != 0 && (ahd->flags & AHD_INITIATORROLE) != 0) { /* * Only allow additional targets if * the initiator role is disabled. * The hardware cannot handle a re-select-in * on the initiator id during a re-select-out * on a different target id. */ status = CAM_TID_INVALID; } else if ((ahd->flags & AHD_INITIATORROLE) != 0 || ahd->enabled_luns > 0) { /* * Only allow our target id to change * if the initiator role is not configured * and there are no enabled luns which * are attached to the currently registered * scsi id. 
*/ status = CAM_TID_INVALID; } } } if (status != CAM_REQ_CMP) { ccb->ccb_h.status = status; return; } /* * We now have an id that is valid. * If we aren't in target mode, switch modes. */ if ((ahd->flags & AHD_TARGETROLE) == 0 && ccb->ccb_h.target_id != CAM_TARGET_WILDCARD) { u_long s; printk("Configuring Target Mode\n"); ahd_lock(ahd, &s); if (LIST_FIRST(&ahd->pending_scbs) != NULL) { ccb->ccb_h.status = CAM_BUSY; ahd_unlock(ahd, &s); return; } ahd->flags |= AHD_TARGETROLE; if ((ahd->features & AHD_MULTIROLE) == 0) ahd->flags &= ~AHD_INITIATORROLE; ahd_pause(ahd); ahd_loadseq(ahd); ahd_restart(ahd); ahd_unlock(ahd, &s); } cel = &ccb->cel; target = ccb->ccb_h.target_id; lun = ccb->ccb_h.target_lun; channel = SIM_CHANNEL(ahd, sim); target_mask = 0x01 << target; if (channel == 'B') target_mask <<= 8; if (cel->enable != 0) { u_int scsiseq1; /* Are we already enabled?? */ if (lstate != NULL) { xpt_print_path(ccb->ccb_h.path); printk("Lun already enabled\n"); ccb->ccb_h.status = CAM_LUN_ALRDY_ENA; return; } if (cel->grp6_len != 0 || cel->grp7_len != 0) { /* * Don't (yet?) support vendor * specific commands. */ ccb->ccb_h.status = CAM_REQ_INVALID; printk("Non-zero Group Codes\n"); return; } /* * Seems to be okay. * Setup our data structures. 
*/ if (target != CAM_TARGET_WILDCARD && tstate == NULL) { tstate = ahd_alloc_tstate(ahd, target, channel); if (tstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate tstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } } lstate = kmalloc(sizeof(*lstate), GFP_ATOMIC); if (lstate == NULL) { xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate lstate\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } memset(lstate, 0, sizeof(*lstate)); status = xpt_create_path(&lstate->path, /*periph*/NULL, xpt_path_path_id(ccb->ccb_h.path), xpt_path_target_id(ccb->ccb_h.path), xpt_path_lun_id(ccb->ccb_h.path)); if (status != CAM_REQ_CMP) { kfree(lstate); xpt_print_path(ccb->ccb_h.path); printk("Couldn't allocate path\n"); ccb->ccb_h.status = CAM_RESRC_UNAVAIL; return; } SLIST_INIT(&lstate->accept_tios); SLIST_INIT(&lstate->immed_notifies); ahd_lock(ahd, &s); ahd_pause(ahd); if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = lstate; ahd->enabled_luns++; if ((ahd->features & AHD_MULTI_TID) != 0) { u_int targid_mask; targid_mask = ahd_inw(ahd, TARGID); targid_mask |= target_mask; ahd_outw(ahd, TARGID, targid_mask); ahd_update_scsiid(ahd, targid_mask); } else { u_int our_id; char channel; channel = SIM_CHANNEL(ahd, sim); our_id = SIM_SCSI_ID(ahd, sim); /* * This can only happen if selections * are not enabled */ if (target != our_id) { u_int sblkctl; char cur_channel; int swap; sblkctl = ahd_inb(ahd, SBLKCTL); cur_channel = (sblkctl & SELBUSB) ? 
'B' : 'A'; if ((ahd->features & AHD_TWIN) == 0) cur_channel = 'A'; swap = cur_channel != channel; ahd->our_id = target; if (swap) ahd_outb(ahd, SBLKCTL, sblkctl ^ SELBUSB); ahd_outb(ahd, SCSIID, target); if (swap) ahd_outb(ahd, SBLKCTL, sblkctl); } } } else ahd->black_hole = lstate; /* Allow select-in operations */ if (ahd->black_hole != NULL && ahd->enabled_luns > 0) { scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); scsiseq1 |= ENSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); scsiseq1 = ahd_inb(ahd, SCSISEQ1); scsiseq1 |= ENSELI; ahd_outb(ahd, SCSISEQ1, scsiseq1); } ahd_unpause(ahd); ahd_unlock(ahd, &s); ccb->ccb_h.status = CAM_REQ_CMP; xpt_print_path(ccb->ccb_h.path); printk("Lun now enabled for target mode\n"); } else { struct scb *scb; int i, empty; if (lstate == NULL) { ccb->ccb_h.status = CAM_LUN_INVALID; return; } ahd_lock(ahd, &s); ccb->ccb_h.status = CAM_REQ_CMP; LIST_FOREACH(scb, &ahd->pending_scbs, pending_links) { struct ccb_hdr *ccbh; ccbh = &scb->io_ctx->ccb_h; if (ccbh->func_code == XPT_CONT_TARGET_IO && !xpt_path_comp(ccbh->path, ccb->ccb_h.path)){ printk("CTIO pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; ahd_unlock(ahd, &s); return; } } if (SLIST_FIRST(&lstate->accept_tios) != NULL) { printk("ATIOs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (SLIST_FIRST(&lstate->immed_notifies) != NULL) { printk("INOTs pending\n"); ccb->ccb_h.status = CAM_REQ_INVALID; } if (ccb->ccb_h.status != CAM_REQ_CMP) { ahd_unlock(ahd, &s); return; } xpt_print_path(ccb->ccb_h.path); printk("Target mode disabled\n"); xpt_free_path(lstate->path); kfree(lstate); ahd_pause(ahd); /* Can we clean up the target too? 
*/ if (target != CAM_TARGET_WILDCARD) { tstate->enabled_luns[lun] = NULL; ahd->enabled_luns--; for (empty = 1, i = 0; i < 8; i++) if (tstate->enabled_luns[i] != NULL) { empty = 0; break; } if (empty) { ahd_free_tstate(ahd, target, channel, /*force*/FALSE); if (ahd->features & AHD_MULTI_TID) { u_int targid_mask; targid_mask = ahd_inw(ahd, TARGID); targid_mask &= ~target_mask; ahd_outw(ahd, TARGID, targid_mask); ahd_update_scsiid(ahd, targid_mask); } } } else { ahd->black_hole = NULL; /* * We can't allow selections without * our black hole device. */ empty = TRUE; } if (ahd->enabled_luns == 0) { /* Disallow select-in */ u_int scsiseq1; scsiseq1 = ahd_inb(ahd, SCSISEQ_TEMPLATE); scsiseq1 &= ~ENSELI; ahd_outb(ahd, SCSISEQ_TEMPLATE, scsiseq1); scsiseq1 = ahd_inb(ahd, SCSISEQ1); scsiseq1 &= ~ENSELI; ahd_outb(ahd, SCSISEQ1, scsiseq1); if ((ahd->features & AHD_MULTIROLE) == 0) { printk("Configuring Initiator Mode\n"); ahd->flags &= ~AHD_TARGETROLE; ahd->flags |= AHD_INITIATORROLE; ahd_pause(ahd); ahd_loadseq(ahd); ahd_restart(ahd); /* * Unpaused. The extra unpause * that follows is harmless. */ } } ahd_unpause(ahd); ahd_unlock(ahd, &s); } #endif } static void ahd_update_scsiid(struct ahd_softc *ahd, u_int targid_mask) { #if NOT_YET u_int scsiid_mask; u_int scsiid; if ((ahd->features & AHD_MULTI_TID) == 0) panic("ahd_update_scsiid called on non-multitid unit\n"); /* * Since we will rely on the TARGID mask * for selection enables, ensure that OID * in SCSIID is not set to some other ID * that we don't want to allow selections on. 
*/ if ((ahd->features & AHD_ULTRA2) != 0) scsiid = ahd_inb(ahd, SCSIID_ULTRA2); else scsiid = ahd_inb(ahd, SCSIID); scsiid_mask = 0x1 << (scsiid & OID); if ((targid_mask & scsiid_mask) == 0) { u_int our_id; /* ffs counts from 1 */ our_id = ffs(targid_mask); if (our_id == 0) our_id = ahd->our_id; else our_id--; scsiid &= TID; scsiid |= our_id; } if ((ahd->features & AHD_ULTRA2) != 0) ahd_outb(ahd, SCSIID_ULTRA2, scsiid); else ahd_outb(ahd, SCSIID, scsiid); #endif } static void ahd_run_tqinfifo(struct ahd_softc *ahd, int paused) { struct target_cmd *cmd; ahd_sync_tqinfifo(ahd, BUS_DMASYNC_POSTREAD); while ((cmd = &ahd->targetcmds[ahd->tqinfifonext])->cmd_valid != 0) { /* * Only advance through the queue if we * have the resources to process the command. */ if (ahd_handle_target_cmd(ahd, cmd) != 0) break; cmd->cmd_valid = 0; ahd_dmamap_sync(ahd, ahd->shared_data_dmat, ahd->shared_data_map.dmamap, ahd_targetcmd_offset(ahd, ahd->tqinfifonext), sizeof(struct target_cmd), BUS_DMASYNC_PREREAD); ahd->tqinfifonext++; /* * Lazily update our position in the target mode incoming * command queue as seen by the sequencer. */ if ((ahd->tqinfifonext & (HOST_TQINPOS - 1)) == 1) { u_int hs_mailbox; hs_mailbox = ahd_inb(ahd, HS_MAILBOX); hs_mailbox &= ~HOST_TQINPOS; hs_mailbox |= ahd->tqinfifonext & HOST_TQINPOS; ahd_outb(ahd, HS_MAILBOX, hs_mailbox); } } } static int ahd_handle_target_cmd(struct ahd_softc *ahd, struct target_cmd *cmd) { struct ahd_tmode_tstate *tstate; struct ahd_tmode_lstate *lstate; struct ccb_accept_tio *atio; uint8_t *byte; int initiator; int target; int lun; initiator = SCSIID_TARGET(ahd, cmd->scsiid); target = SCSIID_OUR_ID(cmd->scsiid); lun = (cmd->identify & MSG_IDENTIFY_LUNMASK); byte = cmd->bytes; tstate = ahd->enabled_targets[target]; lstate = NULL; if (tstate != NULL) lstate = tstate->enabled_luns[lun]; /* * Commands for disabled luns go to the black hole driver. 
*/ if (lstate == NULL) lstate = ahd->black_hole; atio = (struct ccb_accept_tio*)SLIST_FIRST(&lstate->accept_tios); if (atio == NULL) { ahd->flags |= AHD_TQINFIFO_BLOCKED; /* * Wait for more ATIOs from the peripheral driver for this lun. */ return (1); } else ahd->flags &= ~AHD_TQINFIFO_BLOCKED; #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printk("Incoming command from %d for %d:%d%s\n", initiator, target, lun, lstate == ahd->black_hole ? "(Black Holed)" : ""); #endif SLIST_REMOVE_HEAD(&lstate->accept_tios, sim_links.sle); if (lstate == ahd->black_hole) { /* Fill in the wildcards */ atio->ccb_h.target_id = target; atio->ccb_h.target_lun = lun; } /* * Package it up and send it off to * whomever has this lun enabled. */ atio->sense_len = 0; atio->init_id = initiator; if (byte[0] != 0xFF) { /* Tag was included */ atio->tag_action = *byte++; atio->tag_id = *byte++; atio->ccb_h.flags = CAM_TAG_ACTION_VALID; } else { atio->ccb_h.flags = 0; } byte++; /* Okay. Now determine the cdb size based on the command code */ switch (*byte >> CMD_GROUP_CODE_SHIFT) { case 0: atio->cdb_len = 6; break; case 1: case 2: atio->cdb_len = 10; break; case 4: atio->cdb_len = 16; break; case 5: atio->cdb_len = 12; break; case 3: default: /* Only copy the opcode. */ atio->cdb_len = 1; printk("Reserved or VU command code type encountered\n"); break; } memcpy(atio->cdb_io.cdb_bytes, byte, atio->cdb_len); atio->ccb_h.status |= CAM_CDB_RECVD; if ((cmd->identify & MSG_IDENTIFY_DISCFLAG) == 0) { /* * We weren't allowed to disconnect. * We're hanging on the bus until a * continue target I/O comes in response * to this accept tio. */ #ifdef AHD_DEBUG if ((ahd_debug & AHD_SHOW_TQIN) != 0) printk("Received Immediate Command %d:%d:%d - %p\n", initiator, target, lun, ahd->pending_device); #endif ahd->pending_device = lstate; ahd_freeze_ccb((union ccb *)atio); atio->ccb_h.flags |= CAM_DIS_DISCONNECT; } xpt_done((union ccb*)atio); return (0); } #endif
gpl-2.0
talnoah/m8_sense
drivers/base/dma-mapping.c
4384
5483
/*
 * drivers/base/dma-mapping.c - arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 *
 * This file is released under the GPLv2.
 */

#include <linux/dma-mapping.h>
#include <linux/export.h>
#include <linux/gfp.h>

/*
 * Managed DMA API
 */

/* devres payload describing one managed DMA allocation */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
};

/* devres release callback for dmam_alloc_coherent() memory */
static void dmam_coherent_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_coherent(dev, this->size, this->vaddr, this->dma_handle);
}

/* devres release callback for dmam_alloc_noncoherent() memory */
static void dmam_noncoherent_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_noncoherent(dev, this->size, this->vaddr, this->dma_handle);
}

/*
 * devres match callback, keyed on the virtual address.  A matching
 * vaddr with a differing size or DMA handle indicates a caller bug;
 * we warn about it but still treat the entry as a match.
 */
static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_alloc_coherent - Managed dma_alloc_coherent()
 * @dev: Device to allocate coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_coherent().  Memory allocated using this function
 * will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void * dmam_alloc_coherent(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_coherent_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_coherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_coherent);

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_coherent(dev, size, vaddr, dma_handle);
	/* devres_destroy() returns 0 on success, -ENOENT if not found */
	WARN_ON(devres_destroy(dev, dmam_coherent_release, dmam_match,
			       &match_data));
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_noncoherent - Managed dma_alloc_noncoherent()
 * @dev: Device to allocate non_coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 *
 * Managed dma_alloc_noncoherent().  Memory allocated using this
 * function will be automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_noncoherent(struct device *dev, size_t size,
			     dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_noncoherent_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_noncoherent);

/**
 * dmam_free_noncoherent - Managed dma_free_noncoherent()
 * @dev: Device to free noncoherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_noncoherent().
 */
void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
			   dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	dma_free_noncoherent(dev, size, vaddr, dma_handle);
	/*
	 * devres_destroy() returns 0 on success and -ENOENT when no
	 * matching entry exists, so warn on a non-zero result, matching
	 * dmam_free_coherent().  The previous "!devres_destroy(...)"
	 * inverted the test and warned on every successful free while
	 * staying silent on the actual error case.
	 */
	WARN_ON(devres_destroy(dev, dmam_noncoherent_release, dmam_match,
			       &match_data));
}
EXPORT_SYMBOL(dmam_free_noncoherent);

#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY

/* devres release callback undoing dma_declare_coherent_memory() */
static void dmam_coherent_decl_release(struct device *dev, void *res)
{
	dma_release_declared_memory(dev);
}

/**
 * dmam_declare_coherent_memory - Managed dma_declare_coherent_memory()
 * @dev: Device to declare coherent memory for
 * @bus_addr: Bus address of coherent memory to be declared
 * @device_addr: Device address of coherent memory to be declared
 * @size: Size of coherent memory to be declared
 * @flags: Flags
 *
 * Managed dma_declare_coherent_memory().
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int dmam_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
				 dma_addr_t device_addr, size_t size, int flags)
{
	void *res;
	int rc;

	res = devres_alloc(dmam_coherent_decl_release, 0, GFP_KERNEL);
	if (!res)
		return -ENOMEM;

	rc = dma_declare_coherent_memory(dev, bus_addr, device_addr, size,
					 flags);
	if (rc == 0)
		devres_add(dev, res);
	else
		devres_free(res);

	return rc;
}
EXPORT_SYMBOL(dmam_declare_coherent_memory);

/**
 * dmam_release_declared_memory - Managed dma_release_declared_memory().
 * @dev: Device to release declared coherent memory for
 *
 * Managed dma_release_declared_memory().
 */
void dmam_release_declared_memory(struct device *dev)
{
	WARN_ON(devres_destroy(dev, dmam_coherent_decl_release, NULL, NULL));
}
EXPORT_SYMBOL(dmam_release_declared_memory);

#endif
gpl-2.0
kozmikkick/KozmiKG2
drivers/net/ethernet/sun/cassini.c
4896
143012
/* cassini.c: Sun Microsystems Cassini(+) ethernet driver. * * Copyright (C) 2004 Sun Microsystems Inc. * Copyright (C) 2003 Adrian Sun (asun@darksunrising.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of the * License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA * 02111-1307, USA. * * This driver uses the sungem driver (c) David Miller * (davem@redhat.com) as its basis. * * The cassini chip has a number of features that distinguish it from * the gem chip: * 4 transmit descriptor rings that are used for either QoS (VLAN) or * load balancing (non-VLAN mode) * batching of multiple packets * multiple CPU dispatching * page-based RX descriptor engine with separate completion rings * Gigabit support (GMII and PCS interface) * MIF link up/down detection works * * RX is handled by page sized buffers that are attached as fragments to * the skb. here's what's done: * -- driver allocates pages at a time and keeps reference counts * on them. * -- the upper protocol layers assume that the header is in the skb * itself. as a result, cassini will copy a small amount (64 bytes) * to make them happy. * -- driver appends the rest of the data pages as frags to skbuffs * and increments the reference count * -- on page reclamation, the driver swaps the page with a spare page. * if that page is still in use, it frees its reference to that page, * and allocates a new page for use. 
otherwise, it just recycles the * the page. * * NOTE: cassini can parse the header. however, it's not worth it * as long as the network stack requires a header copy. * * TX has 4 queues. currently these queues are used in a round-robin * fashion for load balancing. They can also be used for QoS. for that * to work, however, QoS information needs to be exposed down to the driver * level so that subqueues get targeted to particular transmit rings. * alternatively, the queues can be configured via use of the all-purpose * ioctl. * * RX DATA: the rx completion ring has all the info, but the rx desc * ring has all of the data. RX can conceivably come in under multiple * interrupts, but the INT# assignment needs to be set up properly by * the BIOS and conveyed to the driver. PCI BIOSes don't know how to do * that. also, the two descriptor rings are designed to distinguish between * encrypted and non-encrypted packets, but we use them for buffering * instead. * * by default, the selective clear mask is set up to process rx packets. 
*/ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/compiler.h> #include <linux/slab.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/vmalloc.h> #include <linux/ioport.h> #include <linux/pci.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/list.h> #include <linux/dma-mapping.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/random.h> #include <linux/mii.h> #include <linux/ip.h> #include <linux/tcp.h> #include <linux/mutex.h> #include <linux/firmware.h> #include <net/checksum.h> #include <linux/atomic.h> #include <asm/io.h> #include <asm/byteorder.h> #include <asm/uaccess.h> #define cas_page_map(x) kmap_atomic((x)) #define cas_page_unmap(x) kunmap_atomic((x)) #define CAS_NCPUS num_online_cpus() #define cas_skb_release(x) netif_rx(x) /* select which firmware to use */ #define USE_HP_WORKAROUND #define HP_WORKAROUND_DEFAULT /* select which firmware to use as default */ #define CAS_HP_ALT_FIRMWARE cas_prog_null /* alternate firmware */ #include "cassini.h" #define USE_TX_COMPWB /* use completion writeback registers */ #define USE_CSMA_CD_PROTO /* standard CSMA/CD */ #define USE_RX_BLANK /* hw interrupt mitigation */ #undef USE_ENTROPY_DEV /* don't test for entropy device */ /* NOTE: these aren't useable unless PCI interrupts can be assigned. * also, we need to make cp->lock finer-grained. 
*/ #undef USE_PCI_INTB #undef USE_PCI_INTC #undef USE_PCI_INTD #undef USE_QOS #undef USE_VPD_DEBUG /* debug vpd information if defined */ /* rx processing options */ #define USE_PAGE_ORDER /* specify to allocate large rx pages */ #define RX_DONT_BATCH 0 /* if 1, don't batch flows */ #define RX_COPY_ALWAYS 0 /* if 0, use frags */ #define RX_COPY_MIN 64 /* copy a little to make upper layers happy */ #undef RX_COUNT_BUFFERS /* define to calculate RX buffer stats */ #define DRV_MODULE_NAME "cassini" #define DRV_MODULE_VERSION "1.6" #define DRV_MODULE_RELDATE "21 May 2008" #define CAS_DEF_MSG_ENABLE \ (NETIF_MSG_DRV | \ NETIF_MSG_PROBE | \ NETIF_MSG_LINK | \ NETIF_MSG_TIMER | \ NETIF_MSG_IFDOWN | \ NETIF_MSG_IFUP | \ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) /* length of time before we decide the hardware is borked, * and dev->tx_timeout() should be called to fix the problem */ #define CAS_TX_TIMEOUT (HZ) #define CAS_LINK_TIMEOUT (22*HZ/10) #define CAS_LINK_FAST_TIMEOUT (1) /* timeout values for state changing. these specify the number * of 10us delays to be used before giving up. */ #define STOP_TRIES_PHY 1000 #define STOP_TRIES 5000 /* specify a minimum frame size to deal with some fifo issues * max mtu == 2 * page size - ethernet header - 64 - swivel = * 2 * page_size - 0x50 */ #define CAS_MIN_FRAME 97 #define CAS_1000MB_MIN_FRAME 255 #define CAS_MIN_MTU 60 #define CAS_MAX_MTU min(((cp->page_size << 1) - 0x50), 9000) #if 1 /* * Eliminate these and use separate atomic counters for each, to * avoid a race condition. 
*/ #else #define CAS_RESET_MTU 1 #define CAS_RESET_ALL 2 #define CAS_RESET_SPARE 3 #endif static char version[] __devinitdata = DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n"; static int cassini_debug = -1; /* -1 == use CAS_DEF_MSG_ENABLE as value */ static int link_mode; MODULE_AUTHOR("Adrian Sun (asun@darksunrising.com)"); MODULE_DESCRIPTION("Sun Cassini(+) ethernet driver"); MODULE_LICENSE("GPL"); MODULE_FIRMWARE("sun/cassini.bin"); module_param(cassini_debug, int, 0); MODULE_PARM_DESC(cassini_debug, "Cassini bitmapped debugging message enable value"); module_param(link_mode, int, 0); MODULE_PARM_DESC(link_mode, "default link mode"); /* * Work around for a PCS bug in which the link goes down due to the chip * being confused and never showing a link status of "up." */ #define DEFAULT_LINKDOWN_TIMEOUT 5 /* * Value in seconds, for user input. */ static int linkdown_timeout = DEFAULT_LINKDOWN_TIMEOUT; module_param(linkdown_timeout, int, 0); MODULE_PARM_DESC(linkdown_timeout, "min reset interval in sec. for PCS linkdown issue; disabled if not positive"); /* * value in 'ticks' (units used by jiffies). Set when we init the * module because 'HZ' in actually a function call on some flavors of * Linux. This will default to DEFAULT_LINKDOWN_TIMEOUT * HZ. 
*/ static int link_transition_timeout; static u16 link_modes[] __devinitdata = { BMCR_ANENABLE, /* 0 : autoneg */ 0, /* 1 : 10bt half duplex */ BMCR_SPEED100, /* 2 : 100bt half duplex */ BMCR_FULLDPLX, /* 3 : 10bt full duplex */ BMCR_SPEED100|BMCR_FULLDPLX, /* 4 : 100bt full duplex */ CAS_BMCR_SPEED1000|BMCR_FULLDPLX /* 5 : 1000bt full duplex */ }; static DEFINE_PCI_DEVICE_TABLE(cas_pci_tbl) = { { PCI_VENDOR_ID_SUN, PCI_DEVICE_ID_SUN_CASSINI, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_SATURN, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL }, { 0, } }; MODULE_DEVICE_TABLE(pci, cas_pci_tbl); static void cas_set_link_modes(struct cas *cp); static inline void cas_lock_tx(struct cas *cp) { int i; for (i = 0; i < N_TX_RINGS; i++) spin_lock(&cp->tx_lock[i]); } static inline void cas_lock_all(struct cas *cp) { spin_lock_irq(&cp->lock); cas_lock_tx(cp); } /* WTZ: QA was finding deadlock problems with the previous * versions after long test runs with multiple cards per machine. * See if replacing cas_lock_all with safer versions helps. The * symptoms QA is reporting match those we'd expect if interrupts * aren't being properly restored, and we fixed a previous deadlock * with similar symptoms by using save/restore versions in other * places. 
*/ #define cas_lock_all_save(cp, flags) \ do { \ struct cas *xxxcp = (cp); \ spin_lock_irqsave(&xxxcp->lock, flags); \ cas_lock_tx(xxxcp); \ } while (0) static inline void cas_unlock_tx(struct cas *cp) { int i; for (i = N_TX_RINGS; i > 0; i--) spin_unlock(&cp->tx_lock[i - 1]); } static inline void cas_unlock_all(struct cas *cp) { cas_unlock_tx(cp); spin_unlock_irq(&cp->lock); } #define cas_unlock_all_restore(cp, flags) \ do { \ struct cas *xxxcp = (cp); \ cas_unlock_tx(xxxcp); \ spin_unlock_irqrestore(&xxxcp->lock, flags); \ } while (0) static void cas_disable_irq(struct cas *cp, const int ring) { /* Make sure we won't get any more interrupts */ if (ring == 0) { writel(0xFFFFFFFF, cp->regs + REG_INTR_MASK); return; } /* disable completion interrupts and selectively mask */ if (cp->cas_flags & CAS_FLAG_REG_PLUS) { switch (ring) { #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) #ifdef USE_PCI_INTB case 1: #endif #ifdef USE_PCI_INTC case 2: #endif #ifdef USE_PCI_INTD case 3: #endif writel(INTRN_MASK_CLEAR_ALL | INTRN_MASK_RX_EN, cp->regs + REG_PLUS_INTRN_MASK(ring)); break; #endif default: writel(INTRN_MASK_CLEAR_ALL, cp->regs + REG_PLUS_INTRN_MASK(ring)); break; } } } static inline void cas_mask_intr(struct cas *cp) { int i; for (i = 0; i < N_RX_COMP_RINGS; i++) cas_disable_irq(cp, i); } static void cas_enable_irq(struct cas *cp, const int ring) { if (ring == 0) { /* all but TX_DONE */ writel(INTR_TX_DONE, cp->regs + REG_INTR_MASK); return; } if (cp->cas_flags & CAS_FLAG_REG_PLUS) { switch (ring) { #if defined (USE_PCI_INTB) || defined(USE_PCI_INTC) || defined(USE_PCI_INTD) #ifdef USE_PCI_INTB case 1: #endif #ifdef USE_PCI_INTC case 2: #endif #ifdef USE_PCI_INTD case 3: #endif writel(INTRN_MASK_RX_EN, cp->regs + REG_PLUS_INTRN_MASK(ring)); break; #endif default: break; } } } static inline void cas_unmask_intr(struct cas *cp) { int i; for (i = 0; i < N_RX_COMP_RINGS; i++) cas_enable_irq(cp, i); } static inline void 
cas_entropy_gather(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	/* Feed two reads of the chip's entropy register into the
	 * (out-of-tree) batch entropy interface. */
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	batch_entropy_store(readl(cp->regs + REG_ENTROPY_IV),
			    readl(cp->regs + REG_ENTROPY_IV),
			    sizeof(uint64_t)*8);
#endif
}

/* Reset the on-chip entropy device and probe whether it is present;
 * clears CAS_FLAG_ENTROPY_DEV if the test pattern does not read back. */
static inline void cas_entropy_reset(struct cas *cp)
{
#ifdef USE_ENTROPY_DEV
	if ((cp->cas_flags & CAS_FLAG_ENTROPY_DEV) == 0)
		return;

	writel(BIM_LOCAL_DEV_PAD | BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_EXT,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);
	writeb(ENTROPY_RESET_STC_MODE, cp->regs + REG_ENTROPY_RESET);
	writeb(0x55, cp->regs + REG_ENTROPY_RAND_REG);

	/* if we read back 0x0, we don't have an entropy device */
	if (readb(cp->regs + REG_ENTROPY_RAND_REG) == 0)
		cp->cas_flags &= ~CAS_FLAG_ENTROPY_DEV;
#endif
}

/* access to the phy. the following assumes that we've initialized the MIF to
 * be in frame rather than bit-bang mode
 */
/* Read a PHY register through the MIF frame interface. Polls for
 * completion (up to STOP_TRIES_PHY * 10us) and returns 0xFFFF on
 * timeout, which callers treat as "no data" (-1). */
static u16 cas_phy_read(struct cas *cp, int reg)
{
	u32 cmd;
	int limit = STOP_TRIES_PHY;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_READ;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return cmd & MIF_FRAME_DATA_MASK;
	}
	return 0xFFFF; /* -1 */
}

/* Write a PHY register through the MIF frame interface.
 * Returns 0 on success, -1 on poll timeout. */
static int cas_phy_write(struct cas *cp, int reg, u16 val)
{
	int limit = STOP_TRIES_PHY;
	u32 cmd;

	cmd = MIF_FRAME_ST | MIF_FRAME_OP_WRITE;
	cmd |= CAS_BASE(MIF_FRAME_PHY_ADDR, cp->phy_addr);
	cmd |= CAS_BASE(MIF_FRAME_REG_ADDR, reg);
	cmd |= MIF_FRAME_TURN_AROUND_MSB;
	cmd |= val & MIF_FRAME_DATA_MASK;
	writel(cmd, cp->regs + REG_MIF_FRAME);

	/* poll for completion */
	while (limit-- > 0) {
		udelay(10);
		cmd = readl(cp->regs + REG_MIF_FRAME);
		if (cmd & MIF_FRAME_TURN_AROUND_LSB)
			return 0;
	}
	return -1;
}

/* Clear BMCR_PDOWN to power the PHY up; no-op if already powered. */
static void cas_phy_powerup(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if ((ctl & BMCR_PDOWN) == 0)
		return;
	ctl &= ~BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

/* Set BMCR_PDOWN to power the PHY down; no-op if already down. */
static void cas_phy_powerdown(struct cas *cp)
{
	u16 ctl = cas_phy_read(cp, MII_BMCR);

	if (ctl & BMCR_PDOWN)
		return;
	ctl |= BMCR_PDOWN;
	cas_phy_write(cp, MII_BMCR, ctl);
}

/* cp->lock held. note: the last put_page will free the buffer */
static int cas_page_free(struct cas *cp, cas_page_t *page)
{
	pci_unmap_page(cp->pdev, page->dma_addr, cp->page_size,
		       PCI_DMA_FROMDEVICE);
	__free_pages(page->buffer, cp->page_order);
	kfree(page);
	return 0;
}

#ifdef RX_COUNT_BUFFERS
#define RX_USED_ADD(x, y)       ((x)->used += (y))
#define RX_USED_SET(x, y)       ((x)->used  = (y))
#else
#define RX_USED_ADD(x, y)
#define RX_USED_SET(x, y)
#endif

/* local page allocation routines for the receive buffers. jumbo pages
 * require at least 8K contiguous and 8K aligned buffers.
 */
/* Allocate a cas_page_t wrapper plus its DMA-mapped page(s).
 * Returns NULL on allocation failure.
 * NOTE(review): the pci_map_page() result is not checked for a mapping
 * error here — confirm whether that matters on the supported platforms. */
static cas_page_t *cas_page_alloc(struct cas *cp, const gfp_t flags)
{
	cas_page_t *page;

	page = kmalloc(sizeof(cas_page_t), flags);
	if (!page)
		return NULL;

	INIT_LIST_HEAD(&page->list);
	RX_USED_SET(page, 0);
	page->buffer = alloc_pages(flags, cp->page_order);
	if (!page->buffer)
		goto page_err;
	page->dma_addr = pci_map_page(cp->pdev, page->buffer, 0,
				      cp->page_size, PCI_DMA_FROMDEVICE);
	return page;

page_err:
	kfree(page);
	return NULL;
}

/* initialize spare pool of rx buffers, but allocate during the open */
static void cas_spare_init(struct cas *cp)
{
	spin_lock(&cp->rx_inuse_lock);
	INIT_LIST_HEAD(&cp->rx_inuse_list);
	spin_unlock(&cp->rx_inuse_lock);

	spin_lock(&cp->rx_spare_lock);
	INIT_LIST_HEAD(&cp->rx_spare_list);
	cp->rx_spares_needed = RX_SPARE_COUNT;
	spin_unlock(&cp->rx_spare_lock);
}

/* used on close. free all the spare buffers.
*/
static void cas_spare_free(struct cas *cp)
{
	struct list_head list, *elem, *tmp;

	/* free spare buffers */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_spare_list, &list);
	spin_unlock(&cp->rx_spare_lock);
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}

	INIT_LIST_HEAD(&list);
#if 1
	/*
	 * Looks like Adrian had protected this with a different
	 * lock than used everywhere else to manipulate this list.
	 */
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);
#else
	spin_lock(&cp->rx_spare_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_spare_lock);
#endif
	list_for_each_safe(elem, tmp, &list) {
		cas_page_free(cp, list_entry(elem, cas_page_t, list));
	}
}

/* replenish spares if needed */
/* Walk the in-use list, moving pages whose refcount has dropped back
 * to 1 into the spare pool (or freeing them), then allocate fresh
 * pages if the spare pool is still short. */
static void cas_spare_recover(struct cas *cp, const gfp_t flags)
{
	struct list_head list, *elem, *tmp;
	int needed, i;

	/* check inuse list. if we don't need any more free buffers,
	 * just free it
	 */

	/* make a local copy of the list */
	INIT_LIST_HEAD(&list);
	spin_lock(&cp->rx_inuse_lock);
	list_splice_init(&cp->rx_inuse_list, &list);
	spin_unlock(&cp->rx_inuse_lock);

	list_for_each_safe(elem, tmp, &list) {
		cas_page_t *page = list_entry(elem, cas_page_t, list);

		/*
		 * With the lockless pagecache, cassini buffering scheme gets
		 * slightly less accurate: we might find that a page has an
		 * elevated reference count here, due to a speculative ref,
		 * and skip it as in-use. Ideally we would be able to reclaim
		 * it. However this would be such a rare case, it doesn't
		 * matter too much as we should pick it up the next time round.
		 *
		 * Importantly, if we find that the page has a refcount of 1
		 * here (our refcount), then we know it is definitely not inuse
		 * so we can reuse it.
		 */
		if (page_count(page->buffer) > 1)
			continue;

		list_del(elem);
		spin_lock(&cp->rx_spare_lock);
		if (cp->rx_spares_needed > 0) {
			list_add(elem, &cp->rx_spare_list);
			cp->rx_spares_needed--;
			spin_unlock(&cp->rx_spare_lock);
		} else {
			spin_unlock(&cp->rx_spare_lock);
			cas_page_free(cp, page);
		}
	}

	/* put any inuse buffers back on the list */
	if (!list_empty(&list)) {
		spin_lock(&cp->rx_inuse_lock);
		list_splice(&list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}

	spin_lock(&cp->rx_spare_lock);
	needed = cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);
	if (!needed)
		return;

	/* we still need spares, so try to allocate some */
	INIT_LIST_HEAD(&list);
	i = 0;
	while (i < needed) {
		cas_page_t *spare = cas_page_alloc(cp, flags);
		if (!spare)
			break;
		list_add(&spare->list, &list);
		i++;
	}

	spin_lock(&cp->rx_spare_lock);
	list_splice(&list, &cp->rx_spare_list);
	cp->rx_spares_needed -= i;
	spin_unlock(&cp->rx_spare_lock);
}

/* pull a page from the list. */
/* Dequeue one spare page, attempting an atomic recovery pass if the
 * pool is empty; every RX_SPARE_RECOVER_VAL removals, kick the reset
 * task to replenish in process context. Returns NULL if no spare
 * could be obtained. */
static cas_page_t *cas_page_dequeue(struct cas *cp)
{
	struct list_head *entry;
	int recover;

	spin_lock(&cp->rx_spare_lock);
	if (list_empty(&cp->rx_spare_list)) {
		/* try to do a quick recovery */
		spin_unlock(&cp->rx_spare_lock);
		cas_spare_recover(cp, GFP_ATOMIC);
		spin_lock(&cp->rx_spare_lock);
		if (list_empty(&cp->rx_spare_list)) {
			netif_err(cp, rx_err, cp->dev,
				  "no spare buffers available\n");
			spin_unlock(&cp->rx_spare_lock);
			return NULL;
		}
	}

	entry = cp->rx_spare_list.next;
	list_del(entry);
	recover = ++cp->rx_spares_needed;
	spin_unlock(&cp->rx_spare_lock);

	/* trigger the timer to do the recovery */
	if ((recover & (RX_SPARE_RECOVER_VAL - 1)) == 0) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_spare);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_SPARE);
		schedule_work(&cp->reset_task);
#endif
	}
	return list_entry(entry, cas_page_t, list);
}

/* Configure MIF link-status polling: when enabled, the MIF polls BMSR
 * on the active PHY and interrupts on link/aneg changes. */
static void cas_mif_poll(struct cas *cp, const int enable)
{
	u32 cfg;

	cfg  = readl(cp->regs + REG_MIF_CFG);
	cfg &=
(MIF_CFG_MDIO_0 | MIF_CFG_MDIO_1);

	if (cp->phy_type & CAS_PHY_MII_MDIO1)
		cfg |= MIF_CFG_PHY_SELECT;

	/* poll and interrupt on link status change. */
	if (enable) {
		cfg |= MIF_CFG_POLL_EN;
		cfg |= CAS_BASE(MIF_CFG_POLL_REG, MII_BMSR);
		cfg |= CAS_BASE(MIF_CFG_POLL_PHY, cp->phy_addr);
	}
	writel((enable) ? ~(BMSR_LSTATUS | BMSR_ANEGCOMPLETE) : 0xFFFF,
	       cp->regs + REG_MIF_MASK);
	writel(cfg, cp->regs + REG_MIF_CFG);
}

/* Must be invoked under cp->lock */
/* (Re)start link negotiation. If @ep is non-NULL, first record the new
 * autoneg/speed/duplex request from ethtool into cp->link_cntl; then
 * either schedule a full chip reset (when the configuration changed
 * while the link was not down) or program the PCS/MII hardware
 * directly and arm the link timer. */
static void cas_begin_auto_negotiation(struct cas *cp, struct ethtool_cmd *ep)
{
	u16 ctl;
#if 1
	int lcntl;
	int changed = 0;
	int oldstate = cp->lstate;
	int link_was_not_down = !(oldstate == link_down);
#endif
	/* Setup link parameters */
	if (!ep)
		goto start_aneg;
	lcntl = cp->link_cntl;
	if (ep->autoneg == AUTONEG_ENABLE)
		cp->link_cntl = BMCR_ANENABLE;
	else {
		u32 speed = ethtool_cmd_speed(ep);
		cp->link_cntl = 0;
		if (speed == SPEED_100)
			cp->link_cntl |= BMCR_SPEED100;
		else if (speed == SPEED_1000)
			cp->link_cntl |= CAS_BMCR_SPEED1000;
		if (ep->duplex == DUPLEX_FULL)
			cp->link_cntl |= BMCR_FULLDPLX;
	}
#if 1
	changed = (lcntl != cp->link_cntl);
#endif
start_aneg:
	if (cp->lstate == link_up) {
		netdev_info(cp->dev, "PCS link down\n");
	} else {
		if (changed) {
			netdev_info(cp->dev, "link configuration changed\n");
		}
	}
	cp->lstate = link_down;
	cp->link_transition = LINK_TRANSITION_LINK_DOWN;
	if (!cp->hw_running)
		return;
#if 1
	/*
	 * WTZ: If the old state was link_up, we turn off the carrier
	 * to replicate everything we do elsewhere on a link-down
	 * event when we were already in a link-up state..
	 */
	if (oldstate == link_up)
		netif_carrier_off(cp->dev);
	if (changed && link_was_not_down) {
		/*
		 * WTZ: This branch will simply schedule a full reset after
		 * we explicitly changed link modes in an ioctl. See if this
		 * fixes the link-problems we were having for forced mode.
		 */
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
		cp->timer_ticks = 0;
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
		return;
	}
#endif
	if (cp->phy_type & CAS_PHY_SERDES) {
		u32 val = readl(cp->regs + REG_PCS_MII_CTRL);

		if (cp->link_cntl & BMCR_ANENABLE) {
			val |= (PCS_MII_RESTART_AUTONEG | PCS_MII_AUTONEG_EN);
			cp->lstate = link_aneg;
		} else {
			if (cp->link_cntl & BMCR_FULLDPLX)
				val |= PCS_MII_CTRL_DUPLEX;
			val &= ~PCS_MII_AUTONEG_EN;
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

	} else {
		/* MII path: suspend MIF polling while touching BMCR. */
		cas_mif_poll(cp, 0);
		ctl = cas_phy_read(cp, MII_BMCR);
		ctl &= ~(BMCR_FULLDPLX | BMCR_SPEED100 |
			 CAS_BMCR_SPEED1000 | BMCR_ANENABLE);
		ctl |= cp->link_cntl;
		if (ctl & BMCR_ANENABLE) {
			ctl |= BMCR_ANRESTART;
			cp->lstate = link_aneg;
		} else {
			cp->lstate = link_force_ok;
		}
		cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
		cas_phy_write(cp, MII_BMCR, ctl);
		cas_mif_poll(cp, 1);
	}

	cp->timer_ticks = 0;
	mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
}

/* Must be invoked under cp->lock.
*/ static int cas_reset_mii_phy(struct cas *cp) { int limit = STOP_TRIES_PHY; u16 val; cas_phy_write(cp, MII_BMCR, BMCR_RESET); udelay(100); while (--limit) { val = cas_phy_read(cp, MII_BMCR); if ((val & BMCR_RESET) == 0) break; udelay(10); } return limit <= 0; } static int cas_saturn_firmware_init(struct cas *cp) { const struct firmware *fw; const char fw_name[] = "sun/cassini.bin"; int err; if (PHY_NS_DP83065 != cp->phy_id) return 0; err = request_firmware(&fw, fw_name, &cp->pdev->dev); if (err) { pr_err("Failed to load firmware \"%s\"\n", fw_name); return err; } if (fw->size < 2) { pr_err("bogus length %zu in \"%s\"\n", fw->size, fw_name); err = -EINVAL; goto out; } cp->fw_load_addr= fw->data[1] << 8 | fw->data[0]; cp->fw_size = fw->size - 2; cp->fw_data = vmalloc(cp->fw_size); if (!cp->fw_data) { err = -ENOMEM; goto out; } memcpy(cp->fw_data, &fw->data[2], cp->fw_size); out: release_firmware(fw); return err; } static void cas_saturn_firmware_load(struct cas *cp) { int i; cas_phy_powerdown(cp); /* expanded memory access mode */ cas_phy_write(cp, DP83065_MII_MEM, 0x0); /* pointer configuration for new firmware */ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff9); cas_phy_write(cp, DP83065_MII_REGD, 0xbd); cas_phy_write(cp, DP83065_MII_REGE, 0x8ffa); cas_phy_write(cp, DP83065_MII_REGD, 0x82); cas_phy_write(cp, DP83065_MII_REGE, 0x8ffb); cas_phy_write(cp, DP83065_MII_REGD, 0x0); cas_phy_write(cp, DP83065_MII_REGE, 0x8ffc); cas_phy_write(cp, DP83065_MII_REGD, 0x39); /* download new firmware */ cas_phy_write(cp, DP83065_MII_MEM, 0x1); cas_phy_write(cp, DP83065_MII_REGE, cp->fw_load_addr); for (i = 0; i < cp->fw_size; i++) cas_phy_write(cp, DP83065_MII_REGD, cp->fw_data[i]); /* enable firmware */ cas_phy_write(cp, DP83065_MII_REGE, 0x8ff8); cas_phy_write(cp, DP83065_MII_REGD, 0x1); } /* phy initialization */ static void cas_phy_init(struct cas *cp) { u16 val; /* if we're in MII/GMII mode, set up phy */ if (CAS_PHY_MII(cp->phy_type)) { writel(PCS_DATAPATH_MODE_MII, cp->regs 
+ REG_PCS_DATAPATH_MODE);

		cas_mif_poll(cp, 0);
		cas_reset_mii_phy(cp); /* take out of isolate mode */

		/* Per-vendor PHY errata workarounds, keyed on PHY ID. */
		if (PHY_LUCENT_B0 == cp->phy_id) {
			/* workaround link up/down issue with lucent */
			cas_phy_write(cp, LUCENT_MII_REG, 0x8000);
			cas_phy_write(cp, MII_BMCR, 0x00f1);
			cas_phy_write(cp, LUCENT_MII_REG, 0x0);

		} else if (PHY_BROADCOM_B0 == (cp->phy_id & 0xFFFFFFFC)) {
			/* workarounds for broadcom phy */
			cas_phy_write(cp, BROADCOM_MII_REG8, 0x0C20);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0012);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1804);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x0013);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x1204);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0132);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x8006);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0232);
			cas_phy_write(cp, BROADCOM_MII_REG7, 0x201F);
			cas_phy_write(cp, BROADCOM_MII_REG5, 0x0A20);

		} else if (PHY_BROADCOM_5411 == cp->phy_id) {
			/* Double read is deliberate — see the latching
			 * behavior noted for BMSR-style registers. */
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			val = cas_phy_read(cp, BROADCOM_MII_REG4);
			if (val & 0x0080) {
				/* link workaround */
				cas_phy_write(cp, BROADCOM_MII_REG4,
					      val & ~0x0080);
			}

		} else if (cp->cas_flags & CAS_FLAG_SATURN) {
			writel((cp->phy_type & CAS_PHY_MII_MDIO0) ?
			       SATURN_PCFG_FSI : 0x0,
			       cp->regs + REG_SATURN_PCFG);

			/* load firmware to address 10Mbps auto-negotiation
			 * issue. NOTE: this will need to be changed if the
			 * default firmware gets fixed.
			 */
			if (PHY_NS_DP83065 == cp->phy_id) {
				cas_saturn_firmware_load(cp);
			}
			cas_phy_powerup(cp);
		}

		/* advertise capabilities */
		val = cas_phy_read(cp, MII_BMCR);
		val &= ~BMCR_ANENABLE;
		cas_phy_write(cp, MII_BMCR, val);
		udelay(10);

		cas_phy_write(cp, MII_ADVERTISE,
			      cas_phy_read(cp, MII_ADVERTISE) |
			      (ADVERTISE_10HALF | ADVERTISE_10FULL |
			       ADVERTISE_100HALF | ADVERTISE_100FULL |
			       CAS_ADVERTISE_PAUSE |
			       CAS_ADVERTISE_ASYM_PAUSE));

		if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
			/* make sure that we don't advertise half
			 * duplex to avoid a chip issue
			 */
			val  = cas_phy_read(cp, CAS_MII_1000_CTRL);
			val &= ~CAS_ADVERTISE_1000HALF;
			val |= CAS_ADVERTISE_1000FULL;
			cas_phy_write(cp, CAS_MII_1000_CTRL, val);
		}

	} else {
		/* reset pcs for serdes */
		u32 val;
		int limit;

		writel(PCS_DATAPATH_MODE_SERDES,
		       cp->regs + REG_PCS_DATAPATH_MODE);

		/* enable serdes pins on saturn */
		if (cp->cas_flags & CAS_FLAG_SATURN)
			writel(0, cp->regs + REG_SATURN_PCFG);

		/* Reset PCS unit. */
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		val |= PCS_MII_RESET;
		writel(val, cp->regs + REG_PCS_MII_CTRL);

		limit = STOP_TRIES;
		while (--limit > 0) {
			udelay(10);
			if ((readl(cp->regs + REG_PCS_MII_CTRL) &
			     PCS_MII_RESET) == 0)
				break;
		}
		if (limit <= 0)
			netdev_warn(cp->dev, "PCS reset bit would not clear [%08x]\n",
				    readl(cp->regs + REG_PCS_STATE_MACHINE));

		/* Make sure PCS is disabled while changing advertisement
		 * configuration.
		 */
		writel(0x0, cp->regs + REG_PCS_CFG);

		/* Advertise all capabilities except half-duplex.
*/
		val = readl(cp->regs + REG_PCS_MII_ADVERT);
		val &= ~PCS_MII_ADVERT_HD;
		val |= (PCS_MII_ADVERT_FD | PCS_MII_ADVERT_SYM_PAUSE |
			PCS_MII_ADVERT_ASYM_PAUSE);
		writel(val, cp->regs + REG_PCS_MII_ADVERT);

		/* enable PCS */
		writel(PCS_CFG_EN, cp->regs + REG_PCS_CFG);

		/* pcs workaround: enable sync detect */
		writel(PCS_SERDES_CTRL_SYNCD_EN,
		       cp->regs + REG_PCS_SERDES_CTRL);
	}
}


/* Evaluate PCS link state and update cp->lstate / the net carrier
 * accordingly. Returns non-zero when the caller should schedule a
 * chip reset (the link-failure workaround). */
static int cas_pcs_link_check(struct cas *cp)
{
	u32 stat, state_machine;
	int retval = 0;

	/* The link status bit latches on zero, so you must
	 * read it twice in such a case to see a transition
	 * to the link being up.
	 */
	stat = readl(cp->regs + REG_PCS_MII_STATUS);
	if ((stat & PCS_MII_STATUS_LINK_STATUS) == 0)
		stat = readl(cp->regs + REG_PCS_MII_STATUS);

	/* The remote-fault indication is only valid
	 * when autoneg has completed.
	 */
	if ((stat & (PCS_MII_STATUS_AUTONEG_COMP |
		     PCS_MII_STATUS_REMOTE_FAULT)) ==
	    (PCS_MII_STATUS_AUTONEG_COMP | PCS_MII_STATUS_REMOTE_FAULT))
		netif_info(cp, link, cp->dev, "PCS RemoteFault\n");

	/* work around link detection issue by querying the PCS state
	 * machine directly.
	 */
	state_machine = readl(cp->regs + REG_PCS_STATE_MACHINE);
	if ((state_machine & PCS_SM_LINK_STATE_MASK) != SM_LINK_STATE_UP) {
		stat &= ~PCS_MII_STATUS_LINK_STATUS;
	} else if (state_machine & PCS_SM_WORD_SYNC_STATE_MASK) {
		stat |= PCS_MII_STATUS_LINK_STATUS;
	}

	if (stat & PCS_MII_STATUS_LINK_STATUS) {
		if (cp->lstate != link_up) {
			if (cp->opened) {
				cp->lstate = link_up;
				cp->link_transition = LINK_TRANSITION_LINK_UP;

				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
	} else if (cp->lstate == link_up) {
		cp->lstate = link_down;
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/*
			 * force a reset, as a workaround for the
			 * link-failure problem. May want to move this to a
			 * point a bit earlier in the sequence. If we had
			 * generated a reset a short time ago, we'll wait for
			 * the link timer to check the status until a
			 * timer expires (link_transition_jiffies_valid is
			 * true when the timer is running.)  Instead of using
			 * a system timer, we just do a check whenever the
			 * link timer is running - this clears the flag after
			 * a suitable delay.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_ON_FAILURE;
		}
		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "PCS link down\n");

		/* Cassini only: if you force a mode, there can be
		 * sync problems on link down. to fix that, the following
		 * things need to be checked:
		 * 1) read serialink state register
		 * 2) read pcs status register to verify link down.
		 * 3) if link down and serial link == 0x03, then you need
		 *    to global reset the chip.
		 */
		if ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0) {
			/* should check to see if we're in a forced mode */
			stat = readl(cp->regs + REG_PCS_SERDES_STATE);
			if (stat == 0x03)
				return 1;
		}
	} else if (cp->lstate == link_down) {
		if (link_transition_timeout != 0 &&
		    cp->link_transition != LINK_TRANSITION_REQUESTED_RESET &&
		    !cp->link_transition_jiffies_valid) {
			/* force a reset, as a workaround for the
			 * link-failure problem. May want to move
			 * this to a point a bit earlier in the
			 * sequence.
			 */
			retval = 1;
			cp->link_transition = LINK_TRANSITION_REQUESTED_RESET;
			cp->link_transition_jiffies = jiffies;
			cp->link_transition_jiffies_valid = 1;
		} else {
			cp->link_transition = LINK_TRANSITION_STILL_FAILED;
		}
	}

	return retval;
}

/* PCS interrupt: only acts on link-change events; returns the
 * reset-needed indication from cas_pcs_link_check(). */
static int cas_pcs_interrupt(struct net_device *dev,
			     struct cas *cp, u32 status)
{
	u32 stat = readl(cp->regs + REG_PCS_INTR_STATUS);

	if ((stat & PCS_INTR_STATUS_LINK_CHANGE) == 0)
		return 0;
	return cas_pcs_link_check(cp);
}

/* TX MAC interrupt: log real errors and fold the chip's 16-bit
 * rollover counters into net_stats under stat_lock[0]. Always
 * returns 0 (no reset needed). */
static int cas_txmac_interrupt(struct net_device *dev,
			       struct cas *cp, u32 status)
{
	u32 txmac_stat = readl(cp->regs + REG_MAC_TX_STATUS);

	if (!txmac_stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "txmac interrupt, txmac_stat: 0x%x\n", txmac_stat);

	/* Defer timer expiration is quite normal,
	 * don't even log the event.
	 */
	if ((txmac_stat & MAC_TX_DEFER_TIMER) &&
	    !(txmac_stat & ~MAC_TX_DEFER_TIMER))
		return 0;

	spin_lock(&cp->stat_lock[0]);
	if (txmac_stat & MAC_TX_UNDERRUN) {
		netdev_err(dev, "TX MAC xmit underrun\n");
		cp->net_stats[0].tx_fifo_errors++;
	}

	if (txmac_stat & MAC_TX_MAX_PACKET_ERR) {
		netdev_err(dev, "TX MAC max packet size error\n");
		cp->net_stats[0].tx_errors++;
	}

	/* The rest are all cases of one of the 16-bit TX
	 * counters expiring.
	 */
	if (txmac_stat & MAC_TX_COLL_NORMAL)
		cp->net_stats[0].collisions += 0x10000;

	if (txmac_stat & MAC_TX_COLL_EXCESS) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}

	if (txmac_stat & MAC_TX_COLL_LATE) {
		cp->net_stats[0].tx_aborted_errors += 0x10000;
		cp->net_stats[0].collisions += 0x10000;
	}
	spin_unlock(&cp->stat_lock[0]);

	/* We do not keep track of MAC_TX_COLL_FIRST and
	 * MAC_TX_PEAK_ATTEMPTS events.
*/
	return 0;
}

/* Write a header-parser instruction array into the HP instruction RAM,
 * one (hi, mid, low) register triple per instruction, until the
 * terminating entry (NULL note) is reached. */
static void cas_load_firmware(struct cas *cp, cas_hp_inst_t *firmware)
{
	cas_hp_inst_t *inst;
	u32 val;
	int i;

	i = 0;
	while ((inst = firmware) && inst->note) {
		writel(i, cp->regs + REG_HP_INSTR_RAM_ADDR);

		val = CAS_BASE(HP_INSTR_RAM_HI_VAL, inst->val);
		val |= CAS_BASE(HP_INSTR_RAM_HI_MASK, inst->mask);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_HI);

		val = CAS_BASE(HP_INSTR_RAM_MID_OUTARG, inst->outarg >> 10);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OUTOP, inst->outop);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FNEXT, inst->fnext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_FOFF, inst->foff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SNEXT, inst->snext);
		val |= CAS_BASE(HP_INSTR_RAM_MID_SOFF, inst->soff);
		val |= CAS_BASE(HP_INSTR_RAM_MID_OP, inst->op);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_MID);

		val = CAS_BASE(HP_INSTR_RAM_LOW_OUTMASK, inst->outmask);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTSHIFT, inst->outshift);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTEN, inst->outenab);
		val |= CAS_BASE(HP_INSTR_RAM_LOW_OUTARG, inst->outarg);
		writel(val, cp->regs + REG_HP_INSTR_RAM_DATA_LOW);
		++firmware;
		++i;
	}
}

/* Program the whole RX DMA side of the chip: descriptor/completion
 * ring base addresses, kick registers, interrupt-clear aliases, pause
 * thresholds, reassembly buffers, interrupt mitigation, page sizing,
 * and (optionally) the header parser. */
static void cas_init_rx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	u32 val;
	int i, size;

	/* rx free descriptors */
	val = CAS_BASE(RX_CFG_SWIVEL, RX_SWIVEL_OFF_VAL);
	val |= CAS_BASE(RX_CFG_DESC_RING, RX_DESC_RINGN_INDEX(0));
	val |= CAS_BASE(RX_CFG_COMP_RING, RX_COMP_RINGN_INDEX(0));
	if ((N_RX_DESC_RINGS > 1) &&
	    (cp->cas_flags & CAS_FLAG_REG_PLUS))  /* do desc 2 */
		val |= CAS_BASE(RX_CFG_DESC_RING1, RX_DESC_RINGN_INDEX(1));
	writel(val, cp->regs + REG_RX_CFG);

	val = (unsigned long) cp->init_rxds[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_DB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_DB_LOW);
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx desc 2 is for IPSEC packets. however,
		 * we don't use it for that purpose.
		 */
		val = (unsigned long) cp->init_rxds[1] -
			(unsigned long) cp->init_block;
		writel((desc_dma + val) >> 32, cp->regs + REG_PLUS_RX_DB1_HI);
		writel((desc_dma + val) & 0xffffffff, cp->regs +
		       REG_PLUS_RX_DB1_LOW);
		writel(RX_DESC_RINGN_SIZE(1) - 4, cp->regs +
		       REG_PLUS_RX_KICK1);
	}

	/* rx completion registers */
	val = (unsigned long) cp->init_rxcs[0] -
		(unsigned long) cp->init_block;
	writel((desc_dma + val) >> 32, cp->regs + REG_RX_CB_HI);
	writel((desc_dma + val) & 0xffffffff, cp->regs + REG_RX_CB_LOW);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		/* rx comp 2-4 */
		for (i = 1; i < MAX_RX_COMP_RINGS; i++) {
			val = (unsigned long) cp->init_rxcs[i] -
				(unsigned long) cp->init_block;
			writel((desc_dma + val) >> 32, cp->regs +
			       REG_PLUS_RX_CBN_HI(i));
			writel((desc_dma + val) & 0xffffffff, cp->regs +
			       REG_PLUS_RX_CBN_LOW(i));
		}
	}

	/* read selective clear regs to prevent spurious interrupts
	 * on reset because complete == kick.
	 * selective clear set up to prevent interrupts on resets
	 */
	readl(cp->regs + REG_INTR_STATUS_ALIAS);
	writel(INTR_RX_DONE | INTR_RX_BUF_UNAVAIL, cp->regs + REG_ALIAS_CLEAR);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		for (i = 1; i < N_RX_COMP_RINGS; i++)
			readl(cp->regs + REG_PLUS_INTRN_STATUS_ALIAS(i));

		/* 2 is different from 3 and 4 */
		if (N_RX_COMP_RINGS > 1)
			writel(INTR_RX_DONE_ALT | INTR_RX_BUF_UNAVAIL_1,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(1));

		for (i = 2; i < N_RX_COMP_RINGS; i++)
			writel(INTR_RX_DONE_ALT,
			       cp->regs + REG_PLUS_ALIASN_CLEAR(i));
	}

	/* set up pause thresholds */
	val  = CAS_BASE(RX_PAUSE_THRESH_OFF,
			cp->rx_pause_off / RX_PAUSE_THRESH_QUANTUM);
	val |= CAS_BASE(RX_PAUSE_THRESH_ON,
			cp->rx_pause_on / RX_PAUSE_THRESH_QUANTUM);
	writel(val, cp->regs + REG_RX_PAUSE_THRESH);

	/* zero out dma reassembly buffers */
	for (i = 0; i < 64; i++) {
		writel(i, cp->regs + REG_RX_TABLE_ADDR);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_LOW);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_MID);
		writel(0x0, cp->regs + REG_RX_TABLE_DATA_HI);
	}

	/* make sure address register is 0 for normal operation */
	writel(0x0, cp->regs + REG_RX_CTRL_FIFO_ADDR);
	writel(0x0, cp->regs + REG_RX_IPP_FIFO_ADDR);

	/* interrupt mitigation */
#ifdef USE_RX_BLANK
	val = CAS_BASE(RX_BLANK_INTR_TIME, RX_BLANK_INTR_TIME_VAL);
	val |= CAS_BASE(RX_BLANK_INTR_PKT, RX_BLANK_INTR_PKT_VAL);
	writel(val, cp->regs + REG_RX_BLANK);
#else
	writel(0x0, cp->regs + REG_RX_BLANK);
#endif

	/* interrupt generation as a function of low water marks for
	 * free desc and completion entries. these are used to trigger
	 * housekeeping for rx descs. we don't use the free interrupt
	 * as it's not very useful
	 */
	/* val = CAS_BASE(RX_AE_THRESH_FREE, RX_AE_FREEN_VAL(0)); */
	val = CAS_BASE(RX_AE_THRESH_COMP, RX_AE_COMP_VAL);
	writel(val, cp->regs + REG_RX_AE_THRESH);
	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		val = CAS_BASE(RX_AE1_THRESH_FREE, RX_AE_FREEN_VAL(1));
		writel(val, cp->regs + REG_PLUS_RX_AE1_THRESH);
	}

	/* Random early detect registers. useful for congestion avoidance.
	 * this should be tunable.
	 */
	writel(0x0, cp->regs + REG_RX_RED);

	/* receive page sizes. default == 2K (0x800) */
	val = 0;
	if (cp->page_size == 0x1000)
		val = 0x1;
	else if (cp->page_size == 0x2000)
		val = 0x2;
	else if (cp->page_size == 0x4000)
		val = 0x3;

	/* round mtu + offset. constrain to page size. */
	size = cp->dev->mtu + 64;
	if (size > cp->page_size)
		size = cp->page_size;

	if (size <= 0x400)
		i = 0x0;
	else if (size <= 0x800)
		i = 0x1;
	else if (size <= 0x1000)
		i = 0x2;
	else
		i = 0x3;

	cp->mtu_stride = 1 << (i + 10);
	val  = CAS_BASE(RX_PAGE_SIZE, val);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_STRIDE, i);
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_COUNT, cp->page_size >> (i + 10));
	val |= CAS_BASE(RX_PAGE_SIZE_MTU_OFF, 0x1);
	writel(val, cp->regs + REG_RX_PAGE_SIZE);

	/* enable the header parser if desired */
	if (CAS_HP_FIRMWARE == cas_prog_null)
		return;

	val = CAS_BASE(HP_CFG_NUM_CPU, CAS_NCPUS > 63 ? 0 : CAS_NCPUS);
	val |= HP_CFG_PARSE_EN | HP_CFG_SYN_INC_MASK;
	val |= CAS_BASE(HP_CFG_TCP_THRESH, HP_TCP_THRESH_VAL);
	writel(val, cp->regs + REG_HP_CFG);
}

/* Reset one RX completion descriptor to its "owned by chip" state. */
static inline void cas_rxc_init(struct cas_rx_comp *rxc)
{
	memset(rxc, 0, sizeof(*rxc));
	rxc->word4 = cpu_to_le64(RX_COMP4_ZERO);
}

/* NOTE: we use the ENC RX DESC ring for spares. the rx_page[0,1]
 * flipping is protected by the fact that the chip will not
 * hand back the same page index while it's being processed.
 */
/* Return a usable spare page for @index: the existing ring-1 page if
 * nothing else references it, otherwise a fresh page from the spare
 * pool (parking the busy one on the in-use list). May return NULL. */
static inline cas_page_t *cas_page_spare(struct cas *cp, const int index)
{
	cas_page_t *page = cp->rx_pages[1][index];
	cas_page_t *new;

	if (page_count(page->buffer) == 1)
		return page;

	new = cas_page_dequeue(cp);
	if (new) {
		spin_lock(&cp->rx_inuse_lock);
		list_add(&page->list, &cp->rx_inuse_list);
		spin_unlock(&cp->rx_inuse_lock);
	}
	return new;
}

/* this needs to be changed if we actually use the ENC RX DESC ring */
static cas_page_t *cas_page_swap(struct cas *cp, const int ring,
				 const int index)
{
	cas_page_t **page0 = cp->rx_pages[0];
	cas_page_t **page1 = cp->rx_pages[1];

	/* swap if buffer is in use */
	if (page_count(page0[index]->buffer) > 1) {
		cas_page_t *new = cas_page_spare(cp, index);
		if (new) {
			page1[index] = page0[index];
			page0[index] = new;
		}
	}
	RX_USED_SET(page0[index], 0);
	return page0[index];
}

/* Drop all queued RX flow skbs and re-populate RX descriptor ring 0
 * with (possibly swapped-in) pages; resets ring bookkeeping. */
static void cas_clean_rxds(struct cas *cp)
{
	/* only clean ring 0 as ring 1 is used for spare buffers */
	struct cas_rx_desc *rxd = cp->init_rxds[0];
	int i, size;

	/* release all rx flows */
	for (i = 0; i < N_RX_FLOWS; i++) {
		struct sk_buff *skb;
		while ((skb = __skb_dequeue(&cp->rx_flows[i]))) {
			cas_skb_release(skb);
		}
	}

	/* initialize descriptors */
	size = RX_DESC_RINGN_SIZE(0);
	for (i = 0; i < size; i++) {
		cas_page_t *page = cas_page_swap(cp, 0, i);
		rxd[i].buffer = cpu_to_le64(page->dma_addr);
		rxd[i].index  = cpu_to_le64(CAS_BASE(RX_INDEX_NUM, i) |
					    CAS_BASE(RX_INDEX_RING, 0));
	}

	cp->rx_old[0]  = RX_DESC_RINGN_SIZE(0) - 4;
	cp->rx_last[0] = 0;
	cp->cas_flags &= ~CAS_FLAG_RXD_POST(0);
}

static void
cas_clean_rxcs(struct cas *cp)
{
	int i, j;

	/* take ownership of rx comp descriptors */
	memset(cp->rx_cur, 0, sizeof(*cp->rx_cur)*N_RX_COMP_RINGS);
	memset(cp->rx_new, 0, sizeof(*cp->rx_new)*N_RX_COMP_RINGS);
	for (i = 0; i < N_RX_COMP_RINGS; i++) {
		struct cas_rx_comp *rxc = cp->init_rxcs[i];
		for (j = 0; j < RX_COMP_RINGN_SIZE(i); j++) {
			cas_rxc_init(rxc + j);
		}
	}
}

#if 0
/* When we get a RX fifo overflow, the RX unit is probably hung
 * so we do the following.
 *
 * If any part of the reset goes wrong, we return 1 and that causes the
 * whole chip to be reset.
 */
/* (Currently compiled out.) Attempt a targeted reset of just the RX
 * MAC/DMA path rather than the whole chip. */
static int cas_rxmac_reset(struct cas *cp)
{
	struct net_device *dev = cp->dev;
	int limit;
	u32 val;

	/* First, reset MAC RX. */
	writel(cp->mac_rx_cfg & ~MAC_RX_CFG_EN,
	       cp->regs + REG_MAC_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX MAC will not disable, resetting whole chip\n");
		return 1;
	}

	/* Second, disable RX DMA. */
	writel(0, cp->regs + REG_RX_CFG);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_RX_CFG) & RX_CFG_DMA_EN))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX DMA will not disable, resetting whole chip\n");
		return 1;
	}

	mdelay(5);

	/* Execute RX reset command. */
	writel(SW_RESET_RX, cp->regs + REG_SW_RESET);
	for (limit = 0; limit < STOP_TRIES; limit++) {
		if (!(readl(cp->regs + REG_SW_RESET) & SW_RESET_RX))
			break;
		udelay(10);
	}
	if (limit == STOP_TRIES) {
		netdev_err(dev, "RX reset command will not execute, resetting whole chip\n");
		return 1;
	}

	/* reset driver rx state */
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);

	/* Now, reprogram the rest of RX unit. */
	cas_init_rx_dma(cp);

	/* re-enable */
	val = readl(cp->regs + REG_RX_CFG);
	writel(val | RX_CFG_DMA_EN, cp->regs + REG_RX_CFG);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);
	val = readl(cp->regs + REG_MAC_RX_CFG);
	writel(val | MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG);
	return 0;
}
#endif

/* RX MAC interrupt: fold the chip's 16-bit rollover error counters
 * into net_stats under stat_lock[0]. Always returns 0. */
static int cas_rxmac_interrupt(struct net_device *dev, struct cas *cp,
			       u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_RX_STATUS);

	if (!stat)
		return 0;

	netif_dbg(cp, intr, cp->dev, "rxmac interrupt, stat: 0x%x\n", stat);

	/* these are all rollovers */
	spin_lock(&cp->stat_lock[0]);
	if (stat & MAC_RX_ALIGN_ERR)
		cp->net_stats[0].rx_frame_errors += 0x10000;

	if (stat & MAC_RX_CRC_ERR)
		cp->net_stats[0].rx_crc_errors += 0x10000;

	if (stat & MAC_RX_LEN_ERR)
		cp->net_stats[0].rx_length_errors += 0x10000;

	if (stat & MAC_RX_OVERFLOW) {
		cp->net_stats[0].rx_over_errors++;
		cp->net_stats[0].rx_fifo_errors++;
	}

	/* We do not track MAC_RX_FRAME_COUNT and MAC_RX_VIOL_ERR
	 * events.
	 */
	spin_unlock(&cp->stat_lock[0]);
	return 0;
}

/* MAC control interrupt: records pause-frame activity for diagnostics.
 * Always returns 0. */
static int cas_mac_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MAC_CTRL_STATUS);

	if (!stat)
		return 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "mac interrupt, stat: 0x%x\n", stat);

	/* This interrupt is just for pause frame and pause
	 * tracking.  It is useful for diagnostics and debug
	 * but probably by default we will mask these events.
	 */
	if (stat & MAC_CTRL_PAUSE_STATE)
		cp->pause_entered++;

	if (stat & MAC_CTRL_PAUSE_RECEIVED)
		cp->pause_last_time_recvd = (stat >> 16);

	return 0;
}

/* Must be invoked under cp->lock.
*/ static inline int cas_mdio_link_not_up(struct cas *cp) { u16 val; switch (cp->lstate) { case link_force_ret: netif_info(cp, link, cp->dev, "Autoneg failed again, keeping forced mode\n"); cas_phy_write(cp, MII_BMCR, cp->link_fcntl); cp->timer_ticks = 5; cp->lstate = link_force_ok; cp->link_transition = LINK_TRANSITION_LINK_CONFIG; break; case link_aneg: val = cas_phy_read(cp, MII_BMCR); /* Try forced modes. we try things in the following order: * 1000 full -> 100 full/half -> 10 half */ val &= ~(BMCR_ANRESTART | BMCR_ANENABLE); val |= BMCR_FULLDPLX; val |= (cp->cas_flags & CAS_FLAG_1000MB_CAP) ? CAS_BMCR_SPEED1000 : BMCR_SPEED100; cas_phy_write(cp, MII_BMCR, val); cp->timer_ticks = 5; cp->lstate = link_force_try; cp->link_transition = LINK_TRANSITION_LINK_CONFIG; break; case link_force_try: /* Downgrade from 1000 to 100 to 10 Mbps if necessary. */ val = cas_phy_read(cp, MII_BMCR); cp->timer_ticks = 5; if (val & CAS_BMCR_SPEED1000) { /* gigabit */ val &= ~CAS_BMCR_SPEED1000; val |= (BMCR_SPEED100 | BMCR_FULLDPLX); cas_phy_write(cp, MII_BMCR, val); break; } if (val & BMCR_SPEED100) { if (val & BMCR_FULLDPLX) /* fd failed */ val &= ~BMCR_FULLDPLX; else { /* 100Mbps failed */ val &= ~BMCR_SPEED100; } cas_phy_write(cp, MII_BMCR, val); break; } default: break; } return 0; } /* must be invoked with cp->lock held */ static int cas_mii_link_check(struct cas *cp, const u16 bmsr) { int restart; if (bmsr & BMSR_LSTATUS) { /* Ok, here we got a link. If we had it due to a forced * fallback, and we were configured for autoneg, we * retry a short autoneg pass. 
		 * If you know your hub is
		 * broken, use ethtool ;)
		 */
		if ((cp->lstate == link_force_try) &&
		    (cp->link_cntl & BMCR_ANENABLE)) {
			cp->lstate = link_force_ret;
			cp->link_transition = LINK_TRANSITION_LINK_CONFIG;
			cas_mif_poll(cp, 0);
			cp->link_fcntl = cas_phy_read(cp, MII_BMCR);
			cp->timer_ticks = 5;
			if (cp->opened)
				netif_info(cp, link, cp->dev,
					   "Got link after fallback, retrying autoneg once...\n");
			cas_phy_write(cp, MII_BMCR,
				      cp->link_fcntl | BMCR_ANENABLE |
				      BMCR_ANRESTART);
			cas_mif_poll(cp, 1);

		} else if (cp->lstate != link_up) {
			cp->lstate = link_up;
			cp->link_transition = LINK_TRANSITION_LINK_UP;

			if (cp->opened) {
				cas_set_link_modes(cp);
				netif_carrier_on(cp->dev);
			}
		}
		return 0;
	}

	/* link not up. if the link was previously up, we restart the
	 * whole process
	 */
	restart = 0;
	if (cp->lstate == link_up) {
		cp->lstate = link_down;
		cp->link_transition = LINK_TRANSITION_LINK_DOWN;

		netif_carrier_off(cp->dev);
		if (cp->opened)
			netif_info(cp, link, cp->dev, "Link down\n");
		restart = 1;

	} else if (++cp->timer_ticks > 10)
		cas_mdio_link_not_up(cp);

	return restart;
}

/* MIF status interrupt: the MIF poller sampled the PHY's BMSR; if the
 * poll-status bit indicates a change, feed it to the link state
 * machine.  Returns non-zero if a chip reset should follow.
 */
static int cas_mif_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_MIF_STATUS);
	u16 bmsr;

	/* check for a link change */
	if (CAS_VAL(MIF_STATUS_POLL_STATUS, stat) == 0)
		return 0;

	bmsr = CAS_VAL(MIF_STATUS_POLL_DATA, stat);
	return cas_mii_link_check(cp, bmsr);
}

/* PCI error interrupt: decode and log the cause (interrogating PCI
 * config space for PCI_ERR_OTHER) and clear latched config-space error
 * bits.  Returns 1 so the caller always schedules a full chip reset.
 */
static int cas_pci_interrupt(struct net_device *dev, struct cas *cp,
			     u32 status)
{
	u32 stat = readl(cp->regs + REG_PCI_ERR_STATUS);

	if (!stat)
		return 0;

	netdev_err(dev, "PCI error [%04x:%04x]",
		   stat, readl(cp->regs + REG_BIM_DIAG));

	/* cassini+ has this reserved */
	if ((stat & PCI_ERR_BADACK) &&
	    ((cp->cas_flags & CAS_FLAG_REG_PLUS) == 0))
		pr_cont(" <No ACK64# during ABS64 cycle>");

	if (stat & PCI_ERR_DTRTO)
		pr_cont(" <Delayed transaction timeout>");
	if (stat & PCI_ERR_OTHER)
		pr_cont(" <other>");
	if (stat & PCI_ERR_BIM_DMA_WRITE)
		pr_cont(" <BIM DMA 0 write req>");
	if (stat & PCI_ERR_BIM_DMA_READ)
		pr_cont(" <BIM DMA 0 read req>");
	pr_cont("\n");

	if (stat & PCI_ERR_OTHER) {
		u16 cfg;

		/* Interrogate PCI config space for the
		 * true cause.
		 */
		pci_read_config_word(cp->pdev, PCI_STATUS, &cfg);
		netdev_err(dev, "Read PCI cfg space status [%04x]\n", cfg);
		if (cfg & PCI_STATUS_PARITY)
			netdev_err(dev, "PCI parity error detected\n");
		if (cfg & PCI_STATUS_SIG_TARGET_ABORT)
			netdev_err(dev, "PCI target abort\n");
		if (cfg & PCI_STATUS_REC_TARGET_ABORT)
			netdev_err(dev, "PCI master acks target abort\n");
		if (cfg & PCI_STATUS_REC_MASTER_ABORT)
			netdev_err(dev, "PCI master abort\n");
		if (cfg & PCI_STATUS_SIG_SYSTEM_ERROR)
			netdev_err(dev, "PCI system error SERR#\n");
		if (cfg & PCI_STATUS_DETECTED_PARITY)
			netdev_err(dev, "PCI parity error\n");

		/* Write the error bits back to clear them. */
		cfg &= (PCI_STATUS_PARITY |
			PCI_STATUS_SIG_TARGET_ABORT |
			PCI_STATUS_REC_TARGET_ABORT |
			PCI_STATUS_REC_MASTER_ABORT |
			PCI_STATUS_SIG_SYSTEM_ERROR |
			PCI_STATUS_DETECTED_PARITY);
		pci_write_config_word(cp->pdev, PCI_STATUS, cfg);
	}

	/* For all PCI errors, we should reset the chip. */
	return 1;
}

/* All non-normal interrupt conditions get serviced here.
 * Returns non-zero if we should just exit the interrupt
 * handler right now (ie. if we reset the card which invalidates
 * all of the other original irq status bits).
 */
static int cas_abnormal_irq(struct net_device *dev, struct cas *cp,
			    u32 status)
{
	if (status & INTR_RX_TAG_ERROR) {
		/* corrupt RX tag framing */
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "corrupt rx tag framing\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	if (status & INTR_RX_LEN_MISMATCH) {
		/* length mismatch.
		 */
		netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
			     "length mismatch for rx frame\n");
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_errors++;
		spin_unlock(&cp->stat_lock[0]);
		goto do_reset;
	}

	/* delegate to the per-subsystem handlers; any of them can
	 * request a reset by returning non-zero
	 */
	if (status & INTR_PCS_STATUS) {
		if (cas_pcs_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_TX_MAC_STATUS) {
		if (cas_txmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_RX_MAC_STATUS) {
		if (cas_rxmac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MAC_CTRL_STATUS) {
		if (cas_mac_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_MIF_STATUS) {
		if (cas_mif_interrupt(dev, cp, status))
			goto do_reset;
	}

	if (status & INTR_PCI_ERROR_STATUS) {
		if (cas_pci_interrupt(dev, cp, status))
			goto do_reset;
	}
	return 0;

do_reset:
#if 1
	atomic_inc(&cp->reset_task_pending);
	atomic_inc(&cp->reset_task_pending_all);
	netdev_err(dev, "reset called in cas_abnormal_irq [0x%x]\n", status);
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
	netdev_err(dev, "reset called in cas_abnormal_irq\n");
	schedule_work(&cp->reset_task);
#endif
	return 1;
}

/* NOTE: CAS_TABORT returns 1 or 2 so that it can be used when
 * determining whether to do a netif_stop/wakeup
 */
#define CAS_TABORT(x)      (((x)->cas_flags & CAS_FLAG_TARGET_ABORT) ? 2 : 1)
#define CAS_ROUND_PAGE(x)  (((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* How many trailing bytes of a TX buffer must be bounced through a
 * tiny buffer to dodge the target-abort erratum: if the buffer ends
 * within TX_TARGET_ABORT_LEN bytes of a page boundary, the final
 * TX_TARGET_ABORT_LEN bytes are copied instead of DMA'd directly.
 */
static inline int cas_calc_tabort(struct cas *cp, const unsigned long addr,
				  const int len)
{
	unsigned long off = addr + len;

	if (CAS_TABORT(cp) == 1)
		return 0;
	if ((CAS_ROUND_PAGE(off) - off) > TX_TARGET_ABORT_LEN)
		return 0;
	return TX_TARGET_ABORT_LEN;
}

/* Reclaim completed TX descriptors on one ring up to index `limit`:
 * unmap buffers, update stats, free the skbs, and wake the queue when
 * enough descriptors are free again.  Takes cp->tx_lock[ring].
 */
static inline void cas_tx_ringN(struct cas *cp, int ring, int limit)
{
	struct cas_tx_desc *txds;
	struct sk_buff **skbs;
	struct net_device *dev = cp->dev;
	int entry, count;

	spin_lock(&cp->tx_lock[ring]);
	txds = cp->init_txds[ring];
	skbs = cp->tx_skbs[ring];
	entry = cp->tx_old[ring];

	count = TX_BUFF_COUNT(ring, entry, limit);
	while (entry != limit) {
		struct sk_buff *skb = skbs[entry];
		dma_addr_t daddr;
		u32 dlen;
		int frag;

		if (!skb) {
			/* this should never occur */
			entry = TX_DESC_NEXT(ring, entry);
			continue;
		}

		/* however, we might get only a partial skb release. */
		count -= skb_shinfo(skb)->nr_frags +
			+ cp->tx_tiny_use[ring][entry].nbufs + 1;
		if (count < 0)
			break;

		netif_printk(cp, tx_done, KERN_DEBUG, cp->dev,
			     "tx[%d] done, slot %d\n", ring, entry);

		skbs[entry] = NULL;
		cp->tx_tiny_use[ring][entry].nbufs = 0;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			struct cas_tx_desc *txd = txds + entry;

			daddr = le64_to_cpu(txd->buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd->control));
			pci_unmap_page(cp->pdev, daddr, dlen,
				       PCI_DMA_TODEVICE);
			entry = TX_DESC_NEXT(ring, entry);

			/* tiny buffer may follow */
			if (cp->tx_tiny_use[ring][entry].used) {
				cp->tx_tiny_use[ring][entry].used = 0;
				entry = TX_DESC_NEXT(ring, entry);
			}
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].tx_packets++;
		cp->net_stats[ring].tx_bytes += skb->len;
		spin_unlock(&cp->stat_lock[ring]);
		dev_kfree_skb_irq(skb);
	}
	cp->tx_old[ring] = entry;

	/* this is wrong for multiple tx rings. the net device needs
	 * multiple queues for this to do the right thing.
	 * we wait
	 * for 2*packets to be available when using tiny buffers
	 */
	if (netif_queue_stopped(dev) &&
	    (TX_BUFFS_AVAIL(cp, ring) > CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1)))
		netif_wake_queue(dev);
	spin_unlock(&cp->tx_lock[ring]);
}

/* TX completion interrupt: for every ring, read the completion index
 * (from the DMA'd compwb block, or the completion register) and
 * reclaim descriptors up to it.
 */
static void cas_tx(struct net_device *dev, struct cas *cp,
		   u32 status)
{
	int limit, ring;
#ifdef USE_TX_COMPWB
	u64 compwb = le64_to_cpu(cp->init_block->tx_compwb);
#endif
	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "tx interrupt, status: 0x%x, %llx\n",
		     status, (unsigned long long)compwb);
	/* process all the rings */
	for (ring = 0; ring < N_TX_RINGS; ring++) {
#ifdef USE_TX_COMPWB
		/* use the completion writeback registers */
		limit = (CAS_VAL(TX_COMPWB_MSB, compwb) << 8) |
			CAS_VAL(TX_COMPWB_LSB, compwb);
		compwb = TX_COMPWB_NEXT(compwb);
#else
		limit = readl(cp->regs + REG_TX_COMPN(ring));
#endif
		if (cp->tx_old[ring] != limit)
			cas_tx_ringN(cp, ring, limit);
	}
}

/* Build an skb for one received packet described by the completion
 * words.  Small packets are copied into the skb linear area; larger
 * ones attach the RX pages as frags.  Returns the packet length, or
 * -1 on allocation/bounds failure (caller drops and recycles).
 */
static int cas_rx_process_pkt(struct cas *cp, struct cas_rx_comp *rxc,
			      int entry, const u64 *words,
			      struct sk_buff **skbref)
{
	int dlen, hlen, len, i, alloclen;
	int off, swivel = RX_SWIVEL_OFF_VAL;
	struct cas_page *page;
	struct sk_buff *skb;
	void *addr, *crcaddr;
	__sum16 csum;
	char *p;

	hlen = CAS_VAL(RX_COMP2_HDR_SIZE, words[1]);
	dlen = CAS_VAL(RX_COMP1_DATA_SIZE, words[0]);
	len = hlen + dlen;

	/* small packets are copied whole; otherwise only the header */
	if (RX_COPY_ALWAYS || (words[2] & RX_COMP3_SMALL_PKT))
		alloclen = len;
	else
		alloclen = max(hlen, RX_COPY_MIN);

	skb = netdev_alloc_skb(cp->dev, alloclen + swivel + cp->crc_size);
	if (skb == NULL)
		return -1;

	*skbref = skb;
	skb_reserve(skb, swivel);

	p = skb->data;
	addr = crcaddr = NULL;
	if (hlen) { /* always copy header pages */
		i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP2_HDR_OFF, words[1]) * 0x100 +
			swivel;

		i = hlen;
		if (!dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off,
					       i, PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		RX_USED_ADD(page, 0x100);
		p += hlen;
		swivel = 0;
	}

	if (alloclen < (hlen + dlen)) {
		skb_frag_t *frag = skb_shinfo(skb)->frags;

		/* normal or jumbo packets. we use frags */
		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;

		/* hlen now = bytes of data in the first page */
		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
				     "rx page overflow: %d\n", hlen);
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);

		/* make sure we always copy a header */
		swivel = 0;
		if (p == (char *) skb->data) { /* not split */
			addr = cas_page_map(page->buffer);
			memcpy(p, addr + off, RX_COPY_MIN);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off,
					i, PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			off += RX_COPY_MIN;
			swivel = RX_COPY_MIN;
			RX_USED_ADD(page, cp->mtu_stride);
		} else {
			RX_USED_ADD(page, hlen);
		}
		skb_put(skb, alloclen);

		skb_shinfo(skb)->nr_frags++;
		skb->data_len += hlen - swivel;
		skb->truesize += hlen - swivel;
		skb->len += hlen - swivel;

		__skb_frag_set_page(frag, page->buffer);
		__skb_frag_ref(frag);
		frag->page_offset = off;
		skb_frag_size_set(frag, hlen - swivel);

		/* any more data?
		 */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			/* data continues in a second page: attach it as
			 * another frag
			 */
			hlen = dlen;
			off = 0;

			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
					    hlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
					    hlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);

			skb_shinfo(skb)->nr_frags++;
			skb->data_len += hlen;
			skb->len += hlen;
			frag++;

			__skb_frag_set_page(frag, page->buffer);
			__skb_frag_ref(frag);
			frag->page_offset = 0;
			skb_frag_size_set(frag, hlen);
			RX_USED_ADD(page, hlen + cp->crc_size);
		}

		if (cp->crc_size) {
			/* FCS lives right after the data in the last page */
			addr = cas_page_map(page->buffer);
			crcaddr = addr + off + hlen;
		}

	} else {
		/* copying packet */
		if (!dlen)
			goto end_copy_pkt;

		i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
		page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
		off = CAS_VAL(RX_COMP1_DATA_OFF, words[0]) + swivel;
		hlen = min(cp->page_size - off, dlen);
		if (hlen < 0) {
			netif_printk(cp, rx_err, KERN_DEBUG, cp->dev,
				     "rx page overflow: %d\n", hlen);
			dev_kfree_skb_irq(skb);
			return -1;
		}
		i = hlen;
		if (i == dlen) /* attach FCS */
			i += cp->crc_size;
		pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr + off, i,
				    PCI_DMA_FROMDEVICE);
		addr = cas_page_map(page->buffer);
		memcpy(p, addr + off, i);
		pci_dma_sync_single_for_device(cp->pdev, page->dma_addr + off,
					       i, PCI_DMA_FROMDEVICE);
		cas_page_unmap(addr);
		if (p == (char *) skb->data) /* not split */
			RX_USED_ADD(page, cp->mtu_stride);
		else
			RX_USED_ADD(page, i);

		/* any more data?
		 */
		if ((words[0] & RX_COMP1_SPLIT_PKT) && ((dlen -= hlen) > 0)) {
			/* copy the continuation from the second page */
			p += hlen;
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			page = cp->rx_pages[CAS_VAL(RX_INDEX_RING, i)][CAS_VAL(RX_INDEX_NUM, i)];
			pci_dma_sync_single_for_cpu(cp->pdev, page->dma_addr,
					    dlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
			addr = cas_page_map(page->buffer);
			memcpy(p, addr, dlen + cp->crc_size);
			pci_dma_sync_single_for_device(cp->pdev, page->dma_addr,
					    dlen + cp->crc_size,
					    PCI_DMA_FROMDEVICE);
			cas_page_unmap(addr);
			RX_USED_ADD(page, dlen + cp->crc_size);
		}
end_copy_pkt:
		if (cp->crc_size) {
			/* copied path: FCS sits at the end of the skb data */
			addr = NULL;
			crcaddr = skb->data + alloclen;
		}
		skb_put(skb, alloclen);
	}

	csum = (__force __sum16)htons(CAS_VAL(RX_COMP4_TCP_CSUM, words[3]));
	if (cp->crc_size) {
		/* checksum includes FCS. strip it out. */
		csum = csum_fold(csum_partial(crcaddr, cp->crc_size,
					      csum_unfold(csum)));
		if (addr)
			cas_page_unmap(addr);
	}
	skb->protocol = eth_type_trans(skb, cp->dev);
	if (skb->protocol == htons(ETH_P_IP)) {
		skb->csum = csum_unfold(~csum);
		skb->ip_summed = CHECKSUM_COMPLETE;
	} else
		skb_checksum_none_assert(skb);
	return len;
}


/* we can handle up to 64 rx flows at a time. we do the same thing
 * as nonreassm except that we batch up the buffers.
 * NOTE: we currently just treat each flow as a bunch of packets that
 *       we pass up. a better way would be to coalesce the packets
 *       into a jumbo packet. to do that, we need to do the following:
 *       1) the first packet will have a clean split between header and
 *          data. save both.
 *       2) each time the next flow packet comes in, extend the
 *          data length and merge the checksums.
 *       3) on flow release, fix up the header.
 *       4) make sure the higher layer doesn't care.
 * because packets get coalesced, we shouldn't run into fragment count
 * issues.
 */
static inline void cas_rx_flow_pkt(struct cas *cp, const u64 *words,
				   struct sk_buff *skb)
{
	int flowid = CAS_VAL(RX_COMP3_FLOWID, words[2]) & (N_RX_FLOWS - 1);
	struct sk_buff_head *flow = &cp->rx_flows[flowid];

	/* this is protected at a higher layer, so no need to
	 * do any additional locking here. stick the buffer
	 * at the end.
	 */
	__skb_queue_tail(flow, skb);
	if (words[0] & RX_COMP1_RELEASE_FLOW) {
		/* flow complete: pass the whole batch up the stack */
		while ((skb = __skb_dequeue(flow))) {
			cas_skb_release(skb);
		}
	}
}

/* put rx descriptor back on ring. if a buffer is in use by a higher
 * layer, this will need to put in a replacement.
 */
static void cas_post_page(struct cas *cp, const int ring, const int index)
{
	cas_page_t *new;
	int entry;

	entry = cp->rx_old[ring];

	new = cas_page_swap(cp, ring, index);
	cp->init_rxds[ring][entry].buffer = cpu_to_le64(new->dma_addr);
	cp->init_rxds[ring][entry].index =
		cpu_to_le64(CAS_BASE(RX_INDEX_NUM, index) |
			    CAS_BASE(RX_INDEX_RING, ring));

	entry = RX_DESC_ENTRY(ring, entry + 1);
	cp->rx_old[ring] = entry;

	/* only kick the hardware every four descriptors */
	if (entry % 4)
		return;

	if (ring == 0)
		writel(entry, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(entry, cp->regs + REG_PLUS_RX_KICK1);
}

/* only when things are bad */
static int cas_post_rxds_ringN(struct cas *cp, int ring, int num)
{
	unsigned int entry, last, count, released;
	int cluster;
	cas_page_t **page = cp->rx_pages[ring];

	entry = cp->rx_old[ring];

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "rxd[%d] interrupt, done: %d\n", ring, entry);

	cluster = -1;
	count = entry & 0x3;
	last = RX_DESC_ENTRY(ring, num ?
			     entry + num - 4: entry - 4);
	released = 0;
	while (entry != last) {
		/* make a new buffer if it's still in use */
		if (page_count(page[entry]->buffer) > 1) {
			cas_page_t *new = cas_page_dequeue(cp);
			if (!new) {
				/* let the timer know that we need to
				 * do this again
				 */
				cp->cas_flags |= CAS_FLAG_RXD_POST(ring);
				if (!timer_pending(&cp->link_timer))
					mod_timer(&cp->link_timer, jiffies +
						  CAS_LINK_FAST_TIMEOUT);
				cp->rx_old[ring] = entry;
				cp->rx_last[ring] = num ? num - released : 0;
				return -ENOMEM;
			}
			/* park the busy page on the in-use list until the
			 * upper layer drops its reference
			 */
			spin_lock(&cp->rx_inuse_lock);
			list_add(&page[entry]->list, &cp->rx_inuse_list);
			spin_unlock(&cp->rx_inuse_lock);
			cp->init_rxds[ring][entry].buffer =
				cpu_to_le64(new->dma_addr);
			page[entry] = new;
		}

		/* track the last 4-aligned entry so we can kick the hw */
		if (++count == 4) {
			cluster = entry;
			count = 0;
		}
		released++;
		entry = RX_DESC_ENTRY(ring, entry + 1);
	}
	cp->rx_old[ring] = entry;

	if (cluster < 0)
		return 0;

	if (ring == 0)
		writel(cluster, cp->regs + REG_RX_KICK);
	else if ((N_RX_DESC_RINGS > 1) &&
		 (cp->cas_flags & CAS_FLAG_REG_PLUS))
		writel(cluster, cp->regs + REG_PLUS_RX_KICK1);
	return 0;
}


/* process a completion ring. packets are set up in three basic ways:
 * small packets: should be copied header + data in single buffer.
 * large packets: header and data in a single buffer.
 * split packets: header in a separate buffer from data.
 *                data may be in multiple pages. data may be > 256
 *                bytes but in a single page.
 *
 * NOTE: RX page posting is done in this routine as well. while there's
 *       the capability of using multiple RX completion rings, it isn't
 *       really worthwhile due to the fact that the page posting will
 *       force serialization on the single descriptor ring.
 */
static int cas_rx_ringN(struct cas *cp, int ring, int budget)
{
	struct cas_rx_comp *rxcs = cp->init_rxcs[ring];
	int entry, drops;
	int npackets = 0;

	netif_printk(cp, intr, KERN_DEBUG, cp->dev,
		     "rx[%d] interrupt, done: %d/%d\n",
		     ring,
		     readl(cp->regs + REG_RX_COMP_HEAD), cp->rx_new[ring]);

	entry = cp->rx_new[ring];
	drops = 0;
	while (1) {
		struct cas_rx_comp *rxc = rxcs + entry;
		struct sk_buff *uninitialized_var(skb);
		int type, len;
		u64 words[4];
		int i, dring;

		words[0] = le64_to_cpu(rxc->word1);
		words[1] = le64_to_cpu(rxc->word2);
		words[2] = le64_to_cpu(rxc->word3);
		words[3] = le64_to_cpu(rxc->word4);

		/* don't touch if still owned by hw */
		type = CAS_VAL(RX_COMP1_TYPE, words[0]);
		if (type == 0)
			break;

		/* hw hasn't cleared the zero bit yet */
		if (words[3] & RX_COMP4_ZERO) {
			break;
		}

		/* get info on the packet */
		if (words[3] & (RX_COMP4_LEN_MISMATCH | RX_COMP4_BAD)) {
			spin_lock(&cp->stat_lock[ring]);
			cp->net_stats[ring].rx_errors++;
			if (words[3] & RX_COMP4_LEN_MISMATCH)
				cp->net_stats[ring].rx_length_errors++;
			if (words[3] & RX_COMP4_BAD)
				cp->net_stats[ring].rx_crc_errors++;
			spin_unlock(&cp->stat_lock[ring]);

			/* We'll just return it to Cassini. */
		drop_it:
			spin_lock(&cp->stat_lock[ring]);
			++cp->net_stats[ring].rx_dropped;
			spin_unlock(&cp->stat_lock[ring]);
			goto next;
		}

		len = cas_rx_process_pkt(cp, rxc, entry, words, &skb);
		if (len < 0) {
			++drops;
			goto drop_it;
		}

		/* see if it's a flow re-assembly or not. the driver
		 * itself handles release back up.
		 */
		if (RX_DONT_BATCH || (type == 0x2)) {
			/* non-reassm: these always get released */
			cas_skb_release(skb);
		} else {
			cas_rx_flow_pkt(cp, words, skb);
		}

		spin_lock(&cp->stat_lock[ring]);
		cp->net_stats[ring].rx_packets++;
		cp->net_stats[ring].rx_bytes += len;
		spin_unlock(&cp->stat_lock[ring]);

	next:
		npackets++;

		/* should it be released?
		 */
		if (words[0] & RX_COMP1_RELEASE_HDR) {
			i = CAS_VAL(RX_COMP2_HDR_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_DATA) {
			i = CAS_VAL(RX_COMP1_DATA_INDEX, words[0]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		if (words[0] & RX_COMP1_RELEASE_NEXT) {
			i = CAS_VAL(RX_COMP2_NEXT_INDEX, words[1]);
			dring = CAS_VAL(RX_INDEX_RING, i);
			i = CAS_VAL(RX_INDEX_NUM, i);
			cas_post_page(cp, dring, i);
		}

		/* skip to the next entry */
		entry = RX_COMP_ENTRY(ring, entry + 1 +
				      CAS_VAL(RX_COMP1_SKIP, words[0]));
#ifdef USE_NAPI
		if (budget && (npackets >= budget))
			break;
#endif
	}
	cp->rx_new[ring] = entry;

	if (drops)
		netdev_info(cp->dev, "Memory squeeze, deferring packet\n");
	return npackets;
}


/* put completion entries back on the ring */
static void cas_post_rxcs_ringN(struct net_device *dev,
				struct cas *cp, int ring)
{
	struct cas_rx_comp *rxc = cp->init_rxcs[ring];
	int last, entry;

	last = cp->rx_cur[ring];
	entry = cp->rx_new[ring];
	netif_printk(cp, intr, KERN_DEBUG, dev,
		     "rxc[%d] interrupt, done: %d/%d\n",
		     ring, readl(cp->regs + REG_RX_COMP_HEAD), entry);

	/* zero and re-mark descriptors */
	while (last != entry) {
		cas_rxc_init(rxc + last);
		last = RX_COMP_ENTRY(ring, last + 1);
	}
	cp->rx_cur[ring] = last;

	/* hand the re-initialized completion entries back to the hw */
	if (ring == 0)
		writel(last, cp->regs + REG_RX_COMP_TAIL);
	else if (cp->cas_flags & CAS_FLAG_REG_PLUS)
		writel(last, cp->regs + REG_PLUS_RX_COMPN_TAIL(ring));
}


/* cassini can use all four PCI interrupts for the completion ring.
 * rings 3 and 4 are identical
 */
#if defined(USE_PCI_INTC) || defined(USE_PCI_INTD)
/* housekeeping for completion rings 2/3 (everything but rx packets) */
static inline void cas_handle_irqN(struct net_device *dev,
				   struct cas *cp, const u32 status,
				   const int ring)
{
	if (status & (INTR_RX_COMP_FULL_ALT | INTR_RX_COMP_AF_ALT))
		cas_post_rxcs_ringN(dev, cp, ring);
}

/* interrupt handler for PCI INTC/INTD (completion rings 2 and 3) */
static irqreturn_t cas_interruptN(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	int ring = (irq == cp->pci_irq_INTC) ? 2 : 3;
	u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(ring));

	/* check for shared irq */
	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & INTR_RX_DONE_ALT) { /* handle rx separately */
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, ring, 0);
#endif
		status &= ~INTR_RX_DONE_ALT;
	}

	if (status)
		cas_handle_irqN(dev, cp, status, ring);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}
#endif

#ifdef USE_PCI_INTB
/* everything but rx packets */
static inline void cas_handle_irq1(struct cas *cp, const u32 status)
{
	if (status & INTR_RX_BUF_UNAVAIL_1) {
		/* Frame arrived, no free RX buffers available.
		 * NOTE: we can get this on a link transition.
*/ cas_post_rxds_ringN(cp, 1, 0); spin_lock(&cp->stat_lock[1]); cp->net_stats[1].rx_dropped++; spin_unlock(&cp->stat_lock[1]); } if (status & INTR_RX_BUF_AE_1) cas_post_rxds_ringN(cp, 1, RX_DESC_RINGN_SIZE(1) - RX_AE_FREEN_VAL(1)); if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL)) cas_post_rxcs_ringN(cp, 1); } /* ring 2 handles a few more events than 3 and 4 */ static irqreturn_t cas_interrupt1(int irq, void *dev_id) { struct net_device *dev = dev_id; struct cas *cp = netdev_priv(dev); unsigned long flags; u32 status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); /* check for shared interrupt */ if (status == 0) return IRQ_NONE; spin_lock_irqsave(&cp->lock, flags); if (status & INTR_RX_DONE_ALT) { /* handle rx separately */ #ifdef USE_NAPI cas_mask_intr(cp); napi_schedule(&cp->napi); #else cas_rx_ringN(cp, 1, 0); #endif status &= ~INTR_RX_DONE_ALT; } if (status) cas_handle_irq1(cp, status); spin_unlock_irqrestore(&cp->lock, flags); return IRQ_HANDLED; } #endif static inline void cas_handle_irq(struct net_device *dev, struct cas *cp, const u32 status) { /* housekeeping interrupts */ if (status & INTR_ERROR_MASK) cas_abnormal_irq(dev, cp, status); if (status & INTR_RX_BUF_UNAVAIL) { /* Frame arrived, no free RX buffers available. * NOTE: we can get this on a link transition. 
		 */
		cas_post_rxds_ringN(cp, 0, 0);
		spin_lock(&cp->stat_lock[0]);
		cp->net_stats[0].rx_dropped++;
		spin_unlock(&cp->stat_lock[0]);
	} else if (status & INTR_RX_BUF_AE) {
		/* almost-empty threshold crossed: top up ring 0 */
		cas_post_rxds_ringN(cp, 0, RX_DESC_RINGN_SIZE(0) -
				    RX_AE_FREEN_VAL(0));
	}

	if (status & (INTR_RX_COMP_AF | INTR_RX_COMP_FULL))
		cas_post_rxcs_ringN(dev, cp, 0);
}

/* Primary interrupt handler (INTA): TX completions, then RX (inline,
 * or deferred via NAPI), then housekeeping.
 */
static irqreturn_t cas_interrupt(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct cas *cp = netdev_priv(dev);
	unsigned long flags;
	u32 status = readl(cp->regs + REG_INTR_STATUS);

	if (status == 0)
		return IRQ_NONE;

	spin_lock_irqsave(&cp->lock, flags);
	if (status & (INTR_TX_ALL | INTR_TX_INTME)) {
		cas_tx(dev, cp, status);
		status &= ~(INTR_TX_ALL | INTR_TX_INTME);
	}

	if (status & INTR_RX_DONE) {
#ifdef USE_NAPI
		cas_mask_intr(cp);
		napi_schedule(&cp->napi);
#else
		cas_rx_ringN(cp, 0, 0);
#endif
		status &= ~INTR_RX_DONE;
	}

	if (status)
		cas_handle_irq(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);
	return IRQ_HANDLED;
}


#ifdef USE_NAPI
/* NAPI poll callback: reclaim TX, then consume RX completions across
 * all rings within `budget`, then run housekeeping for every
 * interrupt source before re-enabling interrupts.
 */
static int cas_poll(struct napi_struct *napi, int budget)
{
	struct cas *cp = container_of(napi, struct cas, napi);
	struct net_device *dev = cp->dev;
	int i, enable_intr, credits;
	u32 status = readl(cp->regs + REG_INTR_STATUS);
	unsigned long flags;

	spin_lock_irqsave(&cp->lock, flags);
	cas_tx(dev, cp, status);
	spin_unlock_irqrestore(&cp->lock, flags);

	/* NAPI rx packets.
we spread the credits across all of the * rxc rings * * to make sure we're fair with the work we loop through each * ring N_RX_COMP_RING times with a request of * budget / N_RX_COMP_RINGS */ enable_intr = 1; credits = 0; for (i = 0; i < N_RX_COMP_RINGS; i++) { int j; for (j = 0; j < N_RX_COMP_RINGS; j++) { credits += cas_rx_ringN(cp, j, budget / N_RX_COMP_RINGS); if (credits >= budget) { enable_intr = 0; goto rx_comp; } } } rx_comp: /* final rx completion */ spin_lock_irqsave(&cp->lock, flags); if (status) cas_handle_irq(dev, cp, status); #ifdef USE_PCI_INTB if (N_RX_COMP_RINGS > 1) { status = readl(cp->regs + REG_PLUS_INTRN_STATUS(1)); if (status) cas_handle_irq1(dev, cp, status); } #endif #ifdef USE_PCI_INTC if (N_RX_COMP_RINGS > 2) { status = readl(cp->regs + REG_PLUS_INTRN_STATUS(2)); if (status) cas_handle_irqN(dev, cp, status, 2); } #endif #ifdef USE_PCI_INTD if (N_RX_COMP_RINGS > 3) { status = readl(cp->regs + REG_PLUS_INTRN_STATUS(3)); if (status) cas_handle_irqN(dev, cp, status, 3); } #endif spin_unlock_irqrestore(&cp->lock, flags); if (enable_intr) { napi_complete(napi); cas_unmask_intr(cp); } return credits; } #endif #ifdef CONFIG_NET_POLL_CONTROLLER static void cas_netpoll(struct net_device *dev) { struct cas *cp = netdev_priv(dev); cas_disable_irq(cp, 0); cas_interrupt(cp->pdev->irq, dev); cas_enable_irq(cp, 0); #ifdef USE_PCI_INTB if (N_RX_COMP_RINGS > 1) { /* cas_interrupt1(); */ } #endif #ifdef USE_PCI_INTC if (N_RX_COMP_RINGS > 2) { /* cas_interruptN(); */ } #endif #ifdef USE_PCI_INTD if (N_RX_COMP_RINGS > 3) { /* cas_interruptN(); */ } #endif } #endif static void cas_tx_timeout(struct net_device *dev) { struct cas *cp = netdev_priv(dev); netdev_err(dev, "transmit timed out, resetting\n"); if (!cp->hw_running) { netdev_err(dev, "hrm.. 
hw not running!\n"); return; } netdev_err(dev, "MIF_STATE[%08x]\n", readl(cp->regs + REG_MIF_STATE_MACHINE)); netdev_err(dev, "MAC_STATE[%08x]\n", readl(cp->regs + REG_MAC_STATE_MACHINE)); netdev_err(dev, "TX_STATE[%08x:%08x:%08x] FIFO[%08x:%08x:%08x] SM1[%08x] SM2[%08x]\n", readl(cp->regs + REG_TX_CFG), readl(cp->regs + REG_MAC_TX_STATUS), readl(cp->regs + REG_MAC_TX_CFG), readl(cp->regs + REG_TX_FIFO_PKT_CNT), readl(cp->regs + REG_TX_FIFO_WRITE_PTR), readl(cp->regs + REG_TX_FIFO_READ_PTR), readl(cp->regs + REG_TX_SM_1), readl(cp->regs + REG_TX_SM_2)); netdev_err(dev, "RX_STATE[%08x:%08x:%08x]\n", readl(cp->regs + REG_RX_CFG), readl(cp->regs + REG_MAC_RX_STATUS), readl(cp->regs + REG_MAC_RX_CFG)); netdev_err(dev, "HP_STATE[%08x:%08x:%08x:%08x]\n", readl(cp->regs + REG_HP_STATE_MACHINE), readl(cp->regs + REG_HP_STATUS0), readl(cp->regs + REG_HP_STATUS1), readl(cp->regs + REG_HP_STATUS2)); #if 1 atomic_inc(&cp->reset_task_pending); atomic_inc(&cp->reset_task_pending_all); schedule_work(&cp->reset_task); #else atomic_set(&cp->reset_task_pending, CAS_RESET_ALL); schedule_work(&cp->reset_task); #endif } static inline int cas_intme(int ring, int entry) { /* Algorithm: IRQ every 1/2 of descriptors. 
	 */
	if (!(entry & ((TX_DESC_RINGN_SIZE(ring) >> 1) - 1)))
		return 1;
	return 0;
}

/* Fill in one TX descriptor; INTME is requested at every half-ring
 * boundary and EOF marks the last descriptor of a packet.
 */
static void cas_write_txd(struct cas *cp, int ring, int entry,
			  dma_addr_t mapping, int len, u64 ctrl, int last)
{
	struct cas_tx_desc *txd = cp->init_txds[ring] + entry;

	ctrl |= CAS_BASE(TX_DESC_BUFLEN, len);
	if (cas_intme(ring, entry))
		ctrl |= TX_DESC_INTME;
	if (last)
		ctrl |= TX_DESC_EOF;
	txd->control = cpu_to_le64(ctrl);
	txd->buffer = cpu_to_le64(mapping);
}

/* CPU address of the tiny (bounce) buffer for a given slot */
static inline void *tx_tiny_buf(struct cas *cp, const int ring,
				const int entry)
{
	return cp->tx_tiny_bufs[ring] + TX_TINY_BUF_LEN*entry;
}

/* DMA address of the tiny buffer for `entry`; marks it in use and
 * charges it to the packet starting at `tentry` for later reclaim.
 */
static inline dma_addr_t tx_tiny_map(struct cas *cp, const int ring,
				     const int entry, const int tentry)
{
	cp->tx_tiny_use[ring][tentry].nbufs++;
	cp->tx_tiny_use[ring][entry].used = 1;
	return cp->tx_tiny_dvma[ring] + TX_TINY_BUF_LEN*entry;
}

/* Map and queue one skb on a TX ring, bouncing page-boundary tails
 * through tiny buffers (target-abort erratum).  Returns 0 on success,
 * 1 if the ring unexpectedly had no room (queue is then stopped).
 */
static inline int cas_xmit_tx_ringN(struct cas *cp, int ring,
				    struct sk_buff *skb)
{
	struct net_device *dev = cp->dev;
	int entry, nr_frags, frag, tabort, tentry;
	dma_addr_t mapping;
	unsigned long flags;
	u64 ctrl;
	u32 len;

	spin_lock_irqsave(&cp->tx_lock[ring], flags);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp, ring) <=
	    CAS_TABORT(cp)*(skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
		netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
		return 1;
	}

	ctrl = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		const u64 csum_start_off = skb_checksum_start_offset(skb);
		const u64 csum_stuff_off = csum_start_off + skb->csum_offset;

		ctrl = TX_DESC_CSUM_EN |
			CAS_BASE(TX_DESC_CSUM_START, csum_start_off) |
			CAS_BASE(TX_DESC_CSUM_STUFF, csum_stuff_off);
	}

	entry = cp->tx_new[ring];
	cp->tx_skbs[ring][entry] = skb;

	nr_frags = skb_shinfo(skb)->nr_frags;
	len = skb_headlen(skb);
	mapping = pci_map_page(cp->pdev, virt_to_page(skb->data),
			       offset_in_page(skb->data), len,
			       PCI_DMA_TODEVICE);

	tentry = entry;
	tabort = cas_calc_tabort(cp, (unsigned long) skb->data, len);
	if (unlikely(tabort)) {
		/* NOTE: len is always > tabort */
		cas_write_txd(cp, ring, entry, mapping, len - tabort,
			      ctrl | TX_DESC_SOF, 0);
		entry = TX_DESC_NEXT(ring, entry);

		/* bounce the tail bytes through the tiny buffer */
		skb_copy_from_linear_data_offset(skb, len - tabort,
			      tx_tiny_buf(cp, ring, entry), tabort);
		mapping = tx_tiny_map(cp, ring, entry, tentry);
		cas_write_txd(cp, ring, entry, mapping, tabort, ctrl,
			      (nr_frags == 0));
	} else {
		cas_write_txd(cp, ring, entry, mapping, len, ctrl |
			      TX_DESC_SOF, (nr_frags == 0));
	}
	entry = TX_DESC_NEXT(ring, entry);

	for (frag = 0; frag < nr_frags; frag++) {
		const skb_frag_t *fragp = &skb_shinfo(skb)->frags[frag];

		len = skb_frag_size(fragp);
		mapping = skb_frag_dma_map(&cp->pdev->dev, fragp, 0, len,
					   DMA_TO_DEVICE);

		tabort = cas_calc_tabort(cp, fragp->page_offset, len);
		if (unlikely(tabort)) {
			void *addr;

			/* NOTE: len is always > tabort */
			cas_write_txd(cp, ring, entry, mapping, len - tabort,
				      ctrl, 0);
			entry = TX_DESC_NEXT(ring, entry);

			addr = cas_page_map(skb_frag_page(fragp));
			memcpy(tx_tiny_buf(cp, ring, entry),
			       addr + fragp->page_offset + len - tabort,
			       tabort);
			cas_page_unmap(addr);
			mapping = tx_tiny_map(cp, ring, entry, tentry);
			len = tabort;
		}

		cas_write_txd(cp, ring, entry, mapping, len, ctrl,
			      (frag + 1 == nr_frags));
		entry = TX_DESC_NEXT(ring, entry);
	}

	cp->tx_new[ring] = entry;
	if (TX_BUFFS_AVAIL(cp, ring) <=
	    CAS_TABORT(cp)*(MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	netif_printk(cp, tx_queued, KERN_DEBUG, dev,
		     "tx[%d] queued, slot %d, skblen %d, avail %d\n",
		     ring, entry, skb->len, TX_BUFFS_AVAIL(cp, ring));
	writel(entry, cp->regs + REG_TX_KICKN(ring));
	spin_unlock_irqrestore(&cp->tx_lock[ring], flags);
	return 0;
}

/* ndo_start_xmit: round-robin packets across the TX rings */
static netdev_tx_t cas_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct cas *cp = netdev_priv(dev);

	/* this is only used as a load-balancing hint, so it doesn't
	 * need to be SMP safe
	 */
	static int ring;

	if (skb_padto(skb, cp->min_frame_size))
		return NETDEV_TX_OK;

	/* XXX: we need some higher-level QoS hooks to steer packets to
	 *      individual queues.
	 */
	if (cas_xmit_tx_ringN(cp, ring++ & N_TX_RINGS_MASK, skb))
		return NETDEV_TX_BUSY;
	return NETDEV_TX_OK;
}

/* Program the TX DMA engine: completion writeback address, TX config,
 * and the descriptor ring base addresses.
 */
static void cas_init_tx_dma(struct cas *cp)
{
	u64 desc_dma = cp->block_dvma;
	unsigned long off;
	u32 val;
	int i;

	/* set up tx completion writeback registers. must be 8-byte aligned */
#ifdef USE_TX_COMPWB
	off = offsetof(struct cas_init_block, tx_compwb);
	writel((desc_dma + off) >> 32, cp->regs + REG_TX_COMPWB_DB_HI);
	writel((desc_dma + off) & 0xffffffff, cp->regs + REG_TX_COMPWB_DB_LOW);
#endif

	/* enable completion writebacks, enable paced mode,
	 * disable read pipe, and disable pre-interrupt compwbs
	 */
	val = TX_CFG_COMPWB_Q1 | TX_CFG_COMPWB_Q2 |
		TX_CFG_COMPWB_Q3 | TX_CFG_COMPWB_Q4 |
		TX_CFG_DMA_RDPIPE_DIS | TX_CFG_PACED_MODE |
		TX_CFG_INTR_COMPWB_DIS;

	/* write out tx ring info and tx desc bases */
	for (i = 0; i < MAX_TX_RINGS; i++) {
		off = (unsigned long) cp->init_txds[i] -
			(unsigned long) cp->init_block;

		val |= CAS_TX_RINGN_BASE(i);
		writel((desc_dma + off) >> 32, cp->regs + REG_TX_DBN_HI(i));
		writel((desc_dma + off) & 0xffffffff, cp->regs +
		       REG_TX_DBN_LOW(i));
		/* don't zero out the kick register here as the system
		 * will wedge
		 */
	}
	writel(val, cp->regs + REG_TX_CFG);

	/* program max burst sizes. these numbers should be different
	 * if doing QoS.
*/ #ifdef USE_QOS writel(0x800, cp->regs + REG_TX_MAXBURST_0); writel(0x1600, cp->regs + REG_TX_MAXBURST_1); writel(0x2400, cp->regs + REG_TX_MAXBURST_2); writel(0x4800, cp->regs + REG_TX_MAXBURST_3); #else writel(0x800, cp->regs + REG_TX_MAXBURST_0); writel(0x800, cp->regs + REG_TX_MAXBURST_1); writel(0x800, cp->regs + REG_TX_MAXBURST_2); writel(0x800, cp->regs + REG_TX_MAXBURST_3); #endif } /* Must be invoked under cp->lock. */ static inline void cas_init_dma(struct cas *cp) { cas_init_tx_dma(cp); cas_init_rx_dma(cp); } static void cas_process_mc_list(struct cas *cp) { u16 hash_table[16]; u32 crc; struct netdev_hw_addr *ha; int i = 1; memset(hash_table, 0, sizeof(hash_table)); netdev_for_each_mc_addr(ha, cp->dev) { if (i <= CAS_MC_EXACT_MATCH_SIZE) { /* use the alternate mac address registers for the * first 15 multicast addresses */ writel((ha->addr[4] << 8) | ha->addr[5], cp->regs + REG_MAC_ADDRN(i*3 + 0)); writel((ha->addr[2] << 8) | ha->addr[3], cp->regs + REG_MAC_ADDRN(i*3 + 1)); writel((ha->addr[0] << 8) | ha->addr[1], cp->regs + REG_MAC_ADDRN(i*3 + 2)); i++; } else { /* use hw hash table for the next series of * multicast addresses */ crc = ether_crc_le(ETH_ALEN, ha->addr); crc >>= 24; hash_table[crc >> 4] |= 1 << (15 - (crc & 0xf)); } } for (i = 0; i < 16; i++) writel(hash_table[i], cp->regs + REG_MAC_HASH_TABLEN(i)); } /* Must be invoked under cp->lock. 
*/ static u32 cas_setup_multicast(struct cas *cp) { u32 rxcfg = 0; int i; if (cp->dev->flags & IFF_PROMISC) { rxcfg |= MAC_RX_CFG_PROMISC_EN; } else if (cp->dev->flags & IFF_ALLMULTI) { for (i=0; i < 16; i++) writel(0xFFFF, cp->regs + REG_MAC_HASH_TABLEN(i)); rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; } else { cas_process_mc_list(cp); rxcfg |= MAC_RX_CFG_HASH_FILTER_EN; } return rxcfg; } /* must be invoked under cp->stat_lock[N_TX_RINGS] */ static void cas_clear_mac_err(struct cas *cp) { writel(0, cp->regs + REG_MAC_COLL_NORMAL); writel(0, cp->regs + REG_MAC_COLL_FIRST); writel(0, cp->regs + REG_MAC_COLL_EXCESS); writel(0, cp->regs + REG_MAC_COLL_LATE); writel(0, cp->regs + REG_MAC_TIMER_DEFER); writel(0, cp->regs + REG_MAC_ATTEMPTS_PEAK); writel(0, cp->regs + REG_MAC_RECV_FRAME); writel(0, cp->regs + REG_MAC_LEN_ERR); writel(0, cp->regs + REG_MAC_ALIGN_ERR); writel(0, cp->regs + REG_MAC_FCS_ERR); writel(0, cp->regs + REG_MAC_RX_CODE_ERR); } static void cas_mac_reset(struct cas *cp) { int i; /* do both TX and RX reset */ writel(0x1, cp->regs + REG_MAC_TX_RESET); writel(0x1, cp->regs + REG_MAC_RX_RESET); /* wait for TX */ i = STOP_TRIES; while (i-- > 0) { if (readl(cp->regs + REG_MAC_TX_RESET) == 0) break; udelay(10); } /* wait for RX */ i = STOP_TRIES; while (i-- > 0) { if (readl(cp->regs + REG_MAC_RX_RESET) == 0) break; udelay(10); } if (readl(cp->regs + REG_MAC_TX_RESET) | readl(cp->regs + REG_MAC_RX_RESET)) netdev_err(cp->dev, "mac tx[%d]/rx[%d] reset failed [%08x]\n", readl(cp->regs + REG_MAC_TX_RESET), readl(cp->regs + REG_MAC_RX_RESET), readl(cp->regs + REG_MAC_STATE_MACHINE)); } /* Must be invoked under cp->lock. 
 */
/* Full MAC block initialization: resets the MAC, then programs pause,
 * inter-packet gaps, frame size limits, preamble/jam parameters, the
 * station address and address filters, the multicast configuration, and
 * finally the MAC interrupt masks.
 */
static void cas_init_mac(struct cas *cp)
{
	unsigned char *e = &cp->dev->dev_addr[0];
	int i;

	cas_mac_reset(cp);

	/* setup core arbitration weight register */
	writel(CAWR_RR_DIS, cp->regs + REG_CAWR);

	/* XXX Use pci_dma_burst_advice() */
#if !defined(CONFIG_SPARC64) && !defined(CONFIG_ALPHA)
	/* set the infinite burst register for chips that don't have
	 * pci issues.
	 */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) == 0)
		writel(INF_BURST_EN, cp->regs + REG_INF_BURST);
#endif

	writel(0x1BF0, cp->regs + REG_MAC_SEND_PAUSE);

	/* inter-packet gap timing */
	writel(0x00, cp->regs + REG_MAC_IPG0);
	writel(0x08, cp->regs + REG_MAC_IPG1);
	writel(0x04, cp->regs + REG_MAC_IPG2);

	/* change later for 802.3z */
	writel(0x40, cp->regs + REG_MAC_SLOT_TIME);

	/* min frame + FCS */
	writel(ETH_ZLEN + 4, cp->regs + REG_MAC_FRAMESIZE_MIN);

	/* Ethernet payload + header + FCS + optional VLAN tag. NOTE: we
	 * specify the maximum frame size to prevent RX tag errors on
	 * oversized frames.
	 */
	writel(CAS_BASE(MAC_FRAMESIZE_MAX_BURST, 0x2000) |
	       CAS_BASE(MAC_FRAMESIZE_MAX_FRAME,
			(CAS_MAX_MTU + ETH_HLEN + 4 + 4)),
	       cp->regs + REG_MAC_FRAMESIZE_MAX);

	/* NOTE: crc_size is used as a surrogate for half-duplex.
	 * workaround saturn half-duplex issue by increasing preamble
	 * size to 65 bytes.
	 */
	if ((cp->cas_flags & CAS_FLAG_SATURN) && cp->crc_size)
		writel(0x41, cp->regs + REG_MAC_PA_SIZE);
	else
		writel(0x07, cp->regs + REG_MAC_PA_SIZE);
	writel(0x04, cp->regs + REG_MAC_JAM_SIZE);
	writel(0x10, cp->regs + REG_MAC_ATTEMPT_LIMIT);
	writel(0x8808, cp->regs + REG_MAC_CTRL_TYPE);

	/* backoff seed derived from the low bits of the MAC address */
	writel((e[5] | (e[4] << 8)) & 0x3ff, cp->regs + REG_MAC_RANDOM_SEED);

	writel(0, cp->regs + REG_MAC_ADDR_FILTER0);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER1);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER2_1_MASK);
	writel(0, cp->regs + REG_MAC_ADDR_FILTER0_MASK);

	/* setup mac address in perfect filter array */
	for (i = 0; i < 45; i++)
		writel(0x0, cp->regs + REG_MAC_ADDRN(i));

	writel((e[4] << 8) | e[5], cp->regs + REG_MAC_ADDRN(0));
	writel((e[2] << 8) | e[3], cp->regs + REG_MAC_ADDRN(1));
	writel((e[0] << 8) | e[1], cp->regs + REG_MAC_ADDRN(2));

	/* entries 42-44 hold 01:80:c2:00:00:01 -- the 802.3x PAUSE
	 * multicast address (written low-word first)
	 */
	writel(0x0001, cp->regs + REG_MAC_ADDRN(42));
	writel(0xc200, cp->regs + REG_MAC_ADDRN(43));
	writel(0x0180, cp->regs + REG_MAC_ADDRN(44));

	cp->mac_rx_cfg = cas_setup_multicast(cp);

	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);

	/* Setup MAC interrupts.  We want to get all of the interesting
	 * counter expiration events, but we do not want to hear about
	 * normal rx/tx as the DMA engine tells us that.
	 */
	writel(MAC_TX_FRAME_XMIT, cp->regs + REG_MAC_TX_MASK);
	writel(MAC_RX_FRAME_RECV, cp->regs + REG_MAC_RX_MASK);

	/* Don't enable even the PAUSE interrupts for now, we
	 * make no use of those events other than to record them.
	 */
	writel(0xffffffff, cp->regs + REG_MAC_CTRL_MASK);
}

/* Must be invoked under cp->lock. */
static void cas_init_pause_thresholds(struct cas *cp)
{
	/* Calculate pause thresholds.  Setting the OFF threshold to the
	 * full RX fifo size effectively disables PAUSE generation
	 */
	if (cp->rx_fifo_size <= (2 * 1024)) {
		cp->rx_pause_off = cp->rx_pause_on = cp->rx_fifo_size;
	} else {
		/* worst-case frame size, rounded up to a 64-byte boundary */
		int max_frame = (cp->dev->mtu + ETH_HLEN + 4 + 4 + 64) & ~63;
		if (max_frame * 3 > cp->rx_fifo_size) {
			cp->rx_pause_off = 7104;
			cp->rx_pause_on  = 960;
		} else {
			int off = (cp->rx_fifo_size - (max_frame * 2));
			int on  = off - max_frame;
			cp->rx_pause_off = off;
			cp->rx_pause_on = on;
		}
	}
}

/* Compare the NUL-terminated string @str (including its terminator)
 * against bytes read from PROM space at @p; returns 1 on match, 0
 * otherwise.
 */
static int cas_vpd_match(const void __iomem *p, const char *str)
{
	int len = strlen(str) + 1;
	int i;

	for (i = 0; i < len; i++) {
		if (readb(p + i) != str[i])
			return 0;
	}
	return 1;
}


/* get the mac address by reading the vpd information in the rom.
 * also get the phy type and determine if there's an entropy generator.
 * NOTE: this is a bit convoluted for the following reasons:
 *  1) vpd info has order-dependent mac addresses for multinic cards
 *  2) the only way to determine the nic order is to use the slot
 *     number.
 *  3) fiber cards don't have bridges, so their slot numbers don't
 *     mean anything.
 *  4) we don't actually know we have a fiber card until after
 *     the mac addresses are parsed.
 */
static int cas_get_vpd_info(struct cas *cp, unsigned char *dev_addr,
			    const int offset)
{
	void __iomem *p = cp->regs + REG_EXPANSION_ROM_RUN_START;
	void __iomem *base, *kstart;
	int i, len;
	int found = 0;
#define VPD_FOUND_MAC        0x01
#define VPD_FOUND_PHY        0x02

	int phy_type = CAS_PHY_MII_MDIO0; /* default phy type */
	int mac_off  = 0;

#if defined(CONFIG_SPARC)
	const unsigned char *addr;
#endif

	/* give us access to the PROM */
	writel(BIM_LOCAL_DEV_PROM | BIM_LOCAL_DEV_PAD,
	       cp->regs + REG_BIM_LOCAL_DEV_EN);

	/* check for an expansion rom */
	if (readb(p) != 0x55 || readb(p + 1) != 0xaa)
		goto use_random_mac_addr;

	/* search for beginning of vpd */
	base = NULL;
	for (i = 2; i < EXPANSION_ROM_SIZE; i++) {
		/* check for PCIR */
		if ((readb(p + i + 0) == 0x50) &&
		    (readb(p + i + 1) == 0x43) &&
		    (readb(p + i + 2) == 0x49) &&
		    (readb(p + i + 3) == 0x52)) {
			base = p + (readb(p + i + 8) |
				    (readb(p + i + 9) << 8));
			break;
		}
	}

	/* 0x82: PCI data structure signature byte expected at base */
	if (!base || (readb(base) != 0x82))
		goto use_random_mac_addr;

	i = (readb(base + 1) | (readb(base + 2) << 8)) + 3;
	while (i < EXPANSION_ROM_SIZE) {
		if (readb(base + i) != 0x90) /* no vpd found */
			goto use_random_mac_addr;

		/* found a vpd field */
		len = readb(base + i + 1) | (readb(base + i + 2) << 8);

		/* extract keywords */
		kstart = base + i + 3;
		p = kstart;
		while ((p - kstart) < len) {
			int klen = readb(p + 2);
			int j;
			char type;

			p += 3;

			/* look for the following things:
			 * -- correct length == 29
			 * 3 (type) + 2 (size) +
			 * 18 (strlen("local-mac-address") + 1) +
			 * 6 (mac addr) +
			 * -- VPD Instance 'I'
			 * -- VPD Type Bytes 'B'
			 * -- VPD data length == 6
			 * -- property string == local-mac-address
			 *
			 * -- correct length == 24
			 * 3 (type) + 2 (size) +
			 * 12 (strlen("entropy-dev") + 1) +
			 * 7 (strlen("vms110") + 1)
			 * -- VPD Instance 'I'
			 * -- VPD Type String 'B'
			 * -- VPD data length == 7
			 * -- property string == entropy-dev
			 *
			 * -- correct length == 18
			 * 3 (type) + 2 (size) +
			 * 9 (strlen("phy-type") + 1) +
			 * 4 (strlen("pcs") + 1)
			 * -- VPD Instance 'I'
			 * -- VPD Type String 'S'
			 * -- VPD data length == 4
			 * -- property string == phy-type
			 *
			 * -- correct length == 23
			 * 3 (type) + 2 (size) +
			 * 14 (strlen("phy-interface") + 1) +
			 * 4 (strlen("pcs") + 1)
			 * -- VPD Instance 'I'
			 * -- VPD Type String 'S'
			 * -- VPD data length == 4
			 * -- property string == phy-interface
			 */
			if (readb(p) != 'I')
				goto next;

			/* finally, check string and length */
			type = readb(p + 3);
			if (type == 'B') {
				if ((klen == 29) && readb(p + 4) == 6 &&
				    cas_vpd_match(p + 5,
						  "local-mac-address")) {
					if (mac_off++ > offset)
						goto next;

					/* set mac address */
					for (j = 0; j < 6; j++)
						dev_addr[j] =
							readb(p + 23 + j);
					goto found_mac;
				}
			}

			if (type != 'S')
				goto next;

#ifdef USE_ENTROPY_DEV
			if ((klen == 24) &&
			    cas_vpd_match(p + 5, "entropy-dev") &&
			    cas_vpd_match(p + 17, "vms110")) {
				cp->cas_flags |= CAS_FLAG_ENTROPY_DEV;
				goto next;
			}
#endif

			if (found & VPD_FOUND_PHY)
				goto next;

			if ((klen == 18) && readb(p + 4) == 4 &&
			    cas_vpd_match(p + 5, "phy-type")) {
				if (cas_vpd_match(p + 14, "pcs")) {
					phy_type = CAS_PHY_SERDES;
					goto found_phy;
				}
			}

			if ((klen == 23) && readb(p + 4) == 4 &&
			    cas_vpd_match(p + 5, "phy-interface")) {
				if (cas_vpd_match(p + 19, "pcs")) {
					phy_type = CAS_PHY_SERDES;
					goto found_phy;
				}
			}
found_mac:
			found |= VPD_FOUND_MAC;
			goto next;

found_phy:
			found |= VPD_FOUND_PHY;

next:
			p += klen;
		}
		i += len + 3;
	}

use_random_mac_addr:
	if (found & VPD_FOUND_MAC)
		goto done;

#if defined(CONFIG_SPARC)
	/* fall back to the OpenFirmware device-tree property */
	addr = of_get_property(cp->of_node, "local-mac-address", NULL);
	if (addr != NULL) {
		memcpy(dev_addr, addr, 6);
		goto done;
	}
#endif

	/* Sun MAC prefix then 3 random bytes.
	 */
	pr_info("MAC address not found in ROM VPD\n");
	dev_addr[0] = 0x08;
	dev_addr[1] = 0x00;
	dev_addr[2] = 0x20;
	get_random_bytes(dev_addr + 3, 3);

done:
	/* drop PROM access again before returning */
	writel(0, cp->regs + REG_BIM_LOCAL_DEV_EN);
	return phy_type;
}

/* check pci invariants */
static void cas_check_pci_invariants(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;

	cp->cas_flags = 0;
	if ((pdev->vendor == PCI_VENDOR_ID_SUN) &&
	    (pdev->device == PCI_DEVICE_ID_SUN_CASSINI)) {
		if (pdev->revision >= CAS_ID_REVPLUS)
			cp->cas_flags |= CAS_FLAG_REG_PLUS;
		if (pdev->revision < CAS_ID_REVPLUS02u)
			cp->cas_flags |= CAS_FLAG_TARGET_ABORT;

		/* Original Cassini supports HW CSUM, but it's not
		 * enabled by default as it can trigger TX hangs.
		 */
		if (pdev->revision < CAS_ID_REV2)
			cp->cas_flags |= CAS_FLAG_NO_HW_CSUM;
	} else {
		/* Only sun has original cassini chips.  */
		cp->cas_flags |= CAS_FLAG_REG_PLUS;

		/* We use a flag because the same phy might be externally
		 * connected.
		 */
		if ((pdev->vendor == PCI_VENDOR_ID_NS) &&
		    (pdev->device == PCI_DEVICE_ID_NS_SATURN))
			cp->cas_flags |= CAS_FLAG_SATURN;
	}
}


/* Probe-time discovery: determines RX buffer page size, FIFO sizes and
 * PHY type/address (SERDES from VPD, otherwise scan all 32 MII
 * addresses for a responding PHY).  Returns 0 on success, -1 if no MII
 * PHY responds.
 */
static int cas_check_invariants(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;
	u32 cfg;
	int i;

	/* get page size for rx buffers. */
	cp->page_order = 0;
#ifdef USE_PAGE_ORDER
	if (PAGE_SHIFT < CAS_JUMBO_PAGE_SHIFT) {
		/* see if we can allocate larger pages */
		struct page *page = alloc_pages(GFP_ATOMIC,
						CAS_JUMBO_PAGE_SHIFT -
						PAGE_SHIFT);
		if (page) {
			__free_pages(page, CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT);
			cp->page_order = CAS_JUMBO_PAGE_SHIFT - PAGE_SHIFT;
		} else {
			printk("MTU limited to %d bytes\n", CAS_MAX_MTU);
		}
	}
#endif
	cp->page_size = (PAGE_SIZE << cp->page_order);

	/* Fetch the FIFO configurations. */
	cp->tx_fifo_size = readl(cp->regs + REG_TX_FIFO_SIZE) * 64;
	cp->rx_fifo_size = RX_FIFO_SIZE;

	/* finish phy determination. MDIO1 takes precedence over MDIO0 if
	 * they're both connected.
	 */
	cp->phy_type = cas_get_vpd_info(cp, cp->dev->dev_addr,
					PCI_SLOT(pdev->devfn));
	if (cp->phy_type & CAS_PHY_SERDES) {
		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
		return 0; /* no more checking needed */
	}

	/* MII */
	cfg = readl(cp->regs + REG_MIF_CFG);
	if (cfg & MIF_CFG_MDIO_1) {
		cp->phy_type = CAS_PHY_MII_MDIO1;
	} else if (cfg & MIF_CFG_MDIO_0) {
		cp->phy_type = CAS_PHY_MII_MDIO0;
	}

	cas_mif_poll(cp, 0);
	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);

	/* scan all 32 MII addresses, retrying each up to 3 times */
	for (i = 0; i < 32; i++) {
		u32 phy_id;
		int j;

		for (j = 0; j < 3; j++) {
			cp->phy_addr = i;
			phy_id = cas_phy_read(cp, MII_PHYSID1) << 16;
			phy_id |= cas_phy_read(cp, MII_PHYSID2);
			if (phy_id && (phy_id != 0xFFFFFFFF)) {
				cp->phy_id = phy_id;
				goto done;
			}
		}
	}
	pr_err("MII phy did not respond [%08x]\n",
	       readl(cp->regs + REG_MIF_STATE_MACHINE));
	return -1;

done:
	/* see if we can do gigabit */
	cfg = cas_phy_read(cp, MII_BMSR);
	if ((cfg & CAS_BMSR_1000_EXTEND) &&
	    cas_phy_read(cp, CAS_MII_1000_EXTEND))
		cp->cas_flags |= CAS_FLAG_1000MB_CAP;
	return 0;
}

/* Must be invoked under cp->lock.
 */
/* Enable TX/RX DMA and the MAC, polling for the MAC enable bits to
 * latch; then unmask interrupts and prime the RX kick/completion-tail
 * registers so the RX engine starts consuming descriptors.
 */
static inline void cas_start_dma(struct cas *cp)
{
	int i;
	u32 val;
	int txfailed = 0;

	/* enable dma */
	val = readl(cp->regs + REG_TX_CFG) | TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);
	val = readl(cp->regs + REG_RX_CFG) | RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* enable the mac */
	val = readl(cp->regs + REG_MAC_TX_CFG) | MAC_TX_CFG_EN;
	writel(val, cp->regs + REG_MAC_TX_CFG);
	val = readl(cp->regs + REG_MAC_RX_CFG) | MAC_RX_CFG_EN;
	writel(val, cp->regs + REG_MAC_RX_CFG);

	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_TX_CFG);
		if ((val & MAC_TX_CFG_EN))
			break;
		udelay(10);
	}
	/* i is -1 only when the poll loop above was exhausted */
	if (i < 0) txfailed = 1;
	i = STOP_TRIES;
	while (i-- > 0) {
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if ((val & MAC_RX_CFG_EN)) {
			if (txfailed) {
				netdev_err(cp->dev,
					   "enabling mac failed [tx:%08x:%08x]\n",
					   readl(cp->regs + REG_MIF_STATE_MACHINE),
					   readl(cp->regs + REG_MAC_STATE_MACHINE));
			}
			goto enable_rx_done;
		}
		udelay(10);
	}
	netdev_err(cp->dev, "enabling mac failed [%s:%08x:%08x]\n",
		   (txfailed ? "tx,rx" : "rx"),
		   readl(cp->regs + REG_MIF_STATE_MACHINE),
		   readl(cp->regs + REG_MAC_STATE_MACHINE));

enable_rx_done:
	cas_unmask_intr(cp); /* enable interrupts */
	writel(RX_DESC_RINGN_SIZE(0) - 4, cp->regs + REG_RX_KICK);
	writel(0, cp->regs + REG_RX_COMP_TAIL);

	if (cp->cas_flags & CAS_FLAG_REG_PLUS) {
		if (N_RX_DESC_RINGS > 1)
			writel(RX_DESC_RINGN_SIZE(1) - 4,
			       cp->regs + REG_PLUS_RX_KICK1);

		for (i = 1; i < N_RX_COMP_RINGS; i++)
			writel(0, cp->regs + REG_PLUS_RX_COMPN_TAIL(i));
	}
}

/* Must be invoked under cp->lock. */
static void cas_read_pcs_link_mode(struct cas *cp, int *fd, int *spd,
				   int *pause)
{
	/* decode the PCS link-partner ability register; PCS links are
	 * always 1000 Mbps
	 */
	u32 val = readl(cp->regs + REG_PCS_MII_LPA);
	*fd     = (val & PCS_MII_LPA_FD) ? 1 : 0;
	*pause  = (val & PCS_MII_LPA_SYM_PAUSE) ? 0x01 : 0x00;
	if (val & PCS_MII_LPA_ASYM_PAUSE)
		*pause |= 0x10;
	*spd = 1000;
}

/* Must be invoked under cp->lock.
 */
/* Decode negotiated duplex/speed/pause from the MII link-partner
 * ability registers.  *pause bit 0x01 = symmetric, 0x10 = asymmetric.
 */
static void cas_read_mii_link_mode(struct cas *cp, int *fd, int *spd,
				   int *pause)
{
	u32 val;

	*fd = 0;
	*spd = 10;
	*pause = 0;

	/* use GMII registers */
	val = cas_phy_read(cp, MII_LPA);
	if (val & CAS_LPA_PAUSE)
		*pause = 0x01;

	if (val & CAS_LPA_ASYM_PAUSE)
		*pause |= 0x10;

	if (val & LPA_DUPLEX)
		*fd = 1;
	if (val & LPA_100)
		*spd = 100;

	if (cp->cas_flags & CAS_FLAG_1000MB_CAP) {
		val = cas_phy_read(cp, CAS_MII_1000_STATUS);
		if (val & (CAS_LPA_1000FULL | CAS_LPA_1000HALF))
			*spd = 1000;
		if (val & CAS_LPA_1000FULL)
			*fd = 1;
	}
}

/* A link-up condition has occurred, initialize and enable the
 * rest of the chip.
 *
 * Must be invoked under cp->lock.
 */
static void cas_set_link_modes(struct cas *cp)
{
	u32 val;
	int full_duplex, speed, pause;

	full_duplex = 0;
	speed = 10;
	pause = 0;

	if (CAS_PHY_MII(cp->phy_type)) {
		cas_mif_poll(cp, 0);
		val = cas_phy_read(cp, MII_BMCR);
		if (val & BMCR_ANENABLE) {
			cas_read_mii_link_mode(cp, &full_duplex, &speed,
					       &pause);
		} else {
			/* autoneg disabled: take forced settings from BMCR */
			if (val & BMCR_FULLDPLX)
				full_duplex = 1;

			if (val & BMCR_SPEED100)
				speed = 100;
			else if (val & CAS_BMCR_SPEED1000)
				speed = (cp->cas_flags & CAS_FLAG_1000MB_CAP) ?
					1000 : 100;
		}
		cas_mif_poll(cp, 1);

	} else {
		val = readl(cp->regs + REG_PCS_MII_CTRL);
		cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause);
		if ((val & PCS_MII_AUTONEG_EN) == 0) {
			if (val & PCS_MII_CTRL_DUPLEX)
				full_duplex = 1;
		}
	}

	netif_info(cp, link, cp->dev, "Link up at %d Mbps, %s-duplex\n",
		   speed, full_duplex ? "full" : "half");

	/* program the XIF (transceiver interface) configuration */
	val = MAC_XIF_TX_MII_OUTPUT_EN | MAC_XIF_LINK_LED;
	if (CAS_PHY_MII(cp->phy_type)) {
		val |= MAC_XIF_MII_BUFFER_OUTPUT_EN;
		if (!full_duplex)
			val |= MAC_XIF_DISABLE_ECHO;
	}
	if (full_duplex)
		val |= MAC_XIF_FDPLX_LED;
	if (speed == 1000)
		val |= MAC_XIF_GMII_MODE;
	writel(val, cp->regs + REG_MAC_XIF_CFG);

	/* deal with carrier and collision detect.
	 */
	val = MAC_TX_CFG_IPG_EN;
	if (full_duplex) {
		val |= MAC_TX_CFG_IGNORE_CARRIER;
		val |= MAC_TX_CFG_IGNORE_COLL;
	} else {
#ifndef USE_CSMA_CD_PROTO
		val |= MAC_TX_CFG_NEVER_GIVE_UP_EN;
		val |= MAC_TX_CFG_NEVER_GIVE_UP_LIM;
#endif
	}
	/* val now set up for REG_MAC_TX_CFG */

	/* If gigabit and half-duplex, enable carrier extension
	 * mode.  increase slot time to 512 bytes as well.
	 * else, disable it and make sure slot time is 64 bytes.
	 * also activate checksum bug workaround
	 */
	if ((speed == 1000) && !full_duplex) {
		writel(val | MAC_TX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_TX_CFG);

		val = readl(cp->regs + REG_MAC_RX_CFG);
		val &= ~MAC_RX_CFG_STRIP_FCS; /* checksum workaround */
		writel(val | MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);

		writel(0x200, cp->regs + REG_MAC_SLOT_TIME);

		cp->crc_size = 4;
		/* minimum size gigabit frame at half duplex */
		cp->min_frame_size = CAS_1000MB_MIN_FRAME;

	} else {
		writel(val, cp->regs + REG_MAC_TX_CFG);

		/* checksum bug workaround. don't strip FCS when in
		 * half-duplex mode
		 */
		val = readl(cp->regs + REG_MAC_RX_CFG);
		if (full_duplex) {
			val |= MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 0;
			cp->min_frame_size = CAS_MIN_MTU;
		} else {
			val &= ~MAC_RX_CFG_STRIP_FCS;
			cp->crc_size = 4;
			cp->min_frame_size = CAS_MIN_FRAME;
		}
		writel(val & ~MAC_RX_CFG_CARRIER_EXTEND,
		       cp->regs + REG_MAC_RX_CFG);
		writel(0x40, cp->regs + REG_MAC_SLOT_TIME);
	}

	if (netif_msg_link(cp)) {
		if (pause & 0x01) {
			netdev_info(cp->dev, "Pause is enabled (rxfifo: %d off: %d on: %d)\n",
				    cp->rx_fifo_size,
				    cp->rx_pause_off,
				    cp->rx_pause_on);
		} else if (pause & 0x10) {
			netdev_info(cp->dev, "TX pause enabled\n");
		} else {
			netdev_info(cp->dev, "Pause is disabled\n");
		}
	}

	val = readl(cp->regs + REG_MAC_CTRL_CFG);
	val &= ~(MAC_CTRL_CFG_SEND_PAUSE_EN | MAC_CTRL_CFG_RECV_PAUSE_EN);
	if (pause) { /* symmetric or asymmetric pause */
		val |= MAC_CTRL_CFG_SEND_PAUSE_EN;
		if (pause & 0x01) { /* symmetric pause */
			val |= MAC_CTRL_CFG_RECV_PAUSE_EN;
		}
	}
	writel(val, cp->regs + REG_MAC_CTRL_CFG);

	/* finally, kick the DMA engines with the new link settings */
	cas_start_dma(cp);
}

/* Must be invoked under cp->lock. */
static void cas_init_hw(struct cas *cp, int restart_link)
{
	if (restart_link)
		cas_phy_init(cp);

	cas_init_pause_thresholds(cp);
	cas_init_mac(cp);
	cas_init_dma(cp);

	if (restart_link) {
		/* Default aneg parameters */
		cp->timer_ticks = 0;
		cas_begin_auto_negotiation(cp, NULL);
	} else if (cp->lstate == link_up) {
		cas_set_link_modes(cp);
		netif_carrier_on(cp->dev);
	}
}

/* Must be invoked under cp->lock. on earlier cassini boards,
 * SOFT_0 is tied to PCI reset. we use this to force a pci reset,
 * let it settle out, and then restore pci state.
 */
static void cas_hard_reset(struct cas *cp)
{
	writel(BIM_LOCAL_DEV_SOFT_0, cp->regs + REG_BIM_LOCAL_DEV_EN);
	udelay(20);
	pci_restore_state(cp->pdev);
}


/* Software-reset the whole chip (TX+RX blocks), poll for the reset bits
 * to clear, then re-enable the BIM error interrupts and the PCI error
 * status mask.  @blkflag preserves PCS autonegotiation results across
 * the reset (SERDES only).
 */
static void cas_global_reset(struct cas *cp, int blkflag)
{
	int limit;

	/* issue a global reset. don't use RSTOUT. */
	if (blkflag && !CAS_PHY_MII(cp->phy_type)) {
		/* For PCS, when the blkflag is set, we should set the
		 * SW_REST_BLOCK_PCS_SLINK bit to prevent the results of
		 * the last autonegotiation from being cleared.  We'll
		 * need some special handling if the chip is set into a
		 * loopback mode.
		 */
		writel((SW_RESET_TX | SW_RESET_RX | SW_RESET_BLOCK_PCS_SLINK),
		       cp->regs + REG_SW_RESET);
	} else {
		writel(SW_RESET_TX | SW_RESET_RX, cp->regs + REG_SW_RESET);
	}

	/* need to wait at least 3ms before polling register */
	mdelay(3);

	limit = STOP_TRIES;
	while (limit-- > 0) {
		u32 val = readl(cp->regs + REG_SW_RESET);
		if ((val & (SW_RESET_TX | SW_RESET_RX)) == 0)
			goto done;
		udelay(10);
	}
	netdev_err(cp->dev, "sw reset failed\n");

done:
	/* enable various BIM interrupts */
	writel(BIM_CFG_DPAR_INTR_ENABLE | BIM_CFG_RMA_INTR_ENABLE |
	       BIM_CFG_RTA_INTR_ENABLE, cp->regs + REG_BIM_CFG);

	/* clear out pci error status mask for handled errors.
	 * we don't deal with DMA counter overflows as they happen
	 * all the time.
	 */
	writel(0xFFFFFFFFU & ~(PCI_ERR_BADACK | PCI_ERR_DTRTO |
			       PCI_ERR_OTHER | PCI_ERR_BIM_DMA_WRITE |
			       PCI_ERR_BIM_DMA_READ), cp->regs +
	       REG_PCI_ERR_STATUS_MASK);

	/* set up for MII by default to address mac rx reset timeout
	 * issue
	 */
	writel(PCS_DATAPATH_MODE_MII, cp->regs + REG_PCS_DATAPATH_MODE);
}

/* Full chip reset: mask interrupts, global + MAC + entropy reset,
 * disable both DMA engines, reload the header-parser firmware, and
 * clear the MAC error counters.
 */
static void cas_reset(struct cas *cp, int blkflag)
{
	u32 val;

	cas_mask_intr(cp);
	cas_global_reset(cp, blkflag);
	cas_mac_reset(cp);
	cas_entropy_reset(cp);

	/* disable dma engines. */
	val = readl(cp->regs + REG_TX_CFG);
	val &= ~TX_CFG_DMA_EN;
	writel(val, cp->regs + REG_TX_CFG);

	val = readl(cp->regs + REG_RX_CFG);
	val &= ~RX_CFG_DMA_EN;
	writel(val, cp->regs + REG_RX_CFG);

	/* program header parser */
	if ((cp->cas_flags & CAS_FLAG_TARGET_ABORT) ||
	    (CAS_HP_ALT_FIRMWARE == cas_prog_null)) {
		cas_load_firmware(cp, CAS_HP_FIRMWARE);
	} else {
		cas_load_firmware(cp, CAS_HP_ALT_FIRMWARE);
	}

	/* clear out error registers */
	spin_lock(&cp->stat_lock[N_TX_RINGS]);
	cas_clear_mac_err(cp);
	spin_unlock(&cp->stat_lock[N_TX_RINGS]);
}

/* Shut down the chip, must be called with pm_mutex held.
 */
static void cas_shutdown(struct cas *cp)
{
	unsigned long flags;

	/* Make us not-running to avoid timers respawning */
	cp->hw_running = 0;

	del_timer_sync(&cp->link_timer);

	/* Stop the reset task */
#if 0
	while (atomic_read(&cp->reset_task_pending_mtu) ||
	       atomic_read(&cp->reset_task_pending_spare) ||
	       atomic_read(&cp->reset_task_pending_all))
		schedule();

#else
	while (atomic_read(&cp->reset_task_pending))
		schedule();
#endif
	/* Actually stop the chip */
	cas_lock_all_save(cp, flags);
	cas_reset(cp, 0);
	if (cp->cas_flags & CAS_FLAG_SATURN)
		cas_phy_powerdown(cp);
	cas_unlock_all_restore(cp, flags);
}

/* ndo_change_mtu: validate the new MTU and, if the interface is up,
 * defer the reconfiguration to the reset task (a full reset for SERDES,
 * an MTU-only reset otherwise), then wait for it to finish.
 */
static int cas_change_mtu(struct net_device *dev, int new_mtu)
{
	struct cas *cp = netdev_priv(dev);

	if (new_mtu < CAS_MIN_MTU || new_mtu > CAS_MAX_MTU)
		return -EINVAL;

	dev->mtu = new_mtu;
	if (!netif_running(dev) || !netif_device_present(dev))
		return 0;

	/* let the reset task handle it */
#if 1
	atomic_inc(&cp->reset_task_pending);
	if ((cp->phy_type & CAS_PHY_SERDES)) {
		atomic_inc(&cp->reset_task_pending_all);
	} else {
		atomic_inc(&cp->reset_task_pending_mtu);
	}
	schedule_work(&cp->reset_task);
#else
	atomic_set(&cp->reset_task_pending, (cp->phy_type & CAS_PHY_SERDES) ?
		   CAS_RESET_ALL : CAS_RESET_MTU);
	pr_err("reset called in cas_change_mtu\n");
	schedule_work(&cp->reset_task);
#endif

	flush_work_sync(&cp->reset_task);
	return 0;
}

/* Unmap and free every in-flight skb on one TX ring, skipping the
 * "tiny" bounce buffers (which are not DMA-mapped per-skb), and reset
 * the ring's tiny-buffer usage accounting.
 */
static void cas_clean_txd(struct cas *cp, int ring)
{
	struct cas_tx_desc *txd = cp->init_txds[ring];
	struct sk_buff *skb, **skbs = cp->tx_skbs[ring];
	u64 daddr, dlen;
	int i, size;

	size = TX_DESC_RINGN_SIZE(ring);
	for (i = 0; i < size; i++) {
		int frag;

		if (skbs[i] == NULL)
			continue;

		skb = skbs[i];
		skbs[i] = NULL;

		for (frag = 0; frag <= skb_shinfo(skb)->nr_frags; frag++) {
			int ent = i & (size - 1);

			/* first buffer is never a tiny buffer and so
			 * needs to be unmapped.
			 */
			daddr = le64_to_cpu(txd[ent].buffer);
			dlen = CAS_VAL(TX_DESC_BUFLEN,
				       le64_to_cpu(txd[ent].control));
			pci_unmap_page(cp->pdev, daddr, dlen,
				       PCI_DMA_TODEVICE);

			if (frag != skb_shinfo(skb)->nr_frags) {
				i++;

				/* next buffer might by a tiny buffer.
				 * skip past it.
				 */
				ent = i & (size - 1);
				if (cp->tx_tiny_use[ring][ent].used)
					i++;
			}
		}
		dev_kfree_skb_any(skb);
	}

	/* zero out tiny buf usage */
	memset(cp->tx_tiny_use[ring], 0, size*sizeof(*cp->tx_tiny_use[ring]));
}

/* freed on close */
static inline void cas_free_rx_desc(struct cas *cp, int ring)
{
	cas_page_t **page = cp->rx_pages[ring];
	int i, size;

	size = RX_DESC_RINGN_SIZE(ring);
	for (i = 0; i < size; i++) {
		if (page[i]) {
			cas_page_free(cp, page[i]);
			page[i] = NULL;
		}
	}
}

/* release the RX buffer pages of every descriptor ring */
static void cas_free_rxds(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_DESC_RINGS; i++)
		cas_free_rx_desc(cp, i);
}

/* Must be invoked under cp->lock. */
static void cas_clean_rings(struct cas *cp)
{
	int i;

	/* need to clean all tx rings */
	memset(cp->tx_old, 0, sizeof(*cp->tx_old)*N_TX_RINGS);
	memset(cp->tx_new, 0, sizeof(*cp->tx_new)*N_TX_RINGS);
	for (i = 0; i < N_TX_RINGS; i++)
		cas_clean_txd(cp, i);

	/* zero out init block */
	memset(cp->init_block, 0, sizeof(struct cas_init_block));
	cas_clean_rxds(cp);
	cas_clean_rxcs(cp);
}

/* allocated on open */
static inline int cas_alloc_rx_desc(struct cas *cp, int ring)
{
	cas_page_t **page = cp->rx_pages[ring];
	int size, i = 0;

	size = RX_DESC_RINGN_SIZE(ring);
	for (i = 0; i < size; i++) {
		if ((page[i] = cas_page_alloc(cp, GFP_KERNEL)) == NULL)
			return -1;
	}
	return 0;
}

/* allocate RX pages for every descriptor ring; on failure free
 * whatever was already allocated and return -1
 */
static int cas_alloc_rxds(struct cas *cp)
{
	int i;

	for (i = 0; i < N_RX_DESC_RINGS; i++) {
		if (cas_alloc_rx_desc(cp, i) < 0) {
			cas_free_rxds(cp);
			return -1;
		}
	}
	return 0;
}

/* Deferred reset worker: consumes the pending all/spare/mtu reset
 * requests, recovers spare RX pages, and -- unless only spare recovery
 * was requested -- performs a chip reset and re-initialization while
 * the device is detached.
 */
static void cas_reset_task(struct work_struct *work)
{
	struct cas *cp = container_of(work, struct cas, reset_task);
#if 0
	int pending = atomic_read(&cp->reset_task_pending);
#else
	int pending_all = atomic_read(&cp->reset_task_pending_all);
	int pending_spare =
		atomic_read(&cp->reset_task_pending_spare);
	int pending_mtu = atomic_read(&cp->reset_task_pending_mtu);

	if (pending_all == 0 && pending_spare == 0 && pending_mtu == 0) {
		/* We can have more tasks scheduled than actually
		 * needed.
		 */
		atomic_dec(&cp->reset_task_pending);
		return;
	}
#endif
	/* The link went down, we reset the ring, but keep
	 * DMA stopped. Use this function for reset
	 * on error as well.
	 */
	if (cp->hw_running) {
		unsigned long flags;

		/* Make sure we don't get interrupts or tx packets */
		netif_device_detach(cp->dev);
		cas_lock_all_save(cp, flags);

		if (cp->opened) {
			/* We call cas_spare_recover when we call cas_open.
			 * but we do not initialize the lists cas_spare_recover
			 * uses until cas_open is called.
			 */
			cas_spare_recover(cp, GFP_ATOMIC);
		}
#if 1
		/* test => only pending_spare set */
		if (!pending_all && !pending_mtu)
			goto done;
#else
		if (pending == CAS_RESET_SPARE)
			goto done;
#endif
		/* when pending == CAS_RESET_ALL, the following
		 * call to cas_init_hw will restart auto negotiation.
		 * Setting the second argument of cas_reset to
		 * !(pending == CAS_RESET_ALL) will set this argument
		 * to 1 (avoiding reinitializing the PHY for the normal
		 * PCS case) when auto negotiation is not restarted.
		 */
#if 1
		cas_reset(cp, !(pending_all > 0));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, (pending_all > 0));
#else
		cas_reset(cp, !(pending == CAS_RESET_ALL));
		if (cp->opened)
			cas_clean_rings(cp);
		cas_init_hw(cp, pending == CAS_RESET_ALL);
#endif

done:
		cas_unlock_all_restore(cp, flags);
		netif_device_attach(cp->dev);
	}
	/* retire exactly the requests we observed at entry; new ones
	 * arriving meanwhile stay pending for the next run
	 */
#if 1
	atomic_sub(pending_all, &cp->reset_task_pending_all);
	atomic_sub(pending_spare, &cp->reset_task_pending_spare);
	atomic_sub(pending_mtu, &cp->reset_task_pending_mtu);
	atomic_dec(&cp->reset_task_pending);
#else
	atomic_set(&cp->reset_task_pending, 0);
#endif
}

/* Periodic link watchdog: gathers entropy, replenishes RX descriptor
 * rings that previously failed posting, polls PHY/PCS link state, and
 * detects TX state-machine wedges (scheduling a full reset when found).
 * Re-arms itself via mod_timer unless an rxd post is still pending.
 */
static void cas_link_timer(unsigned long data)
{
	struct cas *cp = (struct cas *) data;
	int mask, pending = 0, reset = 0;
	unsigned long flags;

	if (link_transition_timeout != 0 &&
	    cp->link_transition_jiffies_valid &&
	    ((jiffies - cp->link_transition_jiffies) >
	      (link_transition_timeout))) {
		/* One-second counter so link-down workaround doesn't
		 * cause resets to occur so fast as to fool the switch
		 * into thinking the link is down.
		 */
		cp->link_transition_jiffies_valid = 0;
	}

	if (!cp->hw_running)
		return;

	spin_lock_irqsave(&cp->lock, flags);
	cas_lock_tx(cp);
	cas_entropy_gather(cp);

	/* If the link task is still pending, we just
	 * reschedule the link timer
	 */
#if 1
	if (atomic_read(&cp->reset_task_pending_all) ||
	    atomic_read(&cp->reset_task_pending_spare) ||
	    atomic_read(&cp->reset_task_pending_mtu))
		goto done;
#else
	if (atomic_read(&cp->reset_task_pending))
		goto done;
#endif

	/* check for rx cleaning */
	if ((mask = (cp->cas_flags & CAS_FLAG_RXD_POST_MASK))) {
		int i, rmask;

		for (i = 0; i < MAX_RX_DESC_RINGS; i++) {
			rmask = CAS_FLAG_RXD_POST(i);
			if ((mask & rmask) == 0)
				continue;

			/* post_rxds will do a mod_timer */
			if (cas_post_rxds_ringN(cp, i, cp->rx_last[i]) < 0) {
				pending = 1;
				continue;
			}
			cp->cas_flags &= ~rmask;
		}
	}

	if (CAS_PHY_MII(cp->phy_type)) {
		u16 bmsr;
		cas_mif_poll(cp, 0);
		bmsr = cas_phy_read(cp, MII_BMSR);

		/* WTZ: Solaris driver reads this twice, but that
		 * may be due to the PCS case and the use of a
		 * common implementation. Read it twice here to be
		 * safe.
		 */
		bmsr = cas_phy_read(cp, MII_BMSR);
		cas_mif_poll(cp, 1);
		readl(cp->regs + REG_MIF_STATUS); /* avoid dups */
		reset = cas_mii_link_check(cp, bmsr);
	} else {
		reset = cas_pcs_link_check(cp);
	}

	if (reset)
		goto done;

	/* check for tx state machine confusion */
	if ((readl(cp->regs + REG_MAC_TX_STATUS) & MAC_TX_FRAME_XMIT) == 0) {
		u32 val = readl(cp->regs + REG_MAC_STATE_MACHINE);
		u32 wptr, rptr;
		int tlm = CAS_VAL(MAC_SM_TLM, val);

		if (((tlm == 0x5) || (tlm == 0x3)) &&
		    (CAS_VAL(MAC_SM_ENCAP_SM, val) == 0)) {
			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
				     "tx err: MAC_STATE[%08x]\n", val);
			reset = 1;
			goto done;
		}

		/* FIFO has no packets but write/read pointers differ:
		 * the TX FIFO is wedged
		 */
		val  = readl(cp->regs + REG_TX_FIFO_PKT_CNT);
		wptr = readl(cp->regs + REG_TX_FIFO_WRITE_PTR);
		rptr = readl(cp->regs + REG_TX_FIFO_READ_PTR);
		if ((val == 0) && (wptr != rptr)) {
			netif_printk(cp, tx_err, KERN_DEBUG, cp->dev,
				     "tx err: TX_FIFO[%08x:%08x:%08x]\n",
				     val, wptr, rptr);
			reset = 1;
		}

		if (reset)
			cas_hard_reset(cp);
	}

done:
	if (reset) {
#if 1
		atomic_inc(&cp->reset_task_pending);
		atomic_inc(&cp->reset_task_pending_all);
		schedule_work(&cp->reset_task);
#else
		atomic_set(&cp->reset_task_pending, CAS_RESET_ALL);
		pr_err("reset called in cas_link_timer\n");
		schedule_work(&cp->reset_task);
#endif
	}

	if (!pending)
		mod_timer(&cp->link_timer, jiffies + CAS_LINK_TIMEOUT);
	cas_unlock_tx(cp);
	spin_unlock_irqrestore(&cp->lock, flags);
}

/* tiny buffers are used to avoid target abort issues with
 * older cassini's
 */
static void cas_tx_tiny_free(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;
	int i;

	for (i = 0; i < N_TX_RINGS; i++) {
		if (!cp->tx_tiny_bufs[i])
			continue;

		pci_free_consistent(pdev, TX_TINY_BUF_BLOCK,
				    cp->tx_tiny_bufs[i],
				    cp->tx_tiny_dvma[i]);
		cp->tx_tiny_bufs[i] = NULL;
	}
}

/* allocate one coherent tiny-buffer block per TX ring; on failure,
 * free any blocks already allocated and return -1
 */
static int cas_tx_tiny_alloc(struct cas *cp)
{
	struct pci_dev *pdev = cp->pdev;
	int i;

	for (i = 0; i < N_TX_RINGS; i++) {
		cp->tx_tiny_bufs[i] =
			pci_alloc_consistent(pdev, TX_TINY_BUF_BLOCK,
					     &cp->tx_tiny_dvma[i]);
		if (!cp->tx_tiny_bufs[i]) {
			cas_tx_tiny_free(cp);
			return -1;
		}
	}
	return 0;
}
static int cas_open(struct net_device *dev) { struct cas *cp = netdev_priv(dev); int hw_was_up, err; unsigned long flags; mutex_lock(&cp->pm_mutex); hw_was_up = cp->hw_running; /* The power-management mutex protects the hw_running * etc. state so it is safe to do this bit without cp->lock */ if (!cp->hw_running) { /* Reset the chip */ cas_lock_all_save(cp, flags); /* We set the second arg to cas_reset to zero * because cas_init_hw below will have its second * argument set to non-zero, which will force * autonegotiation to start. */ cas_reset(cp, 0); cp->hw_running = 1; cas_unlock_all_restore(cp, flags); } err = -ENOMEM; if (cas_tx_tiny_alloc(cp) < 0) goto err_unlock; /* alloc rx descriptors */ if (cas_alloc_rxds(cp) < 0) goto err_tx_tiny; /* allocate spares */ cas_spare_init(cp); cas_spare_recover(cp, GFP_KERNEL); /* We can now request the interrupt as we know it's masked * on the controller. cassini+ has up to 4 interrupts * that can be used, but you need to do explicit pci interrupt * mapping to expose them */ if (request_irq(cp->pdev->irq, cas_interrupt, IRQF_SHARED, dev->name, (void *) dev)) { netdev_err(cp->dev, "failed to request irq !\n"); err = -EAGAIN; goto err_spare; } #ifdef USE_NAPI napi_enable(&cp->napi); #endif /* init hw */ cas_lock_all_save(cp, flags); cas_clean_rings(cp); cas_init_hw(cp, !hw_was_up); cp->opened = 1; cas_unlock_all_restore(cp, flags); netif_start_queue(dev); mutex_unlock(&cp->pm_mutex); return 0; err_spare: cas_spare_free(cp); cas_free_rxds(cp); err_tx_tiny: cas_tx_tiny_free(cp); err_unlock: mutex_unlock(&cp->pm_mutex); return err; } static int cas_close(struct net_device *dev) { unsigned long flags; struct cas *cp = netdev_priv(dev); #ifdef USE_NAPI napi_disable(&cp->napi); #endif /* Make sure we don't get distracted by suspend/resume */ mutex_lock(&cp->pm_mutex); netif_stop_queue(dev); /* Stop traffic, mark us closed */ cas_lock_all_save(cp, flags); cp->opened = 0; cas_reset(cp, 0); cas_phy_init(cp); cas_begin_auto_negotiation(cp, 
NULL); cas_clean_rings(cp); cas_unlock_all_restore(cp, flags); free_irq(cp->pdev->irq, (void *) dev); cas_spare_free(cp); cas_free_rxds(cp); cas_tx_tiny_free(cp); mutex_unlock(&cp->pm_mutex); return 0; } static struct { const char name[ETH_GSTRING_LEN]; } ethtool_cassini_statnames[] = { {"collisions"}, {"rx_bytes"}, {"rx_crc_errors"}, {"rx_dropped"}, {"rx_errors"}, {"rx_fifo_errors"}, {"rx_frame_errors"}, {"rx_length_errors"}, {"rx_over_errors"}, {"rx_packets"}, {"tx_aborted_errors"}, {"tx_bytes"}, {"tx_dropped"}, {"tx_errors"}, {"tx_fifo_errors"}, {"tx_packets"} }; #define CAS_NUM_STAT_KEYS ARRAY_SIZE(ethtool_cassini_statnames) static struct { const int offsets; /* neg. values for 2nd arg to cas_read_phy */ } ethtool_register_table[] = { {-MII_BMSR}, {-MII_BMCR}, {REG_CAWR}, {REG_INF_BURST}, {REG_BIM_CFG}, {REG_RX_CFG}, {REG_HP_CFG}, {REG_MAC_TX_CFG}, {REG_MAC_RX_CFG}, {REG_MAC_CTRL_CFG}, {REG_MAC_XIF_CFG}, {REG_MIF_CFG}, {REG_PCS_CFG}, {REG_SATURN_PCFG}, {REG_PCS_MII_STATUS}, {REG_PCS_STATE_MACHINE}, {REG_MAC_COLL_EXCESS}, {REG_MAC_COLL_LATE} }; #define CAS_REG_LEN ARRAY_SIZE(ethtool_register_table) #define CAS_MAX_REGS (sizeof (u32)*CAS_REG_LEN) static void cas_read_regs(struct cas *cp, u8 *ptr, int len) { u8 *p; int i; unsigned long flags; spin_lock_irqsave(&cp->lock, flags); for (i = 0, p = ptr; i < len ; i ++, p += sizeof(u32)) { u16 hval; u32 val; if (ethtool_register_table[i].offsets < 0) { hval = cas_phy_read(cp, -ethtool_register_table[i].offsets); val = hval; } else { val= readl(cp->regs+ethtool_register_table[i].offsets); } memcpy(p, (u8 *)&val, sizeof(u32)); } spin_unlock_irqrestore(&cp->lock, flags); } static struct net_device_stats *cas_get_stats(struct net_device *dev) { struct cas *cp = netdev_priv(dev); struct net_device_stats *stats = cp->net_stats; unsigned long flags; int i; unsigned long tmp; /* we collate all of the stats into net_stats[N_TX_RING] */ if (!cp->hw_running) return stats + N_TX_RINGS; /* collect outstanding stats */ /* WTZ: the 
Cassini spec gives these as 16 bit counters but * stored in 32-bit words. Added a mask of 0xffff to be safe, * in case the chip somehow puts any garbage in the other bits. * Also, counter usage didn't seem to mach what Adrian did * in the parts of the code that set these quantities. Made * that consistent. */ spin_lock_irqsave(&cp->stat_lock[N_TX_RINGS], flags); stats[N_TX_RINGS].rx_crc_errors += readl(cp->regs + REG_MAC_FCS_ERR) & 0xffff; stats[N_TX_RINGS].rx_frame_errors += readl(cp->regs + REG_MAC_ALIGN_ERR) &0xffff; stats[N_TX_RINGS].rx_length_errors += readl(cp->regs + REG_MAC_LEN_ERR) & 0xffff; #if 1 tmp = (readl(cp->regs + REG_MAC_COLL_EXCESS) & 0xffff) + (readl(cp->regs + REG_MAC_COLL_LATE) & 0xffff); stats[N_TX_RINGS].tx_aborted_errors += tmp; stats[N_TX_RINGS].collisions += tmp + (readl(cp->regs + REG_MAC_COLL_NORMAL) & 0xffff); #else stats[N_TX_RINGS].tx_aborted_errors += readl(cp->regs + REG_MAC_COLL_EXCESS); stats[N_TX_RINGS].collisions += readl(cp->regs + REG_MAC_COLL_EXCESS) + readl(cp->regs + REG_MAC_COLL_LATE); #endif cas_clear_mac_err(cp); /* saved bits that are unique to ring 0 */ spin_lock(&cp->stat_lock[0]); stats[N_TX_RINGS].collisions += stats[0].collisions; stats[N_TX_RINGS].rx_over_errors += stats[0].rx_over_errors; stats[N_TX_RINGS].rx_frame_errors += stats[0].rx_frame_errors; stats[N_TX_RINGS].rx_fifo_errors += stats[0].rx_fifo_errors; stats[N_TX_RINGS].tx_aborted_errors += stats[0].tx_aborted_errors; stats[N_TX_RINGS].tx_fifo_errors += stats[0].tx_fifo_errors; spin_unlock(&cp->stat_lock[0]); for (i = 0; i < N_TX_RINGS; i++) { spin_lock(&cp->stat_lock[i]); stats[N_TX_RINGS].rx_length_errors += stats[i].rx_length_errors; stats[N_TX_RINGS].rx_crc_errors += stats[i].rx_crc_errors; stats[N_TX_RINGS].rx_packets += stats[i].rx_packets; stats[N_TX_RINGS].tx_packets += stats[i].tx_packets; stats[N_TX_RINGS].rx_bytes += stats[i].rx_bytes; stats[N_TX_RINGS].tx_bytes += stats[i].tx_bytes; stats[N_TX_RINGS].rx_errors += stats[i].rx_errors; 
stats[N_TX_RINGS].tx_errors += stats[i].tx_errors; stats[N_TX_RINGS].rx_dropped += stats[i].rx_dropped; stats[N_TX_RINGS].tx_dropped += stats[i].tx_dropped; memset(stats + i, 0, sizeof(struct net_device_stats)); spin_unlock(&cp->stat_lock[i]); } spin_unlock_irqrestore(&cp->stat_lock[N_TX_RINGS], flags); return stats + N_TX_RINGS; } static void cas_set_multicast(struct net_device *dev) { struct cas *cp = netdev_priv(dev); u32 rxcfg, rxcfg_new; unsigned long flags; int limit = STOP_TRIES; if (!cp->hw_running) return; spin_lock_irqsave(&cp->lock, flags); rxcfg = readl(cp->regs + REG_MAC_RX_CFG); /* disable RX MAC and wait for completion */ writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_EN) { if (!limit--) break; udelay(10); } /* disable hash filter and wait for completion */ limit = STOP_TRIES; rxcfg &= ~(MAC_RX_CFG_PROMISC_EN | MAC_RX_CFG_HASH_FILTER_EN); writel(rxcfg & ~MAC_RX_CFG_EN, cp->regs + REG_MAC_RX_CFG); while (readl(cp->regs + REG_MAC_RX_CFG) & MAC_RX_CFG_HASH_FILTER_EN) { if (!limit--) break; udelay(10); } /* program hash filters */ cp->mac_rx_cfg = rxcfg_new = cas_setup_multicast(cp); rxcfg |= rxcfg_new; writel(rxcfg, cp->regs + REG_MAC_RX_CFG); spin_unlock_irqrestore(&cp->lock, flags); } static void cas_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct cas *cp = netdev_priv(dev); strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version)); strlcpy(info->bus_info, pci_name(cp->pdev), sizeof(info->bus_info)); info->regdump_len = cp->casreg_len < CAS_MAX_REGS ? 
cp->casreg_len : CAS_MAX_REGS; info->n_stats = CAS_NUM_STAT_KEYS; } static int cas_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct cas *cp = netdev_priv(dev); u16 bmcr; int full_duplex, speed, pause; unsigned long flags; enum link_state linkstate = link_up; cmd->advertising = 0; cmd->supported = SUPPORTED_Autoneg; if (cp->cas_flags & CAS_FLAG_1000MB_CAP) { cmd->supported |= SUPPORTED_1000baseT_Full; cmd->advertising |= ADVERTISED_1000baseT_Full; } /* Record PHY settings if HW is on. */ spin_lock_irqsave(&cp->lock, flags); bmcr = 0; linkstate = cp->lstate; if (CAS_PHY_MII(cp->phy_type)) { cmd->port = PORT_MII; cmd->transceiver = (cp->cas_flags & CAS_FLAG_SATURN) ? XCVR_INTERNAL : XCVR_EXTERNAL; cmd->phy_address = cp->phy_addr; cmd->advertising |= ADVERTISED_TP | ADVERTISED_MII | ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; cmd->supported |= (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | SUPPORTED_TP | SUPPORTED_MII); if (cp->hw_running) { cas_mif_poll(cp, 0); bmcr = cas_phy_read(cp, MII_BMCR); cas_read_mii_link_mode(cp, &full_duplex, &speed, &pause); cas_mif_poll(cp, 1); } } else { cmd->port = PORT_FIBRE; cmd->transceiver = XCVR_INTERNAL; cmd->phy_address = 0; cmd->supported |= SUPPORTED_FIBRE; cmd->advertising |= ADVERTISED_FIBRE; if (cp->hw_running) { /* pcs uses the same bits as mii */ bmcr = readl(cp->regs + REG_PCS_MII_CTRL); cas_read_pcs_link_mode(cp, &full_duplex, &speed, &pause); } } spin_unlock_irqrestore(&cp->lock, flags); if (bmcr & BMCR_ANENABLE) { cmd->advertising |= ADVERTISED_Autoneg; cmd->autoneg = AUTONEG_ENABLE; ethtool_cmd_speed_set(cmd, ((speed == 10) ? SPEED_10 : ((speed == 1000) ? SPEED_1000 : SPEED_100))); cmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; } else { cmd->autoneg = AUTONEG_DISABLE; ethtool_cmd_speed_set(cmd, ((bmcr & CAS_BMCR_SPEED1000) ? SPEED_1000 : ((bmcr & BMCR_SPEED100) ? 
SPEED_100 : SPEED_10))); cmd->duplex = (bmcr & BMCR_FULLDPLX) ? DUPLEX_FULL : DUPLEX_HALF; } if (linkstate != link_up) { /* Force these to "unknown" if the link is not up and * autonogotiation in enabled. We can set the link * speed to 0, but not cmd->duplex, * because its legal values are 0 and 1. Ethtool will * print the value reported in parentheses after the * word "Unknown" for unrecognized values. * * If in forced mode, we report the speed and duplex * settings that we configured. */ if (cp->link_cntl & BMCR_ANENABLE) { ethtool_cmd_speed_set(cmd, 0); cmd->duplex = 0xff; } else { ethtool_cmd_speed_set(cmd, SPEED_10); if (cp->link_cntl & BMCR_SPEED100) { ethtool_cmd_speed_set(cmd, SPEED_100); } else if (cp->link_cntl & CAS_BMCR_SPEED1000) { ethtool_cmd_speed_set(cmd, SPEED_1000); } cmd->duplex = (cp->link_cntl & BMCR_FULLDPLX)? DUPLEX_FULL : DUPLEX_HALF; } } return 0; } static int cas_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct cas *cp = netdev_priv(dev); unsigned long flags; u32 speed = ethtool_cmd_speed(cmd); /* Verify the settings we care about. */ if (cmd->autoneg != AUTONEG_ENABLE && cmd->autoneg != AUTONEG_DISABLE) return -EINVAL; if (cmd->autoneg == AUTONEG_DISABLE && ((speed != SPEED_1000 && speed != SPEED_100 && speed != SPEED_10) || (cmd->duplex != DUPLEX_HALF && cmd->duplex != DUPLEX_FULL))) return -EINVAL; /* Apply settings and restart link process. */ spin_lock_irqsave(&cp->lock, flags); cas_begin_auto_negotiation(cp, cmd); spin_unlock_irqrestore(&cp->lock, flags); return 0; } static int cas_nway_reset(struct net_device *dev) { struct cas *cp = netdev_priv(dev); unsigned long flags; if ((cp->link_cntl & BMCR_ANENABLE) == 0) return -EINVAL; /* Restart link process. 
*/ spin_lock_irqsave(&cp->lock, flags); cas_begin_auto_negotiation(cp, NULL); spin_unlock_irqrestore(&cp->lock, flags); return 0; } static u32 cas_get_link(struct net_device *dev) { struct cas *cp = netdev_priv(dev); return cp->lstate == link_up; } static u32 cas_get_msglevel(struct net_device *dev) { struct cas *cp = netdev_priv(dev); return cp->msg_enable; } static void cas_set_msglevel(struct net_device *dev, u32 value) { struct cas *cp = netdev_priv(dev); cp->msg_enable = value; } static int cas_get_regs_len(struct net_device *dev) { struct cas *cp = netdev_priv(dev); return cp->casreg_len < CAS_MAX_REGS ? cp->casreg_len: CAS_MAX_REGS; } static void cas_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *p) { struct cas *cp = netdev_priv(dev); regs->version = 0; /* cas_read_regs handles locks (cp->lock). */ cas_read_regs(cp, p, regs->len / sizeof(u32)); } static int cas_get_sset_count(struct net_device *dev, int sset) { switch (sset) { case ETH_SS_STATS: return CAS_NUM_STAT_KEYS; default: return -EOPNOTSUPP; } } static void cas_get_strings(struct net_device *dev, u32 stringset, u8 *data) { memcpy(data, &ethtool_cassini_statnames, CAS_NUM_STAT_KEYS * ETH_GSTRING_LEN); } static void cas_get_ethtool_stats(struct net_device *dev, struct ethtool_stats *estats, u64 *data) { struct cas *cp = netdev_priv(dev); struct net_device_stats *stats = cas_get_stats(cp->dev); int i = 0; data[i++] = stats->collisions; data[i++] = stats->rx_bytes; data[i++] = stats->rx_crc_errors; data[i++] = stats->rx_dropped; data[i++] = stats->rx_errors; data[i++] = stats->rx_fifo_errors; data[i++] = stats->rx_frame_errors; data[i++] = stats->rx_length_errors; data[i++] = stats->rx_over_errors; data[i++] = stats->rx_packets; data[i++] = stats->tx_aborted_errors; data[i++] = stats->tx_bytes; data[i++] = stats->tx_dropped; data[i++] = stats->tx_errors; data[i++] = stats->tx_fifo_errors; data[i++] = stats->tx_packets; BUG_ON(i != CAS_NUM_STAT_KEYS); } static const struct ethtool_ops 
cas_ethtool_ops = { .get_drvinfo = cas_get_drvinfo, .get_settings = cas_get_settings, .set_settings = cas_set_settings, .nway_reset = cas_nway_reset, .get_link = cas_get_link, .get_msglevel = cas_get_msglevel, .set_msglevel = cas_set_msglevel, .get_regs_len = cas_get_regs_len, .get_regs = cas_get_regs, .get_sset_count = cas_get_sset_count, .get_strings = cas_get_strings, .get_ethtool_stats = cas_get_ethtool_stats, }; static int cas_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct cas *cp = netdev_priv(dev); struct mii_ioctl_data *data = if_mii(ifr); unsigned long flags; int rc = -EOPNOTSUPP; /* Hold the PM mutex while doing ioctl's or we may collide * with open/close and power management and oops. */ mutex_lock(&cp->pm_mutex); switch (cmd) { case SIOCGMIIPHY: /* Get address of MII PHY in use. */ data->phy_id = cp->phy_addr; /* Fallthrough... */ case SIOCGMIIREG: /* Read MII PHY register. */ spin_lock_irqsave(&cp->lock, flags); cas_mif_poll(cp, 0); data->val_out = cas_phy_read(cp, data->reg_num & 0x1f); cas_mif_poll(cp, 1); spin_unlock_irqrestore(&cp->lock, flags); rc = 0; break; case SIOCSMIIREG: /* Write MII PHY register. */ spin_lock_irqsave(&cp->lock, flags); cas_mif_poll(cp, 0); rc = cas_phy_write(cp, data->reg_num & 0x1f, data->val_in); cas_mif_poll(cp, 1); spin_unlock_irqrestore(&cp->lock, flags); break; default: break; } mutex_unlock(&cp->pm_mutex); return rc; } /* When this chip sits underneath an Intel 31154 bridge, it is the * only subordinate device and we can tweak the bridge settings to * reflect that fact. */ static void __devinit cas_program_bridge(struct pci_dev *cas_pdev) { struct pci_dev *pdev = cas_pdev->bus->self; u32 val; if (!pdev) return; if (pdev->vendor != 0x8086 || pdev->device != 0x537c) return; /* Clear bit 10 (Bus Parking Control) in the Secondary * Arbiter Control/Status Register which lives at offset * 0x41. Using a 32-bit word read/modify/write at 0x40 * is much simpler so that's how we do this. 
*/ pci_read_config_dword(pdev, 0x40, &val); val &= ~0x00040000; pci_write_config_dword(pdev, 0x40, val); /* Max out the Multi-Transaction Timer settings since * Cassini is the only device present. * * The register is 16-bit and lives at 0x50. When the * settings are enabled, it extends the GRANT# signal * for a requestor after a transaction is complete. This * allows the next request to run without first needing * to negotiate the GRANT# signal back. * * Bits 12:10 define the grant duration: * * 1 -- 16 clocks * 2 -- 32 clocks * 3 -- 64 clocks * 4 -- 128 clocks * 5 -- 256 clocks * * All other values are illegal. * * Bits 09:00 define which REQ/GNT signal pairs get the * GRANT# signal treatment. We set them all. */ pci_write_config_word(pdev, 0x50, (5 << 10) | 0x3ff); /* The Read Prefecth Policy register is 16-bit and sits at * offset 0x52. It enables a "smart" pre-fetch policy. We * enable it and max out all of the settings since only one * device is sitting underneath and thus bandwidth sharing is * not an issue. * * The register has several 3 bit fields, which indicates a * multiplier applied to the base amount of prefetching the * chip would do. These fields are at: * * 15:13 --- ReRead Primary Bus * 12:10 --- FirstRead Primary Bus * 09:07 --- ReRead Secondary Bus * 06:04 --- FirstRead Secondary Bus * * Bits 03:00 control which REQ/GNT pairs the prefetch settings * get enabled on. Bit 3 is a grouped enabler which controls * all of the REQ/GNT pairs from [8:3]. Bits 2 to 0 control * the individual REQ/GNT pairs [2:0]. */ pci_write_config_word(pdev, 0x52, (0x7 << 13) | (0x7 << 10) | (0x7 << 7) | (0x7 << 4) | (0xf << 0)); /* Force cacheline size to 0x8 */ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, 0x08); /* Force latency timer to maximum setting so Cassini can * sit on the bus as long as it likes. 
*/ pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0xff); } static const struct net_device_ops cas_netdev_ops = { .ndo_open = cas_open, .ndo_stop = cas_close, .ndo_start_xmit = cas_start_xmit, .ndo_get_stats = cas_get_stats, .ndo_set_rx_mode = cas_set_multicast, .ndo_do_ioctl = cas_ioctl, .ndo_tx_timeout = cas_tx_timeout, .ndo_change_mtu = cas_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = cas_netpoll, #endif }; static int __devinit cas_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int cas_version_printed = 0; unsigned long casreg_len; struct net_device *dev; struct cas *cp; int i, err, pci_using_dac; u16 pci_cmd; u8 orig_cacheline_size = 0, cas_cacheline_size = 0; if (cas_version_printed++ == 0) pr_info("%s", version); err = pci_enable_device(pdev); if (err) { dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n"); return err; } if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) { dev_err(&pdev->dev, "Cannot find proper PCI device " "base address, aborting\n"); err = -ENODEV; goto err_out_disable_pdev; } dev = alloc_etherdev(sizeof(*cp)); if (!dev) { err = -ENOMEM; goto err_out_disable_pdev; } SET_NETDEV_DEV(dev, &pdev->dev); err = pci_request_regions(pdev, dev->name); if (err) { dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n"); goto err_out_free_netdev; } pci_set_master(pdev); /* we must always turn on parity response or else parity * doesn't get generated properly. disable SERR/PERR as well. * in addition, we want to turn MWI on. */ pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd); pci_cmd &= ~PCI_COMMAND_SERR; pci_cmd |= PCI_COMMAND_PARITY; pci_write_config_word(pdev, PCI_COMMAND, pci_cmd); if (pci_try_set_mwi(pdev)) pr_warning("Could not enable MWI for %s\n", pci_name(pdev)); cas_program_bridge(pdev); /* * On some architectures, the default cache line size set * by pci_try_set_mwi reduces perforamnce. 
We have to increase * it for this case. To start, we'll print some configuration * data. */ #if 1 pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &orig_cacheline_size); if (orig_cacheline_size < CAS_PREF_CACHELINE_SIZE) { cas_cacheline_size = (CAS_PREF_CACHELINE_SIZE < SMP_CACHE_BYTES) ? CAS_PREF_CACHELINE_SIZE : SMP_CACHE_BYTES; if (pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, cas_cacheline_size)) { dev_err(&pdev->dev, "Could not set PCI cache " "line size\n"); goto err_write_cacheline; } } #endif /* Configure DMA attributes. */ if (!pci_set_dma_mask(pdev, DMA_BIT_MASK(64))) { pci_using_dac = 1; err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)); if (err < 0) { dev_err(&pdev->dev, "Unable to obtain 64-bit DMA " "for consistent allocations\n"); goto err_out_free_res; } } else { err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if (err) { dev_err(&pdev->dev, "No usable DMA configuration, " "aborting\n"); goto err_out_free_res; } pci_using_dac = 0; } casreg_len = pci_resource_len(pdev, 0); cp = netdev_priv(dev); cp->pdev = pdev; #if 1 /* A value of 0 indicates we never explicitly set it */ cp->orig_cacheline_size = cas_cacheline_size ? orig_cacheline_size: 0; #endif cp->dev = dev; cp->msg_enable = (cassini_debug < 0) ? CAS_DEF_MSG_ENABLE : cassini_debug; #if defined(CONFIG_SPARC) cp->of_node = pci_device_to_OF_node(pdev); #endif cp->link_transition = LINK_TRANSITION_UNKNOWN; cp->link_transition_jiffies_valid = 0; spin_lock_init(&cp->lock); spin_lock_init(&cp->rx_inuse_lock); spin_lock_init(&cp->rx_spare_lock); for (i = 0; i < N_TX_RINGS; i++) { spin_lock_init(&cp->stat_lock[i]); spin_lock_init(&cp->tx_lock[i]); } spin_lock_init(&cp->stat_lock[N_TX_RINGS]); mutex_init(&cp->pm_mutex); init_timer(&cp->link_timer); cp->link_timer.function = cas_link_timer; cp->link_timer.data = (unsigned long) cp; #if 1 /* Just in case the implementation of atomic operations * change so that an explicit initialization is necessary. 
*/ atomic_set(&cp->reset_task_pending, 0); atomic_set(&cp->reset_task_pending_all, 0); atomic_set(&cp->reset_task_pending_spare, 0); atomic_set(&cp->reset_task_pending_mtu, 0); #endif INIT_WORK(&cp->reset_task, cas_reset_task); /* Default link parameters */ if (link_mode >= 0 && link_mode < 6) cp->link_cntl = link_modes[link_mode]; else cp->link_cntl = BMCR_ANENABLE; cp->lstate = link_down; cp->link_transition = LINK_TRANSITION_LINK_DOWN; netif_carrier_off(cp->dev); cp->timer_ticks = 0; /* give us access to cassini registers */ cp->regs = pci_iomap(pdev, 0, casreg_len); if (!cp->regs) { dev_err(&pdev->dev, "Cannot map device registers, aborting\n"); goto err_out_free_res; } cp->casreg_len = casreg_len; pci_save_state(pdev); cas_check_pci_invariants(cp); cas_hard_reset(cp); cas_reset(cp, 0); if (cas_check_invariants(cp)) goto err_out_iounmap; if (cp->cas_flags & CAS_FLAG_SATURN) if (cas_saturn_firmware_init(cp)) goto err_out_iounmap; cp->init_block = (struct cas_init_block *) pci_alloc_consistent(pdev, sizeof(struct cas_init_block), &cp->block_dvma); if (!cp->init_block) { dev_err(&pdev->dev, "Cannot allocate init block, aborting\n"); goto err_out_iounmap; } for (i = 0; i < N_TX_RINGS; i++) cp->init_txds[i] = cp->init_block->txds[i]; for (i = 0; i < N_RX_DESC_RINGS; i++) cp->init_rxds[i] = cp->init_block->rxds[i]; for (i = 0; i < N_RX_COMP_RINGS; i++) cp->init_rxcs[i] = cp->init_block->rxcs[i]; for (i = 0; i < N_RX_FLOWS; i++) skb_queue_head_init(&cp->rx_flows[i]); dev->netdev_ops = &cas_netdev_ops; dev->ethtool_ops = &cas_ethtool_ops; dev->watchdog_timeo = CAS_TX_TIMEOUT; #ifdef USE_NAPI netif_napi_add(dev, &cp->napi, cas_poll, 64); #endif dev->irq = pdev->irq; dev->dma = 0; /* Cassini features. 
*/ if ((cp->cas_flags & CAS_FLAG_NO_HW_CSUM) == 0) dev->features |= NETIF_F_HW_CSUM | NETIF_F_SG; if (pci_using_dac) dev->features |= NETIF_F_HIGHDMA; if (register_netdev(dev)) { dev_err(&pdev->dev, "Cannot register net device, aborting\n"); goto err_out_free_consistent; } i = readl(cp->regs + REG_BIM_CFG); netdev_info(dev, "Sun Cassini%s (%sbit/%sMHz PCI/%s) Ethernet[%d] %pM\n", (cp->cas_flags & CAS_FLAG_REG_PLUS) ? "+" : "", (i & BIM_CFG_32BIT) ? "32" : "64", (i & BIM_CFG_66MHZ) ? "66" : "33", (cp->phy_type == CAS_PHY_SERDES) ? "Fi" : "Cu", pdev->irq, dev->dev_addr); pci_set_drvdata(pdev, dev); cp->hw_running = 1; cas_entropy_reset(cp); cas_phy_init(cp); cas_begin_auto_negotiation(cp, NULL); return 0; err_out_free_consistent: pci_free_consistent(pdev, sizeof(struct cas_init_block), cp->init_block, cp->block_dvma); err_out_iounmap: mutex_lock(&cp->pm_mutex); if (cp->hw_running) cas_shutdown(cp); mutex_unlock(&cp->pm_mutex); pci_iounmap(pdev, cp->regs); err_out_free_res: pci_release_regions(pdev); err_write_cacheline: /* Try to restore it in case the error occurred after we * set it. */ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, orig_cacheline_size); err_out_free_netdev: free_netdev(dev); err_out_disable_pdev: pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); return -ENODEV; } static void __devexit cas_remove_one(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct cas *cp; if (!dev) return; cp = netdev_priv(dev); unregister_netdev(dev); if (cp->fw_data) vfree(cp->fw_data); mutex_lock(&cp->pm_mutex); cancel_work_sync(&cp->reset_task); if (cp->hw_running) cas_shutdown(cp); mutex_unlock(&cp->pm_mutex); #if 1 if (cp->orig_cacheline_size) { /* Restore the cache line size if we had modified * it. 
*/ pci_write_config_byte(pdev, PCI_CACHE_LINE_SIZE, cp->orig_cacheline_size); } #endif pci_free_consistent(pdev, sizeof(struct cas_init_block), cp->init_block, cp->block_dvma); pci_iounmap(pdev, cp->regs); free_netdev(dev); pci_release_regions(pdev); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } #ifdef CONFIG_PM static int cas_suspend(struct pci_dev *pdev, pm_message_t state) { struct net_device *dev = pci_get_drvdata(pdev); struct cas *cp = netdev_priv(dev); unsigned long flags; mutex_lock(&cp->pm_mutex); /* If the driver is opened, we stop the DMA */ if (cp->opened) { netif_device_detach(dev); cas_lock_all_save(cp, flags); /* We can set the second arg of cas_reset to 0 * because on resume, we'll call cas_init_hw with * its second arg set so that autonegotiation is * restarted. */ cas_reset(cp, 0); cas_clean_rings(cp); cas_unlock_all_restore(cp, flags); } if (cp->hw_running) cas_shutdown(cp); mutex_unlock(&cp->pm_mutex); return 0; } static int cas_resume(struct pci_dev *pdev) { struct net_device *dev = pci_get_drvdata(pdev); struct cas *cp = netdev_priv(dev); netdev_info(dev, "resuming\n"); mutex_lock(&cp->pm_mutex); cas_hard_reset(cp); if (cp->opened) { unsigned long flags; cas_lock_all_save(cp, flags); cas_reset(cp, 0); cp->hw_running = 1; cas_clean_rings(cp); cas_init_hw(cp, 1); cas_unlock_all_restore(cp, flags); netif_device_attach(dev); } mutex_unlock(&cp->pm_mutex); return 0; } #endif /* CONFIG_PM */ static struct pci_driver cas_driver = { .name = DRV_MODULE_NAME, .id_table = cas_pci_tbl, .probe = cas_init_one, .remove = __devexit_p(cas_remove_one), #ifdef CONFIG_PM .suspend = cas_suspend, .resume = cas_resume #endif }; static int __init cas_init(void) { if (linkdown_timeout > 0) link_transition_timeout = linkdown_timeout * HZ; else link_transition_timeout = 0; return pci_register_driver(&cas_driver); } static void __exit cas_cleanup(void) { pci_unregister_driver(&cas_driver); } module_init(cas_init); module_exit(cas_cleanup);
gpl-2.0
Euphoria-OS-Legacy/android_kernel_oneplus_msm8974
drivers/video/mbx/mbxfb.c
4896
26101
/* * linux/drivers/video/mbx/mbxfb.c * * Copyright (C) 2006-2007 8D Technologies inc * Raphael Assenat <raph@8d.com> * - Added video overlay support * - Various improvements * * Copyright (C) 2006 Compulab, Ltd. * Mike Rapoport <mike@compulab.co.il> * - Creation of driver * * Based on pxafb.c * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Intel 2700G (Marathon) Graphics Accelerator Frame Buffer Driver * */ #include <linux/delay.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/uaccess.h> #include <asm/io.h> #include <video/mbxfb.h> #include "regs.h" #include "reg_bits.h" static void __iomem *virt_base_2700; #define write_reg(val, reg) do { writel((val), (reg)); } while(0) /* Without this delay, the graphics appears somehow scaled and * there is a lot of jitter in scanlines. This delay is probably * needed only after setting some specific register(s) somewhere, * not all over the place... 
*/ #define write_reg_dly(val, reg) do { writel((val), reg); udelay(1000); } while(0) #define MIN_XRES 16 #define MIN_YRES 16 #define MAX_XRES 2048 #define MAX_YRES 2048 #define MAX_PALETTES 16 /* FIXME: take care of different chip revisions with different sizes of ODFB */ #define MEMORY_OFFSET 0x60000 struct mbxfb_info { struct device *dev; struct resource *fb_res; struct resource *fb_req; struct resource *reg_res; struct resource *reg_req; void __iomem *fb_virt_addr; unsigned long fb_phys_addr; void __iomem *reg_virt_addr; unsigned long reg_phys_addr; int (*platform_probe) (struct fb_info * fb); int (*platform_remove) (struct fb_info * fb); u32 pseudo_palette[MAX_PALETTES]; #ifdef CONFIG_FB_MBX_DEBUG void *debugfs_data; #endif }; static struct fb_var_screeninfo mbxfb_default __devinitdata = { .xres = 640, .yres = 480, .xres_virtual = 640, .yres_virtual = 480, .bits_per_pixel = 16, .red = {11, 5, 0}, .green = {5, 6, 0}, .blue = {0, 5, 0}, .activate = FB_ACTIVATE_TEST, .height = -1, .width = -1, .pixclock = 40000, .left_margin = 48, .right_margin = 16, .upper_margin = 33, .lower_margin = 10, .hsync_len = 96, .vsync_len = 2, .vmode = FB_VMODE_NONINTERLACED, .sync = FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, }; static struct fb_fix_screeninfo mbxfb_fix __devinitdata = { .id = "MBX", .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_TRUECOLOR, .xpanstep = 0, .ypanstep = 0, .ywrapstep = 0, .accel = FB_ACCEL_NONE, }; struct pixclock_div { u8 m; u8 n; u8 p; }; static unsigned int mbxfb_get_pixclock(unsigned int pixclock_ps, struct pixclock_div *div) { u8 m, n, p; unsigned int err = 0; unsigned int min_err = ~0x0; unsigned int clk; unsigned int best_clk = 0; unsigned int ref_clk = 13000; /* FIXME: take from platform data */ unsigned int pixclock; /* convert pixclock to KHz */ pixclock = PICOS2KHZ(pixclock_ps); /* PLL output freq = (ref_clk * M) / (N * 2^P) * * M: 1 to 63 * N: 1 to 7 * P: 0 to 7 */ /* RAPH: When N==1, the resulting pixel clock appears to * get divided 
by 2. Preventing N=1 by starting the following * loop at 2 prevents this. Is this a bug with my chip * revision or something I dont understand? */ for (m = 1; m < 64; m++) { for (n = 2; n < 8; n++) { for (p = 0; p < 8; p++) { clk = (ref_clk * m) / (n * (1 << p)); err = (clk > pixclock) ? (clk - pixclock) : (pixclock - clk); if (err < min_err) { min_err = err; best_clk = clk; div->m = m; div->n = n; div->p = p; } } } } return KHZ2PICOS(best_clk); } static int mbxfb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int trans, struct fb_info *info) { u32 val, ret = 1; if (regno < MAX_PALETTES) { u32 *pal = info->pseudo_palette; val = (red & 0xf800) | ((green & 0xfc00) >> 5) | ((blue & 0xf800) >> 11); pal[regno] = val; ret = 0; } return ret; } static int mbxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info) { struct pixclock_div div; var->pixclock = mbxfb_get_pixclock(var->pixclock, &div); if (var->xres < MIN_XRES) var->xres = MIN_XRES; if (var->yres < MIN_YRES) var->yres = MIN_YRES; if (var->xres > MAX_XRES) return -EINVAL; if (var->yres > MAX_YRES) return -EINVAL; var->xres_virtual = max(var->xres_virtual, var->xres); var->yres_virtual = max(var->yres_virtual, var->yres); switch (var->bits_per_pixel) { /* 8 bits-per-pixel is not supported yet */ case 8: return -EINVAL; case 16: var->green.length = (var->green.length == 5) ? 5 : 6; var->red.length = 5; var->blue.length = 5; var->transp.length = 6 - var->green.length; var->blue.offset = 0; var->green.offset = 5; var->red.offset = 5 + var->green.length; var->transp.offset = (5 + var->red.offset) & 15; break; case 24: /* RGB 888 */ case 32: /* RGBA 8888 */ var->red.offset = 16; var->red.length = 8; var->green.offset = 8; var->green.length = 8; var->blue.offset = 0; var->blue.length = 8; var->transp.length = var->bits_per_pixel - 24; var->transp.offset = (var->transp.length) ? 
24 : 0; break; } var->red.msb_right = 0; var->green.msb_right = 0; var->blue.msb_right = 0; var->transp.msb_right = 0; return 0; } static int mbxfb_set_par(struct fb_info *info) { struct fb_var_screeninfo *var = &info->var; struct pixclock_div div; ushort hbps, ht, hfps, has; ushort vbps, vt, vfps, vas; u32 gsctrl = readl(GSCTRL); u32 gsadr = readl(GSADR); info->fix.line_length = var->xres_virtual * var->bits_per_pixel / 8; /* setup color mode */ gsctrl &= ~(FMsk(GSCTRL_GPIXFMT)); /* FIXME: add *WORKING* support for 8-bits per color */ if (info->var.bits_per_pixel == 8) { return -EINVAL; } else { fb_dealloc_cmap(&info->cmap); gsctrl &= ~GSCTRL_LUT_EN; info->fix.visual = FB_VISUAL_TRUECOLOR; switch (info->var.bits_per_pixel) { case 16: if (info->var.green.length == 5) gsctrl |= GSCTRL_GPIXFMT_ARGB1555; else gsctrl |= GSCTRL_GPIXFMT_RGB565; break; case 24: gsctrl |= GSCTRL_GPIXFMT_RGB888; break; case 32: gsctrl |= GSCTRL_GPIXFMT_ARGB8888; break; } } /* setup resolution */ gsctrl &= ~(FMsk(GSCTRL_GSWIDTH) | FMsk(GSCTRL_GSHEIGHT)); gsctrl |= Gsctrl_Width(info->var.xres) | Gsctrl_Height(info->var.yres); write_reg_dly(gsctrl, GSCTRL); gsadr &= ~(FMsk(GSADR_SRCSTRIDE)); gsadr |= Gsadr_Srcstride(info->var.xres * info->var.bits_per_pixel / (8 * 16) - 1); write_reg_dly(gsadr, GSADR); /* setup timings */ var->pixclock = mbxfb_get_pixclock(info->var.pixclock, &div); write_reg_dly((Disp_Pll_M(div.m) | Disp_Pll_N(div.n) | Disp_Pll_P(div.p) | DISP_PLL_EN), DISPPLL); hbps = var->hsync_len; has = hbps + var->left_margin; hfps = has + var->xres; ht = hfps + var->right_margin; vbps = var->vsync_len; vas = vbps + var->upper_margin; vfps = vas + var->yres; vt = vfps + var->lower_margin; write_reg_dly((Dht01_Hbps(hbps) | Dht01_Ht(ht)), DHT01); write_reg_dly((Dht02_Hlbs(has) | Dht02_Has(has)), DHT02); write_reg_dly((Dht03_Hfps(hfps) | Dht03_Hrbs(hfps)), DHT03); write_reg_dly((Dhdet_Hdes(has) | Dhdet_Hdef(hfps)), DHDET); write_reg_dly((Dvt01_Vbps(vbps) | Dvt01_Vt(vt)), DVT01); 
write_reg_dly((Dvt02_Vtbs(vas) | Dvt02_Vas(vas)), DVT02); write_reg_dly((Dvt03_Vfps(vfps) | Dvt03_Vbbs(vfps)), DVT03); write_reg_dly((Dvdet_Vdes(vas) | Dvdet_Vdef(vfps)), DVDET); write_reg_dly((Dvectrl_Vevent(vfps) | Dvectrl_Vfetch(vbps)), DVECTRL); write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); write_reg_dly(DINTRE_VEVENT0_EN, DINTRE); return 0; } static int mbxfb_blank(int blank, struct fb_info *info) { switch (blank) { case FB_BLANK_POWERDOWN: case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_NORMAL: write_reg_dly((readl(DSCTRL) & ~DSCTRL_SYNCGEN_EN), DSCTRL); write_reg_dly((readl(PIXCLK) & ~PIXCLK_EN), PIXCLK); write_reg_dly((readl(VOVRCLK) & ~VOVRCLK_EN), VOVRCLK); break; case FB_BLANK_UNBLANK: write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); write_reg_dly((readl(PIXCLK) | PIXCLK_EN), PIXCLK); break; } return 0; } static int mbxfb_setupOverlay(struct mbxfb_overlaySetup *set) { u32 vsctrl, vscadr, vsadr; u32 sssize, spoctrl, shctrl; u32 vubase, vvbase; u32 vovrclk; if (set->scaled_width==0 || set->scaled_height==0) return -EINVAL; /* read registers which have reserved bits * so we can write them back as-is. 
*/ vovrclk = readl(VOVRCLK); vsctrl = readl(VSCTRL); vscadr = readl(VSCADR); vubase = readl(VUBASE); vvbase = readl(VVBASE); shctrl = readl(SHCTRL); spoctrl = readl(SPOCTRL); sssize = readl(SSSIZE); vsctrl &= ~( FMsk(VSCTRL_VSWIDTH) | FMsk(VSCTRL_VSHEIGHT) | FMsk(VSCTRL_VPIXFMT) | VSCTRL_GAMMA_EN | VSCTRL_CSC_EN | VSCTRL_COSITED ); vsctrl |= Vsctrl_Width(set->width) | Vsctrl_Height(set->height) | VSCTRL_CSC_EN; vscadr &= ~(VSCADR_STR_EN | FMsk(VSCADR_VBASE_ADR) ); vubase &= ~(VUBASE_UVHALFSTR | FMsk(VUBASE_UBASE_ADR)); vvbase &= ~(FMsk(VVBASE_VBASE_ADR)); switch (set->fmt) { case MBXFB_FMT_YUV16: vsctrl |= VSCTRL_VPIXFMT_YUV12; set->Y_stride = ((set->width) + 0xf ) & ~0xf; break; case MBXFB_FMT_YUV12: vsctrl |= VSCTRL_VPIXFMT_YUV12; set->Y_stride = ((set->width) + 0xf ) & ~0xf; vubase |= VUBASE_UVHALFSTR; break; case MBXFB_FMT_UY0VY1: vsctrl |= VSCTRL_VPIXFMT_UY0VY1; set->Y_stride = (set->width*2 + 0xf ) & ~0xf; break; case MBXFB_FMT_VY0UY1: vsctrl |= VSCTRL_VPIXFMT_VY0UY1; set->Y_stride = (set->width*2 + 0xf ) & ~0xf; break; case MBXFB_FMT_Y0UY1V: vsctrl |= VSCTRL_VPIXFMT_Y0UY1V; set->Y_stride = (set->width*2 + 0xf ) & ~0xf; break; case MBXFB_FMT_Y0VY1U: vsctrl |= VSCTRL_VPIXFMT_Y0VY1U; set->Y_stride = (set->width*2 + 0xf ) & ~0xf; break; default: return -EINVAL; } /* VSCTRL has the bits which sets the Video Pixel Format. * When passing from a packed to planar format, * if we write VSCTRL first, VVBASE and VUBASE would * be zero if we would not set them here. (And then, * the chips hangs and only a reset seems to fix it). * * If course, the values calculated here have no meaning * for packed formats. 
*/ set->UV_stride = ((set->width/2) + 0x7 ) & ~0x7; set->U_offset = set->height * set->Y_stride; set->V_offset = set->U_offset + set->height * set->UV_stride; vubase |= Vubase_Ubase_Adr( (0x60000 + set->mem_offset + set->U_offset)>>3); vvbase |= Vvbase_Vbase_Adr( (0x60000 + set->mem_offset + set->V_offset)>>3); vscadr |= Vscadr_Vbase_Adr((0x60000 + set->mem_offset)>>4); if (set->enable) vscadr |= VSCADR_STR_EN; vsadr = Vsadr_Srcstride((set->Y_stride)/16-1) | Vsadr_Xstart(set->x) | Vsadr_Ystart(set->y); sssize &= ~(FMsk(SSSIZE_SC_WIDTH) | FMsk(SSSIZE_SC_HEIGHT)); sssize = Sssize_Sc_Width(set->scaled_width-1) | Sssize_Sc_Height(set->scaled_height-1); spoctrl &= ~(SPOCTRL_H_SC_BP | SPOCTRL_V_SC_BP | SPOCTRL_HV_SC_OR | SPOCTRL_VS_UR_C | FMsk(SPOCTRL_VPITCH)); spoctrl |= Spoctrl_Vpitch((set->height<<11)/set->scaled_height); /* Bypass horiz/vert scaler when same size */ if (set->scaled_width == set->width) spoctrl |= SPOCTRL_H_SC_BP; if (set->scaled_height == set->height) spoctrl |= SPOCTRL_V_SC_BP; shctrl &= ~(FMsk(SHCTRL_HPITCH) | SHCTRL_HDECIM); shctrl |= Shctrl_Hpitch((set->width<<11)/set->scaled_width); /* Video plane registers */ write_reg(vsctrl, VSCTRL); write_reg(vscadr, VSCADR); write_reg(vubase, VUBASE); write_reg(vvbase, VVBASE); write_reg(vsadr, VSADR); /* Video scaler registers */ write_reg(sssize, SSSIZE); write_reg(spoctrl, SPOCTRL); write_reg(shctrl, SHCTRL); /* Clock */ if (set->enable) vovrclk |= 1; else vovrclk &= ~1; write_reg(vovrclk, VOVRCLK); return 0; } static int mbxfb_ioctl_planeorder(struct mbxfb_planeorder *porder) { unsigned long gscadr, vscadr; if (porder->bottom == porder->top) return -EINVAL; gscadr = readl(GSCADR); vscadr = readl(VSCADR); gscadr &= ~(FMsk(GSCADR_BLEND_POS)); vscadr &= ~(FMsk(VSCADR_BLEND_POS)); switch (porder->bottom) { case MBXFB_PLANE_GRAPHICS: gscadr |= GSCADR_BLEND_GFX; break; case MBXFB_PLANE_VIDEO: vscadr |= VSCADR_BLEND_GFX; break; default: return -EINVAL; } switch (porder->top) { case MBXFB_PLANE_GRAPHICS: gscadr 
|= GSCADR_BLEND_VID; break; case MBXFB_PLANE_VIDEO: vscadr |= GSCADR_BLEND_VID; break; default: return -EINVAL; } write_reg_dly(vscadr, VSCADR); write_reg_dly(gscadr, GSCADR); return 0; } static int mbxfb_ioctl_alphactl(struct mbxfb_alphaCtl *alpha) { unsigned long vscadr, vbbase, vcmsk; unsigned long gscadr, gbbase, gdrctrl; vbbase = Vbbase_Glalpha(alpha->overlay_global_alpha) | Vbbase_Colkey(alpha->overlay_colorkey); gbbase = Gbbase_Glalpha(alpha->graphics_global_alpha) | Gbbase_Colkey(alpha->graphics_colorkey); vcmsk = readl(VCMSK); vcmsk &= ~(FMsk(VCMSK_COLKEY_M)); vcmsk |= Vcmsk_colkey_m(alpha->overlay_colorkey_mask); gdrctrl = readl(GDRCTRL); gdrctrl &= ~(FMsk(GDRCTRL_COLKEYM)); gdrctrl |= Gdrctrl_Colkeym(alpha->graphics_colorkey_mask); vscadr = readl(VSCADR); vscadr &= ~(FMsk(VSCADR_BLEND_M) | VSCADR_COLKEYSRC | VSCADR_COLKEY_EN); gscadr = readl(GSCADR); gscadr &= ~(FMsk(GSCADR_BLEND_M) | GSCADR_COLKEY_EN | GSCADR_COLKEYSRC); switch (alpha->overlay_colorkey_mode) { case MBXFB_COLORKEY_DISABLED: break; case MBXFB_COLORKEY_PREVIOUS: vscadr |= VSCADR_COLKEY_EN; break; case MBXFB_COLORKEY_CURRENT: vscadr |= VSCADR_COLKEY_EN | VSCADR_COLKEYSRC; break; default: return -EINVAL; } switch (alpha->overlay_blend_mode) { case MBXFB_ALPHABLEND_NONE: vscadr |= VSCADR_BLEND_NONE; break; case MBXFB_ALPHABLEND_GLOBAL: vscadr |= VSCADR_BLEND_GLOB; break; case MBXFB_ALPHABLEND_PIXEL: vscadr |= VSCADR_BLEND_PIX; break; default: return -EINVAL; } switch (alpha->graphics_colorkey_mode) { case MBXFB_COLORKEY_DISABLED: break; case MBXFB_COLORKEY_PREVIOUS: gscadr |= GSCADR_COLKEY_EN; break; case MBXFB_COLORKEY_CURRENT: gscadr |= GSCADR_COLKEY_EN | GSCADR_COLKEYSRC; break; default: return -EINVAL; } switch (alpha->graphics_blend_mode) { case MBXFB_ALPHABLEND_NONE: gscadr |= GSCADR_BLEND_NONE; break; case MBXFB_ALPHABLEND_GLOBAL: gscadr |= GSCADR_BLEND_GLOB; break; case MBXFB_ALPHABLEND_PIXEL: gscadr |= GSCADR_BLEND_PIX; break; default: return -EINVAL; } write_reg_dly(vbbase, VBBASE); 
write_reg_dly(gbbase, GBBASE); write_reg_dly(vcmsk, VCMSK); write_reg_dly(gdrctrl, GDRCTRL); write_reg_dly(gscadr, GSCADR); write_reg_dly(vscadr, VSCADR); return 0; } static int mbxfb_ioctl(struct fb_info *info, unsigned int cmd, unsigned long arg) { struct mbxfb_overlaySetup setup; struct mbxfb_planeorder porder; struct mbxfb_alphaCtl alpha; struct mbxfb_reg reg; int res; __u32 tmp; switch (cmd) { case MBXFB_IOCX_OVERLAY: if (copy_from_user(&setup, (void __user*)arg, sizeof(struct mbxfb_overlaySetup))) return -EFAULT; res = mbxfb_setupOverlay(&setup); if (res) return res; if (copy_to_user((void __user*)arg, &setup, sizeof(struct mbxfb_overlaySetup))) return -EFAULT; return 0; case MBXFB_IOCS_PLANEORDER: if (copy_from_user(&porder, (void __user*)arg, sizeof(struct mbxfb_planeorder))) return -EFAULT; return mbxfb_ioctl_planeorder(&porder); case MBXFB_IOCS_ALPHA: if (copy_from_user(&alpha, (void __user*)arg, sizeof(struct mbxfb_alphaCtl))) return -EFAULT; return mbxfb_ioctl_alphactl(&alpha); case MBXFB_IOCS_REG: if (copy_from_user(&reg, (void __user*)arg, sizeof(struct mbxfb_reg))) return -EFAULT; if (reg.addr >= 0x10000) /* regs are from 0x3fe0000 to 0x3feffff */ return -EINVAL; tmp = readl(virt_base_2700 + reg.addr); tmp &= ~reg.mask; tmp |= reg.val & reg.mask; writel(tmp, virt_base_2700 + reg.addr); return 0; case MBXFB_IOCX_REG: if (copy_from_user(&reg, (void __user*)arg, sizeof(struct mbxfb_reg))) return -EFAULT; if (reg.addr >= 0x10000) /* regs are from 0x3fe0000 to 0x3feffff */ return -EINVAL; reg.val = readl(virt_base_2700 + reg.addr); if (copy_to_user((void __user*)arg, &reg, sizeof(struct mbxfb_reg))) return -EFAULT; return 0; } return -EINVAL; } static struct fb_ops mbxfb_ops = { .owner = THIS_MODULE, .fb_check_var = mbxfb_check_var, .fb_set_par = mbxfb_set_par, .fb_setcolreg = mbxfb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_blank = mbxfb_blank, .fb_ioctl = mbxfb_ioctl, }; /* Enable external 
SDRAM controller. Assume that all clocks are active by now. */ static void __devinit setup_memc(struct fb_info *fbi) { unsigned long tmp; int i; /* FIXME: use platform specific parameters */ /* setup SDRAM controller */ write_reg_dly((LMCFG_LMC_DS | LMCFG_LMC_TS | LMCFG_LMD_TS | LMCFG_LMA_TS), LMCFG); write_reg_dly(LMPWR_MC_PWR_ACT, LMPWR); /* setup SDRAM timings */ write_reg_dly((Lmtim_Tras(7) | Lmtim_Trp(3) | Lmtim_Trcd(3) | Lmtim_Trc(9) | Lmtim_Tdpl(2)), LMTIM); /* setup SDRAM refresh rate */ write_reg_dly(0xc2b, LMREFRESH); /* setup SDRAM type parameters */ write_reg_dly((LMTYPE_CASLAT_3 | LMTYPE_BKSZ_2 | LMTYPE_ROWSZ_11 | LMTYPE_COLSZ_8), LMTYPE); /* enable memory controller */ write_reg_dly(LMPWR_MC_PWR_ACT, LMPWR); /* perform dummy reads */ for ( i = 0; i < 16; i++ ) { tmp = readl(fbi->screen_base); } } static void enable_clocks(struct fb_info *fbi) { /* enable clocks */ write_reg_dly(SYSCLKSRC_PLL_2, SYSCLKSRC); write_reg_dly(PIXCLKSRC_PLL_1, PIXCLKSRC); write_reg_dly(0x00000000, CLKSLEEP); /* PLL output = (Frefclk * M) / (N * 2^P ) * * M: 0x17, N: 0x3, P: 0x0 == 100 Mhz! 
* M: 0xb, N: 0x1, P: 0x1 == 71 Mhz * */ write_reg_dly((Core_Pll_M(0xb) | Core_Pll_N(0x1) | Core_Pll_P(0x1) | CORE_PLL_EN), COREPLL); write_reg_dly((Disp_Pll_M(0x1b) | Disp_Pll_N(0x7) | Disp_Pll_P(0x1) | DISP_PLL_EN), DISPPLL); write_reg_dly(0x00000000, VOVRCLK); write_reg_dly(PIXCLK_EN, PIXCLK); write_reg_dly(MEMCLK_EN, MEMCLK); write_reg_dly(0x00000001, M24CLK); write_reg_dly(0x00000001, MBXCLK); write_reg_dly(SDCLK_EN, SDCLK); write_reg_dly(0x00000001, PIXCLKDIV); } static void __devinit setup_graphics(struct fb_info *fbi) { unsigned long gsctrl; unsigned long vscadr; gsctrl = GSCTRL_GAMMA_EN | Gsctrl_Width(fbi->var.xres) | Gsctrl_Height(fbi->var.yres); switch (fbi->var.bits_per_pixel) { case 16: if (fbi->var.green.length == 5) gsctrl |= GSCTRL_GPIXFMT_ARGB1555; else gsctrl |= GSCTRL_GPIXFMT_RGB565; break; case 24: gsctrl |= GSCTRL_GPIXFMT_RGB888; break; case 32: gsctrl |= GSCTRL_GPIXFMT_ARGB8888; break; } write_reg_dly(gsctrl, GSCTRL); write_reg_dly(0x00000000, GBBASE); write_reg_dly(0x00ffffff, GDRCTRL); write_reg_dly((GSCADR_STR_EN | Gscadr_Gbase_Adr(0x6000)), GSCADR); write_reg_dly(0x00000000, GPLUT); vscadr = readl(VSCADR); vscadr &= ~(FMsk(VSCADR_BLEND_POS) | FMsk(VSCADR_BLEND_M)); vscadr |= VSCADR_BLEND_VID | VSCADR_BLEND_NONE; write_reg_dly(vscadr, VSCADR); } static void __devinit setup_display(struct fb_info *fbi) { unsigned long dsctrl = 0; dsctrl = DSCTRL_BLNK_POL; if (fbi->var.sync & FB_SYNC_HOR_HIGH_ACT) dsctrl |= DSCTRL_HS_POL; if (fbi->var.sync & FB_SYNC_VERT_HIGH_ACT) dsctrl |= DSCTRL_VS_POL; write_reg_dly(dsctrl, DSCTRL); write_reg_dly(0xd0303010, DMCTRL); write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); } static void __devinit enable_controller(struct fb_info *fbi) { u32 svctrl, shctrl; write_reg_dly(SYSRST_RST, SYSRST); /* setup a timeout, raise drive strength */ write_reg_dly(0xffffff0c, SYSCFG); enable_clocks(fbi); setup_memc(fbi); setup_graphics(fbi); setup_display(fbi); shctrl = readl(SHCTRL); shctrl &= ~(FMsk(SHCTRL_HINITIAL)); 
shctrl |= Shctrl_Hinitial(4<<11); writel(shctrl, SHCTRL); svctrl = Svctrl_Initial1(1<<10) | Svctrl_Initial2(1<<10); writel(svctrl, SVCTRL); writel(SPOCTRL_H_SC_BP | SPOCTRL_V_SC_BP | SPOCTRL_VORDER_4TAP , SPOCTRL); /* Those coefficients are good for scaling up. For scaling * down, the application has to calculate them. */ write_reg(0xff000100, VSCOEFF0); write_reg(0xfdfcfdfe, VSCOEFF1); write_reg(0x170d0500, VSCOEFF2); write_reg(0x3d372d22, VSCOEFF3); write_reg(0x00000040, VSCOEFF4); write_reg(0xff010100, HSCOEFF0); write_reg(0x00000000, HSCOEFF1); write_reg(0x02010000, HSCOEFF2); write_reg(0x01020302, HSCOEFF3); write_reg(0xf9fbfe00, HSCOEFF4); write_reg(0xfbf7f6f7, HSCOEFF5); write_reg(0x1c110700, HSCOEFF6); write_reg(0x3e393127, HSCOEFF7); write_reg(0x00000040, HSCOEFF8); } #ifdef CONFIG_PM /* * Power management hooks. Note that we won't be called from IRQ context, * unlike the blank functions above, so we may sleep. */ static int mbxfb_suspend(struct platform_device *dev, pm_message_t state) { /* make frame buffer memory enter self-refresh mode */ write_reg_dly(LMPWR_MC_PWR_SRM, LMPWR); while (readl(LMPWRSTAT) != LMPWRSTAT_MC_PWR_SRM) ; /* empty statement */ /* reset the device, since it's initial state is 'mostly sleeping' */ write_reg_dly(SYSRST_RST, SYSRST); return 0; } static int mbxfb_resume(struct platform_device *dev) { struct fb_info *fbi = platform_get_drvdata(dev); enable_clocks(fbi); /* setup_graphics(fbi); */ /* setup_display(fbi); */ write_reg_dly((readl(DSCTRL) | DSCTRL_SYNCGEN_EN), DSCTRL); return 0; } #else #define mbxfb_suspend NULL #define mbxfb_resume NULL #endif /* debugfs entries */ #ifndef CONFIG_FB_MBX_DEBUG #define mbxfb_debugfs_init(x) do {} while(0) #define mbxfb_debugfs_remove(x) do {} while(0) #endif #define res_size(_r) (((_r)->end - (_r)->start) + 1) static int __devinit mbxfb_probe(struct platform_device *dev) { int ret; struct fb_info *fbi; struct mbxfb_info *mfbi; struct mbxfb_platform_data *pdata; dev_dbg(&dev->dev, 
"mbxfb_probe\n"); pdata = dev->dev.platform_data; if (!pdata) { dev_err(&dev->dev, "platform data is required\n"); return -EINVAL; } fbi = framebuffer_alloc(sizeof(struct mbxfb_info), &dev->dev); if (fbi == NULL) { dev_err(&dev->dev, "framebuffer_alloc failed\n"); return -ENOMEM; } mfbi = fbi->par; fbi->pseudo_palette = mfbi->pseudo_palette; if (pdata->probe) mfbi->platform_probe = pdata->probe; if (pdata->remove) mfbi->platform_remove = pdata->remove; mfbi->fb_res = platform_get_resource(dev, IORESOURCE_MEM, 0); mfbi->reg_res = platform_get_resource(dev, IORESOURCE_MEM, 1); if (!mfbi->fb_res || !mfbi->reg_res) { dev_err(&dev->dev, "no resources found\n"); ret = -ENODEV; goto err1; } mfbi->fb_req = request_mem_region(mfbi->fb_res->start, res_size(mfbi->fb_res), dev->name); if (mfbi->fb_req == NULL) { dev_err(&dev->dev, "failed to claim framebuffer memory\n"); ret = -EINVAL; goto err1; } mfbi->fb_phys_addr = mfbi->fb_res->start; mfbi->reg_req = request_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res), dev->name); if (mfbi->reg_req == NULL) { dev_err(&dev->dev, "failed to claim Marathon registers\n"); ret = -EINVAL; goto err2; } mfbi->reg_phys_addr = mfbi->reg_res->start; mfbi->reg_virt_addr = ioremap_nocache(mfbi->reg_phys_addr, res_size(mfbi->reg_req)); if (!mfbi->reg_virt_addr) { dev_err(&dev->dev, "failed to ioremap Marathon registers\n"); ret = -EINVAL; goto err3; } virt_base_2700 = mfbi->reg_virt_addr; mfbi->fb_virt_addr = ioremap_nocache(mfbi->fb_phys_addr, res_size(mfbi->fb_req)); if (!mfbi->reg_virt_addr) { dev_err(&dev->dev, "failed to ioremap frame buffer\n"); ret = -EINVAL; goto err4; } fbi->screen_base = (char __iomem *)(mfbi->fb_virt_addr + 0x60000); fbi->screen_size = pdata->memsize; fbi->fbops = &mbxfb_ops; fbi->var = mbxfb_default; fbi->fix = mbxfb_fix; fbi->fix.smem_start = mfbi->fb_phys_addr + 0x60000; fbi->fix.smem_len = pdata->memsize; fbi->fix.line_length = mbxfb_default.xres_virtual * mbxfb_default.bits_per_pixel / 8; ret = 
fb_alloc_cmap(&fbi->cmap, 256, 0); if (ret < 0) { dev_err(&dev->dev, "fb_alloc_cmap failed\n"); ret = -EINVAL; goto err5; } platform_set_drvdata(dev, fbi); printk(KERN_INFO "fb%d: mbx frame buffer device\n", fbi->node); if (mfbi->platform_probe) mfbi->platform_probe(fbi); enable_controller(fbi); mbxfb_debugfs_init(fbi); ret = register_framebuffer(fbi); if (ret < 0) { dev_err(&dev->dev, "register_framebuffer failed\n"); ret = -EINVAL; goto err6; } return 0; err6: fb_dealloc_cmap(&fbi->cmap); err5: iounmap(mfbi->fb_virt_addr); err4: iounmap(mfbi->reg_virt_addr); err3: release_mem_region(mfbi->reg_res->start, res_size(mfbi->reg_res)); err2: release_mem_region(mfbi->fb_res->start, res_size(mfbi->fb_res)); err1: framebuffer_release(fbi); return ret; } static int __devexit mbxfb_remove(struct platform_device *dev) { struct fb_info *fbi = platform_get_drvdata(dev); write_reg_dly(SYSRST_RST, SYSRST); mbxfb_debugfs_remove(fbi); if (fbi) { struct mbxfb_info *mfbi = fbi->par; unregister_framebuffer(fbi); if (mfbi) { if (mfbi->platform_remove) mfbi->platform_remove(fbi); if (mfbi->fb_virt_addr) iounmap(mfbi->fb_virt_addr); if (mfbi->reg_virt_addr) iounmap(mfbi->reg_virt_addr); if (mfbi->reg_req) release_mem_region(mfbi->reg_req->start, res_size(mfbi->reg_req)); if (mfbi->fb_req) release_mem_region(mfbi->fb_req->start, res_size(mfbi->fb_req)); } framebuffer_release(fbi); } return 0; } static struct platform_driver mbxfb_driver = { .probe = mbxfb_probe, .remove = mbxfb_remove, .suspend = mbxfb_suspend, .resume = mbxfb_resume, .driver = { .name = "mbx-fb", }, }; module_platform_driver(mbxfb_driver); MODULE_DESCRIPTION("loadable framebuffer driver for Marathon device"); MODULE_AUTHOR("Mike Rapoport, Compulab"); MODULE_LICENSE("GPL");
gpl-2.0
kallaballa/linux-sunxi
drivers/staging/comedi/drivers/adl_pci8164.c
4896
11579
/* comedi/drivers/adl_pci8164.c Hardware comedi driver fot PCI-8164 Adlink card Copyright (C) 2004 Michel Lachine <mike@mikelachaine.ca> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: adl_pci8164 Description: Driver for the Adlink PCI-8164 4 Axes Motion Control board Devices: [ADLink] PCI-8164 (adl_pci8164) Author: Michel Lachaine <mike@mikelachaine.ca> Status: experimental Updated: Mon, 14 Apr 2008 15:10:32 +0100 Configuration Options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first supported PCI device found will be used. 
*/ #include "../comedidev.h" #include <linux/kernel.h> #include <linux/delay.h> #include "comedi_fc.h" #include "comedi_pci.h" #include "8253.h" #define PCI8164_AXIS_X 0x00 #define PCI8164_AXIS_Y 0x08 #define PCI8164_AXIS_Z 0x10 #define PCI8164_AXIS_U 0x18 #define PCI8164_MSTS 0x00 #define PCI8164_SSTS 0x02 #define PCI8164_BUF0 0x04 #define PCI8164_BUF1 0x06 #define PCI8164_CMD 0x00 #define PCI8164_OTP 0x02 #define PCI_DEVICE_ID_PCI8164 0x8164 static DEFINE_PCI_DEVICE_TABLE(adl_pci8164_pci_table) = { { PCI_DEVICE(PCI_VENDOR_ID_ADLINK, PCI_DEVICE_ID_PCI8164) }, {0} }; MODULE_DEVICE_TABLE(pci, adl_pci8164_pci_table); struct adl_pci8164_private { int data; struct pci_dev *pci_dev; }; #define devpriv ((struct adl_pci8164_private *)dev->private) static int adl_pci8164_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int adl_pci8164_detach(struct comedi_device *dev); static struct comedi_driver driver_adl_pci8164 = { .driver_name = "adl_pci8164", .module = THIS_MODULE, .attach = adl_pci8164_attach, .detach = adl_pci8164_detach, }; static int adl_pci8164_insn_read_msts(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int adl_pci8164_insn_read_ssts(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int adl_pci8164_insn_read_buf0(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int adl_pci8164_insn_read_buf1(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int adl_pci8164_insn_write_cmd(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int adl_pci8164_insn_write_otp(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int adl_pci8164_insn_write_buf0(struct comedi_device *dev, struct comedi_subdevice *s, 
struct comedi_insn *insn, unsigned int *data); static int adl_pci8164_insn_write_buf1(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data); static int adl_pci8164_attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct pci_dev *pcidev = NULL; struct comedi_subdevice *s; int bus, slot; printk(KERN_INFO "comedi: attempt to attach...\n"); printk(KERN_INFO "comedi%d: adl_pci8164\n", dev->minor); dev->board_name = "pci8164"; bus = it->options[0]; slot = it->options[1]; if (alloc_private(dev, sizeof(struct adl_pci8164_private)) < 0) return -ENOMEM; if (alloc_subdevices(dev, 4) < 0) return -ENOMEM; for_each_pci_dev(pcidev) { if (pcidev->vendor == PCI_VENDOR_ID_ADLINK && pcidev->device == PCI_DEVICE_ID_PCI8164) { if (bus || slot) { /* requested particular bus/slot */ if (pcidev->bus->number != bus || PCI_SLOT(pcidev->devfn) != slot) continue; } devpriv->pci_dev = pcidev; if (comedi_pci_enable(pcidev, "adl_pci8164") < 0) { printk(KERN_ERR "comedi%d: Failed to enable " "PCI device and request regions\n", dev->minor); return -EIO; } dev->iobase = pci_resource_start(pcidev, 2); printk(KERN_DEBUG "comedi: base addr %4lx\n", dev->iobase); s = dev->subdevices + 0; s->type = COMEDI_SUBD_PROC; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 4; s->maxdata = 0xffff; s->len_chanlist = 4; /* s->range_table = &range_axis; */ s->insn_read = adl_pci8164_insn_read_msts; s->insn_write = adl_pci8164_insn_write_cmd; s = dev->subdevices + 1; s->type = COMEDI_SUBD_PROC; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 4; s->maxdata = 0xffff; s->len_chanlist = 4; /* s->range_table = &range_axis; */ s->insn_read = adl_pci8164_insn_read_ssts; s->insn_write = adl_pci8164_insn_write_otp; s = dev->subdevices + 2; s->type = COMEDI_SUBD_PROC; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 4; s->maxdata = 0xffff; s->len_chanlist = 4; /* s->range_table = &range_axis; */ s->insn_read = 
adl_pci8164_insn_read_buf0; s->insn_write = adl_pci8164_insn_write_buf0; s = dev->subdevices + 3; s->type = COMEDI_SUBD_PROC; s->subdev_flags = SDF_READABLE | SDF_WRITABLE; s->n_chan = 4; s->maxdata = 0xffff; s->len_chanlist = 4; /* s->range_table = &range_axis; */ s->insn_read = adl_pci8164_insn_read_buf1; s->insn_write = adl_pci8164_insn_write_buf1; printk(KERN_INFO "comedi: attached\n"); return 1; } } printk(KERN_ERR "comedi%d: no supported board found!" "(req. bus/slot : %d/%d)\n", dev->minor, bus, slot); return -EIO; } static int adl_pci8164_detach(struct comedi_device *dev) { printk(KERN_INFO "comedi%d: pci8164: remove\n", dev->minor); if (devpriv && devpriv->pci_dev) { if (dev->iobase) comedi_pci_disable(devpriv->pci_dev); pci_dev_put(devpriv->pci_dev); } return 0; } /* all the read commands are the same except for the addition a constant * const to the data for inw() */ static void adl_pci8164_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data, char *action, unsigned short offset) { int axis, axis_reg; char *axisname; axis = CR_CHAN(insn->chanspec); switch (axis) { case 0: axis_reg = PCI8164_AXIS_X; axisname = "X"; break; case 1: axis_reg = PCI8164_AXIS_Y; axisname = "Y"; break; case 2: axis_reg = PCI8164_AXIS_Z; axisname = "Z"; break; case 3: axis_reg = PCI8164_AXIS_U; axisname = "U"; break; default: axis_reg = PCI8164_AXIS_X; axisname = "X"; } data[0] = inw(dev->iobase + axis_reg + offset); printk(KERN_DEBUG "comedi: pci8164 %s read -> " "%04X:%04X on axis %s\n", action, data[0], data[1], axisname); } static int adl_pci8164_insn_read_msts(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { adl_pci8164_insn_read(dev, s, insn, data, "MSTS", PCI8164_MSTS); return 2; } static int adl_pci8164_insn_read_ssts(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { adl_pci8164_insn_read(dev, s, insn, data, 
"SSTS", PCI8164_SSTS); return 2; } static int adl_pci8164_insn_read_buf0(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { adl_pci8164_insn_read(dev, s, insn, data, "BUF0", PCI8164_BUF0); return 2; } static int adl_pci8164_insn_read_buf1(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { adl_pci8164_insn_read(dev, s, insn, data, "BUF1", PCI8164_BUF1); return 2; } /* all the write commands are the same except for the addition a constant * const to the data for outw() */ static void adl_pci8164_insn_out(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data, char *action, unsigned short offset) { unsigned int axis, axis_reg; char *axisname; axis = CR_CHAN(insn->chanspec); switch (axis) { case 0: axis_reg = PCI8164_AXIS_X; axisname = "X"; break; case 1: axis_reg = PCI8164_AXIS_Y; axisname = "Y"; break; case 2: axis_reg = PCI8164_AXIS_Z; axisname = "Z"; break; case 3: axis_reg = PCI8164_AXIS_U; axisname = "U"; break; default: axis_reg = PCI8164_AXIS_X; axisname = "X"; } outw(data[0], dev->iobase + axis_reg + offset); printk(KERN_DEBUG "comedi: pci8164 %s write -> " "%04X:%04X on axis %s\n", action, data[0], data[1], axisname); } static int adl_pci8164_insn_write_cmd(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { adl_pci8164_insn_out(dev, s, insn, data, "CMD", PCI8164_CMD); return 2; } static int adl_pci8164_insn_write_otp(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { adl_pci8164_insn_out(dev, s, insn, data, "OTP", PCI8164_OTP); return 2; } static int adl_pci8164_insn_write_buf0(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { adl_pci8164_insn_out(dev, s, insn, data, "BUF0", PCI8164_BUF0); return 2; } static int adl_pci8164_insn_write_buf1(struct 
comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { adl_pci8164_insn_out(dev, s, insn, data, "BUF1", PCI8164_BUF1); return 2; } static int __devinit driver_adl_pci8164_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_adl_pci8164.driver_name); } static void __devexit driver_adl_pci8164_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_adl_pci8164_pci_driver = { .id_table = adl_pci8164_pci_table, .probe = &driver_adl_pci8164_pci_probe, .remove = __devexit_p(&driver_adl_pci8164_pci_remove) }; static int __init driver_adl_pci8164_init_module(void) { int retval; retval = comedi_driver_register(&driver_adl_pci8164); if (retval < 0) return retval; driver_adl_pci8164_pci_driver.name = (char *)driver_adl_pci8164.driver_name; return pci_register_driver(&driver_adl_pci8164_pci_driver); } static void __exit driver_adl_pci8164_cleanup_module(void) { pci_unregister_driver(&driver_adl_pci8164_pci_driver); comedi_driver_unregister(&driver_adl_pci8164); } module_init(driver_adl_pci8164_init_module); module_exit(driver_adl_pci8164_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
GaHoKwan/Samsung_kernel_klte_Lollipop
drivers/staging/comedi/drivers/addi-data/addi_common.c
4896
58903
/** @verbatim Copyright (C) 2004,2005 ADDI-DATA GmbH for the source code of this module. ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier Tel: +19(0)7223/9493-0 Fax: +49(0)7223/9493-92 http://www.addi-data.com info@addi-data.com This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA You should also find the complete GPL in the COPYING file accompanying this source code. @endverbatim */ /* +-----------------------------------------------------------------------+ | (C) ADDI-DATA GmbH Dieselstrasse 3 D-77833 Ottersweier | +-----------------------------------------------------------------------+ | Tel : +49 (0) 7223/9493-0 | email : info@addi-data.com | | Fax : +49 (0) 7223/9493-92 | Internet : http://www.addi-data.com | +-----------------------------------------------------------------------+ | Project : ADDI DATA | Compiler : GCC | | Modulname : addi_common.c | Version : 2.96 | +-------------------------------+---------------------------------------+ | Author : | Date : | +-----------------------------------------------------------------------+ | Description : ADDI COMMON Main Module | +-----------------------------------------------------------------------+ | CONFIG OPTIONS | | option[0] - PCI bus number - if bus number and slot number are 0, | | then driver search for first unused card | | option[1] - PCI slot number | | | | option[2] = 0 - DMA ENABLE | | = 1 
  - DMA DISABLE |
  +----------+-----------+------------------------------------------------+
*/

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/timer.h>
#include <linux/pci.h>
#include <linux/gfp.h>
#include <linux/io.h>

#include "../../comedidev.h"

/* These board families use the x86 kernel FPU helpers (see fpu_begin/fpu_end
 * below), so pull in the FPU header only when one of them is configured. */
#if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300)
#include <asm/i387.h>
#endif

#include "../comedi_fc.h"
#include "addi_common.h"
#include "addi_amcc_s5933.h"

#ifndef ADDIDATA_DRIVER_NAME
#define ADDIDATA_DRIVER_NAME	"addi_common"
#endif

/* Update-0.7.57->0.7.68MODULE_AUTHOR("ADDI-DATA GmbH <info@addi-data.com>"); */
/* Update-0.7.57->0.7.68MODULE_DESCRIPTION("Comedi ADDI-DATA module"); */
/* Update-0.7.57->0.7.68MODULE_LICENSE("GPL"); */

/* Shorthand accessors for the comedi device's private data and its
 * board-description entry (the usual legacy comedi driver idiom). */
#define devpriv ((struct addi_private *)dev->private)
#define this_board ((const struct addi_board *)dev->board_ptr)

#if defined(CONFIG_APCI_1710) || defined(CONFIG_APCI_3200) || defined(CONFIG_APCI_3300)
/* BYTE b_SaveFPUReg [94]; */

/* Enter a kernel FPU section; must be paired with fpu_end(). */
void fpu_begin(void)
{
	/* asm ("fstenv b_SaveFPUReg"); */
	kernel_fpu_begin();
}

/* Leave a kernel FPU section opened by fpu_begin(). */
void fpu_end(void)
{
	/* asm ("frstor b_SaveFPUReg"); */
	kernel_fpu_end();
}
#endif

/* Historical ADDI-DATA layout: the per-board hardware drivers are compiled
 * directly into this translation unit; the kernel config selects which. */
#include "addi_eeprom.c"
#if (defined (CONFIG_APCI_3120) || defined (CONFIG_APCI_3001))
#include "hwdrv_apci3120.c"
#endif
#ifdef CONFIG_APCI_1032
#include "hwdrv_apci1032.c"
#endif
#ifdef CONFIG_APCI_1516
#include "hwdrv_apci1516.c"
#endif
#ifdef CONFIG_APCI_2016
#include "hwdrv_apci2016.c"
#endif
#ifdef CONFIG_APCI_2032
#include "hwdrv_apci2032.c"
#endif
#ifdef CONFIG_APCI_2200
#include "hwdrv_apci2200.c"
#endif
#ifdef CONFIG_APCI_1564
#include "hwdrv_apci1564.c"
#endif
#ifdef CONFIG_APCI_1500
#include "hwdrv_apci1500.c"
#endif
#ifdef CONFIG_APCI_3501
#include "hwdrv_apci3501.c"
#endif
#ifdef CONFIG_APCI_035
#include "hwdrv_apci035.c"
#endif
#if (defined (CONFIG_APCI_3200) || defined (CONFIG_APCI_3300))
#include "hwdrv_apci3200.c"
#endif
#ifdef CONFIG_APCI_1710
#include "hwdrv_APCI1710.c"
#endif
#ifdef CONFIG_APCI_16XX
#include "hwdrv_apci16xx.c"
#endif
#ifdef CONFIG_APCI_3XXX
#include "hwdrv_apci3xxx.c"
#endif

#ifndef COMEDI_SUBD_TTLIO
#define COMEDI_SUBD_TTLIO 11	/* Digital Input Output But TTL */
#endif

/* PCI IDs of all boards this driver can claim; each entry is guarded by the
 * matching config option so only configured boards are probed. */
static DEFINE_PCI_DEVICE_TABLE(addi_apci_tbl) = {
#ifdef CONFIG_APCI_3120
	{PCI_DEVICE(APCI3120_BOARD_VENDOR_ID, 0x818D)},
#endif
#ifdef CONFIG_APCI_1032
	{PCI_DEVICE(APCI1032_BOARD_VENDOR_ID, 0x1003)},
#endif
#ifdef CONFIG_APCI_1516
	{PCI_DEVICE(APCI1516_BOARD_VENDOR_ID, 0x1001)},
#endif
#ifdef CONFIG_APCI_2016
	{PCI_DEVICE(APCI2016_BOARD_VENDOR_ID, 0x1002)},
#endif
#ifdef CONFIG_APCI_2032
	{PCI_DEVICE(APCI2032_BOARD_VENDOR_ID, 0x1004)},
#endif
#ifdef CONFIG_APCI_2200
	{PCI_DEVICE(APCI2200_BOARD_VENDOR_ID, 0x1005)},
#endif
#ifdef CONFIG_APCI_1564
	{PCI_DEVICE(APCI1564_BOARD_VENDOR_ID, 0x1006)},
#endif
#ifdef CONFIG_APCI_1500
	{PCI_DEVICE(APCI1500_BOARD_VENDOR_ID, 0x80fc)},
#endif
#ifdef CONFIG_APCI_3001
	{PCI_DEVICE(APCI3120_BOARD_VENDOR_ID, 0x828D)},
#endif
#ifdef CONFIG_APCI_3501
	{PCI_DEVICE(APCI3501_BOARD_VENDOR_ID, 0x3001)},
#endif
#ifdef CONFIG_APCI_035
	{PCI_DEVICE(APCI035_BOARD_VENDOR_ID, 0x0300)},
#endif
#ifdef CONFIG_APCI_3200
	{PCI_DEVICE(APCI3200_BOARD_VENDOR_ID, 0x3000)},
#endif
#ifdef CONFIG_APCI_3300
	{PCI_DEVICE(APCI3200_BOARD_VENDOR_ID, 0x3007)},
#endif
#ifdef CONFIG_APCI_1710
	{PCI_DEVICE(APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID)},
#endif
#ifdef CONFIG_APCI_16XX
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x1009)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x100A)},
#endif
#ifdef CONFIG_APCI_3XXX
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3010)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300F)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300E)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3013)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3014)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3015)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3016)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3017)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3018)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3019)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301A)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301B)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301C)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301D)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301E)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x301F)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3020)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3021)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3022)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3023)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x300B)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3002)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3003)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3004)},
	{PCI_DEVICE(PCI_VENDOR_ID_ADDIDATA, 0x3024)},
#endif
	{0}
};

MODULE_DEVICE_TABLE(pci, addi_apci_tbl);

/*
 * Board description table: one positional struct addi_board initializer per
 * supported card.  The field order follows the struct addi_board declaration
 * in addi_common.h; NULL marks a capability/callback the board does not have.
 * NOTE(review): the entries are positional initializers — do not reorder or
 * insert fields without checking them against addi_common.h.
 */
static const struct addi_board boardtypes[] = {
#ifdef CONFIG_APCI_3120
	{"apci3120", APCI3120_BOARD_VENDOR_ID, 0x818D, AMCC_OP_REG_SIZE, APCI3120_ADDRESS_RANGE, 8, 0, ADDIDATA_NO_EEPROM, NULL, 16, 8, 16, 8, 0xffff, 0x3fff, &range_apci3120_ai, &range_apci3120_ao, 4, 4, 0x0f, 0, NULL, 1, 1, 1, 10000, 100000, v_APCI3120_Interrupt, i_APCI3120_Reset, i_APCI3120_InsnConfigAnalogInput, i_APCI3120_InsnReadAnalogInput, NULL, NULL, i_APCI3120_CommandTestAnalogInput, i_APCI3120_CommandAnalogInput, i_APCI3120_StopCyclicAcquisition, NULL, i_APCI3120_InsnWriteAnalogOutput, NULL, NULL, i_APCI3120_InsnReadDigitalInput, NULL, i_APCI3120_InsnBitsDigitalInput, i_APCI3120_InsnConfigDigitalOutput, i_APCI3120_InsnWriteDigitalOutput, i_APCI3120_InsnBitsDigitalOutput, NULL, i_APCI3120_InsnConfigTimer, i_APCI3120_InsnWriteTimer, i_APCI3120_InsnReadTimer, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_1032
	{"apci1032", APCI1032_BOARD_VENDOR_ID, 0x1003, 4, APCI1032_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 32, 0, 0, 0, NULL, 0, 0, 0, 0, 0, v_APCI1032_Interrupt, i_APCI1032_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1032_ConfigDigitalInput, i_APCI1032_Read1DigitalInput, NULL, i_APCI1032_ReadMoreDigitalInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_1516
	{"apci1516", APCI1516_BOARD_VENDOR_ID, 0x1001, 128, APCI1516_ADDRESS_RANGE, 32, 0, ADDIDATA_EEPROM, ADDIDATA_S5920, 0, 0, 0, 0, 0, 0, NULL, NULL, 8, 8, 0, 0, NULL, 0, 1, 0, 0, 0, NULL, i_APCI1516_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1516_Read1DigitalInput, NULL, i_APCI1516_ReadMoreDigitalInput, i_APCI1516_ConfigDigitalOutput, i_APCI1516_WriteDigitalOutput, i_APCI1516_ReadDigitalOutput, NULL, i_APCI1516_ConfigWatchdog, i_APCI1516_StartStopWriteWatchdog, i_APCI1516_ReadWatchdog, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_2016
	{"apci2016", APCI2016_BOARD_VENDOR_ID, 0x1002, 128, APCI2016_ADDRESS_RANGE, 32, 0, ADDIDATA_EEPROM, ADDIDATA_S5920, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 16, 0, 0, NULL, 0, 1, 0, 0, 0, NULL, i_APCI2016_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI2016_ConfigDigitalOutput, i_APCI2016_WriteDigitalOutput, i_APCI2016_BitsDigitalOutput, NULL, i_APCI2016_ConfigWatchdog, i_APCI2016_StartStopWriteWatchdog, i_APCI2016_ReadWatchdog, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_2032
	{"apci2032", APCI2032_BOARD_VENDOR_ID, 0x1004, 4, APCI2032_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 32, 0xffffffff, 0, NULL, 0, 1, 0, 0, 0, v_APCI2032_Interrupt, i_APCI2032_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI2032_ConfigDigitalOutput, i_APCI2032_WriteDigitalOutput, i_APCI2032_ReadDigitalOutput, i_APCI2032_ReadInterruptStatus, i_APCI2032_ConfigWatchdog, i_APCI2032_StartStopWriteWatchdog, i_APCI2032_ReadWatchdog, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_2200
	{"apci2200", APCI2200_BOARD_VENDOR_ID, 0x1005, 4, APCI2200_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 8, 16, 0, 0, NULL, 0, 1, 0, 0, 0, NULL, i_APCI2200_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI2200_Read1DigitalInput, NULL, i_APCI2200_ReadMoreDigitalInput, i_APCI2200_ConfigDigitalOutput, i_APCI2200_WriteDigitalOutput, i_APCI2200_ReadDigitalOutput, NULL, i_APCI2200_ConfigWatchdog, i_APCI2200_StartStopWriteWatchdog, i_APCI2200_ReadWatchdog, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_1564
	{"apci1564", APCI1564_BOARD_VENDOR_ID, 0x1006, 128, APCI1564_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_93C76, 0, 0, 0, 0, 0, 0, NULL, NULL, 32, 32, 0xffffffff, 0, NULL, 0, 1, 0, 0, 0, v_APCI1564_Interrupt, i_APCI1564_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1564_ConfigDigitalInput, i_APCI1564_Read1DigitalInput, NULL, i_APCI1564_ReadMoreDigitalInput, i_APCI1564_ConfigDigitalOutput, i_APCI1564_WriteDigitalOutput, i_APCI1564_ReadDigitalOutput, i_APCI1564_ReadInterruptStatus, i_APCI1564_ConfigTimerCounterWatchdog, i_APCI1564_StartStopWriteTimerCounterWatchdog, i_APCI1564_ReadTimerCounterWatchdog, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_1500
	{"apci1500", APCI1500_BOARD_VENDOR_ID, 0x80fc, 128, APCI1500_ADDRESS_RANGE, 4, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 16, 16, 0xffff, 0, NULL, 0, 1, 0, 0, 0, v_APCI1500_Interrupt, i_APCI1500_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI1500_ConfigDigitalInputEvent, i_APCI1500_Initialisation, i_APCI1500_StartStopInputEvent, i_APCI1500_ReadMoreDigitalInput, i_APCI1500_ConfigDigitalOutputErrorInterrupt, i_APCI1500_WriteDigitalOutput, i_APCI1500_ConfigureInterrupt, NULL, i_APCI1500_ConfigCounterTimerWatchdog, i_APCI1500_StartStopTriggerTimerCounterWatchdog, i_APCI1500_ReadInterruptMask, i_APCI1500_ReadCounterTimerWatchdog, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_3001
	{"apci3001", APCI3120_BOARD_VENDOR_ID, 0x828D, AMCC_OP_REG_SIZE, APCI3120_ADDRESS_RANGE, 8, 0, ADDIDATA_NO_EEPROM, NULL, 16, 8, 16, 0, 0xfff, 0, &range_apci3120_ai, NULL, 4, 4, 0x0f, 0, NULL, 1, 1, 1, 10000, 100000, v_APCI3120_Interrupt, i_APCI3120_Reset, i_APCI3120_InsnConfigAnalogInput, i_APCI3120_InsnReadAnalogInput, NULL, NULL, i_APCI3120_CommandTestAnalogInput, i_APCI3120_CommandAnalogInput, i_APCI3120_StopCyclicAcquisition, NULL, NULL, NULL, NULL, i_APCI3120_InsnReadDigitalInput, NULL, i_APCI3120_InsnBitsDigitalInput, i_APCI3120_InsnConfigDigitalOutput, i_APCI3120_InsnWriteDigitalOutput, i_APCI3120_InsnBitsDigitalOutput, NULL, i_APCI3120_InsnConfigTimer, i_APCI3120_InsnWriteTimer, i_APCI3120_InsnReadTimer, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_3501
	{"apci3501", APCI3501_BOARD_VENDOR_ID, 0x3001, 64, APCI3501_ADDRESS_RANGE, 0, 0, ADDIDATA_EEPROM, ADDIDATA_S5933, 0, 0, 0, 8, 0, 16383, NULL, &range_apci3501_ao, 2, 2, 0x3, 0, NULL, 0, 1, 0, 0, 0, v_APCI3501_Interrupt, i_APCI3501_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3501_ConfigAnalogOutput, i_APCI3501_WriteAnalogOutput, NULL, NULL, NULL, NULL, i_APCI3501_ReadDigitalInput, i_APCI3501_ConfigDigitalOutput, i_APCI3501_WriteDigitalOutput, i_APCI3501_ReadDigitalOutput, NULL, i_APCI3501_ConfigTimerCounterWatchdog, i_APCI3501_StartStopWriteTimerCounterWatchdog, i_APCI3501_ReadTimerCounterWatchdog, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_035
	{"apci035", APCI035_BOARD_VENDOR_ID, 0x0300, 127, APCI035_ADDRESS_RANGE, 0, 0, 1, ADDIDATA_S5920, 16, 8, 16, 0, 0xff, 0, &range_apci035_ai, NULL, 0, 0, 0, 0, NULL, 0, 1, 0, 10000, 100000, v_APCI035_Interrupt, i_APCI035_Reset, i_APCI035_ConfigAnalogInput, i_APCI035_ReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI035_ConfigTimerWatchdog, i_APCI035_StartStopWriteTimerWatchdog, i_APCI035_ReadTimerWatchdog, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_3200
	{"apci3200", APCI3200_BOARD_VENDOR_ID, 0x3000, 128, 256, 4, 4, ADDIDATA_EEPROM, ADDIDATA_S5920, 16, 8, 16, 0, 0x3ffff, 0, &range_apci3200_ai, NULL, 4, 4, 0, 0, NULL, 0, 0, 0, 10000, 100000, v_APCI3200_Interrupt, i_APCI3200_Reset, i_APCI3200_ConfigAnalogInput, i_APCI3200_ReadAnalogInput, i_APCI3200_InsnWriteReleaseAnalogInput, i_APCI3200_InsnBits_AnalogInput_Test, i_APCI3200_CommandTestAnalogInput, i_APCI3200_CommandAnalogInput, i_APCI3200_StopCyclicAcquisition, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3200_ReadDigitalInput, i_APCI3200_ConfigDigitalOutput, i_APCI3200_WriteDigitalOutput, i_APCI3200_ReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_3300
	/* Begin JK .20.10.2004 = APCI-3300 integration */
	{"apci3300", APCI3200_BOARD_VENDOR_ID, 0x3007, 128, 256, 4, 4, ADDIDATA_EEPROM, ADDIDATA_S5920, 0, 8, 8, 0, 0x3ffff, 0, &range_apci3300_ai, NULL, 4, 4, 0, 0, NULL, 0, 0, 0, 10000, 100000, v_APCI3200_Interrupt, i_APCI3200_Reset, i_APCI3200_ConfigAnalogInput, i_APCI3200_ReadAnalogInput, i_APCI3200_InsnWriteReleaseAnalogInput, i_APCI3200_InsnBits_AnalogInput_Test, i_APCI3200_CommandTestAnalogInput, i_APCI3200_CommandAnalogInput, i_APCI3200_StopCyclicAcquisition, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3200_ReadDigitalInput, i_APCI3200_ConfigDigitalOutput, i_APCI3200_WriteDigitalOutput, i_APCI3200_ReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_1710
	{"apci1710", APCI1710_BOARD_VENDOR_ID, APCI1710_BOARD_DEVICE_ID, 128, 8, 256, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 0, 0, 0, NULL, 0, 0, 0, 0, 0, v_APCI1710_Interrupt, i_APCI1710_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
#endif
#ifdef CONFIG_APCI_16XX
	{"apci1648", PCI_VENDOR_ID_ADDIDATA, 0x1009, 128, 0, 0, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 0, 0, 48, &range_apci16xx_ttl, 0, 0, 0, 0, 0, NULL, i_APCI16XX_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI16XX_InsnConfigInitTTLIO, i_APCI16XX_InsnBitsReadTTLIO, i_APCI16XX_InsnReadTTLIOAllPortValue, i_APCI16XX_InsnBitsWriteTTLIO},
	{"apci1696", PCI_VENDOR_ID_ADDIDATA, 0x100A, 128, 0, 0, 0, ADDIDATA_NO_EEPROM, NULL, 0, 0, 0, 0, 0, 0, NULL, NULL, 0, 0, 0, 96, &range_apci16xx_ttl, 0, 0, 0, 0, 0, NULL, i_APCI16XX_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI16XX_InsnConfigInitTTLIO, i_APCI16XX_InsnBitsReadTTLIO, i_APCI16XX_InsnReadTTLIOAllPortValue, i_APCI16XX_InsnBitsWriteTTLIO},
#endif
#ifdef CONFIG_APCI_3XXX
	{"apci3000-16", PCI_VENDOR_ID_ADDIDATA, 0x3010, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 4095, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3000-8", PCI_VENDOR_ID_ADDIDATA, 0x300F, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 4095, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3000-4", PCI_VENDOR_ID_ADDIDATA, 0x300E, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 4095, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3006-16", PCI_VENDOR_ID_ADDIDATA, 0x3013, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 65535, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3006-8", PCI_VENDOR_ID_ADDIDATA, 0x3014, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 65535, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3006-4", PCI_VENDOR_ID_ADDIDATA, 0x3015, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3010-16", PCI_VENDOR_ID_ADDIDATA, 0x3016, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 4095, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3010-8", PCI_VENDOR_ID_ADDIDATA, 0x3017, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 4095, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3010-4", PCI_VENDOR_ID_ADDIDATA, 0x3018, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 4095, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3016-16", PCI_VENDOR_ID_ADDIDATA, 0x3019, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3016-8", PCI_VENDOR_ID_ADDIDATA, 0x301A, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3016-4", PCI_VENDOR_ID_ADDIDATA, 0x301B, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 4, 2, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3100-16-4", PCI_VENDOR_ID_ADDIDATA, 0x301C, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3100-8-4", PCI_VENDOR_ID_ADDIDATA, 0x301D, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3106-16-4", PCI_VENDOR_ID_ADDIDATA, 0x301E, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3106-8-4", PCI_VENDOR_ID_ADDIDATA, 0x301F, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 6, 10000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3110-16-4", PCI_VENDOR_ID_ADDIDATA, 0x3020, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3110-8-4", PCI_VENDOR_ID_ADDIDATA, 0x3021, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 4095, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3116-16-4", PCI_VENDOR_ID_ADDIDATA, 0x3022, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 16, 8, 16, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3116-8-4", PCI_VENDOR_ID_ADDIDATA, 0x3023, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 8, 4, 8, 4, 65535, 4095, &range_apci3XXX_ai, &range_apci3XXX_ao, 4, 4, 1, 24, &range_apci3XXX_ttl, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO},
	{"apci3003", PCI_VENDOR_ID_ADDIDATA, 0x300B, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 4, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 7, 2500, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
	{"apci3002-16", PCI_VENDOR_ID_ADDIDATA, 0x3002, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 16, 16, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
	{"apci3002-8", PCI_VENDOR_ID_ADDIDATA, 0x3003, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 8, 8, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
	{"apci3002-4", PCI_VENDOR_ID_ADDIDATA, 0x3004, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 4, 4, 0, 65535, 0, &range_apci3XXX_ai, NULL, 4, 4, 1, 0, NULL, 0, 0, 6, 5000, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, i_APCI3XXX_InsnConfigAnalogInput, i_APCI3XXX_InsnReadAnalogInput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnReadDigitalInput, NULL, i_APCI3XXX_InsnBitsDigitalInput, NULL, i_APCI3XXX_InsnWriteDigitalOutput, i_APCI3XXX_InsnBitsDigitalOutput, i_APCI3XXX_InsnReadDigitalOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL},
	{"apci3500", PCI_VENDOR_ID_ADDIDATA, 0x3024, 256, 256, 256, 256, ADDIDATA_NO_EEPROM, ADDIDATA_9054, 0, 0, 0, 4, 0, 4095, NULL, &range_apci3XXX_ao, 0, 0, 0, 24, &range_apci3XXX_ttl, 0, 0, 0, 0, 0, v_APCI3XXX_Interrupt, i_APCI3XXX_Reset, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
NULL, i_APCI3XXX_InsnWriteAnalogOutput, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, i_APCI3XXX_InsnConfigInitTTLIO, i_APCI3XXX_InsnBitsTTLIO, i_APCI3XXX_InsnReadTTLIO, i_APCI3XXX_InsnWriteTTLIO}, #endif }; #define n_boardtypes (sizeof(boardtypes)/sizeof(struct addi_board)) static struct comedi_driver driver_addi = { .driver_name = ADDIDATA_DRIVER_NAME, .module = THIS_MODULE, .attach = i_ADDI_Attach, .detach = i_ADDI_Detach, .num_names = n_boardtypes, .board_name = &boardtypes[0].pc_DriverName, .offset = sizeof(struct addi_board), }; static int __devinit driver_addi_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_addi.driver_name); } static void __devexit driver_addi_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_addi_pci_driver = { .id_table = addi_apci_tbl, .probe = &driver_addi_pci_probe, .remove = __devexit_p(&driver_addi_pci_remove) }; static int __init driver_addi_init_module(void) { int retval; retval = comedi_driver_register(&driver_addi); if (retval < 0) return retval; driver_addi_pci_driver.name = (char *)driver_addi.driver_name; return pci_register_driver(&driver_addi_pci_driver); } static void __exit driver_addi_cleanup_module(void) { pci_unregister_driver(&driver_addi_pci_driver); comedi_driver_unregister(&driver_addi); } module_init(driver_addi_init_module); module_exit(driver_addi_cleanup_module); /* +----------------------------------------------------------------------------+ | Function name :static int i_ADDI_Attach(struct comedi_device *dev, | | struct comedi_devconfig *it) | | | +----------------------------------------------------------------------------+ | Task :Detects the card. | | Configure the driver for a particular board. | | This function does all the initializations and memory | | allocation of data structures for the driver. 
| +----------------------------------------------------------------------------+ | Input Parameters :struct comedi_device *dev | | struct comedi_devconfig *it | | | +----------------------------------------------------------------------------+ | Return Value : 0 | | | +----------------------------------------------------------------------------+ */ static int i_ADDI_Attach(struct comedi_device *dev, struct comedi_devconfig *it) { struct comedi_subdevice *s; int ret, pages, i, n_subdevices; unsigned int dw_Dummy; resource_size_t io_addr[5]; unsigned int irq; resource_size_t iobase_a, iobase_main, iobase_addon, iobase_reserved; struct pcilst_struct *card = NULL; unsigned char pci_bus, pci_slot, pci_func; int i_Dma = 0; ret = alloc_private(dev, sizeof(struct addi_private)); if (ret < 0) return -ENOMEM; if (!pci_list_builded) { v_pci_card_list_init(this_board->i_VendorId, 1); /* 1 for displaying the list.. */ pci_list_builded = 1; } /* printk("comedi%d: "ADDIDATA_DRIVER_NAME": board=%s",dev->minor,this_board->pc_DriverName); */ if ((this_board->i_Dma) && (it->options[2] == 0)) { i_Dma = 1; } card = ptr_select_and_alloc_pci_card(this_board->i_VendorId, this_board->i_DeviceId, it->options[0], it->options[1], i_Dma); if (card == NULL) return -EIO; devpriv->allocated = 1; if ((i_pci_card_data(card, &pci_bus, &pci_slot, &pci_func, &io_addr[0], &irq)) < 0) { i_pci_card_free(card); printk(" - Can't get AMCC data!\n"); return -EIO; } iobase_a = io_addr[0]; iobase_main = io_addr[1]; iobase_addon = io_addr[2]; iobase_reserved = io_addr[3]; printk("\nBus %d: Slot %d: Funct%d\nBase0: 0x%8llx\nBase1: 0x%8llx\nBase2: 0x%8llx\nBase3: 0x%8llx\n", pci_bus, pci_slot, pci_func, (unsigned long long)io_addr[0], (unsigned long long)io_addr[1], (unsigned long long)io_addr[2], (unsigned long long)io_addr[3]); if ((this_board->pc_EepromChip == NULL) || (strcmp(this_board->pc_EepromChip, ADDIDATA_9054) != 0)) { /************************************/ /* Test if more that 1 address used */ 
/************************************/ if (this_board->i_IorangeBase1 != 0) { dev->iobase = (unsigned long)iobase_main; /* DAQ base address... */ } else { dev->iobase = (unsigned long)iobase_a; /* DAQ base address... */ } dev->board_name = this_board->pc_DriverName; devpriv->amcc = card; devpriv->iobase = (int) dev->iobase; devpriv->i_IobaseAmcc = (int) iobase_a; /* AMCC base address... */ devpriv->i_IobaseAddon = (int) iobase_addon; /* ADD ON base address.... */ devpriv->i_IobaseReserved = (int) iobase_reserved; } else { dev->board_name = this_board->pc_DriverName; dev->iobase = (unsigned long)io_addr[2]; devpriv->amcc = card; devpriv->iobase = (int) io_addr[2]; devpriv->i_IobaseReserved = (int) io_addr[3]; printk("\nioremap begin"); devpriv->dw_AiBase = ioremap(io_addr[3], this_board->i_IorangeBase3); printk("\nioremap end"); } /* Initialize parameters that can be overridden in EEPROM */ devpriv->s_EeParameters.i_NbrAiChannel = this_board->i_NbrAiChannel; devpriv->s_EeParameters.i_NbrAoChannel = this_board->i_NbrAoChannel; devpriv->s_EeParameters.i_AiMaxdata = this_board->i_AiMaxdata; devpriv->s_EeParameters.i_AoMaxdata = this_board->i_AoMaxdata; devpriv->s_EeParameters.i_NbrDiChannel = this_board->i_NbrDiChannel; devpriv->s_EeParameters.i_NbrDoChannel = this_board->i_NbrDoChannel; devpriv->s_EeParameters.i_DoMaxdata = this_board->i_DoMaxdata; devpriv->s_EeParameters.i_Dma = this_board->i_Dma; devpriv->s_EeParameters.i_Timer = this_board->i_Timer; devpriv->s_EeParameters.ui_MinAcquisitiontimeNs = this_board->ui_MinAcquisitiontimeNs; devpriv->s_EeParameters.ui_MinDelaytimeNs = this_board->ui_MinDelaytimeNs; /* ## */ if (irq > 0) { if (request_irq(irq, v_ADDI_Interrupt, IRQF_SHARED, this_board->pc_DriverName, dev) < 0) { printk(", unable to allocate IRQ %u, DISABLING IT", irq); irq = 0; /* Can't use IRQ */ } else { printk("\nirq=%u", irq); } } else { printk(", IRQ disabled"); } printk("\nOption %d %d %d\n", it->options[0], it->options[1], it->options[2]); dev->irq 
= irq; /* Read eepeom and fill addi_board Structure */ if (this_board->i_PCIEeprom) { printk("\nPCI Eeprom used"); if (!(strcmp(this_board->pc_EepromChip, "S5920"))) { /* Set 3 wait stait */ if (!(strcmp(this_board->pc_DriverName, "apci035"))) { outl(0x80808082, devpriv->i_IobaseAmcc + 0x60); } else { outl(0x83838383, devpriv->i_IobaseAmcc + 0x60); } /* Enable the interrupt for the controller */ dw_Dummy = inl(devpriv->i_IobaseAmcc + 0x38); outl(dw_Dummy | 0x2000, devpriv->i_IobaseAmcc + 0x38); printk("\nEnable the interrupt for the controller"); } printk("\nRead Eeprom"); i_EepromReadMainHeader(io_addr[0], this_board->pc_EepromChip, dev); } else { printk("\nPCI Eeprom unused"); } if (it->options[2] > 0) { devpriv->us_UseDma = ADDI_DISABLE; } else { devpriv->us_UseDma = ADDI_ENABLE; } if (devpriv->s_EeParameters.i_Dma) { printk("\nDMA used"); if (devpriv->us_UseDma == ADDI_ENABLE) { /* alloc DMA buffers */ devpriv->b_DmaDoubleBuffer = 0; for (i = 0; i < 2; i++) { for (pages = 4; pages >= 0; pages--) { devpriv->ul_DmaBufferVirtual[i] = (void *) __get_free_pages(GFP_KERNEL, pages); if (devpriv->ul_DmaBufferVirtual[i]) break; } if (devpriv->ul_DmaBufferVirtual[i]) { devpriv->ui_DmaBufferPages[i] = pages; devpriv->ui_DmaBufferSize[i] = PAGE_SIZE * pages; devpriv->ui_DmaBufferSamples[i] = devpriv-> ui_DmaBufferSize[i] >> 1; devpriv->ul_DmaBufferHw[i] = virt_to_bus((void *)devpriv-> ul_DmaBufferVirtual[i]); } } if (!devpriv->ul_DmaBufferVirtual[0]) { printk (", Can't allocate DMA buffer, DMA disabled!"); devpriv->us_UseDma = ADDI_DISABLE; } if (devpriv->ul_DmaBufferVirtual[1]) { devpriv->b_DmaDoubleBuffer = 1; } } if ((devpriv->us_UseDma == ADDI_ENABLE)) { printk("\nDMA ENABLED\n"); } else { printk("\nDMA DISABLED\n"); } } if (!strcmp(this_board->pc_DriverName, "apci1710")) { #ifdef CONFIG_APCI_1710 i_ADDI_AttachPCI1710(dev); /* save base address */ devpriv->s_BoardInfos.ui_Address = io_addr[2]; #endif } else { /* Update-0.7.57->0.7.68dev->n_subdevices = 7; */ 
n_subdevices = 7; ret = alloc_subdevices(dev, n_subdevices); if (ret < 0) return ret; /* Allocate and Initialise AI Subdevice Structures */ s = dev->subdevices + 0; if ((devpriv->s_EeParameters.i_NbrAiChannel) || (this_board->i_NbrAiChannelDiff)) { dev->read_subdev = s; s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_COMMON | SDF_GROUND | SDF_DIFF; if (devpriv->s_EeParameters.i_NbrAiChannel) { s->n_chan = devpriv->s_EeParameters.i_NbrAiChannel; devpriv->b_SingelDiff = 0; } else { s->n_chan = this_board->i_NbrAiChannelDiff; devpriv->b_SingelDiff = 1; } s->maxdata = devpriv->s_EeParameters.i_AiMaxdata; s->len_chanlist = this_board->i_AiChannelList; s->range_table = this_board->pr_AiRangelist; /* Set the initialisation flag */ devpriv->b_AiInitialisation = 1; s->insn_config = this_board->i_hwdrv_InsnConfigAnalogInput; s->insn_read = this_board->i_hwdrv_InsnReadAnalogInput; s->insn_write = this_board->i_hwdrv_InsnWriteAnalogInput; s->insn_bits = this_board->i_hwdrv_InsnBitsAnalogInput; s->do_cmdtest = this_board->i_hwdrv_CommandTestAnalogInput; s->do_cmd = this_board->i_hwdrv_CommandAnalogInput; s->cancel = this_board->i_hwdrv_CancelAnalogInput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise AO Subdevice Structures */ s = dev->subdevices + 1; if (devpriv->s_EeParameters.i_NbrAoChannel) { s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = devpriv->s_EeParameters.i_NbrAoChannel; s->maxdata = devpriv->s_EeParameters.i_AoMaxdata; s->len_chanlist = devpriv->s_EeParameters.i_NbrAoChannel; s->range_table = this_board->pr_AoRangelist; s->insn_config = this_board->i_hwdrv_InsnConfigAnalogOutput; s->insn_write = this_board->i_hwdrv_InsnWriteAnalogOutput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise DI Subdevice Structures */ s = dev->subdevices + 2; if (devpriv->s_EeParameters.i_NbrDiChannel) { s->type = COMEDI_SUBD_DI; s->subdev_flags = SDF_READABLE | SDF_GROUND | 
SDF_COMMON; s->n_chan = devpriv->s_EeParameters.i_NbrDiChannel; s->maxdata = 1; s->len_chanlist = devpriv->s_EeParameters.i_NbrDiChannel; s->range_table = &range_digital; s->io_bits = 0; /* all bits input */ s->insn_config = this_board->i_hwdrv_InsnConfigDigitalInput; s->insn_read = this_board->i_hwdrv_InsnReadDigitalInput; s->insn_write = this_board->i_hwdrv_InsnWriteDigitalInput; s->insn_bits = this_board->i_hwdrv_InsnBitsDigitalInput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise DO Subdevice Structures */ s = dev->subdevices + 3; if (devpriv->s_EeParameters.i_NbrDoChannel) { s->type = COMEDI_SUBD_DO; s->subdev_flags = SDF_READABLE | SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = devpriv->s_EeParameters.i_NbrDoChannel; s->maxdata = devpriv->s_EeParameters.i_DoMaxdata; s->len_chanlist = devpriv->s_EeParameters.i_NbrDoChannel; s->range_table = &range_digital; s->io_bits = 0xf; /* all bits output */ s->insn_config = this_board->i_hwdrv_InsnConfigDigitalOutput; /* for digital output memory.. 
*/ s->insn_write = this_board->i_hwdrv_InsnWriteDigitalOutput; s->insn_bits = this_board->i_hwdrv_InsnBitsDigitalOutput; s->insn_read = this_board->i_hwdrv_InsnReadDigitalOutput; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise Timer Subdevice Structures */ s = dev->subdevices + 4; if (devpriv->s_EeParameters.i_Timer) { s->type = COMEDI_SUBD_TIMER; s->subdev_flags = SDF_WRITEABLE | SDF_GROUND | SDF_COMMON; s->n_chan = 1; s->maxdata = 0; s->len_chanlist = 1; s->range_table = &range_digital; s->insn_write = this_board->i_hwdrv_InsnWriteTimer; s->insn_read = this_board->i_hwdrv_InsnReadTimer; s->insn_config = this_board->i_hwdrv_InsnConfigTimer; s->insn_bits = this_board->i_hwdrv_InsnBitsTimer; } else { s->type = COMEDI_SUBD_UNUSED; } /* Allocate and Initialise TTL */ s = dev->subdevices + 5; if (this_board->i_NbrTTLChannel) { s->type = COMEDI_SUBD_TTLIO; s->subdev_flags = SDF_WRITEABLE | SDF_READABLE | SDF_GROUND | SDF_COMMON; s->n_chan = this_board->i_NbrTTLChannel; s->maxdata = 1; s->io_bits = 0; /* all bits input */ s->len_chanlist = this_board->i_NbrTTLChannel; s->range_table = &range_digital; s->insn_config = this_board->i_hwdr_ConfigInitTTLIO; s->insn_bits = this_board->i_hwdr_ReadTTLIOBits; s->insn_read = this_board->i_hwdr_ReadTTLIOAllPortValue; s->insn_write = this_board->i_hwdr_WriteTTLIOChlOnOff; } else { s->type = COMEDI_SUBD_UNUSED; } /* EEPROM */ s = dev->subdevices + 6; if (this_board->i_PCIEeprom) { s->type = COMEDI_SUBD_MEMORY; s->subdev_flags = SDF_READABLE | SDF_INTERNAL; s->n_chan = 256; s->maxdata = 0xffff; s->insn_read = i_ADDIDATA_InsnReadEeprom; } else { s->type = COMEDI_SUBD_UNUSED; } } printk("\ni_ADDI_Attach end\n"); i_ADDI_Reset(dev); devpriv->b_ValidDriver = 1; return 0; } /* +----------------------------------------------------------------------------+ | Function name : static int i_ADDI_Detach(struct comedi_device *dev) | | | | | +----------------------------------------------------------------------------+ | Task : 
Deallocates resources of the addi_common driver | | Free the DMA buffers, unregister irq. | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | | | | +----------------------------------------------------------------------------+ | Return Value : 0 | | | +----------------------------------------------------------------------------+ */ static int i_ADDI_Detach(struct comedi_device *dev) { if (dev->private) { if (devpriv->b_ValidDriver) { i_ADDI_Reset(dev); } if (dev->irq) { free_irq(dev->irq, dev); } if ((this_board->pc_EepromChip == NULL) || (strcmp(this_board->pc_EepromChip, ADDIDATA_9054) != 0)) { if (devpriv->allocated) { i_pci_card_free(devpriv->amcc); } if (devpriv->ul_DmaBufferVirtual[0]) { free_pages((unsigned long)devpriv-> ul_DmaBufferVirtual[0], devpriv->ui_DmaBufferPages[0]); } if (devpriv->ul_DmaBufferVirtual[1]) { free_pages((unsigned long)devpriv-> ul_DmaBufferVirtual[1], devpriv->ui_DmaBufferPages[1]); } } else { iounmap(devpriv->dw_AiBase); if (devpriv->allocated) { i_pci_card_free(devpriv->amcc); } } if (pci_list_builded) { /* v_pci_card_list_cleanup(PCI_VENDOR_ID_AMCC); */ v_pci_card_list_cleanup(this_board->i_VendorId); pci_list_builded = 0; } } return 0; } /* +----------------------------------------------------------------------------+ | Function name : static int i_ADDI_Reset(struct comedi_device *dev) | | | +----------------------------------------------------------------------------+ | Task : Disables all interrupts, Resets digital output to low, | | Set all analog output to low | | | +----------------------------------------------------------------------------+ | Input Parameters : struct comedi_device *dev | | | | | +----------------------------------------------------------------------------+ | Return Value : 0 | | | +----------------------------------------------------------------------------+ */ static int i_ADDI_Reset(struct comedi_device *dev) { 
this_board->i_hwdrv_Reset(dev); return 0; } /* Interrupt function */ /* +----------------------------------------------------------------------------+ | Function name : | |static void v_ADDI_Interrupt(int irq, void *d) | | | +----------------------------------------------------------------------------+ | Task : Registerd interrupt routine | | | +----------------------------------------------------------------------------+ | Input Parameters : int irq | | | | | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ static irqreturn_t v_ADDI_Interrupt(int irq, void *d) { struct comedi_device *dev = d; this_board->v_hwdrv_Interrupt(irq, d); return IRQ_RETVAL(1); } /* EEPROM Read Function */ /* +----------------------------------------------------------------------------+ | Function name : | |INT i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | +----------------------------------------------------------------------------+ | Task : Read 256 words from EEPROM | | | +----------------------------------------------------------------------------+ | Input Parameters :(struct comedi_device *dev,struct comedi_subdevice *s, struct comedi_insn *insn,unsigned int *data) | | | | | +----------------------------------------------------------------------------+ | Return Value : | | | +----------------------------------------------------------------------------+ */ static int i_ADDIDATA_InsnReadEeprom(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { unsigned short w_Data; unsigned short w_Address; w_Address = CR_CHAN(insn->chanspec); /* address to be read as 0,1,2,3...255 */ w_Data = w_EepromReadWord(devpriv->i_IobaseAmcc, this_board->pc_EepromChip, 0x100 + (2 * w_Address)); data[0] = w_Data; /* multiplied by 2 bcozinput will 
be like 0,1,2...255 */ return insn->n; }
gpl-2.0
estiko/android_kernel_lenovo_a706_xtremeuv
drivers/staging/rtl8712/rtl871x_mlme.c
4896
55627
/****************************************************************************** * rtl871x_mlme.c * * Copyright(c) 2007 - 2010 Realtek Corporation. All rights reserved. * Linux device driver for RTL8192SU * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA * * Modifications for inclusion into the Linux staging tree are * Copyright(c) 2010 Larry Finger. All rights reserved. * * Contact information: * WLAN FAE <wlanfae@realtek.com> * Larry Finger <Larry.Finger@lwfinger.net> * ******************************************************************************/ #define _RTL871X_MLME_C_ #include "osdep_service.h" #include "drv_types.h" #include "recv_osdep.h" #include "xmit_osdep.h" #include "mlme_osdep.h" #include "sta_info.h" #include "wifi.h" #include "wlan_bssdef.h" static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len); static sint _init_mlme_priv(struct _adapter *padapter) { sint i; u8 *pbuf; struct wlan_network *pnetwork; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; memset((u8 *)pmlmepriv, 0, sizeof(struct mlme_priv)); pmlmepriv->nic_hdl = (u8 *)padapter; pmlmepriv->pscanned = NULL; pmlmepriv->fw_state = 0; pmlmepriv->cur_network.network.InfrastructureMode = Ndis802_11AutoUnknown; /* Maybe someday we should rename this variable to "active_mode"(Jeff)*/ pmlmepriv->passive_mode = 1; /* 1: active, 0: passive. 
*/ spin_lock_init(&(pmlmepriv->lock)); spin_lock_init(&(pmlmepriv->lock2)); _init_queue(&(pmlmepriv->free_bss_pool)); _init_queue(&(pmlmepriv->scanned_queue)); set_scanned_network_val(pmlmepriv, 0); memset(&pmlmepriv->assoc_ssid, 0, sizeof(struct ndis_802_11_ssid)); pbuf = _malloc(MAX_BSS_CNT * (sizeof(struct wlan_network))); if (pbuf == NULL) return _FAIL; pmlmepriv->free_bss_buf = pbuf; pnetwork = (struct wlan_network *)pbuf; for (i = 0; i < MAX_BSS_CNT; i++) { _init_listhead(&(pnetwork->list)); list_insert_tail(&(pnetwork->list), &(pmlmepriv->free_bss_pool.queue)); pnetwork++; } pmlmepriv->sitesurveyctrl.last_rx_pkts = 0; pmlmepriv->sitesurveyctrl.last_tx_pkts = 0; pmlmepriv->sitesurveyctrl.traffic_busy = false; /* allocate DMA-able/Non-Page memory for cmd_buf and rsp_buf */ r8712_init_mlme_timer(padapter); return _SUCCESS; } struct wlan_network *_r8712_alloc_network(struct mlme_priv *pmlmepriv) { unsigned long irqL; struct wlan_network *pnetwork; struct __queue *free_queue = &pmlmepriv->free_bss_pool; struct list_head *plist = NULL; if (_queue_empty(free_queue) == true) return NULL; spin_lock_irqsave(&free_queue->lock, irqL); plist = get_next(&(free_queue->queue)); pnetwork = LIST_CONTAINOR(plist , struct wlan_network, list); list_delete(&pnetwork->list); pnetwork->last_scanned = jiffies; pmlmepriv->num_of_scanned++; spin_unlock_irqrestore(&free_queue->lock, irqL); return pnetwork; } static void _free_network(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork) { u32 curr_time, delta_time; unsigned long irqL; struct __queue *free_queue = &(pmlmepriv->free_bss_pool); if (pnetwork == NULL) return; if (pnetwork->fixed == true) return; curr_time = jiffies; delta_time = (curr_time - (u32)pnetwork->last_scanned) / HZ; if (delta_time < SCANQUEUE_LIFETIME) return; spin_lock_irqsave(&free_queue->lock, irqL); list_delete(&pnetwork->list); list_insert_tail(&pnetwork->list, &free_queue->queue); pmlmepriv->num_of_scanned--; spin_unlock_irqrestore(&free_queue->lock, 
irqL); } static void _free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork) { struct __queue *free_queue = &pmlmepriv->free_bss_pool; if (pnetwork == NULL) return; if (pnetwork->fixed == true) return; list_delete(&pnetwork->list); list_insert_tail(&pnetwork->list, get_list_head(free_queue)); pmlmepriv->num_of_scanned--; } /* return the wlan_network with the matching addr Shall be calle under atomic context... to avoid possible racing condition... */ static struct wlan_network *_r8712_find_network(struct __queue *scanned_queue, u8 *addr) { unsigned long irqL; struct list_head *phead, *plist; struct wlan_network *pnetwork = NULL; u8 zero_addr[ETH_ALEN] = {0, 0, 0, 0, 0, 0}; if (!memcmp(zero_addr, addr, ETH_ALEN)) return NULL; spin_lock_irqsave(&scanned_queue->lock, irqL); phead = get_list_head(scanned_queue); plist = get_next(phead); while (plist != phead) { pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); plist = get_next(plist); if (!memcmp(addr, pnetwork->network.MacAddress, ETH_ALEN)) break; } spin_unlock_irqrestore(&scanned_queue->lock, irqL); return pnetwork; } static void _free_network_queue(struct _adapter *padapter) { unsigned long irqL; struct list_head *phead, *plist; struct wlan_network *pnetwork; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct __queue *scanned_queue = &pmlmepriv->scanned_queue; spin_lock_irqsave(&scanned_queue->lock, irqL); phead = get_list_head(scanned_queue); plist = get_next(phead); while (end_of_queue_search(phead, plist) == false) { pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); plist = get_next(plist); _free_network(pmlmepriv, pnetwork); } spin_unlock_irqrestore(&scanned_queue->lock, irqL); } sint r8712_if_up(struct _adapter *padapter) { sint res; if (padapter->bDriverStopped || padapter->bSurpriseRemoved || (check_fwstate(&padapter->mlmepriv, _FW_LINKED) == false)) { res = false; } else res = true; return res; } void r8712_generate_random_ibss(u8 *pibss) { u32 curtime = 
jiffies; pibss[0] = 0x02; /*in ad-hoc mode bit1 must set to 1 */ pibss[1] = 0x11; pibss[2] = 0x87; pibss[3] = (u8)(curtime & 0xff); pibss[4] = (u8)((curtime>>8) & 0xff); pibss[5] = (u8)((curtime>>16) & 0xff); } uint r8712_get_ndis_wlan_bssid_ex_sz(struct ndis_wlan_bssid_ex *bss) { uint t_len; t_len = sizeof(u32) + 6 * sizeof(unsigned long) + 2 + sizeof(struct ndis_802_11_ssid) + sizeof(u32) + sizeof(s32) + sizeof(enum NDIS_802_11_NETWORK_TYPE) + sizeof(struct NDIS_802_11_CONFIGURATION) + sizeof(enum NDIS_802_11_NETWORK_INFRASTRUCTURE) + sizeof(NDIS_802_11_RATES_EX) + sizeof(u32) + bss->IELength; return t_len; } u8 *r8712_get_capability_from_ie(u8 *ie) { return ie + 8 + 2; } int r8712_init_mlme_priv(struct _adapter *padapter) { return _init_mlme_priv(padapter); } void r8712_free_mlme_priv(struct mlme_priv *pmlmepriv) { kfree(pmlmepriv->free_bss_buf); } static struct wlan_network *alloc_network(struct mlme_priv *pmlmepriv) { return _r8712_alloc_network(pmlmepriv); } static void free_network_nolock(struct mlme_priv *pmlmepriv, struct wlan_network *pnetwork) { _free_network_nolock(pmlmepriv, pnetwork); } void r8712_free_network_queue(struct _adapter *dev) { _free_network_queue(dev); } /* return the wlan_network with the matching addr Shall be calle under atomic context... to avoid possible racing condition... 
*/ static struct wlan_network *r8712_find_network(struct __queue *scanned_queue, u8 *addr) { struct wlan_network *pnetwork = _r8712_find_network(scanned_queue, addr); return pnetwork; } int r8712_is_same_ibss(struct _adapter *adapter, struct wlan_network *pnetwork) { int ret = true; struct security_priv *psecuritypriv = &adapter->securitypriv; if ((psecuritypriv->PrivacyAlgrthm != _NO_PRIVACY_) && (pnetwork->network.Privacy == 0)) ret = false; else if ((psecuritypriv->PrivacyAlgrthm == _NO_PRIVACY_) && (pnetwork->network.Privacy == 1)) ret = false; else ret = true; return ret; } static int is_same_network(struct ndis_wlan_bssid_ex *src, struct ndis_wlan_bssid_ex *dst) { u16 s_cap, d_cap; memcpy((u8 *)&s_cap, r8712_get_capability_from_ie(src->IEs), 2); memcpy((u8 *)&d_cap, r8712_get_capability_from_ie(dst->IEs), 2); return (src->Ssid.SsidLength == dst->Ssid.SsidLength) && (src->Configuration.DSConfig == dst->Configuration.DSConfig) && ((!memcmp(src->MacAddress, dst->MacAddress, ETH_ALEN))) && ((!memcmp(src->Ssid.Ssid, dst->Ssid.Ssid, src->Ssid.SsidLength))) && ((s_cap & WLAN_CAPABILITY_IBSS) == (d_cap & WLAN_CAPABILITY_IBSS)) && ((s_cap & WLAN_CAPABILITY_BSS) == (d_cap & WLAN_CAPABILITY_BSS)); } struct wlan_network *r8712_get_oldest_wlan_network( struct __queue *scanned_queue) { struct list_head *plist, *phead; struct wlan_network *pwlan = NULL; struct wlan_network *oldest = NULL; phead = get_list_head(scanned_queue); plist = get_next(phead); while (1) { if (end_of_queue_search(phead, plist) == true) break; pwlan = LIST_CONTAINOR(plist, struct wlan_network, list); if (pwlan->fixed != true) { if (oldest == NULL || time_after((unsigned long)oldest->last_scanned, (unsigned long)pwlan->last_scanned)) oldest = pwlan; } plist = get_next(plist); } return oldest; } static void update_network(struct ndis_wlan_bssid_ex *dst, struct ndis_wlan_bssid_ex *src, struct _adapter *padapter) { u32 last_evm = 0, tmpVal; if (check_fwstate(&padapter->mlmepriv, _FW_LINKED) && 
is_same_network(&(padapter->mlmepriv.cur_network.network), src)) { if (padapter->recvpriv.signal_qual_data.total_num++ >= PHY_LINKQUALITY_SLID_WIN_MAX) { padapter->recvpriv.signal_qual_data.total_num = PHY_LINKQUALITY_SLID_WIN_MAX; last_evm = padapter->recvpriv.signal_qual_data. elements[padapter->recvpriv. signal_qual_data.index]; padapter->recvpriv.signal_qual_data.total_val -= last_evm; } padapter->recvpriv.signal_qual_data.total_val += src->Rssi; padapter->recvpriv.signal_qual_data. elements[padapter->recvpriv.signal_qual_data. index++] = src->Rssi; if (padapter->recvpriv.signal_qual_data.index >= PHY_LINKQUALITY_SLID_WIN_MAX) padapter->recvpriv.signal_qual_data.index = 0; /* <1> Showed on UI for user, in percentage. */ tmpVal = padapter->recvpriv.signal_qual_data.total_val / padapter->recvpriv.signal_qual_data.total_num; padapter->recvpriv.signal = (u8)tmpVal; src->Rssi = padapter->recvpriv.signal; } else src->Rssi = (src->Rssi + dst->Rssi) / 2; memcpy((u8 *)dst, (u8 *)src, r8712_get_ndis_wlan_bssid_ex_sz(src)); } static void update_current_network(struct _adapter *adapter, struct ndis_wlan_bssid_ex *pnetwork) { struct mlme_priv *pmlmepriv = &adapter->mlmepriv; if (is_same_network(&(pmlmepriv->cur_network.network), pnetwork)) { update_network(&(pmlmepriv->cur_network.network), pnetwork, adapter); r8712_update_protection(adapter, (pmlmepriv->cur_network.network.IEs) + sizeof(struct NDIS_802_11_FIXED_IEs), pmlmepriv->cur_network.network.IELength); } } /* Caller must hold pmlmepriv->lock first. 
*/ static void update_scanned_network(struct _adapter *adapter, struct ndis_wlan_bssid_ex *target) { struct list_head *plist, *phead; u32 bssid_ex_sz; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct __queue *queue = &pmlmepriv->scanned_queue; struct wlan_network *pnetwork = NULL; struct wlan_network *oldest = NULL; phead = get_list_head(queue); plist = get_next(phead); while (1) { if (end_of_queue_search(phead, plist) == true) break; pnetwork = LIST_CONTAINOR(plist, struct wlan_network, list); if (is_same_network(&pnetwork->network, target)) break; if ((oldest == ((struct wlan_network *)0)) || time_after((unsigned long)oldest->last_scanned, (unsigned long)pnetwork->last_scanned)) oldest = pnetwork; plist = get_next(plist); } /* If we didn't find a match, then get a new network slot to initialize * with this beacon's information */ if (end_of_queue_search(phead, plist) == true) { if (_queue_empty(&pmlmepriv->free_bss_pool) == true) { /* If there are no more slots, expire the oldest */ pnetwork = oldest; target->Rssi = (pnetwork->network.Rssi + target->Rssi) / 2; memcpy(&pnetwork->network, target, r8712_get_ndis_wlan_bssid_ex_sz(target)); pnetwork->last_scanned = jiffies; } else { /* Otherwise just pull from the free list */ /* update scan_time */ pnetwork = alloc_network(pmlmepriv); if (pnetwork == NULL) return; bssid_ex_sz = r8712_get_ndis_wlan_bssid_ex_sz(target); target->Length = bssid_ex_sz; memcpy(&pnetwork->network, target, bssid_ex_sz); list_insert_tail(&pnetwork->list, &queue->queue); } } else { /* we have an entry and we are going to update it. But * this entry may be already expired. 
In this case we * do the same as we found a new net and call the new_net * handler */ update_network(&pnetwork->network, target, adapter); pnetwork->last_scanned = jiffies; } } static void rtl8711_add_network(struct _adapter *adapter, struct ndis_wlan_bssid_ex *pnetwork) { unsigned long irqL; struct mlme_priv *pmlmepriv = &(((struct _adapter *)adapter)->mlmepriv); struct __queue *queue = &pmlmepriv->scanned_queue; spin_lock_irqsave(&queue->lock, irqL); update_current_network(adapter, pnetwork); update_scanned_network(adapter, pnetwork); spin_unlock_irqrestore(&queue->lock, irqL); } /*select the desired network based on the capability of the (i)bss. * check items: (1) security * (2) network_type * (3) WMM * (4) HT * (5) others */ static int is_desired_network(struct _adapter *adapter, struct wlan_network *pnetwork) { u8 wps_ie[512]; uint wps_ielen; int bselected = true; struct security_priv *psecuritypriv = &adapter->securitypriv; if (psecuritypriv->wps_phase == true) { if (r8712_get_wps_ie(pnetwork->network.IEs, pnetwork->network.IELength, wps_ie, &wps_ielen) == true) return true; else return false; } if ((psecuritypriv->PrivacyAlgrthm != _NO_PRIVACY_) && (pnetwork->network.Privacy == 0)) bselected = false; if (check_fwstate(&adapter->mlmepriv, WIFI_ADHOC_STATE) == true) { if (pnetwork->network.InfrastructureMode != adapter->mlmepriv.cur_network.network. 
InfrastructureMode) bselected = false; } return bselected; } /* TODO: Perry : For Power Management */ void r8712_atimdone_event_callback(struct _adapter *adapter , u8 *pbuf) { } void r8712_survey_event_callback(struct _adapter *adapter, u8 *pbuf) { unsigned long flags; u32 len; struct ndis_wlan_bssid_ex *pnetwork; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; pnetwork = (struct ndis_wlan_bssid_ex *)pbuf; #ifdef __BIG_ENDIAN /* endian_convert */ pnetwork->Length = le32_to_cpu(pnetwork->Length); pnetwork->Ssid.SsidLength = le32_to_cpu(pnetwork->Ssid.SsidLength); pnetwork->Privacy = le32_to_cpu(pnetwork->Privacy); pnetwork->Rssi = le32_to_cpu(pnetwork->Rssi); pnetwork->NetworkTypeInUse = le32_to_cpu(pnetwork->NetworkTypeInUse); pnetwork->Configuration.ATIMWindow = le32_to_cpu(pnetwork->Configuration.ATIMWindow); pnetwork->Configuration.BeaconPeriod = le32_to_cpu(pnetwork->Configuration.BeaconPeriod); pnetwork->Configuration.DSConfig = le32_to_cpu(pnetwork->Configuration.DSConfig); pnetwork->Configuration.FHConfig.DwellTime = le32_to_cpu(pnetwork->Configuration.FHConfig.DwellTime); pnetwork->Configuration.FHConfig.HopPattern = le32_to_cpu(pnetwork->Configuration.FHConfig.HopPattern); pnetwork->Configuration.FHConfig.HopSet = le32_to_cpu(pnetwork->Configuration.FHConfig.HopSet); pnetwork->Configuration.FHConfig.Length = le32_to_cpu(pnetwork->Configuration.FHConfig.Length); pnetwork->Configuration.Length = le32_to_cpu(pnetwork->Configuration.Length); pnetwork->InfrastructureMode = le32_to_cpu(pnetwork->InfrastructureMode); pnetwork->IELength = le32_to_cpu(pnetwork->IELength); #endif len = r8712_get_ndis_wlan_bssid_ex_sz(pnetwork); if (len > sizeof(struct wlan_bssid_ex)) return; spin_lock_irqsave(&pmlmepriv->lock2, flags); /* update IBSS_network 's timestamp */ if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) { if (!memcmp(&(pmlmepriv->cur_network.network.MacAddress), pnetwork->MacAddress, ETH_ALEN)) { struct wlan_network *ibss_wlan = NULL; 
memcpy(pmlmepriv->cur_network.network.IEs, pnetwork->IEs, 8); ibss_wlan = r8712_find_network( &pmlmepriv->scanned_queue, pnetwork->MacAddress); if (ibss_wlan) { memcpy(ibss_wlan->network.IEs, pnetwork->IEs, 8); goto exit; } } } /* lock pmlmepriv->lock when you accessing network_q */ if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == false) { if (pnetwork->Ssid.Ssid[0] != 0) rtl8711_add_network(adapter, pnetwork); else { pnetwork->Ssid.SsidLength = 8; memcpy(pnetwork->Ssid.Ssid, "<hidden>", 8); rtl8711_add_network(adapter, pnetwork); } } exit: spin_unlock_irqrestore(&pmlmepriv->lock2, flags); } void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf) { unsigned long irqL; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; spin_lock_irqsave(&pmlmepriv->lock, irqL); if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) { u8 timer_cancelled; _cancel_timer(&pmlmepriv->scan_to_timer, &timer_cancelled); _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY); } if (pmlmepriv->to_join == true) { if ((check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) { if (check_fwstate(pmlmepriv, _FW_LINKED) == false) { set_fwstate(pmlmepriv, _FW_UNDER_LINKING); if (r8712_select_and_join_from_scan(pmlmepriv) == _SUCCESS) _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT); else { struct wlan_bssid_ex *pdev_network = &(adapter->registrypriv.dev_network); u8 *pibss = adapter->registrypriv. 
dev_network.MacAddress; pmlmepriv->fw_state ^= _FW_UNDER_SURVEY; memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, sizeof(struct ndis_802_11_ssid)); r8712_update_registrypriv_dev_network (adapter); r8712_generate_random_ibss(pibss); pmlmepriv->fw_state = WIFI_ADHOC_MASTER_STATE; pmlmepriv->to_join = false; } } } else { pmlmepriv->to_join = false; set_fwstate(pmlmepriv, _FW_UNDER_LINKING); if (r8712_select_and_join_from_scan(pmlmepriv) == _SUCCESS) _set_timer(&pmlmepriv->assoc_timer, MAX_JOIN_TIMEOUT); else _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); } } spin_unlock_irqrestore(&pmlmepriv->lock, irqL); } /* *r8712_free_assoc_resources: the caller has to lock pmlmepriv->lock */ void r8712_free_assoc_resources(struct _adapter *adapter) { unsigned long irqL; struct wlan_network *pwlan = NULL; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct sta_priv *pstapriv = &adapter->stapriv; struct wlan_network *tgt_network = &pmlmepriv->cur_network; pwlan = r8712_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE|WIFI_AP_STATE)) { struct sta_info *psta; psta = r8712_get_stainfo(&adapter->stapriv, tgt_network->network.MacAddress); spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL); r8712_free_stainfo(adapter, psta); spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL); } if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE|WIFI_ADHOC_MASTER_STATE|WIFI_AP_STATE)) r8712_free_all_stainfo(adapter); if (pwlan) pwlan->fixed = false; if (((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE)) && (adapter->stapriv.asoc_sta_count == 1))) free_network_nolock(pmlmepriv, pwlan); } /* *r8712_indicate_connect: the caller has to lock pmlmepriv->lock */ void r8712_indicate_connect(struct _adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; pmlmepriv->to_join = false; set_fwstate(pmlmepriv, _FW_LINKED); 
padapter->ledpriv.LedControlHandler(padapter, LED_CTL_LINK); r8712_os_indicate_connect(padapter); if (padapter->registrypriv.power_mgnt > PS_MODE_ACTIVE) _set_timer(&pmlmepriv->dhcp_timer, 60000); } /* *r8712_ind_disconnect: the caller has to lock pmlmepriv->lock */ void r8712_ind_disconnect(struct _adapter *padapter) { struct mlme_priv *pmlmepriv = &padapter->mlmepriv; if (check_fwstate(pmlmepriv, _FW_LINKED) == true) { _clr_fwstate_(pmlmepriv, _FW_LINKED); padapter->ledpriv.LedControlHandler(padapter, LED_CTL_NO_LINK); r8712_os_indicate_disconnect(padapter); } if (padapter->pwrctrlpriv.pwr_mode != padapter->registrypriv.power_mgnt) { _cancel_timer_ex(&pmlmepriv->dhcp_timer); r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt, padapter->registrypriv.smart_ps); } } /*Notes: *pnetwork : returns from r8712_joinbss_event_callback *ptarget_wlan: found from scanned_queue *if join_res > 0, for (fw_state==WIFI_STATION_STATE), we check if * "ptarget_sta" & "ptarget_wlan" exist. *if join_res > 0, for (fw_state==WIFI_ADHOC_STATE), we only check * if "ptarget_wlan" exist. *if join_res > 0, update "cur_network->network" from * "pnetwork->network" if (ptarget_wlan !=NULL). 
*/ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf) { unsigned long irqL = 0, irqL2; u8 timer_cancelled; struct sta_info *ptarget_sta = NULL, *pcur_sta = NULL; struct sta_priv *pstapriv = &adapter->stapriv; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct wlan_network *cur_network = &pmlmepriv->cur_network; struct wlan_network *pcur_wlan = NULL, *ptarget_wlan = NULL; unsigned int the_same_macaddr = false; struct wlan_network *pnetwork; if (sizeof(struct list_head) == 4 * sizeof(u32)) { pnetwork = (struct wlan_network *) _malloc(sizeof(struct wlan_network)); memcpy((u8 *)pnetwork+16, (u8 *)pbuf + 8, sizeof(struct wlan_network) - 16); } else pnetwork = (struct wlan_network *)pbuf; #ifdef __BIG_ENDIAN /* endian_convert */ pnetwork->join_res = le32_to_cpu(pnetwork->join_res); pnetwork->network_type = le32_to_cpu(pnetwork->network_type); pnetwork->network.Length = le32_to_cpu(pnetwork->network.Length); pnetwork->network.Ssid.SsidLength = le32_to_cpu(pnetwork->network.Ssid.SsidLength); pnetwork->network.Privacy = le32_to_cpu(pnetwork->network.Privacy); pnetwork->network.Rssi = le32_to_cpu(pnetwork->network.Rssi); pnetwork->network.NetworkTypeInUse = le32_to_cpu(pnetwork->network.NetworkTypeInUse); pnetwork->network.Configuration.ATIMWindow = le32_to_cpu(pnetwork->network.Configuration.ATIMWindow); pnetwork->network.Configuration.BeaconPeriod = le32_to_cpu(pnetwork->network.Configuration.BeaconPeriod); pnetwork->network.Configuration.DSConfig = le32_to_cpu(pnetwork->network.Configuration.DSConfig); pnetwork->network.Configuration.FHConfig.DwellTime = le32_to_cpu(pnetwork->network.Configuration.FHConfig. DwellTime); pnetwork->network.Configuration.FHConfig.HopPattern = le32_to_cpu(pnetwork->network.Configuration. 
FHConfig.HopPattern); pnetwork->network.Configuration.FHConfig.HopSet = le32_to_cpu(pnetwork->network.Configuration.FHConfig.HopSet); pnetwork->network.Configuration.FHConfig.Length = le32_to_cpu(pnetwork->network.Configuration.FHConfig.Length); pnetwork->network.Configuration.Length = le32_to_cpu(pnetwork->network.Configuration.Length); pnetwork->network.InfrastructureMode = le32_to_cpu(pnetwork->network.InfrastructureMode); pnetwork->network.IELength = le32_to_cpu(pnetwork->network.IELength); #endif the_same_macaddr = !memcmp(pnetwork->network.MacAddress, cur_network->network.MacAddress, ETH_ALEN); pnetwork->network.Length = r8712_get_ndis_wlan_bssid_ex_sz(&pnetwork->network); spin_lock_irqsave(&pmlmepriv->lock, irqL); if (pnetwork->network.Length > sizeof(struct wlan_bssid_ex)) goto ignore_joinbss_callback; if (pnetwork->join_res > 0) { if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) { /*s1. find ptarget_wlan*/ if (check_fwstate(pmlmepriv, _FW_LINKED) == true) { if (the_same_macaddr == true) ptarget_wlan = r8712_find_network(&pmlmepriv-> scanned_queue, cur_network->network.MacAddress); else { pcur_wlan = r8712_find_network(&pmlmepriv-> scanned_queue, cur_network->network.MacAddress); pcur_wlan->fixed = false; pcur_sta = r8712_get_stainfo(pstapriv, cur_network->network.MacAddress); spin_lock_irqsave(&pstapriv-> sta_hash_lock, irqL2); r8712_free_stainfo(adapter, pcur_sta); spin_unlock_irqrestore(&(pstapriv-> sta_hash_lock), irqL2); ptarget_wlan = r8712_find_network(&pmlmepriv-> scanned_queue, pnetwork->network. MacAddress); if (ptarget_wlan) ptarget_wlan->fixed = true; } } else { ptarget_wlan = r8712_find_network(&pmlmepriv-> scanned_queue, pnetwork->network.MacAddress); if (ptarget_wlan) ptarget_wlan->fixed = true; } if (ptarget_wlan == NULL) { if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) pmlmepriv->fw_state ^= _FW_UNDER_LINKING; goto ignore_joinbss_callback; } /*s2. 
find ptarget_sta & update ptarget_sta*/ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)) { if (the_same_macaddr == true) { ptarget_sta = r8712_get_stainfo(pstapriv, pnetwork->network.MacAddress); if (ptarget_sta == NULL) ptarget_sta = r8712_alloc_stainfo(pstapriv, pnetwork->network.MacAddress); } else ptarget_sta = r8712_alloc_stainfo(pstapriv, pnetwork->network.MacAddress); if (ptarget_sta) /*update ptarget_sta*/ { ptarget_sta->aid = pnetwork->join_res; ptarget_sta->qos_option = 1; ptarget_sta->mac_id = 5; if (adapter->securitypriv. AuthAlgrthm == 2) { adapter->securitypriv. binstallGrpkey = false; adapter->securitypriv. busetkipkey = false; adapter->securitypriv. bgrpkey_handshake = false; ptarget_sta->ieee8021x_blocked = true; ptarget_sta->XPrivacy = adapter->securitypriv. PrivacyAlgrthm; memset((u8 *)&ptarget_sta-> x_UncstKey, 0, sizeof(union Keytype)); memset((u8 *)&ptarget_sta-> tkiprxmickey, 0, sizeof(union Keytype)); memset((u8 *)&ptarget_sta-> tkiptxmickey, 0, sizeof(union Keytype)); memset((u8 *)&ptarget_sta-> txpn, 0, sizeof(union pn48)); memset((u8 *)&ptarget_sta-> rxpn, 0, sizeof(union pn48)); } } else { if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING)) pmlmepriv->fw_state ^= _FW_UNDER_LINKING; goto ignore_joinbss_callback; } } /*s3. 
update cur_network & indicate connect*/ memcpy(&cur_network->network, &pnetwork->network, pnetwork->network.Length); cur_network->aid = pnetwork->join_res; /*update fw_state will clr _FW_UNDER_LINKING*/ switch (pnetwork->network.InfrastructureMode) { case Ndis802_11Infrastructure: pmlmepriv->fw_state = WIFI_STATION_STATE; break; case Ndis802_11IBSS: pmlmepriv->fw_state = WIFI_ADHOC_STATE; break; default: pmlmepriv->fw_state = WIFI_NULL_STATE; break; } r8712_update_protection(adapter, (cur_network->network.IEs) + sizeof(struct NDIS_802_11_FIXED_IEs), (cur_network->network.IELength)); /*TODO: update HT_Capability*/ update_ht_cap(adapter, cur_network->network.IEs, cur_network->network.IELength); /*indicate connect*/ if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) r8712_indicate_connect(adapter); _cancel_timer(&pmlmepriv->assoc_timer, &timer_cancelled); } else goto ignore_joinbss_callback; } else { if (check_fwstate(pmlmepriv, _FW_UNDER_LINKING) == true) { _set_timer(&pmlmepriv->assoc_timer, 1); _clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING); } } ignore_joinbss_callback: spin_unlock_irqrestore(&pmlmepriv->lock, irqL); if (sizeof(struct list_head) == 4 * sizeof(u32)) kfree((u8 *)pnetwork); } void r8712_stassoc_event_callback(struct _adapter *adapter, u8 *pbuf) { unsigned long irqL; struct sta_info *psta; struct mlme_priv *pmlmepriv = &(adapter->mlmepriv); struct stassoc_event *pstassoc = (struct stassoc_event *)pbuf; /* to do: */ if (r8712_access_ctrl(&adapter->acl_list, pstassoc->macaddr) == false) return; psta = r8712_get_stainfo(&adapter->stapriv, pstassoc->macaddr); if (psta != NULL) { /*the sta have been in sta_info_queue => do nothing *(between drv has received this event before and * fw have not yet to set key to CAM_ENTRY) */ return; } psta = r8712_alloc_stainfo(&adapter->stapriv, pstassoc->macaddr); if (psta == NULL) return; /* to do : init sta_info variable */ psta->qos_option = 0; psta->mac_id = le32_to_cpu((uint)pstassoc->cam_id); /* psta->aid = 
(uint)pstassoc->cam_id; */ if (adapter->securitypriv.AuthAlgrthm == 2) psta->XPrivacy = adapter->securitypriv.PrivacyAlgrthm; psta->ieee8021x_blocked = false; spin_lock_irqsave(&pmlmepriv->lock, irqL); if ((check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE) == true) || (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE) == true)) { if (adapter->stapriv.asoc_sta_count == 2) { /* a sta + bc/mc_stainfo (not Ibss_stainfo) */ r8712_indicate_connect(adapter); } } spin_unlock_irqrestore(&pmlmepriv->lock, irqL); } void r8712_stadel_event_callback(struct _adapter *adapter, u8 *pbuf) { unsigned long irqL, irqL2; struct sta_info *psta; struct wlan_network *pwlan = NULL; struct wlan_bssid_ex *pdev_network = NULL; u8 *pibss = NULL; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct stadel_event *pstadel = (struct stadel_event *)pbuf; struct sta_priv *pstapriv = &adapter->stapriv; struct wlan_network *tgt_network = &pmlmepriv->cur_network; spin_lock_irqsave(&pmlmepriv->lock, irqL2); if (check_fwstate(pmlmepriv, WIFI_STATION_STATE) == true) { r8712_ind_disconnect(adapter); r8712_free_assoc_resources(adapter); } if (check_fwstate(pmlmepriv, WIFI_ADHOC_MASTER_STATE | WIFI_ADHOC_STATE)) { psta = r8712_get_stainfo(&adapter->stapriv, pstadel->macaddr); spin_lock_irqsave(&pstapriv->sta_hash_lock, irqL); r8712_free_stainfo(adapter, psta); spin_unlock_irqrestore(&pstapriv->sta_hash_lock, irqL); if (adapter->stapriv.asoc_sta_count == 1) { /*a sta + bc/mc_stainfo (not Ibss_stainfo) */ pwlan = r8712_find_network(&pmlmepriv->scanned_queue, tgt_network->network.MacAddress); if (pwlan) { pwlan->fixed = false; free_network_nolock(pmlmepriv, pwlan); } /*re-create ibss*/ pdev_network = &(adapter->registrypriv.dev_network); pibss = adapter->registrypriv.dev_network.MacAddress; memcpy(pdev_network, &tgt_network->network, r8712_get_ndis_wlan_bssid_ex_sz(&tgt_network-> network)); memset(&pdev_network->Ssid, 0, sizeof(struct ndis_802_11_ssid)); memcpy(&pdev_network->Ssid, &pmlmepriv->assoc_ssid, 
sizeof(struct ndis_802_11_ssid));
	/* Tail of the station-delete handler (head on the previous chunk
	 * line): regenerate our own IBSS parameters and, if we were a plain
	 * ad-hoc member, promote ourselves to ad-hoc master.
	 */
			r8712_update_registrypriv_dev_network(adapter);
			r8712_generate_random_ibss(pibss);
			if (check_fwstate(pmlmepriv, WIFI_ADHOC_STATE)) {
				_clr_fwstate_(pmlmepriv, WIFI_ADHOC_STATE);
				set_fwstate(pmlmepriv,
					    WIFI_ADHOC_MASTER_STATE);
			}
		}
	}
	spin_unlock_irqrestore(&pmlmepriv->lock, irqL2);
}

/*
 * r8712_cpwm_event_callback - firmware power-state (CPWM) report handler.
 * Tag the reported state with the current cpwm toggle bit and hand it to
 * the power-management interrupt handler.
 */
void r8712_cpwm_event_callback(struct _adapter *adapter, u8 *pbuf)
{
	struct reportpwrstate_parm *preportpwrstate =
		(struct reportpwrstate_parm *)pbuf;

	preportpwrstate->state |= (u8)(adapter->pwrctrlpriv.cpwm_tog + 0x80);
	r8712_cpwm_int_hdl(adapter, preportpwrstate);
}

/* When the Netgear 3500 AP is with WPA2PSK-AES mode, it will send
 * the ADDBA req frame with start seq control = 0 to wifi client after
 * the WPA handshake and the seqence number of following data packet
 * will be 0. In this case, the Rx reorder sequence is not longer than 0
 * and the WiFi client will drop the data with seq number 0.
 * So, the 8712 firmware has to inform driver with receiving the
 * ADDBA-Req frame so that the driver can reset the
 * sequence value of Rx reorder contorl.
 */
void r8712_got_addbareq_event_callback(struct _adapter *adapter, u8 *pbuf)
{
	struct ADDBA_Req_Report_parm *pAddbareq_pram =
		(struct ADDBA_Req_Report_parm *)pbuf;
	struct sta_info *psta;
	struct sta_priv *pstapriv = &adapter->stapriv;
	struct recv_reorder_ctrl *precvreorder_ctrl = NULL;

	printk(KERN_INFO "r8712u: [%s] mac = %pM, seq = %d, tid = %d\n",
	       __func__, pAddbareq_pram->MacAddress,
	       pAddbareq_pram->StartSeqNum, pAddbareq_pram->tid);
	psta = r8712_get_stainfo(pstapriv, pAddbareq_pram->MacAddress);
	if (psta) {
		precvreorder_ctrl =
			 &psta->recvreorder_ctrl[pAddbareq_pram->tid];
		/* set the indicate_seq to 0xffff so that the rx reorder
		 * can store any following data packet.
*/
		precvreorder_ctrl->indicate_seq = 0xffff;
	}
}

/* WPS hardware push-button event: latch the "pressed" flag once. */
void r8712_wpspbc_event_callback(struct _adapter *adapter, u8 *pbuf)
{
	if (adapter->securitypriv.wps_hw_pbc_pressed == false)
		adapter->securitypriv.wps_hw_pbc_pressed = true;
}

/*
 * Periodic traffic sampler: compare the tx/rx packet counts accumulated
 * since the last call against the configured busy threshold and record
 * whether the link currently counts as "traffic busy".
 */
void _r8712_sitesurvey_ctrl_handler(struct _adapter *adapter)
{
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;
	struct sitesurvey_ctrl *psitesurveyctrl = &pmlmepriv->sitesurveyctrl;
	struct registry_priv *pregistrypriv = &adapter->registrypriv;
	u64 current_tx_pkts;
	uint current_rx_pkts;

	current_tx_pkts = (adapter->xmitpriv.tx_pkts) -
			  (psitesurveyctrl->last_tx_pkts);
	current_rx_pkts = (adapter->recvpriv.rx_pkts) -
			  (psitesurveyctrl->last_rx_pkts);
	psitesurveyctrl->last_tx_pkts = adapter->xmitpriv.tx_pkts;
	psitesurveyctrl->last_rx_pkts = adapter->recvpriv.rx_pkts;
	if ((current_tx_pkts > pregistrypriv->busy_thresh) ||
	    (current_rx_pkts > pregistrypriv->busy_thresh))
		psitesurveyctrl->traffic_busy = true;
	else
		psitesurveyctrl->traffic_busy = false;
}

/*
 * Association attempt timed out: drop the UNDER_LINKING state, tell the
 * OS the link is gone if we had been linked, then restore the configured
 * power-save mode.
 */
void _r8712_join_timeout_handler(struct _adapter *adapter)
{
	unsigned long irqL;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;

	if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
		return;
	spin_lock_irqsave(&pmlmepriv->lock, irqL);
	_clr_fwstate_(pmlmepriv, _FW_UNDER_LINKING);
	pmlmepriv->to_join = false;
	if (check_fwstate(pmlmepriv, _FW_LINKED) == true) {
		r8712_os_indicate_disconnect(adapter);
		_clr_fwstate_(pmlmepriv, _FW_LINKED);
	}
	if (adapter->pwrctrlpriv.pwr_mode !=
	    adapter->registrypriv.power_mgnt) {
		r8712_set_ps_mode(adapter, adapter->registrypriv.power_mgnt,
				  adapter->registrypriv.smart_ps);
	}
	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}

/* Site survey timed out: leave survey state and abandon any pending join. */
void r8712_scan_timeout_handler (struct _adapter *adapter)
{
	unsigned long irqL;
	struct mlme_priv *pmlmepriv = &adapter->mlmepriv;

	spin_lock_irqsave(&pmlmepriv->lock, irqL);
	_clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
	pmlmepriv->to_join = false; /* scan fail, so clear to_join flag */
	spin_unlock_irqrestore(&pmlmepriv->lock, irqL);
}

/* Return type of the next handler; its name and body continue on the
 * following chunk line.
 */
void
_r8712_dhcp_timeout_handler (struct _adapter *adapter)
{
	/* DHCP window elapsed: re-enter the configured power-save mode
	 * (it was deferred while the lease was being obtained).
	 */
	if (adapter->bDriverStopped || adapter->bSurpriseRemoved)
		return;
	if (adapter->pwrctrlpriv.pwr_mode !=
	    adapter->registrypriv.power_mgnt)
		r8712_set_ps_mode(adapter, adapter->registrypriv.power_mgnt,
				  adapter->registrypriv.smart_ps);
}

/* Watchdog timer tick: delegate to the watchdog work command. */
void _r8712_wdg_timeout_handler(struct _adapter *adapter)
{
	r8712_wdg_wk_cmd(adapter);
}

/*
 * r8712_select_and_join_from_scan - pick a BSS from the scanned queue and
 * issue a join command for it.
 * Selection honours, in order: an explicit BSSID (assoc_by_bssid), a
 * wildcard SSID (SsidLength == 0), and otherwise an SSID match -- either
 * the first acceptable hit or, with assoc_by_rssi, the strongest one.
 * Returns _FAIL when nothing matched, 2 when already joined to the same
 * network, else the result of r8712_joinbss_cmd().
 * (The function body continues on the following chunk line.)
 */
int r8712_select_and_join_from_scan(struct mlme_priv *pmlmepriv)
{
	struct list_head *phead;
	unsigned char *dst_ssid, *src_ssid;
	struct _adapter *adapter;
	struct __queue *queue = NULL;
	struct wlan_network *pnetwork = NULL;
	struct wlan_network *pnetwork_max_rssi = NULL;

	adapter = (struct _adapter *)pmlmepriv->nic_hdl;
	queue = &pmlmepriv->scanned_queue;
	phead = get_list_head(queue);
	pmlmepriv->pscanned = get_next(phead);
	while (1) {
		if (end_of_queue_search(phead, pmlmepriv->pscanned) == true) {
			/* Whole queue walked: fall back to the strongest
			 * candidate if RSSI-based selection collected one.
			 */
			if ((pmlmepriv->assoc_by_rssi == true) &&
			    (pnetwork_max_rssi != NULL)) {
				pnetwork = pnetwork_max_rssi;
				goto ask_for_joinbss;
			}
			return _FAIL;
		}
		pnetwork = LIST_CONTAINOR(pmlmepriv->pscanned,
					  struct wlan_network, list);
		if (pnetwork == NULL)
			return _FAIL;
		pmlmepriv->pscanned = get_next(pmlmepriv->pscanned);
		if (pmlmepriv->assoc_by_bssid == true) {
			dst_ssid = pnetwork->network.MacAddress;
			src_ssid = pmlmepriv->assoc_bssid;
			if (!memcmp(dst_ssid, src_ssid, ETH_ALEN)) {
				if (check_fwstate(pmlmepriv, _FW_LINKED)) {
					if (is_same_network(&pmlmepriv->
					    cur_network.network,
					    &pnetwork->network)) {
						_clr_fwstate_(pmlmepriv,
							_FW_UNDER_LINKING);
						/*r8712_indicate_connect
						 * again*/
						r8712_indicate_connect(
							adapter);
						return 2;
					}
					r8712_disassoc_cmd(adapter);
					r8712_ind_disconnect(adapter);
					r8712_free_assoc_resources(adapter);
				}
				goto ask_for_joinbss;
			}
		} else if (pmlmepriv->assoc_ssid.SsidLength == 0)
			goto ask_for_joinbss;
		dst_ssid = pnetwork->network.Ssid.Ssid;
		src_ssid = pmlmepriv->assoc_ssid.Ssid;
		if ((pnetwork->network.Ssid.SsidLength ==
		    pmlmepriv->assoc_ssid.SsidLength) &&
		    (!memcmp(dst_ssid, src_ssid,
pmlmepriv->assoc_ssid.SsidLength))) { if (pmlmepriv->assoc_by_rssi == true) { /* if the ssid is the same, select the bss * which has the max rssi*/ if (pnetwork_max_rssi) { if (pnetwork->network.Rssi > pnetwork_max_rssi->network.Rssi) pnetwork_max_rssi = pnetwork; } else pnetwork_max_rssi = pnetwork; } else if (is_desired_network(adapter, pnetwork)) { if (check_fwstate(pmlmepriv, _FW_LINKED)) { r8712_disassoc_cmd(adapter); r8712_free_assoc_resources(adapter); } goto ask_for_joinbss; } } } return _FAIL; ask_for_joinbss: return r8712_joinbss_cmd(adapter, pnetwork); } sint r8712_set_auth(struct _adapter *adapter, struct security_priv *psecuritypriv) { struct cmd_priv *pcmdpriv = &adapter->cmdpriv; struct cmd_obj *pcmd; struct setauth_parm *psetauthparm; sint ret = _SUCCESS; pcmd = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (pcmd == NULL) return _FAIL; psetauthparm = (struct setauth_parm *)_malloc( sizeof(struct setauth_parm)); if (psetauthparm == NULL) { kfree((unsigned char *)pcmd); return _FAIL; } memset(psetauthparm, 0, sizeof(struct setauth_parm)); psetauthparm->mode = (u8)psecuritypriv->AuthAlgrthm; pcmd->cmdcode = _SetAuth_CMD_; pcmd->parmbuf = (unsigned char *)psetauthparm; pcmd->cmdsz = sizeof(struct setauth_parm); pcmd->rsp = NULL; pcmd->rspsz = 0; _init_listhead(&pcmd->list); r8712_enqueue_cmd(pcmdpriv, pcmd); return ret; } sint r8712_set_key(struct _adapter *adapter, struct security_priv *psecuritypriv, sint keyid) { struct cmd_priv *pcmdpriv = &adapter->cmdpriv; struct cmd_obj *pcmd; struct setkey_parm *psetkeyparm; u8 keylen; pcmd = (struct cmd_obj *)_malloc(sizeof(struct cmd_obj)); if (pcmd == NULL) return _FAIL; psetkeyparm = (struct setkey_parm *)_malloc(sizeof(struct setkey_parm)); if (psetkeyparm == NULL) { kfree((unsigned char *)pcmd); return _FAIL; } memset(psetkeyparm, 0, sizeof(struct setkey_parm)); if (psecuritypriv->AuthAlgrthm == 2) { /* 802.1X */ psetkeyparm->algorithm = (u8)psecuritypriv->XGrpPrivacy; } else { /* WEP */ 
psetkeyparm->algorithm = (u8)psecuritypriv->PrivacyAlgrthm; } psetkeyparm->keyid = (u8)keyid; switch (psetkeyparm->algorithm) { case _WEP40_: keylen = 5; memcpy(psetkeyparm->key, psecuritypriv->DefKey[keyid].skey, keylen); break; case _WEP104_: keylen = 13; memcpy(psetkeyparm->key, psecuritypriv->DefKey[keyid].skey, keylen); break; case _TKIP_: if (keyid < 1 || keyid > 2) return _FAIL; keylen = 16; memcpy(psetkeyparm->key, &psecuritypriv->XGrpKey[keyid - 1], keylen); psetkeyparm->grpkey = 1; break; case _AES_: if (keyid < 1 || keyid > 2) return _FAIL; keylen = 16; memcpy(psetkeyparm->key, &psecuritypriv->XGrpKey[keyid - 1], keylen); psetkeyparm->grpkey = 1; break; default: return _FAIL; } pcmd->cmdcode = _SetKey_CMD_; pcmd->parmbuf = (u8 *)psetkeyparm; pcmd->cmdsz = (sizeof(struct setkey_parm)); pcmd->rsp = NULL; pcmd->rspsz = 0; _init_listhead(&pcmd->list); r8712_enqueue_cmd(pcmdpriv, pcmd); return _SUCCESS; } /* adjust IEs for r8712_joinbss_cmd in WMM */ int r8712_restruct_wmm_ie(struct _adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len, uint initial_out_len) { unsigned int ielength = 0; unsigned int i, j; i = 12; /* after the fixed IE */ while (i < in_len) { ielength = initial_out_len; if (in_ie[i] == 0xDD && in_ie[i + 2] == 0x00 && in_ie[i + 3] == 0x50 && in_ie[i + 4] == 0xF2 && in_ie[i + 5] == 0x02 && i + 5 < in_len) { /*WMM element ID and OUI*/ for (j = i; j < i + 9; j++) { out_ie[ielength] = in_ie[j]; ielength++; } out_ie[initial_out_len + 1] = 0x07; out_ie[initial_out_len + 6] = 0x00; out_ie[initial_out_len + 8] = 0x00; break; } i += (in_ie[i + 1] + 2); /* to the next IE element */ } return ielength; } /* * Ported from 8185: IsInPreAuthKeyList(). 
* * Search by BSSID, * Return Value: * -1 :if there is no pre-auth key in the table * >=0 :if there is pre-auth key, and return the entry id */ static int SecIsInPMKIDList(struct _adapter *Adapter, u8 *bssid) { struct security_priv *psecuritypriv = &Adapter->securitypriv; int i = 0; do { if (psecuritypriv->PMKIDList[i].bUsed && (!memcmp(psecuritypriv->PMKIDList[i].Bssid, bssid, ETH_ALEN))) break; else i++; } while (i < NUM_PMKID_CACHE); if (i == NUM_PMKID_CACHE) { i = -1; /* Could not find. */ } else { ; /* There is one Pre-Authentication Key for the * specific BSSID. */ } return i; } sint r8712_restruct_sec_ie(struct _adapter *adapter, u8 *in_ie, u8 *out_ie, uint in_len) { u8 authmode = 0, securitytype, match; u8 sec_ie[255], uncst_oui[4], bkup_ie[255]; u8 wpa_oui[4] = {0x0, 0x50, 0xf2, 0x01}; uint ielength, cnt, remove_cnt; int iEntry; struct mlme_priv *pmlmepriv = &adapter->mlmepriv; struct security_priv *psecuritypriv = &adapter->securitypriv; uint ndisauthmode = psecuritypriv->ndisauthtype; uint ndissecuritytype = psecuritypriv->ndisencryptstatus; if ((ndisauthmode == Ndis802_11AuthModeWPA) || (ndisauthmode == Ndis802_11AuthModeWPAPSK)) { authmode = _WPA_IE_ID_; uncst_oui[0] = 0x0; uncst_oui[1] = 0x50; uncst_oui[2] = 0xf2; } if ((ndisauthmode == Ndis802_11AuthModeWPA2) || (ndisauthmode == Ndis802_11AuthModeWPA2PSK)) { authmode = _WPA2_IE_ID_; uncst_oui[0] = 0x0; uncst_oui[1] = 0x0f; uncst_oui[2] = 0xac; } switch (ndissecuritytype) { case Ndis802_11Encryption1Enabled: case Ndis802_11Encryption1KeyAbsent: securitytype = _WEP40_; uncst_oui[3] = 0x1; break; case Ndis802_11Encryption2Enabled: case Ndis802_11Encryption2KeyAbsent: securitytype = _TKIP_; uncst_oui[3] = 0x2; break; case Ndis802_11Encryption3Enabled: case Ndis802_11Encryption3KeyAbsent: securitytype = _AES_; uncst_oui[3] = 0x4; break; default: securitytype = _NO_PRIVACY_; break; } /*Search required WPA or WPA2 IE and copy to sec_ie[] */ cnt = 12; match = false; while (cnt < in_len) { if (in_ie[cnt] == 
authmode) { if ((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt+2], &wpa_oui[0], 4))) { memcpy(&sec_ie[0], &in_ie[cnt], in_ie[cnt + 1] + 2); match = true; break; } if (authmode == _WPA2_IE_ID_) { memcpy(&sec_ie[0], &in_ie[cnt], in_ie[cnt + 1] + 2); match = true; break; } if (((authmode == _WPA_IE_ID_) && (!memcmp(&in_ie[cnt + 2], &wpa_oui[0], 4))) || (authmode == _WPA2_IE_ID_)) memcpy(&bkup_ie[0], &in_ie[cnt], in_ie[cnt + 1] + 2); } cnt += in_ie[cnt+1] + 2; /*get next*/ } /*restruct WPA IE or WPA2 IE in sec_ie[] */ if (match == true) { if (sec_ie[0] == _WPA_IE_ID_) { /* parsing SSN IE to select required encryption * algorithm, and set the bc/mc encryption algorithm */ while (true) { /*check wpa_oui tag*/ if (memcmp(&sec_ie[2], &wpa_oui[0], 4)) { match = false; break; } if ((sec_ie[6] != 0x01) || (sec_ie[7] != 0x0)) { /*IE Ver error*/ match = false; break; } if (!memcmp(&sec_ie[8], &wpa_oui[0], 3)) { /* get bc/mc encryption type (group * key type)*/ switch (sec_ie[11]) { case 0x0: /*none*/ psecuritypriv->XGrpPrivacy = _NO_PRIVACY_; break; case 0x1: /*WEP_40*/ psecuritypriv->XGrpPrivacy = _WEP40_; break; case 0x2: /*TKIP*/ psecuritypriv->XGrpPrivacy = _TKIP_; break; case 0x3: /*AESCCMP*/ case 0x4: psecuritypriv->XGrpPrivacy = _AES_; break; case 0x5: /*WEP_104*/ psecuritypriv->XGrpPrivacy = _WEP104_; break; } } else { match = false; break; } if (sec_ie[12] == 0x01) { /*check the unicast encryption type*/ if (memcmp(&sec_ie[14], &uncst_oui[0], 4)) { match = false; break; } /*else the uncst_oui is match*/ } else { /*mixed mode, unicast_enc_type > 1*/ /*select the uncst_oui and remove * the other uncst_oui*/ cnt = sec_ie[12]; remove_cnt = (cnt-1) * 4; sec_ie[12] = 0x01; memcpy(&sec_ie[14], &uncst_oui[0], 4); /*remove the other unicast suit*/ memcpy(&sec_ie[18], &sec_ie[18 + remove_cnt], sec_ie[1] - 18 + 2 - remove_cnt); sec_ie[1] = sec_ie[1] - remove_cnt; } break; } } if (authmode == _WPA2_IE_ID_) { /* parsing RSN IE to select required encryption * algorithm, and set 
the bc/mc encryption algorithm */ while (true) { if ((sec_ie[2] != 0x01) || (sec_ie[3] != 0x0)) { /*IE Ver error*/ match = false; break; } if (!memcmp(&sec_ie[4], &uncst_oui[0], 3)) { /*get bc/mc encryption type*/ switch (sec_ie[7]) { case 0x1: /*WEP_40*/ psecuritypriv->XGrpPrivacy = _WEP40_; break; case 0x2: /*TKIP*/ psecuritypriv->XGrpPrivacy = _TKIP_; break; case 0x4: /*AESWRAP*/ psecuritypriv->XGrpPrivacy = _AES_; break; case 0x5: /*WEP_104*/ psecuritypriv->XGrpPrivacy = _WEP104_; break; default: /*one*/ psecuritypriv->XGrpPrivacy = _NO_PRIVACY_; break; } } else { match = false; break; } if (sec_ie[8] == 0x01) { /*check the unicast encryption type*/ if (memcmp(&sec_ie[10], &uncst_oui[0], 4)) { match = false; break; } /*else the uncst_oui is match*/ } else { /*mixed mode, unicast_enc_type > 1*/ /*select the uncst_oui and remove the * other uncst_oui*/ cnt = sec_ie[8]; remove_cnt = (cnt-1)*4; sec_ie[8] = 0x01; memcpy(&sec_ie[10], &uncst_oui[0], 4); /*remove the other unicast suit*/ memcpy(&sec_ie[14], &sec_ie[14 + remove_cnt], (sec_ie[1] - 14 + 2 - remove_cnt)); sec_ie[1] = sec_ie[1]-remove_cnt; } break; } } } if ((authmode == _WPA_IE_ID_) || (authmode == _WPA2_IE_ID_)) { /*copy fixed ie*/ memcpy(out_ie, in_ie, 12); ielength = 12; /*copy RSN or SSN*/ if (match == true) { memcpy(&out_ie[ielength], &sec_ie[0], sec_ie[1]+2); ielength += sec_ie[1] + 2; if (authmode == _WPA2_IE_ID_) { /*the Pre-Authentication bit should be zero*/ out_ie[ielength - 1] = 0; out_ie[ielength - 2] = 0; } r8712_report_sec_ie(adapter, authmode, sec_ie); } } else { /*copy fixed ie only*/ memcpy(out_ie, in_ie, 12); ielength = 12; if (psecuritypriv->wps_phase == true) { memcpy(out_ie+ielength, psecuritypriv->wps_ie, psecuritypriv->wps_ie_len); ielength += psecuritypriv->wps_ie_len; } } iEntry = SecIsInPMKIDList(adapter, pmlmepriv->assoc_bssid); if (iEntry < 0) return ielength; else { if (authmode == _WPA2_IE_ID_) { out_ie[ielength] = 1; ielength++; out_ie[ielength] = 0; /*PMKID count = 0x0100*/ 
ielength++; memcpy(&out_ie[ielength], &psecuritypriv->PMKIDList[iEntry].PMKID, 16); ielength += 16; out_ie[13] += 18;/*PMKID length = 2+16*/ } } return ielength; } void r8712_init_registrypriv_dev_network(struct _adapter *adapter) { struct registry_priv *pregistrypriv = &adapter->registrypriv; struct eeprom_priv *peepriv = &adapter->eeprompriv; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; u8 *myhwaddr = myid(peepriv); memcpy(pdev_network->MacAddress, myhwaddr, ETH_ALEN); memcpy(&pdev_network->Ssid, &pregistrypriv->ssid, sizeof(struct ndis_802_11_ssid)); pdev_network->Configuration.Length = sizeof(struct NDIS_802_11_CONFIGURATION); pdev_network->Configuration.BeaconPeriod = 100; pdev_network->Configuration.FHConfig.Length = 0; pdev_network->Configuration.FHConfig.HopPattern = 0; pdev_network->Configuration.FHConfig.HopSet = 0; pdev_network->Configuration.FHConfig.DwellTime = 0; } void r8712_update_registrypriv_dev_network(struct _adapter *adapter) { int sz = 0; struct registry_priv *pregistrypriv = &adapter->registrypriv; struct wlan_bssid_ex *pdev_network = &pregistrypriv->dev_network; struct security_priv *psecuritypriv = &adapter->securitypriv; struct wlan_network *cur_network = &adapter->mlmepriv.cur_network; pdev_network->Privacy = cpu_to_le32(psecuritypriv->PrivacyAlgrthm > 0 ? 
1 : 0) ; /* adhoc no 802.1x */ pdev_network->Rssi = 0; switch (pregistrypriv->wireless_mode) { case WIRELESS_11B: pdev_network->NetworkTypeInUse = cpu_to_le32(Ndis802_11DS); break; case WIRELESS_11G: case WIRELESS_11BG: pdev_network->NetworkTypeInUse = cpu_to_le32(Ndis802_11OFDM24); break; case WIRELESS_11A: pdev_network->NetworkTypeInUse = cpu_to_le32(Ndis802_11OFDM5); break; default: /* TODO */ break; } pdev_network->Configuration.DSConfig = cpu_to_le32( pregistrypriv->channel); if (cur_network->network.InfrastructureMode == Ndis802_11IBSS) pdev_network->Configuration.ATIMWindow = cpu_to_le32(3); pdev_network->InfrastructureMode = cpu_to_le32( cur_network->network.InfrastructureMode); /* 1. Supported rates * 2. IE */ sz = r8712_generate_ie(pregistrypriv); pdev_network->IELength = sz; pdev_network->Length = r8712_get_ndis_wlan_bssid_ex_sz( (struct ndis_wlan_bssid_ex *)pdev_network); } /*the function is at passive_level*/ void r8712_joinbss_reset(struct _adapter *padapter) { int i; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; /* todo: if you want to do something io/reg/hw setting before join_bss, * please add code here */ phtpriv->ampdu_enable = false;/*reset to disabled*/ for (i = 0; i < 16; i++) phtpriv->baddbareq_issued[i] = false;/*reset it*/ if (phtpriv->ht_option) { /* validate usb rx aggregation */ r8712_write8(padapter, 0x102500D9, 48);/*TH = 48 pages, 6k*/ } else { /* invalidate usb rx aggregation */ /* TH=1 => means that invalidate usb rx aggregation */ r8712_write8(padapter, 0x102500D9, 1); } } /*the function is >= passive_level*/ unsigned int r8712_restructure_ht_ie(struct _adapter *padapter, u8 *in_ie, u8 *out_ie, uint in_len, uint *pout_len) { u32 ielen, out_len; unsigned char *p, *pframe; struct ieee80211_ht_cap ht_capie; unsigned char WMM_IE[] = {0x00, 0x50, 0xf2, 0x02, 0x00, 0x01, 0x00}; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct qos_priv *pqospriv = &pmlmepriv->qospriv; struct 
ht_priv *phtpriv = &pmlmepriv->htpriv; phtpriv->ht_option = 0; p = r8712_get_ie(in_ie+12, _HT_CAPABILITY_IE_, &ielen, in_len-12); if (p && (ielen > 0)) { if (pqospriv->qos_option == 0) { out_len = *pout_len; pframe = r8712_set_ie(out_ie+out_len, _VENDOR_SPECIFIC_IE_, _WMM_IE_Length_, WMM_IE, pout_len); pqospriv->qos_option = 1; } out_len = *pout_len; memset(&ht_capie, 0, sizeof(struct ieee80211_ht_cap)); ht_capie.cap_info = IEEE80211_HT_CAP_SUP_WIDTH | IEEE80211_HT_CAP_SGI_20 | IEEE80211_HT_CAP_SGI_40 | IEEE80211_HT_CAP_TX_STBC | IEEE80211_HT_CAP_MAX_AMSDU | IEEE80211_HT_CAP_DSSSCCK40; ht_capie.ampdu_params_info = (IEEE80211_HT_CAP_AMPDU_FACTOR & 0x03) | (IEEE80211_HT_CAP_AMPDU_DENSITY & 0x00); pframe = r8712_set_ie(out_ie+out_len, _HT_CAPABILITY_IE_, sizeof(struct ieee80211_ht_cap), (unsigned char *)&ht_capie, pout_len); phtpriv->ht_option = 1; } return phtpriv->ht_option; } /* the function is > passive_level (in critical_section) */ static void update_ht_cap(struct _adapter *padapter, u8 *pie, uint ie_len) { u8 *p, max_ampdu_sz; int i, len; struct sta_info *bmc_sta, *psta; struct ieee80211_ht_cap *pht_capie; struct ieee80211_ht_addt_info *pht_addtinfo; struct recv_reorder_ctrl *preorder_ctrl; struct mlme_priv *pmlmepriv = &padapter->mlmepriv; struct ht_priv *phtpriv = &pmlmepriv->htpriv; struct registry_priv *pregistrypriv = &padapter->registrypriv; struct wlan_network *pcur_network = &(pmlmepriv->cur_network); if (!phtpriv->ht_option) return; /* maybe needs check if ap supports rx ampdu. 
*/
	/* Enable A-MPDU aggregation when the registry asked for it. */
	if ((phtpriv->ampdu_enable == false) &&
	    (pregistrypriv->ampdu_enable == 1))
		phtpriv->ampdu_enable = true;
	/*check Max Rx A-MPDU Size*/
	len = 0;
	p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
			 _HT_CAPABILITY_IE_, &len,
			 ie_len - sizeof(struct NDIS_802_11_FIXED_IEs));
	if (p && len > 0) {
		pht_capie = (struct ieee80211_ht_cap *)(p+2);
		max_ampdu_sz = (pht_capie->ampdu_params_info &
			       IEEE80211_HT_CAP_AMPDU_FACTOR);
		/* max_ampdu_sz (kbytes); */
		max_ampdu_sz = 1 << (max_ampdu_sz+3);
		phtpriv->rx_ampdu_maxlen = max_ampdu_sz;
	}
	/* for A-MPDU Rx reordering buffer control for bmc_sta & sta_info
	 * if A-MPDU Rx is enabled, reseting rx_ordering_ctrl
	 * wstart_b(indicate_seq) to default value=0xffff
	 * todo: check if AP can send A-MPDU packets
	 */
	bmc_sta = r8712_get_bcmc_stainfo(padapter);
	if (bmc_sta) {
		for (i = 0; i < 16; i++) {
			preorder_ctrl = &bmc_sta->recvreorder_ctrl[i];
			preorder_ctrl->indicate_seq = 0xffff;
			preorder_ctrl->wend_b = 0xffff;
		}
	}
	psta = r8712_get_stainfo(&padapter->stapriv,
				 pcur_network->network.MacAddress);
	if (psta) {
		for (i = 0; i < 16 ; i++) {
			preorder_ctrl = &psta->recvreorder_ctrl[i];
			preorder_ctrl->indicate_seq = 0xffff;
			preorder_ctrl->wend_b = 0xffff;
		}
	}
	len = 0;
	p = r8712_get_ie(pie + sizeof(struct NDIS_802_11_FIXED_IEs),
			 _HT_ADD_INFO_IE_, &len,
			 ie_len-sizeof(struct NDIS_802_11_FIXED_IEs));
	if (p && len > 0)
		pht_addtinfo = (struct ieee80211_ht_addt_info *)(p + 2);
}

/*
 * r8712_issue_addbareq_cmd - queue an ADDBA request for @priority (TID).
 * Only issued while HT is active and A-MPDU is enabled; the
 * baddbareq_issued[] flag ensures the request is sent at most once per TID.
 */
void r8712_issue_addbareq_cmd(struct _adapter *padapter, int priority)
{
	struct mlme_priv *pmlmepriv = &padapter->mlmepriv;
	struct ht_priv *phtpriv = &pmlmepriv->htpriv;

	if ((phtpriv->ht_option == 1) &&
	    (phtpriv->ampdu_enable == true)) {
		if (phtpriv->baddbareq_issued[priority] == false) {
			r8712_addbareq_cmd(padapter, (u8)priority);
			phtpriv->baddbareq_issued[priority] = true;
		}
	}
}
gpl-2.0
Euphoria-OS/android_kernel_msm
arch/hexagon/kernel/vdso.c
7200
2541
/*
 * vDSO implementation for Hexagon
 *
 * Copyright (c) 2011, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
 * 02110-1301, USA.
 */

#include <linux/err.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/binfmts.h>

#include <asm/vdso.h>

/* The single page shared with every user process as its vDSO. */
static struct page *vdso_page;

/* Create a vDSO page holding the signal trampoline.
 * We want this for a non-executable stack.
 */
static int __init vdso_init(void)
{
	struct hexagon_vdso *vdso;

	vdso_page = alloc_page(GFP_KERNEL);
	if (!vdso_page)
		panic("Cannot allocate vdso");

	/* Map the page into kernel space temporarily so it can be filled. */
	vdso = vmap(&vdso_page, 1, 0, PAGE_KERNEL);
	if (!vdso)
		panic("Cannot map vdso");
	clear_page(vdso);

	/* Install the signal trampoline; currently looks like this:
	 *	r6 = #__NR_rt_sigreturn;
	 *	trap0(#1);
	 */
	vdso->rt_signal_trampoline[0] = __rt_sigtramp_template[0];
	vdso->rt_signal_trampoline[1] = __rt_sigtramp_template[1];

	vunmap(vdso);

	return 0;
}
arch_initcall(vdso_init);

/*
 * Called from binfmt_elf.  Create a VMA for the vDSO page.
 */
int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
{
	int ret;
	unsigned long vdso_base;
	struct mm_struct *mm = current->mm;

	down_write(&mm->mmap_sem);

	/* Try to get it loaded right near ld.so/glibc. */
	vdso_base = STACK_TOP;

	vdso_base = get_unmapped_area(NULL, vdso_base, PAGE_SIZE, 0, 0);
	if (IS_ERR_VALUE(vdso_base)) {
		/* NOTE(review): unsigned long error value narrowed into int
		 * ret -- benign for errno-range codes, but worth confirming.
		 */
		ret = vdso_base;
		goto up_fail;
	}

	/* MAYWRITE to allow gdb to COW and set breakpoints. */
	ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
				      VM_READ|VM_EXEC|
				      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
				      &vdso_page);

	if (ret)
		goto up_fail;

	mm->context.vdso = (void *)vdso_base;

up_fail:
	up_write(&mm->mmap_sem);
	return ret;
}

/* Report "[vdso]" in /proc/<pid>/maps for the vDSO mapping. */
const char *arch_vma_name(struct vm_area_struct *vma)
{
	if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
		return "[vdso]";
	return NULL;
}
gpl-2.0
cwabbott0/samsung-chromebook-kernel
arch/powerpc/sysdev/dcr.c
7456
6158
/* * (c) Copyright 2006 Benjamin Herrenschmidt, IBM Corp. * <benh@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #undef DEBUG #include <linux/kernel.h> #include <linux/export.h> #include <asm/prom.h> #include <asm/dcr.h> #ifdef CONFIG_PPC_DCR_MMIO static struct device_node *find_dcr_parent(struct device_node *node) { struct device_node *par, *tmp; const u32 *p; for (par = of_node_get(node); par;) { if (of_get_property(par, "dcr-controller", NULL)) break; p = of_get_property(par, "dcr-parent", NULL); tmp = par; if (p == NULL) par = of_get_parent(par); else par = of_find_node_by_phandle(*p); of_node_put(tmp); } return par; } #endif #if defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) bool dcr_map_ok_generic(dcr_host_t host) { if (host.type == DCR_HOST_NATIVE) return dcr_map_ok_native(host.host.native); else if (host.type == DCR_HOST_MMIO) return dcr_map_ok_mmio(host.host.mmio); else return 0; } EXPORT_SYMBOL_GPL(dcr_map_ok_generic); dcr_host_t dcr_map_generic(struct device_node *dev, unsigned int dcr_n, unsigned int dcr_c) { dcr_host_t host; struct device_node *dp; const char *prop; host.type = DCR_HOST_INVALID; dp = find_dcr_parent(dev); if (dp == NULL) return host; prop = of_get_property(dp, "dcr-access-method", NULL); pr_debug("dcr_map_generic(dcr-access-method = %s)\n", prop); if 
(!strcmp(prop, "native")) { host.type = DCR_HOST_NATIVE; host.host.native = dcr_map_native(dev, dcr_n, dcr_c); } else if (!strcmp(prop, "mmio")) { host.type = DCR_HOST_MMIO; host.host.mmio = dcr_map_mmio(dev, dcr_n, dcr_c); } of_node_put(dp); return host; } EXPORT_SYMBOL_GPL(dcr_map_generic); void dcr_unmap_generic(dcr_host_t host, unsigned int dcr_c) { if (host.type == DCR_HOST_NATIVE) dcr_unmap_native(host.host.native, dcr_c); else if (host.type == DCR_HOST_MMIO) dcr_unmap_mmio(host.host.mmio, dcr_c); else /* host.type == DCR_HOST_INVALID */ WARN_ON(true); } EXPORT_SYMBOL_GPL(dcr_unmap_generic); u32 dcr_read_generic(dcr_host_t host, unsigned int dcr_n) { if (host.type == DCR_HOST_NATIVE) return dcr_read_native(host.host.native, dcr_n); else if (host.type == DCR_HOST_MMIO) return dcr_read_mmio(host.host.mmio, dcr_n); else /* host.type == DCR_HOST_INVALID */ WARN_ON(true); return 0; } EXPORT_SYMBOL_GPL(dcr_read_generic); void dcr_write_generic(dcr_host_t host, unsigned int dcr_n, u32 value) { if (host.type == DCR_HOST_NATIVE) dcr_write_native(host.host.native, dcr_n, value); else if (host.type == DCR_HOST_MMIO) dcr_write_mmio(host.host.mmio, dcr_n, value); else /* host.type == DCR_HOST_INVALID */ WARN_ON(true); } EXPORT_SYMBOL_GPL(dcr_write_generic); #endif /* defined(CONFIG_PPC_DCR_NATIVE) && defined(CONFIG_PPC_DCR_MMIO) */ unsigned int dcr_resource_start(const struct device_node *np, unsigned int index) { unsigned int ds; const u32 *dr = of_get_property(np, "dcr-reg", &ds); if (dr == NULL || ds & 1 || index >= (ds / 8)) return 0; return dr[index * 2]; } EXPORT_SYMBOL_GPL(dcr_resource_start); unsigned int dcr_resource_len(const struct device_node *np, unsigned int index) { unsigned int ds; const u32 *dr = of_get_property(np, "dcr-reg", &ds); if (dr == NULL || ds & 1 || index >= (ds / 8)) return 0; return dr[index * 2 + 1]; } EXPORT_SYMBOL_GPL(dcr_resource_len); #ifdef CONFIG_PPC_DCR_MMIO u64 of_translate_dcr_address(struct device_node *dev, unsigned int dcr_n, 
unsigned int *out_stride) { struct device_node *dp; const u32 *p; unsigned int stride; u64 ret = OF_BAD_ADDR; dp = find_dcr_parent(dev); if (dp == NULL) return OF_BAD_ADDR; /* Stride is not properly defined yet, default to 0x10 for Axon */ p = of_get_property(dp, "dcr-mmio-stride", NULL); stride = (p == NULL) ? 0x10 : *p; /* XXX FIXME: Which property name is to use of the 2 following ? */ p = of_get_property(dp, "dcr-mmio-range", NULL); if (p == NULL) p = of_get_property(dp, "dcr-mmio-space", NULL); if (p == NULL) goto done; /* Maybe could do some better range checking here */ ret = of_translate_address(dp, p); if (ret != OF_BAD_ADDR) ret += (u64)(stride) * (u64)dcr_n; if (out_stride) *out_stride = stride; done: of_node_put(dp); return ret; } dcr_host_mmio_t dcr_map_mmio(struct device_node *dev, unsigned int dcr_n, unsigned int dcr_c) { dcr_host_mmio_t ret = { .token = NULL, .stride = 0, .base = dcr_n }; u64 addr; pr_debug("dcr_map(%s, 0x%x, 0x%x)\n", dev->full_name, dcr_n, dcr_c); addr = of_translate_dcr_address(dev, dcr_n, &ret.stride); pr_debug("translates to addr: 0x%llx, stride: 0x%x\n", (unsigned long long) addr, ret.stride); if (addr == OF_BAD_ADDR) return ret; pr_debug("mapping 0x%x bytes\n", dcr_c * ret.stride); ret.token = ioremap(addr, dcr_c * ret.stride); if (ret.token == NULL) return ret; pr_debug("mapped at 0x%p -> base is 0x%p\n", ret.token, ret.token - dcr_n * ret.stride); ret.token -= dcr_n * ret.stride; return ret; } EXPORT_SYMBOL_GPL(dcr_map_mmio); void dcr_unmap_mmio(dcr_host_mmio_t host, unsigned int dcr_c) { dcr_host_mmio_t h = host; if (h.token == NULL) return; h.token += host.base * h.stride; iounmap(h.token); h.token = NULL; } EXPORT_SYMBOL_GPL(dcr_unmap_mmio); #endif /* defined(CONFIG_PPC_DCR_MMIO) */ #ifdef CONFIG_PPC_DCR_NATIVE DEFINE_SPINLOCK(dcr_ind_lock); #endif /* defined(CONFIG_PPC_DCR_NATIVE) */
gpl-2.0
Hundsbuah/SGP771_SGP712
sound/pci/aw2/aw2-tsl.c
14880
4092
/***************************************************************************** * * Copyright (C) 2008 Cedric Bregardis <cedric.bregardis@free.fr> and * Jean-Christian Hassler <jhassler@free.fr> * Copyright 1998 Emagic Soft- und Hardware GmbH * Copyright 2002 Martijn Sipkema * * This file is part of the Audiowerk2 ALSA driver * * The Audiowerk2 ALSA driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2. * * The Audiowerk2 ALSA driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the Audiowerk2 ALSA driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, * USA. 
* *****************************************************************************/ #define TSL_WS0 (1UL << 31) #define TSL_WS1 (1UL << 30) #define TSL_WS2 (1UL << 29) #define TSL_WS3 (1UL << 28) #define TSL_WS4 (1UL << 27) #define TSL_DIS_A1 (1UL << 24) #define TSL_SDW_A1 (1UL << 23) #define TSL_SIB_A1 (1UL << 22) #define TSL_SF_A1 (1UL << 21) #define TSL_LF_A1 (1UL << 20) #define TSL_BSEL_A1 (1UL << 17) #define TSL_DOD_A1 (1UL << 15) #define TSL_LOW_A1 (1UL << 14) #define TSL_DIS_A2 (1UL << 11) #define TSL_SDW_A2 (1UL << 10) #define TSL_SIB_A2 (1UL << 9) #define TSL_SF_A2 (1UL << 8) #define TSL_LF_A2 (1UL << 7) #define TSL_BSEL_A2 (1UL << 4) #define TSL_DOD_A2 (1UL << 2) #define TSL_LOW_A2 (1UL << 1) #define TSL_EOS (1UL << 0) /* Audiowerk8 hardware setup: */ /* WS0, SD4, TSL1 - Analog/ digital in */ /* WS1, SD0, TSL1 - Analog out #1, digital out */ /* WS2, SD2, TSL1 - Analog out #2 */ /* WS3, SD1, TSL2 - Analog out #3 */ /* WS4, SD3, TSL2 - Analog out #4 */ /* Audiowerk8 timing: */ /* Timeslot: | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | ... 
*/ /* A1_INPUT: */ /* SD4: <_ADC-L_>-------<_ADC-R_>-------< */ /* WS0: _______________/---------------\_ */ /* A1_OUTPUT: */ /* SD0: <_1-L___>-------<_1-R___>-------< */ /* WS1: _______________/---------------\_ */ /* SD2: >-------<_2-L___>-------<_2-R___> */ /* WS2: -------\_______________/--------- */ /* A2_OUTPUT: */ /* SD1: <_3-L___>-------<_3-R___>-------< */ /* WS3: _______________/---------------\_ */ /* SD3: >-------<_4-L___>-------<_4-R___> */ /* WS4: -------\_______________/--------- */ static int tsl1[8] = { 1 * TSL_SDW_A1 | 3 * TSL_BSEL_A1 | 0 * TSL_DIS_A1 | 0 * TSL_DOD_A1 | TSL_LF_A1, 1 * TSL_SDW_A1 | 2 * TSL_BSEL_A1 | 0 * TSL_DIS_A1 | 0 * TSL_DOD_A1, 0 * TSL_SDW_A1 | 3 * TSL_BSEL_A1 | 0 * TSL_DIS_A1 | 0 * TSL_DOD_A1, 0 * TSL_SDW_A1 | 2 * TSL_BSEL_A1 | 0 * TSL_DIS_A1 | 0 * TSL_DOD_A1, 1 * TSL_SDW_A1 | 1 * TSL_BSEL_A1 | 0 * TSL_DIS_A1 | 0 * TSL_DOD_A1 | TSL_WS1 | TSL_WS0, 1 * TSL_SDW_A1 | 0 * TSL_BSEL_A1 | 0 * TSL_DIS_A1 | 0 * TSL_DOD_A1 | TSL_WS1 | TSL_WS0, 0 * TSL_SDW_A1 | 1 * TSL_BSEL_A1 | 0 * TSL_DIS_A1 | 0 * TSL_DOD_A1 | TSL_WS1 | TSL_WS0, 0 * TSL_SDW_A1 | 0 * TSL_BSEL_A1 | 0 * TSL_DIS_A1 | 0 * TSL_DOD_A1 | TSL_WS1 | TSL_WS0 | TSL_SF_A1 | TSL_EOS, }; static int tsl2[8] = { 0 * TSL_SDW_A2 | 3 * TSL_BSEL_A2 | 2 * TSL_DOD_A2 | TSL_LF_A2, 0 * TSL_SDW_A2 | 2 * TSL_BSEL_A2 | 2 * TSL_DOD_A2, 0 * TSL_SDW_A2 | 3 * TSL_BSEL_A2 | 2 * TSL_DOD_A2, 0 * TSL_SDW_A2 | 2 * TSL_BSEL_A2 | 2 * TSL_DOD_A2, 0 * TSL_SDW_A2 | 1 * TSL_BSEL_A2 | 2 * TSL_DOD_A2 | TSL_WS2, 0 * TSL_SDW_A2 | 0 * TSL_BSEL_A2 | 2 * TSL_DOD_A2 | TSL_WS2, 0 * TSL_SDW_A2 | 1 * TSL_BSEL_A2 | 2 * TSL_DOD_A2 | TSL_WS2, 0 * TSL_SDW_A2 | 0 * TSL_BSEL_A2 | 2 * TSL_DOD_A2 | TSL_WS2 | TSL_EOS };
gpl-2.0
PsychoGame/gee-caf-kernel
sound/isa/gus/gus_timer.c
14880
5336
/* * Routines for Gravis UltraSound soundcards - Timers * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * * GUS have similar timers as AdLib (OPL2/OPL3 chips). * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/gus.h> /* * Timer 1 - 80us */ static int snd_gf1_timer1_start(struct snd_timer * timer) { unsigned long flags; unsigned char tmp; unsigned int ticks; struct snd_gus_card *gus; gus = snd_timer_chip(timer); spin_lock_irqsave(&gus->reg_lock, flags); ticks = timer->sticks; tmp = (gus->gf1.timer_enabled |= 4); snd_gf1_write8(gus, SNDRV_GF1_GB_ADLIB_TIMER_1, 256 - ticks); /* timer 1 count */ snd_gf1_write8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL, tmp); /* enable timer 1 IRQ */ snd_gf1_adlib_write(gus, 0x04, tmp >> 2); /* timer 2 start */ spin_unlock_irqrestore(&gus->reg_lock, flags); return 0; } static int snd_gf1_timer1_stop(struct snd_timer * timer) { unsigned long flags; unsigned char tmp; struct snd_gus_card *gus; gus = snd_timer_chip(timer); spin_lock_irqsave(&gus->reg_lock, flags); tmp = (gus->gf1.timer_enabled &= ~4); snd_gf1_write8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL, tmp); /* disable timer #1 */ spin_unlock_irqrestore(&gus->reg_lock, flags); return 0; } /* * Timer 2 - 320us */ static int snd_gf1_timer2_start(struct snd_timer * timer) { 
unsigned long flags; unsigned char tmp; unsigned int ticks; struct snd_gus_card *gus; gus = snd_timer_chip(timer); spin_lock_irqsave(&gus->reg_lock, flags); ticks = timer->sticks; tmp = (gus->gf1.timer_enabled |= 8); snd_gf1_write8(gus, SNDRV_GF1_GB_ADLIB_TIMER_2, 256 - ticks); /* timer 2 count */ snd_gf1_write8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL, tmp); /* enable timer 2 IRQ */ snd_gf1_adlib_write(gus, 0x04, tmp >> 2); /* timer 2 start */ spin_unlock_irqrestore(&gus->reg_lock, flags); return 0; } static int snd_gf1_timer2_stop(struct snd_timer * timer) { unsigned long flags; unsigned char tmp; struct snd_gus_card *gus; gus = snd_timer_chip(timer); spin_lock_irqsave(&gus->reg_lock, flags); tmp = (gus->gf1.timer_enabled &= ~8); snd_gf1_write8(gus, SNDRV_GF1_GB_SOUND_BLASTER_CONTROL, tmp); /* disable timer #1 */ spin_unlock_irqrestore(&gus->reg_lock, flags); return 0; } /* */ static void snd_gf1_interrupt_timer1(struct snd_gus_card * gus) { struct snd_timer *timer = gus->gf1.timer1; if (timer == NULL) return; snd_timer_interrupt(timer, timer->sticks); } static void snd_gf1_interrupt_timer2(struct snd_gus_card * gus) { struct snd_timer *timer = gus->gf1.timer2; if (timer == NULL) return; snd_timer_interrupt(timer, timer->sticks); } /* */ static struct snd_timer_hardware snd_gf1_timer1 = { .flags = SNDRV_TIMER_HW_STOP, .resolution = 80000, .ticks = 256, .start = snd_gf1_timer1_start, .stop = snd_gf1_timer1_stop, }; static struct snd_timer_hardware snd_gf1_timer2 = { .flags = SNDRV_TIMER_HW_STOP, .resolution = 320000, .ticks = 256, .start = snd_gf1_timer2_start, .stop = snd_gf1_timer2_stop, }; static void snd_gf1_timer1_free(struct snd_timer *timer) { struct snd_gus_card *gus = timer->private_data; gus->gf1.timer1 = NULL; } static void snd_gf1_timer2_free(struct snd_timer *timer) { struct snd_gus_card *gus = timer->private_data; gus->gf1.timer2 = NULL; } void snd_gf1_timers_init(struct snd_gus_card * gus) { struct snd_timer *timer; struct snd_timer_id tid; if 
(gus->gf1.timer1 != NULL || gus->gf1.timer2 != NULL) return; gus->gf1.interrupt_handler_timer1 = snd_gf1_interrupt_timer1; gus->gf1.interrupt_handler_timer2 = snd_gf1_interrupt_timer2; tid.dev_class = SNDRV_TIMER_CLASS_CARD; tid.dev_sclass = SNDRV_TIMER_SCLASS_NONE; tid.card = gus->card->number; tid.device = gus->timer_dev; tid.subdevice = 0; if (snd_timer_new(gus->card, "GF1 timer", &tid, &timer) >= 0) { strcpy(timer->name, "GF1 timer #1"); timer->private_data = gus; timer->private_free = snd_gf1_timer1_free; timer->hw = snd_gf1_timer1; } gus->gf1.timer1 = timer; tid.device++; if (snd_timer_new(gus->card, "GF1 timer", &tid, &timer) >= 0) { strcpy(timer->name, "GF1 timer #2"); timer->private_data = gus; timer->private_free = snd_gf1_timer2_free; timer->hw = snd_gf1_timer2; } gus->gf1.timer2 = timer; } void snd_gf1_timers_done(struct snd_gus_card * gus) { snd_gf1_set_default_handlers(gus, SNDRV_GF1_HANDLER_TIMER1 | SNDRV_GF1_HANDLER_TIMER2); if (gus->gf1.timer1) { snd_device_free(gus->card, gus->gf1.timer1); gus->gf1.timer1 = NULL; } if (gus->gf1.timer2) { snd_device_free(gus->card, gus->gf1.timer2); gus->gf1.timer2 = NULL; } }
gpl-2.0
leafji/MYSQL_5.7
regex/regexec.c
33
4377
/* * the outer shell of regexec() * * This file includes engine.c *twice*, after muchos fiddling with the * macros that code uses. This lets the same code operate on two different * representations for state sets. */ #include <my_global.h> #include <m_string.h> #include <m_ctype.h> #ifdef _WIN32 #include <limits.h> #endif #include "my_regex.h" #include "utils.h" #include "regex2.h" /* for use in asserts */ #define nope 0 /* macros for manipulating states, small version */ #define states long #define states1 long /* for later use in regexec() decision. Ensure Win64 definition is correct.*/ #define CLEAR(v) ((v) = 0) #define SET0(v, n) ((v) &= ~((states) 1 << (n))) #define SET1(v, n) ((v) |= (states) 1 << (n)) #define ISSET(v, n) ((v) & ((states) 1 << (n))) #define ASSIGN(d, s) ((d) = (s)) #define EQ(a, b) ((a) == (b)) #define STATEVARS int dummy /* dummy version */ #define STATESETUP(m, n) /* nothing */ #define STATETEARDOWN(m) /* nothing */ #define SETUP(v) ((v) = 0) #define onestate long /* Changed from int by Monty */ #define INIT(o, n) ((o) = (unsigned states)1 << (n)) #define INC(o) ((o) <<= 1) #define ISSTATEIN(v, o) ((v) & (o)) /* some abbreviations; note that some of these know variable names! 
*/ /* do "if I'm here, I can also be there" etc without branches */ #define FWD(dst, src, n) ((dst) |= ((unsigned states)(src)&(here)) << (n)) #define BACK(dst, src, n) ((dst) |= ((unsigned states)(src)&(here)) >> (n)) #define ISSETBACK(v, n) ((v) & ((unsigned states)here >> (n))) /* function names */ #define SNAMES /* engine.c looks after details */ #include "engine.c" /* now undo things */ #undef states #undef CLEAR #undef SET0 #undef SET1 #undef ISSET #undef ASSIGN #undef EQ #undef STATEVARS #undef STATESETUP #undef STATETEARDOWN #undef SETUP #undef onestate #undef INIT #undef INC #undef ISSTATEIN #undef FWD #undef BACK #undef ISSETBACK #undef SNAMES /* macros for manipulating states, large version */ #define states char * #define CLEAR(v) memset(v, 0, m->g->nstates) #define SET0(v, n) ((v)[n] = 0) #define SET1(v, n) ((v)[n] = 1) #define ISSET(v, n) ((v)[n]) #define ASSIGN(d, s) memcpy(d, s, m->g->nstates) #define EQ(a, b) (memcmp(a, b, m->g->nstates) == 0) #define STATEVARS int vn; char *space #define STATESETUP(m, nv) { (m)->space = malloc((nv)*(m)->g->nstates); \ if ((m)->space == NULL) return(MY_REG_ESPACE); \ (m)->vn = 0; } #define STATETEARDOWN(m) { free((m)->space); } #define SETUP(v) ((v) = &m->space[m->vn++ * m->g->nstates]) #define onestate int #define INIT(o, n) ((o) = (n)) #define INC(o) ((o)++) #define ISSTATEIN(v, o) ((v)[o]) /* some abbreviations; note that some of these know variable names! 
*/ /* do "if I'm here, I can also be there" etc without branches */ #define FWD(dst, src, n) ((dst)[here+(n)] |= (src)[here]) #define BACK(dst, src, n) ((dst)[here-(n)] |= (src)[here]) #define ISSETBACK(v, n) ((v)[here - (n)]) /* function names */ #define LNAMES /* flag */ #include "engine.c" /* - regexec - interface for matching = extern int regexec(const regex_t *, const char *, size_t, \ = regmatch_t [], int); = #define MY_REG_NOTBOL 00001 = #define MY_REG_NOTEOL 00002 = #define MY_REG_STARTEND 00004 = #define MY_REG_TRACE 00400 // tracing of execution = #define MY_REG_LARGE 01000 // force large representation = #define MY_REG_BACKR 02000 // force use of backref code * * We put this here so we can exploit knowledge of the state representation * when choosing which matcher to call. Also, by this point the matchers * have been prototyped. */ int /* 0 success, MY_REG_NOMATCH failure */ my_regexec(preg, str, nmatch, pmatch, eflags) const my_regex_t *preg; const char *str; size_t nmatch; my_regmatch_t pmatch[]; int eflags; { char *pstr = (char *) str; struct re_guts *g = preg->re_g; #ifdef REDEBUG # define GOODFLAGS(f) (f) #else # define GOODFLAGS(f) ((f)&(MY_REG_NOTBOL|MY_REG_NOTEOL|MY_REG_STARTEND)) #endif if (preg->re_magic != MAGIC1 || g->magic != MAGIC2) return(MY_REG_BADPAT); assert(!(g->iflags&BAD)); if (g->iflags&BAD) /* backstop for no-debug case */ return(MY_REG_BADPAT); eflags = GOODFLAGS(eflags); if ((size_t) g->nstates <= CHAR_BIT*sizeof(states1) && !(eflags&MY_REG_LARGE)) return(smatcher(preg->charset, g, pstr, nmatch, pmatch, eflags)); else return(lmatcher(preg->charset, g, pstr, nmatch, pmatch, eflags)); }
gpl-2.0
Pivosgroup/buildroot-linux-kernel-m3
fs/jbd/commit.c
33
28955
/* * linux/fs/jbd/commit.c * * Written by Stephen C. Tweedie <sct@redhat.com>, 1998 * * Copyright 1998 Red Hat corp --- All Rights Reserved * * This file is part of the Linux kernel and is made available under * the terms of the GNU General Public License, version 2, or at your * option, any later version, incorporated herein by reference. * * Journal commit routines for the generic filesystem journaling code; * part of the ext2fs journaling system. */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/pagemap.h> #include <linux/bio.h> /* * Default IO end handler for temporary BJ_IO buffer_heads. */ static void journal_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { BUFFER_TRACE(bh, ""); if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); } /* * When an ext3-ordered file is truncated, it is possible that many pages are * not successfully freed, because they are attached to a committing transaction. * After the transaction commits, these pages are left on the LRU, with no * ->mapping, and with attached buffers. These pages are trivially reclaimable * by the VM, but their apparent absence upsets the VM accounting, and it makes * the numbers in /proc/meminfo look odd. * * So here, we have a buffer which has just come off the forget list. Look to * see if we can strip all buffers from the backing page. * * Called under journal->j_list_lock. The caller provided us with a ref * against the buffer, and we drop that here. 
*/ static void release_buffer_page(struct buffer_head *bh) { struct page *page; if (buffer_dirty(bh)) goto nope; if (atomic_read(&bh->b_count) != 1) goto nope; page = bh->b_page; if (!page) goto nope; if (page->mapping) goto nope; /* OK, it's a truncated page */ if (!trylock_page(page)) goto nope; page_cache_get(page); __brelse(bh); try_to_free_buffers(page); unlock_page(page); page_cache_release(page); return; nope: __brelse(bh); } /* * Decrement reference counter for data buffer. If it has been marked * 'BH_Freed', release it and the page to which it belongs if possible. */ static void release_data_buffer(struct buffer_head *bh) { if (buffer_freed(bh)) { clear_buffer_freed(bh); release_buffer_page(bh); } else put_bh(bh); } /* * Try to acquire jbd_lock_bh_state() against the buffer, when j_list_lock is * held. For ranking reasons we must trylock. If we lose, schedule away and * return 0. j_list_lock is dropped in this case. */ static int inverted_lock(journal_t *journal, struct buffer_head *bh) { if (!jbd_trylock_bh_state(bh)) { spin_unlock(&journal->j_list_lock); schedule(); return 0; } return 1; } /* Done it all: now write the commit record. We should have * cleaned up our previous buffers by now, so if we are in abort * mode we can now just skip the rest of the journal write * entirely. 
* * Returns 1 if the journal needs to be aborted or 0 on success */ static int journal_write_commit_record(journal_t *journal, transaction_t *commit_transaction) { struct journal_head *descriptor; struct buffer_head *bh; journal_header_t *header; int ret; int barrier_done = 0; if (is_journal_aborted(journal)) return 0; descriptor = journal_get_descriptor_buffer(journal); if (!descriptor) return 1; bh = jh2bh(descriptor); header = (journal_header_t *)(bh->b_data); header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER); header->h_blocktype = cpu_to_be32(JFS_COMMIT_BLOCK); header->h_sequence = cpu_to_be32(commit_transaction->t_tid); JBUFFER_TRACE(descriptor, "write commit block"); set_buffer_dirty(bh); if (journal->j_flags & JFS_BARRIER) { set_buffer_ordered(bh); barrier_done = 1; } ret = sync_dirty_buffer(bh); if (barrier_done) clear_buffer_ordered(bh); /* is it possible for another commit to fail at roughly * the same time as this one? If so, we don't want to * trust the barrier flag in the super, but instead want * to remember if we sent a barrier request */ if (ret == -EOPNOTSUPP && barrier_done) { char b[BDEVNAME_SIZE]; printk(KERN_WARNING "JBD: barrier-based sync failed on %s - " "disabling barriers\n", bdevname(journal->j_dev, b)); spin_lock(&journal->j_state_lock); journal->j_flags &= ~JFS_BARRIER; spin_unlock(&journal->j_state_lock); /* And try again, without the barrier */ set_buffer_uptodate(bh); set_buffer_dirty(bh); ret = sync_dirty_buffer(bh); } put_bh(bh); /* One for getblk() */ journal_put_journal_head(descriptor); return (ret == -EIO); } static void journal_do_submit_data(struct buffer_head **wbuf, int bufs, int write_op) { int i; for (i = 0; i < bufs; i++) { wbuf[i]->b_end_io = end_buffer_write_sync; /* We use-up our safety reference in submit_bh() */ submit_bh(write_op, wbuf[i]); } } /* * Submit all the data buffers to disk */ static int journal_submit_data_buffers(journal_t *journal, transaction_t *commit_transaction, int write_op) { struct journal_head 
*jh; struct buffer_head *bh; int locked; int bufs = 0; struct buffer_head **wbuf = journal->j_wbuf; int err = 0; /* * Whenever we unlock the journal and sleep, things can get added * onto ->t_sync_datalist, so we have to keep looping back to * write_out_data until we *know* that the list is empty. * * Cleanup any flushed data buffers from the data list. Even in * abort mode, we want to flush this out as soon as possible. */ write_out_data: cond_resched(); spin_lock(&journal->j_list_lock); while (commit_transaction->t_sync_datalist) { jh = commit_transaction->t_sync_datalist; bh = jh2bh(jh); locked = 0; /* Get reference just to make sure buffer does not disappear * when we are forced to drop various locks */ get_bh(bh); /* If the buffer is dirty, we need to submit IO and hence * we need the buffer lock. We try to lock the buffer without * blocking. If we fail, we need to drop j_list_lock and do * blocking lock_buffer(). */ if (buffer_dirty(bh)) { if (!trylock_buffer(bh)) { BUFFER_TRACE(bh, "needs blocking lock"); spin_unlock(&journal->j_list_lock); /* Write out all data to prevent deadlocks */ journal_do_submit_data(wbuf, bufs, write_op); bufs = 0; lock_buffer(bh); spin_lock(&journal->j_list_lock); } locked = 1; } /* We have to get bh_state lock. Again out of order, sigh. */ if (!inverted_lock(journal, bh)) { jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); } /* Someone already cleaned up the buffer? 
*/ if (!buffer_jbd(bh) || bh2jh(bh) != jh || jh->b_transaction != commit_transaction || jh->b_jlist != BJ_SyncData) { jbd_unlock_bh_state(bh); if (locked) unlock_buffer(bh); BUFFER_TRACE(bh, "already cleaned up"); release_data_buffer(bh); continue; } if (locked && test_clear_buffer_dirty(bh)) { BUFFER_TRACE(bh, "needs writeout, adding to array"); wbuf[bufs++] = bh; __journal_file_buffer(jh, commit_transaction, BJ_Locked); jbd_unlock_bh_state(bh); if (bufs == journal->j_wbufsize) { spin_unlock(&journal->j_list_lock); journal_do_submit_data(wbuf, bufs, write_op); bufs = 0; goto write_out_data; } } else if (!locked && buffer_locked(bh)) { __journal_file_buffer(jh, commit_transaction, BJ_Locked); jbd_unlock_bh_state(bh); put_bh(bh); } else { BUFFER_TRACE(bh, "writeout complete: unfile"); if (unlikely(!buffer_uptodate(bh))) err = -EIO; __journal_unfile_buffer(jh); jbd_unlock_bh_state(bh); if (locked) unlock_buffer(bh); journal_remove_journal_head(bh); /* One for our safety reference, other for * journal_remove_journal_head() */ put_bh(bh); release_data_buffer(bh); } if (need_resched() || spin_needbreak(&journal->j_list_lock)) { spin_unlock(&journal->j_list_lock); goto write_out_data; } } spin_unlock(&journal->j_list_lock); journal_do_submit_data(wbuf, bufs, write_op); return err; } /* * journal_commit_transaction * * The primary function for committing a transaction to the log. This * function is called by the journal thread to begin a complete commit. 
*/ void journal_commit_transaction(journal_t *journal) { transaction_t *commit_transaction; struct journal_head *jh, *new_jh, *descriptor; struct buffer_head **wbuf = journal->j_wbuf; int bufs; int flags; int err; unsigned int blocknr; ktime_t start_time; u64 commit_time; char *tagp = NULL; journal_header_t *header; journal_block_tag_t *tag = NULL; int space_left = 0; int first_tag = 0; int tag_flag; int i; int write_op = WRITE; /* * First job: lock down the current transaction and wait for * all outstanding updates to complete. */ #ifdef COMMIT_STATS spin_lock(&journal->j_list_lock); summarise_journal_usage(journal); spin_unlock(&journal->j_list_lock); #endif /* Do we need to erase the effects of a prior journal_flush? */ if (journal->j_flags & JFS_FLUSHED) { jbd_debug(3, "super block updated\n"); journal_update_superblock(journal, 1); } else { jbd_debug(3, "superblock not updated\n"); } J_ASSERT(journal->j_running_transaction != NULL); J_ASSERT(journal->j_committing_transaction == NULL); commit_transaction = journal->j_running_transaction; J_ASSERT(commit_transaction->t_state == T_RUNNING); jbd_debug(1, "JBD: starting commit of transaction %d\n", commit_transaction->t_tid); spin_lock(&journal->j_state_lock); commit_transaction->t_state = T_LOCKED; /* * Use plugged writes here, since we want to submit several before * we unplug the device. We don't do explicit unplugging in here, * instead we rely on sync_buffer() doing the unplug for us. 
*/ if (commit_transaction->t_synchronous_commit) write_op = WRITE_SYNC_PLUG; spin_lock(&commit_transaction->t_handle_lock); while (commit_transaction->t_updates) { DEFINE_WAIT(wait); prepare_to_wait(&journal->j_wait_updates, &wait, TASK_UNINTERRUPTIBLE); if (commit_transaction->t_updates) { spin_unlock(&commit_transaction->t_handle_lock); spin_unlock(&journal->j_state_lock); schedule(); spin_lock(&journal->j_state_lock); spin_lock(&commit_transaction->t_handle_lock); } finish_wait(&journal->j_wait_updates, &wait); } spin_unlock(&commit_transaction->t_handle_lock); J_ASSERT (commit_transaction->t_outstanding_credits <= journal->j_max_transaction_buffers); /* * First thing we are allowed to do is to discard any remaining * BJ_Reserved buffers. Note, it is _not_ permissible to assume * that there are no such buffers: if a large filesystem * operation like a truncate needs to split itself over multiple * transactions, then it may try to do a journal_restart() while * there are still BJ_Reserved buffers outstanding. These must * be released cleanly from the current transaction. * * In this case, the filesystem must still reserve write access * again before modifying the buffer in the new transaction, but * we do not require it to remember exactly which old buffers it * has reserved. This is consistent with the existing behaviour * that multiple journal_get_write_access() calls to the same * buffer are perfectly permissable. */ while (commit_transaction->t_reserved_list) { jh = commit_transaction->t_reserved_list; JBUFFER_TRACE(jh, "reserved, unused: refile"); /* * A journal_get_undo_access()+journal_release_buffer() may * leave undo-committed data. */ if (jh->b_committed_data) { struct buffer_head *bh = jh2bh(jh); jbd_lock_bh_state(bh); jbd_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; jbd_unlock_bh_state(bh); } journal_refile_buffer(journal, jh); } /* * Now try to drop any written-back buffers from the journal's * checkpoint lists. 
We do this *before* commit because it potentially * frees some memory */ spin_lock(&journal->j_list_lock); __journal_clean_checkpoint_list(journal); spin_unlock(&journal->j_list_lock); jbd_debug (3, "JBD: commit phase 1\n"); /* * Switch to a new revoke table. */ journal_switch_revoke_table(journal); commit_transaction->t_state = T_FLUSH; journal->j_committing_transaction = commit_transaction; journal->j_running_transaction = NULL; start_time = ktime_get(); commit_transaction->t_log_start = journal->j_head; wake_up(&journal->j_wait_transaction_locked); spin_unlock(&journal->j_state_lock); jbd_debug (3, "JBD: commit phase 2\n"); /* * Now start flushing things to disk, in the order they appear * on the transaction lists. Data blocks go first. */ err = journal_submit_data_buffers(journal, commit_transaction, write_op); /* * Wait for all previously submitted IO to complete. */ spin_lock(&journal->j_list_lock); while (commit_transaction->t_locked_list) { struct buffer_head *bh; jh = commit_transaction->t_locked_list->b_tprev; bh = jh2bh(jh); get_bh(bh); if (buffer_locked(bh)) { spin_unlock(&journal->j_list_lock); wait_on_buffer(bh); spin_lock(&journal->j_list_lock); } if (unlikely(!buffer_uptodate(bh))) { if (!trylock_page(bh->b_page)) { spin_unlock(&journal->j_list_lock); lock_page(bh->b_page); spin_lock(&journal->j_list_lock); } if (bh->b_page->mapping) set_bit(AS_EIO, &bh->b_page->mapping->flags); unlock_page(bh->b_page); SetPageError(bh->b_page); err = -EIO; } if (!inverted_lock(journal, bh)) { put_bh(bh); spin_lock(&journal->j_list_lock); continue; } if (buffer_jbd(bh) && bh2jh(bh) == jh && jh->b_transaction == commit_transaction && jh->b_jlist == BJ_Locked) { __journal_unfile_buffer(jh); jbd_unlock_bh_state(bh); journal_remove_journal_head(bh); put_bh(bh); } else { jbd_unlock_bh_state(bh); } release_data_buffer(bh); cond_resched_lock(&journal->j_list_lock); } spin_unlock(&journal->j_list_lock); if (err) { char b[BDEVNAME_SIZE]; printk(KERN_WARNING "JBD: Detected IO 
errors while flushing file data " "on %s\n", bdevname(journal->j_fs_dev, b)); if (journal->j_flags & JFS_ABORT_ON_SYNCDATA_ERR) journal_abort(journal, err); err = 0; } journal_write_revoke_records(journal, commit_transaction, write_op); /* * If we found any dirty or locked buffers, then we should have * looped back up to the write_out_data label. If there weren't * any then journal_clean_data_list should have wiped the list * clean by now, so check that it is in fact empty. */ J_ASSERT (commit_transaction->t_sync_datalist == NULL); jbd_debug (3, "JBD: commit phase 3\n"); /* * Way to go: we have now written out all of the data for a * transaction! Now comes the tricky part: we need to write out * metadata. Loop over the transaction's entire buffer list: */ spin_lock(&journal->j_state_lock); commit_transaction->t_state = T_COMMIT; spin_unlock(&journal->j_state_lock); J_ASSERT(commit_transaction->t_nr_buffers <= commit_transaction->t_outstanding_credits); descriptor = NULL; bufs = 0; while (commit_transaction->t_buffers) { /* Find the next buffer to be journaled... */ jh = commit_transaction->t_buffers; /* If we're in abort mode, we just un-journal the buffer and release it. */ if (is_journal_aborted(journal)) { clear_buffer_jbddirty(jh2bh(jh)); JBUFFER_TRACE(jh, "journal is aborting: refile"); journal_refile_buffer(journal, jh); /* If that was the last one, we need to clean up * any descriptor buffers which may have been * already allocated, even if we are now * aborting. */ if (!commit_transaction->t_buffers) goto start_journal_io; continue; } /* Make sure we have a descriptor block in which to record the metadata buffer. 
*/ if (!descriptor) { struct buffer_head *bh; J_ASSERT (bufs == 0); jbd_debug(4, "JBD: get descriptor\n"); descriptor = journal_get_descriptor_buffer(journal); if (!descriptor) { journal_abort(journal, -EIO); continue; } bh = jh2bh(descriptor); jbd_debug(4, "JBD: got buffer %llu (%p)\n", (unsigned long long)bh->b_blocknr, bh->b_data); header = (journal_header_t *)&bh->b_data[0]; header->h_magic = cpu_to_be32(JFS_MAGIC_NUMBER); header->h_blocktype = cpu_to_be32(JFS_DESCRIPTOR_BLOCK); header->h_sequence = cpu_to_be32(commit_transaction->t_tid); tagp = &bh->b_data[sizeof(journal_header_t)]; space_left = bh->b_size - sizeof(journal_header_t); first_tag = 1; set_buffer_jwrite(bh); set_buffer_dirty(bh); wbuf[bufs++] = bh; /* Record it so that we can wait for IO completion later */ BUFFER_TRACE(bh, "ph3: file as descriptor"); journal_file_buffer(descriptor, commit_transaction, BJ_LogCtl); } /* Where is the buffer to be written? */ err = journal_next_log_block(journal, &blocknr); /* If the block mapping failed, just abandon the buffer and repeat this loop: we'll fall into the refile-on-abort condition above. */ if (err) { journal_abort(journal, err); continue; } /* * start_this_handle() uses t_outstanding_credits to determine * the free space in the log, but this counter is changed * by journal_next_log_block() also. */ commit_transaction->t_outstanding_credits--; /* Bump b_count to prevent truncate from stumbling over the shadowed buffer! @@@ This can go if we ever get rid of the BJ_IO/BJ_Shadow pairing of buffers. */ atomic_inc(&jh2bh(jh)->b_count); /* Make a temporary IO buffer with which to write it out (this will requeue both the metadata buffer and the temporary IO buffer). new_bh goes on BJ_IO*/ set_bit(BH_JWrite, &jh2bh(jh)->b_state); /* * akpm: journal_write_metadata_buffer() sets * new_bh->b_transaction to commit_transaction. 
* We need to clean this up before we release new_bh * (which is of type BJ_IO) */ JBUFFER_TRACE(jh, "ph3: write metadata"); flags = journal_write_metadata_buffer(commit_transaction, jh, &new_jh, blocknr); set_bit(BH_JWrite, &jh2bh(new_jh)->b_state); wbuf[bufs++] = jh2bh(new_jh); /* Record the new block's tag in the current descriptor buffer */ tag_flag = 0; if (flags & 1) tag_flag |= JFS_FLAG_ESCAPE; if (!first_tag) tag_flag |= JFS_FLAG_SAME_UUID; tag = (journal_block_tag_t *) tagp; tag->t_blocknr = cpu_to_be32(jh2bh(jh)->b_blocknr); tag->t_flags = cpu_to_be32(tag_flag); tagp += sizeof(journal_block_tag_t); space_left -= sizeof(journal_block_tag_t); if (first_tag) { memcpy (tagp, journal->j_uuid, 16); tagp += 16; space_left -= 16; first_tag = 0; } /* If there's no more to do, or if the descriptor is full, let the IO rip! */ if (bufs == journal->j_wbufsize || commit_transaction->t_buffers == NULL || space_left < sizeof(journal_block_tag_t) + 16) { jbd_debug(4, "JBD: Submit %d IOs\n", bufs); /* Write an end-of-descriptor marker before submitting the IOs. "tag" still points to the last tag we set up. */ tag->t_flags |= cpu_to_be32(JFS_FLAG_LAST_TAG); start_journal_io: for (i = 0; i < bufs; i++) { struct buffer_head *bh = wbuf[i]; lock_buffer(bh); clear_buffer_dirty(bh); set_buffer_uptodate(bh); bh->b_end_io = journal_end_buffer_io_sync; submit_bh(write_op, bh); } cond_resched(); /* Force a new descriptor to be generated next time round the loop. */ descriptor = NULL; bufs = 0; } } /* Lo and behold: we have just managed to send a transaction to the log. Before we can commit it, wait for the IO so far to complete. Control buffers being written are on the transaction's t_log_list queue, and metadata buffers are on the t_iobuf_list queue. Wait for the buffers in reverse order. That way we are less likely to be woken up until all IOs have completed, and so we incur less scheduling load. 
*/ jbd_debug(3, "JBD: commit phase 4\n"); /* * akpm: these are BJ_IO, and j_list_lock is not needed. * See __journal_try_to_free_buffer. */ wait_for_iobuf: while (commit_transaction->t_iobuf_list != NULL) { struct buffer_head *bh; jh = commit_transaction->t_iobuf_list->b_tprev; bh = jh2bh(jh); if (buffer_locked(bh)) { wait_on_buffer(bh); goto wait_for_iobuf; } if (cond_resched()) goto wait_for_iobuf; if (unlikely(!buffer_uptodate(bh))) err = -EIO; clear_buffer_jwrite(bh); JBUFFER_TRACE(jh, "ph4: unfile after journal write"); journal_unfile_buffer(journal, jh); /* * ->t_iobuf_list should contain only dummy buffer_heads * which were created by journal_write_metadata_buffer(). */ BUFFER_TRACE(bh, "dumping temporary bh"); journal_put_journal_head(jh); __brelse(bh); J_ASSERT_BH(bh, atomic_read(&bh->b_count) == 0); free_buffer_head(bh); /* We also have to unlock and free the corresponding shadowed buffer */ jh = commit_transaction->t_shadow_list->b_tprev; bh = jh2bh(jh); clear_bit(BH_JWrite, &bh->b_state); J_ASSERT_BH(bh, buffer_jbddirty(bh)); /* The metadata is now released for reuse, but we need to remember it against this transaction so that when we finally commit, we can do any checkpointing required. 
*/ JBUFFER_TRACE(jh, "file as BJ_Forget"); journal_file_buffer(jh, commit_transaction, BJ_Forget); /* Wake up any transactions which were waiting for this IO to complete */ wake_up_bit(&bh->b_state, BH_Unshadow); JBUFFER_TRACE(jh, "brelse shadowed buffer"); __brelse(bh); } J_ASSERT (commit_transaction->t_shadow_list == NULL); jbd_debug(3, "JBD: commit phase 5\n"); /* Here we wait for the revoke record and descriptor record buffers */ wait_for_ctlbuf: while (commit_transaction->t_log_list != NULL) { struct buffer_head *bh; jh = commit_transaction->t_log_list->b_tprev; bh = jh2bh(jh); if (buffer_locked(bh)) { wait_on_buffer(bh); goto wait_for_ctlbuf; } if (cond_resched()) goto wait_for_ctlbuf; if (unlikely(!buffer_uptodate(bh))) err = -EIO; BUFFER_TRACE(bh, "ph5: control buffer writeout done: unfile"); clear_buffer_jwrite(bh); journal_unfile_buffer(journal, jh); journal_put_journal_head(jh); __brelse(bh); /* One for getblk */ /* AKPM: bforget here */ } if (err) journal_abort(journal, err); jbd_debug(3, "JBD: commit phase 6\n"); if (journal_write_commit_record(journal, commit_transaction)) err = -EIO; if (err) journal_abort(journal, err); /* End of a transaction! Finally, we can do checkpoint processing: any buffers committed as a result of this transaction can be removed from any checkpoint list it was on before. */ jbd_debug(3, "JBD: commit phase 7\n"); J_ASSERT(commit_transaction->t_sync_datalist == NULL); J_ASSERT(commit_transaction->t_buffers == NULL); J_ASSERT(commit_transaction->t_checkpoint_list == NULL); J_ASSERT(commit_transaction->t_iobuf_list == NULL); J_ASSERT(commit_transaction->t_shadow_list == NULL); J_ASSERT(commit_transaction->t_log_list == NULL); restart_loop: /* * As there are other places (journal_unmap_buffer()) adding buffers * to this list we have to be careful and hold the j_list_lock. 
*/ spin_lock(&journal->j_list_lock); while (commit_transaction->t_forget) { transaction_t *cp_transaction; struct buffer_head *bh; jh = commit_transaction->t_forget; spin_unlock(&journal->j_list_lock); bh = jh2bh(jh); jbd_lock_bh_state(bh); J_ASSERT_JH(jh, jh->b_transaction == commit_transaction || jh->b_transaction == journal->j_running_transaction); /* * If there is undo-protected committed data against * this buffer, then we can remove it now. If it is a * buffer needing such protection, the old frozen_data * field now points to a committed version of the * buffer, so rotate that field to the new committed * data. * * Otherwise, we can just throw away the frozen data now. */ if (jh->b_committed_data) { jbd_free(jh->b_committed_data, bh->b_size); jh->b_committed_data = NULL; if (jh->b_frozen_data) { jh->b_committed_data = jh->b_frozen_data; jh->b_frozen_data = NULL; } } else if (jh->b_frozen_data) { jbd_free(jh->b_frozen_data, bh->b_size); jh->b_frozen_data = NULL; } spin_lock(&journal->j_list_lock); cp_transaction = jh->b_cp_transaction; if (cp_transaction) { JBUFFER_TRACE(jh, "remove from old cp transaction"); __journal_remove_checkpoint(jh); } /* Only re-checkpoint the buffer_head if it is marked * dirty. If the buffer was added to the BJ_Forget list * by journal_forget, it may no longer be dirty and * there's no point in keeping a checkpoint record for * it. */ /* A buffer which has been freed while still being * journaled by a previous transaction may end up still * being dirty here, but we want to avoid writing back * that buffer in the future after the "add to orphan" * operation been committed, That's not only a performance * gain, it also stops aliasing problems if the buffer is * left behind for writeback and gets reallocated for another * use in a different page. 
*/ if (buffer_freed(bh) && !jh->b_next_transaction) { clear_buffer_freed(bh); clear_buffer_jbddirty(bh); } if (buffer_jbddirty(bh)) { JBUFFER_TRACE(jh, "add to new checkpointing trans"); __journal_insert_checkpoint(jh, commit_transaction); if (is_journal_aborted(journal)) clear_buffer_jbddirty(bh); JBUFFER_TRACE(jh, "refile for checkpoint writeback"); __journal_refile_buffer(jh); jbd_unlock_bh_state(bh); } else { J_ASSERT_BH(bh, !buffer_dirty(bh)); /* The buffer on BJ_Forget list and not jbddirty means * it has been freed by this transaction and hence it * could not have been reallocated until this * transaction has committed. *BUT* it could be * reallocated once we have written all the data to * disk and before we process the buffer on BJ_Forget * list. */ JBUFFER_TRACE(jh, "refile or unfile freed buffer"); __journal_refile_buffer(jh); if (!jh->b_transaction) { jbd_unlock_bh_state(bh); /* needs a brelse */ journal_remove_journal_head(bh); release_buffer_page(bh); } else jbd_unlock_bh_state(bh); } cond_resched_lock(&journal->j_list_lock); } spin_unlock(&journal->j_list_lock); /* * This is a bit sleazy. We use j_list_lock to protect transition * of a transaction into T_FINISHED state and calling * __journal_drop_transaction(). Otherwise we could race with * other checkpointing code processing the transaction... */ spin_lock(&journal->j_state_lock); spin_lock(&journal->j_list_lock); /* * Now recheck if some buffers did not get attached to the transaction * while the lock was dropped... */ if (commit_transaction->t_forget) { spin_unlock(&journal->j_list_lock); spin_unlock(&journal->j_state_lock); goto restart_loop; } /* Done with this transaction! 
*/ jbd_debug(3, "JBD: commit phase 8\n"); J_ASSERT(commit_transaction->t_state == T_COMMIT); commit_transaction->t_state = T_FINISHED; J_ASSERT(commit_transaction == journal->j_committing_transaction); journal->j_commit_sequence = commit_transaction->t_tid; journal->j_committing_transaction = NULL; commit_time = ktime_to_ns(ktime_sub(ktime_get(), start_time)); /* * weight the commit time higher than the average time so we don't * react too strongly to vast changes in commit time */ if (likely(journal->j_average_commit_time)) journal->j_average_commit_time = (commit_time*3 + journal->j_average_commit_time) / 4; else journal->j_average_commit_time = commit_time; spin_unlock(&journal->j_state_lock); if (commit_transaction->t_checkpoint_list == NULL && commit_transaction->t_checkpoint_io_list == NULL) { __journal_drop_transaction(journal, commit_transaction); } else { if (journal->j_checkpoint_transactions == NULL) { journal->j_checkpoint_transactions = commit_transaction; commit_transaction->t_cpnext = commit_transaction; commit_transaction->t_cpprev = commit_transaction; } else { commit_transaction->t_cpnext = journal->j_checkpoint_transactions; commit_transaction->t_cpprev = commit_transaction->t_cpnext->t_cpprev; commit_transaction->t_cpnext->t_cpprev = commit_transaction; commit_transaction->t_cpprev->t_cpnext = commit_transaction; } } spin_unlock(&journal->j_list_lock); jbd_debug(1, "JBD: commit %d complete, head %d\n", journal->j_commit_sequence, journal->j_tail_sequence); wake_up(&journal->j_wait_done_commit); }
gpl-2.0
yangyang1989/u-boot-2010.03-mini2440
cpu/arm_cortexa8/omap3/clock.c
33
13456
/*
 * (C) Copyright 2008
 * Texas Instruments, <www.ti.com>
 *
 * Author :
 *	Manikandan Pillai <mani.pillai@ti.com>
 *
 * Derived from Beagle Board and OMAP3 SDP code by
 *	Richard Woodruff <r-woodruff2@ti.com>
 *	Syed Mohammed Khasim <khasim@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston,
 * MA 02111-1307 USA
 */
#include <common.h>
#include <asm/io.h>
#include <asm/arch/clocks.h>
#include <asm/arch/clocks_omap3.h>
#include <asm/arch/mem.h>
#include <asm/arch/sys_proto.h>
#include <environment.h>
#include <command.h>

/******************************************************************************
 * get_sys_clk_speed() - determine reference oscillator speed
 *  based on known 32kHz clock and gptimer.
 *
 * Gauges the (unknown) sys_clk rate by clocking GPT1 from sys_clk and
 * counting its ticks across a fixed window measured with the 32kHz sync
 * counter, then mapping the elapsed tick count onto one of the supported
 * OMAP3 oscillator rates.
 *
 * Returns: one of S12M/S13M/S19_2M/S24M/S26M/S38_4M.
 *****************************************************************************/
u32 get_osc_clk_speed(void)
{
	u32 start, cstart, cend, cdiff, cdiv, val;
	struct prcm *prcm_base = (struct prcm *)PRCM_BASE;
	struct prm *prm_base = (struct prm *)PRM_BASE;
	struct gptimer *gpt1_base = (struct gptimer *)OMAP34XX_GPT1;
	struct s32ktimer *s32k_base = (struct s32ktimer *)SYNC_32KTIMER_BASE;

	/* Read the configured sys_clk input divider (1 or 2). */
	val = readl(&prm_base->clksrc_ctrl);

	if (val & SYSCLKDIV_2)
		cdiv = 2;
	else if (val & SYSCLKDIV_1)
		cdiv = 1;
	else
		/*
		 * Should never reach here! (Assume divider as 1)
		 */
		cdiv = 1;

	/* enable timer2 */
	val = readl(&prcm_base->clksel_wkup) | CLKSEL_GPT1;

	/* select sys_clk for GPT1 */
	writel(val, &prcm_base->clksel_wkup);

	/* Enable I and F Clocks for GPT1 */
	val = readl(&prcm_base->iclken_wkup) | EN_GPT1 | EN_32KSYNC;
	writel(val, &prcm_base->iclken_wkup);

	val = readl(&prcm_base->fclken_wkup) | EN_GPT1;
	writel(val, &prcm_base->fclken_wkup);

	writel(0, &gpt1_base->tldr);		/* start counting at 0 */
	writel(GPT_EN, &gpt1_base->tclr);	/* enable clock */

	/* enable 32kHz source, determine sys_clk via gauging */

	/* start time in 20 cycles */
	start = 20 + readl(&s32k_base->s32k_cr);

	/* dead loop till start time */
	while (readl(&s32k_base->s32k_cr) < start);

	/* get start sys_clk count */
	cstart = readl(&gpt1_base->tcrr);

	/*
	 * Gauge window: spin until 20 more 32kHz ticks have elapsed.
	 * NOTE(review): the original comment said "wait for 40 cycles",
	 * but the loop below only waits until start + 20; the thresholds
	 * further down appear tuned to this 20-tick window — confirm
	 * against known-good boards before changing either.
	 */
	while (readl(&s32k_base->s32k_cr) < (start + 20)) ;

	cend = readl(&gpt1_base->tcrr);		/* get end sys_clk count */
	cdiff = cend - cstart;			/* get elapsed ticks */

	/* Undo the /2 input divider so the thresholds match the raw rate. */
	if (cdiv == 2) {
		cdiff *= 2;
	}

	/* based on number of ticks assign speed */
	if (cdiff > 19000)
		return S38_4M;
	else if (cdiff > 15200)
		return S26M;
	else if (cdiff > 13000)
		return S24M;
	else if (cdiff > 9000)
		return S19_2M;
	else if (cdiff > 7600)
		return S13M;
	else
		return S12M;
}

/******************************************************************************
 * get_sys_clkin_sel() - returns the sys_clkin_sel field value based on
 * input oscillator clock frequency.
 *
 * @osc_clk:        measured oscillator rate (one of S12M..S38_4M)
 * @sys_clkin_sel:  out - PRM sys_clkin_sel field encoding (0..4);
 *                  unrecognised rates fall through to 0 (12 MHz).
 *****************************************************************************/
void get_sys_clkin_sel(u32 osc_clk, u32 *sys_clkin_sel)
{
	switch(osc_clk) {
	case S38_4M:
		*sys_clkin_sel = 4;
		break;
	case S26M:
		*sys_clkin_sel = 3;
		break;
	case S19_2M:
		*sys_clkin_sel = 2;
		break;
	case S13M:
		*sys_clkin_sel = 1;
		break;
	case S12M:
	default:
		*sys_clkin_sel = 0;
	}
}

/******************************************************************************
 * prcm_init() - inits clocks for PRCM as defined in clocks.h
 * called from SRAM, or Flash (using temp SRAM stack).
 *****************************************************************************/
void prcm_init(void)
{
	void (*f_lock_pll) (u32, u32, u32, u32);
	int xip_safe, p0, p1, p2, p3;
	u32 osc_clk = 0, sys_clkin_sel;
	u32 clk_index, sil_index = 0;
	struct prm *prm_base = (struct prm *)PRM_BASE;
	struct prcm *prcm_base = (struct prcm *)PRCM_BASE;
	dpll_param *dpll_param_p;

	/*
	 * Address of the relocated PLL-relock stub in SRAM; used only when
	 * executing from flash, since the CORE DPLL cannot be reprogrammed
	 * while we run from a clock domain it feeds.
	 */
	f_lock_pll = (void *) ((u32) &_end_vect - (u32) &_start +
				SRAM_VECT_CODE);

	xip_safe = is_running_in_sram();

	/*
	 * Gauge the input clock speed and find out the sys_clkin_sel
	 * value corresponding to the input clock.
	 */
	osc_clk = get_osc_clk_speed();
	get_sys_clkin_sel(osc_clk, &sys_clkin_sel);

	/* set input crystal speed */
	sr32(&prm_base->clksel, 0, 3, sys_clkin_sel);

	/* If the input clock is greater than 19.2M always divide/2 */
	if (sys_clkin_sel > 2) {
		/* input clock divider */
		sr32(&prm_base->clksrc_ctrl, 6, 2, 2);
		clk_index = sys_clkin_sel / 2;
	} else {
		/* input clock divider */
		sr32(&prm_base->clksrc_ctrl, 6, 2, 1);
		clk_index = sys_clkin_sel;
	}

	/*
	 * The DPLL tables are defined according to sysclk value and
	 * silicon revision. The clk_index value will be used to get
	 * the values for that input sysclk from the DPLL param table
	 * and sil_index will get the values for that SysClk for the
	 * appropriate silicon rev.
	 */
	if (get_cpu_rev())
		sil_index = 1;

	/* Unlock MPU DPLL (slows things down, and needed later) */
	sr32(&prcm_base->clken_pll_mpu, 0, 3, PLL_LOW_POWER_BYPASS);
	wait_on_value(ST_MPU_CLK, 0, &prcm_base->idlest_pll_mpu, LDELAY);

	/* Getting the base address of Core DPLL param table */
	dpll_param_p = (dpll_param *) get_core_dpll_param();

	/* Moving it to the right sysclk and ES rev base */
	dpll_param_p = dpll_param_p + 3 * clk_index + sil_index;
	if (xip_safe) {
		/*
		 * Running from SRAM: safe to reprogram the CORE DPLL
		 * directly.
		 * CORE DPLL
		 * sr32(CM_CLKSEL2_EMU) set override to work when asleep
		 */
		sr32(&prcm_base->clken_pll, 0, 3, PLL_FAST_RELOCK_BYPASS);
		wait_on_value(ST_CORE_CLK, 0, &prcm_base->idlest_ckgen,
				LDELAY);

		/*
		 * For OMAP3 ES1.0 Errata 1.50, default value directly doesn't
		 * work. write another value and then default value.
		 */

		/* m3x2 */
		sr32(&prcm_base->clksel1_emu, 16, 5, CORE_M3X2 + 1);
		/* m3x2 */
		sr32(&prcm_base->clksel1_emu, 16, 5, CORE_M3X2);
		/* Set M2 */
		sr32(&prcm_base->clksel1_pll, 27, 2, dpll_param_p->m2);
		/* Set M */
		sr32(&prcm_base->clksel1_pll, 16, 11, dpll_param_p->m);
		/* Set N */
		sr32(&prcm_base->clksel1_pll, 8, 7, dpll_param_p->n);
		/* 96M Src */
		sr32(&prcm_base->clksel1_pll, 6, 1, 0);
		/* ssi */
		sr32(&prcm_base->clksel_core, 8, 4, CORE_SSI_DIV);
		/* fsusb */
		sr32(&prcm_base->clksel_core, 4, 2, CORE_FUSB_DIV);
		/* l4 */
		sr32(&prcm_base->clksel_core, 2, 2, CORE_L4_DIV);
		/* l3 */
		sr32(&prcm_base->clksel_core, 0, 2, CORE_L3_DIV);
		/* gfx */
		sr32(&prcm_base->clksel_gfx, 0, 3, GFX_DIV);
		/* reset mgr */
		sr32(&prcm_base->clksel_wkup, 1, 2, WKUP_RSM);
		/* FREQSEL */
		sr32(&prcm_base->clken_pll, 4, 4, dpll_param_p->fsel);
		/* lock mode */
		sr32(&prcm_base->clken_pll, 0, 3, PLL_LOCK);

		wait_on_value(ST_CORE_CLK, 1, &prcm_base->idlest_ckgen,
				LDELAY);
	} else if (is_running_in_flash()) {
		/*
		 * if running from flash, jump to small relocated code
		 * area in SRAM.
		 * The register images are assembled in locals p0..p2 here
		 * (note: sr32 is applied to the stack copies, not to the
		 * live registers) and written by the SRAM stub.
		 */
		p0 = readl(&prcm_base->clken_pll);
		sr32(&p0, 0, 3, PLL_FAST_RELOCK_BYPASS);
		sr32(&p0, 4, 4, dpll_param_p->fsel);	/* FREQSEL */

		p1 = readl(&prcm_base->clksel1_pll);
		sr32(&p1, 27, 2, dpll_param_p->m2);	/* Set M2 */
		sr32(&p1, 16, 11, dpll_param_p->m);	/* Set M */
		sr32(&p1, 8, 7, dpll_param_p->n);	/* Set N */
		sr32(&p1, 6, 1, 0);	/* set source for 96M */

		p2 = readl(&prcm_base->clksel_core);
		sr32(&p2, 8, 4, CORE_SSI_DIV);	/* ssi */
		sr32(&p2, 4, 2, CORE_FUSB_DIV);	/* fsusb */
		sr32(&p2, 2, 2, CORE_L4_DIV);	/* l4 */
		sr32(&p2, 0, 2, CORE_L3_DIV);	/* l3 */

		p3 = (u32)&prcm_base->idlest_ckgen;

		(*f_lock_pll) (p0, p1, p2, p3);
	}

	/* PER DPLL */
	sr32(&prcm_base->clken_pll, 16, 3, PLL_STOP);
	wait_on_value(ST_PERIPH_CLK, 0, &prcm_base->idlest_ckgen, LDELAY);

	/* Getting the base address to PER DPLL param table */

	/* Set N */
	dpll_param_p = (dpll_param *) get_per_dpll_param();

	/* Moving it to the right sysclk base */
	dpll_param_p = dpll_param_p + clk_index;

	/*
	 * Errata 1.50 Workaround for OMAP3 ES1.0 only
	 * If using default divisors, write default divisor + 1
	 * and then the actual divisor value
	 */
	sr32(&prcm_base->clksel1_emu, 24, 5, PER_M6X2 + 1);	/* set M6 */
	sr32(&prcm_base->clksel1_emu, 24, 5, PER_M6X2);		/* set M6 */
	sr32(&prcm_base->clksel_cam, 0, 5, PER_M5X2 + 1);	/* set M5 */
	sr32(&prcm_base->clksel_cam, 0, 5, PER_M5X2);		/* set M5 */
	sr32(&prcm_base->clksel_dss, 0, 5, PER_M4X2 + 1);	/* set M4 */
	sr32(&prcm_base->clksel_dss, 0, 5, PER_M4X2);		/* set M4 */
	sr32(&prcm_base->clksel_dss, 8, 5, PER_M3X2 + 1);	/* set M3 */
	sr32(&prcm_base->clksel_dss, 8, 5, PER_M3X2);		/* set M3 */
	sr32(&prcm_base->clksel3_pll, 0, 5, dpll_param_p->m2 + 1); /* set M2 */
	sr32(&prcm_base->clksel3_pll, 0, 5, dpll_param_p->m2);	/* set M2 */
	/* Workaround end */

	sr32(&prcm_base->clksel2_pll, 8, 11, dpll_param_p->m);	/* set m */
	sr32(&prcm_base->clksel2_pll, 0, 7, dpll_param_p->n);	/* set n */
	sr32(&prcm_base->clken_pll, 20, 4, dpll_param_p->fsel);	/* FREQSEL */
	sr32(&prcm_base->clken_pll, 16, 3, PLL_LOCK);	/* lock mode */
	wait_on_value(ST_PERIPH_CLK, 2, &prcm_base->idlest_ckgen, LDELAY);

	/* Getting the base address to MPU DPLL param table */
	dpll_param_p = (dpll_param *) get_mpu_dpll_param();

	/* Moving it to the right sysclk and ES rev base */
	dpll_param_p = dpll_param_p + 3 * clk_index + sil_index;

	/* MPU DPLL (unlocked already) */

	/* Set M2 */
	sr32(&prcm_base->clksel2_pll_mpu, 0, 5, dpll_param_p->m2);
	/* Set M */
	sr32(&prcm_base->clksel1_pll_mpu, 8, 11, dpll_param_p->m);
	/* Set N */
	sr32(&prcm_base->clksel1_pll_mpu, 0, 7, dpll_param_p->n);
	/* FREQSEL */
	sr32(&prcm_base->clken_pll_mpu, 4, 4, dpll_param_p->fsel);
	/* lock mode */
	sr32(&prcm_base->clken_pll_mpu, 0, 3, PLL_LOCK);
	wait_on_value(ST_MPU_CLK, 1, &prcm_base->idlest_pll_mpu, LDELAY);

	/* Getting the base address to IVA DPLL param table */
	dpll_param_p = (dpll_param *) get_iva_dpll_param();

	/* Moving it to the right sysclk and ES rev base */
	dpll_param_p = dpll_param_p + 3 * clk_index + sil_index;

	/* IVA DPLL (set to 12*20=240MHz) */
	sr32(&prcm_base->clken_pll_iva2, 0, 3, PLL_STOP);
	wait_on_value(ST_IVA2_CLK, 0, &prcm_base->idlest_pll_iva2, LDELAY);
	/* set M2 */
	sr32(&prcm_base->clksel2_pll_iva2, 0, 5, dpll_param_p->m2);
	/* set M */
	sr32(&prcm_base->clksel1_pll_iva2, 8, 11, dpll_param_p->m);
	/* set N */
	sr32(&prcm_base->clksel1_pll_iva2, 0, 7, dpll_param_p->n);
	/* FREQSEL */
	sr32(&prcm_base->clken_pll_iva2, 4, 4, dpll_param_p->fsel);
	/* lock mode */
	sr32(&prcm_base->clken_pll_iva2, 0, 3, PLL_LOCK);
	wait_on_value(ST_IVA2_CLK, 1, &prcm_base->idlest_pll_iva2, LDELAY);

	/* Set up GPTimers to sys_clk source only */
	sr32(&prcm_base->clksel_per, 0, 8, 0xff);
	sr32(&prcm_base->clksel_wkup, 0, 1, 1);

	sdelay(5000);
}

/******************************************************************************
 * peripheral_enable() - Enable the clks & power for perifs (GPT2, UART1,...)
 *****************************************************************************/
void per_clocks_enable(void)
{
	struct prcm *prcm_base = (struct prcm *)PRCM_BASE;

	/* Enable GP2 timer. */
	sr32(&prcm_base->clksel_per, 0, 1, 0x1);	/* GPT2 = sys clk */
	sr32(&prcm_base->iclken_per, 3, 1, 0x1);	/* ICKen GPT2 */
	sr32(&prcm_base->fclken_per, 3, 1, 0x1);	/* FCKen GPT2 */

#ifdef CONFIG_SYS_NS16550
	/* Enable UART1 clocks */
	sr32(&prcm_base->fclken1_core, 13, 1, 0x1);
	sr32(&prcm_base->iclken1_core, 13, 1, 0x1);

	/* UART 3 Clocks */
	sr32(&prcm_base->fclken_per, 11, 1, 0x1);
	sr32(&prcm_base->iclken_per, 11, 1, 0x1);
#endif

	/* Per-bank GPIO interface (ICLK) and functional (FCLK) clocks. */
#ifdef CONFIG_OMAP3_GPIO_2
	sr32(&prcm_base->fclken_per, 13, 1, 1);
	sr32(&prcm_base->iclken_per, 13, 1, 1);
#endif
#ifdef CONFIG_OMAP3_GPIO_3
	sr32(&prcm_base->fclken_per, 14, 1, 1);
	sr32(&prcm_base->iclken_per, 14, 1, 1);
#endif
#ifdef CONFIG_OMAP3_GPIO_4
	sr32(&prcm_base->fclken_per, 15, 1, 1);
	sr32(&prcm_base->iclken_per, 15, 1, 1);
#endif
#ifdef CONFIG_OMAP3_GPIO_5
	sr32(&prcm_base->fclken_per, 16, 1, 1);
	sr32(&prcm_base->iclken_per, 16, 1, 1);
#endif
#ifdef CONFIG_OMAP3_GPIO_6
	sr32(&prcm_base->fclken_per, 17, 1, 1);
	sr32(&prcm_base->iclken_per, 17, 1, 1);
#endif

#ifdef CONFIG_DRIVER_OMAP34XX_I2C
	/* Turn on all 3 I2C clocks */
	sr32(&prcm_base->fclken1_core, 15, 3, 0x7);
	sr32(&prcm_base->iclken1_core, 15, 3, 0x7);	/* I2C1,2,3 = on */
#endif

	/* Enable the ICLK for 32K Sync Timer as its used in udelay */
	sr32(&prcm_base->iclken_wkup, 2, 1, 0x1);

	/* Blanket-enable the remaining domain clocks via the _ON masks. */
	sr32(&prcm_base->fclken_iva2, 0, 32, FCK_IVA2_ON);
	sr32(&prcm_base->fclken1_core, 0, 32, FCK_CORE1_ON);
	sr32(&prcm_base->iclken1_core, 0, 32, ICK_CORE1_ON);
	sr32(&prcm_base->iclken2_core, 0, 32, ICK_CORE2_ON);
	sr32(&prcm_base->fclken_wkup, 0, 32, FCK_WKUP_ON);
	sr32(&prcm_base->iclken_wkup, 0, 32, ICK_WKUP_ON);
	sr32(&prcm_base->fclken_dss, 0, 32, FCK_DSS_ON);
	sr32(&prcm_base->iclken_dss, 0, 32, ICK_DSS_ON);
	sr32(&prcm_base->fclken_cam, 0, 32, FCK_CAM_ON);
	sr32(&prcm_base->iclken_cam, 0, 32, ICK_CAM_ON);
	sr32(&prcm_base->fclken_per, 0, 32, FCK_PER_ON);
	sr32(&prcm_base->iclken_per, 0, 32, ICK_PER_ON);

	/* settle time after enabling the clocks */
	sdelay(1000);
}
gpl-2.0
sakuramilk/sc02c_kernel_gb
drivers/usb/host/s3c-otg/s3c-otg-oci.c
33
17680
/****************************************************************************
 *  (C) Copyright 2008 Samsung Electronics Co., Ltd., All rights reserved
 *
 *  [File Name]   : OCI.c
 *  [Description] : The file implement the external and internal functions of OCI
 *  [Author]      : Jang Kyu Hyeok { kyuhyeok.jang@samsung.com }
 *  [Department]  : System LSI Division/Embedded S/W Platform
 *  [Created Date]: 2009/02/10
 *  [Revision History]
 *      (1) 2008/06/12   by Jang Kyu Hyeok { kyuhyeok.jang@samsung.com }
 *          - Created this file and Implement functions of OCI
 *
 ****************************************************************************/
/****************************************************************************
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 ****************************************************************************/

#include "s3c-otg-oci.h"

/*
 * Host-channel availability map for the 16 OTG host channels:
 * true = channel free, false = allocated (see oci_channel_alloc/dealloc).
 */
static bool ch_enable[16];

/**
 * int oci_init(struct sec_otghost *otghost)
 *
 * @brief Initialize oci module.
 *	Marks all 16 channels free, clears the halt flag, initializes the
 *	PHY (oci_sys_init) and soft-resets the core. Global interrupts stay
 *	masked until oci_start() is called.
 *
 * @param None
 *
 * @return USB_ERR_SUCCESS : If success \n
 *         USB_ERR_FAIL : If call fail \n
 * @remark
 */
int oci_init(struct sec_otghost *otghost)
{
	otg_mem_set((void*)ch_enable, true, sizeof(bool)*16);
	otghost->ch_halt = false;

	if(oci_sys_init() == USB_ERR_SUCCESS) {
		if(oci_core_reset() == USB_ERR_SUCCESS) {
			/* keep global interrupts masked until oci_start() */
			oci_set_global_interrupt(false);
			return USB_ERR_SUCCESS;
		}
		else {
			otg_dbg(OTG_DBG_OCI, "oci_core_reset() Fail\n");
			return USB_ERR_FAIL;
		}
	}
	return USB_ERR_FAIL;
}

/**
 * int oci_core_init(void)
 *
 * @brief process core initialize as s5pc110 otg spec
 *	Programs the PHY interface (16-bit UTMI, forced host mode), resets
 *	the core, then configures AHB/DMA mode from GHWCFG2, SRP/HNP
 *	capability from the synthesized op mode, and the base interrupt mask.
 *
 * @param None
 *
 * @return USB_ERR_SUCCESS : If success \n
 *         USB_ERR_FAIL : If call fail \n
 * @remark
 */
int oci_core_init(void)
{
	gahbcfg_t ahbcfg = {.d32 = 0};
	gusbcfg_t usbcfg = {.d32 = 0};
	ghwcfg2_t hwcfg2 = {.d32 = 0};
	gintmsk_t gintmsk = {.d32 = 0};

	otg_dbg(OTG_DBG_OCI, "oci_core_init \n");

	usbcfg.d32 = read_reg_32(GUSBCFG);

	otg_dbg(OTG_DBG_OCI, "before - GUSBCFG=0x%x, GOTGCTL=0x%x\n",
		usbcfg.d32, read_reg_32(GOTGCTL));

	/* PHY parameters */
	usbcfg.b.physel = 0;
	usbcfg.b.phyif = 1; /* 16 bit */
	usbcfg.b.ulpi_utmi_sel = 0; /* UTMI */
	/* usbcfg.b.ddrsel = 1; */ /* DDR */
	usbcfg.b.usbtrdtim = 5; /* 16 bit UTMI */
	usbcfg.b.toutcal = 7;
	usbcfg.b.forcehstmode = 1;

	write_reg_32 (GUSBCFG, usbcfg.d32);

	otg_dbg(OTG_DBG_OCI, "after - GUSBCFG=0x%x, GOTGCTL=0x%x\n",
		read_reg_32(GUSBCFG), read_reg_32(GOTGCTL));

	/* Reset after setting the PHY parameters */
	if(oci_core_reset() == USB_ERR_SUCCESS) {
		/* Program the GAHBCFG Register.*/
		hwcfg2.d32 = read_reg_32 (GHWCFG2);

		switch (hwcfg2.b.architecture) {
		case HWCFG2_ARCH_SLAVE_ONLY:
			otg_dbg(OTG_DBG_OCI, "Slave Only Mode\n");
			ahbcfg.b.nptxfemplvl = 0;
			ahbcfg.b.ptxfemplvl = 0;
			break;

		case HWCFG2_ARCH_EXT_DMA:
			otg_dbg(OTG_DBG_OCI, "External DMA Mode - TBD!\n");
			break;

		case HWCFG2_ARCH_INT_DMA:
			otg_dbg(OTG_DBG_OCI, "Internal DMA Setting \n");
			ahbcfg.b.dmaenable = true;
			ahbcfg.b.hburstlen = INT_DMA_MODE_INCR4;
			break;

		default:
			otg_dbg(OTG_DBG_OCI, "ERR> hwcfg2\n ");
			break;
		}
		write_reg_32 (GAHBCFG, ahbcfg.d32);

		/* Program the GUSBCFG register.*/
		switch (hwcfg2.b.op_mode) {
		case MODE_HNP_SRP_CAPABLE:
			otg_dbg(OTG_DBG_OCI, "GHWCFG2 OP Mode : MODE_HNP_SRP_CAPABLE \n");
			usbcfg.b.hnpcap = 1;
			usbcfg.b.srpcap = 1;
			otg_dbg(OTG_DBG_OCI, "OTG_DBG_OCI : use HNP and SRP \n");
			break;

		case MODE_SRP_ONLY_CAPABLE:
			otg_dbg(OTG_DBG_OCI, "GHWCFG2 OP Mode : MODE_SRP_ONLY_CAPABLE \n");
			usbcfg.b.srpcap = 1;
			break;

		case MODE_NO_HNP_SRP_CAPABLE:
			otg_dbg(OTG_DBG_OCI, "GHWCFG2 OP Mode : MODE_NO_HNP_SRP_CAPABLE \n");
			usbcfg.b.hnpcap = 0;
			break;

		case MODE_SRP_CAPABLE_DEVICE:
			otg_dbg(OTG_DBG_OCI, "GHWCFG2 OP Mode : MODE_SRP_CAPABLE_DEVICE \n");
			usbcfg.b.srpcap = 1;
			break;

		case MODE_NO_SRP_CAPABLE_DEVICE:
			otg_dbg(OTG_DBG_OCI, "GHWCFG2 OP Mode : MODE_NO_SRP_CAPABLE_DEVICE \n");
			usbcfg.b.srpcap = 0;
			break;

		case MODE_SRP_CAPABLE_HOST:
			otg_dbg(OTG_DBG_OCI, "GHWCFG2 OP Mode : MODE_SRP_CAPABLE_HOST \n");
			usbcfg.b.srpcap = 1;
			break;

		case MODE_NO_SRP_CAPABLE_HOST:
			otg_dbg(OTG_DBG_OCI, "GHWCFG2 OP Mode : MODE_NO_SRP_CAPABLE_HOST \n");
			usbcfg.b.srpcap = 0;
			break;

		default :
			otg_err(OTG_DBG_OCI, "ERR> hwcfg2\n ");
			break;
		}
		write_reg_32 (GUSBCFG, usbcfg.d32);

		/* Program the GINTMSK register.*/
		gintmsk.b.modemismatch = 1;
		gintmsk.b.sofintr = 1;
		/*gintmsk.b.otgintr = 1; */
		gintmsk.b.conidstschng = 1;
		/*gintmsk.b.wkupintr = 1;*/
		gintmsk.b.disconnect = 1;
		/*gintmsk.b.usbsuspend = 1;*/
		/*gintmsk.b.sessreqintr = 1;*/
		/*gintmsk.b.portintr = 1;*/
		/*gintmsk.b.hcintr = 1; */
		write_reg_32(GINTMSK, gintmsk.d32);

		return USB_ERR_SUCCESS;
	}
	else {
		otg_err(OTG_DBG_OCI, "Core Reset FAIL\n");
		return USB_ERR_FAIL;
	}
}

/**
 * int oci_host_init(void)
 *
 * @brief Process host initialize as s5pc110 spec
 *	Unmasks the port interrupt, selects the host FS/LS PHY clock and
 *	flushes the FIFOs for host mode.
 *
 * @param None
 *
 * @return USB_ERR_SUCCESS : If success \n
 *         USB_ERR_FAIL : If call fail \n
 * @remark
 */
int oci_host_init(void)
{
	gintmsk_t gintmsk = {.d32 = 0};
	hcfg_t hcfg = {.d32 = 0};

#if 0 /*#ifdef CONFIG_USB_S3C_OTG_HOST_DTGDRVVBUS*/
	hprt_t hprt;
	hprt.d32 = read_reg_32(HPRT);
#endif

	otg_dbg(OTG_DBG_OCI, "oci_host_init\n");

	gintmsk.b.portintr = 1;
	update_reg_32(GINTMSK,gintmsk.d32);

	hcfg.b.fslspclksel = HCFG_30_60_MHZ;
	update_reg_32(HCFG, hcfg.d32);

#if 0 /*#ifdef CONFIG_USB_S3C_OTG_HOST_DTGDRVVBUS*/
	/* turn on vbus */
	if(!hprt.b.prtpwr) {
		hprt.b.prtpwr = 1;
		write_reg_32(HPRT, hprt.d32);
		otg_dbg(true, "turn on Vbus\n");
	}
#endif

	oci_config_flush_fifo(OTG_HOST_MODE);

	return USB_ERR_SUCCESS;
}

/**
 * int oci_start(void)
 *
 * @brief start to operate oci module by calling oci_core_init function
 *	Runs core init, waits 50 ms for the core to settle, selects
 *	host/device mode from GINTSTS, then unmasks the global interrupt.
 *
 * @param None
 *
 * @return USB_ERR_SUCCESS : If success \n
 *         USB_ERR_FAIL : If call fail \n
 * @remark
 */
int oci_start(void)
{
	otg_dbg(OTG_DBG_OCI, "oci_start \n");

	if(oci_core_init() == USB_ERR_SUCCESS) {
		mdelay(50);

		if(oci_init_mode() == USB_ERR_SUCCESS) {
			oci_set_global_interrupt(true);
			return USB_ERR_SUCCESS;
		}
		else {
			otg_dbg(OTG_DBG_OCI, "oci_init_mode() Fail\n");
			return USB_ERR_FAIL;
		}
	}
	else {
		otg_dbg(OTG_DBG_OCI, "oci_core_init() Fail\n");
		return USB_ERR_FAIL;
	}
}

/**
 * int oci_stop(void)
 *
 * @brief stop to opearte otg core
 *	Masks every interrupt source (GINTMSK written with 0) and the
 *	global interrupt bit.
 *
 * @param None
 *
 * @return USB_ERR_SUCCESS : If success \n
 *         USB_ERR_FAIL : If call fail \n
 * @remark
 */
int oci_stop()
{
	gintmsk_t gintmsk = {.d32 = 0};

	otg_dbg(OTG_DBG_OCI, "oci_stop\n");

	/* sometimes, port interrupt occured after removed
	 * otg host driver. so, we have to mask port interrupt.
	 */
	write_reg_32(GINTMSK, gintmsk.d32);

	oci_set_global_interrupt(false);

	return USB_ERR_SUCCESS;
}

/**
 * oci_start_transfer(struct sec_otghost *otghost, stransfer_t *st_t)
 *
 * @brief start transfer by using transfer information to receive from scheduler
 *	Allocates a host channel if the transfer does not already own one,
 *	programs the channel and sets HCCHAR.chen to kick it off.
 *
 * @param [IN] *st_t - information about transfer to write register by calling oci_channel_init function
 *
 * @return allocated channel number, or CH_NONE on allocation failure.
 * @remark
 */
u8 oci_start_transfer(struct sec_otghost *otghost, stransfer_t *st_t)
{
	hcchar_t hcchar = {.d32 = 0};

	otg_dbg(OTG_DBG_OCI, "oci_start_transfer \n");

	if(st_t->alloc_chnum ==CH_NONE) {
		if( oci_channel_alloc(&(st_t->alloc_chnum)) == USB_ERR_SUCCESS) {
			oci_channel_init(st_t->alloc_chnum, st_t);
			hcchar.b.chen = 1;
			update_reg_32(HCCHAR(st_t->alloc_chnum), hcchar.d32);
			return st_t->alloc_chnum;
		}
		else {
			otg_dbg(OTG_DBG_OCI, "oci_start_transfer Fail - Channel Allocation Error\n");
			return CH_NONE;
		}
	}
	else {
		/* transfer already owns a channel: just reprogram and enable */
		oci_channel_init(st_t->alloc_chnum, st_t);
		hcchar.b.chen = 1;
		update_reg_32(HCCHAR(st_t->alloc_chnum), hcchar.d32);
		return st_t->alloc_chnum;
	}
}

/**
 * int oci_stop_transfer(struct sec_otghost *otghost, u8 ch_num)
 *
 * @brief stop to transfer even if transfering
 *	Requests a channel halt (chdis+chen), busy-waits (bounded by
 *	max_error_count) for chdis to clear, then releases the channel and
 *	clears its interrupt state.
 *
 * @param [IN] ch_num - channel to halt (valid range 0..15)
 *
 * @return USB_ERR_SUCCESS : If success \n
 *         USB_ERR_FAIL : If call fail \n
 * @remark
 */
int oci_stop_transfer(struct sec_otghost *otghost, u8 ch_num)
{
	hcchar_t hcchar = {.d32 = 0};
	hcintmsk_t hcintmsk = {.d32 = 0};
	int count = 0, max_error_count = 10000;

	otg_dbg(OTG_DBG_OCI, "step1: oci_stop_transfer ch=%d, hcchar=0x%x\n",
		ch_num, read_reg_32(HCCHAR(ch_num)));

	/*
	 * NOTE(review): valid channels are 0..15, so this guard lets
	 * ch_num == 16 through (looks like it should be >= 16) — confirm
	 * against callers before changing.
	 */
	if(ch_num>16)
		return USB_ERR_FAIL;

	otghost->ch_halt = true;

	hcintmsk.b.chhltd = 1;
	update_reg_32(HCINTMSK(ch_num),hcintmsk.d32);

	hcchar.b.chdis = 1;
	hcchar.b.chen = 1;
	update_reg_32(HCCHAR(ch_num),hcchar.d32);

	/* wait for Channel Disabled Interrupt */
	do {
		hcchar.d32 = read_reg_32(HCCHAR(ch_num));

		if(count > max_error_count) {
			otg_dbg(OTG_DBG_OCI, "Warning!! oci_stop_transfer()"
				"ChDis is not cleared! ch=%d, hcchar=0x%x\n",
				ch_num, hcchar.d32);
			break;
		}
		count++;
	} while(hcchar.b.chdis);

	oci_channel_dealloc(ch_num);

	/* drop the channel's top-level mask and write-clear its interrupts */
	clear_reg_32(HAINTMSK,ch_num);
	write_reg_32(HCINT(ch_num),INT_ALL);
	clear_reg_32(HCINTMSK(ch_num), INT_ALL);

	otghost->ch_halt = false;

	otg_dbg(OTG_DBG_OCI, "step2 : oci_stop_transfer ch=%d, hcchar=0x%x\n",
		ch_num, read_reg_32(HCCHAR(ch_num)));

	return USB_ERR_SUCCESS;
}

/**
 * int oci_channel_init( u8 ch_num, stransfer_t *st_t)
 *
 * @brief Process channel initialize to prepare starting transfer
 *	Clears the channel registers, unmasks its interrupts, then programs
 *	HCTSIZ/HCCHAR/HCDMA from the scheduler's transfer descriptor.
 *	Control transfers get their PID/direction from the current stage;
 *	periodic transfers get odd/even frame scheduling from the frame
 *	counter.
 *
 * @param None
 *
 * @return USB_ERR_SUCCESS : If success \n
 *         USB_ERR_FAIL : If call fail \n
 * @remark
 */
int oci_channel_init( u8 ch_num, stransfer_t *st_t)
{
	u32 intr_enable = 0;
	gintmsk_t gintmsk = {.d32 = 0};
	hcchar_t hcchar = {.d32 = 0};
	hctsiz_t hctsiz = {.d32 = 0};

	otg_dbg(OTG_DBG_OCI, "oci_channel_init \n");

	/* Clear channel information */
	write_reg_32(HCTSIZ(ch_num), 0);
	write_reg_32(HCCHAR(ch_num), 0);
	write_reg_32(HCINTMSK(ch_num), 0);
	write_reg_32(HCINT(ch_num), INT_ALL);/*write clear*/
	write_reg_32(HCDMA(ch_num), 0);

	/* enable host channel interrupt in GINTSTS */
	gintmsk.b.hcintr =1;
	update_reg_32(GINTMSK, gintmsk.d32);

	/* Enable the top level host channel interrupt in HAINT */
	intr_enable = (1 << ch_num);
	update_reg_32(HAINTMSK, intr_enable);

	/* unmask the down level host channel interrupt in HCINT */
	write_reg_32(HCINTMSK(ch_num),st_t->hc_reg.hc_int_msk.d32);

	/* Program the HCSIZn register with the endpoint characteristics for */
	hctsiz.b.xfersize = st_t->buf_size;
	hctsiz.b.pktcnt = st_t->packet_cnt;

	/* Program the HCCHARn register with the endpoint characteristics for */
	hcchar.b.mps = st_t->ed_desc_p->max_packet_size;
	hcchar.b.epnum = st_t->ed_desc_p->endpoint_num;
	hcchar.b.epdir = st_t->ed_desc_p->is_ep_in;
	hcchar.b.lspddev = (st_t->ed_desc_p->dev_speed == LOW_SPEED_OTG);
	hcchar.b.eptype = st_t->ed_desc_p->endpoint_type;
	hcchar.b.multicnt = st_t->ed_desc_p->mc;
	hcchar.b.devaddr = st_t->ed_desc_p->device_addr;

	if(st_t->ed_desc_p->endpoint_type == INT_TRANSFER ||
		st_t->ed_desc_p->endpoint_type == ISOCH_TRANSFER) {
		u32 uiFrameNum = 0;
		uiFrameNum = oci_get_frame_num();
		/* schedule periodic transfers on the opposite frame parity */
		hcchar.b.oddfrm = uiFrameNum%2?1:0;
		/*
		 * if transfer type is periodic transfer,
		 * must support sof interrupt
		 */
		/*
		gintmsk.b.sofintr = 1;
		update_reg_32(GINTMSK, gintmsk.d32);
		*/
	}

	if(st_t->ed_desc_p->endpoint_type == CONTROL_TRANSFER) {
		td_t *td_p;
		td_p = (td_t *)st_t->parent_td;

		/* PID and direction depend on the control-transfer stage */
		switch(td_p->standard_dev_req_info.conrol_transfer_stage) {
		case SETUP_STAGE:
			hctsiz.b.pid = st_t->ed_status_p->control_data_tgl.setup_tgl;
			hcchar.b.epdir = EP_OUT;
			break;

		case DATA_STAGE:
			hctsiz.b.pid = st_t->ed_status_p->control_data_tgl.data_tgl;
			hcchar.b.epdir = st_t->ed_desc_p->is_ep_in;
			break;

		case STATUS_STAGE:
			hctsiz.b.pid = st_t->ed_status_p->control_data_tgl.status_tgl;

			/* status stage runs opposite to the data stage */
			if(td_p->standard_dev_req_info.is_data_stage) {
				hcchar.b.epdir = ~(st_t->ed_desc_p->is_ep_in);
			}
			else {
				hcchar.b.epdir = EP_IN;
			}
			break;

		default:break;
		}
	}
	else {
		hctsiz.b.pid = st_t->ed_status_p->data_tgl;
	}

	hctsiz.b.dopng = st_t->ed_status_p->is_ping_enable;
	write_reg_32(HCTSIZ(ch_num),hctsiz.d32);

	/* ping is a one-shot request: consume the flag */
	st_t->ed_status_p->is_ping_enable = false;

	/* Write DMA Address */
	write_reg_32(HCDMA(ch_num),st_t->start_phy_buf_addr);

	/* Wrote HCCHAR Register */
	write_reg_32(HCCHAR(ch_num),hcchar.d32);

	return USB_ERR_SUCCESS;
}

/**
 * u32 oci_get_frame_num(void)
 *
 * @brief Get current frame number by reading register.
 *
 * @param None
 *
 * @return current frame number (HFNUM.frnum).
 * @remark
 */
u32 oci_get_frame_num(void)
{
	hfnum_t hfnum;

	hfnum.d32 = read_reg_32(HFNUM);

	otg_dbg(OTG_DBG_OCI, " oci_get_frame_num=%d\n", hfnum.b.frnum);

	return hfnum.b.frnum;
}

/**
 * u16 oci_get_frame_interval(void)
 *
 * @brief Get current frame interval by reading register.
* * @param None * * @return USB_ERR_SUCCESS : If success \n * USB_ERR_FAIL : If call fail \n * @remark * */ u16 oci_get_frame_interval(void) { hfir_t hfir; hfir.d32 = read_reg_32(HFIR); return hfir.b.frint; } void oci_set_frame_interval(u16 interval) { hfir_t hfir = {.d32 = 0}; hfir.b.frint = interval; write_reg_32(HFIR, hfir.d32); } /* OCI Internal Functions */ int oci_channel_alloc(u8 *ch_num) { u8 ch; hcchar_t hcchar = {.d32 = 0}; for(ch = 0 ; ch<16 ; ch++) { if(ch_enable[ch] == true) { hcchar.d32 = read_reg_32(HCCHAR(ch)); if(hcchar.b.chdis == 0) { *ch_num = ch; ch_enable[ch] = false; return USB_ERR_SUCCESS; } } } return USB_ERR_FAIL; } int oci_channel_dealloc(u8 ch_num) { if(ch_num < 16 && ch_enable[ch_num] == false) { ch_enable[ch_num] = true; write_reg_32(HCTSIZ(ch_num), 0); write_reg_32(HCCHAR(ch_num), 0); write_reg_32(HCINTMSK(ch_num), 0); write_reg_32(HCINT(ch_num), INT_ALL); write_reg_32(HCDMA(ch_num), 0); return USB_ERR_SUCCESS; } return USB_ERR_FAIL; } int oci_sys_init(void) { otg_host_phy_init(); return USB_ERR_SUCCESS; } void oci_set_global_interrupt(bool set) { gahbcfg_t ahbcfg; otg_dbg(OTG_DBG_OCI, " oci_set_global_interrupt\n"); ahbcfg.d32 = 0; ahbcfg.b.glblintrmsk = 1; if(set) { update_reg_32(GAHBCFG,ahbcfg.d32); } else { clear_reg_32(GAHBCFG,ahbcfg.d32); } } int oci_init_mode(void) { gintsts_t gintsts; gintsts.d32 = read_reg_32(GINTSTS); otg_dbg(OTG_DBG_OCI, "GINSTS = 0x%x,GINMSK = 0x%x.\n", (unsigned int)gintsts.d32, (unsigned int)read_reg_32(GINTMSK)); if(gintsts.b.curmode == OTG_HOST_MODE) { otg_dbg(OTG_DBG_OCI, "HOST Mode\n"); if(oci_host_init() == USB_ERR_SUCCESS) { return USB_ERR_SUCCESS; } else { otg_dbg(OTG_DBG_OCI, "oci_host_init() Fail\n"); return USB_ERR_FAIL; } } else { /* Device Mode */ otg_dbg(OTG_DBG_OCI, "DEVICE Mode\n"); if(oci_dev_init() == USB_ERR_SUCCESS) { return USB_ERR_SUCCESS; } else { otg_err(OTG_DBG_OCI, "oci_dev_init() Fail\n"); return USB_ERR_FAIL; } } return USB_ERR_SUCCESS; } void oci_config_flush_fifo(u32 mode) { 
ghwcfg2_t hwcfg2 = {.d32 = 0}; otg_dbg(OTG_DBG_OCI, "oci_config_flush_fifo\n"); hwcfg2.d32 = read_reg_32(GHWCFG2); /* Configure data FIFO sizes */ if (hwcfg2.b.dynamic_fifo) { /* Rx FIFO */ write_reg_32(GRXFSIZ, 0x0000010D); /* Non-periodic Tx FIFO */ write_reg_32(GNPTXFSIZ, 0x0080010D); if (mode == OTG_HOST_MODE) { /* For Periodic transactions, */ /* program HPTXFSIZ */ } } /* Flush the FIFOs */ oci_flush_tx_fifo(0); oci_flush_rx_fifo(); } void oci_flush_tx_fifo(u32 num) { grstctl_t greset = {.d32 = 0}; u32 count = 0; otg_dbg(OTG_DBG_OCI, "oci_flush_tx_fifo\n"); greset.b.txfflsh = 1; greset.b.txfnum = num; write_reg_32(GRSTCTL, greset.d32); /* wait for flush to end */ while (greset.b.txfflsh == 1) { greset.d32 = read_reg_32(GRSTCTL); if (++count > MAX_COUNT) break; }; /* Wait for 3 PHY Clocks*/ udelay(30); } void oci_flush_rx_fifo(void) { grstctl_t greset = {.d32 = 0}; u32 count = 0; otg_dbg(OTG_DBG_OCI, "oci_flush_rx_fifo\n"); greset.b.rxfflsh = 1; write_reg_32(GRSTCTL, greset.d32 ); do { greset.d32 = read_reg_32(GRSTCTL); if (++count > MAX_COUNT) break; } while (greset.b.rxfflsh == 1); /* Wait for 3 PHY Clocks*/ udelay(30); } int oci_core_reset(void) { u32 count = 0; grstctl_t greset = {.d32 = 0}; otg_dbg(OTG_DBG_OCI, "oci_core_reset\n"); /* Wait for AHB master IDLE state. */ do { greset.d32 = read_reg_32 (GRSTCTL); mdelay (50); if(++count>100) { otg_dbg(OTG_DBG_OCI, "AHB status is not IDLE\n"); return USB_ERR_FAIL; } } while (greset.b.ahbidle != 1); /* Core Soft Reset */ greset.b.csftrst = 1; write_reg_32 (GRSTCTL, greset.d32); /* Wait for 3 PHY Clocks*/ mdelay (50); return USB_ERR_SUCCESS; } int oci_dev_init(void) { otg_dbg(OTG_DBG_OCI, "oci_dev_init - do nothing.\n"); /* return USB_ERR_FAIL; */ return USB_ERR_SUCCESS; }
gpl-2.0
kylewickens/ubuntu-kernel-10.04
arch/blackfin/mach-bf533/boards/cm_bf533.c
545
11953
/* * Copyright 2004-2009 Analog Devices Inc. * 2008-2009 Bluetechnix * 2005 National ICT Australia (NICTA) * Aidan Williams <aidan@nicta.com.au> * * Licensed under the GPL-2 or later. */ #include <linux/device.h> #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/mtd/physmap.h> #include <linux/spi/spi.h> #include <linux/spi/flash.h> #include <linux/spi/mmc_spi.h> #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) #include <linux/usb/isp1362.h> #endif #include <linux/irq.h> #include <asm/dma.h> #include <asm/bfin5xx_spi.h> #include <asm/portmux.h> #include <asm/dpmc.h> /* * Name the Board for the /proc/cpuinfo */ const char bfin_board_name[] = "Bluetechnix CM BF533"; #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) /* all SPI peripherals info goes here */ #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) static struct mtd_partition bfin_spi_flash_partitions[] = { { .name = "bootloader(spi)", .size = 0x00020000, .offset = 0, .mask_flags = MTD_CAP_ROM }, { .name = "linux kernel(spi)", .size = 0xe0000, .offset = 0x20000 }, { .name = "file system(spi)", .size = 0x700000, .offset = 0x00100000, } }; static struct flash_platform_data bfin_spi_flash_data = { .name = "m25p80", .parts = bfin_spi_flash_partitions, .nr_parts = ARRAY_SIZE(bfin_spi_flash_partitions), .type = "m25p64", }; /* SPI flash chip (m25p64) */ static struct bfin5xx_spi_chip spi_flash_chip_info = { .enable_dma = 0, /* use dma transfer with this chip*/ .bits_per_word = 8, }; #endif /* SPI ADC chip */ #if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE) static struct bfin5xx_spi_chip spi_adc_chip_info = { .enable_dma = 1, /* use dma transfer with this chip*/ .bits_per_word = 16, }; #endif #if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) static struct bfin5xx_spi_chip ad1836_spi_chip_info = { .enable_dma = 0, .bits_per_word = 16, }; 
#endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) static struct bfin5xx_spi_chip mmc_spi_chip_info = { .enable_dma = 0, .bits_per_word = 8, }; #endif static struct spi_board_info bfin_spi_board_info[] __initdata = { #if defined(CONFIG_MTD_M25P80) || defined(CONFIG_MTD_M25P80_MODULE) { /* the modalias must be the same as spi device driver name */ .modalias = "m25p80", /* Name of spi_driver for this device */ .max_speed_hz = 25000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 1, /* Framework chip select. On STAMP537 it is SPISSEL1*/ .platform_data = &bfin_spi_flash_data, .controller_data = &spi_flash_chip_info, .mode = SPI_MODE_3, }, #endif #if defined(CONFIG_BFIN_SPI_ADC) || defined(CONFIG_BFIN_SPI_ADC_MODULE) { .modalias = "bfin_spi_adc", /* Name of spi_driver for this device */ .max_speed_hz = 6250000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, /* Framework bus number */ .chip_select = 2, /* Framework chip select. 
*/ .platform_data = NULL, /* No spi_driver specific config */ .controller_data = &spi_adc_chip_info, }, #endif #if defined(CONFIG_SND_BLACKFIN_AD1836) || defined(CONFIG_SND_BLACKFIN_AD1836_MODULE) { .modalias = "ad1836", .max_speed_hz = 3125000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = CONFIG_SND_BLACKFIN_SPI_PFBIT, .controller_data = &ad1836_spi_chip_info, }, #endif #if defined(CONFIG_MMC_SPI) || defined(CONFIG_MMC_SPI_MODULE) { .modalias = "mmc_spi", .max_speed_hz = 20000000, /* max spi clock (SCK) speed in HZ */ .bus_num = 0, .chip_select = 1, .controller_data = &mmc_spi_chip_info, .mode = SPI_MODE_3, }, #endif }; /* SPI (0) */ static struct resource bfin_spi0_resource[] = { [0] = { .start = SPI0_REGBASE, .end = SPI0_REGBASE + 0xFF, .flags = IORESOURCE_MEM, }, [1] = { .start = CH_SPI, .end = CH_SPI, .flags = IORESOURCE_DMA, }, [2] = { .start = IRQ_SPI, .end = IRQ_SPI, .flags = IORESOURCE_IRQ, } }; /* SPI controller data */ static struct bfin5xx_spi_master bfin_spi0_info = { .num_chipselect = 8, .enable_dma = 1, /* master has the ability to do dma transfer */ .pin_req = {P_SPI0_SCK, P_SPI0_MISO, P_SPI0_MOSI, 0}, }; static struct platform_device bfin_spi0_device = { .name = "bfin-spi", .id = 0, /* Bus number */ .num_resources = ARRAY_SIZE(bfin_spi0_resource), .resource = bfin_spi0_resource, .dev = { .platform_data = &bfin_spi0_info, /* Passed to driver */ }, }; #endif /* spi master and devices */ #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) static struct platform_device rtc_device = { .name = "rtc-bfin", .id = -1, }; #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) #include <linux/smc91x.h> static struct smc91x_platdata smc91x_info = { .flags = SMC91X_USE_16BIT | SMC91X_NOWAIT, .leda = RPC_LED_100_10, .ledb = RPC_LED_TX_RX, }; static struct resource smc91x_resources[] = { { .start = 0x20200300, .end = 0x20200300 + 16, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF0, .end = IRQ_PF0, .flags = 
IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device smc91x_device = { .name = "smc91x", .id = 0, .num_resources = ARRAY_SIZE(smc91x_resources), .resource = smc91x_resources, .dev = { .platform_data = &smc91x_info, }, }; #endif #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) #include <linux/smsc911x.h> static struct resource smsc911x_resources[] = { { .name = "smsc911x-memory", .start = 0x20308000, .end = 0x20308000 + 0xFF, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF8, .end = IRQ_PF8, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWLEVEL, }, }; static struct smsc911x_platform_config smsc911x_config = { .flags = SMSC911X_USE_16BIT, .irq_polarity = SMSC911X_IRQ_POLARITY_ACTIVE_LOW, .irq_type = SMSC911X_IRQ_TYPE_OPEN_DRAIN, .phy_interface = PHY_INTERFACE_MODE_MII, }; static struct platform_device smsc911x_device = { .name = "smsc911x", .id = 0, .num_resources = ARRAY_SIZE(smsc911x_resources), .resource = smsc911x_resources, .dev = { .platform_data = &smsc911x_config, }, }; #endif #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) static struct resource bfin_uart_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, }; static struct platform_device bfin_uart_device = { .name = "bfin-uart", .id = 1, .num_resources = ARRAY_SIZE(bfin_uart_resources), .resource = bfin_uart_resources, }; #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 static struct resource bfin_sir0_resources[] = { { .start = 0xFFC00400, .end = 0xFFC004FF, .flags = IORESOURCE_MEM, }, { .start = IRQ_UART0_RX, .end = IRQ_UART0_RX+1, .flags = IORESOURCE_IRQ, }, { .start = CH_UART0_RX, .end = CH_UART0_RX+1, .flags = IORESOURCE_DMA, }, }; static struct platform_device bfin_sir0_device = { .name = "bfin_sir", .id = 0, .num_resources = ARRAY_SIZE(bfin_sir0_resources), .resource = bfin_sir0_resources, }; #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || 
defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) static struct platform_device bfin_sport0_uart_device = { .name = "bfin-sport-uart", .id = 0, }; static struct platform_device bfin_sport1_uart_device = { .name = "bfin-sport-uart", .id = 1, }; #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) static struct resource isp1362_hcd_resources[] = { { .start = 0x20308000, .end = 0x20308000, .flags = IORESOURCE_MEM, }, { .start = 0x20308004, .end = 0x20308004, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF4, .end = IRQ_PF4, .flags = IORESOURCE_IRQ, }, }; static struct isp1362_platform_data isp1362_priv = { .sel15Kres = 1, .clknotstop = 0, .oc_enable = 0, .int_act_high = 0, .int_edge_triggered = 0, .remote_wakeup_connected = 0, .no_power_switching = 1, .power_switching_mode = 0, }; static struct platform_device isp1362_hcd_device = { .name = "isp1362-hcd", .id = 0, .dev = { .platform_data = &isp1362_priv, }, .num_resources = ARRAY_SIZE(isp1362_hcd_resources), .resource = isp1362_hcd_resources, }; #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) static struct resource net2272_bfin_resources[] = { { .start = 0x20300000, .end = 0x20300000 + 0x100, .flags = IORESOURCE_MEM, }, { .start = IRQ_PF6, .end = IRQ_PF6, .flags = IORESOURCE_IRQ | IORESOURCE_IRQ_HIGHLEVEL, }, }; static struct platform_device net2272_bfin_device = { .name = "net2272", .id = -1, .num_resources = ARRAY_SIZE(net2272_bfin_resources), .resource = net2272_bfin_resources, }; #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) static struct mtd_partition para_partitions[] = { { .name = "bootloader(nor)", .size = 0x40000, .offset = 0, }, { .name = "linux+rootfs(nor)", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, }, }; static struct physmap_flash_data para_flash_data = { .width = 2, .parts = para_partitions, .nr_parts = ARRAY_SIZE(para_partitions), }; static struct resource para_flash_resource = { .start = 0x20000000, .end = 
0x201fffff, .flags = IORESOURCE_MEM, }; static struct platform_device para_flash_device = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &para_flash_data, }, .num_resources = 1, .resource = &para_flash_resource, }; #endif static const unsigned int cclk_vlev_datasheet[] = { VRPAIR(VLEV_085, 250000000), VRPAIR(VLEV_090, 376000000), VRPAIR(VLEV_095, 426000000), VRPAIR(VLEV_100, 426000000), VRPAIR(VLEV_105, 476000000), VRPAIR(VLEV_110, 476000000), VRPAIR(VLEV_115, 476000000), VRPAIR(VLEV_120, 600000000), VRPAIR(VLEV_125, 600000000), VRPAIR(VLEV_130, 600000000), }; static struct bfin_dpmc_platform_data bfin_dmpc_vreg_data = { .tuple_tab = cclk_vlev_datasheet, .tabsize = ARRAY_SIZE(cclk_vlev_datasheet), .vr_settling_time = 25 /* us */, }; static struct platform_device bfin_dpmc = { .name = "bfin dpmc", .dev = { .platform_data = &bfin_dmpc_vreg_data, }, }; static struct platform_device *cm_bf533_devices[] __initdata = { &bfin_dpmc, #if defined(CONFIG_SERIAL_BFIN) || defined(CONFIG_SERIAL_BFIN_MODULE) &bfin_uart_device, #endif #if defined(CONFIG_BFIN_SIR) || defined(CONFIG_BFIN_SIR_MODULE) #ifdef CONFIG_BFIN_SIR0 &bfin_sir0_device, #endif #endif #if defined(CONFIG_SERIAL_BFIN_SPORT) || defined(CONFIG_SERIAL_BFIN_SPORT_MODULE) &bfin_sport0_uart_device, &bfin_sport1_uart_device, #endif #if defined(CONFIG_RTC_DRV_BFIN) || defined(CONFIG_RTC_DRV_BFIN_MODULE) &rtc_device, #endif #if defined(CONFIG_USB_ISP1362_HCD) || defined(CONFIG_USB_ISP1362_HCD_MODULE) &isp1362_hcd_device, #endif #if defined(CONFIG_SMC91X) || defined(CONFIG_SMC91X_MODULE) &smc91x_device, #endif #if defined(CONFIG_SMSC911X) || defined(CONFIG_SMSC911X_MODULE) &smsc911x_device, #endif #if defined(CONFIG_USB_NET2272) || defined(CONFIG_USB_NET2272_MODULE) &net2272_bfin_device, #endif #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) &bfin_spi0_device, #endif #if defined(CONFIG_MTD_PHYSMAP) || defined(CONFIG_MTD_PHYSMAP_MODULE) &para_flash_device, #endif }; static int __init 
cm_bf533_init(void) { printk(KERN_INFO "%s(): registering device resources\n", __func__); platform_add_devices(cm_bf533_devices, ARRAY_SIZE(cm_bf533_devices)); #if defined(CONFIG_SPI_BFIN) || defined(CONFIG_SPI_BFIN_MODULE) spi_register_board_info(bfin_spi_board_info, ARRAY_SIZE(bfin_spi_board_info)); #endif return 0; } arch_initcall(cm_bf533_init);
gpl-2.0
eoghan2t9/HTC-Wildfire-S-Kernel
drivers/isdn/i4l/isdn_net.c
801
88470
/* $Id: isdn_net.c,v 1.1.2.2 2004/01/12 22:37:19 keil Exp $ * * Linux ISDN subsystem, network interfaces and related functions (linklevel). * * Copyright 1994-1998 by Fritz Elfert (fritz@isdn4linux.de) * Copyright 1995,96 by Thinking Objects Software GmbH Wuerzburg * Copyright 1995,96 by Michael Hipp (Michael.Hipp@student.uni-tuebingen.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * * Data Over Voice (DOV) support added - Guy Ellis 23-Mar-02 * guy@traverse.com.au * Outgoing calls - looks for a 'V' in first char of dialed number * Incoming calls - checks first character of eaz as follows: * Numeric - accept DATA only - original functionality * 'V' - accept VOICE (DOV) only * 'B' - accept BOTH DATA and DOV types * * Jan 2001: fix CISCO HDLC Bjoern A. Zeeb <i4l@zabbadoz.net> * for info on the protocol, see * http://i4l.zabbadoz.net/i4l/cisco-hdlc.txt */ #include <linux/isdn.h> #include <linux/slab.h> #include <net/arp.h> #include <net/dst.h> #include <net/pkt_sched.h> #include <linux/inetdevice.h> #include "isdn_common.h" #include "isdn_net.h" #ifdef CONFIG_ISDN_PPP #include "isdn_ppp.h" #endif #ifdef CONFIG_ISDN_X25 #include <linux/concap.h> #include "isdn_concap.h" #endif /* * Outline of new tbusy handling: * * Old method, roughly spoken, consisted of setting tbusy when entering * isdn_net_start_xmit() and at several other locations and clearing * it from isdn_net_start_xmit() thread when sending was successful. * * With 2.3.x multithreaded network core, to prevent problems, tbusy should * only be set by the isdn_net_start_xmit() thread and only when a tx-busy * condition is detected. Other threads (in particular isdn_net_stat_callb()) * are only allowed to clear tbusy. * * -HE */ /* * About SOFTNET: * Most of the changes were pretty obvious and basically done by HE already. 
* * One problem of the isdn net device code is that is uses struct net_device * for masters and slaves. However, only master interface are registered to * the network layer, and therefore, it only makes sense to call netif_* * functions on them. * * --KG */ /* * Find out if the netdevice has been ifup-ed yet. * For slaves, look at the corresponding master. */ static __inline__ int isdn_net_device_started(isdn_net_dev *n) { isdn_net_local *lp = n->local; struct net_device *dev; if (lp->master) dev = lp->master; else dev = n->dev; return netif_running(dev); } /* * wake up the network -> net_device queue. * For slaves, wake the corresponding master interface. */ static __inline__ void isdn_net_device_wake_queue(isdn_net_local *lp) { if (lp->master) netif_wake_queue(lp->master); else netif_wake_queue(lp->netdev->dev); } /* * stop the network -> net_device queue. * For slaves, stop the corresponding master interface. */ static __inline__ void isdn_net_device_stop_queue(isdn_net_local *lp) { if (lp->master) netif_stop_queue(lp->master); else netif_stop_queue(lp->netdev->dev); } /* * find out if the net_device which this lp belongs to (lp can be * master or slave) is busy. 
It's busy iff all (master and slave) * queues are busy */ static __inline__ int isdn_net_device_busy(isdn_net_local *lp) { isdn_net_local *nlp; isdn_net_dev *nd; unsigned long flags; if (!isdn_net_lp_busy(lp)) return 0; if (lp->master) nd = ISDN_MASTER_PRIV(lp)->netdev; else nd = lp->netdev; spin_lock_irqsave(&nd->queue_lock, flags); nlp = lp->next; while (nlp != lp) { if (!isdn_net_lp_busy(nlp)) { spin_unlock_irqrestore(&nd->queue_lock, flags); return 0; } nlp = nlp->next; } spin_unlock_irqrestore(&nd->queue_lock, flags); return 1; } static __inline__ void isdn_net_inc_frame_cnt(isdn_net_local *lp) { atomic_inc(&lp->frame_cnt); if (isdn_net_device_busy(lp)) isdn_net_device_stop_queue(lp); } static __inline__ void isdn_net_dec_frame_cnt(isdn_net_local *lp) { atomic_dec(&lp->frame_cnt); if (!(isdn_net_device_busy(lp))) { if (!skb_queue_empty(&lp->super_tx_queue)) { schedule_work(&lp->tqueue); } else { isdn_net_device_wake_queue(lp); } } } static __inline__ void isdn_net_zero_frame_cnt(isdn_net_local *lp) { atomic_set(&lp->frame_cnt, 0); } /* For 2.2.x we leave the transmitter busy timeout at 2 secs, just * to be safe. * For 2.3.x we push it up to 20 secs, because call establishment * (in particular callback) may take such a long time, and we * don't want confusing messages in the log. However, there is a slight * possibility that this large timeout will break other things like MPPP, * which might rely on the tx timeout. If so, we'll find out this way... 
*/ #define ISDN_NET_TX_TIMEOUT (20*HZ) /* Prototypes */ static int isdn_net_force_dial_lp(isdn_net_local *); static netdev_tx_t isdn_net_start_xmit(struct sk_buff *, struct net_device *); static void isdn_net_ciscohdlck_connected(isdn_net_local *lp); static void isdn_net_ciscohdlck_disconnected(isdn_net_local *lp); char *isdn_net_revision = "$Revision: 1.1.2.2 $"; /* * Code for raw-networking over ISDN */ static void isdn_net_unreachable(struct net_device *dev, struct sk_buff *skb, char *reason) { if(skb) { u_short proto = ntohs(skb->protocol); printk(KERN_DEBUG "isdn_net: %s: %s, signalling dst_link_failure %s\n", dev->name, (reason != NULL) ? reason : "unknown", (proto != ETH_P_IP) ? "Protocol != ETH_P_IP" : ""); dst_link_failure(skb); } else { /* dial not triggered by rawIP packet */ printk(KERN_DEBUG "isdn_net: %s: %s\n", dev->name, (reason != NULL) ? reason : "reason unknown"); } } static void isdn_net_reset(struct net_device *dev) { #ifdef CONFIG_ISDN_X25 struct concap_device_ops * dops = ((isdn_net_local *) netdev_priv(dev))->dops; struct concap_proto * cprot = ((isdn_net_local *) netdev_priv(dev))->netdev->cprot; #endif #ifdef CONFIG_ISDN_X25 if( cprot && cprot -> pops && dops ) cprot -> pops -> restart ( cprot, dev, dops ); #endif } /* Open/initialize the board. */ static int isdn_net_open(struct net_device *dev) { int i; struct net_device *p; struct in_device *in_dev; /* moved here from isdn_net_reset, because only the master has an interface associated which is supposed to be started. BTW: we need to call netif_start_queue, not netif_wake_queue here */ netif_start_queue(dev); isdn_net_reset(dev); /* Fill in the MAC-level header (not needed, but for compatibility... 
*/ for (i = 0; i < ETH_ALEN - sizeof(u32); i++) dev->dev_addr[i] = 0xfc; if ((in_dev = dev->ip_ptr) != NULL) { /* * Any address will do - we take the first */ struct in_ifaddr *ifa = in_dev->ifa_list; if (ifa != NULL) memcpy(dev->dev_addr+2, &ifa->ifa_local, 4); } /* If this interface has slaves, start them also */ p = MASTER_TO_SLAVE(dev); if (p) { while (p) { isdn_net_reset(p); p = MASTER_TO_SLAVE(p); } } isdn_lock_drivers(); return 0; } /* * Assign an ISDN-channel to a net-interface */ static void isdn_net_bind_channel(isdn_net_local * lp, int idx) { lp->flags |= ISDN_NET_CONNECTED; lp->isdn_device = dev->drvmap[idx]; lp->isdn_channel = dev->chanmap[idx]; dev->rx_netdev[idx] = lp->netdev; dev->st_netdev[idx] = lp->netdev; } /* * unbind a net-interface (resets interface after an error) */ static void isdn_net_unbind_channel(isdn_net_local * lp) { skb_queue_purge(&lp->super_tx_queue); if (!lp->master) { /* reset only master device */ /* Moral equivalent of dev_purge_queues(): BEWARE! This chunk of code cannot be called from hardware interrupt handler. I hope it is true. --ANK */ qdisc_reset_all_tx(lp->netdev->dev); } lp->dialstate = 0; dev->rx_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL; dev->st_netdev[isdn_dc2minor(lp->isdn_device, lp->isdn_channel)] = NULL; if (lp->isdn_device != -1 && lp->isdn_channel != -1) isdn_free_channel(lp->isdn_device, lp->isdn_channel, ISDN_USAGE_NET); lp->flags &= ~ISDN_NET_CONNECTED; lp->isdn_device = -1; lp->isdn_channel = -1; } /* * Perform auto-hangup and cps-calculation for net-interfaces. * * auto-hangup: * Increment idle-counter (this counter is reset on any incoming or * outgoing packet), if counter exceeds configured limit either do a * hangup immediately or - if configured - wait until just before the next * charge-info. 
* * cps-calculation (needed for dynamic channel-bundling): * Since this function is called every second, simply reset the * byte-counter of the interface after copying it to the cps-variable. */ static unsigned long last_jiffies = -HZ; void isdn_net_autohup(void) { isdn_net_dev *p = dev->netdev; int anymore; anymore = 0; while (p) { isdn_net_local *l = p->local; if (jiffies == last_jiffies) l->cps = l->transcount; else l->cps = (l->transcount * HZ) / (jiffies - last_jiffies); l->transcount = 0; if (dev->net_verbose > 3) printk(KERN_DEBUG "%s: %d bogocps\n", p->dev->name, l->cps); if ((l->flags & ISDN_NET_CONNECTED) && (!l->dialstate)) { anymore = 1; l->huptimer++; /* * if there is some dialmode where timeout-hangup * should _not_ be done, check for that here */ if ((l->onhtime) && (l->huptimer > l->onhtime)) { if (l->hupflags & ISDN_MANCHARGE && l->hupflags & ISDN_CHARGEHUP) { while (time_after(jiffies, l->chargetime + l->chargeint)) l->chargetime += l->chargeint; if (time_after(jiffies, l->chargetime + l->chargeint - 2 * HZ)) if (l->outgoing || l->hupflags & ISDN_INHUP) isdn_net_hangup(p->dev); } else if (l->outgoing) { if (l->hupflags & ISDN_CHARGEHUP) { if (l->hupflags & ISDN_WAITCHARGE) { printk(KERN_DEBUG "isdn_net: Hupflags of %s are %X\n", p->dev->name, l->hupflags); isdn_net_hangup(p->dev); } else if (time_after(jiffies, l->chargetime + l->chargeint)) { printk(KERN_DEBUG "isdn_net: %s: chtime = %lu, chint = %d\n", p->dev->name, l->chargetime, l->chargeint); isdn_net_hangup(p->dev); } } else isdn_net_hangup(p->dev); } else if (l->hupflags & ISDN_INHUP) isdn_net_hangup(p->dev); } if(dev->global_flags & ISDN_GLOBAL_STOPPED || (ISDN_NET_DIALMODE(*l) == ISDN_NET_DM_OFF)) { isdn_net_hangup(p->dev); break; } } p = (isdn_net_dev *) p->next; } last_jiffies = jiffies; isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, anymore); } static void isdn_net_lp_disconnected(isdn_net_local *lp) { isdn_net_rm_from_bundle(lp); } /* * Handle status-messages from ISDN-interfacecard. 
* This function is called from within the main-status-dispatcher * isdn_status_callback, which itself is called from the low-level driver. * Return: 1 = Event handled, 0 = not for us or unknown Event. */ int isdn_net_stat_callback(int idx, isdn_ctrl *c) { isdn_net_dev *p = dev->st_netdev[idx]; int cmd = c->command; if (p) { isdn_net_local *lp = p->local; #ifdef CONFIG_ISDN_X25 struct concap_proto *cprot = lp->netdev->cprot; struct concap_proto_ops *pops = cprot ? cprot->pops : NULL; #endif switch (cmd) { case ISDN_STAT_BSENT: /* A packet has successfully been sent out */ if ((lp->flags & ISDN_NET_CONNECTED) && (!lp->dialstate)) { isdn_net_dec_frame_cnt(lp); lp->stats.tx_packets++; lp->stats.tx_bytes += c->parm.length; } return 1; case ISDN_STAT_DCONN: /* D-Channel is up */ switch (lp->dialstate) { case 4: case 7: case 8: lp->dialstate++; return 1; case 12: lp->dialstate = 5; return 1; } break; case ISDN_STAT_DHUP: /* Either D-Channel-hangup or error during dialout */ #ifdef CONFIG_ISDN_X25 /* If we are not connencted then dialing had failed. 
If there are generic encap protocol receiver routines signal the closure of the link*/ if( !(lp->flags & ISDN_NET_CONNECTED) && pops && pops -> disconn_ind ) pops -> disconn_ind(cprot); #endif /* CONFIG_ISDN_X25 */ if ((!lp->dialstate) && (lp->flags & ISDN_NET_CONNECTED)) { if (lp->p_encap == ISDN_NET_ENCAP_CISCOHDLCK) isdn_net_ciscohdlck_disconnected(lp); #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) isdn_ppp_free(lp); #endif isdn_net_lp_disconnected(lp); isdn_all_eaz(lp->isdn_device, lp->isdn_channel); printk(KERN_INFO "%s: remote hangup\n", p->dev->name); printk(KERN_INFO "%s: Chargesum is %d\n", p->dev->name, lp->charge); isdn_net_unbind_channel(lp); return 1; } break; #ifdef CONFIG_ISDN_X25 case ISDN_STAT_BHUP: /* B-Channel-hangup */ /* try if there are generic encap protocol receiver routines and signal the closure of the link */ if( pops && pops -> disconn_ind ){ pops -> disconn_ind(cprot); return 1; } break; #endif /* CONFIG_ISDN_X25 */ case ISDN_STAT_BCONN: /* B-Channel is up */ isdn_net_zero_frame_cnt(lp); switch (lp->dialstate) { case 5: case 6: case 7: case 8: case 9: case 10: case 12: if (lp->dialstate <= 6) { dev->usage[idx] |= ISDN_USAGE_OUTGOING; isdn_info_update(); } else dev->rx_netdev[idx] = p; lp->dialstate = 0; isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 1); if (lp->p_encap == ISDN_NET_ENCAP_CISCOHDLCK) isdn_net_ciscohdlck_connected(lp); if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP) { if (lp->master) { /* is lp a slave? */ isdn_net_dev *nd = ISDN_MASTER_PRIV(lp)->netdev; isdn_net_add_to_bundle(nd, lp); } } printk(KERN_INFO "isdn_net: %s connected\n", p->dev->name); /* If first Chargeinfo comes before B-Channel connect, * we correct the timestamp here. 
*/ lp->chargetime = jiffies; /* reset dial-timeout */ lp->dialstarted = 0; lp->dialwait_timer = 0; #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) isdn_ppp_wakeup_daemon(lp); #endif #ifdef CONFIG_ISDN_X25 /* try if there are generic concap receiver routines */ if( pops ) if( pops->connect_ind) pops->connect_ind(cprot); #endif /* CONFIG_ISDN_X25 */ /* ppp needs to do negotiations first */ if (lp->p_encap != ISDN_NET_ENCAP_SYNCPPP) isdn_net_device_wake_queue(lp); return 1; } break; case ISDN_STAT_NODCH: /* No D-Channel avail. */ if (lp->dialstate == 4) { lp->dialstate--; return 1; } break; case ISDN_STAT_CINF: /* Charge-info from TelCo. Calculate interval between * charge-infos and set timestamp for last info for * usage by isdn_net_autohup() */ lp->charge++; if (lp->hupflags & ISDN_HAVECHARGE) { lp->hupflags &= ~ISDN_WAITCHARGE; lp->chargeint = jiffies - lp->chargetime - (2 * HZ); } if (lp->hupflags & ISDN_WAITCHARGE) lp->hupflags |= ISDN_HAVECHARGE; lp->chargetime = jiffies; printk(KERN_DEBUG "isdn_net: Got CINF chargetime of %s now %lu\n", p->dev->name, lp->chargetime); return 1; } } return 0; } /* * Perform dialout for net-interfaces and timeout-handling for * D-Channel-up and B-Channel-up Messages. * This function is initially called from within isdn_net_start_xmit() or * or isdn_net_find_icall() after initializing the dialstate for an * interface. If further calls are needed, the function schedules itself * for a timer-callback via isdn_timer_function(). * The dialstate is also affected by incoming status-messages from * the ISDN-Channel which are handled in isdn_net_stat_callback() above. 
 */
/*
 * Master dial/timeout state machine for all ISDN net interfaces.
 *
 * Walks the global interface chain (dev->netdev) once and advances each
 * interface's lp->dialstate.  Re-arms itself via isdn_timer_ctrl() for as
 * long as at least one interface still needs servicing (anymore != 0).
 * Incoming driver status messages advance dialstate elsewhere (see
 * isdn_net_stat_callback() above).
 */
void
isdn_net_dial(void)
{
	isdn_net_dev *p = dev->netdev;
	int anymore = 0;	/* nonzero: keep the NETDIAL timer running */
	int i;
	isdn_ctrl cmd;
	u_char *phone_number;

	while (p) {
		isdn_net_local *lp = p->local;

#ifdef ISDN_DEBUG_NET_DIAL
		if (lp->dialstate)
			printk(KERN_DEBUG "%s: dialstate=%d\n",
			       p->dev->name, lp->dialstate);
#endif
		switch (lp->dialstate) {
			case 0:
				/* Nothing to do for this interface */
				break;
			case 1:
				/* Initiate dialout. Set phone-number-pointer to first number
				 * of interface.
				 */
				lp->dial = lp->phone[1];
				if (!lp->dial) {
					printk(KERN_WARNING "%s: phone number deleted?\n",
					       p->dev->name);
					isdn_net_hangup(p->dev);
					break;
				}
				anymore = 1;

				if (lp->dialtimeout > 0)
					/* (re)open the dial-timeout window if the old one expired */
					if (lp->dialstarted == 0 ||
					    time_after(jiffies, lp->dialstarted + lp->dialtimeout + lp->dialwait)) {
						lp->dialstarted = jiffies;
						lp->dialwait_timer = 0;
					}

				lp->dialstate++;
				/* Fall through */
			case 2:
				/* Prepare dialing. Clear EAZ, then set EAZ. */
				cmd.driver = lp->isdn_device;
				cmd.arg = lp->isdn_channel;
				cmd.command = ISDN_CMD_CLREAZ;
				isdn_command(&cmd);
				sprintf(cmd.parm.num, "%s",
					isdn_map_eaz2msn(lp->msn, cmd.driver));
				cmd.command = ISDN_CMD_SETEAZ;
				isdn_command(&cmd);
				lp->dialretry = 0;
				anymore = 1;
				lp->dialstate++;
				/* Fall through */
			case 3:
				/* Setup interface, dial current phone-number, switch to next number.
				 * If list of phone-numbers is exhausted, increment
				 * retry-counter.
				 */
				if (dev->global_flags & ISDN_GLOBAL_STOPPED ||
				    (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF)) {
					char *s;
					if (dev->global_flags & ISDN_GLOBAL_STOPPED)
						s = "dial suppressed: isdn system stopped";
					else
						s = "dial suppressed: dialmode `off'";
					isdn_net_unreachable(p->dev, NULL, s);
					isdn_net_hangup(p->dev);
					break;
				}
				/* program layer-2/layer-3 protocols before dialing */
				cmd.driver = lp->isdn_device;
				cmd.command = ISDN_CMD_SETL2;
				cmd.arg = lp->isdn_channel + (lp->l2_proto << 8);
				isdn_command(&cmd);
				cmd.driver = lp->isdn_device;
				cmd.command = ISDN_CMD_SETL3;
				cmd.arg = lp->isdn_channel + (lp->l3_proto << 8);
				isdn_command(&cmd);
				cmd.driver = lp->isdn_device;
				cmd.arg = lp->isdn_channel;
				if (!lp->dial) {
					printk(KERN_WARNING "%s: phone number deleted?\n",
					       p->dev->name);
					isdn_net_hangup(p->dev);
					break;
				}
				if (!strncmp(lp->dial->num, "LEASED", strlen("LEASED"))) {
					/* leased lines need no dialing, go wait for D-connect */
					lp->dialstate = 4;
					printk(KERN_INFO "%s: Open leased line ...\n",
					       p->dev->name);
				} else {
					if (lp->dialtimeout > 0)
						if (time_after(jiffies, lp->dialstarted + lp->dialtimeout)) {
							lp->dialwait_timer = jiffies + lp->dialwait;
							lp->dialstarted = 0;
							isdn_net_unreachable(p->dev, NULL,
									     "dial: timed out");
							isdn_net_hangup(p->dev);
							break;
						}

					cmd.driver = lp->isdn_device;
					cmd.command = ISDN_CMD_DIAL;
					cmd.parm.setup.si2 = 0;

					/* check for DOV (data over voice): numbers starting
					 * with 'v'/'V' dial with service indicator 1 */
					phone_number = lp->dial->num;
					if ((*phone_number == 'v') ||
					    (*phone_number == 'V')) {	/* DOV call */
						cmd.parm.setup.si1 = 1;
					} else {	/* DATA call */
						cmd.parm.setup.si1 = 7;
					}

					strcpy(cmd.parm.setup.phone, phone_number);
					/*
					 * Switch to next number or back to start if at end of list.
					 */
					if (!(lp->dial = (isdn_net_phone *) lp->dial->next)) {
						lp->dial = lp->phone[1];
						lp->dialretry++;

						if (lp->dialretry > lp->dialmax) {
							if (lp->dialtimeout == 0) {
								lp->dialwait_timer = jiffies + lp->dialwait;
								lp->dialstarted = 0;
								isdn_net_unreachable(p->dev, NULL,
										     "dial: tried all numbers dialmax times");
							}
							isdn_net_hangup(p->dev);
							break;
						}
					}
					sprintf(cmd.parm.setup.eazmsn, "%s",
						isdn_map_eaz2msn(lp->msn, cmd.driver));
					i = isdn_dc2minor(lp->isdn_device, lp->isdn_channel);
					if (i >= 0) {
						/* record outgoing call in global channel table */
						strcpy(dev->num[i], cmd.parm.setup.phone);
						dev->usage[i] |= ISDN_USAGE_OUTGOING;
						isdn_info_update();
					}
					printk(KERN_INFO "%s: dialing %d %s... %s\n",
					       p->dev->name, lp->dialretry,
					       cmd.parm.setup.phone,
					       (cmd.parm.setup.si1 == 1) ? "DOV" : "");
					lp->dtimer = 0;
#ifdef ISDN_DEBUG_NET_DIAL
					printk(KERN_DEBUG "dial: d=%d c=%d\n",
					       lp->isdn_device, lp->isdn_channel);
#endif
					isdn_command(&cmd);
				}
				lp->huptimer = 0;
				lp->outgoing = 1;
				/* initialize charge-info bookkeeping for auto-hangup */
				if (lp->chargeint) {
					lp->hupflags |= ISDN_HAVECHARGE;
					lp->hupflags &= ~ISDN_WAITCHARGE;
				} else {
					lp->hupflags |= ISDN_WAITCHARGE;
					lp->hupflags &= ~ISDN_HAVECHARGE;
				}
				anymore = 1;
				/* expect a callback from the peer? then go to state 12 */
				lp->dialstate =
				    (lp->cbdelay &&
				     (lp->flags & ISDN_NET_CBOUT)) ? 12 : 4;
				break;
			case 4:
				/* Wait for D-Channel-connect.
				 * If timeout, switch back to state 3.
				 * Dialmax-handling moved to state 3.
				 */
				if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10)
					lp->dialstate = 3;
				anymore = 1;
				break;
			case 5:
				/* Got D-Channel-Connect, send B-Channel-request */
				cmd.driver = lp->isdn_device;
				cmd.arg = lp->isdn_channel;
				cmd.command = ISDN_CMD_ACCEPTB;
				anymore = 1;
				lp->dtimer = 0;
				lp->dialstate++;
				isdn_command(&cmd);
				break;
			case 6:
				/* Wait for B- or D-Channel-connect. If timeout,
				 * switch back to state 3.
				 */
#ifdef ISDN_DEBUG_NET_DIAL
				printk(KERN_DEBUG "dialtimer2: %d\n", lp->dtimer);
#endif
				if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10)
					lp->dialstate = 3;
				anymore = 1;
				break;
			case 7:
				/* Got incoming Call, setup L2 and L3 protocols,
				 * then wait for D-Channel-connect
				 */
#ifdef ISDN_DEBUG_NET_DIAL
				printk(KERN_DEBUG "dialtimer4: %d\n", lp->dtimer);
#endif
				cmd.driver = lp->isdn_device;
				cmd.command = ISDN_CMD_SETL2;
				cmd.arg = lp->isdn_channel + (lp->l2_proto << 8);
				isdn_command(&cmd);
				cmd.driver = lp->isdn_device;
				cmd.command = ISDN_CMD_SETL3;
				cmd.arg = lp->isdn_channel + (lp->l3_proto << 8);
				isdn_command(&cmd);
				if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT15)
					isdn_net_hangup(p->dev);
				else {
					anymore = 1;
					lp->dialstate++;
				}
				break;
			case 9:
				/* Got incoming D-Channel-Connect, send B-Channel-request */
				cmd.driver = lp->isdn_device;
				cmd.arg = lp->isdn_channel;
				cmd.command = ISDN_CMD_ACCEPTB;
				isdn_command(&cmd);
				anymore = 1;
				lp->dtimer = 0;
				lp->dialstate++;
				break;
			case 8:
			case 10:
				/* Wait for B- or D-channel-connect */
#ifdef ISDN_DEBUG_NET_DIAL
				printk(KERN_DEBUG "dialtimer4: %d\n", lp->dtimer);
#endif
				if (lp->dtimer++ > ISDN_TIMER_DTIMEOUT10)
					isdn_net_hangup(p->dev);
				else
					anymore = 1;
				break;
			case 11:
				/* Callback Delay */
				if (lp->dtimer++ > lp->cbdelay)
					lp->dialstate = 1;
				anymore = 1;
				break;
			case 12:
				/* Remote does callback. Hangup after cbdelay, then wait for incoming
				 * call (in state 4).
				 */
				if (lp->dtimer++ > lp->cbdelay)
				{
					printk(KERN_INFO "%s: hangup waiting for callback ...\n",
					       p->dev->name);
					lp->dtimer = 0;
					lp->dialstate = 4;
					cmd.driver = lp->isdn_device;
					cmd.command = ISDN_CMD_HANGUP;
					cmd.arg = lp->isdn_channel;
					isdn_command(&cmd);
					isdn_all_eaz(lp->isdn_device, lp->isdn_channel);
				}
				anymore = 1;
				break;
			default:
				printk(KERN_WARNING "isdn_net: Illegal dialstate %d for device %s\n",
				       lp->dialstate, p->dev->name);
		}
		p = (isdn_net_dev *) p->next;
	}
	isdn_timer_ctrl(ISDN_TIMER_NETDIAL, anymore);
}

/*
 * Perform hangup for a net-interface.
 */
/*
 * Hang up the connection of one net-interface: first hang up any connected
 * slave interfaces, notify the encapsulation layers (syncPPP / X.25),
 * issue ISDN_CMD_HANGUP to the low-level driver and finally unbind the
 * ISDN channel from the interface.  May recurse into itself for slaves.
 */
void
isdn_net_hangup(struct net_device *d)
{
	isdn_net_local *lp = (isdn_net_local *) netdev_priv(d);
	isdn_ctrl cmd;
#ifdef CONFIG_ISDN_X25
	struct concap_proto *cprot = lp->netdev->cprot;
	struct concap_proto_ops *pops = cprot ? cprot->pops : NULL;
#endif

	if (lp->flags & ISDN_NET_CONNECTED) {
		if (lp->slave != NULL) {
			/* bundled slave channels must go down before the master */
			isdn_net_local *slp = ISDN_SLAVE_PRIV(lp);
			if (slp->flags & ISDN_NET_CONNECTED) {
				printk(KERN_INFO
				       "isdn_net: hang up slave %s before %s\n",
				       lp->slave->name, d->name);
				isdn_net_hangup(lp->slave);
			}
		}
		printk(KERN_INFO "isdn_net: local hangup %s\n", d->name);
#ifdef CONFIG_ISDN_PPP
		if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
			isdn_ppp_free(lp);
#endif
		isdn_net_lp_disconnected(lp);
#ifdef CONFIG_ISDN_X25
		/* try if there are generic encap protocol
		   receiver routines and signal the closure of
		   the link */
		if( pops && pops -> disconn_ind )
			pops -> disconn_ind(cprot);
#endif /* CONFIG_ISDN_X25 */

		cmd.driver = lp->isdn_device;
		cmd.command = ISDN_CMD_HANGUP;
		cmd.arg = lp->isdn_channel;
		isdn_command(&cmd);
		printk(KERN_INFO "%s: Chargesum is %d\n", d->name, lp->charge);
		isdn_all_eaz(lp->isdn_device, lp->isdn_channel);
	}
	isdn_net_unbind_channel(lp);
}

/* source/destination port pair as laid out at the start of TCP/UDP headers */
typedef struct {
	__be16 source;
	__be16 dest;
} ip_ports;

/*
 * Log a short summary of the packet that triggered dialing: protocol,
 * source/destination IPv4 addresses and, for TCP/UDP, the port numbers.
 * If skb->network_header looks bogus, fall back to locating the network
 * header manually based on the interface's encapsulation.
 */
static void
isdn_net_log_skb(struct sk_buff * skb, isdn_net_local * lp)
{
	/* hopefully, this was set correctly */
	const u_char *p = skb_network_header(skb);
	unsigned short proto = ntohs(skb->protocol);
	int data_ofs;
	ip_ports *ipp;
	char addinfo[100];

	addinfo[0] = '\0';
	/* This check stolen from 2.1.72 dev_queue_xmit_nit() */
	if (p < skb->data || skb->network_header >= skb->tail) {
		/* fall back to old isdn_net_log_packet method() */
		char * buf = skb->data;

		printk(KERN_DEBUG "isdn_net: protocol %04x is buggy, dev %s\n",
		       skb->protocol, lp->netdev->dev->name);
		p = buf;
		proto = ETH_P_IP;
		switch (lp->p_encap) {
			case ISDN_NET_ENCAP_IPTYP:
				proto = ntohs(*(__be16 *)&buf[0]);
				p = &buf[2];
				break;
			case ISDN_NET_ENCAP_ETHER:
				proto = ntohs(*(__be16 *)&buf[12]);
				p = &buf[14];
				break;
			case ISDN_NET_ENCAP_CISCOHDLC:
				proto = ntohs(*(__be16 *)&buf[2]);
				p = &buf[4];
				break;
#ifdef CONFIG_ISDN_PPP
			case ISDN_NET_ENCAP_SYNCPPP:
				proto = ntohs(skb->protocol);
				p = &buf[IPPP_MAX_HEADER];
				break;
#endif
		}
	}
	/* IP header length in bytes (IHL field * 4) */
	data_ofs = ((p[0] & 15) * 4);
	switch (proto) {
		case ETH_P_IP:
			/* p[9] is the IP protocol field */
			switch (p[9]) {
				case 1:
					strcpy(addinfo, " ICMP");
					break;
				case 2:
					strcpy(addinfo, " IGMP");
					break;
				case 4:
					strcpy(addinfo, " IPIP");
					break;
				case 6:
					ipp = (ip_ports *) (&p[data_ofs]);
					sprintf(addinfo, " TCP, port: %d -> %d",
						ntohs(ipp->source),
						ntohs(ipp->dest));
					break;
				case 8:
					strcpy(addinfo, " EGP");
					break;
				case 12:
					strcpy(addinfo, " PUP");
					break;
				case 17:
					ipp = (ip_ports *) (&p[data_ofs]);
					sprintf(addinfo, " UDP, port: %d -> %d",
						ntohs(ipp->source),
						ntohs(ipp->dest));
					break;
				case 22:
					strcpy(addinfo, " IDP");
					break;
			}
			printk(KERN_INFO "OPEN: %pI4 -> %pI4%s\n",
			       p + 12, p + 16, addinfo);
			break;
		case ETH_P_ARP:
			printk(KERN_INFO "OPEN: ARP %pI4 -> *.*.*.* ?%pI4\n",
			       p + 14, p + 24);
			break;
	}
}

/*
 * this function is used to send supervisory data, i.e. data which was
 * not received from the network layer, but e.g. frames from ipppd, CCP
 * reset frames etc.
*/ void isdn_net_write_super(isdn_net_local *lp, struct sk_buff *skb) { if (in_irq()) { // we can't grab the lock from irq context, // so we just queue the packet skb_queue_tail(&lp->super_tx_queue, skb); schedule_work(&lp->tqueue); return; } spin_lock_bh(&lp->xmit_lock); if (!isdn_net_lp_busy(lp)) { isdn_net_writebuf_skb(lp, skb); } else { skb_queue_tail(&lp->super_tx_queue, skb); } spin_unlock_bh(&lp->xmit_lock); } /* * called from tq_immediate */ static void isdn_net_softint(struct work_struct *work) { isdn_net_local *lp = container_of(work, isdn_net_local, tqueue); struct sk_buff *skb; spin_lock_bh(&lp->xmit_lock); while (!isdn_net_lp_busy(lp)) { skb = skb_dequeue(&lp->super_tx_queue); if (!skb) break; isdn_net_writebuf_skb(lp, skb); } spin_unlock_bh(&lp->xmit_lock); } /* * all frames sent from the (net) LL to a HL driver should go via this function * it's serialized by the caller holding the lp->xmit_lock spinlock */ void isdn_net_writebuf_skb(isdn_net_local *lp, struct sk_buff *skb) { int ret; int len = skb->len; /* save len */ /* before obtaining the lock the caller should have checked that the lp isn't busy */ if (isdn_net_lp_busy(lp)) { printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__); goto error; } if (!(lp->flags & ISDN_NET_CONNECTED)) { printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__); goto error; } ret = isdn_writebuf_skb_stub(lp->isdn_device, lp->isdn_channel, 1, skb); if (ret != len) { /* we should never get here */ printk(KERN_WARNING "%s: HL driver queue full\n", lp->netdev->dev->name); goto error; } lp->transcount += len; isdn_net_inc_frame_cnt(lp); return; error: dev_kfree_skb(skb); lp->stats.tx_errors++; } /* * Helper function for isdn_net_start_xmit. * When called, the connection is already established. * Based on cps-calculation, check if device is overloaded. * If so, and if a slave exists, trigger dialing for it. * If any slave is online, deliver packets using a simple round robin * scheme. * * Return: 0 on success, !0 on failure. 
 */
static int
isdn_net_xmit(struct net_device *ndev, struct sk_buff *skb)
{
	isdn_net_dev *nd;
	isdn_net_local *slp;
	isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
	int retv = NETDEV_TX_OK;

	/* must never be called on a slave device directly */
	if (((isdn_net_local *) netdev_priv(ndev))->master) {
		printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	/* For the other encaps the header has already been built */
#ifdef CONFIG_ISDN_PPP
	if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) {
		return isdn_ppp_xmit(skb, ndev);
	}
#endif
	nd = ((isdn_net_local *) netdev_priv(ndev))->netdev;
	/* returns with lp->xmit_lock held on success */
	lp = isdn_net_get_locked_lp(nd);
	if (!lp) {
		printk(KERN_WARNING "%s: all channels busy - requeuing!\n",
		       ndev->name);
		return NETDEV_TX_BUSY;
	}
	/* we have our lp locked from now on */

	/* Reset hangup-timeout */
	lp->huptimer = 0; // FIXME?
	isdn_net_writebuf_skb(lp, skb);
	spin_unlock_bh(&lp->xmit_lock);

	/* the following stuff is here for backwards compatibility.
	 * in future, start-up and hangup of slaves (based on current load)
	 * should move to userspace and get based on an overall cps
	 * calculation
	 */
	if (lp->cps > lp->triggercps) {
		if (lp->slave) {
			if (!lp->sqfull) {
				/* First time overload: set timestamp only */
				lp->sqfull = 1;
				lp->sqfull_stamp = jiffies;
			} else {
				/* subsequent overload: if slavedelay exceeded, start dialing */
				if (time_after(jiffies, lp->sqfull_stamp + lp->slavedelay)) {
					slp = ISDN_SLAVE_PRIV(lp);
					if (!(slp->flags & ISDN_NET_CONNECTED)) {
						isdn_net_force_dial_lp(ISDN_SLAVE_PRIV(lp));
					}
				}
			}
		}
	} else {
		if (lp->sqfull &&
		    time_after(jiffies, lp->sqfull_stamp + lp->slavedelay + (10 * HZ))) {
			lp->sqfull = 0;
		}
		/* this is a hack to allow auto-hangup for slaves on moderate loads */
		nd->queue = nd->local;
	}

	return retv;
}

/* For ETHER encapsulation, strip any junk that has accumulated in front
 * of the Ethernet header (network offset beyond ETH_HLEN). */
static void
isdn_net_adjust_hdr(struct sk_buff *skb, struct net_device *dev)
{
	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
	if (!skb)
		return;
	if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
		const int pullsize = skb_network_offset(skb) - ETH_HLEN;
		if (pullsize > 0) {
			printk(KERN_DEBUG "isdn_net: Pull junk %d\n", pullsize);
			skb_pull(skb, pullsize);
		}
	}
}

/* Transmit watchdog: count an error while idle and always wake the queue
 * so the upper layer retries. */
static void isdn_net_tx_timeout(struct net_device * ndev)
{
	isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);

	printk(KERN_WARNING "isdn_tx_timeout dev %s dialstate %d\n",
	       ndev->name, lp->dialstate);
	if (!lp->dialstate){
		lp->stats.tx_errors++;
		/*
		 * There is a certain probability that this currently
		 * works at all because if we always wake up the interface,
		 * then upper layer will try to send the next packet
		 * immediately. And then, the old clean_up logic in the
		 * driver will hopefully continue to work as it used to do.
		 *
		 * This is rather primitive right now, we better should
		 * clean internal queues here, in particular for multilink and
		 * ppp, and reset HL driver's channel, too.   --HE
		 *
		 * actually, this may not matter at all, because ISDN hardware
		 * should not see transmitter hangs at all IMO
		 * changed KERN_DEBUG to KERN_WARNING to find out if this is
		 * ever called   --KG
		 */
	}
	ndev->trans_start = jiffies;
	netif_wake_queue(ndev);
}

/*
 * Try sending a packet.
 * If this interface isn't connected to a ISDN-Channel, find a free channel,
 * and start dialing.
 */
static netdev_tx_t
isdn_net_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
#ifdef CONFIG_ISDN_X25
	struct concap_proto * cprot = lp -> netdev -> cprot;
/* At this point hard_start_xmit() passes control to the encapsulation
   protocol (if present).
   For X.25 auto-dialing is completely bypassed because:
   - It does not conform with the semantics of a reliable datalink
     service as needed by X.25 PLP.
   - I don't want that the interface starts dialing when the network layer
     sends a message which requests to disconnect the lapb link (or if it
     sends any other message not resulting in data transmission).
   Instead, dialing will be initiated by the encapsulation protocol entity
   when a dl_establish request is received from the upper layer.
*/
	if (cprot && cprot -> pops) {
		int ret = cprot -> pops -> encap_and_xmit ( cprot , skb);

		if (ret)
			netif_stop_queue(ndev);
		return ret;
	} else
#endif
	/* auto-dialing xmit function */
	{
#ifdef ISDN_DEBUG_NET_DUMP
		u_char *buf;
#endif
		isdn_net_adjust_hdr(skb, ndev);
#ifdef ISDN_DEBUG_NET_DUMP
		buf = skb->data;
		isdn_dumppkt("S:", buf, skb->len, 40);
#endif

		if (!(lp->flags & ISDN_NET_CONNECTED)) {
			int chi;
			/* only do autodial if allowed by config */
			if (!(ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_AUTO)) {
				isdn_net_unreachable(ndev, skb, "dial rejected: interface not in dialmode `auto'");
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
			if (lp->phone[1]) {
				ulong flags;

				/* enforce a retry-wait period after a failed dial attempt */
				if(lp->dialwait_timer <= 0)
					if(lp->dialstarted > 0 && lp->dialtimeout > 0 &&
					   time_before(jiffies, lp->dialstarted + lp->dialtimeout + lp->dialwait))
						lp->dialwait_timer = lp->dialstarted + lp->dialtimeout + lp->dialwait;

				if(lp->dialwait_timer > 0) {
					if(time_before(jiffies, lp->dialwait_timer)) {
						isdn_net_unreachable(ndev, skb, "dial rejected: retry-time not reached");
						dev_kfree_skb(skb);
						return NETDEV_TX_OK;
					} else
						lp->dialwait_timer = 0;
				}
				/* Grab a free ISDN-Channel */
				spin_lock_irqsave(&dev->lock, flags);
				/* second attempt tries the sibling channel (pre_channel^1) */
				if (((chi =
				      isdn_get_free_channel(
							ISDN_USAGE_NET,
							lp->l2_proto,
							lp->l3_proto,
							lp->pre_device,
							lp->pre_channel,
							lp->msn)
							) < 0) &&
				    ((chi =
				      isdn_get_free_channel(
							ISDN_USAGE_NET,
							lp->l2_proto,
							lp->l3_proto,
							lp->pre_device,
							lp->pre_channel^1,
							lp->msn)
							) < 0)) {
					spin_unlock_irqrestore(&dev->lock, flags);
					isdn_net_unreachable(ndev, skb,
							     "No channel");
					dev_kfree_skb(skb);
					return NETDEV_TX_OK;
				}
				/* Log packet, which triggered dialing */
				if (dev->net_verbose)
					isdn_net_log_skb(skb, lp);
				lp->dialstate = 1;
				/* Connect interface with channel */
				isdn_net_bind_channel(lp, chi);
#ifdef CONFIG_ISDN_PPP
				if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) {
					/* no 'first_skb' handling for syncPPP */
					if (isdn_ppp_bind(lp) < 0) {
						dev_kfree_skb(skb);
						isdn_net_unbind_channel(lp);
						spin_unlock_irqrestore(&dev->lock, flags);
						return NETDEV_TX_OK;	/* STN (skb to nirvana) ;) */
					}
#ifdef CONFIG_IPPP_FILTER
					if (isdn_ppp_autodial_filter(skb, lp)) {
						isdn_ppp_free(lp);
						isdn_net_unbind_channel(lp);
						spin_unlock_irqrestore(&dev->lock, flags);
						isdn_net_unreachable(ndev, skb, "dial rejected: packet filtered");
						dev_kfree_skb(skb);
						return NETDEV_TX_OK;
					}
#endif
					spin_unlock_irqrestore(&dev->lock, flags);
					isdn_net_dial();	/* Initiate dialing */
					netif_stop_queue(ndev);
					return NETDEV_TX_BUSY;	/* let upper layer requeue skb packet */
				}
#endif
				/* Initiate dialing */
				spin_unlock_irqrestore(&dev->lock, flags);
				isdn_net_dial();
				isdn_net_device_stop_queue(lp);
				return NETDEV_TX_BUSY;
			} else {
				isdn_net_unreachable(ndev, skb,
						     "No phone number");
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		} else {
			/* Device is connected to an ISDN channel */
			ndev->trans_start = jiffies;
			if (!lp->dialstate) {
				/* ISDN connection is established, try sending */
				int ret;
				ret = (isdn_net_xmit(ndev, skb));
				if(ret) netif_stop_queue(ndev);
				return ret;
			} else
				/* still dialing: hold the queue until connected */
				netif_stop_queue(ndev);
		}
	}
	return NETDEV_TX_BUSY;
}

/*
 * Shutdown a net-interface.
 */
/* Bring the interface down: close X.25 encap (if any), stop the queue,
 * hang up all slaves and then the interface itself. */
static int
isdn_net_close(struct net_device *dev)
{
	struct net_device *p;
#ifdef CONFIG_ISDN_X25
	struct concap_proto * cprot =
		((isdn_net_local *) netdev_priv(dev))->netdev->cprot;
	/* printk(KERN_DEBUG "isdn_net_close %s\n" , dev-> name ); */
#endif

#ifdef CONFIG_ISDN_X25
	if( cprot && cprot -> pops ) cprot -> pops -> close( cprot );
#endif
	netif_stop_queue(dev);
	p = MASTER_TO_SLAVE(dev);
	if (p) {
		/* If this interface has slaves, stop them also */
		while (p) {
#ifdef CONFIG_ISDN_X25
			cprot = ((isdn_net_local *) netdev_priv(p))
				-> netdev -> cprot;
			if( cprot && cprot -> pops )
				cprot -> pops -> close( cprot );
#endif
			isdn_net_hangup(p);
			p = MASTER_TO_SLAVE(p);
		}
	}
	isdn_net_hangup(dev);
	isdn_unlock_drivers();
	return 0;
}

/*
 * Get statistics
 */
static struct net_device_stats *
isdn_net_get_stats(struct net_device *dev)
{
	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
	return &lp->stats;
}

/* This is simply a copy from std. eth.c EXCEPT we pull ETH_HLEN
 * instead of dev->hard_header_len off. This is done because the
 * lowlevel-driver has already pulled off its stuff when we get
 * here and this routine only gets called with p_encap == ETHER.
 * Determine the packet's protocol ID. The rule here is that we
 * assume 802.3 if the type field is short enough to be a length.
 * This is normal practice and works for any 'now in use' protocol.
 */
static __be16
isdn_net_type_trans(struct sk_buff *skb, struct net_device *dev)
{
	struct ethhdr *eth;
	unsigned char *rawp;

	skb_reset_mac_header(skb);
	skb_pull(skb, ETH_HLEN);
	eth = eth_hdr(skb);

	/* group-address bit set: broadcast or multicast */
	if (*eth->h_dest & 1) {
		if (memcmp(eth->h_dest, dev->broadcast, ETH_ALEN) == 0)
			skb->pkt_type = PACKET_BROADCAST;
		else
			skb->pkt_type = PACKET_MULTICAST;
	}
	/*
	 * This ALLMULTI check should be redundant by 1.4
	 * so don't forget to remove it.
	 */
	else if (dev->flags & (IFF_PROMISC /*| IFF_ALLMULTI*/)) {
		if (memcmp(eth->h_dest, dev->dev_addr, ETH_ALEN))
			skb->pkt_type = PACKET_OTHERHOST;
	}
	/* values >= 1536 are EtherType, smaller ones are 802.3 lengths */
	if (ntohs(eth->h_proto) >= 1536)
		return eth->h_proto;

	rawp = skb->data;

	/*
	 * This is a magic hack to spot IPX packets. Older Novell breaks
	 * the protocol design and runs IPX over 802.3 without an 802.2 LLC
	 * layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
	 * won't work for fault tolerant netware but does for the rest.
	 */
	if (*(unsigned short *) rawp == 0xFFFF)
		return htons(ETH_P_802_3);
	/*
	 * Real 802.2 LLC
	 */
	return htons(ETH_P_802_2);
}

/*
 * CISCO HDLC keepalive specific stuff
 */
/* Allocate an skb with headroom for the low-level driver's header. */
static struct sk_buff*
isdn_net_ciscohdlck_alloc_skb(isdn_net_local *lp, int len)
{
	unsigned short hl = dev->drv[lp->isdn_device]->interface->hl_hdrlen;
	struct sk_buff *skb;

	skb = alloc_skb(hl + len, GFP_ATOMIC);
	if (skb)
		skb_reserve(skb, hl);
	else
		printk("isdn out of mem at %s:%d!\n", __FILE__, __LINE__);
	return skb;
}

/* cisco hdlck device private ioctls */
static int
isdn_ciscohdlck_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);
	unsigned long len = 0;
	unsigned long expires = 0;
	int tmp = 0;
	int period = lp->cisco_keepalive_period;
	s8 debserint = lp->cisco_debserint;
	int rc = 0;

	if (lp->p_encap != ISDN_NET_ENCAP_CISCOHDLCK)
		return -EINVAL;

	switch (cmd) {
		/* get/set keepalive period */
		case SIOCGKEEPPERIOD:
			len = (unsigned long)sizeof(lp->cisco_keepalive_period);
			if (copy_to_user(ifr->ifr_data,
				&lp->cisco_keepalive_period, len))
				rc = -EFAULT;
			break;
		case SIOCSKEEPPERIOD:
			tmp = lp->cisco_keepalive_period;
			len = (unsigned long)sizeof(lp->cisco_keepalive_period);
			if (copy_from_user(&period, ifr->ifr_data, len))
				rc = -EFAULT;
			if ((period > 0) && (period <= 32767))
				lp->cisco_keepalive_period = period;
			else
				rc = -EINVAL;
			/* re-arm the running keepalive timer with the new period */
			if (!rc && (tmp != lp->cisco_keepalive_period)) {
				expires = (unsigned long)(jiffies +
					lp->cisco_keepalive_period * HZ);
				mod_timer(&lp->cisco_timer, expires);
				printk(KERN_INFO "%s: Keepalive period set "
					"to %d seconds.\n",
					dev->name, lp->cisco_keepalive_period);
			}
			break;

		/* get/set debugging */
		case SIOCGDEBSERINT:
			len = (unsigned long)sizeof(lp->cisco_debserint);
			if (copy_to_user(ifr->ifr_data,
				&lp->cisco_debserint, len))
				rc = -EFAULT;
			break;
		case SIOCSDEBSERINT:
			len = (unsigned long)sizeof(lp->cisco_debserint);
			if (copy_from_user(&debserint,
				ifr->ifr_data, len))
				rc = -EFAULT;
			if ((debserint >= 0) && (debserint <= 64))
				lp->cisco_debserint = debserint;
			else
				rc = -EINVAL;
			break;

		default:
			rc = -EINVAL;
			break;
	}
	return (rc);
}

/* Dispatch private device ioctls to the handler matching the interface's
 * encapsulation (syncPPP or Cisco-HDLC-keepalive). */
static int isdn_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	isdn_net_local *lp = (isdn_net_local *) netdev_priv(dev);

	switch (lp->p_encap) {
#ifdef CONFIG_ISDN_PPP
	case ISDN_NET_ENCAP_SYNCPPP:
		return isdn_ppp_dev_ioctl(dev, ifr, cmd);
#endif
	case ISDN_NET_ENCAP_CISCOHDLCK:
		return isdn_ciscohdlck_dev_ioctl(dev, ifr, cmd);
	default:
		return -EINVAL;
	}
}

/* called via cisco_timer.function */
/* Periodic SLARP keepalive: track line-protocol up/down from the sequence
 * numbers seen, emit a keepalive frame and re-arm the timer. */
static void
isdn_net_ciscohdlck_slarp_send_keepalive(unsigned long data)
{
	isdn_net_local *lp = (isdn_net_local *) data;
	struct sk_buff *skb;
	unsigned char *p;
	unsigned long last_cisco_myseq = lp->cisco_myseq;
	int myseq_diff = 0;

	if (!(lp->flags & ISDN_NET_CONNECTED) || lp->dialstate) {
		printk("isdn BUG at %s:%d!\n", __FILE__, __LINE__);
		return;
	}
	lp->cisco_myseq++;

	myseq_diff = (lp->cisco_myseq - lp->cisco_mineseen);
	if ((lp->cisco_line_state) && ((myseq_diff >= 3)||(myseq_diff <= -3))) {
		/* line up -> down */
		lp->cisco_line_state = 0;
		printk (KERN_WARNING
				"UPDOWN: Line protocol on Interface %s,"
				" changed state to down\n", lp->netdev->dev->name);
		/* should stop routing higher-level data across */
	} else if ((!lp->cisco_line_state) &&
		(myseq_diff >= 0) && (myseq_diff <= 2)) {
		/* line down -> up */
		lp->cisco_line_state = 1;
		printk (KERN_WARNING
				"UPDOWN: Line protocol on Interface %s,"
				" changed state to up\n", lp->netdev->dev->name);
		/* restart routing higher-level data across */
	}

	if (lp->cisco_debserint)
		printk (KERN_DEBUG "%s: HDLC "
			"myseq %lu, mineseen %lu%c, yourseen %lu, %s\n",
			lp->netdev->dev->name, last_cisco_myseq, lp->cisco_mineseen,
			((last_cisco_myseq == lp->cisco_mineseen) ? '*' : 040),
			lp->cisco_yourseq,
			((lp->cisco_line_state) ? "line up" : "line down"));

	skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
	if (!skb)
		return;

	p = skb_put(skb, 4 + 14);

	/* cisco header */
	*(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
	*(u8 *)(p + 1) = CISCO_CTRL;
	*(__be16 *)(p + 2) = cpu_to_be16(CISCO_TYPE_SLARP);

	/* slarp keepalive */
	*(__be32 *)(p + 4) = cpu_to_be32(CISCO_SLARP_KEEPALIVE);
	*(__be32 *)(p + 8) = cpu_to_be32(lp->cisco_myseq);
	*(__be32 *)(p + 12) = cpu_to_be32(lp->cisco_yourseq);
	*(__be16 *)(p + 16) = cpu_to_be16(0xffff); // reliability, always 0xffff
	p += 18;

	isdn_net_write_super(lp, skb);

	lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
	add_timer(&lp->cisco_timer);
}

/* Send a SLARP address request (address/netmask fields zeroed). */
static void
isdn_net_ciscohdlck_slarp_send_request(isdn_net_local *lp)
{
	struct sk_buff *skb;
	unsigned char *p;

	skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
	if (!skb)
		return;

	p = skb_put(skb, 4 + 14);

	/* cisco header */
	*(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
	*(u8 *)(p + 1) = CISCO_CTRL;
	*(__be16 *)(p + 2) = cpu_to_be16(CISCO_TYPE_SLARP);

	/* slarp request */
	*(__be32 *)(p + 4) = cpu_to_be32(CISCO_SLARP_REQUEST);
	*(__be32 *)(p + 8) = cpu_to_be32(0); // address
	*(__be32 *)(p + 12) = cpu_to_be32(0); // netmask
	*(__be16 *)(p + 16) = cpu_to_be16(0); // unused
	p += 18;

	isdn_net_write_super(lp, skb);
}

/* Connection established: reset SLARP state, send an address request and
 * start the keepalive timer. */
static void
isdn_net_ciscohdlck_connected(isdn_net_local *lp)
{
	lp->cisco_myseq = 0;
	lp->cisco_mineseen = 0;
	lp->cisco_yourseq = 0;
	lp->cisco_keepalive_period = ISDN_TIMER_KEEPINT;
	lp->cisco_last_slarp_in = 0;
	lp->cisco_line_state = 0;
	lp->cisco_debserint = 0;

	/* send slarp request because interface/seq.no.s reset */
	isdn_net_ciscohdlck_slarp_send_request(lp);

	init_timer(&lp->cisco_timer);
	lp->cisco_timer.data = (unsigned long) lp;
	lp->cisco_timer.function = isdn_net_ciscohdlck_slarp_send_keepalive;
	lp->cisco_timer.expires = jiffies + lp->cisco_keepalive_period * HZ;
	add_timer(&lp->cisco_timer);
}

/* Connection lost: stop the keepalive timer. */
static void
isdn_net_ciscohdlck_disconnected(isdn_net_local *lp)
{
	del_timer(&lp->cisco_timer);
}

/* Answer a SLARP request with our primary IPv4 address and netmask
 * (zeroes if the interface has no address configured). */
static void
isdn_net_ciscohdlck_slarp_send_reply(isdn_net_local *lp)
{
	struct sk_buff *skb;
	unsigned char *p;
	struct in_device *in_dev = NULL;
	__be32 addr = 0;		/* local ipv4 address */
	__be32 mask = 0;		/* local netmask */

	if ((in_dev = lp->netdev->dev->ip_ptr) != NULL) {
		/* take primary(first) address of interface */
		struct in_ifaddr *ifa = in_dev->ifa_list;
		if (ifa != NULL) {
			addr = ifa->ifa_local;
			mask = ifa->ifa_mask;
		}
	}

	skb = isdn_net_ciscohdlck_alloc_skb(lp, 4 + 14);
	if (!skb)
		return;

	p = skb_put(skb, 4 + 14);

	/* cisco header */
	*(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
	*(u8 *)(p + 1) = CISCO_CTRL;
	*(__be16 *)(p + 2) = cpu_to_be16(CISCO_TYPE_SLARP);

	/* slarp reply, send own ip/netmask; if values are nonsense remote
	 * should think we are unable to provide it with an address via SLARP */
	*(__be32 *)(p + 4) = cpu_to_be32(CISCO_SLARP_REPLY);
	*(__be32 *)(p + 8) = addr;	// address
	*(__be32 *)(p + 12) = mask;	// netmask
	*(__be16 *)(p + 16) = cpu_to_be16(0);	// unused
	p += 18;

	isdn_net_write_super(lp, skb);
}

/* Parse an incoming SLARP packet (request / reply / keepalive) that
 * follows the 4-byte Cisco header already pulled by the caller. */
static void
isdn_net_ciscohdlck_slarp_in(isdn_net_local *lp, struct sk_buff *skb)
{
	unsigned char *p;
	int period;
	u32 code;
	u32 my_seq;
	u32 your_seq;
	__be32 local;
	__be32 *addr, *mask;
	u16 unused;

	if (skb->len < 14)
		return;

	p = skb->data;
	code = be32_to_cpup((__be32 *)p);
	p += 4;

	switch (code) {
	case CISCO_SLARP_REQUEST:
		lp->cisco_yourseq = 0;
		isdn_net_ciscohdlck_slarp_send_reply(lp);
		break;
	case CISCO_SLARP_REPLY:
		addr = (__be32 *)p;
		mask = (__be32 *)(p + 4);
		/* SLARP replies describe a /30 point-to-point link; our own
		 * address is the peer's address with the low 2 bits flipped */
		if (*mask != cpu_to_be32(0xfffffffc))
			goto slarp_reply_out;
		if ((*addr & cpu_to_be32(3)) == cpu_to_be32(0) ||
		    (*addr & cpu_to_be32(3)) == cpu_to_be32(3))
			goto slarp_reply_out;
		local = *addr ^ cpu_to_be32(3);
		printk(KERN_INFO "%s: got slarp reply: remote ip: %pI4, local ip: %pI4 mask: %pI4\n",
		       lp->netdev->dev->name, addr, &local, mask);
		break;
  slarp_reply_out:
		printk(KERN_INFO "%s: got invalid slarp reply (%pI4/%pI4) - ignored\n",
		       lp->netdev->dev->name, addr, mask);
		break;
	case CISCO_SLARP_KEEPALIVE:
		/* round elapsed time to the nearest second */
		period = (int)((jiffies - lp->cisco_last_slarp_in
				+ HZ/2 - 1) / HZ);
		if (lp->cisco_debserint &&
				(period != lp->cisco_keepalive_period) &&
				lp->cisco_last_slarp_in) {
			printk(KERN_DEBUG "%s: Keepalive period mismatch - "
				"is %d but should be %d.\n",
				lp->netdev->dev->name, period,
				lp->cisco_keepalive_period);
		}
		lp->cisco_last_slarp_in = jiffies;
		my_seq = be32_to_cpup((__be32 *)(p + 0));
		your_seq = be32_to_cpup((__be32 *)(p + 4));
		unused = be16_to_cpup((__be16 *)(p + 8));
		p += 10;
		lp->cisco_yourseq = my_seq;
		lp->cisco_mineseen = your_seq;
		break;
	}
}

/* Receive path for Cisco-HDLC-keepalive frames: validate the 4-byte Cisco
 * header, handle SLARP/CDP internally, pass anything else up the stack. */
static void
isdn_net_ciscohdlck_receive(isdn_net_local *lp, struct sk_buff *skb)
{
	unsigned char *p;
	u8 addr;
	u8 ctrl;
	u16 type;

	if (skb->len < 4)
		goto out_free;

	p = skb->data;
	addr = *(u8 *)(p + 0);
	ctrl = *(u8 *)(p + 1);
	type = be16_to_cpup((__be16 *)(p + 2));
	p += 4;
	skb_pull(skb, 4);

	if (addr != CISCO_ADDR_UNICAST && addr != CISCO_ADDR_BROADCAST) {
		printk(KERN_WARNING "%s: Unknown Cisco addr 0x%02x\n",
		       lp->netdev->dev->name, addr);
		goto out_free;
	}
	if (ctrl != CISCO_CTRL) {
		printk(KERN_WARNING "%s: Unknown Cisco ctrl 0x%02x\n",
		       lp->netdev->dev->name, ctrl);
		goto out_free;
	}

	switch (type) {
	case CISCO_TYPE_SLARP:
		isdn_net_ciscohdlck_slarp_in(lp, skb);
		goto out_free;
	case CISCO_TYPE_CDP:
		if (lp->cisco_debserint)
			printk(KERN_DEBUG "%s: Received CDP packet. use "
				"\"no cdp enable\" on cisco.\n",
				lp->netdev->dev->name);
		goto out_free;
	default:
		/* no special cisco protocol */
		skb->protocol = htons(type);
		netif_rx(skb);
		return;
	}

 out_free:
	kfree_skb(skb);
}

/*
 * Got a packet from ISDN-Channel.
 */
/* De-encapsulate a received frame according to the interface's p_encap
 * and hand it to the network stack.  For bundled (slave) devices, the
 * packet is accounted and delivered on the master device. */
static void
isdn_net_receive(struct net_device *ndev, struct sk_buff *skb)
{
	isdn_net_local *lp = (isdn_net_local *) netdev_priv(ndev);
	isdn_net_local *olp = lp;	/* original 'lp' */
#ifdef CONFIG_ISDN_X25
	struct concap_proto *cprot = lp -> netdev -> cprot;
#endif
	lp->transcount += skb->len;

	lp->stats.rx_packets++;
	lp->stats.rx_bytes += skb->len;
	if (lp->master) {
		/* Bundling: If device is a slave-device, deliver to master, also
		 * handle master's statistics and hangup-timeout
		 */
		ndev = lp->master;
		lp = (isdn_net_local *) netdev_priv(ndev);
		lp->stats.rx_packets++;
		lp->stats.rx_bytes += skb->len;
	}
	skb->dev = ndev;
	skb->pkt_type = PACKET_HOST;
	skb_reset_mac_header(skb);
#ifdef ISDN_DEBUG_NET_DUMP
	isdn_dumppkt("R:", skb->data, skb->len, 40);
#endif
	switch (lp->p_encap) {
		case ISDN_NET_ENCAP_ETHER:
			/* Ethernet over ISDN */
			olp->huptimer = 0;
			lp->huptimer = 0;
			skb->protocol = isdn_net_type_trans(skb, ndev);
			break;
		case ISDN_NET_ENCAP_UIHDLC:
			/* HDLC with UI-frame (for ispa with -h1 option) */
			olp->huptimer = 0;
			lp->huptimer = 0;
			skb_pull(skb, 2);
			/* Fall through */
		case ISDN_NET_ENCAP_RAWIP:
			/* RAW-IP without MAC-Header */
			olp->huptimer = 0;
			lp->huptimer = 0;
			skb->protocol = htons(ETH_P_IP);
			break;
		case ISDN_NET_ENCAP_CISCOHDLCK:
			isdn_net_ciscohdlck_receive(lp, skb);
			return;
		case ISDN_NET_ENCAP_CISCOHDLC:
			/* CISCO-HDLC IP with type field and fake I-frame-header */
			skb_pull(skb, 2);
			/* Fall through */
		case ISDN_NET_ENCAP_IPTYP:
			/* IP with type field */
			olp->huptimer = 0;
			lp->huptimer = 0;
			skb->protocol = *(__be16 *)&(skb->data[0]);
			skb_pull(skb, 2);
			if (*(unsigned short *) skb->data == 0xFFFF)
				skb->protocol = htons(ETH_P_802_3);
			break;
#ifdef CONFIG_ISDN_PPP
		case ISDN_NET_ENCAP_SYNCPPP:
			/* huptimer is done in isdn_ppp_push_higher */
			isdn_ppp_receive(lp->netdev, olp, skb);
			return;
#endif

		default:
#ifdef CONFIG_ISDN_X25
			/* try if there are generic sync_device receiver routines */
			if(cprot) if(cprot -> pops)
				if( cprot -> pops -> data_ind){
					cprot -> pops -> data_ind(cprot,skb);
					return;
				};
#endif /* CONFIG_ISDN_X25 */
			printk(KERN_WARNING "%s: unknown encapsulation, dropping\n",
			       lp->netdev->dev->name);
			kfree_skb(skb);
			return;
	}

	netif_rx(skb);
	return;
}

/*
 * A packet arrived via ISDN. Search interface-chain for a corresponding
 * interface. If found, deliver packet to receiver-function and return 1,
 * else return 0.
 */
int
isdn_net_rcv_skb(int idx, struct sk_buff *skb)
{
	isdn_net_dev *p = dev->rx_netdev[idx];

	if (p) {
		isdn_net_local *lp = p->local;
		/* only deliver when fully connected (dialing finished) */
		if ((lp->flags & ISDN_NET_CONNECTED) &&
		    (!lp->dialstate)) {
			isdn_net_receive(p->dev, skb);
			return 1;
		}
	}
	return 0;
}

/*
 *  build an header
 *  depends on encaps that is being used.
 */
static int isdn_net_header(struct sk_buff *skb, struct net_device *dev,
			   unsigned short type,
			   const void *daddr, const void *saddr, unsigned plen)
{
	isdn_net_local *lp = netdev_priv(dev);
	unsigned char *p;
	ushort len = 0;		/* number of header bytes pushed */

	switch (lp->p_encap) {
		case ISDN_NET_ENCAP_ETHER:
			len = eth_header(skb, dev, type, daddr, saddr, plen);
			break;
#ifdef CONFIG_ISDN_PPP
		case ISDN_NET_ENCAP_SYNCPPP:
			/* stick on a fake header to keep fragmentation code happy. */
			len = IPPP_MAX_HEADER;
			skb_push(skb,len);
			break;
#endif
		case ISDN_NET_ENCAP_RAWIP:
			printk(KERN_WARNING "isdn_net_header called with RAW_IP!\n");
			len = 0;
			break;
		case ISDN_NET_ENCAP_IPTYP:
			/* ethernet type field */
			*((__be16 *)skb_push(skb, 2)) = htons(type);
			len = 2;
			break;
		case ISDN_NET_ENCAP_UIHDLC:
			/* HDLC with UI-Frames (for ispa with -h1 option) */
			*((__be16 *)skb_push(skb, 2)) = htons(0x0103);
			len = 2;
			break;
		case ISDN_NET_ENCAP_CISCOHDLC:
		case ISDN_NET_ENCAP_CISCOHDLCK:
			p = skb_push(skb, 4);
			*(u8 *)(p + 0) = CISCO_ADDR_UNICAST;
			*(u8 *)(p + 1) = CISCO_CTRL;
			*(__be16 *)(p + 2) = cpu_to_be16(type);
			p += 4;
			len = 4;
			break;
#ifdef CONFIG_ISDN_X25
		default:
			/* try if there are generic concap protocol routines */
			if( lp-> netdev -> cprot ){
				printk(KERN_WARNING "isdn_net_header called with concap_proto!\n");
				len = 0;
				break;
			}
			break;
#endif /* CONFIG_ISDN_X25 */
	}
	return len;
}

/* We don't need to send arp, because we have point-to-point connections. */
static int
isdn_net_rebuild_header(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	isdn_net_local *lp = netdev_priv(dev);
	int ret = 0;

	if (lp->p_encap == ISDN_NET_ENCAP_ETHER) {
		struct ethhdr *eth = (struct ethhdr *) skb->data;

		/*
		 *      Only ARP/IP is currently supported
		 */

		if (eth->h_proto != htons(ETH_P_IP)) {
			printk(KERN_WARNING
			       "isdn_net: %s don't know how to resolve type %d addresses?\n",
			       dev->name, (int) eth->h_proto);
			memcpy(eth->h_source, dev->dev_addr, dev->addr_len);
			return 0;
		}
		/*
		 *      Try to get ARP to resolve the header.
		 */
#ifdef CONFIG_INET
		ret = arp_find(eth->h_dest, skb);
#endif
	}
	return ret;
}

/* Header caching is only meaningful for the ETHER encapsulation;
 * reject it (-1) for everything else. */
static int isdn_header_cache(const struct neighbour *neigh, struct hh_cache *hh)
{
	const struct net_device *dev = neigh->dev;
	isdn_net_local *lp = netdev_priv(dev);

	if (lp->p_encap == ISDN_NET_ENCAP_ETHER)
		return eth_header_cache(neigh, hh);
	return -1;
}

/* Update a cached hardware header; no-op unless ETHER encapsulation. */
static void isdn_header_cache_update(struct hh_cache *hh,
				     const struct net_device *dev,
				     const unsigned char *haddr)
{
	isdn_net_local *lp = netdev_priv(dev);
	if (lp->p_encap == ISDN_NET_ENCAP_ETHER)
		eth_header_cache_update(hh, dev, haddr);
}

static const struct header_ops isdn_header_ops = {
	.create = isdn_net_header,
	.rebuild = isdn_net_rebuild_header,
	.cache = isdn_header_cache,
	.cache_update = isdn_header_cache_update,
};

/*
 * Interface-setup. (just after registering a new interface)
 */
static int
isdn_net_init(struct net_device *ndev)
{
	ushort max_hlhdr_len = 0;
	int drvidx;

	/*
	 *  up till binding we ask the protocol layer to reserve as much
	 *  as we might need for HL layer
	 */

	for (drvidx = 0; drvidx < ISDN_MAX_DRIVERS; drvidx++)
		if (dev->drv[drvidx])
			if (max_hlhdr_len < dev->drv[drvidx]->interface->hl_hdrlen)
				max_hlhdr_len = dev->drv[drvidx]->interface->hl_hdrlen;

	ndev->hard_header_len = ETH_HLEN + max_hlhdr_len;
	return 0;
}

/* Toggle the preferred B-channel (0 <-> 1) of every interface bound to
 * driver drvidx. */
static void
isdn_net_swapbind(int drvidx)
{
	isdn_net_dev *p;

#ifdef ISDN_DEBUG_NET_ICALL
	printk(KERN_DEBUG "n_fi: swapping ch of %d\n", drvidx);
#endif
	p = dev->netdev;
	while (p) {
		if (p->local->pre_device == drvidx)
			switch (p->local->pre_channel) {
				case 0:
					p->local->pre_channel = 1;
					break;
				case 1:
					p->local->pre_channel = 0;
					break;
			}
		p = (isdn_net_dev *) p->next;
	}
}

/* Swap the EXCLUSIVE usage bits of two entries in the global channel
 * usage table.  (Continues beyond this chunk of the file.) */
static void
isdn_net_swap_usage(int i1, int i2)
{
	int u1 = dev->usage[i1] & ISDN_USAGE_EXCLUSIVE;
	int u2 = dev->usage[i2] & ISDN_USAGE_EXCLUSIVE;

#ifdef ISDN_DEBUG_NET_ICALL
	printk(KERN_DEBUG "n_fi: usage of %d and %d\n", i1, i2);
#endif
	dev->usage[i1] &= ~ISDN_USAGE_EXCLUSIVE;
	dev->usage[i1] |= u2;
	dev->usage[i2] &= ~ISDN_USAGE_EXCLUSIVE;
	dev->usage[i2]
|= u1; isdn_info_update(); } /* * An incoming call-request has arrived. * Search the interface-chain for an appropriate interface. * If found, connect the interface to the ISDN-channel and initiate * D- and B-Channel-setup. If secure-flag is set, accept only * configured phone-numbers. If callback-flag is set, initiate * callback-dialing. * * Return-Value: 0 = No appropriate interface for this call. * 1 = Call accepted * 2 = Reject call, wait cbdelay, then call back * 3 = Reject call * 4 = Wait cbdelay, then call back * 5 = No appropriate interface for this call, * would eventually match if CID was longer. */ int isdn_net_find_icall(int di, int ch, int idx, setup_parm *setup) { char *eaz; int si1; int si2; int ematch; int wret; int swapped; int sidx = 0; u_long flags; isdn_net_dev *p; isdn_net_phone *n; char nr[ISDN_MSNLEN]; char *my_eaz; /* Search name in netdev-chain */ if (!setup->phone[0]) { nr[0] = '0'; nr[1] = '\0'; printk(KERN_INFO "isdn_net: Incoming call without OAD, assuming '0'\n"); } else strlcpy(nr, setup->phone, ISDN_MSNLEN); si1 = (int) setup->si1; si2 = (int) setup->si2; if (!setup->eazmsn[0]) { printk(KERN_WARNING "isdn_net: Incoming call without CPN, assuming '0'\n"); eaz = "0"; } else eaz = setup->eazmsn; if (dev->net_verbose > 1) printk(KERN_INFO "isdn_net: call from %s,%d,%d -> %s\n", nr, si1, si2, eaz); /* Accept DATA and VOICE calls at this stage * local eaz is checked later for allowed call types */ if ((si1 != 7) && (si1 != 1)) { if (dev->net_verbose > 1) printk(KERN_INFO "isdn_net: Service-Indicator not 1 or 7, ignored\n"); return 0; } n = (isdn_net_phone *) 0; p = dev->netdev; ematch = wret = swapped = 0; #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: di=%d ch=%d idx=%d usg=%d\n", di, ch, idx, dev->usage[idx]); #endif while (p) { int matchret; isdn_net_local *lp = p->local; /* If last check has triggered as binding-swap, revert it */ switch (swapped) { case 2: isdn_net_swap_usage(idx, sidx); /* fall through */ case 1: 
isdn_net_swapbind(di); break; } swapped = 0; /* check acceptable call types for DOV */ my_eaz = isdn_map_eaz2msn(lp->msn, di); if (si1 == 1) { /* it's a DOV call, check if we allow it */ if (*my_eaz == 'v' || *my_eaz == 'V' || *my_eaz == 'b' || *my_eaz == 'B') my_eaz++; /* skip to allow a match */ else my_eaz = NULL; /* force non match */ } else { /* it's a DATA call, check if we allow it */ if (*my_eaz == 'b' || *my_eaz == 'B') my_eaz++; /* skip to allow a match */ } if (my_eaz) matchret = isdn_msncmp(eaz, my_eaz); else matchret = 1; if (!matchret) ematch = 1; /* Remember if more numbers eventually can match */ if (matchret > wret) wret = matchret; #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: if='%s', l.msn=%s, l.flags=%d, l.dstate=%d\n", p->dev->name, lp->msn, lp->flags, lp->dialstate); #endif if ((!matchret) && /* EAZ is matching */ (((!(lp->flags & ISDN_NET_CONNECTED)) && /* but not connected */ (USG_NONE(dev->usage[idx]))) || /* and ch. unused or */ ((((lp->dialstate == 4) || (lp->dialstate == 12)) && /* if dialing */ (!(lp->flags & ISDN_NET_CALLBACK))) /* but no callback */ ))) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: match1, pdev=%d pch=%d\n", lp->pre_device, lp->pre_channel); #endif if (dev->usage[idx] & ISDN_USAGE_EXCLUSIVE) { if ((lp->pre_channel != ch) || (lp->pre_device != di)) { /* Here we got a problem: * If using an ICN-Card, an incoming call is always signaled on * on the first channel of the card, if both channels are * down. However this channel may be bound exclusive. If the * second channel is free, this call should be accepted. * The solution is horribly but it runs, so what: * We exchange the exclusive bindings of the two channels, the * corresponding variables in the interface-structs. */ if (ch == 0) { sidx = isdn_dc2minor(di, 1); #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: ch is 0\n"); #endif if (USG_NONE(dev->usage[sidx])) { /* Second Channel is free, now see if it is bound * exclusive too. 
*/ if (dev->usage[sidx] & ISDN_USAGE_EXCLUSIVE) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: 2nd channel is down and bound\n"); #endif /* Yes, swap bindings only, if the original * binding is bound to channel 1 of this driver */ if ((lp->pre_device == di) && (lp->pre_channel == 1)) { isdn_net_swapbind(di); swapped = 1; } else { /* ... else iterate next device */ p = (isdn_net_dev *) p->next; continue; } } else { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: 2nd channel is down and unbound\n"); #endif /* No, swap always and swap excl-usage also */ isdn_net_swap_usage(idx, sidx); isdn_net_swapbind(di); swapped = 2; } /* Now check for exclusive binding again */ #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: final check\n"); #endif if ((dev->usage[idx] & ISDN_USAGE_EXCLUSIVE) && ((lp->pre_channel != ch) || (lp->pre_device != di))) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: final check failed\n"); #endif p = (isdn_net_dev *) p->next; continue; } } } else { /* We are already on the second channel, so nothing to do */ #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: already on 2nd channel\n"); #endif } } } #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: match2\n"); #endif n = lp->phone[0]; if (lp->flags & ISDN_NET_SECURE) { while (n) { if (!isdn_msncmp(nr, n->num)) break; n = (isdn_net_phone *) n->next; } } if (n || (!(lp->flags & ISDN_NET_SECURE))) { #ifdef ISDN_DEBUG_NET_ICALL printk(KERN_DEBUG "n_fi: match3\n"); #endif /* matching interface found */ /* * Is the state STOPPED? * If so, no dialin is allowed, * so reject actively. * */ if (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF) { printk(KERN_INFO "incoming call, interface %s `stopped' -> rejected\n", p->dev->name); return 3; } /* * Is the interface up? * If not, reject the call actively. */ if (!isdn_net_device_started(p)) { printk(KERN_INFO "%s: incoming call, interface down -> rejected\n", p->dev->name); return 3; } /* Interface is up, now see if it's a slave. 
If so, see if * it's master and parent slave is online. If not, reject the call. */ if (lp->master) { isdn_net_local *mlp = ISDN_MASTER_PRIV(lp); printk(KERN_DEBUG "ICALLslv: %s\n", p->dev->name); printk(KERN_DEBUG "master=%s\n", lp->master->name); if (mlp->flags & ISDN_NET_CONNECTED) { printk(KERN_DEBUG "master online\n"); /* Master is online, find parent-slave (master if first slave) */ while (mlp->slave) { if (ISDN_SLAVE_PRIV(mlp) == lp) break; mlp = ISDN_SLAVE_PRIV(mlp); } } else printk(KERN_DEBUG "master offline\n"); /* Found parent, if it's offline iterate next device */ printk(KERN_DEBUG "mlpf: %d\n", mlp->flags & ISDN_NET_CONNECTED); if (!(mlp->flags & ISDN_NET_CONNECTED)) { p = (isdn_net_dev *) p->next; continue; } } if (lp->flags & ISDN_NET_CALLBACK) { int chi; /* * Is the state MANUAL? * If so, no callback can be made, * so reject actively. * */ if (ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_OFF) { printk(KERN_INFO "incoming call for callback, interface %s `off' -> rejected\n", p->dev->name); return 3; } printk(KERN_DEBUG "%s: call from %s -> %s, start callback\n", p->dev->name, nr, eaz); if (lp->phone[1]) { /* Grab a free ISDN-Channel */ spin_lock_irqsave(&dev->lock, flags); if ((chi = isdn_get_free_channel( ISDN_USAGE_NET, lp->l2_proto, lp->l3_proto, lp->pre_device, lp->pre_channel, lp->msn) ) < 0) { printk(KERN_WARNING "isdn_net_find_icall: No channel for %s\n", p->dev->name); spin_unlock_irqrestore(&dev->lock, flags); return 0; } /* Setup dialstate. */ lp->dtimer = 0; lp->dialstate = 11; /* Connect interface with channel */ isdn_net_bind_channel(lp, chi); #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) if (isdn_ppp_bind(lp) < 0) { spin_unlock_irqrestore(&dev->lock, flags); isdn_net_unbind_channel(lp); return 0; } #endif spin_unlock_irqrestore(&dev->lock, flags); /* Initiate dialing by returning 2 or 4 */ return (lp->flags & ISDN_NET_CBHUP) ? 
2 : 4; } else printk(KERN_WARNING "isdn_net: %s: No phone number\n", p->dev->name); return 0; } else { printk(KERN_DEBUG "%s: call from %s -> %s accepted\n", p->dev->name, nr, eaz); /* if this interface is dialing, it does it probably on a different device, so free this device */ if ((lp->dialstate == 4) || (lp->dialstate == 12)) { #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) isdn_ppp_free(lp); #endif isdn_net_lp_disconnected(lp); isdn_free_channel(lp->isdn_device, lp->isdn_channel, ISDN_USAGE_NET); } spin_lock_irqsave(&dev->lock, flags); dev->usage[idx] &= ISDN_USAGE_EXCLUSIVE; dev->usage[idx] |= ISDN_USAGE_NET; strcpy(dev->num[idx], nr); isdn_info_update(); dev->st_netdev[idx] = lp->netdev; lp->isdn_device = di; lp->isdn_channel = ch; lp->ppp_slot = -1; lp->flags |= ISDN_NET_CONNECTED; lp->dialstate = 7; lp->dtimer = 0; lp->outgoing = 0; lp->huptimer = 0; lp->hupflags |= ISDN_WAITCHARGE; lp->hupflags &= ~ISDN_HAVECHARGE; #ifdef CONFIG_ISDN_PPP if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP) { if (isdn_ppp_bind(lp) < 0) { isdn_net_unbind_channel(lp); spin_unlock_irqrestore(&dev->lock, flags); return 0; } } #endif spin_unlock_irqrestore(&dev->lock, flags); return 1; } } } p = (isdn_net_dev *) p->next; } /* If none of configured EAZ/MSN matched and not verbose, be silent */ if (!ematch || dev->net_verbose) printk(KERN_INFO "isdn_net: call from %s -> %d %s ignored\n", nr, di, eaz); return (wret == 2)?5:0; } /* * Search list of net-interfaces for an interface with given name. */ isdn_net_dev * isdn_net_findif(char *name) { isdn_net_dev *p = dev->netdev; while (p) { if (!strcmp(p->dev->name, name)) return p; p = (isdn_net_dev *) p->next; } return (isdn_net_dev *) NULL; } /* * Force a net-interface to dial out. * This is called from the userlevel-routine below or * from isdn_net_start_xmit(). 
*/
/*
 * Try to establish an outgoing connection on an interface.
 *
 * Grabs a free B-channel matching the interface's pre-binding and
 * L2/L3 protocol settings, binds it to @lp and kicks off the dial
 * state machine.
 *
 * Returns: 0 on success,
 *          -EBUSY  if already connected or currently dialing,
 *          -EINVAL if no outgoing phone number is configured,
 *          -EAGAIN if no free channel (or PPP binding) is available.
 */
static int
isdn_net_force_dial_lp(isdn_net_local * lp)
{
	if ((!(lp->flags & ISDN_NET_CONNECTED)) && !lp->dialstate) {
		int chi;
		/* phone[1] holds the outgoing numbers (phone[0] = incoming) */
		if (lp->phone[1]) {
			ulong flags;

			/* Grab a free ISDN-Channel */
			spin_lock_irqsave(&dev->lock, flags);
			if ((chi =
			     isdn_get_free_channel(
				     ISDN_USAGE_NET,
				     lp->l2_proto,
				     lp->l3_proto,
				     lp->pre_device,
				     lp->pre_channel,
				     lp->msn)) < 0) {
				printk(KERN_WARNING "isdn_net_force_dial: No channel for %s\n",
				       lp->netdev->dev->name);
				spin_unlock_irqrestore(&dev->lock, flags);
				return -EAGAIN;
			}
			/* dialstate 1: first state of the dial state machine,
			 * advanced later by the net timer */
			lp->dialstate = 1;
			/* Connect interface with channel */
			isdn_net_bind_channel(lp, chi);
#ifdef CONFIG_ISDN_PPP
			if (lp->p_encap == ISDN_NET_ENCAP_SYNCPPP)
				if (isdn_ppp_bind(lp) < 0) {
					/* undo the channel binding on PPP failure */
					isdn_net_unbind_channel(lp);
					spin_unlock_irqrestore(&dev->lock, flags);
					return -EAGAIN;
				}
#endif
			/* Initiate dialing */
			spin_unlock_irqrestore(&dev->lock, flags);
			isdn_net_dial();
			return 0;
		} else
			return -EINVAL;
	} else
		return -EBUSY;
}

/*
 * This is called from certain upper protocol layers (multilink ppp
 * and x25iface encapsulation module) that want to initiate dialing
 * themselves.
 */
int
isdn_net_dial_req(isdn_net_local * lp)
{
	/* is there a better error code? */
	/* only dial automatically when the interface is in AUTO dial mode */
	if (!(ISDN_NET_DIALMODE(*lp) == ISDN_NET_DM_AUTO))
		return -EBUSY;

	return isdn_net_force_dial_lp(lp);
}

/*
 * Force a net-interface to dial out.
 * This is always called from within userspace (ISDN_IOCTL_NET_DIAL).
 *
 * @name: interface name given by the ioctl.
 * Returns -ENODEV if no such interface exists, otherwise the result
 * of isdn_net_force_dial_lp().
 */
int
isdn_net_force_dial(char *name)
{
	isdn_net_dev *p = isdn_net_findif(name);

	if (!p)
		return -ENODEV;
	return (isdn_net_force_dial_lp(p->local));
}

/* The ISDN-specific entries in the device structure.
*/
static const struct net_device_ops isdn_netdev_ops = {
	.ndo_init	  = isdn_net_init,
	.ndo_open	  = isdn_net_open,
	.ndo_stop	  = isdn_net_close,
	.ndo_do_ioctl	  = isdn_net_ioctl,

	.ndo_start_xmit	  = isdn_net_start_xmit,
	.ndo_get_stats	  = isdn_net_get_stats,
	.ndo_tx_timeout	  = isdn_net_tx_timeout,
};

/*
 * Helper for alloc_netdev():
 * initialize a freshly allocated ISDN net-device and its private
 * isdn_net_local area with safe defaults (raw-IP encapsulation,
 * no bound channels, manual dial mode).
 */
static void _isdn_setup(struct net_device *dev)
{
	isdn_net_local *lp = netdev_priv(dev);

	ether_setup(dev);

	/* Setup the generic properties */
	dev->flags = IFF_NOARP|IFF_POINTOPOINT;
	dev->header_ops = NULL;
	dev->netdev_ops = &isdn_netdev_ops;

	/* for clients with MPPP maybe higher values better */
	dev->tx_queue_len = 30;

	lp->p_encap = ISDN_NET_ENCAP_RAWIP;
	lp->magic = ISDN_NET_MAGIC;
	lp->last = lp;
	lp->next = lp;
	/* -1 means "not bound to a driver/channel yet" for all of these */
	lp->isdn_device = -1;
	lp->isdn_channel = -1;
	lp->pre_device = -1;
	lp->pre_channel = -1;
	lp->exclusive = -1;
	lp->ppp_slot = -1;
	lp->pppbind = -1;
	skb_queue_head_init(&lp->super_tx_queue);
	lp->l2_proto = ISDN_PROTO_L2_X75I;
	lp->l3_proto = ISDN_PROTO_L3_TRANS;
	lp->triggercps = 6000;
	lp->slavedelay = 10 * HZ;
	lp->hupflags = ISDN_INHUP;	/* Do hangup even on incoming calls */
	lp->onhtime = 10;		/* Default hangup-time for saving costs */
	lp->dialmax = 1;
	/* Hangup before Callback, manual dial */
	lp->flags = ISDN_NET_CBHUP | ISDN_NET_DM_MANUAL;
	lp->cbdelay = 25;		/* Wait 5 secs before Callback */
	lp->dialtimeout = -1;		/* Infinite Dial-Timeout */
	lp->dialwait = 5 * HZ;		/* Wait 5 sec. after failed dial */
	lp->dialstarted = 0;		/* Jiffies of last dial-start */
	lp->dialwait_timer = 0;		/* Jiffies of earliest next dial-start */
}

/*
 * Allocate a new network-interface and initialize its data structures.
*/ char * isdn_net_new(char *name, struct net_device *master) { isdn_net_dev *netdev; /* Avoid creating an existing interface */ if (isdn_net_findif(name)) { printk(KERN_WARNING "isdn_net: interface %s already exists\n", name); return NULL; } if (name == NULL) return NULL; if (!(netdev = kzalloc(sizeof(isdn_net_dev), GFP_KERNEL))) { printk(KERN_WARNING "isdn_net: Could not allocate net-device\n"); return NULL; } netdev->dev = alloc_netdev(sizeof(isdn_net_local), name, _isdn_setup); if (!netdev->dev) { printk(KERN_WARNING "isdn_net: Could not allocate network device\n"); kfree(netdev); return NULL; } netdev->local = netdev_priv(netdev->dev); if (master) { /* Device shall be a slave */ struct net_device *p = MASTER_TO_SLAVE(master); struct net_device *q = master; netdev->local->master = master; /* Put device at end of slave-chain */ while (p) { q = p; p = MASTER_TO_SLAVE(p); } MASTER_TO_SLAVE(q) = netdev->dev; } else { /* Device shall be a master */ /* * Watchdog timer (currently) for master only. 
*/ netdev->dev->watchdog_timeo = ISDN_NET_TX_TIMEOUT; if (register_netdev(netdev->dev) != 0) { printk(KERN_WARNING "isdn_net: Could not register net-device\n"); free_netdev(netdev->dev); kfree(netdev); return NULL; } } netdev->queue = netdev->local; spin_lock_init(&netdev->queue_lock); netdev->local->netdev = netdev; INIT_WORK(&netdev->local->tqueue, isdn_net_softint); spin_lock_init(&netdev->local->xmit_lock); /* Put into to netdev-chain */ netdev->next = (void *) dev->netdev; dev->netdev = netdev; return netdev->dev->name; } char * isdn_net_newslave(char *parm) { char *p = strchr(parm, ','); isdn_net_dev *n; char newname[10]; if (p) { /* Slave-Name MUST not be empty */ if (!strlen(p + 1)) return NULL; strcpy(newname, p + 1); *p = 0; /* Master must already exist */ if (!(n = isdn_net_findif(parm))) return NULL; /* Master must be a real interface, not a slave */ if (n->local->master) return NULL; /* Master must not be started yet */ if (isdn_net_device_started(n)) return NULL; return (isdn_net_new(newname, n->dev)); } return NULL; } /* * Set interface-parameters. * Always set all parameters, so the user-level application is responsible * for not overwriting existing setups. It has to get the current * setup first, if only selected parameters are to be changed. 
*/ int isdn_net_setcfg(isdn_net_ioctl_cfg * cfg) { isdn_net_dev *p = isdn_net_findif(cfg->name); ulong features; int i; int drvidx; int chidx; char drvid[25]; if (p) { isdn_net_local *lp = p->local; /* See if any registered driver supports the features we want */ features = ((1 << cfg->l2_proto) << ISDN_FEATURE_L2_SHIFT) | ((1 << cfg->l3_proto) << ISDN_FEATURE_L3_SHIFT); for (i = 0; i < ISDN_MAX_DRIVERS; i++) if (dev->drv[i]) if ((dev->drv[i]->interface->features & features) == features) break; if (i == ISDN_MAX_DRIVERS) { printk(KERN_WARNING "isdn_net: No driver with selected features\n"); return -ENODEV; } if (lp->p_encap != cfg->p_encap){ #ifdef CONFIG_ISDN_X25 struct concap_proto * cprot = p -> cprot; #endif if (isdn_net_device_started(p)) { printk(KERN_WARNING "%s: cannot change encap when if is up\n", p->dev->name); return -EBUSY; } #ifdef CONFIG_ISDN_X25 if( cprot && cprot -> pops ) cprot -> pops -> proto_del ( cprot ); p -> cprot = NULL; lp -> dops = NULL; /* ... , prepare for configuration of new one ... */ switch ( cfg -> p_encap ){ case ISDN_NET_ENCAP_X25IFACE: lp -> dops = &isdn_concap_reliable_dl_dops; } /* ... and allocate new one ... 
*/ p -> cprot = isdn_concap_new( cfg -> p_encap ); /* p -> cprot == NULL now if p_encap is not supported by means of the concap_proto mechanism */ /* the protocol is not configured yet; this will happen later when isdn_net_reset() is called */ #endif } switch ( cfg->p_encap ) { case ISDN_NET_ENCAP_SYNCPPP: #ifndef CONFIG_ISDN_PPP printk(KERN_WARNING "%s: SyncPPP support not configured\n", p->dev->name); return -EINVAL; #else p->dev->type = ARPHRD_PPP; /* change ARP type */ p->dev->addr_len = 0; #endif break; case ISDN_NET_ENCAP_X25IFACE: #ifndef CONFIG_ISDN_X25 printk(KERN_WARNING "%s: isdn-x25 support not configured\n", p->dev->name); return -EINVAL; #else p->dev->type = ARPHRD_X25; /* change ARP type */ p->dev->addr_len = 0; #endif break; case ISDN_NET_ENCAP_CISCOHDLCK: break; default: if( cfg->p_encap >= 0 && cfg->p_encap <= ISDN_NET_ENCAP_MAX_ENCAP ) break; printk(KERN_WARNING "%s: encapsulation protocol %d not supported\n", p->dev->name, cfg->p_encap); return -EINVAL; } if (strlen(cfg->drvid)) { /* A bind has been requested ... 
*/ char *c, *e; drvidx = -1; chidx = -1; strcpy(drvid, cfg->drvid); if ((c = strchr(drvid, ','))) { /* The channel-number is appended to the driver-Id with a comma */ chidx = (int) simple_strtoul(c + 1, &e, 10); if (e == c) chidx = -1; *c = '\0'; } for (i = 0; i < ISDN_MAX_DRIVERS; i++) /* Lookup driver-Id in array */ if (!(strcmp(dev->drvid[i], drvid))) { drvidx = i; break; } if ((drvidx == -1) || (chidx == -1)) /* Either driver-Id or channel-number invalid */ return -ENODEV; } else { /* Parameters are valid, so get them */ drvidx = lp->pre_device; chidx = lp->pre_channel; } if (cfg->exclusive > 0) { unsigned long flags; /* If binding is exclusive, try to grab the channel */ spin_lock_irqsave(&dev->lock, flags); if ((i = isdn_get_free_channel(ISDN_USAGE_NET, lp->l2_proto, lp->l3_proto, drvidx, chidx, lp->msn)) < 0) { /* Grab failed, because desired channel is in use */ lp->exclusive = -1; spin_unlock_irqrestore(&dev->lock, flags); return -EBUSY; } /* All went ok, so update isdninfo */ dev->usage[i] = ISDN_USAGE_EXCLUSIVE; isdn_info_update(); spin_unlock_irqrestore(&dev->lock, flags); lp->exclusive = i; } else { /* Non-exclusive binding or unbind. */ lp->exclusive = -1; if ((lp->pre_device != -1) && (cfg->exclusive == -1)) { isdn_unexclusive_channel(lp->pre_device, lp->pre_channel); isdn_free_channel(lp->pre_device, lp->pre_channel, ISDN_USAGE_NET); drvidx = -1; chidx = -1; } } strlcpy(lp->msn, cfg->eaz, sizeof(lp->msn)); lp->pre_device = drvidx; lp->pre_channel = chidx; lp->onhtime = cfg->onhtime; lp->charge = cfg->charge; lp->l2_proto = cfg->l2_proto; lp->l3_proto = cfg->l3_proto; lp->cbdelay = cfg->cbdelay; lp->dialmax = cfg->dialmax; lp->triggercps = cfg->triggercps; lp->slavedelay = cfg->slavedelay * HZ; lp->pppbind = cfg->pppbind; lp->dialtimeout = cfg->dialtimeout >= 0 ? 
cfg->dialtimeout * HZ : -1; lp->dialwait = cfg->dialwait * HZ; if (cfg->secure) lp->flags |= ISDN_NET_SECURE; else lp->flags &= ~ISDN_NET_SECURE; if (cfg->cbhup) lp->flags |= ISDN_NET_CBHUP; else lp->flags &= ~ISDN_NET_CBHUP; switch (cfg->callback) { case 0: lp->flags &= ~(ISDN_NET_CALLBACK | ISDN_NET_CBOUT); break; case 1: lp->flags |= ISDN_NET_CALLBACK; lp->flags &= ~ISDN_NET_CBOUT; break; case 2: lp->flags |= ISDN_NET_CBOUT; lp->flags &= ~ISDN_NET_CALLBACK; break; } lp->flags &= ~ISDN_NET_DIALMODE_MASK; /* first all bits off */ if (cfg->dialmode && !(cfg->dialmode & ISDN_NET_DIALMODE_MASK)) { /* old isdnctrl version, where only 0 or 1 is given */ printk(KERN_WARNING "Old isdnctrl version detected! Please update.\n"); lp->flags |= ISDN_NET_DM_OFF; /* turn on `off' bit */ } else { lp->flags |= cfg->dialmode; /* turn on selected bits */ } if (cfg->chargehup) lp->hupflags |= ISDN_CHARGEHUP; else lp->hupflags &= ~ISDN_CHARGEHUP; if (cfg->ihup) lp->hupflags |= ISDN_INHUP; else lp->hupflags &= ~ISDN_INHUP; if (cfg->chargeint > 10) { lp->hupflags |= ISDN_CHARGEHUP | ISDN_HAVECHARGE | ISDN_MANCHARGE; lp->chargeint = cfg->chargeint * HZ; } if (cfg->p_encap != lp->p_encap) { if (cfg->p_encap == ISDN_NET_ENCAP_RAWIP) { p->dev->header_ops = NULL; p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } else { p->dev->header_ops = &isdn_header_ops; if (cfg->p_encap == ISDN_NET_ENCAP_ETHER) p->dev->flags = IFF_BROADCAST | IFF_MULTICAST; else p->dev->flags = IFF_NOARP|IFF_POINTOPOINT; } } lp->p_encap = cfg->p_encap; return 0; } return -ENODEV; } /* * Perform get-interface-parameters.ioctl */ int isdn_net_getcfg(isdn_net_ioctl_cfg * cfg) { isdn_net_dev *p = isdn_net_findif(cfg->name); if (p) { isdn_net_local *lp = p->local; strcpy(cfg->eaz, lp->msn); cfg->exclusive = lp->exclusive; if (lp->pre_device >= 0) { sprintf(cfg->drvid, "%s,%d", dev->drvid[lp->pre_device], lp->pre_channel); } else cfg->drvid[0] = '\0'; cfg->onhtime = lp->onhtime; cfg->charge = lp->charge; cfg->l2_proto = 
lp->l2_proto; cfg->l3_proto = lp->l3_proto; cfg->p_encap = lp->p_encap; cfg->secure = (lp->flags & ISDN_NET_SECURE) ? 1 : 0; cfg->callback = 0; if (lp->flags & ISDN_NET_CALLBACK) cfg->callback = 1; if (lp->flags & ISDN_NET_CBOUT) cfg->callback = 2; cfg->cbhup = (lp->flags & ISDN_NET_CBHUP) ? 1 : 0; cfg->dialmode = lp->flags & ISDN_NET_DIALMODE_MASK; cfg->chargehup = (lp->hupflags & 4) ? 1 : 0; cfg->ihup = (lp->hupflags & 8) ? 1 : 0; cfg->cbdelay = lp->cbdelay; cfg->dialmax = lp->dialmax; cfg->triggercps = lp->triggercps; cfg->slavedelay = lp->slavedelay / HZ; cfg->chargeint = (lp->hupflags & ISDN_CHARGEHUP) ? (lp->chargeint / HZ) : 0; cfg->pppbind = lp->pppbind; cfg->dialtimeout = lp->dialtimeout >= 0 ? lp->dialtimeout / HZ : -1; cfg->dialwait = lp->dialwait / HZ; if (lp->slave) { if (strlen(lp->slave->name) > 8) strcpy(cfg->slave, "too-long"); else strcpy(cfg->slave, lp->slave->name); } else cfg->slave[0] = '\0'; if (lp->master) { if (strlen(lp->master->name) > 8) strcpy(cfg->master, "too-long"); strcpy(cfg->master, lp->master->name); } else cfg->master[0] = '\0'; return 0; } return -ENODEV; } /* * Add a phone-number to an interface. */ int isdn_net_addphone(isdn_net_ioctl_phone * phone) { isdn_net_dev *p = isdn_net_findif(phone->name); isdn_net_phone *n; if (p) { if (!(n = kmalloc(sizeof(isdn_net_phone), GFP_KERNEL))) return -ENOMEM; strlcpy(n->num, phone->phone, sizeof(n->num)); n->next = p->local->phone[phone->outgoing & 1]; p->local->phone[phone->outgoing & 1] = n; return 0; } return -ENODEV; } /* * Copy a string of all phone-numbers of an interface to user space. * This might sleep and must be called with the isdn semaphore down. 
*/
/*
 * Write all phone numbers of one list (incoming or outgoing, chosen by
 * phone->outgoing bit 0) as a space-separated, NUL-terminated string
 * to the user buffer @phones.
 *
 * Returns the number of bytes written (including the final NUL),
 * -ENODEV if the interface does not exist, -EFAULT on copy failure.
 *
 * NOTE(review): the put_user() return values are ignored here and the
 * caller-supplied buffer length is not known/checked -- presumably the
 * ioctl layer guarantees a large-enough buffer; verify against callers.
 */
int
isdn_net_getphones(isdn_net_ioctl_phone * phone, char __user *phones)
{
	isdn_net_dev *p = isdn_net_findif(phone->name);
	int inout = phone->outgoing & 1;
	int more = 0;
	int count = 0;
	isdn_net_phone *n;

	if (!p)
		return -ENODEV;
	inout &= 1;
	for (n = p->local->phone[inout]; n; n = n->next) {
		if (more) {
			/* separate consecutive numbers with a blank */
			put_user(' ', phones++);
			count++;
		}
		/* copies the number plus its NUL; the NUL of all but the
		 * last entry is overwritten by the next iteration because
		 * 'phones' advances only by strlen(n->num) */
		if (copy_to_user(phones, n->num, strlen(n->num) + 1)) {
			return -EFAULT;
		}
		phones += strlen(n->num);
		count += strlen(n->num);
		more = 1;
	}
	/* terminating NUL (also handles the empty-list case) */
	put_user(0, phones);
	count++;
	return count;
}

/*
 * Copy a string containing the peer's phone number of a connected interface
 * to user space.
 */
int
isdn_net_getpeer(isdn_net_ioctl_phone *phone, isdn_net_ioctl_phone __user *peer)
{
	isdn_net_dev *p = isdn_net_findif(phone->name);
	int ch, dv, idx;

	if (!p)
		return -ENODEV;
	/*
	 * Theoretical race: while this executes, the remote number might
	 * become invalid (hang up) or change (new connection), resulting
	 * in (partially) wrong number copied to user. This race
	 * currently ignored.
	 */
	ch = p->local->isdn_channel;
	dv = p->local->isdn_device;
	/* -1/-1 means the interface is not bound to any channel */
	if(ch < 0 && dv < 0)
		return -ENOTCONN;
	idx = isdn_dc2minor(dv, ch);
	if (idx <0 )
		return -ENODEV;
	/* for pre-bound channels, we need this extra check */
	if (strncmp(dev->num[idx], "???", 3) == 0)
		return -ENOTCONN;
	/* NOTE(review): strncpy() may leave phone->phone unterminated if
	 * dev->num[idx] fills ISDN_MSNLEN exactly -- confirm consumers
	 * bound their reads. */
	strncpy(phone->phone, dev->num[idx], ISDN_MSNLEN);
	phone->outgoing = USG_OUTGOING(dev->usage[idx]);
	if (copy_to_user(peer, phone, sizeof(*peer)))
		return -EFAULT;
	return 0;
}

/*
 * Delete a phone-number from an interface.
*/
/*
 * Remove the first list entry matching phone->phone from the selected
 * (incoming/outgoing) number list of an interface.
 *
 * Returns 0 on success, -EINVAL if the number was not found,
 * -ENODEV if the interface does not exist.
 */
int
isdn_net_delphone(isdn_net_ioctl_phone * phone)
{
	isdn_net_dev *p = isdn_net_findif(phone->name);
	int inout = phone->outgoing & 1;
	isdn_net_phone *n;
	isdn_net_phone *m;

	if (p) {
		n = p->local->phone[inout];
		m = NULL;		/* predecessor of n while walking */
		while (n) {
			if (!strcmp(n->num, phone->phone)) {
				/* don't leave the dial cursor dangling on
				 * the entry we are about to free */
				if (p->local->dial == n)
					p->local->dial = n->next;
				if (m)
					m->next = n->next;
				else
					p->local->phone[inout] = n->next;
				kfree(n);
				return 0;
			}
			m = n;
			n = (isdn_net_phone *) n->next;
		}
		return -EINVAL;
	}
	return -ENODEV;
}

/*
 * Delete all phone-numbers of an interface.
 * Frees both the incoming and the outgoing list and clears the
 * dial cursor.  Always returns 0.
 */
static int
isdn_net_rmallphone(isdn_net_dev * p)
{
	isdn_net_phone *n;
	isdn_net_phone *m;
	int i;

	for (i = 0; i < 2; i++) {
		n = p->local->phone[i];
		while (n) {
			m = n->next;
			kfree(n);
			n = m;
		}
		p->local->phone[i] = NULL;
	}
	p->local->dial = NULL;
	return 0;
}

/*
 * Force a hangup of a network-interface.
 * Hangs up all slaves first, then the interface itself.
 *
 * Returns 0 on success, 1 if the interface is not connected to any
 * driver, -ENODEV if it does not exist.
 */
int
isdn_net_force_hangup(char *name)
{
	isdn_net_dev *p = isdn_net_findif(name);
	struct net_device *q;

	if (p) {
		if (p->local->isdn_device < 0)
			return 1;
		q = p->local->slave;
		/* If this interface has slaves, do a hangup for them also. */
		while (q) {
			isdn_net_hangup(q);
			q = MASTER_TO_SLAVE(q);
		}
		isdn_net_hangup(p->dev);
		return 0;
	}
	return -ENODEV;
}

/*
 * Helper-function for isdn_net_rm: Do the real work.
*/
/*
 * Tear down and free one interface.
 *
 * @p: the interface to remove.
 * @q: its predecessor in the global netdev chain (NULL if @p is the
 *     chain head) -- needed to unlink @p.
 *
 * Frees phone lists, releases an exclusive channel binding, unlinks
 * the device from the master/slave relationships and the global chain,
 * removes any slave recursively, and finally frees the net_device.
 *
 * Returns 0 on success, -EBUSY if the interface is still up.
 */
static int
isdn_net_realrm(isdn_net_dev * p, isdn_net_dev * q)
{
	u_long flags;

	if (isdn_net_device_started(p)) {
		return -EBUSY;
	}
#ifdef CONFIG_ISDN_X25
	if( p -> cprot && p -> cprot -> pops )
		p -> cprot -> pops -> proto_del ( p -> cprot );
#endif
	/* Free all phone-entries */
	isdn_net_rmallphone(p);
	/* If interface is bound exclusive, free channel-usage */
	if (p->local->exclusive != -1)
		isdn_unexclusive_channel(p->local->pre_device, p->local->pre_channel);
	if (p->local->master) {
		/* It's a slave-device, so update master's slave-pointer if necessary */
		if (((isdn_net_local *) ISDN_MASTER_PRIV(p->local))->slave ==
		    p->dev)
			((isdn_net_local *) ISDN_MASTER_PRIV(p->local))->slave =
				p->local->slave;
	} else {
		/* Unregister only if it's a master-device */
		unregister_netdev(p->dev);
	}
	/* Unlink device from chain */
	spin_lock_irqsave(&dev->lock, flags);
	if (q)
		q->next = p->next;
	else
		dev->netdev = p->next;
	if (p->local->slave) {
		/* If this interface has a slave, remove it also */
		char *slavename = p->local->slave->name;
		isdn_net_dev *n = dev->netdev;
		q = NULL;
		while (n) {
			if (!strcmp(n->dev->name, slavename)) {
				/* recursion must not run under dev->lock;
				 * drop it around the nested removal */
				spin_unlock_irqrestore(&dev->lock, flags);
				isdn_net_realrm(n, q);
				spin_lock_irqsave(&dev->lock, flags);
				break;
			}
			q = n;
			n = (isdn_net_dev *) n->next;
		}
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	/* If no more net-devices remain, disable auto-hangup timer */
	if (dev->netdev == NULL)
		isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 0);
	free_netdev(p->dev);
	kfree(p);

	return 0;
}

/*
 * Remove a single network-interface.
*/
/*
 * Remove the interface named @name.
 *
 * Walks the global netdev chain under dev->lock, then delegates the
 * actual teardown (outside the lock) to isdn_net_realrm().
 *
 * Returns the result of isdn_net_realrm(), or -ENODEV if no interface
 * of that name exists.
 */
int
isdn_net_rm(char *name)
{
	u_long flags;
	isdn_net_dev *p;
	isdn_net_dev *q;

	/* Search name in netdev-chain */
	spin_lock_irqsave(&dev->lock, flags);
	p = dev->netdev;
	q = NULL;
	while (p) {
		if (!strcmp(p->dev->name, name)) {
			/* realrm takes the lock itself and may sleep */
			spin_unlock_irqrestore(&dev->lock, flags);
			return (isdn_net_realrm(p, q));
		}
		q = p;
		p = (isdn_net_dev *) p->next;
	}
	spin_unlock_irqrestore(&dev->lock, flags);
	/* If no more net-devices remain, disable auto-hangup timer */
	if (dev->netdev == NULL)
		isdn_timer_ctrl(ISDN_TIMER_NETHANGUP, 0);
	return -ENODEV;
}

/*
 * Remove all network-interfaces
 *
 * Repeatedly removes the chain head; only master devices are removed
 * explicitly, their slaves are torn down recursively by
 * isdn_net_realrm().  Returns 0, or the first non-zero error from
 * isdn_net_realrm().
 */
int
isdn_net_rmall(void)
{
	u_long flags;
	int ret;

	/* Walk through netdev-chain */
	spin_lock_irqsave(&dev->lock, flags);
	while (dev->netdev) {
		if (!dev->netdev->local->master) {
			/* Remove master-devices only, slaves get removed with their master */
			spin_unlock_irqrestore(&dev->lock, flags);
			if ((ret = isdn_net_realrm(dev->netdev, NULL))) {
				return ret;
			}
			spin_lock_irqsave(&dev->lock, flags);
		}
	}
	dev->netdev = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);
	return 0;
}
gpl-2.0
project-voodoo/linux_samsung
drivers/xen/xenbus/xenbus_probe_frontend.c
1569
8903
#define DPRINTK(fmt, args...) \ pr_debug("xenbus_probe (%s:%d) " fmt ".\n", \ __func__, __LINE__, ##args) #include <linux/kernel.h> #include <linux/err.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/proc_fs.h> #include <linux/notifier.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/io.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/xen/hypervisor.h> #include <xen/xenbus.h> #include <xen/events.h> #include <xen/page.h> #include <xen/platform_pci.h> #include "xenbus_comms.h" #include "xenbus_probe.h" /* device/<type>/<id> => <type>-<id> */ static int frontend_bus_id(char bus_id[XEN_BUS_ID_SIZE], const char *nodename) { nodename = strchr(nodename, '/'); if (!nodename || strlen(nodename + 1) >= XEN_BUS_ID_SIZE) { printk(KERN_WARNING "XENBUS: bad frontend %s\n", nodename); return -EINVAL; } strlcpy(bus_id, nodename + 1, XEN_BUS_ID_SIZE); if (!strchr(bus_id, '/')) { printk(KERN_WARNING "XENBUS: bus_id %s no slash\n", bus_id); return -EINVAL; } *strchr(bus_id, '/') = '-'; return 0; } /* device/<typename>/<name> */ static int xenbus_probe_frontend(struct xen_bus_type *bus, const char *type, const char *name) { char *nodename; int err; nodename = kasprintf(GFP_KERNEL, "%s/%s/%s", bus->root, type, name); if (!nodename) return -ENOMEM; DPRINTK("%s", nodename); err = xenbus_probe_node(bus, type, nodename); kfree(nodename); return err; } static int xenbus_uevent_frontend(struct device *_dev, struct kobj_uevent_env *env) { struct xenbus_device *dev = to_xenbus_device(_dev); if (add_uevent_var(env, "MODALIAS=xen:%s", dev->devicetype)) return -ENOMEM; return 0; } static void backend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { xenbus_otherend_changed(watch, vec, len, 1); } static struct device_attribute xenbus_frontend_dev_attrs[] = { __ATTR_NULL }; static const struct dev_pm_ops xenbus_pm_ops = { .suspend = xenbus_dev_suspend, .resume = 
xenbus_dev_resume, .freeze = xenbus_dev_suspend, .thaw = xenbus_dev_cancel, .restore = xenbus_dev_resume, }; static struct xen_bus_type xenbus_frontend = { .root = "device", .levels = 2, /* device/type/<id> */ .get_bus_id = frontend_bus_id, .probe = xenbus_probe_frontend, .otherend_changed = backend_changed, .bus = { .name = "xen", .match = xenbus_match, .uevent = xenbus_uevent_frontend, .probe = xenbus_dev_probe, .remove = xenbus_dev_remove, .shutdown = xenbus_dev_shutdown, .dev_attrs = xenbus_frontend_dev_attrs, .pm = &xenbus_pm_ops, }, }; static void frontend_changed(struct xenbus_watch *watch, const char **vec, unsigned int len) { DPRINTK(""); xenbus_dev_changed(vec[XS_WATCH_PATH], &xenbus_frontend); } /* We watch for devices appearing and vanishing. */ static struct xenbus_watch fe_watch = { .node = "device", .callback = frontend_changed, }; static int read_backend_details(struct xenbus_device *xendev) { return xenbus_read_otherend_details(xendev, "backend-id", "backend"); } static int is_device_connecting(struct device *dev, void *data, bool ignore_nonessential) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; struct xenbus_driver *xendrv; /* * A device with no driver will never connect. We care only about * devices which should currently be in the process of connecting. */ if (!dev->driver) return 0; /* Is this search limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (ignore_nonessential) { /* With older QEMU, for PVonHVM guests the guest config files * could contain: vfb = [ 'vnc=1, vnclisten=0.0.0.0'] * which is nonsensical as there is no PV FB (there can be * a PVKB) running as HVM guest. 
*/ if ((strncmp(xendev->nodename, "device/vkbd", 11) == 0)) return 0; if ((strncmp(xendev->nodename, "device/vfb", 10) == 0)) return 0; } xendrv = to_xenbus_driver(dev->driver); return (xendev->state < XenbusStateConnected || (xendev->state == XenbusStateConnected && xendrv->is_ready && !xendrv->is_ready(xendev))); } static int essential_device_connecting(struct device *dev, void *data) { return is_device_connecting(dev, data, true /* ignore PV[KBB+FB] */); } static int non_essential_device_connecting(struct device *dev, void *data) { return is_device_connecting(dev, data, false); } static int exists_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, essential_device_connecting); } static int exists_non_essential_connecting_device(struct device_driver *drv) { return bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, non_essential_device_connecting); } static int print_device_status(struct device *dev, void *data) { struct xenbus_device *xendev = to_xenbus_device(dev); struct device_driver *drv = data; /* Is this operation limited to a particular driver? */ if (drv && (dev->driver != drv)) return 0; if (!dev->driver) { /* Information only: is this too noisy? */ printk(KERN_INFO "XENBUS: Device with no driver: %s\n", xendev->nodename); } else if (xendev->state < XenbusStateConnected) { enum xenbus_state rstate = XenbusStateUnknown; if (xendev->otherend) rstate = xenbus_read_driver_state(xendev->otherend); printk(KERN_WARNING "XENBUS: Timeout connecting " "to device: %s (local state %d, remote state %d)\n", xendev->nodename, xendev->state, rstate); } return 0; } /* We only wait for device setup after most initcalls have run. 
*/ static int ready_to_wait_for_devices; static bool wait_loop(unsigned long start, unsigned int max_delay, unsigned int *seconds_waited) { if (time_after(jiffies, start + (*seconds_waited+5)*HZ)) { if (!*seconds_waited) printk(KERN_WARNING "XENBUS: Waiting for " "devices to initialise: "); *seconds_waited += 5; printk("%us...", max_delay - *seconds_waited); if (*seconds_waited == max_delay) return true; } schedule_timeout_interruptible(HZ/10); return false; } /* * On a 5-minute timeout, wait for all devices currently configured. We need * to do this to guarantee that the filesystems and / or network devices * needed for boot are available, before we can allow the boot to proceed. * * This needs to be on a late_initcall, to happen after the frontend device * drivers have been initialised, but before the root fs is mounted. * * A possible improvement here would be to have the tools add a per-device * flag to the store entry, indicating whether it is needed at boot time. * This would allow people who knew what they were doing to accelerate their * boot slightly, but of course needs tools or manual intervention to set up * those flags correctly. */ static void wait_for_devices(struct xenbus_driver *xendrv) { unsigned long start = jiffies; struct device_driver *drv = xendrv ? 
&xendrv->driver : NULL; unsigned int seconds_waited = 0; if (!ready_to_wait_for_devices || !xen_domain()) return; while (exists_non_essential_connecting_device(drv)) if (wait_loop(start, 30, &seconds_waited)) break; /* Skips PVKB and PVFB check.*/ while (exists_essential_connecting_device(drv)) if (wait_loop(start, 270, &seconds_waited)) break; if (seconds_waited) printk("\n"); bus_for_each_dev(&xenbus_frontend.bus, NULL, drv, print_device_status); } int __xenbus_register_frontend(struct xenbus_driver *drv, struct module *owner, const char *mod_name) { int ret; drv->read_otherend_details = read_backend_details; ret = xenbus_register_driver_common(drv, &xenbus_frontend, owner, mod_name); if (ret) return ret; /* If this driver is loaded as a module wait for devices to attach. */ wait_for_devices(drv); return 0; } EXPORT_SYMBOL_GPL(__xenbus_register_frontend); static int frontend_probe_and_watch(struct notifier_block *notifier, unsigned long event, void *data) { /* Enumerate devices in xenstore and watch for changes. */ xenbus_probe_devices(&xenbus_frontend); register_xenbus_watch(&fe_watch); return NOTIFY_DONE; } static int __init xenbus_probe_frontend_init(void) { static struct notifier_block xenstore_notifier = { .notifier_call = frontend_probe_and_watch }; int err; DPRINTK(""); /* Register ourselves with the kernel bus subsystem */ err = bus_register(&xenbus_frontend.bus); if (err) return err; register_xenstore_notifier(&xenstore_notifier); return 0; } subsys_initcall(xenbus_probe_frontend_init); #ifndef MODULE static int __init boot_wait_for_devices(void) { if (xen_hvm_domain() && !xen_platform_pci_unplug) return -ENODEV; ready_to_wait_for_devices = 1; wait_for_devices(NULL); return 0; } late_initcall(boot_wait_for_devices); #endif MODULE_LICENSE("GPL");
gpl-2.0
i2t/rmptcp
arch/arm/mach-u300/core.c
1825
11702
/* * * arch/arm/mach-u300/core.c * * * Copyright (C) 2007-2012 ST-Ericsson SA * License terms: GNU General Public License (GPL) version 2 * Core platform support, IRQ handling and device definitions. * Author: Linus Walleij <linus.walleij@stericsson.com> */ #include <linux/kernel.h> #include <linux/pinctrl/machine.h> #include <linux/pinctrl/pinconf-generic.h> #include <linux/platform_data/clk-u300.h> #include <linux/irqchip.h> #include <linux/of_address.h> #include <linux/of_platform.h> #include <linux/clocksource.h> #include <linux/clk.h> #include <asm/mach/map.h> #include <asm/mach/arch.h> /* * These are the large blocks of memory allocated for I/O. * the defines are used for setting up the I/O memory mapping. */ /* NAND Flash CS0 */ #define U300_NAND_CS0_PHYS_BASE 0x80000000 /* NFIF */ #define U300_NAND_IF_PHYS_BASE 0x9f800000 /* ALE, CLE offset for FSMC NAND */ #define PLAT_NAND_CLE (1 << 16) #define PLAT_NAND_ALE (1 << 17) /* AHB Peripherals */ #define U300_AHB_PER_PHYS_BASE 0xa0000000 #define U300_AHB_PER_VIRT_BASE 0xff010000 /* FAST Peripherals */ #define U300_FAST_PER_PHYS_BASE 0xc0000000 #define U300_FAST_PER_VIRT_BASE 0xff020000 /* SLOW Peripherals */ #define U300_SLOW_PER_PHYS_BASE 0xc0010000 #define U300_SLOW_PER_VIRT_BASE 0xff000000 /* Boot ROM */ #define U300_BOOTROM_PHYS_BASE 0xffff0000 #define U300_BOOTROM_VIRT_BASE 0xffff0000 /* SEMI config base */ #define U300_SEMI_CONFIG_BASE 0x2FFE0000 /* * AHB peripherals */ /* AHB Peripherals Bridge Controller */ #define U300_AHB_BRIDGE_BASE (U300_AHB_PER_PHYS_BASE+0x0000) /* Vectored Interrupt Controller 0, servicing 32 interrupts */ #define U300_INTCON0_BASE (U300_AHB_PER_PHYS_BASE+0x1000) #define U300_INTCON0_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x1000) /* Vectored Interrupt Controller 1, servicing 32 interrupts */ #define U300_INTCON1_BASE (U300_AHB_PER_PHYS_BASE+0x2000) #define U300_INTCON1_VBASE IOMEM(U300_AHB_PER_VIRT_BASE+0x2000) /* Memory Stick Pro (MSPRO) controller */ #define U300_MSPRO_BASE 
(U300_AHB_PER_PHYS_BASE+0x3000) /* EMIF Configuration Area */ #define U300_EMIF_CFG_BASE (U300_AHB_PER_PHYS_BASE+0x4000) /* * FAST peripherals */ /* FAST bridge control */ #define U300_FAST_BRIDGE_BASE (U300_FAST_PER_PHYS_BASE+0x0000) /* MMC/SD controller */ #define U300_MMCSD_BASE (U300_FAST_PER_PHYS_BASE+0x1000) /* PCM I2S0 controller */ #define U300_PCM_I2S0_BASE (U300_FAST_PER_PHYS_BASE+0x2000) /* PCM I2S1 controller */ #define U300_PCM_I2S1_BASE (U300_FAST_PER_PHYS_BASE+0x3000) /* I2C0 controller */ #define U300_I2C0_BASE (U300_FAST_PER_PHYS_BASE+0x4000) /* I2C1 controller */ #define U300_I2C1_BASE (U300_FAST_PER_PHYS_BASE+0x5000) /* SPI controller */ #define U300_SPI_BASE (U300_FAST_PER_PHYS_BASE+0x6000) /* Fast UART1 on U335 only */ #define U300_UART1_BASE (U300_FAST_PER_PHYS_BASE+0x7000) /* * SLOW peripherals */ /* SLOW bridge control */ #define U300_SLOW_BRIDGE_BASE (U300_SLOW_PER_PHYS_BASE) /* SYSCON */ #define U300_SYSCON_BASE (U300_SLOW_PER_PHYS_BASE+0x1000) #define U300_SYSCON_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x1000) /* Watchdog */ #define U300_WDOG_BASE (U300_SLOW_PER_PHYS_BASE+0x2000) /* UART0 */ #define U300_UART0_BASE (U300_SLOW_PER_PHYS_BASE+0x3000) /* APP side special timer */ #define U300_TIMER_APP_BASE (U300_SLOW_PER_PHYS_BASE+0x4000) #define U300_TIMER_APP_VBASE IOMEM(U300_SLOW_PER_VIRT_BASE+0x4000) /* Keypad */ #define U300_KEYPAD_BASE (U300_SLOW_PER_PHYS_BASE+0x5000) /* GPIO */ #define U300_GPIO_BASE (U300_SLOW_PER_PHYS_BASE+0x6000) /* RTC */ #define U300_RTC_BASE (U300_SLOW_PER_PHYS_BASE+0x7000) /* Bus tracer */ #define U300_BUSTR_BASE (U300_SLOW_PER_PHYS_BASE+0x8000) /* Event handler (hardware queue) */ #define U300_EVHIST_BASE (U300_SLOW_PER_PHYS_BASE+0x9000) /* Genric Timer */ #define U300_TIMER_BASE (U300_SLOW_PER_PHYS_BASE+0xa000) /* PPM */ #define U300_PPM_BASE (U300_SLOW_PER_PHYS_BASE+0xb000) /* * REST peripherals */ /* ISP (image signal processor) */ #define U300_ISP_BASE (0xA0008000) /* DMA Controller base */ #define 
U300_DMAC_BASE (0xC0020000) /* MSL Base */ #define U300_MSL_BASE (0xc0022000) /* APEX Base */ #define U300_APEX_BASE (0xc0030000) /* Video Encoder Base */ #define U300_VIDEOENC_BASE (0xc0080000) /* XGAM Base */ #define U300_XGAM_BASE (0xd0000000) /* * SYSCON addresses applicable to the core machine. */ /* Chip ID register 16bit (R/-) */ #define U300_SYSCON_CIDR (0x400) /* SMCR */ #define U300_SYSCON_SMCR (0x4d0) #define U300_SYSCON_SMCR_FIELD_MASK (0x000e) #define U300_SYSCON_SMCR_SEMI_SREFACK_IND (0x0008) #define U300_SYSCON_SMCR_SEMI_SREFREQ_ENABLE (0x0004) #define U300_SYSCON_SMCR_SEMI_EXT_BOOT_MODE_ENABLE (0x0002) /* CPU_SW_DBGEN Software Debug Enable 16bit (R/W) */ #define U300_SYSCON_CSDR (0x4f0) #define U300_SYSCON_CSDR_SW_DEBUG_ENABLE (0x0001) /* PRINT_CONTROL Print Control 16bit (R/-) */ #define U300_SYSCON_PCR (0x4f8) #define U300_SYSCON_PCR_SERV_IND (0x0001) /* BOOT_CONTROL 16bit (R/-) */ #define U300_SYSCON_BCR (0x4fc) #define U300_SYSCON_BCR_ACC_CPU_SUBSYS_VINITHI_IND (0x0400) #define U300_SYSCON_BCR_APP_CPU_SUBSYS_VINITHI_IND (0x0200) #define U300_SYSCON_BCR_EXTRA_BOOT_OPTION_MASK (0x01FC) #define U300_SYSCON_BCR_APP_BOOT_SERV_MASK (0x0003) static void __iomem *syscon_base; /* * Static I/O mappings that are needed for booting the U300 platforms. The * only things we need are the areas where we find the timer, syscon and * intcon, since the remaining device drivers will map their own memory * physical to virtual as the need arise. 
*/ static struct map_desc u300_io_desc[] __initdata = { { .virtual = U300_SLOW_PER_VIRT_BASE, .pfn = __phys_to_pfn(U300_SLOW_PER_PHYS_BASE), .length = SZ_64K, .type = MT_DEVICE, }, { .virtual = U300_AHB_PER_VIRT_BASE, .pfn = __phys_to_pfn(U300_AHB_PER_PHYS_BASE), .length = SZ_32K, .type = MT_DEVICE, }, { .virtual = U300_FAST_PER_VIRT_BASE, .pfn = __phys_to_pfn(U300_FAST_PER_PHYS_BASE), .length = SZ_32K, .type = MT_DEVICE, }, }; static void __init u300_map_io(void) { iotable_init(u300_io_desc, ARRAY_SIZE(u300_io_desc)); } static unsigned long pin_pullup_conf[] = { PIN_CONF_PACKED(PIN_CONFIG_BIAS_PULL_UP, 1), }; static unsigned long pin_highz_conf[] = { PIN_CONF_PACKED(PIN_CONFIG_BIAS_HIGH_IMPEDANCE, 0), }; /* Pin control settings */ static struct pinctrl_map __initdata u300_pinmux_map[] = { /* anonymous maps for chip power and EMIFs */ PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "power"), PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "emif0"), PIN_MAP_MUX_GROUP_HOG_DEFAULT("pinctrl-u300", NULL, "emif1"), /* per-device maps for MMC/SD, SPI and UART */ PIN_MAP_MUX_GROUP_DEFAULT("mmci", "pinctrl-u300", NULL, "mmc0"), PIN_MAP_MUX_GROUP_DEFAULT("pl022", "pinctrl-u300", NULL, "spi0"), PIN_MAP_MUX_GROUP_DEFAULT("uart0", "pinctrl-u300", NULL, "uart0"), /* This pin is used for clock return rather than GPIO */ PIN_MAP_CONFIGS_PIN_DEFAULT("mmci", "pinctrl-u300", "PIO APP GPIO 11", pin_pullup_conf), /* This pin is used for card detect */ PIN_MAP_CONFIGS_PIN_DEFAULT("mmci", "pinctrl-u300", "PIO MS INS", pin_highz_conf), }; struct db_chip { u16 chipid; const char *name; }; /* * This is a list of the Digital Baseband chips used in the U300 platform. 
*/ static struct db_chip db_chips[] __initdata = { { .chipid = 0xb800, .name = "DB3000", }, { .chipid = 0xc000, .name = "DB3100", }, { .chipid = 0xc800, .name = "DB3150", }, { .chipid = 0xd800, .name = "DB3200", }, { .chipid = 0xe000, .name = "DB3250", }, { .chipid = 0xe800, .name = "DB3210", }, { .chipid = 0xf000, .name = "DB3350 P1x", }, { .chipid = 0xf100, .name = "DB3350 P2x", }, { .chipid = 0x0000, /* List terminator */ .name = NULL, } }; static void __init u300_init_check_chip(void) { u16 val; struct db_chip *chip; const char *chipname; const char unknown[] = "UNKNOWN"; /* Read out and print chip ID */ val = readw(syscon_base + U300_SYSCON_CIDR); /* This is in funky bigendian order... */ val = (val & 0xFFU) << 8 | (val >> 8); chip = db_chips; chipname = unknown; for ( ; chip->chipid; chip++) { if (chip->chipid == (val & 0xFF00U)) { chipname = chip->name; break; } } printk(KERN_INFO "Initializing U300 system on %s baseband chip " \ "(chip ID 0x%04x)\n", chipname, val); if ((val & 0xFF00U) != 0xf000 && (val & 0xFF00U) != 0xf100) { printk(KERN_ERR "Platform configured for BS335 " \ " with DB3350 but %s detected, expect problems!", chipname); } } /* Forward declare this function from the watchdog */ void coh901327_watchdog_reset(void); static void u300_restart(enum reboot_mode mode, const char *cmd) { switch (mode) { case REBOOT_SOFT: case REBOOT_HARD: #ifdef CONFIG_COH901327_WATCHDOG coh901327_watchdog_reset(); #endif break; default: /* Do nothing */ break; } /* Wait for system do die/reset. 
*/ while (1); } /* These are mostly to get the right device names for the clock lookups */ static struct of_dev_auxdata u300_auxdata_lookup[] __initdata = { OF_DEV_AUXDATA("stericsson,pinctrl-u300", U300_SYSCON_BASE, "pinctrl-u300", NULL), OF_DEV_AUXDATA("stericsson,gpio-coh901", U300_GPIO_BASE, "u300-gpio", NULL), OF_DEV_AUXDATA("stericsson,coh901327", U300_WDOG_BASE, "coh901327_wdog", NULL), OF_DEV_AUXDATA("stericsson,coh901331", U300_RTC_BASE, "rtc-coh901331", NULL), OF_DEV_AUXDATA("stericsson,coh901318", U300_DMAC_BASE, "coh901318", NULL), OF_DEV_AUXDATA("stericsson,fsmc-nand", U300_NAND_IF_PHYS_BASE, "fsmc-nand", NULL), OF_DEV_AUXDATA("arm,primecell", U300_UART0_BASE, "uart0", NULL), OF_DEV_AUXDATA("arm,primecell", U300_UART1_BASE, "uart1", NULL), OF_DEV_AUXDATA("arm,primecell", U300_SPI_BASE, "pl022", NULL), OF_DEV_AUXDATA("st,ddci2c", U300_I2C0_BASE, "stu300.0", NULL), OF_DEV_AUXDATA("st,ddci2c", U300_I2C1_BASE, "stu300.1", NULL), OF_DEV_AUXDATA("arm,primecell", U300_MMCSD_BASE, "mmci", NULL), { /* sentinel */ }, }; static void __init u300_init_irq_dt(void) { struct device_node *syscon; struct clk *clk; syscon = of_find_node_by_path("/syscon@c0011000"); if (!syscon) { pr_crit("could not find syscon node\n"); return; } syscon_base = of_iomap(syscon, 0); if (!syscon_base) { pr_crit("could not remap syscon\n"); return; } /* initialize clocking early, we want to clock the INTCON */ u300_clk_init(syscon_base); /* Bootstrap EMIF and SEMI clocks */ clk = clk_get_sys("pl172", NULL); BUG_ON(IS_ERR(clk)); clk_prepare_enable(clk); clk = clk_get_sys("semi", NULL); BUG_ON(IS_ERR(clk)); clk_prepare_enable(clk); /* Clock the interrupt controller */ clk = clk_get_sys("intcon", NULL); BUG_ON(IS_ERR(clk)); clk_prepare_enable(clk); irqchip_init(); } static void __init u300_init_machine_dt(void) { u16 val; /* Check what platform we run and print some status information */ u300_init_check_chip(); /* Initialize pinmuxing */ pinctrl_register_mappings(u300_pinmux_map, 
ARRAY_SIZE(u300_pinmux_map)); of_platform_populate(NULL, of_default_bus_match_table, u300_auxdata_lookup, NULL); /* Enable SEMI self refresh */ val = readw(syscon_base + U300_SYSCON_SMCR) | U300_SYSCON_SMCR_SEMI_SREFREQ_ENABLE; writew(val, syscon_base + U300_SYSCON_SMCR); } static const char * u300_board_compat[] = { "stericsson,u300", NULL, }; DT_MACHINE_START(U300_DT, "U300 S335/B335 (Device Tree)") .map_io = u300_map_io, .init_irq = u300_init_irq_dt, .init_time = clocksource_of_init, .init_machine = u300_init_machine_dt, .restart = u300_restart, .dt_compat = u300_board_compat, MACHINE_END
gpl-2.0
Chibaibuki/TCP-IP-Timer-For-Linux-Kernel
drivers/block/virtio_blk.c
2081
24838
//#define DEBUG #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/hdreg.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/virtio.h> #include <linux/virtio_blk.h> #include <linux/scatterlist.h> #include <linux/string_helpers.h> #include <scsi/scsi_cmnd.h> #include <linux/idr.h> #define PART_BITS 4 static bool use_bio; module_param(use_bio, bool, S_IRUGO); static int major; static DEFINE_IDA(vd_index_ida); struct workqueue_struct *virtblk_wq; struct virtio_blk { struct virtio_device *vdev; struct virtqueue *vq; wait_queue_head_t queue_wait; /* The disk structure for the kernel. */ struct gendisk *disk; mempool_t *pool; /* Process context for config space updates */ struct work_struct config_work; /* Lock for config space updates */ struct mutex config_lock; /* enable config space updates */ bool config_enable; /* What host tells us, plus 2 for header & tailer. */ unsigned int sg_elems; /* Ida index - used to track minor number allocations. */ int index; /* Scatterlist: can be too big for stack. 
*/ struct scatterlist sg[/*sg_elems*/]; }; struct virtblk_req { struct request *req; struct bio *bio; struct virtio_blk_outhdr out_hdr; struct virtio_scsi_inhdr in_hdr; struct work_struct work; struct virtio_blk *vblk; int flags; u8 status; struct scatterlist sg[]; }; enum { VBLK_IS_FLUSH = 1, VBLK_REQ_FLUSH = 2, VBLK_REQ_DATA = 4, VBLK_REQ_FUA = 8, }; static inline int virtblk_result(struct virtblk_req *vbr) { switch (vbr->status) { case VIRTIO_BLK_S_OK: return 0; case VIRTIO_BLK_S_UNSUPP: return -ENOTTY; default: return -EIO; } } static inline struct virtblk_req *virtblk_alloc_req(struct virtio_blk *vblk, gfp_t gfp_mask) { struct virtblk_req *vbr; vbr = mempool_alloc(vblk->pool, gfp_mask); if (!vbr) return NULL; vbr->vblk = vblk; if (use_bio) sg_init_table(vbr->sg, vblk->sg_elems); return vbr; } static int __virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr, struct scatterlist *data_sg, bool have_data) { struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6]; unsigned int num_out = 0, num_in = 0; int type = vbr->out_hdr.type & ~VIRTIO_BLK_T_OUT; sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr)); sgs[num_out++] = &hdr; /* * If this is a packet command we need a couple of additional headers. * Behind the normal outhdr we put a segment with the scsi command * block, and before the normal inhdr we put the sense data and the * inhdr with additional status information. 
*/ if (type == VIRTIO_BLK_T_SCSI_CMD) { sg_init_one(&cmd, vbr->req->cmd, vbr->req->cmd_len); sgs[num_out++] = &cmd; } if (have_data) { if (vbr->out_hdr.type & VIRTIO_BLK_T_OUT) sgs[num_out++] = data_sg; else sgs[num_out + num_in++] = data_sg; } if (type == VIRTIO_BLK_T_SCSI_CMD) { sg_init_one(&sense, vbr->req->sense, SCSI_SENSE_BUFFERSIZE); sgs[num_out + num_in++] = &sense; sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr)); sgs[num_out + num_in++] = &inhdr; } sg_init_one(&status, &vbr->status, sizeof(vbr->status)); sgs[num_out + num_in++] = &status; return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC); } static void virtblk_add_req(struct virtblk_req *vbr, bool have_data) { struct virtio_blk *vblk = vbr->vblk; DEFINE_WAIT(wait); int ret; spin_lock_irq(vblk->disk->queue->queue_lock); while (unlikely((ret = __virtblk_add_req(vblk->vq, vbr, vbr->sg, have_data)) < 0)) { prepare_to_wait_exclusive(&vblk->queue_wait, &wait, TASK_UNINTERRUPTIBLE); spin_unlock_irq(vblk->disk->queue->queue_lock); io_schedule(); spin_lock_irq(vblk->disk->queue->queue_lock); finish_wait(&vblk->queue_wait, &wait); } virtqueue_kick(vblk->vq); spin_unlock_irq(vblk->disk->queue->queue_lock); } static void virtblk_bio_send_flush(struct virtblk_req *vbr) { vbr->flags |= VBLK_IS_FLUSH; vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = 0; virtblk_add_req(vbr, false); } static void virtblk_bio_send_data(struct virtblk_req *vbr) { struct virtio_blk *vblk = vbr->vblk; struct bio *bio = vbr->bio; bool have_data; vbr->flags &= ~VBLK_IS_FLUSH; vbr->out_hdr.type = 0; vbr->out_hdr.sector = bio->bi_sector; vbr->out_hdr.ioprio = bio_prio(bio); if (blk_bio_map_sg(vblk->disk->queue, bio, vbr->sg)) { have_data = true; if (bio->bi_rw & REQ_WRITE) vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; else vbr->out_hdr.type |= VIRTIO_BLK_T_IN; } else have_data = false; virtblk_add_req(vbr, have_data); } static void virtblk_bio_send_data_work(struct work_struct *work) { struct 
virtblk_req *vbr; vbr = container_of(work, struct virtblk_req, work); virtblk_bio_send_data(vbr); } static void virtblk_bio_send_flush_work(struct work_struct *work) { struct virtblk_req *vbr; vbr = container_of(work, struct virtblk_req, work); virtblk_bio_send_flush(vbr); } static inline void virtblk_request_done(struct virtblk_req *vbr) { struct virtio_blk *vblk = vbr->vblk; struct request *req = vbr->req; int error = virtblk_result(vbr); if (req->cmd_type == REQ_TYPE_BLOCK_PC) { req->resid_len = vbr->in_hdr.residual; req->sense_len = vbr->in_hdr.sense_len; req->errors = vbr->in_hdr.errors; } else if (req->cmd_type == REQ_TYPE_SPECIAL) { req->errors = (error != 0); } __blk_end_request_all(req, error); mempool_free(vbr, vblk->pool); } static inline void virtblk_bio_flush_done(struct virtblk_req *vbr) { struct virtio_blk *vblk = vbr->vblk; if (vbr->flags & VBLK_REQ_DATA) { /* Send out the actual write data */ INIT_WORK(&vbr->work, virtblk_bio_send_data_work); queue_work(virtblk_wq, &vbr->work); } else { bio_endio(vbr->bio, virtblk_result(vbr)); mempool_free(vbr, vblk->pool); } } static inline void virtblk_bio_data_done(struct virtblk_req *vbr) { struct virtio_blk *vblk = vbr->vblk; if (unlikely(vbr->flags & VBLK_REQ_FUA)) { /* Send out a flush before end the bio */ vbr->flags &= ~VBLK_REQ_DATA; INIT_WORK(&vbr->work, virtblk_bio_send_flush_work); queue_work(virtblk_wq, &vbr->work); } else { bio_endio(vbr->bio, virtblk_result(vbr)); mempool_free(vbr, vblk->pool); } } static inline void virtblk_bio_done(struct virtblk_req *vbr) { if (unlikely(vbr->flags & VBLK_IS_FLUSH)) virtblk_bio_flush_done(vbr); else virtblk_bio_data_done(vbr); } static void virtblk_done(struct virtqueue *vq) { struct virtio_blk *vblk = vq->vdev->priv; bool bio_done = false, req_done = false; struct virtblk_req *vbr; unsigned long flags; unsigned int len; spin_lock_irqsave(vblk->disk->queue->queue_lock, flags); do { virtqueue_disable_cb(vq); while ((vbr = virtqueue_get_buf(vblk->vq, &len)) != 
NULL) { if (vbr->bio) { virtblk_bio_done(vbr); bio_done = true; } else { virtblk_request_done(vbr); req_done = true; } } } while (!virtqueue_enable_cb(vq)); /* In case queue is stopped waiting for more buffers. */ if (req_done) blk_start_queue(vblk->disk->queue); spin_unlock_irqrestore(vblk->disk->queue->queue_lock, flags); if (bio_done) wake_up(&vblk->queue_wait); } static bool do_req(struct request_queue *q, struct virtio_blk *vblk, struct request *req) { unsigned int num; struct virtblk_req *vbr; vbr = virtblk_alloc_req(vblk, GFP_ATOMIC); if (!vbr) /* When another request finishes we'll try again. */ return false; vbr->req = req; vbr->bio = NULL; if (req->cmd_flags & REQ_FLUSH) { vbr->out_hdr.type = VIRTIO_BLK_T_FLUSH; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); } else { switch (req->cmd_type) { case REQ_TYPE_FS: vbr->out_hdr.type = 0; vbr->out_hdr.sector = blk_rq_pos(vbr->req); vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; case REQ_TYPE_BLOCK_PC: vbr->out_hdr.type = VIRTIO_BLK_T_SCSI_CMD; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; case REQ_TYPE_SPECIAL: vbr->out_hdr.type = VIRTIO_BLK_T_GET_ID; vbr->out_hdr.sector = 0; vbr->out_hdr.ioprio = req_get_ioprio(vbr->req); break; default: /* We don't put anything else in the queue. */ BUG(); } } num = blk_rq_map_sg(q, vbr->req, vblk->sg); if (num) { if (rq_data_dir(vbr->req) == WRITE) vbr->out_hdr.type |= VIRTIO_BLK_T_OUT; else vbr->out_hdr.type |= VIRTIO_BLK_T_IN; } if (__virtblk_add_req(vblk->vq, vbr, vblk->sg, num) < 0) { mempool_free(vbr, vblk->pool); return false; } return true; } static void virtblk_request(struct request_queue *q) { struct virtio_blk *vblk = q->queuedata; struct request *req; unsigned int issued = 0; while ((req = blk_peek_request(q)) != NULL) { BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems); /* If this request fails, stop queue and wait for something to finish to restart it. 
*/ if (!do_req(q, vblk, req)) { blk_stop_queue(q); break; } blk_start_request(req); issued++; } if (issued) virtqueue_kick(vblk->vq); } static void virtblk_make_request(struct request_queue *q, struct bio *bio) { struct virtio_blk *vblk = q->queuedata; struct virtblk_req *vbr; BUG_ON(bio->bi_phys_segments + 2 > vblk->sg_elems); vbr = virtblk_alloc_req(vblk, GFP_NOIO); if (!vbr) { bio_endio(bio, -ENOMEM); return; } vbr->bio = bio; vbr->flags = 0; if (bio->bi_rw & REQ_FLUSH) vbr->flags |= VBLK_REQ_FLUSH; if (bio->bi_rw & REQ_FUA) vbr->flags |= VBLK_REQ_FUA; if (bio->bi_size) vbr->flags |= VBLK_REQ_DATA; if (unlikely(vbr->flags & VBLK_REQ_FLUSH)) virtblk_bio_send_flush(vbr); else virtblk_bio_send_data(vbr); } /* return id (s/n) string for *disk to *id_str */ static int virtblk_get_id(struct gendisk *disk, char *id_str) { struct virtio_blk *vblk = disk->private_data; struct request *req; struct bio *bio; int err; bio = bio_map_kern(vblk->disk->queue, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL); if (IS_ERR(bio)) return PTR_ERR(bio); req = blk_make_request(vblk->disk->queue, bio, GFP_KERNEL); if (IS_ERR(req)) { bio_put(bio); return PTR_ERR(req); } req->cmd_type = REQ_TYPE_SPECIAL; err = blk_execute_rq(vblk->disk->queue, vblk->disk, req, false); blk_put_request(req); return err; } static int virtblk_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long data) { struct gendisk *disk = bdev->bd_disk; struct virtio_blk *vblk = disk->private_data; /* * Only allow the generic SCSI ioctls if the host can support it. 
*/ if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI)) return -ENOTTY; return scsi_cmd_blk_ioctl(bdev, mode, cmd, (void __user *)data); } /* We provide getgeo only to please some old bootloader/partitioning tools */ static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo) { struct virtio_blk *vblk = bd->bd_disk->private_data; struct virtio_blk_geometry vgeo; int err; /* see if the host passed in geometry config */ err = virtio_config_val(vblk->vdev, VIRTIO_BLK_F_GEOMETRY, offsetof(struct virtio_blk_config, geometry), &vgeo); if (!err) { geo->heads = vgeo.heads; geo->sectors = vgeo.sectors; geo->cylinders = vgeo.cylinders; } else { /* some standard values, similar to sd */ geo->heads = 1 << 6; geo->sectors = 1 << 5; geo->cylinders = get_capacity(bd->bd_disk) >> 11; } return 0; } static const struct block_device_operations virtblk_fops = { .ioctl = virtblk_ioctl, .owner = THIS_MODULE, .getgeo = virtblk_getgeo, }; static int index_to_minor(int index) { return index << PART_BITS; } static int minor_to_index(int minor) { return minor >> PART_BITS; } static ssize_t virtblk_serial_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); int err; /* sysfs gives us a PAGE_SIZE buffer */ BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES); buf[VIRTIO_BLK_ID_BYTES] = '\0'; err = virtblk_get_id(disk, buf); if (!err) return strlen(buf); if (err == -EIO) /* Unsupported? Make it empty. */ return 0; return err; } DEVICE_ATTR(serial, S_IRUGO, virtblk_serial_show, NULL); static void virtblk_config_changed_work(struct work_struct *work) { struct virtio_blk *vblk = container_of(work, struct virtio_blk, config_work); struct virtio_device *vdev = vblk->vdev; struct request_queue *q = vblk->disk->queue; char cap_str_2[10], cap_str_10[10]; char *envp[] = { "RESIZE=1", NULL }; u64 capacity, size; mutex_lock(&vblk->config_lock); if (!vblk->config_enable) goto done; /* Host must always specify the capacity. 
*/ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), &capacity, sizeof(capacity)); /* If capacity is too big, truncate with warning. */ if ((sector_t)capacity != capacity) { dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", (unsigned long long)capacity); capacity = (sector_t)-1; } size = capacity * queue_logical_block_size(q); string_get_size(size, STRING_UNITS_2, cap_str_2, sizeof(cap_str_2)); string_get_size(size, STRING_UNITS_10, cap_str_10, sizeof(cap_str_10)); dev_notice(&vdev->dev, "new size: %llu %d-byte logical blocks (%s/%s)\n", (unsigned long long)capacity, queue_logical_block_size(q), cap_str_10, cap_str_2); set_capacity(vblk->disk, capacity); revalidate_disk(vblk->disk); kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp); done: mutex_unlock(&vblk->config_lock); } static void virtblk_config_changed(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; queue_work(virtblk_wq, &vblk->config_work); } static int init_vq(struct virtio_blk *vblk) { int err = 0; /* We expect one virtqueue, for output. */ vblk->vq = virtio_find_single_vq(vblk->vdev, virtblk_done, "requests"); if (IS_ERR(vblk->vq)) err = PTR_ERR(vblk->vq); return err; } /* * Legacy naming scheme used for virtio devices. We are stuck with it for * virtio blk but don't ever use it for any new driver. 
*/ static int virtblk_name_format(char *prefix, int index, char *buf, int buflen) { const int base = 'z' - 'a' + 1; char *begin = buf + strlen(prefix); char *end = buf + buflen; char *p; int unit; p = end - 1; *p = '\0'; unit = base; do { if (p == begin) return -EINVAL; *--p = 'a' + (index % unit); index = (index / unit) - 1; } while (index >= 0); memmove(begin, p, end - p); memcpy(buf, prefix, strlen(prefix)); return 0; } static int virtblk_get_cache_mode(struct virtio_device *vdev) { u8 writeback; int err; err = virtio_config_val(vdev, VIRTIO_BLK_F_CONFIG_WCE, offsetof(struct virtio_blk_config, wce), &writeback); if (err) writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_WCE); return writeback; } static void virtblk_update_cache_mode(struct virtio_device *vdev) { u8 writeback = virtblk_get_cache_mode(vdev); struct virtio_blk *vblk = vdev->priv; if (writeback) blk_queue_flush(vblk->disk->queue, REQ_FLUSH); else blk_queue_flush(vblk->disk->queue, 0); revalidate_disk(vblk->disk); } static const char *const virtblk_cache_types[] = { "write through", "write back" }; static ssize_t virtblk_cache_type_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct gendisk *disk = dev_to_disk(dev); struct virtio_blk *vblk = disk->private_data; struct virtio_device *vdev = vblk->vdev; int i; u8 writeback; BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE)); for (i = ARRAY_SIZE(virtblk_cache_types); --i >= 0; ) if (sysfs_streq(buf, virtblk_cache_types[i])) break; if (i < 0) return -EINVAL; writeback = i; vdev->config->set(vdev, offsetof(struct virtio_blk_config, wce), &writeback, sizeof(writeback)); virtblk_update_cache_mode(vdev); return count; } static ssize_t virtblk_cache_type_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); struct virtio_blk *vblk = disk->private_data; u8 writeback = virtblk_get_cache_mode(vblk->vdev); BUG_ON(writeback >= 
ARRAY_SIZE(virtblk_cache_types)); return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]); } static const struct device_attribute dev_attr_cache_type_ro = __ATTR(cache_type, S_IRUGO, virtblk_cache_type_show, NULL); static const struct device_attribute dev_attr_cache_type_rw = __ATTR(cache_type, S_IRUGO|S_IWUSR, virtblk_cache_type_show, virtblk_cache_type_store); static int virtblk_probe(struct virtio_device *vdev) { struct virtio_blk *vblk; struct request_queue *q; int err, index; int pool_size; u64 cap; u32 v, blk_size, sg_elems, opt_io_size; u16 min_io_size; u8 physical_block_exp, alignment_offset; err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS), GFP_KERNEL); if (err < 0) goto out; index = err; /* We need to know how many segments before we allocate. */ err = virtio_config_val(vdev, VIRTIO_BLK_F_SEG_MAX, offsetof(struct virtio_blk_config, seg_max), &sg_elems); /* We need at least one SG element, whatever they say. */ if (err || !sg_elems) sg_elems = 1; /* We need an extra sg elements at head and tail. */ sg_elems += 2; vdev->priv = vblk = kmalloc(sizeof(*vblk) + sizeof(vblk->sg[0]) * sg_elems, GFP_KERNEL); if (!vblk) { err = -ENOMEM; goto out_free_index; } init_waitqueue_head(&vblk->queue_wait); vblk->vdev = vdev; vblk->sg_elems = sg_elems; sg_init_table(vblk->sg, vblk->sg_elems); mutex_init(&vblk->config_lock); INIT_WORK(&vblk->config_work, virtblk_config_changed_work); vblk->config_enable = true; err = init_vq(vblk); if (err) goto out_free_vblk; pool_size = sizeof(struct virtblk_req); if (use_bio) pool_size += sizeof(struct scatterlist) * sg_elems; vblk->pool = mempool_create_kmalloc_pool(1, pool_size); if (!vblk->pool) { err = -ENOMEM; goto out_free_vq; } /* FIXME: How many partitions? How long is a piece of string? 
*/ vblk->disk = alloc_disk(1 << PART_BITS); if (!vblk->disk) { err = -ENOMEM; goto out_mempool; } q = vblk->disk->queue = blk_init_queue(virtblk_request, NULL); if (!q) { err = -ENOMEM; goto out_put_disk; } if (use_bio) blk_queue_make_request(q, virtblk_make_request); q->queuedata = vblk; virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN); vblk->disk->major = major; vblk->disk->first_minor = index_to_minor(index); vblk->disk->private_data = vblk; vblk->disk->fops = &virtblk_fops; vblk->disk->driverfs_dev = &vdev->dev; vblk->index = index; /* configure queue flush support */ virtblk_update_cache_mode(vdev); /* If disk is read-only in the host, the guest should obey */ if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO)) set_disk_ro(vblk->disk, 1); /* Host must always specify the capacity. */ vdev->config->get(vdev, offsetof(struct virtio_blk_config, capacity), &cap, sizeof(cap)); /* If capacity is too big, truncate with warning. */ if ((sector_t)cap != cap) { dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n", (unsigned long long)cap); cap = (sector_t)-1; } set_capacity(vblk->disk, cap); /* We can handle whatever the host told us to handle. */ blk_queue_max_segments(q, vblk->sg_elems-2); /* No need to bounce any requests */ blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); /* No real sector limit. */ blk_queue_max_hw_sectors(q, -1U); /* Host can optionally specify maximum segment size and number of * segments. 
*/ err = virtio_config_val(vdev, VIRTIO_BLK_F_SIZE_MAX, offsetof(struct virtio_blk_config, size_max), &v); if (!err) blk_queue_max_segment_size(q, v); else blk_queue_max_segment_size(q, -1U); /* Host can optionally specify the block size of the device */ err = virtio_config_val(vdev, VIRTIO_BLK_F_BLK_SIZE, offsetof(struct virtio_blk_config, blk_size), &blk_size); if (!err) blk_queue_logical_block_size(q, blk_size); else blk_size = queue_logical_block_size(q); /* Use topology information if available */ err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, physical_block_exp), &physical_block_exp); if (!err && physical_block_exp) blk_queue_physical_block_size(q, blk_size * (1 << physical_block_exp)); err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, alignment_offset), &alignment_offset); if (!err && alignment_offset) blk_queue_alignment_offset(q, blk_size * alignment_offset); err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, min_io_size), &min_io_size); if (!err && min_io_size) blk_queue_io_min(q, blk_size * min_io_size); err = virtio_config_val(vdev, VIRTIO_BLK_F_TOPOLOGY, offsetof(struct virtio_blk_config, opt_io_size), &opt_io_size); if (!err && opt_io_size) blk_queue_io_opt(q, blk_size * opt_io_size); add_disk(vblk->disk); err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_serial); if (err) goto out_del_disk; if (virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE)) err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_cache_type_rw); else err = device_create_file(disk_to_dev(vblk->disk), &dev_attr_cache_type_ro); if (err) goto out_del_disk; return 0; out_del_disk: del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); out_put_disk: put_disk(vblk->disk); out_mempool: mempool_destroy(vblk->pool); out_free_vq: vdev->config->del_vqs(vdev); out_free_vblk: kfree(vblk); out_free_index: ida_simple_remove(&vd_index_ida, index); out: return 
err; } static void virtblk_remove(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; int index = vblk->index; int refc; /* Prevent config work handler from accessing the device. */ mutex_lock(&vblk->config_lock); vblk->config_enable = false; mutex_unlock(&vblk->config_lock); del_gendisk(vblk->disk); blk_cleanup_queue(vblk->disk->queue); /* Stop all the virtqueues. */ vdev->config->reset(vdev); flush_work(&vblk->config_work); refc = atomic_read(&disk_to_dev(vblk->disk)->kobj.kref.refcount); put_disk(vblk->disk); mempool_destroy(vblk->pool); vdev->config->del_vqs(vdev); kfree(vblk); /* Only free device id if we don't have any users */ if (refc == 1) ida_simple_remove(&vd_index_ida, index); } #ifdef CONFIG_PM static int virtblk_freeze(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; /* Ensure we don't receive any more interrupts */ vdev->config->reset(vdev); /* Prevent config work handler from accessing the device. */ mutex_lock(&vblk->config_lock); vblk->config_enable = false; mutex_unlock(&vblk->config_lock); flush_work(&vblk->config_work); spin_lock_irq(vblk->disk->queue->queue_lock); blk_stop_queue(vblk->disk->queue); spin_unlock_irq(vblk->disk->queue->queue_lock); blk_sync_queue(vblk->disk->queue); vdev->config->del_vqs(vdev); return 0; } static int virtblk_restore(struct virtio_device *vdev) { struct virtio_blk *vblk = vdev->priv; int ret; vblk->config_enable = true; ret = init_vq(vdev->priv); if (!ret) { spin_lock_irq(vblk->disk->queue->queue_lock); blk_start_queue(vblk->disk->queue); spin_unlock_irq(vblk->disk->queue->queue_lock); } return ret; } #endif static const struct virtio_device_id id_table[] = { { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY, VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE, VIRTIO_BLK_F_SCSI, VIRTIO_BLK_F_WCE, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE }; static struct virtio_driver virtio_blk = { 
.feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = virtblk_probe, .remove = virtblk_remove, .config_changed = virtblk_config_changed, #ifdef CONFIG_PM .freeze = virtblk_freeze, .restore = virtblk_restore, #endif }; static int __init init(void) { int error; virtblk_wq = alloc_workqueue("virtio-blk", 0, 0); if (!virtblk_wq) return -ENOMEM; major = register_blkdev(0, "virtblk"); if (major < 0) { error = major; goto out_destroy_workqueue; } error = register_virtio_driver(&virtio_blk); if (error) goto out_unregister_blkdev; return 0; out_unregister_blkdev: unregister_blkdev(major, "virtblk"); out_destroy_workqueue: destroy_workqueue(virtblk_wq); return error; } static void __exit fini(void) { unregister_blkdev(major, "virtblk"); unregister_virtio_driver(&virtio_blk); destroy_workqueue(virtblk_wq); } module_init(init); module_exit(fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio block driver"); MODULE_LICENSE("GPL");
gpl-2.0
tamirda/T805_PhoeniX_Kernel_Lollipop
drivers/mmc/card/mmc_test.c
2081
68868
/* * linux/drivers/mmc/card/mmc_test.c * * Copyright 2007-2008 Pierre Ossman * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. */ #include <linux/mmc/core.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/mmc/mmc.h> #include <linux/slab.h> #include <linux/scatterlist.h> #include <linux/swap.h> /* For nr_free_buffer_pages() */ #include <linux/list.h> #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/seq_file.h> #include <linux/module.h> #define RESULT_OK 0 #define RESULT_FAIL 1 #define RESULT_UNSUP_HOST 2 #define RESULT_UNSUP_CARD 3 #define BUFFER_ORDER 2 #define BUFFER_SIZE (PAGE_SIZE << BUFFER_ORDER) /* * Limit the test area size to the maximum MMC HC erase group size. Note that * the maximum SD allocation unit size is just 4MiB. */ #define TEST_AREA_MAX_SIZE (128 * 1024 * 1024) /** * struct mmc_test_pages - pages allocated by 'alloc_pages()'. * @page: first page in the allocation * @order: order of the number of pages allocated */ struct mmc_test_pages { struct page *page; unsigned int order; }; /** * struct mmc_test_mem - allocated memory. * @arr: array of allocations * @cnt: number of allocations */ struct mmc_test_mem { struct mmc_test_pages *arr; unsigned int cnt; }; /** * struct mmc_test_area - information for performance tests. 
* @max_sz: test area size (in bytes) * @dev_addr: address on card at which to do performance tests * @max_tfr: maximum transfer size allowed by driver (in bytes) * @max_segs: maximum segments allowed by driver in scatterlist @sg * @max_seg_sz: maximum segment size allowed by driver * @blocks: number of (512 byte) blocks currently mapped by @sg * @sg_len: length of currently mapped scatterlist @sg * @mem: allocated memory * @sg: scatterlist */ struct mmc_test_area { unsigned long max_sz; unsigned int dev_addr; unsigned int max_tfr; unsigned int max_segs; unsigned int max_seg_sz; unsigned int blocks; unsigned int sg_len; struct mmc_test_mem *mem; struct scatterlist *sg; }; /** * struct mmc_test_transfer_result - transfer results for performance tests. * @link: double-linked list * @count: amount of group of sectors to check * @sectors: amount of sectors to check in one group * @ts: time values of transfer * @rate: calculated transfer rate * @iops: I/O operations per second (times 100) */ struct mmc_test_transfer_result { struct list_head link; unsigned int count; unsigned int sectors; struct timespec ts; unsigned int rate; unsigned int iops; }; /** * struct mmc_test_general_result - results for tests. * @link: double-linked list * @card: card under test * @testcase: number of test case * @result: result of test run * @tr_lst: transfer measurements if any as mmc_test_transfer_result */ struct mmc_test_general_result { struct list_head link; struct mmc_card *card; int testcase; int result; struct list_head tr_lst; }; /** * struct mmc_test_dbgfs_file - debugfs related file. * @link: double-linked list * @card: card under test * @file: file created under debugfs */ struct mmc_test_dbgfs_file { struct list_head link; struct mmc_card *card; struct dentry *file; }; /** * struct mmc_test_card - test information. 
* @card: card under test * @scratch: transfer buffer * @buffer: transfer buffer * @highmem: buffer for highmem tests * @area: information for performance tests * @gr: pointer to results of current testcase */ struct mmc_test_card { struct mmc_card *card; u8 scratch[BUFFER_SIZE]; u8 *buffer; #ifdef CONFIG_HIGHMEM struct page *highmem; #endif struct mmc_test_area area; struct mmc_test_general_result *gr; }; enum mmc_test_prep_media { MMC_TEST_PREP_NONE = 0, MMC_TEST_PREP_WRITE_FULL = 1 << 0, MMC_TEST_PREP_ERASE = 1 << 1, }; struct mmc_test_multiple_rw { unsigned int *sg_len; unsigned int *bs; unsigned int len; unsigned int size; bool do_write; bool do_nonblock_req; enum mmc_test_prep_media prepare; }; struct mmc_test_async_req { struct mmc_async_req areq; struct mmc_test_card *test; }; /*******************************************************************/ /* General helper functions */ /*******************************************************************/ /* * Configure correct block size in card */ static int mmc_test_set_blksize(struct mmc_test_card *test, unsigned size) { return mmc_set_blocklen(test->card, size); } /* * Fill in the mmc_request structure given a set of transfer parameters. */ static void mmc_test_prepare_mrq(struct mmc_test_card *test, struct mmc_request *mrq, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) { BUG_ON(!mrq || !mrq->cmd || !mrq->data || !mrq->stop); if (blocks > 1) { mrq->cmd->opcode = write ? MMC_WRITE_MULTIPLE_BLOCK : MMC_READ_MULTIPLE_BLOCK; } else { mrq->cmd->opcode = write ? MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; } mrq->cmd->arg = dev_addr; if (!mmc_card_blockaddr(test->card)) mrq->cmd->arg <<= 9; mrq->cmd->flags = MMC_RSP_R1 | MMC_CMD_ADTC; if (blocks == 1) mrq->stop = NULL; else { mrq->stop->opcode = MMC_STOP_TRANSMISSION; mrq->stop->arg = 0; mrq->stop->flags = MMC_RSP_R1B | MMC_CMD_AC; } mrq->data->blksz = blksz; mrq->data->blocks = blocks; mrq->data->flags = write ? 
MMC_DATA_WRITE : MMC_DATA_READ; mrq->data->sg = sg; mrq->data->sg_len = sg_len; mmc_set_data_timeout(mrq->data, test->card); } static int mmc_test_busy(struct mmc_command *cmd) { return !(cmd->resp[0] & R1_READY_FOR_DATA) || (R1_CURRENT_STATE(cmd->resp[0]) == R1_STATE_PRG); } /* * Wait for the card to finish the busy state */ static int mmc_test_wait_busy(struct mmc_test_card *test) { int ret, busy; struct mmc_command cmd = {0}; busy = 0; do { memset(&cmd, 0, sizeof(struct mmc_command)); cmd.opcode = MMC_SEND_STATUS; cmd.arg = test->card->rca << 16; cmd.flags = MMC_RSP_R1 | MMC_CMD_AC; ret = mmc_wait_for_cmd(test->card->host, &cmd, 0); if (ret) break; if (!busy && mmc_test_busy(&cmd)) { busy = 1; if (test->card->host->caps & MMC_CAP_WAIT_WHILE_BUSY) pr_info("%s: Warning: Host did not " "wait for busy state to end.\n", mmc_hostname(test->card->host)); } } while (mmc_test_busy(&cmd)); return ret; } /* * Transfer a single sector of kernel addressable data */ static int mmc_test_buffer_transfer(struct mmc_test_card *test, u8 *buffer, unsigned addr, unsigned blksz, int write) { int ret; struct mmc_request mrq = {0}; struct mmc_command cmd = {0}; struct mmc_command stop = {0}; struct mmc_data data = {0}; struct scatterlist sg; mrq.cmd = &cmd; mrq.data = &data; mrq.stop = &stop; sg_init_one(&sg, buffer, blksz); mmc_test_prepare_mrq(test, &mrq, &sg, 1, addr, 1, blksz, write); mmc_wait_for_req(test->card->host, &mrq); if (cmd.error) return cmd.error; if (data.error) return data.error; ret = mmc_test_wait_busy(test); if (ret) return ret; return 0; } static void mmc_test_free_mem(struct mmc_test_mem *mem) { if (!mem) return; while (mem->cnt--) __free_pages(mem->arr[mem->cnt].page, mem->arr[mem->cnt].order); kfree(mem->arr); kfree(mem); } /* * Allocate a lot of memory, preferably max_sz but at least min_sz. In case * there isn't much memory do not exceed 1/16th total lowmem pages. 
Also do * not exceed a maximum number of segments and try not to make segments much * bigger than maximum segment size. */ static struct mmc_test_mem *mmc_test_alloc_mem(unsigned long min_sz, unsigned long max_sz, unsigned int max_segs, unsigned int max_seg_sz) { unsigned long max_page_cnt = DIV_ROUND_UP(max_sz, PAGE_SIZE); unsigned long min_page_cnt = DIV_ROUND_UP(min_sz, PAGE_SIZE); unsigned long max_seg_page_cnt = DIV_ROUND_UP(max_seg_sz, PAGE_SIZE); unsigned long page_cnt = 0; unsigned long limit = nr_free_buffer_pages() >> 4; struct mmc_test_mem *mem; if (max_page_cnt > limit) max_page_cnt = limit; if (min_page_cnt > max_page_cnt) min_page_cnt = max_page_cnt; if (max_seg_page_cnt > max_page_cnt) max_seg_page_cnt = max_page_cnt; if (max_segs > max_page_cnt) max_segs = max_page_cnt; mem = kzalloc(sizeof(struct mmc_test_mem), GFP_KERNEL); if (!mem) return NULL; mem->arr = kzalloc(sizeof(struct mmc_test_pages) * max_segs, GFP_KERNEL); if (!mem->arr) goto out_free; while (max_page_cnt) { struct page *page; unsigned int order; gfp_t flags = GFP_KERNEL | GFP_DMA | __GFP_NOWARN | __GFP_NORETRY; order = get_order(max_seg_page_cnt << PAGE_SHIFT); while (1) { page = alloc_pages(flags, order); if (page || !order) break; order -= 1; } if (!page) { if (page_cnt < min_page_cnt) goto out_free; break; } mem->arr[mem->cnt].page = page; mem->arr[mem->cnt].order = order; mem->cnt += 1; if (max_page_cnt <= (1UL << order)) break; max_page_cnt -= 1UL << order; page_cnt += 1UL << order; if (mem->cnt >= max_segs) { if (page_cnt < min_page_cnt) goto out_free; break; } } return mem; out_free: mmc_test_free_mem(mem); return NULL; } /* * Map memory into a scatterlist. Optionally allow the same memory to be * mapped more than once. 
*/ static int mmc_test_map_sg(struct mmc_test_mem *mem, unsigned long size, struct scatterlist *sglist, int repeat, unsigned int max_segs, unsigned int max_seg_sz, unsigned int *sg_len, int min_sg_len) { struct scatterlist *sg = NULL; unsigned int i; unsigned long sz = size; sg_init_table(sglist, max_segs); if (min_sg_len > max_segs) min_sg_len = max_segs; *sg_len = 0; do { for (i = 0; i < mem->cnt; i++) { unsigned long len = PAGE_SIZE << mem->arr[i].order; if (min_sg_len && (size / min_sg_len < len)) len = ALIGN(size / min_sg_len, 512); if (len > sz) len = sz; if (len > max_seg_sz) len = max_seg_sz; if (sg) sg = sg_next(sg); else sg = sglist; if (!sg) return -EINVAL; sg_set_page(sg, mem->arr[i].page, len, 0); sz -= len; *sg_len += 1; if (!sz) break; } } while (sz && repeat); if (sz) return -EINVAL; if (sg) sg_mark_end(sg); return 0; } /* * Map memory into a scatterlist so that no pages are contiguous. Allow the * same memory to be mapped more than once. */ static int mmc_test_map_sg_max_scatter(struct mmc_test_mem *mem, unsigned long sz, struct scatterlist *sglist, unsigned int max_segs, unsigned int max_seg_sz, unsigned int *sg_len) { struct scatterlist *sg = NULL; unsigned int i = mem->cnt, cnt; unsigned long len; void *base, *addr, *last_addr = NULL; sg_init_table(sglist, max_segs); *sg_len = 0; while (sz) { base = page_address(mem->arr[--i].page); cnt = 1 << mem->arr[i].order; while (sz && cnt) { addr = base + PAGE_SIZE * --cnt; if (last_addr && last_addr + PAGE_SIZE == addr) continue; last_addr = addr; len = PAGE_SIZE; if (len > max_seg_sz) len = max_seg_sz; if (len > sz) len = sz; if (sg) sg = sg_next(sg); else sg = sglist; if (!sg) return -EINVAL; sg_set_page(sg, virt_to_page(addr), len, 0); sz -= len; *sg_len += 1; } if (i == 0) i = mem->cnt; } if (sg) sg_mark_end(sg); return 0; } /* * Calculate transfer rate in bytes per second. 
*/ static unsigned int mmc_test_rate(uint64_t bytes, struct timespec *ts) { uint64_t ns; ns = ts->tv_sec; ns *= 1000000000; ns += ts->tv_nsec; bytes *= 1000000000; while (ns > UINT_MAX) { bytes >>= 1; ns >>= 1; } if (!ns) return 0; do_div(bytes, (uint32_t)ns); return bytes; } /* * Save transfer results for future usage */ static void mmc_test_save_transfer_result(struct mmc_test_card *test, unsigned int count, unsigned int sectors, struct timespec ts, unsigned int rate, unsigned int iops) { struct mmc_test_transfer_result *tr; if (!test->gr) return; tr = kmalloc(sizeof(struct mmc_test_transfer_result), GFP_KERNEL); if (!tr) return; tr->count = count; tr->sectors = sectors; tr->ts = ts; tr->rate = rate; tr->iops = iops; list_add_tail(&tr->link, &test->gr->tr_lst); } /* * Print the transfer rate. */ static void mmc_test_print_rate(struct mmc_test_card *test, uint64_t bytes, struct timespec *ts1, struct timespec *ts2) { unsigned int rate, iops, sectors = bytes >> 9; struct timespec ts; ts = timespec_sub(*ts2, *ts1); rate = mmc_test_rate(bytes, &ts); iops = mmc_test_rate(100, &ts); /* I/O ops per sec x 100 */ pr_info("%s: Transfer of %u sectors (%u%s KiB) took %lu.%09lu " "seconds (%u kB/s, %u KiB/s, %u.%02u IOPS)\n", mmc_hostname(test->card->host), sectors, sectors >> 1, (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024, iops / 100, iops % 100); mmc_test_save_transfer_result(test, 1, sectors, ts, rate, iops); } /* * Print the average transfer rate. 
*/ static void mmc_test_print_avg_rate(struct mmc_test_card *test, uint64_t bytes, unsigned int count, struct timespec *ts1, struct timespec *ts2) { unsigned int rate, iops, sectors = bytes >> 9; uint64_t tot = bytes * count; struct timespec ts; ts = timespec_sub(*ts2, *ts1); rate = mmc_test_rate(tot, &ts); iops = mmc_test_rate(count * 100, &ts); /* I/O ops per sec x 100 */ pr_info("%s: Transfer of %u x %u sectors (%u x %u%s KiB) took " "%lu.%09lu seconds (%u kB/s, %u KiB/s, " "%u.%02u IOPS, sg_len %d)\n", mmc_hostname(test->card->host), count, sectors, count, sectors >> 1, (sectors & 1 ? ".5" : ""), (unsigned long)ts.tv_sec, (unsigned long)ts.tv_nsec, rate / 1000, rate / 1024, iops / 100, iops % 100, test->area.sg_len); mmc_test_save_transfer_result(test, count, sectors, ts, rate, iops); } /* * Return the card size in sectors. */ static unsigned int mmc_test_capacity(struct mmc_card *card) { if (!mmc_card_sd(card) && mmc_card_blockaddr(card)) return card->ext_csd.sectors; else return card->csd.capacity << (card->csd.read_blkbits - 9); } /*******************************************************************/ /* Test preparation and cleanup */ /*******************************************************************/ /* * Fill the first couple of sectors of the card with known data * so that bad reads/writes can be detected */ static int __mmc_test_prepare(struct mmc_test_card *test, int write) { int ret, i; ret = mmc_test_set_blksize(test, 512); if (ret) return ret; if (write) memset(test->buffer, 0xDF, 512); else { for (i = 0;i < 512;i++) test->buffer[i] = i; } for (i = 0;i < BUFFER_SIZE / 512;i++) { ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); if (ret) return ret; } return 0; } static int mmc_test_prepare_write(struct mmc_test_card *test) { return __mmc_test_prepare(test, 1); } static int mmc_test_prepare_read(struct mmc_test_card *test) { return __mmc_test_prepare(test, 0); } static int mmc_test_cleanup(struct mmc_test_card *test) { int ret, i; ret = 
mmc_test_set_blksize(test, 512); if (ret) return ret; memset(test->buffer, 0, 512); for (i = 0;i < BUFFER_SIZE / 512;i++) { ret = mmc_test_buffer_transfer(test, test->buffer, i, 512, 1); if (ret) return ret; } return 0; } /*******************************************************************/ /* Test execution helpers */ /*******************************************************************/ /* * Modifies the mmc_request to perform the "short transfer" tests */ static void mmc_test_prepare_broken_mrq(struct mmc_test_card *test, struct mmc_request *mrq, int write) { BUG_ON(!mrq || !mrq->cmd || !mrq->data); if (mrq->data->blocks > 1) { mrq->cmd->opcode = write ? MMC_WRITE_BLOCK : MMC_READ_SINGLE_BLOCK; mrq->stop = NULL; } else { mrq->cmd->opcode = MMC_SEND_STATUS; mrq->cmd->arg = test->card->rca << 16; } } /* * Checks that a normal transfer didn't have any errors */ static int mmc_test_check_result(struct mmc_test_card *test, struct mmc_request *mrq) { int ret; BUG_ON(!mrq || !mrq->cmd || !mrq->data); ret = 0; if (!ret && mrq->cmd->error) ret = mrq->cmd->error; if (!ret && mrq->data->error) ret = mrq->data->error; if (!ret && mrq->stop && mrq->stop->error) ret = mrq->stop->error; if (!ret && mrq->data->bytes_xfered != mrq->data->blocks * mrq->data->blksz) ret = RESULT_FAIL; if (ret == -EINVAL) ret = RESULT_UNSUP_HOST; return ret; } static int mmc_test_check_result_async(struct mmc_card *card, struct mmc_async_req *areq) { struct mmc_test_async_req *test_async = container_of(areq, struct mmc_test_async_req, areq); mmc_test_wait_busy(test_async->test); return mmc_test_check_result(test_async->test, areq->mrq); } /* * Checks that a "short transfer" behaved as expected */ static int mmc_test_check_broken_result(struct mmc_test_card *test, struct mmc_request *mrq) { int ret; BUG_ON(!mrq || !mrq->cmd || !mrq->data); ret = 0; if (!ret && mrq->cmd->error) ret = mrq->cmd->error; if (!ret && mrq->data->error == 0) ret = RESULT_FAIL; if (!ret && mrq->data->error != -ETIMEDOUT) ret 
= mrq->data->error; if (!ret && mrq->stop && mrq->stop->error) ret = mrq->stop->error; if (mrq->data->blocks > 1) { if (!ret && mrq->data->bytes_xfered > mrq->data->blksz) ret = RESULT_FAIL; } else { if (!ret && mrq->data->bytes_xfered > 0) ret = RESULT_FAIL; } if (ret == -EINVAL) ret = RESULT_UNSUP_HOST; return ret; } /* * Tests nonblock transfer with certain parameters */ static void mmc_test_nonblock_reset(struct mmc_request *mrq, struct mmc_command *cmd, struct mmc_command *stop, struct mmc_data *data) { memset(mrq, 0, sizeof(struct mmc_request)); memset(cmd, 0, sizeof(struct mmc_command)); memset(data, 0, sizeof(struct mmc_data)); memset(stop, 0, sizeof(struct mmc_command)); mrq->cmd = cmd; mrq->data = data; mrq->stop = stop; } static int mmc_test_nonblock_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write, int count) { struct mmc_request mrq1; struct mmc_command cmd1; struct mmc_command stop1; struct mmc_data data1; struct mmc_request mrq2; struct mmc_command cmd2; struct mmc_command stop2; struct mmc_data data2; struct mmc_test_async_req test_areq[2]; struct mmc_async_req *done_areq; struct mmc_async_req *cur_areq = &test_areq[0].areq; struct mmc_async_req *other_areq = &test_areq[1].areq; int i; int ret; test_areq[0].test = test; test_areq[1].test = test; mmc_test_nonblock_reset(&mrq1, &cmd1, &stop1, &data1); mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2); cur_areq->mrq = &mrq1; cur_areq->err_check = mmc_test_check_result_async; other_areq->mrq = &mrq2; other_areq->err_check = mmc_test_check_result_async; for (i = 0; i < count; i++) { mmc_test_prepare_mrq(test, cur_areq->mrq, sg, sg_len, dev_addr, blocks, blksz, write); done_areq = mmc_start_req(test->card->host, cur_areq, &ret); if (ret || (!done_areq && i > 0)) goto err; if (done_areq) { if (done_areq->mrq == &mrq2) mmc_test_nonblock_reset(&mrq2, &cmd2, &stop2, &data2); else mmc_test_nonblock_reset(&mrq1, 
&cmd1, &stop1, &data1); } done_areq = cur_areq; cur_areq = other_areq; other_areq = done_areq; dev_addr += blocks; } done_areq = mmc_start_req(test->card->host, NULL, &ret); return ret; err: return ret; } /* * Tests a basic transfer with certain parameters */ static int mmc_test_simple_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) { struct mmc_request mrq = {0}; struct mmc_command cmd = {0}; struct mmc_command stop = {0}; struct mmc_data data = {0}; mrq.cmd = &cmd; mrq.data = &data; mrq.stop = &stop; mmc_test_prepare_mrq(test, &mrq, sg, sg_len, dev_addr, blocks, blksz, write); mmc_wait_for_req(test->card->host, &mrq); mmc_test_wait_busy(test); return mmc_test_check_result(test, &mrq); } /* * Tests a transfer where the card will fail completely or partly */ static int mmc_test_broken_transfer(struct mmc_test_card *test, unsigned blocks, unsigned blksz, int write) { struct mmc_request mrq = {0}; struct mmc_command cmd = {0}; struct mmc_command stop = {0}; struct mmc_data data = {0}; struct scatterlist sg; mrq.cmd = &cmd; mrq.data = &data; mrq.stop = &stop; sg_init_one(&sg, test->buffer, blocks * blksz); mmc_test_prepare_mrq(test, &mrq, &sg, 1, 0, blocks, blksz, write); mmc_test_prepare_broken_mrq(test, &mrq, write); mmc_wait_for_req(test->card->host, &mrq); mmc_test_wait_busy(test); return mmc_test_check_broken_result(test, &mrq); } /* * Does a complete transfer test where data is also validated * * Note: mmc_test_prepare() must have been done before this call */ static int mmc_test_transfer(struct mmc_test_card *test, struct scatterlist *sg, unsigned sg_len, unsigned dev_addr, unsigned blocks, unsigned blksz, int write) { int ret, i; unsigned long flags; if (write) { for (i = 0;i < blocks * blksz;i++) test->scratch[i] = i; } else { memset(test->scratch, 0, BUFFER_SIZE); } local_irq_save(flags); sg_copy_from_buffer(sg, sg_len, test->scratch, BUFFER_SIZE); 
	local_irq_restore(flags);

	ret = mmc_test_set_blksize(test, blksz);
	if (ret)
		return ret;

	ret = mmc_test_simple_transfer(test, sg, sg_len, dev_addr,
		blocks, blksz, write);
	if (ret)
		return ret;

	if (write) {
		int sectors;

		/*
		 * Read the data back one 512 byte sector at a time and
		 * verify it against the incrementing byte pattern.
		 */
		ret = mmc_test_set_blksize(test, 512);
		if (ret)
			return ret;

		/* Number of whole sectors covering blocks * blksz bytes,
		 * plus one extra sector when the data ends exactly on a
		 * sector boundary. */
		sectors = (blocks * blksz + 511) / 512;
		if ((sectors * 512) == (blocks * blksz))
			sectors++;

		if ((sectors * 512) > BUFFER_SIZE)
			return -EINVAL;

		memset(test->buffer, 0, sectors * 512);

		for (i = 0; i < sectors; i++) {
			ret = mmc_test_buffer_transfer(test,
				test->buffer + i * 512,
				dev_addr + i, 512, 0);
			if (ret)
				return ret;
		}

		for (i = 0; i < blocks * blksz; i++) {
			if (test->buffer[i] != (u8)i)
				return RESULT_FAIL;
		}

		/* Bytes past the written data, up to the end of the last
		 * sector, must still hold the 0xDF fill pattern. */
		for (; i < sectors * 512; i++) {
			if (test->buffer[i] != 0xDF)
				return RESULT_FAIL;
		}
	} else {
		/* Read case: copy the sg data out and verify the pattern.
		 * Interrupts are disabled around the copy because the sg
		 * pages may be mapped atomically. */
		local_irq_save(flags);
		sg_copy_to_buffer(sg, sg_len, test->scratch, BUFFER_SIZE);
		local_irq_restore(flags);
		for (i = 0; i < blocks * blksz; i++) {
			if (test->scratch[i] != (u8)i)
				return RESULT_FAIL;
		}
	}

	return 0;
}

/*******************************************************************/
/*  Tests                                                          */
/*******************************************************************/

/* One test case: optional prepare/cleanup stages around run(). */
struct mmc_test_case {
	const char *name;	/* human readable test name */

	int (*prepare)(struct mmc_test_card *);
	int (*run)(struct mmc_test_card *);
	int (*cleanup)(struct mmc_test_card *);
};

/* Write a single 512 byte block; no data verification. */
static int mmc_test_basic_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

/* Read a single 512 byte block; no data verification. */
static int mmc_test_basic_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_simple_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

/* Write a single 512 byte block and verify the data. */
static int mmc_test_verify_write(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

/* Read a single 512 byte block and verify the data. */
static int mmc_test_verify_read(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_one(&sg, test->buffer, 512);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

/* Multi-block write, sized to at least two 512 byte blocks. */
static int mmc_test_multi_write(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Clamp the transfer to what the host controller can do. */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

/* Multi-block read, sized to at least two 512 byte blocks. */
static int mmc_test_multi_read(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	/* Clamp the transfer to what the host controller can do. */
	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_one(&sg, test->buffer, size);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

/* Single-block writes with every power-of-two size below 512. */
static int mmc_test_pow2_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	/* Partial-block writes require card support. */
	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* Single-block reads with every power-of-two size below 512. */
static int mmc_test_pow2_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	/* Partial-block reads require card support. */
	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 1; i < 512; i <<= 1) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/* Writes with odd ("weird") sizes: 3, 10, 17, ... below 512. */
static int
mmc_test_weird_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.write_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* Reads with odd ("weird") sizes: 3, 10, 17, ... below 512. */
static int mmc_test_weird_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	if (!test->card->csd.read_partial)
		return RESULT_UNSUP_CARD;

	for (i = 3; i < 512; i += 7) {
		sg_init_one(&sg, test->buffer, i);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, i, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/* Single-block writes from buffers misaligned by 1..3 bytes. */
static int mmc_test_align_write(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* Single-block reads into buffers misaligned by 1..3 bytes. */
static int mmc_test_align_read(struct mmc_test_card *test)
{
	int ret, i;
	struct scatterlist sg;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, 512);
		ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/* Multi-block writes from buffers misaligned by 1..3 bytes. */
static int mmc_test_align_multi_write(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
		if (ret)
			return ret;
	}

	return 0;
}

/* Multi-block reads into buffers misaligned by 1..3 bytes. */
static int mmc_test_align_multi_read(struct mmc_test_card *test)
{
	int ret, i;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	for (i = 1; i < 4; i++) {
		sg_init_one(&sg, test->buffer + i, size);
		ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/* Check xfer_size reporting when a single-block write fails at start. */
static int mmc_test_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

/* Check xfer_size reporting when a single-block read fails at start. */
static int mmc_test_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

/* Check xfer_size reporting when a multi-block write fails midway. */
static int mmc_test_multi_xfersize_write(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 1);
	if (ret)
		return ret;

	return 0;
}

/* Check xfer_size reporting when a multi-block read fails midway. */
static int mmc_test_multi_xfersize_read(struct mmc_test_card *test)
{
	int ret;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	ret = mmc_test_set_blksize(test, 512);
	if (ret)
		return ret;

	ret = mmc_test_broken_transfer(test, 2, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#ifdef CONFIG_HIGHMEM

/* Single-block write from a highmem page. */
static int mmc_test_write_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 1);
	if (ret)
		return ret;

	return 0;
}

/* Single-block read into a highmem page. */
static int mmc_test_read_high(struct mmc_test_card *test)
{
	int ret;
	struct scatterlist sg;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, 512, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, 1, 512, 0);
	if (ret)
		return ret;

	return 0;
}

/* Multi-block write from a highmem page. */
static int mmc_test_multi_write_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 1);
	if (ret)
		return ret;

	return 0;
}

/* Multi-block read into a highmem page. */
static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}

#else

/* Stand-in used for all highmem tests when CONFIG_HIGHMEM is off. */
static int mmc_test_no_highmem(struct mmc_test_card *test)
{
	pr_info("%s: Highmem not configured - test skipped\n",
	       mmc_hostname(test->card->host));
	return 0;
}

#endif /* CONFIG_HIGHMEM */

/*
 * Map sz bytes so that it can be transferred.
 */
static int mmc_test_area_map(struct mmc_test_card *test, unsigned long sz,
			     int max_scatter, int min_sg_len)
{
	struct mmc_test_area *t = &test->area;
	int err;

	t->blocks = sz >> 9;

	if (max_scatter) {
		/* Deliberately fragment the mapping as much as possible. */
		err = mmc_test_map_sg_max_scatter(t->mem, sz, t->sg,
						  t->max_segs, t->max_seg_sz,
						  &t->sg_len);
	} else {
		err = mmc_test_map_sg(t->mem, sz, t->sg, 1, t->max_segs,
				      t->max_seg_sz, &t->sg_len, min_sg_len);
	}
	if (err)
		pr_info("%s: Failed to map sg list\n",
		       mmc_hostname(test->card->host));
	return err;
}

/*
 * Transfer bytes mapped by mmc_test_area_map().
 */
static int mmc_test_area_transfer(struct mmc_test_card *test,
				  unsigned int dev_addr, int write)
{
	struct mmc_test_area *t = &test->area;

	return mmc_test_simple_transfer(test, t->sg, t->sg_len, dev_addr,
					t->blocks, 512, write);
}

/*
 * Map and transfer bytes for multiple transfers.
*/ static int mmc_test_area_io_seq(struct mmc_test_card *test, unsigned long sz, unsigned int dev_addr, int write, int max_scatter, int timed, int count, bool nonblock, int min_sg_len) { struct timespec ts1, ts2; int ret = 0; int i; struct mmc_test_area *t = &test->area; /* * In the case of a maximally scattered transfer, the maximum transfer * size is further limited by using PAGE_SIZE segments. */ if (max_scatter) { struct mmc_test_area *t = &test->area; unsigned long max_tfr; if (t->max_seg_sz >= PAGE_SIZE) max_tfr = t->max_segs * PAGE_SIZE; else max_tfr = t->max_segs * t->max_seg_sz; if (sz > max_tfr) sz = max_tfr; } ret = mmc_test_area_map(test, sz, max_scatter, min_sg_len); if (ret) return ret; if (timed) getnstimeofday(&ts1); if (nonblock) ret = mmc_test_nonblock_transfer(test, t->sg, t->sg_len, dev_addr, t->blocks, 512, write, count); else for (i = 0; i < count && ret == 0; i++) { ret = mmc_test_area_transfer(test, dev_addr, write); dev_addr += sz >> 9; } if (ret) return ret; if (timed) getnstimeofday(&ts2); if (timed) mmc_test_print_avg_rate(test, sz, count, &ts1, &ts2); return 0; } static int mmc_test_area_io(struct mmc_test_card *test, unsigned long sz, unsigned int dev_addr, int write, int max_scatter, int timed) { return mmc_test_area_io_seq(test, sz, dev_addr, write, max_scatter, timed, 1, false, 0); } /* * Write the test area entirely. */ static int mmc_test_area_fill(struct mmc_test_card *test) { struct mmc_test_area *t = &test->area; return mmc_test_area_io(test, t->max_tfr, t->dev_addr, 1, 0, 0); } /* * Erase the test area entirely. */ static int mmc_test_area_erase(struct mmc_test_card *test) { struct mmc_test_area *t = &test->area; if (!mmc_can_erase(test->card)) return 0; return mmc_erase(test->card, t->dev_addr, t->max_sz >> 9, MMC_ERASE_ARG); } /* * Cleanup struct mmc_test_area. 
*/ static int mmc_test_area_cleanup(struct mmc_test_card *test) { struct mmc_test_area *t = &test->area; kfree(t->sg); mmc_test_free_mem(t->mem); return 0; } /* * Initialize an area for testing large transfers. The test area is set to the * middle of the card because cards may have different charateristics at the * front (for FAT file system optimization). Optionally, the area is erased * (if the card supports it) which may improve write performance. Optionally, * the area is filled with data for subsequent read tests. */ static int mmc_test_area_init(struct mmc_test_card *test, int erase, int fill) { struct mmc_test_area *t = &test->area; unsigned long min_sz = 64 * 1024, sz; int ret; ret = mmc_test_set_blksize(test, 512); if (ret) return ret; /* Make the test area size about 4MiB */ sz = (unsigned long)test->card->pref_erase << 9; t->max_sz = sz; while (t->max_sz < 4 * 1024 * 1024) t->max_sz += sz; while (t->max_sz > TEST_AREA_MAX_SIZE && t->max_sz > sz) t->max_sz -= sz; t->max_segs = test->card->host->max_segs; t->max_seg_sz = test->card->host->max_seg_size; t->max_seg_sz -= t->max_seg_sz % 512; t->max_tfr = t->max_sz; if (t->max_tfr >> 9 > test->card->host->max_blk_count) t->max_tfr = test->card->host->max_blk_count << 9; if (t->max_tfr > test->card->host->max_req_size) t->max_tfr = test->card->host->max_req_size; if (t->max_tfr / t->max_seg_sz > t->max_segs) t->max_tfr = t->max_segs * t->max_seg_sz; /* * Try to allocate enough memory for a max. sized transfer. Less is OK * because the same memory can be mapped into the scatterlist more than * once. Also, take into account the limits imposed on scatterlist * segments by the host driver. 
*/ t->mem = mmc_test_alloc_mem(min_sz, t->max_tfr, t->max_segs, t->max_seg_sz); if (!t->mem) return -ENOMEM; t->sg = kmalloc(sizeof(struct scatterlist) * t->max_segs, GFP_KERNEL); if (!t->sg) { ret = -ENOMEM; goto out_free; } t->dev_addr = mmc_test_capacity(test->card) / 2; t->dev_addr -= t->dev_addr % (t->max_sz >> 9); if (erase) { ret = mmc_test_area_erase(test); if (ret) goto out_free; } if (fill) { ret = mmc_test_area_fill(test); if (ret) goto out_free; } return 0; out_free: mmc_test_area_cleanup(test); return ret; } /* * Prepare for large transfers. Do not erase the test area. */ static int mmc_test_area_prepare(struct mmc_test_card *test) { return mmc_test_area_init(test, 0, 0); } /* * Prepare for large transfers. Do erase the test area. */ static int mmc_test_area_prepare_erase(struct mmc_test_card *test) { return mmc_test_area_init(test, 1, 0); } /* * Prepare for large transfers. Erase and fill the test area. */ static int mmc_test_area_prepare_fill(struct mmc_test_card *test) { return mmc_test_area_init(test, 1, 1); } /* * Test best-case performance. Best-case performance is expected from * a single large transfer. * * An additional option (max_scatter) allows the measurement of the same * transfer but with no contiguous pages in the scatter list. This tests * the efficiency of DMA to handle scattered pages. */ static int mmc_test_best_performance(struct mmc_test_card *test, int write, int max_scatter) { struct mmc_test_area *t = &test->area; return mmc_test_area_io(test, t->max_tfr, t->dev_addr, write, max_scatter, 1); } /* * Best-case read performance. */ static int mmc_test_best_read_performance(struct mmc_test_card *test) { return mmc_test_best_performance(test, 0, 0); } /* * Best-case write performance. */ static int mmc_test_best_write_performance(struct mmc_test_card *test) { return mmc_test_best_performance(test, 1, 0); } /* * Best-case read performance into scattered pages. 
 */
static int mmc_test_best_read_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 0, 1);
}

/*
 * Best-case write performance from scattered pages.
 */
static int mmc_test_best_write_perf_max_scatter(struct mmc_test_card *test)
{
	return mmc_test_best_performance(test, 1, 1);
}

/*
 * Single read performance by transfer size.
 */
static int mmc_test_profile_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	/* Doubling transfer sizes up to, and finally including, max_tfr. */
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 0, 0, 1);
}

/*
 * Single write performance by transfer size.
 */
static int mmc_test_profile_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	int ret;

	/* Erase first so writes are not slowed by old data. */
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
		if (ret)
			return ret;
	}
	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	sz = t->max_tfr;
	dev_addr = t->dev_addr;
	return mmc_test_area_io(test, sz, dev_addr, 1, 0, 1);
}

/*
 * Single trim performance by transfer size.
 */
static int mmc_test_profile_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz < t->max_sz; sz <<= 1) {
		dev_addr = t->dev_addr + (sz >> 9);
		getnstimeofday(&ts1);
		ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
		if (ret)
			return ret;
		getnstimeofday(&ts2);
		mmc_test_print_rate(test, sz, &ts1, &ts2);
	}
	/* Final iteration with sz as left by the loop (>= max_sz). */
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	ret = mmc_erase(test->card, dev_addr, sz >> 9, MMC_TRIM_ARG);
	if (ret)
		return ret;
	getnstimeofday(&ts2);
	mmc_test_print_rate(test, sz, &ts1, &ts2);
	return 0;
}

/* Time consecutive reads of size sz covering the whole test area. */
static int mmc_test_seq_read_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 0, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive read performance by transfer size.
 */
static int mmc_test_profile_seq_read_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_read_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_read_perf(test, sz);
}

/* Time consecutive writes of size sz covering the whole test area. */
static int mmc_test_seq_write_perf(struct mmc_test_card *test, unsigned long sz)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	ret = mmc_test_area_erase(test);
	if (ret)
		return ret;
	cnt = t->max_sz / sz;
	dev_addr = t->dev_addr;
	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, 1, 0, 0);
		if (ret)
			return ret;
		dev_addr += (sz >> 9);
	}
	getnstimeofday(&ts2);
	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/*
 * Consecutive write performance by transfer size.
 */
static int mmc_test_profile_seq_write_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		ret = mmc_test_seq_write_perf(test, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	return mmc_test_seq_write_perf(test, sz);
}

/*
 * Consecutive trim performance by transfer size.
 */
static int mmc_test_profile_seq_trim_perf(struct mmc_test_card *test)
{
	struct mmc_test_area *t = &test->area;
	unsigned long sz;
	unsigned int dev_addr, i, cnt;
	struct timespec ts1, ts2;
	int ret;

	if (!mmc_can_trim(test->card))
		return RESULT_UNSUP_CARD;

	if (!mmc_can_erase(test->card))
		return RESULT_UNSUP_HOST;

	for (sz = 512; sz <= t->max_sz; sz <<= 1) {
		/* Re-fill the area so each trim has real data to discard. */
		ret = mmc_test_area_erase(test);
		if (ret)
			return ret;
		ret = mmc_test_area_fill(test);
		if (ret)
			return ret;
		cnt = t->max_sz / sz;
		dev_addr = t->dev_addr;
		getnstimeofday(&ts1);
		for (i = 0; i < cnt; i++) {
			ret = mmc_erase(test->card, dev_addr, sz >> 9,
					MMC_TRIM_ARG);
			if (ret)
				return ret;
			dev_addr += (sz >> 9);
		}
		getnstimeofday(&ts2);
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	}
	return 0;
}

/* State for the deterministic pseudo-random generator below. */
static unsigned int rnd_next = 1;

/* Deterministic LCG-based PRNG; returns a value in [0, rnd_cnt). */
static unsigned int mmc_test_rnd_num(unsigned int rnd_cnt)
{
	uint64_t r;

	rnd_next = rnd_next * 1103515245 + 12345;
	r = (rnd_next >> 16) & 0x7fff;
	return (r * rnd_cnt) >> 15;
}

/* Random-address I/O of size sz for ~10 seconds; report rate if print. */
static int mmc_test_rnd_perf(struct mmc_test_card *test, int write, int print,
			     unsigned long sz)
{
	unsigned int dev_addr, cnt, rnd_addr, range1, range2, last_ea = 0, ea;
	unsigned int ssz;
	struct timespec ts1, ts2, ts;
	int ret;

	ssz = sz >> 9;

	rnd_addr = mmc_test_capacity(test->card) / 4;
	range1 = rnd_addr / test->card->pref_erase;
	range2 = range1 / ssz;

	getnstimeofday(&ts1);
	for (cnt = 0; cnt < UINT_MAX; cnt++) {
		getnstimeofday(&ts2);
		ts = timespec_sub(ts2, ts1);
		if (ts.tv_sec >= 10)
			break;
		/* Avoid hitting the same erase group twice in a row. */
		ea = mmc_test_rnd_num(range1);
		if (ea == last_ea)
			ea -= 1;
		last_ea = ea;
		dev_addr = rnd_addr + test->card->pref_erase * ea +
			   ssz * mmc_test_rnd_num(range2);
		ret = mmc_test_area_io(test, sz, dev_addr, write, 0, 0);
		if (ret)
			return ret;
	}
	if (print)
		mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);
	return 0;
}

/* Random-address performance across doubling transfer sizes. */
static int mmc_test_random_perf(struct mmc_test_card *test, int write)
{
	struct mmc_test_area *t = &test->area;
	unsigned int next;
	unsigned long sz;
	int ret;

	for (sz = 512; sz < t->max_tfr; sz <<= 1) {
		/*
		 * When writing, try to get more consistent results by running
		 * the test twice with exactly the same I/O but outputting the
		 * results only for the 2nd run.
		 */
		if (write) {
			next = rnd_next;
			ret = mmc_test_rnd_perf(test, write, 0, sz);
			if (ret)
				return ret;
			/* Restore the PRNG so the 2nd run repeats the I/O. */
			rnd_next = next;
		}
		ret = mmc_test_rnd_perf(test, write, 1, sz);
		if (ret)
			return ret;
	}
	sz = t->max_tfr;
	if (write) {
		next = rnd_next;
		ret = mmc_test_rnd_perf(test, write, 0, sz);
		if (ret)
			return ret;
		rnd_next = next;
	}
	return mmc_test_rnd_perf(test, write, 1, sz);
}

/*
 * Random read performance by transfer size.
 */
static int mmc_test_random_read_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 0);
}

/*
 * Random write performance by transfer size.
 */
static int mmc_test_random_write_perf(struct mmc_test_card *test)
{
	return mmc_test_random_perf(test, 1);
}

/* Sequentially transfer tot_sz bytes in max-size chunks and report rate. */
static int mmc_test_seq_perf(struct mmc_test_card *test, int write,
			     unsigned int tot_sz, int max_scatter)
{
	struct mmc_test_area *t = &test->area;
	unsigned int dev_addr, i, cnt, sz, ssz;
	struct timespec ts1, ts2;
	int ret;

	sz = t->max_tfr;

	/*
	 * In the case of a maximally scattered transfer, the maximum transfer
	 * size is further limited by using PAGE_SIZE segments.
	 */
	if (max_scatter) {
		unsigned long max_tfr;

		if (t->max_seg_sz >= PAGE_SIZE)
			max_tfr = t->max_segs * PAGE_SIZE;
		else
			max_tfr = t->max_segs * t->max_seg_sz;
		if (sz > max_tfr)
			sz = max_tfr;
	}

	ssz = sz >> 9;
	dev_addr = mmc_test_capacity(test->card) / 4;
	if (tot_sz > dev_addr << 9)
		tot_sz = dev_addr << 9;
	cnt = tot_sz / sz;
	dev_addr &= 0xffff0000; /* Round to 64MiB boundary */

	getnstimeofday(&ts1);
	for (i = 0; i < cnt; i++) {
		ret = mmc_test_area_io(test, sz, dev_addr, write,
				       max_scatter, 0);
		if (ret)
			return ret;
		dev_addr += ssz;
	}
	getnstimeofday(&ts2);

	mmc_test_print_avg_rate(test, sz, cnt, &ts1, &ts2);

	return 0;
}

/* Repeated scattered sequential transfers of 10MiB/100MiB/1000MiB. */
static int mmc_test_large_seq_perf(struct mmc_test_card *test, int write)
{
	int ret, i;

	for (i = 0; i < 10; i++) {
		ret = mmc_test_seq_perf(test, write, 10 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 5; i++) {
		ret = mmc_test_seq_perf(test, write, 100 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}
	for (i = 0; i < 3; i++) {
		ret = mmc_test_seq_perf(test, write, 1000 * 1024 * 1024, 1);
		if (ret)
			return ret;
	}

	return ret;
}

/*
 * Large sequential read performance.
 */
static int mmc_test_large_seq_read_perf(struct mmc_test_card *test)
{
	return mmc_test_large_seq_perf(test, 0);
}

/*
 * Large sequential write performance.
*/ static int mmc_test_large_seq_write_perf(struct mmc_test_card *test) { return mmc_test_large_seq_perf(test, 1); } static int mmc_test_rw_multiple(struct mmc_test_card *test, struct mmc_test_multiple_rw *tdata, unsigned int reqsize, unsigned int size, int min_sg_len) { unsigned int dev_addr; struct mmc_test_area *t = &test->area; int ret = 0; /* Set up test area */ if (size > mmc_test_capacity(test->card) / 2 * 512) size = mmc_test_capacity(test->card) / 2 * 512; if (reqsize > t->max_tfr) reqsize = t->max_tfr; dev_addr = mmc_test_capacity(test->card) / 4; if ((dev_addr & 0xffff0000)) dev_addr &= 0xffff0000; /* Round to 64MiB boundary */ else dev_addr &= 0xfffff800; /* Round to 1MiB boundary */ if (!dev_addr) goto err; if (reqsize > size) return 0; /* prepare test area */ if (mmc_can_erase(test->card) && tdata->prepare & MMC_TEST_PREP_ERASE) { ret = mmc_erase(test->card, dev_addr, size / 512, MMC_SECURE_ERASE_ARG); if (ret) ret = mmc_erase(test->card, dev_addr, size / 512, MMC_ERASE_ARG); if (ret) goto err; } /* Run test */ ret = mmc_test_area_io_seq(test, reqsize, dev_addr, tdata->do_write, 0, 1, size / reqsize, tdata->do_nonblock_req, min_sg_len); if (ret) goto err; return ret; err: pr_info("[%s] error\n", __func__); return ret; } static int mmc_test_rw_multiple_size(struct mmc_test_card *test, struct mmc_test_multiple_rw *rw) { int ret = 0; int i; void *pre_req = test->card->host->ops->pre_req; void *post_req = test->card->host->ops->post_req; if (rw->do_nonblock_req && ((!pre_req && post_req) || (pre_req && !post_req))) { pr_info("error: only one of pre/post is defined\n"); return -EINVAL; } for (i = 0 ; i < rw->len && ret == 0; i++) { ret = mmc_test_rw_multiple(test, rw, rw->bs[i], rw->size, 0); if (ret) break; } return ret; } static int mmc_test_rw_multiple_sg_len(struct mmc_test_card *test, struct mmc_test_multiple_rw *rw) { int ret = 0; int i; for (i = 0 ; i < rw->len && ret == 0; i++) { ret = mmc_test_rw_multiple(test, rw, 512*1024, rw->size, 
rw->sg_len[i]); if (ret) break; } return ret; } /* * Multiple blocking write 4k to 4 MB chunks */ static int mmc_test_profile_mult_write_blocking_perf(struct mmc_test_card *test) { unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; struct mmc_test_multiple_rw test_data = { .bs = bs, .size = TEST_AREA_MAX_SIZE, .len = ARRAY_SIZE(bs), .do_write = true, .do_nonblock_req = false, .prepare = MMC_TEST_PREP_ERASE, }; return mmc_test_rw_multiple_size(test, &test_data); }; /* * Multiple non-blocking write 4k to 4 MB chunks */ static int mmc_test_profile_mult_write_nonblock_perf(struct mmc_test_card *test) { unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; struct mmc_test_multiple_rw test_data = { .bs = bs, .size = TEST_AREA_MAX_SIZE, .len = ARRAY_SIZE(bs), .do_write = true, .do_nonblock_req = true, .prepare = MMC_TEST_PREP_ERASE, }; return mmc_test_rw_multiple_size(test, &test_data); } /* * Multiple blocking read 4k to 4 MB chunks */ static int mmc_test_profile_mult_read_blocking_perf(struct mmc_test_card *test) { unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; struct mmc_test_multiple_rw test_data = { .bs = bs, .size = TEST_AREA_MAX_SIZE, .len = ARRAY_SIZE(bs), .do_write = false, .do_nonblock_req = false, .prepare = MMC_TEST_PREP_NONE, }; return mmc_test_rw_multiple_size(test, &test_data); } /* * Multiple non-blocking read 4k to 4 MB chunks */ static int mmc_test_profile_mult_read_nonblock_perf(struct mmc_test_card *test) { unsigned int bs[] = {1 << 12, 1 << 13, 1 << 14, 1 << 15, 1 << 16, 1 << 17, 1 << 18, 1 << 19, 1 << 20, 1 << 22}; struct mmc_test_multiple_rw test_data = { .bs = bs, .size = TEST_AREA_MAX_SIZE, .len = ARRAY_SIZE(bs), .do_write = false, .do_nonblock_req = true, .prepare = MMC_TEST_PREP_NONE, }; return mmc_test_rw_multiple_size(test, &test_data); } /* * Multiple blocking 
write 1 to 512 sg elements */ static int mmc_test_profile_sglen_wr_blocking_perf(struct mmc_test_card *test) { unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7, 1 << 8, 1 << 9}; struct mmc_test_multiple_rw test_data = { .sg_len = sg_len, .size = TEST_AREA_MAX_SIZE, .len = ARRAY_SIZE(sg_len), .do_write = true, .do_nonblock_req = false, .prepare = MMC_TEST_PREP_ERASE, }; return mmc_test_rw_multiple_sg_len(test, &test_data); }; /* * Multiple non-blocking write 1 to 512 sg elements */ static int mmc_test_profile_sglen_wr_nonblock_perf(struct mmc_test_card *test) { unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7, 1 << 8, 1 << 9}; struct mmc_test_multiple_rw test_data = { .sg_len = sg_len, .size = TEST_AREA_MAX_SIZE, .len = ARRAY_SIZE(sg_len), .do_write = true, .do_nonblock_req = true, .prepare = MMC_TEST_PREP_ERASE, }; return mmc_test_rw_multiple_sg_len(test, &test_data); } /* * Multiple blocking read 1 to 512 sg elements */ static int mmc_test_profile_sglen_r_blocking_perf(struct mmc_test_card *test) { unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7, 1 << 8, 1 << 9}; struct mmc_test_multiple_rw test_data = { .sg_len = sg_len, .size = TEST_AREA_MAX_SIZE, .len = ARRAY_SIZE(sg_len), .do_write = false, .do_nonblock_req = false, .prepare = MMC_TEST_PREP_NONE, }; return mmc_test_rw_multiple_sg_len(test, &test_data); } /* * Multiple non-blocking read 1 to 512 sg elements */ static int mmc_test_profile_sglen_r_nonblock_perf(struct mmc_test_card *test) { unsigned int sg_len[] = {1, 1 << 3, 1 << 4, 1 << 5, 1 << 6, 1 << 7, 1 << 8, 1 << 9}; struct mmc_test_multiple_rw test_data = { .sg_len = sg_len, .size = TEST_AREA_MAX_SIZE, .len = ARRAY_SIZE(sg_len), .do_write = false, .do_nonblock_req = true, .prepare = MMC_TEST_PREP_NONE, }; return mmc_test_rw_multiple_sg_len(test, &test_data); } /* * eMMC hardware reset. 
*/ static int mmc_test_hw_reset(struct mmc_test_card *test) { struct mmc_card *card = test->card; struct mmc_host *host = card->host; int err; err = mmc_hw_reset_check(host); if (!err) return RESULT_OK; if (err == -ENOSYS) return RESULT_FAIL; if (err != -EOPNOTSUPP) return err; if (!mmc_can_reset(card)) return RESULT_UNSUP_CARD; return RESULT_UNSUP_HOST; } static const struct mmc_test_case mmc_test_cases[] = { { .name = "Basic write (no data verification)", .run = mmc_test_basic_write, }, { .name = "Basic read (no data verification)", .run = mmc_test_basic_read, }, { .name = "Basic write (with data verification)", .prepare = mmc_test_prepare_write, .run = mmc_test_verify_write, .cleanup = mmc_test_cleanup, }, { .name = "Basic read (with data verification)", .prepare = mmc_test_prepare_read, .run = mmc_test_verify_read, .cleanup = mmc_test_cleanup, }, { .name = "Multi-block write", .prepare = mmc_test_prepare_write, .run = mmc_test_multi_write, .cleanup = mmc_test_cleanup, }, { .name = "Multi-block read", .prepare = mmc_test_prepare_read, .run = mmc_test_multi_read, .cleanup = mmc_test_cleanup, }, { .name = "Power of two block writes", .prepare = mmc_test_prepare_write, .run = mmc_test_pow2_write, .cleanup = mmc_test_cleanup, }, { .name = "Power of two block reads", .prepare = mmc_test_prepare_read, .run = mmc_test_pow2_read, .cleanup = mmc_test_cleanup, }, { .name = "Weird sized block writes", .prepare = mmc_test_prepare_write, .run = mmc_test_weird_write, .cleanup = mmc_test_cleanup, }, { .name = "Weird sized block reads", .prepare = mmc_test_prepare_read, .run = mmc_test_weird_read, .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned write", .prepare = mmc_test_prepare_write, .run = mmc_test_align_write, .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned read", .prepare = mmc_test_prepare_read, .run = mmc_test_align_read, .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned multi-block write", .prepare = mmc_test_prepare_write, .run = 
mmc_test_align_multi_write, .cleanup = mmc_test_cleanup, }, { .name = "Badly aligned multi-block read", .prepare = mmc_test_prepare_read, .run = mmc_test_align_multi_read, .cleanup = mmc_test_cleanup, }, { .name = "Correct xfer_size at write (start failure)", .run = mmc_test_xfersize_write, }, { .name = "Correct xfer_size at read (start failure)", .run = mmc_test_xfersize_read, }, { .name = "Correct xfer_size at write (midway failure)", .run = mmc_test_multi_xfersize_write, }, { .name = "Correct xfer_size at read (midway failure)", .run = mmc_test_multi_xfersize_read, }, #ifdef CONFIG_HIGHMEM { .name = "Highmem write", .prepare = mmc_test_prepare_write, .run = mmc_test_write_high, .cleanup = mmc_test_cleanup, }, { .name = "Highmem read", .prepare = mmc_test_prepare_read, .run = mmc_test_read_high, .cleanup = mmc_test_cleanup, }, { .name = "Multi-block highmem write", .prepare = mmc_test_prepare_write, .run = mmc_test_multi_write_high, .cleanup = mmc_test_cleanup, }, { .name = "Multi-block highmem read", .prepare = mmc_test_prepare_read, .run = mmc_test_multi_read_high, .cleanup = mmc_test_cleanup, }, #else { .name = "Highmem write", .run = mmc_test_no_highmem, }, { .name = "Highmem read", .run = mmc_test_no_highmem, }, { .name = "Multi-block highmem write", .run = mmc_test_no_highmem, }, { .name = "Multi-block highmem read", .run = mmc_test_no_highmem, }, #endif /* CONFIG_HIGHMEM */ { .name = "Best-case read performance", .prepare = mmc_test_area_prepare_fill, .run = mmc_test_best_read_performance, .cleanup = mmc_test_area_cleanup, }, { .name = "Best-case write performance", .prepare = mmc_test_area_prepare_erase, .run = mmc_test_best_write_performance, .cleanup = mmc_test_area_cleanup, }, { .name = "Best-case read performance into scattered pages", .prepare = mmc_test_area_prepare_fill, .run = mmc_test_best_read_perf_max_scatter, .cleanup = mmc_test_area_cleanup, }, { .name = "Best-case write performance from scattered pages", .prepare = 
mmc_test_area_prepare_erase, .run = mmc_test_best_write_perf_max_scatter, .cleanup = mmc_test_area_cleanup, }, { .name = "Single read performance by transfer size", .prepare = mmc_test_area_prepare_fill, .run = mmc_test_profile_read_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Single write performance by transfer size", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_write_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Single trim performance by transfer size", .prepare = mmc_test_area_prepare_fill, .run = mmc_test_profile_trim_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Consecutive read performance by transfer size", .prepare = mmc_test_area_prepare_fill, .run = mmc_test_profile_seq_read_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Consecutive write performance by transfer size", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_seq_write_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Consecutive trim performance by transfer size", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_seq_trim_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Random read performance by transfer size", .prepare = mmc_test_area_prepare, .run = mmc_test_random_read_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Random write performance by transfer size", .prepare = mmc_test_area_prepare, .run = mmc_test_random_write_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Large sequential read into scattered pages", .prepare = mmc_test_area_prepare, .run = mmc_test_large_seq_read_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Large sequential write from scattered pages", .prepare = mmc_test_area_prepare, .run = mmc_test_large_seq_write_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Write performance with blocking req 4k to 4MB", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_mult_write_blocking_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Write performance with non-blocking req 
4k to 4MB", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_mult_write_nonblock_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Read performance with blocking req 4k to 4MB", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_mult_read_blocking_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Read performance with non-blocking req 4k to 4MB", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_mult_read_nonblock_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Write performance blocking req 1 to 512 sg elems", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_sglen_wr_blocking_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Write performance non-blocking req 1 to 512 sg elems", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_sglen_wr_nonblock_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Read performance blocking req 1 to 512 sg elems", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_sglen_r_blocking_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "Read performance non-blocking req 1 to 512 sg elems", .prepare = mmc_test_area_prepare, .run = mmc_test_profile_sglen_r_nonblock_perf, .cleanup = mmc_test_area_cleanup, }, { .name = "eMMC hardware reset", .run = mmc_test_hw_reset, }, }; static DEFINE_MUTEX(mmc_test_lock); static LIST_HEAD(mmc_test_result); static void mmc_test_run(struct mmc_test_card *test, int testcase) { int i, ret; pr_info("%s: Starting tests of card %s...\n", mmc_hostname(test->card->host), mmc_card_id(test->card)); mmc_claim_host(test->card->host); for (i = 0;i < ARRAY_SIZE(mmc_test_cases);i++) { struct mmc_test_general_result *gr; if (testcase && ((i + 1) != testcase)) continue; pr_info("%s: Test case %d. %s...\n", mmc_hostname(test->card->host), i + 1, mmc_test_cases[i].name); if (mmc_test_cases[i].prepare) { ret = mmc_test_cases[i].prepare(test); if (ret) { pr_info("%s: Result: Prepare " "stage failed! 
(%d)\n", mmc_hostname(test->card->host), ret); continue; } } gr = kzalloc(sizeof(struct mmc_test_general_result), GFP_KERNEL); if (gr) { INIT_LIST_HEAD(&gr->tr_lst); /* Assign data what we know already */ gr->card = test->card; gr->testcase = i; /* Append container to global one */ list_add_tail(&gr->link, &mmc_test_result); /* * Save the pointer to created container in our private * structure. */ test->gr = gr; } ret = mmc_test_cases[i].run(test); switch (ret) { case RESULT_OK: pr_info("%s: Result: OK\n", mmc_hostname(test->card->host)); break; case RESULT_FAIL: pr_info("%s: Result: FAILED\n", mmc_hostname(test->card->host)); break; case RESULT_UNSUP_HOST: pr_info("%s: Result: UNSUPPORTED " "(by host)\n", mmc_hostname(test->card->host)); break; case RESULT_UNSUP_CARD: pr_info("%s: Result: UNSUPPORTED " "(by card)\n", mmc_hostname(test->card->host)); break; default: pr_info("%s: Result: ERROR (%d)\n", mmc_hostname(test->card->host), ret); } /* Save the result */ if (gr) gr->result = ret; if (mmc_test_cases[i].cleanup) { ret = mmc_test_cases[i].cleanup(test); if (ret) { pr_info("%s: Warning: Cleanup " "stage failed! 
(%d)\n", mmc_hostname(test->card->host), ret); } } } mmc_release_host(test->card->host); pr_info("%s: Tests completed.\n", mmc_hostname(test->card->host)); } static void mmc_test_free_result(struct mmc_card *card) { struct mmc_test_general_result *gr, *grs; mutex_lock(&mmc_test_lock); list_for_each_entry_safe(gr, grs, &mmc_test_result, link) { struct mmc_test_transfer_result *tr, *trs; if (card && gr->card != card) continue; list_for_each_entry_safe(tr, trs, &gr->tr_lst, link) { list_del(&tr->link); kfree(tr); } list_del(&gr->link); kfree(gr); } mutex_unlock(&mmc_test_lock); } static LIST_HEAD(mmc_test_file_test); static int mtf_test_show(struct seq_file *sf, void *data) { struct mmc_card *card = (struct mmc_card *)sf->private; struct mmc_test_general_result *gr; mutex_lock(&mmc_test_lock); list_for_each_entry(gr, &mmc_test_result, link) { struct mmc_test_transfer_result *tr; if (gr->card != card) continue; seq_printf(sf, "Test %d: %d\n", gr->testcase + 1, gr->result); list_for_each_entry(tr, &gr->tr_lst, link) { seq_printf(sf, "%u %d %lu.%09lu %u %u.%02u\n", tr->count, tr->sectors, (unsigned long)tr->ts.tv_sec, (unsigned long)tr->ts.tv_nsec, tr->rate, tr->iops / 100, tr->iops % 100); } } mutex_unlock(&mmc_test_lock); return 0; } static int mtf_test_open(struct inode *inode, struct file *file) { return single_open(file, mtf_test_show, inode->i_private); } static ssize_t mtf_test_write(struct file *file, const char __user *buf, size_t count, loff_t *pos) { struct seq_file *sf = (struct seq_file *)file->private_data; struct mmc_card *card = (struct mmc_card *)sf->private; struct mmc_test_card *test; char lbuf[12]; long testcase; if (count >= sizeof(lbuf)) return -EINVAL; if (copy_from_user(lbuf, buf, count)) return -EFAULT; lbuf[count] = '\0'; if (strict_strtol(lbuf, 10, &testcase)) return -EINVAL; test = kzalloc(sizeof(struct mmc_test_card), GFP_KERNEL); if (!test) return -ENOMEM; /* * Remove all test cases associated with given card. 
Thus we have only * actual data of the last run. */ mmc_test_free_result(card); test->card = card; test->buffer = kzalloc(BUFFER_SIZE, GFP_KERNEL); #ifdef CONFIG_HIGHMEM test->highmem = alloc_pages(GFP_KERNEL | __GFP_HIGHMEM, BUFFER_ORDER); #endif #ifdef CONFIG_HIGHMEM if (test->buffer && test->highmem) { #else if (test->buffer) { #endif mutex_lock(&mmc_test_lock); mmc_test_run(test, testcase); mutex_unlock(&mmc_test_lock); } #ifdef CONFIG_HIGHMEM __free_pages(test->highmem, BUFFER_ORDER); #endif kfree(test->buffer); kfree(test); return count; } static const struct file_operations mmc_test_fops_test = { .open = mtf_test_open, .read = seq_read, .write = mtf_test_write, .llseek = seq_lseek, .release = single_release, }; static int mtf_testlist_show(struct seq_file *sf, void *data) { int i; mutex_lock(&mmc_test_lock); for (i = 0; i < ARRAY_SIZE(mmc_test_cases); i++) seq_printf(sf, "%d:\t%s\n", i+1, mmc_test_cases[i].name); mutex_unlock(&mmc_test_lock); return 0; } static int mtf_testlist_open(struct inode *inode, struct file *file) { return single_open(file, mtf_testlist_show, inode->i_private); } static const struct file_operations mmc_test_fops_testlist = { .open = mtf_testlist_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static void mmc_test_free_dbgfs_file(struct mmc_card *card) { struct mmc_test_dbgfs_file *df, *dfs; mutex_lock(&mmc_test_lock); list_for_each_entry_safe(df, dfs, &mmc_test_file_test, link) { if (card && df->card != card) continue; debugfs_remove(df->file); list_del(&df->link); kfree(df); } mutex_unlock(&mmc_test_lock); } static int __mmc_test_register_dbgfs_file(struct mmc_card *card, const char *name, umode_t mode, const struct file_operations *fops) { struct dentry *file = NULL; struct mmc_test_dbgfs_file *df; if (card->debugfs_root) file = debugfs_create_file(name, mode, card->debugfs_root, card, fops); if (IS_ERR_OR_NULL(file)) { dev_err(&card->dev, "Can't create %s. 
Perhaps debugfs is disabled.\n", name); return -ENODEV; } df = kmalloc(sizeof(struct mmc_test_dbgfs_file), GFP_KERNEL); if (!df) { debugfs_remove(file); dev_err(&card->dev, "Can't allocate memory for internal usage.\n"); return -ENOMEM; } df->card = card; df->file = file; list_add(&df->link, &mmc_test_file_test); return 0; } static int mmc_test_register_dbgfs_file(struct mmc_card *card) { int ret; mutex_lock(&mmc_test_lock); ret = __mmc_test_register_dbgfs_file(card, "test", S_IWUSR | S_IRUGO, &mmc_test_fops_test); if (ret) goto err; ret = __mmc_test_register_dbgfs_file(card, "testlist", S_IRUGO, &mmc_test_fops_testlist); if (ret) goto err; err: mutex_unlock(&mmc_test_lock); return ret; } static int mmc_test_probe(struct mmc_card *card) { int ret; if (!mmc_card_mmc(card) && !mmc_card_sd(card)) return -ENODEV; ret = mmc_test_register_dbgfs_file(card); if (ret) return ret; dev_info(&card->dev, "Card claimed for testing.\n"); return 0; } static void mmc_test_remove(struct mmc_card *card) { mmc_test_free_result(card); mmc_test_free_dbgfs_file(card); } static struct mmc_driver mmc_driver = { .drv = { .name = "mmc_test", }, .probe = mmc_test_probe, .remove = mmc_test_remove, }; static int __init mmc_test_init(void) { return mmc_register_driver(&mmc_driver); } static void __exit mmc_test_exit(void) { /* Clear stalled data if card is still plugged */ mmc_test_free_result(NULL); mmc_test_free_dbgfs_file(NULL); mmc_unregister_driver(&mmc_driver); } module_init(mmc_test_init); module_exit(mmc_test_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Multimedia Card (MMC) host test driver"); MODULE_AUTHOR("Pierre Ossman");
gpl-2.0
anwarMov/android_kernel_asus_a400cg
drivers/staging/usbip/userspace/libsrc/names.c
2337
11404
/* * names.c -- USB name database manipulation routines * * Copyright (C) 1999, 2000 Thomas Sailer (sailer@ife.ee.ethz.ch) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * * * * * Copyright (C) 2005 Takahiro Hirofuchi * - names_deinit() is added. * */ #include <sys/types.h> #include <sys/stat.h> #include <fcntl.h> #include <dirent.h> #include <string.h> #include <errno.h> #include <stdlib.h> #include <unistd.h> #include <stdio.h> #include <ctype.h> #include "names.h" #include "usbip_common.h" struct vendor { struct vendor *next; u_int16_t vendorid; char name[1]; }; struct product { struct product *next; u_int16_t vendorid, productid; char name[1]; }; struct class { struct class *next; u_int8_t classid; char name[1]; }; struct subclass { struct subclass *next; u_int8_t classid, subclassid; char name[1]; }; struct protocol { struct protocol *next; u_int8_t classid, subclassid, protocolid; char name[1]; }; struct genericstrtable { struct genericstrtable *next; unsigned int num; char name[1]; }; #define HASH1 0x10 #define HASH2 0x02 #define HASHSZ 16 static unsigned int hashnum(unsigned int num) { unsigned int mask1 = HASH1 << 27, mask2 = HASH2 << 27; for (; mask1 >= HASH1; mask1 >>= 1, mask2 >>= 1) if (num & mask1) num ^= mask2; return num & (HASHSZ-1); } static struct vendor *vendors[HASHSZ] = { NULL, }; static struct product 
*products[HASHSZ] = { NULL, }; static struct class *classes[HASHSZ] = { NULL, }; static struct subclass *subclasses[HASHSZ] = { NULL, }; static struct protocol *protocols[HASHSZ] = { NULL, }; const char *names_vendor(u_int16_t vendorid) { struct vendor *v; v = vendors[hashnum(vendorid)]; for (; v; v = v->next) if (v->vendorid == vendorid) return v->name; return NULL; } const char *names_product(u_int16_t vendorid, u_int16_t productid) { struct product *p; p = products[hashnum((vendorid << 16) | productid)]; for (; p; p = p->next) if (p->vendorid == vendorid && p->productid == productid) return p->name; return NULL; } const char *names_class(u_int8_t classid) { struct class *c; c = classes[hashnum(classid)]; for (; c; c = c->next) if (c->classid == classid) return c->name; return NULL; } const char *names_subclass(u_int8_t classid, u_int8_t subclassid) { struct subclass *s; s = subclasses[hashnum((classid << 8) | subclassid)]; for (; s; s = s->next) if (s->classid == classid && s->subclassid == subclassid) return s->name; return NULL; } const char *names_protocol(u_int8_t classid, u_int8_t subclassid, u_int8_t protocolid) { struct protocol *p; p = protocols[hashnum((classid << 16) | (subclassid << 8) | protocolid)]; for (; p; p = p->next) if (p->classid == classid && p->subclassid == subclassid && p->protocolid == protocolid) return p->name; return NULL; } /* add a cleanup function by takahiro */ struct pool { struct pool *next; void *mem; }; static struct pool *pool_head; static void *my_malloc(size_t size) { struct pool *p; p = calloc(1, sizeof(struct pool)); if (!p) { free(p); return NULL; } p->mem = calloc(1, size); if (!p->mem) return NULL; p->next = pool_head; pool_head = p; return p->mem; } void names_free(void) { struct pool *pool; if (!pool_head) return; for (pool = pool_head; pool != NULL; ) { struct pool *tmp; if (pool->mem) free(pool->mem); tmp = pool; pool = pool->next; free(tmp); } } static int new_vendor(const char *name, u_int16_t vendorid) { struct 
vendor *v; unsigned int h = hashnum(vendorid); v = vendors[h]; for (; v; v = v->next) if (v->vendorid == vendorid) return -1; v = my_malloc(sizeof(struct vendor) + strlen(name)); if (!v) return -1; strcpy(v->name, name); v->vendorid = vendorid; v->next = vendors[h]; vendors[h] = v; return 0; } static int new_product(const char *name, u_int16_t vendorid, u_int16_t productid) { struct product *p; unsigned int h = hashnum((vendorid << 16) | productid); p = products[h]; for (; p; p = p->next) if (p->vendorid == vendorid && p->productid == productid) return -1; p = my_malloc(sizeof(struct product) + strlen(name)); if (!p) return -1; strcpy(p->name, name); p->vendorid = vendorid; p->productid = productid; p->next = products[h]; products[h] = p; return 0; } static int new_class(const char *name, u_int8_t classid) { struct class *c; unsigned int h = hashnum(classid); c = classes[h]; for (; c; c = c->next) if (c->classid == classid) return -1; c = my_malloc(sizeof(struct class) + strlen(name)); if (!c) return -1; strcpy(c->name, name); c->classid = classid; c->next = classes[h]; classes[h] = c; return 0; } static int new_subclass(const char *name, u_int8_t classid, u_int8_t subclassid) { struct subclass *s; unsigned int h = hashnum((classid << 8) | subclassid); s = subclasses[h]; for (; s; s = s->next) if (s->classid == classid && s->subclassid == subclassid) return -1; s = my_malloc(sizeof(struct subclass) + strlen(name)); if (!s) return -1; strcpy(s->name, name); s->classid = classid; s->subclassid = subclassid; s->next = subclasses[h]; subclasses[h] = s; return 0; } static int new_protocol(const char *name, u_int8_t classid, u_int8_t subclassid, u_int8_t protocolid) { struct protocol *p; unsigned int h = hashnum((classid << 16) | (subclassid << 8) | protocolid); p = protocols[h]; for (; p; p = p->next) if (p->classid == classid && p->subclassid == subclassid && p->protocolid == protocolid) return -1; p = my_malloc(sizeof(struct protocol) + strlen(name)); if (!p) return 
-1; strcpy(p->name, name); p->classid = classid; p->subclassid = subclassid; p->protocolid = protocolid; p->next = protocols[h]; protocols[h] = p; return 0; } static void parse(FILE *f) { char buf[512], *cp; unsigned int linectr = 0; int lastvendor = -1; int lastclass = -1; int lastsubclass = -1; int lasthut = -1; int lastlang = -1; unsigned int u; while (fgets(buf, sizeof(buf), f)) { linectr++; /* remove line ends */ cp = strchr(buf, '\r'); if (cp) *cp = 0; cp = strchr(buf, '\n'); if (cp) *cp = 0; if (buf[0] == '#' || !buf[0]) continue; cp = buf; if (buf[0] == 'P' && buf[1] == 'H' && buf[2] == 'Y' && buf[3] == 'S' && buf[4] == 'D' && buf[5] == 'E' && buf[6] == 'S' && /*isspace(buf[7])*/ buf[7] == ' ') { continue; } if (buf[0] == 'P' && buf[1] == 'H' && buf[2] == 'Y' && /*isspace(buf[3])*/ buf[3] == ' ') { continue; } if (buf[0] == 'B' && buf[1] == 'I' && buf[2] == 'A' && buf[3] == 'S' && /*isspace(buf[4])*/ buf[4] == ' ') { continue; } if (buf[0] == 'L' && /*isspace(buf[1])*/ buf[1] == ' ') { lasthut = lastclass = lastvendor = lastsubclass = -1; /* * set 1 as pseudo-id to indicate that the parser is * in a `L' section. 
*/ lastlang = 1; continue; } if (buf[0] == 'C' && /*isspace(buf[1])*/ buf[1] == ' ') { /* class spec */ cp = buf+2; while (isspace(*cp)) cp++; if (!isxdigit(*cp)) { err("Invalid class spec at line %u", linectr); continue; } u = strtoul(cp, &cp, 16); while (isspace(*cp)) cp++; if (!*cp) { err("Invalid class spec at line %u", linectr); continue; } if (new_class(cp, u)) err("Duplicate class spec at line %u class %04x %s", linectr, u, cp); dbg("line %5u class %02x %s", linectr, u, cp); lasthut = lastlang = lastvendor = lastsubclass = -1; lastclass = u; continue; } if (buf[0] == 'A' && buf[1] == 'T' && isspace(buf[2])) { /* audio terminal type spec */ continue; } if (buf[0] == 'H' && buf[1] == 'C' && buf[2] == 'C' && isspace(buf[3])) { /* HID Descriptor bCountryCode */ continue; } if (isxdigit(*cp)) { /* vendor */ u = strtoul(cp, &cp, 16); while (isspace(*cp)) cp++; if (!*cp) { err("Invalid vendor spec at line %u", linectr); continue; } if (new_vendor(cp, u)) err("Duplicate vendor spec at line %u vendor %04x %s", linectr, u, cp); dbg("line %5u vendor %04x %s", linectr, u, cp); lastvendor = u; lasthut = lastlang = lastclass = lastsubclass = -1; continue; } if (buf[0] == '\t' && isxdigit(buf[1])) { /* product or subclass spec */ u = strtoul(buf+1, &cp, 16); while (isspace(*cp)) cp++; if (!*cp) { err("Invalid product/subclass spec at line %u", linectr); continue; } if (lastvendor != -1) { if (new_product(cp, lastvendor, u)) err("Duplicate product spec at line %u product %04x:%04x %s", linectr, lastvendor, u, cp); dbg("line %5u product %04x:%04x %s", linectr, lastvendor, u, cp); continue; } if (lastclass != -1) { if (new_subclass(cp, lastclass, u)) err("Duplicate subclass spec at line %u class %02x:%02x %s", linectr, lastclass, u, cp); dbg("line %5u subclass %02x:%02x %s", linectr, lastclass, u, cp); lastsubclass = u; continue; } if (lasthut != -1) { /* do not store hut */ continue; } if (lastlang != -1) { /* do not store langid */ continue; } err("Product/Subclass spec 
without prior Vendor/Class spec at line %u", linectr); continue; } if (buf[0] == '\t' && buf[1] == '\t' && isxdigit(buf[2])) { /* protocol spec */ u = strtoul(buf+2, &cp, 16); while (isspace(*cp)) cp++; if (!*cp) { err("Invalid protocol spec at line %u", linectr); continue; } if (lastclass != -1 && lastsubclass != -1) { if (new_protocol(cp, lastclass, lastsubclass, u)) err("Duplicate protocol spec at line %u class %02x:%02x:%02x %s", linectr, lastclass, lastsubclass, u, cp); dbg("line %5u protocol %02x:%02x:%02x %s", linectr, lastclass, lastsubclass, u, cp); continue; } err("Protocol spec without prior Class and Subclass spec at line %u", linectr); continue; } if (buf[0] == 'H' && buf[1] == 'I' && buf[2] == 'D' && /*isspace(buf[3])*/ buf[3] == ' ') { continue; } if (buf[0] == 'H' && buf[1] == 'U' && buf[2] == 'T' && /*isspace(buf[3])*/ buf[3] == ' ') { lastlang = lastclass = lastvendor = lastsubclass = -1; /* * set 1 as pseudo-id to indicate that the parser is * in a `HUT' section. */ lasthut = 1; continue; } if (buf[0] == 'R' && buf[1] == ' ') continue; if (buf[0] == 'V' && buf[1] == 'T') continue; err("Unknown line at line %u", linectr); } } int names_init(char *n) { FILE *f; f = fopen(n, "r"); if (!f) return errno; parse(f); fclose(f); return 0; }
gpl-2.0
dkhoi1997/android_kernel_samsung_aries
drivers/i2c/busses/i2c-puv3.c
3105
6680
/* * I2C driver for PKUnity-v3 SoC * Code specific to PKUnity SoC and UniCore ISA * * Maintained by GUAN Xue-tao <gxt@mprc.pku.edu.cn> * Copyright (C) 2001-2010 Guan Xuetao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/io.h> #include <mach/hardware.h> /* * Poll the i2c status register until the specified bit is set. * Returns 0 if timed out (100 msec). */ static short poll_status(unsigned long bit) { int loop_cntr = 1000; if (bit & I2C_STATUS_TFNF) { do { udelay(10); } while (!(readl(I2C_STATUS) & bit) && (--loop_cntr > 0)); } else { /* RXRDY handler */ do { if (readl(I2C_TAR) == I2C_TAR_EEPROM) msleep(20); else udelay(10); } while (!(readl(I2C_RXFLR) & 0xf) && (--loop_cntr > 0)); } return (loop_cntr > 0); } static int xfer_read(struct i2c_adapter *adap, unsigned char *buf, int length) { int i2c_reg = *buf; /* Read data */ while (length--) { if (!poll_status(I2C_STATUS_TFNF)) { dev_dbg(&adap->dev, "Tx FIFO Not Full timeout\n"); return -ETIMEDOUT; } /* send addr */ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD); /* get ready to next write */ i2c_reg++; /* send read CMD */ writel(I2C_DATACMD_READ, I2C_DATACMD); /* wait until the Rx FIFO have available */ if (!poll_status(I2C_STATUS_RFNE)) { dev_dbg(&adap->dev, "RXRDY timeout\n"); return -ETIMEDOUT; } /* read the data to buf */ *buf = (readl(I2C_DATACMD) & I2C_DATACMD_DAT_MASK); buf++; } return 0; } static int xfer_write(struct i2c_adapter *adap, unsigned char *buf, int length) { int i2c_reg = *buf; /* Do nothing but storing the reg_num to a static variable */ if (i2c_reg == -1) { printk(KERN_WARNING "Error i2c 
reg\n"); return -ETIMEDOUT; } if (length == 1) return 0; buf++; length--; while (length--) { /* send addr */ writel(i2c_reg | I2C_DATACMD_WRITE, I2C_DATACMD); /* send write CMD */ writel(*buf | I2C_DATACMD_WRITE, I2C_DATACMD); /* wait until the Rx FIFO have available */ msleep(20); /* read the data to buf */ i2c_reg++; buf++; } return 0; } /* * Generic i2c master transfer entrypoint. * */ static int puv3_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *pmsg, int num) { int i, ret; unsigned char swap; /* Disable i2c */ writel(I2C_ENABLE_DISABLE, I2C_ENABLE); /* Set the work mode and speed*/ writel(I2C_CON_MASTER | I2C_CON_SPEED_STD | I2C_CON_SLAVEDISABLE, I2C_CON); writel(pmsg->addr, I2C_TAR); /* Enable i2c */ writel(I2C_ENABLE_ENABLE, I2C_ENABLE); dev_dbg(&adap->dev, "puv3_i2c_xfer: processing %d messages:\n", num); for (i = 0; i < num; i++) { dev_dbg(&adap->dev, " #%d: %sing %d byte%s %s 0x%02x\n", i, pmsg->flags & I2C_M_RD ? "read" : "writ", pmsg->len, pmsg->len > 1 ? "s" : "", pmsg->flags & I2C_M_RD ? "from" : "to", pmsg->addr); if (pmsg->len && pmsg->buf) { /* sanity check */ if (pmsg->flags & I2C_M_RD) ret = xfer_read(adap, pmsg->buf, pmsg->len); else ret = xfer_write(adap, pmsg->buf, pmsg->len); if (ret) return ret; } dev_dbg(&adap->dev, "transfer complete\n"); pmsg++; /* next message */ } /* XXX: fixup be16_to_cpu in bq27x00_battery.c */ if (pmsg->addr == I2C_TAR_PWIC) { swap = pmsg->buf[0]; pmsg->buf[0] = pmsg->buf[1]; pmsg->buf[1] = swap; } return i; } /* * Return list of supported functionality. */ static u32 puv3_i2c_func(struct i2c_adapter *adapter) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static struct i2c_algorithm puv3_i2c_algorithm = { .master_xfer = puv3_i2c_xfer, .functionality = puv3_i2c_func, }; /* * Main initialization routine. 
*/ static int __devinit puv3_i2c_probe(struct platform_device *pdev) { struct i2c_adapter *adapter; struct resource *mem; int rc; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) return -ENODEV; if (!request_mem_region(mem->start, resource_size(mem), "puv3_i2c")) return -EBUSY; adapter = kzalloc(sizeof(struct i2c_adapter), GFP_KERNEL); if (adapter == NULL) { dev_err(&pdev->dev, "can't allocate inteface!\n"); rc = -ENOMEM; goto fail_nomem; } snprintf(adapter->name, sizeof(adapter->name), "PUV3-I2C at 0x%08x", mem->start); adapter->algo = &puv3_i2c_algorithm; adapter->class = I2C_CLASS_HWMON; adapter->dev.parent = &pdev->dev; platform_set_drvdata(pdev, adapter); adapter->nr = pdev->id; rc = i2c_add_numbered_adapter(adapter); if (rc) { dev_err(&pdev->dev, "Adapter '%s' registration failed\n", adapter->name); goto fail_add_adapter; } dev_info(&pdev->dev, "PKUnity v3 i2c bus adapter.\n"); return 0; fail_add_adapter: platform_set_drvdata(pdev, NULL); kfree(adapter); fail_nomem: release_mem_region(mem->start, resource_size(mem)); return rc; } static int __devexit puv3_i2c_remove(struct platform_device *pdev) { struct i2c_adapter *adapter = platform_get_drvdata(pdev); struct resource *mem; int rc; rc = i2c_del_adapter(adapter); if (rc) { dev_err(&pdev->dev, "Adapter '%s' delete fail\n", adapter->name); return rc; } put_device(&pdev->dev); platform_set_drvdata(pdev, NULL); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); return rc; } #ifdef CONFIG_PM static int puv3_i2c_suspend(struct platform_device *dev, pm_message_t state) { int poll_count; /* Disable the IIC */ writel(I2C_ENABLE_DISABLE, I2C_ENABLE); for (poll_count = 0; poll_count < 50; poll_count++) { if (readl(I2C_ENSTATUS) & I2C_ENSTATUS_ENABLE) udelay(25); } return 0; } static int puv3_i2c_resume(struct platform_device *dev) { return 0 ; } #else #define puv3_i2c_suspend NULL #define puv3_i2c_resume NULL #endif MODULE_ALIAS("platform:puv3_i2c"); 
static struct platform_driver puv3_i2c_driver = { .probe = puv3_i2c_probe, .remove = __devexit_p(puv3_i2c_remove), .suspend = puv3_i2c_suspend, .resume = puv3_i2c_resume, .driver = { .name = "PKUnity-v3-I2C", .owner = THIS_MODULE, } }; static int __init puv3_i2c_init(void) { return platform_driver_register(&puv3_i2c_driver); } static void __exit puv3_i2c_exit(void) { platform_driver_unregister(&puv3_i2c_driver); } module_init(puv3_i2c_init); module_exit(puv3_i2c_exit); MODULE_DESCRIPTION("PKUnity v3 I2C driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
GenTarkin/SGH-T769-kernel
drivers/mfd/ti-ssp.c
3105
11581
/* * Sequencer Serial Port (SSP) driver for Texas Instruments' SoCs * * Copyright (C) 2010 Texas Instruments Inc * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/errno.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/clk.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/spinlock.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/mfd/core.h> #include <linux/mfd/ti_ssp.h> /* Register Offsets */ #define REG_REV 0x00 #define REG_IOSEL_1 0x04 #define REG_IOSEL_2 0x08 #define REG_PREDIV 0x0c #define REG_INTR_ST 0x10 #define REG_INTR_EN 0x14 #define REG_TEST_CTRL 0x18 /* Per port registers */ #define PORT_CFG_2 0x00 #define PORT_ADDR 0x04 #define PORT_DATA 0x08 #define PORT_CFG_1 0x0c #define PORT_STATE 0x10 #define SSP_PORT_CONFIG_MASK (SSP_EARLY_DIN | SSP_DELAY_DOUT) #define SSP_PORT_CLKRATE_MASK 0x0f #define SSP_SEQRAM_WR_EN BIT(4) #define SSP_SEQRAM_RD_EN BIT(5) #define SSP_START BIT(15) #define SSP_BUSY BIT(10) #define SSP_PORT_ASL BIT(7) #define SSP_PORT_CFO1 BIT(6) #define SSP_PORT_SEQRAM_SIZE 32 static const int ssp_port_base[] = {0x040, 0x080}; static const int ssp_port_seqram[] = {0x100, 0x180}; struct ti_ssp { struct 
resource *res; struct device *dev; void __iomem *regs; spinlock_t lock; struct clk *clk; int irq; wait_queue_head_t wqh; /* * Some of the iosel2 register bits always read-back as 0, we need to * remember these values so that we don't clobber previously set * values. */ u32 iosel2; }; static inline struct ti_ssp *dev_to_ssp(struct device *dev) { return dev_get_drvdata(dev->parent); } static inline int dev_to_port(struct device *dev) { return to_platform_device(dev)->id; } /* Register Access Helpers, rmw() functions need to run locked */ static inline u32 ssp_read(struct ti_ssp *ssp, int reg) { return __raw_readl(ssp->regs + reg); } static inline void ssp_write(struct ti_ssp *ssp, int reg, u32 val) { __raw_writel(val, ssp->regs + reg); } static inline void ssp_rmw(struct ti_ssp *ssp, int reg, u32 mask, u32 bits) { ssp_write(ssp, reg, (ssp_read(ssp, reg) & ~mask) | bits); } static inline u32 ssp_port_read(struct ti_ssp *ssp, int port, int reg) { return ssp_read(ssp, ssp_port_base[port] + reg); } static inline void ssp_port_write(struct ti_ssp *ssp, int port, int reg, u32 val) { ssp_write(ssp, ssp_port_base[port] + reg, val); } static inline void ssp_port_rmw(struct ti_ssp *ssp, int port, int reg, u32 mask, u32 bits) { ssp_rmw(ssp, ssp_port_base[port] + reg, mask, bits); } static inline void ssp_port_clr_bits(struct ti_ssp *ssp, int port, int reg, u32 bits) { ssp_port_rmw(ssp, port, reg, bits, 0); } static inline void ssp_port_set_bits(struct ti_ssp *ssp, int port, int reg, u32 bits) { ssp_port_rmw(ssp, port, reg, 0, bits); } /* Called to setup port clock mode, caller must hold ssp->lock */ static int __set_mode(struct ti_ssp *ssp, int port, int mode) { mode &= SSP_PORT_CONFIG_MASK; ssp_port_rmw(ssp, port, PORT_CFG_1, SSP_PORT_CONFIG_MASK, mode); return 0; } int ti_ssp_set_mode(struct device *dev, int mode) { struct ti_ssp *ssp = dev_to_ssp(dev); int port = dev_to_port(dev); int ret; spin_lock(&ssp->lock); ret = __set_mode(ssp, port, mode); spin_unlock(&ssp->lock); 
return ret; } EXPORT_SYMBOL(ti_ssp_set_mode); /* Called to setup iosel2, caller must hold ssp->lock */ static void __set_iosel2(struct ti_ssp *ssp, u32 mask, u32 val) { ssp->iosel2 = (ssp->iosel2 & ~mask) | val; ssp_write(ssp, REG_IOSEL_2, ssp->iosel2); } /* Called to setup port iosel, caller must hold ssp->lock */ static void __set_iosel(struct ti_ssp *ssp, int port, u32 iosel) { unsigned val, shift = port ? 16 : 0; /* IOSEL1 gets the least significant 16 bits */ val = ssp_read(ssp, REG_IOSEL_1); val &= 0xffff << (port ? 0 : 16); val |= (iosel & 0xffff) << (port ? 16 : 0); ssp_write(ssp, REG_IOSEL_1, val); /* IOSEL2 gets the most significant 16 bits */ val = (iosel >> 16) & 0x7; __set_iosel2(ssp, 0x7 << shift, val << shift); } int ti_ssp_set_iosel(struct device *dev, u32 iosel) { struct ti_ssp *ssp = dev_to_ssp(dev); int port = dev_to_port(dev); spin_lock(&ssp->lock); __set_iosel(ssp, port, iosel); spin_unlock(&ssp->lock); return 0; } EXPORT_SYMBOL(ti_ssp_set_iosel); int ti_ssp_load(struct device *dev, int offs, u32* prog, int len) { struct ti_ssp *ssp = dev_to_ssp(dev); int port = dev_to_port(dev); int i; if (len > SSP_PORT_SEQRAM_SIZE) return -ENOSPC; spin_lock(&ssp->lock); /* Enable SeqRAM access */ ssp_port_set_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN); /* Copy code */ for (i = 0; i < len; i++) { __raw_writel(prog[i], ssp->regs + offs + 4*i + ssp_port_seqram[port]); } /* Disable SeqRAM access */ ssp_port_clr_bits(ssp, port, PORT_CFG_2, SSP_SEQRAM_WR_EN); spin_unlock(&ssp->lock); return 0; } EXPORT_SYMBOL(ti_ssp_load); int ti_ssp_raw_read(struct device *dev) { struct ti_ssp *ssp = dev_to_ssp(dev); int port = dev_to_port(dev); int shift = port ? 27 : 11; return (ssp_read(ssp, REG_IOSEL_2) >> shift) & 0xf; } EXPORT_SYMBOL(ti_ssp_raw_read); int ti_ssp_raw_write(struct device *dev, u32 val) { struct ti_ssp *ssp = dev_to_ssp(dev); int port = dev_to_port(dev), shift; spin_lock(&ssp->lock); shift = port ? 
22 : 6; val &= 0xf; __set_iosel2(ssp, 0xf << shift, val << shift); spin_unlock(&ssp->lock); return 0; } EXPORT_SYMBOL(ti_ssp_raw_write); static inline int __xfer_done(struct ti_ssp *ssp, int port) { return !(ssp_port_read(ssp, port, PORT_CFG_1) & SSP_BUSY); } int ti_ssp_run(struct device *dev, u32 pc, u32 input, u32 *output) { struct ti_ssp *ssp = dev_to_ssp(dev); int port = dev_to_port(dev); int ret; if (pc & ~(0x3f)) return -EINVAL; /* Grab ssp->lock to serialize rmw on ssp registers */ spin_lock(&ssp->lock); ssp_port_write(ssp, port, PORT_ADDR, input >> 16); ssp_port_write(ssp, port, PORT_DATA, input & 0xffff); ssp_port_rmw(ssp, port, PORT_CFG_1, 0x3f, pc); /* grab wait queue head lock to avoid race with the isr */ spin_lock_irq(&ssp->wqh.lock); /* kick off sequence execution in hardware */ ssp_port_set_bits(ssp, port, PORT_CFG_1, SSP_START); /* drop ssp lock; no register writes beyond this */ spin_unlock(&ssp->lock); ret = wait_event_interruptible_locked_irq(ssp->wqh, __xfer_done(ssp, port)); spin_unlock_irq(&ssp->wqh.lock); if (ret < 0) return ret; if (output) { *output = (ssp_port_read(ssp, port, PORT_ADDR) << 16) | (ssp_port_read(ssp, port, PORT_DATA) & 0xffff); } ret = ssp_port_read(ssp, port, PORT_STATE) & 0x3f; /* stop address */ return ret; } EXPORT_SYMBOL(ti_ssp_run); static irqreturn_t ti_ssp_interrupt(int irq, void *dev_data) { struct ti_ssp *ssp = dev_data; spin_lock(&ssp->wqh.lock); ssp_write(ssp, REG_INTR_ST, 0x3); wake_up_locked(&ssp->wqh); spin_unlock(&ssp->wqh.lock); return IRQ_HANDLED; } static int __devinit ti_ssp_probe(struct platform_device *pdev) { static struct ti_ssp *ssp; const struct ti_ssp_data *pdata = pdev->dev.platform_data; int error = 0, prediv = 0xff, id; unsigned long sysclk; struct device *dev = &pdev->dev; struct mfd_cell cells[2]; ssp = kzalloc(sizeof(*ssp), GFP_KERNEL); if (!ssp) { dev_err(dev, "cannot allocate device info\n"); return -ENOMEM; } ssp->dev = dev; dev_set_drvdata(dev, ssp); ssp->res = 
platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!ssp->res) { error = -ENODEV; dev_err(dev, "cannot determine register area\n"); goto error_res; } if (!request_mem_region(ssp->res->start, resource_size(ssp->res), pdev->name)) { error = -ENOMEM; dev_err(dev, "cannot claim register memory\n"); goto error_res; } ssp->regs = ioremap(ssp->res->start, resource_size(ssp->res)); if (!ssp->regs) { error = -ENOMEM; dev_err(dev, "cannot map register memory\n"); goto error_map; } ssp->clk = clk_get(dev, NULL); if (IS_ERR(ssp->clk)) { error = PTR_ERR(ssp->clk); dev_err(dev, "cannot claim device clock\n"); goto error_clk; } ssp->irq = platform_get_irq(pdev, 0); if (ssp->irq < 0) { error = -ENODEV; dev_err(dev, "unknown irq\n"); goto error_irq; } error = request_threaded_irq(ssp->irq, NULL, ti_ssp_interrupt, 0, dev_name(dev), ssp); if (error < 0) { dev_err(dev, "cannot acquire irq\n"); goto error_irq; } spin_lock_init(&ssp->lock); init_waitqueue_head(&ssp->wqh); /* Power on and initialize SSP */ error = clk_enable(ssp->clk); if (error) { dev_err(dev, "cannot enable device clock\n"); goto error_enable; } /* Reset registers to a sensible known state */ ssp_write(ssp, REG_IOSEL_1, 0); ssp_write(ssp, REG_IOSEL_2, 0); ssp_write(ssp, REG_INTR_EN, 0x3); ssp_write(ssp, REG_INTR_ST, 0x3); ssp_write(ssp, REG_TEST_CTRL, 0); ssp_port_write(ssp, 0, PORT_CFG_1, SSP_PORT_ASL); ssp_port_write(ssp, 1, PORT_CFG_1, SSP_PORT_ASL); ssp_port_write(ssp, 0, PORT_CFG_2, SSP_PORT_CFO1); ssp_port_write(ssp, 1, PORT_CFG_2, SSP_PORT_CFO1); sysclk = clk_get_rate(ssp->clk); if (pdata && pdata->out_clock) prediv = (sysclk / pdata->out_clock) - 1; prediv = clamp(prediv, 0, 0xff); ssp_rmw(ssp, REG_PREDIV, 0xff, prediv); memset(cells, 0, sizeof(cells)); for (id = 0; id < 2; id++) { const struct ti_ssp_dev_data *data = &pdata->dev_data[id]; cells[id].id = id; cells[id].name = data->dev_name; cells[id].platform_data = data->pdata; cells[id].data_size = data->pdata_size; } error = mfd_add_devices(dev, 0, cells, 2, 
NULL, 0); if (error < 0) { dev_err(dev, "cannot add mfd cells\n"); goto error_enable; } return 0; error_enable: free_irq(ssp->irq, ssp); error_irq: clk_put(ssp->clk); error_clk: iounmap(ssp->regs); error_map: release_mem_region(ssp->res->start, resource_size(ssp->res)); error_res: kfree(ssp); return error; } static int __devexit ti_ssp_remove(struct platform_device *pdev) { struct device *dev = &pdev->dev; struct ti_ssp *ssp = dev_get_drvdata(dev); mfd_remove_devices(dev); clk_disable(ssp->clk); free_irq(ssp->irq, ssp); clk_put(ssp->clk); iounmap(ssp->regs); release_mem_region(ssp->res->start, resource_size(ssp->res)); kfree(ssp); dev_set_drvdata(dev, NULL); return 0; } static struct platform_driver ti_ssp_driver = { .probe = ti_ssp_probe, .remove = __devexit_p(ti_ssp_remove), .driver = { .name = "ti-ssp", .owner = THIS_MODULE, } }; static int __init ti_ssp_init(void) { return platform_driver_register(&ti_ssp_driver); } module_init(ti_ssp_init); static void __exit ti_ssp_exit(void) { platform_driver_unregister(&ti_ssp_driver); } module_exit(ti_ssp_exit); MODULE_DESCRIPTION("Sequencer Serial Port (SSP) Driver"); MODULE_AUTHOR("Cyril Chemparathy"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:ti-ssp");
gpl-2.0
showp1984/bricked-bacon
arch/arm/mach-msm/pcie_irq.c
3361
4699
/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * MSM PCIe controller IRQ driver.
 *
 * Implements the architecture hooks (arch_setup_msi_irq /
 * arch_teardown_msi_irq) that bridge the Linux PCI MSI framework to the
 * MSM PCIe core's MSI controller, plus handlers for the physical MSI
 * interrupt line and the PCIE_WAKE_N wakeup line.
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitops.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <mach/irqs.h>

#include "pcie.h"

/* Any address will do here, as it won't be dereferenced */
#define MSM_PCIE_MSI_PHY 0xa0000000

/* MSI controller register offsets inside the PCIe core register space */
#define PCIE20_MSI_CTRL_ADDR (0x820)
#define PCIE20_MSI_CTRL_UPPER_ADDR (0x824)
#define PCIE20_MSI_CTRL_INTR_EN (0x828)
#define PCIE20_MSI_CTRL_INTR_MASK (0x82C)
#define PCIE20_MSI_CTRL_INTR_STATUS (0x830)

/* number of 32-vector MSI banks (EN/MASK/STATUS triplets, 12 bytes apart) */
#define PCIE20_MSI_CTRL_MAX 8

/* allocation bitmap for virtual MSI irq numbers; bit set == irq in use */
static DECLARE_BITMAP(msi_irq_in_use, NR_PCIE_MSI_IRQS);

/*
 * PCIE_WAKE_N handler: the interrupt's only job is to wake the system,
 * so there is nothing to do beyond acknowledging it.
 */
static irqreturn_t handle_wake_irq(int irq, void *data)
{
	PCIE_DBG("\n");
	return IRQ_HANDLED;
}

/*
 * Physical MSI line handler: scans all banks' status registers and
 * demultiplexes each pending bit into its virtual MSI irq.
 */
static irqreturn_t handle_msi_irq(int irq, void *data)
{
	int i, j;
	unsigned long val;
	struct msm_pcie_dev_t *dev = data;
	void __iomem *ctrl_status;

	/* check for set bits, clear it by setting that bit
	   and trigger corresponding irq */
	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++) {
		/* each bank's STATUS register is 12 bytes after the last */
		ctrl_status = dev->pcie20 +
				PCIE20_MSI_CTRL_INTR_STATUS + (i * 12);

		val = readl_relaxed(ctrl_status);
		/* re-read after each ack so bits raised meanwhile are seen */
		while (val) {
			j = find_first_bit(&val, 32);
			/* write-1-to-clear the pending bit */
			writel_relaxed(BIT(j), ctrl_status);
			/* ensure that interrupt is cleared (acked) */
			wmb();

			/* bank i contributes vectors [32*i, 32*i+31] */
			generic_handle_irq(MSM_PCIE_MSI_INT(j + (32 * i)));
			val = readl_relaxed(ctrl_status);
		}
	}

	return IRQ_HANDLED;
}

/*
 * One-time IRQ setup for the controller: program the MSI target address,
 * enable all vectors, and claim the MSI and wake interrupt lines.
 * Returns 0 on success or the request_irq() error code.
 */
uint32_t __init msm_pcie_irq_init(struct msm_pcie_dev_t *dev)
{
	int i, rc;

	PCIE_DBG("\n");

	/* program MSI controller and enable all interrupts */
	writel_relaxed(MSM_PCIE_MSI_PHY, dev->pcie20 + PCIE20_MSI_CTRL_ADDR);
	writel_relaxed(0, dev->pcie20 + PCIE20_MSI_CTRL_UPPER_ADDR);

	for (i = 0; i < PCIE20_MSI_CTRL_MAX; i++)
		writel_relaxed(~0, dev->pcie20 +
			       PCIE20_MSI_CTRL_INTR_EN + (i * 12));

	/* ensure that hardware is configured before proceeding */
	wmb();

	/* register handler for physical MSI interrupt line */
	rc = request_irq(PCIE20_INT_MSI, handle_msi_irq, IRQF_TRIGGER_RISING,
			 "msm_pcie_msi", dev);
	if (rc) {
		pr_err("Unable to allocate msi interrupt\n");
		goto out;
	}

	/* register handler for PCIE_WAKE_N interrupt line */
	rc = request_irq(dev->wake_n, handle_wake_irq, IRQF_TRIGGER_FALLING,
			 "msm_pcie_wake", dev);
	if (rc) {
		pr_err("Unable to allocate wake interrupt\n");
		free_irq(PCIE20_INT_MSI, dev);
		goto out;
	}

	enable_irq_wake(dev->wake_n);

	/* PCIE_WAKE_N should be enabled only during system suspend */
	disable_irq(dev->wake_n);

out:
	return rc;
}

/* release both interrupt lines claimed in msm_pcie_irq_init() */
void __exit msm_pcie_irq_deinit(struct msm_pcie_dev_t *dev)
{
	free_irq(PCIE20_INT_MSI, dev);
	free_irq(dev->wake_n, dev);
}

/* tear down one virtual MSI irq and return its slot to the bitmap */
void msm_pcie_destroy_irq(unsigned int irq)
{
	int pos = irq - MSM_PCIE_MSI_INT(0);

	dynamic_irq_cleanup(irq);
	clear_bit(pos, msi_irq_in_use);
}

/* hookup to linux pci msi framework */
void arch_teardown_msi_irq(unsigned int irq)
{
	PCIE_DBG("irq %d deallocated\n", irq);
	msm_pcie_destroy_irq(irq);
}

/* no-op ack: the demux handler already write-1-clears the hardware bit */
static void msm_pcie_msi_nop(struct irq_data *d)
{
	return;
}

static struct irq_chip pcie_msi_chip = {
	.name = "msm-pcie-msi",
	.irq_ack = msm_pcie_msi_nop,
	.irq_enable = unmask_msi_irq,
	.irq_disable = mask_msi_irq,
	.irq_mask = mask_msi_irq,
	.irq_unmask = unmask_msi_irq,
};

/*
 * Allocate a free virtual MSI irq number.
 * test_and_set_bit() makes the claim atomic; on a lost race we simply
 * rescan.  Returns the irq number or -ENOSPC when all vectors are taken.
 */
static int msm_pcie_create_irq(void)
{
	int irq, pos;

again:
	pos = find_first_zero_bit(msi_irq_in_use, NR_PCIE_MSI_IRQS);
	irq = MSM_PCIE_MSI_INT(pos);
	if (irq >= (MSM_PCIE_MSI_INT(0) + NR_PCIE_MSI_IRQS))
		return -ENOSPC;

	if (test_and_set_bit(pos, msi_irq_in_use))
		goto again;

	dynamic_irq_init(irq);
	return irq;
}

/* hookup to linux pci msi framework */
int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc)
{
	int irq;
	struct msi_msg msg;

	irq = msm_pcie_create_irq();
	if (irq < 0)
		return irq;

	PCIE_DBG("irq %d allocated\n", irq);

	irq_set_msi_desc(irq, desc);

	/* write msi vector and data */
	msg.address_hi = 0;
	msg.address_lo = MSM_PCIE_MSI_PHY;
	/* data is the vector index; handle_msi_irq maps it back to irq */
	msg.data = irq - MSM_PCIE_MSI_INT(0);
	write_msi_msg(irq, &msg);

	irq_set_chip_and_handler(irq, &pcie_msi_chip, handle_simple_irq);
	set_irq_flags(irq, IRQF_VALID);

	return 0;
}
gpl-2.0
ion-storm/Unleashed-N5
drivers/video/msm/tvenc.c
3617
11108
/* Copyright (c) 2008-2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <mach/hardware.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <asm/system.h> #include <asm/mach-types.h> #include <linux/semaphore.h> #include <linux/uaccess.h> #include <linux/clk.h> #include <linux/platform_device.h> #define TVENC_C #include "tvenc.h" #include "msm_fb.h" #include "mdp4.h" /* AXI rate in KHz */ #define MSM_SYSTEM_BUS_RATE 128000000 static int tvenc_probe(struct platform_device *pdev); static int tvenc_remove(struct platform_device *pdev); static int tvenc_off(struct platform_device *pdev); static int tvenc_on(struct platform_device *pdev); static struct platform_device *pdev_list[MSM_FB_MAX_DEV_LIST]; static int pdev_list_cnt; static struct clk *tvenc_clk; static struct clk *tvdac_clk; static struct clk *tvenc_pclk; static struct clk *mdp_tv_clk; #ifdef CONFIG_FB_MSM_MDP40 static struct clk *tv_src_clk; #endif #ifdef CONFIG_MSM_BUS_SCALING static uint32_t tvenc_bus_scale_handle; #endif static int tvenc_runtime_suspend(struct device *dev) { dev_dbg(dev, "pm_runtime: suspending...\n"); return 0; } static int tvenc_runtime_resume(struct device *dev) { dev_dbg(dev, "pm_runtime: resuming...\n"); return 0; } static struct dev_pm_ops tvenc_dev_pm_ops = { .runtime_suspend = tvenc_runtime_suspend, .runtime_resume 
= tvenc_runtime_resume, }; static struct platform_driver tvenc_driver = { .probe = tvenc_probe, .remove = tvenc_remove, .suspend = NULL, .resume = NULL, .shutdown = NULL, .driver = { .name = "tvenc", .pm = &tvenc_dev_pm_ops }, }; int tvenc_set_encoder_clock(boolean clock_on) { int ret = 0; if (clock_on) { #ifdef CONFIG_FB_MSM_MDP40 /* Consolidated clock used by both HDMI & TV encoder. Clock exists only in MDP4 and not in older versions */ ret = clk_set_rate(tv_src_clk, 27000000); if (ret) { pr_err("%s: tvsrc_clk set rate failed! %d\n", __func__, ret); goto tvsrc_err; } #endif ret = clk_prepare_enable(tvenc_clk); if (ret) { pr_err("%s: tvenc_clk enable failed! %d\n", __func__, ret); goto tvsrc_err; } if (!IS_ERR(tvenc_pclk)) { ret = clk_prepare_enable(tvenc_pclk); if (ret) { pr_err("%s: tvenc_pclk enable failed! %d\n", __func__, ret); goto tvencp_err; } } return ret; } else { if (!IS_ERR(tvenc_pclk)) clk_disable_unprepare(tvenc_pclk); clk_disable_unprepare(tvenc_clk); return ret; } tvencp_err: clk_disable_unprepare(tvenc_clk); tvsrc_err: return ret; } int tvenc_set_clock(boolean clock_on) { int ret = 0; if (clock_on) { if (tvenc_pdata->poll) { ret = tvenc_set_encoder_clock(CLOCK_ON); if (ret) { pr_err("%s: TVenc clock(s) enable failed! %d\n", __func__, ret); goto tvenc_err; } } ret = clk_prepare_enable(tvdac_clk); if (ret) { pr_err("%s: tvdac_clk enable failed! %d\n", __func__, ret); goto tvdac_err; } if (!IS_ERR(mdp_tv_clk)) { ret = clk_prepare_enable(mdp_tv_clk); if (ret) { pr_err("%s: mdp_tv_clk enable failed! 
%d\n", __func__, ret); goto mdptv_err; } } return ret; } else { if (!IS_ERR(mdp_tv_clk)) clk_disable_unprepare(mdp_tv_clk); clk_disable_unprepare(tvdac_clk); if (tvenc_pdata->poll) tvenc_set_encoder_clock(CLOCK_OFF); return ret; } mdptv_err: clk_disable_unprepare(tvdac_clk); tvdac_err: tvenc_set_encoder_clock(CLOCK_OFF); tvenc_err: return ret; } static int tvenc_off(struct platform_device *pdev) { int ret = 0; struct msm_fb_data_type *mfd; mfd = platform_get_drvdata(pdev); ret = panel_next_off(pdev); if (ret) pr_err("%s: tvout_off failed! %d\n", __func__, ret); tvenc_set_clock(CLOCK_OFF); if (tvenc_pdata && tvenc_pdata->pm_vid_en) ret = tvenc_pdata->pm_vid_en(0); #ifdef CONFIG_MSM_BUS_SCALING if (tvenc_bus_scale_handle > 0) msm_bus_scale_client_update_request(tvenc_bus_scale_handle, 0); #else if (mfd->ebi1_clk) clk_disable_unprepare(mfd->ebi1_clk); #endif if (ret) pr_err("%s: pm_vid_en(off) failed! %d\n", __func__, ret); mdp4_extn_disp = 0; return ret; } static int tvenc_on(struct platform_device *pdev) { int ret = 0; #ifndef CONFIG_MSM_BUS_SCALING struct msm_fb_data_type *mfd = platform_get_drvdata(pdev); #endif #ifdef CONFIG_MSM_BUS_SCALING if (tvenc_bus_scale_handle > 0) msm_bus_scale_client_update_request(tvenc_bus_scale_handle, 1); #else if (mfd->ebi1_clk) clk_prepare_enable(mfd->ebi1_clk); #endif mdp4_extn_disp = 1; if (tvenc_pdata && tvenc_pdata->pm_vid_en) ret = tvenc_pdata->pm_vid_en(1); if (ret) { pr_err("%s: pm_vid_en(on) failed! %d\n", __func__, ret); return ret; } ret = tvenc_set_clock(CLOCK_ON); if (ret) { pr_err("%s: tvenc_set_clock(CLOCK_ON) failed! %d\n", __func__, ret); tvenc_pdata->pm_vid_en(0); goto error; } ret = panel_next_on(pdev); if (ret) { pr_err("%s: tvout_on failed! 
%d\n", __func__, ret); tvenc_set_clock(CLOCK_OFF); tvenc_pdata->pm_vid_en(0); } error: return ret; } void tvenc_gen_test_pattern(struct msm_fb_data_type *mfd) { uint32 reg = 0, i; reg = readl(MSM_TV_ENC_CTL); reg |= TVENC_CTL_TEST_PATT_EN; for (i = 0; i < 3; i++) { TV_OUT(TV_ENC_CTL, 0); /* disable TV encoder */ switch (i) { /* * TV Encoder - Color Bar Test Pattern */ case 0: reg |= TVENC_CTL_TPG_CLRBAR; break; /* * TV Encoder - Red Frame Test Pattern */ case 1: reg |= TVENC_CTL_TPG_REDCLR; break; /* * TV Encoder - Modulated Ramp Test Pattern */ default: reg |= TVENC_CTL_TPG_MODRAMP; break; } TV_OUT(TV_ENC_CTL, reg); mdelay(5000); switch (i) { /* * TV Encoder - Color Bar Test Pattern */ case 0: reg &= ~TVENC_CTL_TPG_CLRBAR; break; /* * TV Encoder - Red Frame Test Pattern */ case 1: reg &= ~TVENC_CTL_TPG_REDCLR; break; /* * TV Encoder - Modulated Ramp Test Pattern */ default: reg &= ~TVENC_CTL_TPG_MODRAMP; break; } } } static int tvenc_resource_initialized; static int tvenc_probe(struct platform_device *pdev) { struct msm_fb_data_type *mfd; struct platform_device *mdp_dev = NULL; struct msm_fb_panel_data *pdata = NULL; int rc, ret; struct clk *ebi1_clk = NULL; if (pdev->id == 0) { tvenc_base = ioremap(pdev->resource[0].start, pdev->resource[0].end - pdev->resource[0].start + 1); if (!tvenc_base) { pr_err("tvenc_base ioremap failed!\n"); return -ENOMEM; } tvenc_clk = clk_get(&pdev->dev, "enc_clk"); tvdac_clk = clk_get(&pdev->dev, "dac_clk"); tvenc_pclk = clk_get(&pdev->dev, "iface_clk"); mdp_tv_clk = clk_get(&pdev->dev, "mdp_clk"); #ifndef CONFIG_MSM_BUS_SCALING ebi1_clk = clk_get(&pdev->dev, "mem_clk"); if (IS_ERR(ebi1_clk)) { rc = PTR_ERR(ebi1_clk); goto tvenc_probe_err; } clk_set_rate(ebi1_clk, MSM_SYSTEM_BUS_RATE); #endif #ifdef CONFIG_FB_MSM_MDP40 tv_src_clk = clk_get(&pdev->dev, "src_clk"); if (IS_ERR(tv_src_clk)) tv_src_clk = tvenc_clk; /* Fallback to slave */ #endif if (IS_ERR(tvenc_clk)) { pr_err("%s: error: can't get tvenc_clk!\n", __func__); return 
PTR_ERR(tvenc_clk); } if (IS_ERR(tvdac_clk)) { pr_err("%s: error: can't get tvdac_clk!\n", __func__); return PTR_ERR(tvdac_clk); } if (IS_ERR(tvenc_pclk)) { ret = PTR_ERR(tvenc_pclk); if (-ENOENT == ret) pr_info("%s: tvenc_pclk does not exist!\n", __func__); else { pr_err("%s: error: can't get tvenc_pclk!\n", __func__); return ret; } } if (IS_ERR(mdp_tv_clk)) { ret = PTR_ERR(mdp_tv_clk); if (-ENOENT == ret) pr_info("%s: mdp_tv_clk does not exist!\n", __func__); else { pr_err("%s: error: can't get mdp_tv_clk!\n", __func__); return ret; } } tvenc_pdata = pdev->dev.platform_data; tvenc_resource_initialized = 1; return 0; } if (!tvenc_resource_initialized) return -EPERM; mfd = platform_get_drvdata(pdev); mfd->ebi1_clk = ebi1_clk; if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; if (pdev_list_cnt >= MSM_FB_MAX_DEV_LIST) return -ENOMEM; if (tvenc_base == NULL) return -ENOMEM; mdp_dev = platform_device_alloc("mdp", pdev->id); if (!mdp_dev) return -ENOMEM; /* * link to the latest pdev */ mfd->pdev = mdp_dev; mfd->dest = DISPLAY_TV; /* * alloc panel device data */ if (platform_device_add_data (mdp_dev, pdev->dev.platform_data, sizeof(struct msm_fb_panel_data))) { pr_err("tvenc_probe: platform_device_add_data failed!\n"); platform_device_put(mdp_dev); return -ENOMEM; } /* * data chain */ pdata = mdp_dev->dev.platform_data; pdata->on = tvenc_on; pdata->off = tvenc_off; pdata->next = pdev; /* * get/set panel specific fb info */ mfd->panel_info = pdata->panel_info; #ifdef CONFIG_FB_MSM_MDP40 mfd->fb_imgType = MDP_RGB_565; /* base layer */ #else mfd->fb_imgType = MDP_YCRYCB_H2V1; #endif #ifdef CONFIG_MSM_BUS_SCALING if (!tvenc_bus_scale_handle && tvenc_pdata && tvenc_pdata->bus_scale_table) { tvenc_bus_scale_handle = msm_bus_scale_register_client( tvenc_pdata->bus_scale_table); if (!tvenc_bus_scale_handle) { printk(KERN_ERR "%s not able to get bus scale\n", __func__); } } #endif /* * set driver data */ platform_set_drvdata(mdp_dev, mfd); /* * register in mdp 
driver */ rc = platform_device_add(mdp_dev); if (rc) goto tvenc_probe_err; pm_runtime_set_active(&pdev->dev); pm_runtime_enable(&pdev->dev); pdev_list[pdev_list_cnt++] = pdev; return 0; tvenc_probe_err: #ifdef CONFIG_MSM_BUS_SCALING if (tvenc_pdata && tvenc_pdata->bus_scale_table && tvenc_bus_scale_handle > 0) { msm_bus_scale_unregister_client(tvenc_bus_scale_handle); tvenc_bus_scale_handle = 0; } #endif platform_device_put(mdp_dev); return rc; } static int tvenc_remove(struct platform_device *pdev) { struct msm_fb_data_type *mfd; mfd = platform_get_drvdata(pdev); #ifdef CONFIG_MSM_BUS_SCALING if (tvenc_pdata && tvenc_pdata->bus_scale_table && tvenc_bus_scale_handle > 0) { msm_bus_scale_unregister_client(tvenc_bus_scale_handle); tvenc_bus_scale_handle = 0; } #else clk_put(mfd->ebi1_clk); #endif pm_runtime_disable(&pdev->dev); return 0; } static int tvenc_register_driver(void) { return platform_driver_register(&tvenc_driver); } static int __init tvenc_driver_init(void) { return tvenc_register_driver(); } module_init(tvenc_driver_init);
gpl-2.0
StelixROM/android_kernel_lge_mako
drivers/infiniband/hw/qib/qib_verbs.c
4641
61430
/* * Copyright (c) 2006, 2007, 2008, 2009, 2010 QLogic Corporation. * All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <rdma/ib_mad.h> #include <rdma/ib_user_verbs.h> #include <linux/io.h> #include <linux/module.h> #include <linux/utsname.h> #include <linux/rculist.h> #include <linux/mm.h> #include <linux/random.h> #include "qib.h" #include "qib_common.h" static unsigned int ib_qib_qp_table_size = 256; module_param_named(qp_table_size, ib_qib_qp_table_size, uint, S_IRUGO); MODULE_PARM_DESC(qp_table_size, "QP table size"); unsigned int ib_qib_lkey_table_size = 16; module_param_named(lkey_table_size, ib_qib_lkey_table_size, uint, S_IRUGO); MODULE_PARM_DESC(lkey_table_size, "LKEY table size in bits (2^n, 1 <= n <= 23)"); static unsigned int ib_qib_max_pds = 0xFFFF; module_param_named(max_pds, ib_qib_max_pds, uint, S_IRUGO); MODULE_PARM_DESC(max_pds, "Maximum number of protection domains to support"); static unsigned int ib_qib_max_ahs = 0xFFFF; module_param_named(max_ahs, ib_qib_max_ahs, uint, S_IRUGO); MODULE_PARM_DESC(max_ahs, "Maximum number of address handles to support"); unsigned int ib_qib_max_cqes = 0x2FFFF; module_param_named(max_cqes, ib_qib_max_cqes, uint, S_IRUGO); MODULE_PARM_DESC(max_cqes, "Maximum number of completion queue entries to support"); unsigned int ib_qib_max_cqs = 0x1FFFF; module_param_named(max_cqs, ib_qib_max_cqs, uint, S_IRUGO); MODULE_PARM_DESC(max_cqs, "Maximum number of completion queues to support"); unsigned int ib_qib_max_qp_wrs = 0x3FFF; module_param_named(max_qp_wrs, ib_qib_max_qp_wrs, uint, S_IRUGO); MODULE_PARM_DESC(max_qp_wrs, "Maximum number of QP WRs to support"); unsigned int ib_qib_max_qps = 16384; module_param_named(max_qps, ib_qib_max_qps, uint, S_IRUGO); MODULE_PARM_DESC(max_qps, "Maximum number of QPs to support"); unsigned int ib_qib_max_sges = 0x60; module_param_named(max_sges, ib_qib_max_sges, uint, S_IRUGO); MODULE_PARM_DESC(max_sges, "Maximum number of SGEs to support"); unsigned int ib_qib_max_mcast_grps = 16384; module_param_named(max_mcast_grps, ib_qib_max_mcast_grps, uint, S_IRUGO); MODULE_PARM_DESC(max_mcast_grps, 
"Maximum number of multicast groups to support"); unsigned int ib_qib_max_mcast_qp_attached = 16; module_param_named(max_mcast_qp_attached, ib_qib_max_mcast_qp_attached, uint, S_IRUGO); MODULE_PARM_DESC(max_mcast_qp_attached, "Maximum number of attached QPs to support"); unsigned int ib_qib_max_srqs = 1024; module_param_named(max_srqs, ib_qib_max_srqs, uint, S_IRUGO); MODULE_PARM_DESC(max_srqs, "Maximum number of SRQs to support"); unsigned int ib_qib_max_srq_sges = 128; module_param_named(max_srq_sges, ib_qib_max_srq_sges, uint, S_IRUGO); MODULE_PARM_DESC(max_srq_sges, "Maximum number of SRQ SGEs to support"); unsigned int ib_qib_max_srq_wrs = 0x1FFFF; module_param_named(max_srq_wrs, ib_qib_max_srq_wrs, uint, S_IRUGO); MODULE_PARM_DESC(max_srq_wrs, "Maximum number of SRQ WRs support"); static unsigned int ib_qib_disable_sma; module_param_named(disable_sma, ib_qib_disable_sma, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(disable_sma, "Disable the SMA"); /* * Note that it is OK to post send work requests in the SQE and ERR * states; qib_do_send() will process them and generate error * completions as per IB 1.2 C10-96. */ const int ib_qib_state_ops[IB_QPS_ERR + 1] = { [IB_QPS_RESET] = 0, [IB_QPS_INIT] = QIB_POST_RECV_OK, [IB_QPS_RTR] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK, [IB_QPS_RTS] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK | QIB_PROCESS_NEXT_SEND_OK, [IB_QPS_SQD] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | QIB_POST_SEND_OK | QIB_PROCESS_SEND_OK, [IB_QPS_SQE] = QIB_POST_RECV_OK | QIB_PROCESS_RECV_OK | QIB_POST_SEND_OK | QIB_FLUSH_SEND, [IB_QPS_ERR] = QIB_POST_RECV_OK | QIB_FLUSH_RECV | QIB_POST_SEND_OK | QIB_FLUSH_SEND, }; struct qib_ucontext { struct ib_ucontext ibucontext; }; static inline struct qib_ucontext *to_iucontext(struct ib_ucontext *ibucontext) { return container_of(ibucontext, struct qib_ucontext, ibucontext); } /* * Translate ib_wr_opcode into ib_wc_opcode. 
*/ const enum ib_wc_opcode ib_qib_wc_opcode[] = { [IB_WR_RDMA_WRITE] = IB_WC_RDMA_WRITE, [IB_WR_RDMA_WRITE_WITH_IMM] = IB_WC_RDMA_WRITE, [IB_WR_SEND] = IB_WC_SEND, [IB_WR_SEND_WITH_IMM] = IB_WC_SEND, [IB_WR_RDMA_READ] = IB_WC_RDMA_READ, [IB_WR_ATOMIC_CMP_AND_SWP] = IB_WC_COMP_SWAP, [IB_WR_ATOMIC_FETCH_AND_ADD] = IB_WC_FETCH_ADD }; /* * System image GUID. */ __be64 ib_qib_sys_image_guid; /** * qib_copy_sge - copy data to SGE memory * @ss: the SGE state * @data: the data to copy * @length: the length of the data */ void qib_copy_sge(struct qib_sge_state *ss, void *data, u32 length, int release) { struct qib_sge *sge = &ss->sge; while (length) { u32 len = sge->length; if (len > length) len = length; if (len > sge->sge_length) len = sge->sge_length; BUG_ON(len == 0); memcpy(sge->vaddr, data, len); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (sge->sge_length == 0) { if (release) atomic_dec(&sge->mr->refcount); if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { if (++sge->n >= QIB_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } data += len; length -= len; } } /** * qib_skip_sge - skip over SGE memory - XXX almost dup of prev func * @ss: the SGE state * @length: the number of bytes to skip */ void qib_skip_sge(struct qib_sge_state *ss, u32 length, int release) { struct qib_sge *sge = &ss->sge; while (length) { u32 len = sge->length; if (len > length) len = length; if (len > sge->sge_length) len = sge->sge_length; BUG_ON(len == 0); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (sge->sge_length == 0) { if (release) atomic_dec(&sge->mr->refcount); if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { if (++sge->n >= QIB_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; 
sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } length -= len; } } /* * Count the number of DMA descriptors needed to send length bytes of data. * Don't modify the qib_sge_state to get the count. * Return zero if any of the segments is not aligned. */ static u32 qib_count_sge(struct qib_sge_state *ss, u32 length) { struct qib_sge *sg_list = ss->sg_list; struct qib_sge sge = ss->sge; u8 num_sge = ss->num_sge; u32 ndesc = 1; /* count the header */ while (length) { u32 len = sge.length; if (len > length) len = length; if (len > sge.sge_length) len = sge.sge_length; BUG_ON(len == 0); if (((long) sge.vaddr & (sizeof(u32) - 1)) || (len != length && (len & (sizeof(u32) - 1)))) { ndesc = 0; break; } ndesc++; sge.vaddr += len; sge.length -= len; sge.sge_length -= len; if (sge.sge_length == 0) { if (--num_sge) sge = *sg_list++; } else if (sge.length == 0 && sge.mr->lkey) { if (++sge.n >= QIB_SEGSZ) { if (++sge.m >= sge.mr->mapsz) break; sge.n = 0; } sge.vaddr = sge.mr->map[sge.m]->segs[sge.n].vaddr; sge.length = sge.mr->map[sge.m]->segs[sge.n].length; } length -= len; } return ndesc; } /* * Copy from the SGEs to the data buffer. 
*/ static void qib_copy_from_sge(void *data, struct qib_sge_state *ss, u32 length) { struct qib_sge *sge = &ss->sge; while (length) { u32 len = sge->length; if (len > length) len = length; if (len > sge->sge_length) len = sge->sge_length; BUG_ON(len == 0); memcpy(data, sge->vaddr, len); sge->vaddr += len; sge->length -= len; sge->sge_length -= len; if (sge->sge_length == 0) { if (--ss->num_sge) *sge = *ss->sg_list++; } else if (sge->length == 0 && sge->mr->lkey) { if (++sge->n >= QIB_SEGSZ) { if (++sge->m >= sge->mr->mapsz) break; sge->n = 0; } sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr; sge->length = sge->mr->map[sge->m]->segs[sge->n].length; } data += len; length -= len; } } /** * qib_post_one_send - post one RC, UC, or UD send work request * @qp: the QP to post on * @wr: the work request to send */ static int qib_post_one_send(struct qib_qp *qp, struct ib_send_wr *wr) { struct qib_swqe *wqe; u32 next; int i; int j; int acc; int ret; unsigned long flags; struct qib_lkey_table *rkt; struct qib_pd *pd; spin_lock_irqsave(&qp->s_lock, flags); /* Check that state is OK to post send. */ if (unlikely(!(ib_qib_state_ops[qp->state] & QIB_POST_SEND_OK))) goto bail_inval; /* IB spec says that num_sge == 0 is OK. */ if (wr->num_sge > qp->s_max_sge) goto bail_inval; /* * Don't allow RDMA reads or atomic operations on UC or * undefined operations. * Make sure buffer is large enough to hold the result for atomics. 
*/ if (wr->opcode == IB_WR_FAST_REG_MR) { if (qib_fast_reg_mr(qp, wr)) goto bail_inval; } else if (qp->ibqp.qp_type == IB_QPT_UC) { if ((unsigned) wr->opcode >= IB_WR_RDMA_READ) goto bail_inval; } else if (qp->ibqp.qp_type != IB_QPT_RC) { /* Check IB_QPT_SMI, IB_QPT_GSI, IB_QPT_UD opcode */ if (wr->opcode != IB_WR_SEND && wr->opcode != IB_WR_SEND_WITH_IMM) goto bail_inval; /* Check UD destination address PD */ if (qp->ibqp.pd != wr->wr.ud.ah->pd) goto bail_inval; } else if ((unsigned) wr->opcode > IB_WR_ATOMIC_FETCH_AND_ADD) goto bail_inval; else if (wr->opcode >= IB_WR_ATOMIC_CMP_AND_SWP && (wr->num_sge == 0 || wr->sg_list[0].length < sizeof(u64) || wr->sg_list[0].addr & (sizeof(u64) - 1))) goto bail_inval; else if (wr->opcode >= IB_WR_RDMA_READ && !qp->s_max_rd_atomic) goto bail_inval; next = qp->s_head + 1; if (next >= qp->s_size) next = 0; if (next == qp->s_last) { ret = -ENOMEM; goto bail; } rkt = &to_idev(qp->ibqp.device)->lk_table; pd = to_ipd(qp->ibqp.pd); wqe = get_swqe_ptr(qp, qp->s_head); wqe->wr = *wr; wqe->length = 0; j = 0; if (wr->num_sge) { acc = wr->opcode >= IB_WR_RDMA_READ ? 
IB_ACCESS_LOCAL_WRITE : 0; for (i = 0; i < wr->num_sge; i++) { u32 length = wr->sg_list[i].length; int ok; if (length == 0) continue; ok = qib_lkey_ok(rkt, pd, &wqe->sg_list[j], &wr->sg_list[i], acc); if (!ok) goto bail_inval_free; wqe->length += length; j++; } wqe->wr.num_sge = j; } if (qp->ibqp.qp_type == IB_QPT_UC || qp->ibqp.qp_type == IB_QPT_RC) { if (wqe->length > 0x80000000U) goto bail_inval_free; } else if (wqe->length > (dd_from_ibdev(qp->ibqp.device)->pport + qp->port_num - 1)->ibmtu) goto bail_inval_free; else atomic_inc(&to_iah(wr->wr.ud.ah)->refcount); wqe->ssn = qp->s_ssn++; qp->s_head = next; ret = 0; goto bail; bail_inval_free: while (j) { struct qib_sge *sge = &wqe->sg_list[--j]; atomic_dec(&sge->mr->refcount); } bail_inval: ret = -EINVAL; bail: spin_unlock_irqrestore(&qp->s_lock, flags); return ret; } /** * qib_post_send - post a send on a QP * @ibqp: the QP to post the send on * @wr: the list of work requests to post * @bad_wr: the first bad WR is put here * * This may be called from interrupt context. */ static int qib_post_send(struct ib_qp *ibqp, struct ib_send_wr *wr, struct ib_send_wr **bad_wr) { struct qib_qp *qp = to_iqp(ibqp); int err = 0; for (; wr; wr = wr->next) { err = qib_post_one_send(qp, wr); if (err) { *bad_wr = wr; goto bail; } } /* Try to do the send work in the caller's context. */ qib_do_send(&qp->s_work); bail: return err; } /** * qib_post_receive - post a receive on a QP * @ibqp: the QP to post the receive on * @wr: the WR to post * @bad_wr: the first bad WR is put here * * This may be called from interrupt context. */ static int qib_post_receive(struct ib_qp *ibqp, struct ib_recv_wr *wr, struct ib_recv_wr **bad_wr) { struct qib_qp *qp = to_iqp(ibqp); struct qib_rwq *wq = qp->r_rq.wq; unsigned long flags; int ret; /* Check that state is OK to post receive. 
*/ if (!(ib_qib_state_ops[qp->state] & QIB_POST_RECV_OK) || !wq) { *bad_wr = wr; ret = -EINVAL; goto bail; } for (; wr; wr = wr->next) { struct qib_rwqe *wqe; u32 next; int i; if ((unsigned) wr->num_sge > qp->r_rq.max_sge) { *bad_wr = wr; ret = -EINVAL; goto bail; } spin_lock_irqsave(&qp->r_rq.lock, flags); next = wq->head + 1; if (next >= qp->r_rq.size) next = 0; if (next == wq->tail) { spin_unlock_irqrestore(&qp->r_rq.lock, flags); *bad_wr = wr; ret = -ENOMEM; goto bail; } wqe = get_rwqe_ptr(&qp->r_rq, wq->head); wqe->wr_id = wr->wr_id; wqe->num_sge = wr->num_sge; for (i = 0; i < wr->num_sge; i++) wqe->sg_list[i] = wr->sg_list[i]; /* Make sure queue entry is written before the head index. */ smp_wmb(); wq->head = next; spin_unlock_irqrestore(&qp->r_rq.lock, flags); } ret = 0; bail: return ret; } /** * qib_qp_rcv - processing an incoming packet on a QP * @rcd: the context pointer * @hdr: the packet header * @has_grh: true if the packet has a GRH * @data: the packet data * @tlen: the packet length * @qp: the QP the packet came on * * This is called from qib_ib_rcv() to process an incoming packet * for the given QP. * Called at interrupt level. */ static void qib_qp_rcv(struct qib_ctxtdata *rcd, struct qib_ib_header *hdr, int has_grh, void *data, u32 tlen, struct qib_qp *qp) { struct qib_ibport *ibp = &rcd->ppd->ibport_data; spin_lock(&qp->r_lock); /* Check for valid receive state. 
	 */
	if (!(ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK)) {
		ibp->n_pkt_drops++;
		goto unlock;
	}

	/* Dispatch by QP type to the protocol-specific receive handler. */
	switch (qp->ibqp.qp_type) {
	case IB_QPT_SMI:
	case IB_QPT_GSI:
		/* SMA traffic can be administratively disabled. */
		if (ib_qib_disable_sma)
			break;
		/* FALLTHROUGH */
	case IB_QPT_UD:
		qib_ud_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_RC:
		qib_rc_rcv(rcd, hdr, has_grh, data, tlen, qp);
		break;

	case IB_QPT_UC:
		qib_uc_rcv(ibp, hdr, has_grh, data, tlen, qp);
		break;

	default:
		break;
	}

unlock:
	spin_unlock(&qp->r_lock);
}

/**
 * qib_ib_rcv - process an incoming packet
 * @rcd: the context pointer
 * @rhdr: the header of the packet
 * @data: the packet payload
 * @tlen: the packet length
 *
 * This is called from qib_kreceive() to process an incoming packet at
 * interrupt level. Tlen is the length of the header + data + CRC in bytes.
 */
void qib_ib_rcv(struct qib_ctxtdata *rcd, void *rhdr, void *data, u32 tlen)
{
	struct qib_pportdata *ppd = rcd->ppd;
	struct qib_ibport *ibp = &ppd->ibport_data;
	struct qib_ib_header *hdr = rhdr;
	struct qib_other_headers *ohdr;
	struct qib_qp *qp;
	u32 qp_num;
	int lnh;
	u8 opcode;
	u16 lid;

	/* 24 == LRH+BTH+CRC */
	if (unlikely(tlen < 24))
		goto drop;

	/* Check for a valid destination LID (see ch. 7.11.1). */
	lid = be16_to_cpu(hdr->lrh[1]);
	if (lid < QIB_MULTICAST_LID_BASE) {
		/* Mask off the LMC bits before comparing to our LID. */
		lid &= ~((1 << ppd->lmc) - 1);
		if (unlikely(lid != ppd->lid))
			goto drop;
	}

	/* Check for GRH */
	lnh = be16_to_cpu(hdr->lrh[0]) & 3;
	if (lnh == QIB_LRH_BTH)
		ohdr = &hdr->u.oth;
	else if (lnh == QIB_LRH_GRH) {
		u32 vtf;

		ohdr = &hdr->u.l.oth;
		if (hdr->u.l.grh.next_hdr != IB_GRH_NEXT_HDR)
			goto drop;
		vtf = be32_to_cpu(hdr->u.l.grh.version_tclass_flow);
		if ((vtf >> IB_GRH_VERSION_SHIFT) != IB_GRH_VERSION)
			goto drop;
	} else
		goto drop;

	/* Per-opcode receive statistics. */
	opcode = be32_to_cpu(ohdr->bth[0]) >> 24;
	ibp->opstats[opcode & 0x7f].n_bytes += tlen;
	ibp->opstats[opcode & 0x7f].n_packets++;

	/* Get the destination QP number.
	 */
	qp_num = be32_to_cpu(ohdr->bth[1]) & QIB_QPN_MASK;
	if (qp_num == QIB_MULTICAST_QPN) {
		struct qib_mcast *mcast;
		struct qib_mcast_qp *p;

		/* Multicast packets must carry a GRH. */
		if (lnh != QIB_LRH_GRH)
			goto drop;
		mcast = qib_mcast_find(ibp, &hdr->u.l.grh.dgid);
		if (mcast == NULL)
			goto drop;
		ibp->n_multicast_rcv++;
		/* Deliver a copy of the packet to every attached QP. */
		list_for_each_entry_rcu(p, &mcast->qp_list, list)
			qib_qp_rcv(rcd, hdr, 1, data, tlen, p->qp);
		/*
		 * Notify qib_multicast_detach() if it is waiting for us
		 * to finish.
		 */
		if (atomic_dec_return(&mcast->refcount) <= 1)
			wake_up(&mcast->wait);
	} else {
		/*
		 * Drop the cached (lookaside) QP reference if this packet
		 * is addressed to a different QP number.
		 */
		if (rcd->lookaside_qp) {
			if (rcd->lookaside_qpn != qp_num) {
				if (atomic_dec_and_test(
					&rcd->lookaside_qp->refcount))
					wake_up(
					 &rcd->lookaside_qp->wait);
				rcd->lookaside_qp = NULL;
			}
		}
		if (!rcd->lookaside_qp) {
			qp = qib_lookup_qpn(ibp, qp_num);
			if (!qp)
				goto drop;
			/* Cache the QP for subsequent packets. */
			rcd->lookaside_qp = qp;
			rcd->lookaside_qpn = qp_num;
		} else
			qp = rcd->lookaside_qp;
		ibp->n_unicast_rcv++;
		qib_qp_rcv(rcd, hdr, lnh == QIB_LRH_GRH, data, tlen, qp);
	}
	return;

drop:
	ibp->n_pkt_drops++;
}

/*
 * This is called from a timer to check for QPs
 * which need kernel memory in order to send a packet.
 */
static void mem_timer(unsigned long data)
{
	struct qib_ibdev *dev = (struct qib_ibdev *) data;
	struct list_head *list = &dev->memwait;
	struct qib_qp *qp = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	if (!list_empty(list)) {
		/* Wake one waiter per tick; rearm if more remain. */
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		if (!list_empty(list))
			mod_timer(&dev->mem_timer, jiffies + 1);
	}
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	if (qp) {
		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_KMEM) {
			qp->s_flags &= ~QIB_S_WAIT_KMEM;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/* Advance an SGE state by 'length' bytes of consumed data. */
static void update_sge(struct qib_sge_state *ss, u32 length)
{
	struct qib_sge *sge = &ss->sge;

	sge->vaddr += length;
	sge->length -= length;
	sge->sge_length -= length;
	if (sge->sge_length == 0) {
		/* This SGE is exhausted; move on to the next one. */
		if (--ss->num_sge)
			*sge = *ss->sg_list++;
	} else if (sge->length == 0 && sge->mr->lkey) {
		/* Step to the next segment of the memory region. */
		if (++sge->n >= QIB_SEGSZ) {
			if (++sge->m >= sge->mr->mapsz)
				return;
			sge->n = 0;
		}
		sge->vaddr = sge->mr->map[sge->m]->segs[sge->n].vaddr;
		sge->length = sge->mr->map[sge->m]->segs[sge->n].length;
	}
}

/*
 * Helpers for assembling unaligned payload bytes into 32-bit PIO words;
 * the shift directions differ with host endianness.
 */
#ifdef __LITTLE_ENDIAN
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data <<= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data >>= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#else
static inline u32 get_upper_bits(u32 data, u32 shift)
{
	return data << shift;
}

static inline u32 set_upper_bits(u32 data, u32 shift)
{
	return data >> shift;
}

static inline u32 clear_upper_bytes(u32 data, u32 n, u32 off)
{
	data >>= ((sizeof(u32) - n) * BITS_PER_BYTE);
	data <<= ((sizeof(u32) - n - off) * BITS_PER_BYTE);
	return data;
}
#endif

/*
 * Copy 'length' bytes described by the SGE state into a PIO buffer,
 * handling unaligned source addresses and (when flush_wc is set)
 * write-combining flushes so the final trigger word is written last.
 */
static void copy_io(u32 __iomem *piobuf, struct qib_sge_state *ss,
		    u32 length, unsigned flush_wc)
{
	u32 extra = 0;
	u32 data = 0;
	u32 last;

	while (1) {
		u32 len = ss->sge.length;
		u32 off;

		if (len > length)
			len = length;
		if (len > ss->sge.sge_length)
			len = ss->sge.sge_length;
		BUG_ON(len == 0);
		/* If the source address is not aligned, try to align it. */
		off = (unsigned long)ss->sge.vaddr & (sizeof(u32) - 1);
		if (off) {
			u32 *addr = (u32 *)((unsigned long)ss->sge.vaddr &
					    ~(sizeof(u32) - 1));
			u32 v = get_upper_bits(*addr, off * BITS_PER_BYTE);
			u32 y;

			y = sizeof(u32) - off;
			if (len > y)
				len = y;
			if (len + extra >= sizeof(u32)) {
				data |= set_upper_bits(v, extra *
						       BITS_PER_BYTE);
				len = sizeof(u32) - extra;
				if (len == length) {
					last = data;
					break;
				}
				__raw_writel(data, piobuf);
				piobuf++;
				extra = 0;
				data = 0;
			} else {
				/* Clear unused upper bytes */
				data |= clear_upper_bytes(v, len, extra);
				if (len == length) {
					last = data;
					break;
				}
				extra += len;
			}
		} else if (extra) {
			/* Source address is aligned. */
			u32 *addr = (u32 *) ss->sge.vaddr;
			int shift = extra * BITS_PER_BYTE;
			int ushift = 32 - shift;
			u32 l = len;

			while (l >= sizeof(u32)) {
				u32 v = *addr;

				data |= set_upper_bits(v, shift);
				__raw_writel(data, piobuf);
				data = get_upper_bits(v, ushift);
				piobuf++;
				addr++;
				l -= sizeof(u32);
			}
			/*
			 * We still have 'extra' number of bytes leftover.
			 */
			if (l) {
				u32 v = *addr;

				if (l + extra >= sizeof(u32)) {
					data |= set_upper_bits(v, shift);
					len -= l + extra - sizeof(u32);
					if (len == length) {
						last = data;
						break;
					}
					__raw_writel(data, piobuf);
					piobuf++;
					extra = 0;
					data = 0;
				} else {
					/* Clear unused upper bytes */
					data |= clear_upper_bytes(v, l, extra);
					if (len == length) {
						last = data;
						break;
					}
					extra += l;
				}
			} else if (len == length) {
				last = data;
				break;
			}
		} else if (len == length) {
			u32 w;

			/*
			 * Need to round up for the last dword in the
			 * packet.
			 */
			w = (len + 3) >> 2;
			qib_pio_copy(piobuf, ss->sge.vaddr, w - 1);
			piobuf += w - 1;
			last = ((u32 *) ss->sge.vaddr)[w - 1];
			break;
		} else {
			u32 w = len >> 2;

			qib_pio_copy(piobuf, ss->sge.vaddr, w);
			piobuf += w;
			extra = len & (sizeof(u32) - 1);
			if (extra) {
				u32 v = ((u32 *) ss->sge.vaddr)[w];

				/* Clear unused upper bytes */
				data = clear_upper_bytes(v, extra, 0);
			}
		}
		update_sge(ss, len);
		length -= len;
	}
	/* Update address before sending packet. */
	update_sge(ss, length);
	if (flush_wc) {
		/* must flush early everything before trigger word */
		qib_flush_wc();
		__raw_writel(last, piobuf);
		/* be sure trigger word is written */
		qib_flush_wc();
	} else
		__raw_writel(last, piobuf);
}

/*
 * Slow path of get_txreq(): retake both locks; if the free list is still
 * empty, queue the QP on the tx wait list and return -EBUSY.
 */
static noinline struct qib_verbs_txreq *__get_txreq(struct qib_ibdev *dev,
						    struct qib_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&qp->s_lock, flags);
	spin_lock(&dev->pending_lock);

	if (!list_empty(&dev->txreq_free)) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock(&dev->pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK &&
		    list_empty(&qp->iowait)) {
			dev->n_txwait++;
			qp->s_flags |= QIB_S_WAIT_TX;
			list_add_tail(&qp->iowait, &dev->txwait);
		}
		qp->s_flags &= ~QIB_S_BUSY;
		spin_unlock(&dev->pending_lock);
		spin_unlock_irqrestore(&qp->s_lock, flags);
		tx = ERR_PTR(-EBUSY);
	}
	return tx;
}

/* Fast path: pop a free tx request, or fall back to __get_txreq(). */
static inline struct qib_verbs_txreq *get_txreq(struct qib_ibdev *dev,
						struct qib_qp *qp)
{
	struct qib_verbs_txreq *tx;
	unsigned long flags;

	spin_lock_irqsave(&dev->pending_lock, flags);
	/* assume the list non empty */
	if (likely(!list_empty(&dev->txreq_free))) {
		struct list_head *l = dev->txreq_free.next;

		list_del(l);
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		tx = list_entry(l, struct qib_verbs_txreq, txreq.list);
	} else {
		/* call slow path to get the extra lock */
		spin_unlock_irqrestore(&dev->pending_lock, flags);
		tx = __get_txreq(dev, qp);
	}
	return tx;
}

/*
 * Release a tx request: drop its QP and MR references, free any DMA
 * mapping/bounce buffer, return it to the free list, and wake the first
 * QP waiting for a free request, if any.
 */
void qib_put_txreq(struct qib_verbs_txreq *tx)
{
	struct qib_ibdev *dev;
	struct qib_qp *qp;
	unsigned long flags;

	qp = tx->qp;
	dev = to_idev(qp->ibqp.device);

	if (atomic_dec_and_test(&qp->refcount))
		wake_up(&qp->wait);
	if (tx->mr) {
		atomic_dec(&tx->mr->refcount);
		tx->mr = NULL;
	}
	if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF) {
		tx->txreq.flags &= ~QIB_SDMA_TXREQ_F_FREEBUF;
		dma_unmap_single(&dd_from_dev(dev)->pcidev->dev,
				 tx->txreq.addr, tx->hdr_dwords << 2,
				 DMA_TO_DEVICE);
		kfree(tx->align_buf);
	}

	spin_lock_irqsave(&dev->pending_lock, flags);

	/* Put struct back on free list */
	list_add(&tx->txreq.list, &dev->txreq_free);

	if (!list_empty(&dev->txwait)) {
		/* Wake up first QP wanting a free struct */
		qp = list_entry(dev->txwait.next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		spin_unlock_irqrestore(&dev->pending_lock, flags);

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_TX) {
			qp->s_flags &= ~QIB_S_WAIT_TX;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	} else
		spin_unlock_irqrestore(&dev->pending_lock, flags);
}

/*
 * This is called when there are send DMA descriptors that might be
 * available.
 *
 * This is called with ppd->sdma_lock held.
 */
void qib_verbs_sdma_desc_avail(struct qib_pportdata *ppd, unsigned avail)
{
	struct qib_qp *qp, *nqp;
	struct qib_qp *qps[20];
	struct qib_ibdev *dev;
	unsigned i, n;

	n = 0;
	dev = &ppd->dd->verbs_dev;
	spin_lock(&dev->pending_lock);

	/* Search wait list for first QP wanting DMA descriptors.
	 */
	list_for_each_entry_safe(qp, nqp, &dev->dmawait, iowait) {
		if (qp->port_num != ppd->port)
			continue;
		if (n == ARRAY_SIZE(qps))
			break;
		/* Stop once the available descriptors are spoken for. */
		if (qp->s_tx->txreq.sg_count > avail)
			break;
		avail -= qp->s_tx->txreq.sg_count;
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}

	spin_unlock(&dev->pending_lock);

	/* Reschedule the selected QPs outside the pending_lock. */
	for (i = 0; i < n; i++) {
		qp = qps[i];
		spin_lock(&qp->s_lock);
		if (qp->s_flags & QIB_S_WAIT_DMA_DESC) {
			qp->s_flags &= ~QIB_S_WAIT_DMA_DESC;
			qib_schedule_send(qp);
		}
		spin_unlock(&qp->s_lock);
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/*
 * This is called with ppd->sdma_lock held.
 */
static void sdma_complete(struct qib_sdma_txreq *cookie, int status)
{
	struct qib_verbs_txreq *tx =
		container_of(cookie, struct qib_verbs_txreq, txreq);
	struct qib_qp *qp = tx->qp;

	spin_lock(&qp->s_lock);
	if (tx->wqe)
		qib_send_complete(qp, tx->wqe, IB_WC_SUCCESS);
	else if (qp->ibqp.qp_type == IB_QPT_RC) {
		struct qib_ib_header *hdr;

		/* The header lives in the bounce buffer or in pio_hdrs. */
		if (tx->txreq.flags & QIB_SDMA_TXREQ_F_FREEBUF)
			hdr = &tx->align_buf->hdr;
		else {
			struct qib_ibdev *dev = to_idev(qp->ibqp.device);

			hdr = &dev->pio_hdrs[tx->hdr_inx].hdr;
		}
		qib_rc_send_complete(qp, hdr);
	}
	if (atomic_dec_and_test(&qp->s_dma_busy)) {
		if (qp->state == IB_QPS_RESET)
			wake_up(&qp->wait_dma);
		else if (qp->s_flags & QIB_S_WAIT_DMA) {
			qp->s_flags &= ~QIB_S_WAIT_DMA;
			qib_schedule_send(qp);
		}
	}
	spin_unlock(&qp->s_lock);

	qib_put_txreq(tx);
}

/*
 * Queue the QP on the memwait list (arming mem_timer if needed) and
 * return -EBUSY, unless the QP is no longer in a state to send.
 */
static int wait_kmem(struct qib_ibdev *dev, struct qib_qp *qp)
{
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			if (list_empty(&dev->memwait))
				mod_timer(&dev->mem_timer, jiffies + 1);
			qp->s_flags |= QIB_S_WAIT_KMEM;
			list_add_tail(&qp->iowait, &dev->memwait);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);

	return ret;
}

/*
 * Send a packet using the send DMA engine: either resend a previously
 * constructed request, DMA directly from the SGE list, or fall back to
 * copying header + payload into a kmalloc'ed bounce buffer.
 */
static int qib_verbs_send_dma(struct qib_qp *qp, struct qib_ib_header *hdr,
			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd = dd_from_dev(dev);
	struct qib_ibport *ibp = to_iport(qp->ibqp.device, qp->port_num);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_verbs_txreq *tx;
	struct qib_pio_header *phdr;
	u32 control;
	u32 ndesc;
	int ret;

	tx = qp->s_tx;
	if (tx) {
		qp->s_tx = NULL;
		/* resend previously constructed packet */
		ret = qib_sdma_verbs_send(ppd, tx->ss, tx->dwords, tx);
		goto bail;
	}

	tx = get_txreq(dev, qp);
	if (IS_ERR(tx))
		goto bail_tx;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(hdr->lrh[0]) >> 12);
	tx->qp = qp;
	atomic_inc(&qp->refcount);
	tx->wqe = qp->s_wqe;
	tx->mr = qp->s_rdma_mr;
	if (qp->s_rdma_mr)
		qp->s_rdma_mr = NULL;
	tx->txreq.callback = sdma_complete;
	if (dd->flags & QIB_HAS_SDMA_TIMEOUT)
		tx->txreq.flags = QIB_SDMA_TXREQ_F_HEADTOHOST;
	else
		tx->txreq.flags = QIB_SDMA_TXREQ_F_INTREQ;
	if (plen + 1 > dd->piosize2kmax_dwords)
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_USELARGEBUF;

	if (len) {
		/*
		 * Don't try to DMA if it takes more descriptors than
		 * the queue holds.
		 */
		ndesc = qib_count_sge(ss, len);
		if (ndesc >= ppd->sdma_descq_cnt)
			ndesc = 0;
	} else
		ndesc = 1;
	if (ndesc) {
		phdr = &dev->pio_hdrs[tx->hdr_inx];
		phdr->pbc[0] = cpu_to_le32(plen);
		phdr->pbc[1] = cpu_to_le32(control);
		memcpy(&phdr->hdr, hdr, hdrwords << 2);
		tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEDESC;
		tx->txreq.sg_count = ndesc;
		tx->txreq.addr = dev->pio_hdrs_phys +
			tx->hdr_inx * sizeof(struct qib_pio_header);
		tx->hdr_dwords = hdrwords + 2; /* add PBC length */
		ret = qib_sdma_verbs_send(ppd, ss, dwords, tx);
		goto bail;
	}

	/* Allocate a buffer and copy the header and payload to it.
	 */
	tx->hdr_dwords = plen + 1;
	phdr = kmalloc(tx->hdr_dwords << 2, GFP_ATOMIC);
	if (!phdr)
		goto err_tx;
	phdr->pbc[0] = cpu_to_le32(plen);
	phdr->pbc[1] = cpu_to_le32(control);
	memcpy(&phdr->hdr, hdr, hdrwords << 2);
	qib_copy_from_sge((u32 *) &phdr->hdr + hdrwords, ss, len);

	tx->txreq.addr = dma_map_single(&dd->pcidev->dev, phdr,
					tx->hdr_dwords << 2, DMA_TO_DEVICE);
	if (dma_mapping_error(&dd->pcidev->dev, tx->txreq.addr))
		goto map_err;
	tx->align_buf = phdr;
	tx->txreq.flags |= QIB_SDMA_TXREQ_F_FREEBUF;
	tx->txreq.sg_count = 1;
	ret = qib_sdma_verbs_send(ppd, NULL, 0, tx);
	goto unaligned;

map_err:
	kfree(phdr);
err_tx:
	qib_put_txreq(tx);
	/* Out of memory: park the QP on the memwait list and retry later. */
	ret = wait_kmem(dev, qp);
unaligned:
	ibp->n_unaligned++;
bail:
	return ret;
bail_tx:
	ret = PTR_ERR(tx);
	goto bail;
}

/*
 * If we are now in the error state, return zero to flush the
 * send work request.
 */
static int no_bufs_available(struct qib_qp *qp)
{
	struct qib_ibdev *dev = to_idev(qp->ibqp.device);
	struct qib_devdata *dd;
	unsigned long flags;
	int ret = 0;

	/*
	 * Note that as soon as want_buffer() is called and
	 * possibly before it returns, qib_ib_piobufavail()
	 * could be called. Therefore, put QP on the I/O wait list before
	 * enabling the PIO avail interrupt.
	 */
	spin_lock_irqsave(&qp->s_lock, flags);
	if (ib_qib_state_ops[qp->state] & QIB_PROCESS_RECV_OK) {
		spin_lock(&dev->pending_lock);
		if (list_empty(&qp->iowait)) {
			dev->n_piowait++;
			qp->s_flags |= QIB_S_WAIT_PIO;
			list_add_tail(&qp->iowait, &dev->piowait);
			dd = dd_from_dev(dev);
			dd->f_wantpiobuf_intr(dd, 1);
		}
		spin_unlock(&dev->pending_lock);
		qp->s_flags &= ~QIB_S_BUSY;
		ret = -EBUSY;
	}
	spin_unlock_irqrestore(&qp->s_lock, flags);
	return ret;
}

/*
 * Send a packet with programmed I/O: write the PBC, then the header,
 * then the payload, observing write-combining flush requirements so
 * the trigger word is written last.
 */
static int qib_verbs_send_pio(struct qib_qp *qp, struct qib_ib_header *ibhdr,
			      u32 hdrwords, struct qib_sge_state *ss, u32 len,
			      u32 plen, u32 dwords)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	struct qib_pportdata *ppd = dd->pport + qp->port_num - 1;
	u32 *hdr = (u32 *) ibhdr;
	u32 __iomem *piobuf_orig;
	u32 __iomem *piobuf;
	u64 pbc;
	unsigned long flags;
	unsigned flush_wc;
	u32 control;
	u32 pbufn;

	control = dd->f_setpbc_control(ppd, plen, qp->s_srate,
				       be16_to_cpu(ibhdr->lrh[0]) >> 12);
	pbc = ((u64) control << 32) | plen;
	piobuf = dd->f_getsendbuf(ppd, pbc, &pbufn);
	if (unlikely(piobuf == NULL))
		return no_bufs_available(qp);

	/*
	 * Write the pbc.
	 * We have to flush after the PBC for correctness on some cpus
	 * or WC buffer can be written out of order.
	 */
	writeq(pbc, piobuf);
	piobuf_orig = piobuf;
	piobuf += 2;

	flush_wc = dd->flags & QIB_PIO_FLUSH_WC;
	if (len == 0) {
		/*
		 * If there is just the header portion, must flush before
		 * writing last word of header for correctness, and after
		 * the last header word (trigger word).
		 */
		if (flush_wc) {
			qib_flush_wc();
			qib_pio_copy(piobuf, hdr, hdrwords - 1);
			qib_flush_wc();
			__raw_writel(hdr[hdrwords - 1], piobuf + hdrwords - 1);
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, hdr, hdrwords);
		goto done;
	}

	if (flush_wc)
		qib_flush_wc();
	qib_pio_copy(piobuf, hdr, hdrwords);
	piobuf += hdrwords;

	/* The common case is aligned and contained in one segment.
	 */
	if (likely(ss->num_sge == 1 && len <= ss->sge.length &&
		   !((unsigned long)ss->sge.vaddr & (sizeof(u32) - 1)))) {
		u32 *addr = (u32 *) ss->sge.vaddr;

		/* Update address before sending packet. */
		update_sge(ss, len);
		if (flush_wc) {
			qib_pio_copy(piobuf, addr, dwords - 1);
			/* must flush early everything before trigger word */
			qib_flush_wc();
			__raw_writel(addr[dwords - 1], piobuf + dwords - 1);
			/* be sure trigger word is written */
			qib_flush_wc();
		} else
			qib_pio_copy(piobuf, addr, dwords);
		goto done;
	}
	/* Unaligned or multi-SGE payload: use the slow copy path. */
	copy_io(piobuf, ss, len, flush_wc);
done:
	if (dd->flags & QIB_USE_SPCL_TRIG) {
		u32 spcl_off = (pbufn >= dd->piobcnt2k) ? 2047 : 1023;

		qib_flush_wc();
		__raw_writel(0xaebecede, piobuf_orig + spcl_off);
	}
	qib_sendbuf_done(dd, pbufn);
	if (qp->s_rdma_mr) {
		atomic_dec(&qp->s_rdma_mr->refcount);
		qp->s_rdma_mr = NULL;
	}
	if (qp->s_wqe) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_send_complete(qp, qp->s_wqe, IB_WC_SUCCESS);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	} else if (qp->ibqp.qp_type == IB_QPT_RC) {
		spin_lock_irqsave(&qp->s_lock, flags);
		qib_rc_send_complete(qp, ibhdr);
		spin_unlock_irqrestore(&qp->s_lock, flags);
	}
	return 0;
}

/**
 * qib_verbs_send - send a packet
 * @qp: the QP to send on
 * @hdr: the packet header
 * @hdrwords: the number of 32-bit words in the header
 * @ss: the SGE to send
 * @len: the length of the packet in bytes
 *
 * Return zero if packet is sent or queued OK.
 * Return non-zero and clear qp->s_flags QIB_S_BUSY otherwise.
 */
int qib_verbs_send(struct qib_qp *qp, struct qib_ib_header *hdr,
		   u32 hdrwords, struct qib_sge_state *ss, u32 len)
{
	struct qib_devdata *dd = dd_from_ibdev(qp->ibqp.device);
	u32 plen;
	int ret;
	u32 dwords = (len + 3) >> 2;

	/*
	 * Calculate the send buffer trigger address.
	 * The +1 counts for the pbc control dword following the pbc length.
	 */
	plen = hdrwords + dwords + 1;

	/*
	 * VL15 packets (IB_QPT_SMI) will always use PIO, so we
	 * can defer SDMA restart until link goes ACTIVE without
	 * worrying about just how we got there.
	 */
	if (qp->ibqp.qp_type == IB_QPT_SMI ||
	    !(dd->flags & QIB_HAS_SEND_DMA))
		ret = qib_verbs_send_pio(qp, hdr, hdrwords, ss, len,
					 plen, dwords);
	else
		ret = qib_verbs_send_dma(qp, hdr, hdrwords, ss, len,
					 plen, dwords);

	return ret;
}

/*
 * Snapshot the port's word/packet/wait counters from the hardware.
 * Fails with -EINVAL if the hardware is absent or frozen.
 */
int qib_snapshot_counters(struct qib_pportdata *ppd, u64 *swords,
			  u64 *rwords, u64 *spkts, u64 *rpkts,
			  u64 *xmit_wait)
{
	int ret;
	struct qib_devdata *dd = ppd->dd;

	if (!(dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	*swords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	*rwords = dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	*spkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	*rpkts = dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	*xmit_wait = dd->f_portcntr(ppd, QIBPORTCNTR_SENDSTALL);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_get_counters - get various chip counters
 * @dd: the qlogic_ib device
 * @cntrs: counters are placed here
 *
 * Return the counters needed by recv_pma_get_portcounters().
 */
int qib_get_counters(struct qib_pportdata *ppd,
		     struct qib_verbs_counters *cntrs)
{
	int ret;

	if (!(ppd->dd->flags & QIB_PRESENT)) {
		/* no hardware, freeze, etc. */
		ret = -EINVAL;
		goto bail;
	}
	cntrs->symbol_error_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBSYMBOLERR);
	cntrs->link_error_recovery_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKERRRECOV);
	/*
	 * The link downed counter counts when the other side downs the
	 * connection. We add in the number of times we downed the link
	 * due to local link integrity errors to compensate.
	 */
	cntrs->link_downed_counter =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_IBLINKDOWN);
	/* Sum all receive-side error counters into port_rcv_errors. */
	cntrs->port_rcv_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXDROPPKT) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVOVFL) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERR_RLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_INVALIDRLEN) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLINK) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRICRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRVCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_ERRLPCRC) +
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_BADFORMAT);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXLOCALPHYERR);
	cntrs->port_rcv_errors +=
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RXVLERR);
	cntrs->port_rcv_remphys_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_RCVEBP);
	cntrs->port_xmit_discards =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_UNSUPVL);
	cntrs->port_xmit_data =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_WORDSEND);
	cntrs->port_rcv_data =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_WORDRCV);
	cntrs->port_xmit_packets =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PKTSEND);
	cntrs->port_rcv_packets =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PKTRCV);
	cntrs->local_link_integrity_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_LLI);
	cntrs->excessive_buffer_overrun_errors =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_EXCESSBUFOVFL);
	cntrs->vl15_dropped =
		ppd->dd->f_portcntr(ppd, QIBPORTCNTR_VL15PKTDROP);

	ret = 0;

bail:
	return ret;
}

/**
 * qib_ib_piobufavail - callback when a PIO buffer is available
 * @dd: the device pointer
 *
 * This is called from qib_intr() at interrupt level when a PIO buffer is
 * available after qib_verbs_send() returned an error that no buffers were
 * available. Disable the interrupt if there are no more QPs waiting.
 */
void qib_ib_piobufavail(struct qib_devdata *dd)
{
	struct qib_ibdev *dev = &dd->verbs_dev;
	struct list_head *list;
	struct qib_qp *qps[5];
	struct qib_qp *qp;
	unsigned long flags;
	unsigned i, n;

	list = &dev->piowait;
	n = 0;

	/*
	 * Note: checking that the piowait list is empty and clearing
	 * the buffer available interrupt needs to be atomic or we
	 * could end up with QPs on the wait list with the interrupt
	 * disabled.
	 */
	spin_lock_irqsave(&dev->pending_lock, flags);
	while (!list_empty(list)) {
		if (n == ARRAY_SIZE(qps))
			goto full;
		qp = list_entry(list->next, struct qib_qp, iowait);
		list_del_init(&qp->iowait);
		atomic_inc(&qp->refcount);
		qps[n++] = qp;
	}
	dd->f_wantpiobuf_intr(dd, 0);
full:
	spin_unlock_irqrestore(&dev->pending_lock, flags);

	/* Reschedule the dequeued QPs outside the pending_lock. */
	for (i = 0; i < n; i++) {
		qp = qps[i];

		spin_lock_irqsave(&qp->s_lock, flags);
		if (qp->s_flags & QIB_S_WAIT_PIO) {
			qp->s_flags &= ~QIB_S_WAIT_PIO;
			qib_schedule_send(qp);
		}
		spin_unlock_irqrestore(&qp->s_lock, flags);

		/* Notify qib_destroy_qp() if it is waiting.
		 */
		if (atomic_dec_and_test(&qp->refcount))
			wake_up(&qp->wait);
	}
}

/* ib_device query_device method: report device attributes and limits. */
static int qib_query_device(struct ib_device *ibdev,
			    struct ib_device_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibdev *dev = to_idev(ibdev);

	memset(props, 0, sizeof(*props));

	props->device_cap_flags = IB_DEVICE_BAD_PKEY_CNTR |
		IB_DEVICE_BAD_QKEY_CNTR | IB_DEVICE_SHUTDOWN_PORT |
		IB_DEVICE_SYS_IMAGE_GUID | IB_DEVICE_RC_RNR_NAK_GEN |
		IB_DEVICE_PORT_ACTIVE_EVENT | IB_DEVICE_SRQ_RESIZE;
	props->page_size_cap = PAGE_SIZE;
	props->vendor_id =
		QIB_SRC_OUI_1 << 16 | QIB_SRC_OUI_2 << 8 | QIB_SRC_OUI_3;
	props->vendor_part_id = dd->deviceid;
	props->hw_ver = dd->minrev;
	props->sys_image_guid = ib_qib_sys_image_guid;
	props->max_mr_size = ~0ULL;
	props->max_qp = ib_qib_max_qps;
	props->max_qp_wr = ib_qib_max_qp_wrs;
	props->max_sge = ib_qib_max_sges;
	props->max_cq = ib_qib_max_cqs;
	props->max_ah = ib_qib_max_ahs;
	props->max_cqe = ib_qib_max_cqes;
	props->max_mr = dev->lk_table.max;
	props->max_fmr = dev->lk_table.max;
	props->max_map_per_fmr = 32767;
	props->max_pd = ib_qib_max_pds;
	props->max_qp_rd_atom = QIB_MAX_RDMA_ATOMIC;
	props->max_qp_init_rd_atom = 255;
	/* props->max_res_rd_atom */
	props->max_srq = ib_qib_max_srqs;
	props->max_srq_wr = ib_qib_max_srq_wrs;
	props->max_srq_sge = ib_qib_max_srq_sges;
	/* props->local_ca_ack_delay */
	props->atomic_cap = IB_ATOMIC_GLOB;
	props->max_pkeys = qib_get_npkeys(dd);
	props->max_mcast_grp = ib_qib_max_mcast_grps;
	props->max_mcast_qp_attach = ib_qib_max_mcast_qp_attached;
	props->max_total_mcast_qp_attach = props->max_mcast_qp_attach *
		props->max_mcast_grp;

	return 0;
}

/* ib_device query_port method: report current port attributes. */
static int qib_query_port(struct ib_device *ibdev, u8 port,
			  struct ib_port_attr *props)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	enum ib_mtu mtu;
	u16 lid = ppd->lid;

	memset(props, 0, sizeof(*props));
	/* An unassigned LID is reported as the permissive LID. */
	props->lid = lid ? lid : be16_to_cpu(IB_LID_PERMISSIVE);
	props->lmc = ppd->lmc;
	props->sm_lid = ibp->sm_lid;
	props->sm_sl = ibp->sm_sl;
	props->state = dd->f_iblink_state(ppd->lastibcstat);
	props->phys_state = dd->f_ibphys_portstate(ppd->lastibcstat);
	props->port_cap_flags = ibp->port_cap_flags;
	props->gid_tbl_len = QIB_GUIDS_PER_PORT;
	props->max_msg_sz = 0x80000000;
	props->pkey_tbl_len = qib_get_npkeys(dd);
	props->bad_pkey_cntr = ibp->pkey_violations;
	props->qkey_viol_cntr = ibp->qkey_violations;
	props->active_width = ppd->link_width_active;
	/* See rate_show() */
	props->active_speed = ppd->link_speed_active;
	props->max_vl_num = qib_num_vls(ppd->vls_supported);
	props->init_type_reply = 0;

	props->max_mtu = qib_ibmtu ? qib_ibmtu : IB_MTU_4096;
	/* Map the byte MTU to the enum value; default to 2048. */
	switch (ppd->ibmtu) {
	case 4096:
		mtu = IB_MTU_4096;
		break;
	case 2048:
		mtu = IB_MTU_2048;
		break;
	case 1024:
		mtu = IB_MTU_1024;
		break;
	case 512:
		mtu = IB_MTU_512;
		break;
	case 256:
		mtu = IB_MTU_256;
		break;
	default:
		mtu = IB_MTU_2048;
	}
	props->active_mtu = mtu;
	props->subnet_timeout = ibp->subnet_timeout;

	return 0;
}

/* ib_device modify_device method: node description / sys image GUID. */
static int qib_modify_device(struct ib_device *device,
			     int device_modify_mask,
			     struct ib_device_modify *device_modify)
{
	struct qib_devdata *dd = dd_from_ibdev(device);
	unsigned i;
	int ret;

	if (device_modify_mask & ~(IB_DEVICE_MODIFY_SYS_IMAGE_GUID |
				   IB_DEVICE_MODIFY_NODE_DESC)) {
		ret = -EOPNOTSUPP;
		goto bail;
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_NODE_DESC) {
		memcpy(device->node_desc, device_modify->node_desc, 64);
		/* Raise the change event on every port. */
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_node_desc_chg(ibp);
		}
	}

	if (device_modify_mask & IB_DEVICE_MODIFY_SYS_IMAGE_GUID) {
		ib_qib_sys_image_guid =
			cpu_to_be64(device_modify->sys_image_guid);
		for (i = 0; i < dd->num_pports; i++) {
			struct qib_ibport *ibp = &dd->pport[i].ibport_data;

			qib_sys_guid_chg(ibp);
		}
	}

	ret = 0;

bail:
	return ret;
}

/* ib_device modify_port method: cap mask, shutdown, qkey counter reset. */
static int qib_modify_port(struct ib_device *ibdev, u8 port,
			   int port_modify_mask, struct ib_port_modify *props)
{
	struct qib_ibport *ibp = to_iport(ibdev, port);
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);

	ibp->port_cap_flags |= props->set_port_cap_mask;
	ibp->port_cap_flags &= ~props->clr_port_cap_mask;
	if (props->set_port_cap_mask || props->clr_port_cap_mask)
		qib_cap_mask_chg(ibp);
	if (port_modify_mask & IB_PORT_SHUTDOWN)
		qib_set_linkstate(ppd, QIB_IB_LINKDOWN);
	if (port_modify_mask & IB_PORT_RESET_QKEY_CNTR)
		ibp->qkey_violations = 0;
	return 0;
}

/*
 * ib_device query_gid method: GID 0 carries the port GUID, higher
 * indices carry the alternate GUIDs.
 */
static int qib_query_gid(struct ib_device *ibdev, u8 port,
			 int index, union ib_gid *gid)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret = 0;

	if (!port || port > dd->num_pports)
		ret = -EINVAL;
	else {
		struct qib_ibport *ibp = to_iport(ibdev, port);
		struct qib_pportdata *ppd = ppd_from_ibp(ibp);

		gid->global.subnet_prefix = ibp->gid_prefix;
		if (index == 0)
			gid->global.interface_id = ppd->guid;
		else if (index < QIB_GUIDS_PER_PORT)
			gid->global.interface_id = ibp->guids[index - 1];
		else
			ret = -EINVAL;
	}

	return ret;
}

/* Allocate a protection domain, enforcing the advertised PD limit. */
static struct ib_pd *qib_alloc_pd(struct ib_device *ibdev,
				  struct ib_ucontext *context,
				  struct ib_udata *udata)
{
	struct qib_ibdev *dev = to_idev(ibdev);
	struct qib_pd *pd;
	struct ib_pd *ret;

	/*
	 * This is actually totally arbitrary. Some correctness tests
	 * assume there's a maximum number of PDs that can be allocated.
	 * We don't actually have this limit, but we fail the test if
	 * we allow allocations of more than we report for this value.
	 */

	pd = kmalloc(sizeof *pd, GFP_KERNEL);
	if (!pd) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock(&dev->n_pds_lock);
	if (dev->n_pds_allocated == ib_qib_max_pds) {
		spin_unlock(&dev->n_pds_lock);
		kfree(pd);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_pds_allocated++;
	spin_unlock(&dev->n_pds_lock);

	/* ib_alloc_pd() will initialize pd->ibpd.
	 */
	pd->user = udata != NULL;

	ret = &pd->ibpd;

bail:
	return ret;
}

/* Free a protection domain and release its slot in the PD count. */
static int qib_dealloc_pd(struct ib_pd *ibpd)
{
	struct qib_pd *pd = to_ipd(ibpd);
	struct qib_ibdev *dev = to_idev(ibpd->device);

	spin_lock(&dev->n_pds_lock);
	dev->n_pds_allocated--;
	spin_unlock(&dev->n_pds_lock);

	kfree(pd);

	return 0;
}

/* Validate address-handle attributes; returns 0 if usable, -EINVAL if not. */
int qib_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr)
{
	/* A multicast address requires a GRH (see ch. 8.4.1). */
	if (ah_attr->dlid >= QIB_MULTICAST_LID_BASE &&
	    ah_attr->dlid != QIB_PERMISSIVE_LID &&
	    !(ah_attr->ah_flags & IB_AH_GRH))
		goto bail;
	if ((ah_attr->ah_flags & IB_AH_GRH) &&
	    ah_attr->grh.sgid_index >= QIB_GUIDS_PER_PORT)
		goto bail;
	if (ah_attr->dlid == 0)
		goto bail;
	if (ah_attr->port_num < 1 ||
	    ah_attr->port_num > ibdev->phys_port_cnt)
		goto bail;
	if (ah_attr->static_rate != IB_RATE_PORT_CURRENT &&
	    ib_rate_to_mult(ah_attr->static_rate) < 0)
		goto bail;
	if (ah_attr->sl > 15)
		goto bail;
	return 0;

bail:
	return -EINVAL;
}

/**
 * qib_create_ah - create an address handle
 * @pd: the protection domain
 * @ah_attr: the attributes of the AH
 *
 * This may be called from interrupt context.
 */
static struct ib_ah *qib_create_ah(struct ib_pd *pd,
				   struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah;
	struct ib_ah *ret;
	struct qib_ibdev *dev = to_idev(pd->device);
	unsigned long flags;

	if (qib_check_ah(pd->device, ah_attr)) {
		ret = ERR_PTR(-EINVAL);
		goto bail;
	}

	/* GFP_ATOMIC because this may be called at interrupt level. */
	ah = kmalloc(sizeof *ah, GFP_ATOMIC);
	if (!ah) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	if (dev->n_ahs_allocated == ib_qib_max_ahs) {
		spin_unlock_irqrestore(&dev->n_ahs_lock, flags);
		kfree(ah);
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	dev->n_ahs_allocated++;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	/* ib_create_ah() will initialize ah->ibah. */
	ah->attr = *ah_attr;
	atomic_set(&ah->refcount, 0);

	ret = &ah->ibah;

bail:
	return ret;
}

/**
 * qib_destroy_ah - destroy an address handle
 * @ibah: the AH to destroy
 *
 * This may be called from interrupt context.
 */
static int qib_destroy_ah(struct ib_ah *ibah)
{
	struct qib_ibdev *dev = to_idev(ibah->device);
	struct qib_ah *ah = to_iah(ibah);
	unsigned long flags;

	/* In-flight sends still hold references to this AH. */
	if (atomic_read(&ah->refcount) != 0)
		return -EBUSY;

	spin_lock_irqsave(&dev->n_ahs_lock, flags);
	dev->n_ahs_allocated--;
	spin_unlock_irqrestore(&dev->n_ahs_lock, flags);

	kfree(ah);

	return 0;
}

/* Replace an AH's attributes after re-validating them. */
static int qib_modify_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	if (qib_check_ah(ibah->device, ah_attr))
		return -EINVAL;

	ah->attr = *ah_attr;

	return 0;
}

/* Return a copy of an AH's current attributes. */
static int qib_query_ah(struct ib_ah *ibah, struct ib_ah_attr *ah_attr)
{
	struct qib_ah *ah = to_iah(ibah);

	*ah_attr = ah->attr;

	return 0;
}

/**
 * qib_get_npkeys - return the size of the PKEY table for context 0
 * @dd: the qlogic_ib device
 */
unsigned qib_get_npkeys(struct qib_devdata *dd)
{
	return ARRAY_SIZE(dd->rcd[0]->pkeys);
}

/*
 * Return the indexed PKEY from the port PKEY table.
 * No need to validate rcd[ctxt]; the port is setup if we are here.
 */
unsigned qib_get_pkey(struct qib_ibport *ibp, unsigned index)
{
	struct qib_pportdata *ppd = ppd_from_ibp(ibp);
	struct qib_devdata *dd = ppd->dd;
	unsigned ctxt = ppd->hw_pidx;
	unsigned ret;

	/* dd->rcd null if mini_init or some init failures */
	if (!dd->rcd || index >= ARRAY_SIZE(dd->rcd[ctxt]->pkeys))
		ret = 0;
	else
		ret = dd->rcd[ctxt]->pkeys[index];

	return ret;
}

/* ib_device query_pkey method: bounds-check then read the PKEY table. */
static int qib_query_pkey(struct ib_device *ibdev, u8 port, u16 index,
			  u16 *pkey)
{
	struct qib_devdata *dd = dd_from_ibdev(ibdev);
	int ret;

	if (index >= qib_get_npkeys(dd)) {
		ret = -EINVAL;
		goto bail;
	}

	*pkey = qib_get_pkey(to_iport(ibdev, port), index);
	ret = 0;

bail:
	return ret;
}

/**
 * qib_alloc_ucontext - allocate a ucontest
 * @ibdev: the infiniband device
 * @udata: not used by the QLogic_IB driver
 */
static struct ib_ucontext *qib_alloc_ucontext(struct ib_device *ibdev,
					      struct ib_udata *udata)
{
	struct qib_ucontext *context;
	struct ib_ucontext *ret;

	context = kmalloc(sizeof *context, GFP_KERNEL);
	if (!context) {
		ret = ERR_PTR(-ENOMEM);
		goto bail;
	}

	ret = &context->ibucontext;

bail:
	return ret;
}

/* Free a user context allocated by qib_alloc_ucontext(). */
static int qib_dealloc_ucontext(struct ib_ucontext *context)
{
	kfree(to_iucontext(context));
	return 0;
}

/* One-time initialization of the per-port IB state. */
static void init_ibport(struct qib_pportdata *ppd)
{
	struct qib_verbs_counters cntrs;
	struct qib_ibport *ibp = &ppd->ibport_data;

	spin_lock_init(&ibp->lock);
	/* Set the prefix to the default value (see ch. 4.1.1) */
	ibp->gid_prefix = IB_DEFAULT_GID_PREFIX;
	ibp->sm_lid = be16_to_cpu(IB_LID_PERMISSIVE);
	ibp->port_cap_flags = IB_PORT_SYS_IMAGE_GUID_SUP |
		IB_PORT_CLIENT_REG_SUP | IB_PORT_SL_MAP_SUP |
		IB_PORT_TRAP_SUP | IB_PORT_AUTO_MIGR_SUP |
		IB_PORT_DR_NOTICE_SUP | IB_PORT_CAP_MASK_NOTICE_SUP |
		IB_PORT_OTHER_LOCAL_CHANGES_SUP;
	if (ppd->dd->flags & QIB_HAS_LINK_LATENCY)
		ibp->port_cap_flags |= IB_PORT_LINK_LATENCY_SUP;
	ibp->pma_counter_select[0] = IB_PMA_PORT_XMIT_DATA;
	ibp->pma_counter_select[1] = IB_PMA_PORT_RCV_DATA;
	ibp->pma_counter_select[2] = IB_PMA_PORT_XMIT_PKTS;
	ibp->pma_counter_select[3] = IB_PMA_PORT_RCV_PKTS;
	ibp->pma_counter_select[4] = IB_PMA_PORT_XMIT_WAIT;

	/* Snapshot current HW counters to "clear" them.
*/ qib_get_counters(ppd, &cntrs); ibp->z_symbol_error_counter = cntrs.symbol_error_counter; ibp->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; ibp->z_link_downed_counter = cntrs.link_downed_counter; ibp->z_port_rcv_errors = cntrs.port_rcv_errors; ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; ibp->z_port_xmit_discards = cntrs.port_xmit_discards; ibp->z_port_xmit_data = cntrs.port_xmit_data; ibp->z_port_rcv_data = cntrs.port_rcv_data; ibp->z_port_xmit_packets = cntrs.port_xmit_packets; ibp->z_port_rcv_packets = cntrs.port_rcv_packets; ibp->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; ibp->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; ibp->z_vl15_dropped = cntrs.vl15_dropped; RCU_INIT_POINTER(ibp->qp0, NULL); RCU_INIT_POINTER(ibp->qp1, NULL); } /** * qib_register_ib_device - register our device with the infiniband core * @dd: the device data structure * Return the allocated qib_ibdev pointer or NULL on error. */ int qib_register_ib_device(struct qib_devdata *dd) { struct qib_ibdev *dev = &dd->verbs_dev; struct ib_device *ibdev = &dev->ibdev; struct qib_pportdata *ppd = dd->pport; unsigned i, lk_tab_size; int ret; dev->qp_table_size = ib_qib_qp_table_size; get_random_bytes(&dev->qp_rnd, sizeof(dev->qp_rnd)); dev->qp_table = kmalloc(dev->qp_table_size * sizeof *dev->qp_table, GFP_KERNEL); if (!dev->qp_table) { ret = -ENOMEM; goto err_qpt; } for (i = 0; i < dev->qp_table_size; i++) RCU_INIT_POINTER(dev->qp_table[i], NULL); for (i = 0; i < dd->num_pports; i++) init_ibport(ppd + i); /* Only need to initialize non-zero fields. 
*/ spin_lock_init(&dev->qpt_lock); spin_lock_init(&dev->n_pds_lock); spin_lock_init(&dev->n_ahs_lock); spin_lock_init(&dev->n_cqs_lock); spin_lock_init(&dev->n_qps_lock); spin_lock_init(&dev->n_srqs_lock); spin_lock_init(&dev->n_mcast_grps_lock); init_timer(&dev->mem_timer); dev->mem_timer.function = mem_timer; dev->mem_timer.data = (unsigned long) dev; qib_init_qpn_table(dd, &dev->qpn_table); /* * The top ib_qib_lkey_table_size bits are used to index the * table. The lower 8 bits can be owned by the user (copied from * the LKEY). The remaining bits act as a generation number or tag. */ spin_lock_init(&dev->lk_table.lock); dev->lk_table.max = 1 << ib_qib_lkey_table_size; lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); dev->lk_table.table = (struct qib_mregion **) __get_free_pages(GFP_KERNEL, get_order(lk_tab_size)); if (dev->lk_table.table == NULL) { ret = -ENOMEM; goto err_lk; } memset(dev->lk_table.table, 0, lk_tab_size); INIT_LIST_HEAD(&dev->pending_mmaps); spin_lock_init(&dev->pending_lock); dev->mmap_offset = PAGE_SIZE; spin_lock_init(&dev->mmap_offset_lock); INIT_LIST_HEAD(&dev->piowait); INIT_LIST_HEAD(&dev->dmawait); INIT_LIST_HEAD(&dev->txwait); INIT_LIST_HEAD(&dev->memwait); INIT_LIST_HEAD(&dev->txreq_free); if (ppd->sdma_descq_cnt) { dev->pio_hdrs = dma_alloc_coherent(&dd->pcidev->dev, ppd->sdma_descq_cnt * sizeof(struct qib_pio_header), &dev->pio_hdrs_phys, GFP_KERNEL); if (!dev->pio_hdrs) { ret = -ENOMEM; goto err_hdrs; } } for (i = 0; i < ppd->sdma_descq_cnt; i++) { struct qib_verbs_txreq *tx; tx = kzalloc(sizeof *tx, GFP_KERNEL); if (!tx) { ret = -ENOMEM; goto err_tx; } tx->hdr_inx = i; list_add(&tx->txreq.list, &dev->txreq_free); } /* * The system image GUID is supposed to be the same for all * IB HCAs in a single system but since there can be other * device types in the system, we can't be sure this is unique. 
*/ if (!ib_qib_sys_image_guid) ib_qib_sys_image_guid = ppd->guid; strlcpy(ibdev->name, "qib%d", IB_DEVICE_NAME_MAX); ibdev->owner = THIS_MODULE; ibdev->node_guid = ppd->guid; ibdev->uverbs_abi_ver = QIB_UVERBS_ABI_VERSION; ibdev->uverbs_cmd_mask = (1ull << IB_USER_VERBS_CMD_GET_CONTEXT) | (1ull << IB_USER_VERBS_CMD_QUERY_DEVICE) | (1ull << IB_USER_VERBS_CMD_QUERY_PORT) | (1ull << IB_USER_VERBS_CMD_ALLOC_PD) | (1ull << IB_USER_VERBS_CMD_DEALLOC_PD) | (1ull << IB_USER_VERBS_CMD_CREATE_AH) | (1ull << IB_USER_VERBS_CMD_MODIFY_AH) | (1ull << IB_USER_VERBS_CMD_QUERY_AH) | (1ull << IB_USER_VERBS_CMD_DESTROY_AH) | (1ull << IB_USER_VERBS_CMD_REG_MR) | (1ull << IB_USER_VERBS_CMD_DEREG_MR) | (1ull << IB_USER_VERBS_CMD_CREATE_COMP_CHANNEL) | (1ull << IB_USER_VERBS_CMD_CREATE_CQ) | (1ull << IB_USER_VERBS_CMD_RESIZE_CQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_CQ) | (1ull << IB_USER_VERBS_CMD_POLL_CQ) | (1ull << IB_USER_VERBS_CMD_REQ_NOTIFY_CQ) | (1ull << IB_USER_VERBS_CMD_CREATE_QP) | (1ull << IB_USER_VERBS_CMD_QUERY_QP) | (1ull << IB_USER_VERBS_CMD_MODIFY_QP) | (1ull << IB_USER_VERBS_CMD_DESTROY_QP) | (1ull << IB_USER_VERBS_CMD_POST_SEND) | (1ull << IB_USER_VERBS_CMD_POST_RECV) | (1ull << IB_USER_VERBS_CMD_ATTACH_MCAST) | (1ull << IB_USER_VERBS_CMD_DETACH_MCAST) | (1ull << IB_USER_VERBS_CMD_CREATE_SRQ) | (1ull << IB_USER_VERBS_CMD_MODIFY_SRQ) | (1ull << IB_USER_VERBS_CMD_QUERY_SRQ) | (1ull << IB_USER_VERBS_CMD_DESTROY_SRQ) | (1ull << IB_USER_VERBS_CMD_POST_SRQ_RECV); ibdev->node_type = RDMA_NODE_IB_CA; ibdev->phys_port_cnt = dd->num_pports; ibdev->num_comp_vectors = 1; ibdev->dma_device = &dd->pcidev->dev; ibdev->query_device = qib_query_device; ibdev->modify_device = qib_modify_device; ibdev->query_port = qib_query_port; ibdev->modify_port = qib_modify_port; ibdev->query_pkey = qib_query_pkey; ibdev->query_gid = qib_query_gid; ibdev->alloc_ucontext = qib_alloc_ucontext; ibdev->dealloc_ucontext = qib_dealloc_ucontext; ibdev->alloc_pd = qib_alloc_pd; ibdev->dealloc_pd = 
qib_dealloc_pd; ibdev->create_ah = qib_create_ah; ibdev->destroy_ah = qib_destroy_ah; ibdev->modify_ah = qib_modify_ah; ibdev->query_ah = qib_query_ah; ibdev->create_srq = qib_create_srq; ibdev->modify_srq = qib_modify_srq; ibdev->query_srq = qib_query_srq; ibdev->destroy_srq = qib_destroy_srq; ibdev->create_qp = qib_create_qp; ibdev->modify_qp = qib_modify_qp; ibdev->query_qp = qib_query_qp; ibdev->destroy_qp = qib_destroy_qp; ibdev->post_send = qib_post_send; ibdev->post_recv = qib_post_receive; ibdev->post_srq_recv = qib_post_srq_receive; ibdev->create_cq = qib_create_cq; ibdev->destroy_cq = qib_destroy_cq; ibdev->resize_cq = qib_resize_cq; ibdev->poll_cq = qib_poll_cq; ibdev->req_notify_cq = qib_req_notify_cq; ibdev->get_dma_mr = qib_get_dma_mr; ibdev->reg_phys_mr = qib_reg_phys_mr; ibdev->reg_user_mr = qib_reg_user_mr; ibdev->dereg_mr = qib_dereg_mr; ibdev->alloc_fast_reg_mr = qib_alloc_fast_reg_mr; ibdev->alloc_fast_reg_page_list = qib_alloc_fast_reg_page_list; ibdev->free_fast_reg_page_list = qib_free_fast_reg_page_list; ibdev->alloc_fmr = qib_alloc_fmr; ibdev->map_phys_fmr = qib_map_phys_fmr; ibdev->unmap_fmr = qib_unmap_fmr; ibdev->dealloc_fmr = qib_dealloc_fmr; ibdev->attach_mcast = qib_multicast_attach; ibdev->detach_mcast = qib_multicast_detach; ibdev->process_mad = qib_process_mad; ibdev->mmap = qib_mmap; ibdev->dma_ops = &qib_dma_mapping_ops; snprintf(ibdev->node_desc, sizeof(ibdev->node_desc), QIB_IDSTR " %s", init_utsname()->nodename); ret = ib_register_device(ibdev, qib_create_port_files); if (ret) goto err_reg; ret = qib_create_agents(dev); if (ret) goto err_agents; if (qib_verbs_register_sysfs(dd)) goto err_class; goto bail; err_class: qib_free_agents(dev); err_agents: ib_unregister_device(ibdev); err_reg: err_tx: while (!list_empty(&dev->txreq_free)) { struct list_head *l = dev->txreq_free.next; struct qib_verbs_txreq *tx; list_del(l); tx = list_entry(l, struct qib_verbs_txreq, txreq.list); kfree(tx); } if (ppd->sdma_descq_cnt) 
dma_free_coherent(&dd->pcidev->dev, ppd->sdma_descq_cnt * sizeof(struct qib_pio_header), dev->pio_hdrs, dev->pio_hdrs_phys); err_hdrs: free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size)); err_lk: kfree(dev->qp_table); err_qpt: qib_dev_err(dd, "cannot register verbs: %d!\n", -ret); bail: return ret; } void qib_unregister_ib_device(struct qib_devdata *dd) { struct qib_ibdev *dev = &dd->verbs_dev; struct ib_device *ibdev = &dev->ibdev; u32 qps_inuse; unsigned lk_tab_size; qib_verbs_unregister_sysfs(dd); qib_free_agents(dev); ib_unregister_device(ibdev); if (!list_empty(&dev->piowait)) qib_dev_err(dd, "piowait list not empty!\n"); if (!list_empty(&dev->dmawait)) qib_dev_err(dd, "dmawait list not empty!\n"); if (!list_empty(&dev->txwait)) qib_dev_err(dd, "txwait list not empty!\n"); if (!list_empty(&dev->memwait)) qib_dev_err(dd, "memwait list not empty!\n"); if (dev->dma_mr) qib_dev_err(dd, "DMA MR not NULL!\n"); qps_inuse = qib_free_all_qps(dd); if (qps_inuse) qib_dev_err(dd, "QP memory leak! %u still in use\n", qps_inuse); del_timer_sync(&dev->mem_timer); qib_free_qpn_table(&dev->qpn_table); while (!list_empty(&dev->txreq_free)) { struct list_head *l = dev->txreq_free.next; struct qib_verbs_txreq *tx; list_del(l); tx = list_entry(l, struct qib_verbs_txreq, txreq.list); kfree(tx); } if (dd->pport->sdma_descq_cnt) dma_free_coherent(&dd->pcidev->dev, dd->pport->sdma_descq_cnt * sizeof(struct qib_pio_header), dev->pio_hdrs, dev->pio_hdrs_phys); lk_tab_size = dev->lk_table.max * sizeof(*dev->lk_table.table); free_pages((unsigned long) dev->lk_table.table, get_order(lk_tab_size)); kfree(dev->qp_table); }
gpl-2.0
thicklizard/GPEweepingangel
arch/powerpc/sysdev/fsl_ifc.c
4641
8276
/* * Copyright 2011 Freescale Semiconductor, Inc * * Freescale Integrated Flash Controller * * Author: Dipen Dudhat <Dipen.Dudhat@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/compiler.h> #include <linux/spinlock.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <asm/prom.h> #include <asm/fsl_ifc.h> struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; EXPORT_SYMBOL(fsl_ifc_ctrl_dev); /* * convert_ifc_address - convert the base address * @addr_base: base address of the memory bank */ unsigned int convert_ifc_address(phys_addr_t addr_base) { return addr_base & CSPR_BA; } EXPORT_SYMBOL(convert_ifc_address); /* * fsl_ifc_find - find IFC bank * @addr_base: base address of the memory bank * * This function walks IFC banks comparing "Base address" field of the CSPR * registers with the supplied addr_base argument. When bases match this * function returns bank number (starting with 0), otherwise it returns * appropriate errno value. 
*/ int fsl_ifc_find(phys_addr_t addr_base) { int i = 0; if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) return -ENODEV; for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) { __be32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); if (cspr & CSPR_V && (cspr & CSPR_BA) == convert_ifc_address(addr_base)) return i; } return -ENOENT; } EXPORT_SYMBOL(fsl_ifc_find); static int __devinit fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) { struct fsl_ifc_regs __iomem *ifc = ctrl->regs; /* * Clear all the common status and event registers */ if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); /* enable all error and events */ out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN); /* enable all error and event interrupts */ out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN); out_be32(&ifc->cm_erattr0, 0x0); out_be32(&ifc->cm_erattr1, 0x0); return 0; } static int fsl_ifc_ctrl_remove(struct platform_device *dev) { struct fsl_ifc_ctrl *ctrl = dev_get_drvdata(&dev->dev); free_irq(ctrl->nand_irq, ctrl); free_irq(ctrl->irq, ctrl); irq_dispose_mapping(ctrl->nand_irq); irq_dispose_mapping(ctrl->irq); iounmap(ctrl->regs); dev_set_drvdata(&dev->dev, NULL); kfree(ctrl); return 0; } /* * NAND events are split between an operational interrupt which only * receives OPC, and an error interrupt that receives everything else, * including non-NAND errors. Whichever interrupt gets to it first * records the status and wakes the wait queue. 
*/ static DEFINE_SPINLOCK(nand_irq_lock); static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) { struct fsl_ifc_regs __iomem *ifc = ctrl->regs; unsigned long flags; u32 stat; spin_lock_irqsave(&nand_irq_lock, flags); stat = in_be32(&ifc->ifc_nand.nand_evter_stat); if (stat) { out_be32(&ifc->ifc_nand.nand_evter_stat, stat); ctrl->nand_stat = stat; wake_up(&ctrl->nand_wait); } spin_unlock_irqrestore(&nand_irq_lock, flags); return stat; } static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data) { struct fsl_ifc_ctrl *ctrl = data; if (check_nand_stat(ctrl)) return IRQ_HANDLED; return IRQ_NONE; } /* * NOTE: This interrupt is used to report ifc events of various kinds, * such as transaction errors on the chipselects. */ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) { struct fsl_ifc_ctrl *ctrl = data; struct fsl_ifc_regs __iomem *ifc = ctrl->regs; u32 err_axiid, err_srcid, status, cs_err, err_addr; irqreturn_t ret = IRQ_NONE; /* read for chip select error */ cs_err = in_be32(&ifc->cm_evter_stat); if (cs_err) { dev_err(ctrl->dev, "transaction sent to IFC is not mapped to" "any memory bank 0x%08X\n", cs_err); /* clear the chip select error */ out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); /* read error attribute registers print the error information */ status = in_be32(&ifc->cm_erattr0); err_addr = in_be32(&ifc->cm_erattr1); if (status & IFC_CM_ERATTR0_ERTYP_READ) dev_err(ctrl->dev, "Read transaction error" "CM_ERATTR0 0x%08X\n", status); else dev_err(ctrl->dev, "Write transaction error" "CM_ERATTR0 0x%08X\n", status); err_axiid = (status & IFC_CM_ERATTR0_ERAID) >> IFC_CM_ERATTR0_ERAID_SHIFT; dev_err(ctrl->dev, "AXI ID of the error" "transaction 0x%08X\n", err_axiid); err_srcid = (status & IFC_CM_ERATTR0_ESRCID) >> IFC_CM_ERATTR0_ESRCID_SHIFT; dev_err(ctrl->dev, "SRC ID of the error" "transaction 0x%08X\n", err_srcid); dev_err(ctrl->dev, "Transaction Address corresponding to error" "ERADDR 0x%08X\n", err_addr); ret = IRQ_HANDLED; } if 
(check_nand_stat(ctrl)) ret = IRQ_HANDLED; return ret; } /* * fsl_ifc_ctrl_probe * * called by device layer when it finds a device matching * one our driver can handled. This code allocates all of * the resources needed for the controller only. The * resources for the NAND banks themselves are allocated * in the chip probe function. */ static int __devinit fsl_ifc_ctrl_probe(struct platform_device *dev) { int ret = 0; dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); fsl_ifc_ctrl_dev = kzalloc(sizeof(*fsl_ifc_ctrl_dev), GFP_KERNEL); if (!fsl_ifc_ctrl_dev) return -ENOMEM; dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); /* IOMAP the entire IFC region */ fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); if (!fsl_ifc_ctrl_dev->regs) { dev_err(&dev->dev, "failed to get memory region\n"); ret = -ENODEV; goto err; } /* get the Controller level irq */ fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { dev_err(&dev->dev, "failed to get irq resource " "for IFC\n"); ret = -ENODEV; goto err; } /* get the nand machine irq */ fsl_ifc_ctrl_dev->nand_irq = irq_of_parse_and_map(dev->dev.of_node, 1); if (fsl_ifc_ctrl_dev->nand_irq == NO_IRQ) { dev_err(&dev->dev, "failed to get irq resource " "for NAND Machine\n"); ret = -ENODEV; goto err; } fsl_ifc_ctrl_dev->dev = &dev->dev; ret = fsl_ifc_ctrl_init(fsl_ifc_ctrl_dev); if (ret < 0) goto err; init_waitqueue_head(&fsl_ifc_ctrl_dev->nand_wait); ret = request_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_irq, IRQF_SHARED, "fsl-ifc", fsl_ifc_ctrl_dev); if (ret != 0) { dev_err(&dev->dev, "failed to install irq (%d)\n", fsl_ifc_ctrl_dev->irq); goto err_irq; } ret = request_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_nand_irq, 0, "fsl-ifc-nand", fsl_ifc_ctrl_dev); if (ret != 0) { dev_err(&dev->dev, "failed to install irq (%d)\n", fsl_ifc_ctrl_dev->nand_irq); goto err_nandirq; } return 0; err_nandirq: free_irq(fsl_ifc_ctrl_dev->nand_irq, fsl_ifc_ctrl_dev); 
irq_dispose_mapping(fsl_ifc_ctrl_dev->nand_irq); err_irq: free_irq(fsl_ifc_ctrl_dev->irq, fsl_ifc_ctrl_dev); irq_dispose_mapping(fsl_ifc_ctrl_dev->irq); err: return ret; } static const struct of_device_id fsl_ifc_match[] = { { .compatible = "fsl,ifc", }, {}, }; static struct platform_driver fsl_ifc_ctrl_driver = { .driver = { .name = "fsl-ifc", .of_match_table = fsl_ifc_match, }, .probe = fsl_ifc_ctrl_probe, .remove = fsl_ifc_ctrl_remove, }; module_platform_driver(fsl_ifc_ctrl_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Freescale Semiconductor"); MODULE_DESCRIPTION("Freescale Integrated Flash Controller driver");
gpl-2.0
UISS-Dev-Team/android_kernel_huawei_c8813
drivers/media/video/s5p-fimc/fimc-capture.c
4641
43247
/* * Samsung S5P/EXYNOS4 SoC series camera interface (camera capture) driver * * Copyright (C) 2010 - 2011 Samsung Electronics Co., Ltd. * Author: Sylwester Nawrocki, <s.nawrocki@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/errno.h> #include <linux/bug.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/pm_runtime.h> #include <linux/list.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-mem2mem.h> #include <media/videobuf2-core.h> #include <media/videobuf2-dma-contig.h> #include "fimc-mdevice.h" #include "fimc-core.h" static int fimc_init_capture(struct fimc_dev *fimc) { struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct fimc_sensor_info *sensor; unsigned long flags; int ret = 0; if (fimc->pipeline.sensor == NULL || ctx == NULL) return -ENXIO; if (ctx->s_frame.fmt == NULL) return -EINVAL; sensor = v4l2_get_subdev_hostdata(fimc->pipeline.sensor); spin_lock_irqsave(&fimc->slock, flags); fimc_prepare_dma_offset(ctx, &ctx->d_frame); fimc_set_yuv_order(ctx); fimc_hw_set_camera_polarity(fimc, sensor->pdata); fimc_hw_set_camera_type(fimc, sensor->pdata); fimc_hw_set_camera_source(fimc, sensor->pdata); fimc_hw_set_camera_offset(fimc, &ctx->s_frame); ret = fimc_set_scaler_info(ctx); if (!ret) { fimc_hw_set_input_path(ctx); fimc_hw_set_prescaler(ctx); fimc_hw_set_mainscaler(ctx); fimc_hw_set_target_format(ctx); fimc_hw_set_rotation(ctx); fimc_hw_set_effect(ctx, false); fimc_hw_set_output_path(ctx); fimc_hw_set_out_dma(ctx); if (fimc->variant->has_alpha) fimc_hw_set_rgb_alpha(ctx); clear_bit(ST_CAPT_APPLY_CFG, &fimc->state); } spin_unlock_irqrestore(&fimc->slock, flags); return ret; } static int 
fimc_capture_state_cleanup(struct fimc_dev *fimc, bool suspend) { struct fimc_vid_cap *cap = &fimc->vid_cap; struct fimc_vid_buffer *buf; unsigned long flags; bool streaming; spin_lock_irqsave(&fimc->slock, flags); streaming = fimc->state & (1 << ST_CAPT_ISP_STREAM); fimc->state &= ~(1 << ST_CAPT_RUN | 1 << ST_CAPT_SHUT | 1 << ST_CAPT_STREAM | 1 << ST_CAPT_ISP_STREAM); if (!suspend) fimc->state &= ~(1 << ST_CAPT_PEND | 1 << ST_CAPT_SUSPENDED); /* Release unused buffers */ while (!suspend && !list_empty(&cap->pending_buf_q)) { buf = fimc_pending_queue_pop(cap); vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); } /* If suspending put unused buffers onto pending queue */ while (!list_empty(&cap->active_buf_q)) { buf = fimc_active_queue_pop(cap); if (suspend) fimc_pending_queue_add(cap, buf); else vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); } set_bit(ST_CAPT_SUSPENDED, &fimc->state); fimc_hw_reset(fimc); cap->buf_index = 0; spin_unlock_irqrestore(&fimc->slock, flags); if (streaming) return fimc_pipeline_s_stream(fimc, 0); else return 0; } static int fimc_stop_capture(struct fimc_dev *fimc, bool suspend) { unsigned long flags; if (!fimc_capture_active(fimc)) return 0; spin_lock_irqsave(&fimc->slock, flags); set_bit(ST_CAPT_SHUT, &fimc->state); fimc_deactivate_capture(fimc); spin_unlock_irqrestore(&fimc->slock, flags); wait_event_timeout(fimc->irq_queue, !test_bit(ST_CAPT_SHUT, &fimc->state), (2*HZ/10)); /* 200 ms */ return fimc_capture_state_cleanup(fimc, suspend); } /** * fimc_capture_config_update - apply the camera interface configuration * * To be called from within the interrupt handler with fimc.slock * spinlock held. It updates the camera pixel crop, rotation and * image flip in H/W. 
*/ int fimc_capture_config_update(struct fimc_ctx *ctx) { struct fimc_dev *fimc = ctx->fimc_dev; int ret; if (!test_bit(ST_CAPT_APPLY_CFG, &fimc->state)) return 0; spin_lock(&ctx->slock); fimc_hw_set_camera_offset(fimc, &ctx->s_frame); ret = fimc_set_scaler_info(ctx); if (ret == 0) { fimc_hw_set_prescaler(ctx); fimc_hw_set_mainscaler(ctx); fimc_hw_set_target_format(ctx); fimc_hw_set_rotation(ctx); fimc_prepare_dma_offset(ctx, &ctx->d_frame); fimc_hw_set_out_dma(ctx); if (fimc->variant->has_alpha) fimc_hw_set_rgb_alpha(ctx); clear_bit(ST_CAPT_APPLY_CFG, &fimc->state); } spin_unlock(&ctx->slock); return ret; } static int start_streaming(struct vb2_queue *q, unsigned int count) { struct fimc_ctx *ctx = q->drv_priv; struct fimc_dev *fimc = ctx->fimc_dev; struct fimc_vid_cap *vid_cap = &fimc->vid_cap; int min_bufs; int ret; vid_cap->frame_count = 0; ret = fimc_init_capture(fimc); if (ret) goto error; set_bit(ST_CAPT_PEND, &fimc->state); min_bufs = fimc->vid_cap.reqbufs_count > 1 ? 2 : 1; if (vid_cap->active_buf_cnt >= min_bufs && !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) { fimc_activate_capture(ctx); if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state)) fimc_pipeline_s_stream(fimc, 1); } return 0; error: fimc_capture_state_cleanup(fimc, false); return ret; } static int stop_streaming(struct vb2_queue *q) { struct fimc_ctx *ctx = q->drv_priv; struct fimc_dev *fimc = ctx->fimc_dev; if (!fimc_capture_active(fimc)) return -EINVAL; return fimc_stop_capture(fimc, false); } int fimc_capture_suspend(struct fimc_dev *fimc) { bool suspend = fimc_capture_busy(fimc); int ret = fimc_stop_capture(fimc, suspend); if (ret) return ret; return fimc_pipeline_shutdown(fimc); } static void buffer_queue(struct vb2_buffer *vb); int fimc_capture_resume(struct fimc_dev *fimc) { struct fimc_vid_cap *vid_cap = &fimc->vid_cap; struct fimc_vid_buffer *buf; int i; if (!test_and_clear_bit(ST_CAPT_SUSPENDED, &fimc->state)) return 0; INIT_LIST_HEAD(&fimc->vid_cap.active_buf_q); 
vid_cap->buf_index = 0; fimc_pipeline_initialize(fimc, &fimc->vid_cap.vfd->entity, false); fimc_init_capture(fimc); clear_bit(ST_CAPT_SUSPENDED, &fimc->state); for (i = 0; i < vid_cap->reqbufs_count; i++) { if (list_empty(&vid_cap->pending_buf_q)) break; buf = fimc_pending_queue_pop(vid_cap); buffer_queue(&buf->vb); } return 0; } static int queue_setup(struct vb2_queue *vq, const struct v4l2_format *pfmt, unsigned int *num_buffers, unsigned int *num_planes, unsigned int sizes[], void *allocators[]) { const struct v4l2_pix_format_mplane *pixm = NULL; struct fimc_ctx *ctx = vq->drv_priv; struct fimc_frame *frame = &ctx->d_frame; struct fimc_fmt *fmt = frame->fmt; unsigned long wh; int i; if (pfmt) { pixm = &pfmt->fmt.pix_mp; fmt = fimc_find_format(&pixm->pixelformat, NULL, FMT_FLAGS_CAM | FMT_FLAGS_M2M, -1); wh = pixm->width * pixm->height; } else { wh = frame->f_width * frame->f_height; } if (fmt == NULL) return -EINVAL; *num_planes = fmt->memplanes; for (i = 0; i < fmt->memplanes; i++) { unsigned int size = (wh * fmt->depth[i]) / 8; if (pixm) sizes[i] = max(size, pixm->plane_fmt[i].sizeimage); else sizes[i] = size; allocators[i] = ctx->fimc_dev->alloc_ctx; } return 0; } static int buffer_prepare(struct vb2_buffer *vb) { struct vb2_queue *vq = vb->vb2_queue; struct fimc_ctx *ctx = vq->drv_priv; int i; if (ctx->d_frame.fmt == NULL) return -EINVAL; for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) { unsigned long size = ctx->d_frame.payload[i]; if (vb2_plane_size(vb, i) < size) { v4l2_err(ctx->fimc_dev->vid_cap.vfd, "User buffer too small (%ld < %ld)\n", vb2_plane_size(vb, i), size); return -EINVAL; } vb2_set_plane_payload(vb, i, size); } return 0; } static void buffer_queue(struct vb2_buffer *vb) { struct fimc_vid_buffer *buf = container_of(vb, struct fimc_vid_buffer, vb); struct fimc_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue); struct fimc_dev *fimc = ctx->fimc_dev; struct fimc_vid_cap *vid_cap = &fimc->vid_cap; unsigned long flags; int min_bufs; 
spin_lock_irqsave(&fimc->slock, flags); fimc_prepare_addr(ctx, &buf->vb, &ctx->d_frame, &buf->paddr); if (!test_bit(ST_CAPT_SUSPENDED, &fimc->state) && !test_bit(ST_CAPT_STREAM, &fimc->state) && vid_cap->active_buf_cnt < FIMC_MAX_OUT_BUFS) { /* Setup the buffer directly for processing. */ int buf_id = (vid_cap->reqbufs_count == 1) ? -1 : vid_cap->buf_index; fimc_hw_set_output_addr(fimc, &buf->paddr, buf_id); buf->index = vid_cap->buf_index; fimc_active_queue_add(vid_cap, buf); if (++vid_cap->buf_index >= FIMC_MAX_OUT_BUFS) vid_cap->buf_index = 0; } else { fimc_pending_queue_add(vid_cap, buf); } min_bufs = vid_cap->reqbufs_count > 1 ? 2 : 1; if (vb2_is_streaming(&vid_cap->vbq) && vid_cap->active_buf_cnt >= min_bufs && !test_and_set_bit(ST_CAPT_STREAM, &fimc->state)) { fimc_activate_capture(ctx); spin_unlock_irqrestore(&fimc->slock, flags); if (!test_and_set_bit(ST_CAPT_ISP_STREAM, &fimc->state)) fimc_pipeline_s_stream(fimc, 1); return; } spin_unlock_irqrestore(&fimc->slock, flags); } static void fimc_lock(struct vb2_queue *vq) { struct fimc_ctx *ctx = vb2_get_drv_priv(vq); mutex_lock(&ctx->fimc_dev->lock); } static void fimc_unlock(struct vb2_queue *vq) { struct fimc_ctx *ctx = vb2_get_drv_priv(vq); mutex_unlock(&ctx->fimc_dev->lock); } static struct vb2_ops fimc_capture_qops = { .queue_setup = queue_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .wait_prepare = fimc_unlock, .wait_finish = fimc_lock, .start_streaming = start_streaming, .stop_streaming = stop_streaming, }; /** * fimc_capture_ctrls_create - initialize the control handler * Initialize the capture video node control handler and fill it * with the FIMC controls. Inherit any sensor's controls if the * 'user_subdev_api' flag is false (default behaviour). * This function need to be called with the graph mutex held. 
*/ int fimc_capture_ctrls_create(struct fimc_dev *fimc) { struct fimc_vid_cap *vid_cap = &fimc->vid_cap; int ret; if (WARN_ON(vid_cap->ctx == NULL)) return -ENXIO; if (vid_cap->ctx->ctrls_rdy) return 0; ret = fimc_ctrls_create(vid_cap->ctx); if (ret || vid_cap->user_subdev_api) return ret; return v4l2_ctrl_add_handler(&vid_cap->ctx->ctrl_handler, fimc->pipeline.sensor->ctrl_handler); } static int fimc_capture_set_default_format(struct fimc_dev *fimc); static int fimc_capture_open(struct file *file) { struct fimc_dev *fimc = video_drvdata(file); int ret = v4l2_fh_open(file); if (ret) return ret; dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state); /* Return if the corresponding video mem2mem node is already opened. */ if (fimc_m2m_active(fimc)) return -EBUSY; set_bit(ST_CAPT_BUSY, &fimc->state); pm_runtime_get_sync(&fimc->pdev->dev); if (++fimc->vid_cap.refcnt == 1) { ret = fimc_pipeline_initialize(fimc, &fimc->vid_cap.vfd->entity, true); if (ret < 0) { dev_err(&fimc->pdev->dev, "Video pipeline initialization failed\n"); pm_runtime_put_sync(&fimc->pdev->dev); fimc->vid_cap.refcnt--; v4l2_fh_release(file); clear_bit(ST_CAPT_BUSY, &fimc->state); return ret; } ret = fimc_capture_ctrls_create(fimc); if (!ret && !fimc->vid_cap.user_subdev_api) ret = fimc_capture_set_default_format(fimc); } return ret; } static int fimc_capture_close(struct file *file) { struct fimc_dev *fimc = video_drvdata(file); dbg("pid: %d, state: 0x%lx", task_pid_nr(current), fimc->state); if (--fimc->vid_cap.refcnt == 0) { clear_bit(ST_CAPT_BUSY, &fimc->state); fimc_stop_capture(fimc, false); fimc_pipeline_shutdown(fimc); clear_bit(ST_CAPT_SUSPENDED, &fimc->state); } pm_runtime_put(&fimc->pdev->dev); if (fimc->vid_cap.refcnt == 0) { vb2_queue_release(&fimc->vid_cap.vbq); fimc_ctrls_delete(fimc->vid_cap.ctx); } return v4l2_fh_release(file); } static unsigned int fimc_capture_poll(struct file *file, struct poll_table_struct *wait) { struct fimc_dev *fimc = video_drvdata(file); return 
vb2_poll(&fimc->vid_cap.vbq, file, wait); } static int fimc_capture_mmap(struct file *file, struct vm_area_struct *vma) { struct fimc_dev *fimc = video_drvdata(file); return vb2_mmap(&fimc->vid_cap.vbq, vma); } static const struct v4l2_file_operations fimc_capture_fops = { .owner = THIS_MODULE, .open = fimc_capture_open, .release = fimc_capture_close, .poll = fimc_capture_poll, .unlocked_ioctl = video_ioctl2, .mmap = fimc_capture_mmap, }; /* * Format and crop negotiation helpers */ static struct fimc_fmt *fimc_capture_try_format(struct fimc_ctx *ctx, u32 *width, u32 *height, u32 *code, u32 *fourcc, int pad) { bool rotation = ctx->rotation == 90 || ctx->rotation == 270; struct fimc_dev *fimc = ctx->fimc_dev; struct samsung_fimc_variant *var = fimc->variant; struct fimc_pix_limit *pl = var->pix_limit; struct fimc_frame *dst = &ctx->d_frame; u32 depth, min_w, max_w, min_h, align_h = 3; u32 mask = FMT_FLAGS_CAM; struct fimc_fmt *ffmt; /* Color conversion from/to JPEG is not supported */ if (code && ctx->s_frame.fmt && pad == FIMC_SD_PAD_SOURCE && fimc_fmt_is_jpeg(ctx->s_frame.fmt->color)) *code = V4L2_MBUS_FMT_JPEG_1X8; if (fourcc && *fourcc != V4L2_PIX_FMT_JPEG && pad != FIMC_SD_PAD_SINK) mask |= FMT_FLAGS_M2M; ffmt = fimc_find_format(fourcc, code, mask, 0); if (WARN_ON(!ffmt)) return NULL; if (code) *code = ffmt->mbus_code; if (fourcc) *fourcc = ffmt->fourcc; if (pad == FIMC_SD_PAD_SINK) { max_w = fimc_fmt_is_jpeg(ffmt->color) ? pl->scaler_dis_w : pl->scaler_en_w; /* Apply the camera input interface pixel constraints */ v4l_bound_align_image(width, max_t(u32, *width, 32), max_w, 4, height, max_t(u32, *height, 32), FIMC_CAMIF_MAX_HEIGHT, fimc_fmt_is_jpeg(ffmt->color) ? 3 : 1, 0); return ffmt; } /* Can't scale or crop in transparent (JPEG) transfer mode */ if (fimc_fmt_is_jpeg(ffmt->color)) { *width = ctx->s_frame.f_width; *height = ctx->s_frame.f_height; return ffmt; } /* Apply the scaler and the output DMA constraints */ max_w = rotation ? 
pl->out_rot_en_w : pl->out_rot_dis_w; min_w = ctx->state & FIMC_DST_CROP ? dst->width : var->min_out_pixsize; min_h = ctx->state & FIMC_DST_CROP ? dst->height : var->min_out_pixsize; if (var->min_vsize_align == 1 && !rotation) align_h = fimc_fmt_is_rgb(ffmt->color) ? 0 : 1; depth = fimc_get_format_depth(ffmt); v4l_bound_align_image(width, min_w, max_w, ffs(var->min_out_pixsize) - 1, height, min_h, FIMC_CAMIF_MAX_HEIGHT, align_h, 64/(ALIGN(depth, 8))); dbg("pad%d: code: 0x%x, %dx%d. dst fmt: %dx%d", pad, code ? *code : 0, *width, *height, dst->f_width, dst->f_height); return ffmt; } static void fimc_capture_try_crop(struct fimc_ctx *ctx, struct v4l2_rect *r, int pad) { bool rotate = ctx->rotation == 90 || ctx->rotation == 270; struct fimc_dev *fimc = ctx->fimc_dev; struct samsung_fimc_variant *var = fimc->variant; struct fimc_pix_limit *pl = var->pix_limit; struct fimc_frame *sink = &ctx->s_frame; u32 max_w, max_h, min_w = 0, min_h = 0, min_sz; u32 align_sz = 0, align_h = 4; u32 max_sc_h, max_sc_v; /* In JPEG transparent transfer mode cropping is not supported */ if (fimc_fmt_is_jpeg(ctx->d_frame.fmt->color)) { r->width = sink->f_width; r->height = sink->f_height; r->left = r->top = 0; return; } if (pad == FIMC_SD_PAD_SOURCE) { if (ctx->rotation != 90 && ctx->rotation != 270) align_h = 1; max_sc_h = min(SCALER_MAX_HRATIO, 1 << (ffs(sink->width) - 3)); max_sc_v = min(SCALER_MAX_VRATIO, 1 << (ffs(sink->height) - 1)); min_sz = var->min_out_pixsize; } else { u32 depth = fimc_get_format_depth(sink->fmt); align_sz = 64/ALIGN(depth, 8); min_sz = var->min_inp_pixsize; min_w = min_h = min_sz; max_sc_h = max_sc_v = 1; } /* * For the crop rectangle at source pad the following constraints * must be met: * - it must fit in the sink pad format rectangle (f_width/f_height); * - maximum downscaling ratio is 64; * - maximum crop size depends if the rotator is used or not; * - the sink pad format width/height must be 4 multiple of the * prescaler ratios determined by sink pad size 
and source pad crop, * the prescaler ratio is returned by fimc_get_scaler_factor(). */ max_w = min_t(u32, rotate ? pl->out_rot_en_w : pl->out_rot_dis_w, rotate ? sink->f_height : sink->f_width); max_h = min_t(u32, FIMC_CAMIF_MAX_HEIGHT, sink->f_height); if (pad == FIMC_SD_PAD_SOURCE) { min_w = min_t(u32, max_w, sink->f_width / max_sc_h); min_h = min_t(u32, max_h, sink->f_height / max_sc_v); if (rotate) { swap(max_sc_h, max_sc_v); swap(min_w, min_h); } } v4l_bound_align_image(&r->width, min_w, max_w, ffs(min_sz) - 1, &r->height, min_h, max_h, align_h, align_sz); /* Adjust left/top if cropping rectangle is out of bounds */ r->left = clamp_t(u32, r->left, 0, sink->f_width - r->width); r->top = clamp_t(u32, r->top, 0, sink->f_height - r->height); r->left = round_down(r->left, var->hor_offs_align); dbg("pad%d: (%d,%d)/%dx%d, sink fmt: %dx%d", pad, r->left, r->top, r->width, r->height, sink->f_width, sink->f_height); } /* * The video node ioctl operations */ static int fimc_vidioc_querycap_capture(struct file *file, void *priv, struct v4l2_capability *cap) { struct fimc_dev *fimc = video_drvdata(file); strncpy(cap->driver, fimc->pdev->name, sizeof(cap->driver) - 1); strncpy(cap->card, fimc->pdev->name, sizeof(cap->card) - 1); cap->bus_info[0] = 0; cap->capabilities = V4L2_CAP_STREAMING | V4L2_CAP_VIDEO_CAPTURE_MPLANE; return 0; } static int fimc_cap_enum_fmt_mplane(struct file *file, void *priv, struct v4l2_fmtdesc *f) { struct fimc_fmt *fmt; fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM | FMT_FLAGS_M2M, f->index); if (!fmt) return -EINVAL; strncpy(f->description, fmt->name, sizeof(f->description) - 1); f->pixelformat = fmt->fourcc; if (fmt->fourcc == V4L2_MBUS_FMT_JPEG_1X8) f->flags |= V4L2_FMT_FLAG_COMPRESSED; return 0; } /** * fimc_pipeline_try_format - negotiate and/or set formats at pipeline * elements * @ctx: FIMC capture context * @tfmt: media bus format to try/set on subdevs * @fmt_id: fimc pixel format id corresponding to returned @tfmt (output) * @set: true 
to set format on subdevs, false to try only */ static int fimc_pipeline_try_format(struct fimc_ctx *ctx, struct v4l2_mbus_framefmt *tfmt, struct fimc_fmt **fmt_id, bool set) { struct fimc_dev *fimc = ctx->fimc_dev; struct v4l2_subdev *sd = fimc->pipeline.sensor; struct v4l2_subdev *csis = fimc->pipeline.csis; struct v4l2_subdev_format sfmt; struct v4l2_mbus_framefmt *mf = &sfmt.format; struct fimc_fmt *ffmt = NULL; int ret, i = 0; if (WARN_ON(!sd || !tfmt)) return -EINVAL; memset(&sfmt, 0, sizeof(sfmt)); sfmt.format = *tfmt; sfmt.which = set ? V4L2_SUBDEV_FORMAT_ACTIVE : V4L2_SUBDEV_FORMAT_TRY; while (1) { ffmt = fimc_find_format(NULL, mf->code != 0 ? &mf->code : NULL, FMT_FLAGS_CAM, i++); if (ffmt == NULL) { /* * Notify user-space if common pixel code for * host and sensor does not exist. */ return -EINVAL; } mf->code = tfmt->code = ffmt->mbus_code; ret = v4l2_subdev_call(sd, pad, set_fmt, NULL, &sfmt); if (ret) return ret; if (mf->code != tfmt->code) { mf->code = 0; continue; } if (mf->width != tfmt->width || mf->height != tfmt->height) { u32 fcc = ffmt->fourcc; tfmt->width = mf->width; tfmt->height = mf->height; ffmt = fimc_capture_try_format(ctx, &tfmt->width, &tfmt->height, NULL, &fcc, FIMC_SD_PAD_SOURCE); if (ffmt && ffmt->mbus_code) mf->code = ffmt->mbus_code; if (mf->width != tfmt->width || mf->height != tfmt->height) continue; tfmt->code = mf->code; } if (csis) ret = v4l2_subdev_call(csis, pad, set_fmt, NULL, &sfmt); if (mf->code == tfmt->code && mf->width == tfmt->width && mf->height == tfmt->height) break; } if (fmt_id && ffmt) *fmt_id = ffmt; *tfmt = *mf; dbg("code: 0x%x, %dx%d, %p", mf->code, mf->width, mf->height, ffmt); return 0; } static int fimc_cap_g_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_ctx *ctx = fimc->vid_cap.ctx; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; return fimc_fill_format(&ctx->d_frame, f); } static int 
fimc_cap_try_fmt_mplane(struct file *file, void *fh, struct v4l2_format *f) { struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; struct fimc_dev *fimc = video_drvdata(file); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct v4l2_mbus_framefmt mf; struct fimc_fmt *ffmt = NULL; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; if (pix->pixelformat == V4L2_PIX_FMT_JPEG) { fimc_capture_try_format(ctx, &pix->width, &pix->height, NULL, &pix->pixelformat, FIMC_SD_PAD_SINK); ctx->s_frame.f_width = pix->width; ctx->s_frame.f_height = pix->height; } ffmt = fimc_capture_try_format(ctx, &pix->width, &pix->height, NULL, &pix->pixelformat, FIMC_SD_PAD_SOURCE); if (!ffmt) return -EINVAL; if (!fimc->vid_cap.user_subdev_api) { mf.width = pix->width; mf.height = pix->height; mf.code = ffmt->mbus_code; fimc_md_graph_lock(fimc); fimc_pipeline_try_format(ctx, &mf, &ffmt, false); fimc_md_graph_unlock(fimc); pix->width = mf.width; pix->height = mf.height; if (ffmt) pix->pixelformat = ffmt->fourcc; } fimc_adjust_mplane_format(ffmt, pix->width, pix->height, pix); return 0; } static void fimc_capture_mark_jpeg_xfer(struct fimc_ctx *ctx, bool jpeg) { ctx->scaler.enabled = !jpeg; fimc_ctrls_activate(ctx, !jpeg); if (jpeg) set_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state); else clear_bit(ST_CAPT_JPEG, &ctx->fimc_dev->state); } static int fimc_capture_set_format(struct fimc_dev *fimc, struct v4l2_format *f) { struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct v4l2_pix_format_mplane *pix = &f->fmt.pix_mp; struct v4l2_mbus_framefmt *mf = &fimc->vid_cap.mf; struct fimc_frame *ff = &ctx->d_frame; struct fimc_fmt *s_fmt = NULL; int ret, i; if (f->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; if (vb2_is_busy(&fimc->vid_cap.vbq)) return -EBUSY; /* Pre-configure format at camera interface input, for JPEG only */ if (pix->pixelformat == V4L2_PIX_FMT_JPEG) { fimc_capture_try_format(ctx, &pix->width, &pix->height, NULL, &pix->pixelformat, FIMC_SD_PAD_SINK); ctx->s_frame.f_width = 
pix->width; ctx->s_frame.f_height = pix->height; } /* Try the format at the scaler and the DMA output */ ff->fmt = fimc_capture_try_format(ctx, &pix->width, &pix->height, NULL, &pix->pixelformat, FIMC_SD_PAD_SOURCE); if (!ff->fmt) return -EINVAL; /* Update RGB Alpha control state and value range */ fimc_alpha_ctrl_update(ctx); /* Try to match format at the host and the sensor */ if (!fimc->vid_cap.user_subdev_api) { mf->code = ff->fmt->mbus_code; mf->width = pix->width; mf->height = pix->height; fimc_md_graph_lock(fimc); ret = fimc_pipeline_try_format(ctx, mf, &s_fmt, true); fimc_md_graph_unlock(fimc); if (ret) return ret; pix->width = mf->width; pix->height = mf->height; } fimc_adjust_mplane_format(ff->fmt, pix->width, pix->height, pix); for (i = 0; i < ff->fmt->colplanes; i++) ff->payload[i] = (pix->width * pix->height * ff->fmt->depth[i]) / 8; set_frame_bounds(ff, pix->width, pix->height); /* Reset the composition rectangle if not yet configured */ if (!(ctx->state & FIMC_DST_CROP)) set_frame_crop(ff, 0, 0, pix->width, pix->height); fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ff->fmt->color)); /* Reset cropping and set format at the camera interface input */ if (!fimc->vid_cap.user_subdev_api) { ctx->s_frame.fmt = s_fmt; set_frame_bounds(&ctx->s_frame, pix->width, pix->height); set_frame_crop(&ctx->s_frame, 0, 0, pix->width, pix->height); } return ret; } static int fimc_cap_s_fmt_mplane(struct file *file, void *priv, struct v4l2_format *f) { struct fimc_dev *fimc = video_drvdata(file); return fimc_capture_set_format(fimc, f); } static int fimc_cap_enum_input(struct file *file, void *priv, struct v4l2_input *i) { struct fimc_dev *fimc = video_drvdata(file); struct v4l2_subdev *sd = fimc->pipeline.sensor; if (i->index != 0) return -EINVAL; i->type = V4L2_INPUT_TYPE_CAMERA; if (sd) strlcpy(i->name, sd->name, sizeof(i->name)); return 0; } static int fimc_cap_s_input(struct file *file, void *priv, unsigned int i) { return i == 0 ? 
i : -EINVAL; } static int fimc_cap_g_input(struct file *file, void *priv, unsigned int *i) { *i = 0; return 0; } /** * fimc_pipeline_validate - check for formats inconsistencies * between source and sink pad of each link * * Return 0 if all formats match or -EPIPE otherwise. */ static int fimc_pipeline_validate(struct fimc_dev *fimc) { struct v4l2_subdev_format sink_fmt, src_fmt; struct fimc_vid_cap *vid_cap = &fimc->vid_cap; struct v4l2_subdev *sd; struct media_pad *pad; int ret; /* Start with the video capture node pad */ pad = media_entity_remote_source(&vid_cap->vd_pad); if (pad == NULL) return -EPIPE; /* FIMC.{N} subdevice */ sd = media_entity_to_v4l2_subdev(pad->entity); while (1) { /* Retrieve format at the sink pad */ pad = &sd->entity.pads[0]; if (!(pad->flags & MEDIA_PAD_FL_SINK)) break; /* Don't call FIMC subdev operation to avoid nested locking */ if (sd == fimc->vid_cap.subdev) { struct fimc_frame *ff = &vid_cap->ctx->s_frame; sink_fmt.format.width = ff->f_width; sink_fmt.format.height = ff->f_height; sink_fmt.format.code = ff->fmt ? 
ff->fmt->mbus_code : 0; } else { sink_fmt.pad = pad->index; sink_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &sink_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; } /* Retrieve format at the source pad */ pad = media_entity_remote_source(pad); if (pad == NULL || media_entity_type(pad->entity) != MEDIA_ENT_T_V4L2_SUBDEV) break; sd = media_entity_to_v4l2_subdev(pad->entity); src_fmt.pad = pad->index; src_fmt.which = V4L2_SUBDEV_FORMAT_ACTIVE; ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &src_fmt); if (ret < 0 && ret != -ENOIOCTLCMD) return -EPIPE; if (src_fmt.format.width != sink_fmt.format.width || src_fmt.format.height != sink_fmt.format.height || src_fmt.format.code != sink_fmt.format.code) return -EPIPE; } return 0; } static int fimc_cap_streamon(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_pipeline *p = &fimc->pipeline; int ret; if (fimc_capture_active(fimc)) return -EBUSY; media_entity_pipeline_start(&p->sensor->entity, p->pipe); if (fimc->vid_cap.user_subdev_api) { ret = fimc_pipeline_validate(fimc); if (ret) return ret; } return vb2_streamon(&fimc->vid_cap.vbq, type); } static int fimc_cap_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct fimc_dev *fimc = video_drvdata(file); struct v4l2_subdev *sd = fimc->pipeline.sensor; int ret; ret = vb2_streamoff(&fimc->vid_cap.vbq, type); if (ret == 0) media_entity_pipeline_stop(&sd->entity); return ret; } static int fimc_cap_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *reqbufs) { struct fimc_dev *fimc = video_drvdata(file); int ret = vb2_reqbufs(&fimc->vid_cap.vbq, reqbufs); if (!ret) fimc->vid_cap.reqbufs_count = reqbufs->count; return ret; } static int fimc_cap_querybuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct fimc_dev *fimc = video_drvdata(file); return vb2_querybuf(&fimc->vid_cap.vbq, buf); } static int fimc_cap_qbuf(struct 
file *file, void *priv, struct v4l2_buffer *buf) { struct fimc_dev *fimc = video_drvdata(file); return vb2_qbuf(&fimc->vid_cap.vbq, buf); } static int fimc_cap_dqbuf(struct file *file, void *priv, struct v4l2_buffer *buf) { struct fimc_dev *fimc = video_drvdata(file); return vb2_dqbuf(&fimc->vid_cap.vbq, buf, file->f_flags & O_NONBLOCK); } static int fimc_cap_create_bufs(struct file *file, void *priv, struct v4l2_create_buffers *create) { struct fimc_dev *fimc = video_drvdata(file); return vb2_create_bufs(&fimc->vid_cap.vbq, create); } static int fimc_cap_prepare_buf(struct file *file, void *priv, struct v4l2_buffer *b) { struct fimc_dev *fimc = video_drvdata(file); return vb2_prepare_buf(&fimc->vid_cap.vbq, b); } static int fimc_cap_g_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct fimc_frame *f = &ctx->s_frame; if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; switch (s->target) { case V4L2_SEL_TGT_COMPOSE_DEFAULT: case V4L2_SEL_TGT_COMPOSE_BOUNDS: f = &ctx->d_frame; case V4L2_SEL_TGT_CROP_BOUNDS: case V4L2_SEL_TGT_CROP_DEFAULT: s->r.left = 0; s->r.top = 0; s->r.width = f->o_width; s->r.height = f->o_height; return 0; case V4L2_SEL_TGT_COMPOSE_ACTIVE: f = &ctx->d_frame; case V4L2_SEL_TGT_CROP_ACTIVE: s->r.left = f->offs_h; s->r.top = f->offs_v; s->r.width = f->width; s->r.height = f->height; return 0; } return -EINVAL; } /* Return 1 if rectangle a is enclosed in rectangle b, or 0 otherwise. 
*/ int enclosed_rectangle(struct v4l2_rect *a, struct v4l2_rect *b) { if (a->left < b->left || a->top < b->top) return 0; if (a->left + a->width > b->left + b->width) return 0; if (a->top + a->height > b->top + b->height) return 0; return 1; } static int fimc_cap_s_selection(struct file *file, void *fh, struct v4l2_selection *s) { struct fimc_dev *fimc = video_drvdata(file); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct v4l2_rect rect = s->r; struct fimc_frame *f; unsigned long flags; unsigned int pad; if (s->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) return -EINVAL; switch (s->target) { case V4L2_SEL_TGT_COMPOSE_DEFAULT: case V4L2_SEL_TGT_COMPOSE_BOUNDS: case V4L2_SEL_TGT_COMPOSE_ACTIVE: f = &ctx->d_frame; pad = FIMC_SD_PAD_SOURCE; break; case V4L2_SEL_TGT_CROP_BOUNDS: case V4L2_SEL_TGT_CROP_DEFAULT: case V4L2_SEL_TGT_CROP_ACTIVE: f = &ctx->s_frame; pad = FIMC_SD_PAD_SINK; break; default: return -EINVAL; } fimc_capture_try_crop(ctx, &rect, pad); if (s->flags & V4L2_SEL_FLAG_LE && !enclosed_rectangle(&rect, &s->r)) return -ERANGE; if (s->flags & V4L2_SEL_FLAG_GE && !enclosed_rectangle(&s->r, &rect)) return -ERANGE; s->r = rect; spin_lock_irqsave(&fimc->slock, flags); set_frame_crop(f, s->r.left, s->r.top, s->r.width, s->r.height); spin_unlock_irqrestore(&fimc->slock, flags); set_bit(ST_CAPT_APPLY_CFG, &fimc->state); return 0; } static const struct v4l2_ioctl_ops fimc_capture_ioctl_ops = { .vidioc_querycap = fimc_vidioc_querycap_capture, .vidioc_enum_fmt_vid_cap_mplane = fimc_cap_enum_fmt_mplane, .vidioc_try_fmt_vid_cap_mplane = fimc_cap_try_fmt_mplane, .vidioc_s_fmt_vid_cap_mplane = fimc_cap_s_fmt_mplane, .vidioc_g_fmt_vid_cap_mplane = fimc_cap_g_fmt_mplane, .vidioc_reqbufs = fimc_cap_reqbufs, .vidioc_querybuf = fimc_cap_querybuf, .vidioc_qbuf = fimc_cap_qbuf, .vidioc_dqbuf = fimc_cap_dqbuf, .vidioc_prepare_buf = fimc_cap_prepare_buf, .vidioc_create_bufs = fimc_cap_create_bufs, .vidioc_streamon = fimc_cap_streamon, .vidioc_streamoff = fimc_cap_streamoff, 
.vidioc_g_selection = fimc_cap_g_selection, .vidioc_s_selection = fimc_cap_s_selection, .vidioc_enum_input = fimc_cap_enum_input, .vidioc_s_input = fimc_cap_s_input, .vidioc_g_input = fimc_cap_g_input, }; /* Capture subdev media entity operations */ static int fimc_link_setup(struct media_entity *entity, const struct media_pad *local, const struct media_pad *remote, u32 flags) { struct v4l2_subdev *sd = media_entity_to_v4l2_subdev(entity); struct fimc_dev *fimc = v4l2_get_subdevdata(sd); if (media_entity_type(remote->entity) != MEDIA_ENT_T_V4L2_SUBDEV) return -EINVAL; if (WARN_ON(fimc == NULL)) return 0; dbg("%s --> %s, flags: 0x%x. input: 0x%x", local->entity->name, remote->entity->name, flags, fimc->vid_cap.input); if (flags & MEDIA_LNK_FL_ENABLED) { if (fimc->vid_cap.input != 0) return -EBUSY; fimc->vid_cap.input = sd->grp_id; return 0; } fimc->vid_cap.input = 0; return 0; } static const struct media_entity_operations fimc_sd_media_ops = { .link_setup = fimc_link_setup, }; /** * fimc_sensor_notify - v4l2_device notification from a sensor subdev * @sd: pointer to a subdev generating the notification * @notification: the notification type, must be S5P_FIMC_TX_END_NOTIFY * @arg: pointer to an u32 type integer that stores the frame payload value * * The End Of Frame notification sent by sensor subdev in its still capture * mode. If there is only a single VSYNC generated by the sensor at the * beginning of a frame transmission, FIMC does not issue the LastIrq * (end of frame) interrupt. And this notification is used to complete the * frame capture and returning a buffer to user-space. Subdev drivers should * call this notification from their last 'End of frame capture' interrupt. 
*/ void fimc_sensor_notify(struct v4l2_subdev *sd, unsigned int notification, void *arg) { struct fimc_sensor_info *sensor; struct fimc_vid_buffer *buf; struct fimc_md *fmd; struct fimc_dev *fimc; unsigned long flags; if (sd == NULL) return; sensor = v4l2_get_subdev_hostdata(sd); fmd = entity_to_fimc_mdev(&sd->entity); spin_lock_irqsave(&fmd->slock, flags); fimc = sensor ? sensor->host : NULL; if (fimc && arg && notification == S5P_FIMC_TX_END_NOTIFY && test_bit(ST_CAPT_PEND, &fimc->state)) { unsigned long irq_flags; spin_lock_irqsave(&fimc->slock, irq_flags); if (!list_empty(&fimc->vid_cap.active_buf_q)) { buf = list_entry(fimc->vid_cap.active_buf_q.next, struct fimc_vid_buffer, list); vb2_set_plane_payload(&buf->vb, 0, *((u32 *)arg)); } fimc_capture_irq_handler(fimc, true); fimc_deactivate_capture(fimc); spin_unlock_irqrestore(&fimc->slock, irq_flags); } spin_unlock_irqrestore(&fmd->slock, flags); } static int fimc_subdev_enum_mbus_code(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_mbus_code_enum *code) { struct fimc_fmt *fmt; fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, code->index); if (!fmt) return -EINVAL; code->code = fmt->mbus_code; return 0; } static int fimc_subdev_get_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt) { struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct v4l2_mbus_framefmt *mf; struct fimc_frame *ff; if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { mf = v4l2_subdev_get_try_format(fh, fmt->pad); fmt->format = *mf; return 0; } mf = &fmt->format; mf->colorspace = V4L2_COLORSPACE_JPEG; ff = fmt->pad == FIMC_SD_PAD_SINK ? 
&ctx->s_frame : &ctx->d_frame; mutex_lock(&fimc->lock); /* The pixel code is same on both input and output pad */ if (!WARN_ON(ctx->s_frame.fmt == NULL)) mf->code = ctx->s_frame.fmt->mbus_code; mf->width = ff->f_width; mf->height = ff->f_height; mutex_unlock(&fimc->lock); return 0; } static int fimc_subdev_set_fmt(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt) { struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct v4l2_mbus_framefmt *mf = &fmt->format; struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct fimc_frame *ff; struct fimc_fmt *ffmt; dbg("pad%d: code: 0x%x, %dx%d", fmt->pad, mf->code, mf->width, mf->height); if (fmt->pad == FIMC_SD_PAD_SOURCE && vb2_is_busy(&fimc->vid_cap.vbq)) return -EBUSY; mutex_lock(&fimc->lock); ffmt = fimc_capture_try_format(ctx, &mf->width, &mf->height, &mf->code, NULL, fmt->pad); mutex_unlock(&fimc->lock); mf->colorspace = V4L2_COLORSPACE_JPEG; if (fmt->which == V4L2_SUBDEV_FORMAT_TRY) { mf = v4l2_subdev_get_try_format(fh, fmt->pad); *mf = fmt->format; return 0; } /* Update RGB Alpha control state and value range */ fimc_alpha_ctrl_update(ctx); fimc_capture_mark_jpeg_xfer(ctx, fimc_fmt_is_jpeg(ffmt->color)); ff = fmt->pad == FIMC_SD_PAD_SINK ? &ctx->s_frame : &ctx->d_frame; mutex_lock(&fimc->lock); set_frame_bounds(ff, mf->width, mf->height); fimc->vid_cap.mf = *mf; ff->fmt = ffmt; /* Reset the crop rectangle if required. 
*/ if (!(fmt->pad == FIMC_SD_PAD_SOURCE && (ctx->state & FIMC_DST_CROP))) set_frame_crop(ff, 0, 0, mf->width, mf->height); if (fmt->pad == FIMC_SD_PAD_SINK) ctx->state &= ~FIMC_DST_CROP; mutex_unlock(&fimc->lock); return 0; } static int fimc_subdev_get_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_crop *crop) { struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct v4l2_rect *r = &crop->rect; struct fimc_frame *ff; if (crop->which == V4L2_SUBDEV_FORMAT_TRY) { crop->rect = *v4l2_subdev_get_try_crop(fh, crop->pad); return 0; } ff = crop->pad == FIMC_SD_PAD_SINK ? &ctx->s_frame : &ctx->d_frame; mutex_lock(&fimc->lock); r->left = ff->offs_h; r->top = ff->offs_v; r->width = ff->width; r->height = ff->height; mutex_unlock(&fimc->lock); dbg("ff:%p, pad%d: l:%d, t:%d, %dx%d, f_w: %d, f_h: %d", ff, crop->pad, r->left, r->top, r->width, r->height, ff->f_width, ff->f_height); return 0; } static int fimc_subdev_set_crop(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh, struct v4l2_subdev_crop *crop) { struct fimc_dev *fimc = v4l2_get_subdevdata(sd); struct fimc_ctx *ctx = fimc->vid_cap.ctx; struct v4l2_rect *r = &crop->rect; struct fimc_frame *ff; unsigned long flags; dbg("(%d,%d)/%dx%d", r->left, r->top, r->width, r->height); ff = crop->pad == FIMC_SD_PAD_SOURCE ? 
&ctx->d_frame : &ctx->s_frame; mutex_lock(&fimc->lock); fimc_capture_try_crop(ctx, r, crop->pad); if (crop->which == V4L2_SUBDEV_FORMAT_TRY) { mutex_unlock(&fimc->lock); *v4l2_subdev_get_try_crop(fh, crop->pad) = *r; return 0; } spin_lock_irqsave(&fimc->slock, flags); set_frame_crop(ff, r->left, r->top, r->width, r->height); if (crop->pad == FIMC_SD_PAD_SOURCE) ctx->state |= FIMC_DST_CROP; set_bit(ST_CAPT_APPLY_CFG, &fimc->state); spin_unlock_irqrestore(&fimc->slock, flags); dbg("pad%d: (%d,%d)/%dx%d", crop->pad, r->left, r->top, r->width, r->height); mutex_unlock(&fimc->lock); return 0; } static struct v4l2_subdev_pad_ops fimc_subdev_pad_ops = { .enum_mbus_code = fimc_subdev_enum_mbus_code, .get_fmt = fimc_subdev_get_fmt, .set_fmt = fimc_subdev_set_fmt, .get_crop = fimc_subdev_get_crop, .set_crop = fimc_subdev_set_crop, }; static struct v4l2_subdev_ops fimc_subdev_ops = { .pad = &fimc_subdev_pad_ops, }; static int fimc_create_capture_subdev(struct fimc_dev *fimc, struct v4l2_device *v4l2_dev) { struct v4l2_subdev *sd; int ret; sd = kzalloc(sizeof(*sd), GFP_KERNEL); if (!sd) return -ENOMEM; v4l2_subdev_init(sd, &fimc_subdev_ops); sd->flags = V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(sd->name, sizeof(sd->name), "FIMC.%d", fimc->pdev->id); fimc->vid_cap.sd_pads[FIMC_SD_PAD_SINK].flags = MEDIA_PAD_FL_SINK; fimc->vid_cap.sd_pads[FIMC_SD_PAD_SOURCE].flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_init(&sd->entity, FIMC_SD_PADS_NUM, fimc->vid_cap.sd_pads, 0); if (ret) goto me_err; ret = v4l2_device_register_subdev(v4l2_dev, sd); if (ret) goto sd_err; fimc->vid_cap.subdev = sd; v4l2_set_subdevdata(sd, fimc); sd->entity.ops = &fimc_sd_media_ops; return 0; sd_err: media_entity_cleanup(&sd->entity); me_err: kfree(sd); return ret; } static void fimc_destroy_capture_subdev(struct fimc_dev *fimc) { struct v4l2_subdev *sd = fimc->vid_cap.subdev; if (!sd) return; media_entity_cleanup(&sd->entity); v4l2_device_unregister_subdev(sd); kfree(sd); fimc->vid_cap.subdev = NULL; } /* Set 
default format at the sensor and host interface */ static int fimc_capture_set_default_format(struct fimc_dev *fimc) { struct v4l2_format fmt = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE, .fmt.pix_mp = { .width = 640, .height = 480, .pixelformat = V4L2_PIX_FMT_YUYV, .field = V4L2_FIELD_NONE, .colorspace = V4L2_COLORSPACE_JPEG, }, }; return fimc_capture_set_format(fimc, &fmt); } /* fimc->lock must be already initialized */ int fimc_register_capture_device(struct fimc_dev *fimc, struct v4l2_device *v4l2_dev) { struct video_device *vfd; struct fimc_vid_cap *vid_cap; struct fimc_ctx *ctx; struct vb2_queue *q; int ret = -ENOMEM; ctx = kzalloc(sizeof *ctx, GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->fimc_dev = fimc; ctx->in_path = FIMC_CAMERA; ctx->out_path = FIMC_DMA; ctx->state = FIMC_CTX_CAP; ctx->s_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0); ctx->d_frame.fmt = fimc_find_format(NULL, NULL, FMT_FLAGS_CAM, 0); vfd = video_device_alloc(); if (!vfd) { v4l2_err(v4l2_dev, "Failed to allocate video device\n"); goto err_vd_alloc; } snprintf(vfd->name, sizeof(vfd->name), "%s.capture", dev_name(&fimc->pdev->dev)); vfd->fops = &fimc_capture_fops; vfd->ioctl_ops = &fimc_capture_ioctl_ops; vfd->v4l2_dev = v4l2_dev; vfd->minor = -1; vfd->release = video_device_release; vfd->lock = &fimc->lock; video_set_drvdata(vfd, fimc); vid_cap = &fimc->vid_cap; vid_cap->vfd = vfd; vid_cap->active_buf_cnt = 0; vid_cap->reqbufs_count = 0; vid_cap->refcnt = 0; INIT_LIST_HEAD(&vid_cap->pending_buf_q); INIT_LIST_HEAD(&vid_cap->active_buf_q); spin_lock_init(&ctx->slock); vid_cap->ctx = ctx; q = &fimc->vid_cap.vbq; memset(q, 0, sizeof(*q)); q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE; q->io_modes = VB2_MMAP | VB2_USERPTR; q->drv_priv = fimc->vid_cap.ctx; q->ops = &fimc_capture_qops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct fimc_vid_buffer); vb2_queue_init(q); fimc->vid_cap.vd_pad.flags = MEDIA_PAD_FL_SINK; ret = media_entity_init(&vfd->entity, 1, 
&fimc->vid_cap.vd_pad, 0); if (ret) goto err_ent; ret = fimc_create_capture_subdev(fimc, v4l2_dev); if (ret) goto err_sd_reg; vfd->ctrl_handler = &ctx->ctrl_handler; return 0; err_sd_reg: media_entity_cleanup(&vfd->entity); err_ent: video_device_release(vfd); err_vd_alloc: kfree(ctx); return ret; } void fimc_unregister_capture_device(struct fimc_dev *fimc) { struct video_device *vfd = fimc->vid_cap.vfd; if (vfd) { media_entity_cleanup(&vfd->entity); /* Can also be called if video device was not registered */ video_unregister_device(vfd); } fimc_destroy_capture_subdev(fimc); kfree(fimc->vid_cap.ctx); fimc->vid_cap.ctx = NULL; }
gpl-2.0
mrjaydee82/SinLessKernelNew
arch/arm/mach-omap2/powerdomains2xxx_data.c
5153
2982
/* * OMAP2XXX powerdomain definitions * * Copyright (C) 2007-2008, 2011 Texas Instruments, Inc. * Copyright (C) 2007-2011 Nokia Corporation * * Paul Walmsley, Jouni Högander * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/init.h> #include "powerdomain.h" #include "powerdomains2xxx_3xxx_data.h" #include "prcm-common.h" #include "prm2xxx_3xxx.h" #include "prm-regbits-24xx.h" /* 24XX powerdomains and dependencies */ /* Powerdomains */ static struct powerdomain dsp_pwrdm = { .name = "dsp_pwrdm", .prcm_offs = OMAP24XX_DSP_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_RET, }, .pwrsts_mem_on = { [0] = PWRSTS_ON, }, .voltdm = { .name = "core" }, }; static struct powerdomain mpu_24xx_pwrdm = { .name = "mpu_pwrdm", .prcm_offs = MPU_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_OFF_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_RET, }, .pwrsts_mem_on = { [0] = PWRSTS_ON, }, .voltdm = { .name = "core" }, }; static struct powerdomain core_24xx_pwrdm = { .name = "core_pwrdm", .prcm_offs = CORE_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .banks = 3, .pwrsts_mem_ret = { [0] = PWRSTS_OFF_RET, /* MEM1RETSTATE */ [1] = PWRSTS_OFF_RET, /* MEM2RETSTATE */ [2] = PWRSTS_OFF_RET, /* MEM3RETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_OFF_RET_ON, /* MEM1ONSTATE */ [1] = PWRSTS_OFF_RET_ON, /* MEM2ONSTATE */ [2] = PWRSTS_OFF_RET_ON, /* MEM3ONSTATE */ }, .voltdm = { .name = "core" }, }; /* * 2430-specific powerdomains */ /* XXX 2430 KILLDOMAINWKUP bit? 
No current users apparently */ static struct powerdomain mdm_pwrdm = { .name = "mdm_pwrdm", .prcm_offs = OMAP2430_MDM_MOD, .pwrsts = PWRSTS_OFF_RET_ON, .pwrsts_logic_ret = PWRSTS_RET, .banks = 1, .pwrsts_mem_ret = { [0] = PWRSTS_RET, /* MEMRETSTATE */ }, .pwrsts_mem_on = { [0] = PWRSTS_ON, /* MEMONSTATE */ }, .voltdm = { .name = "core" }, }; /* * */ static struct powerdomain *powerdomains_omap24xx[] __initdata = { &wkup_omap2_pwrdm, &gfx_omap2_pwrdm, &dsp_pwrdm, &mpu_24xx_pwrdm, &core_24xx_pwrdm, NULL }; static struct powerdomain *powerdomains_omap2430[] __initdata = { &mdm_pwrdm, NULL }; void __init omap242x_powerdomains_init(void) { if (!cpu_is_omap2420()) return; pwrdm_register_platform_funcs(&omap2_pwrdm_operations); pwrdm_register_pwrdms(powerdomains_omap24xx); pwrdm_complete_init(); } void __init omap243x_powerdomains_init(void) { if (!cpu_is_omap2430()) return; pwrdm_register_platform_funcs(&omap2_pwrdm_operations); pwrdm_register_pwrdms(powerdomains_omap24xx); pwrdm_register_pwrdms(powerdomains_omap2430); pwrdm_complete_init(); }
gpl-2.0
CyanogenMod/android_kernel_oppo_find5
drivers/gpu/drm/nouveau/nva3_copy.c
5921
6445
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

/*
 * Driver for the NVA3 (GT215+) PCOPY asynchronous copy engine.  The
 * engine runs microcode (nva3_copy.fuc.h) which is uploaded at init
 * time; the 0x104xxx register range addressed throughout is the PCOPY
 * falcon's register block.
 */

#include <linux/firmware.h>
#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_util.h"
#include "nouveau_vm.h"
#include "nouveau_ramht.h"
#include "nva3_copy.fuc.h"

/* Per-device engine object; only wraps the common exec-engine base. */
struct nva3_copy_engine {
	struct nouveau_exec_engine base;
};

/*
 * Allocate a per-channel PCOPY context and describe it in the channel's
 * instance memory (ramin words 0xc0-0xd4), then publish it in
 * chan->engctx[engine].  Returns 0 or the nouveau_gpuobj_new() error.
 */
static int
nva3_copy_context_new(struct nouveau_channel *chan, int engine)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramin = chan->ramin;
	struct nouveau_gpuobj *ctx = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* 256-byte zeroed context buffer; freed zeroed as well */
	ret = nouveau_gpuobj_new(dev, chan, 256, 0, NVOBJ_FLAG_ZERO_ALLOC |
				 NVOBJ_FLAG_ZERO_FREE, &ctx);
	if (ret)
		return ret;

	/* context descriptor: flags, end address, start address
	 * (0x00190000 presumably encodes target/validity bits — see the
	 * hw docs for the instmem context DMA layout) */
	nv_wo32(ramin, 0xc0, 0x00190000);
	nv_wo32(ramin, 0xc4, ctx->vinst + ctx->size - 1);
	nv_wo32(ramin, 0xc8, ctx->vinst);
	nv_wo32(ramin, 0xcc, 0x00000000);
	nv_wo32(ramin, 0xd0, 0x00000000);
	nv_wo32(ramin, 0xd4, 0x00000000);
	dev_priv->engine.instmem.flush(dev);

	atomic_inc(&chan->vm->engref[engine]);
	chan->engctx[engine] = ctx;
	return 0;
}

/*
 * Create a software object for the copy class.  The falcon itself needs
 * no object, but the ramht lookup code does, so the context gpuobj is
 * reused as the ramht entry.
 */
static int
nva3_copy_object_new(struct nouveau_channel *chan, int engine,
		     u32 handle, u16 class)
{
	struct nouveau_gpuobj *ctx = chan->engctx[engine];

	/* fuc engine doesn't need an object, our ramht code does.. */
	ctx->engine = 3;
	ctx->class  = class;
	return nouveau_ramht_insert(chan, handle, ctx);
}

/*
 * Tear down a channel's PCOPY context: evict it from the engine if it
 * is the current or next context, scrub the instance-memory descriptor,
 * and drop the context reference.
 */
static void
nva3_copy_context_del(struct nouveau_channel *chan, int engine)
{
	struct nouveau_gpuobj *ctx = chan->engctx[engine];
	struct drm_device *dev = chan->dev;
	u32 inst;

	/* instance address of this channel, tagged with the 'loaded' bit
	 * used by the engine's current/next context registers */
	inst  = (chan->ramin->vinst >> 12);
	inst |= 0x40000000;

	/* disable fifo access */
	nv_wr32(dev, 0x104048, 0x00000000);
	/* mark channel as unloaded if it's currently active */
	if (nv_rd32(dev, 0x104050) == inst)
		nv_mask(dev, 0x104050, 0x40000000, 0x00000000);
	/* mark next channel as invalid if it's about to be loaded */
	if (nv_rd32(dev, 0x104054) == inst)
		nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
	/* restore fifo access */
	nv_wr32(dev, 0x104048, 0x00000003);

	/* clear the context descriptor written by context_new() */
	for (inst = 0xc0; inst <= 0xd4; inst += 4)
		nv_wo32(chan->ramin, inst, 0x00000000);

	/* drops the reference and NULLs ctx ... */
	nouveau_gpuobj_ref(NULL, &ctx);

	atomic_dec(&chan->vm->engref[engine]);
	/* ... so this stores NULL, marking the engine slot free */
	chan->engctx[engine] = ctx;
}

/* Flush the VM TLBs for the PCOPY engine (engine id 0x0d on nv50 VM). */
static void
nva3_copy_tlb_flush(struct drm_device *dev, int engine)
{
	nv50_vm_flush_engine(dev, 0x0d);
}

/*
 * Reset the engine, upload the falcon microcode (data then code
 * segments), and start it running.  Always returns 0.
 */
static int
nva3_copy_init(struct drm_device *dev, int engine)
{
	int i;

	/* pulse the engine's enable bit to reset it */
	nv_mask(dev, 0x000200, 0x00002000, 0x00000000);
	nv_mask(dev, 0x000200, 0x00002000, 0x00002000);
	nv_wr32(dev, 0x104014, 0xffffffff); /* disable all interrupts */

	/* upload ucode */
	nv_wr32(dev, 0x1041c0, 0x01000000);
	for (i = 0; i < sizeof(nva3_pcopy_data) / 4; i++)
		nv_wr32(dev, 0x1041c4, nva3_pcopy_data[i]);

	/* code is uploaded in 256-byte (64-word) pages; the page index
	 * register must be set at each page boundary */
	nv_wr32(dev, 0x104180, 0x01000000);
	for (i = 0; i < sizeof(nva3_pcopy_code) / 4; i++) {
		if ((i & 0x3f) == 0)
			nv_wr32(dev, 0x104188, i >> 6);
		nv_wr32(dev, 0x104184, nva3_pcopy_code[i]);
	}

	/* start it running */
	nv_wr32(dev, 0x10410c, 0x00000000);
	nv_wr32(dev, 0x104104, 0x00000000); /* ENTRY */
	nv_wr32(dev, 0x104100, 0x00000002); /* TRIGGER */
	return 0;
}

/*
 * Quiesce the engine: stop fifo access, wait for it to idle, unload the
 * current context and mask all interrupts.  Always returns 0.
 */
static int
nva3_copy_fini(struct drm_device *dev, int engine, bool suspend)
{
	nv_mask(dev, 0x104048, 0x00000003, 0x00000000);

	/* trigger fuc context unload */
	nv_wait(dev, 0x104008, 0x0000000c, 0x00000000);
	nv_mask(dev, 0x104054, 0x40000000, 0x00000000);
	nv_wr32(dev, 0x104000, 0x00000008);
	nv_wait(dev, 0x104008, 0x00000008, 0x00000000);

	nv_wr32(dev, 0x104014, 0xffffffff);
	return 0;
}

/* Decodes the DISPATCH_ERROR status code for interrupt reporting. */
static struct nouveau_enum nva3_copy_isr_error_name[] = {
	{ 0x0001, "ILLEGAL_MTHD" },
	{ 0x0002, "INVALID_ENUM" },
	{ 0x0003, "INVALID_BITFIELD" },
	{}
};

/*
 * PCOPY interrupt handler: decodes and logs DISPATCH_ERROR interrupts,
 * acks everything, and reports any pending VM fault.
 */
static void
nva3_copy_isr(struct drm_device *dev)
{
	u32 dispatch = nv_rd32(dev, 0x10401c);
	/* pending interrupts, masked by enabled and not-routed bits */
	u32 stat = nv_rd32(dev, 0x104008) & dispatch & ~(dispatch >> 16);
	u32 inst = nv_rd32(dev, 0x104050) & 0x3fffffff;
	u32 ssta = nv_rd32(dev, 0x104040) & 0x0000ffff;
	u32 addr = nv_rd32(dev, 0x104040) >> 16;
	u32 mthd = (addr & 0x07ff) << 2;
	u32 subc = (addr & 0x3800) >> 11;
	u32 data = nv_rd32(dev, 0x104044);
	int chid = nv50_graph_isr_chid(dev, inst);

	if (stat & 0x00000040) {
		NV_INFO(dev, "PCOPY: DISPATCH_ERROR [");
		nouveau_enum_print(nva3_copy_isr_error_name, ssta);
		printk("] ch %d [0x%08x] subc %d mthd 0x%04x data 0x%08x\n",
			chid, inst, subc, mthd, data);
		nv_wr32(dev, 0x104004, 0x00000040); /* ack */
		stat &= ~0x00000040;
	}

	if (stat) {
		NV_INFO(dev, "PCOPY: unhandled intr 0x%08x\n", stat);
		nv_wr32(dev, 0x104004, stat); /* ack anyway */
	}

	nv50_fb_vm_trap(dev, 1);
}

/* Unregister the IRQ and engine hooks and free the engine object. */
static void
nva3_copy_destroy(struct drm_device *dev, int engine)
{
	struct nva3_copy_engine *pcopy = nv_engine(dev, engine);

	nouveau_irq_unregister(dev, 22);

	NVOBJ_ENGINE_DEL(dev, COPY0);
	kfree(pcopy);
}

/*
 * Allocate the PCOPY engine object, wire up its callbacks, register its
 * interrupt (line 22) and advertise class 0x85b5.  Returns 0 or -ENOMEM.
 */
int
nva3_copy_create(struct drm_device *dev)
{
	struct nva3_copy_engine *pcopy;

	pcopy = kzalloc(sizeof(*pcopy), GFP_KERNEL);
	if (!pcopy)
		return -ENOMEM;

	pcopy->base.destroy = nva3_copy_destroy;
	pcopy->base.init = nva3_copy_init;
	pcopy->base.fini = nva3_copy_fini;
	pcopy->base.context_new = nva3_copy_context_new;
	pcopy->base.context_del = nva3_copy_context_del;
	pcopy->base.object_new = nva3_copy_object_new;
	pcopy->base.tlb_flush = nva3_copy_tlb_flush;

	nouveau_irq_register(dev, 22, nva3_copy_isr);

	NVOBJ_ENGINE_ADD(dev, COPY0, &pcopy->base);
	NVOBJ_CLASS(dev, 0x85b5, COPY0);
	return 0;
}
gpl-2.0
yeewang/linux-sunxi
drivers/mtd/redboot.c
7457
8637
/* * Parse RedBoot-style Flash Image System (FIS) tables and * produce a Linux partition array to match. * * Copyright © 2001 Red Hat UK Limited * Copyright © 2001-2010 David Woodhouse <dwmw2@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/vmalloc.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/module.h> struct fis_image_desc { unsigned char name[16]; // Null terminated name uint32_t flash_base; // Address within FLASH of image uint32_t mem_base; // Address in memory where it executes uint32_t size; // Length of image uint32_t entry_point; // Execution entry point uint32_t data_length; // Length of actual data unsigned char _pad[256-(16+7*sizeof(uint32_t))]; uint32_t desc_cksum; // Checksum over image descriptor uint32_t file_cksum; // Checksum over image data }; struct fis_list { struct fis_image_desc *img; struct fis_list *next; }; static int directory = CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK; module_param(directory, int, 0); static inline int redboot_checksum(struct fis_image_desc *img) { /* RedBoot doesn't actually write the desc_cksum field yet AFAICT */ return 1; } static int parse_redboot_partitions(struct mtd_info *master, struct mtd_partition **pparts, struct mtd_part_parser_data 
*data) { int nrparts = 0; struct fis_image_desc *buf; struct mtd_partition *parts; struct fis_list *fl = NULL, *tmp_fl; int ret, i; size_t retlen; char *names; char *nullname; int namelen = 0; int nulllen = 0; int numslots; unsigned long offset; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED static char nullstring[] = "unallocated"; #endif if ( directory < 0 ) { offset = master->size + directory * master->erasesize; while (mtd_block_isbad(master, offset)) { if (!offset) { nogood: printk(KERN_NOTICE "Failed to find a non-bad block to check for RedBoot partition table\n"); return -EIO; } offset -= master->erasesize; } } else { offset = directory * master->erasesize; while (mtd_block_isbad(master, offset)) { offset += master->erasesize; if (offset == master->size) goto nogood; } } buf = vmalloc(master->erasesize); if (!buf) return -ENOMEM; printk(KERN_NOTICE "Searching for RedBoot partition table in %s at offset 0x%lx\n", master->name, offset); ret = mtd_read(master, offset, master->erasesize, &retlen, (void *)buf); if (ret) goto out; if (retlen != master->erasesize) { ret = -EIO; goto out; } numslots = (master->erasesize / sizeof(struct fis_image_desc)); for (i = 0; i < numslots; i++) { if (!memcmp(buf[i].name, "FIS directory", 14)) { /* This is apparently the FIS directory entry for the * FIS directory itself. The FIS directory size is * one erase block; if the buf[i].size field is * swab32(erasesize) then we know we are looking at * a byte swapped FIS directory - swap all the entries! * (NOTE: this is 'size' not 'data_length'; size is * the full size of the entry.) */ /* RedBoot can combine the FIS directory and config partitions into a single eraseblock; we assume wrong-endian if either the swapped 'size' matches the eraseblock size precisely, or if the swapped size actually fits in an eraseblock while the unswapped size doesn't. 
*/ if (swab32(buf[i].size) == master->erasesize || (buf[i].size > master->erasesize && swab32(buf[i].size) < master->erasesize)) { int j; /* Update numslots based on actual FIS directory size */ numslots = swab32(buf[i].size) / sizeof (struct fis_image_desc); for (j = 0; j < numslots; ++j) { /* A single 0xff denotes a deleted entry. * Two of them in a row is the end of the table. */ if (buf[j].name[0] == 0xff) { if (buf[j].name[1] == 0xff) { break; } else { continue; } } /* The unsigned long fields were written with the * wrong byte sex, name and pad have no byte sex. */ swab32s(&buf[j].flash_base); swab32s(&buf[j].mem_base); swab32s(&buf[j].size); swab32s(&buf[j].entry_point); swab32s(&buf[j].data_length); swab32s(&buf[j].desc_cksum); swab32s(&buf[j].file_cksum); } } else if (buf[i].size < master->erasesize) { /* Update numslots based on actual FIS directory size */ numslots = buf[i].size / sizeof(struct fis_image_desc); } break; } } if (i == numslots) { /* Didn't find it */ printk(KERN_NOTICE "No RedBoot partition table detected in %s\n", master->name); ret = 0; goto out; } for (i = 0; i < numslots; i++) { struct fis_list *new_fl, **prev; if (buf[i].name[0] == 0xff) { if (buf[i].name[1] == 0xff) { break; } else { continue; } } if (!redboot_checksum(&buf[i])) break; new_fl = kmalloc(sizeof(struct fis_list), GFP_KERNEL); namelen += strlen(buf[i].name)+1; if (!new_fl) { ret = -ENOMEM; goto out; } new_fl->img = &buf[i]; if (data && data->origin) buf[i].flash_base -= data->origin; else buf[i].flash_base &= master->size-1; /* I'm sure the JFFS2 code has done me permanent damage. 
* I now think the following is _normal_ */ prev = &fl; while(*prev && (*prev)->img->flash_base < new_fl->img->flash_base) prev = &(*prev)->next; new_fl->next = *prev; *prev = new_fl; nrparts++; } #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED if (fl->img->flash_base) { nrparts++; nulllen = sizeof(nullstring); } for (tmp_fl = fl; tmp_fl->next; tmp_fl = tmp_fl->next) { if (tmp_fl->img->flash_base + tmp_fl->img->size + master->erasesize <= tmp_fl->next->img->flash_base) { nrparts++; nulllen = sizeof(nullstring); } } #endif parts = kzalloc(sizeof(*parts)*nrparts + nulllen + namelen, GFP_KERNEL); if (!parts) { ret = -ENOMEM; goto out; } nullname = (char *)&parts[nrparts]; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED if (nulllen > 0) { strcpy(nullname, nullstring); } #endif names = nullname + nulllen; i=0; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED if (fl->img->flash_base) { parts[0].name = nullname; parts[0].size = fl->img->flash_base; parts[0].offset = 0; i++; } #endif for ( ; i<nrparts; i++) { parts[i].size = fl->img->size; parts[i].offset = fl->img->flash_base; parts[i].name = names; strcpy(names, fl->img->name); #ifdef CONFIG_MTD_REDBOOT_PARTS_READONLY if (!memcmp(names, "RedBoot", 8) || !memcmp(names, "RedBoot config", 15) || !memcmp(names, "FIS directory", 14)) { parts[i].mask_flags = MTD_WRITEABLE; } #endif names += strlen(names)+1; #ifdef CONFIG_MTD_REDBOOT_PARTS_UNALLOCATED if(fl->next && fl->img->flash_base + fl->img->size + master->erasesize <= fl->next->img->flash_base) { i++; parts[i].offset = parts[i-1].size + parts[i-1].offset; parts[i].size = fl->next->img->flash_base - parts[i].offset; parts[i].name = nullname; } #endif tmp_fl = fl; fl = fl->next; kfree(tmp_fl); } ret = nrparts; *pparts = parts; out: while (fl) { struct fis_list *old = fl; fl = fl->next; kfree(old); } vfree(buf); return ret; } static struct mtd_part_parser redboot_parser = { .owner = THIS_MODULE, .parse_fn = parse_redboot_partitions, .name = "RedBoot", }; /* mtd parsers will request the 
module by parser name */ MODULE_ALIAS("RedBoot"); static int __init redboot_parser_init(void) { return register_mtd_parser(&redboot_parser); } static void __exit redboot_parser_exit(void) { deregister_mtd_parser(&redboot_parser); } module_init(redboot_parser_init); module_exit(redboot_parser_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("David Woodhouse <dwmw2@infradead.org>"); MODULE_DESCRIPTION("Parsing code for RedBoot Flash Image System (FIS) tables");
gpl-2.0
jmztaylor/android_kernel_htc_k2plccl
sound/soc/pxa/spitz.c
7713
9626
/* * spitz.c -- SoC audio for Sharp SL-Cxx00 models Spitz, Borzoi and Akita * * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * * Authors: Liam Girdwood <lrg@slimlogic.co.uk> * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include <mach/spitz.h> #include "../codecs/wm8750.h" #include "pxa2xx-i2s.h" #define SPITZ_HP 0 #define SPITZ_MIC 1 #define SPITZ_LINE 2 #define SPITZ_HEADSET 3 #define SPITZ_HP_OFF 4 #define SPITZ_SPK_ON 0 #define SPITZ_SPK_OFF 1 /* audio clock in Hz - rounded from 12.235MHz */ #define SPITZ_AUDIO_CLOCK 12288000 static int spitz_jack_func; static int spitz_spk_func; static int spitz_mic_gpio; static void spitz_ext_control(struct snd_soc_dapm_context *dapm) { if (spitz_spk_func == SPITZ_SPK_ON) snd_soc_dapm_enable_pin(dapm, "Ext Spk"); else snd_soc_dapm_disable_pin(dapm, "Ext Spk"); /* set up jack connection */ switch (spitz_jack_func) { case SPITZ_HP: /* enable and unmute hp jack, disable mic bias */ snd_soc_dapm_disable_pin(dapm, "Headset Jack"); snd_soc_dapm_disable_pin(dapm, "Mic Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 1); gpio_set_value(SPITZ_GPIO_MUTE_R, 1); break; case SPITZ_MIC: /* enable mic jack and bias, mute hp */ snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); snd_soc_dapm_enable_pin(dapm, "Mic Jack"); 
gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 0); break; case SPITZ_LINE: /* enable line jack, disable mic bias and mute hp */ snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); snd_soc_dapm_disable_pin(dapm, "Mic Jack"); snd_soc_dapm_enable_pin(dapm, "Line Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 0); break; case SPITZ_HEADSET: /* enable and unmute headset jack enable mic bias, mute L hp */ snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_enable_pin(dapm, "Mic Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); snd_soc_dapm_enable_pin(dapm, "Headset Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 1); break; case SPITZ_HP_OFF: /* jack removed, everything off */ snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); snd_soc_dapm_disable_pin(dapm, "Mic Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 0); break; } snd_soc_dapm_sync(dapm); } static int spitz_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; mutex_lock(&codec->mutex); /* check the jack status at stream startup */ spitz_ext_control(&codec->dapm); mutex_unlock(&codec->mutex); return 0; } static int spitz_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int clk = 0; int ret = 0; switch (params_rate(params)) { case 8000: case 16000: case 48000: case 96000: clk = 12288000; break; case 11025: case 22050: case 44100: clk = 11289600; break; } /* set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk, 
SND_SOC_CLOCK_IN); if (ret < 0) return ret; /* set the I2S system clock as input (unused) */ ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0, SND_SOC_CLOCK_IN); if (ret < 0) return ret; return 0; } static struct snd_soc_ops spitz_ops = { .startup = spitz_startup, .hw_params = spitz_hw_params, }; static int spitz_get_jack(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = spitz_jack_func; return 0; } static int spitz_set_jack(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); if (spitz_jack_func == ucontrol->value.integer.value[0]) return 0; spitz_jack_func = ucontrol->value.integer.value[0]; spitz_ext_control(&card->dapm); return 1; } static int spitz_get_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = spitz_spk_func; return 0; } static int spitz_set_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); if (spitz_spk_func == ucontrol->value.integer.value[0]) return 0; spitz_spk_func = ucontrol->value.integer.value[0]; spitz_ext_control(&card->dapm); return 1; } static int spitz_mic_bias(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { gpio_set_value_cansleep(spitz_mic_gpio, SND_SOC_DAPM_EVENT_ON(event)); return 0; } /* spitz machine dapm widgets */ static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Mic Jack", spitz_mic_bias), SND_SOC_DAPM_SPK("Ext Spk", NULL), SND_SOC_DAPM_LINE("Line Jack", NULL), /* headset is a mic and mono headphone */ SND_SOC_DAPM_HP("Headset Jack", NULL), }; /* Spitz machine audio_map */ static const struct snd_soc_dapm_route spitz_audio_map[] = { /* headphone connected to LOUT1, ROUT1 */ {"Headphone Jack", NULL, "LOUT1"}, {"Headphone Jack", NULL, "ROUT1"}, /* headset connected 
to ROUT1 and LINPUT1 with bias (def below) */ {"Headset Jack", NULL, "ROUT1"}, /* ext speaker connected to LOUT2, ROUT2 */ {"Ext Spk", NULL , "ROUT2"}, {"Ext Spk", NULL , "LOUT2"}, /* mic is connected to input 1 - with bias */ {"LINPUT1", NULL, "Mic Bias"}, {"Mic Bias", NULL, "Mic Jack"}, /* line is connected to input 1 - no bias */ {"LINPUT1", NULL, "Line Jack"}, }; static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset", "Off"}; static const char *spk_function[] = {"On", "Off"}; static const struct soc_enum spitz_enum[] = { SOC_ENUM_SINGLE_EXT(5, jack_function), SOC_ENUM_SINGLE_EXT(2, spk_function), }; static const struct snd_kcontrol_new wm8750_spitz_controls[] = { SOC_ENUM_EXT("Jack Function", spitz_enum[0], spitz_get_jack, spitz_set_jack), SOC_ENUM_EXT("Speaker Function", spitz_enum[1], spitz_get_spk, spitz_set_spk), }; /* * Logic for a wm8750 as connected on a Sharp SL-Cxx00 Device */ static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; /* NC codec pins */ snd_soc_dapm_nc_pin(dapm, "RINPUT1"); snd_soc_dapm_nc_pin(dapm, "LINPUT2"); snd_soc_dapm_nc_pin(dapm, "RINPUT2"); snd_soc_dapm_nc_pin(dapm, "LINPUT3"); snd_soc_dapm_nc_pin(dapm, "RINPUT3"); snd_soc_dapm_nc_pin(dapm, "OUT3"); snd_soc_dapm_nc_pin(dapm, "MONO1"); return 0; } /* spitz digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link spitz_dai = { .name = "wm8750", .stream_name = "WM8750", .cpu_dai_name = "pxa2xx-i2s", .codec_dai_name = "wm8750-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm8750.0-001b", .init = spitz_wm8750_init, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, .ops = &spitz_ops, }; /* spitz audio machine driver */ static struct snd_soc_card snd_soc_spitz = { .name = "Spitz", .owner = THIS_MODULE, .dai_link = &spitz_dai, .num_links = 1, .controls = wm8750_spitz_controls, .num_controls = 
ARRAY_SIZE(wm8750_spitz_controls), .dapm_widgets = wm8750_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets), .dapm_routes = spitz_audio_map, .num_dapm_routes = ARRAY_SIZE(spitz_audio_map), }; static struct platform_device *spitz_snd_device; static int __init spitz_init(void) { int ret; if (!(machine_is_spitz() || machine_is_borzoi() || machine_is_akita())) return -ENODEV; if (machine_is_borzoi() || machine_is_spitz()) spitz_mic_gpio = SPITZ_GPIO_MIC_BIAS; else spitz_mic_gpio = AKITA_GPIO_MIC_BIAS; ret = gpio_request(spitz_mic_gpio, "MIC GPIO"); if (ret) goto err1; ret = gpio_direction_output(spitz_mic_gpio, 0); if (ret) goto err2; spitz_snd_device = platform_device_alloc("soc-audio", -1); if (!spitz_snd_device) { ret = -ENOMEM; goto err2; } platform_set_drvdata(spitz_snd_device, &snd_soc_spitz); ret = platform_device_add(spitz_snd_device); if (ret) goto err3; return 0; err3: platform_device_put(spitz_snd_device); err2: gpio_free(spitz_mic_gpio); err1: return ret; } static void __exit spitz_exit(void) { platform_device_unregister(spitz_snd_device); gpio_free(spitz_mic_gpio); } module_init(spitz_init); module_exit(spitz_exit); MODULE_AUTHOR("Richard Purdie"); MODULE_DESCRIPTION("ALSA SoC Spitz"); MODULE_LICENSE("GPL");
gpl-2.0
zf2-laser-dev/android_kernel_asus_msm8916
sound/soc/pxa/spitz.c
7713
9626
/* * spitz.c -- SoC audio for Sharp SL-Cxx00 models Spitz, Borzoi and Akita * * Copyright 2005 Wolfson Microelectronics PLC. * Copyright 2005 Openedhand Ltd. * * Authors: Liam Girdwood <lrg@slimlogic.co.uk> * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/timer.h> #include <linux/interrupt.h> #include <linux/platform_device.h> #include <linux/gpio.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include <mach/spitz.h> #include "../codecs/wm8750.h" #include "pxa2xx-i2s.h" #define SPITZ_HP 0 #define SPITZ_MIC 1 #define SPITZ_LINE 2 #define SPITZ_HEADSET 3 #define SPITZ_HP_OFF 4 #define SPITZ_SPK_ON 0 #define SPITZ_SPK_OFF 1 /* audio clock in Hz - rounded from 12.235MHz */ #define SPITZ_AUDIO_CLOCK 12288000 static int spitz_jack_func; static int spitz_spk_func; static int spitz_mic_gpio; static void spitz_ext_control(struct snd_soc_dapm_context *dapm) { if (spitz_spk_func == SPITZ_SPK_ON) snd_soc_dapm_enable_pin(dapm, "Ext Spk"); else snd_soc_dapm_disable_pin(dapm, "Ext Spk"); /* set up jack connection */ switch (spitz_jack_func) { case SPITZ_HP: /* enable and unmute hp jack, disable mic bias */ snd_soc_dapm_disable_pin(dapm, "Headset Jack"); snd_soc_dapm_disable_pin(dapm, "Mic Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); snd_soc_dapm_enable_pin(dapm, "Headphone Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 1); gpio_set_value(SPITZ_GPIO_MUTE_R, 1); break; case SPITZ_MIC: /* enable mic jack and bias, mute hp */ snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); snd_soc_dapm_enable_pin(dapm, "Mic Jack"); 
gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 0); break; case SPITZ_LINE: /* enable line jack, disable mic bias and mute hp */ snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); snd_soc_dapm_disable_pin(dapm, "Mic Jack"); snd_soc_dapm_enable_pin(dapm, "Line Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 0); break; case SPITZ_HEADSET: /* enable and unmute headset jack enable mic bias, mute L hp */ snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_enable_pin(dapm, "Mic Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); snd_soc_dapm_enable_pin(dapm, "Headset Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 1); break; case SPITZ_HP_OFF: /* jack removed, everything off */ snd_soc_dapm_disable_pin(dapm, "Headphone Jack"); snd_soc_dapm_disable_pin(dapm, "Headset Jack"); snd_soc_dapm_disable_pin(dapm, "Mic Jack"); snd_soc_dapm_disable_pin(dapm, "Line Jack"); gpio_set_value(SPITZ_GPIO_MUTE_L, 0); gpio_set_value(SPITZ_GPIO_MUTE_R, 0); break; } snd_soc_dapm_sync(dapm); } static int spitz_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; mutex_lock(&codec->mutex); /* check the jack status at stream startup */ spitz_ext_control(&codec->dapm); mutex_unlock(&codec->mutex); return 0; } static int spitz_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int clk = 0; int ret = 0; switch (params_rate(params)) { case 8000: case 16000: case 48000: case 96000: clk = 12288000; break; case 11025: case 22050: case 44100: clk = 11289600; break; } /* set the codec system clock for DAC and ADC */ ret = snd_soc_dai_set_sysclk(codec_dai, WM8750_SYSCLK, clk, 
SND_SOC_CLOCK_IN); if (ret < 0) return ret; /* set the I2S system clock as input (unused) */ ret = snd_soc_dai_set_sysclk(cpu_dai, PXA2XX_I2S_SYSCLK, 0, SND_SOC_CLOCK_IN); if (ret < 0) return ret; return 0; } static struct snd_soc_ops spitz_ops = { .startup = spitz_startup, .hw_params = spitz_hw_params, }; static int spitz_get_jack(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = spitz_jack_func; return 0; } static int spitz_set_jack(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); if (spitz_jack_func == ucontrol->value.integer.value[0]) return 0; spitz_jack_func = ucontrol->value.integer.value[0]; spitz_ext_control(&card->dapm); return 1; } static int spitz_get_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { ucontrol->value.integer.value[0] = spitz_spk_func; return 0; } static int spitz_set_spk(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_card *card = snd_kcontrol_chip(kcontrol); if (spitz_spk_func == ucontrol->value.integer.value[0]) return 0; spitz_spk_func = ucontrol->value.integer.value[0]; spitz_ext_control(&card->dapm); return 1; } static int spitz_mic_bias(struct snd_soc_dapm_widget *w, struct snd_kcontrol *k, int event) { gpio_set_value_cansleep(spitz_mic_gpio, SND_SOC_DAPM_EVENT_ON(event)); return 0; } /* spitz machine dapm widgets */ static const struct snd_soc_dapm_widget wm8750_dapm_widgets[] = { SND_SOC_DAPM_HP("Headphone Jack", NULL), SND_SOC_DAPM_MIC("Mic Jack", spitz_mic_bias), SND_SOC_DAPM_SPK("Ext Spk", NULL), SND_SOC_DAPM_LINE("Line Jack", NULL), /* headset is a mic and mono headphone */ SND_SOC_DAPM_HP("Headset Jack", NULL), }; /* Spitz machine audio_map */ static const struct snd_soc_dapm_route spitz_audio_map[] = { /* headphone connected to LOUT1, ROUT1 */ {"Headphone Jack", NULL, "LOUT1"}, {"Headphone Jack", NULL, "ROUT1"}, /* headset connected 
to ROUT1 and LINPUT1 with bias (def below) */ {"Headset Jack", NULL, "ROUT1"}, /* ext speaker connected to LOUT2, ROUT2 */ {"Ext Spk", NULL , "ROUT2"}, {"Ext Spk", NULL , "LOUT2"}, /* mic is connected to input 1 - with bias */ {"LINPUT1", NULL, "Mic Bias"}, {"Mic Bias", NULL, "Mic Jack"}, /* line is connected to input 1 - no bias */ {"LINPUT1", NULL, "Line Jack"}, }; static const char *jack_function[] = {"Headphone", "Mic", "Line", "Headset", "Off"}; static const char *spk_function[] = {"On", "Off"}; static const struct soc_enum spitz_enum[] = { SOC_ENUM_SINGLE_EXT(5, jack_function), SOC_ENUM_SINGLE_EXT(2, spk_function), }; static const struct snd_kcontrol_new wm8750_spitz_controls[] = { SOC_ENUM_EXT("Jack Function", spitz_enum[0], spitz_get_jack, spitz_set_jack), SOC_ENUM_EXT("Speaker Function", spitz_enum[1], spitz_get_spk, spitz_set_spk), }; /* * Logic for a wm8750 as connected on a Sharp SL-Cxx00 Device */ static int spitz_wm8750_init(struct snd_soc_pcm_runtime *rtd) { struct snd_soc_codec *codec = rtd->codec; struct snd_soc_dapm_context *dapm = &codec->dapm; /* NC codec pins */ snd_soc_dapm_nc_pin(dapm, "RINPUT1"); snd_soc_dapm_nc_pin(dapm, "LINPUT2"); snd_soc_dapm_nc_pin(dapm, "RINPUT2"); snd_soc_dapm_nc_pin(dapm, "LINPUT3"); snd_soc_dapm_nc_pin(dapm, "RINPUT3"); snd_soc_dapm_nc_pin(dapm, "OUT3"); snd_soc_dapm_nc_pin(dapm, "MONO1"); return 0; } /* spitz digital audio interface glue - connects codec <--> CPU */ static struct snd_soc_dai_link spitz_dai = { .name = "wm8750", .stream_name = "WM8750", .cpu_dai_name = "pxa2xx-i2s", .codec_dai_name = "wm8750-hifi", .platform_name = "pxa-pcm-audio", .codec_name = "wm8750.0-001b", .init = spitz_wm8750_init, .dai_fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS, .ops = &spitz_ops, }; /* spitz audio machine driver */ static struct snd_soc_card snd_soc_spitz = { .name = "Spitz", .owner = THIS_MODULE, .dai_link = &spitz_dai, .num_links = 1, .controls = wm8750_spitz_controls, .num_controls = 
ARRAY_SIZE(wm8750_spitz_controls), .dapm_widgets = wm8750_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8750_dapm_widgets), .dapm_routes = spitz_audio_map, .num_dapm_routes = ARRAY_SIZE(spitz_audio_map), }; static struct platform_device *spitz_snd_device; static int __init spitz_init(void) { int ret; if (!(machine_is_spitz() || machine_is_borzoi() || machine_is_akita())) return -ENODEV; if (machine_is_borzoi() || machine_is_spitz()) spitz_mic_gpio = SPITZ_GPIO_MIC_BIAS; else spitz_mic_gpio = AKITA_GPIO_MIC_BIAS; ret = gpio_request(spitz_mic_gpio, "MIC GPIO"); if (ret) goto err1; ret = gpio_direction_output(spitz_mic_gpio, 0); if (ret) goto err2; spitz_snd_device = platform_device_alloc("soc-audio", -1); if (!spitz_snd_device) { ret = -ENOMEM; goto err2; } platform_set_drvdata(spitz_snd_device, &snd_soc_spitz); ret = platform_device_add(spitz_snd_device); if (ret) goto err3; return 0; err3: platform_device_put(spitz_snd_device); err2: gpio_free(spitz_mic_gpio); err1: return ret; } static void __exit spitz_exit(void) { platform_device_unregister(spitz_snd_device); gpio_free(spitz_mic_gpio); } module_init(spitz_init); module_exit(spitz_exit); MODULE_AUTHOR("Richard Purdie"); MODULE_DESCRIPTION("ALSA SoC Spitz"); MODULE_LICENSE("GPL");
gpl-2.0
omnirom/android_kernel_sony_msm8930
arch/parisc/lib/bitops.c
8737
1819
/* * bitops.c: atomic operations which got too long to be inlined all over * the place. * * Copyright 1999 Philipp Rumpf (prumpf@tux.org) * Copyright 2000 Grant Grundler (grundler@cup.hp.com) */ #include <linux/kernel.h> #include <linux/spinlock.h> #include <linux/atomic.h> #ifdef CONFIG_SMP arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned = { [0 ... (ATOMIC_HASH_SIZE-1)] = __ARCH_SPIN_LOCK_UNLOCKED }; #endif #ifdef CONFIG_64BIT unsigned long __xchg64(unsigned long x, unsigned long *ptr) { unsigned long temp, flags; _atomic_spin_lock_irqsave(ptr, flags); temp = *ptr; *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return temp; } #endif unsigned long __xchg32(int x, int *ptr) { unsigned long flags; long temp; _atomic_spin_lock_irqsave(ptr, flags); temp = (long) *ptr; /* XXX - sign extension wanted? */ *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)temp; } unsigned long __xchg8(char x, char *ptr) { unsigned long flags; long temp; _atomic_spin_lock_irqsave(ptr, flags); temp = (long) *ptr; /* XXX - sign extension wanted? */ *ptr = x; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)temp; } #ifdef CONFIG_64BIT unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new) { unsigned long flags; unsigned long prev; _atomic_spin_lock_irqsave(ptr, flags); if ((prev = *ptr) == old) *ptr = new; _atomic_spin_unlock_irqrestore(ptr, flags); return prev; } #endif unsigned long __cmpxchg_u32(volatile unsigned int *ptr, unsigned int old, unsigned int new) { unsigned long flags; unsigned int prev; _atomic_spin_lock_irqsave(ptr, flags); if ((prev = *ptr) == old) *ptr = new; _atomic_spin_unlock_irqrestore(ptr, flags); return (unsigned long)prev; }
gpl-2.0
BruceBushby/linux-4.1
arch/sh/mm/cache-sh7705.c
12065
4964
/*
 * arch/sh/mm/cache-sh7705.c
 *
 * Copyright (C) 1999, 2000  Niibe Yutaka
 * Copyright (C) 2004  Alex Song
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/threads.h>
#include <asm/addrspace.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgalloc.h>
#include <asm/mmu_context.h>
#include <asm/cacheflush.h>

/*
 * The 32KB cache on the SH7705 suffers from the same synonym problem
 * as SH4 CPUs.
 */

/*
 * Walk every line of every D-cache way via the address array and
 * clear VALID|UPDATED on lines that have both set (i.e. write them
 * back and invalidate them).
 */
static inline void cache_wback_all(void)
{
	unsigned long nr_ways, way_size, way_base;

	nr_ways  = current_cpu_data.dcache.ways;
	way_size = current_cpu_data.dcache.sets;
	way_size <<= current_cpu_data.dcache.entry_shift;
	way_base = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long line;

		for (line = way_base; line < way_base + way_size;
		     line += current_cpu_data.dcache.linesz) {
			unsigned long tag;
			int bits = SH_CACHE_UPDATED | SH_CACHE_VALID;

			tag = __raw_readl(line);
			/* Only touch lines that are both valid and dirty. */
			if ((tag & bits) == bits)
				__raw_writel(tag & ~bits, line);
		}

		way_base += current_cpu_data.dcache.way_incr;
	} while (--nr_ways);
}

/*
 * Write back the range of D-cache, and purge the I-cache.
 *
 * Called from kernel/module.c:sys_init_module and routine for a.out format.
 */
static void sh7705_flush_icache_range(void *args)
{
	struct flusher_data *fdata = args;
	unsigned long begin, finish;

	begin  = fdata->addr1;
	finish = fdata->addr2;
	__flush_wback_region((void *)begin, finish - begin);
}

/*
 * Writeback & invalidate all D-cache lines holding the given physical
 * page.
 */
static void __flush_dcache_page(unsigned long phys)
{
	unsigned long nr_ways, way_size, way_base;
	unsigned long flags;

	phys |= SH_CACHE_VALID;

	/*
	 * Here, phys is the physical address of the page. We check all the
	 * tags in the cache for those with the same page number as this page
	 * (by masking off the lowest 2 bits of the 19-bit tag; these bits are
	 * derived from the offset within in the 4k page). Matching valid
	 * entries are invalidated.
	 *
	 * Since 2 bits of the cache index are derived from the virtual page
	 * number, knowing this would reduce the number of cache entries to be
	 * searched by a factor of 4. However this function exists to deal with
	 * potential cache aliasing, therefore the optimisation is probably not
	 * possible.
	 */
	local_irq_save(flags);
	jump_to_uncached();

	nr_ways  = current_cpu_data.dcache.ways;
	way_size = current_cpu_data.dcache.sets;
	way_size <<= current_cpu_data.dcache.entry_shift;
	way_base = CACHE_OC_ADDRESS_ARRAY;

	do {
		unsigned long line;

		for (line = way_base; line < way_base + way_size;
		     line += current_cpu_data.dcache.linesz) {
			unsigned long tag;

			tag = __raw_readl(line) &
			      (0x1ffffC00 | SH_CACHE_VALID);
			if (tag == phys) {
				tag &= ~(SH_CACHE_VALID | SH_CACHE_UPDATED);
				__raw_writel(tag, line);
			}
		}

		way_base += current_cpu_data.dcache.way_incr;
	} while (--nr_ways);

	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back & invalidate the D-cache of the page.
 * (To avoid "alias" issues)
 */
static void sh7705_flush_dcache_page(void *arg)
{
	struct page *page = arg;
	struct address_space *mapping = page_mapping(page);

	/* Unmapped page-cache pages can be flushed lazily. */
	if (mapping && !mapping_mapped(mapping))
		clear_bit(PG_dcache_clean, &page->flags);
	else
		__flush_dcache_page(__pa(page_address(page)));
}

/* Write back and invalidate the entire D-cache, uncached + irq-off. */
static void sh7705_flush_cache_all(void *args)
{
	unsigned long flags;

	local_irq_save(flags);
	jump_to_uncached();
	cache_wback_all();
	back_to_cached();
	local_irq_restore(flags);
}

/*
 * Write back and invalidate I/D-caches for the page.
 *
 * ADDRESS: Virtual Address (U0 address)
 */
static void sh7705_flush_cache_page(void *args)
{
	struct flusher_data *fdata = args;
	unsigned long pfn = fdata->addr2;

	__flush_dcache_page(pfn << PAGE_SHIFT);
}

/*
 * This is called when a page-cache page is about to be mapped into a
 * user process' address space.  It offers an opportunity for a
 * port to ensure d-cache/i-cache coherency if necessary.
 *
 * Not entirely sure why this is necessary on SH3 with 32K cache but
 * without it we get occasional "Memory fault" when loading a program.
 */
static void sh7705_flush_icache_page(void *page)
{
	__flush_purge_region(page_address(page), PAGE_SIZE);
}

/* Install the SH7705 cache-flush implementations as the local ops. */
void __init sh7705_cache_init(void)
{
	local_flush_icache_range	= sh7705_flush_icache_range;
	local_flush_dcache_page		= sh7705_flush_dcache_page;
	local_flush_cache_all		= sh7705_flush_cache_all;
	local_flush_cache_mm		= sh7705_flush_cache_all;
	local_flush_cache_dup_mm	= sh7705_flush_cache_all;
	local_flush_cache_range		= sh7705_flush_cache_all;
	local_flush_cache_page		= sh7705_flush_cache_page;
	local_flush_icache_page		= sh7705_flush_icache_page;
}
gpl-2.0
zarboz/Beastmode.Evita.2.0
arch/sh/mm/ioremap.c
12833
3298
/*
 * arch/sh/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005 - 2010  Paul Mundt
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __init_refok
__ioremap_caller(phys_addr_t phys_addr, unsigned long size,
		 pgprot_t pgprot, void *caller)
{
	struct vm_struct *vma;
	unsigned long page_off, last, vaddr, base_vaddr;
	void __iomem *pmb_va;

	/* Don't allow wraparound or zero size */
	last = phys_addr + size - 1;
	if (!size || last < phys_addr)
		return NULL;

	/*
	 * If we can't yet use the regular approach, go the fixmap route.
	 */
	if (!mem_init_done)
		return ioremap_fixed(phys_addr, size, pgprot);

	/*
	 * First try to remap through the PMB.
	 * PMB entries are all pre-faulted.
	 */
	pmb_va = pmb_remap_caller(phys_addr, size, pgprot, caller);
	if (pmb_va && !IS_ERR(pmb_va))
		return pmb_va;

	/*
	 * Mappings have to be page-aligned
	 */
	page_off = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last + 1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	vma = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!vma)
		return NULL;
	vma->phys_addr = phys_addr;

	base_vaddr = vaddr = (unsigned long)vma->addr;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, pgprot)) {
		/* Tear down the vm_area we just claimed. */
		vunmap((void *)base_vaddr);
		return NULL;
	}

	return (void __iomem *)(page_off + (char *)base_vaddr);
}
EXPORT_SYMBOL(__ioremap_caller);

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	return 0;
}

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *vma;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

	/*
	 * There's no VMA if it's from an early fixed mapping.
	 */
	if (iounmap_fixed(addr) == 0)
		return;

	/*
	 * If the PMB handled it, there's nothing else to do.
	 */
	if (pmb_unmap(addr) == 0)
		return;

	vma = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!vma) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(vma);
}
EXPORT_SYMBOL(__iounmap);
gpl-2.0
1N4148/android_kernel_samsung_msm8974
block/partitions/sysv68.c
13089
1939
/*
 * fs/partitions/sysv68.c
 *
 * Copyright (C) 2007 Philippe De Muyter <phdm@macqel.be>
 */

#include "check.h"
#include "sysv68.h"

/*
 * Volume ID structure: on first 256-bytes sector of disk
 */

struct volumeid {
	u8	vid_unused[248];
	u8	vid_mac[8];	/* ASCII string "MOTOROLA" */
};

/*
 * config block: second 256-bytes sector on disk
 */

struct dkconfig {
	u8	ios_unused0[128];
	__be32	ios_slcblk;	/* Slice table block number */
	__be16	ios_slccnt;	/* Number of entries in slice table */
	u8	ios_unused1[122];
};

/*
 * combined volumeid and dkconfig block
 */

struct dkblk0 {
	struct volumeid dk_vid;
	struct dkconfig dk_ios;
};

/*
 * Slice Table Structure
 */

struct slice {
	__be32	nblocks;		/* slice size (in blocks) */
	__be32	blkoff;			/* block offset of slice */
};

/*
 * Parse a Motorola sysV68 slice table and register each non-empty
 * slice as a partition.  Returns 1 on success, 0 when the disk is not
 * sysV68-labelled, -1 on read error.
 */
int sysv68_partition(struct parsed_partitions *state)
{
	int idx, nslices;
	int slot = 1;
	Sector sect;
	unsigned char *data;
	struct dkblk0 *blk0;
	struct slice *sl;
	char label[64];

	/* Sector 0 holds the volume id + config block. */
	data = read_part_sector(state, 0, &sect);
	if (!data)
		return -1;

	blk0 = (struct dkblk0 *)data;
	if (memcmp(blk0->dk_vid.vid_mac, "MOTOROLA",
		   sizeof(blk0->dk_vid.vid_mac))) {
		put_dev_sector(sect);
		return 0;
	}
	nslices = be16_to_cpu(blk0->dk_ios.ios_slccnt);
	idx = be32_to_cpu(blk0->dk_ios.ios_slcblk);
	put_dev_sector(sect);

	/* Fetch the slice table itself. */
	data = read_part_sector(state, idx, &sect);
	if (!data)
		return -1;

	nslices -= 1; /* last slice is the whole disk */
	snprintf(label, sizeof(label), "sysV68: %s(s%u)",
		 state->name, nslices);
	strlcat(state->pp_buf, label, PAGE_SIZE);

	sl = (struct slice *)data;
	for (idx = 0; idx < nslices; idx++, sl++) {
		if (slot == state->limit)
			break;
		if (be32_to_cpu(sl->nblocks)) {
			put_partition(state, slot,
				      be32_to_cpu(sl->blkoff),
				      be32_to_cpu(sl->nblocks));
			snprintf(label, sizeof(label), "(s%u)", idx);
			strlcat(state->pp_buf, label, PAGE_SIZE);
		}
		slot++;
	}
	strlcat(state->pp_buf, "\n", PAGE_SIZE);
	put_dev_sector(sect);
	return 1;
}
gpl-2.0
jmztaylor/android_kernel_htc_m8ul
arch/arm/mach-s5p64x0/mach-smdk6440.c
34
6600
/* linux/arch/arm/mach-s5p64x0/mach-smdk6440.c * * Copyright (c) 2009-2010 Samsung Electronics Co., Ltd. * http://www.samsung.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/i2c.h> #include <linux/serial_core.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/pwm_backlight.h> #include <linux/fb.h> #include <linux/mmc/host.h> #include <video/platform_lcd.h> #include <asm/hardware/vic.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include <mach/map.h> #include <mach/regs-clock.h> #include <mach/i2c.h> #include <mach/regs-gpio.h> #include <plat/regs-serial.h> #include <plat/gpio-cfg.h> #include <plat/clock.h> #include <plat/devs.h> #include <plat/cpu.h> #include <plat/iic.h> #include <plat/pll.h> #include <plat/adc.h> #include <plat/ts.h> #include <plat/s5p-time.h> #include <plat/backlight.h> #include <plat/fb.h> #include <plat/regs-fb.h> #include <plat/sdhci.h> #include "common.h" #define SMDK6440_UCON_DEFAULT (S3C2410_UCON_TXILEVEL | \ S3C2410_UCON_RXILEVEL | \ S3C2410_UCON_TXIRQMODE | \ S3C2410_UCON_RXIRQMODE | \ S3C2410_UCON_RXFIFO_TOI | \ S3C2443_UCON_RXERR_IRQEN) #define SMDK6440_ULCON_DEFAULT S3C2410_LCON_CS8 #define SMDK6440_UFCON_DEFAULT (S3C2410_UFCON_FIFOMODE | \ S3C2440_UFCON_TXTRIG16 | \ S3C2410_UFCON_RXTRIG8) static struct s3c2410_uartcfg smdk6440_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = SMDK6440_UCON_DEFAULT, .ulcon = SMDK6440_ULCON_DEFAULT, .ufcon = SMDK6440_UFCON_DEFAULT, }, [1] = { .hwport = 1, .flags = 0, .ucon 
= SMDK6440_UCON_DEFAULT, .ulcon = SMDK6440_ULCON_DEFAULT, .ufcon = SMDK6440_UFCON_DEFAULT, }, [2] = { .hwport = 2, .flags = 0, .ucon = SMDK6440_UCON_DEFAULT, .ulcon = SMDK6440_ULCON_DEFAULT, .ufcon = SMDK6440_UFCON_DEFAULT, }, [3] = { .hwport = 3, .flags = 0, .ucon = SMDK6440_UCON_DEFAULT, .ulcon = SMDK6440_ULCON_DEFAULT, .ufcon = SMDK6440_UFCON_DEFAULT, }, }; static struct s3c_fb_pd_win smdk6440_fb_win0 = { .win_mode = { .left_margin = 8, .right_margin = 13, .upper_margin = 7, .lower_margin = 5, .hsync_len = 3, .vsync_len = 1, .xres = 800, .yres = 480, }, .max_bpp = 32, .default_bpp = 24, }; static struct s3c_fb_platdata smdk6440_lcd_pdata __initdata = { .win[0] = &smdk6440_fb_win0, .vidcon0 = VIDCON0_VIDOUT_RGB | VIDCON0_PNRMODE_RGB, .vidcon1 = VIDCON1_INV_HSYNC | VIDCON1_INV_VSYNC, .setup_gpio = s5p64x0_fb_gpio_setup_24bpp, }; static void smdk6440_lte480_reset_power(struct plat_lcd_data *pd, unsigned int power) { int err; if (power) { err = gpio_request(S5P6440_GPN(5), "GPN"); if (err) { printk(KERN_ERR "failed to request GPN for lcd reset\n"); return; } gpio_direction_output(S5P6440_GPN(5), 1); gpio_set_value(S5P6440_GPN(5), 0); gpio_set_value(S5P6440_GPN(5), 1); gpio_free(S5P6440_GPN(5)); } } static struct plat_lcd_data smdk6440_lcd_power_data = { .set_power = smdk6440_lte480_reset_power, }; static struct platform_device smdk6440_lcd_lte480wv = { .name = "platform-lcd", .dev.parent = &s3c_device_fb.dev, .dev.platform_data = &smdk6440_lcd_power_data, }; static struct platform_device *smdk6440_devices[] __initdata = { &s3c_device_adc, &s3c_device_rtc, &s3c_device_i2c0, &s3c_device_i2c1, &s3c_device_ts, &s3c_device_wdt, &samsung_asoc_dma, &s5p6440_device_iis, &s3c_device_fb, &smdk6440_lcd_lte480wv, &s3c_device_hsmmc0, &s3c_device_hsmmc1, &s3c_device_hsmmc2, }; static struct s3c_sdhci_platdata smdk6440_hsmmc0_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c_sdhci_platdata smdk6440_hsmmc1_pdata __initdata = { .cd_type = S3C_SDHCI_CD_INTERNAL, 
#if defined(CONFIG_S5P64X0_SD_CH1_8BIT) .max_width = 8, .host_caps = MMC_CAP_8_BIT_DATA, #endif }; static struct s3c_sdhci_platdata smdk6440_hsmmc2_pdata __initdata = { .cd_type = S3C_SDHCI_CD_NONE, }; static struct s3c2410_platform_i2c s5p6440_i2c0_data __initdata = { .flags = 0, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6440_i2c0_cfg_gpio, }; static struct s3c2410_platform_i2c s5p6440_i2c1_data __initdata = { .flags = 0, .bus_num = 1, .slave_addr = 0x10, .frequency = 100*1000, .sda_delay = 100, .cfg_gpio = s5p6440_i2c1_cfg_gpio, }; static struct i2c_board_info smdk6440_i2c_devs0[] __initdata = { { I2C_BOARD_INFO("24c08", 0x50), }, { I2C_BOARD_INFO("wm8580", 0x1b), }, }; static struct i2c_board_info smdk6440_i2c_devs1[] __initdata = { }; static struct samsung_bl_gpio_info smdk6440_bl_gpio_info = { .no = S5P6440_GPF(15), .func = S3C_GPIO_SFN(2), }; static struct platform_pwm_backlight_data smdk6440_bl_data = { .pwm_id = 1, }; static void __init smdk6440_map_io(void) { s5p64x0_init_io(NULL, 0); s3c24xx_init_clocks(12000000); s3c24xx_init_uarts(smdk6440_uartcfgs, ARRAY_SIZE(smdk6440_uartcfgs)); s5p_set_timer_source(S5P_PWM3, S5P_PWM4); } static void s5p6440_set_lcd_interface(void) { unsigned int cfg; cfg = __raw_readl(S5P64X0_SPCON0); cfg &= ~S5P64X0_SPCON0_LCD_SEL_MASK; cfg |= S5P64X0_SPCON0_LCD_SEL_RGB; __raw_writel(cfg, S5P64X0_SPCON0); } static void __init smdk6440_machine_init(void) { s3c24xx_ts_set_platdata(NULL); s3c_i2c0_set_platdata(&s5p6440_i2c0_data); s3c_i2c1_set_platdata(&s5p6440_i2c1_data); i2c_register_board_info(0, smdk6440_i2c_devs0, ARRAY_SIZE(smdk6440_i2c_devs0)); i2c_register_board_info(1, smdk6440_i2c_devs1, ARRAY_SIZE(smdk6440_i2c_devs1)); samsung_bl_set(&smdk6440_bl_gpio_info, &smdk6440_bl_data); s5p6440_set_lcd_interface(); s3c_fb_set_platdata(&smdk6440_lcd_pdata); s3c_sdhci0_set_platdata(&smdk6440_hsmmc0_pdata); s3c_sdhci1_set_platdata(&smdk6440_hsmmc1_pdata); s3c_sdhci2_set_platdata(&smdk6440_hsmmc2_pdata); 
platform_add_devices(smdk6440_devices, ARRAY_SIZE(smdk6440_devices)); } MACHINE_START(SMDK6440, "SMDK6440") .atag_offset = 0x100, .init_irq = s5p6440_init_irq, .handle_irq = vic_handle_irq, .map_io = smdk6440_map_io, .init_machine = smdk6440_machine_init, .timer = &s5p_timer, .restart = s5p64x0_restart, MACHINE_END
gpl-2.0
jameshilliard/m8-3.4.0-gb1fa77f
arch/unicore32/mm/mmu.c
34
10494
/* * linux/arch/unicore32/mm/mmu.c * * Code specific to PKUnity SoC and UniCore ISA * * Copyright (C) 2001-2010 GUAN Xue-tao * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/mman.h> #include <linux/nodemask.h> #include <linux/memblock.h> #include <linux/fs.h> #include <linux/bootmem.h> #include <linux/io.h> #include <asm/cputype.h> #include <asm/sections.h> #include <asm/setup.h> #include <asm/sizes.h> #include <asm/tlb.h> #include <asm/memblock.h> #include <mach/map.h> #include "mm.h" struct page *empty_zero_page; EXPORT_SYMBOL(empty_zero_page); pmd_t *top_pmd; pgprot_t pgprot_user; EXPORT_SYMBOL(pgprot_user); pgprot_t pgprot_kernel; EXPORT_SYMBOL(pgprot_kernel); static int __init noalign_setup(char *__unused) { cr_alignment &= ~CR_A; cr_no_alignment &= ~CR_A; set_cr(cr_alignment); return 1; } __setup("noalign", noalign_setup); void adjust_cr(unsigned long mask, unsigned long set) { unsigned long flags; mask &= ~CR_A; set &= mask; local_irq_save(flags); cr_no_alignment = (cr_no_alignment & ~mask) | set; cr_alignment = (cr_alignment & ~mask) | set; set_cr((get_cr() & ~mask) | set); local_irq_restore(flags); } struct map_desc { unsigned long virtual; unsigned long pfn; unsigned long length; unsigned int type; }; #define PROT_PTE_DEVICE (PTE_PRESENT | PTE_YOUNG | \ PTE_DIRTY | PTE_READ | PTE_WRITE) #define PROT_SECT_DEVICE (PMD_TYPE_SECT | PMD_PRESENT | \ PMD_SECT_READ | PMD_SECT_WRITE) static struct mem_type mem_types[] = { [MT_DEVICE] = { .prot_pte = PROT_PTE_DEVICE, .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT, .prot_sect = PROT_SECT_DEVICE, }, [MT_KUSER] = { .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_CACHEABLE | PTE_READ | PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT, .prot_sect = PROT_SECT_DEVICE, 
}, [MT_HIGH_VECTORS] = { .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_CACHEABLE | PTE_READ | PTE_WRITE | PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT, }, [MT_MEMORY] = { .prot_pte = PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_WRITE | PTE_EXEC, .prot_l1 = PMD_TYPE_TABLE | PMD_PRESENT, .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE | PMD_SECT_READ | PMD_SECT_WRITE | PMD_SECT_EXEC, }, [MT_ROM] = { .prot_sect = PMD_TYPE_SECT | PMD_PRESENT | PMD_SECT_CACHEABLE | PMD_SECT_READ, }, }; const struct mem_type *get_mem_type(unsigned int type) { return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL; } EXPORT_SYMBOL(get_mem_type); static void __init build_mem_type_table(void) { pgprot_user = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_CACHEABLE); pgprot_kernel = __pgprot(PTE_PRESENT | PTE_YOUNG | PTE_DIRTY | PTE_READ | PTE_WRITE | PTE_EXEC | PTE_CACHEABLE); } #define vectors_base() (vectors_high() ? 0xffff0000 : 0) static void __init *early_alloc(unsigned long sz) { void *ptr = __va(memblock_alloc(sz, sz)); memset(ptr, 0, sz); return ptr; } static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot) { if (pmd_none(*pmd)) { pte_t *pte = early_alloc(PTRS_PER_PTE * sizeof(pte_t)); __pmd_populate(pmd, __pa(pte) | prot); } BUG_ON(pmd_bad(*pmd)); return pte_offset_kernel(pmd, addr); } static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr, unsigned long end, unsigned long pfn, const struct mem_type *type) { pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1); do { set_pte(pte, pfn_pte(pfn, __pgprot(type->prot_pte))); pfn++; } while (pte++, addr += PAGE_SIZE, addr != end); } static void __init alloc_init_section(pgd_t *pgd, unsigned long addr, unsigned long end, unsigned long phys, const struct mem_type *type) { pmd_t *pmd = pmd_offset((pud_t *)pgd, addr); if (((addr | end | phys) & ~SECTION_MASK) == 0) { pmd_t *p = pmd; do { set_pmd(pmd, __pmd(phys | type->prot_sect)); phys += SECTION_SIZE; } while (pmd++, addr 
+= SECTION_SIZE, addr != end); flush_pmd_entry(p); } else { alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type); } } static void __init create_mapping(struct map_desc *md) { unsigned long phys, addr, length, end; const struct mem_type *type; pgd_t *pgd; if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) { printk(KERN_WARNING "BUG: not creating mapping for " "0x%08llx at 0x%08lx in user region\n", __pfn_to_phys((u64)md->pfn), md->virtual); return; } if ((md->type == MT_DEVICE || md->type == MT_ROM) && md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) { printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx " "overlaps vmalloc space\n", __pfn_to_phys((u64)md->pfn), md->virtual); } type = &mem_types[md->type]; addr = md->virtual & PAGE_MASK; phys = (unsigned long)__pfn_to_phys(md->pfn); length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK)); if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) { printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not " "be mapped using pages, ignoring.\n", __pfn_to_phys(md->pfn), addr); return; } pgd = pgd_offset_k(addr); end = addr + length; do { unsigned long next = pgd_addr_end(addr, end); alloc_init_section(pgd, addr, next, phys, type); phys += next - addr; addr = next; } while (pgd++, addr != end); } static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M); static int __init early_vmalloc(char *arg) { unsigned long vmalloc_reserve = memparse(arg, NULL); if (vmalloc_reserve < SZ_16M) { vmalloc_reserve = SZ_16M; printk(KERN_WARNING "vmalloc area too small, limiting to %luMB\n", vmalloc_reserve >> 20); } if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) { vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M); printk(KERN_WARNING "vmalloc area is too big, limiting to %luMB\n", vmalloc_reserve >> 20); } vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve); return 0; } early_param("vmalloc", early_vmalloc); static phys_addr_t lowmem_limit __initdata = SZ_1G; 
static void __init sanity_check_meminfo(void) { int i, j; lowmem_limit = __pa(vmalloc_min - 1) + 1; memblock_set_current_limit(lowmem_limit); for (i = 0, j = 0; i < meminfo.nr_banks; i++) { struct membank *bank = &meminfo.bank[j]; *bank = meminfo.bank[i]; j++; } meminfo.nr_banks = j; } static inline void prepare_page_table(void) { unsigned long addr; phys_addr_t end; for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); end = memblock.memory.regions[0].base + memblock.memory.regions[0].size; if (end >= lowmem_limit) end = lowmem_limit; for (addr = __phys_to_virt(end); addr < VMALLOC_END; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); } void __init uc32_mm_memblock_reserve(void) { memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t)); } static void __init devicemaps_init(void) { struct map_desc map; unsigned long addr; void *vectors; vectors = early_alloc(PAGE_SIZE); for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE) pmd_clear(pmd_off_k(addr)); map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map.virtual = VECTORS_BASE; map.length = PAGE_SIZE; map.type = MT_HIGH_VECTORS; create_mapping(&map); map.pfn = __phys_to_pfn(virt_to_phys(vectors)); map.virtual = KUSER_VECPAGE_BASE; map.length = PAGE_SIZE; map.type = MT_KUSER; create_mapping(&map); /* * Finally flush the caches and tlb to ensure that we're in a * consistent state wrt the writebuffer. This also ensures that * any write-allocated cache lines in the vector page are written * back. After this point, we can start to touch devices again. 
*/ local_flush_tlb_all(); flush_cache_all(); } static void __init map_lowmem(void) { struct memblock_region *reg; for_each_memblock(memory, reg) { phys_addr_t start = reg->base; phys_addr_t end = start + reg->size; struct map_desc map; if (end > lowmem_limit) end = lowmem_limit; if (start >= end) break; map.pfn = __phys_to_pfn(start); map.virtual = __phys_to_virt(start); map.length = end - start; map.type = MT_MEMORY; create_mapping(&map); } } void __init paging_init(void) { void *zero_page; build_mem_type_table(); sanity_check_meminfo(); prepare_page_table(); map_lowmem(); devicemaps_init(); top_pmd = pmd_off_k(0xffff0000); zero_page = early_alloc(PAGE_SIZE); bootmem_init(); empty_zero_page = virt_to_page(zero_page); __flush_dcache_page(NULL, empty_zero_page); } void setup_mm_for_reboot(char mode) { unsigned long base_pmdval; pgd_t *pgd; int i; pgd = current->active_mm->pgd; base_pmdval = PMD_SECT_WRITE | PMD_SECT_READ | PMD_TYPE_SECT; for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) { unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval; pmd_t *pmd; pmd = pmd_off(pgd, i << PGDIR_SHIFT); set_pmd(pmd, __pmd(pmdval)); flush_pmd_entry(pmd); } local_flush_tlb_all(); } /* * Take care of architecture specific things when placing a new PTE into * a page table, or changing an existing PTE. Basically, there are two * things that we need to take care of: * * 1. If PG_dcache_clean is not set for the page, we need to ensure * that any cache entries for the kernels virtual memory * range are written back to the page. * 2. If we have multiple shared mappings of the same space in * an object, we need to deal with the cache aliasing issues. * * Note that the pte lock will be held. 
*/ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep) { unsigned long pfn = pte_pfn(*ptep); struct address_space *mapping; struct page *page; if (!pfn_valid(pfn)) return; /* * The zero page is never written to, so never has any dirty * cache lines, and therefore never needs to be flushed. */ page = pfn_to_page(pfn); if (page == ZERO_PAGE(0)) return; mapping = page_mapping(page); if (!test_and_set_bit(PG_dcache_clean, &page->flags)) __flush_dcache_page(mapping, page); if (mapping) if (vma->vm_flags & VM_EXEC) __flush_icache_all(); }
gpl-2.0
santod/android_kernel_htc_m8
arch/powerpc/kernel/machine_kexec_32.c
34
1569
/* * PPC32 code to handle Linux booting another kernel. * * Copyright (C) 2002-2003 Eric Biederman <ebiederm@xmission.com> * GameCube/ppc32 port Copyright (C) 2004 Albert Herranz * Copyright (C) 2005 IBM Corporation. * * This source code is licensed under the GNU General Public License, * Version 2. See the file COPYING for more details. */ #include <linux/kexec.h> #include <linux/mm.h> #include <linux/string.h> #include <asm/cacheflush.h> #include <asm/hw_irq.h> #include <asm/io.h> typedef void (*relocate_new_kernel_t)( unsigned long indirection_page, unsigned long reboot_code_buffer, unsigned long start_address) __noreturn; void default_machine_kexec(struct kimage *image) { extern const unsigned char relocate_new_kernel[]; extern const unsigned int relocate_new_kernel_size; unsigned long page_list; unsigned long reboot_code_buffer, reboot_code_buffer_phys; relocate_new_kernel_t rnk; local_irq_disable(); machine_kexec_mask_interrupts(); page_list = image->head; reboot_code_buffer = (unsigned long)page_address(image->control_code_page); reboot_code_buffer_phys = virt_to_phys((void *)reboot_code_buffer); memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size); flush_icache_range(reboot_code_buffer, reboot_code_buffer + KEXEC_CONTROL_PAGE_SIZE); printk(KERN_INFO "Bye!\n"); rnk = (relocate_new_kernel_t) reboot_code_buffer; (*rnk)(page_list, reboot_code_buffer_phys, image->start); } int default_machine_kexec_prepare(struct kimage *image) { return 0; }
gpl-2.0
gianlucaborello/linux
drivers/staging/rtl8192u/ieee80211/ieee80211_rx.c
34
78355
/* * Original code based Host AP (software wireless LAN access point) driver * for Intersil Prism2/2.5/3 - hostap.o module, common routines * * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen * <jkmaline@cc.hut.fi> * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi> * Copyright (c) 2004, Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. See README and COPYING for * more details. ****************************************************************************** Few modifications for Realtek's Wi-Fi drivers by Andrea Merello <andrea.merello@gmail.com> A special thanks goes to Realtek for their support ! ******************************************************************************/ #include <linux/compiler.h> #include <linux/errno.h> #include <linux/if_arp.h> #include <linux/in6.h> #include <linux/in.h> #include <linux/ip.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/netdevice.h> #include <linux/pci.h> #include <linux/proc_fs.h> #include <linux/skbuff.h> #include <linux/slab.h> #include <linux/tcp.h> #include <linux/types.h> #include <linux/wireless.h> #include <linux/etherdevice.h> #include <linux/uaccess.h> #include <linux/ctype.h> #include "ieee80211.h" #include "dot11d.h" static inline void ieee80211_monitor_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats) { struct rtl_80211_hdr_4addr *hdr = (struct rtl_80211_hdr_4addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); skb->dev = ieee->dev; skb_reset_mac_header(skb); skb_pull(skb, ieee80211_get_hdrlen(fc)); skb->pkt_type = PACKET_OTHERHOST; skb->protocol = htons(ETH_P_80211_RAW); memset(skb->cb, 0, sizeof(skb->cb)); netif_rx(skb); } /* Called only as a tasklet (software IRQ) */ static struct ieee80211_frag_entry * ieee80211_frag_cache_find(struct 
ieee80211_device *ieee, unsigned int seq, unsigned int frag, u8 tid, u8 *src, u8 *dst) { struct ieee80211_frag_entry *entry; int i; for (i = 0; i < IEEE80211_FRAG_CACHE_LEN; i++) { entry = &ieee->frag_cache[tid][i]; if (entry->skb != NULL && time_after(jiffies, entry->first_frag_time + 2 * HZ)) { IEEE80211_DEBUG_FRAG( "expiring fragment cache entry " "seq=%u last_frag=%u\n", entry->seq, entry->last_frag); dev_kfree_skb_any(entry->skb); entry->skb = NULL; } if (entry->skb != NULL && entry->seq == seq && (entry->last_frag + 1 == frag || frag == -1) && memcmp(entry->src_addr, src, ETH_ALEN) == 0 && memcmp(entry->dst_addr, dst, ETH_ALEN) == 0) return entry; } return NULL; } /* Called only as a tasklet (software IRQ) */ static struct sk_buff * ieee80211_frag_cache_get(struct ieee80211_device *ieee, struct rtl_80211_hdr_4addr *hdr) { struct sk_buff *skb = NULL; u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int frag = WLAN_GET_SEQ_FRAG(sc); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct ieee80211_frag_entry *entry; struct rtl_80211_hdr_3addrqos *hdr_3addrqos; struct rtl_80211_hdr_4addrqos *hdr_4addrqos; u8 tid; if (((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else if (IEEE80211_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else { tid = 0; } if (frag == 0) { /* Reserve enough space to fit maximum frame length */ skb = dev_alloc_skb(ieee->dev->mtu + sizeof(struct rtl_80211_hdr_4addr) + 8 /* LLC */ + 2 /* alignment */ + 8 /* WEP */ + ETH_ALEN /* WDS */ + (IEEE80211_QOS_HAS_SEQ(fc)?2:0) /* QOS Control */); if (!skb) return NULL; entry = &ieee->frag_cache[tid][ieee->frag_next_idx[tid]]; ieee->frag_next_idx[tid]++; if (ieee->frag_next_idx[tid] 
>= IEEE80211_FRAG_CACHE_LEN) ieee->frag_next_idx[tid] = 0; if (entry->skb != NULL) dev_kfree_skb_any(entry->skb); entry->first_frag_time = jiffies; entry->seq = seq; entry->last_frag = frag; entry->skb = skb; memcpy(entry->src_addr, hdr->addr2, ETH_ALEN); memcpy(entry->dst_addr, hdr->addr1, ETH_ALEN); } else { /* received a fragment of a frame for which the head fragment * should have already been received */ entry = ieee80211_frag_cache_find(ieee, seq, frag, tid,hdr->addr2, hdr->addr1); if (entry != NULL) { entry->last_frag = frag; skb = entry->skb; } } return skb; } /* Called only as a tasklet (software IRQ) */ static int ieee80211_frag_cache_invalidate(struct ieee80211_device *ieee, struct rtl_80211_hdr_4addr *hdr) { u16 fc = le16_to_cpu(hdr->frame_ctl); u16 sc = le16_to_cpu(hdr->seq_ctl); unsigned int seq = WLAN_GET_SEQ_SEQ(sc); struct ieee80211_frag_entry *entry; struct rtl_80211_hdr_3addrqos *hdr_3addrqos; struct rtl_80211_hdr_4addrqos *hdr_4addrqos; u8 tid; if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)hdr; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else if (IEEE80211_QOS_HAS_SEQ(fc)) { hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)hdr; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else { tid = 0; } entry = ieee80211_frag_cache_find(ieee, seq, -1, tid, hdr->addr2, hdr->addr1); if (entry == NULL) { IEEE80211_DEBUG_FRAG( "could not invalidate fragment cache " "entry (seq=%u)\n", seq); return -1; } entry->skb = NULL; return 0; } /* ieee80211_rx_frame_mgtmt * * Responsible for handling management control frames * * Called by ieee80211_rx */ static inline int ieee80211_rx_frame_mgmt(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats, u16 type, u16 stype) { /* On the struct stats definition there is written that * this is not mandatory.... 
but seems that the probe * response parser uses it */ struct rtl_80211_hdr_3addr *hdr = (struct rtl_80211_hdr_3addr *)skb->data; rx_stats->len = skb->len; ieee80211_rx_mgt(ieee,(struct rtl_80211_hdr_4addr *)skb->data,rx_stats); /* if ((ieee->state == IEEE80211_LINKED) && (memcmp(hdr->addr3, ieee->current_network.bssid, ETH_ALEN))) */ if ((memcmp(hdr->addr1, ieee->dev->dev_addr, ETH_ALEN)))/* use ADDR1 to perform address matching for Management frames */ { dev_kfree_skb_any(skb); return 0; } ieee80211_rx_frame_softmac(ieee, skb, rx_stats, type, stype); dev_kfree_skb_any(skb); return 0; #ifdef NOT_YET if (ieee->iw_mode == IW_MODE_MASTER) { printk(KERN_DEBUG "%s: Master mode not yet supported.\n", ieee->dev->name); return 0; /* hostap_update_sta_ps(ieee, (struct hostap_ieee80211_hdr_4addr *) skb->data);*/ } if (ieee->hostapd && type == IEEE80211_TYPE_MGMT) { if (stype == WLAN_FC_STYPE_BEACON && ieee->iw_mode == IW_MODE_MASTER) { struct sk_buff *skb2; /* Process beacon frames also in kernel driver to * update STA(AP) table statistics */ skb2 = skb_clone(skb, GFP_ATOMIC); if (skb2) hostap_rx(skb2->dev, skb2, rx_stats); } /* send management frames to the user space daemon for * processing */ ieee->apdevstats.rx_packets++; ieee->apdevstats.rx_bytes += skb->len; prism2_rx_80211(ieee->apdev, skb, rx_stats, PRISM2_RX_MGMT); return 0; } if (ieee->iw_mode == IW_MODE_MASTER) { if (type != WLAN_FC_TYPE_MGMT && type != WLAN_FC_TYPE_CTRL) { printk(KERN_DEBUG "%s: unknown management frame " "(type=0x%02x, stype=0x%02x) dropped\n", skb->dev->name, type, stype); return -1; } hostap_rx(skb->dev, skb, rx_stats); return 0; } printk(KERN_DEBUG "%s: hostap_rx_frame_mgmt: management frame " "received in non-Host AP mode\n", skb->dev->name); return -1; #endif } /* See IEEE 802.1H for LLC/SNAP encapsulation/decapsulation */ /* Ethernet-II snap header (RFC1042 for most EtherTypes) */ static unsigned char rfc1042_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00 }; /* Bridge-Tunnel header (for 
EtherTypes ETH_P_AARP and ETH_P_IPX) */ static unsigned char bridge_tunnel_header[] = { 0xaa, 0xaa, 0x03, 0x00, 0x00, 0xf8 }; /* No encapsulation header if EtherType < 0x600 (=length) */ /* Called by ieee80211_rx_frame_decrypt */ static int ieee80211_is_eapol_frame(struct ieee80211_device *ieee, struct sk_buff *skb, size_t hdrlen) { struct net_device *dev = ieee->dev; u16 fc, ethertype; struct rtl_80211_hdr_4addr *hdr; u8 *pos; if (skb->len < 24) return 0; hdr = (struct rtl_80211_hdr_4addr *) skb->data; fc = le16_to_cpu(hdr->frame_ctl); /* check that the frame is unicast frame to us */ if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_TODS && memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0 && memcmp(hdr->addr3, dev->dev_addr, ETH_ALEN) == 0) { /* ToDS frame with own addr BSSID and DA */ } else if ((fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && memcmp(hdr->addr1, dev->dev_addr, ETH_ALEN) == 0) { /* FromDS frame with own addr as DA */ } else return 0; if (skb->len < 24 + 8) return 0; /* check for port access entity Ethernet type */ // pos = skb->data + 24; pos = skb->data + hdrlen; ethertype = (pos[6] << 8) | pos[7]; if (ethertype == ETH_P_PAE) return 1; return 0; } /* Called only as a tasklet (software IRQ), by ieee80211_rx */ static inline int ieee80211_rx_frame_decrypt(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_crypt_data *crypt) { struct rtl_80211_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_mpdu == NULL) return 0; if (ieee->hwsec_active) { cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE); tcb_desc->bHwSec = 1; } hdr = (struct rtl_80211_hdr_4addr *) skb->data; hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); if (ieee->tkip_countermeasures && strcmp(crypt->ops->name, "TKIP") == 0) { if (net_ratelimit()) { printk(KERN_DEBUG "%s: TKIP countermeasures: dropped " "received packet from %pM\n", ieee->dev->name, hdr->addr2); } return 
-1; } atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_mpdu(skb, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { IEEE80211_DEBUG_DROP( "decryption failed (SA=%pM" ") res=%d\n", hdr->addr2, res); if (res == -2) IEEE80211_DEBUG_DROP("Decryption failed ICV " "mismatch (key %d)\n", skb->data[hdrlen + 3] >> 6); ieee->ieee_stats.rx_discards_undecryptable++; return -1; } return res; } /* Called only as a tasklet (software IRQ), by ieee80211_rx */ static inline int ieee80211_rx_frame_decrypt_msdu(struct ieee80211_device *ieee, struct sk_buff *skb, int keyidx, struct ieee80211_crypt_data *crypt) { struct rtl_80211_hdr_4addr *hdr; int res, hdrlen; if (crypt == NULL || crypt->ops->decrypt_msdu == NULL) return 0; if (ieee->hwsec_active) { cb_desc *tcb_desc = (cb_desc *)(skb->cb+ MAX_DEV_ADDR_SIZE); tcb_desc->bHwSec = 1; } hdr = (struct rtl_80211_hdr_4addr *) skb->data; hdrlen = ieee80211_get_hdrlen(le16_to_cpu(hdr->frame_ctl)); atomic_inc(&crypt->refcnt); res = crypt->ops->decrypt_msdu(skb, keyidx, hdrlen, crypt->priv); atomic_dec(&crypt->refcnt); if (res < 0) { printk(KERN_DEBUG "%s: MSDU decryption/MIC verification failed" " (SA=%pM keyidx=%d)\n", ieee->dev->name, hdr->addr2, keyidx); return -1; } return 0; } /* this function is stolen from ipw2200 driver*/ #define IEEE_PACKET_RETRY_TIME (5*HZ) static int is_duplicate_packet(struct ieee80211_device *ieee, struct rtl_80211_hdr_4addr *header) { u16 fc = le16_to_cpu(header->frame_ctl); u16 sc = le16_to_cpu(header->seq_ctl); u16 seq = WLAN_GET_SEQ_SEQ(sc); u16 frag = WLAN_GET_SEQ_FRAG(sc); u16 *last_seq, *last_frag; unsigned long *last_time; struct rtl_80211_hdr_3addrqos *hdr_3addrqos; struct rtl_80211_hdr_4addrqos *hdr_4addrqos; u8 tid; //TO2DS and QoS if(((fc & IEEE80211_FCTL_DSTODS) == IEEE80211_FCTL_DSTODS)&&IEEE80211_QOS_HAS_SEQ(fc)) { hdr_4addrqos = (struct rtl_80211_hdr_4addrqos *)header; tid = le16_to_cpu(hdr_4addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else 
if(IEEE80211_QOS_HAS_SEQ(fc)) { //QoS hdr_3addrqos = (struct rtl_80211_hdr_3addrqos *)header; tid = le16_to_cpu(hdr_3addrqos->qos_ctl) & IEEE80211_QCTL_TID; tid = UP2AC(tid); tid ++; } else { // no QoS tid = 0; } switch (ieee->iw_mode) { case IW_MODE_ADHOC: { struct list_head *p; struct ieee_ibss_seq *entry = NULL; u8 *mac = header->addr2; int index = mac[5] % IEEE_IBSS_MAC_HASH_SIZE; list_for_each(p, &ieee->ibss_mac_hash[index]) { entry = list_entry(p, struct ieee_ibss_seq, list); if (!memcmp(entry->mac, mac, ETH_ALEN)) break; } // if (memcmp(entry->mac, mac, ETH_ALEN)){ if (p == &ieee->ibss_mac_hash[index]) { entry = kmalloc(sizeof(struct ieee_ibss_seq), GFP_ATOMIC); if (!entry) return 0; memcpy(entry->mac, mac, ETH_ALEN); entry->seq_num[tid] = seq; entry->frag_num[tid] = frag; entry->packet_time[tid] = jiffies; list_add(&entry->list, &ieee->ibss_mac_hash[index]); return 0; } last_seq = &entry->seq_num[tid]; last_frag = &entry->frag_num[tid]; last_time = &entry->packet_time[tid]; break; } case IW_MODE_INFRA: last_seq = &ieee->last_rxseq_num[tid]; last_frag = &ieee->last_rxfrag_num[tid]; last_time = &ieee->last_packet_time[tid]; break; default: return 0; } // if(tid != 0) { // printk(KERN_WARNING ":)))))))))))%x %x %x, fc(%x)\n", tid, *last_seq, seq, header->frame_ctl); // } if ((*last_seq == seq) && time_after(*last_time + IEEE_PACKET_RETRY_TIME, jiffies)) { if (*last_frag == frag) goto drop; if (*last_frag + 1 != frag) /* out-of-order fragment */ goto drop; } else *last_seq = seq; *last_frag = frag; *last_time = jiffies; return 0; drop: // BUG_ON(!(fc & IEEE80211_FCTL_RETRY)); return 1; } static bool AddReorderEntry(PRX_TS_RECORD pTS, PRX_REORDER_ENTRY pReorderEntry) { struct list_head *pList = &pTS->RxPendingPktList; while(pList->next != &pTS->RxPendingPktList) { if( SN_LESS(pReorderEntry->SeqNum, ((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) ) { pList = pList->next; } else if( SN_EQUAL(pReorderEntry->SeqNum, 
((PRX_REORDER_ENTRY)list_entry(pList->next,RX_REORDER_ENTRY,List))->SeqNum) ) { return false; } else { break; } } pReorderEntry->List.next = pList->next; pReorderEntry->List.next->prev = &pReorderEntry->List; pReorderEntry->List.prev = pList; pList->next = &pReorderEntry->List; return true; } void ieee80211_indicate_packets(struct ieee80211_device *ieee, struct ieee80211_rxb **prxbIndicateArray,u8 index) { u8 i = 0 , j=0; u16 ethertype; // if(index > 1) // IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): hahahahhhh, We indicate packet from reorder list, index is %u\n",__func__,index); for(j = 0; j<index; j++) { //added by amy for reorder struct ieee80211_rxb *prxb = prxbIndicateArray[j]; for(i = 0; i<prxb->nr_subframes; i++) { struct sk_buff *sub_skb = prxb->subframes[i]; /* convert hdr + possible LLC headers into Ethernet header */ ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7]; if (sub_skb->len >= 8 && ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(sub_skb, SNAP_SIZE); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN); } else { u16 len; /* Leave Ethernet header part of hdr and full payload */ len = htons(sub_skb->len); memcpy(skb_push(sub_skb, 2), &len, 2); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), prxb->dst, ETH_ALEN); } //stats->rx_packets++; //stats->rx_bytes += sub_skb->len; /* Indicat the packets to upper layer */ if (sub_skb) { sub_skb->protocol = eth_type_trans(sub_skb, ieee->dev); memset(sub_skb->cb, 0, sizeof(sub_skb->cb)); sub_skb->dev = ieee->dev; sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ //skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */ ieee->last_rx_ps_time = jiffies; 
netif_rx(sub_skb); } } kfree(prxb); prxb = NULL; } } static void RxReorderIndicatePacket(struct ieee80211_device *ieee, struct ieee80211_rxb *prxb, PRX_TS_RECORD pTS, u16 SeqNum) { PRT_HIGH_THROUGHPUT pHTInfo = ieee->pHTInfo; PRX_REORDER_ENTRY pReorderEntry = NULL; struct ieee80211_rxb **prxbIndicateArray; u8 WinSize = pHTInfo->RxReorderWinSize; u16 WinEnd = (pTS->RxIndicateSeq + WinSize -1)%4096; u8 index = 0; bool bMatchWinStart = false, bPktInBuf = false; IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): Seq is %d,pTS->RxIndicateSeq is %d, WinSize is %d\n",__func__,SeqNum,pTS->RxIndicateSeq,WinSize); prxbIndicateArray = kmalloc(sizeof(struct ieee80211_rxb *) * REORDER_WIN_SIZE, GFP_KERNEL); if (!prxbIndicateArray) return; /* Rx Reorder initialize condition.*/ if (pTS->RxIndicateSeq == 0xffff) { pTS->RxIndicateSeq = SeqNum; } /* Drop out the packet which SeqNum is smaller than WinStart */ if (SN_LESS(SeqNum, pTS->RxIndicateSeq)) { IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packet Drop! IndicateSeq: %d, NewSeq: %d\n", pTS->RxIndicateSeq, SeqNum); pHTInfo->RxReorderDropCounter++; { int i; for(i =0; i < prxb->nr_subframes; i++) { dev_kfree_skb(prxb->subframes[i]); } kfree(prxb); prxb = NULL; } kfree(prxbIndicateArray); return; } /* * Sliding window manipulation. Conditions includes: * 1. Incoming SeqNum is equal to WinStart =>Window shift 1 * 2. Incoming SeqNum is larger than the WinEnd => Window shift N */ if(SN_EQUAL(SeqNum, pTS->RxIndicateSeq)) { pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096; bMatchWinStart = true; } else if(SN_LESS(WinEnd, SeqNum)) { if(SeqNum >= (WinSize - 1)) { pTS->RxIndicateSeq = SeqNum + 1 -WinSize; } else { pTS->RxIndicateSeq = 4095 - (WinSize - (SeqNum +1)) + 1; } IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Window Shift! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum); } /* * Indication process. 
* After Packet dropping and Sliding Window shifting as above, we can now just indicate the packets * with the SeqNum smaller than latest WinStart and buffer other packets. */ /* For Rx Reorder condition: * 1. All packets with SeqNum smaller than WinStart => Indicate * 2. All packets with SeqNum larger than or equal to WinStart => Buffer it. */ if(bMatchWinStart) { /* Current packet is going to be indicated.*/ IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Packets indication!! IndicateSeq: %d, NewSeq: %d\n",\ pTS->RxIndicateSeq, SeqNum); prxbIndicateArray[0] = prxb; // printk("========================>%s(): SeqNum is %d\n",__func__,SeqNum); index = 1; } else { /* Current packet is going to be inserted into pending list.*/ //IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): We RX no ordered packed, insert to ordered list\n",__func__); if(!list_empty(&ieee->RxReorder_Unused_List)) { pReorderEntry = (PRX_REORDER_ENTRY)list_entry(ieee->RxReorder_Unused_List.next,RX_REORDER_ENTRY,List); list_del_init(&pReorderEntry->List); /* Make a reorder entry and insert into a the packet list.*/ pReorderEntry->SeqNum = SeqNum; pReorderEntry->prxb = prxb; // IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pREorderEntry->SeqNum is %d\n",__func__,pReorderEntry->SeqNum); if(!AddReorderEntry(pTS, pReorderEntry)) { IEEE80211_DEBUG(IEEE80211_DL_REORDER, "%s(): Duplicate packet is dropped!! IndicateSeq: %d, NewSeq: %d\n", __func__, pTS->RxIndicateSeq, SeqNum); list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List); { int i; for(i =0; i < prxb->nr_subframes; i++) { dev_kfree_skb(prxb->subframes[i]); } kfree(prxb); prxb = NULL; } } else { IEEE80211_DEBUG(IEEE80211_DL_REORDER, "Pkt insert into buffer!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum); } } else { /* * Packets are dropped if there is not enough reorder entries. * This part shall be modified!! We can just indicate all the * packets in buffer and get reorder entries. 
*/ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): There is no reorder entry!! Packet is dropped!!\n"); { int i; for(i =0; i < prxb->nr_subframes; i++) { dev_kfree_skb(prxb->subframes[i]); } kfree(prxb); prxb = NULL; } } } /* Check if there is any packet need indicate.*/ while(!list_empty(&pTS->RxPendingPktList)) { IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): start RREORDER indicate\n",__func__); pReorderEntry = (PRX_REORDER_ENTRY)list_entry(pTS->RxPendingPktList.prev,RX_REORDER_ENTRY,List); if (SN_LESS(pReorderEntry->SeqNum, pTS->RxIndicateSeq) || SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) { /* This protect buffer from overflow. */ if (index >= REORDER_WIN_SIZE) { IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Buffer overflow!! \n"); bPktInBuf = true; break; } list_del_init(&pReorderEntry->List); if(SN_EQUAL(pReorderEntry->SeqNum, pTS->RxIndicateSeq)) pTS->RxIndicateSeq = (pTS->RxIndicateSeq + 1) % 4096; IEEE80211_DEBUG(IEEE80211_DL_REORDER,"Packets indication!! IndicateSeq: %d, NewSeq: %d\n",pTS->RxIndicateSeq, SeqNum); prxbIndicateArray[index] = pReorderEntry->prxb; // printk("========================>%s(): pReorderEntry->SeqNum is %d\n",__func__,pReorderEntry->SeqNum); index++; list_add_tail(&pReorderEntry->List,&ieee->RxReorder_Unused_List); } else { bPktInBuf = true; break; } } /* Handling pending timer. Set this timer to prevent from long time Rx buffering.*/ if (index>0) { // Cancel previous pending timer. // del_timer_sync(&pTS->RxPktPendingTimer); pTS->RxTimeoutIndicateSeq = 0xffff; // Indicate packets if(index>REORDER_WIN_SIZE){ IEEE80211_DEBUG(IEEE80211_DL_ERR, "RxReorderIndicatePacket(): Rx Reorder buffer full!! \n"); kfree(prxbIndicateArray); return; } ieee80211_indicate_packets(ieee, prxbIndicateArray, index); } if (bPktInBuf && pTS->RxTimeoutIndicateSeq==0xffff) { // Set new pending timer. 
IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): SET rx timeout timer\n", __func__); pTS->RxTimeoutIndicateSeq = pTS->RxIndicateSeq; if(timer_pending(&pTS->RxPktPendingTimer)) del_timer_sync(&pTS->RxPktPendingTimer); pTS->RxPktPendingTimer.expires = jiffies + msecs_to_jiffies(pHTInfo->RxReorderPendingTime); add_timer(&pTS->RxPktPendingTimer); } kfree(prxbIndicateArray); } static u8 parse_subframe(struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats, struct ieee80211_rxb *rxb, u8 *src, u8 *dst) { struct rtl_80211_hdr_3addr *hdr = (struct rtl_80211_hdr_3addr *)skb->data; u16 fc = le16_to_cpu(hdr->frame_ctl); u16 LLCOffset= sizeof(struct rtl_80211_hdr_3addr); u16 ChkLength; bool bIsAggregateFrame = false; u16 nSubframe_Length; u8 nPadding_Length = 0; u16 SeqNum=0; struct sk_buff *sub_skb; u8 *data_ptr; /* just for debug purpose */ SeqNum = WLAN_GET_SEQ_SEQ(le16_to_cpu(hdr->seq_ctl)); if ((IEEE80211_QOS_HAS_SEQ(fc))&&\ (((frameqos *)(skb->data + IEEE80211_3ADDR_LEN))->field.reserved)) { bIsAggregateFrame = true; } if (IEEE80211_QOS_HAS_SEQ(fc)) { LLCOffset += 2; } if (rx_stats->bContainHTC) { LLCOffset += sHTCLng; } // Null packet, don't indicate it to upper layer ChkLength = LLCOffset;/* + (Frame_WEP(frame)!=0 ?Adapter->MgntInfo.SecurityInfo.EncryptionHeadOverhead:0);*/ if (skb->len <= ChkLength) return 0; skb_pull(skb, LLCOffset); if(!bIsAggregateFrame) { rxb->nr_subframes = 1; #ifdef JOHN_NOCPY rxb->subframes[0] = skb; #else rxb->subframes[0] = skb_copy(skb, GFP_ATOMIC); #endif memcpy(rxb->src,src,ETH_ALEN); memcpy(rxb->dst,dst,ETH_ALEN); //IEEE80211_DEBUG_DATA(IEEE80211_DL_RX,skb->data,skb->len); return 1; } else { rxb->nr_subframes = 0; memcpy(rxb->src,src,ETH_ALEN); memcpy(rxb->dst,dst,ETH_ALEN); while(skb->len > ETHERNET_HEADER_SIZE) { /* Offset 12 denote 2 mac address */ nSubframe_Length = *((u16 *)(skb->data + 12)); //==m==>change the length order nSubframe_Length = (nSubframe_Length>>8) + (nSubframe_Length<<8); if (skb->len<(ETHERNET_HEADER_SIZE + 
nSubframe_Length)) { printk("%s: A-MSDU parse error!! pRfd->nTotalSubframe : %d\n",\ __func__, rxb->nr_subframes); printk("%s: A-MSDU parse error!! Subframe Length: %d\n",__func__, nSubframe_Length); printk("nRemain_Length is %d and nSubframe_Length is : %d\n",skb->len,nSubframe_Length); printk("The Packet SeqNum is %d\n",SeqNum); return 0; } /* move the data point to data content */ skb_pull(skb, ETHERNET_HEADER_SIZE); #ifdef JOHN_NOCPY sub_skb = skb_clone(skb, GFP_ATOMIC); sub_skb->len = nSubframe_Length; sub_skb->tail = sub_skb->data + nSubframe_Length; #else /* Allocate new skb for releasing to upper layer */ sub_skb = dev_alloc_skb(nSubframe_Length + 12); if (!sub_skb) return 0; skb_reserve(sub_skb, 12); data_ptr = (u8 *)skb_put(sub_skb, nSubframe_Length); memcpy(data_ptr, skb->data, nSubframe_Length); #endif rxb->subframes[rxb->nr_subframes++] = sub_skb; if (rxb->nr_subframes >= MAX_SUBFRAME_COUNT) { IEEE80211_DEBUG_RX("ParseSubframe(): Too many Subframes! Packets dropped!\n"); break; } skb_pull(skb, nSubframe_Length); if (skb->len != 0) { nPadding_Length = 4 - ((nSubframe_Length + ETHERNET_HEADER_SIZE) % 4); if (nPadding_Length == 4) { nPadding_Length = 0; } if (skb->len < nPadding_Length) { return 0; } skb_pull(skb, nPadding_Length); } } #ifdef JOHN_NOCPY dev_kfree_skb(skb); #endif //{just for debug added by david //printk("AMSDU::rxb->nr_subframes = %d\n",rxb->nr_subframes); //} return rxb->nr_subframes; } } /* All received frames are sent to this function. @skb contains the frame in * IEEE 802.11 format, i.e., in the format it was sent over air. * This function is called only as a tasklet (software IRQ). 
*/ int ieee80211_rx(struct ieee80211_device *ieee, struct sk_buff *skb, struct ieee80211_rx_stats *rx_stats) { struct net_device *dev = ieee->dev; struct rtl_80211_hdr_4addr *hdr; //struct rtl_80211_hdr_3addrqos *hdr; size_t hdrlen; u16 fc, type, stype, sc; struct net_device_stats *stats; unsigned int frag; u8 *payload; u16 ethertype; //added by amy for reorder u8 TID = 0; u16 SeqNum = 0; PRX_TS_RECORD pTS = NULL; //bool bIsAggregateFrame = false; //added by amy for reorder #ifdef NOT_YET struct net_device *wds = NULL; struct net_device *wds = NULL; int from_assoc_ap = 0; void *sta = NULL; #endif // u16 qos_ctl = 0; u8 dst[ETH_ALEN]; u8 src[ETH_ALEN]; u8 bssid[ETH_ALEN]; struct ieee80211_crypt_data *crypt = NULL; int keyidx = 0; int i; struct ieee80211_rxb *rxb = NULL; // cheat the the hdr type hdr = (struct rtl_80211_hdr_4addr *)skb->data; stats = &ieee->stats; if (skb->len < 10) { printk(KERN_INFO "%s: SKB length < 10\n", dev->name); goto rx_dropped; } fc = le16_to_cpu(hdr->frame_ctl); type = WLAN_FC_GET_TYPE(fc); stype = WLAN_FC_GET_STYPE(fc); sc = le16_to_cpu(hdr->seq_ctl); frag = WLAN_GET_SEQ_FRAG(sc); hdrlen = ieee80211_get_hdrlen(fc); if (HTCCheck(ieee, skb->data)) { if(net_ratelimit()) printk("find HTCControl\n"); hdrlen += 4; rx_stats->bContainHTC = true; } //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len); #ifdef NOT_YET /* Put this code here so that we avoid duplicating it in all * Rx paths. 
- Jean II */ #ifdef IW_WIRELESS_SPY /* defined in iw_handler.h */ /* If spy monitoring on */ if (iface->spy_data.spy_number > 0) { struct iw_quality wstats; wstats.level = rx_stats->rssi; wstats.noise = rx_stats->noise; wstats.updated = 6; /* No qual value */ /* Update spy records */ wireless_spy_update(dev, hdr->addr2, &wstats); } #endif /* IW_WIRELESS_SPY */ hostap_update_rx_stats(local->ap, hdr, rx_stats); #endif if (ieee->iw_mode == IW_MODE_MONITOR) { ieee80211_monitor_rx(ieee, skb, rx_stats); stats->rx_packets++; stats->rx_bytes += skb->len; return 1; } if (ieee->host_decrypt) { int idx = 0; if (skb->len >= hdrlen + 3) idx = skb->data[hdrlen + 3] >> 6; crypt = ieee->crypt[idx]; #ifdef NOT_YET sta = NULL; /* Use station specific key to override default keys if the * receiver address is a unicast address ("individual RA"). If * bcrx_sta_key parameter is set, station specific key is used * even with broad/multicast targets (this is against IEEE * 802.11, but makes it easier to use different keys with * stations that do not support WEP key mapping). */ if (!(hdr->addr1[0] & 0x01) || local->bcrx_sta_key) (void) hostap_handle_sta_crypto(local, hdr, &crypt, &sta); #endif /* allow NULL decrypt to indicate an station specific override * for default encryption */ if (crypt && (crypt->ops == NULL || crypt->ops->decrypt_mpdu == NULL)) crypt = NULL; if (!crypt && (fc & IEEE80211_FCTL_WEP)) { /* This seems to be triggered by some (multicast?) * frames from other than current BSS, so just drop the * frames silently instead of filling system log with * these reports. 
*/ IEEE80211_DEBUG_DROP("Decryption failed (not set)" " (SA=%pM)\n", hdr->addr2); ieee->ieee_stats.rx_discards_undecryptable++; goto rx_dropped; } } if (skb->len < IEEE80211_DATA_HDR3_LEN) goto rx_dropped; // if QoS enabled, should check the sequence for each of the AC if ((!ieee->pHTInfo->bCurRxReorderEnable) || !ieee->current_network.qos_data.active|| !IsDataFrame(skb->data) || IsLegacyDataFrame(skb->data)) { if (is_duplicate_packet(ieee, hdr)) goto rx_dropped; } else { PRX_TS_RECORD pRxTS = NULL; //IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): QOS ENABLE AND RECEIVE QOS DATA , we will get Ts, tid:%d\n",__func__, tid); if(GetTs( ieee, (PTS_COMMON_INFO *) &pRxTS, hdr->addr2, Frame_QoSTID((u8 *)(skb->data)), RX_DIR, true)) { // IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): pRxTS->RxLastFragNum is %d,frag is %d,pRxTS->RxLastSeqNum is %d,seq is %d\n",__func__,pRxTS->RxLastFragNum,frag,pRxTS->RxLastSeqNum,WLAN_GET_SEQ_SEQ(sc)); if ((fc & (1<<11)) && (frag == pRxTS->RxLastFragNum) && (WLAN_GET_SEQ_SEQ(sc) == pRxTS->RxLastSeqNum)) { goto rx_dropped; } else { pRxTS->RxLastFragNum = frag; pRxTS->RxLastSeqNum = WLAN_GET_SEQ_SEQ(sc); } } else { IEEE80211_DEBUG(IEEE80211_DL_ERR, "%s(): No TS!! 
Skip the check!!\n",__func__); goto rx_dropped; } } if (type == IEEE80211_FTYPE_MGMT) { //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len); if (ieee80211_rx_frame_mgmt(ieee, skb, rx_stats, type, stype)) goto rx_dropped; else goto rx_exit; } /* Data frame - extract src/dst addresses */ switch (fc & (IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS)) { case IEEE80211_FCTL_FROMDS: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr3, ETH_ALEN); memcpy(bssid, hdr->addr2, ETH_ALEN); break; case IEEE80211_FCTL_TODS: memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); memcpy(bssid, hdr->addr1, ETH_ALEN); break; case IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS: if (skb->len < IEEE80211_DATA_HDR4_LEN) goto rx_dropped; memcpy(dst, hdr->addr3, ETH_ALEN); memcpy(src, hdr->addr4, ETH_ALEN); memcpy(bssid, ieee->current_network.bssid, ETH_ALEN); break; default: memcpy(dst, hdr->addr1, ETH_ALEN); memcpy(src, hdr->addr2, ETH_ALEN); memcpy(bssid, hdr->addr3, ETH_ALEN); break; } #ifdef NOT_YET if (hostap_rx_frame_wds(ieee, hdr, fc, &wds)) goto rx_dropped; if (wds) { skb->dev = dev = wds; stats = hostap_get_stats(dev); } if (ieee->iw_mode == IW_MODE_MASTER && !wds && (fc & (IEEE80211_FCTL_TODS | IEEE80211_FCTL_FROMDS)) == IEEE80211_FCTL_FROMDS && ieee->stadev && memcmp(hdr->addr2, ieee->assoc_ap_addr, ETH_ALEN) == 0) { /* Frame from BSSID of the AP for which we are a client */ skb->dev = dev = ieee->stadev; stats = hostap_get_stats(dev); from_assoc_ap = 1; } #endif dev->last_rx = jiffies; #ifdef NOT_YET if ((ieee->iw_mode == IW_MODE_MASTER || ieee->iw_mode == IW_MODE_REPEAT) && !from_assoc_ap) { switch (hostap_handle_sta_rx(ieee, dev, skb, rx_stats, wds != NULL)) { case AP_RX_CONTINUE_NOT_AUTHORIZED: case AP_RX_CONTINUE: break; case AP_RX_DROP: goto rx_dropped; case AP_RX_EXIT: goto rx_exit; } } #endif //IEEE80211_DEBUG_DATA(IEEE80211_DL_DATA, skb->data, skb->len); /* Nullfunc frames may have PS-bit set, so they must be passed to * hostap_handle_sta_rx() 
before being dropped here. */ if (stype != IEEE80211_STYPE_DATA && stype != IEEE80211_STYPE_DATA_CFACK && stype != IEEE80211_STYPE_DATA_CFPOLL && stype != IEEE80211_STYPE_DATA_CFACKPOLL&& stype != IEEE80211_STYPE_QOS_DATA//add by David,2006.8.4 ) { if (stype != IEEE80211_STYPE_NULLFUNC) IEEE80211_DEBUG_DROP( "RX: dropped data frame " "with no data (type=0x%02x, " "subtype=0x%02x, len=%d)\n", type, stype, skb->len); goto rx_dropped; } if (memcmp(bssid, ieee->current_network.bssid, ETH_ALEN)) goto rx_dropped; /* skb: hdr + (possibly fragmented, possibly encrypted) payload */ if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) && (keyidx = ieee80211_rx_frame_decrypt(ieee, skb, crypt)) < 0) { printk("decrypt frame error\n"); goto rx_dropped; } hdr = (struct rtl_80211_hdr_4addr *) skb->data; /* skb: hdr + (possibly fragmented) plaintext payload */ // PR: FIXME: hostap has additional conditions in the "if" below: // ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) && if ((frag != 0 || (fc & IEEE80211_FCTL_MOREFRAGS))) { int flen; struct sk_buff *frag_skb = ieee80211_frag_cache_get(ieee, hdr); IEEE80211_DEBUG_FRAG("Rx Fragment received (%u)\n", frag); if (!frag_skb) { IEEE80211_DEBUG(IEEE80211_DL_RX | IEEE80211_DL_FRAG, "Rx cannot get skb from fragment " "cache (morefrag=%d seq=%u frag=%u)\n", (fc & IEEE80211_FCTL_MOREFRAGS) != 0, WLAN_GET_SEQ_SEQ(sc), frag); goto rx_dropped; } flen = skb->len; if (frag != 0) flen -= hdrlen; if (frag_skb->tail + flen > frag_skb->end) { printk(KERN_WARNING "%s: host decrypted and " "reassembled frame did not fit skb\n", dev->name); ieee80211_frag_cache_invalidate(ieee, hdr); goto rx_dropped; } if (frag == 0) { /* copy first fragment (including full headers) into * beginning of the fragment cache skb */ memcpy(skb_put(frag_skb, flen), skb->data, flen); } else { /* append frame payload to the end of the fragment * cache skb */ memcpy(skb_put(frag_skb, flen), skb->data + hdrlen, flen); } dev_kfree_skb_any(skb); skb = NULL; if (fc & 
IEEE80211_FCTL_MOREFRAGS) { /* more fragments expected - leave the skb in fragment * cache for now; it will be delivered to upper layers * after all fragments have been received */ goto rx_exit; } /* this was the last fragment and the frame will be * delivered, so remove skb from fragment cache */ skb = frag_skb; hdr = (struct rtl_80211_hdr_4addr *) skb->data; ieee80211_frag_cache_invalidate(ieee, hdr); } /* skb: hdr + (possible reassembled) full MSDU payload; possibly still * encrypted/authenticated */ if (ieee->host_decrypt && (fc & IEEE80211_FCTL_WEP) && ieee80211_rx_frame_decrypt_msdu(ieee, skb, keyidx, crypt)) { printk("==>decrypt msdu error\n"); goto rx_dropped; } //added by amy for AP roaming ieee->LinkDetectInfo.NumRecvDataInPeriod++; ieee->LinkDetectInfo.NumRxOkInPeriod++; hdr = (struct rtl_80211_hdr_4addr *) skb->data; if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep) { if (/*ieee->ieee802_1x &&*/ ieee80211_is_eapol_frame(ieee, skb, hdrlen)) { #ifdef CONFIG_IEEE80211_DEBUG /* pass unencrypted EAPOL frames even if encryption is * configured */ struct eapol *eap = (struct eapol *)(skb->data + 24); IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n", eap_get_type(eap->type)); #endif } else { IEEE80211_DEBUG_DROP( "encryption configured, but RX " "frame not encrypted (SA=%pM)\n", hdr->addr2); goto rx_dropped; } } #ifdef CONFIG_IEEE80211_DEBUG if (crypt && !(fc & IEEE80211_FCTL_WEP) && ieee80211_is_eapol_frame(ieee, skb, hdrlen)) { struct eapol *eap = (struct eapol *)(skb->data + 24); IEEE80211_DEBUG_EAP("RX: IEEE 802.1X EAPOL frame: %s\n", eap_get_type(eap->type)); } #endif if (crypt && !(fc & IEEE80211_FCTL_WEP) && !ieee->open_wep && !ieee80211_is_eapol_frame(ieee, skb, hdrlen)) { IEEE80211_DEBUG_DROP( "dropped unencrypted RX data " "frame from %pM" " (drop_unencrypted=1)\n", hdr->addr2); goto rx_dropped; } /* if(ieee80211_is_eapol_frame(ieee, skb, hdrlen)) { printk(KERN_WARNING "RX: IEEE802.1X EPAOL frame!\n"); } */ //added by amy for reorder 
if (ieee->current_network.qos_data.active && IsQoSDataFrame(skb->data) && !is_multicast_ether_addr(hdr->addr1)) { TID = Frame_QoSTID(skb->data); SeqNum = WLAN_GET_SEQ_SEQ(sc); GetTs(ieee,(PTS_COMMON_INFO *) &pTS,hdr->addr2,TID,RX_DIR,true); if (TID !=0 && TID !=3) { ieee->bis_any_nonbepkts = true; } } //added by amy for reorder /* skb: hdr + (possible reassembled) full plaintext payload */ payload = skb->data + hdrlen; //ethertype = (payload[6] << 8) | payload[7]; rxb = kmalloc(sizeof(struct ieee80211_rxb), GFP_ATOMIC); if (!rxb) goto rx_dropped; /* to parse amsdu packets */ /* qos data packets & reserved bit is 1 */ if (parse_subframe(skb, rx_stats, rxb, src, dst) == 0) { /* only to free rxb, and not submit the packets to upper layer */ for(i =0; i < rxb->nr_subframes; i++) { dev_kfree_skb(rxb->subframes[i]); } kfree(rxb); rxb = NULL; goto rx_dropped; } //added by amy for reorder if (!ieee->pHTInfo->bCurRxReorderEnable || pTS == NULL){ //added by amy for reorder for(i = 0; i<rxb->nr_subframes; i++) { struct sk_buff *sub_skb = rxb->subframes[i]; if (sub_skb) { /* convert hdr + possible LLC headers into Ethernet header */ ethertype = (sub_skb->data[6] << 8) | sub_skb->data[7]; if (sub_skb->len >= 8 && ((memcmp(sub_skb->data, rfc1042_header, SNAP_SIZE) == 0 && ethertype != ETH_P_AARP && ethertype != ETH_P_IPX) || memcmp(sub_skb->data, bridge_tunnel_header, SNAP_SIZE) == 0)) { /* remove RFC1042 or Bridge-Tunnel encapsulation and * replace EtherType */ skb_pull(sub_skb, SNAP_SIZE); memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN); } else { u16 len; /* Leave Ethernet header part of hdr and full payload */ len = htons(sub_skb->len); memcpy(skb_push(sub_skb, 2), &len, 2); memcpy(skb_push(sub_skb, ETH_ALEN), src, ETH_ALEN); memcpy(skb_push(sub_skb, ETH_ALEN), dst, ETH_ALEN); } stats->rx_packets++; stats->rx_bytes += sub_skb->len; if (is_multicast_ether_addr(dst)) { stats->multicast++; } /* Indicat the packets to upper 
layer */ sub_skb->protocol = eth_type_trans(sub_skb, dev); memset(sub_skb->cb, 0, sizeof(sub_skb->cb)); sub_skb->dev = dev; sub_skb->ip_summed = CHECKSUM_NONE; /* 802.11 crc not sufficient */ //skb->ip_summed = CHECKSUM_UNNECESSARY; /* 802.11 crc not sufficient */ ieee->last_rx_ps_time = jiffies; netif_rx(sub_skb); } } kfree(rxb); rxb = NULL; } else { IEEE80211_DEBUG(IEEE80211_DL_REORDER,"%s(): REORDER ENABLE AND PTS not NULL, and we will enter RxReorderIndicatePacket()\n",__func__); RxReorderIndicatePacket(ieee, rxb, pTS, SeqNum); } #ifndef JOHN_NOCPY dev_kfree_skb(skb); #endif rx_exit: #ifdef NOT_YET if (sta) hostap_handle_sta_release(sta); #endif return 1; rx_dropped: kfree(rxb); rxb = NULL; stats->rx_dropped++; /* Returning 0 indicates to caller that we have not handled the SKB-- * so it is still allocated and can be used again by underlying * hardware as a DMA target */ return 0; } EXPORT_SYMBOL(ieee80211_rx); #define MGMT_FRAME_FIXED_PART_LENGTH 0x24 static u8 qos_oui[QOS_OUI_LEN] = { 0x00, 0x50, 0xF2 }; /* * Make the structure we read from the beacon packet to have * the right values */ static int ieee80211_verify_qos_info(struct ieee80211_qos_information_element *info_element, int sub_type) { if (info_element->qui_subtype != sub_type) return -1; if (memcmp(info_element->qui, qos_oui, QOS_OUI_LEN)) return -1; if (info_element->qui_type != QOS_OUI_TYPE) return -1; if (info_element->version != QOS_VERSION_1) return -1; return 0; } /* * Parse a QoS parameter element */ static int ieee80211_read_qos_param_element(struct ieee80211_qos_parameter_info *element_param, struct ieee80211_info_element *info_element) { int ret = 0; u16 size = sizeof(struct ieee80211_qos_parameter_info) - 2; if ((info_element == NULL) || (element_param == NULL)) return -1; if (info_element->id == QOS_ELEMENT_ID && info_element->len == size) { memcpy(element_param->info_element.qui, info_element->data, info_element->len); element_param->info_element.elementID = info_element->id; 
element_param->info_element.length = info_element->len; } else ret = -1; if (ret == 0) ret = ieee80211_verify_qos_info(&element_param->info_element, QOS_OUI_PARAM_SUB_TYPE); return ret; } /* * Parse a QoS information element */ static int ieee80211_read_qos_info_element(struct ieee80211_qos_information_element *element_info, struct ieee80211_info_element *info_element) { int ret = 0; u16 size = sizeof(struct ieee80211_qos_information_element) - 2; if (element_info == NULL) return -1; if (info_element == NULL) return -1; if ((info_element->id == QOS_ELEMENT_ID) && (info_element->len == size)) { memcpy(element_info->qui, info_element->data, info_element->len); element_info->elementID = info_element->id; element_info->length = info_element->len; } else ret = -1; if (ret == 0) ret = ieee80211_verify_qos_info(element_info, QOS_OUI_INFO_SUB_TYPE); return ret; } /* * Write QoS parameters from the ac parameters. */ static int ieee80211_qos_convert_ac_to_parameters(struct ieee80211_qos_parameter_info *param_elm, struct ieee80211_qos_parameters *qos_param) { int i; struct ieee80211_qos_ac_parameter *ac_params; u8 aci; //u8 cw_min; //u8 cw_max; for (i = 0; i < QOS_QUEUE_NUM; i++) { ac_params = &(param_elm->ac_params_record[i]); aci = (ac_params->aci_aifsn & 0x60) >> 5; if(aci >= QOS_QUEUE_NUM) continue; qos_param->aifs[aci] = (ac_params->aci_aifsn) & 0x0f; /* WMM spec P.11: The minimum value for AIFSN shall be 2 */ qos_param->aifs[aci] = (qos_param->aifs[aci] < 2) ? 2:qos_param->aifs[aci]; qos_param->cw_min[aci] = ac_params->ecw_min_max & 0x0F; qos_param->cw_max[aci] = (ac_params->ecw_min_max & 0xF0) >> 4; qos_param->flag[aci] = (ac_params->aci_aifsn & 0x10) ? 0x01 : 0x00; qos_param->tx_op_limit[aci] = le16_to_cpu(ac_params->tx_op_limit); } return 0; } /* * we have a generic data element which it may contain QoS information or * parameters element. 
check the information element length to decide * which type to read */ static int ieee80211_parse_qos_info_param_IE(struct ieee80211_info_element *info_element, struct ieee80211_network *network) { int rc = 0; struct ieee80211_qos_parameters *qos_param = NULL; struct ieee80211_qos_information_element qos_info_element; rc = ieee80211_read_qos_info_element(&qos_info_element, info_element); if (rc == 0) { network->qos_data.param_count = qos_info_element.ac_info & 0x0F; network->flags |= NETWORK_HAS_QOS_INFORMATION; } else { struct ieee80211_qos_parameter_info param_element; rc = ieee80211_read_qos_param_element(&param_element, info_element); if (rc == 0) { qos_param = &(network->qos_data.parameters); ieee80211_qos_convert_ac_to_parameters(&param_element, qos_param); network->flags |= NETWORK_HAS_QOS_PARAMETERS; network->qos_data.param_count = param_element.info_element.ac_info & 0x0F; } } if (rc == 0) { IEEE80211_DEBUG_QOS("QoS is supported\n"); network->qos_data.supported = 1; } return rc; } #ifdef CONFIG_IEEE80211_DEBUG #define MFIE_STRING(x) case MFIE_TYPE_ ##x: return #x static const char *get_info_element_string(u16 id) { switch (id) { MFIE_STRING(SSID); MFIE_STRING(RATES); MFIE_STRING(FH_SET); MFIE_STRING(DS_SET); MFIE_STRING(CF_SET); MFIE_STRING(TIM); MFIE_STRING(IBSS_SET); MFIE_STRING(COUNTRY); MFIE_STRING(HOP_PARAMS); MFIE_STRING(HOP_TABLE); MFIE_STRING(REQUEST); MFIE_STRING(CHALLENGE); MFIE_STRING(POWER_CONSTRAINT); MFIE_STRING(POWER_CAPABILITY); MFIE_STRING(TPC_REQUEST); MFIE_STRING(TPC_REPORT); MFIE_STRING(SUPP_CHANNELS); MFIE_STRING(CSA); MFIE_STRING(MEASURE_REQUEST); MFIE_STRING(MEASURE_REPORT); MFIE_STRING(QUIET); MFIE_STRING(IBSS_DFS); // MFIE_STRING(ERP_INFO); MFIE_STRING(RSN); MFIE_STRING(RATES_EX); MFIE_STRING(GENERIC); MFIE_STRING(QOS_PARAMETER); default: return "UNKNOWN"; } } #endif static inline void ieee80211_extract_country_ie( struct ieee80211_device *ieee, struct ieee80211_info_element *info_element, struct ieee80211_network *network, u8 
*addr2 ) { if (IS_DOT11D_ENABLE(ieee)) { if (info_element->len!= 0) { memcpy(network->CountryIeBuf, info_element->data, info_element->len); network->CountryIeLen = info_element->len; if (!IS_COUNTRY_IE_VALID(ieee)) { Dot11d_UpdateCountryIe(ieee, addr2, info_element->len, info_element->data); } } // // 070305, rcnjko: I update country IE watch dog here because // some AP (e.g. Cisco 1242) don't include country IE in their // probe response frame. // if (IS_EQUAL_CIE_SRC(ieee, addr2) ) { UPDATE_CIE_WATCHDOG(ieee); } } } int ieee80211_parse_info_param(struct ieee80211_device *ieee, struct ieee80211_info_element *info_element, u16 length, struct ieee80211_network *network, struct ieee80211_rx_stats *stats) { u8 i; short offset; u16 tmp_htcap_len=0; u16 tmp_htinfo_len=0; u16 ht_realtek_agg_len=0; u8 ht_realtek_agg_buf[MAX_IE_LEN]; // u16 broadcom_len = 0; #ifdef CONFIG_IEEE80211_DEBUG char rates_str[64]; char *p; #endif while (length >= sizeof(*info_element)) { if (sizeof(*info_element) + info_element->len > length) { IEEE80211_DEBUG_MGMT("Info elem: parse failed: " "info_element->len + 2 > left : " "info_element->len+2=%zd left=%d, id=%d.\n", info_element->len + sizeof(*info_element), length, info_element->id); /* We stop processing but don't return an error here * because some misbehaviour APs break this rule. ie. * Orinoco AP1000. 
*/ break; } switch (info_element->id) { case MFIE_TYPE_SSID: if (ieee80211_is_empty_essid(info_element->data, info_element->len)) { network->flags |= NETWORK_EMPTY_ESSID; break; } network->ssid_len = min(info_element->len, (u8) IW_ESSID_MAX_SIZE); memcpy(network->ssid, info_element->data, network->ssid_len); if (network->ssid_len < IW_ESSID_MAX_SIZE) memset(network->ssid + network->ssid_len, 0, IW_ESSID_MAX_SIZE - network->ssid_len); IEEE80211_DEBUG_MGMT("MFIE_TYPE_SSID: '%s' len=%d.\n", network->ssid, network->ssid_len); break; case MFIE_TYPE_RATES: #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif network->rates_len = min(info_element->len, MAX_RATES_LENGTH); for (i = 0; i < network->rates_len; i++) { network->rates[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & IEEE80211_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } } IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES: '%s' (%d)\n", rates_str, network->rates_len); break; case MFIE_TYPE_RATES_EX: #ifdef CONFIG_IEEE80211_DEBUG p = rates_str; #endif network->rates_ex_len = min(info_element->len, MAX_RATES_EX_LENGTH); for (i = 0; i < network->rates_ex_len; i++) { network->rates_ex[i] = info_element->data[i]; #ifdef CONFIG_IEEE80211_DEBUG p += snprintf(p, sizeof(rates_str) - (p - rates_str), "%02X ", network->rates_ex[i]); #endif if (ieee80211_is_ofdm_rate (info_element->data[i])) { network->flags |= NETWORK_HAS_OFDM; if (info_element->data[i] & IEEE80211_BASIC_RATE_MASK) network->flags &= ~NETWORK_HAS_CCK; } } IEEE80211_DEBUG_MGMT("MFIE_TYPE_RATES_EX: '%s' (%d)\n", rates_str, network->rates_ex_len); break; case MFIE_TYPE_DS_SET: IEEE80211_DEBUG_MGMT("MFIE_TYPE_DS_SET: %d\n", info_element->data[0]); network->channel = info_element->data[0]; break; case MFIE_TYPE_FH_SET: 
IEEE80211_DEBUG_MGMT("MFIE_TYPE_FH_SET: ignored\n"); break; case MFIE_TYPE_CF_SET: IEEE80211_DEBUG_MGMT("MFIE_TYPE_CF_SET: ignored\n"); break; case MFIE_TYPE_TIM: if(info_element->len < 4) break; network->tim.tim_count = info_element->data[0]; network->tim.tim_period = info_element->data[1]; network->dtim_period = info_element->data[1]; if(ieee->state != IEEE80211_LINKED) break; network->last_dtim_sta_time[0] = stats->mac_time[0]; network->last_dtim_sta_time[1] = stats->mac_time[1]; network->dtim_data = IEEE80211_DTIM_VALID; if(info_element->data[0] != 0) break; if(info_element->data[2] & 1) network->dtim_data |= IEEE80211_DTIM_MBCAST; offset = (info_element->data[2] >> 1)*2; if(ieee->assoc_id < 8*offset || ieee->assoc_id > 8*(offset + info_element->len -3)) break; offset = (ieee->assoc_id / 8) - offset;// + ((aid % 8)? 0 : 1) ; if(info_element->data[3+offset] & (1<<(ieee->assoc_id%8))) network->dtim_data |= IEEE80211_DTIM_UCAST; //IEEE80211_DEBUG_MGMT("MFIE_TYPE_TIM: partially ignored\n"); break; case MFIE_TYPE_ERP: network->erp_value = info_element->data[0]; network->flags |= NETWORK_HAS_ERP_VALUE; IEEE80211_DEBUG_MGMT("MFIE_TYPE_ERP_SET: %d\n", network->erp_value); break; case MFIE_TYPE_IBSS_SET: network->atim_window = info_element->data[0]; IEEE80211_DEBUG_MGMT("MFIE_TYPE_IBSS_SET: %d\n", network->atim_window); break; case MFIE_TYPE_CHALLENGE: IEEE80211_DEBUG_MGMT("MFIE_TYPE_CHALLENGE: ignored\n"); break; case MFIE_TYPE_GENERIC: IEEE80211_DEBUG_MGMT("MFIE_TYPE_GENERIC: %d bytes\n", info_element->len); if (!ieee80211_parse_qos_info_param_IE(info_element, network)) break; if (info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x50 && info_element->data[2] == 0xf2 && info_element->data[3] == 0x01) { network->wpa_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->wpa_ie, info_element, network->wpa_ie_len); break; } #ifdef THOMAS_TURBO if (info_element->len == 7 && info_element->data[0] == 0x00 && 
info_element->data[1] == 0xe0 && info_element->data[2] == 0x4c && info_element->data[3] == 0x01 && info_element->data[4] == 0x02) { network->Turbo_Enable = 1; } #endif //for HTcap and HTinfo parameters if(tmp_htcap_len == 0){ if(info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x90 && info_element->data[2] == 0x4c && info_element->data[3] == 0x033){ tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htcap_len != 0){ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\ sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len; memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen); } } if(tmp_htcap_len != 0) network->bssht.bdSupportHT = true; else network->bssht.bdSupportHT = false; } if(tmp_htinfo_len == 0){ if(info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x90 && info_element->data[2] == 0x4c && info_element->data[3] == 0x034){ tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htinfo_len != 0){ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; if(tmp_htinfo_len){ network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\ sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len; memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen); } } } } if(ieee->aggregation){ if(network->bssht.bdSupportHT){ if(info_element->len >= 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0xe0 && info_element->data[2] == 0x4c && info_element->data[3] == 0x02){ ht_realtek_agg_len = min(info_element->len,(u8)MAX_IE_LEN); memcpy(ht_realtek_agg_buf,info_element->data,info_element->len); } if(ht_realtek_agg_len >= 5){ network->bssht.bdRT2RTAggregation = true; if((ht_realtek_agg_buf[4] == 1) && (ht_realtek_agg_buf[5] & 0x02)) network->bssht.bdRT2RTLongSlotTime = true; } } } //if(tmp_htcap_len !=0 || tmp_htinfo_len != 0) { if ((info_element->len >= 3 && 
info_element->data[0] == 0x00 && info_element->data[1] == 0x05 && info_element->data[2] == 0xb5) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0a && info_element->data[2] == 0xf7) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x10 && info_element->data[2] == 0x18)){ network->broadcom_cap_exist = true; } } if(info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x0c && info_element->data[2] == 0x43) { network->ralink_cap_exist = true; } else network->ralink_cap_exist = false; //added by amy for atheros AP if((info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x03 && info_element->data[2] == 0x7f) || (info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x13 && info_element->data[2] == 0x74)) { printk("========>%s(): athros AP is exist\n",__func__); network->atheros_cap_exist = true; } else network->atheros_cap_exist = false; if(info_element->len >= 3 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96) { network->cisco_cap_exist = true; } else network->cisco_cap_exist = false; //added by amy for LEAP of cisco if (info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x01) { if(info_element->len == 6) { memcpy(network->CcxRmState, &info_element[4], 2); if(network->CcxRmState[0] != 0) { network->bCcxRmEnable = true; } else network->bCcxRmEnable = false; // // CCXv4 Table 59-1 MBSSID Masks. 
// network->MBssidMask = network->CcxRmState[1] & 0x07; if(network->MBssidMask != 0) { network->bMBssidValid = true; network->MBssidMask = 0xff << (network->MBssidMask); cpMacAddr(network->MBssid, network->bssid); network->MBssid[5] &= network->MBssidMask; } else { network->bMBssidValid = false; } } else { network->bCcxRmEnable = false; } } if (info_element->len > 4 && info_element->data[0] == 0x00 && info_element->data[1] == 0x40 && info_element->data[2] == 0x96 && info_element->data[3] == 0x03) { if(info_element->len == 5) { network->bWithCcxVerNum = true; network->BssCcxVerNumber = info_element->data[4]; } else { network->bWithCcxVerNum = false; network->BssCcxVerNumber = 0; } } break; case MFIE_TYPE_RSN: IEEE80211_DEBUG_MGMT("MFIE_TYPE_RSN: %d bytes\n", info_element->len); network->rsn_ie_len = min(info_element->len + 2, MAX_WPA_IE_LEN); memcpy(network->rsn_ie, info_element, network->rsn_ie_len); break; //HT related element. case MFIE_TYPE_HT_CAP: IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_CAP: %d bytes\n", info_element->len); tmp_htcap_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htcap_len != 0){ network->bssht.bdHTSpecVer = HT_SPEC_VER_EWC; network->bssht.bdHTCapLen = tmp_htcap_len > sizeof(network->bssht.bdHTCapBuf)?\ sizeof(network->bssht.bdHTCapBuf):tmp_htcap_len; memcpy(network->bssht.bdHTCapBuf,info_element->data,network->bssht.bdHTCapLen); //If peer is HT, but not WMM, call QosSetLegacyWMMParamWithHT() // windows driver will update WMM parameters each beacon received once connected // Linux driver is a bit different. 
network->bssht.bdSupportHT = true; } else network->bssht.bdSupportHT = false; break; case MFIE_TYPE_HT_INFO: IEEE80211_DEBUG_SCAN("MFIE_TYPE_HT_INFO: %d bytes\n", info_element->len); tmp_htinfo_len = min(info_element->len,(u8)MAX_IE_LEN); if(tmp_htinfo_len){ network->bssht.bdHTSpecVer = HT_SPEC_VER_IEEE; network->bssht.bdHTInfoLen = tmp_htinfo_len > sizeof(network->bssht.bdHTInfoBuf)?\ sizeof(network->bssht.bdHTInfoBuf):tmp_htinfo_len; memcpy(network->bssht.bdHTInfoBuf,info_element->data,network->bssht.bdHTInfoLen); } break; case MFIE_TYPE_AIRONET: IEEE80211_DEBUG_SCAN("MFIE_TYPE_AIRONET: %d bytes\n", info_element->len); if(info_element->len >IE_CISCO_FLAG_POSITION) { network->bWithAironetIE = true; // CCX 1 spec v1.13, A01.1 CKIP Negotiation (page23): // "A Cisco access point advertises support for CKIP in beacon and probe response packets, // by adding an Aironet element and setting one or both of the CKIP negotiation bits." if( (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_MIC) || (info_element->data[IE_CISCO_FLAG_POSITION]&SUPPORT_CKIP_PK) ) { network->bCkipSupported = true; } else { network->bCkipSupported = false; } } else { network->bWithAironetIE = false; network->bCkipSupported = false; } break; case MFIE_TYPE_QOS_PARAMETER: printk(KERN_ERR "QoS Error need to parse QOS_PARAMETER IE\n"); break; case MFIE_TYPE_COUNTRY: IEEE80211_DEBUG_SCAN("MFIE_TYPE_COUNTRY: %d bytes\n", info_element->len); ieee80211_extract_country_ie(ieee, info_element, network, network->bssid);//addr2 is same as addr3 when from an AP break; /* TODO */ default: IEEE80211_DEBUG_MGMT ("Unsupported info element: %s (%d)\n", get_info_element_string(info_element->id), info_element->id); break; } length -= sizeof(*info_element) + info_element->len; info_element = (struct ieee80211_info_element *)&info_element-> data[info_element->len]; } if(!network->atheros_cap_exist && !network->broadcom_cap_exist && !network->cisco_cap_exist && !network->ralink_cap_exist && 
!network->bssht.bdRT2RTAggregation) { network->unknown_cap_exist = true; } else { network->unknown_cap_exist = false; } return 0; } static inline u8 ieee80211_SignalStrengthTranslate( u8 CurrSS ) { u8 RetSS; // Step 1. Scale mapping. if(CurrSS >= 71 && CurrSS <= 100) { RetSS = 90 + ((CurrSS - 70) / 3); } else if(CurrSS >= 41 && CurrSS <= 70) { RetSS = 78 + ((CurrSS - 40) / 3); } else if(CurrSS >= 31 && CurrSS <= 40) { RetSS = 66 + (CurrSS - 30); } else if(CurrSS >= 21 && CurrSS <= 30) { RetSS = 54 + (CurrSS - 20); } else if(CurrSS >= 5 && CurrSS <= 20) { RetSS = 42 + (((CurrSS - 5) * 2) / 3); } else if(CurrSS == 4) { RetSS = 36; } else if(CurrSS == 3) { RetSS = 27; } else if(CurrSS == 2) { RetSS = 18; } else if(CurrSS == 1) { RetSS = 9; } else { RetSS = CurrSS; } //RT_TRACE(COMP_DBG, DBG_LOUD, ("##### After Mapping: LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS)); // Step 2. Smoothing. //RT_TRACE(COMP_DBG, DBG_LOUD, ("$$$$$ After Smoothing: LastSS: %d, CurrSS: %d, RetSS: %d\n", LastSS, CurrSS, RetSS)); return RetSS; } /* 0-100 index */ static long ieee80211_translate_todbm(u8 signal_strength_index) { long signal_power; // in dBm. // Translate to dBm (x=0.5y-95). 
signal_power = (long)((signal_strength_index + 1) >> 1); signal_power -= 95; return signal_power; } static inline int ieee80211_network_init( struct ieee80211_device *ieee, struct ieee80211_probe_response *beacon, struct ieee80211_network *network, struct ieee80211_rx_stats *stats) { #ifdef CONFIG_IEEE80211_DEBUG //char rates_str[64]; //char *p; #endif network->qos_data.active = 0; network->qos_data.supported = 0; network->qos_data.param_count = 0; network->qos_data.old_param_count = 0; /* Pull out fixed field data */ memcpy(network->bssid, beacon->header.addr3, ETH_ALEN); network->capability = le16_to_cpu(beacon->capability); network->last_scanned = jiffies; network->time_stamp[0] = le32_to_cpu(beacon->time_stamp[0]); network->time_stamp[1] = le32_to_cpu(beacon->time_stamp[1]); network->beacon_interval = le16_to_cpu(beacon->beacon_interval); /* Where to pull this? beacon->listen_interval;*/ network->listen_interval = 0x0A; network->rates_len = network->rates_ex_len = 0; network->last_associate = 0; network->ssid_len = 0; network->flags = 0; network->atim_window = 0; network->erp_value = (network->capability & WLAN_CAPABILITY_IBSS) ? 
0x3 : 0x0; network->berp_info_valid = false; network->broadcom_cap_exist = false; network->ralink_cap_exist = false; network->atheros_cap_exist = false; network->cisco_cap_exist = false; network->unknown_cap_exist = false; #ifdef THOMAS_TURBO network->Turbo_Enable = 0; #endif network->CountryIeLen = 0; memset(network->CountryIeBuf, 0, MAX_IE_LEN); //Initialize HT parameters //ieee80211_ht_initialize(&network->bssht); HTInitializeBssDesc(&network->bssht); if (stats->freq == IEEE80211_52GHZ_BAND) { /* for A band (No DS info) */ network->channel = stats->received_channel; } else network->flags |= NETWORK_HAS_CCK; network->wpa_ie_len = 0; network->rsn_ie_len = 0; if (ieee80211_parse_info_param (ieee,beacon->info_element, stats->len - sizeof(*beacon), network, stats)) return 1; network->mode = 0; if (stats->freq == IEEE80211_52GHZ_BAND) network->mode = IEEE_A; else { if (network->flags & NETWORK_HAS_OFDM) network->mode |= IEEE_G; if (network->flags & NETWORK_HAS_CCK) network->mode |= IEEE_B; } if (network->mode == 0) { IEEE80211_DEBUG_SCAN("Filtered out '%s (%pM)' " "network.\n", escape_essid(network->ssid, network->ssid_len), network->bssid); return 1; } if(network->bssht.bdSupportHT){ if(network->mode == IEEE_A) network->mode = IEEE_N_5G; else if(network->mode & (IEEE_G | IEEE_B)) network->mode = IEEE_N_24G; } if (ieee80211_is_empty_essid(network->ssid, network->ssid_len)) network->flags |= NETWORK_EMPTY_ESSID; stats->signal = 30 + (stats->SignalStrength * 70) / 100; //stats->signal = ieee80211_SignalStrengthTranslate(stats->signal); stats->noise = ieee80211_translate_todbm((u8)(100-stats->signal)) -25; memcpy(&network->stats, stats, sizeof(network->stats)); return 0; } static inline int is_same_network(struct ieee80211_network *src, struct ieee80211_network *dst, struct ieee80211_device *ieee) { /* A network is only a duplicate if the channel, BSSID, ESSID * and the capability field (in particular IBSS and BSS) all match. 
* We treat all <hidden> with the same BSSID and channel * as one network */ return //((src->ssid_len == dst->ssid_len) && (((src->ssid_len == dst->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && (src->channel == dst->channel) && !memcmp(src->bssid, dst->bssid, ETH_ALEN) && //!memcmp(src->ssid, dst->ssid, src->ssid_len) && (!memcmp(src->ssid, dst->ssid, src->ssid_len) || (ieee->iw_mode == IW_MODE_INFRA)) && ((src->capability & WLAN_CAPABILITY_IBSS) == (dst->capability & WLAN_CAPABILITY_IBSS)) && ((src->capability & WLAN_CAPABILITY_BSS) == (dst->capability & WLAN_CAPABILITY_BSS))); } static inline void update_network(struct ieee80211_network *dst, struct ieee80211_network *src) { int qos_active; u8 old_param; memcpy(&dst->stats, &src->stats, sizeof(struct ieee80211_rx_stats)); dst->capability = src->capability; memcpy(dst->rates, src->rates, src->rates_len); dst->rates_len = src->rates_len; memcpy(dst->rates_ex, src->rates_ex, src->rates_ex_len); dst->rates_ex_len = src->rates_ex_len; if (src->ssid_len > 0) { memset(dst->ssid, 0, dst->ssid_len); dst->ssid_len = src->ssid_len; memcpy(dst->ssid, src->ssid, src->ssid_len); } dst->mode = src->mode; dst->flags = src->flags; dst->time_stamp[0] = src->time_stamp[0]; dst->time_stamp[1] = src->time_stamp[1]; if (src->flags & NETWORK_HAS_ERP_VALUE) { dst->erp_value = src->erp_value; dst->berp_info_valid = src->berp_info_valid = true; } dst->beacon_interval = src->beacon_interval; dst->listen_interval = src->listen_interval; dst->atim_window = src->atim_window; dst->dtim_period = src->dtim_period; dst->dtim_data = src->dtim_data; dst->last_dtim_sta_time[0] = src->last_dtim_sta_time[0]; dst->last_dtim_sta_time[1] = src->last_dtim_sta_time[1]; memcpy(&dst->tim, &src->tim, sizeof(struct ieee80211_tim_parameters)); dst->bssht.bdSupportHT = src->bssht.bdSupportHT; dst->bssht.bdRT2RTAggregation = src->bssht.bdRT2RTAggregation; dst->bssht.bdHTCapLen= src->bssht.bdHTCapLen; 
memcpy(dst->bssht.bdHTCapBuf,src->bssht.bdHTCapBuf,src->bssht.bdHTCapLen); dst->bssht.bdHTInfoLen= src->bssht.bdHTInfoLen; memcpy(dst->bssht.bdHTInfoBuf,src->bssht.bdHTInfoBuf,src->bssht.bdHTInfoLen); dst->bssht.bdHTSpecVer = src->bssht.bdHTSpecVer; dst->bssht.bdRT2RTLongSlotTime = src->bssht.bdRT2RTLongSlotTime; dst->broadcom_cap_exist = src->broadcom_cap_exist; dst->ralink_cap_exist = src->ralink_cap_exist; dst->atheros_cap_exist = src->atheros_cap_exist; dst->cisco_cap_exist = src->cisco_cap_exist; dst->unknown_cap_exist = src->unknown_cap_exist; memcpy(dst->wpa_ie, src->wpa_ie, src->wpa_ie_len); dst->wpa_ie_len = src->wpa_ie_len; memcpy(dst->rsn_ie, src->rsn_ie, src->rsn_ie_len); dst->rsn_ie_len = src->rsn_ie_len; dst->last_scanned = jiffies; /* qos related parameters */ //qos_active = src->qos_data.active; qos_active = dst->qos_data.active; //old_param = dst->qos_data.old_param_count; old_param = dst->qos_data.param_count; if(dst->flags & NETWORK_HAS_QOS_MASK) memcpy(&dst->qos_data, &src->qos_data, sizeof(struct ieee80211_qos_data)); else { dst->qos_data.supported = src->qos_data.supported; dst->qos_data.param_count = src->qos_data.param_count; } if (dst->qos_data.supported == 1) { dst->QoS_Enable = 1; if(dst->ssid_len) IEEE80211_DEBUG_QOS ("QoS the network %s is QoS supported\n", dst->ssid); else IEEE80211_DEBUG_QOS ("QoS the network is QoS supported\n"); } dst->qos_data.active = qos_active; dst->qos_data.old_param_count = old_param; /* dst->last_associate is not overwritten */ dst->wmm_info = src->wmm_info; //sure to exist in beacon or probe response frame. 
if (src->wmm_param[0].aci_aifsn|| \ src->wmm_param[1].aci_aifsn|| \ src->wmm_param[2].aci_aifsn|| \ src->wmm_param[3].aci_aifsn) { memcpy(dst->wmm_param, src->wmm_param, WME_AC_PRAM_LEN); } //dst->QoS_Enable = src->QoS_Enable; #ifdef THOMAS_TURBO dst->Turbo_Enable = src->Turbo_Enable; #endif dst->CountryIeLen = src->CountryIeLen; memcpy(dst->CountryIeBuf, src->CountryIeBuf, src->CountryIeLen); //added by amy for LEAP dst->bWithAironetIE = src->bWithAironetIE; dst->bCkipSupported = src->bCkipSupported; memcpy(dst->CcxRmState, src->CcxRmState, 2); dst->bCcxRmEnable = src->bCcxRmEnable; dst->MBssidMask = src->MBssidMask; dst->bMBssidValid = src->bMBssidValid; memcpy(dst->MBssid, src->MBssid, 6); dst->bWithCcxVerNum = src->bWithCcxVerNum; dst->BssCcxVerNumber = src->BssCcxVerNumber; } static inline int is_beacon(__le16 fc) { return (WLAN_FC_GET_STYPE(le16_to_cpu(fc)) == IEEE80211_STYPE_BEACON); } static inline void ieee80211_process_probe_response( struct ieee80211_device *ieee, struct ieee80211_probe_response *beacon, struct ieee80211_rx_stats *stats) { struct ieee80211_network network; struct ieee80211_network *target; struct ieee80211_network *oldest = NULL; #ifdef CONFIG_IEEE80211_DEBUG struct ieee80211_info_element *info_element = &beacon->info_element[0]; #endif unsigned long flags; short renew; //u8 wmm_info; memset(&network, 0, sizeof(struct ieee80211_network)); IEEE80211_DEBUG_SCAN( "'%s' (%pM): %c%c%c%c %c%c%c%c-%c%c%c%c %c%c%c%c\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, (beacon->capability & (1<<0xf)) ? '1' : '0', (beacon->capability & (1<<0xe)) ? '1' : '0', (beacon->capability & (1<<0xd)) ? '1' : '0', (beacon->capability & (1<<0xc)) ? '1' : '0', (beacon->capability & (1<<0xb)) ? '1' : '0', (beacon->capability & (1<<0xa)) ? '1' : '0', (beacon->capability & (1<<0x9)) ? '1' : '0', (beacon->capability & (1<<0x8)) ? '1' : '0', (beacon->capability & (1<<0x7)) ? '1' : '0', (beacon->capability & (1<<0x6)) ? 
'1' : '0', (beacon->capability & (1<<0x5)) ? '1' : '0', (beacon->capability & (1<<0x4)) ? '1' : '0', (beacon->capability & (1<<0x3)) ? '1' : '0', (beacon->capability & (1<<0x2)) ? '1' : '0', (beacon->capability & (1<<0x1)) ? '1' : '0', (beacon->capability & (1<<0x0)) ? '1' : '0'); if (ieee80211_network_init(ieee, beacon, &network, stats)) { IEEE80211_DEBUG_SCAN("Dropped '%s' (%pM) via %s.\n", escape_essid(info_element->data, info_element->len), beacon->header.addr3, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); return; } // For Asus EeePc request, // (1) if wireless adapter receive get any 802.11d country code in AP beacon, // wireless adapter should follow the country code. // (2) If there is no any country code in beacon, // then wireless adapter should do active scan from ch1~11 and // passive scan from ch12~14 if (!IsLegalChannel(ieee, network.channel)) return; if (ieee->bGlobalDomain) { if (WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP) { // Case 1: Country code if(IS_COUNTRY_IE_VALID(ieee) ) { if (!IsLegalChannel(ieee, network.channel)) { printk("GetScanInfo(): For Country code, filter probe response at channel(%d).\n", network.channel); return; } } // Case 2: No any country code. else { // Filter over channel ch12~14 if (network.channel > 11) { printk("GetScanInfo(): For Global Domain, filter probe response at channel(%d).\n", network.channel); return; } } } else { // Case 1: Country code if(IS_COUNTRY_IE_VALID(ieee) ) { if (!IsLegalChannel(ieee, network.channel)) { printk("GetScanInfo(): For Country code, filter beacon at channel(%d).\n",network.channel); return; } } // Case 2: No any country code. 
else { // Filter over channel ch12~14 if (network.channel > 14) { printk("GetScanInfo(): For Global Domain, filter beacon at channel(%d).\n",network.channel); return; } } } } /* The network parsed correctly -- so now we scan our known networks * to see if we can find it in our list. * * NOTE: This search is definitely not optimized. Once its doing * the "right thing" we'll optimize it for efficiency if * necessary */ /* Search for this entry in the list and update it if it is * already there. */ spin_lock_irqsave(&ieee->lock, flags); if (is_same_network(&ieee->current_network, &network, ieee)) { update_network(&ieee->current_network, &network); if ((ieee->current_network.mode == IEEE_N_24G || ieee->current_network.mode == IEEE_G) && ieee->current_network.berp_info_valid){ if(ieee->current_network.erp_value& ERP_UseProtection) ieee->current_network.buseprotection = true; else ieee->current_network.buseprotection = false; } if(is_beacon(beacon->header.frame_ctl)) { if(ieee->state == IEEE80211_LINKED) ieee->LinkDetectInfo.NumRecvBcnInPeriod++; } else //hidden AP network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & ieee->current_network.flags); } list_for_each_entry(target, &ieee->network_list, list) { if (is_same_network(target, &network, ieee)) break; if ((oldest == NULL) || (target->last_scanned < oldest->last_scanned)) oldest = target; } /* If we didn't find a match, then get a new network slot to initialize * with this beacon's information */ if (&target->list == &ieee->network_list) { if (list_empty(&ieee->network_free_list)) { /* If there are no more slots, expire the oldest */ list_del(&oldest->list); target = oldest; IEEE80211_DEBUG_SCAN("Expired '%s' (%pM) from " "network list.\n", escape_essid(target->ssid, target->ssid_len), target->bssid); } else { /* Otherwise just pull from the free list */ target = list_entry(ieee->network_free_list.next, struct ieee80211_network, list); list_del(ieee->network_free_list.next); } #ifdef 
CONFIG_IEEE80211_DEBUG IEEE80211_DEBUG_SCAN("Adding '%s' (%pM) via %s.\n", escape_essid(network.ssid, network.ssid_len), network.bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); #endif memcpy(target, &network, sizeof(*target)); list_add_tail(&target->list, &ieee->network_list); if(ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE) ieee80211_softmac_new_net(ieee,&network); } else { IEEE80211_DEBUG_SCAN("Updating '%s' (%pM) via %s.\n", escape_essid(target->ssid, target->ssid_len), target->bssid, WLAN_FC_GET_STYPE(beacon->header.frame_ctl) == IEEE80211_STYPE_PROBE_RESP ? "PROBE RESPONSE" : "BEACON"); /* we have an entry and we are going to update it. But this entry may * be already expired. In this case we do the same as we found a new * net and call the new_net handler */ renew = !time_after(target->last_scanned + ieee->scan_age, jiffies); //YJ,add,080819,for hidden ap if(is_beacon(beacon->header.frame_ctl) == 0) network.flags = (~NETWORK_EMPTY_ESSID & network.flags)|(NETWORK_EMPTY_ESSID & target->flags); //if(strncmp(network.ssid, "linksys-c",9) == 0) // printk("====>2 network.ssid=%s FLAG=%d target.ssid=%s FLAG=%d\n", network.ssid, network.flags, target->ssid, target->flags); if(((network.flags & NETWORK_EMPTY_ESSID) == NETWORK_EMPTY_ESSID) \ && (((network.ssid_len > 0) && (strncmp(target->ssid, network.ssid, network.ssid_len)))\ ||((ieee->current_network.ssid_len == network.ssid_len)&&(strncmp(ieee->current_network.ssid, network.ssid, network.ssid_len) == 0)&&(ieee->state == IEEE80211_NOLINK)))) renew = 1; //YJ,add,080819,for hidden ap,end update_network(target, &network); if(renew && (ieee->softmac_features & IEEE_SOFTMAC_ASSOCIATE)) ieee80211_softmac_new_net(ieee,&network); } spin_unlock_irqrestore(&ieee->lock, flags); if (is_beacon(beacon->header.frame_ctl)&&is_same_network(&ieee->current_network, &network, ieee)&&\ (ieee->state == IEEE80211_LINKED)) { if (ieee->handle_beacon != NULL) { 
ieee->handle_beacon(ieee->dev,beacon,&ieee->current_network); } } } void ieee80211_rx_mgt(struct ieee80211_device *ieee, struct rtl_80211_hdr_4addr *header, struct ieee80211_rx_stats *stats) { switch (WLAN_FC_GET_STYPE(header->frame_ctl)) { case IEEE80211_STYPE_BEACON: IEEE80211_DEBUG_MGMT("received BEACON (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); IEEE80211_DEBUG_SCAN("Beacon\n"); ieee80211_process_probe_response( ieee, (struct ieee80211_probe_response *)header, stats); break; case IEEE80211_STYPE_PROBE_RESP: IEEE80211_DEBUG_MGMT("received PROBE RESPONSE (%d)\n", WLAN_FC_GET_STYPE(header->frame_ctl)); IEEE80211_DEBUG_SCAN("Probe response\n"); ieee80211_process_probe_response( ieee, (struct ieee80211_probe_response *)header, stats); break; } } EXPORT_SYMBOL(ieee80211_rx_mgt);
gpl-2.0
Owain94/android_kernel_htc_msm8974
arch/arm/mach-davinci/board-tnetv107x-evm.c
34
6360
/* * Texas Instruments TNETV107X EVM Board Support * * Copyright (C) 2010 Texas Instruments * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/console.h> #include <linux/dma-mapping.h> #include <linux/interrupt.h> #include <linux/gpio.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <linux/ratelimit.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/input.h> #include <linux/input/matrix_keypad.h> #include <linux/spi/spi.h> #include <asm/mach/arch.h> #include <asm/mach-types.h> #include <mach/irqs.h> #include <mach/edma.h> #include <mach/mux.h> #include <mach/cp_intc.h> #include <mach/tnetv107x.h> #define EVM_MMC_WP_GPIO 21 #define EVM_MMC_CD_GPIO 24 #define EVM_SPI_CS_GPIO 54 static int initialize_gpio(int gpio, char *desc) { int ret; ret = gpio_request(gpio, desc); if (ret < 0) { pr_err_ratelimited("cannot open %s gpio\n", desc); return -ENOSYS; } gpio_direction_input(gpio); return gpio; } static int mmc_get_cd(int index) { static int gpio; if (!gpio) gpio = initialize_gpio(EVM_MMC_CD_GPIO, "mmc card detect"); if (gpio < 0) return gpio; return gpio_get_value(gpio) ? 0 : 1; } static int mmc_get_ro(int index) { static int gpio; if (!gpio) gpio = initialize_gpio(EVM_MMC_WP_GPIO, "mmc write protect"); if (gpio < 0) return gpio; return gpio_get_value(gpio) ? 
1 : 0; } static struct davinci_mmc_config mmc_config = { .get_cd = mmc_get_cd, .get_ro = mmc_get_ro, .wires = 4, .max_freq = 50000000, .caps = MMC_CAP_MMC_HIGHSPEED | MMC_CAP_SD_HIGHSPEED, .version = MMC_CTLR_VERSION_1, }; static const short sdio1_pins[] __initdata = { TNETV107X_SDIO1_CLK_1, TNETV107X_SDIO1_CMD_1, TNETV107X_SDIO1_DATA0_1, TNETV107X_SDIO1_DATA1_1, TNETV107X_SDIO1_DATA2_1, TNETV107X_SDIO1_DATA3_1, TNETV107X_GPIO21, TNETV107X_GPIO24, -1 }; static const short uart1_pins[] __initdata = { TNETV107X_UART1_RD, TNETV107X_UART1_TD, -1 }; static const short ssp_pins[] __initdata = { TNETV107X_SSP0_0, TNETV107X_SSP0_1, TNETV107X_SSP0_2, TNETV107X_SSP1_0, TNETV107X_SSP1_1, TNETV107X_SSP1_2, TNETV107X_SSP1_3, -1 }; static struct mtd_partition nand_partitions[] = { { .name = "bootloader", .offset = 0, .size = (12*SZ_128K), .mask_flags = MTD_WRITEABLE, }, { .name = "params", .offset = MTDPART_OFS_NXTBLK, .size = SZ_128K, .mask_flags = MTD_WRITEABLE, }, { .name = "kernel", .offset = MTDPART_OFS_NXTBLK, .size = SZ_4M, .mask_flags = 0, }, { .name = "filesystem", .offset = MTDPART_OFS_NXTBLK, .size = MTDPART_SIZ_FULL, .mask_flags = 0, } }; static struct davinci_nand_pdata nand_config = { .mask_cle = 0x4000, .mask_ale = 0x2000, .parts = nand_partitions, .nr_parts = ARRAY_SIZE(nand_partitions), .ecc_mode = NAND_ECC_HW, .bbt_options = NAND_BBT_USE_FLASH, .ecc_bits = 1, }; static struct davinci_uart_config serial_config __initconst = { .enabled_uarts = BIT(1), }; static const uint32_t keymap[] = { KEY(0, 0, KEY_NUMERIC_1), KEY(0, 1, KEY_NUMERIC_2), KEY(0, 2, KEY_NUMERIC_3), KEY(0, 3, KEY_FN_F1), KEY(0, 4, KEY_MENU), KEY(1, 0, KEY_NUMERIC_4), KEY(1, 1, KEY_NUMERIC_5), KEY(1, 2, KEY_NUMERIC_6), KEY(1, 3, KEY_UP), KEY(1, 4, KEY_FN_F2), KEY(2, 0, KEY_NUMERIC_7), KEY(2, 1, KEY_NUMERIC_8), KEY(2, 2, KEY_NUMERIC_9), KEY(2, 3, KEY_LEFT), KEY(2, 4, KEY_ENTER), KEY(3, 0, KEY_NUMERIC_STAR), KEY(3, 1, KEY_NUMERIC_0), KEY(3, 2, KEY_NUMERIC_POUND), KEY(3, 3, KEY_DOWN), KEY(3, 4, 
KEY_RIGHT), KEY(4, 0, KEY_FN_F3), KEY(4, 1, KEY_FN_F4), KEY(4, 2, KEY_MUTE), KEY(4, 3, KEY_HOME), KEY(4, 4, KEY_BACK), KEY(5, 0, KEY_VOLUMEDOWN), KEY(5, 1, KEY_VOLUMEUP), KEY(5, 2, KEY_F1), KEY(5, 3, KEY_F2), KEY(5, 4, KEY_F3), }; static const struct matrix_keymap_data keymap_data = { .keymap = keymap, .keymap_size = ARRAY_SIZE(keymap), }; static struct matrix_keypad_platform_data keypad_config = { .keymap_data = &keymap_data, .num_row_gpios = 6, .num_col_gpios = 5, .debounce_ms = 0, .active_low = 0, .no_autorepeat = 0, }; static void spi_select_device(int cs) { static int gpio; if (!gpio) { int ret; ret = gpio_request(EVM_SPI_CS_GPIO, "spi chipsel"); if (ret < 0) { pr_err("cannot open spi chipsel gpio\n"); gpio = -ENOSYS; return; } else { gpio = EVM_SPI_CS_GPIO; gpio_direction_output(gpio, 0); } } if (gpio < 0) return; return gpio_set_value(gpio, cs ? 1 : 0); } static struct ti_ssp_spi_data spi_master_data = { .num_cs = 2, .select = spi_select_device, .iosel = SSP_PIN_SEL(0, SSP_CLOCK) | SSP_PIN_SEL(1, SSP_DATA) | SSP_PIN_SEL(2, SSP_CHIPSEL) | SSP_PIN_SEL(3, SSP_IN) | SSP_INPUT_SEL(3), }; static struct ti_ssp_data ssp_config = { .out_clock = 250 * 1000, .dev_data = { [1] = { .dev_name = "ti-ssp-spi", .pdata = &spi_master_data, .pdata_size = sizeof(spi_master_data), }, }, }; static struct tnetv107x_device_info evm_device_info __initconst = { .serial_config = &serial_config, .mmc_config[1] = &mmc_config, .nand_config[0] = &nand_config, .keypad_config = &keypad_config, .ssp_config = &ssp_config, }; static struct spi_board_info spi_info[] __initconst = { }; static __init void tnetv107x_evm_board_init(void) { davinci_cfg_reg_list(sdio1_pins); davinci_cfg_reg_list(uart1_pins); davinci_cfg_reg_list(ssp_pins); tnetv107x_devices_init(&evm_device_info); spi_register_board_info(spi_info, ARRAY_SIZE(spi_info)); } #ifdef CONFIG_SERIAL_8250_CONSOLE static int __init tnetv107x_evm_console_init(void) { return add_preferred_console("ttyS", 0, "115200"); } 
console_initcall(tnetv107x_evm_console_init); #endif MACHINE_START(TNETV107X, "TNETV107X EVM") .atag_offset = 0x100, .map_io = tnetv107x_init, .init_irq = cp_intc_init, .timer = &davinci_timer, .init_machine = tnetv107x_evm_board_init, .dma_zone_size = SZ_128M, .restart = tnetv107x_restart, MACHINE_END
gpl-2.0
oostenveld/fieldtrip
external/dmlt/external/gpstuff/SuiteSparse/CHOLMOD/Core/t_cholmod_dense.c
34
7348
/* ========================================================================== */ /* === Core/t_cholmod_dense ================================================= */ /* ========================================================================== */ /* ----------------------------------------------------------------------------- * CHOLMOD/Core Module. Copyright (C) 2005-2006, * Univ. of Florida. Author: Timothy A. Davis * The CHOLMOD/Core Module is licensed under Version 2.1 of the GNU * Lesser General Public License. See lesser.txt for a text of the license. * CHOLMOD is also available under other licenses; contact authors for details. * http://www.cise.ufl.edu/research/sparse * -------------------------------------------------------------------------- */ /* Template routine for cholmod_dense. All xtypes supported, except that there * are no dense matrices with an xtype of pattern. */ #include "cholmod_template.h" /* ========================================================================== */ /* === t_cholmod_sparse_to_dense ============================================ */ /* ========================================================================== */ static cholmod_dense *TEMPLATE (cholmod_sparse_to_dense) ( /* ---- input ---- */ cholmod_sparse *A, /* matrix to copy */ /* --------------- */ cholmod_common *Common ) { double *Ax, *Xx, *Az, *Xz ; Int *Ap, *Ai, *Anz ; cholmod_dense *X ; Int i, j, p, pend, nrow, ncol, packed ; /* ---------------------------------------------------------------------- */ /* get inputs */ /* ---------------------------------------------------------------------- */ nrow = A->nrow ; ncol = A->ncol ; packed = A->packed ; Ap = A->p ; Ai = A->i ; Ax = A->x ; Az = A->z ; Anz = A->nz ; /* ---------------------------------------------------------------------- */ /* allocate result */ /* ---------------------------------------------------------------------- */ X = CHOLMOD(zeros) (nrow, ncol, XTYPE2, Common) ; if (Common->status < CHOLMOD_OK) { return 
(NULL) ; /* out of memory */ } Xx = X->x ; Xz = X->z ; /* ---------------------------------------------------------------------- */ /* copy into dense matrix */ /* ---------------------------------------------------------------------- */ if (A->stype < 0) { /* A is symmetric with lower stored, but both parts of X are present */ for (j = 0 ; j < ncol ; j++) { p = Ap [j] ; pend = (packed) ? (Ap [j+1]) : (p + Anz [j]) ; for ( ; p < pend ; p++) { i = Ai [p] ; if (i >= j) { ASSIGN2 (Xx, Xz, i+j*nrow, Ax, Az, p) ; ASSIGN2_CONJ (Xx, Xz, j+i*nrow, Ax, Az, p) ; } } } } else if (A->stype > 0) { /* A is symmetric with upper stored, but both parts of X are present */ for (j = 0 ; j < ncol ; j++) { p = Ap [j] ; pend = (packed) ? (Ap [j+1]) : (p + Anz [j]) ; for ( ; p < pend ; p++) { i = Ai [p] ; if (i <= j) { ASSIGN2 (Xx, Xz, i+j*nrow, Ax, Az, p) ; ASSIGN2_CONJ (Xx, Xz, j+i*nrow, Ax, Az, p) ; } } } } else { /* both parts of A and X are present */ for (j = 0 ; j < ncol ; j++) { p = Ap [j] ; pend = (packed) ? 
(Ap [j+1]) : (p + Anz [j]) ; for ( ; p < pend ; p++) { i = Ai [p] ; ASSIGN2 (Xx, Xz, i+j*nrow, Ax, Az, p) ; } } } return (X) ; } #ifndef PATTERN /* There are no dense matrices of xtype CHOLMOD_PATTERN */ /* ========================================================================== */ /* === t_cholmod_dense_to_sparse ============================================ */ /* ========================================================================== */ static cholmod_sparse *TEMPLATE (cholmod_dense_to_sparse) ( /* ---- input ---- */ cholmod_dense *X, /* matrix to copy */ int values, /* TRUE if values to be copied, FALSE otherwise */ /* --------------- */ cholmod_common *Common ) { double *Xx, *Cx, *Xz, *Cz ; Int *Ci, *Cp ; cholmod_sparse *C ; Int i, j, p, d, nrow, ncol, nz ; /* ---------------------------------------------------------------------- */ /* get inputs */ /* ---------------------------------------------------------------------- */ nrow = X->nrow ; ncol = X->ncol ; d = X->d ; Xx = X->x ; Xz = X->z ; /* ---------------------------------------------------------------------- */ /* count the number of nonzeros in the result */ /* ---------------------------------------------------------------------- */ nz = 0 ; for (j = 0 ; j < ncol ; j++) { for (i = 0 ; i < nrow ; i++) { if (ENTRY_IS_NONZERO (Xx, Xz, i+j*d)) { nz++ ; } } } /* ---------------------------------------------------------------------- */ /* allocate the result C */ /* ---------------------------------------------------------------------- */ C = CHOLMOD(allocate_sparse) (nrow, ncol, nz, TRUE, TRUE, 0, values ? 
XTYPE : CHOLMOD_PATTERN, Common) ; if (Common->status < CHOLMOD_OK) { return (NULL) ; /* out of memory */ } Cp = C->p ; Ci = C->i ; Cx = C->x ; Cz = C->z ; /* ---------------------------------------------------------------------- */ /* copy the dense matrix X into the sparse matrix C */ /* ---------------------------------------------------------------------- */ p = 0 ; for (j = 0 ; j < ncol ; j++) { Cp [j] = p ; for (i = 0 ; i < nrow ; i++) { if (ENTRY_IS_NONZERO (Xx, Xz, i+j*d)) { Ci [p] = i ; if (values) { ASSIGN (Cx, Cz, p, Xx, Xz, i+j*d) ; } p++ ; } } } ASSERT (p == nz) ; Cp [ncol] = nz ; /* ---------------------------------------------------------------------- */ /* return result */ /* ---------------------------------------------------------------------- */ ASSERT (CHOLMOD(dump_sparse) (C, "C", Common) >= 0) ; return (C) ; } /* ========================================================================== */ /* === t_cholmod_copy_dense2 ================================================ */ /* ========================================================================== */ /* Y = X, where X and Y are both already allocated. 
*/ static int TEMPLATE (cholmod_copy_dense2) ( /* ---- input ---- */ cholmod_dense *X, /* matrix to copy */ /* ---- output --- */ cholmod_dense *Y /* copy of matrix X */ ) { double *Xx, *Xz, *Yx, *Yz ; Int i, j, nrow, ncol, dy, dx ; /* ---------------------------------------------------------------------- */ /* get inputs */ /* ---------------------------------------------------------------------- */ Xx = X->x ; Xz = X->z ; Yx = Y->x ; Yz = Y->z ; dx = X->d ; dy = Y->d ; nrow = X->nrow ; ncol = X->ncol ; /* ---------------------------------------------------------------------- */ /* copy */ /* ---------------------------------------------------------------------- */ CLEAR (Yx, Yz, 0) ; for (j = 0 ; j < ncol ; j++) { for (i = 0 ; i < nrow ; i++) { ASSIGN (Yx, Yz, i+j*dy, Xx, Xz, i+j*dx) ; } } return (TRUE) ; } #endif #undef PATTERN #undef REAL #undef COMPLEX #undef ZOMPLEX
gpl-2.0
raumfeld/linux-am33xx
kernel/power/swap.c
34
38453
/* * linux/kernel/power/swap.c * * This file provides functions for reading the suspend image from * and writing it to a swap partition. * * Copyright (C) 1998,2001-2005 Pavel Machek <pavel@ucw.cz> * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> * Copyright (C) 2010-2012 Bojan Smojver <bojan@rexursive.com> * * This file is released under the GPLv2. * */ #define pr_fmt(fmt) "PM: " fmt #include <linux/module.h> #include <linux/file.h> #include <linux/delay.h> #include <linux/bitops.h> #include <linux/genhd.h> #include <linux/device.h> #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/swap.h> #include <linux/swapops.h> #include <linux/pm.h> #include <linux/slab.h> #include <linux/lzo.h> #include <linux/vmalloc.h> #include <linux/cpumask.h> #include <linux/atomic.h> #include <linux/kthread.h> #include <linux/crc32.h> #include <linux/ktime.h> #include "power.h" #define HIBERNATE_SIG "S1SUSPEND" /* * When reading an {un,}compressed image, we may restore pages in place, * in which case some architectures need these pages cleaning before they * can be executed. We don't know which pages these may be, so clean the lot. */ static bool clean_pages_on_read; static bool clean_pages_on_decompress; /* * The swap map is a data structure used for keeping track of each page * written to a swap partition. It consists of many swap_map_page * structures that contain each an array of MAP_PAGE_ENTRIES swap entries. * These structures are stored on the swap and linked together with the * help of the .next_swap member. * * The swap map is created during suspend. The swap map pages are * allocated and populated one at a time, so we only need one memory * page to set up the entire structure. * * During resume we pick up all swap_map_page structures into a list. */ #define MAP_PAGE_ENTRIES (PAGE_SIZE / sizeof(sector_t) - 1) /* * Number of free pages that are not high. 
*/ static inline unsigned long low_free_pages(void) { return nr_free_pages() - nr_free_highpages(); } /* * Number of pages required to be kept free while writing the image. Always * half of all available low pages before the writing starts. */ static inline unsigned long reqd_free_pages(void) { return low_free_pages() / 2; } struct swap_map_page { sector_t entries[MAP_PAGE_ENTRIES]; sector_t next_swap; }; struct swap_map_page_list { struct swap_map_page *map; struct swap_map_page_list *next; }; /** * The swap_map_handle structure is used for handling swap in * a file-alike way */ struct swap_map_handle { struct swap_map_page *cur; struct swap_map_page_list *maps; sector_t cur_swap; sector_t first_sector; unsigned int k; unsigned long reqd_free_pages; u32 crc32; }; struct swsusp_header { char reserved[PAGE_SIZE - 20 - sizeof(sector_t) - sizeof(int) - sizeof(u32)]; u32 crc32; sector_t image; unsigned int flags; /* Flags to pass to the "boot" kernel */ char orig_sig[10]; char sig[10]; } __packed; static struct swsusp_header *swsusp_header; /** * The following functions are used for tracing the allocated * swap pages, so that they can be freed in case of an error. */ struct swsusp_extent { struct rb_node node; unsigned long start; unsigned long end; }; static struct rb_root swsusp_extents = RB_ROOT; static int swsusp_extents_insert(unsigned long swap_offset) { struct rb_node **new = &(swsusp_extents.rb_node); struct rb_node *parent = NULL; struct swsusp_extent *ext; /* Figure out where to put the new node */ while (*new) { ext = rb_entry(*new, struct swsusp_extent, node); parent = *new; if (swap_offset < ext->start) { /* Try to merge */ if (swap_offset == ext->start - 1) { ext->start--; return 0; } new = &((*new)->rb_left); } else if (swap_offset > ext->end) { /* Try to merge */ if (swap_offset == ext->end + 1) { ext->end++; return 0; } new = &((*new)->rb_right); } else { /* It already is in the tree */ return -EINVAL; } } /* Add the new node and rebalance the tree. 
*/ ext = kzalloc(sizeof(struct swsusp_extent), GFP_KERNEL); if (!ext) return -ENOMEM; ext->start = swap_offset; ext->end = swap_offset; rb_link_node(&ext->node, parent, new); rb_insert_color(&ext->node, &swsusp_extents); return 0; } /** * alloc_swapdev_block - allocate a swap page and register that it has * been allocated, so that it can be freed in case of an error. */ sector_t alloc_swapdev_block(int swap) { unsigned long offset; offset = swp_offset(get_swap_page_of_type(swap)); if (offset) { if (swsusp_extents_insert(offset)) swap_free(swp_entry(swap, offset)); else return swapdev_block(swap, offset); } return 0; } /** * free_all_swap_pages - free swap pages allocated for saving image data. * It also frees the extents used to register which swap entries had been * allocated. */ void free_all_swap_pages(int swap) { struct rb_node *node; while ((node = swsusp_extents.rb_node)) { struct swsusp_extent *ext; unsigned long offset; ext = rb_entry(node, struct swsusp_extent, node); rb_erase(node, &swsusp_extents); for (offset = ext->start; offset <= ext->end; offset++) swap_free(swp_entry(swap, offset)); kfree(ext); } } int swsusp_swap_in_use(void) { return (swsusp_extents.rb_node != NULL); } /* * General things */ static unsigned short root_swap = 0xffff; static struct block_device *hib_resume_bdev; struct hib_bio_batch { atomic_t count; wait_queue_head_t wait; blk_status_t error; }; static void hib_init_batch(struct hib_bio_batch *hb) { atomic_set(&hb->count, 0); init_waitqueue_head(&hb->wait); hb->error = BLK_STS_OK; } static void hib_end_io(struct bio *bio) { struct hib_bio_batch *hb = bio->bi_private; struct page *page = bio_first_page_all(bio); if (bio->bi_status) { pr_alert("Read-error on swap-device (%u:%u:%Lu)\n", MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)), (unsigned long long)bio->bi_iter.bi_sector); } if (bio_data_dir(bio) == WRITE) put_page(page); else if (clean_pages_on_read) flush_icache_range((unsigned long)page_address(page), (unsigned 
long)page_address(page) + PAGE_SIZE); if (bio->bi_status && !hb->error) hb->error = bio->bi_status; if (atomic_dec_and_test(&hb->count)) wake_up(&hb->wait); bio_put(bio); } static int hib_submit_io(int op, int op_flags, pgoff_t page_off, void *addr, struct hib_bio_batch *hb) { struct page *page = virt_to_page(addr); struct bio *bio; int error = 0; bio = bio_alloc(__GFP_RECLAIM | __GFP_HIGH, 1); bio->bi_iter.bi_sector = page_off * (PAGE_SIZE >> 9); bio_set_dev(bio, hib_resume_bdev); bio_set_op_attrs(bio, op, op_flags); if (bio_add_page(bio, page, PAGE_SIZE, 0) < PAGE_SIZE) { pr_err("Adding page to bio failed at %llu\n", (unsigned long long)bio->bi_iter.bi_sector); bio_put(bio); return -EFAULT; } if (hb) { bio->bi_end_io = hib_end_io; bio->bi_private = hb; atomic_inc(&hb->count); submit_bio(bio); } else { error = submit_bio_wait(bio); bio_put(bio); } return error; } static blk_status_t hib_wait_io(struct hib_bio_batch *hb) { wait_event(hb->wait, atomic_read(&hb->count) == 0); return blk_status_to_errno(hb->error); } /* * Saving part */ static int mark_swapfiles(struct swap_map_handle *handle, unsigned int flags) { int error; hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block, swsusp_header, NULL); if (!memcmp("SWAP-SPACE",swsusp_header->sig, 10) || !memcmp("SWAPSPACE2",swsusp_header->sig, 10)) { memcpy(swsusp_header->orig_sig,swsusp_header->sig, 10); memcpy(swsusp_header->sig, HIBERNATE_SIG, 10); swsusp_header->image = handle->first_sector; swsusp_header->flags = flags; if (flags & SF_CRC32_MODE) swsusp_header->crc32 = handle->crc32; error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, swsusp_resume_block, swsusp_header, NULL); } else { pr_err("Swap header not found!\n"); error = -ENODEV; } return error; } /** * swsusp_swap_check - check if the resume device is a swap device * and get its index (if so) * * This is called before saving image */ static int swsusp_swap_check(void) { int res; res = swap_type_of(swsusp_resume_device, swsusp_resume_block, &hib_resume_bdev); if 
(res < 0) return res; root_swap = res; res = blkdev_get(hib_resume_bdev, FMODE_WRITE, NULL); if (res) return res; res = set_blocksize(hib_resume_bdev, PAGE_SIZE); if (res < 0) blkdev_put(hib_resume_bdev, FMODE_WRITE); /* * Update the resume device to the one actually used, * so the test_resume mode can use it in case it is * invoked from hibernate() to test the snapshot. */ swsusp_resume_device = hib_resume_bdev->bd_dev; return res; } /** * write_page - Write one page to given swap location. * @buf: Address we're writing. * @offset: Offset of the swap page we're writing to. * @hb: bio completion batch */ static int write_page(void *buf, sector_t offset, struct hib_bio_batch *hb) { void *src; int ret; if (!offset) return -ENOSPC; if (hb) { src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY); if (src) { copy_page(src, buf); } else { ret = hib_wait_io(hb); /* Free pages */ if (ret) return ret; src = (void *)__get_free_page(__GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY); if (src) { copy_page(src, buf); } else { WARN_ON_ONCE(1); hb = NULL; /* Go synchronous */ src = buf; } } } else { src = buf; } return hib_submit_io(REQ_OP_WRITE, REQ_SYNC, offset, src, hb); } static void release_swap_writer(struct swap_map_handle *handle) { if (handle->cur) free_page((unsigned long)handle->cur); handle->cur = NULL; } static int get_swap_writer(struct swap_map_handle *handle) { int ret; ret = swsusp_swap_check(); if (ret) { if (ret != -ENOSPC) pr_err("Cannot find swap device, try swapon -a\n"); return ret; } handle->cur = (struct swap_map_page *)get_zeroed_page(GFP_KERNEL); if (!handle->cur) { ret = -ENOMEM; goto err_close; } handle->cur_swap = alloc_swapdev_block(root_swap); if (!handle->cur_swap) { ret = -ENOSPC; goto err_rel; } handle->k = 0; handle->reqd_free_pages = reqd_free_pages(); handle->first_sector = handle->cur_swap; return 0; err_rel: release_swap_writer(handle); err_close: swsusp_close(FMODE_WRITE); return ret; } static int swap_write_page(struct 
swap_map_handle *handle, void *buf, struct hib_bio_batch *hb) { int error = 0; sector_t offset; if (!handle->cur) return -EINVAL; offset = alloc_swapdev_block(root_swap); error = write_page(buf, offset, hb); if (error) return error; handle->cur->entries[handle->k++] = offset; if (handle->k >= MAP_PAGE_ENTRIES) { offset = alloc_swapdev_block(root_swap); if (!offset) return -ENOSPC; handle->cur->next_swap = offset; error = write_page(handle->cur, handle->cur_swap, hb); if (error) goto out; clear_page(handle->cur); handle->cur_swap = offset; handle->k = 0; if (hb && low_free_pages() <= handle->reqd_free_pages) { error = hib_wait_io(hb); if (error) goto out; /* * Recalculate the number of required free pages, to * make sure we never take more than half. */ handle->reqd_free_pages = reqd_free_pages(); } } out: return error; } static int flush_swap_writer(struct swap_map_handle *handle) { if (handle->cur && handle->cur_swap) return write_page(handle->cur, handle->cur_swap, NULL); else return -EINVAL; } static int swap_writer_finish(struct swap_map_handle *handle, unsigned int flags, int error) { if (!error) { flush_swap_writer(handle); pr_info("S"); error = mark_swapfiles(handle, flags); pr_cont("|\n"); } if (error) free_all_swap_pages(root_swap); release_swap_writer(handle); swsusp_close(FMODE_WRITE); return error; } /* We need to remember how much compressed data we need to read. */ #define LZO_HEADER sizeof(size_t) /* Number of pages/bytes we'll compress at one time. */ #define LZO_UNC_PAGES 32 #define LZO_UNC_SIZE (LZO_UNC_PAGES * PAGE_SIZE) /* Number of pages/bytes we need for compressed data (worst case). */ #define LZO_CMP_PAGES DIV_ROUND_UP(lzo1x_worst_compress(LZO_UNC_SIZE) + \ LZO_HEADER, PAGE_SIZE) #define LZO_CMP_SIZE (LZO_CMP_PAGES * PAGE_SIZE) /* Maximum number of threads for compression/decompression. */ #define LZO_THREADS 3 /* Minimum/maximum number of pages for read buffering. 
*/ #define LZO_MIN_RD_PAGES 1024 #define LZO_MAX_RD_PAGES 8192 /** * save_image - save the suspend image data */ static int save_image(struct swap_map_handle *handle, struct snapshot_handle *snapshot, unsigned int nr_to_write) { unsigned int m; int ret; int nr_pages; int err2; struct hib_bio_batch hb; ktime_t start; ktime_t stop; hib_init_batch(&hb); pr_info("Saving image data pages (%u pages)...\n", nr_to_write); m = nr_to_write / 10; if (!m) m = 1; nr_pages = 0; start = ktime_get(); while (1) { ret = snapshot_read_next(snapshot); if (ret <= 0) break; ret = swap_write_page(handle, data_of(*snapshot), &hb); if (ret) break; if (!(nr_pages % m)) pr_info("Image saving progress: %3d%%\n", nr_pages / m * 10); nr_pages++; } err2 = hib_wait_io(&hb); stop = ktime_get(); if (!ret) ret = err2; if (!ret) pr_info("Image saving done\n"); swsusp_show_speed(start, stop, nr_to_write, "Wrote"); return ret; } /** * Structure used for CRC32. */ struct crc_data { struct task_struct *thr; /* thread */ atomic_t ready; /* ready to start flag */ atomic_t stop; /* ready to stop flag */ unsigned run_threads; /* nr current threads */ wait_queue_head_t go; /* start crc update */ wait_queue_head_t done; /* crc update done */ u32 *crc32; /* points to handle's crc32 */ size_t *unc_len[LZO_THREADS]; /* uncompressed lengths */ unsigned char *unc[LZO_THREADS]; /* uncompressed data */ }; /** * CRC32 update function that runs in its own thread. */ static int crc32_threadfn(void *data) { struct crc_data *d = data; unsigned i; while (1) { wait_event(d->go, atomic_read(&d->ready) || kthread_should_stop()); if (kthread_should_stop()) { d->thr = NULL; atomic_set(&d->stop, 1); wake_up(&d->done); break; } atomic_set(&d->ready, 0); for (i = 0; i < d->run_threads; i++) *d->crc32 = crc32_le(*d->crc32, d->unc[i], *d->unc_len[i]); atomic_set(&d->stop, 1); wake_up(&d->done); } return 0; } /** * Structure used for LZO data compression. 
*/ struct cmp_data { struct task_struct *thr; /* thread */ atomic_t ready; /* ready to start flag */ atomic_t stop; /* ready to stop flag */ int ret; /* return code */ wait_queue_head_t go; /* start compression */ wait_queue_head_t done; /* compression done */ size_t unc_len; /* uncompressed length */ size_t cmp_len; /* compressed length */ unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ unsigned char wrk[LZO1X_1_MEM_COMPRESS]; /* compression workspace */ }; /** * Compression function that runs in its own thread. */ static int lzo_compress_threadfn(void *data) { struct cmp_data *d = data; while (1) { wait_event(d->go, atomic_read(&d->ready) || kthread_should_stop()); if (kthread_should_stop()) { d->thr = NULL; d->ret = -1; atomic_set(&d->stop, 1); wake_up(&d->done); break; } atomic_set(&d->ready, 0); d->ret = lzo1x_1_compress(d->unc, d->unc_len, d->cmp + LZO_HEADER, &d->cmp_len, d->wrk); atomic_set(&d->stop, 1); wake_up(&d->done); } return 0; } /** * save_image_lzo - Save the suspend image data compressed with LZO. * @handle: Swap map handle to use for saving the image. * @snapshot: Image to read data from. * @nr_to_write: Number of pages to save. */ static int save_image_lzo(struct swap_map_handle *handle, struct snapshot_handle *snapshot, unsigned int nr_to_write) { unsigned int m; int ret = 0; int nr_pages; int err2; struct hib_bio_batch hb; ktime_t start; ktime_t stop; size_t off; unsigned thr, run_threads, nr_threads; unsigned char *page = NULL; struct cmp_data *data = NULL; struct crc_data *crc = NULL; hib_init_batch(&hb); /* * We'll limit the number of threads for compression to limit memory * footprint. 
*/ nr_threads = num_online_cpus() - 1; nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); page = (void *)__get_free_page(__GFP_RECLAIM | __GFP_HIGH); if (!page) { pr_err("Failed to allocate LZO page\n"); ret = -ENOMEM; goto out_clean; } data = vmalloc(sizeof(*data) * nr_threads); if (!data) { pr_err("Failed to allocate LZO data\n"); ret = -ENOMEM; goto out_clean; } for (thr = 0; thr < nr_threads; thr++) memset(&data[thr], 0, offsetof(struct cmp_data, go)); crc = kmalloc(sizeof(*crc), GFP_KERNEL); if (!crc) { pr_err("Failed to allocate crc\n"); ret = -ENOMEM; goto out_clean; } memset(crc, 0, offsetof(struct crc_data, go)); /* * Start the compression threads. */ for (thr = 0; thr < nr_threads; thr++) { init_waitqueue_head(&data[thr].go); init_waitqueue_head(&data[thr].done); data[thr].thr = kthread_run(lzo_compress_threadfn, &data[thr], "image_compress/%u", thr); if (IS_ERR(data[thr].thr)) { data[thr].thr = NULL; pr_err("Cannot start compression threads\n"); ret = -ENOMEM; goto out_clean; } } /* * Start the CRC32 thread. */ init_waitqueue_head(&crc->go); init_waitqueue_head(&crc->done); handle->crc32 = 0; crc->crc32 = &handle->crc32; for (thr = 0; thr < nr_threads; thr++) { crc->unc[thr] = data[thr].unc; crc->unc_len[thr] = &data[thr].unc_len; } crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); if (IS_ERR(crc->thr)) { crc->thr = NULL; pr_err("Cannot start CRC32 thread\n"); ret = -ENOMEM; goto out_clean; } /* * Adjust the number of required free pages after all allocations have * been done. We don't want to run out of pages when writing. 
*/ handle->reqd_free_pages = reqd_free_pages(); pr_info("Using %u thread(s) for compression\n", nr_threads); pr_info("Compressing and saving image data (%u pages)...\n", nr_to_write); m = nr_to_write / 10; if (!m) m = 1; nr_pages = 0; start = ktime_get(); for (;;) { for (thr = 0; thr < nr_threads; thr++) { for (off = 0; off < LZO_UNC_SIZE; off += PAGE_SIZE) { ret = snapshot_read_next(snapshot); if (ret < 0) goto out_finish; if (!ret) break; memcpy(data[thr].unc + off, data_of(*snapshot), PAGE_SIZE); if (!(nr_pages % m)) pr_info("Image saving progress: %3d%%\n", nr_pages / m * 10); nr_pages++; } if (!off) break; data[thr].unc_len = off; atomic_set(&data[thr].ready, 1); wake_up(&data[thr].go); } if (!thr) break; crc->run_threads = thr; atomic_set(&crc->ready, 1); wake_up(&crc->go); for (run_threads = thr, thr = 0; thr < run_threads; thr++) { wait_event(data[thr].done, atomic_read(&data[thr].stop)); atomic_set(&data[thr].stop, 0); ret = data[thr].ret; if (ret < 0) { pr_err("LZO compression failed\n"); goto out_finish; } if (unlikely(!data[thr].cmp_len || data[thr].cmp_len > lzo1x_worst_compress(data[thr].unc_len))) { pr_err("Invalid LZO compressed length\n"); ret = -1; goto out_finish; } *(size_t *)data[thr].cmp = data[thr].cmp_len; /* * Given we are writing one page at a time to disk, we * copy that much from the buffer, although the last * bit will likely be smaller than full page. This is * OK - we saved the length of the compressed data, so * any garbage at the end will be discarded when we * read it. 
*/ for (off = 0; off < LZO_HEADER + data[thr].cmp_len; off += PAGE_SIZE) { memcpy(page, data[thr].cmp + off, PAGE_SIZE); ret = swap_write_page(handle, page, &hb); if (ret) goto out_finish; } } wait_event(crc->done, atomic_read(&crc->stop)); atomic_set(&crc->stop, 0); } out_finish: err2 = hib_wait_io(&hb); stop = ktime_get(); if (!ret) ret = err2; if (!ret) pr_info("Image saving done\n"); swsusp_show_speed(start, stop, nr_to_write, "Wrote"); out_clean: if (crc) { if (crc->thr) kthread_stop(crc->thr); kfree(crc); } if (data) { for (thr = 0; thr < nr_threads; thr++) if (data[thr].thr) kthread_stop(data[thr].thr); vfree(data); } if (page) free_page((unsigned long)page); return ret; } /** * enough_swap - Make sure we have enough swap to save the image. * * Returns TRUE or FALSE after checking the total amount of swap * space avaiable from the resume partition. */ static int enough_swap(unsigned int nr_pages) { unsigned int free_swap = count_swap_pages(root_swap, 1); unsigned int required; pr_debug("Free swap pages: %u\n", free_swap); required = PAGES_FOR_IO + nr_pages; return free_swap > required; } /** * swsusp_write - Write entire image and metadata. * @flags: flags to pass to the "boot" kernel in the image header * * It is important _NOT_ to umount filesystems at this point. We want * them synced (in case something goes wrong) but we DO not want to mark * filesystem clean: it is not. (And it does not matter, if we resume * correctly, we'll mark system clean, anyway.) 
*/ int swsusp_write(unsigned int flags) { struct swap_map_handle handle; struct snapshot_handle snapshot; struct swsusp_info *header; unsigned long pages; int error; pages = snapshot_get_image_size(); error = get_swap_writer(&handle); if (error) { pr_err("Cannot get swap writer\n"); return error; } if (flags & SF_NOCOMPRESS_MODE) { if (!enough_swap(pages)) { pr_err("Not enough free swap\n"); error = -ENOSPC; goto out_finish; } } memset(&snapshot, 0, sizeof(struct snapshot_handle)); error = snapshot_read_next(&snapshot); if (error < PAGE_SIZE) { if (error >= 0) error = -EFAULT; goto out_finish; } header = (struct swsusp_info *)data_of(snapshot); error = swap_write_page(&handle, header, NULL); if (!error) { error = (flags & SF_NOCOMPRESS_MODE) ? save_image(&handle, &snapshot, pages - 1) : save_image_lzo(&handle, &snapshot, pages - 1); } out_finish: error = swap_writer_finish(&handle, flags, error); return error; } /** * The following functions allow us to read data using a swap map * in a file-alike way */ static void release_swap_reader(struct swap_map_handle *handle) { struct swap_map_page_list *tmp; while (handle->maps) { if (handle->maps->map) free_page((unsigned long)handle->maps->map); tmp = handle->maps; handle->maps = handle->maps->next; kfree(tmp); } handle->cur = NULL; } static int get_swap_reader(struct swap_map_handle *handle, unsigned int *flags_p) { int error; struct swap_map_page_list *tmp, *last; sector_t offset; *flags_p = swsusp_header->flags; if (!swsusp_header->image) /* how can this happen? 
*/ return -EINVAL; handle->cur = NULL; last = handle->maps = NULL; offset = swsusp_header->image; while (offset) { tmp = kmalloc(sizeof(*handle->maps), GFP_KERNEL); if (!tmp) { release_swap_reader(handle); return -ENOMEM; } memset(tmp, 0, sizeof(*tmp)); if (!handle->maps) handle->maps = tmp; if (last) last->next = tmp; last = tmp; tmp->map = (struct swap_map_page *) __get_free_page(__GFP_RECLAIM | __GFP_HIGH); if (!tmp->map) { release_swap_reader(handle); return -ENOMEM; } error = hib_submit_io(REQ_OP_READ, 0, offset, tmp->map, NULL); if (error) { release_swap_reader(handle); return error; } offset = tmp->map->next_swap; } handle->k = 0; handle->cur = handle->maps->map; return 0; } static int swap_read_page(struct swap_map_handle *handle, void *buf, struct hib_bio_batch *hb) { sector_t offset; int error; struct swap_map_page_list *tmp; if (!handle->cur) return -EINVAL; offset = handle->cur->entries[handle->k]; if (!offset) return -EFAULT; error = hib_submit_io(REQ_OP_READ, 0, offset, buf, hb); if (error) return error; if (++handle->k >= MAP_PAGE_ENTRIES) { handle->k = 0; free_page((unsigned long)handle->maps->map); tmp = handle->maps; handle->maps = handle->maps->next; kfree(tmp); if (!handle->maps) release_swap_reader(handle); else handle->cur = handle->maps->map; } return error; } static int swap_reader_finish(struct swap_map_handle *handle) { release_swap_reader(handle); return 0; } /** * load_image - load the image using the swap map handle * @handle and the snapshot handle @snapshot * (assume there are @nr_pages pages to load) */ static int load_image(struct swap_map_handle *handle, struct snapshot_handle *snapshot, unsigned int nr_to_read) { unsigned int m; int ret = 0; ktime_t start; ktime_t stop; struct hib_bio_batch hb; int err2; unsigned nr_pages; hib_init_batch(&hb); clean_pages_on_read = true; pr_info("Loading image data pages (%u pages)...\n", nr_to_read); m = nr_to_read / 10; if (!m) m = 1; nr_pages = 0; start = ktime_get(); for ( ; ; ) { ret = 
snapshot_write_next(snapshot); if (ret <= 0) break; ret = swap_read_page(handle, data_of(*snapshot), &hb); if (ret) break; if (snapshot->sync_read) ret = hib_wait_io(&hb); if (ret) break; if (!(nr_pages % m)) pr_info("Image loading progress: %3d%%\n", nr_pages / m * 10); nr_pages++; } err2 = hib_wait_io(&hb); stop = ktime_get(); if (!ret) ret = err2; if (!ret) { pr_info("Image loading done\n"); snapshot_write_finalize(snapshot); if (!snapshot_image_loaded(snapshot)) ret = -ENODATA; } swsusp_show_speed(start, stop, nr_to_read, "Read"); return ret; } /** * Structure used for LZO data decompression. */ struct dec_data { struct task_struct *thr; /* thread */ atomic_t ready; /* ready to start flag */ atomic_t stop; /* ready to stop flag */ int ret; /* return code */ wait_queue_head_t go; /* start decompression */ wait_queue_head_t done; /* decompression done */ size_t unc_len; /* uncompressed length */ size_t cmp_len; /* compressed length */ unsigned char unc[LZO_UNC_SIZE]; /* uncompressed buffer */ unsigned char cmp[LZO_CMP_SIZE]; /* compressed buffer */ }; /** * Deompression function that runs in its own thread. */ static int lzo_decompress_threadfn(void *data) { struct dec_data *d = data; while (1) { wait_event(d->go, atomic_read(&d->ready) || kthread_should_stop()); if (kthread_should_stop()) { d->thr = NULL; d->ret = -1; atomic_set(&d->stop, 1); wake_up(&d->done); break; } atomic_set(&d->ready, 0); d->unc_len = LZO_UNC_SIZE; d->ret = lzo1x_decompress_safe(d->cmp + LZO_HEADER, d->cmp_len, d->unc, &d->unc_len); if (clean_pages_on_decompress) flush_icache_range((unsigned long)d->unc, (unsigned long)d->unc + d->unc_len); atomic_set(&d->stop, 1); wake_up(&d->done); } return 0; } /** * load_image_lzo - Load compressed image data and decompress them with LZO. * @handle: Swap map handle to use for loading data. * @snapshot: Image to copy uncompressed data into. * @nr_to_read: Number of pages to load. 
*/ static int load_image_lzo(struct swap_map_handle *handle, struct snapshot_handle *snapshot, unsigned int nr_to_read) { unsigned int m; int ret = 0; int eof = 0; struct hib_bio_batch hb; ktime_t start; ktime_t stop; unsigned nr_pages; size_t off; unsigned i, thr, run_threads, nr_threads; unsigned ring = 0, pg = 0, ring_size = 0, have = 0, want, need, asked = 0; unsigned long read_pages = 0; unsigned char **page = NULL; struct dec_data *data = NULL; struct crc_data *crc = NULL; hib_init_batch(&hb); /* * We'll limit the number of threads for decompression to limit memory * footprint. */ nr_threads = num_online_cpus() - 1; nr_threads = clamp_val(nr_threads, 1, LZO_THREADS); page = vmalloc(sizeof(*page) * LZO_MAX_RD_PAGES); if (!page) { pr_err("Failed to allocate LZO page\n"); ret = -ENOMEM; goto out_clean; } data = vmalloc(sizeof(*data) * nr_threads); if (!data) { pr_err("Failed to allocate LZO data\n"); ret = -ENOMEM; goto out_clean; } for (thr = 0; thr < nr_threads; thr++) memset(&data[thr], 0, offsetof(struct dec_data, go)); crc = kmalloc(sizeof(*crc), GFP_KERNEL); if (!crc) { pr_err("Failed to allocate crc\n"); ret = -ENOMEM; goto out_clean; } memset(crc, 0, offsetof(struct crc_data, go)); clean_pages_on_decompress = true; /* * Start the decompression threads. */ for (thr = 0; thr < nr_threads; thr++) { init_waitqueue_head(&data[thr].go); init_waitqueue_head(&data[thr].done); data[thr].thr = kthread_run(lzo_decompress_threadfn, &data[thr], "image_decompress/%u", thr); if (IS_ERR(data[thr].thr)) { data[thr].thr = NULL; pr_err("Cannot start decompression threads\n"); ret = -ENOMEM; goto out_clean; } } /* * Start the CRC32 thread. 
*/ init_waitqueue_head(&crc->go); init_waitqueue_head(&crc->done); handle->crc32 = 0; crc->crc32 = &handle->crc32; for (thr = 0; thr < nr_threads; thr++) { crc->unc[thr] = data[thr].unc; crc->unc_len[thr] = &data[thr].unc_len; } crc->thr = kthread_run(crc32_threadfn, crc, "image_crc32"); if (IS_ERR(crc->thr)) { crc->thr = NULL; pr_err("Cannot start CRC32 thread\n"); ret = -ENOMEM; goto out_clean; } /* * Set the number of pages for read buffering. * This is complete guesswork, because we'll only know the real * picture once prepare_image() is called, which is much later on * during the image load phase. We'll assume the worst case and * say that none of the image pages are from high memory. */ if (low_free_pages() > snapshot_get_image_size()) read_pages = (low_free_pages() - snapshot_get_image_size()) / 2; read_pages = clamp_val(read_pages, LZO_MIN_RD_PAGES, LZO_MAX_RD_PAGES); for (i = 0; i < read_pages; i++) { page[i] = (void *)__get_free_page(i < LZO_CMP_PAGES ? __GFP_RECLAIM | __GFP_HIGH : __GFP_RECLAIM | __GFP_NOWARN | __GFP_NORETRY); if (!page[i]) { if (i < LZO_CMP_PAGES) { ring_size = i; pr_err("Failed to allocate LZO pages\n"); ret = -ENOMEM; goto out_clean; } else { break; } } } want = ring_size = i; pr_info("Using %u thread(s) for decompression\n", nr_threads); pr_info("Loading and decompressing image data (%u pages)...\n", nr_to_read); m = nr_to_read / 10; if (!m) m = 1; nr_pages = 0; start = ktime_get(); ret = snapshot_write_next(snapshot); if (ret <= 0) goto out_finish; for(;;) { for (i = 0; !eof && i < want; i++) { ret = swap_read_page(handle, page[ring], &hb); if (ret) { /* * On real read error, finish. On end of data, * set EOF flag and just exit the read loop. */ if (handle->cur && handle->cur->entries[handle->k]) { goto out_finish; } else { eof = 1; break; } } if (++ring >= ring_size) ring = 0; } asked += i; want -= i; /* * We are out of data, wait for some more. 
*/ if (!have) { if (!asked) break; ret = hib_wait_io(&hb); if (ret) goto out_finish; have += asked; asked = 0; if (eof) eof = 2; } if (crc->run_threads) { wait_event(crc->done, atomic_read(&crc->stop)); atomic_set(&crc->stop, 0); crc->run_threads = 0; } for (thr = 0; have && thr < nr_threads; thr++) { data[thr].cmp_len = *(size_t *)page[pg]; if (unlikely(!data[thr].cmp_len || data[thr].cmp_len > lzo1x_worst_compress(LZO_UNC_SIZE))) { pr_err("Invalid LZO compressed length\n"); ret = -1; goto out_finish; } need = DIV_ROUND_UP(data[thr].cmp_len + LZO_HEADER, PAGE_SIZE); if (need > have) { if (eof > 1) { ret = -1; goto out_finish; } break; } for (off = 0; off < LZO_HEADER + data[thr].cmp_len; off += PAGE_SIZE) { memcpy(data[thr].cmp + off, page[pg], PAGE_SIZE); have--; want++; if (++pg >= ring_size) pg = 0; } atomic_set(&data[thr].ready, 1); wake_up(&data[thr].go); } /* * Wait for more data while we are decompressing. */ if (have < LZO_CMP_PAGES && asked) { ret = hib_wait_io(&hb); if (ret) goto out_finish; have += asked; asked = 0; if (eof) eof = 2; } for (run_threads = thr, thr = 0; thr < run_threads; thr++) { wait_event(data[thr].done, atomic_read(&data[thr].stop)); atomic_set(&data[thr].stop, 0); ret = data[thr].ret; if (ret < 0) { pr_err("LZO decompression failed\n"); goto out_finish; } if (unlikely(!data[thr].unc_len || data[thr].unc_len > LZO_UNC_SIZE || data[thr].unc_len & (PAGE_SIZE - 1))) { pr_err("Invalid LZO uncompressed length\n"); ret = -1; goto out_finish; } for (off = 0; off < data[thr].unc_len; off += PAGE_SIZE) { memcpy(data_of(*snapshot), data[thr].unc + off, PAGE_SIZE); if (!(nr_pages % m)) pr_info("Image loading progress: %3d%%\n", nr_pages / m * 10); nr_pages++; ret = snapshot_write_next(snapshot); if (ret <= 0) { crc->run_threads = thr + 1; atomic_set(&crc->ready, 1); wake_up(&crc->go); goto out_finish; } } } crc->run_threads = thr; atomic_set(&crc->ready, 1); wake_up(&crc->go); } out_finish: if (crc->run_threads) { wait_event(crc->done, 
atomic_read(&crc->stop)); atomic_set(&crc->stop, 0); } stop = ktime_get(); if (!ret) { pr_info("Image loading done\n"); snapshot_write_finalize(snapshot); if (!snapshot_image_loaded(snapshot)) ret = -ENODATA; if (!ret) { if (swsusp_header->flags & SF_CRC32_MODE) { if(handle->crc32 != swsusp_header->crc32) { pr_err("Invalid image CRC32!\n"); ret = -ENODATA; } } } } swsusp_show_speed(start, stop, nr_to_read, "Read"); out_clean: for (i = 0; i < ring_size; i++) free_page((unsigned long)page[i]); if (crc) { if (crc->thr) kthread_stop(crc->thr); kfree(crc); } if (data) { for (thr = 0; thr < nr_threads; thr++) if (data[thr].thr) kthread_stop(data[thr].thr); vfree(data); } vfree(page); return ret; } /** * swsusp_read - read the hibernation image. * @flags_p: flags passed by the "frozen" kernel in the image header should * be written into this memory location */ int swsusp_read(unsigned int *flags_p) { int error; struct swap_map_handle handle; struct snapshot_handle snapshot; struct swsusp_info *header; memset(&snapshot, 0, sizeof(struct snapshot_handle)); error = snapshot_write_next(&snapshot); if (error < PAGE_SIZE) return error < 0 ? error : -EFAULT; header = (struct swsusp_info *)data_of(snapshot); error = get_swap_reader(&handle, flags_p); if (error) goto end; if (!error) error = swap_read_page(&handle, header, NULL); if (!error) { error = (*flags_p & SF_NOCOMPRESS_MODE) ? 
load_image(&handle, &snapshot, header->pages - 1) : load_image_lzo(&handle, &snapshot, header->pages - 1); } swap_reader_finish(&handle); end: if (!error) pr_debug("Image successfully loaded\n"); else pr_debug("Error %d resuming\n", error); return error; } /** * swsusp_check - Check for swsusp signature in the resume device */ int swsusp_check(void) { int error; hib_resume_bdev = blkdev_get_by_dev(swsusp_resume_device, FMODE_READ, NULL); if (!IS_ERR(hib_resume_bdev)) { set_blocksize(hib_resume_bdev, PAGE_SIZE); clear_page(swsusp_header); error = hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block, swsusp_header, NULL); if (error) goto put; if (!memcmp(HIBERNATE_SIG, swsusp_header->sig, 10)) { memcpy(swsusp_header->sig, swsusp_header->orig_sig, 10); /* Reset swap signature now */ error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, swsusp_resume_block, swsusp_header, NULL); } else { error = -EINVAL; } put: if (error) blkdev_put(hib_resume_bdev, FMODE_READ); else pr_debug("Image signature found, resuming\n"); } else { error = PTR_ERR(hib_resume_bdev); } if (error) pr_debug("Image not found (code %d)\n", error); return error; } /** * swsusp_close - close swap device. */ void swsusp_close(fmode_t mode) { if (IS_ERR(hib_resume_bdev)) { pr_debug("Image device not initialised\n"); return; } blkdev_put(hib_resume_bdev, mode); } /** * swsusp_unmark - Unmark swsusp signature in the resume device */ #ifdef CONFIG_SUSPEND int swsusp_unmark(void) { int error; hib_submit_io(REQ_OP_READ, 0, swsusp_resume_block, swsusp_header, NULL); if (!memcmp(HIBERNATE_SIG,swsusp_header->sig, 10)) { memcpy(swsusp_header->sig,swsusp_header->orig_sig, 10); error = hib_submit_io(REQ_OP_WRITE, REQ_SYNC, swsusp_resume_block, swsusp_header, NULL); } else { pr_err("Cannot find swsusp signature!\n"); error = -ENODEV; } /* * We just returned from suspend, we don't need the image any more. 
*/ free_all_swap_pages(root_swap); return error; } #endif static int swsusp_header_init(void) { swsusp_header = (struct swsusp_header*) __get_free_page(GFP_KERNEL); if (!swsusp_header) panic("Could not allocate memory for swsusp_header\n"); return 0; } core_initcall(swsusp_header_init);
gpl-2.0
santod/android_kernel_htc_m8
drivers/net/wireless/b43/tables_nphy.c
34
135842
/* Broadcom B43 wireless driver IEEE 802.11n PHY data tables Copyright (c) 2008 Michael Buesch <m@bues.ch> Copyright (c) 2010 Rafał Miłecki <zajec5@gmail.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; see the file COPYING. If not, write to the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor, Boston, MA 02110-1301, USA. */ #include "b43.h" #include "tables_nphy.h" #include "phy_common.h" #include "phy_n.h" static const u8 b43_ntab_adjustpower0[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; static const u8 b43_ntab_adjustpower1[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; static const u16 b43_ntab_bdi[] = { 0x0070, 0x0126, 0x012C, 0x0246, 0x048D, 0x04D2, }; static const u32 b43_ntab_channelest[] = { 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, }; static const u8 b43_ntab_estimatepowerlt0[] = { 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 
0x42, 0x41, 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, }; static const u8 b43_ntab_estimatepowerlt1[] = { 0x50, 0x4F, 0x4E, 0x4D, 0x4C, 0x4B, 0x4A, 0x49, 0x48, 0x47, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x3F, 0x3E, 0x3D, 0x3C, 0x3B, 0x3A, 0x39, 0x38, 0x37, 0x36, 0x35, 0x34, 0x33, 0x32, 0x31, 0x30, 0x2F, 0x2E, 0x2D, 0x2C, 0x2B, 0x2A, 0x29, 0x28, 0x27, 0x26, 0x25, 0x24, 0x23, 0x22, 0x21, 0x20, 0x1F, 0x1E, 0x1D, 0x1C, 0x1B, 0x1A, 0x19, 0x18, 0x17, 0x16, 0x15, 0x14, 0x13, 0x12, 0x11, }; static const u8 b43_ntab_framelookup[] = { 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16, 0x0A, 0x0C, 0x1C, 0x1C, 0x0B, 0x0D, 0x1E, 0x1E, 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1A, 0x1A, 0x0E, 0x10, 0x20, 0x28, 0x0F, 0x11, 0x22, 0x2A, }; static const u32 b43_ntab_framestruct[] = { 0x08004A04, 0x00100000, 0x01000A05, 0x00100020, 0x09804506, 0x00100030, 0x09804507, 0x00100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08004A0C, 0x00100004, 0x01000A0D, 0x00100024, 0x0980450E, 0x00100034, 0x0980450F, 0x00100034, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000A04, 0x00100000, 0x11008A05, 0x00100020, 0x1980C506, 0x00100030, 0x21810506, 0x00100030, 0x21810506, 0x00100030, 0x01800504, 0x00100030, 0x11808505, 0x00100030, 0x29814507, 0x01100030, 0x00000A04, 0x00100000, 0x11008A05, 0x00100020, 0x21810506, 0x00100030, 0x21810506, 0x00100030, 0x29814507, 0x01100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000A0C, 0x00100008, 0x11008A0D, 0x00100028, 0x1980C50E, 0x00100038, 0x2181050E, 0x00100038, 0x2181050E, 0x00100038, 0x0180050C, 0x00100038, 0x1180850D, 0x00100038, 0x2981450F, 0x01100038, 0x00000A0C, 
0x00100008, 0x11008A0D, 0x00100028, 0x2181050E, 0x00100038, 0x2181050E, 0x00100038, 0x2981450F, 0x01100038, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08004A04, 0x00100000, 0x01000A05, 0x00100020, 0x1980C506, 0x00100030, 0x1980C506, 0x00100030, 0x11808504, 0x00100030, 0x3981CA05, 0x00100030, 0x29814507, 0x01100030, 0x00000000, 0x00000000, 0x10008A04, 0x00100000, 0x3981CA05, 0x00100030, 0x1980C506, 0x00100030, 0x29814507, 0x01100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08004A0C, 0x00100008, 0x01000A0D, 0x00100028, 0x1980C50E, 0x00100038, 0x1980C50E, 0x00100038, 0x1180850C, 0x00100038, 0x3981CA0D, 0x00100038, 0x2981450F, 0x01100038, 0x00000000, 0x00000000, 0x10008A0C, 0x00100008, 0x3981CA0D, 0x00100038, 0x1980C50E, 0x00100038, 0x2981450F, 0x01100038, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x40021404, 0x00100000, 0x02001405, 0x00100040, 0x0B004A06, 0x01900060, 0x13008A06, 0x01900060, 0x13008A06, 0x01900060, 0x43020A04, 0x00100060, 0x1B00CA05, 0x00100060, 0x23010A07, 0x01500060, 0x40021404, 0x00100000, 0x1A00D405, 0x00100040, 0x13008A06, 0x01900060, 0x13008A06, 0x01900060, 0x23010A07, 0x01500060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140C, 0x00100010, 0x0200140D, 0x00100050, 0x0B004A0E, 0x01900070, 0x13008A0E, 0x01900070, 0x13008A0E, 0x01900070, 0x43020A0C, 0x00100070, 0x1B00CA0D, 0x00100070, 0x23010A0F, 0x01500070, 0x4002140C, 0x00100010, 0x1A00D40D, 0x00100050, 0x13008A0E, 0x01900070, 0x13008A0E, 0x01900070, 0x23010A0F, 0x01500070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x50029404, 0x00100000, 0x32019405, 0x00100040, 0x0B004A06, 0x01900060, 0x0B004A06, 0x01900060, 0x5B02CA04, 0x00100060, 0x3B01D405, 0x00100060, 0x23010A07, 0x01500060, 0x00000000, 0x00000000, 0x5802D404, 0x00100000, 0x3B01D405, 0x00100060, 0x0B004A06, 0x01900060, 0x23010A07, 
0x01500060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x5002940C, 0x00100010, 0x3201940D, 0x00100050, 0x0B004A0E, 0x01900070, 0x0B004A0E, 0x01900070, 0x5B02CA0C, 0x00100070, 0x3B01D40D, 0x00100070, 0x23010A0F, 0x01500070, 0x00000000, 0x00000000, 0x5802D40C, 0x00100010, 0x3B01D40D, 0x00100070, 0x0B004A0E, 0x01900070, 0x23010A0F, 0x01500070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x40021404, 0x000F4800, 0x62031405, 0x00100040, 0x53028A06, 0x01900060, 0x53028A07, 0x01900060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140C, 0x000F4808, 0x6203140D, 0x00100048, 0x53028A0E, 0x01900068, 0x53028A0F, 0x01900068, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000A0C, 0x00100004, 0x11008A0D, 0x00100024, 0x1980C50E, 0x00100034, 0x2181050E, 0x00100034, 0x2181050E, 0x00100034, 0x0180050C, 0x00100038, 0x1180850D, 0x00100038, 0x1181850D, 0x00100038, 0x2981450F, 0x01100038, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000A0C, 0x00100008, 0x11008A0D, 0x00100028, 0x2181050E, 0x00100038, 0x2181050E, 0x00100038, 0x1181850D, 0x00100038, 0x2981450F, 0x01100038, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08004A04, 0x00100000, 0x01000A05, 0x00100020, 0x0180C506, 0x00100030, 0x0180C506, 0x00100030, 0x2180C50C, 0x00100030, 0x49820A0D, 0x0016A130, 0x41824A0D, 0x0016A130, 0x2981450F, 0x01100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x2000CA0C, 0x00100000, 0x49820A0D, 0x0016A130, 0x1980C50E, 0x00100030, 0x41824A0D, 0x0016A130, 0x2981450F, 0x01100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140C, 0x00100008, 0x0200140D, 0x00100048, 0x0B004A0E, 0x01900068, 0x13008A0E, 0x01900068, 0x13008A0E, 0x01900068, 0x43020A0C, 0x00100070, 0x1B00CA0D, 0x00100070, 0x1B014A0D, 0x00100070, 0x23010A0F, 0x01500070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140C, 0x00100010, 0x1A00D40D, 0x00100050, 0x13008A0E, 0x01900070, 0x13008A0E, 0x01900070, 0x1B014A0D, 0x00100070, 0x23010A0F, 0x01500070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x50029404, 0x00100000, 0x32019405, 0x00100040, 0x03004A06, 0x01900060, 0x03004A06, 0x01900060, 0x6B030A0C, 0x00100060, 0x4B02140D, 0x0016A160, 0x4302540D, 0x0016A160, 0x23010A0F, 0x01500060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x6B03140C, 0x00100060, 0x4B02140D, 0x0016A160, 0x0B004A0E, 0x01900060, 0x4302540D, 0x0016A160, 0x23010A0F, 0x01500060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x40021404, 0x00100000, 0x1A00D405, 
0x00100040, 0x53028A06, 0x01900060, 0x5B02CA06, 0x01900060, 0x5B02CA06, 0x01900060, 0x43020A04, 0x00100060, 0x1B00CA05, 0x00100060, 0x53028A07, 0x0190C060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140C, 0x00100010, 0x1A00D40D, 0x00100050, 0x53028A0E, 0x01900070, 0x5B02CA0E, 0x01900070, 0x5B02CA0E, 0x01900070, 0x43020A0C, 0x00100070, 0x1B00CA0D, 0x00100070, 0x53028A0F, 0x0190C070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x40021404, 0x00100000, 0x1A00D405, 0x00100040, 0x5B02CA06, 0x01900060, 0x5B02CA06, 0x01900060, 0x53028A07, 0x0190C060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140C, 0x00100010, 0x1A00D40D, 0x00100050, 0x5B02CA0E, 0x01900070, 0x5B02CA0E, 0x01900070, 0x53028A0F, 0x0190C070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; static const u32 b43_ntab_gainctl0[] = { 0x03CC2B44, 0x03CC2B42, 0x03CC2B40, 0x03CC2B3E, 0x03CC2B3D, 0x03CC2B3B, 0x03C82B44, 0x03C82B42, 0x03C82B40, 0x03C82B3E, 0x03C82B3D, 0x03C82B3B, 0x03C82B39, 0x03C82B38, 0x03C82B36, 0x03C82B34, 0x03C42B44, 0x03C42B42, 0x03C42B40, 0x03C42B3E, 0x03C42B3D, 0x03C42B3B, 0x03C42B39, 0x03C42B38, 0x03C42B36, 0x03C42B34, 0x03C42B33, 0x03C42B32, 0x03C42B30, 0x03C42B2F, 0x03C42B2D, 0x03C02B44, 0x03C02B42, 0x03C02B40, 0x03C02B3E, 0x03C02B3D, 0x03C02B3B, 0x03C02B39, 0x03C02B38, 0x03C02B36, 0x03C02B34, 0x03B02B44, 0x03B02B42, 0x03B02B40, 0x03B02B3E, 0x03B02B3D, 0x03B02B3B, 0x03B02B39, 0x03B02B38, 0x03B02B36, 0x03B02B34, 0x03B02B33, 0x03B02B32, 0x03B02B30, 0x03B02B2F, 0x03B02B2D, 0x03A02B44, 0x03A02B42, 0x03A02B40, 0x03A02B3E, 0x03A02B3D, 0x03A02B3B, 0x03A02B39, 0x03A02B38, 0x03A02B36, 0x03A02B34, 0x03902B44, 0x03902B42, 0x03902B40, 0x03902B3E, 0x03902B3D, 0x03902B3B, 0x03902B39, 0x03902B38, 0x03902B36, 0x03902B34, 
0x03902B33, 0x03902B32, 0x03902B30, 0x03802B44, 0x03802B42, 0x03802B40, 0x03802B3E, 0x03802B3D, 0x03802B3B, 0x03802B39, 0x03802B38, 0x03802B36, 0x03802B34, 0x03802B33, 0x03802B32, 0x03802B30, 0x03802B2F, 0x03802B2D, 0x03802B2C, 0x03802B2B, 0x03802B2A, 0x03802B29, 0x03802B27, 0x03802B26, 0x03802B25, 0x03802B24, 0x03802B23, 0x03802B22, 0x03802B21, 0x03802B20, 0x03802B1F, 0x03802B1E, 0x03802B1E, 0x03802B1D, 0x03802B1C, 0x03802B1B, 0x03802B1A, 0x03802B1A, 0x03802B19, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x00002B00, }; static const u32 b43_ntab_gainctl1[] = { 0x03CC2B44, 0x03CC2B42, 0x03CC2B40, 0x03CC2B3E, 0x03CC2B3D, 0x03CC2B3B, 0x03C82B44, 0x03C82B42, 0x03C82B40, 0x03C82B3E, 0x03C82B3D, 0x03C82B3B, 0x03C82B39, 0x03C82B38, 0x03C82B36, 0x03C82B34, 0x03C42B44, 0x03C42B42, 0x03C42B40, 0x03C42B3E, 0x03C42B3D, 0x03C42B3B, 0x03C42B39, 0x03C42B38, 0x03C42B36, 0x03C42B34, 0x03C42B33, 0x03C42B32, 0x03C42B30, 0x03C42B2F, 0x03C42B2D, 0x03C02B44, 0x03C02B42, 0x03C02B40, 0x03C02B3E, 0x03C02B3D, 0x03C02B3B, 0x03C02B39, 0x03C02B38, 0x03C02B36, 0x03C02B34, 0x03B02B44, 0x03B02B42, 0x03B02B40, 0x03B02B3E, 0x03B02B3D, 0x03B02B3B, 0x03B02B39, 0x03B02B38, 0x03B02B36, 0x03B02B34, 0x03B02B33, 0x03B02B32, 0x03B02B30, 0x03B02B2F, 0x03B02B2D, 0x03A02B44, 0x03A02B42, 0x03A02B40, 0x03A02B3E, 0x03A02B3D, 0x03A02B3B, 0x03A02B39, 0x03A02B38, 0x03A02B36, 0x03A02B34, 0x03902B44, 0x03902B42, 0x03902B40, 0x03902B3E, 0x03902B3D, 0x03902B3B, 0x03902B39, 0x03902B38, 0x03902B36, 0x03902B34, 0x03902B33, 0x03902B32, 0x03902B30, 0x03802B44, 0x03802B42, 0x03802B40, 0x03802B3E, 0x03802B3D, 0x03802B3B, 0x03802B39, 0x03802B38, 0x03802B36, 0x03802B34, 0x03802B33, 0x03802B32, 0x03802B30, 0x03802B2F, 0x03802B2D, 0x03802B2C, 0x03802B2B, 0x03802B2A, 0x03802B29, 0x03802B27, 0x03802B26, 0x03802B25, 0x03802B24, 0x03802B23, 0x03802B22, 0x03802B21, 0x03802B20, 0x03802B1F, 0x03802B1E, 0x03802B1E, 0x03802B1D, 0x03802B1C, 
0x03802B1B, 0x03802B1A, 0x03802B1A, 0x03802B19, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x03802B18, 0x00002B00, }; static const u32 b43_ntab_intlevel[] = { 0x00802070, 0x0671188D, 0x0A60192C, 0x0A300E46, 0x00C1188D, 0x080024D2, 0x00000070, }; static const u32 b43_ntab_iqlt0[] = { 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, }; static const u32 b43_ntab_iqlt1[] = { 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 
0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, 0x0000007F, }; static const u16 b43_ntab_loftlt0[] = { 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 
0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, }; static const u16 b43_ntab_loftlt1[] = { 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, 0x0000, 0x0101, 0x0002, 0x0103, }; static const u8 b43_ntab_mcs[] = { 0x00, 0x08, 0x0A, 0x10, 0x12, 0x19, 0x1A, 0x1C, 0x40, 0x48, 0x4A, 0x50, 0x52, 0x59, 0x5A, 0x5C, 0x80, 0x88, 0x8A, 0x90, 0x92, 0x99, 0x9A, 0x9C, 0xC0, 0xC8, 0xCA, 0xD0, 0xD2, 0xD9, 0xDA, 0xDC, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x02, 0x04, 0x08, 0x09, 0x0A, 0x0C, 0x10, 0x11, 0x12, 0x14, 0x18, 0x19, 0x1A, 0x1C, 0x20, 0x21, 0x22, 0x24, 0x40, 0x41, 0x42, 0x44, 0x48, 0x49, 0x4A, 0x4C, 0x50, 0x51, 0x52, 0x54, 0x58, 0x59, 0x5A, 0x5C, 0x60, 0x61, 0x62, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; static const u32 b43_ntab_noisevar10[] = { 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, }; static const u32 b43_ntab_noisevar11[] = { 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 
0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, 0x020C020C, 0x0000014D, }; static const u16 b43_ntab_pilot[] = { 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0xFF08, 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0x80D5, 0xFF0A, 0xFF82, 0xFFA0, 0xFF28, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xFF82, 0xFFA0, 0xFF28, 0xFF0A, 0xFFFF, 0xFFFF, 0xFFFF, 0xFFFF, 0xF83F, 0xFA1F, 0xFA97, 0xFAB5, 0xF2BD, 0xF0BF, 0xFFFF, 0xFFFF, 0xF017, 0xF815, 0xF215, 0xF095, 0xF035, 0xF01D, 0xFFFF, 0xFFFF, 0xFF08, 0xFF02, 0xFF80, 0xFF20, 0xFF08, 0xFF02, 0xFF80, 0xFF20, 0xF01F, 0xF817, 0xFA15, 0xF295, 0xF0B5, 0xF03D, 0xFFFF, 0xFFFF, 0xF82A, 0xFA0A, 0xFA82, 0xFAA0, 0xF2A8, 0xF0AA, 0xFFFF, 0xFFFF, 0xF002, 0xF800, 0xF200, 0xF080, 0xF020, 0xF008, 0xFFFF, 0xFFFF, 0xF00A, 0xF802, 0xFA00, 0xF280, 0xF0A0, 0xF028, 0xFFFF, 0xFFFF, }; static const u32 b43_ntab_pilotlt[] = { 0x76540123, 0x62407351, 0x76543201, 0x76540213, 0x76540123, 0x76430521, }; static const u32 b43_ntab_tdi20a0[] = { 0x00091226, 0x000A1429, 0x000B56AD, 0x000C58B0, 0x000D5AB3, 0x000E9CB6, 0x000F9EBA, 0x0000C13D, 0x00020301, 0x00030504, 0x00040708, 0x0005090B, 0x00064B8E, 0x00095291, 0x000A5494, 0x000B9718, 0x000C9927, 0x000D9B2A, 0x000EDD2E, 0x000FDF31, 0x000101B4, 0x000243B7, 0x000345BB, 0x000447BE, 0x00058982, 0x00068C05, 0x00099309, 0x000A950C, 0x000BD78F, 0x000CD992, 0x000DDB96, 0x000F1D99, 0x00005FA8, 
0x0001422C, 0x0002842F, 0x00038632, 0x00048835, 0x0005CA38, 0x0006CCBC, 0x0009D3BF, 0x000B1603, 0x000C1806, 0x000D1A0A, 0x000E1C0D, 0x000F5E10, 0x00008093, 0x00018297, 0x0002C49A, 0x0003C680, 0x0004C880, 0x00060B00, 0x00070D00, 0x00000000, 0x00000000, 0x00000000, }; static const u32 b43_ntab_tdi20a1[] = { 0x00014B26, 0x00028D29, 0x000393AD, 0x00049630, 0x0005D833, 0x0006DA36, 0x00099C3A, 0x000A9E3D, 0x000BC081, 0x000CC284, 0x000DC488, 0x000F068B, 0x0000488E, 0x00018B91, 0x0002D214, 0x0003D418, 0x0004D6A7, 0x000618AA, 0x00071AAE, 0x0009DCB1, 0x000B1EB4, 0x000C0137, 0x000D033B, 0x000E053E, 0x000F4702, 0x00008905, 0x00020C09, 0x0003128C, 0x0004148F, 0x00051712, 0x00065916, 0x00091B19, 0x000A1D28, 0x000B5F2C, 0x000C41AF, 0x000D43B2, 0x000E85B5, 0x000F87B8, 0x0000C9BC, 0x00024CBF, 0x00035303, 0x00045506, 0x0005978A, 0x0006998D, 0x00095B90, 0x000A5D93, 0x000B9F97, 0x000C821A, 0x000D8400, 0x000EC600, 0x000FC800, 0x00010A00, 0x00000000, 0x00000000, 0x00000000, }; static const u32 b43_ntab_tdi40a0[] = { 0x0011A346, 0x00136CCF, 0x0014F5D9, 0x001641E2, 0x0017CB6B, 0x00195475, 0x001B2383, 0x001CAD0C, 0x001E7616, 0x0000821F, 0x00020BA8, 0x0003D4B2, 0x00056447, 0x00072DD0, 0x0008B6DA, 0x000A02E3, 0x000B8C6C, 0x000D15F6, 0x0011E484, 0x0013AE0D, 0x00153717, 0x00168320, 0x00180CA9, 0x00199633, 0x001B6548, 0x001CEED1, 0x001EB7DB, 0x0000C3E4, 0x00024D6D, 0x000416F7, 0x0005A585, 0x00076F0F, 0x0008F818, 0x000A4421, 0x000BCDAB, 0x000D9734, 0x00122649, 0x0013EFD2, 0x001578DC, 0x0016C4E5, 0x00184E6E, 0x001A17F8, 0x001BA686, 0x001D3010, 0x001EF999, 0x00010522, 0x00028EAC, 0x00045835, 0x0005E74A, 0x0007B0D3, 0x00093A5D, 0x000A85E6, 0x000C0F6F, 0x000DD8F9, 0x00126787, 0x00143111, 0x0015BA9A, 0x00170623, 0x00188FAD, 0x001A5936, 0x001BE84B, 0x001DB1D4, 0x001F3B5E, 0x000146E7, 0x00031070, 0x000499FA, 0x00062888, 0x0007F212, 0x00097B9B, 0x000AC7A4, 0x000C50AE, 0x000E1A37, 0x0012A94C, 0x001472D5, 0x0015FC5F, 0x00174868, 0x0018D171, 0x001A9AFB, 0x001C2989, 0x001DF313, 0x001F7C9C, 0x000188A5, 
0x000351AF, 0x0004DB38, 0x0006AA4D, 0x000833D7, 0x0009BD60, 0x000B0969, 0x000C9273, 0x000E5BFC, 0x00132A8A, 0x0014B414, 0x00163D9D, 0x001789A6, 0x001912B0, 0x001ADC39, 0x001C6BCE, 0x001E34D8, 0x001FBE61, 0x0001CA6A, 0x00039374, 0x00051CFD, 0x0006EC0B, 0x00087515, 0x0009FE9E, 0x000B4AA7, 0x000CD3B1, 0x000E9D3A, 0x00000000, 0x00000000, }; static const u32 b43_ntab_tdi40a1[] = { 0x001EDB36, 0x000129CA, 0x0002B353, 0x00047CDD, 0x0005C8E6, 0x000791EF, 0x00091BF9, 0x000AAA07, 0x000C3391, 0x000DFD1A, 0x00120923, 0x0013D22D, 0x00155C37, 0x0016EACB, 0x00187454, 0x001A3DDE, 0x001B89E7, 0x001D12F0, 0x001F1CFA, 0x00016B88, 0x00033492, 0x0004BE1B, 0x00060A24, 0x0007D32E, 0x00095D38, 0x000AEC4C, 0x000C7555, 0x000E3EDF, 0x00124AE8, 0x001413F1, 0x0015A37B, 0x00172C89, 0x0018B593, 0x001A419C, 0x001BCB25, 0x001D942F, 0x001F63B9, 0x0001AD4D, 0x00037657, 0x0004C260, 0x00068BE9, 0x000814F3, 0x0009A47C, 0x000B2D8A, 0x000CB694, 0x000E429D, 0x00128C26, 0x001455B0, 0x0015E4BA, 0x00176E4E, 0x0018F758, 0x001A8361, 0x001C0CEA, 0x001DD674, 0x001FA57D, 0x0001EE8B, 0x0003B795, 0x0005039E, 0x0006CD27, 0x000856B1, 0x0009E5C6, 0x000B6F4F, 0x000CF859, 0x000E8462, 0x00130DEB, 0x00149775, 0x00162603, 0x0017AF8C, 0x00193896, 0x001AC49F, 0x001C4E28, 0x001E17B2, 0x0000A6C7, 0x00023050, 0x0003F9DA, 0x00054563, 0x00070EEC, 0x00089876, 0x000A2704, 0x000BB08D, 0x000D3A17, 0x001185A0, 0x00134F29, 0x0014D8B3, 0x001667C8, 0x0017F151, 0x00197ADB, 0x001B0664, 0x001C8FED, 0x001E5977, 0x0000E805, 0x0002718F, 0x00043B18, 0x000586A1, 0x0007502B, 0x0008D9B4, 0x000A68C9, 0x000BF252, 0x000DBBDC, 0x0011C7E5, 0x001390EE, 0x00151A78, 0x0016A906, 0x00183290, 0x0019BC19, 0x001B4822, 0x001CD12C, 0x001E9AB5, 0x00000000, 0x00000000, }; static const u32 b43_ntab_tdtrn[] = { 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6, 0x00000C38, 0xFE5212F6, 0xF592FE36, 0x0050EE68, 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52, 0x0C380000, 0x12F6FE52, 0xFE36F592, 0xEE680050, 0x061C061C, 0x0050EE68, 0xF592FE36, 0xFE5212F6, 0x00000C38, 
0xFE5212F6, 0xF592FE36, 0x0050EE68, 0x061C061C, 0xEE680050, 0xFE36F592, 0x12F6FE52, 0x0C380000, 0x12F6FE52, 0xFE36F592, 0xEE680050, 0x05E305E3, 0x004DEF0C, 0xF5F3FE47, 0xFE611246, 0x00000BC7, 0xFE611246, 0xF5F3FE47, 0x004DEF0C, 0x05E305E3, 0xEF0C004D, 0xFE47F5F3, 0x1246FE61, 0x0BC70000, 0x1246FE61, 0xFE47F5F3, 0xEF0C004D, 0x05E305E3, 0x004DEF0C, 0xF5F3FE47, 0xFE611246, 0x00000BC7, 0xFE611246, 0xF5F3FE47, 0x004DEF0C, 0x05E305E3, 0xEF0C004D, 0xFE47F5F3, 0x1246FE61, 0x0BC70000, 0x1246FE61, 0xFE47F5F3, 0xEF0C004D, 0xFA58FA58, 0xF895043B, 0xFF4C09C0, 0xFBC6FFA8, 0xFB84F384, 0x0798F6F9, 0x05760122, 0x058409F6, 0x0B500000, 0x05B7F542, 0x08860432, 0x06DDFEE7, 0xFB84F384, 0xF9D90664, 0xF7E8025C, 0x00FFF7BD, 0x05A805A8, 0xF7BD00FF, 0x025CF7E8, 0x0664F9D9, 0xF384FB84, 0xFEE706DD, 0x04320886, 0xF54205B7, 0x00000B50, 0x09F60584, 0x01220576, 0xF6F90798, 0xF384FB84, 0xFFA8FBC6, 0x09C0FF4C, 0x043BF895, 0x02D402D4, 0x07DE0270, 0xFC96079C, 0xF90AFE94, 0xFE00FF2C, 0x02D4065D, 0x092A0096, 0x0014FBB8, 0xFD2CFD2C, 0x076AFB3C, 0x0096F752, 0xF991FD87, 0xFB2C0200, 0xFEB8F960, 0x08E0FC96, 0x049802A8, 0xFD2CFD2C, 0x02A80498, 0xFC9608E0, 0xF960FEB8, 0x0200FB2C, 0xFD87F991, 0xF7520096, 0xFB3C076A, 0xFD2CFD2C, 0xFBB80014, 0x0096092A, 0x065D02D4, 0xFF2CFE00, 0xFE94F90A, 0x079CFC96, 0x027007DE, 0x02D402D4, 0x027007DE, 0x079CFC96, 0xFE94F90A, 0xFF2CFE00, 0x065D02D4, 0x0096092A, 0xFBB80014, 0xFD2CFD2C, 0xFB3C076A, 0xF7520096, 0xFD87F991, 0x0200FB2C, 0xF960FEB8, 0xFC9608E0, 0x02A80498, 0xFD2CFD2C, 0x049802A8, 0x08E0FC96, 0xFEB8F960, 0xFB2C0200, 0xF991FD87, 0x0096F752, 0x076AFB3C, 0xFD2CFD2C, 0x0014FBB8, 0x092A0096, 0x02D4065D, 0xFE00FF2C, 0xF90AFE94, 0xFC96079C, 0x07DE0270, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x062A0000, 0xFEFA0759, 0x08B80908, 0xF396FC2D, 0xF9D6045C, 0xFC4EF608, 0xF748F596, 0x07B207BF, 0x062A062A, 0xF84EF841, 0xF748F596, 0x03B209F8, 0xF9D6045C, 0x0C6A03D3, 0x08B80908, 0x0106F8A7, 0x062A0000, 0xFEFAF8A7, 0x08B8F6F8, 0xF39603D3, 0xF9D6FBA4, 0xFC4E09F8, 0xF7480A6A, 0x07B2F841, 0x062AF9D6, 0xF84E07BF, 0xF7480A6A, 0x03B2F608, 0xF9D6FBA4, 0x0C6AFC2D, 0x08B8F6F8, 0x01060759, 0x062A0000, 0xFEFA0759, 0x08B80908, 0xF396FC2D, 0xF9D6045C, 0xFC4EF608, 0xF748F596, 0x07B207BF, 0x062A062A, 0xF84EF841, 0xF748F596, 0x03B209F8, 0xF9D6045C, 0x0C6A03D3, 0x08B80908, 0x0106F8A7, 0x062A0000, 0xFEFAF8A7, 0x08B8F6F8, 0xF39603D3, 0xF9D6FBA4, 0xFC4E09F8, 0xF7480A6A, 0x07B2F841, 0x062AF9D6, 0xF84E07BF, 0xF7480A6A, 0x03B2F608, 0xF9D6FBA4, 0x0C6AFC2D, 0x08B8F6F8, 0x01060759, 0x061C061C, 0xFF30009D, 0xFFB21141, 0xFD87FB54, 0xF65DFE59, 0x02EEF99E, 0x0166F03C, 0xFFF809B6, 0x000008A4, 0x000AF42B, 0x00EFF577, 0xFA840BF2, 0xFC02FF51, 0x08260F67, 0xFFF0036F, 0x0842F9C3, 0x00000000, 0x063DF7BE, 0xFC910010, 0xF099F7DA, 0x00AF03FE, 0xF40E057C, 0x0A89FF11, 0x0BD5FFF6, 0xF75C0000, 0xF64A0008, 0x0FC4FE9A, 0x0662FD12, 0x01A709A3, 0x04AC0279, 0xEEBF004E, 0xFF6300D0, 0xF9E4F9E4, 
0x00D0FF63, 0x004EEEBF, 0x027904AC, 0x09A301A7, 0xFD120662, 0xFE9A0FC4, 0x0008F64A, 0x0000F75C, 0xFFF60BD5, 0xFF110A89, 0x057CF40E, 0x03FE00AF, 0xF7DAF099, 0x0010FC91, 0xF7BE063D, 0x00000000, 0xF9C30842, 0x036FFFF0, 0x0F670826, 0xFF51FC02, 0x0BF2FA84, 0xF57700EF, 0xF42B000A, 0x08A40000, 0x09B6FFF8, 0xF03C0166, 0xF99E02EE, 0xFE59F65D, 0xFB54FD87, 0x1141FFB2, 0x009DFF30, 0x05E30000, 0xFF060705, 0x085408A0, 0xF425FC59, 0xFA1D042A, 0xFC78F67A, 0xF7ACF60E, 0x075A0766, 0x05E305E3, 0xF8A6F89A, 0xF7ACF60E, 0x03880986, 0xFA1D042A, 0x0BDB03A7, 0x085408A0, 0x00FAF8FB, 0x05E30000, 0xFF06F8FB, 0x0854F760, 0xF42503A7, 0xFA1DFBD6, 0xFC780986, 0xF7AC09F2, 0x075AF89A, 0x05E3FA1D, 0xF8A60766, 0xF7AC09F2, 0x0388F67A, 0xFA1DFBD6, 0x0BDBFC59, 0x0854F760, 0x00FA0705, 0x05E30000, 0xFF060705, 0x085408A0, 0xF425FC59, 0xFA1D042A, 0xFC78F67A, 0xF7ACF60E, 0x075A0766, 0x05E305E3, 0xF8A6F89A, 0xF7ACF60E, 0x03880986, 0xFA1D042A, 0x0BDB03A7, 0x085408A0, 0x00FAF8FB, 0x05E30000, 0xFF06F8FB, 0x0854F760, 0xF42503A7, 0xFA1DFBD6, 0xFC780986, 0xF7AC09F2, 0x075AF89A, 0x05E3FA1D, 0xF8A60766, 0xF7AC09F2, 0x0388F67A, 0xFA1DFBD6, 0x0BDBFC59, 0x0854F760, 0x00FA0705, 0xFA58FA58, 0xF8F0FE00, 0x0448073D, 0xFDC9FE46, 0xF9910258, 0x089D0407, 0xFD5CF71A, 0x02AFFDE0, 0x083E0496, 0xFF5A0740, 0xFF7AFD97, 0x00FE01F1, 0x0009082E, 0xFA94FF75, 0xFECDF8EA, 0xFFB0F693, 0xFD2CFA58, 0x0433FF16, 0xFBA405DD, 0xFA610341, 0x06A606CB, 0x0039FD2D, 0x0677FA97, 0x01FA05E0, 0xF896003E, 0x075A068B, 0x012CFC3E, 0xFA23F98D, 0xFC7CFD43, 0xFF90FC0D, 0x01C10982, 0x00C601D6, 0xFD2CFD2C, 0x01D600C6, 0x098201C1, 0xFC0DFF90, 0xFD43FC7C, 0xF98DFA23, 0xFC3E012C, 0x068B075A, 0x003EF896, 0x05E001FA, 0xFA970677, 0xFD2D0039, 0x06CB06A6, 0x0341FA61, 0x05DDFBA4, 0xFF160433, 0xFA58FD2C, 0xF693FFB0, 0xF8EAFECD, 0xFF75FA94, 0x082E0009, 0x01F100FE, 0xFD97FF7A, 0x0740FF5A, 0x0496083E, 0xFDE002AF, 0xF71AFD5C, 0x0407089D, 0x0258F991, 0xFE46FDC9, 0x073D0448, 0xFE00F8F0, 0xFD2CFD2C, 0xFCE00500, 0xFC09FDDC, 0xFE680157, 0x04C70571, 0xFC3AFF21, 0xFCD70228, 
0x056D0277, 0x0200FE00, 0x0022F927, 0xFE3C032B, 0xFC44FF3C, 0x03E9FBDB, 0x04570313, 0x04C9FF5C, 0x000D03B8, 0xFA580000, 0xFBE900D2, 0xF9D0FE0B, 0x0125FDF9, 0x042501BF, 0x0328FA2B, 0xFFA902F0, 0xFA250157, 0x0200FE00, 0x03740438, 0xFF0405FD, 0x030CFE52, 0x0037FB39, 0xFF6904C5, 0x04F8FD23, 0xFD31FC1B, 0xFD2CFD2C, 0xFC1BFD31, 0xFD2304F8, 0x04C5FF69, 0xFB390037, 0xFE52030C, 0x05FDFF04, 0x04380374, 0xFE000200, 0x0157FA25, 0x02F0FFA9, 0xFA2B0328, 0x01BF0425, 0xFDF90125, 0xFE0BF9D0, 0x00D2FBE9, 0x0000FA58, 0x03B8000D, 0xFF5C04C9, 0x03130457, 0xFBDB03E9, 0xFF3CFC44, 0x032BFE3C, 0xF9270022, 0xFE000200, 0x0277056D, 0x0228FCD7, 0xFF21FC3A, 0x057104C7, 0x0157FE68, 0xFDDCFC09, 0x0500FCE0, 0xFD2CFD2C, 0x0500FCE0, 0xFDDCFC09, 0x0157FE68, 0x057104C7, 0xFF21FC3A, 0x0228FCD7, 0x0277056D, 0xFE000200, 0xF9270022, 0x032BFE3C, 0xFF3CFC44, 0xFBDB03E9, 0x03130457, 0xFF5C04C9, 0x03B8000D, 0x0000FA58, 0x00D2FBE9, 0xFE0BF9D0, 0xFDF90125, 0x01BF0425, 0xFA2B0328, 0x02F0FFA9, 0x0157FA25, 0xFE000200, 0x04380374, 0x05FDFF04, 0xFE52030C, 0xFB390037, 0x04C5FF69, 0xFD2304F8, 0xFC1BFD31, 0xFD2CFD2C, 0xFD31FC1B, 0x04F8FD23, 0xFF6904C5, 0x0037FB39, 0x030CFE52, 0xFF0405FD, 0x03740438, 0x0200FE00, 0xFA250157, 0xFFA902F0, 0x0328FA2B, 0x042501BF, 0x0125FDF9, 0xF9D0FE0B, 0xFBE900D2, 0xFA580000, 0x000D03B8, 0x04C9FF5C, 0x04570313, 0x03E9FBDB, 0xFC44FF3C, 0xFE3C032B, 0x0022F927, 0x0200FE00, 0x056D0277, 0xFCD70228, 0xFC3AFF21, 0x04C70571, 0xFE680157, 0xFC09FDDC, 0xFCE00500, 0x05A80000, 0xFF1006BE, 0x0800084A, 0xF49CFC7E, 0xFA580400, 0xFC9CF6DA, 0xF800F672, 0x0710071C, 0x05A805A8, 0xF8F0F8E4, 0xF800F672, 0x03640926, 0xFA580400, 0x0B640382, 0x0800084A, 0x00F0F942, 0x05A80000, 0xFF10F942, 0x0800F7B6, 0xF49C0382, 0xFA58FC00, 0xFC9C0926, 0xF800098E, 0x0710F8E4, 0x05A8FA58, 0xF8F0071C, 0xF800098E, 0x0364F6DA, 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE, 0x05A80000, 0xFF1006BE, 0x0800084A, 0xF49CFC7E, 0xFA580400, 0xFC9CF6DA, 0xF800F672, 0x0710071C, 0x05A805A8, 0xF8F0F8E4, 0xF800F672, 0x03640926, 0xFA580400, 
0x0B640382, 0x0800084A, 0x00F0F942, 0x05A80000, 0xFF10F942, 0x0800F7B6, 0xF49C0382, 0xFA58FC00, 0xFC9C0926, 0xF800098E, 0x0710F8E4, 0x05A8FA58, 0xF8F0071C, 0xF800098E, 0x0364F6DA, 0xFA58FC00, 0x0B64FC7E, 0x0800F7B6, 0x00F006BE, }; static const u32 b43_ntab_tmap[] = { 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0xF1111110, 0x11111111, 0x11F11111, 0x00000111, 0x11000000, 0x1111F111, 0x11111111, 0x111111F1, 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x000AA888, 0x88880000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0xA1111110, 0x11111111, 0x11C11111, 0x00000111, 0x11000000, 0x1111A111, 0x11111111, 0x111111A1, 0xA2222220, 0x22222222, 0x22C22222, 0x00000222, 0x22000000, 0x2222A222, 0x22222222, 0x222222A2, 0xF1111110, 0x11111111, 0x11F11111, 0x00011111, 0x11110000, 0x1111F111, 0x11111111, 0x111111F1, 0xA8AA88A0, 0xA88888A8, 0xA8A8A88A, 0x00088AAA, 0xAAAA0000, 0xA8A8AA88, 0xA88AAAAA, 0xAAAA8A8A, 0xAAA8AAA0, 0x8AAA8AAA, 0xAA8A8A8A, 0x000AAA88, 0x8AAA0000, 0xAAA8A888, 0x8AA88A8A, 0x8A88A888, 0x08080A00, 0x0A08080A, 0x080A0A08, 0x00080808, 0x080A0000, 0x080A0808, 0x080A0808, 0x0A0A0A08, 0xA0A0A0A0, 0x80A0A080, 0x8080A0A0, 0x00008080, 0x80A00000, 0x80A080A0, 0xA080A0A0, 0x8080A0A0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x99999000, 0x9B9B99BB, 0x9BB99999, 0x9999B9B9, 0x9B99BB90, 0x9BBBBB9B, 0x9B9B9BB9, 0x00000999, 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00AAA888, 
0x22000000, 0x2222B222, 0x22222222, 0x222222B2, 0xB2222220, 0x22222222, 0x22D22222, 0x00000222, 0x11000000, 0x1111A111, 0x11111111, 0x111111A1, 0xA1111110, 0x11111111, 0x11C11111, 0x00000111, 0x33000000, 0x3333B333, 0x33333333, 0x333333B3, 0xB3333330, 0x33333333, 0x33D33333, 0x00000333, 0x22000000, 0x2222A222, 0x22222222, 0x222222A2, 0xA2222220, 0x22222222, 0x22C22222, 0x00000222, 0x99B99B00, 0x9B9B99BB, 0x9BB99999, 0x9999B9B9, 0x9B99BB99, 0x9BBBBB9B, 0x9B9B9BB9, 0x00000999, 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA88, 0x8AAAAA8A, 0x8A8A8AA8, 0x08AAA888, 0x22222200, 0x2222F222, 0x22222222, 0x222222F2, 0x22222222, 0x22222222, 0x22F22222, 0x00000222, 0x11000000, 0x1111F111, 0x11111111, 0x11111111, 0xF1111111, 0x11111111, 0x11F11111, 0x01111111, 0xBB9BB900, 0xB9B9BB99, 0xB99BBBBB, 0xBBBB9B9B, 0xB9BB99BB, 0xB99999B9, 0xB9B9B99B, 0x00000BBB, 0xAA000000, 0xA8A8AA88, 0xA88AAAAA, 0xAAAA8A8A, 0xA8AA88AA, 0xA88888A8, 0xA8A8A88A, 0x0A888AAA, 0xAA000000, 0xA8A8AA88, 0xA88AAAAA, 0xAAAA8A8A, 0xA8AA88A0, 0xA88888A8, 0xA8A8A88A, 0x00000AAA, 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 0xBBBBBB00, 0x999BBBBB, 0x9BB99B9B, 0xB9B9B9BB, 0xB9B99BBB, 0xB9B9B9BB, 0xB9BB9B99, 0x00000999, 0x8A000000, 0xAA88A888, 0xA88888AA, 0xA88A8A88, 0xA88AA88A, 0x88A8AAAA, 0xA8AA8AAA, 0x0888A88A, 0x0B0B0B00, 0x090B0B0B, 0x0B090B0B, 0x0909090B, 0x09090B0B, 0x09090B0B, 0x09090B09, 0x00000909, 0x0A000000, 0x0A080808, 0x080A080A, 0x080A0A08, 0x080A080A, 0x0808080A, 0x0A0A0A08, 0x0808080A, 0xB0B0B000, 0x9090B0B0, 0x90B09090, 0xB0B0B090, 0xB0B090B0, 0x90B0B0B0, 0xB0B09090, 0x00000090, 0x80000000, 0xA080A080, 0xA08080A0, 0xA0808080, 0xA080A080, 0x80A0A0A0, 0xA0A080A0, 0x00A0A0A0, 0x22000000, 0x2222F222, 0x22222222, 0x222222F2, 0xF2222220, 0x22222222, 0x22F22222, 0x00000222, 0x11000000, 0x1111F111, 0x11111111, 0x111111F1, 0xF1111110, 0x11111111, 0x11F11111, 0x00000111, 0x33000000, 0x3333F333, 0x33333333, 0x333333F3, 0xF3333330, 0x33333333, 
0x33F33333, 0x00000333, 0x22000000, 0x2222F222, 0x22222222, 0x222222F2, 0xF2222220, 0x22222222, 0x22F22222, 0x00000222, 0x99000000, 0x9B9B99BB, 0x9BB99999, 0x9999B9B9, 0x9B99BB90, 0x9BBBBB9B, 0x9B9B9BB9, 0x00000999, 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 0x88888000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00AAA888, 0x88A88A00, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA88, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA88, 0x8AAAAA8A, 0x8A8A8AA8, 0x08AAA888, 0x11000000, 0x1111A111, 0x11111111, 0x111111A1, 0xA1111110, 0x11111111, 0x11C11111, 0x00000111, 0x11000000, 0x1111A111, 0x11111111, 0x111111A1, 0xA1111110, 0x11111111, 0x11C11111, 0x00000111, 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 0x88000000, 0x8A8A88AA, 0x8AA88888, 0x8888A8A8, 0x8A88AA80, 0x8AAAAA8A, 0x8A8A8AA8, 0x00000888, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; static const u32 b43_ntab_framestruct_r3[] = { 0x08004a04, 0x00100000, 0x01000a05, 0x00100020, 0x09804506, 0x00100030, 0x09804507, 0x00100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08004a0c, 0x00100004, 0x01000a0d, 0x00100024, 0x0980450e, 0x00100034, 0x0980450f, 0x00100034, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000a04, 0x00100000, 0x11008a05, 0x00100020, 0x1980c506, 0x00100030, 0x21810506, 0x00100030, 0x21810506, 0x00100030, 0x01800504, 0x00100030, 0x11808505, 0x00100030, 0x29814507, 0x01100030, 0x00000a04, 0x00100000, 0x11008a05, 0x00100020, 0x21810506, 0x00100030, 0x21810506, 0x00100030, 0x29814507, 0x01100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028, 0x1980c50e, 0x00100038, 0x2181050e, 0x00100038, 0x2181050e, 0x00100038, 0x0180050c, 0x00100038, 0x1180850d, 0x00100038, 0x2981450f, 0x01100038, 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028, 0x2181050e, 0x00100038, 0x2181050e, 0x00100038, 0x2981450f, 0x01100038, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08004a04, 0x00100000, 0x01000a05, 0x00100020, 0x1980c506, 0x00100030, 0x1980c506, 0x00100030, 0x11808504, 0x00100030, 0x3981ca05, 0x00100030, 0x29814507, 0x01100030, 0x00000000, 0x00000000, 0x10008a04, 0x00100000, 0x3981ca05, 0x00100030, 0x1980c506, 0x00100030, 0x29814507, 0x01100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08004a0c, 0x00100008, 0x01000a0d, 0x00100028, 0x1980c50e, 0x00100038, 0x1980c50e, 0x00100038, 0x1180850c, 0x00100038, 0x3981ca0d, 0x00100038, 0x2981450f, 0x01100038, 0x00000000, 0x00000000, 0x10008a0c, 0x00100008, 0x3981ca0d, 0x00100038, 0x1980c50e, 0x00100038, 0x2981450f, 0x01100038, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x40021404, 0x00100000, 0x02001405, 0x00100040, 0x0b004a06, 0x01900060, 0x13008a06, 0x01900060, 0x13008a06, 0x01900060, 0x43020a04, 0x00100060, 0x1b00ca05, 0x00100060, 0x23010a07, 0x01500060, 0x40021404, 0x00100000, 0x1a00d405, 0x00100040, 0x13008a06, 0x01900060, 0x13008a06, 0x01900060, 0x23010a07, 0x01500060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x4002140c, 0x00100010, 0x0200140d, 0x00100050, 0x0b004a0e, 0x01900070, 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070, 0x43020a0c, 0x00100070, 0x1b00ca0d, 0x00100070, 0x23010a0f, 0x01500070, 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050, 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070, 0x23010a0f, 0x01500070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x50029404, 0x00100000, 0x32019405, 0x00100040, 0x0b004a06, 0x01900060, 0x0b004a06, 0x01900060, 0x5b02ca04, 0x00100060, 0x3b01d405, 0x00100060, 0x23010a07, 0x01500060, 0x00000000, 0x00000000, 0x5802d404, 0x00100000, 0x3b01d405, 0x00100060, 0x0b004a06, 0x01900060, 0x23010a07, 0x01500060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x5002940c, 0x00100010, 0x3201940d, 0x00100050, 0x0b004a0e, 0x01900070, 0x0b004a0e, 0x01900070, 0x5b02ca0c, 0x00100070, 0x3b01d40d, 0x00100070, 0x23010a0f, 0x01500070, 0x00000000, 0x00000000, 0x5802d40c, 0x00100010, 0x3b01d40d, 0x00100070, 0x0b004a0e, 0x01900070, 0x23010a0f, 0x01500070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x40021404, 0x000f4800, 0x62031405, 0x00100040, 0x53028a06, 0x01900060, 0x53028a07, 0x01900060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140c, 0x000f4808, 0x6203140d, 0x00100048, 0x53028a0e, 0x01900068, 0x53028a0f, 0x01900068, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000a0c, 0x00100004, 0x11008a0d, 0x00100024, 0x1980c50e, 0x00100034, 0x2181050e, 0x00100034, 0x2181050e, 0x00100034, 0x0180050c, 0x00100038, 0x1180850d, 0x00100038, 0x1181850d, 0x00100038, 0x2981450f, 0x01100038, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000a0c, 0x00100008, 0x11008a0d, 0x00100028, 
0x2181050e, 0x00100038, 0x2181050e, 0x00100038, 0x1181850d, 0x00100038, 0x2981450f, 0x01100038, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x08004a04, 0x00100000, 0x01000a05, 0x00100020, 0x0180c506, 0x00100030, 0x0180c506, 0x00100030, 0x2180c50c, 0x00100030, 0x49820a0d, 0x0016a130, 0x41824a0d, 0x0016a130, 0x2981450f, 0x01100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x2000ca0c, 0x00100000, 0x49820a0d, 0x0016a130, 0x1980c50e, 0x00100030, 0x41824a0d, 0x0016a130, 0x2981450f, 0x01100030, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140c, 0x00100008, 0x0200140d, 0x00100048, 0x0b004a0e, 0x01900068, 0x13008a0e, 0x01900068, 0x13008a0e, 0x01900068, 0x43020a0c, 0x00100070, 0x1b00ca0d, 0x00100070, 0x1b014a0d, 0x00100070, 0x23010a0f, 0x01500070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050, 0x13008a0e, 0x01900070, 0x13008a0e, 0x01900070, 0x1b014a0d, 0x00100070, 0x23010a0f, 0x01500070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x50029404, 0x00100000, 0x32019405, 0x00100040, 0x03004a06, 0x01900060, 0x03004a06, 0x01900060, 0x6b030a0c, 0x00100060, 
0x4b02140d, 0x0016a160, 0x4302540d, 0x0016a160, 0x23010a0f, 0x01500060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x6b03140c, 0x00100060, 0x4b02140d, 0x0016a160, 0x0b004a0e, 0x01900060, 0x4302540d, 0x0016a160, 0x23010a0f, 0x01500060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x40021404, 0x00100000, 0x1a00d405, 0x00100040, 0x53028a06, 0x01900060, 0x5b02ca06, 0x01900060, 0x5b02ca06, 0x01900060, 0x43020a04, 0x00100060, 0x1b00ca05, 0x00100060, 0x53028a07, 0x0190c060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050, 0x53028a0e, 0x01900070, 0x5b02ca0e, 0x01900070, 0x5b02ca0e, 0x01900070, 0x43020a0c, 0x00100070, 0x1b00ca0d, 0x00100070, 0x53028a0f, 0x0190c070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x40021404, 0x00100000, 0x1a00d405, 0x00100040, 0x5b02ca06, 0x01900060, 0x5b02ca06, 0x01900060, 0x53028a07, 0x0190c060, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x4002140c, 0x00100010, 0x1a00d40d, 0x00100050, 0x5b02ca0e, 0x01900070, 0x5b02ca0e, 0x01900070, 0x53028a0f, 0x0190c070, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; static const u16 b43_ntab_pilot_r3[] = { 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0xff08, 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0x80d5, 0xff0a, 0xff82, 0xffa0, 0xff28, 0xffff, 0xffff, 0xffff, 0xffff, 0xff82, 0xffa0, 0xff28, 0xff0a, 
0xffff, 0xffff, 0xffff, 0xffff, 0xf83f, 0xfa1f, 0xfa97, 0xfab5, 0xf2bd, 0xf0bf, 0xffff, 0xffff, 0xf017, 0xf815, 0xf215, 0xf095, 0xf035, 0xf01d, 0xffff, 0xffff, 0xff08, 0xff02, 0xff80, 0xff20, 0xff08, 0xff02, 0xff80, 0xff20, 0xf01f, 0xf817, 0xfa15, 0xf295, 0xf0b5, 0xf03d, 0xffff, 0xffff, 0xf82a, 0xfa0a, 0xfa82, 0xfaa0, 0xf2a8, 0xf0aa, 0xffff, 0xffff, 0xf002, 0xf800, 0xf200, 0xf080, 0xf020, 0xf008, 0xffff, 0xffff, 0xf00a, 0xf802, 0xfa00, 0xf280, 0xf0a0, 0xf028, 0xffff, 0xffff, }; static const u32 b43_ntab_tmap_r3[] = { 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0xf1111110, 0x11111111, 0x11f11111, 0x00000111, 0x11000000, 0x1111f111, 0x11111111, 0x111111f1, 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x000aa888, 0x88880000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0xa1111110, 0x11111111, 0x11c11111, 0x00000111, 0x11000000, 0x1111a111, 0x11111111, 0x111111a1, 0xa2222220, 0x22222222, 0x22c22222, 0x00000222, 0x22000000, 0x2222a222, 0x22222222, 0x222222a2, 0xf1111110, 0x11111111, 0x11f11111, 0x00011111, 0x11110000, 0x1111f111, 0x11111111, 0x111111f1, 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00088aaa, 0xaaaa0000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a, 0xaaa8aaa0, 0x8aaa8aaa, 0xaa8a8a8a, 0x000aaa88, 0x8aaa0000, 0xaaa8a888, 0x8aa88a8a, 0x8a88a888, 0x08080a00, 0x0a08080a, 0x080a0a08, 0x00080808, 0x080a0000, 0x080a0808, 0x080a0808, 0x0a0a0a08, 0xa0a0a0a0, 0x80a0a080, 0x8080a0a0, 0x00008080, 0x80a00000, 0x80a080a0, 0xa080a0a0, 0x8080a0a0, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x99999000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9, 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999, 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888, 0x22000000, 0x2222b222, 0x22222222, 0x222222b2, 0xb2222220, 0x22222222, 0x22d22222, 0x00000222, 0x11000000, 0x1111a111, 0x11111111, 0x111111a1, 0xa1111110, 0x11111111, 0x11c11111, 0x00000111, 0x33000000, 0x3333b333, 0x33333333, 0x333333b3, 0xb3333330, 0x33333333, 0x33d33333, 0x00000333, 0x22000000, 0x2222a222, 0x22222222, 0x222222a2, 0xa2222220, 0x22222222, 0x22c22222, 0x00000222, 0x99b99b00, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9, 0x9b99bb99, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999, 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888, 0x22222200, 0x2222f222, 0x22222222, 0x222222f2, 0x22222222, 0x22222222, 0x22f22222, 0x00000222, 0x11000000, 0x1111f111, 0x11111111, 0x11111111, 0xf1111111, 0x11111111, 0x11f11111, 0x01111111, 0xbb9bb900, 0xb9b9bb99, 0xb99bbbbb, 0xbbbb9b9b, 0xb9bb99bb, 0xb99999b9, 0xb9b9b99b, 0x00000bbb, 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a, 0xa8aa88aa, 0xa88888a8, 0xa8a8a88a, 0x0a888aaa, 0xaa000000, 0xa8a8aa88, 0xa88aaaaa, 0xaaaa8a8a, 0xa8aa88a0, 0xa88888a8, 0xa8a8a88a, 0x00000aaa, 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, 0xbbbbbb00, 0x999bbbbb, 0x9bb99b9b, 0xb9b9b9bb, 0xb9b99bbb, 0xb9b9b9bb, 0xb9bb9b99, 0x00000999, 0x8a000000, 0xaa88a888, 0xa88888aa, 0xa88a8a88, 0xa88aa88a, 0x88a8aaaa, 0xa8aa8aaa, 0x0888a88a, 0x0b0b0b00, 0x090b0b0b, 0x0b090b0b, 0x0909090b, 0x09090b0b, 0x09090b0b, 0x09090b09, 0x00000909, 0x0a000000, 0x0a080808, 0x080a080a, 0x080a0a08, 0x080a080a, 0x0808080a, 0x0a0a0a08, 0x0808080a, 0xb0b0b000, 0x9090b0b0, 0x90b09090, 0xb0b0b090, 0xb0b090b0, 0x90b0b0b0, 0xb0b09090, 0x00000090, 0x80000000, 0xa080a080, 0xa08080a0, 0xa0808080, 0xa080a080, 0x80a0a0a0, 0xa0a080a0, 0x00a0a0a0, 0x22000000, 
0x2222f222, 0x22222222, 0x222222f2, 0xf2222220, 0x22222222, 0x22f22222, 0x00000222, 0x11000000, 0x1111f111, 0x11111111, 0x111111f1, 0xf1111110, 0x11111111, 0x11f11111, 0x00000111, 0x33000000, 0x3333f333, 0x33333333, 0x333333f3, 0xf3333330, 0x33333333, 0x33f33333, 0x00000333, 0x22000000, 0x2222f222, 0x22222222, 0x222222f2, 0xf2222220, 0x22222222, 0x22f22222, 0x00000222, 0x99000000, 0x9b9b99bb, 0x9bb99999, 0x9999b9b9, 0x9b99bb90, 0x9bbbbb9b, 0x9b9b9bb9, 0x00000999, 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, 0x88888000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00aaa888, 0x88a88a00, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa88, 0x8aaaaa8a, 0x8a8a8aa8, 0x08aaa888, 0x11000000, 0x1111a111, 0x11111111, 0x111111a1, 0xa1111110, 0x11111111, 0x11c11111, 0x00000111, 0x11000000, 0x1111a111, 0x11111111, 0x111111a1, 0xa1111110, 0x11111111, 0x11c11111, 0x00000111, 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, 0x88000000, 0x8a8a88aa, 0x8aa88888, 0x8888a8a8, 0x8a88aa80, 0x8aaaaa8a, 0x8a8a8aa8, 0x00000888, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; static const u32 b43_ntab_intlevel_r3[] = { 0x00802070, 0x0671188d, 0x0a60192c, 
0x0a300e46, 0x00c1188d, 0x080024d2, 0x00000070, }; static const u32 b43_ntab_tdtrn_r3[] = { 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6, 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68, 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52, 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050, 0x061c061c, 0x0050ee68, 0xf592fe36, 0xfe5212f6, 0x00000c38, 0xfe5212f6, 0xf592fe36, 0x0050ee68, 0x061c061c, 0xee680050, 0xfe36f592, 0x12f6fe52, 0x0c380000, 0x12f6fe52, 0xfe36f592, 0xee680050, 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246, 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c, 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61, 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d, 0x05e305e3, 0x004def0c, 0xf5f3fe47, 0xfe611246, 0x00000bc7, 0xfe611246, 0xf5f3fe47, 0x004def0c, 0x05e305e3, 0xef0c004d, 0xfe47f5f3, 0x1246fe61, 0x0bc70000, 0x1246fe61, 0xfe47f5f3, 0xef0c004d, 0xfa58fa58, 0xf895043b, 0xff4c09c0, 0xfbc6ffa8, 0xfb84f384, 0x0798f6f9, 0x05760122, 0x058409f6, 0x0b500000, 0x05b7f542, 0x08860432, 0x06ddfee7, 0xfb84f384, 0xf9d90664, 0xf7e8025c, 0x00fff7bd, 0x05a805a8, 0xf7bd00ff, 0x025cf7e8, 0x0664f9d9, 0xf384fb84, 0xfee706dd, 0x04320886, 0xf54205b7, 0x00000b50, 0x09f60584, 0x01220576, 0xf6f90798, 0xf384fb84, 0xffa8fbc6, 0x09c0ff4c, 0x043bf895, 0x02d402d4, 0x07de0270, 0xfc96079c, 0xf90afe94, 0xfe00ff2c, 0x02d4065d, 0x092a0096, 0x0014fbb8, 0xfd2cfd2c, 0x076afb3c, 0x0096f752, 0xf991fd87, 0xfb2c0200, 0xfeb8f960, 0x08e0fc96, 0x049802a8, 0xfd2cfd2c, 0x02a80498, 0xfc9608e0, 0xf960feb8, 0x0200fb2c, 0xfd87f991, 0xf7520096, 0xfb3c076a, 0xfd2cfd2c, 0xfbb80014, 0x0096092a, 0x065d02d4, 0xff2cfe00, 0xfe94f90a, 0x079cfc96, 0x027007de, 0x02d402d4, 0x027007de, 0x079cfc96, 0xfe94f90a, 0xff2cfe00, 0x065d02d4, 0x0096092a, 0xfbb80014, 0xfd2cfd2c, 0xfb3c076a, 0xf7520096, 0xfd87f991, 0x0200fb2c, 0xf960feb8, 0xfc9608e0, 0x02a80498, 0xfd2cfd2c, 0x049802a8, 0x08e0fc96, 0xfeb8f960, 0xfb2c0200, 0xf991fd87, 0x0096f752, 0x076afb3c, 0xfd2cfd2c, 0x0014fbb8, 0x092a0096, 0x02d4065d, 0xfe00ff2c, 0xf90afe94, 0xfc96079c, 
0x07de0270, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d, 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf, 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8, 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7, 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3, 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841, 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608, 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759, 0x062a0000, 0xfefa0759, 0x08b80908, 0xf396fc2d, 0xf9d6045c, 0xfc4ef608, 0xf748f596, 0x07b207bf, 0x062a062a, 0xf84ef841, 0xf748f596, 0x03b209f8, 0xf9d6045c, 0x0c6a03d3, 0x08b80908, 0x0106f8a7, 0x062a0000, 0xfefaf8a7, 0x08b8f6f8, 0xf39603d3, 0xf9d6fba4, 0xfc4e09f8, 0xf7480a6a, 0x07b2f841, 0x062af9d6, 0xf84e07bf, 0xf7480a6a, 0x03b2f608, 0xf9d6fba4, 0x0c6afc2d, 0x08b8f6f8, 0x01060759, 0x061c061c, 0xff30009d, 0xffb21141, 0xfd87fb54, 0xf65dfe59, 
0x02eef99e, 0x0166f03c, 0xfff809b6, 0x000008a4, 0x000af42b, 0x00eff577, 0xfa840bf2, 0xfc02ff51, 0x08260f67, 0xfff0036f, 0x0842f9c3, 0x00000000, 0x063df7be, 0xfc910010, 0xf099f7da, 0x00af03fe, 0xf40e057c, 0x0a89ff11, 0x0bd5fff6, 0xf75c0000, 0xf64a0008, 0x0fc4fe9a, 0x0662fd12, 0x01a709a3, 0x04ac0279, 0xeebf004e, 0xff6300d0, 0xf9e4f9e4, 0x00d0ff63, 0x004eeebf, 0x027904ac, 0x09a301a7, 0xfd120662, 0xfe9a0fc4, 0x0008f64a, 0x0000f75c, 0xfff60bd5, 0xff110a89, 0x057cf40e, 0x03fe00af, 0xf7daf099, 0x0010fc91, 0xf7be063d, 0x00000000, 0xf9c30842, 0x036ffff0, 0x0f670826, 0xff51fc02, 0x0bf2fa84, 0xf57700ef, 0xf42b000a, 0x08a40000, 0x09b6fff8, 0xf03c0166, 0xf99e02ee, 0xfe59f65d, 0xfb54fd87, 0x1141ffb2, 0x009dff30, 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59, 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766, 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986, 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb, 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7, 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a, 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a, 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705, 0x05e30000, 0xff060705, 0x085408a0, 0xf425fc59, 0xfa1d042a, 0xfc78f67a, 0xf7acf60e, 0x075a0766, 0x05e305e3, 0xf8a6f89a, 0xf7acf60e, 0x03880986, 0xfa1d042a, 0x0bdb03a7, 0x085408a0, 0x00faf8fb, 0x05e30000, 0xff06f8fb, 0x0854f760, 0xf42503a7, 0xfa1dfbd6, 0xfc780986, 0xf7ac09f2, 0x075af89a, 0x05e3fa1d, 0xf8a60766, 0xf7ac09f2, 0x0388f67a, 0xfa1dfbd6, 0x0bdbfc59, 0x0854f760, 0x00fa0705, 0xfa58fa58, 0xf8f0fe00, 0x0448073d, 0xfdc9fe46, 0xf9910258, 0x089d0407, 0xfd5cf71a, 0x02affde0, 0x083e0496, 0xff5a0740, 0xff7afd97, 0x00fe01f1, 0x0009082e, 0xfa94ff75, 0xfecdf8ea, 0xffb0f693, 0xfd2cfa58, 0x0433ff16, 0xfba405dd, 0xfa610341, 0x06a606cb, 0x0039fd2d, 0x0677fa97, 0x01fa05e0, 0xf896003e, 0x075a068b, 0x012cfc3e, 0xfa23f98d, 0xfc7cfd43, 0xff90fc0d, 0x01c10982, 0x00c601d6, 0xfd2cfd2c, 0x01d600c6, 0x098201c1, 0xfc0dff90, 0xfd43fc7c, 0xf98dfa23, 0xfc3e012c, 0x068b075a, 0x003ef896, 0x05e001fa, 0xfa970677, 
0xfd2d0039, 0x06cb06a6, 0x0341fa61, 0x05ddfba4, 0xff160433, 0xfa58fd2c, 0xf693ffb0, 0xf8eafecd, 0xff75fa94, 0x082e0009, 0x01f100fe, 0xfd97ff7a, 0x0740ff5a, 0x0496083e, 0xfde002af, 0xf71afd5c, 0x0407089d, 0x0258f991, 0xfe46fdc9, 0x073d0448, 0xfe00f8f0, 0xfd2cfd2c, 0xfce00500, 0xfc09fddc, 0xfe680157, 0x04c70571, 0xfc3aff21, 0xfcd70228, 0x056d0277, 0x0200fe00, 0x0022f927, 0xfe3c032b, 0xfc44ff3c, 0x03e9fbdb, 0x04570313, 0x04c9ff5c, 0x000d03b8, 0xfa580000, 0xfbe900d2, 0xf9d0fe0b, 0x0125fdf9, 0x042501bf, 0x0328fa2b, 0xffa902f0, 0xfa250157, 0x0200fe00, 0x03740438, 0xff0405fd, 0x030cfe52, 0x0037fb39, 0xff6904c5, 0x04f8fd23, 0xfd31fc1b, 0xfd2cfd2c, 0xfc1bfd31, 0xfd2304f8, 0x04c5ff69, 0xfb390037, 0xfe52030c, 0x05fdff04, 0x04380374, 0xfe000200, 0x0157fa25, 0x02f0ffa9, 0xfa2b0328, 0x01bf0425, 0xfdf90125, 0xfe0bf9d0, 0x00d2fbe9, 0x0000fa58, 0x03b8000d, 0xff5c04c9, 0x03130457, 0xfbdb03e9, 0xff3cfc44, 0x032bfe3c, 0xf9270022, 0xfe000200, 0x0277056d, 0x0228fcd7, 0xff21fc3a, 0x057104c7, 0x0157fe68, 0xfddcfc09, 0x0500fce0, 0xfd2cfd2c, 0x0500fce0, 0xfddcfc09, 0x0157fe68, 0x057104c7, 0xff21fc3a, 0x0228fcd7, 0x0277056d, 0xfe000200, 0xf9270022, 0x032bfe3c, 0xff3cfc44, 0xfbdb03e9, 0x03130457, 0xff5c04c9, 0x03b8000d, 0x0000fa58, 0x00d2fbe9, 0xfe0bf9d0, 0xfdf90125, 0x01bf0425, 0xfa2b0328, 0x02f0ffa9, 0x0157fa25, 0xfe000200, 0x04380374, 0x05fdff04, 0xfe52030c, 0xfb390037, 0x04c5ff69, 0xfd2304f8, 0xfc1bfd31, 0xfd2cfd2c, 0xfd31fc1b, 0x04f8fd23, 0xff6904c5, 0x0037fb39, 0x030cfe52, 0xff0405fd, 0x03740438, 0x0200fe00, 0xfa250157, 0xffa902f0, 0x0328fa2b, 0x042501bf, 0x0125fdf9, 0xf9d0fe0b, 0xfbe900d2, 0xfa580000, 0x000d03b8, 0x04c9ff5c, 0x04570313, 0x03e9fbdb, 0xfc44ff3c, 0xfe3c032b, 0x0022f927, 0x0200fe00, 0x056d0277, 0xfcd70228, 0xfc3aff21, 0x04c70571, 0xfe680157, 0xfc09fddc, 0xfce00500, 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e, 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c, 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926, 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942, 0x05a80000, 
0xff10f942, 0x0800f7b6, 0xf49c0382, 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4, 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da, 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be, 0x05a80000, 0xff1006be, 0x0800084a, 0xf49cfc7e, 0xfa580400, 0xfc9cf6da, 0xf800f672, 0x0710071c, 0x05a805a8, 0xf8f0f8e4, 0xf800f672, 0x03640926, 0xfa580400, 0x0b640382, 0x0800084a, 0x00f0f942, 0x05a80000, 0xff10f942, 0x0800f7b6, 0xf49c0382, 0xfa58fc00, 0xfc9c0926, 0xf800098e, 0x0710f8e4, 0x05a8fa58, 0xf8f0071c, 0xf800098e, 0x0364f6da, 0xfa58fc00, 0x0b64fc7e, 0x0800f7b6, 0x00f006be, }; static const u32 b43_ntab_noisevar0_r3[] = { 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 
0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, }; static const u32 b43_ntab_noisevar1_r3[] = { 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 
0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 
0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, 0x02110211, 0x0000014d, }; static const u16 b43_ntab_mcs_r3[] = { 0x0000, 0x0008, 0x000a, 0x0010, 0x0012, 0x0019, 0x001a, 0x001c, 0x0080, 0x0088, 0x008a, 0x0090, 0x0092, 0x0099, 0x009a, 0x009c, 0x0100, 0x0108, 0x010a, 0x0110, 0x0112, 0x0119, 0x011a, 0x011c, 0x0180, 0x0188, 0x018a, 0x0190, 0x0192, 0x0199, 0x019a, 0x019c, 0x0000, 0x0098, 0x00a0, 0x00a8, 0x009a, 0x00a2, 0x00aa, 0x0120, 0x0128, 0x0128, 0x0130, 0x0138, 0x0138, 0x0140, 0x0122, 0x012a, 0x012a, 0x0132, 0x013a, 0x013a, 0x0142, 0x01a8, 0x01b0, 0x01b8, 0x01b0, 0x01b8, 0x01c0, 0x01c8, 0x01c0, 0x01c8, 0x01d0, 0x01d0, 0x01d8, 0x01aa, 0x01b2, 0x01ba, 0x01b2, 0x01ba, 0x01c2, 0x01ca, 0x01c2, 0x01ca, 0x01d2, 0x01d2, 0x01da, 0x0001, 0x0002, 0x0004, 0x0009, 0x000c, 0x0011, 0x0014, 0x0018, 0x0020, 0x0021, 0x0022, 0x0024, 0x0081, 0x0082, 0x0084, 0x0089, 0x008c, 0x0091, 0x0094, 0x0098, 0x00a0, 0x00a1, 0x00a2, 0x00a4, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, 0x0007, }; static const u32 b43_ntab_tdi20a0_r3[] = { 0x00091226, 0x000a1429, 0x000b56ad, 0x000c58b0, 0x000d5ab3, 
0x000e9cb6, 0x000f9eba, 0x0000c13d, 0x00020301, 0x00030504, 0x00040708, 0x0005090b, 0x00064b8e, 0x00095291, 0x000a5494, 0x000b9718, 0x000c9927, 0x000d9b2a, 0x000edd2e, 0x000fdf31, 0x000101b4, 0x000243b7, 0x000345bb, 0x000447be, 0x00058982, 0x00068c05, 0x00099309, 0x000a950c, 0x000bd78f, 0x000cd992, 0x000ddb96, 0x000f1d99, 0x00005fa8, 0x0001422c, 0x0002842f, 0x00038632, 0x00048835, 0x0005ca38, 0x0006ccbc, 0x0009d3bf, 0x000b1603, 0x000c1806, 0x000d1a0a, 0x000e1c0d, 0x000f5e10, 0x00008093, 0x00018297, 0x0002c49a, 0x0003c680, 0x0004c880, 0x00060b00, 0x00070d00, 0x00000000, 0x00000000, 0x00000000, }; static const u32 b43_ntab_tdi20a1_r3[] = { 0x00014b26, 0x00028d29, 0x000393ad, 0x00049630, 0x0005d833, 0x0006da36, 0x00099c3a, 0x000a9e3d, 0x000bc081, 0x000cc284, 0x000dc488, 0x000f068b, 0x0000488e, 0x00018b91, 0x0002d214, 0x0003d418, 0x0004d6a7, 0x000618aa, 0x00071aae, 0x0009dcb1, 0x000b1eb4, 0x000c0137, 0x000d033b, 0x000e053e, 0x000f4702, 0x00008905, 0x00020c09, 0x0003128c, 0x0004148f, 0x00051712, 0x00065916, 0x00091b19, 0x000a1d28, 0x000b5f2c, 0x000c41af, 0x000d43b2, 0x000e85b5, 0x000f87b8, 0x0000c9bc, 0x00024cbf, 0x00035303, 0x00045506, 0x0005978a, 0x0006998d, 0x00095b90, 0x000a5d93, 0x000b9f97, 0x000c821a, 0x000d8400, 0x000ec600, 0x000fc800, 0x00010a00, 0x00000000, 0x00000000, 0x00000000, }; static const u32 b43_ntab_tdi40a0_r3[] = { 0x0011a346, 0x00136ccf, 0x0014f5d9, 0x001641e2, 0x0017cb6b, 0x00195475, 0x001b2383, 0x001cad0c, 0x001e7616, 0x0000821f, 0x00020ba8, 0x0003d4b2, 0x00056447, 0x00072dd0, 0x0008b6da, 0x000a02e3, 0x000b8c6c, 0x000d15f6, 0x0011e484, 0x0013ae0d, 0x00153717, 0x00168320, 0x00180ca9, 0x00199633, 0x001b6548, 0x001ceed1, 0x001eb7db, 0x0000c3e4, 0x00024d6d, 0x000416f7, 0x0005a585, 0x00076f0f, 0x0008f818, 0x000a4421, 0x000bcdab, 0x000d9734, 0x00122649, 0x0013efd2, 0x001578dc, 0x0016c4e5, 0x00184e6e, 0x001a17f8, 0x001ba686, 0x001d3010, 0x001ef999, 0x00010522, 0x00028eac, 0x00045835, 0x0005e74a, 0x0007b0d3, 0x00093a5d, 0x000a85e6, 0x000c0f6f, 0x000dd8f9, 
0x00126787, 0x00143111, 0x0015ba9a, 0x00170623, 0x00188fad, 0x001a5936, 0x001be84b, 0x001db1d4, 0x001f3b5e, 0x000146e7, 0x00031070, 0x000499fa, 0x00062888, 0x0007f212, 0x00097b9b, 0x000ac7a4, 0x000c50ae, 0x000e1a37, 0x0012a94c, 0x001472d5, 0x0015fc5f, 0x00174868, 0x0018d171, 0x001a9afb, 0x001c2989, 0x001df313, 0x001f7c9c, 0x000188a5, 0x000351af, 0x0004db38, 0x0006aa4d, 0x000833d7, 0x0009bd60, 0x000b0969, 0x000c9273, 0x000e5bfc, 0x00132a8a, 0x0014b414, 0x00163d9d, 0x001789a6, 0x001912b0, 0x001adc39, 0x001c6bce, 0x001e34d8, 0x001fbe61, 0x0001ca6a, 0x00039374, 0x00051cfd, 0x0006ec0b, 0x00087515, 0x0009fe9e, 0x000b4aa7, 0x000cd3b1, 0x000e9d3a, 0x00000000, 0x00000000, }; static const u32 b43_ntab_tdi40a1_r3[] = { 0x001edb36, 0x000129ca, 0x0002b353, 0x00047cdd, 0x0005c8e6, 0x000791ef, 0x00091bf9, 0x000aaa07, 0x000c3391, 0x000dfd1a, 0x00120923, 0x0013d22d, 0x00155c37, 0x0016eacb, 0x00187454, 0x001a3dde, 0x001b89e7, 0x001d12f0, 0x001f1cfa, 0x00016b88, 0x00033492, 0x0004be1b, 0x00060a24, 0x0007d32e, 0x00095d38, 0x000aec4c, 0x000c7555, 0x000e3edf, 0x00124ae8, 0x001413f1, 0x0015a37b, 0x00172c89, 0x0018b593, 0x001a419c, 0x001bcb25, 0x001d942f, 0x001f63b9, 0x0001ad4d, 0x00037657, 0x0004c260, 0x00068be9, 0x000814f3, 0x0009a47c, 0x000b2d8a, 0x000cb694, 0x000e429d, 0x00128c26, 0x001455b0, 0x0015e4ba, 0x00176e4e, 0x0018f758, 0x001a8361, 0x001c0cea, 0x001dd674, 0x001fa57d, 0x0001ee8b, 0x0003b795, 0x0005039e, 0x0006cd27, 0x000856b1, 0x0009e5c6, 0x000b6f4f, 0x000cf859, 0x000e8462, 0x00130deb, 0x00149775, 0x00162603, 0x0017af8c, 0x00193896, 0x001ac49f, 0x001c4e28, 0x001e17b2, 0x0000a6c7, 0x00023050, 0x0003f9da, 0x00054563, 0x00070eec, 0x00089876, 0x000a2704, 0x000bb08d, 0x000d3a17, 0x001185a0, 0x00134f29, 0x0014d8b3, 0x001667c8, 0x0017f151, 0x00197adb, 0x001b0664, 0x001c8fed, 0x001e5977, 0x0000e805, 0x0002718f, 0x00043b18, 0x000586a1, 0x0007502b, 0x0008d9b4, 0x000a68c9, 0x000bf252, 0x000dbbdc, 0x0011c7e5, 0x001390ee, 0x00151a78, 0x0016a906, 0x00183290, 0x0019bc19, 0x001b4822, 
0x001cd12c, 0x001e9ab5, 0x00000000, 0x00000000, }; static const u32 b43_ntab_pilotlt_r3[] = { 0x76540213, 0x62407351, 0x76543210, 0x76540213, 0x76540213, 0x76430521, }; static const u32 b43_ntab_channelest_r3[] = { 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x44444444, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, 0x10101010, }; static const u8 b43_ntab_framelookup_r3[] = { 0x02, 0x04, 0x14, 0x14, 0x03, 0x05, 0x16, 0x16, 0x0a, 0x0c, 0x1c, 0x1c, 0x0b, 0x0d, 0x1e, 0x1e, 0x06, 0x08, 0x18, 0x18, 0x07, 0x09, 0x1a, 0x1a, 0x0e, 0x10, 0x20, 0x28, 0x0f, 0x11, 0x22, 0x2a, }; static const u8 b43_ntab_estimatepowerlt0_r3[] = { 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51, 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c, 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36, 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b, 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a, 
0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd, }; static const u8 b43_ntab_estimatepowerlt1_r3[] = { 0x55, 0x54, 0x54, 0x53, 0x52, 0x52, 0x51, 0x51, 0x50, 0x4f, 0x4f, 0x4e, 0x4e, 0x4d, 0x4c, 0x4c, 0x4b, 0x4a, 0x49, 0x49, 0x48, 0x47, 0x46, 0x46, 0x45, 0x44, 0x43, 0x42, 0x41, 0x40, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x3a, 0x39, 0x38, 0x37, 0x36, 0x35, 0x33, 0x32, 0x31, 0x2f, 0x2e, 0x2c, 0x2b, 0x29, 0x27, 0x25, 0x23, 0x21, 0x1f, 0x1d, 0x1a, 0x18, 0x15, 0x12, 0x0e, 0x0b, 0x07, 0x02, 0xfd, }; static const u8 b43_ntab_adjustpower0_r3[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; static const u8 b43_ntab_adjustpower1_r3[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, }; static const u32 b43_ntab_gainctl0_r3[] = { 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e, 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037, 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031, 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040, 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039, 0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033, 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e, 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037, 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031, 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c, 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e, 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037, 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031, 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c, 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042, 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b, 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034, 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f, 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e, 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037, 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031, 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c, 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027, 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023, 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e, 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037, 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031, 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c, 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027, 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023, 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f, 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c, }; static const u32 b43_ntab_gainctl1_r3[] = { 0x5bf70044, 0x5bf70042, 0x5bf70040, 0x5bf7003e, 0x5bf7003c, 0x5bf7003b, 0x5bf70039, 0x5bf70037, 0x5bf70036, 0x5bf70034, 0x5bf70033, 0x5bf70031, 0x5bf70030, 0x5ba70044, 0x5ba70042, 0x5ba70040, 0x5ba7003e, 0x5ba7003c, 0x5ba7003b, 0x5ba70039, 
0x5ba70037, 0x5ba70036, 0x5ba70034, 0x5ba70033, 0x5b770044, 0x5b770042, 0x5b770040, 0x5b77003e, 0x5b77003c, 0x5b77003b, 0x5b770039, 0x5b770037, 0x5b770036, 0x5b770034, 0x5b770033, 0x5b770031, 0x5b770030, 0x5b77002f, 0x5b77002d, 0x5b77002c, 0x5b470044, 0x5b470042, 0x5b470040, 0x5b47003e, 0x5b47003c, 0x5b47003b, 0x5b470039, 0x5b470037, 0x5b470036, 0x5b470034, 0x5b470033, 0x5b470031, 0x5b470030, 0x5b47002f, 0x5b47002d, 0x5b47002c, 0x5b47002b, 0x5b47002a, 0x5b270044, 0x5b270042, 0x5b270040, 0x5b27003e, 0x5b27003c, 0x5b27003b, 0x5b270039, 0x5b270037, 0x5b270036, 0x5b270034, 0x5b270033, 0x5b270031, 0x5b270030, 0x5b27002f, 0x5b170044, 0x5b170042, 0x5b170040, 0x5b17003e, 0x5b17003c, 0x5b17003b, 0x5b170039, 0x5b170037, 0x5b170036, 0x5b170034, 0x5b170033, 0x5b170031, 0x5b170030, 0x5b17002f, 0x5b17002d, 0x5b17002c, 0x5b17002b, 0x5b17002a, 0x5b170028, 0x5b170027, 0x5b170026, 0x5b170025, 0x5b170024, 0x5b170023, 0x5b070044, 0x5b070042, 0x5b070040, 0x5b07003e, 0x5b07003c, 0x5b07003b, 0x5b070039, 0x5b070037, 0x5b070036, 0x5b070034, 0x5b070033, 0x5b070031, 0x5b070030, 0x5b07002f, 0x5b07002d, 0x5b07002c, 0x5b07002b, 0x5b07002a, 0x5b070028, 0x5b070027, 0x5b070026, 0x5b070025, 0x5b070024, 0x5b070023, 0x5b070022, 0x5b070021, 0x5b070020, 0x5b07001f, 0x5b07001e, 0x5b07001d, 0x5b07001d, 0x5b07001c, }; static const u32 b43_ntab_iqlt0_r3[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; static const u32 b43_ntab_iqlt1_r3[] = { 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 
0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, }; static const u16 b43_ntab_loftlt0_r3[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u16 b43_ntab_loftlt1_r3[] = { 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 
0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, }; static const u16 b43_ntab_antswctl2g_r3[4][32] = { { 0x0082, 0x0082, 0x0211, 0x0222, 0x0328, 0x0000, 0x0000, 0x0000, 0x0144, 0x0000, 0x0000, 0x0000, 0x0188, 0x0000, 0x0000, 0x0000, 0x0082, 0x0082, 0x0211, 0x0222, 0x0328, 0x0000, 0x0000, 0x0000, 0x0144, 0x0000, 0x0000, 0x0000, 0x0188, 0x0000, 0x0000, 0x0000, }, { 0x0022, 0x0022, 0x0011, 0x0022, 0x0022, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x0000, 0x0000, 0x0022, 0x0000, 0x0000, 0x0000, 0x0022, 0x0022, 0x0011, 0x0022, 0x0022, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x0000, 0x0000, 0x0022, 0x0000, 0x0000, 0x0000, }, { 0x0088, 0x0088, 0x0044, 0x0088, 0x0088, 0x0000, 0x0000, 0x0000, 0x0044, 0x0000, 0x0000, 0x0000, 0x0088, 0x0000, 0x0000, 0x0000, 0x0088, 0x0088, 0x0044, 0x0088, 0x0088, 0x0000, 0x0000, 0x0000, 0x0044, 0x0000, 0x0000, 0x0000, 0x0088, 0x0000, 0x0000, 0x0000, }, { 0x0022, 0x0022, 0x0011, 0x0022, 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x0000, 0x0000, 0x0022, 0x0000, 0x0000, 0x03cc, 0x0022, 0x0022, 0x0011, 0x0022, 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x0000, 0x0000, 0x0022, 0x0000, 0x0000, 0x03cc, } }; static const u32 b43_ntab_tx_gain_rev0_1_2[] = { 0x03cc2b44, 0x03cc2b42, 0x03cc2a44, 0x03cc2a42, 0x03cc2944, 0x03c82b44, 0x03c82b42, 0x03c82a44, 0x03c82a42, 0x03c82944, 0x03c82942, 0x03c82844, 0x03c82842, 0x03c42b44, 0x03c42b42, 0x03c42a44, 0x03c42a42, 0x03c42944, 0x03c42942, 0x03c42844, 
0x03c42842, 0x03c42744, 0x03c42742, 0x03c42644, 0x03c42642, 0x03c42544, 0x03c42542, 0x03c42444, 0x03c42442, 0x03c02b44, 0x03c02b42, 0x03c02a44, 0x03c02a42, 0x03c02944, 0x03c02942, 0x03c02844, 0x03c02842, 0x03c02744, 0x03c02742, 0x03b02b44, 0x03b02b42, 0x03b02a44, 0x03b02a42, 0x03b02944, 0x03b02942, 0x03b02844, 0x03b02842, 0x03b02744, 0x03b02742, 0x03b02644, 0x03b02642, 0x03b02544, 0x03b02542, 0x03a02b44, 0x03a02b42, 0x03a02a44, 0x03a02a42, 0x03a02944, 0x03a02942, 0x03a02844, 0x03a02842, 0x03a02744, 0x03a02742, 0x03902b44, 0x03902b42, 0x03902a44, 0x03902a42, 0x03902944, 0x03902942, 0x03902844, 0x03902842, 0x03902744, 0x03902742, 0x03902644, 0x03902642, 0x03902544, 0x03902542, 0x03802b44, 0x03802b42, 0x03802a44, 0x03802a42, 0x03802944, 0x03802942, 0x03802844, 0x03802842, 0x03802744, 0x03802742, 0x03802644, 0x03802642, 0x03802544, 0x03802542, 0x03802444, 0x03802442, 0x03802344, 0x03802342, 0x03802244, 0x03802242, 0x03802144, 0x03802142, 0x03802044, 0x03802042, 0x03801f44, 0x03801f42, 0x03801e44, 0x03801e42, 0x03801d44, 0x03801d42, 0x03801c44, 0x03801c42, 0x03801b44, 0x03801b42, 0x03801a44, 0x03801a42, 0x03801944, 0x03801942, 0x03801844, 0x03801842, 0x03801744, 0x03801742, 0x03801644, 0x03801642, 0x03801544, 0x03801542, 0x03801444, 0x03801442, 0x03801344, 0x03801342, 0x00002b00, }; static const u32 b43_ntab_tx_gain_rev3plus_2ghz[] = { 0x1f410044, 0x1f410042, 0x1f410040, 0x1f41003e, 0x1f41003c, 0x1f41003b, 0x1f410039, 0x1f410037, 0x1e410044, 0x1e410042, 0x1e410040, 0x1e41003e, 0x1e41003c, 0x1e41003b, 0x1e410039, 0x1e410037, 0x1d410044, 0x1d410042, 0x1d410040, 0x1d41003e, 0x1d41003c, 0x1d41003b, 0x1d410039, 0x1d410037, 0x1c410044, 0x1c410042, 0x1c410040, 0x1c41003e, 0x1c41003c, 0x1c41003b, 0x1c410039, 0x1c410037, 0x1b410044, 0x1b410042, 0x1b410040, 0x1b41003e, 0x1b41003c, 0x1b41003b, 0x1b410039, 0x1b410037, 0x1a410044, 0x1a410042, 0x1a410040, 0x1a41003e, 0x1a41003c, 0x1a41003b, 0x1a410039, 0x1a410037, 0x19410044, 0x19410042, 0x19410040, 0x1941003e, 0x1941003c, 
0x1941003b, 0x19410039, 0x19410037, 0x18410044, 0x18410042, 0x18410040, 0x1841003e, 0x1841003c, 0x1841003b, 0x18410039, 0x18410037, 0x17410044, 0x17410042, 0x17410040, 0x1741003e, 0x1741003c, 0x1741003b, 0x17410039, 0x17410037, 0x16410044, 0x16410042, 0x16410040, 0x1641003e, 0x1641003c, 0x1641003b, 0x16410039, 0x16410037, 0x15410044, 0x15410042, 0x15410040, 0x1541003e, 0x1541003c, 0x1541003b, 0x15410039, 0x15410037, 0x14410044, 0x14410042, 0x14410040, 0x1441003e, 0x1441003c, 0x1441003b, 0x14410039, 0x14410037, 0x13410044, 0x13410042, 0x13410040, 0x1341003e, 0x1341003c, 0x1341003b, 0x13410039, 0x13410037, 0x12410044, 0x12410042, 0x12410040, 0x1241003e, 0x1241003c, 0x1241003b, 0x12410039, 0x12410037, 0x11410044, 0x11410042, 0x11410040, 0x1141003e, 0x1141003c, 0x1141003b, 0x11410039, 0x11410037, 0x10410044, 0x10410042, 0x10410040, 0x1041003e, 0x1041003c, 0x1041003b, 0x10410039, 0x10410037, }; static const u32 b43_ntab_tx_gain_rev3_5ghz[] = { 0xcff70044, 0xcff70042, 0xcff70040, 0xcff7003e, 0xcff7003c, 0xcff7003b, 0xcff70039, 0xcff70037, 0xcef70044, 0xcef70042, 0xcef70040, 0xcef7003e, 0xcef7003c, 0xcef7003b, 0xcef70039, 0xcef70037, 0xcdf70044, 0xcdf70042, 0xcdf70040, 0xcdf7003e, 0xcdf7003c, 0xcdf7003b, 0xcdf70039, 0xcdf70037, 0xccf70044, 0xccf70042, 0xccf70040, 0xccf7003e, 0xccf7003c, 0xccf7003b, 0xccf70039, 0xccf70037, 0xcbf70044, 0xcbf70042, 0xcbf70040, 0xcbf7003e, 0xcbf7003c, 0xcbf7003b, 0xcbf70039, 0xcbf70037, 0xcaf70044, 0xcaf70042, 0xcaf70040, 0xcaf7003e, 0xcaf7003c, 0xcaf7003b, 0xcaf70039, 0xcaf70037, 0xc9f70044, 0xc9f70042, 0xc9f70040, 0xc9f7003e, 0xc9f7003c, 0xc9f7003b, 0xc9f70039, 0xc9f70037, 0xc8f70044, 0xc8f70042, 0xc8f70040, 0xc8f7003e, 0xc8f7003c, 0xc8f7003b, 0xc8f70039, 0xc8f70037, 0xc7f70044, 0xc7f70042, 0xc7f70040, 0xc7f7003e, 0xc7f7003c, 0xc7f7003b, 0xc7f70039, 0xc7f70037, 0xc6f70044, 0xc6f70042, 0xc6f70040, 0xc6f7003e, 0xc6f7003c, 0xc6f7003b, 0xc6f70039, 0xc6f70037, 0xc5f70044, 0xc5f70042, 0xc5f70040, 0xc5f7003e, 0xc5f7003c, 0xc5f7003b, 0xc5f70039, 
0xc5f70037, 0xc4f70044, 0xc4f70042, 0xc4f70040, 0xc4f7003e, 0xc4f7003c, 0xc4f7003b, 0xc4f70039, 0xc4f70037, 0xc3f70044, 0xc3f70042, 0xc3f70040, 0xc3f7003e, 0xc3f7003c, 0xc3f7003b, 0xc3f70039, 0xc3f70037, 0xc2f70044, 0xc2f70042, 0xc2f70040, 0xc2f7003e, 0xc2f7003c, 0xc2f7003b, 0xc2f70039, 0xc2f70037, 0xc1f70044, 0xc1f70042, 0xc1f70040, 0xc1f7003e, 0xc1f7003c, 0xc1f7003b, 0xc1f70039, 0xc1f70037, 0xc0f70044, 0xc0f70042, 0xc0f70040, 0xc0f7003e, 0xc0f7003c, 0xc0f7003b, 0xc0f70039, 0xc0f70037, }; static const u32 b43_ntab_tx_gain_rev4_5ghz[] = { 0x2ff20044, 0x2ff20042, 0x2ff20040, 0x2ff2003e, 0x2ff2003c, 0x2ff2003b, 0x2ff20039, 0x2ff20037, 0x2ef20044, 0x2ef20042, 0x2ef20040, 0x2ef2003e, 0x2ef2003c, 0x2ef2003b, 0x2ef20039, 0x2ef20037, 0x2df20044, 0x2df20042, 0x2df20040, 0x2df2003e, 0x2df2003c, 0x2df2003b, 0x2df20039, 0x2df20037, 0x2cf20044, 0x2cf20042, 0x2cf20040, 0x2cf2003e, 0x2cf2003c, 0x2cf2003b, 0x2cf20039, 0x2cf20037, 0x2bf20044, 0x2bf20042, 0x2bf20040, 0x2bf2003e, 0x2bf2003c, 0x2bf2003b, 0x2bf20039, 0x2bf20037, 0x2af20044, 0x2af20042, 0x2af20040, 0x2af2003e, 0x2af2003c, 0x2af2003b, 0x2af20039, 0x2af20037, 0x29f20044, 0x29f20042, 0x29f20040, 0x29f2003e, 0x29f2003c, 0x29f2003b, 0x29f20039, 0x29f20037, 0x28f20044, 0x28f20042, 0x28f20040, 0x28f2003e, 0x28f2003c, 0x28f2003b, 0x28f20039, 0x28f20037, 0x27f20044, 0x27f20042, 0x27f20040, 0x27f2003e, 0x27f2003c, 0x27f2003b, 0x27f20039, 0x27f20037, 0x26f20044, 0x26f20042, 0x26f20040, 0x26f2003e, 0x26f2003c, 0x26f2003b, 0x26f20039, 0x26f20037, 0x25f20044, 0x25f20042, 0x25f20040, 0x25f2003e, 0x25f2003c, 0x25f2003b, 0x25f20039, 0x25f20037, 0x24f20044, 0x24f20042, 0x24f20040, 0x24f2003e, 0x24f2003c, 0x24f2003b, 0x24f20039, 0x24f20038, 0x23f20041, 0x23f20040, 0x23f2003f, 0x23f2003e, 0x23f2003c, 0x23f2003b, 0x23f20039, 0x23f20037, 0x22f20044, 0x22f20042, 0x22f20040, 0x22f2003e, 0x22f2003c, 0x22f2003b, 0x22f20039, 0x22f20037, 0x21f20044, 0x21f20042, 0x21f20040, 0x21f2003e, 0x21f2003c, 0x21f2003b, 0x21f20039, 0x21f20037, 0x20d20043, 
0x20d20041, 0x20d2003e, 0x20d2003c, 0x20d2003a, 0x20d20038, 0x20d20036, 0x20d20034, }; static const u32 b43_ntab_tx_gain_rev5plus_5ghz[] = { 0x0f62004a, 0x0f620048, 0x0f620046, 0x0f620044, 0x0f620042, 0x0f620040, 0x0f62003e, 0x0f62003c, 0x0e620044, 0x0e620042, 0x0e620040, 0x0e62003e, 0x0e62003c, 0x0e62003d, 0x0e62003b, 0x0e62003a, 0x0d620043, 0x0d620041, 0x0d620040, 0x0d62003e, 0x0d62003d, 0x0d62003c, 0x0d62003b, 0x0d62003a, 0x0c620041, 0x0c620040, 0x0c62003f, 0x0c62003e, 0x0c62003c, 0x0c62003b, 0x0c620039, 0x0c620037, 0x0b620046, 0x0b620044, 0x0b620042, 0x0b620040, 0x0b62003e, 0x0b62003c, 0x0b62003b, 0x0b62003a, 0x0a620041, 0x0a620040, 0x0a62003e, 0x0a62003c, 0x0a62003b, 0x0a62003a, 0x0a620039, 0x0a620038, 0x0962003e, 0x0962003d, 0x0962003c, 0x0962003b, 0x09620039, 0x09620037, 0x09620035, 0x09620033, 0x08620044, 0x08620042, 0x08620040, 0x0862003e, 0x0862003c, 0x0862003b, 0x0862003a, 0x08620039, 0x07620043, 0x07620042, 0x07620040, 0x0762003f, 0x0762003d, 0x0762003b, 0x0762003a, 0x07620039, 0x0662003e, 0x0662003d, 0x0662003c, 0x0662003b, 0x06620039, 0x06620037, 0x06620035, 0x06620033, 0x05620046, 0x05620044, 0x05620042, 0x05620040, 0x0562003e, 0x0562003c, 0x0562003b, 0x05620039, 0x04620044, 0x04620042, 0x04620040, 0x0462003e, 0x0462003c, 0x0462003b, 0x04620039, 0x04620038, 0x0362003c, 0x0362003b, 0x0362003a, 0x03620039, 0x03620038, 0x03620037, 0x03620035, 0x03620033, 0x0262004c, 0x0262004a, 0x02620048, 0x02620047, 0x02620046, 0x02620044, 0x02620043, 0x02620042, 0x0162004a, 0x01620048, 0x01620046, 0x01620044, 0x01620043, 0x01620042, 0x01620041, 0x01620040, 0x00620042, 0x00620040, 0x0062003e, 0x0062003c, 0x0062003b, 0x00620039, 0x00620037, 0x00620035, }; static const u32 txpwrctrl_tx_gain_ipa[] = { 0x5ff7002d, 0x5ff7002b, 0x5ff7002a, 0x5ff70029, 0x5ff70028, 0x5ff70027, 0x5ff70026, 0x5ff70025, 0x5ef7002d, 0x5ef7002b, 0x5ef7002a, 0x5ef70029, 0x5ef70028, 0x5ef70027, 0x5ef70026, 0x5ef70025, 0x5df7002d, 0x5df7002b, 0x5df7002a, 0x5df70029, 0x5df70028, 0x5df70027, 
0x5df70026, 0x5df70025, 0x5cf7002d, 0x5cf7002b, 0x5cf7002a, 0x5cf70029, 0x5cf70028, 0x5cf70027, 0x5cf70026, 0x5cf70025, 0x5bf7002d, 0x5bf7002b, 0x5bf7002a, 0x5bf70029, 0x5bf70028, 0x5bf70027, 0x5bf70026, 0x5bf70025, 0x5af7002d, 0x5af7002b, 0x5af7002a, 0x5af70029, 0x5af70028, 0x5af70027, 0x5af70026, 0x5af70025, 0x59f7002d, 0x59f7002b, 0x59f7002a, 0x59f70029, 0x59f70028, 0x59f70027, 0x59f70026, 0x59f70025, 0x58f7002d, 0x58f7002b, 0x58f7002a, 0x58f70029, 0x58f70028, 0x58f70027, 0x58f70026, 0x58f70025, 0x57f7002d, 0x57f7002b, 0x57f7002a, 0x57f70029, 0x57f70028, 0x57f70027, 0x57f70026, 0x57f70025, 0x56f7002d, 0x56f7002b, 0x56f7002a, 0x56f70029, 0x56f70028, 0x56f70027, 0x56f70026, 0x56f70025, 0x55f7002d, 0x55f7002b, 0x55f7002a, 0x55f70029, 0x55f70028, 0x55f70027, 0x55f70026, 0x55f70025, 0x54f7002d, 0x54f7002b, 0x54f7002a, 0x54f70029, 0x54f70028, 0x54f70027, 0x54f70026, 0x54f70025, 0x53f7002d, 0x53f7002b, 0x53f7002a, 0x53f70029, 0x53f70028, 0x53f70027, 0x53f70026, 0x53f70025, 0x52f7002d, 0x52f7002b, 0x52f7002a, 0x52f70029, 0x52f70028, 0x52f70027, 0x52f70026, 0x52f70025, 0x51f7002d, 0x51f7002b, 0x51f7002a, 0x51f70029, 0x51f70028, 0x51f70027, 0x51f70026, 0x51f70025, 0x50f7002d, 0x50f7002b, 0x50f7002a, 0x50f70029, 0x50f70028, 0x50f70027, 0x50f70026, 0x50f70025, }; static const u32 txpwrctrl_tx_gain_ipa_rev5[] = { 0x1ff7002d, 0x1ff7002b, 0x1ff7002a, 0x1ff70029, 0x1ff70028, 0x1ff70027, 0x1ff70026, 0x1ff70025, 0x1ef7002d, 0x1ef7002b, 0x1ef7002a, 0x1ef70029, 0x1ef70028, 0x1ef70027, 0x1ef70026, 0x1ef70025, 0x1df7002d, 0x1df7002b, 0x1df7002a, 0x1df70029, 0x1df70028, 0x1df70027, 0x1df70026, 0x1df70025, 0x1cf7002d, 0x1cf7002b, 0x1cf7002a, 0x1cf70029, 0x1cf70028, 0x1cf70027, 0x1cf70026, 0x1cf70025, 0x1bf7002d, 0x1bf7002b, 0x1bf7002a, 0x1bf70029, 0x1bf70028, 0x1bf70027, 0x1bf70026, 0x1bf70025, 0x1af7002d, 0x1af7002b, 0x1af7002a, 0x1af70029, 0x1af70028, 0x1af70027, 0x1af70026, 0x1af70025, 0x19f7002d, 0x19f7002b, 0x19f7002a, 0x19f70029, 0x19f70028, 0x19f70027, 0x19f70026, 0x19f70025, 
0x18f7002d, 0x18f7002b, 0x18f7002a, 0x18f70029, 0x18f70028, 0x18f70027, 0x18f70026, 0x18f70025, 0x17f7002d, 0x17f7002b, 0x17f7002a, 0x17f70029, 0x17f70028, 0x17f70027, 0x17f70026, 0x17f70025, 0x16f7002d, 0x16f7002b, 0x16f7002a, 0x16f70029, 0x16f70028, 0x16f70027, 0x16f70026, 0x16f70025, 0x15f7002d, 0x15f7002b, 0x15f7002a, 0x15f70029, 0x15f70028, 0x15f70027, 0x15f70026, 0x15f70025, 0x14f7002d, 0x14f7002b, 0x14f7002a, 0x14f70029, 0x14f70028, 0x14f70027, 0x14f70026, 0x14f70025, 0x13f7002d, 0x13f7002b, 0x13f7002a, 0x13f70029, 0x13f70028, 0x13f70027, 0x13f70026, 0x13f70025, 0x12f7002d, 0x12f7002b, 0x12f7002a, 0x12f70029, 0x12f70028, 0x12f70027, 0x12f70026, 0x12f70025, 0x11f7002d, 0x11f7002b, 0x11f7002a, 0x11f70029, 0x11f70028, 0x11f70027, 0x11f70026, 0x11f70025, 0x10f7002d, 0x10f7002b, 0x10f7002a, 0x10f70029, 0x10f70028, 0x10f70027, 0x10f70026, 0x10f70025, }; static const u32 txpwrctrl_tx_gain_ipa_rev6[] = { 0x0ff7002d, 0x0ff7002b, 0x0ff7002a, 0x0ff70029, 0x0ff70028, 0x0ff70027, 0x0ff70026, 0x0ff70025, 0x0ef7002d, 0x0ef7002b, 0x0ef7002a, 0x0ef70029, 0x0ef70028, 0x0ef70027, 0x0ef70026, 0x0ef70025, 0x0df7002d, 0x0df7002b, 0x0df7002a, 0x0df70029, 0x0df70028, 0x0df70027, 0x0df70026, 0x0df70025, 0x0cf7002d, 0x0cf7002b, 0x0cf7002a, 0x0cf70029, 0x0cf70028, 0x0cf70027, 0x0cf70026, 0x0cf70025, 0x0bf7002d, 0x0bf7002b, 0x0bf7002a, 0x0bf70029, 0x0bf70028, 0x0bf70027, 0x0bf70026, 0x0bf70025, 0x0af7002d, 0x0af7002b, 0x0af7002a, 0x0af70029, 0x0af70028, 0x0af70027, 0x0af70026, 0x0af70025, 0x09f7002d, 0x09f7002b, 0x09f7002a, 0x09f70029, 0x09f70028, 0x09f70027, 0x09f70026, 0x09f70025, 0x08f7002d, 0x08f7002b, 0x08f7002a, 0x08f70029, 0x08f70028, 0x08f70027, 0x08f70026, 0x08f70025, 0x07f7002d, 0x07f7002b, 0x07f7002a, 0x07f70029, 0x07f70028, 0x07f70027, 0x07f70026, 0x07f70025, 0x06f7002d, 0x06f7002b, 0x06f7002a, 0x06f70029, 0x06f70028, 0x06f70027, 0x06f70026, 0x06f70025, 0x05f7002d, 0x05f7002b, 0x05f7002a, 0x05f70029, 0x05f70028, 0x05f70027, 0x05f70026, 0x05f70025, 0x04f7002d, 0x04f7002b, 
0x04f7002a, 0x04f70029, 0x04f70028, 0x04f70027, 0x04f70026, 0x04f70025, 0x03f7002d, 0x03f7002b, 0x03f7002a, 0x03f70029, 0x03f70028, 0x03f70027, 0x03f70026, 0x03f70025, 0x02f7002d, 0x02f7002b, 0x02f7002a, 0x02f70029, 0x02f70028, 0x02f70027, 0x02f70026, 0x02f70025, 0x01f7002d, 0x01f7002b, 0x01f7002a, 0x01f70029, 0x01f70028, 0x01f70027, 0x01f70026, 0x01f70025, 0x00f7002d, 0x00f7002b, 0x00f7002a, 0x00f70029, 0x00f70028, 0x00f70027, 0x00f70026, 0x00f70025, }; static const u32 txpwrctrl_tx_gain_ipa_5g[] = { 0x7ff70035, 0x7ff70033, 0x7ff70032, 0x7ff70031, 0x7ff7002f, 0x7ff7002e, 0x7ff7002d, 0x7ff7002b, 0x7ff7002a, 0x7ff70029, 0x7ff70028, 0x7ff70027, 0x7ff70026, 0x7ff70024, 0x7ff70023, 0x7ff70022, 0x7ef70028, 0x7ef70027, 0x7ef70026, 0x7ef70025, 0x7ef70024, 0x7ef70023, 0x7df70028, 0x7df70027, 0x7df70026, 0x7df70025, 0x7df70024, 0x7df70023, 0x7df70022, 0x7cf70029, 0x7cf70028, 0x7cf70027, 0x7cf70026, 0x7cf70025, 0x7cf70023, 0x7cf70022, 0x7bf70029, 0x7bf70028, 0x7bf70026, 0x7bf70025, 0x7bf70024, 0x7bf70023, 0x7bf70022, 0x7bf70021, 0x7af70029, 0x7af70028, 0x7af70027, 0x7af70026, 0x7af70025, 0x7af70024, 0x7af70023, 0x7af70022, 0x79f70029, 0x79f70028, 0x79f70027, 0x79f70026, 0x79f70025, 0x79f70024, 0x79f70023, 0x79f70022, 0x78f70029, 0x78f70028, 0x78f70027, 0x78f70026, 0x78f70025, 0x78f70024, 0x78f70023, 0x78f70022, 0x77f70029, 0x77f70028, 0x77f70027, 0x77f70026, 0x77f70025, 0x77f70024, 0x77f70023, 0x77f70022, 0x76f70029, 0x76f70028, 0x76f70027, 0x76f70026, 0x76f70024, 0x76f70023, 0x76f70022, 0x76f70021, 0x75f70029, 0x75f70028, 0x75f70027, 0x75f70026, 0x75f70025, 0x75f70024, 0x75f70023, 0x74f70029, 0x74f70028, 0x74f70026, 0x74f70025, 0x74f70024, 0x74f70023, 0x74f70022, 0x73f70029, 0x73f70027, 0x73f70026, 0x73f70025, 0x73f70024, 0x73f70023, 0x73f70022, 0x72f70028, 0x72f70027, 0x72f70026, 0x72f70025, 0x72f70024, 0x72f70023, 0x72f70022, 0x71f70028, 0x71f70027, 0x71f70026, 0x71f70025, 0x71f70024, 0x71f70023, 0x70f70028, 0x70f70027, 0x70f70026, 0x70f70024, 0x70f70023, 0x70f70022, 
0x70f70021, 0x70f70020, 0x70f70020, 0x70f7001f, }; const s8 b43_ntab_papd_pga_gain_delta_ipa_2g[] = { -114, -108, -98, -91, -84, -78, -70, -62, -54, -46, -39, -31, -23, -15, -8, 0 }; const u16 tbl_iqcal_gainparams[2][9][8] = { { { 0x000, 0, 0, 2, 0x69, 0x69, 0x69, 0x69 }, { 0x700, 7, 0, 0, 0x69, 0x69, 0x69, 0x69 }, { 0x710, 7, 1, 0, 0x68, 0x68, 0x68, 0x68 }, { 0x720, 7, 2, 0, 0x67, 0x67, 0x67, 0x67 }, { 0x730, 7, 3, 0, 0x66, 0x66, 0x66, 0x66 }, { 0x740, 7, 4, 0, 0x65, 0x65, 0x65, 0x65 }, { 0x741, 7, 4, 1, 0x65, 0x65, 0x65, 0x65 }, { 0x742, 7, 4, 2, 0x65, 0x65, 0x65, 0x65 }, { 0x743, 7, 4, 3, 0x65, 0x65, 0x65, 0x65 } }, { { 0x000, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 }, { 0x700, 7, 0, 0, 0x79, 0x79, 0x79, 0x79 }, { 0x710, 7, 1, 0, 0x79, 0x79, 0x79, 0x79 }, { 0x720, 7, 2, 0, 0x78, 0x78, 0x78, 0x78 }, { 0x730, 7, 3, 0, 0x78, 0x78, 0x78, 0x78 }, { 0x740, 7, 4, 0, 0x78, 0x78, 0x78, 0x78 }, { 0x741, 7, 4, 1, 0x78, 0x78, 0x78, 0x78 }, { 0x742, 7, 4, 2, 0x78, 0x78, 0x78, 0x78 }, { 0x743, 7, 4, 3, 0x78, 0x78, 0x78, 0x78 } } }; const struct nphy_txiqcal_ladder ladder_lo[] = { { 3, 0 }, { 4, 0 }, { 6, 0 }, { 9, 0 }, { 13, 0 }, { 18, 0 }, { 25, 0 }, { 25, 1 }, { 25, 2 }, { 25, 3 }, { 25, 4 }, { 25, 5 }, { 25, 6 }, { 25, 7 }, { 35, 7 }, { 50, 7 }, { 71, 7 }, { 100, 7 } }; const struct nphy_txiqcal_ladder ladder_iq[] = { { 3, 0 }, { 4, 0 }, { 6, 0 }, { 9, 0 }, { 13, 0 }, { 18, 0 }, { 25, 0 }, { 35, 0 }, { 50, 0 }, { 71, 0 }, { 100, 0 }, { 100, 1 }, { 100, 2 }, { 100, 3 }, { 100, 4 }, { 100, 5 }, { 100, 6 }, { 100, 7 } }; const u16 loscale[] = { 256, 256, 271, 271, 287, 256, 256, 271, 271, 287, 287, 304, 304, 256, 256, 271, 271, 287, 287, 304, 304, 322, 322, 341, 341, 362, 362, 383, 383, 256, 256, 271, 271, 287, 287, 304, 304, 322, 322, 256, 256, 271, 271, 287, 287, 304, 304, 322, 322, 341, 341, 362, 362, 256, 256, 271, 271, 287, 287, 304, 304, 322, 322, 256, 256, 271, 271, 287, 287, 304, 304, 322, 322, 341, 341, 362, 362, 256, 256, 271, 271, 287, 287, 304, 304, 322, 322, 341, 341, 
362, 362, 383, 383, 406, 406, 430, 430, 455, 455, 482, 482, 511, 511, 541, 541, 573, 573, 607, 607, 643, 643, 681, 681, 722, 722, 764, 764, 810, 810, 858, 858, 908, 908, 962, 962, 1019, 1019, 256 }; const u16 tbl_tx_iqlo_cal_loft_ladder_40[] = { 0x0200, 0x0300, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1201, 0x1202, 0x1203, 0x1204, 0x1205, 0x1206, 0x1207, 0x1907, 0x2307, 0x3207, 0x4707 }; const u16 tbl_tx_iqlo_cal_loft_ladder_20[] = { 0x0300, 0x0500, 0x0700, 0x0900, 0x0d00, 0x1100, 0x1900, 0x1901, 0x1902, 0x1903, 0x1904, 0x1905, 0x1906, 0x1907, 0x2407, 0x3207, 0x4607, 0x6407 }; const u16 tbl_tx_iqlo_cal_iqimb_ladder_40[] = { 0x0100, 0x0200, 0x0400, 0x0700, 0x0900, 0x0c00, 0x1200, 0x1900, 0x2300, 0x3200, 0x4700, 0x4701, 0x4702, 0x4703, 0x4704, 0x4705, 0x4706, 0x4707 }; const u16 tbl_tx_iqlo_cal_iqimb_ladder_20[] = { 0x0200, 0x0300, 0x0600, 0x0900, 0x0d00, 0x1100, 0x1900, 0x2400, 0x3200, 0x4600, 0x6400, 0x6401, 0x6402, 0x6403, 0x6404, 0x6405, 0x6406, 0x6407 }; const u16 tbl_tx_iqlo_cal_startcoefs_nphyrev3[B43_NTAB_TX_IQLO_CAL_STARTCOEFS_REV3] = { }; const u16 tbl_tx_iqlo_cal_startcoefs[B43_NTAB_TX_IQLO_CAL_STARTCOEFS] = { }; const u16 tbl_tx_iqlo_cal_cmds_recal_nphyrev3[] = { 0x8423, 0x8323, 0x8073, 0x8256, 0x8045, 0x8223, 0x9423, 0x9323, 0x9073, 0x9256, 0x9045, 0x9223 }; const u16 tbl_tx_iqlo_cal_cmds_recal[] = { 0x8101, 0x8253, 0x8053, 0x8234, 0x8034, 0x9101, 0x9253, 0x9053, 0x9234, 0x9034 }; const u16 tbl_tx_iqlo_cal_cmds_fullcal[] = { 0x8123, 0x8264, 0x8086, 0x8245, 0x8056, 0x9123, 0x9264, 0x9086, 0x9245, 0x9056 }; const u16 tbl_tx_iqlo_cal_cmds_fullcal_nphyrev3[] = { 0x8434, 0x8334, 0x8084, 0x8267, 0x8056, 0x8234, 0x9434, 0x9334, 0x9084, 0x9267, 0x9056, 0x9234 }; const s16 tbl_tx_filter_coef_rev4[7][15] = { { -377, 137, -407, 208, -1527, 956, 93, 186, 93, 230, -44, 230, 201, -191, 201 }, { -77, 20, -98, 49, -93, 60, 56, 111, 56, 26, -5, 26, 34, -32, 34 }, { -360, 164, -376, 164, -1533, 576, 308, -314, 308, 121, -73, 121, 91, 124, 91 }, { -295, 200, -363, 142, 
-1391, 826, 151, 301, 151, 151, 301, 151, 602, -752, 602 }, { -92, 58, -96, 49, -104, 44, 17, 35, 17, 12, 25, 12, 13, 27, 13 }, { -375, 136, -399, 209, -1479, 949, 130, 260, 130, 230, -44, 230, 201, -191, 201 }, { 0xed9, 0xc8, 0xe95, 0x8e, 0xa91, 0x33a, 0x97, 0x12d, 0x97, 0x97, 0x12d, 0x97, 0x25a, 0xd10, 0x25a } }; const struct nphy_rf_control_override_rev2 tbl_rf_control_override_rev2[] = { { 0x78, 0x78, 0x0038, 3 }, { 0x7A, 0x7D, 0x0001, 0 }, { 0x7A, 0x7D, 0x0002, 1 }, { 0x7A, 0x7D, 0x0004, 2 }, { 0x7A, 0x7D, 0x0030, 4 }, { 0x7A, 0x7D, 0x00C0, 6 }, { 0x7A, 0x7D, 0x0100, 8 }, { 0x7A, 0x7D, 0x0200, 9 }, { 0x78, 0x78, 0x0004, 2 }, { 0x7B, 0x7E, 0x01FF, 0 }, { 0x7C, 0x7F, 0x01FF, 0 }, { 0x78, 0x78, 0x0100, 8 }, { 0x78, 0x78, 0x0200, 9 }, { 0x78, 0x78, 0xF000, 12 } }; const struct nphy_rf_control_override_rev3 tbl_rf_control_override_rev3[] = { { 0x8000, 15, 0xE5, 0xF9, 0xE6, 0xFB }, { 0x0001, 0, 0xE7, 0x7A, 0xEC, 0x7D }, { 0x0002, 1, 0xE7, 0x7A, 0xEC, 0x7D }, { 0x0004, 2, 0xE7, 0x7A, 0xEC, 0x7D }, { 0x0010, 4, 0xE7, 0x7A, 0xEC, 0x7D }, { 0x0020, 5, 0xE7, 0x7A, 0xEC, 0x7D }, { 0x0040, 6, 0xE7, 0x7A, 0xEC, 0x7D }, { 0x0080, 7, 0xE7, 0x7A, 0xEC, 0x7D }, { 0x0100, 8, 0xE7, 0x7A, 0xEC, 0x7D }, { 0x0007, 0, 0xE7, 0xF8, 0xEC, 0xFA }, { 0x0070, 4, 0xE7, 0xF8, 0xEC, 0xFA }, { 0xE000, 13, 0xE7, 0x7A, 0xEC, 0x7D }, { 0xFFFF, 0, 0xE7, 0x7B, 0xEC, 0x7E }, { 0xFFFF, 0, 0xE7, 0x7C, 0xEC, 0x7F }, { 0x00C0, 6, 0xE7, 0xF9, 0xEC, 0xFB } }; struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_wa_phy6_radio11_ghz2 = { { 10, 14, 19, 27 }, { -5, 6, 10, 15 }, { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 0x427E, { 0x413F, 0x413F, 0x413F, 0x413F }, 0x007E, 0x0066, 0x1074, 0x18, 0x18, 0x18, 0x01D0, 0x5, }; struct nphy_gain_ctl_workaround_entry nphy_gain_ctl_workaround[2][4] = { { { { 7, 11, 16, 23 }, { -5, 6, 10, 14 }, { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 0x627E, { 0x613F, 0x613F, 0x613F, 0x613F }, 
0x107E, 0x0066, 0x0074, 0x18, 0x18, 0x18, 0x020D, 0x5, }, { { 8, 12, 17, 25 }, { -5, 6, 10, 14 }, { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 0x527E, { 0x513F, 0x513F, 0x513F, 0x513F }, 0x007E, 0x0066, 0x0074, 0x18, 0x18, 0x18, 0x01A1, 0x5, }, { { 9, 13, 18, 26 }, { -3, 7, 11, 16 }, { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 0x427E, { 0x413F, 0x413F, 0x413F, 0x413F }, 0x1076, 0x0066, 0x0000, 0x18, 0x18, 0x18, 0x01D0, 0x9, }, { { 8, 13, 18, 25 }, { -5, 6, 10, 14 }, { 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA, 0xA }, { 3, 3, 3, 3, 3, 3, 3, 3, 3, 3 }, 0x527E, { 0x513F, 0x513F, 0x513F, 0x513F }, 0x1076, 0x0066, 0x0000, 0x18, 0x18, 0x18, 0x01D0, 0x5, }, }, { { { 7, 11, 17, 23 }, { -6, 2, 6, 10 }, { 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13 }, { 6, 6, 6, 6, 6, 6, 6, 6, 6, 6 }, 0x52DE, { 0x516F, 0x516F, 0x516F, 0x516F }, 0x00DE, 0x00CA, 0x00CC, 0x1E, 0x1E, 0x1E, 0x01A1, 25, }, { { 8, 12, 18, 23 }, { -5, 2, 6, 10 }, { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, 0x629E, { 0x614F, 0x614F, 0x614F, 0x614F }, 0x029E, 0x1084, 0x0086, 0x24, 0x24, 0x24, 0x0107, 25, }, { { 6, 10, 16, 21 }, { -7, 0, 4, 8 }, { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, 0x729E, { 0x714F, 0x714F, 0x714F, 0x714F }, 0x029E, 0x2084, 0x2086, 0x24, 0x24, 0x24, 0x00A9, 25, }, { { 6, 10, 16, 21 }, { -7, 0, 4, 8 }, { 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD, 0xD }, { 4, 4, 4, 4, 4, 4, 4, 4, 4, 4 }, 0x729E, { 0x714F, 0x714F, 0x714F, 0x714F }, 0x029E, 0x2084, 0x2086, 0x24, 0x24, 0x24, 0x00F0, 25, }, }, }; static inline void assert_ntab_array_sizes(void) { #undef check #define check(table, size) \ BUILD_BUG_ON(ARRAY_SIZE(b43_ntab_##table) != B43_NTAB_##size##_SIZE) check(adjustpower0, C0_ADJPLT); check(adjustpower1, C1_ADJPLT); check(bdi, BDI); check(channelest, CHANEST); check(estimatepowerlt0, C0_ESTPLT); 
	/* (continuation of assert_ntab_array_sizes(): remaining compile-time
	 * BUILD_BUG_ON checks that each table matches its declared size) */
	check(estimatepowerlt1, C1_ESTPLT);
	check(framelookup, FRAMELT);
	check(framestruct, FRAMESTRUCT);
	check(gainctl0, C0_GAINCTL);
	check(gainctl1, C1_GAINCTL);
	check(intlevel, INTLEVEL);
	check(iqlt0, C0_IQLT);
	check(iqlt1, C1_IQLT);
	check(loftlt0, C0_LOFEEDTH);
	check(loftlt1, C1_LOFEEDTH);
	check(mcs, MCS);
	check(noisevar10, NOISEVAR10);
	check(noisevar11, NOISEVAR11);
	check(pilot, PILOT);
	check(pilotlt, PILOTLT);
	check(tdi20a0, TDI20A0);
	check(tdi20a1, TDI20A1);
	check(tdi40a0, TDI40A0);
	check(tdi40a1, TDI40A1);
	check(tdtrn, TDTRN);
	check(tmap, TMAP);
#undef check
}

/*
 * Read one element of an N-PHY table.  The element width (8/16/32 bit)
 * is encoded in the type bits of @offset; 32-bit elements are fetched
 * as two 16-bit halves (DATALO then DATAHI).
 */
u32 b43_ntab_read(struct b43_wldev *dev, u32 offset)
{
	u32 type, value;

	type = offset & B43_NTAB_TYPEMASK;
	offset &= ~B43_NTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	switch (type) {
	case B43_NTAB_8BIT:
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
		value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
		break;
	case B43_NTAB_16BIT:
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
		value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
		break;
	case B43_NTAB_32BIT:
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
		value = b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
		value |= b43_phy_read(dev, B43_NPHY_TABLE_DATAHI) << 16;
		break;
	default:
		B43_WARN_ON(1);
		value = 0;
	}

	return value;
}

/*
 * Bulk-read @nr_elements consecutive table elements into @_data.
 * The table address auto-increments after each data access, except on
 * BCM43224 rev 1 where a dummy read + explicit re-addressing works
 * around broken auto-increment.
 *
 * NOTE(review): unlike b43_ntab_write_bulk() below, the 43224 quirk
 * here is applied for every offset, not only (offset >> 10) == 9 —
 * presumably intentional for reads, but worth confirming against the
 * vendor specs.
 */
void b43_ntab_read_bulk(struct b43_wldev *dev, u32 offset,
			unsigned int nr_elements, void *_data)
{
	u32 type;
	u8 *data = _data;
	unsigned int i;

	type = offset & B43_NTAB_TYPEMASK;
	offset &= ~B43_NTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
	for (i = 0; i < nr_elements; i++) {
		if (dev->dev->chip_id == 43224 && dev->dev->chip_rev == 1) {
			/* dummy read, then re-address explicitly */
			b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
			b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
		}
		switch (type) {
		case B43_NTAB_8BIT:
			*data = b43_phy_read(dev, B43_NPHY_TABLE_DATALO) & 0xFF;
			data++;
			break;
		case B43_NTAB_16BIT:
			*((u16 *)data) = b43_phy_read(dev,
						      B43_NPHY_TABLE_DATALO);
			data += 2;
			break;
		case B43_NTAB_32BIT:
			*((u32 *)data) = b43_phy_read(dev,
						      B43_NPHY_TABLE_DATALO);
			*((u32 *)data) |= b43_phy_read(dev,
						       B43_NPHY_TABLE_DATAHI) << 16;
			data += 4;
			break;
		default:
			B43_WARN_ON(1);
		}
	}
}

/*
 * Write one element of an N-PHY table; width encoded in @offset as above.
 * For 32-bit elements the HI half must be written before LO.
 */
void b43_ntab_write(struct b43_wldev *dev, u32 offset, u32 value)
{
	u32 type;

	type = offset & B43_NTAB_TYPEMASK;
	offset &= 0xFFFF;	/* NOTE(review): siblings use ~B43_NTAB_TYPEMASK */

	switch (type) {
	case B43_NTAB_8BIT:
		B43_WARN_ON(value & ~0xFF);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
		break;
	case B43_NTAB_16BIT:
		B43_WARN_ON(value & ~0xFFFF);
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
		break;
	case B43_NTAB_32BIT:
		b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
		b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, value >> 16);
		b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value & 0xFFFF);
		break;
	default:
		B43_WARN_ON(1);
	}

	return;
	/* Never reached at runtime: the call exists only so the
	 * compile-time BUILD_BUG_ON checks get instantiated. */
	assert_ntab_array_sizes();
}

/*
 * Bulk-write @nr_elements consecutive table elements from @_data.
 * Applies the BCM43224 rev 1 auto-increment workaround for the
 * table group (offset >> 10) == 9.
 */
void b43_ntab_write_bulk(struct b43_wldev *dev, u32 offset,
			 unsigned int nr_elements, const void *_data)
{
	u32 type, value;
	const u8 *data = _data;
	unsigned int i;

	type = offset & B43_NTAB_TYPEMASK;
	offset &= ~B43_NTAB_TYPEMASK;
	B43_WARN_ON(offset > 0xFFFF);

	b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset);
	for (i = 0; i < nr_elements; i++) {
		if ((offset >> 10) == 9 && dev->dev->chip_id == 43224 &&
		    dev->dev->chip_rev == 1) {
			/* dummy read, then re-address explicitly */
			b43_phy_read(dev, B43_NPHY_TABLE_DATALO);
			b43_phy_write(dev, B43_NPHY_TABLE_ADDR, offset + i);
		}
		switch (type) {
		case B43_NTAB_8BIT:
			value = *data;
			data++;
			B43_WARN_ON(value & ~0xFF);
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
			break;
		case B43_NTAB_16BIT:
			value = *((u16 *)data);
			data += 2;
			B43_WARN_ON(value & ~0xFFFF);
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO, value);
			break;
		case B43_NTAB_32BIT:
			value = *((u32 *)data);
			data += 4;
			b43_phy_write(dev, B43_NPHY_TABLE_DATAHI, value >> 16);
			b43_phy_write(dev, B43_NPHY_TABLE_DATALO,
				      value & 0xFFFF);
			break;
		default:
			B43_WARN_ON(1);
		}
	}
}

/* Upload a table whose element count is given by the offset's _SIZE macro. */
#define ntab_upload(dev, offset, data) do { \
		b43_ntab_write_bulk(dev, offset, offset##_SIZE, data);	\
	} while (0)

/* Upload all static N-PHY tables for PHY revisions 0..2. */
void
b43_nphy_rev0_1_2_tables_init(struct b43_wldev *dev)
{
	ntab_upload(dev, B43_NTAB_FRAMESTRUCT, b43_ntab_framestruct);
	ntab_upload(dev, B43_NTAB_FRAMELT, b43_ntab_framelookup);
	ntab_upload(dev, B43_NTAB_TMAP, b43_ntab_tmap);
	ntab_upload(dev, B43_NTAB_TDTRN, b43_ntab_tdtrn);
	ntab_upload(dev, B43_NTAB_INTLEVEL, b43_ntab_intlevel);
	ntab_upload(dev, B43_NTAB_PILOT, b43_ntab_pilot);
	ntab_upload(dev, B43_NTAB_TDI20A0, b43_ntab_tdi20a0);
	ntab_upload(dev, B43_NTAB_TDI20A1, b43_ntab_tdi20a1);
	ntab_upload(dev, B43_NTAB_TDI40A0, b43_ntab_tdi40a0);
	ntab_upload(dev, B43_NTAB_TDI40A1, b43_ntab_tdi40a1);
	ntab_upload(dev, B43_NTAB_CHANEST, b43_ntab_channelest);
	ntab_upload(dev, B43_NTAB_MCS, b43_ntab_mcs);
	ntab_upload(dev, B43_NTAB_NOISEVAR10, b43_ntab_noisevar10);
	ntab_upload(dev, B43_NTAB_NOISEVAR11, b43_ntab_noisevar11);
	ntab_upload(dev, B43_NTAB_BDI, b43_ntab_bdi);
	ntab_upload(dev, B43_NTAB_PILOTLT, b43_ntab_pilotlt);
	ntab_upload(dev, B43_NTAB_C0_GAINCTL, b43_ntab_gainctl0);
	ntab_upload(dev, B43_NTAB_C1_GAINCTL, b43_ntab_gainctl1);
	ntab_upload(dev, B43_NTAB_C0_ESTPLT, b43_ntab_estimatepowerlt0);
	ntab_upload(dev, B43_NTAB_C1_ESTPLT, b43_ntab_estimatepowerlt1);
	ntab_upload(dev, B43_NTAB_C0_ADJPLT, b43_ntab_adjustpower0);
	ntab_upload(dev, B43_NTAB_C1_ADJPLT, b43_ntab_adjustpower1);
	ntab_upload(dev, B43_NTAB_C0_IQLT, b43_ntab_iqlt0);
	ntab_upload(dev, B43_NTAB_C1_IQLT, b43_ntab_iqlt1);
	ntab_upload(dev, B43_NTAB_C0_LOFEEDTH, b43_ntab_loftlt0);
	ntab_upload(dev, B43_NTAB_C1_LOFEEDTH, b43_ntab_loftlt1);
}

/* Rev3+ variant: element count taken from the array itself. */
#define ntab_upload_r3(dev, offset, data) do { \
		b43_ntab_write_bulk(dev, offset, ARRAY_SIZE(data), data); \
	} while (0)

/* Upload all static N-PHY tables for PHY revisions 3 and later,
 * plus the antenna-switch control table selected by the SPROM. */
void b43_nphy_rev3plus_tables_init(struct b43_wldev *dev)
{
	struct ssb_sprom *sprom = dev->dev->bus_sprom;

	ntab_upload_r3(dev, B43_NTAB_FRAMESTRUCT_R3, b43_ntab_framestruct_r3);
	ntab_upload_r3(dev, B43_NTAB_PILOT_R3, b43_ntab_pilot_r3);
	ntab_upload_r3(dev, B43_NTAB_TMAP_R3, b43_ntab_tmap_r3);
	ntab_upload_r3(dev, B43_NTAB_INTLEVEL_R3, b43_ntab_intlevel_r3);
	ntab_upload_r3(dev, B43_NTAB_TDTRN_R3, b43_ntab_tdtrn_r3);
	ntab_upload_r3(dev, B43_NTAB_NOISEVAR0_R3, b43_ntab_noisevar0_r3);
	ntab_upload_r3(dev, B43_NTAB_NOISEVAR1_R3, b43_ntab_noisevar1_r3);
	ntab_upload_r3(dev, B43_NTAB_MCS_R3, b43_ntab_mcs_r3);
	ntab_upload_r3(dev, B43_NTAB_TDI20A0_R3, b43_ntab_tdi20a0_r3);
	ntab_upload_r3(dev, B43_NTAB_TDI20A1_R3, b43_ntab_tdi20a1_r3);
	ntab_upload_r3(dev, B43_NTAB_TDI40A0_R3, b43_ntab_tdi40a0_r3);
	ntab_upload_r3(dev, B43_NTAB_TDI40A1_R3, b43_ntab_tdi40a1_r3);
	ntab_upload_r3(dev, B43_NTAB_PILOTLT_R3, b43_ntab_pilotlt_r3);
	ntab_upload_r3(dev, B43_NTAB_CHANEST_R3, b43_ntab_channelest_r3);
	ntab_upload_r3(dev, B43_NTAB_FRAMELT_R3, b43_ntab_framelookup_r3);
	ntab_upload_r3(dev, B43_NTAB_C0_ESTPLT_R3,
		       b43_ntab_estimatepowerlt0_r3);
	ntab_upload_r3(dev, B43_NTAB_C1_ESTPLT_R3,
		       b43_ntab_estimatepowerlt1_r3);
	ntab_upload_r3(dev, B43_NTAB_C0_ADJPLT_R3, b43_ntab_adjustpower0_r3);
	ntab_upload_r3(dev, B43_NTAB_C1_ADJPLT_R3, b43_ntab_adjustpower1_r3);
	ntab_upload_r3(dev, B43_NTAB_C0_GAINCTL_R3, b43_ntab_gainctl0_r3);
	ntab_upload_r3(dev, B43_NTAB_C1_GAINCTL_R3, b43_ntab_gainctl1_r3);
	ntab_upload_r3(dev, B43_NTAB_C0_IQLT_R3, b43_ntab_iqlt0_r3);
	ntab_upload_r3(dev, B43_NTAB_C1_IQLT_R3, b43_ntab_iqlt1_r3);
	ntab_upload_r3(dev, B43_NTAB_C0_LOFEEDTH_R3, b43_ntab_loftlt0_r3);
	ntab_upload_r3(dev, B43_NTAB_C1_LOFEEDTH_R3, b43_ntab_loftlt1_r3);

	if (sprom->fem.ghz2.antswlut < ARRAY_SIZE(b43_ntab_antswctl2g_r3))
		ntab_upload_r3(dev, B43_NTAB_ANT_SW_CTL_R3,
			       b43_ntab_antswctl2g_r3[sprom->fem.ghz2.antswlut]);
	else
		B43_WARN_ON(1);
}

/* Pick the internal-PA TX gain table for the current band / PHY rev. */
static const u32 *b43_nphy_get_ipa_gain_table(struct b43_wldev *dev)
{
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		if (dev->phy.rev >= 6) {
			if (dev->dev->chip_id == 47162)
				return txpwrctrl_tx_gain_ipa_rev5;
			return txpwrctrl_tx_gain_ipa_rev6;
		} else if (dev->phy.rev >= 5) {
			return txpwrctrl_tx_gain_ipa_rev5;
		} else {
			return txpwrctrl_tx_gain_ipa;
		}
	} else {
		return txpwrctrl_tx_gain_ipa_5g;
	}
}

/* (return type of b43_nphy_get_tx_gain_table(), which continues below) */
const u32
*b43_nphy_get_tx_gain_table(struct b43_wldev *dev) { enum ieee80211_band band = b43_current_band(dev->wl); struct ssb_sprom *sprom = dev->dev->bus_sprom; if (dev->phy.rev < 3) return b43_ntab_tx_gain_rev0_1_2; if ((dev->phy.n->ipa2g_on && band == IEEE80211_BAND_2GHZ) || (dev->phy.n->ipa5g_on && band == IEEE80211_BAND_5GHZ)) { return b43_nphy_get_ipa_gain_table(dev); } else if (b43_current_band(dev->wl) == IEEE80211_BAND_5GHZ) { if (dev->phy.rev == 3) return b43_ntab_tx_gain_rev3_5ghz; if (dev->phy.rev == 4) return sprom->fem.ghz5.extpa_gain == 3 ? b43_ntab_tx_gain_rev4_5ghz : b43_ntab_tx_gain_rev4_5ghz; else return b43_ntab_tx_gain_rev5plus_5ghz; } else { if (dev->phy.rev >= 5 && sprom->fem.ghz5.extpa_gain == 3) return b43_ntab_tx_gain_rev3plus_2ghz; else return b43_ntab_tx_gain_rev3plus_2ghz; } } struct nphy_gain_ctl_workaround_entry *b43_nphy_get_gain_ctl_workaround_ent( struct b43_wldev *dev, bool ghz5, bool ext_lna) { struct nphy_gain_ctl_workaround_entry *e; u8 phy_idx; u8 tr_iso = ghz5 ? 
dev->dev->bus_sprom->fem.ghz5.tr_iso : dev->dev->bus_sprom->fem.ghz2.tr_iso; if (!ghz5 && dev->phy.rev >= 6 && dev->phy.radio_rev == 11) return &nphy_gain_ctl_wa_phy6_radio11_ghz2; B43_WARN_ON(dev->phy.rev < 3); if (dev->phy.rev >= 6) phy_idx = 3; else if (dev->phy.rev == 5) phy_idx = 2; else if (dev->phy.rev == 4) phy_idx = 1; else phy_idx = 0; e = &nphy_gain_ctl_workaround[ghz5][phy_idx]; if (ghz5 && dev->phy.rev >= 6) { if (dev->phy.radio_rev == 11 && !b43_channel_type_is_40mhz(dev->phy.channel_type)) e->cliplo_gain = 0x2d; } else if (!ghz5 && dev->phy.rev >= 5) { if (ext_lna) { e->rfseq_init[0] &= ~0x4000; e->rfseq_init[1] &= ~0x4000; e->rfseq_init[2] &= ~0x4000; e->rfseq_init[3] &= ~0x4000; e->init_gain &= ~0x4000; } switch (tr_iso) { case 0: e->cliplo_gain = 0x0062; case 1: e->cliplo_gain = 0x0064; case 2: e->cliplo_gain = 0x006a; case 3: e->cliplo_gain = 0x106a; case 4: e->cliplo_gain = 0x106c; case 5: e->cliplo_gain = 0x1074; case 6: e->cliplo_gain = 0x107c; case 7: e->cliplo_gain = 0x207c; default: e->cliplo_gain = 0x106a; } } else if (ghz5 && dev->phy.rev == 4 && ext_lna) { e->rfseq_init[0] &= ~0x4000; e->rfseq_init[1] &= ~0x4000; e->rfseq_init[2] &= ~0x4000; e->rfseq_init[3] &= ~0x4000; e->init_gain &= ~0x4000; e->rfseq_init[0] |= 0x1000; e->rfseq_init[1] |= 0x1000; e->rfseq_init[2] |= 0x1000; e->rfseq_init[3] |= 0x1000; e->init_gain |= 0x1000; } return e; }
gpl-2.0
k5t4j5/kernel_htc_trinity
arch/um/os-Linux/irq.c
34
2532
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdlib.h> #include <errno.h> #include <poll.h> #include <signal.h> #include <string.h> #include "irq_user.h" #include "os.h" #include "um_malloc.h" static struct pollfd *pollfds = NULL; static int pollfds_num = 0; static int pollfds_size = 0; int os_waiting_for_events(struct irq_fd *active_fds) { struct irq_fd *irq_fd; int i, n, err; n = poll(pollfds, pollfds_num, 0); if (n < 0) { err = -errno; if (errno != EINTR) printk(UM_KERN_ERR "os_waiting_for_events:" " poll returned %d, errno = %d\n", n, errno); return err; } if (n == 0) return 0; irq_fd = active_fds; for (i = 0; i < pollfds_num; i++) { if (pollfds[i].revents != 0) { irq_fd->current_events = pollfds[i].revents; pollfds[i].fd = -1; } irq_fd = irq_fd->next; } return n; } int os_create_pollfd(int fd, int events, void *tmp_pfd, int size_tmpfds) { if (pollfds_num == pollfds_size) { if (size_tmpfds <= pollfds_size * sizeof(pollfds[0])) { return (pollfds_size + 1) * sizeof(pollfds[0]); } if (pollfds != NULL) { memcpy(tmp_pfd, pollfds, sizeof(pollfds[0]) * pollfds_size); kfree(pollfds); } pollfds = tmp_pfd; pollfds_size++; } else kfree(tmp_pfd); pollfds[pollfds_num] = ((struct pollfd) { .fd = fd, .events = events, .revents = 0 }); pollfds_num++; return 0; } void os_free_irq_by_cb(int (*test)(struct irq_fd *, void *), void *arg, struct irq_fd *active_fds, struct irq_fd ***last_irq_ptr2) { struct irq_fd **prev; int i = 0; prev = &active_fds; while (*prev != NULL) { if ((*test)(*prev, arg)) { struct irq_fd *old_fd = *prev; if ((pollfds[i].fd != -1) && (pollfds[i].fd != (*prev)->fd)) { printk(UM_KERN_ERR "os_free_irq_by_cb - " "mismatch between active_fds and " "pollfds, fd %d vs %d\n", (*prev)->fd, pollfds[i].fd); goto out; } pollfds_num--; memmove(&pollfds[i], &pollfds[i + 1], (pollfds_num - i) * sizeof(pollfds[0])); if (*last_irq_ptr2 == &old_fd->next) *last_irq_ptr2 = prev; *prev = (*prev)->next; if 
(old_fd->type == IRQ_WRITE) ignore_sigio_fd(old_fd->fd); kfree(old_fd); continue; } prev = &(*prev)->next; i++; } out: return; } int os_get_pollfd(int i) { return pollfds[i].fd; } void os_set_pollfd(int i, int fd) { pollfds[i].fd = fd; } void os_set_ioignore(void) { signal(SIGIO, SIG_IGN); }
gpl-2.0
kernelhackx/kernel_htc_m8
drivers/char/ipmi/ipmi_bt_sm.c
34
15607
/* * ipmi_bt_sm.c * * The state machine for an Open IPMI BT sub-driver under ipmi_si.c, part * of the driver architecture at http://sourceforge.net/projects/openipmi * * Author: Rocky Craig <first.last@hp.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR * TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE * USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/string.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/ipmi_msgdefs.h> #include "ipmi_si_sm.h" #define BT_DEBUG_OFF 0 #define BT_DEBUG_ENABLE 1 #define BT_DEBUG_MSG 2 #define BT_DEBUG_STATES 4 static int bt_debug; module_param(bt_debug, int, 0644); MODULE_PARM_DESC(bt_debug, "debug bitmask, 1=enable, 2=messages, 4=states"); #define BT_NORMAL_TIMEOUT 5 #define BT_NORMAL_RETRY_LIMIT 2 #define BT_RESET_DELAY 6 /* * States are written in chronological order and usually cover * multiple rows of the state table discussion in the IPMI spec. 
*/ enum bt_states { BT_STATE_IDLE = 0, BT_STATE_XACTION_START, BT_STATE_WRITE_BYTES, BT_STATE_WRITE_CONSUME, BT_STATE_READ_WAIT, BT_STATE_CLEAR_B2H, BT_STATE_READ_BYTES, BT_STATE_RESET1, BT_STATE_RESET2, BT_STATE_RESET3, BT_STATE_RESTART, BT_STATE_PRINTME, BT_STATE_CAPABILITIES_BEGIN, BT_STATE_CAPABILITIES_END, BT_STATE_LONG_BUSY }; #define BT_STATE_CHANGE(X, Y) { bt->state = X; return Y; } #define BT_SI_SM_RETURN(Y) { last_printed = BT_STATE_PRINTME; return Y; } struct si_sm_data { enum bt_states state; unsigned char seq; struct si_sm_io *io; unsigned char write_data[IPMI_MAX_MSG_LENGTH]; int write_count; unsigned char read_data[IPMI_MAX_MSG_LENGTH]; int read_count; int truncated; long timeout; int error_retries; int nonzero_status; enum bt_states complete; int BT_CAP_outreqs; long BT_CAP_req2rsp; int BT_CAP_retries; }; #define BT_CLR_WR_PTR 0x01 #define BT_CLR_RD_PTR 0x02 #define BT_H2B_ATN 0x04 #define BT_B2H_ATN 0x08 #define BT_SMS_ATN 0x10 #define BT_OEM0 0x20 #define BT_H_BUSY 0x40 #define BT_B_BUSY 0x80 #define BT_STATUS bt->io->inputb(bt->io, 0) #define BT_CONTROL(x) bt->io->outputb(bt->io, 0, x) #define BMC2HOST bt->io->inputb(bt->io, 1) #define HOST2BMC(x) bt->io->outputb(bt->io, 1, x) #define BT_INTMASK_R bt->io->inputb(bt->io, 2) #define BT_INTMASK_W(x) bt->io->outputb(bt->io, 2, x) static char *state2txt(unsigned char state) { switch (state) { case BT_STATE_IDLE: return("IDLE"); case BT_STATE_XACTION_START: return("XACTION"); case BT_STATE_WRITE_BYTES: return("WR_BYTES"); case BT_STATE_WRITE_CONSUME: return("WR_CONSUME"); case BT_STATE_READ_WAIT: return("RD_WAIT"); case BT_STATE_CLEAR_B2H: return("CLEAR_B2H"); case BT_STATE_READ_BYTES: return("RD_BYTES"); case BT_STATE_RESET1: return("RESET1"); case BT_STATE_RESET2: return("RESET2"); case BT_STATE_RESET3: return("RESET3"); case BT_STATE_RESTART: return("RESTART"); case BT_STATE_LONG_BUSY: return("LONG_BUSY"); case BT_STATE_CAPABILITIES_BEGIN: return("CAP_BEGIN"); case BT_STATE_CAPABILITIES_END: 
return("CAP_END"); } return("BAD STATE"); } #define STATE2TXT state2txt(bt->state) static char *status2txt(unsigned char status) { static char buf[40]; strcpy(buf, "[ "); if (status & BT_B_BUSY) strcat(buf, "B_BUSY "); if (status & BT_H_BUSY) strcat(buf, "H_BUSY "); if (status & BT_OEM0) strcat(buf, "OEM0 "); if (status & BT_SMS_ATN) strcat(buf, "SMS "); if (status & BT_B2H_ATN) strcat(buf, "B2H "); if (status & BT_H2B_ATN) strcat(buf, "H2B "); strcat(buf, "]"); return buf; } #define STATUS2TXT status2txt(status) static unsigned int bt_init_data(struct si_sm_data *bt, struct si_sm_io *io) { memset(bt, 0, sizeof(struct si_sm_data)); if (bt->io != io) { bt->io = io; bt->seq = 0; } bt->state = BT_STATE_IDLE; bt->complete = BT_STATE_IDLE; bt->BT_CAP_req2rsp = BT_NORMAL_TIMEOUT * 1000000; bt->BT_CAP_retries = BT_NORMAL_RETRY_LIMIT; return 3; } static void force_result(struct si_sm_data *bt, unsigned char completion_code) { bt->read_data[0] = 4; bt->read_data[1] = bt->write_data[1] | 4; bt->read_data[2] = bt->write_data[2]; bt->read_data[3] = bt->write_data[3]; bt->read_data[4] = completion_code; bt->read_count = 5; } static int bt_start_transaction(struct si_sm_data *bt, unsigned char *data, unsigned int size) { unsigned int i; if (size < 2) return IPMI_REQ_LEN_INVALID_ERR; if (size > IPMI_MAX_MSG_LENGTH) return IPMI_REQ_LEN_EXCEEDED_ERR; if (bt->state == BT_STATE_LONG_BUSY) return IPMI_NODE_BUSY_ERR; if (bt->state != BT_STATE_IDLE) return IPMI_NOT_IN_MY_STATE_ERR; if (bt_debug & BT_DEBUG_MSG) { printk(KERN_WARNING "BT: +++++++++++++++++ New command\n"); printk(KERN_WARNING "BT: NetFn/LUN CMD [%d data]:", size - 2); for (i = 0; i < size; i ++) printk(" %02x", data[i]); printk("\n"); } bt->write_data[0] = size + 1; bt->write_data[1] = *data; bt->write_data[2] = bt->seq++; memcpy(bt->write_data + 3, data + 1, size - 1); bt->write_count = size + 2; bt->error_retries = 0; bt->nonzero_status = 0; bt->truncated = 0; bt->state = BT_STATE_XACTION_START; bt->timeout = 
bt->BT_CAP_req2rsp; force_result(bt, IPMI_ERR_UNSPECIFIED); return 0; } static int bt_get_result(struct si_sm_data *bt, unsigned char *data, unsigned int length) { int i, msg_len; msg_len = bt->read_count - 2; if (msg_len < 3 || msg_len > IPMI_MAX_MSG_LENGTH) { force_result(bt, IPMI_ERR_UNSPECIFIED); msg_len = 3; } data[0] = bt->read_data[1]; data[1] = bt->read_data[3]; if (length < msg_len || bt->truncated) { data[2] = IPMI_ERR_MSG_TRUNCATED; msg_len = 3; } else memcpy(data + 2, bt->read_data + 4, msg_len - 2); if (bt_debug & BT_DEBUG_MSG) { printk(KERN_WARNING "BT: result %d bytes:", msg_len); for (i = 0; i < msg_len; i++) printk(" %02x", data[i]); printk("\n"); } return msg_len; } #define BT_BMC_HWRST 0x80 static void reset_flags(struct si_sm_data *bt) { if (bt_debug) printk(KERN_WARNING "IPMI BT: flag reset %s\n", status2txt(BT_STATUS)); if (BT_STATUS & BT_H_BUSY) BT_CONTROL(BT_H_BUSY); BT_CONTROL(BT_CLR_WR_PTR); BT_CONTROL(BT_SMS_ATN); BT_INTMASK_W(BT_BMC_HWRST); } static void drain_BMC2HOST(struct si_sm_data *bt) { int i, size; if (!(BT_STATUS & BT_B2H_ATN)) return; BT_CONTROL(BT_H_BUSY); BT_CONTROL(BT_B2H_ATN); BT_STATUS; BT_CONTROL(BT_B2H_ATN); BT_CONTROL(BT_CLR_RD_PTR); if (bt_debug) printk(KERN_WARNING "IPMI BT: stale response %s; ", status2txt(BT_STATUS)); size = BMC2HOST; for (i = 0; i < size ; i++) BMC2HOST; BT_CONTROL(BT_H_BUSY); if (bt_debug) printk("drained %d bytes\n", size + 1); } static inline void write_all_bytes(struct si_sm_data *bt) { int i; if (bt_debug & BT_DEBUG_MSG) { printk(KERN_WARNING "BT: write %d bytes seq=0x%02X", bt->write_count, bt->seq); for (i = 0; i < bt->write_count; i++) printk(" %02x", bt->write_data[i]); printk("\n"); } for (i = 0; i < bt->write_count; i++) HOST2BMC(bt->write_data[i]); } static inline int read_all_bytes(struct si_sm_data *bt) { unsigned char i; bt->read_data[0] = BMC2HOST; bt->read_count = bt->read_data[0]; if (bt->read_count < 4 || bt->read_count >= IPMI_MAX_MSG_LENGTH) { if (bt_debug & BT_DEBUG_MSG) 
printk(KERN_WARNING "BT: bad raw rsp len=%d\n", bt->read_count); bt->truncated = 1; return 1; } for (i = 1; i <= bt->read_count; i++) bt->read_data[i] = BMC2HOST; bt->read_count++; if (bt_debug & BT_DEBUG_MSG) { int max = bt->read_count; printk(KERN_WARNING "BT: got %d bytes seq=0x%02X", max, bt->read_data[2]); if (max > 16) max = 16; for (i = 0; i < max; i++) printk(KERN_CONT " %02x", bt->read_data[i]); printk(KERN_CONT "%s\n", bt->read_count == max ? "" : " ..."); } if ((bt->read_data[3] == bt->write_data[3]) && (bt->read_data[2] == bt->write_data[2]) && ((bt->read_data[1] & 0xF8) == (bt->write_data[1] & 0xF8))) return 1; if (bt_debug & BT_DEBUG_MSG) printk(KERN_WARNING "IPMI BT: bad packet: " "want 0x(%02X, %02X, %02X) got (%02X, %02X, %02X)\n", bt->write_data[1] | 0x04, bt->write_data[2], bt->write_data[3], bt->read_data[1], bt->read_data[2], bt->read_data[3]); return 0; } static enum si_sm_result error_recovery(struct si_sm_data *bt, unsigned char status, unsigned char cCode) { char *reason; bt->timeout = bt->BT_CAP_req2rsp; switch (cCode) { case IPMI_TIMEOUT_ERR: reason = "timeout"; break; default: reason = "internal error"; break; } printk(KERN_WARNING "IPMI BT: %s in %s %s ", reason, STATE2TXT, STATUS2TXT); (bt->error_retries)++; if (bt->error_retries < bt->BT_CAP_retries) { printk("%d retries left\n", bt->BT_CAP_retries - bt->error_retries); bt->state = BT_STATE_RESTART; return SI_SM_CALL_WITHOUT_DELAY; } printk(KERN_WARNING "failed %d retries, sending error response\n", bt->BT_CAP_retries); if (!bt->nonzero_status) printk(KERN_ERR "IPMI BT: stuck, try power cycle\n"); else if (bt->seq <= (unsigned char)(bt->BT_CAP_retries & 0xFF)) { printk(KERN_WARNING "IPMI: BT reset (takes 5 secs)\n"); bt->state = BT_STATE_RESET1; return SI_SM_CALL_WITHOUT_DELAY; } bt->state = BT_STATE_IDLE; switch (cCode) { case IPMI_TIMEOUT_ERR: if (status & BT_B_BUSY) { cCode = IPMI_NODE_BUSY_ERR; bt->state = BT_STATE_LONG_BUSY; } break; default: break; } force_result(bt, cCode); 
/* (tail of error_recovery(): a forced result has been written into the
 * read buffer; tell the caller the transaction is complete) */
	return SI_SM_TRANSACTION_COMPLETE;
}

/*
 * Run one step of the BT state machine.  @time is the elapsed time
 * (same units as bt->timeout) since the last call; it is subtracted
 * from the transaction timeout.  Returns an si_sm_result telling the
 * caller whether to call again immediately, after a delay, or to fetch
 * a completed transaction.  Note: BT_STATE_CHANGE sets the next state
 * and returns, so the switch cases do not fall through.
 */
static enum si_sm_result bt_event(struct si_sm_data *bt, long time)
{
	unsigned char status, BT_CAP[8];
	static enum bt_states last_printed = BT_STATE_PRINTME;
	int i;

	status = BT_STATUS;
	bt->nonzero_status |= status;
	if ((bt_debug & BT_DEBUG_STATES) && (bt->state != last_printed)) {
		printk(KERN_WARNING "BT: %s %s TO=%ld - %ld \n",
			STATE2TXT,
			STATUS2TXT,
			bt->timeout,
			time);
		last_printed = bt->state;
	}

	/* A stale response before we have written anything: drain it. */
	if ((bt->state < BT_STATE_WRITE_BYTES) && (status & BT_B2H_ATN)) {
		drain_BMC2HOST(bt);
		BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
	}

	/* Count down the transaction timeout while a transaction is live. */
	if ((bt->state != BT_STATE_IDLE) && (bt->state < BT_STATE_PRINTME)) {
		bt->timeout -= time;
		if ((bt->timeout < 0) && (bt->state < BT_STATE_RESET1))
			return error_recovery(bt, status, IPMI_TIMEOUT_ERR);
	}

	switch (bt->state) {

	case BT_STATE_IDLE:
		if (status & BT_SMS_ATN) {
			BT_CONTROL(BT_SMS_ATN);	/* clear it */
			return SI_SM_ATTN;
		}
		if (status & BT_H_BUSY)		/* clear a leftover H_BUSY */
			BT_CONTROL(BT_H_BUSY);
		/* Fetch BT capabilities once, before the first transaction. */
		if (!bt->BT_CAP_outreqs)
			BT_STATE_CHANGE(BT_STATE_CAPABILITIES_BEGIN,
					SI_SM_CALL_WITHOUT_DELAY);
		bt->timeout = bt->BT_CAP_req2rsp;
		BT_SI_SM_RETURN(SI_SM_IDLE);

	case BT_STATE_XACTION_START:
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		if (BT_STATUS & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* force clear */
		BT_STATE_CHANGE(BT_STATE_WRITE_BYTES,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_BYTES:
		if (status & BT_H_BUSY)
			BT_CONTROL(BT_H_BUSY);	/* clear */
		BT_CONTROL(BT_CLR_WR_PTR);
		write_all_bytes(bt);
		BT_CONTROL(BT_H2B_ATN);	/* can clear too fast to catch */
		BT_STATE_CHANGE(BT_STATE_WRITE_CONSUME,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_WRITE_CONSUME:
		/* Wait for the BMC to consume the request. */
		if (status & (BT_B_BUSY | BT_H2B_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		BT_STATE_CHANGE(BT_STATE_READ_WAIT,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_READ_WAIT:
		/* Spinning hard here can take ~100 ms on some boards. */
		if (!(status & BT_B2H_ATN))
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		BT_CONTROL(BT_H_BUSY);		/* set */

		/* Per the IPMI spec this should be free for a moment. */
		BT_CONTROL(BT_B2H_ATN);		/* clear it to ACK the BMC */
		BT_STATE_CHANGE(BT_STATE_CLEAR_B2H,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_CLEAR_B2H:
		if (status & BT_B2H_ATN) {
			/* keep hitting it */
			BT_CONTROL(BT_B2H_ATN);
			BT_SI_SM_RETURN(SI_SM_CALL_WITH_DELAY);
		}
		BT_STATE_CHANGE(BT_STATE_READ_BYTES,
				SI_SM_CALL_WITHOUT_DELAY);

	case BT_STATE_READ_BYTES:
		if (!(status & BT_H_BUSY))
			/* check in case of retry */
			BT_CONTROL(BT_H_BUSY);
		BT_CONTROL(BT_CLR_RD_PTR);	/* start of BMC2HOST buffer */
		i = read_all_bytes(bt);		/* true == packet seq match */
		BT_CONTROL(BT_H_BUSY);		/* NOW clear */
		if (!i)				/* seq mismatch: go re-read */
			BT_STATE_CHANGE(BT_STATE_READ_WAIT,
					SI_SM_CALL_WITHOUT_DELAY);
		bt->state = bt->complete;
		return bt->state == BT_STATE_IDLE ?	/* where to next? */
			SI_SM_TRANSACTION_COMPLETE :	/* normal */
			SI_SM_CALL_WITHOUT_DELAY;	/* Startup magic */

	case BT_STATE_LONG_BUSY:	/* For example: after FW update */
		if (!(status & BT_B_BUSY)) {
			reset_flags(bt);	/* next state is now IDLE */
			bt_init_data(bt, bt->io);
		}
		return SI_SM_CALL_WITH_DELAY;	/* No repeat printing */

	case BT_STATE_RESET1:
		reset_flags(bt);
		drain_BMC2HOST(bt);
		BT_STATE_CHANGE(BT_STATE_RESET2,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESET2:		/* Send a soft reset */
		BT_CONTROL(BT_CLR_WR_PTR);
		HOST2BMC(3);		/* number of bytes following */
		HOST2BMC(0x18);		/* NetFn/LUN == Application, LUN 0 */
		HOST2BMC(42);		/* Sequence number */
		HOST2BMC(3);		/* Cmd == Soft reset */
		BT_CONTROL(BT_H2B_ATN);
		bt->timeout = BT_RESET_DELAY * 1000000;
		BT_STATE_CHANGE(BT_STATE_RESET3,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESET3:		/* Hold off everything for a bit */
		if (bt->timeout > 0)
			return SI_SM_CALL_WITH_DELAY;
		drain_BMC2HOST(bt);
		BT_STATE_CHANGE(BT_STATE_RESTART,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_RESTART:		/* retry the original request */
		bt->read_count = 0;
		bt->nonzero_status = 0;
		bt->timeout = bt->BT_CAP_req2rsp;
		BT_STATE_CHANGE(BT_STATE_XACTION_START,
				SI_SM_CALL_WITH_DELAY);

	/*
	 * Get BT Capabilities, using timing of upper level state machine.
	 * Set outreqs to prevent infinite recursion through
	 * bt_start_transaction().
	 */
	case BT_STATE_CAPABILITIES_BEGIN:
		bt->BT_CAP_outreqs = 1;
		{
			unsigned char GetBT_CAP[] = { 0x18, 0x36 };
			bt->state = BT_STATE_IDLE;
			bt_start_transaction(bt, GetBT_CAP, sizeof(GetBT_CAP));
		}
		bt->complete = BT_STATE_CAPABILITIES_END;
		BT_STATE_CHANGE(BT_STATE_XACTION_START,
				SI_SM_CALL_WITH_DELAY);

	case BT_STATE_CAPABILITIES_END:
		i = bt_get_result(bt, BT_CAP, sizeof(BT_CAP));
		bt_init_data(bt, bt->io);
		if ((i == 8) && !BT_CAP[2]) {
			/* Success: adopt the BMC-reported limits. */
			bt->BT_CAP_outreqs = BT_CAP[3];
			bt->BT_CAP_req2rsp = BT_CAP[6] * 1000000;
			bt->BT_CAP_retries = BT_CAP[7];
		} else
			printk(KERN_WARNING "IPMI BT: using default values\n");
		if (!bt->BT_CAP_outreqs)
			bt->BT_CAP_outreqs = 1;
		printk(KERN_WARNING "IPMI BT: req2rsp=%ld secs retries=%d\n",
			bt->BT_CAP_req2rsp / 1000000L, bt->BT_CAP_retries);
		bt->timeout = bt->BT_CAP_req2rsp;
		return SI_SM_CALL_WITHOUT_DELAY;

	default:	/* should never occur */
		return error_recovery(bt, status, IPMI_ERR_UNSPECIFIED);
	}
	return SI_SM_CALL_WITH_DELAY;
}

/*
 * Probe for a BT interface.  Both registers reading back 0xFF indicates
 * nothing is responding at those addresses (presumably a floating bus)
 * -- return nonzero for "not found"; otherwise reset the interface
 * flags and report success (0).
 */
static int bt_detect(struct si_sm_data *bt)
{
	if ((BT_STATUS == 0xFF) && (BT_INTMASK_R == 0xFF))
		return 1;
	reset_flags(bt);
	return 0;
}

/* Nothing to release: all state lives in the caller-owned si_sm_data. */
static void bt_cleanup(struct si_sm_data *bt)
{
}

/* Size of the per-interface state the caller must allocate. */
static int bt_size(void)
{
	return sizeof(struct si_sm_data);
}

/* Ops table exported to the ipmi_si interface layer. */
struct si_sm_handlers bt_smi_handlers = {
	.init_data		= bt_init_data,
	.start_transaction	= bt_start_transaction,
	.get_result		= bt_get_result,
	.event			= bt_event,
	.detect			= bt_detect,
	.cleanup		= bt_cleanup,
	.size			= bt_size,
};
gpl-2.0
htc-msm8974/android_kernel_htc_msm8974
sound/pci/mixart/mixart_hwdep.c
34
15722
/* * Driver for Digigram miXart soundcards * * DSP firmware management * * Copyright (c) 2003 by Digigram <alsa@digigram.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/interrupt.h> #include <linux/pci.h> #include <linux/firmware.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/module.h> #include <asm/io.h> #include <sound/core.h> #include "mixart.h" #include "mixart_mixer.h" #include "mixart_core.h" #include "mixart_hwdep.h" static int mixart_wait_nice_for_register_value(struct mixart_mgr *mgr, u32 offset, int is_egal, u32 value, unsigned long timeout) { unsigned long end_time = jiffies + (timeout * HZ / 100); u32 read; do { cond_resched(); read = readl_be( MIXART_MEM( mgr, offset )); if(is_egal) { if(read == value) return 0; } else { if(read != value) return 0; } } while ( time_after_eq(end_time, jiffies) ); return -EBUSY; } struct snd_mixart_elf32_ehdr { u8 e_ident[16]; u16 e_type; u16 e_machine; u32 e_version; u32 e_entry; u32 e_phoff; u32 e_shoff; u32 e_flags; u16 e_ehsize; u16 e_phentsize; u16 e_phnum; u16 e_shentsize; u16 e_shnum; u16 e_shstrndx; }; struct snd_mixart_elf32_phdr { u32 p_type; u32 p_offset; u32 p_vaddr; u32 p_paddr; u32 p_filesz; u32 p_memsz; u32 p_flags; u32 p_align; }; static int mixart_load_elf(struct mixart_mgr *mgr, const struct firmware *dsp ) { char 
elf32_magic_number[4] = {0x7f,'E','L','F'}; struct snd_mixart_elf32_ehdr *elf_header; int i; elf_header = (struct snd_mixart_elf32_ehdr *)dsp->data; for( i=0; i<4; i++ ) if ( elf32_magic_number[i] != elf_header->e_ident[i] ) return -EINVAL; if( elf_header->e_phoff != 0 ) { struct snd_mixart_elf32_phdr elf_programheader; for( i=0; i < be16_to_cpu(elf_header->e_phnum); i++ ) { u32 pos = be32_to_cpu(elf_header->e_phoff) + (u32)(i * be16_to_cpu(elf_header->e_phentsize)); memcpy( &elf_programheader, dsp->data + pos, sizeof(elf_programheader) ); if(elf_programheader.p_type != 0) { if( elf_programheader.p_filesz != 0 ) { memcpy_toio( MIXART_MEM( mgr, be32_to_cpu(elf_programheader.p_vaddr)), dsp->data + be32_to_cpu( elf_programheader.p_offset ), be32_to_cpu( elf_programheader.p_filesz )); } } } } return 0; } #define MIXART_FIRST_ANA_AUDIO_ID 0 #define MIXART_FIRST_DIG_AUDIO_ID 8 static int mixart_enum_connectors(struct mixart_mgr *mgr) { u32 k; int err; struct mixart_msg request; struct mixart_enum_connector_resp *connector; struct mixart_audio_info_req *audio_info_req; struct mixart_audio_info_resp *audio_info; connector = kmalloc(sizeof(*connector), GFP_KERNEL); audio_info_req = kmalloc(sizeof(*audio_info_req), GFP_KERNEL); audio_info = kmalloc(sizeof(*audio_info), GFP_KERNEL); if (! connector || ! audio_info_req || ! 
audio_info) { err = -ENOMEM; goto __error; } audio_info_req->line_max_level = MIXART_FLOAT_P_22_0_TO_HEX; audio_info_req->micro_max_level = MIXART_FLOAT_M_20_0_TO_HEX; audio_info_req->cd_max_level = MIXART_FLOAT____0_0_TO_HEX; request.message_id = MSG_SYSTEM_ENUM_PLAY_CONNECTOR; request.uid = (struct mixart_uid){0,0}; request.data = NULL; request.size = 0; err = snd_mixart_send_msg(mgr, &request, sizeof(*connector), connector); if((err < 0) || (connector->error_code) || (connector->uid_count > MIXART_MAX_PHYS_CONNECTORS)) { snd_printk(KERN_ERR "error MSG_SYSTEM_ENUM_PLAY_CONNECTOR\n"); err = -EINVAL; goto __error; } for(k=0; k < connector->uid_count; k++) { struct mixart_pipe *pipe; if(k < MIXART_FIRST_DIG_AUDIO_ID) { pipe = &mgr->chip[k/2]->pipe_out_ana; } else { pipe = &mgr->chip[(k-MIXART_FIRST_DIG_AUDIO_ID)/2]->pipe_out_dig; } if(k & 1) { pipe->uid_right_connector = connector->uid[k]; } else { pipe->uid_left_connector = connector->uid[k]; } request.message_id = MSG_CONNECTOR_GET_AUDIO_INFO; request.uid = connector->uid[k]; request.data = audio_info_req; request.size = sizeof(*audio_info_req); err = snd_mixart_send_msg(mgr, &request, sizeof(*audio_info), audio_info); if( err < 0 ) { snd_printk(KERN_ERR "error MSG_CONNECTOR_GET_AUDIO_INFO\n"); goto __error; } } request.message_id = MSG_SYSTEM_ENUM_RECORD_CONNECTOR; request.uid = (struct mixart_uid){0,0}; request.data = NULL; request.size = 0; err = snd_mixart_send_msg(mgr, &request, sizeof(*connector), connector); if((err < 0) || (connector->error_code) || (connector->uid_count > MIXART_MAX_PHYS_CONNECTORS)) { snd_printk(KERN_ERR "error MSG_SYSTEM_ENUM_RECORD_CONNECTOR\n"); err = -EINVAL; goto __error; } for(k=0; k < connector->uid_count; k++) { struct mixart_pipe *pipe; if(k < MIXART_FIRST_DIG_AUDIO_ID) { pipe = &mgr->chip[k/2]->pipe_in_ana; } else { pipe = &mgr->chip[(k-MIXART_FIRST_DIG_AUDIO_ID)/2]->pipe_in_dig; } if(k & 1) { pipe->uid_right_connector = connector->uid[k]; } else { pipe->uid_left_connector = 
connector->uid[k]; } request.message_id = MSG_CONNECTOR_GET_AUDIO_INFO; request.uid = connector->uid[k]; request.data = audio_info_req; request.size = sizeof(*audio_info_req); err = snd_mixart_send_msg(mgr, &request, sizeof(*audio_info), audio_info); if( err < 0 ) { snd_printk(KERN_ERR "error MSG_CONNECTOR_GET_AUDIO_INFO\n"); goto __error; } } err = 0; __error: kfree(connector); kfree(audio_info_req); kfree(audio_info); return err; } static int mixart_enum_physio(struct mixart_mgr *mgr) { u32 k; int err; struct mixart_msg request; struct mixart_uid get_console_mgr; struct mixart_return_uid console_mgr; struct mixart_uid_enumeration phys_io; get_console_mgr.object_id = 0; get_console_mgr.desc = MSG_CONSOLE_MANAGER | 0; request.message_id = MSG_CONSOLE_GET_CLOCK_UID; request.uid = get_console_mgr; request.data = &get_console_mgr; request.size = sizeof(get_console_mgr); err = snd_mixart_send_msg(mgr, &request, sizeof(console_mgr), &console_mgr); if( (err < 0) || (console_mgr.error_code != 0) ) { snd_printk(KERN_DEBUG "error MSG_CONSOLE_GET_CLOCK_UID : err=%x\n", console_mgr.error_code); return -EINVAL; } mgr->uid_console_manager = console_mgr.uid; request.message_id = MSG_SYSTEM_ENUM_PHYSICAL_IO; request.uid = (struct mixart_uid){0,0}; request.data = &console_mgr.uid; request.size = sizeof(console_mgr.uid); err = snd_mixart_send_msg(mgr, &request, sizeof(phys_io), &phys_io); if( (err < 0) || ( phys_io.error_code != 0 ) ) { snd_printk(KERN_ERR "error MSG_SYSTEM_ENUM_PHYSICAL_IO err(%x) error_code(%x)\n", err, phys_io.error_code ); return -EINVAL; } if (phys_io.nb_uid < MIXART_MAX_CARDS * 2) return -EINVAL; for(k=0; k<mgr->num_cards; k++) { mgr->chip[k]->uid_in_analog_physio = phys_io.uid[k]; mgr->chip[k]->uid_out_analog_physio = phys_io.uid[phys_io.nb_uid/2 + k]; } return 0; } static int mixart_first_init(struct mixart_mgr *mgr) { u32 k; int err; struct mixart_msg request; if((err = mixart_enum_connectors(mgr)) < 0) return err; if((err = mixart_enum_physio(mgr)) < 0) 
return err; request.message_id = MSG_SYSTEM_SEND_SYNCHRO_CMD; request.uid = (struct mixart_uid){0,0}; request.data = NULL; request.size = 0; err = snd_mixart_send_msg(mgr, &request, sizeof(k), &k); if( (err < 0) || (k != 0) ) { snd_printk(KERN_ERR "error MSG_SYSTEM_SEND_SYNCHRO_CMD\n"); return err == 0 ? -EINVAL : err; } return 0; } #define MIXART_MOTHERBOARD_XLX_BASE_ADDRESS 0x00600000 static int mixart_dsp_load(struct mixart_mgr* mgr, int index, const struct firmware *dsp) { int err, card_index; u32 status_xilinx, status_elf, status_daught; u32 val; status_xilinx = readl_be( MIXART_MEM( mgr,MIXART_PSEUDOREG_MXLX_STATUS_OFFSET )); status_elf = readl_be( MIXART_MEM( mgr,MIXART_PSEUDOREG_ELF_STATUS_OFFSET )); status_daught = readl_be( MIXART_MEM( mgr,MIXART_PSEUDOREG_DXLX_STATUS_OFFSET )); if (status_xilinx == 5) { snd_printk(KERN_ERR "miXart is resetting !\n"); return -EAGAIN; } switch (index) { case MIXART_MOTHERBOARD_XLX_INDEX: if (status_xilinx == 4) { snd_printk(KERN_DEBUG "xilinx is already loaded !\n"); return 0; } if (status_xilinx != 0) { snd_printk(KERN_ERR "xilinx load error ! status = %d\n", status_xilinx); return -EIO; } if (((u32*)(dsp->data))[0] == 0xffffffff) return -EINVAL; if (dsp->size % 4) return -EINVAL; writel_be( 1, MIXART_MEM( mgr, MIXART_PSEUDOREG_MXLX_STATUS_OFFSET )); writel_be( MIXART_MOTHERBOARD_XLX_BASE_ADDRESS, MIXART_MEM( mgr,MIXART_PSEUDOREG_MXLX_BASE_ADDR_OFFSET )); writel_be( dsp->size, MIXART_MEM( mgr, MIXART_PSEUDOREG_MXLX_SIZE_OFFSET )); memcpy_toio( MIXART_MEM( mgr, MIXART_MOTHERBOARD_XLX_BASE_ADDRESS), dsp->data, dsp->size); writel_be( 2, MIXART_MEM( mgr, MIXART_PSEUDOREG_MXLX_STATUS_OFFSET )); return 0; case MIXART_MOTHERBOARD_ELF_INDEX: if (status_elf == 4) { snd_printk(KERN_DEBUG "elf file already loaded !\n"); return 0; } if (status_elf != 0) { snd_printk(KERN_ERR "elf load error ! 
status = %d\n", status_elf); return -EIO; } err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_MXLX_STATUS_OFFSET, 1, 4, 500); if (err < 0) { snd_printk(KERN_ERR "xilinx was not loaded or " "could not be started\n"); return err; } writel_be( 0, MIXART_MEM( mgr, MIXART_PSEUDOREG_BOARDNUMBER ) ); writel_be( 0, MIXART_MEM( mgr, MIXART_FLOWTABLE_PTR ) ); writel_be( 1, MIXART_MEM( mgr, MIXART_PSEUDOREG_ELF_STATUS_OFFSET )); err = mixart_load_elf( mgr, dsp ); if (err < 0) return err; writel_be( 2, MIXART_MEM( mgr, MIXART_PSEUDOREG_ELF_STATUS_OFFSET )); err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_ELF_STATUS_OFFSET, 1, 4, 300); if (err < 0) { snd_printk(KERN_ERR "elf could not be started\n"); return err; } writel_be( (u32)mgr->flowinfo.addr, MIXART_MEM( mgr, MIXART_FLOWTABLE_PTR ) ); return 0; case MIXART_AESEBUBOARD_XLX_INDEX: default: if (status_elf != 4 || status_xilinx != 4) { printk(KERN_ERR "xilinx or elf not " "successfully loaded\n"); return -EIO; } err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_DBRD_PRESENCE_OFFSET, 0, 0, 30); if (err < 0) { snd_printk(KERN_ERR "error starting elf file\n"); return err; } mgr->board_type = (DAUGHTER_TYPE_MASK & readl_be( MIXART_MEM( mgr, MIXART_PSEUDOREG_DBRD_TYPE_OFFSET))); if (mgr->board_type == MIXART_DAUGHTER_TYPE_NONE) break; if (mgr->board_type != MIXART_DAUGHTER_TYPE_AES ) return -EINVAL; if (status_daught != 0) { printk(KERN_ERR "daughter load error ! 
status = %d\n", status_daught); return -EIO; } if (((u32*)(dsp->data))[0] == 0xffffffff) return -EINVAL; if (dsp->size % 4) return -EINVAL; writel_be( dsp->size, MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_SIZE_OFFSET )); writel_be( 1, MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET )); err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET, 1, 2, 30); if (err < 0) { snd_printk(KERN_ERR "daughter board load error\n"); return err; } val = readl_be( MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_BASE_ADDR_OFFSET )); if (!val) return -EINVAL; memcpy_toio( MIXART_MEM( mgr, val), dsp->data, dsp->size); writel_be( 4, MIXART_MEM( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET )); break; } err = mixart_wait_nice_for_register_value( mgr, MIXART_PSEUDOREG_DXLX_STATUS_OFFSET, 1, 3, 300); if (err < 0) { snd_printk(KERN_ERR "daughter board could not be initialised\n"); return err; } snd_mixart_init_mailbox(mgr); err = mixart_first_init(mgr); if (err < 0) { snd_printk(KERN_ERR "miXart could not be set up\n"); return err; } for (card_index = 0; card_index < mgr->num_cards; card_index++) { struct snd_mixart *chip = mgr->chip[card_index]; if ((err = snd_mixart_create_pcm(chip)) < 0) return err; if (card_index == 0) { if ((err = snd_mixart_create_mixer(chip->mgr)) < 0) return err; } if ((err = snd_card_register(chip->card)) < 0) return err; }; snd_printdd("miXart firmware downloaded and successfully set up\n"); return 0; } #if defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE) #if !defined(CONFIG_USE_MIXARTLOADER) && !defined(CONFIG_SND_MIXART) #define SND_MIXART_FW_LOADER #endif #endif #ifdef SND_MIXART_FW_LOADER int snd_mixart_setup_firmware(struct mixart_mgr *mgr) { static char *fw_files[3] = { "miXart8.xlx", "miXart8.elf", "miXart8AES.xlx" }; char path[32]; const struct firmware *fw_entry; int i, err; for (i = 0; i < 3; i++) { sprintf(path, "mixart/%s", fw_files[i]); if (request_firmware(&fw_entry, path, &mgr->pci->dev)) { snd_printk(KERN_ERR "miXart: 
can't load firmware %s\n", path); return -ENOENT; } err = mixart_dsp_load(mgr, i, fw_entry); release_firmware(fw_entry); if (err < 0) return err; mgr->dsp_loaded |= 1 << i; } return 0; } MODULE_FIRMWARE("mixart/miXart8.xlx"); MODULE_FIRMWARE("mixart/miXart8.elf"); MODULE_FIRMWARE("mixart/miXart8AES.xlx"); #else #define SND_MIXART_HWDEP_ID "miXart Loader" static int mixart_hwdep_dsp_status(struct snd_hwdep *hw, struct snd_hwdep_dsp_status *info) { struct mixart_mgr *mgr = hw->private_data; strcpy(info->id, "miXart"); info->num_dsps = MIXART_HARDW_FILES_MAX_INDEX; if (mgr->dsp_loaded & (1 << MIXART_MOTHERBOARD_ELF_INDEX)) info->chip_ready = 1; info->version = MIXART_DRIVER_VERSION; return 0; } static int mixart_hwdep_dsp_load(struct snd_hwdep *hw, struct snd_hwdep_dsp_image *dsp) { struct mixart_mgr* mgr = hw->private_data; struct firmware fw; int err; fw.size = dsp->length; fw.data = vmalloc(dsp->length); if (! fw.data) { snd_printk(KERN_ERR "miXart: cannot allocate image size %d\n", (int)dsp->length); return -ENOMEM; } if (copy_from_user((void *) fw.data, dsp->image, dsp->length)) { vfree(fw.data); return -EFAULT; } err = mixart_dsp_load(mgr, dsp->index, &fw); vfree(fw.data); if (err < 0) return err; mgr->dsp_loaded |= 1 << dsp->index; return err; } int snd_mixart_setup_firmware(struct mixart_mgr *mgr) { int err; struct snd_hwdep *hw; if ((err = snd_hwdep_new(mgr->chip[0]->card, SND_MIXART_HWDEP_ID, 0, &hw)) < 0) return err; hw->iface = SNDRV_HWDEP_IFACE_MIXART; hw->private_data = mgr; hw->ops.dsp_status = mixart_hwdep_dsp_status; hw->ops.dsp_load = mixart_hwdep_dsp_load; hw->exclusive = 1; sprintf(hw->name, SND_MIXART_HWDEP_ID); mgr->dsp_loaded = 0; return snd_card_register(mgr->chip[0]->card); } #endif
gpl-2.0
Owain94/android_kernel_htc_msm8974
arch/arm/mach-at91/board-rsi-ews.c
34
4043
/* * board-rsi-ews.c * * Copyright (C) * 2005 SAN People, * 2008-2011 R-S-I Elektrotechnik GmbH & Co. KG * * Licensed under GPLv2 or later. */ #include <linux/types.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spi/spi.h> #include <linux/mtd/physmap.h> #include <asm/setup.h> #include <asm/mach-types.h> #include <asm/irq.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/board.h> #include <linux/gpio.h> #include "generic.h" static void __init rsi_ews_init_early(void) { at91_initialize(18432000); at91_init_leds(AT91_PIN_PB6, AT91_PIN_PB9); at91_register_uart(0, 0, 0); at91_register_uart(AT91RM9200_ID_US1, 2, ATMEL_UART_CTS | ATMEL_UART_RTS | ATMEL_UART_DTR | ATMEL_UART_DSR | ATMEL_UART_DCD | ATMEL_UART_RI); at91_register_uart(AT91RM9200_ID_US3, 4, ATMEL_UART_RTS); at91_set_serial_console(0); } static struct macb_platform_data rsi_ews_eth_data __initdata = { .phy_irq_pin = AT91_PIN_PC4, .is_rmii = 1, }; static struct at91_usbh_data rsi_ews_usbh_data __initdata = { .ports = 1, .vbus_pin = {-EINVAL, -EINVAL}, .overcurrent_pin= {-EINVAL, -EINVAL}, }; static struct at91_mmc_data rsi_ews_mmc_data __initdata = { .slot_b = 0, .wire4 = 1, .det_pin = AT91_PIN_PB27, .wp_pin = AT91_PIN_PB29, }; static struct i2c_board_info rsi_ews_i2c_devices[] __initdata = { { I2C_BOARD_INFO("ds1337", 0x68), }, { I2C_BOARD_INFO("24c01", 0x50), } }; static struct gpio_led rsi_ews_leds[] = { { .name = "led0", .gpio = AT91_PIN_PB6, .active_low = 0, }, { .name = "led1", .gpio = AT91_PIN_PB7, .active_low = 0, }, { .name = "led2", .gpio = AT91_PIN_PB8, .active_low = 0, }, { .name = "led3", .gpio = AT91_PIN_PB9, .active_low = 0, }, }; static struct spi_board_info rsi_ews_spi_devices[] = { { .modalias = "mtd_dataflash", .chip_select = 0, .max_speed_hz = 5 * 1000 * 1000, }, { .modalias = "mtd_dataflash", .chip_select = 1, .max_speed_hz = 5 * 1000 * 
1000, }, }; static struct mtd_partition rsiews_nor_partitions[] = { { .name = "boot", .offset = 0, .size = 3 * SZ_128K, .mask_flags = MTD_WRITEABLE }, { .name = "kernel", .offset = MTDPART_OFS_NXTBLK, .size = SZ_2M - (3 * SZ_128K) }, { .name = "root", .offset = MTDPART_OFS_NXTBLK, .size = SZ_8M }, { .name = "kernelupd", .offset = MTDPART_OFS_NXTBLK, .size = 3 * SZ_512K, .mask_flags = MTD_WRITEABLE }, { .name = "rootupd", .offset = MTDPART_OFS_NXTBLK, .size = 9 * SZ_512K, .mask_flags = MTD_WRITEABLE }, }; static struct physmap_flash_data rsiews_nor_data = { .width = 2, .parts = rsiews_nor_partitions, .nr_parts = ARRAY_SIZE(rsiews_nor_partitions), }; #define NOR_BASE AT91_CHIPSELECT_0 #define NOR_SIZE SZ_16M static struct resource nor_flash_resources[] = { { .start = NOR_BASE, .end = NOR_BASE + NOR_SIZE - 1, .flags = IORESOURCE_MEM, } }; static struct platform_device rsiews_nor_flash = { .name = "physmap-flash", .id = 0, .dev = { .platform_data = &rsiews_nor_data, }, .resource = nor_flash_resources, .num_resources = ARRAY_SIZE(nor_flash_resources), }; static void __init rsi_ews_board_init(void) { at91_add_device_serial(); at91_set_gpio_output(AT91_PIN_PA21, 0); at91_add_device_eth(&rsi_ews_eth_data); at91_add_device_usbh(&rsi_ews_usbh_data); at91_add_device_i2c(rsi_ews_i2c_devices, ARRAY_SIZE(rsi_ews_i2c_devices)); at91_add_device_spi(rsi_ews_spi_devices, ARRAY_SIZE(rsi_ews_spi_devices)); at91_add_device_mmc(0, &rsi_ews_mmc_data); platform_device_register(&rsiews_nor_flash); at91_gpio_leds(rsi_ews_leds, ARRAY_SIZE(rsi_ews_leds)); } MACHINE_START(RSI_EWS, "RSI EWS") .timer = &at91rm9200_timer, .map_io = at91_map_io, .init_early = rsi_ews_init_early, .init_irq = at91_init_irq_default, .init_machine = rsi_ews_board_init, MACHINE_END
gpl-2.0
Owain94/android_kernel_htc_msm8974
drivers/scsi/device_handler/scsi_dh_alua.c
34
16548
/* * Generic SCSI-3 ALUA SCSI Device Handler * * Copyright (C) 2007-2010 Hannes Reinecke, SUSE Linux Products GmbH. * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * */ #include <linux/slab.h> #include <linux/delay.h> #include <linux/module.h> #include <scsi/scsi.h> #include <scsi/scsi_eh.h> #include <scsi/scsi_dh.h> #define ALUA_DH_NAME "alua" #define ALUA_DH_VER "1.3" #define TPGS_STATE_OPTIMIZED 0x0 #define TPGS_STATE_NONOPTIMIZED 0x1 #define TPGS_STATE_STANDBY 0x2 #define TPGS_STATE_UNAVAILABLE 0x3 #define TPGS_STATE_LBA_DEPENDENT 0x4 #define TPGS_STATE_OFFLINE 0xe #define TPGS_STATE_TRANSITIONING 0xf #define TPGS_SUPPORT_NONE 0x00 #define TPGS_SUPPORT_OPTIMIZED 0x01 #define TPGS_SUPPORT_NONOPTIMIZED 0x02 #define TPGS_SUPPORT_STANDBY 0x04 #define TPGS_SUPPORT_UNAVAILABLE 0x08 #define TPGS_SUPPORT_LBA_DEPENDENT 0x10 #define TPGS_SUPPORT_OFFLINE 0x40 #define TPGS_SUPPORT_TRANSITION 0x80 #define TPGS_MODE_UNINITIALIZED -1 #define TPGS_MODE_NONE 0x0 #define TPGS_MODE_IMPLICIT 0x1 #define TPGS_MODE_EXPLICIT 0x2 #define ALUA_INQUIRY_SIZE 36 #define ALUA_FAILOVER_TIMEOUT (60 * HZ) #define ALUA_FAILOVER_RETRIES 5 struct alua_dh_data { int group_id; int rel_port; int tpgs; int state; unsigned char inq[ALUA_INQUIRY_SIZE]; unsigned char *buff; int bufflen; unsigned char sense[SCSI_SENSE_BUFFERSIZE]; int 
senselen; struct scsi_device *sdev; activate_complete callback_fn; void *callback_data; }; #define ALUA_POLICY_SWITCH_CURRENT 0 #define ALUA_POLICY_SWITCH_ALL 1 static char print_alua_state(int); static int alua_check_sense(struct scsi_device *, struct scsi_sense_hdr *); static inline struct alua_dh_data *get_alua_data(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data = sdev->scsi_dh_data; BUG_ON(scsi_dh_data == NULL); return ((struct alua_dh_data *) scsi_dh_data->buf); } static int realloc_buffer(struct alua_dh_data *h, unsigned len) { if (h->buff && h->buff != h->inq) kfree(h->buff); h->buff = kmalloc(len, GFP_NOIO); if (!h->buff) { h->buff = h->inq; h->bufflen = ALUA_INQUIRY_SIZE; return 1; } h->bufflen = len; return 0; } static struct request *get_alua_req(struct scsi_device *sdev, void *buffer, unsigned buflen, int rw) { struct request *rq; struct request_queue *q = sdev->request_queue; rq = blk_get_request(q, rw, GFP_NOIO); if (!rq) { sdev_printk(KERN_INFO, sdev, "%s: blk_get_request failed\n", __func__); return NULL; } if (buflen && blk_rq_map_kern(q, rq, buffer, buflen, GFP_NOIO)) { blk_put_request(rq); sdev_printk(KERN_INFO, sdev, "%s: blk_rq_map_kern failed\n", __func__); return NULL; } rq->cmd_type = REQ_TYPE_BLOCK_PC; rq->cmd_flags |= REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER; rq->retries = ALUA_FAILOVER_RETRIES; rq->timeout = ALUA_FAILOVER_TIMEOUT; return rq; } static int submit_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h) { struct request *rq; int err = SCSI_DH_RES_TEMP_UNAVAIL; rq = get_alua_req(sdev, h->buff, h->bufflen, READ); if (!rq) goto done; rq->cmd[0] = INQUIRY; rq->cmd[1] = 1; rq->cmd[2] = 0x83; rq->cmd[4] = h->bufflen; rq->cmd_len = COMMAND_SIZE(INQUIRY); rq->sense = h->sense; memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); rq->sense_len = h->senselen = 0; err = blk_execute_rq(rq->q, NULL, rq, 1); if (err == -EIO) { sdev_printk(KERN_INFO, sdev, "%s: evpd inquiry failed with %x\n", 
ALUA_DH_NAME, rq->errors); h->senselen = rq->sense_len; err = SCSI_DH_IO; } blk_put_request(rq); done: return err; } static unsigned submit_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) { struct request *rq; int err = SCSI_DH_RES_TEMP_UNAVAIL; rq = get_alua_req(sdev, h->buff, h->bufflen, READ); if (!rq) goto done; rq->cmd[0] = MAINTENANCE_IN; rq->cmd[1] = MI_REPORT_TARGET_PGS; rq->cmd[6] = (h->bufflen >> 24) & 0xff; rq->cmd[7] = (h->bufflen >> 16) & 0xff; rq->cmd[8] = (h->bufflen >> 8) & 0xff; rq->cmd[9] = h->bufflen & 0xff; rq->cmd_len = COMMAND_SIZE(MAINTENANCE_IN); rq->sense = h->sense; memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); rq->sense_len = h->senselen = 0; err = blk_execute_rq(rq->q, NULL, rq, 1); if (err == -EIO) { sdev_printk(KERN_INFO, sdev, "%s: rtpg failed with %x\n", ALUA_DH_NAME, rq->errors); h->senselen = rq->sense_len; err = SCSI_DH_IO; } blk_put_request(rq); done: return err; } static void stpg_endio(struct request *req, int error) { struct alua_dh_data *h = req->end_io_data; struct scsi_sense_hdr sense_hdr; unsigned err = SCSI_DH_OK; if (error || host_byte(req->errors) != DID_OK || msg_byte(req->errors) != COMMAND_COMPLETE) { err = SCSI_DH_IO; goto done; } if (h->senselen > 0) { err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr); if (!err) { err = SCSI_DH_IO; goto done; } err = alua_check_sense(h->sdev, &sense_hdr); if (err == ADD_TO_MLQUEUE) { err = SCSI_DH_RETRY; goto done; } sdev_printk(KERN_INFO, h->sdev, "%s: stpg sense code: %02x/%02x/%02x\n", ALUA_DH_NAME, sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq); err = SCSI_DH_IO; } if (err == SCSI_DH_OK) { h->state = TPGS_STATE_OPTIMIZED; sdev_printk(KERN_INFO, h->sdev, "%s: port group %02x switched to state %c\n", ALUA_DH_NAME, h->group_id, print_alua_state(h->state)); } done: req->end_io_data = NULL; __blk_put_request(req->q, req); if (h->callback_fn) { h->callback_fn(h->callback_data, err); h->callback_fn = h->callback_data = NULL; } return; } static unsigned 
submit_stpg(struct alua_dh_data *h) { struct request *rq; int stpg_len = 8; struct scsi_device *sdev = h->sdev; memset(h->buff, 0, stpg_len); h->buff[4] = TPGS_STATE_OPTIMIZED & 0x0f; h->buff[6] = (h->group_id >> 8) & 0xff; h->buff[7] = h->group_id & 0xff; rq = get_alua_req(sdev, h->buff, stpg_len, WRITE); if (!rq) return SCSI_DH_RES_TEMP_UNAVAIL; rq->cmd[0] = MAINTENANCE_OUT; rq->cmd[1] = MO_SET_TARGET_PGS; rq->cmd[6] = (stpg_len >> 24) & 0xff; rq->cmd[7] = (stpg_len >> 16) & 0xff; rq->cmd[8] = (stpg_len >> 8) & 0xff; rq->cmd[9] = stpg_len & 0xff; rq->cmd_len = COMMAND_SIZE(MAINTENANCE_OUT); rq->sense = h->sense; memset(rq->sense, 0, SCSI_SENSE_BUFFERSIZE); rq->sense_len = h->senselen = 0; rq->end_io_data = h; blk_execute_rq_nowait(rq->q, NULL, rq, 1, stpg_endio); return SCSI_DH_OK; } static int alua_check_tpgs(struct scsi_device *sdev, struct alua_dh_data *h) { int err = SCSI_DH_OK; h->tpgs = scsi_device_tpgs(sdev); switch (h->tpgs) { case TPGS_MODE_EXPLICIT|TPGS_MODE_IMPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports implicit and explicit TPGS\n", ALUA_DH_NAME); break; case TPGS_MODE_EXPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports explicit TPGS\n", ALUA_DH_NAME); break; case TPGS_MODE_IMPLICIT: sdev_printk(KERN_INFO, sdev, "%s: supports implicit TPGS\n", ALUA_DH_NAME); break; default: h->tpgs = TPGS_MODE_NONE; sdev_printk(KERN_INFO, sdev, "%s: not supported\n", ALUA_DH_NAME); err = SCSI_DH_DEV_UNSUPP; break; } return err; } static int alua_vpd_inquiry(struct scsi_device *sdev, struct alua_dh_data *h) { int len; unsigned err; unsigned char *d; retry: err = submit_vpd_inquiry(sdev, h); if (err != SCSI_DH_OK) return err; len = (h->buff[2] << 8) + h->buff[3] + 4; if (len > h->bufflen) { if (realloc_buffer(h, len)) { sdev_printk(KERN_WARNING, sdev, "%s: kmalloc buffer failed\n", ALUA_DH_NAME); return SCSI_DH_DEV_TEMP_BUSY; } goto retry; } d = h->buff + 4; while (d < h->buff + len) { switch (d[1] & 0xf) { case 0x4: h->rel_port = (d[6] << 8) + d[7]; break; case 
0x5: h->group_id = (d[6] << 8) + d[7]; break; default: break; } d += d[3] + 4; } if (h->group_id == -1) { sdev_printk(KERN_INFO, sdev, "%s: No target port descriptors found\n", ALUA_DH_NAME); h->state = TPGS_STATE_OPTIMIZED; h->tpgs = TPGS_MODE_NONE; err = SCSI_DH_DEV_UNSUPP; } else { sdev_printk(KERN_INFO, sdev, "%s: port group %02x rel port %02x\n", ALUA_DH_NAME, h->group_id, h->rel_port); } return err; } static char print_alua_state(int state) { switch (state) { case TPGS_STATE_OPTIMIZED: return 'A'; case TPGS_STATE_NONOPTIMIZED: return 'N'; case TPGS_STATE_STANDBY: return 'S'; case TPGS_STATE_UNAVAILABLE: return 'U'; case TPGS_STATE_LBA_DEPENDENT: return 'L'; case TPGS_STATE_OFFLINE: return 'O'; case TPGS_STATE_TRANSITIONING: return 'T'; default: return 'X'; } } static int alua_check_sense(struct scsi_device *sdev, struct scsi_sense_hdr *sense_hdr) { switch (sense_hdr->sense_key) { case NOT_READY: if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0a) return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0b) return SUCCESS; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x0c) return SUCCESS; if (sense_hdr->asc == 0x04 && sense_hdr->ascq == 0x12) return SUCCESS; break; case UNIT_ATTENTION: if (sense_hdr->asc == 0x29 && sense_hdr->ascq == 0x00) return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x01) return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x06) return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x2a && sense_hdr->ascq == 0x07) return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x03) return ADD_TO_MLQUEUE; if (sense_hdr->asc == 0x3f && sense_hdr->ascq == 0x0e) return ADD_TO_MLQUEUE; break; } return SCSI_RETURN_NOT_HANDLED; } static int alua_rtpg(struct scsi_device *sdev, struct alua_dh_data *h) { struct scsi_sense_hdr sense_hdr; int len, k, off, valid_states = 0; unsigned char *ucp; unsigned err; unsigned long expiry, interval = 1000; expiry = round_jiffies_up(jiffies + 
ALUA_FAILOVER_TIMEOUT); retry: err = submit_rtpg(sdev, h); if (err == SCSI_DH_IO && h->senselen > 0) { err = scsi_normalize_sense(h->sense, SCSI_SENSE_BUFFERSIZE, &sense_hdr); if (!err) return SCSI_DH_IO; err = alua_check_sense(sdev, &sense_hdr); if (err == ADD_TO_MLQUEUE && time_before(jiffies, expiry)) goto retry; sdev_printk(KERN_INFO, sdev, "%s: rtpg sense code %02x/%02x/%02x\n", ALUA_DH_NAME, sense_hdr.sense_key, sense_hdr.asc, sense_hdr.ascq); err = SCSI_DH_IO; } if (err != SCSI_DH_OK) return err; len = (h->buff[0] << 24) + (h->buff[1] << 16) + (h->buff[2] << 8) + h->buff[3] + 4; if (len > h->bufflen) { if (realloc_buffer(h, len)) { sdev_printk(KERN_WARNING, sdev, "%s: kmalloc buffer failed\n",__func__); return SCSI_DH_DEV_TEMP_BUSY; } goto retry; } for (k = 4, ucp = h->buff + 4; k < len; k += off, ucp += off) { if (h->group_id == (ucp[2] << 8) + ucp[3]) { h->state = ucp[0] & 0x0f; valid_states = ucp[1]; } off = 8 + (ucp[7] * 4); } sdev_printk(KERN_INFO, sdev, "%s: port group %02x state %c supports %c%c%c%c%c%c%c\n", ALUA_DH_NAME, h->group_id, print_alua_state(h->state), valid_states&TPGS_SUPPORT_TRANSITION?'T':'t', valid_states&TPGS_SUPPORT_OFFLINE?'O':'o', valid_states&TPGS_SUPPORT_LBA_DEPENDENT?'L':'l', valid_states&TPGS_SUPPORT_UNAVAILABLE?'U':'u', valid_states&TPGS_SUPPORT_STANDBY?'S':'s', valid_states&TPGS_SUPPORT_NONOPTIMIZED?'N':'n', valid_states&TPGS_SUPPORT_OPTIMIZED?'A':'a'); switch (h->state) { case TPGS_STATE_TRANSITIONING: if (time_before(jiffies, expiry)) { interval *= 2; msleep(interval); goto retry; } err = SCSI_DH_RETRY; h->state = TPGS_STATE_STANDBY; break; case TPGS_STATE_OFFLINE: case TPGS_STATE_UNAVAILABLE: err = SCSI_DH_DEV_OFFLINED; break; default: err = SCSI_DH_OK; break; } return err; } static int alua_initialize(struct scsi_device *sdev, struct alua_dh_data *h) { int err; err = alua_check_tpgs(sdev, h); if (err != SCSI_DH_OK) goto out; err = alua_vpd_inquiry(sdev, h); if (err != SCSI_DH_OK) goto out; err = alua_rtpg(sdev, h); if 
(err != SCSI_DH_OK) goto out; out: return err; } static int alua_activate(struct scsi_device *sdev, activate_complete fn, void *data) { struct alua_dh_data *h = get_alua_data(sdev); int err = SCSI_DH_OK; err = alua_rtpg(sdev, h); if (err != SCSI_DH_OK) goto out; if (h->tpgs & TPGS_MODE_EXPLICIT && h->state != TPGS_STATE_OPTIMIZED && h->state != TPGS_STATE_LBA_DEPENDENT) { h->callback_fn = fn; h->callback_data = data; err = submit_stpg(h); if (err == SCSI_DH_OK) return 0; h->callback_fn = h->callback_data = NULL; } out: if (fn) fn(data, err); return 0; } static int alua_prep_fn(struct scsi_device *sdev, struct request *req) { struct alua_dh_data *h = get_alua_data(sdev); int ret = BLKPREP_OK; if (h->state == TPGS_STATE_TRANSITIONING) ret = BLKPREP_DEFER; else if (h->state != TPGS_STATE_OPTIMIZED && h->state != TPGS_STATE_NONOPTIMIZED && h->state != TPGS_STATE_LBA_DEPENDENT) { ret = BLKPREP_KILL; req->cmd_flags |= REQ_QUIET; } return ret; } static bool alua_match(struct scsi_device *sdev) { return (scsi_device_tpgs(sdev) != 0); } static int alua_bus_attach(struct scsi_device *sdev); static void alua_bus_detach(struct scsi_device *sdev); static struct scsi_device_handler alua_dh = { .name = ALUA_DH_NAME, .module = THIS_MODULE, .attach = alua_bus_attach, .detach = alua_bus_detach, .prep_fn = alua_prep_fn, .check_sense = alua_check_sense, .activate = alua_activate, .match = alua_match, }; static int alua_bus_attach(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data; struct alua_dh_data *h; unsigned long flags; int err = SCSI_DH_OK; scsi_dh_data = kzalloc(sizeof(*scsi_dh_data) + sizeof(*h) , GFP_KERNEL); if (!scsi_dh_data) { sdev_printk(KERN_ERR, sdev, "%s: Attach failed\n", ALUA_DH_NAME); return -ENOMEM; } scsi_dh_data->scsi_dh = &alua_dh; h = (struct alua_dh_data *) scsi_dh_data->buf; h->tpgs = TPGS_MODE_UNINITIALIZED; h->state = TPGS_STATE_OPTIMIZED; h->group_id = -1; h->rel_port = -1; h->buff = h->inq; h->bufflen = ALUA_INQUIRY_SIZE; h->sdev = sdev; err = 
alua_initialize(sdev, h); if ((err != SCSI_DH_OK) && (err != SCSI_DH_DEV_OFFLINED)) goto failed; if (!try_module_get(THIS_MODULE)) goto failed; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); sdev->scsi_dh_data = scsi_dh_data; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); sdev_printk(KERN_NOTICE, sdev, "%s: Attached\n", ALUA_DH_NAME); return 0; failed: kfree(scsi_dh_data); sdev_printk(KERN_ERR, sdev, "%s: not attached\n", ALUA_DH_NAME); return -EINVAL; } static void alua_bus_detach(struct scsi_device *sdev) { struct scsi_dh_data *scsi_dh_data; struct alua_dh_data *h; unsigned long flags; spin_lock_irqsave(sdev->request_queue->queue_lock, flags); scsi_dh_data = sdev->scsi_dh_data; sdev->scsi_dh_data = NULL; spin_unlock_irqrestore(sdev->request_queue->queue_lock, flags); h = (struct alua_dh_data *) scsi_dh_data->buf; if (h->buff && h->inq != h->buff) kfree(h->buff); kfree(scsi_dh_data); module_put(THIS_MODULE); sdev_printk(KERN_NOTICE, sdev, "%s: Detached\n", ALUA_DH_NAME); } static int __init alua_init(void) { int r; r = scsi_register_device_handler(&alua_dh); if (r != 0) printk(KERN_ERR "%s: Failed to register scsi device handler", ALUA_DH_NAME); return r; } static void __exit alua_exit(void) { scsi_unregister_device_handler(&alua_dh); } module_init(alua_init); module_exit(alua_exit); MODULE_DESCRIPTION("DM Multipath ALUA support"); MODULE_AUTHOR("Hannes Reinecke <hare@suse.de>"); MODULE_LICENSE("GPL"); MODULE_VERSION(ALUA_DH_VER);
gpl-2.0
jameshilliard/m8whl-3.4.0-g278eae8
fs/affs/namei.c
34
10286
/* * linux/fs/affs/namei.c * * (c) 1996 Hans-Joachim Widmaier - Rewritten * * (C) 1993 Ray Burr - Modified for Amiga FFS filesystem. * * (C) 1991 Linus Torvalds - minix filesystem */ #include "affs.h" typedef int (*toupper_t)(int); static int affs_toupper(int ch); static int affs_hash_dentry(const struct dentry *, const struct inode *, struct qstr *); static int affs_compare_dentry(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name); static int affs_intl_toupper(int ch); static int affs_intl_hash_dentry(const struct dentry *, const struct inode *, struct qstr *); static int affs_intl_compare_dentry(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name); const struct dentry_operations affs_dentry_operations = { .d_hash = affs_hash_dentry, .d_compare = affs_compare_dentry, }; const struct dentry_operations affs_intl_dentry_operations = { .d_hash = affs_intl_hash_dentry, .d_compare = affs_intl_compare_dentry, }; static int affs_toupper(int ch) { return ch >= 'a' && ch <= 'z' ? ch -= ('a' - 'A') : ch; } static int affs_intl_toupper(int ch) { return (ch >= 'a' && ch <= 'z') || (ch >= 0xE0 && ch <= 0xFE && ch != 0xF7) ? ch - ('a' - 'A') : ch; } static inline toupper_t affs_get_toupper(struct super_block *sb) { return AFFS_SB(sb)->s_flags & SF_INTL ? 
affs_intl_toupper : affs_toupper; } static inline int __affs_hash_dentry(struct qstr *qstr, toupper_t toupper) { const u8 *name = qstr->name; unsigned long hash; int i; i = affs_check_name(qstr->name, qstr->len); if (i) return i; hash = init_name_hash(); i = min(qstr->len, 30u); for (; i > 0; name++, i--) hash = partial_name_hash(toupper(*name), hash); qstr->hash = end_name_hash(hash); return 0; } static int affs_hash_dentry(const struct dentry *dentry, const struct inode *inode, struct qstr *qstr) { return __affs_hash_dentry(qstr, affs_toupper); } static int affs_intl_hash_dentry(const struct dentry *dentry, const struct inode *inode, struct qstr *qstr) { return __affs_hash_dentry(qstr, affs_intl_toupper); } static inline int __affs_compare_dentry(unsigned int len, const char *str, const struct qstr *name, toupper_t toupper) { const u8 *aname = str; const u8 *bname = name->name; if (affs_check_name(name->name, name->len)) return 1; if (len >= 30) { if (name->len < 30) return 1; len = 30; } else if (len != name->len) return 1; for (; len > 0; len--) if (toupper(*aname++) != toupper(*bname++)) return 1; return 0; } static int affs_compare_dentry(const struct dentry *parent, const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { return __affs_compare_dentry(len, str, name, affs_toupper); } static int affs_intl_compare_dentry(const struct dentry *parent,const struct inode *pinode, const struct dentry *dentry, const struct inode *inode, unsigned int len, const char *str, const struct qstr *name) { return __affs_compare_dentry(len, str, name, affs_intl_toupper); } static inline int affs_match(struct dentry *dentry, const u8 *name2, toupper_t toupper) { const u8 *name = dentry->d_name.name; int len = dentry->d_name.len; if (len >= 30) { if (*name2 < 30) return 0; len = 30; } else if (len != *name2) return 0; for (name2++; len > 0; len--) if (toupper(*name++) != toupper(*name2++)) 
return 0; return 1; } int affs_hash_name(struct super_block *sb, const u8 *name, unsigned int len) { toupper_t toupper = affs_get_toupper(sb); int hash; hash = len = min(len, 30u); for (; len > 0; len--) hash = (hash * 13 + toupper(*name++)) & 0x7ff; return hash % AFFS_SB(sb)->s_hashsize; } static struct buffer_head * affs_find_entry(struct inode *dir, struct dentry *dentry) { struct super_block *sb = dir->i_sb; struct buffer_head *bh; toupper_t toupper = affs_get_toupper(sb); u32 key; pr_debug("AFFS: find_entry(\"%.*s\")\n", (int)dentry->d_name.len, dentry->d_name.name); bh = affs_bread(sb, dir->i_ino); if (!bh) return ERR_PTR(-EIO); key = be32_to_cpu(AFFS_HEAD(bh)->table[affs_hash_name(sb, dentry->d_name.name, dentry->d_name.len)]); for (;;) { affs_brelse(bh); if (key == 0) return NULL; bh = affs_bread(sb, key); if (!bh) return ERR_PTR(-EIO); if (affs_match(dentry, AFFS_TAIL(sb, bh)->name, toupper)) return bh; key = be32_to_cpu(AFFS_TAIL(sb, bh)->hash_chain); } } struct dentry * affs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct super_block *sb = dir->i_sb; struct buffer_head *bh; struct inode *inode = NULL; pr_debug("AFFS: lookup(\"%.*s\")\n",(int)dentry->d_name.len,dentry->d_name.name); affs_lock_dir(dir); bh = affs_find_entry(dir, dentry); affs_unlock_dir(dir); if (IS_ERR(bh)) return ERR_CAST(bh); if (bh) { u32 ino = bh->b_blocknr; dentry->d_fsdata = (void *)(long)ino; switch (be32_to_cpu(AFFS_TAIL(sb, bh)->stype)) { case ST_LINKFILE: ino = be32_to_cpu(AFFS_TAIL(sb, bh)->original); } affs_brelse(bh); inode = affs_iget(sb, ino); if (IS_ERR(inode)) return ERR_CAST(inode); } d_add(dentry, inode); return NULL; } int affs_unlink(struct inode *dir, struct dentry *dentry) { pr_debug("AFFS: unlink(dir=%d, %lu \"%.*s\")\n", (u32)dir->i_ino, dentry->d_inode->i_ino, (int)dentry->d_name.len, dentry->d_name.name); return affs_remove_header(dentry); } int affs_create(struct inode *dir, struct dentry *dentry, umode_t mode, struct nameidata 
*nd) { struct super_block *sb = dir->i_sb; struct inode *inode; int error; pr_debug("AFFS: create(%lu,\"%.*s\",0%ho)\n",dir->i_ino,(int)dentry->d_name.len, dentry->d_name.name,mode); inode = affs_new_inode(dir); if (!inode) return -ENOSPC; inode->i_mode = mode; mode_to_prot(inode); mark_inode_dirty(inode); inode->i_op = &affs_file_inode_operations; inode->i_fop = &affs_file_operations; inode->i_mapping->a_ops = (AFFS_SB(sb)->s_flags & SF_OFS) ? &affs_aops_ofs : &affs_aops; error = affs_add_entry(dir, inode, dentry, ST_FILE); if (error) { clear_nlink(inode); iput(inode); return error; } return 0; } int affs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) { struct inode *inode; int error; pr_debug("AFFS: mkdir(%lu,\"%.*s\",0%ho)\n",dir->i_ino, (int)dentry->d_name.len,dentry->d_name.name,mode); inode = affs_new_inode(dir); if (!inode) return -ENOSPC; inode->i_mode = S_IFDIR | mode; mode_to_prot(inode); inode->i_op = &affs_dir_inode_operations; inode->i_fop = &affs_dir_operations; error = affs_add_entry(dir, inode, dentry, ST_USERDIR); if (error) { clear_nlink(inode); mark_inode_dirty(inode); iput(inode); return error; } return 0; } int affs_rmdir(struct inode *dir, struct dentry *dentry) { pr_debug("AFFS: rmdir(dir=%u, %lu \"%.*s\")\n", (u32)dir->i_ino, dentry->d_inode->i_ino, (int)dentry->d_name.len, dentry->d_name.name); return affs_remove_header(dentry); } int affs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct super_block *sb = dir->i_sb; struct buffer_head *bh; struct inode *inode; char *p; int i, maxlen, error; char c, lc; pr_debug("AFFS: symlink(%lu,\"%.*s\" -> \"%s\")\n",dir->i_ino, (int)dentry->d_name.len,dentry->d_name.name,symname); maxlen = AFFS_SB(sb)->s_hashsize * sizeof(u32) - 1; inode = affs_new_inode(dir); if (!inode) return -ENOSPC; inode->i_op = &affs_symlink_inode_operations; inode->i_data.a_ops = &affs_symlink_aops; inode->i_mode = S_IFLNK | 0777; mode_to_prot(inode); error = -EIO; bh = affs_bread(sb, 
inode->i_ino); if (!bh) goto err; i = 0; p = (char *)AFFS_HEAD(bh)->table; lc = '/'; if (*symname == '/') { struct affs_sb_info *sbi = AFFS_SB(sb); while (*symname == '/') symname++; spin_lock(&sbi->symlink_lock); while (sbi->s_volume[i]) *p++ = sbi->s_volume[i++]; spin_unlock(&sbi->symlink_lock); } while (i < maxlen && (c = *symname++)) { if (c == '.' && lc == '/' && *symname == '.' && symname[1] == '/') { *p++ = '/'; i++; symname += 2; lc = '/'; } else if (c == '.' && lc == '/' && *symname == '/') { symname++; lc = '/'; } else { *p++ = c; lc = c; i++; } if (lc == '/') while (*symname == '/') symname++; } *p = 0; mark_buffer_dirty_inode(bh, inode); affs_brelse(bh); mark_inode_dirty(inode); error = affs_add_entry(dir, inode, dentry, ST_SOFTLINK); if (error) goto err; return 0; err: clear_nlink(inode); mark_inode_dirty(inode); iput(inode); return error; } int affs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; pr_debug("AFFS: link(%u, %u, \"%.*s\")\n", (u32)inode->i_ino, (u32)dir->i_ino, (int)dentry->d_name.len,dentry->d_name.name); return affs_add_entry(dir, inode, dentry, ST_LINKFILE); } int affs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct super_block *sb = old_dir->i_sb; struct buffer_head *bh = NULL; int retval; pr_debug("AFFS: rename(old=%u,\"%*s\" to new=%u,\"%*s\")\n", (u32)old_dir->i_ino, (int)old_dentry->d_name.len, old_dentry->d_name.name, (u32)new_dir->i_ino, (int)new_dentry->d_name.len, new_dentry->d_name.name); retval = affs_check_name(new_dentry->d_name.name,new_dentry->d_name.len); if (retval) return retval; if (new_dentry->d_inode) { retval = affs_remove_header(new_dentry); if (retval) return retval; } bh = affs_bread(sb, old_dentry->d_inode->i_ino); if (!bh) return -EIO; affs_lock_dir(old_dir); retval = affs_remove_hash(old_dir, bh); affs_unlock_dir(old_dir); if (retval) goto done; 
affs_copy_name(AFFS_TAIL(sb, bh)->name, new_dentry); affs_fix_checksum(sb, bh); affs_lock_dir(new_dir); retval = affs_insert_hash(new_dir, bh); affs_unlock_dir(new_dir); done: mark_buffer_dirty_inode(bh, retval ? old_dir : new_dir); affs_brelse(bh); return retval; }
gpl-2.0
invisiblek/android_kernel_htc_m8
arch/arm/mach-msm/board-sapphire-gpio.c
34
7432
/* arch/arm/mach-msm/board-sapphire-gpio.c * Copyright (C) 2007-2009 HTC Corporation. * Author: Thomas Tsai <thomas_tsai@htc.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/irq.h> #include <linux/pm.h> #include <linux/sysdev.h> #include <linux/io.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include "gpio_chip.h" #include "board-sapphire.h" #ifdef DEBUG_SAPPHIRE_GPIO #define DBG(fmt, arg...) printk(KERN_INFO "%s: " fmt "\n", __func__, ## arg) #else #define DBG(fmt, arg...) do {} while (0) #endif #define SAPPHIRE_CPLD_INT_STATUS (SAPPHIRE_CPLD_BASE + 0x0E) #define SAPPHIRE_CPLD_INT_LEVEL (SAPPHIRE_CPLD_BASE + 0x08) #define SAPPHIRE_CPLD_INT_MASK (SAPPHIRE_CPLD_BASE + 0x0C) static const int _g_CPLD_MISCn_Offset[] = { 0x0A, 0x00, 0x02, 0x04, 0x06}; static const int _g_INT_BANK_Offset[][3] = {{0x0E, 0x08, 0x0C} }; static uint8_t sapphire_cpld_initdata[4] = { [0] = 0x80, [1] = 0x34, [3] = 0x04, }; static uint8_t sapphire_int_mask[] = { [0] = 0xfb, }; static uint8_t sapphire_sleep_int_mask[] = { [0] = 0x00, }; static int sapphire_suspended; static int sapphire_gpio_read(struct gpio_chip *chip, unsigned n) { if (n < SAPPHIRE_GPIO_INT_B0_BASE) return !!(readb(CPLD_GPIO_REG(n)) & CPLD_GPIO_BIT_POS_MASK(n)); else if (n <= SAPPHIRE_GPIO_END) return !!(readb(CPLD_INT_LEVEL_REG_G(n)) & CPLD_GPIO_BIT_POS_MASK(n)); return 0; } int sapphire_gpio_write(struct gpio_chip *chip, unsigned n, unsigned on) { unsigned long flags; uint8_t reg_val; if (n > SAPPHIRE_GPIO_END) return -1; local_irq_save(flags); 
reg_val = readb(CPLD_GPIO_REG(n)); if (on) reg_val |= CPLD_GPIO_BIT_POS_MASK(n); else reg_val &= ~CPLD_GPIO_BIT_POS_MASK(n); writeb(reg_val, CPLD_GPIO_REG(n)); DBG("gpio=%d, l=0x%x\r\n", n, readb(SAPPHIRE_CPLD_INT_LEVEL)); local_irq_restore(flags); return 0; } static int sapphire_gpio_configure(struct gpio_chip *chip, unsigned int gpio, unsigned long flags) { if (flags & (GPIOF_OUTPUT_LOW | GPIOF_OUTPUT_HIGH)) sapphire_gpio_write(chip, gpio, flags & GPIOF_OUTPUT_HIGH); DBG("gpio=%d, l=0x%x\r\n", gpio, readb(SAPPHIRE_CPLD_INT_LEVEL)); return 0; } static int sapphire_gpio_get_irq_num(struct gpio_chip *chip, unsigned int gpio, unsigned int *irqp, unsigned long *irqnumflagsp) { DBG("gpio=%d, l=0x%x\r\n", gpio, readb(SAPPHIRE_CPLD_INT_LEVEL)); DBG("SAPPHIRE_GPIO_INT_B0_BASE=%d, SAPPHIRE_GPIO_LAST_INT=%d\r\n", SAPPHIRE_GPIO_INT_B0_BASE, SAPPHIRE_GPIO_LAST_INT); if ((gpio < SAPPHIRE_GPIO_INT_B0_BASE) || (gpio > SAPPHIRE_GPIO_LAST_INT)) return -ENOENT; *irqp = SAPPHIRE_GPIO_TO_INT(gpio); DBG("*irqp=%d\r\n", *irqp); if (irqnumflagsp) *irqnumflagsp = 0; return 0; } static void sapphire_gpio_irq_ack(unsigned int irq) { writeb(SAPPHIRE_INT_BIT_MASK(irq), CPLD_INT_STATUS_REG(irq)); } static void sapphire_gpio_irq_enable(unsigned int irq) { unsigned long flags; uint8_t reg_val; local_irq_save(flags); reg_val = readb(CPLD_INT_MASK_REG(irq)) | SAPPHIRE_INT_BIT_MASK(irq); DBG("(irq=%d,0x%x, 0x%x)\r\n", irq, CPLD_INT_MASK_REG(irq), SAPPHIRE_INT_BIT_MASK(irq)); DBG("sapphire_suspended=%d\r\n", sapphire_suspended); if (!sapphire_suspended) writeb(reg_val, CPLD_INT_MASK_REG(irq)); reg_val = readb(CPLD_INT_MASK_REG(irq)); DBG("reg_val= 0x%x\r\n", reg_val); DBG("l=0x%x\r\n", readb(SAPPHIRE_CPLD_INT_LEVEL)); local_irq_restore(flags); } static void sapphire_gpio_irq_disable(unsigned int irq) { unsigned long flags; uint8_t reg_val; local_irq_save(flags); reg_val = readb(CPLD_INT_MASK_REG(irq)) & ~SAPPHIRE_INT_BIT_MASK(irq); DBG("(%d,0x%x, 0x%x, 0x%x)\r\n", irq, reg_val, 
CPLD_INT_MASK_REG(irq), SAPPHIRE_INT_BIT_MASK(irq)); DBG("sapphire_suspended=%d\r\n", sapphire_suspended); if (!sapphire_suspended) writeb(reg_val, CPLD_INT_MASK_REG(irq)); reg_val = readb(CPLD_INT_MASK_REG(irq)); DBG("reg_val= 0x%x\r\n", reg_val); DBG("l=0x%x\r\n", readb(SAPPHIRE_CPLD_INT_LEVEL)); local_irq_restore(flags); } int sapphire_gpio_irq_set_wake(unsigned int irq, unsigned int on) { unsigned long flags; uint8_t mask = SAPPHIRE_INT_BIT_MASK(irq); local_irq_save(flags); if (on) sapphire_sleep_int_mask[CPLD_INT_TO_BANK(irq)] |= mask; else sapphire_sleep_int_mask[CPLD_INT_TO_BANK(irq)] &= ~mask; local_irq_restore(flags); return 0; } static void sapphire_gpio_irq_handler(unsigned int irq, struct irq_desc *desc) { int j; unsigned v; int int_base = SAPPHIRE_INT_START; v = readb(SAPPHIRE_CPLD_INT_STATUS); for (j = 0; j < 8 ; j++) { if (v & (1U << j)) { DBG("generic_handle_irq j=0x%x\r\n", j); generic_handle_irq(int_base + j); } } desc->chip->ack(irq); DBG("irq=%d, l=0x%x\r\n", irq, readb(SAPPHIRE_CPLD_INT_LEVEL)); } static int sapphire_sysdev_suspend(struct sys_device *dev, pm_message_t state) { sapphire_suspended = 1; sapphire_int_mask[0] = readb(SAPPHIRE_CPLD_BASE + SAPPHIRE_GPIO_INT_B0_MASK_REG); writeb(sapphire_sleep_int_mask[0], SAPPHIRE_CPLD_BASE + SAPPHIRE_GPIO_INT_B0_MASK_REG); return 0; } int sapphire_sysdev_resume(struct sys_device *dev) { writeb(sapphire_int_mask[0], SAPPHIRE_CPLD_BASE + SAPPHIRE_GPIO_INT_B0_MASK_REG); sapphire_suspended = 0; return 0; } static struct irq_chip sapphire_gpio_irq_chip = { .name = "sapphiregpio", .ack = sapphire_gpio_irq_ack, .mask = sapphire_gpio_irq_disable, .unmask = sapphire_gpio_irq_enable, .set_wake = sapphire_gpio_irq_set_wake, }; static struct gpio_chip sapphire_gpio_chip = { .start = SAPPHIRE_GPIO_START, .end = SAPPHIRE_GPIO_END, .configure = sapphire_gpio_configure, .get_irq_num = sapphire_gpio_get_irq_num, .read = sapphire_gpio_read, .write = sapphire_gpio_write, }; struct sysdev_class sapphire_sysdev_class = { 
.name = "sapphiregpio_irq", .suspend = sapphire_sysdev_suspend, .resume = sapphire_sysdev_resume, }; static struct sys_device sapphire_irq_device = { .cls = &sapphire_sysdev_class, }; int sapphire_init_gpio(void) { int i; if (!machine_is_sapphire()) return 0; DBG("%d,%d\r\n", SAPPHIRE_INT_START, SAPPHIRE_INT_END); DBG("NR_MSM_IRQS=%d, NR_GPIO_IRQS=%d\r\n", NR_MSM_IRQS, NR_GPIO_IRQS); for (i = SAPPHIRE_INT_START; i <= SAPPHIRE_INT_END; i++) { set_irq_chip(i, &sapphire_gpio_irq_chip); set_irq_handler(i, handle_edge_irq); set_irq_flags(i, IRQF_VALID); } register_gpio_chip(&sapphire_gpio_chip); set_irq_type(MSM_GPIO_TO_INT(17), IRQF_TRIGGER_HIGH); set_irq_chained_handler(MSM_GPIO_TO_INT(17), sapphire_gpio_irq_handler); set_irq_wake(MSM_GPIO_TO_INT(17), 1); if (sysdev_class_register(&sapphire_sysdev_class) == 0) sysdev_register(&sapphire_irq_device); return 0; } int sapphire_init_cpld(unsigned int sys_rev) { int i; for (i = 0; i < ARRAY_SIZE(sapphire_cpld_initdata); i++) writeb(sapphire_cpld_initdata[i], SAPPHIRE_CPLD_BASE + i * 2); return 0; } postcore_initcall(sapphire_init_gpio);
gpl-2.0
Diaob/z_bac_150827_android_kernel_oneplus_msm8994
net/netfilter/xt_HARDIDLETIMER.c
290
9782
/* * linux/net/netfilter/xt_HARDIDLETIMER.c * * Netfilter module to trigger a timer when packet matches. * After timer expires a kevent will be sent. * * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved. * * Copyright (C) 2004, 2010 Nokia Corporation * * Written by Timo Teras <ext-timo.teras@nokia.com> * * Converted to x_tables and reworked for upstream inclusion * by Luciano Coelho <luciano.coelho@nokia.com> * * Contact: Luciano Coelho <luciano.coelho@nokia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/timer.h> #include <linux/alarmtimer.h> #include <linux/list.h> #include <linux/mutex.h> #include <linux/netfilter.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_HARDIDLETIMER.h> #include <linux/kdev_t.h> #include <linux/kobject.h> #include <linux/skbuff.h> #include <linux/workqueue.h> #include <linux/sysfs.h> #include <net/net_namespace.h> struct hardidletimer_tg_attr { struct attribute attr; ssize_t (*show)(struct kobject *kobj, struct attribute *attr, char *buf); }; struct hardidletimer_tg { struct list_head entry; struct alarm alarm; struct work_struct work; struct kobject *kobj; struct hardidletimer_tg_attr attr; unsigned int refcnt; bool send_nl_msg; bool active; }; static LIST_HEAD(hardidletimer_tg_list); static DEFINE_MUTEX(list_mutex); static 
struct kobject *hardidletimer_tg_kobj; static void notify_netlink_uevent(const char *iface, struct hardidletimer_tg *timer) { char iface_msg[NLMSG_MAX_SIZE]; char state_msg[NLMSG_MAX_SIZE]; char *envp[] = { iface_msg, state_msg, NULL }; int res; res = snprintf(iface_msg, NLMSG_MAX_SIZE, "INTERFACE=%s", iface); if (NLMSG_MAX_SIZE <= res) { pr_err("message too long (%d)", res); return; } res = snprintf(state_msg, NLMSG_MAX_SIZE, "STATE=%s", timer->active ? "active" : "inactive"); if (NLMSG_MAX_SIZE <= res) { pr_err("message too long (%d)", res); return; } pr_debug("putting nlmsg: <%s> <%s>\n", iface_msg, state_msg); kobject_uevent_env(hardidletimer_tg_kobj, KOBJ_CHANGE, envp); return; } static struct hardidletimer_tg *__hardidletimer_tg_find_by_label(const char *label) { struct hardidletimer_tg *entry; BUG_ON(!label); list_for_each_entry(entry, &hardidletimer_tg_list, entry) { if (!strcmp(label, entry->attr.attr.name)) return entry; } return NULL; } static ssize_t hardidletimer_tg_show(struct kobject *kobj, struct attribute *attr, char *buf) { struct hardidletimer_tg *timer; ktime_t expires; struct timespec ktimespec; memset(&ktimespec, 0, sizeof(struct timespec)); mutex_lock(&list_mutex); timer = __hardidletimer_tg_find_by_label(attr->name); if (timer) { expires = alarm_expires_remaining(&timer->alarm); ktimespec = ktime_to_timespec(expires); } mutex_unlock(&list_mutex); if (ktimespec.tv_sec >= 0) return snprintf(buf, PAGE_SIZE, "%ld\n", ktimespec.tv_sec); if (timer != NULL && timer->send_nl_msg) return snprintf(buf, PAGE_SIZE, "0 %ld\n", ktimespec.tv_sec); else return snprintf(buf, PAGE_SIZE, "0\n"); } static void hardidletimer_tg_work(struct work_struct *work) { struct hardidletimer_tg *timer = container_of(work, struct hardidletimer_tg, work); sysfs_notify(hardidletimer_tg_kobj, NULL, timer->attr.attr.name); if (timer->send_nl_msg) notify_netlink_uevent(timer->attr.attr.name, timer); } static enum alarmtimer_restart hardidletimer_tg_alarmproc(struct alarm *alarm, 
ktime_t now) { struct hardidletimer_tg *timer = alarm->data; pr_debug("alarm %s expired\n", timer->attr.attr.name); timer->active = false; schedule_work(&timer->work); return ALARMTIMER_NORESTART; } static int hardidletimer_tg_create(struct hardidletimer_tg_info *info) { int ret; ktime_t tout; info->timer = kmalloc(sizeof(*info->timer), GFP_KERNEL); if (!info->timer) { ret = -ENOMEM; goto out; } info->timer->attr.attr.name = kstrdup(info->label, GFP_KERNEL); if (!info->timer->attr.attr.name) { ret = -ENOMEM; goto out_free_timer; } info->timer->attr.attr.mode = S_IRUGO; info->timer->attr.show = hardidletimer_tg_show; ret = sysfs_create_file(hardidletimer_tg_kobj, &info->timer->attr.attr); if (ret < 0) { pr_debug("couldn't add file to sysfs"); goto out_free_attr; } list_add(&info->timer->entry, &hardidletimer_tg_list); alarm_init(&info->timer->alarm, ALARM_BOOTTIME, hardidletimer_tg_alarmproc); info->timer->alarm.data = info->timer; info->timer->refcnt = 1; info->timer->send_nl_msg = (info->send_nl_msg == 0) ? false : true; info->timer->active = true; tout = ktime_set(info->timeout, 0); alarm_start_relative(&info->timer->alarm, tout); INIT_WORK(&info->timer->work, hardidletimer_tg_work); return 0; out_free_attr: kfree(info->timer->attr.attr.name); out_free_timer: kfree(info->timer); out: return ret; } /* The actual xt_tables plugin. 
*/ static unsigned int hardidletimer_tg_target(struct sk_buff *skb, const struct xt_action_param *par) { const struct hardidletimer_tg_info *info = par->targinfo; ktime_t tout; pr_debug("resetting timer %s, timeout period %u\n", info->label, info->timeout); BUG_ON(!info->timer); if (info->timer->active == false) { schedule_work(&info->timer->work); pr_debug("Starting timer %s\n", info->label); } info->timer->active = true; /* TODO: Avoid modifying timers on each packet */ tout = ktime_set(info->timeout, 0); alarm_start_relative(&info->timer->alarm, tout); return XT_CONTINUE; } static int hardidletimer_tg_checkentry(const struct xt_tgchk_param *par) { struct hardidletimer_tg_info *info = par->targinfo; int ret; ktime_t tout; pr_debug("checkentry targinfo %s\n", info->label); if (info->timeout == 0) { pr_debug("timeout value is zero\n"); return -EINVAL; } if (info->label[0] == '\0' || strnlen(info->label, MAX_HARDIDLETIMER_LABEL_SIZE) == MAX_HARDIDLETIMER_LABEL_SIZE) { pr_debug("label is empty or not nul-terminated\n"); return -EINVAL; } mutex_lock(&list_mutex); info->timer = __hardidletimer_tg_find_by_label(info->label); if (info->timer) { info->timer->refcnt++; if (info->timer->active == false) { schedule_work(&info->timer->work); pr_debug("Starting Checkentry timer\n"); } info->timer->active = true; tout = ktime_set(info->timeout, 0); alarm_start_relative(&info->timer->alarm, tout); pr_debug("increased refcnt of timer %s to %u\n", info->label, info->timer->refcnt); } else { ret = hardidletimer_tg_create(info); if (ret < 0) { pr_debug("failed to create timer\n"); mutex_unlock(&list_mutex); return ret; } } mutex_unlock(&list_mutex); return 0; } static void hardidletimer_tg_destroy(const struct xt_tgdtor_param *par) { const struct hardidletimer_tg_info *info = par->targinfo; pr_debug("destroy targinfo %s\n", info->label); mutex_lock(&list_mutex); if (--info->timer->refcnt == 0) { pr_debug("deleting timer %s\n", info->label); list_del(&info->timer->entry); 
alarm_cancel(&info->timer->alarm); cancel_work_sync(&info->timer->work); sysfs_remove_file(hardidletimer_tg_kobj, &info->timer->attr.attr); kfree(info->timer->attr.attr.name); kfree(info->timer); } else { pr_debug("decreased refcnt of timer %s to %u\n", info->label, info->timer->refcnt); } mutex_unlock(&list_mutex); } static struct xt_target hardidletimer_tg __read_mostly = { .name = "HARDIDLETIMER", .revision = 1, .family = NFPROTO_UNSPEC, .target = hardidletimer_tg_target, .targetsize = sizeof(struct hardidletimer_tg_info), .checkentry = hardidletimer_tg_checkentry, .destroy = hardidletimer_tg_destroy, .me = THIS_MODULE, }; static struct class *hardidletimer_tg_class; static struct device *hardidletimer_tg_device; static int __init hardidletimer_tg_init(void) { int err; hardidletimer_tg_class = class_create(THIS_MODULE, "xt_hardidletimer"); err = PTR_ERR(hardidletimer_tg_class); if (IS_ERR(hardidletimer_tg_class)) { pr_debug("couldn't register device class\n"); goto out; } hardidletimer_tg_device = device_create(hardidletimer_tg_class, NULL, MKDEV(0, 0), NULL, "timers"); err = PTR_ERR(hardidletimer_tg_device); if (IS_ERR(hardidletimer_tg_device)) { pr_debug("couldn't register system device\n"); goto out_class; } hardidletimer_tg_kobj = &hardidletimer_tg_device->kobj; err = xt_register_target(&hardidletimer_tg); if (err < 0) { pr_debug("couldn't register xt target\n"); goto out_dev; } return 0; out_dev: device_destroy(hardidletimer_tg_class, MKDEV(0, 0)); out_class: class_destroy(hardidletimer_tg_class); out: return err; } static void __exit hardidletimer_tg_exit(void) { xt_unregister_target(&hardidletimer_tg); device_destroy(hardidletimer_tg_class, MKDEV(0, 0)); class_destroy(hardidletimer_tg_class); } module_init(hardidletimer_tg_init); module_exit(hardidletimer_tg_exit); MODULE_AUTHOR("Timo Teras <ext-timo.teras@nokia.com>"); MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); MODULE_DESCRIPTION("Xtables: idle time monitor"); MODULE_LICENSE("GPL v2");
gpl-2.0
LuweiLight/linux-3.14.35-vbal
drivers/clocksource/clksrc-of.c
290
1327
/* * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/init.h> #include <linux/of.h> #include <linux/clocksource.h> extern struct of_device_id __clksrc_of_table[]; static const struct of_device_id __clksrc_of_table_sentinel __used __section(__clksrc_of_table_end); void __init clocksource_of_init(void) { struct device_node *np; const struct of_device_id *match; clocksource_of_init_fn init_func; unsigned clocksources = 0; for_each_matching_node_and_match(np, __clksrc_of_table, &match) { if (!of_device_is_available(np)) continue; init_func = match->data; init_func(np); clocksources++; } if (!clocksources) pr_crit("%s: no matching clocksources found\n", __func__); }
gpl-2.0
flaming-toast/unrm
drivers/base/cacheinfo.c
290
14507
/* * cacheinfo support - processor cache information via sysfs * * Based on arch/x86/kernel/cpu/intel_cacheinfo.c * Author: Sudeep Holla <sudeep.holla@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any * kind, whether express or implied; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. */ #include <linux/bitops.h> #include <linux/cacheinfo.h> #include <linux/compiler.h> #include <linux/cpu.h> #include <linux/device.h> #include <linux/init.h> #include <linux/of.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/smp.h> #include <linux/sysfs.h> /* pointer to per cpu cacheinfo */ static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo); #define ci_cacheinfo(cpu) (&per_cpu(ci_cpu_cacheinfo, cpu)) #define cache_leaves(cpu) (ci_cacheinfo(cpu)->num_leaves) #define per_cpu_cacheinfo(cpu) (ci_cacheinfo(cpu)->info_list) struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu) { return ci_cacheinfo(cpu); } #ifdef CONFIG_OF static int cache_setup_of_node(unsigned int cpu) { struct device_node *np; struct cacheinfo *this_leaf; struct device *cpu_dev = get_cpu_device(cpu); struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); unsigned int index = 0; /* skip if of_node is already populated */ if (this_cpu_ci->info_list->of_node) return 0; if (!cpu_dev) { pr_err("No cpu device for CPU %d\n", cpu); return -ENODEV; } np = cpu_dev->of_node; if (!np) { pr_err("Failed to find cpu%d device node\n", cpu); return -ENOENT; } while (index < cache_leaves(cpu)) { this_leaf = this_cpu_ci->info_list + index; if 
(this_leaf->level != 1) np = of_find_next_cache_node(np); else np = of_node_get(np);/* cpu node itself */ if (!np) break; this_leaf->of_node = np; index++; } if (index != cache_leaves(cpu)) /* not all OF nodes populated */ return -ENOENT; return 0; } static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, struct cacheinfo *sib_leaf) { return sib_leaf->of_node == this_leaf->of_node; } #else static inline int cache_setup_of_node(unsigned int cpu) { return 0; } static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf, struct cacheinfo *sib_leaf) { /* * For non-DT systems, assume unique level 1 cache, system-wide * shared caches for all other levels. This will be used only if * arch specific code has not populated shared_cpu_map */ return !(this_leaf->level == 1); } #endif static int cache_shared_cpu_map_setup(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf, *sib_leaf; unsigned int index; int ret; ret = cache_setup_of_node(cpu); if (ret) return ret; for (index = 0; index < cache_leaves(cpu); index++) { unsigned int i; this_leaf = this_cpu_ci->info_list + index; /* skip if shared_cpu_map is already populated */ if (!cpumask_empty(&this_leaf->shared_cpu_map)) continue; cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map); for_each_online_cpu(i) { struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i); if (i == cpu || !sib_cpu_ci->info_list) continue;/* skip if itself or no cacheinfo */ sib_leaf = sib_cpu_ci->info_list + index; if (cache_leaves_are_shared(this_leaf, sib_leaf)) { cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_set_cpu(i, &this_leaf->shared_cpu_map); } } } return 0; } static void cache_shared_cpu_map_remove(unsigned int cpu) { struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); struct cacheinfo *this_leaf, *sib_leaf; unsigned int sibling, index; for (index = 0; index < cache_leaves(cpu); index++) { this_leaf = this_cpu_ci->info_list + index; 
for_each_cpu(sibling, &this_leaf->shared_cpu_map) { struct cpu_cacheinfo *sib_cpu_ci; if (sibling == cpu) /* skip itself */ continue; sib_cpu_ci = get_cpu_cacheinfo(sibling); sib_leaf = sib_cpu_ci->info_list + index; cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map); cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map); } of_node_put(this_leaf->of_node); } } static void free_cache_attributes(unsigned int cpu) { cache_shared_cpu_map_remove(cpu); kfree(per_cpu_cacheinfo(cpu)); per_cpu_cacheinfo(cpu) = NULL; } int __weak init_cache_level(unsigned int cpu) { return -ENOENT; } int __weak populate_cache_leaves(unsigned int cpu) { return -ENOENT; } static int detect_cache_attributes(unsigned int cpu) { int ret; if (init_cache_level(cpu) || !cache_leaves(cpu)) return -ENOENT; per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu), sizeof(struct cacheinfo), GFP_KERNEL); if (per_cpu_cacheinfo(cpu) == NULL) return -ENOMEM; ret = populate_cache_leaves(cpu); if (ret) goto free_ci; /* * For systems using DT for cache hierarchy, of_node and shared_cpu_map * will be set up here only if they are not populated already */ ret = cache_shared_cpu_map_setup(cpu); if (ret) { pr_warn("Unable to detect cache hierarchy from DT for CPU %d\n", cpu); goto free_ci; } return 0; free_ci: free_cache_attributes(cpu); return ret; } /* pointer to cpuX/cache device */ static DEFINE_PER_CPU(struct device *, ci_cache_dev); #define per_cpu_cache_dev(cpu) (per_cpu(ci_cache_dev, cpu)) static cpumask_t cache_dev_map; /* pointer to array of devices for cpuX/cache/indexY */ static DEFINE_PER_CPU(struct device **, ci_index_dev); #define per_cpu_index_dev(cpu) (per_cpu(ci_index_dev, cpu)) #define per_cache_index_dev(cpu, idx) ((per_cpu_index_dev(cpu))[idx]) #define show_one(file_name, object) \ static ssize_t file_name##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct cacheinfo *this_leaf = dev_get_drvdata(dev); \ return sprintf(buf, "%u\n", this_leaf->object); \ } 
/*
 * Trivial sysfs show() handlers for the simple integer leaf fields,
 * generated from the show_one() macro defined above.
 */
show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

/* Report the cache size in KiB (this_leaf->size is in bytes). */
static ssize_t size_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

/*
 * Common helper for the two shared-CPU attributes: prints shared_cpu_map
 * either as a hex mask (list == false) or as a CPU range list (list == true).
 */
static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
				    struct device_attribute *attr, char *buf)
{
	return shared_cpumap_show_func(dev, true, buf);
}

/* Human-readable cache type; -EINVAL for types the enum does not name. */
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);

	switch (this_leaf->type) {
	case CACHE_TYPE_DATA:
		return sprintf(buf, "Data\n");
	case CACHE_TYPE_INST:
		return sprintf(buf, "Instruction\n");
	case CACHE_TYPE_UNIFIED:
		return sprintf(buf, "Unified\n");
	default:
		return -EINVAL;
	}
}

/*
 * Allocation policy decoded from the attributes bitfield; returns 0 bytes
 * (empty file) when neither allocate bit is set.
 */
static ssize_t allocation_policy_show(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
		n = sprintf(buf, "ReadWriteAllocate\n");
	else if (ci_attr & CACHE_READ_ALLOCATE)
		n = sprintf(buf, "ReadAllocate\n");
	else if (ci_attr & CACHE_WRITE_ALLOCATE)
		n = sprintf(buf, "WriteAllocate\n");
	return n;
}

/* Write policy decoded from the attributes bitfield; empty if unset. */
static ssize_t write_policy_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	unsigned int ci_attr = this_leaf->attributes;
	int n = 0;

	if (ci_attr & CACHE_WRITE_THROUGH)
		n = sprintf(buf, "WriteThrough\n");
	else if (ci_attr & CACHE_WRITE_BACK)
		n = sprintf(buf, "WriteBack\n");
	return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

static struct attribute *cache_default_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_level.attr,
	&dev_attr_shared_cpu_map.attr,
	&dev_attr_shared_cpu_list.attr,
	&dev_attr_coherency_line_size.attr,
	&dev_attr_ways_of_associativity.attr,
	&dev_attr_number_of_sets.attr,
	&dev_attr_size.attr,
	&dev_attr_allocation_policy.attr,
	&dev_attr_write_policy.attr,
	&dev_attr_physical_line_partition.attr,
	NULL
};

/*
 * Hide attributes whose backing field was never populated by the arch
 * code (value still zero/empty), so each platform only exposes the
 * properties it actually reported.
 */
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	const struct cpumask *mask = &this_leaf->shared_cpu_map;
	umode_t mode = attr->mode;

	if ((attr == &dev_attr_type.attr) && this_leaf->type)
		return mode;
	if ((attr == &dev_attr_level.attr) && this_leaf->level)
		return mode;
	if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
		return mode;
	if ((attr == &dev_attr_coherency_line_size.attr) &&
	    this_leaf->coherency_line_size)
		return mode;
	if ((attr == &dev_attr_ways_of_associativity.attr) &&
	    this_leaf->size) /* allow 0 = full associativity */
		return mode;
	if ((attr == &dev_attr_number_of_sets.attr) &&
	    this_leaf->number_of_sets)
		return mode;
	if ((attr == &dev_attr_size.attr) && this_leaf->size)
		return mode;
	if ((attr == &dev_attr_write_policy.attr) &&
	    (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_allocation_policy.attr) &&
	    (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
		return mode;
	if ((attr == &dev_attr_physical_line_partition.attr) &&
	    this_leaf->physical_line_partition)
		return mode;

	return 0;
}

static const struct attribute_group cache_default_group = {
	.attrs = cache_default_attrs,
	.is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
	&cache_default_group,
	NULL,
};

/* Second slot is filled lazily with the arch's private group, if any. */
static const struct attribute_group *cache_private_groups[] = {
	&cache_default_group,
	NULL, /* Place holder for private group */
	NULL,
};

/* Weak hook: arch code may supply extra per-leaf attributes. */
const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
	return NULL;
}

/*
 * Pick the attribute-group array for one leaf: default only, or default
 * plus the arch-private group.  NOTE(review): cache_private_groups[1] is
 * written once with the first leaf's private group and then reused for
 * every leaf — this assumes the arch returns the same group for all
 * leaves; confirm against the arch implementation.
 */
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
	const struct attribute_group *priv_group =
			cache_get_priv_group(this_leaf);

	if (!priv_group)
		return cache_default_groups;

	if (!cache_private_groups[1])
		cache_private_groups[1] = priv_group;

	return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */

/*
 * Tear down cpuX/cache/indexY devices (those that were created) and then
 * the cpuX/cache parent device itself.  Safe to call from the partial-
 * failure paths of cpu_cache_sysfs_init()/cache_add_dev().
 */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
	int i;
	struct device *ci_dev;

	if (per_cpu_index_dev(cpu)) {
		for (i = 0; i < cache_leaves(cpu); i++) {
			ci_dev = per_cache_index_dev(cpu, i);
			if (!ci_dev)
				continue;
			device_unregister(ci_dev);
		}
		kfree(per_cpu_index_dev(cpu));
		per_cpu_index_dev(cpu) = NULL;
	}
	device_unregister(per_cpu_cache_dev(cpu));
	per_cpu_cache_dev(cpu) = NULL;
}

/*
 * Create the cpuX/cache parent device and allocate the per-leaf device
 * pointer array.  Returns 0, -ENOENT if cacheinfo was never detected,
 * or a negative errno from device creation/allocation.
 */
static int cpu_cache_sysfs_init(unsigned int cpu)
{
	struct device *dev = get_cpu_device(cpu);

	if (per_cpu_cacheinfo(cpu) == NULL)
		return -ENOENT;

	per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
	if (IS_ERR(per_cpu_cache_dev(cpu)))
		return PTR_ERR(per_cpu_cache_dev(cpu));

	/* Allocate all required memory */
	per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct device *), GFP_KERNEL);
	if (unlikely(per_cpu_index_dev(cpu) == NULL))
		goto err_out;

	return 0;

err_out:
	cpu_cache_sysfs_exit(cpu);
	return -ENOMEM;
}

/*
 * Populate cpuX/cache/indexY for every detected leaf and mark the CPU in
 * cache_dev_map.  On any per-leaf failure the whole hierarchy for this
 * CPU is unwound via cpu_cache_sysfs_exit().
 */
static int cache_add_dev(unsigned int cpu)
{
	unsigned int i;
	int rc;
	struct device *ci_dev, *parent;
	struct cacheinfo *this_leaf;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	const struct attribute_group **cache_groups;

	rc = cpu_cache_sysfs_init(cpu);
	if (unlikely(rc < 0))
		return rc;

	parent = per_cpu_cache_dev(cpu);
	for (i = 0; i < cache_leaves(cpu); i++) {
		this_leaf = this_cpu_ci->info_list + i;
		if (this_leaf->disable_sysfs)
			continue;
		cache_groups = cache_get_attribute_groups(this_leaf);
		ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
					   "index%1u", i);
		if (IS_ERR(ci_dev)) {
			rc = PTR_ERR(ci_dev);
			goto err;
		}
		per_cache_index_dev(cpu, i) = ci_dev;
	}
	cpumask_set_cpu(cpu, &cache_dev_map);

	return 0;
err:
	cpu_cache_sysfs_exit(cpu);
	return rc;
}

/* Undo cache_add_dev(); no-op if the CPU was never added. */
static void cache_remove_dev(unsigned int cpu)
{
	if (!cpumask_test_cpu(cpu, &cache_dev_map))
		return;

	cpumask_clear_cpu(cpu, &cache_dev_map);
	cpu_cache_sysfs_exit(cpu);
}

/*
 * CPU hotplug notifier: (re)detect and export cacheinfo when a CPU comes
 * online, tear it down when the CPU dies.  FROZEN variants (suspend/
 * resume transitions) are handled identically via the mask.
 */
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
				  unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	int rc = 0;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_ONLINE:
		rc = detect_cache_attributes(cpu);
		if (!rc)
			rc = cache_add_dev(cpu);
		break;
	case CPU_DEAD:
		cache_remove_dev(cpu);
		if (per_cpu_cacheinfo(cpu))
			free_cache_attributes(cpu);
		break;
	}
	return notifier_from_errno(rc);
}

/*
 * Boot-time init: export cacheinfo for the CPUs already online, then
 * register the hotplug callback for later transitions.  On failure for
 * any CPU, that CPU's attributes are freed and init aborts with the
 * error (earlier CPUs keep their sysfs entries).
 */
static int __init cacheinfo_sysfs_init(void)
{
	int cpu, rc = 0;

	cpu_notifier_register_begin();

	for_each_online_cpu(cpu) {
		rc = detect_cache_attributes(cpu);
		if (rc)
			goto out;
		rc = cache_add_dev(cpu);
		if (rc) {
			free_cache_attributes(cpu);
			pr_err("error populating cacheinfo..cpu%d\n", cpu);
			goto out;
		}
	}
	__hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
	cpu_notifier_register_done();
	return rc;
}

device_initcall(cacheinfo_sysfs_init);
gpl-2.0
travistabbal/dragon-fire-kernel
kernel/audit_tree.c
802
21484
#include "audit.h"
#include <linux/inotify.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/kthread.h>
#include <linux/slab.h>

struct audit_tree;
struct audit_chunk;

/*
 * One audit_tree per watched directory subtree (AUDIT_TREE rule target).
 * Refcounted via 'count'; freed through RCU once the last reference drops.
 */
struct audit_tree {
	atomic_t count;			/* refcount, see get_tree()/put_tree() */
	int goner;			/* set once the tree is being torn down */
	struct audit_chunk *root;	/* chunk on the tree's root inode */
	struct list_head chunks;	/* node.list of every chunk tagging us */
	struct list_head rules;		/* rule.rlist of rules on this tree */
	struct list_head list;		/* on tree_list or prune_list */
	struct list_head same_root;	/* on root chunk's ->trees */
	struct rcu_head head;
	char pathname[];		/* flexible array: watched path */
};

/*
 * One audit_chunk per tagged inode; carries the inotify watch plus one
 * owner slot per tree that tags this inode.
 */
struct audit_chunk {
	struct list_head hash;		/* chunk_hash_heads bucket, RCU */
	struct inotify_watch watch;	/* embedded watch on the inode */
	struct list_head trees;		/* with root here */
	int dead;			/* being evicted/replaced */
	int count;			/* number of owner slots below */
	atomic_long_t refs;		/* external refs (audit_tree_lookup) */
	struct rcu_head head;
	struct node {
		struct list_head list;		/* on owner->chunks */
		struct audit_tree *owner;
		unsigned index;		/* index; upper bit indicates 'will prune' */
	} owners[];			/* flexible array, length 'count' */
};

static LIST_HEAD(tree_list);
static LIST_HEAD(prune_list);

/*
 * One struct chunk is attached to each inode of interest.
 * We replace struct chunk on tagging/untagging.
 * Rules have pointer to struct audit_tree.
 * Rules have struct list_head rlist forming a list of rules over
 * the same tree.
 * References to struct chunk are collected at audit_inode{,_child}()
 * time and used in AUDIT_TREE rule matching.
 * These references are dropped at the same time we are calling
 * audit_free_names(), etc.
 *
 * Cyclic lists galore:
 * tree.chunks anchors chunk.owners[].list		hash_lock
 * tree.rules anchors rule.rlist			audit_filter_mutex
 * chunk.trees anchors tree.same_root			hash_lock
 * chunk.hash is a hash with middle bits of watch.inode as
 * a hash function.					RCU, hash_lock
 *
 * tree is refcounted; one reference for "some rules on rules_list refer to
 * it", one for each chunk with pointer to it.
 *
 * chunk is refcounted by embedded inotify_watch + .refs (non-zero refcount
 * of watch contributes 1 to .refs).
 *
 * node.index allows to get from node.list to containing chunk.
 * MSB of that sucker is stolen to mark taggings that we might have to
 * revert - several operations have very unpleasant cleanup logics and
 * that makes a difference.  Some.
 */

/* inotify handle shared by every recursive-tree watch in this file. */
static struct inotify_handle *rtree_ih;

/*
 * Allocate a tree for pathname @s with refcount 1 and all list heads
 * initialized.  Returns NULL on allocation failure.
 */
static struct audit_tree *alloc_tree(const char *s)
{
	struct audit_tree *tree;

	/* +1 for the NUL of the flexible pathname[] member */
	tree = kmalloc(sizeof(struct audit_tree) + strlen(s) + 1, GFP_KERNEL);
	if (tree) {
		atomic_set(&tree->count, 1);
		tree->goner = 0;
		INIT_LIST_HEAD(&tree->chunks);
		INIT_LIST_HEAD(&tree->rules);
		INIT_LIST_HEAD(&tree->list);
		INIT_LIST_HEAD(&tree->same_root);
		tree->root = NULL;
		strcpy(tree->pathname, s);
	}
	return tree;
}

static inline void get_tree(struct audit_tree *tree)
{
	atomic_inc(&tree->count);
}

/* RCU callback: actually free the tree once readers are done. */
static void __put_tree(struct rcu_head *rcu)
{
	struct audit_tree *tree = container_of(rcu, struct audit_tree, head);
	kfree(tree);
}

/* Drop a reference; defers the free to RCU on the last put. */
static inline void put_tree(struct audit_tree *tree)
{
	if (atomic_dec_and_test(&tree->count))
		call_rcu(&tree->head, __put_tree);
}

/* to avoid bringing the entire thing in audit.h */
const char *audit_tree_path(struct audit_tree *tree)
{
	return tree->pathname;
}

/*
 * Allocate a zeroed chunk with @count owner slots, refcount 1 and an
 * initialized (but not yet registered) inotify watch.
 */
static struct audit_chunk *alloc_chunk(int count)
{
	struct audit_chunk *chunk;
	size_t size;
	int i;

	size = offsetof(struct audit_chunk, owners) + count * sizeof(struct node);
	chunk = kzalloc(size, GFP_KERNEL);
	if (!chunk)
		return NULL;

	INIT_LIST_HEAD(&chunk->hash);
	INIT_LIST_HEAD(&chunk->trees);
	chunk->count = count;
	atomic_long_set(&chunk->refs, 1);
	for (i = 0; i < count; i++) {
		INIT_LIST_HEAD(&chunk->owners[i].list);
		chunk->owners[i].index = i;
	}
	inotify_init_watch(&chunk->watch);
	return chunk;
}

/* Free a chunk, dropping the tree reference held by each owner slot. */
static void free_chunk(struct audit_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->count; i++) {
		if (chunk->owners[i].owner)
			put_tree(chunk->owners[i].owner);
	}
	kfree(chunk);
}

/* Drop an external chunk reference (taken by audit_tree_lookup()). */
void audit_put_chunk(struct audit_chunk *chunk)
{
	if (atomic_long_dec_and_test(&chunk->refs))
		free_chunk(chunk);
}

/* RCU callback used when the embedded watch is destroyed. */
static void __put_chunk(struct rcu_head *rcu)
{
	struct audit_chunk *chunk = container_of(rcu, struct audit_chunk, head);
	audit_put_chunk(chunk);
}

enum {HASH_SIZE = 128};
static struct list_head chunk_hash_heads[HASH_SIZE];
/* Protects the hash buckets and all the cyclic lists described above. */
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(hash_lock);
static inline struct list_head *chunk_hash(const struct inode *inode) { unsigned long n = (unsigned long)inode / L1_CACHE_BYTES; return chunk_hash_heads + n % HASH_SIZE; } /* hash_lock is held by caller */ static void insert_hash(struct audit_chunk *chunk) { struct list_head *list = chunk_hash(chunk->watch.inode); list_add_rcu(&chunk->hash, list); } /* called under rcu_read_lock */ struct audit_chunk *audit_tree_lookup(const struct inode *inode) { struct list_head *list = chunk_hash(inode); struct audit_chunk *p; list_for_each_entry_rcu(p, list, hash) { if (p->watch.inode == inode) { atomic_long_inc(&p->refs); return p; } } return NULL; } int audit_tree_match(struct audit_chunk *chunk, struct audit_tree *tree) { int n; for (n = 0; n < chunk->count; n++) if (chunk->owners[n].owner == tree) return 1; return 0; } /* tagging and untagging inodes with trees */ static struct audit_chunk *find_chunk(struct node *p) { int index = p->index & ~(1U<<31); p -= index; return container_of(p, struct audit_chunk, owners[0]); } static void untag_chunk(struct node *p) { struct audit_chunk *chunk = find_chunk(p); struct audit_chunk *new; struct audit_tree *owner; int size = chunk->count - 1; int i, j; if (!pin_inotify_watch(&chunk->watch)) { /* * Filesystem is shutting down; all watches are getting * evicted, just take it off the node list for this * tree and let the eviction logics take care of the * rest. */ owner = p->owner; if (owner->root == chunk) { list_del_init(&owner->same_root); owner->root = NULL; } list_del_init(&p->list); p->owner = NULL; put_tree(owner); return; } spin_unlock(&hash_lock); /* * pin_inotify_watch() succeeded, so the watch won't go away * from under us. 
*/ mutex_lock(&chunk->watch.inode->inotify_mutex); if (chunk->dead) { mutex_unlock(&chunk->watch.inode->inotify_mutex); goto out; } owner = p->owner; if (!size) { chunk->dead = 1; spin_lock(&hash_lock); list_del_init(&chunk->trees); if (owner->root == chunk) owner->root = NULL; list_del_init(&p->list); list_del_rcu(&chunk->hash); spin_unlock(&hash_lock); inotify_evict_watch(&chunk->watch); mutex_unlock(&chunk->watch.inode->inotify_mutex); put_inotify_watch(&chunk->watch); goto out; } new = alloc_chunk(size); if (!new) goto Fallback; if (inotify_clone_watch(&chunk->watch, &new->watch) < 0) { free_chunk(new); goto Fallback; } chunk->dead = 1; spin_lock(&hash_lock); list_replace_init(&chunk->trees, &new->trees); if (owner->root == chunk) { list_del_init(&owner->same_root); owner->root = NULL; } for (i = j = 0; j <= size; i++, j++) { struct audit_tree *s; if (&chunk->owners[j] == p) { list_del_init(&p->list); i--; continue; } s = chunk->owners[j].owner; new->owners[i].owner = s; new->owners[i].index = chunk->owners[j].index - j + i; if (!s) /* result of earlier fallback */ continue; get_tree(s); list_replace_init(&chunk->owners[j].list, &new->owners[i].list); } list_replace_rcu(&chunk->hash, &new->hash); list_for_each_entry(owner, &new->trees, same_root) owner->root = new; spin_unlock(&hash_lock); inotify_evict_watch(&chunk->watch); mutex_unlock(&chunk->watch.inode->inotify_mutex); put_inotify_watch(&chunk->watch); goto out; Fallback: // do the best we can spin_lock(&hash_lock); if (owner->root == chunk) { list_del_init(&owner->same_root); owner->root = NULL; } list_del_init(&p->list); p->owner = NULL; put_tree(owner); spin_unlock(&hash_lock); mutex_unlock(&chunk->watch.inode->inotify_mutex); out: unpin_inotify_watch(&chunk->watch); spin_lock(&hash_lock); } static int create_chunk(struct inode *inode, struct audit_tree *tree) { struct audit_chunk *chunk = alloc_chunk(1); if (!chunk) return -ENOMEM; if (inotify_add_watch(rtree_ih, &chunk->watch, inode, IN_IGNORED | 
IN_DELETE_SELF) < 0) { free_chunk(chunk); return -ENOSPC; } mutex_lock(&inode->inotify_mutex); spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; inotify_evict_watch(&chunk->watch); mutex_unlock(&inode->inotify_mutex); put_inotify_watch(&chunk->watch); return 0; } chunk->owners[0].index = (1U << 31); chunk->owners[0].owner = tree; get_tree(tree); list_add(&chunk->owners[0].list, &tree->chunks); if (!tree->root) { tree->root = chunk; list_add(&tree->same_root, &chunk->trees); } insert_hash(chunk); spin_unlock(&hash_lock); mutex_unlock(&inode->inotify_mutex); return 0; } /* the first tagged inode becomes root of tree */ static int tag_chunk(struct inode *inode, struct audit_tree *tree) { struct inotify_watch *watch; struct audit_tree *owner; struct audit_chunk *chunk, *old; struct node *p; int n; if (inotify_find_watch(rtree_ih, inode, &watch) < 0) return create_chunk(inode, tree); old = container_of(watch, struct audit_chunk, watch); /* are we already there? 
*/ spin_lock(&hash_lock); for (n = 0; n < old->count; n++) { if (old->owners[n].owner == tree) { spin_unlock(&hash_lock); put_inotify_watch(&old->watch); return 0; } } spin_unlock(&hash_lock); chunk = alloc_chunk(old->count + 1); if (!chunk) { put_inotify_watch(&old->watch); return -ENOMEM; } mutex_lock(&inode->inotify_mutex); if (inotify_clone_watch(&old->watch, &chunk->watch) < 0) { mutex_unlock(&inode->inotify_mutex); put_inotify_watch(&old->watch); free_chunk(chunk); return -ENOSPC; } spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); chunk->dead = 1; inotify_evict_watch(&chunk->watch); mutex_unlock(&inode->inotify_mutex); put_inotify_watch(&old->watch); put_inotify_watch(&chunk->watch); return 0; } list_replace_init(&old->trees, &chunk->trees); for (n = 0, p = chunk->owners; n < old->count; n++, p++) { struct audit_tree *s = old->owners[n].owner; p->owner = s; p->index = old->owners[n].index; if (!s) /* result of fallback in untag */ continue; get_tree(s); list_replace_init(&old->owners[n].list, &p->list); } p->index = (chunk->count - 1) | (1U<<31); p->owner = tree; get_tree(tree); list_add(&p->list, &tree->chunks); list_replace_rcu(&old->hash, &chunk->hash); list_for_each_entry(owner, &chunk->trees, same_root) owner->root = chunk; old->dead = 1; if (!tree->root) { tree->root = chunk; list_add(&tree->same_root, &chunk->trees); } spin_unlock(&hash_lock); inotify_evict_watch(&old->watch); mutex_unlock(&inode->inotify_mutex); put_inotify_watch(&old->watch); /* pair to inotify_find_watch */ put_inotify_watch(&old->watch); /* and kill it */ return 0; } static void kill_rules(struct audit_tree *tree) { struct audit_krule *rule, *next; struct audit_entry *entry; struct audit_buffer *ab; list_for_each_entry_safe(rule, next, &tree->rules, rlist) { entry = container_of(rule, struct audit_entry, rule); list_del_init(&rule->rlist); if (rule->tree) { /* not a half-baked one */ ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_CONFIG_CHANGE); audit_log_format(ab, 
"op="); audit_log_string(ab, "remove rule"); audit_log_format(ab, " dir="); audit_log_untrustedstring(ab, rule->tree->pathname); audit_log_key(ab, rule->filterkey); audit_log_format(ab, " list=%d res=1", rule->listnr); audit_log_end(ab); rule->tree = NULL; list_del_rcu(&entry->list); list_del(&entry->rule.list); call_rcu(&entry->rcu, audit_free_rule_rcu); } } } /* * finish killing struct audit_tree */ static void prune_one(struct audit_tree *victim) { spin_lock(&hash_lock); while (!list_empty(&victim->chunks)) { struct node *p; p = list_entry(victim->chunks.next, struct node, list); untag_chunk(p); } spin_unlock(&hash_lock); put_tree(victim); } /* trim the uncommitted chunks from tree */ static void trim_marked(struct audit_tree *tree) { struct list_head *p, *q; spin_lock(&hash_lock); if (tree->goner) { spin_unlock(&hash_lock); return; } /* reorder */ for (p = tree->chunks.next; p != &tree->chunks; p = q) { struct node *node = list_entry(p, struct node, list); q = p->next; if (node->index & (1U<<31)) { list_del_init(p); list_add(p, &tree->chunks); } } while (!list_empty(&tree->chunks)) { struct node *node; node = list_entry(tree->chunks.next, struct node, list); /* have we run out of marked? 
*/ if (!(node->index & (1U<<31))) break; untag_chunk(node); } if (!tree->root && !tree->goner) { tree->goner = 1; spin_unlock(&hash_lock); mutex_lock(&audit_filter_mutex); kill_rules(tree); list_del_init(&tree->list); mutex_unlock(&audit_filter_mutex); prune_one(tree); } else { spin_unlock(&hash_lock); } } static void audit_schedule_prune(void); /* called with audit_filter_mutex */ int audit_remove_tree_rule(struct audit_krule *rule) { struct audit_tree *tree; tree = rule->tree; if (tree) { spin_lock(&hash_lock); list_del_init(&rule->rlist); if (list_empty(&tree->rules) && !tree->goner) { tree->root = NULL; list_del_init(&tree->same_root); tree->goner = 1; list_move(&tree->list, &prune_list); rule->tree = NULL; spin_unlock(&hash_lock); audit_schedule_prune(); return 1; } rule->tree = NULL; spin_unlock(&hash_lock); return 1; } return 0; } static int compare_root(struct vfsmount *mnt, void *arg) { return mnt->mnt_root->d_inode == arg; } void audit_trim_trees(void) { struct list_head cursor; mutex_lock(&audit_filter_mutex); list_add(&cursor, &tree_list); while (cursor.next != &tree_list) { struct audit_tree *tree; struct path path; struct vfsmount *root_mnt; struct node *node; int err; tree = container_of(cursor.next, struct audit_tree, list); get_tree(tree); list_del(&cursor); list_add(&cursor, &tree->list); mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path); if (err) goto skip_it; root_mnt = collect_mounts(&path); path_put(&path); if (!root_mnt) goto skip_it; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) { struct inode *inode = find_chunk(node)->watch.inode; node->index |= 1U<<31; if (iterate_mounts(compare_root, inode, root_mnt)) node->index &= ~(1U<<31); } spin_unlock(&hash_lock); trim_marked(tree); put_tree(tree); drop_collected_mounts(root_mnt); skip_it: mutex_lock(&audit_filter_mutex); } list_del(&cursor); mutex_unlock(&audit_filter_mutex); } int audit_make_tree(struct audit_krule *rule, char *pathname, u32 op) 
{ if (pathname[0] != '/' || rule->listnr != AUDIT_FILTER_EXIT || op != Audit_equal || rule->inode_f || rule->watch || rule->tree) return -EINVAL; rule->tree = alloc_tree(pathname); if (!rule->tree) return -ENOMEM; return 0; } void audit_put_tree(struct audit_tree *tree) { put_tree(tree); } static int tag_mount(struct vfsmount *mnt, void *arg) { return tag_chunk(mnt->mnt_root->d_inode, arg); } /* called with audit_filter_mutex */ int audit_add_tree_rule(struct audit_krule *rule) { struct audit_tree *seed = rule->tree, *tree; struct path path; struct vfsmount *mnt; int err; list_for_each_entry(tree, &tree_list, list) { if (!strcmp(seed->pathname, tree->pathname)) { put_tree(seed); rule->tree = tree; list_add(&rule->rlist, &tree->rules); return 0; } } tree = seed; list_add(&tree->list, &tree_list); list_add(&rule->rlist, &tree->rules); /* do not set rule->tree yet */ mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path); if (err) goto Err; mnt = collect_mounts(&path); path_put(&path); if (!mnt) { err = -ENOMEM; goto Err; } get_tree(tree); err = iterate_mounts(tag_mount, tree, mnt); drop_collected_mounts(mnt); if (!err) { struct node *node; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) node->index &= ~(1U<<31); spin_unlock(&hash_lock); } else { trim_marked(tree); goto Err; } mutex_lock(&audit_filter_mutex); if (list_empty(&rule->rlist)) { put_tree(tree); return -ENOENT; } rule->tree = tree; put_tree(tree); return 0; Err: mutex_lock(&audit_filter_mutex); list_del_init(&tree->list); list_del_init(&tree->rules); put_tree(tree); return err; } int audit_tag_tree(char *old, char *new) { struct list_head cursor, barrier; int failed = 0; struct path path1, path2; struct vfsmount *tagged; int err; err = kern_path(new, 0, &path2); if (err) return err; tagged = collect_mounts(&path2); path_put(&path2); if (!tagged) return -ENOMEM; err = kern_path(old, 0, &path1); if (err) { drop_collected_mounts(tagged); return err; } 
mutex_lock(&audit_filter_mutex); list_add(&barrier, &tree_list); list_add(&cursor, &barrier); while (cursor.next != &tree_list) { struct audit_tree *tree; int good_one = 0; tree = container_of(cursor.next, struct audit_tree, list); get_tree(tree); list_del(&cursor); list_add(&cursor, &tree->list); mutex_unlock(&audit_filter_mutex); err = kern_path(tree->pathname, 0, &path2); if (!err) { good_one = path_is_under(&path1, &path2); path_put(&path2); } if (!good_one) { put_tree(tree); mutex_lock(&audit_filter_mutex); continue; } failed = iterate_mounts(tag_mount, tree, tagged); if (failed) { put_tree(tree); mutex_lock(&audit_filter_mutex); break; } mutex_lock(&audit_filter_mutex); spin_lock(&hash_lock); if (!tree->goner) { list_del(&tree->list); list_add(&tree->list, &tree_list); } spin_unlock(&hash_lock); put_tree(tree); } while (barrier.prev != &tree_list) { struct audit_tree *tree; tree = container_of(barrier.prev, struct audit_tree, list); get_tree(tree); list_del(&tree->list); list_add(&tree->list, &barrier); mutex_unlock(&audit_filter_mutex); if (!failed) { struct node *node; spin_lock(&hash_lock); list_for_each_entry(node, &tree->chunks, list) node->index &= ~(1U<<31); spin_unlock(&hash_lock); } else { trim_marked(tree); } put_tree(tree); mutex_lock(&audit_filter_mutex); } list_del(&barrier); list_del(&cursor); mutex_unlock(&audit_filter_mutex); path_put(&path1); drop_collected_mounts(tagged); return failed; } /* * That gets run when evict_chunk() ends up needing to kill audit_tree. * Runs from a separate thread. 
*/ static int prune_tree_thread(void *unused) { mutex_lock(&audit_cmd_mutex); mutex_lock(&audit_filter_mutex); while (!list_empty(&prune_list)) { struct audit_tree *victim; victim = list_entry(prune_list.next, struct audit_tree, list); list_del_init(&victim->list); mutex_unlock(&audit_filter_mutex); prune_one(victim); mutex_lock(&audit_filter_mutex); } mutex_unlock(&audit_filter_mutex); mutex_unlock(&audit_cmd_mutex); return 0; } static void audit_schedule_prune(void) { kthread_run(prune_tree_thread, NULL, "audit_prune_tree"); } /* * ... and that one is done if evict_chunk() decides to delay until the end * of syscall. Runs synchronously. */ void audit_kill_trees(struct list_head *list) { mutex_lock(&audit_cmd_mutex); mutex_lock(&audit_filter_mutex); while (!list_empty(list)) { struct audit_tree *victim; victim = list_entry(list->next, struct audit_tree, list); kill_rules(victim); list_del_init(&victim->list); mutex_unlock(&audit_filter_mutex); prune_one(victim); mutex_lock(&audit_filter_mutex); } mutex_unlock(&audit_filter_mutex); mutex_unlock(&audit_cmd_mutex); } /* * Here comes the stuff asynchronous to auditctl operations */ /* inode->inotify_mutex is locked */ static void evict_chunk(struct audit_chunk *chunk) { struct audit_tree *owner; struct list_head *postponed = audit_killed_trees(); int need_prune = 0; int n; if (chunk->dead) return; chunk->dead = 1; mutex_lock(&audit_filter_mutex); spin_lock(&hash_lock); while (!list_empty(&chunk->trees)) { owner = list_entry(chunk->trees.next, struct audit_tree, same_root); owner->goner = 1; owner->root = NULL; list_del_init(&owner->same_root); spin_unlock(&hash_lock); if (!postponed) { kill_rules(owner); list_move(&owner->list, &prune_list); need_prune = 1; } else { list_move(&owner->list, postponed); } spin_lock(&hash_lock); } list_del_rcu(&chunk->hash); for (n = 0; n < chunk->count; n++) list_del_init(&chunk->owners[n].list); spin_unlock(&hash_lock); if (need_prune) audit_schedule_prune(); 
mutex_unlock(&audit_filter_mutex); } static void handle_event(struct inotify_watch *watch, u32 wd, u32 mask, u32 cookie, const char *dname, struct inode *inode) { struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch); if (mask & IN_IGNORED) { evict_chunk(chunk); put_inotify_watch(watch); } } static void destroy_watch(struct inotify_watch *watch) { struct audit_chunk *chunk = container_of(watch, struct audit_chunk, watch); call_rcu(&chunk->head, __put_chunk); } static const struct inotify_operations rtree_inotify_ops = { .handle_event = handle_event, .destroy_watch = destroy_watch, }; static int __init audit_tree_init(void) { int i; rtree_ih = inotify_init(&rtree_inotify_ops); if (IS_ERR(rtree_ih)) audit_panic("cannot initialize inotify handle for rectree watches"); for (i = 0; i < HASH_SIZE; i++) INIT_LIST_HEAD(&chunk_hash_heads[i]); return 0; } __initcall(audit_tree_init);
gpl-2.0
Bdaman80/BDA-Lexikon
net/ipv4/ip_fragment.c
802
18401
/* * INET An implementation of the TCP/IP protocol suite for the LINUX * operating system. INET is implemented using the BSD Socket * interface as the means of communication with the user level. * * The IP fragmentation functionality. * * Authors: Fred N. van Kempen <waltje@uWalt.NL.Mugnet.ORG> * Alan Cox <alan@lxorguk.ukuu.org.uk> * * Fixes: * Alan Cox : Split from ip.c , see ip_input.c for history. * David S. Miller : Begin massive cleanup... * Andi Kleen : Add sysctls. * xxxx : Overlapfrag bug. * Ultima : ip_expire() kernel panic. * Bill Hawes : Frag accounting and evictor fixes. * John McDonald : 0 length frag bug. * Alexey Kuznetsov: SMP races, threading, cleanup. * Patrick McHardy : LRU queue of frag heads for evictor. */ #include <linux/compiler.h> #include <linux/module.h> #include <linux/types.h> #include <linux/mm.h> #include <linux/jiffies.h> #include <linux/skbuff.h> #include <linux/list.h> #include <linux/ip.h> #include <linux/icmp.h> #include <linux/netdevice.h> #include <linux/jhash.h> #include <linux/random.h> #include <linux/slab.h> #include <net/route.h> #include <net/dst.h> #include <net/sock.h> #include <net/ip.h> #include <net/icmp.h> #include <net/checksum.h> #include <net/inetpeer.h> #include <net/inet_frag.h> #include <linux/tcp.h> #include <linux/udp.h> #include <linux/inet.h> #include <linux/netfilter_ipv4.h> /* NOTE. Logic of IP defragmentation is parallel to corresponding IPv6 * code now. If you change something here, _PLEASE_ update ipv6/reassembly.c * as well. Or notify me, at least. --ANK */ static int sysctl_ipfrag_max_dist __read_mostly = 64; struct ipfrag_skb_cb { struct inet_skb_parm h; int offset; }; #define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb)) /* Describe an entry in the "incomplete datagrams" queue. 
*/ struct ipq { struct inet_frag_queue q; u32 user; __be32 saddr; __be32 daddr; __be16 id; u8 protocol; int iif; unsigned int rid; struct inet_peer *peer; }; static struct inet_frags ip4_frags; int ip_frag_nqueues(struct net *net) { return net->ipv4.frags.nqueues; } int ip_frag_mem(struct net *net) { return atomic_read(&net->ipv4.frags.mem); } static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev, struct net_device *dev); struct ip4_create_arg { struct iphdr *iph; u32 user; }; static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot) { return jhash_3words((__force u32)id << 16 | prot, (__force u32)saddr, (__force u32)daddr, ip4_frags.rnd) & (INETFRAGS_HASHSZ - 1); } static unsigned int ip4_hashfn(struct inet_frag_queue *q) { struct ipq *ipq; ipq = container_of(q, struct ipq, q); return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol); } static int ip4_frag_match(struct inet_frag_queue *q, void *a) { struct ipq *qp; struct ip4_create_arg *arg = a; qp = container_of(q, struct ipq, q); return (qp->id == arg->iph->id && qp->saddr == arg->iph->saddr && qp->daddr == arg->iph->daddr && qp->protocol == arg->iph->protocol && qp->user == arg->user); } /* Memory Tracking Functions. */ static __inline__ void frag_kfree_skb(struct netns_frags *nf, struct sk_buff *skb, int *work) { if (work) *work -= skb->truesize; atomic_sub(skb->truesize, &nf->mem); kfree_skb(skb); } static void ip4_frag_init(struct inet_frag_queue *q, void *a) { struct ipq *qp = container_of(q, struct ipq, q); struct ip4_create_arg *arg = a; qp->protocol = arg->iph->protocol; qp->id = arg->iph->id; qp->saddr = arg->iph->saddr; qp->daddr = arg->iph->daddr; qp->user = arg->user; qp->peer = sysctl_ipfrag_max_dist ? inet_getpeer(arg->iph->saddr, 1) : NULL; } static __inline__ void ip4_frag_free(struct inet_frag_queue *q) { struct ipq *qp; qp = container_of(q, struct ipq, q); if (qp->peer) inet_putpeer(qp->peer); } /* Destruction primitives. 
 */

/* Drop one reference; the queue is destroyed when the last ref goes. */
static __inline__ void ipq_put(struct ipq *ipq)
{
	inet_frag_put(&ipq->q, &ip4_frags);
}

/* Kill ipq entry. It is not destroyed immediately,
 * because caller (and someone more) holds reference count.
 */
static void ipq_kill(struct ipq *ipq)
{
	inet_frag_kill(&ipq->q, &ip4_frags);
}

/* Memory limiting on fragments.  Evictor trashes the oldest
 * fragment queue until we are back under the threshold.
 */
static void ip_evictor(struct net *net)
{
	int evicted;

	evicted = inet_frag_evictor(&net->ipv4.frags, &ip4_frags);
	if (evicted)
		IP_ADD_STATS_BH(net, IPSTATS_MIB_REASMFAILS, evicted);
}

/*
 * Oops, a fragment queue timed out.  Kill it and send an ICMP reply.
 * Runs as the frag-queue timer callback; 'arg' is the inet_frag_queue.
 */
static void ip_expire(unsigned long arg)
{
	struct ipq *qp;
	struct net *net;

	qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
	net = container_of(qp->q.net, struct net, ipv4.frags);

	spin_lock(&qp->q.lock);

	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto out;

	ipq_kill(qp);

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMTIMEOUT);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);

	/* Only complain via ICMP if we actually saw the first fragment. */
	if ((qp->q.last_in & INET_FRAG_FIRST_IN) && qp->q.fragments != NULL) {
		struct sk_buff *head = qp->q.fragments;

		rcu_read_lock();
		head->dev = dev_get_by_index_rcu(net, qp->iif);
		if (!head->dev)
			goto out_rcu_unlock;

		/*
		 * Only search router table for the head fragment,
		 * when defraging timeout at PRE_ROUTING HOOK.
		 */
		if (qp->user == IP_DEFRAG_CONNTRACK_IN && !skb_dst(head)) {
			const struct iphdr *iph = ip_hdr(head);
			int err = ip_route_input(head, iph->daddr, iph->saddr,
						 iph->tos, head->dev);
			if (unlikely(err))
				goto out_rcu_unlock;

			/*
			 * Only an end host needs to send an ICMP
			 * "Fragment Reassembly Timeout" message, per RFC792.
			 */
			if (skb_rtable(head)->rt_type != RTN_LOCAL)
				goto out_rcu_unlock;

		}

		/* Send an ICMP "Fragment Reassembly Timeout" message. */
		icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
out_rcu_unlock:
		rcu_read_unlock();
	}
out:
	spin_unlock(&qp->q.lock);
	ipq_put(qp);
}

/* Find the correct entry in the "incomplete datagrams" queue for
 * this IP datagram, and create new one, if nothing is found.
 * Returns a referenced ipq, or NULL on allocation failure.
 */
static inline struct ipq *ip_find(struct net *net, struct iphdr *iph, u32 user)
{
	struct inet_frag_queue *q;
	struct ip4_create_arg arg;
	unsigned int hash;

	arg.iph = iph;
	arg.user = user;

	/* rnd may be reseeded; hold the lock so hash and lookup agree. */
	read_lock(&ip4_frags.lock);
	hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);

	q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
	if (q == NULL)
		goto out_nomem;

	return container_of(q, struct ipq, q);

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "ip_frag_create: no memory left !\n");
	return NULL;
}

/* Is the fragment too far ahead to be part of ipq?
 * Heuristic: if more than sysctl_ipfrag_max_dist datagrams from the same
 * source arrived since this queue's last fragment, assume the IP id space
 * wrapped and the queue is stale.
 */
static inline int ip_frag_too_far(struct ipq *qp)
{
	struct inet_peer *peer = qp->peer;
	unsigned int max = sysctl_ipfrag_max_dist;
	unsigned int start, end;

	int rc;

	if (!peer || !max)
		return 0;

	start = qp->rid;
	end = atomic_inc_return(&peer->rid);
	qp->rid = end;

	rc = qp->q.fragments && (end - start) > max;

	if (rc) {
		struct net *net;

		net = container_of(qp->q.net, struct net, ipv4.frags);
		IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	}

	return rc;
}

/* Flush a stale queue and restart its timer so it can be reused for the
 * fragment that triggered the too-far check.  Returns -ETIMEDOUT if the
 * expire timer already fired (queue is being torn down).
 */
static int ip_frag_reinit(struct ipq *qp)
{
	struct sk_buff *fp;

	if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
		atomic_inc(&qp->q.refcnt);
		return -ETIMEDOUT;
	}

	fp = qp->q.fragments;
	do {
		struct sk_buff *xp = fp->next;
		frag_kfree_skb(qp->q.net, fp, NULL);
		fp = xp;
	} while (fp);

	qp->q.last_in = 0;
	qp->q.len = 0;
	qp->q.meat = 0;
	qp->q.fragments = NULL;
	qp->iif = 0;

	return 0;
}

/* Add new segment to existing queue.
 */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
	struct sk_buff *prev, *next;
	struct net_device *dev;
	int flags, offset;
	int ihl, end;
	int err = -ENOENT;

	/* Queue already completed or killed: drop the fragment. */
	if (qp->q.last_in & INET_FRAG_COMPLETE)
		goto err;

	/* Anti-DoS: a fragment "too far" ahead forces a queue reinit. */
	if (!(IPCB(skb)->flags & IPSKB_FRAG_COMPLETE) &&
	    unlikely(ip_frag_too_far(qp)) &&
	    unlikely(err = ip_frag_reinit(qp))) {
		ipq_kill(qp);
		goto err;
	}

	offset = ntohs(ip_hdr(skb)->frag_off);
	flags = offset & ~IP_OFFSET;
	offset &= IP_OFFSET;
	offset <<= 3;		/* offset is in 8-byte chunks */
	ihl = ip_hdrlen(skb);

	/* Determine the position of this fragment. */
	end = offset + skb->len - ihl;
	err = -EINVAL;

	/* Is this the final fragment? */
	if ((flags & IP_MF) == 0) {
		/* If we already have some bits beyond end
		 * or have different end, the segment is corrupted.
		 */
		if (end < qp->q.len ||
		    ((qp->q.last_in & INET_FRAG_LAST_IN) && end != qp->q.len))
			goto err;
		qp->q.last_in |= INET_FRAG_LAST_IN;
		qp->q.len = end;
	} else {
		/* Non-final fragments must end on an 8-byte boundary. */
		if (end&7) {
			end &= ~7;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
		if (end > qp->q.len) {
			/* Some bits beyond end -> corruption. */
			if (qp->q.last_in & INET_FRAG_LAST_IN)
				goto err;
			qp->q.len = end;
		}
	}
	/* Zero-length fragment (the "0 length frag bug"): reject. */
	if (end == offset)
		goto err;

	err = -ENOMEM;
	/* Strip the IP header; the queue holds payload-only skbs. */
	if (pskb_pull(skb, ihl) == NULL)
		goto err;

	err = pskb_trim_rcsum(skb, end - offset);
	if (err)
		goto err;

	/* Find out which fragments are in front and at the back of us
	 * in the chain of fragments so far.  We must know where to put
	 * this fragment, right?
	 */
	prev = NULL;
	for (next = qp->q.fragments; next != NULL; next = next->next) {
		if (FRAG_CB(next)->offset >= offset)
			break;	/* bingo! */
		prev = next;
	}

	/* We found where to put this one.  Check for overlap with
	 * preceding fragment, and, if needed, align things so that
	 * any overlaps are eliminated.
	 */
	if (prev) {
		int i = (FRAG_CB(prev)->offset + prev->len) - offset;

		if (i > 0) {
			/* Trim the overlapping head of this fragment. */
			offset += i;
			err = -EINVAL;
			if (end <= offset)
				goto err;
			err = -ENOMEM;
			if (!pskb_pull(skb, i))
				goto err;
			if (skb->ip_summed != CHECKSUM_UNNECESSARY)
				skb->ip_summed = CHECKSUM_NONE;
		}
	}

	err = -ENOMEM;

	/* Resolve overlaps with fragments that follow us. */
	while (next && FRAG_CB(next)->offset < end) {
		int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */

		if (i < next->len) {
			/* Eat head of the next overlapped fragment
			 * and leave the loop. The next ones cannot overlap.
			 */
			if (!pskb_pull(next, i))
				goto err;
			FRAG_CB(next)->offset += i;
			qp->q.meat -= i;
			if (next->ip_summed != CHECKSUM_UNNECESSARY)
				next->ip_summed = CHECKSUM_NONE;
			break;
		} else {
			struct sk_buff *free_it = next;

			/* Old fragment is completely overridden with
			 * new one drop it.
			 */
			next = next->next;

			if (prev)
				prev->next = next;
			else
				qp->q.fragments = next;

			qp->q.meat -= free_it->len;
			frag_kfree_skb(qp->q.net, free_it, NULL);
		}
	}

	FRAG_CB(skb)->offset = offset;

	/* Insert this fragment in the chain of fragments. */
	skb->next = next;
	if (prev)
		prev->next = skb;
	else
		qp->q.fragments = skb;

	dev = skb->dev;
	if (dev) {
		/* Remember arrival interface for ip_expire()'s ICMP reply. */
		qp->iif = dev->ifindex;
		skb->dev = NULL;
	}
	qp->q.stamp = skb->tstamp;
	qp->q.meat += skb->len;
	atomic_add(skb->truesize, &qp->q.net->mem);
	if (offset == 0)
		qp->q.last_in |= INET_FRAG_FIRST_IN;

	/* First and last fragment seen, and no holes left: reassemble. */
	if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) &&
	    qp->q.meat == qp->q.len)
		return ip_frag_reasm(qp, prev, dev);

	/* Still incomplete: refresh LRU position for the evictor. */
	write_lock(&ip4_frags.lock);
	list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list);
	write_unlock(&ip4_frags.lock);
	return -EINPROGRESS;

err:
	kfree_skb(skb);
	return err;
}


/* Build a new IP datagram from all its fragments.
 * 'prev' is the fragment that precedes the one just queued (so the newest
 * fragment can be made the head); 'dev' is the arrival device.
 * Returns 0 on success or a negative errno.
 */
static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
			 struct net_device *dev)
{
	struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
	struct iphdr *iph;
	struct sk_buff *fp, *head = qp->q.fragments;
	int len;
	int ihlen;
	int err;

	ipq_kill(qp);

	/* Make the one we just received the head.
	 */
	if (prev) {
		head = prev->next;
		fp = skb_clone(head, GFP_ATOMIC);
		if (!fp)
			goto out_nomem;

		fp->next = head->next;
		prev->next = fp;

		/* Swap identities so the newest skb becomes the list head. */
		skb_morph(head, qp->q.fragments);
		head->next = qp->q.fragments->next;

		kfree_skb(qp->q.fragments);
		qp->q.fragments = head;
	}

	WARN_ON(head == NULL);
	WARN_ON(FRAG_CB(head)->offset != 0);

	/* Allocate a new buffer for the datagram. */
	ihlen = ip_hdrlen(head);
	len = ihlen + qp->q.len;

	err = -E2BIG;
	if (len > 65535)
		goto out_oversize;

	/* Head of list must not be cloned. */
	if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC))
		goto out_nomem;

	/* If the first fragment is fragmented itself, we split
	 * it to two chunks: the first with data and paged part
	 * and the second, holding only fragments. */
	if (skb_has_frags(head)) {
		struct sk_buff *clone;
		int i, plen = 0;

		if ((clone = alloc_skb(0, GFP_ATOMIC)) == NULL)
			goto out_nomem;
		clone->next = head->next;
		head->next = clone;
		skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
		skb_frag_list_init(head);
		for (i=0; i<skb_shinfo(head)->nr_frags; i++)
			plen += skb_shinfo(head)->frags[i].size;
		clone->len = clone->data_len = head->data_len - plen;
		head->data_len -= clone->len;
		head->len -= clone->len;
		clone->csum = 0;
		clone->ip_summed = head->ip_summed;
		atomic_add(clone->truesize, &qp->q.net->mem);
	}

	/* Hang the remaining fragments off the head as its frag_list and
	 * fold their lengths/checksums into the head skb. */
	skb_shinfo(head)->frag_list = head->next;
	skb_push(head, head->data - skb_network_header(head));
	atomic_sub(head->truesize, &qp->q.net->mem);

	for (fp=head->next; fp; fp = fp->next) {
		head->data_len += fp->len;
		head->len += fp->len;
		if (head->ip_summed != fp->ip_summed)
			head->ip_summed = CHECKSUM_NONE;
		else if (head->ip_summed == CHECKSUM_COMPLETE)
			head->csum = csum_add(head->csum, fp->csum);
		head->truesize += fp->truesize;
		atomic_sub(fp->truesize, &qp->q.net->mem);
	}

	head->next = NULL;
	head->dev = dev;
	head->tstamp = qp->q.stamp;

	/* Rewrite the header: no longer a fragment, full length. */
	iph = ip_hdr(head);
	iph->frag_off = 0;
	iph->tot_len = htons(len);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMOKS);
	qp->q.fragments = NULL;
	return 0;

out_nomem:
	LIMIT_NETDEBUG(KERN_ERR "IP: queue_glue: no memory for gluing "
			      "queue %p\n", qp);
	err = -ENOMEM;
	goto out_fail;
out_oversize:
	if (net_ratelimit())
		printk(KERN_INFO "Oversized IP packet from %pI4.\n",
			&qp->saddr);
out_fail:
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	return err;
}

/* Process an incoming IP datagram fragment.
 * Consumes 'skb'.  Returns 0 once the datagram is fully reassembled into
 * skb (in-place), -EINPROGRESS while fragments are still missing, or a
 * negative error.
 */
int ip_defrag(struct sk_buff *skb, u32 user)
{
	struct ipq *qp;
	struct net *net;

	net = skb->dev ? dev_net(skb->dev) : dev_net(skb_dst(skb)->dev);
	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMREQDS);

	/* Start by cleaning up the memory. */
	if (atomic_read(&net->ipv4.frags.mem) > net->ipv4.frags.high_thresh)
		ip_evictor(net);

	/* Lookup (or create) queue header */
	if ((qp = ip_find(net, ip_hdr(skb), user)) != NULL) {
		int ret;

		spin_lock(&qp->q.lock);

		ret = ip_frag_queue(qp, skb);

		spin_unlock(&qp->q.lock);
		ipq_put(qp);
		return ret;
	}

	IP_INC_STATS_BH(net, IPSTATS_MIB_REASMFAILS);
	kfree_skb(skb);
	return -ENOMEM;
}

#ifdef CONFIG_SYSCTL
static int zero;

/* Per-netns tunables (data pointers patched per-netns at registration). */
static struct ctl_table ip4_frags_ns_ctl_table[] = {
	{
		.procname	= "ipfrag_high_thresh",
		.data		= &init_net.ipv4.frags.high_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_low_thresh",
		.data		= &init_net.ipv4.frags.low_thresh,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec
	},
	{
		.procname	= "ipfrag_time",
		.data		= &init_net.ipv4.frags.timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{ }
};

/* Global (non-netns) tunables. */
static struct ctl_table ip4_frags_ctl_table[] = {
	{
		.procname	= "ipfrag_secret_interval",
		.data		= &ip4_frags.secret_interval,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_jiffies,
	},
	{
		.procname	= "ipfrag_max_dist",
		.data		= &sysctl_ipfrag_max_dist,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &zero
	},
	{ }
};

/* Register the per-netns ipfrag sysctls (duplicating the table for
 * non-init netns so each gets its own data pointers). */
static int __net_init ip4_frags_ns_ctl_register(struct net *net)
{
	struct ctl_table *table;
	struct ctl_table_header *hdr;

	table = ip4_frags_ns_ctl_table;
	if (!net_eq(net, &init_net)) {
		table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
		if (table == NULL)
			goto err_alloc;

		/* Point the copied entries at this netns's own fields. */
		table[0].data = &net->ipv4.frags.high_thresh;
		table[1].data = &net->ipv4.frags.low_thresh;
		table[2].data = &net->ipv4.frags.timeout;
	}

	hdr = register_net_sysctl_table(net, net_ipv4_ctl_path, table);
	if (hdr == NULL)
		goto err_reg;

	net->ipv4.frags_hdr = hdr;
	return 0;

err_reg:
	if (!net_eq(net, &init_net))
		kfree(table);
err_alloc:
	return -ENOMEM;
}

/* Tear down the per-netns sysctls and free the duplicated table. */
static void __net_exit ip4_frags_ns_ctl_unregister(struct net *net)
{
	struct ctl_table *table;

	table = net->ipv4.frags_hdr->ctl_table_arg;
	unregister_net_sysctl_table(net->ipv4.frags_hdr);
	kfree(table);
}

/* Register the global (read-only across netns) ipfrag sysctls. */
static void ip4_frags_ctl_register(void)
{
	register_net_sysctl_rotable(net_ipv4_ctl_path, ip4_frags_ctl_table);
}
#else
static inline int ip4_frags_ns_ctl_register(struct net *net)
{
	return 0;
}

static inline void ip4_frags_ns_ctl_unregister(struct net *net)
{
}

static inline void ip4_frags_ctl_register(void)
{
}
#endif

/* Per-netns init: set default thresholds/timeout and register sysctls. */
static int __net_init ipv4_frags_init_net(struct net *net)
{
	/*
	 * Fragment cache limits. We will commit 256K at one time. Should we
	 * cross that limit we will prune down to 192K. This should cope with
	 * even the most extreme cases without allowing an attacker to
	 * measurably harm machine performance.
	 */
	net->ipv4.frags.high_thresh = 256 * 1024;
	net->ipv4.frags.low_thresh = 192 * 1024;

	/*
	 * Important NOTE! Fragment queue must be destroyed before MSL expires.
	 * RFC791 is wrong proposing to prolongate timer each fragment arrival
	 * by TTL.
	 */
	net->ipv4.frags.timeout = IP_FRAG_TIME;

	inet_frags_init_net(&net->ipv4.frags);

	return ip4_frags_ns_ctl_register(net);
}

/* Per-netns exit: unregister sysctls and flush all queues. */
static void __net_exit ipv4_frags_exit_net(struct net *net)
{
	ip4_frags_ns_ctl_unregister(net);
	inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
}

static struct pernet_operations ip4_frags_ops = {
	.init = ipv4_frags_init_net,
	.exit = ipv4_frags_exit_net,
};

/* Boot-time init: wire up the inet_frags callbacks for IPv4 reassembly. */
void __init ipfrag_init(void)
{
	ip4_frags_ctl_register();
	register_pernet_subsys(&ip4_frags_ops);
	ip4_frags.hashfn = ip4_hashfn;
	ip4_frags.constructor = ip4_frag_init;
	ip4_frags.destructor = ip4_frag_free;
	ip4_frags.skb_free = NULL;
	ip4_frags.qsize = sizeof(struct ipq);
	ip4_frags.match = ip4_frag_match;
	ip4_frags.frag_expire = ip_expire;
	ip4_frags.secret_interval = 10 * 60 * HZ;
	inet_frags_init(&ip4_frags);
}

EXPORT_SYMBOL(ip_defrag);
gpl-2.0
droidzone/Supernova-Kernel
drivers/drivers/atm/nicstar.c
802
86201
/****************************************************************************** * * nicstar.c * * Device driver supporting CBR for IDT 77201/77211 "NICStAR" based cards. * * IMPORTANT: The included file nicstarmac.c was NOT WRITTEN BY ME. * It was taken from the frle-0.22 device driver. * As the file doesn't have a copyright notice, in the file * nicstarmac.copyright I put the copyright notice from the * frle-0.22 device driver. * Some code is based on the nicstar driver by M. Welsh. * * Author: Rui Prior (rprior@inescn.pt) * PowerPC support by Jay Talbott (jay_talbott@mcg.mot.com) April 1999 * * * (C) INESC 1999 * * ******************************************************************************/ /**** IMPORTANT INFORMATION *************************************************** * * There are currently three types of spinlocks: * * 1 - Per card interrupt spinlock (to protect structures and such) * 2 - Per SCQ scq spinlock * 3 - Per card resource spinlock (to access registers, etc.) * * These must NEVER be grabbed in reverse order. 
 *
 ******************************************************************************/


/* Header files ***************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/atmdev.h>
#include <linux/atm.h>
#include <linux/pci.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/atomic.h>
#include "nicstar.h"
#ifdef CONFIG_ATM_NICSTAR_USE_SUNI
#include "suni.h"
#endif /* CONFIG_ATM_NICSTAR_USE_SUNI */
#ifdef CONFIG_ATM_NICSTAR_USE_IDT77105
#include "idt77105.h"
#endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */

/* The driver stores bus addresses in 32-bit fields throughout. */
#if BITS_PER_LONG != 32
#  error FIXME: this driver requires a 32-bit platform
#endif

/* Additional code ************************************************************/

#include "nicstarmac.c"


/* Configurable parameters ****************************************************/

#undef PHY_LOOPBACK
#undef TX_DEBUG
#undef RX_DEBUG
#undef GENERAL_DEBUG
#undef EXTRA_DEBUG

#undef NS_USE_DESTRUCTORS /* For now keep this undefined unless you know
			     you're going to use only raw ATM */


/* Do not touch these *********************************************************/

/* Conditional printk wrappers: compile to nothing unless the matching
 * *_DEBUG symbol above is defined. */
#ifdef TX_DEBUG
#define TXPRINTK(args...) printk(args)
#else
#define TXPRINTK(args...)
#endif /* TX_DEBUG */

#ifdef RX_DEBUG
#define RXPRINTK(args...) printk(args)
#else
#define RXPRINTK(args...)
#endif /* RX_DEBUG */

#ifdef GENERAL_DEBUG
#define PRINTK(args...) printk(args)
#else
#define PRINTK(args...)
#endif /* GENERAL_DEBUG */

#ifdef EXTRA_DEBUG
#define XPRINTK(args...) printk(args)
#else
#define XPRINTK(args...)
#endif /* EXTRA_DEBUG */


/* Macros *********************************************************************/

/* True while the SAR's command register has a command pending. */
#define CMD_BUSY(card) (readl((card)->membase + STAT) & NS_STAT_CMDBZ)

#define NS_DELAY mdelay(1)

/* Round a bus address up to 'alignment' (alignment must be a power of 2). */
#define ALIGN_BUS_ADDR(addr, alignment) \
        ((((u32) (addr)) + (((u32) (alignment)) - 1)) & ~(((u32) (alignment)) - 1))
#define ALIGN_ADDRESS(addr, alignment) \
        bus_to_virt(ALIGN_BUS_ADDR(virt_to_bus(addr), alignment))

#undef CEIL

#ifndef ATM_SKB
#define ATM_SKB(s) (&(s)->atm)
#endif


/* Function declarations ******************************************************/

static u32 ns_read_sram(ns_dev *card, u32 sram_address);
static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count);
static int __devinit ns_init_card(int i, struct pci_dev *pcidev);
static void __devinit ns_init_card_error(ns_dev *card, int error);
static scq_info *get_scq(int size, u32 scd);
static void free_scq(scq_info *scq, struct atm_vcc *vcc);
static void push_rxbufs(ns_dev *, struct sk_buff *);
static irqreturn_t ns_irq_handler(int irq, void *dev_id);
static int ns_open(struct atm_vcc *vcc);
static void ns_close(struct atm_vcc *vcc);
static void fill_tst(ns_dev *card, int n, vc_map *vc);
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb);
static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd,
                     struct sk_buff *skb);
static void process_tsq(ns_dev *card);
static void drain_scq(ns_dev *card, scq_info *scq, int pos);
static void process_rsq(ns_dev *card);
static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe);
#ifdef NS_USE_DESTRUCTORS
static void ns_sb_destructor(struct sk_buff *sb);
static void ns_lb_destructor(struct sk_buff *lb);
static void ns_hb_destructor(struct sk_buff *hb);
#endif /* NS_USE_DESTRUCTORS */
static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb);
static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count);
static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb);
static void dequeue_sm_buf(ns_dev
*card, struct sk_buff *sb);
static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb);
static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page);
static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg);
static void which_list(ns_dev *card, struct sk_buff *skb);
static void ns_poll(unsigned long arg);
static int ns_parse_mac(char *mac, unsigned char *esi);
static short ns_h2i(char c);
static void ns_phy_put(struct atm_dev *dev, unsigned char value,
                       unsigned long addr);
static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr);


/* Global variables ***********************************************************/

static struct ns_dev *cards[NS_MAX_CARDS];	/* indexed by probe order */
static unsigned num_cards;
/* Callbacks exported to the ATM core for every registered nicstar device. */
static struct atmdev_ops atm_ops =
{
	.open	= ns_open,
	.close	= ns_close,
	.ioctl	= ns_ioctl,
	.send	= ns_send,
	.phy_put	= ns_phy_put,
	.phy_get	= ns_phy_get,
	.proc_read	= ns_proc_read,
	.owner	= THIS_MODULE,
};
static struct timer_list ns_timer;		/* periodic ns_poll() timer */
static char *mac[NS_MAX_CARDS];			/* optional MAC override, module param */
module_param_array(mac, charp, NULL, 0);
MODULE_LICENSE("GPL");


/* Functions*******************************************************************/

/* PCI probe callback: assign the next free card index and initialize it. */
static int __devinit nicstar_init_one(struct pci_dev *pcidev,
				      const struct pci_device_id *ent)
{
	static int index = -1;
	unsigned int error;

	index++;
	cards[index] = NULL;

	error = ns_init_card(index, pcidev);
	if (error) {
		cards[index--] = NULL;	/* don't increment index */
		goto err_out;
	}

	return 0;
err_out:
	return -ENODEV;
}

/* PCI remove callback: quiesce the SAR, deregister from the ATM core and
 * release every buffer pool, queue and resource ns_init_card() set up. */
static void __devexit nicstar_remove_one(struct pci_dev *pcidev)
{
	int i, j;
	ns_dev *card = pci_get_drvdata(pcidev);
	struct sk_buff *hb;
	struct sk_buff *iovb;
	struct sk_buff *lb;
	struct sk_buff *sb;

	i = card->index;

	if (cards[i] == NULL)
		return;

	if (card->atmdev->phy && card->atmdev->phy->stop)
		card->atmdev->phy->stop(card->atmdev);

	/* Stop everything */
	writel(0x00000000, card->membase + CFG);

	/* De-register device */
	atm_dev_deregister(card->atmdev);

	/* Disable PCI device */
	pci_disable_device(pcidev);

	/* Free up resources */
	j = 0;
	PRINTK("nicstar%d: freeing %d huge buffers.\n", i, card->hbpool.count);
	while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL)
	{
		dev_kfree_skb_any(hb);
		j++;
	}
	PRINTK("nicstar%d: %d huge buffers freed.\n", i, j);
	j = 0;
	PRINTK("nicstar%d: freeing %d iovec buffers.\n", i, card->iovpool.count);
	while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL)
	{
		dev_kfree_skb_any(iovb);
		j++;
	}
	PRINTK("nicstar%d: %d iovec buffers freed.\n", i, j);
	while ((lb = skb_dequeue(&card->lbpool.queue)) != NULL)
		dev_kfree_skb_any(lb);
	while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL)
		dev_kfree_skb_any(sb);
	free_scq(card->scq0, NULL);
	for (j = 0; j < NS_FRSCD_NUM; j++)
	{
		if (card->scd2vc[j] != NULL)
			free_scq(card->scd2vc[j]->scq, card->scd2vc[j]->tx_vcc);
	}
	kfree(card->rsq.org);
	kfree(card->tsq.org);
	free_irq(card->pcidev->irq, card);
	iounmap(card->membase);
	kfree(card);
}

static struct pci_device_id nicstar_pci_tbl[] __devinitdata =
{
	{PCI_VENDOR_ID_IDT, PCI_DEVICE_ID_IDT_IDT77201,
	 PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
	{0,}			/* terminate list */
};
MODULE_DEVICE_TABLE(pci, nicstar_pci_tbl);

static struct pci_driver nicstar_driver = {
	.name		= "nicstar",
	.id_table	= nicstar_pci_tbl,
	.probe		= nicstar_init_one,
	.remove		= __devexit_p(nicstar_remove_one),
};

/* Module init: register the PCI driver and arm the periodic poll timer. */
static int __init nicstar_init(void)
{
	unsigned error = 0;	/* Initialized to remove compile warning */

	XPRINTK("nicstar: nicstar_init() called.\n");

	error = pci_register_driver(&nicstar_driver);

	TXPRINTK("nicstar: TX debug enabled.\n");
	RXPRINTK("nicstar: RX debug enabled.\n");
	PRINTK("nicstar: General debug enabled.\n");
#ifdef PHY_LOOPBACK
	printk("nicstar: using PHY loopback.\n");
#endif /* PHY_LOOPBACK */
	XPRINTK("nicstar: nicstar_init() returned.\n");

	if (!error) {
		init_timer(&ns_timer);
		ns_timer.expires = jiffies + NS_POLL_PERIOD;
		ns_timer.data = 0UL;
		ns_timer.function = ns_poll;
		add_timer(&ns_timer);
	}

	return error;
}

/* Module exit: stop the poll timer and unregister the PCI driver. */
static void __exit nicstar_cleanup(void)
{
	XPRINTK("nicstar: nicstar_cleanup() called.\n");
	del_timer(&ns_timer);

	pci_unregister_driver(&nicstar_driver);

	XPRINTK("nicstar: nicstar_cleanup() returned.\n");
}


/* Read one dword from the card's SRAM via the CMD register.
 * Takes res_lock; busy-waits on CMD_BUSY before and after issuing the read.
 */
static u32 ns_read_sram(ns_dev *card, u32 sram_address)
{
	unsigned long flags;
	u32 data;
	sram_address <<= 2;
	sram_address &= 0x0007FFFC;	/* address must be dword aligned */
	sram_address |= 0x50000000;	/* SRAM read command */
	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card));
	writel(sram_address, card->membase + CMD);
	while (CMD_BUSY(card));
	data = readl(card->membase + DR0);
	spin_unlock_irqrestore(&card->res_lock, flags);
	return data;
}


/* Write 'count' (1..4) dwords from 'value' to SRAM at 'sram_address',
 * staging them in DR0..DR3 and then issuing one SRAM write command.
 */
static void ns_write_sram(ns_dev *card, u32 sram_address, u32 *value, int count)
{
	unsigned long flags;
	int i, c;
	count--;	/* count range now is 0..3 instead of 1..4 */
	c = count;
	c <<= 2;	/* to use increments of 4 */
	spin_lock_irqsave(&card->res_lock, flags);
	while (CMD_BUSY(card));
	for (i = 0; i <= c; i += 4)
		writel(*(value++), card->membase + i);
	/* Note: DR# registers are the first 4 dwords in nicstar's memspace,
	   so card->membase + DR0 == card->membase */
	sram_address <<= 2;
	sram_address &= 0x0007FFFC;
	sram_address |= (0x40000000 | count);
	writel(sram_address, card->membase + CMD);
	spin_unlock_irqrestore(&card->res_lock, flags);
}


/* Bring up card number 'i': enable PCI, map registers, reset and probe the
 * PHY, size SRAM, build the TSQ/RSQ/SCQ/TST structures, pre-allocate the
 * receive buffer pools, hook the IRQ and register with the ATM core.
 * Returns 0 on success or a driver-specific error code (ns_init_card_error
 * uses the code to decide how much to unwind).
 */
static int __devinit ns_init_card(int i, struct pci_dev *pcidev)
{
	int j;
	struct ns_dev *card = NULL;
	unsigned char pci_latency;
	unsigned error;
	u32 data;
	u32 u32d[4];
	u32 ns_cfg_rctsize;
	int bcount;
	unsigned long membase;

	error = 0;

	if (pci_enable_device(pcidev))
	{
		printk("nicstar%d: can't enable PCI device\n", i);
		error = 2;
		ns_init_card_error(card, error);
		return error;
	}

	if ((card = kmalloc(sizeof(ns_dev), GFP_KERNEL)) == NULL)
	{
		printk("nicstar%d: can't allocate memory for device structure.\n", i);
		error = 2;
		ns_init_card_error(card, error);
		return error;
	}
	cards[i] = card;
	spin_lock_init(&card->int_lock);
	spin_lock_init(&card->res_lock);

	pci_set_drvdata(pcidev, card);

	card->index = i;
	card->atmdev = NULL;
	card->pcidev = pcidev;
	membase = pci_resource_start(pcidev, 1);
card->membase = ioremap(membase, NS_IOREMAP_SIZE); if (!card->membase) { printk("nicstar%d: can't ioremap() membase.\n",i); error = 3; ns_init_card_error(card, error); return error; } PRINTK("nicstar%d: membase at 0x%x.\n", i, card->membase); pci_set_master(pcidev); if (pci_read_config_byte(pcidev, PCI_LATENCY_TIMER, &pci_latency) != 0) { printk("nicstar%d: can't read PCI latency timer.\n", i); error = 6; ns_init_card_error(card, error); return error; } #ifdef NS_PCI_LATENCY if (pci_latency < NS_PCI_LATENCY) { PRINTK("nicstar%d: setting PCI latency timer to %d.\n", i, NS_PCI_LATENCY); for (j = 1; j < 4; j++) { if (pci_write_config_byte(pcidev, PCI_LATENCY_TIMER, NS_PCI_LATENCY) != 0) break; } if (j == 4) { printk("nicstar%d: can't set PCI latency timer to %d.\n", i, NS_PCI_LATENCY); error = 7; ns_init_card_error(card, error); return error; } } #endif /* NS_PCI_LATENCY */ /* Clear timer overflow */ data = readl(card->membase + STAT); if (data & NS_STAT_TMROF) writel(NS_STAT_TMROF, card->membase + STAT); /* Software reset */ writel(NS_CFG_SWRST, card->membase + CFG); NS_DELAY; writel(0x00000000, card->membase + CFG); /* PHY reset */ writel(0x00000008, card->membase + GP); NS_DELAY; writel(0x00000001, card->membase + GP); NS_DELAY; while (CMD_BUSY(card)); writel(NS_CMD_WRITE_UTILITY | 0x00000100, card->membase + CMD); /* Sync UTOPIA with SAR clock */ NS_DELAY; /* Detect PHY type */ while (CMD_BUSY(card)); writel(NS_CMD_READ_UTILITY | 0x00000200, card->membase + CMD); while (CMD_BUSY(card)); data = readl(card->membase + DR0); switch(data) { case 0x00000009: printk("nicstar%d: PHY seems to be 25 Mbps.\n", i); card->max_pcr = ATM_25_PCR; while(CMD_BUSY(card)); writel(0x00000008, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000200, card->membase + CMD); /* Clear an eventual pending interrupt */ writel(NS_STAT_SFBQF, card->membase + STAT); #ifdef PHY_LOOPBACK while(CMD_BUSY(card)); writel(0x00000022, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 
0x00000202, card->membase + CMD); #endif /* PHY_LOOPBACK */ break; case 0x00000030: case 0x00000031: printk("nicstar%d: PHY seems to be 155 Mbps.\n", i); card->max_pcr = ATM_OC3_PCR; #ifdef PHY_LOOPBACK while(CMD_BUSY(card)); writel(0x00000002, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000205, card->membase + CMD); #endif /* PHY_LOOPBACK */ break; default: printk("nicstar%d: unknown PHY type (0x%08X).\n", i, data); error = 8; ns_init_card_error(card, error); return error; } writel(0x00000000, card->membase + GP); /* Determine SRAM size */ data = 0x76543210; ns_write_sram(card, 0x1C003, &data, 1); data = 0x89ABCDEF; ns_write_sram(card, 0x14003, &data, 1); if (ns_read_sram(card, 0x14003) == 0x89ABCDEF && ns_read_sram(card, 0x1C003) == 0x76543210) card->sram_size = 128; else card->sram_size = 32; PRINTK("nicstar%d: %dK x 32bit SRAM size.\n", i, card->sram_size); card->rct_size = NS_MAX_RCTSIZE; #if (NS_MAX_RCTSIZE == 4096) if (card->sram_size == 128) printk("nicstar%d: limiting maximum VCI. See NS_MAX_RCTSIZE in nicstar.h\n", i); #elif (NS_MAX_RCTSIZE == 16384) if (card->sram_size == 32) { printk("nicstar%d: wasting memory. 
See NS_MAX_RCTSIZE in nicstar.h\n", i); card->rct_size = 4096; } #else #error NS_MAX_RCTSIZE must be either 4096 or 16384 in nicstar.c #endif card->vpibits = NS_VPIBITS; if (card->rct_size == 4096) card->vcibits = 12 - NS_VPIBITS; else /* card->rct_size == 16384 */ card->vcibits = 14 - NS_VPIBITS; /* Initialize the nicstar eeprom/eprom stuff, for the MAC addr */ if (mac[i] == NULL) nicstar_init_eprom(card->membase); /* Set the VPI/VCI MSb mask to zero so we can receive OAM cells */ writel(0x00000000, card->membase + VPM); /* Initialize TSQ */ card->tsq.org = kmalloc(NS_TSQSIZE + NS_TSQ_ALIGNMENT, GFP_KERNEL); if (card->tsq.org == NULL) { printk("nicstar%d: can't allocate TSQ.\n", i); error = 10; ns_init_card_error(card, error); return error; } card->tsq.base = (ns_tsi *) ALIGN_ADDRESS(card->tsq.org, NS_TSQ_ALIGNMENT); card->tsq.next = card->tsq.base; card->tsq.last = card->tsq.base + (NS_TSQ_NUM_ENTRIES - 1); for (j = 0; j < NS_TSQ_NUM_ENTRIES; j++) ns_tsi_init(card->tsq.base + j); writel(0x00000000, card->membase + TSQH); writel((u32) virt_to_bus(card->tsq.base), card->membase + TSQB); PRINTK("nicstar%d: TSQ base at 0x%x 0x%x 0x%x.\n", i, (u32) card->tsq.base, (u32) virt_to_bus(card->tsq.base), readl(card->membase + TSQB)); /* Initialize RSQ */ card->rsq.org = kmalloc(NS_RSQSIZE + NS_RSQ_ALIGNMENT, GFP_KERNEL); if (card->rsq.org == NULL) { printk("nicstar%d: can't allocate RSQ.\n", i); error = 11; ns_init_card_error(card, error); return error; } card->rsq.base = (ns_rsqe *) ALIGN_ADDRESS(card->rsq.org, NS_RSQ_ALIGNMENT); card->rsq.next = card->rsq.base; card->rsq.last = card->rsq.base + (NS_RSQ_NUM_ENTRIES - 1); for (j = 0; j < NS_RSQ_NUM_ENTRIES; j++) ns_rsqe_init(card->rsq.base + j); writel(0x00000000, card->membase + RSQH); writel((u32) virt_to_bus(card->rsq.base), card->membase + RSQB); PRINTK("nicstar%d: RSQ base at 0x%x.\n", i, (u32) card->rsq.base); /* Initialize SCQ0, the only VBR SCQ used */ card->scq1 = NULL; card->scq2 = NULL; card->scq0 = 
get_scq(VBR_SCQSIZE, NS_VRSCD0); if (card->scq0 == NULL) { printk("nicstar%d: can't get SCQ0.\n", i); error = 12; ns_init_card_error(card, error); return error; } u32d[0] = (u32) virt_to_bus(card->scq0->base); u32d[1] = (u32) 0x00000000; u32d[2] = (u32) 0xffffffff; u32d[3] = (u32) 0x00000000; ns_write_sram(card, NS_VRSCD0, u32d, 4); ns_write_sram(card, NS_VRSCD1, u32d, 4); /* These last two won't be used */ ns_write_sram(card, NS_VRSCD2, u32d, 4); /* but are initialized, just in case... */ card->scq0->scd = NS_VRSCD0; PRINTK("nicstar%d: VBR-SCQ0 base at 0x%x.\n", i, (u32) card->scq0->base); /* Initialize TSTs */ card->tst_addr = NS_TST0; card->tst_free_entries = NS_TST_NUM_ENTRIES; data = NS_TST_OPCODE_VARIABLE; for (j = 0; j < NS_TST_NUM_ENTRIES; j++) ns_write_sram(card, NS_TST0 + j, &data, 1); data = ns_tste_make(NS_TST_OPCODE_END, NS_TST0); ns_write_sram(card, NS_TST0 + NS_TST_NUM_ENTRIES, &data, 1); for (j = 0; j < NS_TST_NUM_ENTRIES; j++) ns_write_sram(card, NS_TST1 + j, &data, 1); data = ns_tste_make(NS_TST_OPCODE_END, NS_TST1); ns_write_sram(card, NS_TST1 + NS_TST_NUM_ENTRIES, &data, 1); for (j = 0; j < NS_TST_NUM_ENTRIES; j++) card->tste2vc[j] = NULL; writel(NS_TST0 << 2, card->membase + TSTB); /* Initialize RCT. AAL type is set on opening the VC. 
*/ #ifdef RCQ_SUPPORT u32d[0] = NS_RCTE_RAWCELLINTEN; #else u32d[0] = 0x00000000; #endif /* RCQ_SUPPORT */ u32d[1] = 0x00000000; u32d[2] = 0x00000000; u32d[3] = 0xFFFFFFFF; for (j = 0; j < card->rct_size; j++) ns_write_sram(card, j * 4, u32d, 4); memset(card->vcmap, 0, NS_MAX_RCTSIZE * sizeof(vc_map)); for (j = 0; j < NS_FRSCD_NUM; j++) card->scd2vc[j] = NULL; /* Initialize buffer levels */ card->sbnr.min = MIN_SB; card->sbnr.init = NUM_SB; card->sbnr.max = MAX_SB; card->lbnr.min = MIN_LB; card->lbnr.init = NUM_LB; card->lbnr.max = MAX_LB; card->iovnr.min = MIN_IOVB; card->iovnr.init = NUM_IOVB; card->iovnr.max = MAX_IOVB; card->hbnr.min = MIN_HB; card->hbnr.init = NUM_HB; card->hbnr.max = MAX_HB; card->sm_handle = 0x00000000; card->sm_addr = 0x00000000; card->lg_handle = 0x00000000; card->lg_addr = 0x00000000; card->efbie = 1; /* To prevent push_rxbufs from enabling the interrupt */ /* Pre-allocate some huge buffers */ skb_queue_head_init(&card->hbpool.queue); card->hbpool.count = 0; for (j = 0; j < NUM_HB; j++) { struct sk_buff *hb; hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); if (hb == NULL) { printk("nicstar%d: can't allocate %dth of %d huge buffers.\n", i, j, NUM_HB); error = 13; ns_init_card_error(card, error); return error; } NS_SKB_CB(hb)->buf_type = BUF_NONE; skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; } /* Allocate large buffers */ skb_queue_head_init(&card->lbpool.queue); card->lbpool.count = 0; /* Not used */ for (j = 0; j < NUM_LB; j++) { struct sk_buff *lb; lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); if (lb == NULL) { printk("nicstar%d: can't allocate %dth of %d large buffers.\n", i, j, NUM_LB); error = 14; ns_init_card_error(card, error); return error; } NS_SKB_CB(lb)->buf_type = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); /* Due to the implementation of push_rxbufs() this is 1, not 0 */ if (j == 1) { card->rcbuf = lb; card->rawch = (u32) virt_to_bus(lb->data); } } /* 
Test for strange behaviour which leads to crashes */ if ((bcount = ns_stat_lfbqc_get(readl(card->membase + STAT))) < card->lbnr.min) { printk("nicstar%d: Strange... Just allocated %d large buffers and lfbqc = %d.\n", i, j, bcount); error = 14; ns_init_card_error(card, error); return error; } /* Allocate small buffers */ skb_queue_head_init(&card->sbpool.queue); card->sbpool.count = 0; /* Not used */ for (j = 0; j < NUM_SB; j++) { struct sk_buff *sb; sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); if (sb == NULL) { printk("nicstar%d: can't allocate %dth of %d small buffers.\n", i, j, NUM_SB); error = 15; ns_init_card_error(card, error); return error; } NS_SKB_CB(sb)->buf_type = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } /* Test for strange behaviour which leads to crashes */ if ((bcount = ns_stat_sfbqc_get(readl(card->membase + STAT))) < card->sbnr.min) { printk("nicstar%d: Strange... Just allocated %d small buffers and sfbqc = %d.\n", i, j, bcount); error = 15; ns_init_card_error(card, error); return error; } /* Allocate iovec buffers */ skb_queue_head_init(&card->iovpool.queue); card->iovpool.count = 0; for (j = 0; j < NUM_IOVB; j++) { struct sk_buff *iovb; iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); if (iovb == NULL) { printk("nicstar%d: can't allocate %dth of %d iovec buffers.\n", i, j, NUM_IOVB); error = 16; ns_init_card_error(card, error); return error; } NS_SKB_CB(iovb)->buf_type = BUF_NONE; skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; } /* Configure NICStAR */ if (card->rct_size == 4096) ns_cfg_rctsize = NS_CFG_RCTSIZE_4096_ENTRIES; else /* (card->rct_size == 16384) */ ns_cfg_rctsize = NS_CFG_RCTSIZE_16384_ENTRIES; card->efbie = 1; card->intcnt = 0; if (request_irq(pcidev->irq, &ns_irq_handler, IRQF_DISABLED | IRQF_SHARED, "nicstar", card) != 0) { printk("nicstar%d: can't allocate IRQ %d.\n", i, pcidev->irq); error = 9; ns_init_card_error(card, error); return error; } /* 
Register device */ card->atmdev = atm_dev_register("nicstar", &atm_ops, -1, NULL); if (card->atmdev == NULL) { printk("nicstar%d: can't register device.\n", i); error = 17; ns_init_card_error(card, error); return error; } if (ns_parse_mac(mac[i], card->atmdev->esi)) { nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET, card->atmdev->esi, 6); if (memcmp(card->atmdev->esi, "\x00\x00\x00\x00\x00\x00", 6) == 0) { nicstar_read_eprom(card->membase, NICSTAR_EPROM_MAC_ADDR_OFFSET_ALT, card->atmdev->esi, 6); } } printk("nicstar%d: MAC address %pM\n", i, card->atmdev->esi); card->atmdev->dev_data = card; card->atmdev->ci_range.vpi_bits = card->vpibits; card->atmdev->ci_range.vci_bits = card->vcibits; card->atmdev->link_rate = card->max_pcr; card->atmdev->phy = NULL; #ifdef CONFIG_ATM_NICSTAR_USE_SUNI if (card->max_pcr == ATM_OC3_PCR) suni_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_SUNI */ #ifdef CONFIG_ATM_NICSTAR_USE_IDT77105 if (card->max_pcr == ATM_25_PCR) idt77105_init(card->atmdev); #endif /* CONFIG_ATM_NICSTAR_USE_IDT77105 */ if (card->atmdev->phy && card->atmdev->phy->start) card->atmdev->phy->start(card->atmdev); writel(NS_CFG_RXPATH | NS_CFG_SMBUFSIZE | NS_CFG_LGBUFSIZE | NS_CFG_EFBIE | NS_CFG_RSQSIZE | NS_CFG_VPIBITS | ns_cfg_rctsize | NS_CFG_RXINT_NODELAY | NS_CFG_RAWIE | /* Only enabled if RCQ_SUPPORT */ NS_CFG_RSQAFIE | NS_CFG_TXEN | NS_CFG_TXIE | NS_CFG_TSQFIE_OPT | /* Only enabled if ENABLE_TSQFIE */ NS_CFG_PHYIE, card->membase + CFG); num_cards++; return error; }
/* Unwind a failed ns_init_card(). 'error' encodes how far initialization
   got (higher value = more resources acquired); each threshold below frees
   what was set up by that stage, in reverse order of acquisition. */
static void __devinit ns_init_card_error(ns_dev *card, int error) { if (error >= 17) { writel(0x00000000, card->membase + CFG); } if (error >= 16) { struct sk_buff *iovb; while ((iovb = skb_dequeue(&card->iovpool.queue)) != NULL) dev_kfree_skb_any(iovb); } if (error >= 15) { struct sk_buff *sb; while ((sb = skb_dequeue(&card->sbpool.queue)) != NULL) dev_kfree_skb_any(sb); free_scq(card->scq0, NULL); } if (error >= 14) { struct sk_buff *lb; while ((lb = 
skb_dequeue(&card->lbpool.queue)) != NULL) dev_kfree_skb_any(lb); } if (error >= 13) { struct sk_buff *hb; while ((hb = skb_dequeue(&card->hbpool.queue)) != NULL) dev_kfree_skb_any(hb); } if (error >= 12) { kfree(card->rsq.org); } if (error >= 11) { kfree(card->tsq.org); } if (error >= 10) { free_irq(card->pcidev->irq, card); } if (error >= 4) { iounmap(card->membase); } if (error >= 3) { pci_disable_device(card->pcidev); kfree(card); } }
/* Allocate and initialize a Segmentation Channel Queue descriptor.
   'size' must be VBR_SCQSIZE or CBR_SCQSIZE; 2*size bytes are allocated so
   the ring base can be aligned to 'size' via ALIGN_ADDRESS. 'scd' is the
   SRAM address of the Segmentation Channel Descriptor this SCQ serves.
   Returns NULL on bad size or allocation failure.
   NOTE(review): scq->num_entries is assigned twice below; the second
   store is redundant but harmless. */
static scq_info *get_scq(int size, u32 scd) { scq_info *scq; int i; if (size != VBR_SCQSIZE && size != CBR_SCQSIZE) return NULL; scq = kmalloc(sizeof(scq_info), GFP_KERNEL); if (scq == NULL) return NULL; scq->org = kmalloc(2 * size, GFP_KERNEL); if (scq->org == NULL) { kfree(scq); return NULL; } scq->skb = kmalloc(sizeof(struct sk_buff *) * (size / NS_SCQE_SIZE), GFP_KERNEL); if (scq->skb == NULL) { kfree(scq->org); kfree(scq); return NULL; } scq->num_entries = size / NS_SCQE_SIZE; scq->base = (ns_scqe *) ALIGN_ADDRESS(scq->org, size); scq->next = scq->base; scq->last = scq->base + (scq->num_entries - 1); scq->tail = scq->last; scq->scd = scd; scq->num_entries = size / NS_SCQE_SIZE; scq->tbd_count = 0; init_waitqueue_head(&scq->scqfull_waitq); scq->full = 0; spin_lock_init(&scq->lock); for (i = 0; i < scq->num_entries; i++) scq->skb[i] = NULL; return scq; }
/* Release an SCQ and every skb still referenced by it, popping each skb
   back to its vcc when a pop callback exists. */
/* For variable rate SCQ vcc must be NULL */ static void free_scq(scq_info *scq, struct atm_vcc *vcc) { int i; if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] != NULL) { vcc = ATM_SKB(scq->skb[i])->vcc; if (vcc->pop != NULL) vcc->pop(vcc, scq->skb[i]); else dev_kfree_skb_any(scq->skb[i]); } } else /* vcc must be != NULL */ { if (vcc == NULL) { printk("nicstar: free_scq() called with vcc == NULL for fixed rate scq."); for (i = 0; i < scq->num_entries; i++) dev_kfree_skb_any(scq->skb[i]); } else for (i = 0; i < scq->num_entries; i++) { if (scq->skb[i] != NULL) { if (vcc->pop != NULL) vcc->pop(vcc, scq->skb[i]); else 
dev_kfree_skb_any(scq->skb[i]); } } } kfree(scq->skb); kfree(scq->org); kfree(scq); }
/* Feed one small or large receive buffer to the NIC's free buffer queues.
   The hardware accepts buffers in pairs, so the first buffer of each size
   class is parked in card->sm_addr/lg_addr (+handle) until a partner
   arrives; only then is the WRITE_FREEBUFQ command issued under res_lock.
   If the relevant free-buffer count is already at its max, both buffers of
   the pair are dropped instead. Re-enables the empty-free-buffer-queue
   interrupt once both pools are back above their minimums.
   NOTE(review): skb pointers are handed to the device as 32-bit handles
   via casts and virt_to_bus() -- this scheme only works on 32-bit
   platforms; later kernels switched to the DMA API with an idr map. */
/* The handles passed must be pointers to the sk_buff containing the small or large buffer(s) cast to u32. */ static void push_rxbufs(ns_dev *card, struct sk_buff *skb) { struct ns_skb_cb *cb = NS_SKB_CB(skb); u32 handle1, addr1; u32 handle2, addr2; u32 stat; unsigned long flags; /* *BARF* */ handle2 = addr2 = 0; handle1 = (u32)skb; addr1 = (u32)virt_to_bus(skb->data); #ifdef GENERAL_DEBUG if (!addr1) printk("nicstar%d: push_rxbufs called with addr1 = 0.\n", card->index); #endif /* GENERAL_DEBUG */ stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); if (cb->buf_type == BUF_SM) { if (!addr2) { if (card->sm_addr) { addr2 = card->sm_addr; handle2 = card->sm_handle; card->sm_addr = 0x00000000; card->sm_handle = 0x00000000; } else /* (!sm_addr) */ { card->sm_addr = addr1; card->sm_handle = handle1; } } } else /* buf_type == BUF_LG */ { if (!addr2) { if (card->lg_addr) { addr2 = card->lg_addr; handle2 = card->lg_handle; card->lg_addr = 0x00000000; card->lg_handle = 0x00000000; } else /* (!lg_addr) */ { card->lg_addr = addr1; card->lg_handle = handle1; } } } if (addr2) { if (cb->buf_type == BUF_SM) { if (card->sbfqc >= card->sbnr.max) { skb_unlink((struct sk_buff *) handle1, &card->sbpool.queue); dev_kfree_skb_any((struct sk_buff *) handle1); skb_unlink((struct sk_buff *) handle2, &card->sbpool.queue); dev_kfree_skb_any((struct sk_buff *) handle2); return; } else card->sbfqc += 2; } else /* (buf_type == BUF_LG) */ { if (card->lbfqc >= card->lbnr.max) { skb_unlink((struct sk_buff *) handle1, &card->lbpool.queue); dev_kfree_skb_any((struct sk_buff *) handle1); skb_unlink((struct sk_buff *) handle2, &card->lbpool.queue); dev_kfree_skb_any((struct sk_buff *) handle2); return; } else card->lbfqc += 2; } spin_lock_irqsave(&card->res_lock, flags); while (CMD_BUSY(card)); writel(addr2, card->membase + DR3); writel(handle2, 
card->membase + DR2); writel(addr1, card->membase + DR1); writel(handle1, card->membase + DR0); writel(NS_CMD_WRITE_FREEBUFQ | cb->buf_type, card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); XPRINTK("nicstar%d: Pushing %s buffers at 0x%x and 0x%x.\n", card->index, (cb->buf_type == BUF_SM ? "small" : "large"), addr1, addr2); } if (!card->efbie && card->sbfqc >= card->sbnr.min && card->lbfqc >= card->lbnr.min) { card->efbie = 1; writel((readl(card->membase + CFG) | NS_CFG_EFBIE), card->membase + CFG); } return; }
/* Shared interrupt handler: reads STAT once, then acknowledges each
   flagged condition (writing the bit back to STAT) and dispatches to the
   TSQ/RSQ processing and buffer-replenish paths. Runs entirely under
   card->int_lock. */
static irqreturn_t ns_irq_handler(int irq, void *dev_id) { u32 stat_r; ns_dev *card; struct atm_dev *dev; unsigned long flags; card = (ns_dev *) dev_id; dev = card->atmdev; card->intcnt++; PRINTK("nicstar%d: NICStAR generated an interrupt\n", card->index); spin_lock_irqsave(&card->int_lock, flags); stat_r = readl(card->membase + STAT); /* Transmit Status Indicator has been written to T. S. Queue */ if (stat_r & NS_STAT_TSIF) { TXPRINTK("nicstar%d: TSI interrupt\n", card->index); process_tsq(card); writel(NS_STAT_TSIF, card->membase + STAT); } /* Incomplete CS-PDU has been transmitted */ if (stat_r & NS_STAT_TXICP) { writel(NS_STAT_TXICP, card->membase + STAT); TXPRINTK("nicstar%d: Incomplete CS-PDU transmitted.\n", card->index); } /* Transmit Status Queue 7/8 full */ if (stat_r & NS_STAT_TSQF) { writel(NS_STAT_TSQF, card->membase + STAT); PRINTK("nicstar%d: TSQ full.\n", card->index); process_tsq(card); } /* Timer overflow */ if (stat_r & NS_STAT_TMROF) { writel(NS_STAT_TMROF, card->membase + STAT); PRINTK("nicstar%d: Timer overflow.\n", card->index); } /* PHY device interrupt signal active */ if (stat_r & NS_STAT_PHYI) { writel(NS_STAT_PHYI, card->membase + STAT); PRINTK("nicstar%d: PHY interrupt.\n", card->index); if (dev->phy && dev->phy->interrupt) { dev->phy->interrupt(dev); } } /* Small Buffer Queue is full */ if (stat_r & NS_STAT_SFBQF) { writel(NS_STAT_SFBQF, card->membase + STAT); printk("nicstar%d: Small free buffer queue is 
full.\n", card->index); } /* Large Buffer Queue is full */ if (stat_r & NS_STAT_LFBQF) { writel(NS_STAT_LFBQF, card->membase + STAT); printk("nicstar%d: Large free buffer queue is full.\n", card->index); } /* Receive Status Queue is full */ if (stat_r & NS_STAT_RSQF) { writel(NS_STAT_RSQF, card->membase + STAT); printk("nicstar%d: RSQ full.\n", card->index); process_rsq(card); } /* Complete CS-PDU received */ if (stat_r & NS_STAT_EOPDU) { RXPRINTK("nicstar%d: End of CS-PDU received.\n", card->index); process_rsq(card); writel(NS_STAT_EOPDU, card->membase + STAT); } /* Raw cell received */ if (stat_r & NS_STAT_RAWCF) { writel(NS_STAT_RAWCF, card->membase + STAT); #ifndef RCQ_SUPPORT printk("nicstar%d: Raw cell received and no support yet...\n", card->index); #endif /* RCQ_SUPPORT */ /* NOTE: the following procedure may keep a raw cell pending until the next interrupt. As this preliminary support is only meant to avoid buffer leakage, this is not an issue. */ while (readl(card->membase + RAWCT) != card->rawch) { ns_rcqe *rawcell; rawcell = (ns_rcqe *) bus_to_virt(card->rawch); if (ns_rcqe_islast(rawcell)) { struct sk_buff *oldbuf; oldbuf = card->rcbuf; card->rcbuf = (struct sk_buff *) ns_rcqe_nextbufhandle(rawcell); card->rawch = (u32) virt_to_bus(card->rcbuf->data); recycle_rx_buf(card, oldbuf); } else card->rawch += NS_RCQE_SIZE; } } /* Small buffer queue is empty */ if (stat_r & NS_STAT_SFBQE) { int i; struct sk_buff *sb; writel(NS_STAT_SFBQE, card->membase + STAT); printk("nicstar%d: Small free buffer queue empty.\n", card->index); for (i = 0; i < card->sbnr.min; i++) { sb = dev_alloc_skb(NS_SMSKBSIZE); if (sb == NULL) { writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); card->efbie = 0; break; } NS_SKB_CB(sb)->buf_type = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } card->sbfqc = i; process_rsq(card); } /* Large buffer queue empty */ if (stat_r & NS_STAT_LFBQE) { int i; struct 
sk_buff *lb; writel(NS_STAT_LFBQE, card->membase + STAT); printk("nicstar%d: Large free buffer queue empty.\n", card->index); for (i = 0; i < card->lbnr.min; i++) { lb = dev_alloc_skb(NS_LGSKBSIZE); if (lb == NULL) { writel(readl(card->membase + CFG) & ~NS_CFG_EFBIE, card->membase + CFG); card->efbie = 0; break; } NS_SKB_CB(lb)->buf_type = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); } card->lbfqc = i; process_rsq(card); } /* Receive Status Queue is 7/8 full */ if (stat_r & NS_STAT_RSQAF) { writel(NS_STAT_RSQAF, card->membase + STAT); RXPRINTK("nicstar%d: RSQ almost full.\n", card->index); process_rsq(card); } spin_unlock_irqrestore(&card->int_lock, flags); PRINTK("nicstar%d: end of interrupt service\n", card->index); return IRQ_HANDLED; }
/* atm_vcc open callback. Validates the AAL (only AAL0/AAL5), claims the
   vcmap slot for this vpi.vci, and on first open (ATM_VF_PARTIAL clear):
   for CBR tx it reserves TST bandwidth, grabs a free fixed-rate SCD,
   allocates its SCQ, programs the SCD in SRAM and fills the schedule
   table; for rx it opens the connection in the hardware's receive
   connection table. Returns 0 or a negative errno, undoing the flag bits
   and reservations on every failure path. */
static int ns_open(struct atm_vcc *vcc) { ns_dev *card; vc_map *vc; unsigned long tmpl, modl; int tcr, tcra; /* target cell rate, and absolute value */ int n = 0; /* Number of entries in the TST. Initialized to remove the compiler warning. */ u32 u32d[4]; int frscdi = 0; /* Index of the SCD. Initialized to remove the compiler warning. How I wish compilers were clever enough to tell which variables can truly be used uninitialized... */ int inuse; /* tx or rx vc already in use by another vcc */ short vpi = vcc->vpi; int vci = vcc->vci; card = (ns_dev *) vcc->dev->dev_data; PRINTK("nicstar%d: opening vpi.vci %d.%d \n", card->index, (int) vpi, vci); if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { PRINTK("nicstar%d: unsupported AAL.\n", card->index); return -EINVAL; } vc = &(card->vcmap[vpi << card->vcibits | vci]); vcc->dev_data = vc; inuse = 0; if (vcc->qos.txtp.traffic_class != ATM_NONE && vc->tx) inuse = 1; if (vcc->qos.rxtp.traffic_class != ATM_NONE && vc->rx) inuse += 2; if (inuse) { printk("nicstar%d: %s vci already in use.\n", card->index, inuse == 1 ? "tx" : inuse == 2 ? 
"rx" : "tx and rx"); return -EINVAL; } set_bit(ATM_VF_ADDR,&vcc->flags); /* NOTE: You are not allowed to modify an open connection's QOS. To change that, remove the ATM_VF_PARTIAL flag checking. There may be other changes needed to do that. */ if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) { scq_info *scq; set_bit(ATM_VF_PARTIAL,&vcc->flags); if (vcc->qos.txtp.traffic_class == ATM_CBR) { /* Check requested cell rate and availability of SCD */ if (vcc->qos.txtp.max_pcr == 0 && vcc->qos.txtp.pcr == 0 && vcc->qos.txtp.min_pcr == 0) { PRINTK("nicstar%d: trying to open a CBR vc with cell rate = 0 \n", card->index); clear_bit(ATM_VF_PARTIAL,&vcc->flags); clear_bit(ATM_VF_ADDR,&vcc->flags); return -EINVAL; } tcr = atm_pcr_goal(&(vcc->qos.txtp)); tcra = tcr >= 0 ? tcr : -tcr; PRINTK("nicstar%d: target cell rate = %d.\n", card->index, vcc->qos.txtp.max_pcr); tmpl = (unsigned long)tcra * (unsigned long)NS_TST_NUM_ENTRIES; modl = tmpl % card->max_pcr; n = (int)(tmpl / card->max_pcr); if (tcr > 0) { if (modl > 0) n++; } else if (tcr == 0) { if ((n = (card->tst_free_entries - NS_TST_RESERVED)) <= 0) { PRINTK("nicstar%d: no CBR bandwidth free.\n", card->index); clear_bit(ATM_VF_PARTIAL,&vcc->flags); clear_bit(ATM_VF_ADDR,&vcc->flags); return -EINVAL; } } if (n == 0) { printk("nicstar%d: selected bandwidth < granularity.\n", card->index); clear_bit(ATM_VF_PARTIAL,&vcc->flags); clear_bit(ATM_VF_ADDR,&vcc->flags); return -EINVAL; } if (n > (card->tst_free_entries - NS_TST_RESERVED)) { PRINTK("nicstar%d: not enough free CBR bandwidth.\n", card->index); clear_bit(ATM_VF_PARTIAL,&vcc->flags); clear_bit(ATM_VF_ADDR,&vcc->flags); return -EINVAL; } else card->tst_free_entries -= n; XPRINTK("nicstar%d: writing %d tst entries.\n", card->index, n); for (frscdi = 0; frscdi < NS_FRSCD_NUM; frscdi++) { if (card->scd2vc[frscdi] == NULL) { card->scd2vc[frscdi] = vc; break; } } if (frscdi == NS_FRSCD_NUM) { PRINTK("nicstar%d: no SCD available for CBR channel.\n", card->index); card->tst_free_entries 
+= n; clear_bit(ATM_VF_PARTIAL,&vcc->flags); clear_bit(ATM_VF_ADDR,&vcc->flags); return -EBUSY; } vc->cbr_scd = NS_FRSCD + frscdi * NS_FRSCD_SIZE; scq = get_scq(CBR_SCQSIZE, vc->cbr_scd); if (scq == NULL) { PRINTK("nicstar%d: can't get fixed rate SCQ.\n", card->index); card->scd2vc[frscdi] = NULL; card->tst_free_entries += n; clear_bit(ATM_VF_PARTIAL,&vcc->flags); clear_bit(ATM_VF_ADDR,&vcc->flags); return -ENOMEM; } vc->scq = scq; u32d[0] = (u32) virt_to_bus(scq->base); u32d[1] = (u32) 0x00000000; u32d[2] = (u32) 0xffffffff; u32d[3] = (u32) 0x00000000; ns_write_sram(card, vc->cbr_scd, u32d, 4); fill_tst(card, n, vc); } else if (vcc->qos.txtp.traffic_class == ATM_UBR) { vc->cbr_scd = 0x00000000; vc->scq = card->scq0; } if (vcc->qos.txtp.traffic_class != ATM_NONE) { vc->tx = 1; vc->tx_vcc = vcc; vc->tbd_count = 0; } if (vcc->qos.rxtp.traffic_class != ATM_NONE) { u32 status; vc->rx = 1; vc->rx_vcc = vcc; vc->rx_iov = NULL; /* Open the connection in hardware */ if (vcc->qos.aal == ATM_AAL5) status = NS_RCTE_AAL5 | NS_RCTE_CONNECTOPEN; else /* vcc->qos.aal == ATM_AAL0 */ status = NS_RCTE_AAL0 | NS_RCTE_CONNECTOPEN; #ifdef RCQ_SUPPORT status |= NS_RCTE_RAWCELLINTEN; #endif /* RCQ_SUPPORT */ ns_write_sram(card, NS_RCT + (vpi << card->vcibits | vci) * NS_RCT_ENTRY_SIZE, &status, 1); } } set_bit(ATM_VF_READY,&vcc->flags); return 0; }
/* atm_vcc close callback. Closes the rx side in hardware and recycles any
   partially reassembled PDU; for a CBR tx vc it appends TSRs until the
   SCQ is fully drained (busy-waiting with schedule()), returns the TST
   entries and the SCD, and frees the SCQ; finally it strips any pending
   scq0 references to this vcc so drain_scq() can't touch a dead vcc. */
static void ns_close(struct atm_vcc *vcc) { vc_map *vc; ns_dev *card; u32 data; int i; vc = vcc->dev_data; card = vcc->dev->dev_data; PRINTK("nicstar%d: closing vpi.vci %d.%d \n", card->index, (int) vcc->vpi, vcc->vci); clear_bit(ATM_VF_READY,&vcc->flags); if (vcc->qos.rxtp.traffic_class != ATM_NONE) { u32 addr; unsigned long flags; addr = NS_RCT + (vcc->vpi << card->vcibits | vcc->vci) * NS_RCT_ENTRY_SIZE; spin_lock_irqsave(&card->res_lock, flags); while(CMD_BUSY(card)); writel(NS_CMD_CLOSE_CONNECTION | addr << 2, card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); vc->rx = 0; if (vc->rx_iov != NULL) { struct sk_buff *iovb; 
u32 stat; stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); PRINTK("nicstar%d: closing a VC with pending rx buffers.\n", card->index); iovb = vc->rx_iov; recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); NS_SKB(iovb)->iovcnt = 0; NS_SKB(iovb)->vcc = NULL; spin_lock_irqsave(&card->int_lock, flags); recycle_iov_buf(card, iovb); spin_unlock_irqrestore(&card->int_lock, flags); vc->rx_iov = NULL; } } if (vcc->qos.txtp.traffic_class != ATM_NONE) { vc->tx = 0; } if (vcc->qos.txtp.traffic_class == ATM_CBR) { unsigned long flags; ns_scqe *scqep; scq_info *scq; scq = vc->scq; for (;;) { spin_lock_irqsave(&scq->lock, flags); scqep = scq->next; if (scqep == scq->base) scqep = scq->last; else scqep--; if (scqep == scq->tail) { spin_unlock_irqrestore(&scq->lock, flags); break; } /* If the last entry is not a TSR, place one in the SCQ in order to be able to completely drain it and then close. */ if (!ns_scqe_is_tsr(scqep) && scq->tail != scq->next) { ns_scqe tsr; u32 scdi, scqi; u32 data; int index; tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; scqi = scq->next - scq->base; tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); tsr.word_3 = 0x00000000; tsr.word_4 = 0x00000000; *scq->next = tsr; index = (int) scqi; scq->skb[index] = NULL; if (scq->next == scq->last) scq->next = scq->base; else scq->next++; data = (u32) virt_to_bus(scq->next); ns_write_sram(card, scq->scd, &data, 1); } spin_unlock_irqrestore(&scq->lock, flags); schedule(); } /* Free all TST entries */ data = NS_TST_OPCODE_VARIABLE; for (i = 0; i < NS_TST_NUM_ENTRIES; i++) { if (card->tste2vc[i] == vc) { ns_write_sram(card, card->tst_addr + i, &data, 1); card->tste2vc[i] = NULL; card->tst_free_entries++; } } card->scd2vc[(vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE] = NULL; free_scq(vc->scq, vcc); } /* remove all references to vcc before deleting it */ if (vcc->qos.txtp.traffic_class != 
ATM_NONE) { unsigned long flags; scq_info *scq = card->scq0; spin_lock_irqsave(&scq->lock, flags); for(i = 0; i < scq->num_entries; i++) { if(scq->skb[i] && ATM_SKB(scq->skb[i])->vcc == vcc) { ATM_SKB(scq->skb[i])->vcc = NULL; atm_return(vcc, scq->skb[i]->truesize); PRINTK("nicstar: deleted pending vcc mapping\n"); } } spin_unlock_irqrestore(&scq->lock, flags); } vcc->dev_data = NULL; clear_bit(ATM_VF_PARTIAL,&vcc->flags); clear_bit(ATM_VF_ADDR,&vcc->flags); #ifdef RX_DEBUG { u32 stat, cfg; stat = readl(card->membase + STAT); cfg = readl(card->membase + CFG); printk("STAT = 0x%08X CFG = 0x%08X \n", stat, cfg); printk("TSQ: base = 0x%08X next = 0x%08X last = 0x%08X TSQT = 0x%08X \n", (u32) card->tsq.base, (u32) card->tsq.next,(u32) card->tsq.last, readl(card->membase + TSQT)); printk("RSQ: base = 0x%08X next = 0x%08X last = 0x%08X RSQT = 0x%08X \n", (u32) card->rsq.base, (u32) card->rsq.next,(u32) card->rsq.last, readl(card->membase + RSQT)); printk("Empty free buffer queue interrupt %s \n", card->efbie ? "enabled" : "disabled"); printk("SBCNT = %d count = %d LBCNT = %d count = %d \n", ns_stat_sfbqc_get(stat), card->sbpool.count, ns_stat_lfbqc_get(stat), card->lbpool.count); printk("hbpool.count = %d iovpool.count = %d \n", card->hbpool.count, card->iovpool.count); } #endif /* RX_DEBUG */ }
/* Write n fixed-rate entries for vc into the Transmit Schedule Table,
   spaced via the accumulator 'cl' so the vc's slots are distributed over
   the table, and terminate it with an END opcode pointing back to the
   table start. Gives up (with a message) if no free entry exists. */
static void fill_tst(ns_dev *card, int n, vc_map *vc) { u32 new_tst; unsigned long cl; int e, r; u32 data; /* It would be very complicated to keep the two TSTs synchronized while assuring that writes are only made to the inactive TST. So, for now I will use only one TST. If problems occur, I will change this again */ new_tst = card->tst_addr; /* Fill procedure */ for (e = 0; e < NS_TST_NUM_ENTRIES; e++) { if (card->tste2vc[e] == NULL) break; } if (e == NS_TST_NUM_ENTRIES) { printk("nicstar%d: No free TST entries found. 
\n", card->index); return; } r = n; cl = NS_TST_NUM_ENTRIES; data = ns_tste_make(NS_TST_OPCODE_FIXED, vc->cbr_scd); while (r > 0) { if (cl >= NS_TST_NUM_ENTRIES && card->tste2vc[e] == NULL) { card->tste2vc[e] = vc; ns_write_sram(card, new_tst + e, &data, 1); cl -= NS_TST_NUM_ENTRIES; r--; } if (++e == NS_TST_NUM_ENTRIES) { e = 0; } cl += n; } /* End of fill procedure */ data = ns_tste_make(NS_TST_OPCODE_END, new_tst); ns_write_sram(card, new_tst + NS_TST_NUM_ENTRIES, &data, 1); ns_write_sram(card, card->tst_addr + NS_TST_NUM_ENTRIES, &data, 1); card->tst_addr = new_tst; }
/* atm_vcc send callback. Builds a Transmit Buffer Descriptor for the skb
   (whole AAL5 PDU, or one pre-built AAL0 cell whose VPI/VCI is forced to
   match the vcc) and queues it on the vc's CBR SCQ or the shared scq0 via
   push_scqe(). The skb is consumed on every path; returns 0 on success or
   a negative errno, updating the vcc tx/tx_err counters accordingly. */
static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb) { ns_dev *card; vc_map *vc; scq_info *scq; unsigned long buflen; ns_scqe scqe; u32 flags; /* TBD flags, not CPU flags */ card = vcc->dev->dev_data; TXPRINTK("nicstar%d: ns_send() called.\n", card->index); if ((vc = (vc_map *) vcc->dev_data) == NULL) { printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (!vc->tx) { printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) { printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } if (skb_shinfo(skb)->nr_frags != 0) { printk("nicstar%d: No scatter-gather yet.\n", card->index); atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EINVAL; } ATM_SKB(skb)->vcc = vcc; if (vcc->qos.aal == ATM_AAL5) { buflen = (skb->len + 47 + 8) / 48 * 48; /* Multiple of 48 */ flags = NS_TBD_AAL5; scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data)); scqe.word_3 = cpu_to_le32((u32) skb->len); scqe.word_4 = ns_tbd_mkword_4(0, (u32) vcc->vpi, (u32) vcc->vci, 0, ATM_SKB(skb)->atm_options & ATM_ATMOPT_CLP ? 
1 : 0); flags |= NS_TBD_EOPDU; } else /* (vcc->qos.aal == ATM_AAL0) */ { buflen = ATM_CELL_PAYLOAD; /* i.e., 48 bytes */ flags = NS_TBD_AAL0; scqe.word_2 = cpu_to_le32((u32) virt_to_bus(skb->data) + NS_AAL0_HEADER); scqe.word_3 = cpu_to_le32(0x00000000); if (*skb->data & 0x02) /* Payload type 1 - end of pdu */ flags |= NS_TBD_EOPDU; scqe.word_4 = cpu_to_le32(*((u32 *) skb->data) & ~NS_TBD_VC_MASK); /* Force the VPI/VCI to be the same as in VCC struct */ scqe.word_4 |= cpu_to_le32((((u32) vcc->vpi) << NS_TBD_VPI_SHIFT | ((u32) vcc->vci) << NS_TBD_VCI_SHIFT) & NS_TBD_VC_MASK); } if (vcc->qos.txtp.traffic_class == ATM_CBR) { scqe.word_1 = ns_tbd_mkword_1_novbr(flags, (u32) buflen); scq = ((vc_map *) vcc->dev_data)->scq; } else { scqe.word_1 = ns_tbd_mkword_1(flags, (u32) 1, (u32) 1, (u32) buflen); scq = card->scq0; } if (push_scqe(card, vc, scq, &scqe, skb) != 0) { atomic_inc(&vcc->stats->tx_err); dev_kfree_skb_any(skb); return -EIO; } atomic_inc(&vcc->stats->tx); return 0; }
/* Append one TBD to the SCQ and, every MAX_TBD_PER_VC/MAX_TBD_PER_SCQ
   descriptors, follow it with a Transmit Status Request so the hardware
   reports progress back through the TSQ. May sleep when the queue is
   full, so a full queue in interrupt context is an error. Returns 0 on
   success, 1 if the TBD could not be queued (caller frees the skb).
   NOTE(review): interruptible_sleep_on_timeout() is inherently racy and
   was removed from later kernels in favour of
   wait_event_interruptible_timeout() -- confirm before backporting. */
static int push_scqe(ns_dev *card, vc_map *vc, scq_info *scq, ns_scqe *tbd, struct sk_buff *skb) { unsigned long flags; ns_scqe tsr; u32 scdi, scqi; int scq_is_vbr; u32 data; int index; spin_lock_irqsave(&scq->lock, flags); while (scq->tail == scq->next) { if (in_interrupt()) { spin_unlock_irqrestore(&scq->lock, flags); printk("nicstar%d: Error pushing TBD.\n", card->index); return 1; } scq->full = 1; spin_unlock_irqrestore(&scq->lock, flags); interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); spin_lock_irqsave(&scq->lock, flags); if (scq->full) { spin_unlock_irqrestore(&scq->lock, flags); printk("nicstar%d: Timeout pushing TBD.\n", card->index); return 1; } } *scq->next = *tbd; index = (int) (scq->next - scq->base); scq->skb[index] = skb; XPRINTK("nicstar%d: sending skb at 0x%x (pos %d).\n", card->index, (u32) skb, index); XPRINTK("nicstar%d: TBD written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", card->index, le32_to_cpu(tbd->word_1), le32_to_cpu(tbd->word_2), 
le32_to_cpu(tbd->word_3), le32_to_cpu(tbd->word_4), (u32) scq->next); if (scq->next == scq->last) scq->next = scq->base; else scq->next++; vc->tbd_count++; if (scq->num_entries == VBR_SCQ_NUM_ENTRIES) { scq->tbd_count++; scq_is_vbr = 1; } else scq_is_vbr = 0; if (vc->tbd_count >= MAX_TBD_PER_VC || scq->tbd_count >= MAX_TBD_PER_SCQ) { int has_run = 0; while (scq->tail == scq->next) { if (in_interrupt()) { data = (u32) virt_to_bus(scq->next); ns_write_sram(card, scq->scd, &data, 1); spin_unlock_irqrestore(&scq->lock, flags); printk("nicstar%d: Error pushing TSR.\n", card->index); return 0; } scq->full = 1; if (has_run++) break; spin_unlock_irqrestore(&scq->lock, flags); interruptible_sleep_on_timeout(&scq->scqfull_waitq, SCQFULL_TIMEOUT); spin_lock_irqsave(&scq->lock, flags); } if (!scq->full) { tsr.word_1 = ns_tsr_mkword_1(NS_TSR_INTENABLE); if (scq_is_vbr) scdi = NS_TSR_SCDISVBR; else scdi = (vc->cbr_scd - NS_FRSCD) / NS_FRSCD_SIZE; scqi = scq->next - scq->base; tsr.word_2 = ns_tsr_mkword_2(scdi, scqi); tsr.word_3 = 0x00000000; tsr.word_4 = 0x00000000; *scq->next = tsr; index = (int) scqi; scq->skb[index] = NULL; XPRINTK("nicstar%d: TSR written:\n0x%x\n0x%x\n0x%x\n0x%x\n at 0x%x.\n", card->index, le32_to_cpu(tsr.word_1), le32_to_cpu(tsr.word_2), le32_to_cpu(tsr.word_3), le32_to_cpu(tsr.word_4), (u32) scq->next); if (scq->next == scq->last) scq->next = scq->base; else scq->next++; vc->tbd_count = 0; scq->tbd_count = 0; } else PRINTK("nicstar%d: Timeout pushing TSR.\n", card->index); } data = (u32) virt_to_bus(scq->next); ns_write_sram(card, scq->scd, &data, 1); spin_unlock_irqrestore(&scq->lock, flags); return 0; }
/* Walk the Transmit Status Queue, draining each SCQ up to the position
   reported by its TSI and waking writers blocked on a full SCQ. Looks up
   to two entries ahead because, per the 77201 errata, up to two empty
   entries may precede a valid one. Finally tells the hardware how far we
   got by writing the last serviced offset to TSQH. */
static void process_tsq(ns_dev *card) { u32 scdi; scq_info *scq; ns_tsi *previous = NULL, *one_ahead, *two_ahead; int serviced_entries; /* flag indicating at least one entry was serviced */ serviced_entries = 0; if (card->tsq.next == card->tsq.last) one_ahead = card->tsq.base; else one_ahead = card->tsq.next + 1; if (one_ahead == card->tsq.last) two_ahead 
= card->tsq.base; else two_ahead = one_ahead + 1; while (!ns_tsi_isempty(card->tsq.next) || !ns_tsi_isempty(one_ahead) || !ns_tsi_isempty(two_ahead)) /* At most two empty, as stated in the 77201 errata */ { serviced_entries = 1; /* Skip the one or two possible empty entries */ while (ns_tsi_isempty(card->tsq.next)) { if (card->tsq.next == card->tsq.last) card->tsq.next = card->tsq.base; else card->tsq.next++; } if (!ns_tsi_tmrof(card->tsq.next)) { scdi = ns_tsi_getscdindex(card->tsq.next); if (scdi == NS_TSI_SCDISVBR) scq = card->scq0; else { if (card->scd2vc[scdi] == NULL) { printk("nicstar%d: could not find VC from SCD index.\n", card->index); ns_tsi_init(card->tsq.next); return; } scq = card->scd2vc[scdi]->scq; } drain_scq(card, scq, ns_tsi_getscqpos(card->tsq.next)); scq->full = 0; wake_up_interruptible(&(scq->scqfull_waitq)); } ns_tsi_init(card->tsq.next); previous = card->tsq.next; if (card->tsq.next == card->tsq.last) card->tsq.next = card->tsq.base; else card->tsq.next++; if (card->tsq.next == card->tsq.last) one_ahead = card->tsq.base; else one_ahead = card->tsq.next + 1; if (one_ahead == card->tsq.last) two_ahead = card->tsq.base; else two_ahead = one_ahead + 1; } if (serviced_entries) { writel((((u32) previous) - ((u32) card->tsq.base)), card->membase + TSQH); } }
/* Complete (pop back to the vcc, or free) the skbs for every SCQ entry
   from just past the current tail up to, but not including, 'pos', then
   advance the tail there. Takes scq->lock itself; entries holding NULL
   (e.g. TSR slots) are skipped. */
static void drain_scq(ns_dev *card, scq_info *scq, int pos) { struct atm_vcc *vcc; struct sk_buff *skb; int i; unsigned long flags; XPRINTK("nicstar%d: drain_scq() called, scq at 0x%x, pos %d.\n", card->index, (u32) scq, pos); if (pos >= scq->num_entries) { printk("nicstar%d: Bad index on drain_scq().\n", card->index); return; } spin_lock_irqsave(&scq->lock, flags); i = (int) (scq->tail - scq->base); if (++i == scq->num_entries) i = 0; while (i != pos) { skb = scq->skb[i]; XPRINTK("nicstar%d: freeing skb at 0x%x (index %d).\n", card->index, (u32) skb, i); if (skb != NULL) { vcc = ATM_SKB(skb)->vcc; if (vcc && vcc->pop != NULL) { vcc->pop(vcc, skb); } else { dev_kfree_skb_irq(skb); } scq->skb[i] 
= NULL; } if (++i == scq->num_entries) i = 0; } scq->tail = scq->base + pos; spin_unlock_irqrestore(&scq->lock, flags); }
/* Pass every valid Receive Status Queue entry to dequeue_rx(), then write
   the offset of the last consumed entry to RSQH so the hardware can reuse
   those slots. */
static void process_rsq(ns_dev *card) { ns_rsqe *previous; if (!ns_rsqe_valid(card->rsq.next)) return; do { dequeue_rx(card, card->rsq.next); ns_rsqe_init(card->rsq.next); previous = card->rsq.next; if (card->rsq.next == card->rsq.last) card->rsq.next = card->rsq.base; else card->rsq.next++; } while (ns_rsqe_valid(card->rsq.next)); writel((((u32) previous) - ((u32) card->rsq.base)), card->membase + RSQH); }
/* Handle one RSQ entry: for AAL0, re-synthesize a header and push each
   received cell to the vcc individually; for AAL5, accumulate the small/
   large buffers of the PDU into an iovec skb until end-of-PDU (function
   continues beyond this excerpt).
   NOTE(review): the buffer handle comes back from hardware as a 32-bit
   value cast to an sk_buff pointer -- 32-bit-platform-only, see
   push_rxbufs(). */
static void dequeue_rx(ns_dev *card, ns_rsqe *rsqe) { u32 vpi, vci; vc_map *vc; struct sk_buff *iovb; struct iovec *iov; struct atm_vcc *vcc; struct sk_buff *skb; unsigned short aal5_len; int len; u32 stat; stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); skb = (struct sk_buff *) le32_to_cpu(rsqe->buffer_handle); vpi = ns_rsqe_vpi(rsqe); vci = ns_rsqe_vci(rsqe); if (vpi >= 1UL << card->vpibits || vci >= 1UL << card->vcibits) { printk("nicstar%d: SDU received for out-of-range vc %d.%d.\n", card->index, vpi, vci); recycle_rx_buf(card, skb); return; } vc = &(card->vcmap[vpi << card->vcibits | vci]); if (!vc->rx) { RXPRINTK("nicstar%d: SDU received on non-rx vc %d.%d.\n", card->index, vpi, vci); recycle_rx_buf(card, skb); return; } vcc = vc->rx_vcc; if (vcc->qos.aal == ATM_AAL0) { struct sk_buff *sb; unsigned char *cell; int i; cell = skb->data; for (i = ns_rsqe_cellcount(rsqe); i; i--) { if ((sb = dev_alloc_skb(NS_SMSKBSIZE)) == NULL) { printk("nicstar%d: Can't allocate buffers for aal0.\n", card->index); atomic_add(i,&vcc->stats->rx_drop); break; } if (!atm_charge(vcc, sb->truesize)) { RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n", card->index); atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ dev_kfree_skb_any(sb); break; } /* Rebuild the header */ *((u32 *) sb->data) = le32_to_cpu(rsqe->word_1) << 4 | (ns_rsqe_clp(rsqe) ? 
0x00000001 : 0x00000000); if (i == 1 && ns_rsqe_eopdu(rsqe)) *((u32 *) sb->data) |= 0x00000002; skb_put(sb, NS_AAL0_HEADER); memcpy(skb_tail_pointer(sb), cell, ATM_CELL_PAYLOAD); skb_put(sb, ATM_CELL_PAYLOAD); ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); atomic_inc(&vcc->stats->rx); cell += ATM_CELL_PAYLOAD; } recycle_rx_buf(card, skb); return; } /* To reach this point, the AAL layer can only be AAL5 */ if ((iovb = vc->rx_iov) == NULL) { iovb = skb_dequeue(&(card->iovpool.queue)); if (iovb == NULL) /* No buffers in the queue */ { iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC); if (iovb == NULL) { printk("nicstar%d: Out of iovec buffers.\n", card->index); atomic_inc(&vcc->stats->rx_drop); recycle_rx_buf(card, skb); return; } NS_SKB_CB(iovb)->buf_type = BUF_NONE; } else if (--card->iovpool.count < card->iovnr.min) { struct sk_buff *new_iovb; if ((new_iovb = alloc_skb(NS_IOVBUFSIZE, GFP_ATOMIC)) != NULL) { NS_SKB_CB(iovb)->buf_type = BUF_NONE; skb_queue_tail(&card->iovpool.queue, new_iovb); card->iovpool.count++; } } vc->rx_iov = iovb; NS_SKB(iovb)->iovcnt = 0; iovb->len = 0; iovb->data = iovb->head; skb_reset_tail_pointer(iovb); NS_SKB(iovb)->vcc = vcc; /* IMPORTANT: a pointer to the sk_buff containing the small or large buffer is stored as iovec base, NOT a pointer to the small or large buffer itself. 
*/ } else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) { printk("nicstar%d: received too big AAL5 SDU.\n", card->index); atomic_inc(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); NS_SKB(iovb)->iovcnt = 0; iovb->len = 0; iovb->data = iovb->head; skb_reset_tail_pointer(iovb); NS_SKB(iovb)->vcc = vcc; } iov = &((struct iovec *) iovb->data)[NS_SKB(iovb)->iovcnt++]; iov->iov_base = (void *) skb; iov->iov_len = ns_rsqe_cellcount(rsqe) * 48; iovb->len += iov->iov_len; if (NS_SKB(iovb)->iovcnt == 1) { if (NS_SKB_CB(skb)->buf_type != BUF_SM) { printk("nicstar%d: Expected a small buffer, and this is not one.\n", card->index); which_list(card, skb); atomic_inc(&vcc->stats->rx_err); recycle_rx_buf(card, skb); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } } else /* NS_SKB(iovb)->iovcnt >= 2 */ { if (NS_SKB_CB(skb)->buf_type != BUF_LG) { printk("nicstar%d: Expected a large buffer, and this is not one.\n", card->index); which_list(card, skb); atomic_inc(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } } if (ns_rsqe_eopdu(rsqe)) { /* This works correctly regardless of the endianness of the host */ unsigned char *L1L2 = (unsigned char *)((u32)skb->data + iov->iov_len - 6); aal5_len = L1L2[0] << 8 | L1L2[1]; len = (aal5_len == 0x0000) ? 0x10000 : aal5_len; if (ns_rsqe_crcerr(rsqe) || len + 8 > iovb->len || len + (47 + 8) < iovb->len) { printk("nicstar%d: AAL5 CRC error", card->index); if (len + 8 > iovb->len || len + (47 + 8) < iovb->len) printk(" - PDU size mismatch.\n"); else printk(".\n"); atomic_inc(&vcc->stats->rx_err); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } /* By this point we (hopefully) have a complete SDU without errors. 
*/ if (NS_SKB(iovb)->iovcnt == 1) /* Just a small buffer */ { /* skb points to a small buffer */ if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); atomic_inc(&vcc->stats->rx_drop); } else { skb_put(skb, len); dequeue_sm_buf(card, skb); #ifdef NS_USE_DESTRUCTORS skb->destructor = ns_sb_destructor; #endif /* NS_USE_DESTRUCTORS */ ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); } } else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */ { struct sk_buff *sb; sb = (struct sk_buff *) (iov - 1)->iov_base; /* skb points to a large buffer */ if (len <= NS_SMBUFSIZE) { if (!atm_charge(vcc, sb->truesize)) { push_rxbufs(card, sb); atomic_inc(&vcc->stats->rx_drop); } else { skb_put(sb, len); dequeue_sm_buf(card, sb); #ifdef NS_USE_DESTRUCTORS sb->destructor = ns_sb_destructor; #endif /* NS_USE_DESTRUCTORS */ ATM_SKB(sb)->vcc = vcc; __net_timestamp(sb); vcc->push(vcc, sb); atomic_inc(&vcc->stats->rx); } push_rxbufs(card, skb); } else /* len > NS_SMBUFSIZE, the usual case */ { if (!atm_charge(vcc, skb->truesize)) { push_rxbufs(card, skb); atomic_inc(&vcc->stats->rx_drop); } else { dequeue_lg_buf(card, skb); #ifdef NS_USE_DESTRUCTORS skb->destructor = ns_lb_destructor; #endif /* NS_USE_DESTRUCTORS */ skb_push(skb, NS_SMBUFSIZE); skb_copy_from_linear_data(sb, skb->data, NS_SMBUFSIZE); skb_put(skb, len - NS_SMBUFSIZE); ATM_SKB(skb)->vcc = vcc; __net_timestamp(skb); vcc->push(vcc, skb); atomic_inc(&vcc->stats->rx); } push_rxbufs(card, sb); } } else /* Must push a huge buffer */ { struct sk_buff *hb, *sb, *lb; int remaining, tocopy; int j; hb = skb_dequeue(&(card->hbpool.queue)); if (hb == NULL) /* No buffers in the queue */ { hb = dev_alloc_skb(NS_HBUFSIZE); if (hb == NULL) { printk("nicstar%d: Out of huge buffers.\n", card->index); atomic_inc(&vcc->stats->rx_drop); recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_SKB(iovb)->iovcnt); vc->rx_iov = NULL; recycle_iov_buf(card, iovb); return; } 
else if (card->hbpool.count < card->hbnr.min) { struct sk_buff *new_hb; if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { skb_queue_tail(&card->hbpool.queue, new_hb); card->hbpool.count++; } } NS_SKB_CB(hb)->buf_type = BUF_NONE; } else if (--card->hbpool.count < card->hbnr.min) { struct sk_buff *new_hb; if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { NS_SKB_CB(new_hb)->buf_type = BUF_NONE; skb_queue_tail(&card->hbpool.queue, new_hb); card->hbpool.count++; } if (card->hbpool.count < card->hbnr.min) { if ((new_hb = dev_alloc_skb(NS_HBUFSIZE)) != NULL) { NS_SKB_CB(new_hb)->buf_type = BUF_NONE; skb_queue_tail(&card->hbpool.queue, new_hb); card->hbpool.count++; } } } iov = (struct iovec *) iovb->data; if (!atm_charge(vcc, hb->truesize)) { recycle_iovec_rx_bufs(card, iov, NS_SKB(iovb)->iovcnt); if (card->hbpool.count < card->hbnr.max) { skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; } else dev_kfree_skb_any(hb); atomic_inc(&vcc->stats->rx_drop); } else { /* Copy the small buffer to the huge buffer */ sb = (struct sk_buff *) iov->iov_base; skb_copy_from_linear_data(sb, hb->data, iov->iov_len); skb_put(hb, iov->iov_len); remaining = len - iov->iov_len; iov++; /* Free the small buffer */ push_rxbufs(card, sb); /* Copy all large buffers to the huge buffer and free them */ for (j = 1; j < NS_SKB(iovb)->iovcnt; j++) { lb = (struct sk_buff *) iov->iov_base; tocopy = min_t(int, remaining, iov->iov_len); skb_copy_from_linear_data(lb, skb_tail_pointer(hb), tocopy); skb_put(hb, tocopy); iov++; remaining -= tocopy; push_rxbufs(card, lb); } #ifdef EXTRA_DEBUG if (remaining != 0 || hb->len != len) printk("nicstar%d: Huge buffer len mismatch.\n", card->index); #endif /* EXTRA_DEBUG */ ATM_SKB(hb)->vcc = vcc; #ifdef NS_USE_DESTRUCTORS hb->destructor = ns_hb_destructor; #endif /* NS_USE_DESTRUCTORS */ __net_timestamp(hb); vcc->push(vcc, hb); atomic_inc(&vcc->stats->rx); } } vc->rx_iov = NULL; recycle_iov_buf(card, iovb); } } #ifdef NS_USE_DESTRUCTORS static void 
ns_sb_destructor(struct sk_buff *sb) { ns_dev *card; u32 stat; card = (ns_dev *) ATM_SKB(sb)->vcc->dev->dev_data; stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); do { sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); if (sb == NULL) break; NS_SKB_CB(sb)->buf_type = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } while (card->sbfqc < card->sbnr.min); } static void ns_lb_destructor(struct sk_buff *lb) { ns_dev *card; u32 stat; card = (ns_dev *) ATM_SKB(lb)->vcc->dev->dev_data; stat = readl(card->membase + STAT); card->sbfqc = ns_stat_sfbqc_get(stat); card->lbfqc = ns_stat_lfbqc_get(stat); do { lb = __dev_alloc_skb(NS_LGSKBSIZE, GFP_KERNEL); if (lb == NULL) break; NS_SKB_CB(lb)->buf_type = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); } while (card->lbfqc < card->lbnr.min); } static void ns_hb_destructor(struct sk_buff *hb) { ns_dev *card; card = (ns_dev *) ATM_SKB(hb)->vcc->dev->dev_data; while (card->hbpool.count < card->hbnr.init) { hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); if (hb == NULL) break; NS_SKB_CB(hb)->buf_type = BUF_NONE; skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; } } #endif /* NS_USE_DESTRUCTORS */ static void recycle_rx_buf(ns_dev *card, struct sk_buff *skb) { struct ns_skb_cb *cb = NS_SKB_CB(skb); if (unlikely(cb->buf_type == BUF_NONE)) { printk("nicstar%d: What kind of rx buffer is this?\n", card->index); dev_kfree_skb_any(skb); } else push_rxbufs(card, skb); } static void recycle_iovec_rx_bufs(ns_dev *card, struct iovec *iov, int count) { while (count-- > 0) recycle_rx_buf(card, (struct sk_buff *) (iov++)->iov_base); } static void recycle_iov_buf(ns_dev *card, struct sk_buff *iovb) { if (card->iovpool.count < card->iovnr.max) { skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; } else dev_kfree_skb_any(iovb); } static void 
dequeue_sm_buf(ns_dev *card, struct sk_buff *sb) { skb_unlink(sb, &card->sbpool.queue); #ifdef NS_USE_DESTRUCTORS if (card->sbfqc < card->sbnr.min) #else if (card->sbfqc < card->sbnr.init) { struct sk_buff *new_sb; if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { NS_SKB_CB(new_sb)->buf_type = BUF_SM; skb_queue_tail(&card->sbpool.queue, new_sb); skb_reserve(new_sb, NS_AAL0_HEADER); push_rxbufs(card, new_sb); } } if (card->sbfqc < card->sbnr.init) #endif /* NS_USE_DESTRUCTORS */ { struct sk_buff *new_sb; if ((new_sb = dev_alloc_skb(NS_SMSKBSIZE)) != NULL) { NS_SKB_CB(new_sb)->buf_type = BUF_SM; skb_queue_tail(&card->sbpool.queue, new_sb); skb_reserve(new_sb, NS_AAL0_HEADER); push_rxbufs(card, new_sb); } } } static void dequeue_lg_buf(ns_dev *card, struct sk_buff *lb) { skb_unlink(lb, &card->lbpool.queue); #ifdef NS_USE_DESTRUCTORS if (card->lbfqc < card->lbnr.min) #else if (card->lbfqc < card->lbnr.init) { struct sk_buff *new_lb; if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { NS_SKB_CB(new_lb)->buf_type = BUF_LG; skb_queue_tail(&card->lbpool.queue, new_lb); skb_reserve(new_lb, NS_SMBUFSIZE); push_rxbufs(card, new_lb); } } if (card->lbfqc < card->lbnr.init) #endif /* NS_USE_DESTRUCTORS */ { struct sk_buff *new_lb; if ((new_lb = dev_alloc_skb(NS_LGSKBSIZE)) != NULL) { NS_SKB_CB(new_lb)->buf_type = BUF_LG; skb_queue_tail(&card->lbpool.queue, new_lb); skb_reserve(new_lb, NS_SMBUFSIZE); push_rxbufs(card, new_lb); } } } static int ns_proc_read(struct atm_dev *dev, loff_t *pos, char *page) { u32 stat; ns_dev *card; int left; left = (int) *pos; card = (ns_dev *) dev->dev_data; stat = readl(card->membase + STAT); if (!left--) return sprintf(page, "Pool count min init max \n"); if (!left--) return sprintf(page, "Small %5d %5d %5d %5d \n", ns_stat_sfbqc_get(stat), card->sbnr.min, card->sbnr.init, card->sbnr.max); if (!left--) return sprintf(page, "Large %5d %5d %5d %5d \n", ns_stat_lfbqc_get(stat), card->lbnr.min, card->lbnr.init, card->lbnr.max); if (!left--) 
return sprintf(page, "Huge %5d %5d %5d %5d \n", card->hbpool.count, card->hbnr.min, card->hbnr.init, card->hbnr.max); if (!left--) return sprintf(page, "Iovec %5d %5d %5d %5d \n", card->iovpool.count, card->iovnr.min, card->iovnr.init, card->iovnr.max); if (!left--) { int retval; retval = sprintf(page, "Interrupt counter: %u \n", card->intcnt); card->intcnt = 0; return retval; } #if 0 /* Dump 25.6 Mbps PHY registers */ /* Now there's a 25.6 Mbps PHY driver this code isn't needed. I left it here just in case it's needed for debugging. */ if (card->max_pcr == ATM_25_PCR && !left--) { u32 phy_regs[4]; u32 i; for (i = 0; i < 4; i++) { while (CMD_BUSY(card)); writel(NS_CMD_READ_UTILITY | 0x00000200 | i, card->membase + CMD); while (CMD_BUSY(card)); phy_regs[i] = readl(card->membase + DR0) & 0x000000FF; } return sprintf(page, "PHY regs: 0x%02X 0x%02X 0x%02X 0x%02X \n", phy_regs[0], phy_regs[1], phy_regs[2], phy_regs[3]); } #endif /* 0 - Dump 25.6 Mbps PHY registers */ #if 0 /* Dump TST */ if (left-- < NS_TST_NUM_ENTRIES) { if (card->tste2vc[left + 1] == NULL) return sprintf(page, "%5d - VBR/UBR \n", left + 1); else return sprintf(page, "%5d - %d %d \n", left + 1, card->tste2vc[left + 1]->tx_vcc->vpi, card->tste2vc[left + 1]->tx_vcc->vci); } #endif /* 0 */ return 0; } static int ns_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) { ns_dev *card; pool_levels pl; long btype; unsigned long flags; card = dev->dev_data; switch (cmd) { case NS_GETPSTAT: if (get_user(pl.buftype, &((pool_levels __user *) arg)->buftype)) return -EFAULT; switch (pl.buftype) { case NS_BUFTYPE_SMALL: pl.count = ns_stat_sfbqc_get(readl(card->membase + STAT)); pl.level.min = card->sbnr.min; pl.level.init = card->sbnr.init; pl.level.max = card->sbnr.max; break; case NS_BUFTYPE_LARGE: pl.count = ns_stat_lfbqc_get(readl(card->membase + STAT)); pl.level.min = card->lbnr.min; pl.level.init = card->lbnr.init; pl.level.max = card->lbnr.max; break; case NS_BUFTYPE_HUGE: pl.count = 
card->hbpool.count; pl.level.min = card->hbnr.min; pl.level.init = card->hbnr.init; pl.level.max = card->hbnr.max; break; case NS_BUFTYPE_IOVEC: pl.count = card->iovpool.count; pl.level.min = card->iovnr.min; pl.level.init = card->iovnr.init; pl.level.max = card->iovnr.max; break; default: return -ENOIOCTLCMD; } if (!copy_to_user((pool_levels __user *) arg, &pl, sizeof(pl))) return (sizeof(pl)); else return -EFAULT; case NS_SETBUFLEV: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_from_user(&pl, (pool_levels __user *) arg, sizeof(pl))) return -EFAULT; if (pl.level.min >= pl.level.init || pl.level.init >= pl.level.max) return -EINVAL; if (pl.level.min == 0) return -EINVAL; switch (pl.buftype) { case NS_BUFTYPE_SMALL: if (pl.level.max > TOP_SB) return -EINVAL; card->sbnr.min = pl.level.min; card->sbnr.init = pl.level.init; card->sbnr.max = pl.level.max; break; case NS_BUFTYPE_LARGE: if (pl.level.max > TOP_LB) return -EINVAL; card->lbnr.min = pl.level.min; card->lbnr.init = pl.level.init; card->lbnr.max = pl.level.max; break; case NS_BUFTYPE_HUGE: if (pl.level.max > TOP_HB) return -EINVAL; card->hbnr.min = pl.level.min; card->hbnr.init = pl.level.init; card->hbnr.max = pl.level.max; break; case NS_BUFTYPE_IOVEC: if (pl.level.max > TOP_IOVB) return -EINVAL; card->iovnr.min = pl.level.min; card->iovnr.init = pl.level.init; card->iovnr.max = pl.level.max; break; default: return -EINVAL; } return 0; case NS_ADJBUFLEV: if (!capable(CAP_NET_ADMIN)) return -EPERM; btype = (long) arg; /* a long is the same size as a pointer or bigger */ switch (btype) { case NS_BUFTYPE_SMALL: while (card->sbfqc < card->sbnr.init) { struct sk_buff *sb; sb = __dev_alloc_skb(NS_SMSKBSIZE, GFP_KERNEL); if (sb == NULL) return -ENOMEM; NS_SKB_CB(sb)->buf_type = BUF_SM; skb_queue_tail(&card->sbpool.queue, sb); skb_reserve(sb, NS_AAL0_HEADER); push_rxbufs(card, sb); } break; case NS_BUFTYPE_LARGE: while (card->lbfqc < card->lbnr.init) { struct sk_buff *lb; lb = __dev_alloc_skb(NS_LGSKBSIZE, 
GFP_KERNEL); if (lb == NULL) return -ENOMEM; NS_SKB_CB(lb)->buf_type = BUF_LG; skb_queue_tail(&card->lbpool.queue, lb); skb_reserve(lb, NS_SMBUFSIZE); push_rxbufs(card, lb); } break; case NS_BUFTYPE_HUGE: while (card->hbpool.count > card->hbnr.init) { struct sk_buff *hb; spin_lock_irqsave(&card->int_lock, flags); hb = skb_dequeue(&card->hbpool.queue); card->hbpool.count--; spin_unlock_irqrestore(&card->int_lock, flags); if (hb == NULL) printk("nicstar%d: huge buffer count inconsistent.\n", card->index); else dev_kfree_skb_any(hb); } while (card->hbpool.count < card->hbnr.init) { struct sk_buff *hb; hb = __dev_alloc_skb(NS_HBUFSIZE, GFP_KERNEL); if (hb == NULL) return -ENOMEM; NS_SKB_CB(hb)->buf_type = BUF_NONE; spin_lock_irqsave(&card->int_lock, flags); skb_queue_tail(&card->hbpool.queue, hb); card->hbpool.count++; spin_unlock_irqrestore(&card->int_lock, flags); } break; case NS_BUFTYPE_IOVEC: while (card->iovpool.count > card->iovnr.init) { struct sk_buff *iovb; spin_lock_irqsave(&card->int_lock, flags); iovb = skb_dequeue(&card->iovpool.queue); card->iovpool.count--; spin_unlock_irqrestore(&card->int_lock, flags); if (iovb == NULL) printk("nicstar%d: iovec buffer count inconsistent.\n", card->index); else dev_kfree_skb_any(iovb); } while (card->iovpool.count < card->iovnr.init) { struct sk_buff *iovb; iovb = alloc_skb(NS_IOVBUFSIZE, GFP_KERNEL); if (iovb == NULL) return -ENOMEM; NS_SKB_CB(iovb)->buf_type = BUF_NONE; spin_lock_irqsave(&card->int_lock, flags); skb_queue_tail(&card->iovpool.queue, iovb); card->iovpool.count++; spin_unlock_irqrestore(&card->int_lock, flags); } break; default: return -EINVAL; } return 0; default: if (dev->phy && dev->phy->ioctl) { return dev->phy->ioctl(dev, cmd, arg); } else { printk("nicstar%d: %s == NULL \n", card->index, dev->phy ? 
"dev->phy->ioctl" : "dev->phy"); return -ENOIOCTLCMD; } } } static void which_list(ns_dev *card, struct sk_buff *skb) { printk("skb buf_type: 0x%08x\n", NS_SKB_CB(skb)->buf_type); } static void ns_poll(unsigned long arg) { int i; ns_dev *card; unsigned long flags; u32 stat_r, stat_w; PRINTK("nicstar: Entering ns_poll().\n"); for (i = 0; i < num_cards; i++) { card = cards[i]; if (spin_is_locked(&card->int_lock)) { /* Probably it isn't worth spinning */ continue; } spin_lock_irqsave(&card->int_lock, flags); stat_w = 0; stat_r = readl(card->membase + STAT); if (stat_r & NS_STAT_TSIF) stat_w |= NS_STAT_TSIF; if (stat_r & NS_STAT_EOPDU) stat_w |= NS_STAT_EOPDU; process_tsq(card); process_rsq(card); writel(stat_w, card->membase + STAT); spin_unlock_irqrestore(&card->int_lock, flags); } mod_timer(&ns_timer, jiffies + NS_POLL_PERIOD); PRINTK("nicstar: Leaving ns_poll().\n"); } static int ns_parse_mac(char *mac, unsigned char *esi) { int i, j; short byte1, byte0; if (mac == NULL || esi == NULL) return -1; j = 0; for (i = 0; i < 6; i++) { if ((byte1 = ns_h2i(mac[j++])) < 0) return -1; if ((byte0 = ns_h2i(mac[j++])) < 0) return -1; esi[i] = (unsigned char) (byte1 * 16 + byte0); if (i < 5) { if (mac[j++] != ':') return -1; } } return 0; } static short ns_h2i(char c) { if (c >= '0' && c <= '9') return (short) (c - '0'); if (c >= 'A' && c <= 'F') return (short) (c - 'A' + 10); if (c >= 'a' && c <= 'f') return (short) (c - 'a' + 10); return -1; } static void ns_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr) { ns_dev *card; unsigned long flags; card = dev->dev_data; spin_lock_irqsave(&card->res_lock, flags); while(CMD_BUSY(card)); writel((unsigned long) value, card->membase + DR0); writel(NS_CMD_WRITE_UTILITY | 0x00000200 | (addr & 0x000000FF), card->membase + CMD); spin_unlock_irqrestore(&card->res_lock, flags); } static unsigned char ns_phy_get(struct atm_dev *dev, unsigned long addr) { ns_dev *card; unsigned long flags; unsigned long data; card = 
dev->dev_data; spin_lock_irqsave(&card->res_lock, flags); while(CMD_BUSY(card)); writel(NS_CMD_READ_UTILITY | 0x00000200 | (addr & 0x000000FF), card->membase + CMD); while(CMD_BUSY(card)); data = readl(card->membase + DR0) & 0x000000FF; spin_unlock_irqrestore(&card->res_lock, flags); return (unsigned char) data; } module_init(nicstar_init); module_exit(nicstar_cleanup);
gpl-2.0
cattleprod/XCeLL-X69
drivers/media/video/gspca/jeilinj.c
802
10267
/* * Jeilinj subdriver * * Supports some Jeilin dual-mode cameras which use bulk transport and * download raw JPEG data. * * Copyright (C) 2009 Theodore Kilgore * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define MODULE_NAME "jeilinj" #include <linux/workqueue.h> #include <linux/slab.h> #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Theodore Kilgore <kilgota@auburn.edu>"); MODULE_DESCRIPTION("GSPCA/JEILINJ USB Camera Driver"); MODULE_LICENSE("GPL"); /* Default timeouts, in ms */ #define JEILINJ_CMD_TIMEOUT 500 #define JEILINJ_DATA_TIMEOUT 1000 /* Maximum transfer size to use. */ #define JEILINJ_MAX_TRANSFER 0x200 #define FRAME_HEADER_LEN 0x10 /* Structure to hold all of our device specific stuff */ struct sd { struct gspca_dev gspca_dev; /* !! must be the first item */ const struct v4l2_pix_format *cap_mode; /* Driver stuff */ struct work_struct work_struct; struct workqueue_struct *work_thread; u8 quality; /* image quality */ u8 jpegqual; /* webcam quality */ u8 *jpeg_hdr; }; struct jlj_command { unsigned char instruction[2]; unsigned char ack_wanted; }; /* AFAICT these cameras will only do 320x240. 
*/ static struct v4l2_pix_format jlj_mode[] = { { 320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0} }; /* * cam uses endpoint 0x03 to send commands, 0x84 for read commands, * and 0x82 for bulk transfer. */ /* All commands are two bytes only */ static int jlj_write2(struct gspca_dev *gspca_dev, unsigned char *command) { int retval; memcpy(gspca_dev->usb_buf, command, 2); retval = usb_bulk_msg(gspca_dev->dev, usb_sndbulkpipe(gspca_dev->dev, 3), gspca_dev->usb_buf, 2, NULL, 500); if (retval < 0) PDEBUG(D_ERR, "command write [%02x] error %d", gspca_dev->usb_buf[0], retval); return retval; } /* Responses are one byte only */ static int jlj_read1(struct gspca_dev *gspca_dev, unsigned char response) { int retval; retval = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x84), gspca_dev->usb_buf, 1, NULL, 500); response = gspca_dev->usb_buf[0]; if (retval < 0) PDEBUG(D_ERR, "read command [%02x] error %d", gspca_dev->usb_buf[0], retval); return retval; } static int jlj_start(struct gspca_dev *gspca_dev) { int i; int retval = -1; u8 response = 0xff; struct jlj_command start_commands[] = { {{0x71, 0x81}, 0}, {{0x70, 0x05}, 0}, {{0x95, 0x70}, 1}, {{0x71, 0x81}, 0}, {{0x70, 0x04}, 0}, {{0x95, 0x70}, 1}, {{0x71, 0x00}, 0}, {{0x70, 0x08}, 0}, {{0x95, 0x70}, 1}, {{0x94, 0x02}, 0}, {{0xde, 0x24}, 0}, {{0x94, 0x02}, 0}, {{0xdd, 0xf0}, 0}, {{0x94, 0x02}, 0}, {{0xe3, 0x2c}, 0}, {{0x94, 0x02}, 0}, {{0xe4, 0x00}, 0}, {{0x94, 0x02}, 0}, {{0xe5, 0x00}, 0}, {{0x94, 0x02}, 0}, {{0xe6, 0x2c}, 0}, {{0x94, 0x03}, 0}, {{0xaa, 0x00}, 0}, {{0x71, 0x1e}, 0}, {{0x70, 0x06}, 0}, {{0x71, 0x80}, 0}, {{0x70, 0x07}, 0} }; for (i = 0; i < ARRAY_SIZE(start_commands); i++) { retval = jlj_write2(gspca_dev, start_commands[i].instruction); if (retval < 0) return retval; if (start_commands[i].ack_wanted) retval = jlj_read1(gspca_dev, response); if (retval < 0) return retval; } PDEBUG(D_ERR, "jlj_start retval is 
%d", retval); return retval; } static int jlj_stop(struct gspca_dev *gspca_dev) { int i; int retval; struct jlj_command stop_commands[] = { {{0x71, 0x00}, 0}, {{0x70, 0x09}, 0}, {{0x71, 0x80}, 0}, {{0x70, 0x05}, 0} }; for (i = 0; i < ARRAY_SIZE(stop_commands); i++) { retval = jlj_write2(gspca_dev, stop_commands[i].instruction); if (retval < 0) return retval; } return retval; } /* This function is called as a workqueue function and runs whenever the camera * is streaming data. Because it is a workqueue function it is allowed to sleep * so we can use synchronous USB calls. To avoid possible collisions with other * threads attempting to use the camera's USB interface the gspca usb_lock is * used when performing the one USB control operation inside the workqueue, * which tells the camera to close the stream. In practice the only thing * which needs to be protected against is the usb_set_interface call that * gspca makes during stream_off. Otherwise the camera doesn't provide any * controls that the user could try to change. */ static void jlj_dostream(struct work_struct *work) { struct sd *dev = container_of(work, struct sd, work_struct); struct gspca_dev *gspca_dev = &dev->gspca_dev; int blocks_left; /* 0x200-sized blocks remaining in current frame. */ int size_in_blocks; int act_len; int packet_type; int ret; u8 *buffer; buffer = kmalloc(JEILINJ_MAX_TRANSFER, GFP_KERNEL | GFP_DMA); if (!buffer) { PDEBUG(D_ERR, "Couldn't allocate USB buffer"); goto quit_stream; } while (gspca_dev->present && gspca_dev->streaming) { /* * Now request data block 0. Line 0 reports the size * to download, in blocks of size 0x200, and also tells the * "actual" data size, in bytes, which seems best to ignore. 
*/ ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x82), buffer, JEILINJ_MAX_TRANSFER, &act_len, JEILINJ_DATA_TIMEOUT); PDEBUG(D_STREAM, "Got %d bytes out of %d for Block 0", act_len, JEILINJ_MAX_TRANSFER); if (ret < 0 || act_len < FRAME_HEADER_LEN) goto quit_stream; size_in_blocks = buffer[0x0a]; blocks_left = buffer[0x0a] - 1; PDEBUG(D_STREAM, "blocks_left = 0x%x", blocks_left); /* Start a new frame, and add the JPEG header, first thing */ gspca_frame_add(gspca_dev, FIRST_PACKET, dev->jpeg_hdr, JPEG_HDR_SZ); /* Toss line 0 of data block 0, keep the rest. */ gspca_frame_add(gspca_dev, INTER_PACKET, buffer + FRAME_HEADER_LEN, JEILINJ_MAX_TRANSFER - FRAME_HEADER_LEN); while (blocks_left > 0) { if (!gspca_dev->present) goto quit_stream; ret = usb_bulk_msg(gspca_dev->dev, usb_rcvbulkpipe(gspca_dev->dev, 0x82), buffer, JEILINJ_MAX_TRANSFER, &act_len, JEILINJ_DATA_TIMEOUT); if (ret < 0 || act_len < JEILINJ_MAX_TRANSFER) goto quit_stream; PDEBUG(D_STREAM, "%d blocks remaining for frame", blocks_left); blocks_left -= 1; if (blocks_left == 0) packet_type = LAST_PACKET; else packet_type = INTER_PACKET; gspca_frame_add(gspca_dev, packet_type, buffer, JEILINJ_MAX_TRANSFER); } } quit_stream: mutex_lock(&gspca_dev->usb_lock); if (gspca_dev->present) jlj_stop(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); kfree(buffer); } /* This function is called at probe time just before sd_init */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct cam *cam = &gspca_dev->cam; struct sd *dev = (struct sd *) gspca_dev; dev->quality = 85; dev->jpegqual = 85; PDEBUG(D_PROBE, "JEILINJ camera detected" " (vid/pid 0x%04X:0x%04X)", id->idVendor, id->idProduct); cam->cam_mode = jlj_mode; cam->nmodes = 1; cam->bulk = 1; /* We don't use the buffer gspca allocates so make it small. 
*/ cam->bulk_size = 32; INIT_WORK(&dev->work_struct, jlj_dostream); return 0; } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; /* wait for the work queue to terminate */ mutex_unlock(&gspca_dev->usb_lock); /* This waits for jlj_dostream to finish */ destroy_workqueue(dev->work_thread); dev->work_thread = NULL; mutex_lock(&gspca_dev->usb_lock); kfree(dev->jpeg_hdr); } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { return 0; } /* Set up for getting frames. */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *dev = (struct sd *) gspca_dev; int ret; /* create the JPEG header */ dev->jpeg_hdr = kmalloc(JPEG_HDR_SZ, GFP_KERNEL); if (dev->jpeg_hdr == NULL) return -ENOMEM; jpeg_define(dev->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ jpeg_set_qual(dev->jpeg_hdr, dev->quality); PDEBUG(D_STREAM, "Start streaming at 320x240"); ret = jlj_start(gspca_dev); if (ret < 0) { PDEBUG(D_ERR, "Start streaming command failed"); return ret; } /* Start the workqueue function to do the streaming */ dev->work_thread = create_singlethread_workqueue(MODULE_NAME); queue_work(dev->work_thread, &dev->work_struct); return 0; } /* Table of supported USB devices */ static const __devinitdata struct usb_device_id device_table[] = { {USB_DEVICE(0x0979, 0x0280)}, {} }; MODULE_DEVICE_TABLE(usb, device_table); /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .config = sd_config, .init = sd_init, .start = sd_start, .stop0 = sd_stop0, }; /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = device_table, .probe = 
sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; /* -- module insert / remove -- */ static int __init sd_mod_init(void) { int ret; ret = usb_register(&sd_driver); if (ret < 0) return ret; PDEBUG(D_PROBE, "registered"); return 0; } static void __exit sd_mod_exit(void) { usb_deregister(&sd_driver); PDEBUG(D_PROBE, "deregistered"); } module_init(sd_mod_init); module_exit(sd_mod_exit);
gpl-2.0
JoeyJiao/huawei_kernel_2.6.32_9
lib/nlattr.c
1570
12456
/* * NETLINK Netlink attributes * * Authors: Thomas Graf <tgraf@suug.ch> * Alexey Kuznetsov <kuznet@ms2.inr.ac.ru> */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/jiffies.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/string.h> #include <linux/types.h> #include <net/netlink.h> static u16 nla_attr_minlen[NLA_TYPE_MAX+1] __read_mostly = { [NLA_U8] = sizeof(u8), [NLA_U16] = sizeof(u16), [NLA_U32] = sizeof(u32), [NLA_U64] = sizeof(u64), [NLA_NESTED] = NLA_HDRLEN, }; static int validate_nla(struct nlattr *nla, int maxtype, const struct nla_policy *policy) { const struct nla_policy *pt; int minlen = 0, attrlen = nla_len(nla), type = nla_type(nla); if (type <= 0 || type > maxtype) return 0; pt = &policy[type]; BUG_ON(pt->type > NLA_TYPE_MAX); switch (pt->type) { case NLA_FLAG: if (attrlen > 0) return -ERANGE; break; case NLA_NUL_STRING: if (pt->len) minlen = min_t(int, attrlen, pt->len + 1); else minlen = attrlen; if (!minlen || memchr(nla_data(nla), '\0', minlen) == NULL) return -EINVAL; /* fall through */ case NLA_STRING: if (attrlen < 1) return -ERANGE; if (pt->len) { char *buf = nla_data(nla); if (buf[attrlen - 1] == '\0') attrlen--; if (attrlen > pt->len) return -ERANGE; } break; case NLA_BINARY: if (pt->len && attrlen > pt->len) return -ERANGE; break; case NLA_NESTED_COMPAT: if (attrlen < pt->len) return -ERANGE; if (attrlen < NLA_ALIGN(pt->len)) break; if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN) return -ERANGE; nla = nla_data(nla) + NLA_ALIGN(pt->len); if (attrlen < NLA_ALIGN(pt->len) + NLA_HDRLEN + nla_len(nla)) return -ERANGE; break; case NLA_NESTED: /* a nested attributes is allowed to be empty; if its not, * it must have a size of at least NLA_HDRLEN. 
*/ if (attrlen == 0) break; default: if (pt->len) minlen = pt->len; else if (pt->type != NLA_UNSPEC) minlen = nla_attr_minlen[pt->type]; if (attrlen < minlen) return -ERANGE; } return 0; } /** * nla_validate - Validate a stream of attributes * @head: head of attribute stream * @len: length of attribute stream * @maxtype: maximum attribute type to be expected * @policy: validation policy * * Validates all attributes in the specified attribute stream against the * specified policy. Attributes with a type exceeding maxtype will be * ignored. See documenation of struct nla_policy for more details. * * Returns 0 on success or a negative error code. */ int nla_validate(struct nlattr *head, int len, int maxtype, const struct nla_policy *policy) { struct nlattr *nla; int rem, err; nla_for_each_attr(nla, head, len, rem) { err = validate_nla(nla, maxtype, policy); if (err < 0) goto errout; } err = 0; errout: return err; } /** * nla_policy_len - Determin the max. length of a policy * @policy: policy to use * @n: number of policies * * Determines the max. length of the policy. It is currently used * to allocated Netlink buffers roughly the size of the actual * message. * * Returns 0 on success or a negative error code. */ int nla_policy_len(const struct nla_policy *p, int n) { int i, len = 0; for (i = 0; i < n; i++) { if (p->len) len += nla_total_size(p->len); else if (nla_attr_minlen[p->type]) len += nla_total_size(nla_attr_minlen[p->type]); } return len; } /** * nla_parse - Parse a stream of attributes into a tb buffer * @tb: destination array with maxtype+1 elements * @maxtype: maximum attribute type to be expected * @head: head of attribute stream * @len: length of attribute stream * @policy: validation policy * * Parses a stream of attributes and stores a pointer to each attribute in * the tb array accessable via the attribute type. Attributes with a type * exceeding maxtype will be silently ignored for backwards compatibility * reasons. 
policy may be set to NULL if no validation is required. * * Returns 0 on success or a negative error code. */ int nla_parse(struct nlattr *tb[], int maxtype, struct nlattr *head, int len, const struct nla_policy *policy) { struct nlattr *nla; int rem, err; memset(tb, 0, sizeof(struct nlattr *) * (maxtype + 1)); nla_for_each_attr(nla, head, len, rem) { u16 type = nla_type(nla); if (type > 0 && type <= maxtype) { if (policy) { err = validate_nla(nla, maxtype, policy); if (err < 0) goto errout; } tb[type] = nla; } } if (unlikely(rem > 0)) printk(KERN_WARNING "netlink: %d bytes leftover after parsing " "attributes.\n", rem); err = 0; errout: return err; } /** * nla_find - Find a specific attribute in a stream of attributes * @head: head of attribute stream * @len: length of attribute stream * @attrtype: type of attribute to look for * * Returns the first attribute in the stream matching the specified type. */ struct nlattr *nla_find(struct nlattr *head, int len, int attrtype) { struct nlattr *nla; int rem; nla_for_each_attr(nla, head, len, rem) if (nla_type(nla) == attrtype) return nla; return NULL; } /** * nla_strlcpy - Copy string attribute payload into a sized buffer * @dst: where to copy the string to * @nla: attribute to copy the string from * @dstsize: size of destination buffer * * Copies at most dstsize - 1 bytes into the destination buffer. * The result is always a valid NUL-terminated string. Unlike * strlcpy the destination buffer is always padded out. * * Returns the length of the source buffer. */ size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize) { size_t srclen = nla_len(nla); char *src = nla_data(nla); if (srclen > 0 && src[srclen - 1] == '\0') srclen--; if (dstsize > 0) { size_t len = (srclen >= dstsize) ? 
dstsize - 1 : srclen; memset(dst, 0, dstsize); memcpy(dst, src, len); } return srclen; } /** * nla_memcpy - Copy a netlink attribute into another memory area * @dest: where to copy to memcpy * @src: netlink attribute to copy from * @count: size of the destination area * * Note: The number of bytes copied is limited by the length of * attribute's payload. memcpy * * Returns the number of bytes copied. */ int nla_memcpy(void *dest, const struct nlattr *src, int count) { int minlen = min_t(int, count, nla_len(src)); memcpy(dest, nla_data(src), minlen); return minlen; } /** * nla_memcmp - Compare an attribute with sized memory area * @nla: netlink attribute * @data: memory area * @size: size of memory area */ int nla_memcmp(const struct nlattr *nla, const void *data, size_t size) { int d = nla_len(nla) - size; if (d == 0) d = memcmp(nla_data(nla), data, size); return d; } /** * nla_strcmp - Compare a string attribute against a string * @nla: netlink string attribute * @str: another string */ int nla_strcmp(const struct nlattr *nla, const char *str) { int len = strlen(str) + 1; int d = nla_len(nla) - len; if (d == 0) d = memcmp(nla_data(nla), str, len); return d; } #ifdef CONFIG_NET /** * __nla_reserve - reserve room for attribute on the skb * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. 
*/ struct nlattr *__nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) { struct nlattr *nla; nla = (struct nlattr *) skb_put(skb, nla_total_size(attrlen)); nla->nla_type = attrtype; nla->nla_len = nla_attr_size(attrlen); memset((unsigned char *) nla + nla->nla_len, 0, nla_padlen(attrlen)); return nla; } EXPORT_SYMBOL(__nla_reserve); /** * __nla_reserve_nohdr - reserve room for attribute without header * @skb: socket buffer to reserve room on * @attrlen: length of attribute payload * * Reserves room for attribute payload without a header. * * The caller is responsible to ensure that the skb provides enough * tailroom for the payload. */ void *__nla_reserve_nohdr(struct sk_buff *skb, int attrlen) { void *start; start = skb_put(skb, NLA_ALIGN(attrlen)); memset(start, 0, NLA_ALIGN(attrlen)); return start; } EXPORT_SYMBOL(__nla_reserve_nohdr); /** * nla_reserve - reserve room for attribute on the skb * @skb: socket buffer to reserve room on * @attrtype: attribute type * @attrlen: length of attribute payload * * Adds a netlink attribute header to a socket buffer and reserves * room for the payload but does not copy it. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute header and payload. */ struct nlattr *nla_reserve(struct sk_buff *skb, int attrtype, int attrlen) { if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) return NULL; return __nla_reserve(skb, attrtype, attrlen); } EXPORT_SYMBOL(nla_reserve); /** * nla_reserve_nohdr - reserve room for attribute without header * @skb: socket buffer to reserve room on * @attrlen: length of attribute payload * * Reserves room for attribute payload without a header. * * Returns NULL if the tailroom of the skb is insufficient to store * the attribute payload. 
*/ void *nla_reserve_nohdr(struct sk_buff *skb, int attrlen) { if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) return NULL; return __nla_reserve_nohdr(skb, attrlen); } EXPORT_SYMBOL(nla_reserve_nohdr); /** * __nla_put - Add a netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute header and payload. */ void __nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) { struct nlattr *nla; nla = __nla_reserve(skb, attrtype, attrlen); memcpy(nla_data(nla), data, attrlen); } EXPORT_SYMBOL(__nla_put); /** * __nla_put_nohdr - Add a netlink attribute without header * @skb: socket buffer to add attribute to * @attrlen: length of attribute payload * @data: head of attribute payload * * The caller is responsible to ensure that the skb provides enough * tailroom for the attribute payload. */ void __nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) { void *start; start = __nla_reserve_nohdr(skb, attrlen); memcpy(start, data, attrlen); } EXPORT_SYMBOL(__nla_put_nohdr); /** * nla_put - Add a netlink attribute to a socket buffer * @skb: socket buffer to add attribute to * @attrtype: attribute type * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute header and payload. 
*/ int nla_put(struct sk_buff *skb, int attrtype, int attrlen, const void *data) { if (unlikely(skb_tailroom(skb) < nla_total_size(attrlen))) return -EMSGSIZE; __nla_put(skb, attrtype, attrlen, data); return 0; } EXPORT_SYMBOL(nla_put); /** * nla_put_nohdr - Add a netlink attribute without header * @skb: socket buffer to add attribute to * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute payload. */ int nla_put_nohdr(struct sk_buff *skb, int attrlen, const void *data) { if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) return -EMSGSIZE; __nla_put_nohdr(skb, attrlen, data); return 0; } EXPORT_SYMBOL(nla_put_nohdr); /** * nla_append - Add a netlink attribute without header or padding * @skb: socket buffer to add attribute to * @attrlen: length of attribute payload * @data: head of attribute payload * * Returns -EMSGSIZE if the tailroom of the skb is insufficient to store * the attribute payload. */ int nla_append(struct sk_buff *skb, int attrlen, const void *data) { if (unlikely(skb_tailroom(skb) < NLA_ALIGN(attrlen))) return -EMSGSIZE; memcpy(skb_put(skb, attrlen), data, attrlen); return 0; } EXPORT_SYMBOL(nla_append); #endif EXPORT_SYMBOL(nla_validate); EXPORT_SYMBOL(nla_policy_len); EXPORT_SYMBOL(nla_parse); EXPORT_SYMBOL(nla_find); EXPORT_SYMBOL(nla_strlcpy); EXPORT_SYMBOL(nla_memcpy); EXPORT_SYMBOL(nla_memcmp); EXPORT_SYMBOL(nla_strcmp);
gpl-2.0
snishanth512/linux
drivers/powercap/powercap_sys.c
1826
19421
/* * Power capping class * Copyright (c) 2013, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc. * */ #include <linux/module.h> #include <linux/device.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/powercap.h> #define to_powercap_zone(n) container_of(n, struct powercap_zone, dev) #define to_powercap_control_type(n) \ container_of(n, struct powercap_control_type, dev) /* Power zone show function */ #define define_power_zone_show(_attr) \ static ssize_t _attr##_show(struct device *dev, \ struct device_attribute *dev_attr,\ char *buf) \ { \ u64 value; \ ssize_t len = -EINVAL; \ struct powercap_zone *power_zone = to_powercap_zone(dev); \ \ if (power_zone->ops->get_##_attr) { \ if (!power_zone->ops->get_##_attr(power_zone, &value)) \ len = sprintf(buf, "%lld\n", value); \ } \ \ return len; \ } /* The only meaningful input is 0 (reset), others are silently ignored */ #define define_power_zone_store(_attr) \ static ssize_t _attr##_store(struct device *dev,\ struct device_attribute *dev_attr, \ const char *buf, size_t count) \ { \ int err; \ struct powercap_zone *power_zone = to_powercap_zone(dev); \ u64 value; \ \ err = kstrtoull(buf, 10, &value); \ if (err) \ return -EINVAL; \ if (value) \ return count; \ if (power_zone->ops->reset_##_attr) { \ if (!power_zone->ops->reset_##_attr(power_zone)) \ return count; \ } \ \ return -EINVAL; \ } /* Power zone constraint show function */ #define 
define_power_zone_constraint_show(_attr) \ static ssize_t show_constraint_##_attr(struct device *dev, \ struct device_attribute *dev_attr,\ char *buf) \ { \ u64 value; \ ssize_t len = -ENODATA; \ struct powercap_zone *power_zone = to_powercap_zone(dev); \ int id; \ struct powercap_zone_constraint *pconst;\ \ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \ return -EINVAL; \ if (id >= power_zone->const_id_cnt) \ return -EINVAL; \ pconst = &power_zone->constraints[id]; \ if (pconst && pconst->ops && pconst->ops->get_##_attr) { \ if (!pconst->ops->get_##_attr(power_zone, id, &value)) \ len = sprintf(buf, "%lld\n", value); \ } \ \ return len; \ } /* Power zone constraint store function */ #define define_power_zone_constraint_store(_attr) \ static ssize_t store_constraint_##_attr(struct device *dev,\ struct device_attribute *dev_attr, \ const char *buf, size_t count) \ { \ int err; \ u64 value; \ struct powercap_zone *power_zone = to_powercap_zone(dev); \ int id; \ struct powercap_zone_constraint *pconst;\ \ if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) \ return -EINVAL; \ if (id >= power_zone->const_id_cnt) \ return -EINVAL; \ pconst = &power_zone->constraints[id]; \ err = kstrtoull(buf, 10, &value); \ if (err) \ return -EINVAL; \ if (pconst && pconst->ops && pconst->ops->set_##_attr) { \ if (!pconst->ops->set_##_attr(power_zone, id, value)) \ return count; \ } \ \ return -ENODATA; \ } /* Power zone information callbacks */ define_power_zone_show(power_uw); define_power_zone_show(max_power_range_uw); define_power_zone_show(energy_uj); define_power_zone_store(energy_uj); define_power_zone_show(max_energy_range_uj); /* Power zone attributes */ static DEVICE_ATTR_RO(max_power_range_uw); static DEVICE_ATTR_RO(power_uw); static DEVICE_ATTR_RO(max_energy_range_uj); static DEVICE_ATTR_RW(energy_uj); /* Power zone constraint attributes callbacks */ define_power_zone_constraint_show(power_limit_uw); define_power_zone_constraint_store(power_limit_uw); 
define_power_zone_constraint_show(time_window_us); define_power_zone_constraint_store(time_window_us); define_power_zone_constraint_show(max_power_uw); define_power_zone_constraint_show(min_power_uw); define_power_zone_constraint_show(max_time_window_us); define_power_zone_constraint_show(min_time_window_us); /* For one time seeding of constraint device attributes */ struct powercap_constraint_attr { struct device_attribute power_limit_attr; struct device_attribute time_window_attr; struct device_attribute max_power_attr; struct device_attribute min_power_attr; struct device_attribute max_time_window_attr; struct device_attribute min_time_window_attr; struct device_attribute name_attr; }; static struct powercap_constraint_attr constraint_attrs[MAX_CONSTRAINTS_PER_ZONE]; /* A list of powercap control_types */ static LIST_HEAD(powercap_cntrl_list); /* Mutex to protect list of powercap control_types */ static DEFINE_MUTEX(powercap_cntrl_list_lock); #define POWERCAP_CONSTRAINT_NAME_LEN 30 /* Some limit to avoid overflow */ static ssize_t show_constraint_name(struct device *dev, struct device_attribute *dev_attr, char *buf) { const char *name; struct powercap_zone *power_zone = to_powercap_zone(dev); int id; ssize_t len = -ENODATA; struct powercap_zone_constraint *pconst; if (!sscanf(dev_attr->attr.name, "constraint_%d_", &id)) return -EINVAL; if (id >= power_zone->const_id_cnt) return -EINVAL; pconst = &power_zone->constraints[id]; if (pconst && pconst->ops && pconst->ops->get_name) { name = pconst->ops->get_name(power_zone, id); if (name) { snprintf(buf, POWERCAP_CONSTRAINT_NAME_LEN, "%s\n", name); buf[POWERCAP_CONSTRAINT_NAME_LEN] = '\0'; len = strlen(buf); } } return len; } static int create_constraint_attribute(int id, const char *name, int mode, struct device_attribute *dev_attr, ssize_t (*show)(struct device *, struct device_attribute *, char *), ssize_t (*store)(struct device *, struct device_attribute *, const char *, size_t) ) { dev_attr->attr.name = 
kasprintf(GFP_KERNEL, "constraint_%d_%s", id, name); if (!dev_attr->attr.name) return -ENOMEM; dev_attr->attr.mode = mode; dev_attr->show = show; dev_attr->store = store; return 0; } static void free_constraint_attributes(void) { int i; for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) { kfree(constraint_attrs[i].power_limit_attr.attr.name); kfree(constraint_attrs[i].time_window_attr.attr.name); kfree(constraint_attrs[i].name_attr.attr.name); kfree(constraint_attrs[i].max_power_attr.attr.name); kfree(constraint_attrs[i].min_power_attr.attr.name); kfree(constraint_attrs[i].max_time_window_attr.attr.name); kfree(constraint_attrs[i].min_time_window_attr.attr.name); } } static int seed_constraint_attributes(void) { int i; int ret; for (i = 0; i < MAX_CONSTRAINTS_PER_ZONE; ++i) { ret = create_constraint_attribute(i, "power_limit_uw", S_IWUSR | S_IRUGO, &constraint_attrs[i].power_limit_attr, show_constraint_power_limit_uw, store_constraint_power_limit_uw); if (ret) goto err_alloc; ret = create_constraint_attribute(i, "time_window_us", S_IWUSR | S_IRUGO, &constraint_attrs[i].time_window_attr, show_constraint_time_window_us, store_constraint_time_window_us); if (ret) goto err_alloc; ret = create_constraint_attribute(i, "name", S_IRUGO, &constraint_attrs[i].name_attr, show_constraint_name, NULL); if (ret) goto err_alloc; ret = create_constraint_attribute(i, "max_power_uw", S_IRUGO, &constraint_attrs[i].max_power_attr, show_constraint_max_power_uw, NULL); if (ret) goto err_alloc; ret = create_constraint_attribute(i, "min_power_uw", S_IRUGO, &constraint_attrs[i].min_power_attr, show_constraint_min_power_uw, NULL); if (ret) goto err_alloc; ret = create_constraint_attribute(i, "max_time_window_us", S_IRUGO, &constraint_attrs[i].max_time_window_attr, show_constraint_max_time_window_us, NULL); if (ret) goto err_alloc; ret = create_constraint_attribute(i, "min_time_window_us", S_IRUGO, &constraint_attrs[i].min_time_window_attr, show_constraint_min_time_window_us, NULL); if (ret) goto 
err_alloc; } return 0; err_alloc: free_constraint_attributes(); return ret; } static int create_constraints(struct powercap_zone *power_zone, int nr_constraints, struct powercap_zone_constraint_ops *const_ops) { int i; int ret = 0; int count; struct powercap_zone_constraint *pconst; if (!power_zone || !const_ops || !const_ops->get_power_limit_uw || !const_ops->set_power_limit_uw || !const_ops->get_time_window_us || !const_ops->set_time_window_us) return -EINVAL; count = power_zone->zone_attr_count; for (i = 0; i < nr_constraints; ++i) { pconst = &power_zone->constraints[i]; pconst->ops = const_ops; pconst->id = power_zone->const_id_cnt; power_zone->const_id_cnt++; power_zone->zone_dev_attrs[count++] = &constraint_attrs[i].power_limit_attr.attr; power_zone->zone_dev_attrs[count++] = &constraint_attrs[i].time_window_attr.attr; if (pconst->ops->get_name) power_zone->zone_dev_attrs[count++] = &constraint_attrs[i].name_attr.attr; if (pconst->ops->get_max_power_uw) power_zone->zone_dev_attrs[count++] = &constraint_attrs[i].max_power_attr.attr; if (pconst->ops->get_min_power_uw) power_zone->zone_dev_attrs[count++] = &constraint_attrs[i].min_power_attr.attr; if (pconst->ops->get_max_time_window_us) power_zone->zone_dev_attrs[count++] = &constraint_attrs[i].max_time_window_attr.attr; if (pconst->ops->get_min_time_window_us) power_zone->zone_dev_attrs[count++] = &constraint_attrs[i].min_time_window_attr.attr; } power_zone->zone_attr_count = count; return ret; } static bool control_type_valid(void *control_type) { struct powercap_control_type *pos = NULL; bool found = false; mutex_lock(&powercap_cntrl_list_lock); list_for_each_entry(pos, &powercap_cntrl_list, node) { if (pos == control_type) { found = true; break; } } mutex_unlock(&powercap_cntrl_list_lock); return found; } static ssize_t name_show(struct device *dev, struct device_attribute *attr, char *buf) { struct powercap_zone *power_zone = to_powercap_zone(dev); return sprintf(buf, "%s\n", power_zone->name); } static 
DEVICE_ATTR_RO(name); /* Create zone and attributes in sysfs */ static void create_power_zone_common_attributes( struct powercap_zone *power_zone) { int count = 0; power_zone->zone_dev_attrs[count++] = &dev_attr_name.attr; if (power_zone->ops->get_max_energy_range_uj) power_zone->zone_dev_attrs[count++] = &dev_attr_max_energy_range_uj.attr; if (power_zone->ops->get_energy_uj) { if (power_zone->ops->reset_energy_uj) dev_attr_energy_uj.attr.mode = S_IWUSR | S_IRUGO; else dev_attr_energy_uj.attr.mode = S_IRUGO; power_zone->zone_dev_attrs[count++] = &dev_attr_energy_uj.attr; } if (power_zone->ops->get_power_uw) power_zone->zone_dev_attrs[count++] = &dev_attr_power_uw.attr; if (power_zone->ops->get_max_power_range_uw) power_zone->zone_dev_attrs[count++] = &dev_attr_max_power_range_uw.attr; power_zone->zone_dev_attrs[count] = NULL; power_zone->zone_attr_count = count; } static void powercap_release(struct device *dev) { bool allocated; if (dev->parent) { struct powercap_zone *power_zone = to_powercap_zone(dev); /* Store flag as the release() may free memory */ allocated = power_zone->allocated; /* Remove id from parent idr struct */ idr_remove(power_zone->parent_idr, power_zone->id); /* Destroy idrs allocated for this zone */ idr_destroy(&power_zone->idr); kfree(power_zone->name); kfree(power_zone->zone_dev_attrs); kfree(power_zone->constraints); if (power_zone->ops->release) power_zone->ops->release(power_zone); if (allocated) kfree(power_zone); } else { struct powercap_control_type *control_type = to_powercap_control_type(dev); /* Store flag as the release() may free memory */ allocated = control_type->allocated; idr_destroy(&control_type->idr); mutex_destroy(&control_type->lock); if (control_type->ops && control_type->ops->release) control_type->ops->release(control_type); if (allocated) kfree(control_type); } } static ssize_t enabled_show(struct device *dev, struct device_attribute *attr, char *buf) { bool mode = true; /* Default is enabled */ if (dev->parent) { 
struct powercap_zone *power_zone = to_powercap_zone(dev); if (power_zone->ops->get_enable) if (power_zone->ops->get_enable(power_zone, &mode)) mode = false; } else { struct powercap_control_type *control_type = to_powercap_control_type(dev); if (control_type->ops && control_type->ops->get_enable) if (control_type->ops->get_enable(control_type, &mode)) mode = false; } return sprintf(buf, "%d\n", mode); } static ssize_t enabled_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t len) { bool mode; if (strtobool(buf, &mode)) return -EINVAL; if (dev->parent) { struct powercap_zone *power_zone = to_powercap_zone(dev); if (power_zone->ops->set_enable) if (!power_zone->ops->set_enable(power_zone, mode)) return len; } else { struct powercap_control_type *control_type = to_powercap_control_type(dev); if (control_type->ops && control_type->ops->set_enable) if (!control_type->ops->set_enable(control_type, mode)) return len; } return -ENOSYS; } static DEVICE_ATTR_RW(enabled); static struct attribute *powercap_attrs[] = { &dev_attr_enabled.attr, NULL, }; ATTRIBUTE_GROUPS(powercap); static struct class powercap_class = { .name = "powercap", .dev_release = powercap_release, .dev_groups = powercap_groups, }; struct powercap_zone *powercap_register_zone( struct powercap_zone *power_zone, struct powercap_control_type *control_type, const char *name, struct powercap_zone *parent, const struct powercap_zone_ops *ops, int nr_constraints, struct powercap_zone_constraint_ops *const_ops) { int result; int nr_attrs; if (!name || !control_type || !ops || nr_constraints > MAX_CONSTRAINTS_PER_ZONE || (!ops->get_energy_uj && !ops->get_power_uw) || !control_type_valid(control_type)) return ERR_PTR(-EINVAL); if (power_zone) { if (!ops->release) return ERR_PTR(-EINVAL); memset(power_zone, 0, sizeof(*power_zone)); } else { power_zone = kzalloc(sizeof(*power_zone), GFP_KERNEL); if (!power_zone) return ERR_PTR(-ENOMEM); power_zone->allocated = true; } power_zone->ops = 
ops; power_zone->control_type_inst = control_type; if (!parent) { power_zone->dev.parent = &control_type->dev; power_zone->parent_idr = &control_type->idr; } else { power_zone->dev.parent = &parent->dev; power_zone->parent_idr = &parent->idr; } power_zone->dev.class = &powercap_class; mutex_lock(&control_type->lock); /* Using idr to get the unique id */ result = idr_alloc(power_zone->parent_idr, NULL, 0, 0, GFP_KERNEL); if (result < 0) goto err_idr_alloc; power_zone->id = result; idr_init(&power_zone->idr); power_zone->name = kstrdup(name, GFP_KERNEL); if (!power_zone->name) goto err_name_alloc; dev_set_name(&power_zone->dev, "%s:%x", dev_name(power_zone->dev.parent), power_zone->id); power_zone->constraints = kzalloc(sizeof(*power_zone->constraints) * nr_constraints, GFP_KERNEL); if (!power_zone->constraints) goto err_const_alloc; nr_attrs = nr_constraints * POWERCAP_CONSTRAINTS_ATTRS + POWERCAP_ZONE_MAX_ATTRS + 1; power_zone->zone_dev_attrs = kzalloc(sizeof(void *) * nr_attrs, GFP_KERNEL); if (!power_zone->zone_dev_attrs) goto err_attr_alloc; create_power_zone_common_attributes(power_zone); result = create_constraints(power_zone, nr_constraints, const_ops); if (result) goto err_dev_ret; power_zone->zone_dev_attrs[power_zone->zone_attr_count] = NULL; power_zone->dev_zone_attr_group.attrs = power_zone->zone_dev_attrs; power_zone->dev_attr_groups[0] = &power_zone->dev_zone_attr_group; power_zone->dev_attr_groups[1] = NULL; power_zone->dev.groups = power_zone->dev_attr_groups; result = device_register(&power_zone->dev); if (result) goto err_dev_ret; control_type->nr_zones++; mutex_unlock(&control_type->lock); return power_zone; err_dev_ret: kfree(power_zone->zone_dev_attrs); err_attr_alloc: kfree(power_zone->constraints); err_const_alloc: kfree(power_zone->name); err_name_alloc: idr_remove(power_zone->parent_idr, power_zone->id); err_idr_alloc: if (power_zone->allocated) kfree(power_zone); mutex_unlock(&control_type->lock); return ERR_PTR(result); } 
EXPORT_SYMBOL_GPL(powercap_register_zone); int powercap_unregister_zone(struct powercap_control_type *control_type, struct powercap_zone *power_zone) { if (!power_zone || !control_type) return -EINVAL; mutex_lock(&control_type->lock); control_type->nr_zones--; mutex_unlock(&control_type->lock); device_unregister(&power_zone->dev); return 0; } EXPORT_SYMBOL_GPL(powercap_unregister_zone); struct powercap_control_type *powercap_register_control_type( struct powercap_control_type *control_type, const char *name, const struct powercap_control_type_ops *ops) { int result; if (!name) return ERR_PTR(-EINVAL); if (control_type) { if (!ops || !ops->release) return ERR_PTR(-EINVAL); memset(control_type, 0, sizeof(*control_type)); } else { control_type = kzalloc(sizeof(*control_type), GFP_KERNEL); if (!control_type) return ERR_PTR(-ENOMEM); control_type->allocated = true; } mutex_init(&control_type->lock); control_type->ops = ops; INIT_LIST_HEAD(&control_type->node); control_type->dev.class = &powercap_class; dev_set_name(&control_type->dev, "%s", name); result = device_register(&control_type->dev); if (result) { if (control_type->allocated) kfree(control_type); return ERR_PTR(result); } idr_init(&control_type->idr); mutex_lock(&powercap_cntrl_list_lock); list_add_tail(&control_type->node, &powercap_cntrl_list); mutex_unlock(&powercap_cntrl_list_lock); return control_type; } EXPORT_SYMBOL_GPL(powercap_register_control_type); int powercap_unregister_control_type(struct powercap_control_type *control_type) { struct powercap_control_type *pos = NULL; if (control_type->nr_zones) { dev_err(&control_type->dev, "Zones of this type still not freed\n"); return -EINVAL; } mutex_lock(&powercap_cntrl_list_lock); list_for_each_entry(pos, &powercap_cntrl_list, node) { if (pos == control_type) { list_del(&control_type->node); mutex_unlock(&powercap_cntrl_list_lock); device_unregister(&control_type->dev); return 0; } } mutex_unlock(&powercap_cntrl_list_lock); return -ENODEV; } 
EXPORT_SYMBOL_GPL(powercap_unregister_control_type); static int __init powercap_init(void) { int result = 0; result = seed_constraint_attributes(); if (result) return result; result = class_register(&powercap_class); return result; } device_initcall(powercap_init); MODULE_DESCRIPTION("PowerCap sysfs Driver"); MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
pgielda/vybrid-linux
arch/sh/kernel/cpu/sh4a/clock-sh7757.c
2338
4138
/* * arch/sh/kernel/cpu/sh4/clock-sh7757.c * * SH7757 support for the clock framework * * Copyright (C) 2009-2010 Renesas Solutions Corp. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/clkdev.h> #include <asm/clock.h> #include <asm/freq.h> /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. */ static struct clk extal_clk = { .rate = 48000000, }; static unsigned long pll_recalc(struct clk *clk) { int multiplier; multiplier = test_mode_pin(MODE_PIN0) ? 24 : 16; return clk->parent->rate * multiplier; } static struct clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .ops = &pll_clk_ops, .parent = &extal_clk, .flags = CLK_ENABLE_ON_INIT, }; static struct clk *clks[] = { &extal_clk, &pll_clk, }; static unsigned int div2[] = { 1, 1, 2, 1, 1, 4, 1, 6, 1, 1, 1, 16, 1, 24, 1, 1 }; static struct clk_div_mult_table div4_div_mult_table = { .divisors = div2, .nr_divisors = ARRAY_SIZE(div2), }; static struct clk_div4_table div4_table = { .div_mult_table = &div4_div_mult_table, }; enum { DIV4_I, DIV4_SH, DIV4_P, DIV4_NR }; #define DIV4(_bit, _mask, _flags) \ SH_CLK_DIV4(&pll_clk, FRQCR, _bit, _mask, _flags) struct clk div4_clks[DIV4_NR] = { /* * P clock is always enable, because some P clock modules is used * by Host PC. 
*/ [DIV4_P] = DIV4(0, 0x2800, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4(12, 0x00a0, CLK_ENABLE_ON_INIT), [DIV4_I] = DIV4(20, 0x0004, CLK_ENABLE_ON_INIT), }; #define MSTPCR0 0xffc80030 #define MSTPCR1 0xffc80034 #define MSTPCR2 0xffc10028 enum { MSTP004, MSTP000, MSTP114, MSTP113, MSTP112, MSTP111, MSTP110, MSTP103, MSTP102, MSTP220, MSTP_NR }; static struct clk mstp_clks[MSTP_NR] = { /* MSTPCR0 */ [MSTP004] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 4, 0), [MSTP000] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR0, 0, 0), /* MSTPCR1 */ [MSTP114] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 14, 0), [MSTP113] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 13, 0), [MSTP112] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 12, 0), [MSTP111] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 11, 0), [MSTP110] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 10, 0), [MSTP103] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 3, 0), [MSTP102] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR1, 2, 0), /* MSTPCR2 */ [MSTP220] = SH_CLK_MSTP32(&div4_clks[DIV4_P], MSTPCR2, 20, 0), }; #define CLKDEV_CON_ID(_id, _clk) { .con_id = _id, .clk = _clk } static struct clk_lookup lookups[] = { /* main clocks */ CLKDEV_CON_ID("extal", &extal_clk), CLKDEV_CON_ID("pll_clk", &pll_clk), /* DIV4 clocks */ CLKDEV_CON_ID("peripheral_clk", &div4_clks[DIV4_P]), CLKDEV_CON_ID("shyway_clk", &div4_clks[DIV4_SH]), CLKDEV_CON_ID("cpu_clk", &div4_clks[DIV4_I]), /* MSTP32 clocks */ CLKDEV_CON_ID("sdhi0", &mstp_clks[MSTP004]), CLKDEV_CON_ID("riic", &mstp_clks[MSTP000]), { /* TMU0 */ .dev_id = "sh_tmu.0", .con_id = "tmu_fck", .clk = &mstp_clks[MSTP113], }, { /* TMU1 */ .dev_id = "sh_tmu.1", .con_id = "tmu_fck", .clk = &mstp_clks[MSTP114], }, { /* SCIF4 (But, ID is 2) */ .dev_id = "sh-sci.2", .con_id = "sci_fck", .clk = &mstp_clks[MSTP112], }, { /* SCIF3 */ .dev_id = "sh-sci.1", .con_id = "sci_fck", .clk = &mstp_clks[MSTP111], }, { /* SCIF2 */ .dev_id = "sh-sci.0", .con_id = "sci_fck", .clk = &mstp_clks[MSTP110], }, CLKDEV_CON_ID("usb0", 
&mstp_clks[MSTP102]), CLKDEV_CON_ID("mmc0", &mstp_clks[MSTP220]), }; int __init arch_clk_init(void) { int i, ret = 0; for (i = 0; i < ARRAY_SIZE(clks); i++) ret |= clk_register(clks[i]); for (i = 0; i < ARRAY_SIZE(lookups); i++) clkdev_add(&lookups[i]); if (!ret) ret = sh_clk_div4_register(div4_clks, ARRAY_SIZE(div4_clks), &div4_table); if (!ret) ret = sh_clk_mstp32_register(mstp_clks, MSTP_NR); return ret; }
gpl-2.0
MikeC84/mac_kernel_moto_minnow
drivers/media/usb/tm6000/tm6000-cards.c
2338
36301
/* * tm6000-cards.c - driver for TM5600/TM6000/TM6010 USB video capture devices * * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/delay.h> #include <linux/i2c.h> #include <linux/usb.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include <media/tvaudio.h> #include <media/i2c-addr.h> #include <media/rc-map.h> #include "tm6000.h" #include "tm6000-regs.h" #include "tuner-xc2028.h" #include "xc5000.h" #define TM6000_BOARD_UNKNOWN 0 #define TM5600_BOARD_GENERIC 1 #define TM6000_BOARD_GENERIC 2 #define TM6010_BOARD_GENERIC 3 #define TM5600_BOARD_10MOONS_UT821 4 #define TM5600_BOARD_10MOONS_UT330 5 #define TM6000_BOARD_ADSTECH_DUAL_TV 6 #define TM6000_BOARD_FREECOM_AND_SIMILAR 7 #define TM6000_BOARD_ADSTECH_MINI_DUAL_TV 8 #define TM6010_BOARD_HAUPPAUGE_900H 9 #define TM6010_BOARD_BEHOLD_WANDER 10 #define TM6010_BOARD_BEHOLD_VOYAGER 11 #define TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE 12 #define TM6010_BOARD_TWINHAN_TU501 13 #define TM6010_BOARD_BEHOLD_WANDER_LITE 14 #define TM6010_BOARD_BEHOLD_VOYAGER_LITE 15 #define TM5600_BOARD_TERRATEC_GRABSTER 16 #define is_generic(model) ((model == TM6000_BOARD_UNKNOWN) || \ (model == TM5600_BOARD_GENERIC) || \ (model == TM6000_BOARD_GENERIC) || \ (model == 
TM6010_BOARD_GENERIC)) #define TM6000_MAXBOARDS 16 static unsigned int card[] = {[0 ... (TM6000_MAXBOARDS - 1)] = UNSET }; module_param_array(card, int, NULL, 0444); static unsigned long tm6000_devused; struct tm6000_board { char *name; char eename[16]; /* EEPROM name */ unsigned eename_size; /* size of EEPROM name */ unsigned eename_pos; /* Position where it appears at ROM */ struct tm6000_capabilities caps; enum tm6000_devtype type; /* variant of the chipset */ int tuner_type; /* type of the tuner */ int tuner_addr; /* tuner address */ int demod_addr; /* demodulator address */ struct tm6000_gpio gpio; struct tm6000_input vinput[3]; struct tm6000_input rinput; char *ir_codes; }; static struct tm6000_board tm6000_boards[] = { [TM6000_BOARD_UNKNOWN] = { .name = "Unknown tm6000 video grabber", .caps = { .has_tuner = 1, .has_eeprom = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM5600_BOARD_GENERIC] = { .name = "Generic tm5600 board", .type = TM5600, .tuner_type = TUNER_XC2028, .tuner_addr = 0xc2 >> 1, .caps = { .has_tuner = 1, .has_eeprom = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6000_BOARD_GENERIC] = { .name = "Generic tm6000 board", .tuner_type = TUNER_XC2028, .tuner_addr = 0xc2 >> 1, .caps = { .has_tuner = 1, .has_eeprom = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = 
TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6010_BOARD_GENERIC] = { .name = "Generic tm6010 board", .type = TM6010, .tuner_type = TUNER_XC2028, .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_2, .tuner_on = TM6010_GPIO_3, .demod_reset = TM6010_GPIO_1, .demod_on = TM6010_GPIO_4, .power_led = TM6010_GPIO_7, .dvb_led = TM6010_GPIO_5, .ir = TM6010_GPIO_0, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM5600_BOARD_10MOONS_UT821] = { .name = "10Moons UT 821", .tuner_type = TUNER_XC2028, .eename = { '1', '0', 'M', 'O', 'O', 'N', 'S', '5', '6', '0', '0', 0xff, 0x45, 0x5b}, .eename_size = 14, .eename_pos = 0x14, .type = TM5600, .tuner_addr = 0xc2 >> 1, .caps = { .has_tuner = 1, .has_eeprom = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM5600_BOARD_10MOONS_UT330] = { .name = "10Moons UT 330", .tuner_type = TUNER_PHILIPS_FQ1216AME_MK4, .tuner_addr = 0xc8 >> 1, .caps = { .has_tuner = 1, .has_dvb = 0, .has_zl10353 = 0, .has_eeprom = 1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, 
.amux = TM6000_AMUX_ADC2, }, }, }, [TM6000_BOARD_ADSTECH_DUAL_TV] = { .name = "ADSTECH Dual TV USB", .tuner_type = TUNER_XC2028, .tuner_addr = 0xc8 >> 1, .caps = { .has_tuner = 1, .has_tda9874 = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6000_BOARD_FREECOM_AND_SIMILAR] = { .name = "Freecom Hybrid Stick / Moka DVB-T Receiver Dual", .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 0, .has_remote = 1, }, .gpio = { .tuner_reset = TM6000_GPIO_4, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6000_BOARD_ADSTECH_MINI_DUAL_TV] = { .name = "ADSTECH Mini Dual TV USB", .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc8 >> 1, .demod_addr = 0x1e >> 1, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 0, }, .gpio = { .tuner_reset = TM6000_GPIO_4, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6010_BOARD_HAUPPAUGE_900H] = { .name = "Hauppauge WinTV HVR-900H / WinTV USB2-Stick", .eename = { 'H', 0, 'V', 0, 'R', 0, '9', 0, '0', 0, '0', 0, 'H', 0 }, .eename_size = 14, .eename_pos = 0x42, .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc2 
>> 1, .demod_addr = 0x1e >> 1, .type = TM6010, .ir_codes = RC_MAP_HAUPPAUGE, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_2, .tuner_on = TM6010_GPIO_3, .demod_reset = TM6010_GPIO_1, .demod_on = TM6010_GPIO_4, .power_led = TM6010_GPIO_7, .dvb_led = TM6010_GPIO_5, .ir = TM6010_GPIO_0, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6010_BOARD_BEHOLD_WANDER] = { .name = "Beholder Wander DVB-T/TV/FM USB2.0", .tuner_type = TUNER_XC5000, .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 1, .has_radio = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_0, .demod_reset = TM6010_GPIO_1, .power_led = TM6010_GPIO_6, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, .rinput = { .type = TM6000_INPUT_RADIO, .amux = TM6000_AMUX_ADC1, }, }, [TM6010_BOARD_BEHOLD_VOYAGER] = { .name = "Beholder Voyager TV/FM USB2.0", .tuner_type = TUNER_XC5000, .tuner_addr = 0xc2 >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 0, .has_zl10353 = 0, .has_eeprom = 1, .has_remote = 1, .has_radio = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_0, .power_led = TM6010_GPIO_6, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = 
TM6000_AMUX_ADC2, }, }, .rinput = { .type = TM6000_INPUT_RADIO, .amux = TM6000_AMUX_ADC1, }, }, [TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE] = { .name = "Terratec Cinergy Hybrid XE / Cinergy Hybrid-Stick", .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 1, .has_radio = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_2, .tuner_on = TM6010_GPIO_3, .demod_reset = TM6010_GPIO_1, .demod_on = TM6010_GPIO_4, .power_led = TM6010_GPIO_7, .dvb_led = TM6010_GPIO_5, .ir = TM6010_GPIO_0, }, .ir_codes = RC_MAP_NEC_TERRATEC_CINERGY_XS, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, .rinput = { .type = TM6000_INPUT_RADIO, .amux = TM6000_AMUX_SIF1, }, }, [TM5600_BOARD_TERRATEC_GRABSTER] = { .name = "Terratec Grabster AV 150/250 MX", .type = TM5600, .tuner_type = TUNER_ABSENT, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_ADC1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6010_BOARD_TWINHAN_TU501] = { .name = "Twinhan TU501(704D1)", .tuner_type = TUNER_XC2028, /* has a XC3028 */ .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_2, .tuner_on = TM6010_GPIO_3, .demod_reset = TM6010_GPIO_1, .demod_on = TM6010_GPIO_4, .power_led = TM6010_GPIO_7, .dvb_led = TM6010_GPIO_5, .ir = TM6010_GPIO_0, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = 
TM6000_AMUX_SIF1, }, { .type = TM6000_INPUT_COMPOSITE1, .vmux = TM6000_VMUX_VIDEO_A, .amux = TM6000_AMUX_ADC2, }, { .type = TM6000_INPUT_SVIDEO, .vmux = TM6000_VMUX_VIDEO_AB, .amux = TM6000_AMUX_ADC2, }, }, }, [TM6010_BOARD_BEHOLD_WANDER_LITE] = { .name = "Beholder Wander Lite DVB-T/TV/FM USB2.0", .tuner_type = TUNER_XC5000, .tuner_addr = 0xc2 >> 1, .demod_addr = 0x1e >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 1, .has_zl10353 = 1, .has_eeprom = 1, .has_remote = 0, .has_radio = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_0, .demod_reset = TM6010_GPIO_1, .power_led = TM6010_GPIO_6, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, }, .rinput = { .type = TM6000_INPUT_RADIO, .amux = TM6000_AMUX_ADC1, }, }, [TM6010_BOARD_BEHOLD_VOYAGER_LITE] = { .name = "Beholder Voyager Lite TV/FM USB2.0", .tuner_type = TUNER_XC5000, .tuner_addr = 0xc2 >> 1, .type = TM6010, .caps = { .has_tuner = 1, .has_dvb = 0, .has_zl10353 = 0, .has_eeprom = 1, .has_remote = 0, .has_radio = 1, }, .gpio = { .tuner_reset = TM6010_GPIO_0, .power_led = TM6010_GPIO_6, }, .vinput = { { .type = TM6000_INPUT_TV, .vmux = TM6000_VMUX_VIDEO_B, .amux = TM6000_AMUX_SIF1, }, }, .rinput = { .type = TM6000_INPUT_RADIO, .amux = TM6000_AMUX_ADC1, }, }, }; /* table of devices that work with this driver */ static struct usb_device_id tm6000_id_table[] = { { USB_DEVICE(0x6000, 0x0001), .driver_info = TM5600_BOARD_GENERIC }, { USB_DEVICE(0x6000, 0x0002), .driver_info = TM6010_BOARD_GENERIC }, { USB_DEVICE(0x06e1, 0xf332), .driver_info = TM6000_BOARD_ADSTECH_DUAL_TV }, { USB_DEVICE(0x14aa, 0x0620), .driver_info = TM6000_BOARD_FREECOM_AND_SIMILAR }, { USB_DEVICE(0x06e1, 0xb339), .driver_info = TM6000_BOARD_ADSTECH_MINI_DUAL_TV }, { USB_DEVICE(0x2040, 0x6600), .driver_info = TM6010_BOARD_HAUPPAUGE_900H }, { USB_DEVICE(0x2040, 0x6601), .driver_info = TM6010_BOARD_HAUPPAUGE_900H }, { USB_DEVICE(0x2040, 0x6610), .driver_info = TM6010_BOARD_HAUPPAUGE_900H }, { 
USB_DEVICE(0x2040, 0x6611), .driver_info = TM6010_BOARD_HAUPPAUGE_900H }, { USB_DEVICE(0x6000, 0xdec0), .driver_info = TM6010_BOARD_BEHOLD_WANDER }, { USB_DEVICE(0x6000, 0xdec1), .driver_info = TM6010_BOARD_BEHOLD_VOYAGER }, { USB_DEVICE(0x0ccd, 0x0086), .driver_info = TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE }, { USB_DEVICE(0x0ccd, 0x00A5), .driver_info = TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE }, { USB_DEVICE(0x0ccd, 0x0079), .driver_info = TM5600_BOARD_TERRATEC_GRABSTER }, { USB_DEVICE(0x13d3, 0x3240), .driver_info = TM6010_BOARD_TWINHAN_TU501 }, { USB_DEVICE(0x13d3, 0x3241), .driver_info = TM6010_BOARD_TWINHAN_TU501 }, { USB_DEVICE(0x13d3, 0x3243), .driver_info = TM6010_BOARD_TWINHAN_TU501 }, { USB_DEVICE(0x13d3, 0x3264), .driver_info = TM6010_BOARD_TWINHAN_TU501 }, { USB_DEVICE(0x6000, 0xdec2), .driver_info = TM6010_BOARD_BEHOLD_WANDER_LITE }, { USB_DEVICE(0x6000, 0xdec3), .driver_info = TM6010_BOARD_BEHOLD_VOYAGER_LITE }, { } }; MODULE_DEVICE_TABLE(usb, tm6000_id_table); /* Control power led for show some activity */ void tm6000_flash_led(struct tm6000_core *dev, u8 state) { /* Power LED unconfigured */ if (!dev->gpio.power_led) return; /* ON Power LED */ if (state) { switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00); break; case TM6010_BOARD_BEHOLD_WANDER: case TM6010_BOARD_BEHOLD_VOYAGER: case TM6010_BOARD_BEHOLD_WANDER_LITE: case TM6010_BOARD_BEHOLD_VOYAGER_LITE: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); break; } } /* OFF Power LED */ else { switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); break; case TM6010_BOARD_BEHOLD_WANDER: case TM6010_BOARD_BEHOLD_VOYAGER: case TM6010_BOARD_BEHOLD_WANDER_LITE: case 
TM6010_BOARD_BEHOLD_VOYAGER_LITE: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00); break; } } } /* Tuner callback to provide the proper gpio changes needed for xc5000 */ int tm6000_xc5000_callback(void *ptr, int component, int command, int arg) { int rc = 0; struct tm6000_core *dev = ptr; if (dev->tuner_type != TUNER_XC5000) return 0; switch (command) { case XC5000_TUNER_RESET: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); msleep(15); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x00); msleep(15); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); break; } return rc; } EXPORT_SYMBOL_GPL(tm6000_xc5000_callback); /* Tuner callback to provide the proper gpio changes needed for xc2028 */ int tm6000_tuner_callback(void *ptr, int component, int command, int arg) { int rc = 0; struct tm6000_core *dev = ptr; if (dev->tuner_type != TUNER_XC2028) return 0; switch (command) { case XC2028_RESET_CLK: tm6000_ir_wait(dev, 0); tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 0x02, arg); msleep(10); rc = tm6000_i2c_reset(dev, 10); break; case XC2028_TUNER_RESET: /* Reset codes during load firmware */ switch (arg) { case 0: /* newer tuner can faster reset */ switch (dev->model) { case TM5600_BOARD_10MOONS_UT821: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, 0x300, 0x01); msleep(10); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x00); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, 0x300, 0x00); msleep(10); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, 0x300, 0x01); break; case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); msleep(60); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, 
dev->gpio.tuner_reset, 0x00); msleep(75); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); msleep(60); break; default: tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x00); msleep(130); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); msleep(130); break; } tm6000_ir_wait(dev, 1); break; case 1: tm6000_set_reg(dev, REQ_04_EN_DISABLE_MCU_INT, 0x02, 0x01); msleep(10); break; case 2: rc = tm6000_i2c_reset(dev, 100); break; } break; case XC2028_I2C_FLUSH: tm6000_set_reg(dev, REQ_50_SET_START, 0, 0); tm6000_set_reg(dev, REQ_51_SET_STOP, 0, 0); break; } return rc; } EXPORT_SYMBOL_GPL(tm6000_tuner_callback); int tm6000_cards_setup(struct tm6000_core *dev) { /* * Board-specific initialization sequence. Handles all GPIO * initialization sequences that are board-specific. * Up to now, all found devices use GPIO1 and GPIO4 at the same way. * Probably, they're all based on some reference device. Due to that, * there's a common routine at the end to handle those GPIO's. Devices * that use different pinups or init sequences can just return at * the board-specific session. */ switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: case TM6010_BOARD_GENERIC: /* Turn xceive 3028 on */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_on, 0x01); msleep(15); /* Turn zarlink zl10353 on */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x00); msleep(15); /* Reset zarlink zl10353 */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x00); msleep(50); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x01); msleep(15); /* Turn zarlink zl10353 off */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x01); msleep(15); /* ir ? 
*/ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.ir, 0x01); msleep(15); /* Power led on (blue) */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00); msleep(15); /* DVB led off (orange) */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.dvb_led, 0x01); msleep(15); /* Turn zarlink zl10353 on */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_on, 0x00); msleep(15); break; case TM6010_BOARD_BEHOLD_WANDER: case TM6010_BOARD_BEHOLD_WANDER_LITE: /* Power led on (blue) */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); msleep(15); /* Reset zarlink zl10353 */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x00); msleep(50); tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.demod_reset, 0x01); msleep(15); break; case TM6010_BOARD_BEHOLD_VOYAGER: case TM6010_BOARD_BEHOLD_VOYAGER_LITE: /* Power led on (blue) */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); msleep(15); break; default: break; } /* * Default initialization. Most of the devices seem to use GPIO1 * and GPIO4.on the same way, so, this handles the common sequence * used by most devices. 
* If a device uses a different sequence or different GPIO pins for * reset, just add the code at the board-specific part */ if (dev->gpio.tuner_reset) { int rc; int i; for (i = 0; i < 2; i++) { rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x00); if (rc < 0) { printk(KERN_ERR "Error %i doing tuner reset\n", rc); return rc; } msleep(10); /* Just to be conservative */ rc = tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.tuner_reset, 0x01); if (rc < 0) { printk(KERN_ERR "Error %i doing tuner reset\n", rc); return rc; } } } else { printk(KERN_ERR "Tuner reset is not configured\n"); return -1; } msleep(50); return 0; }; static void tm6000_config_tuner(struct tm6000_core *dev) { struct tuner_setup tun_setup; /* Load tuner module */ v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tuner", dev->tuner_addr, NULL); memset(&tun_setup, 0, sizeof(tun_setup)); tun_setup.type = dev->tuner_type; tun_setup.addr = dev->tuner_addr; tun_setup.mode_mask = 0; if (dev->caps.has_tuner) tun_setup.mode_mask |= (T_ANALOG_TV | T_RADIO); switch (dev->tuner_type) { case TUNER_XC2028: tun_setup.tuner_callback = tm6000_tuner_callback; break; case TUNER_XC5000: tun_setup.tuner_callback = tm6000_xc5000_callback; break; } v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_type_addr, &tun_setup); switch (dev->tuner_type) { case TUNER_XC2028: { struct v4l2_priv_tun_config xc2028_cfg; struct xc2028_ctrl ctl; memset(&xc2028_cfg, 0, sizeof(xc2028_cfg)); memset(&ctl, 0, sizeof(ctl)); ctl.demod = XC3028_FE_ZARLINK456; xc2028_cfg.tuner = TUNER_XC2028; xc2028_cfg.priv = &ctl; switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: ctl.max_len = 80; ctl.fname = "xc3028L-v36.fw"; break; default: if (dev->dev_type == TM6010) ctl.fname = "xc3028-v27.fw"; else ctl.fname = "xc3028-v24.fw"; } printk(KERN_INFO "Setting firmware parameters for xc2028\n"); v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, 
s_config, &xc2028_cfg); } break; case TUNER_XC5000: { struct v4l2_priv_tun_config xc5000_cfg; struct xc5000_config ctl = { .i2c_address = dev->tuner_addr, .if_khz = 4570, .radio_input = XC5000_RADIO_FM1_MONO, }; xc5000_cfg.tuner = TUNER_XC5000; xc5000_cfg.priv = &ctl; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_config, &xc5000_cfg); } break; default: printk(KERN_INFO "Unknown tuner type. Tuner is not configured.\n"); break; } } static int fill_board_specific_data(struct tm6000_core *dev) { int rc; dev->dev_type = tm6000_boards[dev->model].type; dev->tuner_type = tm6000_boards[dev->model].tuner_type; dev->tuner_addr = tm6000_boards[dev->model].tuner_addr; dev->gpio = tm6000_boards[dev->model].gpio; dev->ir_codes = tm6000_boards[dev->model].ir_codes; dev->demod_addr = tm6000_boards[dev->model].demod_addr; dev->caps = tm6000_boards[dev->model].caps; dev->vinput[0] = tm6000_boards[dev->model].vinput[0]; dev->vinput[1] = tm6000_boards[dev->model].vinput[1]; dev->vinput[2] = tm6000_boards[dev->model].vinput[2]; dev->rinput = tm6000_boards[dev->model].rinput; /* setup per-model quirks */ switch (dev->model) { case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_HAUPPAUGE_900H: dev->quirks |= TM6000_QUIRK_NO_USB_DELAY; break; default: break; } /* initialize hardware */ rc = tm6000_init(dev); if (rc < 0) return rc; return v4l2_device_register(&dev->udev->dev, &dev->v4l2_dev); } static void use_alternative_detection_method(struct tm6000_core *dev) { int i, model = -1; if (!dev->eedata_size) return; for (i = 0; i < ARRAY_SIZE(tm6000_boards); i++) { if (!tm6000_boards[i].eename_size) continue; if (dev->eedata_size < tm6000_boards[i].eename_pos + tm6000_boards[i].eename_size) continue; if (!memcmp(&dev->eedata[tm6000_boards[i].eename_pos], tm6000_boards[i].eename, tm6000_boards[i].eename_size)) { model = i; break; } } if (model < 0) { printk(KERN_INFO "Device has eeprom but is currently unknown\n"); return; } dev->model = model; printk(KERN_INFO "Device 
identified via eeprom as %s (type = %d)\n", tm6000_boards[model].name, model); } #if defined(CONFIG_MODULES) && defined(MODULE) static void request_module_async(struct work_struct *work) { struct tm6000_core *dev = container_of(work, struct tm6000_core, request_module_wk); request_module("tm6000-alsa"); if (dev->caps.has_dvb) request_module("tm6000-dvb"); } static void request_modules(struct tm6000_core *dev) { INIT_WORK(&dev->request_module_wk, request_module_async); schedule_work(&dev->request_module_wk); } static void flush_request_modules(struct tm6000_core *dev) { flush_work(&dev->request_module_wk); } #else #define request_modules(dev) #define flush_request_modules(dev) #endif /* CONFIG_MODULES */ static int tm6000_init_dev(struct tm6000_core *dev) { struct v4l2_frequency f; int rc = 0; mutex_init(&dev->lock); mutex_lock(&dev->lock); if (!is_generic(dev->model)) { rc = fill_board_specific_data(dev); if (rc < 0) goto err; /* register i2c bus */ rc = tm6000_i2c_register(dev); if (rc < 0) goto err; } else { /* register i2c bus */ rc = tm6000_i2c_register(dev); if (rc < 0) goto err; use_alternative_detection_method(dev); rc = fill_board_specific_data(dev); if (rc < 0) goto err; } /* Default values for STD and resolutions */ dev->width = 720; dev->height = 480; dev->norm = V4L2_STD_PAL_M; /* Configure tuner */ tm6000_config_tuner(dev); /* Set video standard */ v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm); /* Set tuner frequency - also loads firmware on xc2028/xc3028 */ f.tuner = 0; f.type = V4L2_TUNER_ANALOG_TV; f.frequency = 3092; /* 193.25 MHz */ dev->freq = f.frequency; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, &f); if (dev->caps.has_tda9874) v4l2_i2c_new_subdev(&dev->v4l2_dev, &dev->i2c_adap, "tvaudio", I2C_ADDR_TDA9874, NULL); /* register and initialize V4L2 */ rc = tm6000_v4l2_register(dev); if (rc < 0) goto err; tm6000_add_into_devlist(dev); tm6000_init_extension(dev); tm6000_ir_init(dev); request_modules(dev); 
mutex_unlock(&dev->lock); return 0; err: mutex_unlock(&dev->lock); return rc; } /* high bandwidth multiplier, as encoded in highspeed endpoint descriptors */ #define hb_mult(wMaxPacketSize) (1 + (((wMaxPacketSize) >> 11) & 0x03)) static void get_max_endpoint(struct usb_device *udev, struct usb_host_interface *alt, char *msgtype, struct usb_host_endpoint *curr_e, struct tm6000_endpoint *tm_ep) { u16 tmp = le16_to_cpu(curr_e->desc.wMaxPacketSize); unsigned int size = tmp & 0x7ff; if (udev->speed == USB_SPEED_HIGH) size = size * hb_mult(tmp); if (size > tm_ep->maxsize) { tm_ep->endp = curr_e; tm_ep->maxsize = size; tm_ep->bInterfaceNumber = alt->desc.bInterfaceNumber; tm_ep->bAlternateSetting = alt->desc.bAlternateSetting; printk(KERN_INFO "tm6000: %s endpoint: 0x%02x (max size=%u bytes)\n", msgtype, curr_e->desc.bEndpointAddress, size); } } /* * tm6000_usb_probe() * checks for supported devices */ static int tm6000_usb_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct usb_device *usbdev; struct tm6000_core *dev = NULL; int i, rc = 0; int nr = 0; char *speed; usbdev = usb_get_dev(interface_to_usbdev(interface)); /* Selects the proper interface */ rc = usb_set_interface(usbdev, 0, 1); if (rc < 0) goto err; /* Check to see next free device and mark as used */ nr = find_first_zero_bit(&tm6000_devused, TM6000_MAXBOARDS); if (nr >= TM6000_MAXBOARDS) { printk(KERN_ERR "tm6000: Supports only %i tm60xx boards.\n", TM6000_MAXBOARDS); usb_put_dev(usbdev); return -ENOMEM; } /* Create and initialize dev struct */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (dev == NULL) { printk(KERN_ERR "tm6000" ": out of memory!\n"); usb_put_dev(usbdev); return -ENOMEM; } spin_lock_init(&dev->slock); mutex_init(&dev->usb_lock); /* Increment usage count */ set_bit(nr, &tm6000_devused); snprintf(dev->name, 29, "tm6000 #%d", nr); dev->model = id->driver_info; if (card[nr] < ARRAY_SIZE(tm6000_boards)) dev->model = card[nr]; dev->udev = usbdev; dev->devno = nr; switch 
(usbdev->speed) { case USB_SPEED_LOW: speed = "1.5"; break; case USB_SPEED_UNKNOWN: case USB_SPEED_FULL: speed = "12"; break; case USB_SPEED_HIGH: speed = "480"; break; default: speed = "unknown"; } /* Get endpoints */ for (i = 0; i < interface->num_altsetting; i++) { int ep; for (ep = 0; ep < interface->altsetting[i].desc.bNumEndpoints; ep++) { struct usb_host_endpoint *e; int dir_out; e = &interface->altsetting[i].endpoint[ep]; dir_out = ((e->desc.bEndpointAddress & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT); printk(KERN_INFO "tm6000: alt %d, interface %i, class %i\n", i, interface->altsetting[i].desc.bInterfaceNumber, interface->altsetting[i].desc.bInterfaceClass); switch (e->desc.bmAttributes) { case USB_ENDPOINT_XFER_BULK: if (!dir_out) { get_max_endpoint(usbdev, &interface->altsetting[i], "Bulk IN", e, &dev->bulk_in); } else { get_max_endpoint(usbdev, &interface->altsetting[i], "Bulk OUT", e, &dev->bulk_out); } break; case USB_ENDPOINT_XFER_ISOC: if (!dir_out) { get_max_endpoint(usbdev, &interface->altsetting[i], "ISOC IN", e, &dev->isoc_in); } else { get_max_endpoint(usbdev, &interface->altsetting[i], "ISOC OUT", e, &dev->isoc_out); } break; case USB_ENDPOINT_XFER_INT: if (!dir_out) { get_max_endpoint(usbdev, &interface->altsetting[i], "INT IN", e, &dev->int_in); } else { get_max_endpoint(usbdev, &interface->altsetting[i], "INT OUT", e, &dev->int_out); } break; } } } printk(KERN_INFO "tm6000: New video device @ %s Mbps (%04x:%04x, ifnum %d)\n", speed, le16_to_cpu(dev->udev->descriptor.idVendor), le16_to_cpu(dev->udev->descriptor.idProduct), interface->altsetting->desc.bInterfaceNumber); /* check if the the device has the iso in endpoint at the correct place */ if (!dev->isoc_in.endp) { printk(KERN_ERR "tm6000: probing error: no IN ISOC endpoint!\n"); rc = -ENODEV; goto err; } /* save our data pointer in this interface device */ usb_set_intfdata(interface, dev); printk(KERN_INFO "tm6000: Found %s\n", tm6000_boards[dev->model].name); rc = tm6000_init_dev(dev); if 
(rc < 0) goto err; return 0; err: printk(KERN_ERR "tm6000: Error %d while registering\n", rc); clear_bit(nr, &tm6000_devused); usb_put_dev(usbdev); kfree(dev); return rc; } /* * tm6000_usb_disconnect() * called when the device gets diconencted * video device will be unregistered on v4l2_close in case it is still open */ static void tm6000_usb_disconnect(struct usb_interface *interface) { struct tm6000_core *dev = usb_get_intfdata(interface); usb_set_intfdata(interface, NULL); if (!dev) return; printk(KERN_INFO "tm6000: disconnecting %s\n", dev->name); flush_request_modules(dev); tm6000_ir_fini(dev); if (dev->gpio.power_led) { switch (dev->model) { case TM6010_BOARD_HAUPPAUGE_900H: case TM6010_BOARD_TERRATEC_CINERGY_HYBRID_XE: case TM6010_BOARD_TWINHAN_TU501: /* Power led off */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x01); msleep(15); break; case TM6010_BOARD_BEHOLD_WANDER: case TM6010_BOARD_BEHOLD_VOYAGER: case TM6010_BOARD_BEHOLD_WANDER_LITE: case TM6010_BOARD_BEHOLD_VOYAGER_LITE: /* Power led off */ tm6000_set_reg(dev, REQ_03_SET_GET_MCU_PIN, dev->gpio.power_led, 0x00); msleep(15); break; } } tm6000_v4l2_unregister(dev); tm6000_i2c_unregister(dev); v4l2_device_unregister(&dev->v4l2_dev); dev->state |= DEV_DISCONNECTED; usb_put_dev(dev->udev); tm6000_close_extension(dev); tm6000_remove_from_devlist(dev); clear_bit(dev->devno, &tm6000_devused); kfree(dev); } static struct usb_driver tm6000_usb_driver = { .name = "tm6000", .probe = tm6000_usb_probe, .disconnect = tm6000_usb_disconnect, .id_table = tm6000_id_table, }; module_usb_driver(tm6000_usb_driver); MODULE_DESCRIPTION("Trident TVMaster TM5600/TM6000/TM6010 USB2 adapter"); MODULE_AUTHOR("Mauro Carvalho Chehab"); MODULE_LICENSE("GPL");
gpl-2.0
voidz777/omap
tools/perf/util/scripting-engines/trace-event-perl.c
3106
13615
/* * trace-event-perl. Feed perf script events to an embedded Perl interpreter. * * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <stdio.h> #include <stdlib.h> #include <string.h> #include <ctype.h> #include <errno.h> #include "../../perf.h" #include "../util.h" #include "../trace-event.h" #include <EXTERN.h> #include <perl.h> void boot_Perf__Trace__Context(pTHX_ CV *cv); void boot_DynaLoader(pTHX_ CV *cv); typedef PerlInterpreter * INTERP; void xs_init(pTHX); void xs_init(pTHX) { const char *file = __FILE__; dXSUB_SYS; newXS("Perf::Trace::Context::bootstrap", boot_Perf__Trace__Context, file); newXS("DynaLoader::boot_DynaLoader", boot_DynaLoader, file); } INTERP my_perl; #define FTRACE_MAX_EVENT \ ((1 << (sizeof(unsigned short) * 8)) - 1) struct event *events[FTRACE_MAX_EVENT]; extern struct scripting_context *scripting_context; static char *cur_field_name; static int zero_flag_atom; static void define_symbolic_value(const char *ev_name, const char *field_name, const char *field_value, const char *field_str) { unsigned long long value; dSP; value = eval_flag(field_value); ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVuv(value))); XPUSHs(sv_2mortal(newSVpv(field_str, 
0))); PUTBACK; if (get_cv("main::define_symbolic_value", 0)) call_pv("main::define_symbolic_value", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_symbolic_values(struct print_flag_sym *field, const char *ev_name, const char *field_name) { define_symbolic_value(ev_name, field_name, field->value, field->str); if (field->next) define_symbolic_values(field->next, ev_name, field_name); } static void define_symbolic_field(const char *ev_name, const char *field_name) { dSP; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); PUTBACK; if (get_cv("main::define_symbolic_field", 0)) call_pv("main::define_symbolic_field", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_flag_value(const char *ev_name, const char *field_name, const char *field_value, const char *field_str) { unsigned long long value; dSP; value = eval_flag(field_value); ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVuv(value))); XPUSHs(sv_2mortal(newSVpv(field_str, 0))); PUTBACK; if (get_cv("main::define_flag_value", 0)) call_pv("main::define_flag_value", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_flag_values(struct print_flag_sym *field, const char *ev_name, const char *field_name) { define_flag_value(ev_name, field_name, field->value, field->str); if (field->next) define_flag_values(field->next, ev_name, field_name); } static void define_flag_field(const char *ev_name, const char *field_name, const char *delim) { dSP; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(ev_name, 0))); XPUSHs(sv_2mortal(newSVpv(field_name, 0))); XPUSHs(sv_2mortal(newSVpv(delim, 0))); PUTBACK; if (get_cv("main::define_flag_field", 0)) call_pv("main::define_flag_field", G_SCALAR); SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void define_event_symbols(struct event *event, const char *ev_name, struct print_arg 
*args) { switch (args->type) { case PRINT_NULL: break; case PRINT_ATOM: define_flag_value(ev_name, cur_field_name, "0", args->atom.atom); zero_flag_atom = 0; break; case PRINT_FIELD: if (cur_field_name) free(cur_field_name); cur_field_name = strdup(args->field.name); break; case PRINT_FLAGS: define_event_symbols(event, ev_name, args->flags.field); define_flag_field(ev_name, cur_field_name, args->flags.delim); define_flag_values(args->flags.flags, ev_name, cur_field_name); break; case PRINT_SYMBOL: define_event_symbols(event, ev_name, args->symbol.field); define_symbolic_field(ev_name, cur_field_name); define_symbolic_values(args->symbol.symbols, ev_name, cur_field_name); break; case PRINT_STRING: break; case PRINT_TYPE: define_event_symbols(event, ev_name, args->typecast.item); break; case PRINT_OP: if (strcmp(args->op.op, ":") == 0) zero_flag_atom = 1; define_event_symbols(event, ev_name, args->op.left); define_event_symbols(event, ev_name, args->op.right); break; default: /* we should warn... */ return; } if (args->next) define_event_symbols(event, ev_name, args->next); } static inline struct event *find_cache_event(int type) { static char ev_name[256]; struct event *event; if (events[type]) return events[type]; events[type] = event = trace_find_event(type); if (!event) return NULL; sprintf(ev_name, "%s::%s", event->system, event->name); define_event_symbols(event, ev_name, event->print_fmt.args); return event; } static void perl_process_event(union perf_event *pevent __unused, struct perf_sample *sample, struct perf_evsel *evsel, struct perf_session *session __unused, struct thread *thread) { struct format_field *field; static char handler[256]; unsigned long long val; unsigned long s, ns; struct event *event; int type; int pid; int cpu = sample->cpu; void *data = sample->raw_data; unsigned long long nsecs = sample->time; char *comm = thread->comm; dSP; type = trace_parse_common_type(data); event = find_cache_event(type); if (!event) die("ug! 
no event found for type %d", type); pid = trace_parse_common_pid(data); sprintf(handler, "%s::%s", event->system, event->name); s = nsecs / NSECS_PER_SEC; ns = nsecs - s * NSECS_PER_SEC; scripting_context->event_data = data; ENTER; SAVETMPS; PUSHMARK(SP); XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(s))); XPUSHs(sv_2mortal(newSVuv(ns))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); /* common fields other than pid can be accessed via xsub fns */ for (field = event->format.fields; field; field = field->next) { if (field->flags & FIELD_IS_STRING) { int offset; if (field->flags & FIELD_IS_DYNAMIC) { offset = *(int *)(data + field->offset); offset &= 0xffff; } else offset = field->offset; XPUSHs(sv_2mortal(newSVpv((char *)data + offset, 0))); } else { /* FIELD_IS_NUMERIC */ val = read_size(data + field->offset, field->size); if (field->flags & FIELD_IS_SIGNED) { XPUSHs(sv_2mortal(newSViv(val))); } else { XPUSHs(sv_2mortal(newSVuv(val))); } } } PUTBACK; if (get_cv(handler, 0)) call_pv(handler, G_SCALAR); else if (get_cv("main::trace_unhandled", 0)) { XPUSHs(sv_2mortal(newSVpv(handler, 0))); XPUSHs(sv_2mortal(newSViv(PTR2IV(scripting_context)))); XPUSHs(sv_2mortal(newSVuv(cpu))); XPUSHs(sv_2mortal(newSVuv(nsecs))); XPUSHs(sv_2mortal(newSViv(pid))); XPUSHs(sv_2mortal(newSVpv(comm, 0))); call_pv("main::trace_unhandled", G_SCALAR); } SPAGAIN; PUTBACK; FREETMPS; LEAVE; } static void run_start_sub(void) { dSP; /* access to Perl stack */ PUSHMARK(SP); if (get_cv("main::trace_begin", 0)) call_pv("main::trace_begin", G_DISCARD | G_NOARGS); } /* * Start trace script */ static int perl_start_script(const char *script, int argc, const char **argv) { const char **command_line; int i, err = 0; command_line = malloc((argc + 2) * sizeof(const char *)); command_line[0] = ""; command_line[1] = script; for (i = 2; i < argc + 2; i++) command_line[i] = 
argv[i - 2]; my_perl = perl_alloc(); perl_construct(my_perl); if (perl_parse(my_perl, xs_init, argc + 2, (char **)command_line, (char **)NULL)) { err = -1; goto error; } if (perl_run(my_perl)) { err = -1; goto error; } if (SvTRUE(ERRSV)) { err = -1; goto error; } run_start_sub(); free(command_line); return 0; error: perl_free(my_perl); free(command_line); return err; } /* * Stop trace script */ static int perl_stop_script(void) { dSP; /* access to Perl stack */ PUSHMARK(SP); if (get_cv("main::trace_end", 0)) call_pv("main::trace_end", G_DISCARD | G_NOARGS); perl_destruct(my_perl); perl_free(my_perl); return 0; } static int perl_generate_script(const char *outfile) { struct event *event = NULL; struct format_field *f; char fname[PATH_MAX]; int not_first, count; FILE *ofp; sprintf(fname, "%s.pl", outfile); ofp = fopen(fname, "w"); if (ofp == NULL) { fprintf(stderr, "couldn't open %s\n", fname); return -1; } fprintf(ofp, "# perf script event handlers, " "generated by perf script -g perl\n"); fprintf(ofp, "# Licensed under the terms of the GNU GPL" " License version 2\n\n"); fprintf(ofp, "# The common_* event handler fields are the most useful " "fields common to\n"); fprintf(ofp, "# all events. They don't necessarily correspond to " "the 'common_*' fields\n"); fprintf(ofp, "# in the format files. 
Those fields not available as " "handler params can\n"); fprintf(ofp, "# be retrieved using Perl functions of the form " "common_*($context).\n"); fprintf(ofp, "# See Context.pm for the list of available " "functions.\n\n"); fprintf(ofp, "use lib \"$ENV{'PERF_EXEC_PATH'}/scripts/perl/" "Perf-Trace-Util/lib\";\n"); fprintf(ofp, "use lib \"./Perf-Trace-Util/lib\";\n"); fprintf(ofp, "use Perf::Trace::Core;\n"); fprintf(ofp, "use Perf::Trace::Context;\n"); fprintf(ofp, "use Perf::Trace::Util;\n\n"); fprintf(ofp, "sub trace_begin\n{\n\t# optional\n}\n\n"); fprintf(ofp, "sub trace_end\n{\n\t# optional\n}\n\n"); while ((event = trace_find_next_event(event))) { fprintf(ofp, "sub %s::%s\n{\n", event->system, event->name); fprintf(ofp, "\tmy ("); fprintf(ofp, "$event_name, "); fprintf(ofp, "$context, "); fprintf(ofp, "$common_cpu, "); fprintf(ofp, "$common_secs, "); fprintf(ofp, "$common_nsecs,\n"); fprintf(ofp, "\t $common_pid, "); fprintf(ofp, "$common_comm,\n\t "); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) fprintf(ofp, "\n\t "); fprintf(ofp, "$%s", f->name); } fprintf(ofp, ") = @_;\n\n"); fprintf(ofp, "\tprint_header($event_name, $common_cpu, " "$common_secs, $common_nsecs,\n\t " "$common_pid, $common_comm);\n\n"); fprintf(ofp, "\tprintf(\""); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (count && count % 4 == 0) { fprintf(ofp, "\".\n\t \""); } count++; fprintf(ofp, "%s=", f->name); if (f->flags & FIELD_IS_STRING || f->flags & FIELD_IS_FLAG || f->flags & FIELD_IS_SYMBOLIC) fprintf(ofp, "%%s"); else if (f->flags & FIELD_IS_SIGNED) fprintf(ofp, "%%d"); else fprintf(ofp, "%%u"); } fprintf(ofp, "\\n\",\n\t "); not_first = 0; count = 0; for (f = event->format.fields; f; f = f->next) { if (not_first++) fprintf(ofp, ", "); if (++count % 5 == 0) fprintf(ofp, "\n\t "); if (f->flags & FIELD_IS_FLAG) { if ((count - 1) % 5 
!= 0) { fprintf(ofp, "\n\t "); count = 4; } fprintf(ofp, "flag_str(\""); fprintf(ofp, "%s::%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", $%s)", f->name, f->name); } else if (f->flags & FIELD_IS_SYMBOLIC) { if ((count - 1) % 5 != 0) { fprintf(ofp, "\n\t "); count = 4; } fprintf(ofp, "symbol_str(\""); fprintf(ofp, "%s::%s\", ", event->system, event->name); fprintf(ofp, "\"%s\", $%s)", f->name, f->name); } else fprintf(ofp, "$%s", f->name); } fprintf(ofp, ");\n"); fprintf(ofp, "}\n\n"); } fprintf(ofp, "sub trace_unhandled\n{\n\tmy ($event_name, $context, " "$common_cpu, $common_secs, $common_nsecs,\n\t " "$common_pid, $common_comm) = @_;\n\n"); fprintf(ofp, "\tprint_header($event_name, $common_cpu, " "$common_secs, $common_nsecs,\n\t $common_pid, " "$common_comm);\n}\n\n"); fprintf(ofp, "sub print_header\n{\n" "\tmy ($event_name, $cpu, $secs, $nsecs, $pid, $comm) = @_;\n\n" "\tprintf(\"%%-20s %%5u %%05u.%%09u %%8u %%-20s \",\n\t " "$event_name, $cpu, $secs, $nsecs, $pid, $comm);\n}"); fclose(ofp); fprintf(stderr, "generated Perl script: %s\n", fname); return 0; } struct scripting_ops perl_scripting_ops = { .name = "Perl", .start_script = perl_start_script, .stop_script = perl_stop_script, .process_event = perl_process_event, .generate_script = perl_generate_script, };
gpl-2.0
jeboo/kernel_JB_i337_ATT_MK2
drivers/block/nbd.c
3106
20995
/* * Network block device - make block devices work over TCP * * Note that you can not swap over this thing, yet. Seems to work but * deadlocks sometimes - you can not swap over TCP in general. * * Copyright 1997-2000, 2008 Pavel Machek <pavel@ucw.cz> * Parts copyright 2001 Steven Whitehouse <steve@chygwyn.com> * * This file is released under GPLv2 or later. * * (part of code stolen from loop.c) */ #include <linux/major.h> #include <linux/blkdev.h> #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/bio.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/file.h> #include <linux/ioctl.h> #include <linux/mutex.h> #include <linux/compiler.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/net.h> #include <linux/kthread.h> #include <asm/uaccess.h> #include <asm/types.h> #include <linux/nbd.h> #define NBD_MAGIC 0x68797548 #ifdef NDEBUG #define dprintk(flags, fmt...) #else /* NDEBUG */ #define dprintk(flags, fmt...) do { \ if (debugflags & (flags)) printk(KERN_DEBUG fmt); \ } while (0) #define DBG_IOCTL 0x0004 #define DBG_INIT 0x0010 #define DBG_EXIT 0x0020 #define DBG_BLKDEV 0x0100 #define DBG_RX 0x0200 #define DBG_TX 0x0400 static unsigned int debugflags; #endif /* NDEBUG */ static unsigned int nbds_max = 16; static struct nbd_device *nbd_dev; static int max_part; /* * Use just one lock (or at most 1 per NIC). Two arguments for this: * 1. Each NIC is essentially a synchronization point for all servers * accessed through that NIC so there's no need to have more locks * than NICs anyway. * 2. More locks lead to more "Dirty cache line bouncing" which will slow * down each lock to the point where they're actually slower than just * a single lock. * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this! 
*/ static DEFINE_SPINLOCK(nbd_lock); #ifndef NDEBUG static const char *ioctl_cmd_to_ascii(int cmd) { switch (cmd) { case NBD_SET_SOCK: return "set-sock"; case NBD_SET_BLKSIZE: return "set-blksize"; case NBD_SET_SIZE: return "set-size"; case NBD_DO_IT: return "do-it"; case NBD_CLEAR_SOCK: return "clear-sock"; case NBD_CLEAR_QUE: return "clear-que"; case NBD_PRINT_DEBUG: return "print-debug"; case NBD_SET_SIZE_BLOCKS: return "set-size-blocks"; case NBD_DISCONNECT: return "disconnect"; case BLKROSET: return "set-read-only"; case BLKFLSBUF: return "flush-buffer-cache"; } return "unknown"; } static const char *nbdcmd_to_ascii(int cmd) { switch (cmd) { case NBD_CMD_READ: return "read"; case NBD_CMD_WRITE: return "write"; case NBD_CMD_DISC: return "disconnect"; } return "invalid"; } #endif /* NDEBUG */ static void nbd_end_request(struct request *req) { int error = req->errors ? -EIO : 0; struct request_queue *q = req->q; unsigned long flags; dprintk(DBG_BLKDEV, "%s: request %p: %s\n", req->rq_disk->disk_name, req, error ? "failed" : "done"); spin_lock_irqsave(q->queue_lock, flags); __blk_end_request_all(req, error); spin_unlock_irqrestore(q->queue_lock, flags); } static void sock_shutdown(struct nbd_device *nbd, int lock) { /* Forcibly shutdown the socket causing all listeners * to error * * FIXME: This code is duplicated from sys_shutdown, but * there should be a more generic interface rather than * calling socket ops directly here */ if (lock) mutex_lock(&nbd->tx_lock); if (nbd->sock) { dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n"); kernel_sock_shutdown(nbd->sock, SHUT_RDWR); nbd->sock = NULL; } if (lock) mutex_unlock(&nbd->tx_lock); } static void nbd_xmit_timeout(unsigned long arg) { struct task_struct *task = (struct task_struct *)arg; printk(KERN_WARNING "nbd: killing hung xmit (%s, pid: %d)\n", task->comm, task->pid); force_sig(SIGKILL, task); } /* * Send or receive packet. 
*/ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, int msg_flags) { struct socket *sock = nbd->sock; int result; struct msghdr msg; struct kvec iov; sigset_t blocked, oldset; if (unlikely(!sock)) { dev_err(disk_to_dev(nbd->disk), "Attempted %s on closed socket in sock_xmit\n", (send ? "send" : "recv")); return -EINVAL; } /* Allow interception of SIGKILL only * Don't allow other signals to interrupt the transmission */ siginitsetinv(&blocked, sigmask(SIGKILL)); sigprocmask(SIG_SETMASK, &blocked, &oldset); do { sock->sk->sk_allocation = GFP_NOIO; iov.iov_base = buf; iov.iov_len = size; msg.msg_name = NULL; msg.msg_namelen = 0; msg.msg_control = NULL; msg.msg_controllen = 0; msg.msg_flags = msg_flags | MSG_NOSIGNAL; if (send) { struct timer_list ti; if (nbd->xmit_timeout) { init_timer(&ti); ti.function = nbd_xmit_timeout; ti.data = (unsigned long)current; ti.expires = jiffies + nbd->xmit_timeout; add_timer(&ti); } result = kernel_sendmsg(sock, &msg, &iov, 1, size); if (nbd->xmit_timeout) del_timer_sync(&ti); } else result = kernel_recvmsg(sock, &msg, &iov, 1, size, msg.msg_flags); if (signal_pending(current)) { siginfo_t info; printk(KERN_WARNING "nbd (pid %d: %s) got signal %d\n", task_pid_nr(current), current->comm, dequeue_signal_lock(current, &current->blocked, &info)); result = -EINTR; sock_shutdown(nbd, !send); break; } if (result <= 0) { if (result == 0) result = -EPIPE; /* short read */ break; } size -= result; buf += result; } while (size > 0); sigprocmask(SIG_SETMASK, &oldset, NULL); return result; } static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec, int flags) { int result; void *kaddr = kmap(bvec->bv_page); result = sock_xmit(nbd, 1, kaddr + bvec->bv_offset, bvec->bv_len, flags); kunmap(bvec->bv_page); return result; } /* always call with the tx_lock held */ static int nbd_send_req(struct nbd_device *nbd, struct request *req) { int result, flags; struct nbd_request request; unsigned long size = 
blk_rq_bytes(req); request.magic = htonl(NBD_REQUEST_MAGIC); request.type = htonl(nbd_cmd(req)); request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); request.len = htonl(size); memcpy(request.handle, &req, sizeof(req)); dprintk(DBG_TX, "%s: request %p: sending control (%s@%llu,%uB)\n", nbd->disk->disk_name, req, nbdcmd_to_ascii(nbd_cmd(req)), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); result = sock_xmit(nbd, 1, &request, sizeof(request), (nbd_cmd(req) == NBD_CMD_WRITE) ? MSG_MORE : 0); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send control failed (result %d)\n", result); goto error_out; } if (nbd_cmd(req) == NBD_CMD_WRITE) { struct req_iterator iter; struct bio_vec *bvec; /* * we are really probing at internals to determine * whether to set MSG_MORE or not... */ rq_for_each_segment(bvec, req, iter) { flags = 0; if (!rq_iter_last(req, iter)) flags = MSG_MORE; dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", nbd->disk->disk_name, req, bvec->bv_len); result = sock_send_bvec(nbd, bvec, flags); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Send data failed (result %d)\n", result); goto error_out; } } } return 0; error_out: return -EIO; } static struct request *nbd_find_request(struct nbd_device *nbd, struct request *xreq) { struct request *req, *tmp; int err; err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq); if (unlikely(err)) goto out; spin_lock(&nbd->queue_lock); list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) { if (req != xreq) continue; list_del_init(&req->queuelist); spin_unlock(&nbd->queue_lock); return req; } spin_unlock(&nbd->queue_lock); err = -ENOENT; out: return ERR_PTR(err); } static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec) { int result; void *kaddr = kmap(bvec->bv_page); result = sock_xmit(nbd, 0, kaddr + bvec->bv_offset, bvec->bv_len, MSG_WAITALL); kunmap(bvec->bv_page); return result; } /* NULL returned = something went wrong, inform 
userspace */ static struct request *nbd_read_stat(struct nbd_device *nbd) { int result; struct nbd_reply reply; struct request *req; reply.magic = 0; result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive control failed (result %d)\n", result); goto harderror; } if (ntohl(reply.magic) != NBD_REPLY_MAGIC) { dev_err(disk_to_dev(nbd->disk), "Wrong magic (0x%lx)\n", (unsigned long)ntohl(reply.magic)); result = -EPROTO; goto harderror; } req = nbd_find_request(nbd, *(struct request **)reply.handle); if (IS_ERR(req)) { result = PTR_ERR(req); if (result != -ENOENT) goto harderror; dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n", reply.handle); result = -EBADR; goto harderror; } if (ntohl(reply.error)) { dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", ntohl(reply.error)); req->errors++; return req; } dprintk(DBG_RX, "%s: request %p: got reply\n", nbd->disk->disk_name, req); if (nbd_cmd(req) == NBD_CMD_READ) { struct req_iterator iter; struct bio_vec *bvec; rq_for_each_segment(bvec, req, iter) { result = sock_recv_bvec(nbd, bvec); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); req->errors++; return req; } dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", nbd->disk->disk_name, req, bvec->bv_len); } } return req; harderror: nbd->harderror = result; return NULL; } static ssize_t pid_show(struct device *dev, struct device_attribute *attr, char *buf) { struct gendisk *disk = dev_to_disk(dev); return sprintf(buf, "%ld\n", (long) ((struct nbd_device *)disk->private_data)->pid); } static struct device_attribute pid_attr = { .attr = { .name = "pid", .mode = S_IRUGO}, .show = pid_show, }; static int nbd_do_it(struct nbd_device *nbd) { struct request *req; int ret; BUG_ON(nbd->magic != NBD_MAGIC); nbd->pid = task_pid_nr(current); ret = device_create_file(disk_to_dev(nbd->disk), &pid_attr); if (ret) { dev_err(disk_to_dev(nbd->disk), 
"device_create_file failed!\n"); nbd->pid = 0; return ret; } while ((req = nbd_read_stat(nbd)) != NULL) nbd_end_request(req); device_remove_file(disk_to_dev(nbd->disk), &pid_attr); nbd->pid = 0; return 0; } static void nbd_clear_que(struct nbd_device *nbd) { struct request *req; BUG_ON(nbd->magic != NBD_MAGIC); /* * Because we have set nbd->sock to NULL under the tx_lock, all * modifications to the list must have completed by now. For * the same reason, the active_req must be NULL. * * As a consequence, we don't need to take the spin lock while * purging the list here. */ BUG_ON(nbd->sock); BUG_ON(nbd->active_req); while (!list_empty(&nbd->queue_head)) { req = list_entry(nbd->queue_head.next, struct request, queuelist); list_del_init(&req->queuelist); req->errors++; nbd_end_request(req); } } static void nbd_handle_req(struct nbd_device *nbd, struct request *req) { if (req->cmd_type != REQ_TYPE_FS) goto error_out; nbd_cmd(req) = NBD_CMD_READ; if (rq_data_dir(req) == WRITE) { nbd_cmd(req) = NBD_CMD_WRITE; if (nbd->flags & NBD_READ_ONLY) { dev_err(disk_to_dev(nbd->disk), "Write on read-only\n"); goto error_out; } } req->errors = 0; mutex_lock(&nbd->tx_lock); if (unlikely(!nbd->sock)) { mutex_unlock(&nbd->tx_lock); dev_err(disk_to_dev(nbd->disk), "Attempted send on closed socket\n"); goto error_out; } nbd->active_req = req; if (nbd_send_req(nbd, req) != 0) { dev_err(disk_to_dev(nbd->disk), "Request send failed\n"); req->errors++; nbd_end_request(req); } else { spin_lock(&nbd->queue_lock); list_add(&req->queuelist, &nbd->queue_head); spin_unlock(&nbd->queue_lock); } nbd->active_req = NULL; mutex_unlock(&nbd->tx_lock); wake_up_all(&nbd->active_wq); return; error_out: req->errors++; nbd_end_request(req); } static int nbd_thread(void *data) { struct nbd_device *nbd = data; struct request *req; set_user_nice(current, -20); while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { /* wait for something to do */ wait_event_interruptible(nbd->waiting_wq, 
kthread_should_stop() || !list_empty(&nbd->waiting_queue)); /* extract request */ if (list_empty(&nbd->waiting_queue)) continue; spin_lock_irq(&nbd->queue_lock); req = list_entry(nbd->waiting_queue.next, struct request, queuelist); list_del_init(&req->queuelist); spin_unlock_irq(&nbd->queue_lock); /* handle request */ nbd_handle_req(nbd, req); } return 0; } /* * We always wait for result of write, for now. It would be nice to make it optional * in future * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK)) * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } */ static void do_nbd_request(struct request_queue *q) { struct request *req; while ((req = blk_fetch_request(q)) != NULL) { struct nbd_device *nbd; spin_unlock_irq(q->queue_lock); dprintk(DBG_BLKDEV, "%s: request %p: dequeued (flags=%x)\n", req->rq_disk->disk_name, req, req->cmd_type); nbd = req->rq_disk->private_data; BUG_ON(nbd->magic != NBD_MAGIC); if (unlikely(!nbd->sock)) { dev_err(disk_to_dev(nbd->disk), "Attempted send on closed socket\n"); req->errors++; nbd_end_request(req); spin_lock_irq(q->queue_lock); continue; } spin_lock_irq(&nbd->queue_lock); list_add_tail(&req->queuelist, &nbd->waiting_queue); spin_unlock_irq(&nbd->queue_lock); wake_up(&nbd->waiting_wq); spin_lock_irq(q->queue_lock); } } /* Must be called with tx_lock held */ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, unsigned int cmd, unsigned long arg) { switch (cmd) { case NBD_DISCONNECT: { struct request sreq; dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); blk_rq_init(NULL, &sreq); sreq.cmd_type = REQ_TYPE_SPECIAL; nbd_cmd(&sreq) = NBD_CMD_DISC; if (!nbd->sock) return -EINVAL; nbd_send_req(nbd, &sreq); return 0; } case NBD_CLEAR_SOCK: { struct file *file; nbd->sock = NULL; file = nbd->file; nbd->file = NULL; nbd_clear_que(nbd); BUG_ON(!list_empty(&nbd->queue_head)); if (file) fput(file); return 0; } case NBD_SET_SOCK: { struct file *file; if (nbd->file) return -EBUSY; 
file = fget(arg); if (file) { struct inode *inode = file->f_path.dentry->d_inode; if (S_ISSOCK(inode->i_mode)) { nbd->file = file; nbd->sock = SOCKET_I(inode); if (max_part > 0) bdev->bd_invalidated = 1; return 0; } else { fput(file); } } return -EINVAL; } case NBD_SET_BLKSIZE: nbd->blksize = arg; nbd->bytesize &= ~(nbd->blksize-1); bdev->bd_inode->i_size = nbd->bytesize; set_blocksize(bdev, nbd->blksize); set_capacity(nbd->disk, nbd->bytesize >> 9); return 0; case NBD_SET_SIZE: nbd->bytesize = arg & ~(nbd->blksize-1); bdev->bd_inode->i_size = nbd->bytesize; set_blocksize(bdev, nbd->blksize); set_capacity(nbd->disk, nbd->bytesize >> 9); return 0; case NBD_SET_TIMEOUT: nbd->xmit_timeout = arg * HZ; return 0; case NBD_SET_SIZE_BLOCKS: nbd->bytesize = ((u64) arg) * nbd->blksize; bdev->bd_inode->i_size = nbd->bytesize; set_blocksize(bdev, nbd->blksize); set_capacity(nbd->disk, nbd->bytesize >> 9); return 0; case NBD_DO_IT: { struct task_struct *thread; struct file *file; int error; if (nbd->pid) return -EBUSY; if (!nbd->file) return -EINVAL; mutex_unlock(&nbd->tx_lock); thread = kthread_create(nbd_thread, nbd, nbd->disk->disk_name); if (IS_ERR(thread)) { mutex_lock(&nbd->tx_lock); return PTR_ERR(thread); } wake_up_process(thread); error = nbd_do_it(nbd); kthread_stop(thread); mutex_lock(&nbd->tx_lock); if (error) return error; sock_shutdown(nbd, 0); file = nbd->file; nbd->file = NULL; nbd_clear_que(nbd); dev_warn(disk_to_dev(nbd->disk), "queue cleared\n"); if (file) fput(file); nbd->bytesize = 0; bdev->bd_inode->i_size = 0; set_capacity(nbd->disk, 0); if (max_part > 0) ioctl_by_bdev(bdev, BLKRRPART, 0); return nbd->harderror; } case NBD_CLEAR_QUE: /* * This is for compatibility only. The queue is always cleared * by NBD_DO_IT or NBD_CLEAR_SOCK. 
*/ BUG_ON(!nbd->sock && !list_empty(&nbd->queue_head)); return 0; case NBD_PRINT_DEBUG: dev_info(disk_to_dev(nbd->disk), "next = %p, prev = %p, head = %p\n", nbd->queue_head.next, nbd->queue_head.prev, &nbd->queue_head); return 0; } return -ENOTTY; } static int nbd_ioctl(struct block_device *bdev, fmode_t mode, unsigned int cmd, unsigned long arg) { struct nbd_device *nbd = bdev->bd_disk->private_data; int error; if (!capable(CAP_SYS_ADMIN)) return -EPERM; BUG_ON(nbd->magic != NBD_MAGIC); /* Anyone capable of this syscall can do *real bad* things */ dprintk(DBG_IOCTL, "%s: nbd_ioctl cmd=%s(0x%x) arg=%lu\n", nbd->disk->disk_name, ioctl_cmd_to_ascii(cmd), cmd, arg); mutex_lock(&nbd->tx_lock); error = __nbd_ioctl(bdev, nbd, cmd, arg); mutex_unlock(&nbd->tx_lock); return error; } static const struct block_device_operations nbd_fops = { .owner = THIS_MODULE, .ioctl = nbd_ioctl, }; /* * And here should be modules and kernel interface * (Just smiley confuses emacs :-) */ static int __init nbd_init(void) { int err = -ENOMEM; int i; int part_shift; BUILD_BUG_ON(sizeof(struct nbd_request) != 28); if (max_part < 0) { printk(KERN_ERR "nbd: max_part must be >= 0\n"); return -EINVAL; } nbd_dev = kcalloc(nbds_max, sizeof(*nbd_dev), GFP_KERNEL); if (!nbd_dev) return -ENOMEM; part_shift = 0; if (max_part > 0) { part_shift = fls(max_part); /* * Adjust max_part according to part_shift as it is exported * to user space so that user can know the max number of * partition kernel should be able to manage. * * Note that -1 is required because partition 0 is reserved * for the whole disk. 
*/ max_part = (1UL << part_shift) - 1; } if ((1UL << part_shift) > DISK_MAX_PARTS) return -EINVAL; if (nbds_max > 1UL << (MINORBITS - part_shift)) return -EINVAL; for (i = 0; i < nbds_max; i++) { struct gendisk *disk = alloc_disk(1 << part_shift); if (!disk) goto out; nbd_dev[i].disk = disk; /* * The new linux 2.5 block layer implementation requires * every gendisk to have its very own request_queue struct. * These structs are big so we dynamically allocate them. */ disk->queue = blk_init_queue(do_nbd_request, &nbd_lock); if (!disk->queue) { put_disk(disk); goto out; } /* * Tell the block layer that we are not a rotational device */ queue_flag_set_unlocked(QUEUE_FLAG_NONROT, disk->queue); } if (register_blkdev(NBD_MAJOR, "nbd")) { err = -EIO; goto out; } printk(KERN_INFO "nbd: registered device at major %d\n", NBD_MAJOR); dprintk(DBG_INIT, "nbd: debugflags=0x%x\n", debugflags); for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].file = NULL; nbd_dev[i].magic = NBD_MAGIC; nbd_dev[i].flags = 0; INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); spin_lock_init(&nbd_dev[i].queue_lock); INIT_LIST_HEAD(&nbd_dev[i].queue_head); mutex_init(&nbd_dev[i].tx_lock); init_waitqueue_head(&nbd_dev[i].active_wq); init_waitqueue_head(&nbd_dev[i].waiting_wq); nbd_dev[i].blksize = 1024; nbd_dev[i].bytesize = 0; disk->major = NBD_MAJOR; disk->first_minor = i << part_shift; disk->fops = &nbd_fops; disk->private_data = &nbd_dev[i]; sprintf(disk->disk_name, "nbd%d", i); set_capacity(disk, 0); add_disk(disk); } return 0; out: while (i--) { blk_cleanup_queue(nbd_dev[i].disk->queue); put_disk(nbd_dev[i].disk); } kfree(nbd_dev); return err; } static void __exit nbd_cleanup(void) { int i; for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].magic = 0; if (disk) { del_gendisk(disk); blk_cleanup_queue(disk->queue); put_disk(disk); } } unregister_blkdev(NBD_MAJOR, "nbd"); kfree(nbd_dev); printk(KERN_INFO "nbd: unregistered device at major 
%d\n", NBD_MAJOR); } module_init(nbd_init); module_exit(nbd_cleanup); MODULE_DESCRIPTION("Network Block Device"); MODULE_LICENSE("GPL"); module_param(nbds_max, int, 0444); MODULE_PARM_DESC(nbds_max, "number of network block devices to initialize (default: 16)"); module_param(max_part, int, 0444); MODULE_PARM_DESC(max_part, "number of partitions per device (default: 0)"); #ifndef NDEBUG module_param(debugflags, int, 0644); MODULE_PARM_DESC(debugflags, "flags for controlling debug output"); #endif
gpl-2.0
ALabate/linux-asus-T200TA
drivers/ide/alim15x3.c
4642
15217
/* * Copyright (C) 1998-2000 Michel Aubry, Maintainer * Copyright (C) 1998-2000 Andrzej Krzysztofowicz, Maintainer * Copyright (C) 1999-2000 CJ, cjtsai@ali.com.tw, Maintainer * * Copyright (C) 1998-2000 Andre Hedrick (andre@linux-ide.org) * May be copied or modified under the terms of the GNU General Public License * Copyright (C) 2002 Alan Cox * ALi (now ULi M5228) support by Clear Zhang <Clear.Zhang@ali.com.tw> * Copyright (C) 2007 MontaVista Software, Inc. <source@mvista.com> * Copyright (C) 2007-2010 Bartlomiej Zolnierkiewicz * * (U)DMA capable version of ali 1533/1543(C), 1535(D) * ********************************************************************** * 9/7/99 --Parts from the above author are included and need to be * converted into standard interface, once I finish the thought. * * Recent changes * Don't use LBA48 mode on ALi <= 0xC4 * Don't poke 0x79 with a non ALi northbridge * Don't flip undefined bits on newer chipsets (fix Fujitsu laptop hang) * Allow UDMA6 on revisions > 0xC4 * * Documentation * Chipset documentation available under NDA only * */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/pci.h> #include <linux/ide.h> #include <linux/init.h> #include <linux/dmi.h> #include <asm/io.h> #define DRV_NAME "alim15x3" /* * ALi devices are not plug in. Otherwise these static values would * need to go. 
They ought to go away anyway */ static u8 m5229_revision; static u8 chip_is_1543c_e; static struct pci_dev *isa_dev; static void ali_fifo_control(ide_hwif_t *hwif, ide_drive_t *drive, int on) { struct pci_dev *pdev = to_pci_dev(hwif->dev); int pio_fifo = 0x54 + hwif->channel; u8 fifo; int shift = 4 * (drive->dn & 1); pci_read_config_byte(pdev, pio_fifo, &fifo); fifo &= ~(0x0F << shift); fifo |= (on << shift); pci_write_config_byte(pdev, pio_fifo, fifo); } static void ali_program_timings(ide_hwif_t *hwif, ide_drive_t *drive, struct ide_timing *t, u8 ultra) { struct pci_dev *dev = to_pci_dev(hwif->dev); int port = hwif->channel ? 0x5c : 0x58; int udmat = 0x56 + hwif->channel; u8 unit = drive->dn & 1, udma; int shift = 4 * unit; /* Set up the UDMA */ pci_read_config_byte(dev, udmat, &udma); udma &= ~(0x0F << shift); udma |= ultra << shift; pci_write_config_byte(dev, udmat, udma); if (t == NULL) return; t->setup = clamp_val(t->setup, 1, 8) & 7; t->act8b = clamp_val(t->act8b, 1, 8) & 7; t->rec8b = clamp_val(t->rec8b, 1, 16) & 15; t->active = clamp_val(t->active, 1, 8) & 7; t->recover = clamp_val(t->recover, 1, 16) & 15; pci_write_config_byte(dev, port, t->setup); pci_write_config_byte(dev, port + 1, (t->act8b << 4) | t->rec8b); pci_write_config_byte(dev, port + unit + 2, (t->active << 4) | t->recover); } /** * ali_set_pio_mode - set host controller for PIO mode * @hwif: port * @drive: drive * * Program the controller for the given PIO mode. */ static void ali_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive) { ide_drive_t *pair = ide_get_pair_dev(drive); int bus_speed = ide_pci_clk ? 
ide_pci_clk : 33; unsigned long T = 1000000 / bus_speed; /* PCI clock based */ struct ide_timing t; ide_timing_compute(drive, drive->pio_mode, &t, T, 1); if (pair) { struct ide_timing p; ide_timing_compute(pair, pair->pio_mode, &p, T, 1); ide_timing_merge(&p, &t, &t, IDE_TIMING_SETUP | IDE_TIMING_8BIT); if (pair->dma_mode) { ide_timing_compute(pair, pair->dma_mode, &p, T, 1); ide_timing_merge(&p, &t, &t, IDE_TIMING_SETUP | IDE_TIMING_8BIT); } } /* * PIO mode => ATA FIFO on, ATAPI FIFO off */ ali_fifo_control(hwif, drive, (drive->media == ide_disk) ? 0x05 : 0x00); ali_program_timings(hwif, drive, &t, 0); } /** * ali_udma_filter - compute UDMA mask * @drive: IDE device * * Return available UDMA modes. * * The actual rules for the ALi are: * No UDMA on revisions <= 0x20 * Disk only for revisions < 0xC2 * Not WDC drives on M1543C-E (?) */ static u8 ali_udma_filter(ide_drive_t *drive) { if (m5229_revision > 0x20 && m5229_revision < 0xC2) { if (drive->media != ide_disk) return 0; if (chip_is_1543c_e && strstr((char *)&drive->id[ATA_ID_PROD], "WDC ")) return 0; } return drive->hwif->ultra_mask; } /** * ali_set_dma_mode - set host controller for DMA mode * @hwif: port * @drive: drive * * Configure the hardware for the desired IDE transfer mode. */ static void ali_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive) { static u8 udma_timing[7] = { 0xC, 0xB, 0xA, 0x9, 0x8, 0xF, 0xD }; struct pci_dev *dev = to_pci_dev(hwif->dev); ide_drive_t *pair = ide_get_pair_dev(drive); int bus_speed = ide_pci_clk ? 
ide_pci_clk : 33; unsigned long T = 1000000 / bus_speed; /* PCI clock based */ const u8 speed = drive->dma_mode; u8 tmpbyte = 0x00; struct ide_timing t; if (speed < XFER_UDMA_0) { ide_timing_compute(drive, drive->dma_mode, &t, T, 1); if (pair) { struct ide_timing p; ide_timing_compute(pair, pair->pio_mode, &p, T, 1); ide_timing_merge(&p, &t, &t, IDE_TIMING_SETUP | IDE_TIMING_8BIT); if (pair->dma_mode) { ide_timing_compute(pair, pair->dma_mode, &p, T, 1); ide_timing_merge(&p, &t, &t, IDE_TIMING_SETUP | IDE_TIMING_8BIT); } } ali_program_timings(hwif, drive, &t, 0); } else { ali_program_timings(hwif, drive, NULL, udma_timing[speed - XFER_UDMA_0]); if (speed >= XFER_UDMA_3) { pci_read_config_byte(dev, 0x4b, &tmpbyte); tmpbyte |= 1; pci_write_config_byte(dev, 0x4b, tmpbyte); } } } /** * ali_dma_check - DMA check * @drive: target device * @cmd: command * * Returns 1 if the DMA cannot be performed, zero on success. */ static int ali_dma_check(ide_drive_t *drive, struct ide_cmd *cmd) { if (m5229_revision < 0xC2 && drive->media != ide_disk) { if (cmd->tf_flags & IDE_TFLAG_WRITE) return 1; /* try PIO instead of DMA */ } return 0; } /** * init_chipset_ali15x3 - Initialise an ALi IDE controller * @dev: PCI device * * This function initializes the ALI IDE controller and where * appropriate also sets up the 1533 southbridge. 
*/ static int init_chipset_ali15x3(struct pci_dev *dev) { unsigned long flags; u8 tmpbyte; struct pci_dev *north = pci_get_slot(dev->bus, PCI_DEVFN(0,0)); m5229_revision = dev->revision; isa_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); local_irq_save(flags); if (m5229_revision < 0xC2) { /* * revision 0x20 (1543-E, 1543-F) * revision 0xC0, 0xC1 (1543C-C, 1543C-D, 1543C-E) * clear CD-ROM DMA write bit, m5229, 0x4b, bit 7 */ pci_read_config_byte(dev, 0x4b, &tmpbyte); /* * clear bit 7 */ pci_write_config_byte(dev, 0x4b, tmpbyte & 0x7F); /* * check m1533, 0x5e, bit 1~4 == 1001 => & 00011110 = 00010010 */ if (m5229_revision >= 0x20 && isa_dev) { pci_read_config_byte(isa_dev, 0x5e, &tmpbyte); chip_is_1543c_e = ((tmpbyte & 0x1e) == 0x12) ? 1: 0; } goto out; } /* * 1543C-B?, 1535, 1535D, 1553 * Note 1: not all "motherboard" support this detection * Note 2: if no udma 66 device, the detection may "error". * but in this case, we will not set the device to * ultra 66, the detection result is not important */ /* * enable "Cable Detection", m5229, 0x4b, bit3 */ pci_read_config_byte(dev, 0x4b, &tmpbyte); pci_write_config_byte(dev, 0x4b, tmpbyte | 0x08); /* * We should only tune the 1533 enable if we are using an ALi * North bridge. We might have no north found on some zany * box without a device at 0:0.0. The ALi bridge will be at * 0:0.0 so if we didn't find one we know what is cooking. */ if (north && north->vendor != PCI_VENDOR_ID_AL) goto out; if (m5229_revision < 0xC5 && isa_dev) { /* * set south-bridge's enable bit, m1533, 0x79 */ pci_read_config_byte(isa_dev, 0x79, &tmpbyte); if (m5229_revision == 0xC2) { /* * 1543C-B0 (m1533, 0x79, bit 2) */ pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x04); } else if (m5229_revision >= 0xC3) { /* * 1553/1535 (m1533, 0x79, bit 1) */ pci_write_config_byte(isa_dev, 0x79, tmpbyte | 0x02); } } out: /* * CD_ROM DMA on (m5229, 0x53, bit0) * Enable this bit even if we want to use PIO. 
* PIO FIFO off (m5229, 0x53, bit1) * The hardware will use 0x54h and 0x55h to control PIO FIFO. * (Not on later devices it seems) * * 0x53 changes meaning on later revs - we must no touch * bit 1 on them. Need to check if 0x20 is the right break. */ if (m5229_revision >= 0x20) { pci_read_config_byte(dev, 0x53, &tmpbyte); if (m5229_revision <= 0x20) tmpbyte = (tmpbyte & (~0x02)) | 0x01; else if (m5229_revision == 0xc7 || m5229_revision == 0xc8) tmpbyte |= 0x03; else tmpbyte |= 0x01; pci_write_config_byte(dev, 0x53, tmpbyte); } pci_dev_put(north); pci_dev_put(isa_dev); local_irq_restore(flags); return 0; } /* * Cable special cases */ static const struct dmi_system_id cable_dmi_table[] = { { .ident = "HP Pavilion N5430", .matches = { DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), DMI_MATCH(DMI_BOARD_VERSION, "OmniBook N32N-736"), }, }, { .ident = "Toshiba Satellite S1800-814", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), DMI_MATCH(DMI_PRODUCT_NAME, "S1800-814"), }, }, { } }; static int ali_cable_override(struct pci_dev *pdev) { /* Fujitsu P2000 */ if (pdev->subsystem_vendor == 0x10CF && pdev->subsystem_device == 0x10AF) return 1; /* Mitac 8317 (Winbook-A) and relatives */ if (pdev->subsystem_vendor == 0x1071 && pdev->subsystem_device == 0x8317) return 1; /* Systems by DMI */ if (dmi_check_system(cable_dmi_table)) return 1; return 0; } /** * ali_cable_detect - cable detection * @hwif: IDE interface * * This checks if the controller and the cable are capable * of UDMA66 transfers. It doesn't check the drives. */ static u8 ali_cable_detect(ide_hwif_t *hwif) { struct pci_dev *dev = to_pci_dev(hwif->dev); u8 cbl = ATA_CBL_PATA40, tmpbyte; if (m5229_revision >= 0xC2) { /* * m5229 80-pin cable detection (from Host View) * * 0x4a bit0 is 0 => primary channel has 80-pin * 0x4a bit1 is 0 => secondary channel has 80-pin * * Certain laptops use short but suitable cables * and don't implement the detect logic. 
*/ if (ali_cable_override(dev)) cbl = ATA_CBL_PATA40_SHORT; else { pci_read_config_byte(dev, 0x4a, &tmpbyte); if ((tmpbyte & (1 << hwif->channel)) == 0) cbl = ATA_CBL_PATA80; } } return cbl; } #ifndef CONFIG_SPARC64 /** * init_hwif_ali15x3 - Initialize the ALI IDE x86 stuff * @hwif: interface to configure * * Obtain the IRQ tables for an ALi based IDE solution on the PC * class platforms. This part of the code isn't applicable to the * Sparc systems. */ static void init_hwif_ali15x3(ide_hwif_t *hwif) { u8 ideic, inmir; s8 irq_routing_table[] = { -1, 9, 3, 10, 4, 5, 7, 6, 1, 11, 0, 12, 0, 14, 0, 15 }; int irq = -1; if (isa_dev) { /* * read IDE interface control */ pci_read_config_byte(isa_dev, 0x58, &ideic); /* bit0, bit1 */ ideic = ideic & 0x03; /* get IRQ for IDE Controller */ if ((hwif->channel && ideic == 0x03) || (!hwif->channel && !ideic)) { /* * get SIRQ1 routing table */ pci_read_config_byte(isa_dev, 0x44, &inmir); inmir = inmir & 0x0f; irq = irq_routing_table[inmir]; } else if (hwif->channel && !(ideic & 0x01)) { /* * get SIRQ2 routing table */ pci_read_config_byte(isa_dev, 0x75, &inmir); inmir = inmir & 0x0f; irq = irq_routing_table[inmir]; } if(irq >= 0) hwif->irq = irq; } } #else #define init_hwif_ali15x3 NULL #endif /* CONFIG_SPARC64 */ /** * init_dma_ali15x3 - set up DMA on ALi15x3 * @hwif: IDE interface * @d: IDE port info * * Set up the DMA functionality on the ALi 15x3. 
*/ static int init_dma_ali15x3(ide_hwif_t *hwif, const struct ide_port_info *d) { struct pci_dev *dev = to_pci_dev(hwif->dev); unsigned long base = ide_pci_dma_base(hwif, d); if (base == 0) return -1; hwif->dma_base = base; if (ide_pci_check_simplex(hwif, d) < 0) return -1; if (ide_pci_set_master(dev, d->name) < 0) return -1; if (!hwif->channel) outb(inb(base + 2) & 0x60, base + 2); printk(KERN_INFO " %s: BM-DMA at 0x%04lx-0x%04lx\n", hwif->name, base, base + 7); if (ide_allocate_dma_engine(hwif)) return -1; return 0; } static const struct ide_port_ops ali_port_ops = { .set_pio_mode = ali_set_pio_mode, .set_dma_mode = ali_set_dma_mode, .udma_filter = ali_udma_filter, .cable_detect = ali_cable_detect, }; static const struct ide_dma_ops ali_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ide_dma_start, .dma_end = ide_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_check = ali_dma_check, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = ide_dma_sff_read_status, }; static const struct ide_port_info ali15x3_chipset = { .name = DRV_NAME, .init_chipset = init_chipset_ali15x3, .init_hwif = init_hwif_ali15x3, .init_dma = init_dma_ali15x3, .port_ops = &ali_port_ops, .dma_ops = &sff_dma_ops, .pio_mask = ATA_PIO5, .swdma_mask = ATA_SWDMA2, .mwdma_mask = ATA_MWDMA2, }; /** * alim15x3_init_one - set up an ALi15x3 IDE controller * @dev: PCI device to set up * * Perform the actual set up for an ALi15x3 that has been found by the * hot plug layer. 
*/ static int alim15x3_init_one(struct pci_dev *dev, const struct pci_device_id *id) { struct ide_port_info d = ali15x3_chipset; u8 rev = dev->revision, idx = id->driver_data; /* don't use LBA48 DMA on ALi devices before rev 0xC5 */ if (rev <= 0xC4) d.host_flags |= IDE_HFLAG_NO_LBA48_DMA; if (rev >= 0x20) { if (rev == 0x20) d.host_flags |= IDE_HFLAG_NO_ATAPI_DMA; if (rev < 0xC2) d.udma_mask = ATA_UDMA2; else if (rev == 0xC2 || rev == 0xC3) d.udma_mask = ATA_UDMA4; else if (rev == 0xC4) d.udma_mask = ATA_UDMA5; else d.udma_mask = ATA_UDMA6; d.dma_ops = &ali_dma_ops; } else { d.host_flags |= IDE_HFLAG_NO_DMA; d.mwdma_mask = d.swdma_mask = 0; } if (idx == 0) d.host_flags |= IDE_HFLAG_CLEAR_SIMPLEX; return ide_pci_init_one(dev, &d, NULL); } static const struct pci_device_id alim15x3_pci_tbl[] = { { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5229), 0 }, { PCI_VDEVICE(AL, PCI_DEVICE_ID_AL_M5228), 1 }, { 0, }, }; MODULE_DEVICE_TABLE(pci, alim15x3_pci_tbl); static struct pci_driver alim15x3_pci_driver = { .name = "ALI15x3_IDE", .id_table = alim15x3_pci_tbl, .probe = alim15x3_init_one, .remove = ide_pci_remove, .suspend = ide_pci_suspend, .resume = ide_pci_resume, }; static int __init ali15x3_ide_init(void) { return ide_pci_register_driver(&alim15x3_pci_driver); } static void __exit ali15x3_ide_exit(void) { pci_unregister_driver(&alim15x3_pci_driver); } module_init(ali15x3_ide_init); module_exit(ali15x3_ide_exit); MODULE_AUTHOR("Michael Aubry, Andrzej Krzysztofowicz, CJ, Andre Hedrick, Alan Cox, Bartlomiej Zolnierkiewicz"); MODULE_DESCRIPTION("PCI driver module for ALi 15x3 IDE"); MODULE_LICENSE("GPL");
gpl-2.0
di11igaf/ace-kernel
sound/pci/emu10k1/voice.c
4642
4616
/* * Copyright (c) by Jaroslav Kysela <perex@perex.cz> * Creative Labs, Inc. * Lee Revell <rlrevell@joe-job.com> * Routines for control of EMU10K1 chips - voice manager * * Rewrote voice allocator for multichannel support - rlrevell 12/2004 * * BUGS: * -- * * TODO: * -- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/time.h> #include <sound/core.h> #include <sound/emu10k1.h> /* Previously the voice allocator started at 0 every time. The new voice * allocator uses a round robin scheme. The next free voice is tracked in * the card record and each allocation begins where the last left off. The * hardware requires stereo interleaved voices be aligned to an even/odd * boundary. For multichannel voice allocation we ensure than the block of * voices does not cross the 32 voice boundary. This simplifies the * multichannel support and ensures we can use a single write to the * (set|clear)_loop_stop registers. Otherwise (for example) the voices would * get out of sync when pausing/resuming a stream. 
* --rlrevell */ static int voice_alloc(struct snd_emu10k1 *emu, int type, int number, struct snd_emu10k1_voice **rvoice) { struct snd_emu10k1_voice *voice; int i, j, k, first_voice, last_voice, skip; *rvoice = NULL; first_voice = last_voice = 0; for (i = emu->next_free_voice, j = 0; j < NUM_G ; i += number, j += number) { /* printk(KERN_DEBUG "i %d j %d next free %d!\n", i, j, emu->next_free_voice); */ i %= NUM_G; /* stereo voices must be even/odd */ if ((number == 2) && (i % 2)) { i++; continue; } skip = 0; for (k = 0; k < number; k++) { voice = &emu->voices[(i+k) % NUM_G]; if (voice->use) { skip = 1; break; } } if (!skip) { /* printk(KERN_DEBUG "allocated voice %d\n", i); */ first_voice = i; last_voice = (i + number) % NUM_G; emu->next_free_voice = last_voice; break; } } if (first_voice == last_voice) return -ENOMEM; for (i = 0; i < number; i++) { voice = &emu->voices[(first_voice + i) % NUM_G]; /* printk(kERN_DEBUG "voice alloc - %i, %i of %i\n", voice->number, idx-first_voice+1, number); */ voice->use = 1; switch (type) { case EMU10K1_PCM: voice->pcm = 1; break; case EMU10K1_SYNTH: voice->synth = 1; break; case EMU10K1_MIDI: voice->midi = 1; break; case EMU10K1_EFX: voice->efx = 1; break; } } *rvoice = &emu->voices[first_voice]; return 0; } int snd_emu10k1_voice_alloc(struct snd_emu10k1 *emu, int type, int number, struct snd_emu10k1_voice **rvoice) { unsigned long flags; int result; if (snd_BUG_ON(!rvoice)) return -EINVAL; if (snd_BUG_ON(!number)) return -EINVAL; spin_lock_irqsave(&emu->voice_lock, flags); for (;;) { result = voice_alloc(emu, type, number, rvoice); if (result == 0 || type == EMU10K1_SYNTH || type == EMU10K1_MIDI) break; /* free a voice from synth */ if (emu->get_synth_voice) { result = emu->get_synth_voice(emu); if (result >= 0) { struct snd_emu10k1_voice *pvoice = &emu->voices[result]; pvoice->interrupt = NULL; pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = pvoice->efx = 0; pvoice->epcm = NULL; } } if (result < 0) break; } 
spin_unlock_irqrestore(&emu->voice_lock, flags); return result; } EXPORT_SYMBOL(snd_emu10k1_voice_alloc); int snd_emu10k1_voice_free(struct snd_emu10k1 *emu, struct snd_emu10k1_voice *pvoice) { unsigned long flags; if (snd_BUG_ON(!pvoice)) return -EINVAL; spin_lock_irqsave(&emu->voice_lock, flags); pvoice->interrupt = NULL; pvoice->use = pvoice->pcm = pvoice->synth = pvoice->midi = pvoice->efx = 0; pvoice->epcm = NULL; snd_emu10k1_voice_init(emu, pvoice->number); spin_unlock_irqrestore(&emu->voice_lock, flags); return 0; } EXPORT_SYMBOL(snd_emu10k1_voice_free);
gpl-2.0
crpalmer/android_kernel_huawei_kiwi
drivers/gpu/drm/mga/mga_warp.c
4898
4823
/* mga_warp.c -- Matrox G200/G400 WARP engine management -*- linux-c -*- * Created: Thu Jan 11 21:29:32 2001 by gareth@valinux.com * * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: * Gareth Hughes <gareth@valinux.com> */ #include <linux/firmware.h> #include <linux/ihex.h> #include <linux/platform_device.h> #include <linux/module.h> #include <drm/drmP.h> #include <drm/mga_drm.h> #include "mga_drv.h" #define FIRMWARE_G200 "matrox/g200_warp.fw" #define FIRMWARE_G400 "matrox/g400_warp.fw" MODULE_FIRMWARE(FIRMWARE_G200); MODULE_FIRMWARE(FIRMWARE_G400); #define MGA_WARP_CODE_ALIGN 256 /* in bytes */ #define WARP_UCODE_SIZE(size) ALIGN(size, MGA_WARP_CODE_ALIGN) int mga_warp_install_microcode(drm_mga_private_t *dev_priv) { unsigned char *vcbase = dev_priv->warp->handle; unsigned long pcbase = dev_priv->warp->offset; const char *firmware_name; struct platform_device *pdev; const struct firmware *fw = NULL; const struct ihex_binrec *rec; unsigned int size; int n_pipes, where; int rc = 0; switch (dev_priv->chipset) { case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G550: firmware_name = FIRMWARE_G400; n_pipes = MGA_MAX_G400_PIPES; break; case MGA_CARD_TYPE_G200: firmware_name = FIRMWARE_G200; n_pipes = MGA_MAX_G200_PIPES; break; default: return -EINVAL; } pdev = platform_device_register_simple("mga_warp", 0, NULL, 0); if (IS_ERR(pdev)) { DRM_ERROR("mga: Failed to register microcode\n"); return PTR_ERR(pdev); } rc = request_ihex_firmware(&fw, firmware_name, &pdev->dev); platform_device_unregister(pdev); if (rc) { DRM_ERROR("mga: Failed to load microcode \"%s\"\n", firmware_name); return rc; } size = 0; where = 0; for (rec = (const struct ihex_binrec *)fw->data; rec; rec = ihex_next_binrec(rec)) { size += WARP_UCODE_SIZE(be16_to_cpu(rec->len)); where++; } if (where != n_pipes) { DRM_ERROR("mga: Invalid microcode \"%s\"\n", firmware_name); rc = -EINVAL; goto out; } size = PAGE_ALIGN(size); DRM_DEBUG("MGA ucode size = %d bytes\n", size); if (size > dev_priv->warp->size) { DRM_ERROR("microcode too large! 
(%u > %lu)\n", size, dev_priv->warp->size); rc = -ENOMEM; goto out; } memset(dev_priv->warp_pipe_phys, 0, sizeof(dev_priv->warp_pipe_phys)); where = 0; for (rec = (const struct ihex_binrec *)fw->data; rec; rec = ihex_next_binrec(rec)) { unsigned int src_size, dst_size; DRM_DEBUG(" pcbase = 0x%08lx vcbase = %p\n", pcbase, vcbase); dev_priv->warp_pipe_phys[where] = pcbase; src_size = be16_to_cpu(rec->len); dst_size = WARP_UCODE_SIZE(src_size); memcpy(vcbase, rec->data, src_size); pcbase += dst_size; vcbase += dst_size; where++; } out: release_firmware(fw); return rc; } #define WMISC_EXPECTED (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE) int mga_warp_init(drm_mga_private_t *dev_priv) { u32 wmisc; /* FIXME: Get rid of these damned magic numbers... */ switch (dev_priv->chipset) { case MGA_CARD_TYPE_G400: case MGA_CARD_TYPE_G550: MGA_WRITE(MGA_WIADDR2, MGA_WMODE_SUSPEND); MGA_WRITE(MGA_WGETMSB, 0x00000E00); MGA_WRITE(MGA_WVRTXSZ, 0x00001807); MGA_WRITE(MGA_WACCEPTSEQ, 0x18000000); break; case MGA_CARD_TYPE_G200: MGA_WRITE(MGA_WIADDR, MGA_WMODE_SUSPEND); MGA_WRITE(MGA_WGETMSB, 0x1606); MGA_WRITE(MGA_WVRTXSZ, 7); break; default: return -EINVAL; } MGA_WRITE(MGA_WMISC, (MGA_WUCODECACHE_ENABLE | MGA_WMASTER_ENABLE | MGA_WCACHEFLUSH_ENABLE)); wmisc = MGA_READ(MGA_WMISC); if (wmisc != WMISC_EXPECTED) { DRM_ERROR("WARP engine config failed! 0x%x != 0x%x\n", wmisc, WMISC_EXPECTED); return -EINVAL; } return 0; }
gpl-2.0
travarilo/redkancut_cancro
sound/soc/codecs/wm8400.c
4898
44156
/* * wm8400.c -- WM8400 ALSA Soc Audio driver * * Copyright 2008, 2009 Wolfson Microelectronics PLC. * Author: Mark Brown <broonie@opensource.wolfsonmicro.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/mfd/wm8400-audio.h> #include <linux/mfd/wm8400-private.h> #include <linux/mfd/core.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/soc.h> #include <sound/initval.h> #include <sound/tlv.h> #include "wm8400.h" /* Fake register for internal state */ #define WM8400_INTDRIVBITS (WM8400_REGISTER_COUNT + 1) #define WM8400_INMIXL_PWR 0 #define WM8400_AINLMUX_PWR 1 #define WM8400_INMIXR_PWR 2 #define WM8400_AINRMUX_PWR 3 static struct regulator_bulk_data power[] = { { .supply = "I2S1VDD", }, { .supply = "I2S2VDD", }, { .supply = "DCVDD", }, { .supply = "AVDD", }, { .supply = "FLLVDD", }, { .supply = "HPVDD", }, { .supply = "SPKVDD", }, }; /* codec private data */ struct wm8400_priv { struct snd_soc_codec *codec; struct wm8400 *wm8400; u16 fake_register; unsigned int sysclk; unsigned int pcmclk; struct work_struct work; int fll_in, fll_out; }; static inline unsigned int wm8400_read(struct snd_soc_codec *codec, unsigned int reg) { struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec); if (reg == WM8400_INTDRIVBITS) return wm8400->fake_register; else return wm8400_reg_read(wm8400->wm8400, reg); } /* * write to the wm8400 register space */ static int wm8400_write(struct snd_soc_codec *codec, unsigned int reg, unsigned int value) { struct wm8400_priv 
*wm8400 = snd_soc_codec_get_drvdata(codec); if (reg == WM8400_INTDRIVBITS) { wm8400->fake_register = value; return 0; } else return wm8400_set_bits(wm8400->wm8400, reg, 0xffff, value); } static void wm8400_codec_reset(struct snd_soc_codec *codec) { struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec); wm8400_reset_codec_reg_cache(wm8400->wm8400); } static const DECLARE_TLV_DB_SCALE(rec_mix_tlv, -1500, 600, 0); static const DECLARE_TLV_DB_SCALE(in_pga_tlv, -1650, 3000, 0); static const DECLARE_TLV_DB_SCALE(out_mix_tlv, -2100, 0, 0); static const DECLARE_TLV_DB_SCALE(out_pga_tlv, -7300, 600, 0); static const DECLARE_TLV_DB_SCALE(out_omix_tlv, -600, 0, 0); static const DECLARE_TLV_DB_SCALE(out_dac_tlv, -7163, 0, 0); static const DECLARE_TLV_DB_SCALE(in_adc_tlv, -7163, 1763, 0); static const DECLARE_TLV_DB_SCALE(out_sidetone_tlv, -3600, 0, 0); static int wm8400_outpga_put_volsw_vu(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol); struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; int reg = mc->reg; int ret; u16 val; ret = snd_soc_put_volsw(kcontrol, ucontrol); if (ret < 0) return ret; /* now hit the volume update bits (always bit 8) */ val = wm8400_read(codec, reg); return wm8400_write(codec, reg, val | 0x0100); } #define WM8400_OUTPGA_SINGLE_R_TLV(xname, reg, shift, max, invert, tlv_array) \ { .iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = (xname), \ .access = SNDRV_CTL_ELEM_ACCESS_TLV_READ |\ SNDRV_CTL_ELEM_ACCESS_READWRITE,\ .tlv.p = (tlv_array), \ .info = snd_soc_info_volsw, \ .get = snd_soc_get_volsw, .put = wm8400_outpga_put_volsw_vu, \ .private_value = SOC_SINGLE_VALUE(reg, shift, max, invert) } static const char *wm8400_digital_sidetone[] = {"None", "Left ADC", "Right ADC", "Reserved"}; static const struct soc_enum wm8400_left_digital_sidetone_enum = SOC_ENUM_SINGLE(WM8400_DIGITAL_SIDE_TONE, WM8400_ADC_TO_DACL_SHIFT, 2, 
wm8400_digital_sidetone); static const struct soc_enum wm8400_right_digital_sidetone_enum = SOC_ENUM_SINGLE(WM8400_DIGITAL_SIDE_TONE, WM8400_ADC_TO_DACR_SHIFT, 2, wm8400_digital_sidetone); static const char *wm8400_adcmode[] = {"Hi-fi mode", "Voice mode 1", "Voice mode 2", "Voice mode 3"}; static const struct soc_enum wm8400_right_adcmode_enum = SOC_ENUM_SINGLE(WM8400_ADC_CTRL, WM8400_ADC_HPF_CUT_SHIFT, 3, wm8400_adcmode); static const struct snd_kcontrol_new wm8400_snd_controls[] = { /* INMIXL */ SOC_SINGLE("LIN12 PGA Boost", WM8400_INPUT_MIXER3, WM8400_L12MNBST_SHIFT, 1, 0), SOC_SINGLE("LIN34 PGA Boost", WM8400_INPUT_MIXER3, WM8400_L34MNBST_SHIFT, 1, 0), /* INMIXR */ SOC_SINGLE("RIN12 PGA Boost", WM8400_INPUT_MIXER3, WM8400_R12MNBST_SHIFT, 1, 0), SOC_SINGLE("RIN34 PGA Boost", WM8400_INPUT_MIXER3, WM8400_R34MNBST_SHIFT, 1, 0), /* LOMIX */ SOC_SINGLE_TLV("LOMIX LIN3 Bypass Volume", WM8400_OUTPUT_MIXER3, WM8400_LLI3LOVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LOMIX RIN12 PGA Bypass Volume", WM8400_OUTPUT_MIXER3, WM8400_LR12LOVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LOMIX LIN12 PGA Bypass Volume", WM8400_OUTPUT_MIXER3, WM8400_LL12LOVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LOMIX RIN3 Bypass Volume", WM8400_OUTPUT_MIXER5, WM8400_LRI3LOVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LOMIX AINRMUX Bypass Volume", WM8400_OUTPUT_MIXER5, WM8400_LRBLOVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("LOMIX AINLMUX Bypass Volume", WM8400_OUTPUT_MIXER5, WM8400_LRBLOVOL_SHIFT, 7, 0, out_mix_tlv), /* ROMIX */ SOC_SINGLE_TLV("ROMIX RIN3 Bypass Volume", WM8400_OUTPUT_MIXER4, WM8400_RRI3ROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX LIN12 PGA Bypass Volume", WM8400_OUTPUT_MIXER4, WM8400_RL12ROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX RIN12 PGA Bypass Volume", WM8400_OUTPUT_MIXER4, WM8400_RR12ROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX LIN3 Bypass Volume", WM8400_OUTPUT_MIXER6, WM8400_RLI3ROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX 
AINLMUX Bypass Volume", WM8400_OUTPUT_MIXER6, WM8400_RLBROVOL_SHIFT, 7, 0, out_mix_tlv), SOC_SINGLE_TLV("ROMIX AINRMUX Bypass Volume", WM8400_OUTPUT_MIXER6, WM8400_RRBROVOL_SHIFT, 7, 0, out_mix_tlv), /* LOUT */ WM8400_OUTPGA_SINGLE_R_TLV("LOUT Volume", WM8400_LEFT_OUTPUT_VOLUME, WM8400_LOUTVOL_SHIFT, WM8400_LOUTVOL_MASK, 0, out_pga_tlv), SOC_SINGLE("LOUT ZC", WM8400_LEFT_OUTPUT_VOLUME, WM8400_LOZC_SHIFT, 1, 0), /* ROUT */ WM8400_OUTPGA_SINGLE_R_TLV("ROUT Volume", WM8400_RIGHT_OUTPUT_VOLUME, WM8400_ROUTVOL_SHIFT, WM8400_ROUTVOL_MASK, 0, out_pga_tlv), SOC_SINGLE("ROUT ZC", WM8400_RIGHT_OUTPUT_VOLUME, WM8400_ROZC_SHIFT, 1, 0), /* LOPGA */ WM8400_OUTPGA_SINGLE_R_TLV("LOPGA Volume", WM8400_LEFT_OPGA_VOLUME, WM8400_LOPGAVOL_SHIFT, WM8400_LOPGAVOL_MASK, 0, out_pga_tlv), SOC_SINGLE("LOPGA ZC Switch", WM8400_LEFT_OPGA_VOLUME, WM8400_LOPGAZC_SHIFT, 1, 0), /* ROPGA */ WM8400_OUTPGA_SINGLE_R_TLV("ROPGA Volume", WM8400_RIGHT_OPGA_VOLUME, WM8400_ROPGAVOL_SHIFT, WM8400_ROPGAVOL_MASK, 0, out_pga_tlv), SOC_SINGLE("ROPGA ZC Switch", WM8400_RIGHT_OPGA_VOLUME, WM8400_ROPGAZC_SHIFT, 1, 0), SOC_SINGLE("LON Mute Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_LONMUTE_SHIFT, 1, 0), SOC_SINGLE("LOP Mute Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_LOPMUTE_SHIFT, 1, 0), SOC_SINGLE("LOP Attenuation Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_LOATTN_SHIFT, 1, 0), SOC_SINGLE("RON Mute Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_RONMUTE_SHIFT, 1, 0), SOC_SINGLE("ROP Mute Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_ROPMUTE_SHIFT, 1, 0), SOC_SINGLE("ROP Attenuation Switch", WM8400_LINE_OUTPUTS_VOLUME, WM8400_ROATTN_SHIFT, 1, 0), SOC_SINGLE("OUT3 Mute Switch", WM8400_OUT3_4_VOLUME, WM8400_OUT3MUTE_SHIFT, 1, 0), SOC_SINGLE("OUT3 Attenuation Switch", WM8400_OUT3_4_VOLUME, WM8400_OUT3ATTN_SHIFT, 1, 0), SOC_SINGLE("OUT4 Mute Switch", WM8400_OUT3_4_VOLUME, WM8400_OUT4MUTE_SHIFT, 1, 0), SOC_SINGLE("OUT4 Attenuation Switch", WM8400_OUT3_4_VOLUME, WM8400_OUT4ATTN_SHIFT, 1, 0), SOC_SINGLE("Speaker Mode Switch", 
WM8400_CLASSD1, WM8400_CDMODE_SHIFT, 1, 0), SOC_SINGLE("Speaker Output Attenuation Volume", WM8400_SPEAKER_VOLUME, WM8400_SPKATTN_SHIFT, WM8400_SPKATTN_MASK, 0), SOC_SINGLE("Speaker DC Boost Volume", WM8400_CLASSD3, WM8400_DCGAIN_SHIFT, 6, 0), SOC_SINGLE("Speaker AC Boost Volume", WM8400_CLASSD3, WM8400_ACGAIN_SHIFT, 6, 0), WM8400_OUTPGA_SINGLE_R_TLV("Left DAC Digital Volume", WM8400_LEFT_DAC_DIGITAL_VOLUME, WM8400_DACL_VOL_SHIFT, 127, 0, out_dac_tlv), WM8400_OUTPGA_SINGLE_R_TLV("Right DAC Digital Volume", WM8400_RIGHT_DAC_DIGITAL_VOLUME, WM8400_DACR_VOL_SHIFT, 127, 0, out_dac_tlv), SOC_ENUM("Left Digital Sidetone", wm8400_left_digital_sidetone_enum), SOC_ENUM("Right Digital Sidetone", wm8400_right_digital_sidetone_enum), SOC_SINGLE_TLV("Left Digital Sidetone Volume", WM8400_DIGITAL_SIDE_TONE, WM8400_ADCL_DAC_SVOL_SHIFT, 15, 0, out_sidetone_tlv), SOC_SINGLE_TLV("Right Digital Sidetone Volume", WM8400_DIGITAL_SIDE_TONE, WM8400_ADCR_DAC_SVOL_SHIFT, 15, 0, out_sidetone_tlv), SOC_SINGLE("ADC Digital High Pass Filter Switch", WM8400_ADC_CTRL, WM8400_ADC_HPF_ENA_SHIFT, 1, 0), SOC_ENUM("ADC HPF Mode", wm8400_right_adcmode_enum), WM8400_OUTPGA_SINGLE_R_TLV("Left ADC Digital Volume", WM8400_LEFT_ADC_DIGITAL_VOLUME, WM8400_ADCL_VOL_SHIFT, WM8400_ADCL_VOL_MASK, 0, in_adc_tlv), WM8400_OUTPGA_SINGLE_R_TLV("Right ADC Digital Volume", WM8400_RIGHT_ADC_DIGITAL_VOLUME, WM8400_ADCR_VOL_SHIFT, WM8400_ADCR_VOL_MASK, 0, in_adc_tlv), WM8400_OUTPGA_SINGLE_R_TLV("LIN12 Volume", WM8400_LEFT_LINE_INPUT_1_2_VOLUME, WM8400_LIN12VOL_SHIFT, WM8400_LIN12VOL_MASK, 0, in_pga_tlv), SOC_SINGLE("LIN12 ZC Switch", WM8400_LEFT_LINE_INPUT_1_2_VOLUME, WM8400_LI12ZC_SHIFT, 1, 0), SOC_SINGLE("LIN12 Mute Switch", WM8400_LEFT_LINE_INPUT_1_2_VOLUME, WM8400_LI12MUTE_SHIFT, 1, 0), WM8400_OUTPGA_SINGLE_R_TLV("LIN34 Volume", WM8400_LEFT_LINE_INPUT_3_4_VOLUME, WM8400_LIN34VOL_SHIFT, WM8400_LIN34VOL_MASK, 0, in_pga_tlv), SOC_SINGLE("LIN34 ZC Switch", WM8400_LEFT_LINE_INPUT_3_4_VOLUME, WM8400_LI34ZC_SHIFT, 1, 0), 
SOC_SINGLE("LIN34 Mute Switch", WM8400_LEFT_LINE_INPUT_3_4_VOLUME, WM8400_LI34MUTE_SHIFT, 1, 0), WM8400_OUTPGA_SINGLE_R_TLV("RIN12 Volume", WM8400_RIGHT_LINE_INPUT_1_2_VOLUME, WM8400_RIN12VOL_SHIFT, WM8400_RIN12VOL_MASK, 0, in_pga_tlv), SOC_SINGLE("RIN12 ZC Switch", WM8400_RIGHT_LINE_INPUT_1_2_VOLUME, WM8400_RI12ZC_SHIFT, 1, 0), SOC_SINGLE("RIN12 Mute Switch", WM8400_RIGHT_LINE_INPUT_1_2_VOLUME, WM8400_RI12MUTE_SHIFT, 1, 0), WM8400_OUTPGA_SINGLE_R_TLV("RIN34 Volume", WM8400_RIGHT_LINE_INPUT_3_4_VOLUME, WM8400_RIN34VOL_SHIFT, WM8400_RIN34VOL_MASK, 0, in_pga_tlv), SOC_SINGLE("RIN34 ZC Switch", WM8400_RIGHT_LINE_INPUT_3_4_VOLUME, WM8400_RI34ZC_SHIFT, 1, 0), SOC_SINGLE("RIN34 Mute Switch", WM8400_RIGHT_LINE_INPUT_3_4_VOLUME, WM8400_RI34MUTE_SHIFT, 1, 0), }; /* * _DAPM_ Controls */ static int inmixer_event (struct snd_soc_dapm_widget *w, struct snd_kcontrol *kcontrol, int event) { u16 reg, fakepower; reg = wm8400_read(w->codec, WM8400_POWER_MANAGEMENT_2); fakepower = wm8400_read(w->codec, WM8400_INTDRIVBITS); if (fakepower & ((1 << WM8400_INMIXL_PWR) | (1 << WM8400_AINLMUX_PWR))) { reg |= WM8400_AINL_ENA; } else { reg &= ~WM8400_AINL_ENA; } if (fakepower & ((1 << WM8400_INMIXR_PWR) | (1 << WM8400_AINRMUX_PWR))) { reg |= WM8400_AINR_ENA; } else { reg &= ~WM8400_AINR_ENA; } wm8400_write(w->codec, WM8400_POWER_MANAGEMENT_2, reg); return 0; } static int outmixer_event (struct snd_soc_dapm_widget *w, struct snd_kcontrol * kcontrol, int event) { struct soc_mixer_control *mc = (struct soc_mixer_control *)kcontrol->private_value; u32 reg_shift = mc->shift; int ret = 0; u16 reg; switch (reg_shift) { case WM8400_SPEAKER_MIXER | (WM8400_LDSPK << 8) : reg = wm8400_read(w->codec, WM8400_OUTPUT_MIXER1); if (reg & WM8400_LDLO) { printk(KERN_WARNING "Cannot set as Output Mixer 1 LDLO Set\n"); ret = -1; } break; case WM8400_SPEAKER_MIXER | (WM8400_RDSPK << 8): reg = wm8400_read(w->codec, WM8400_OUTPUT_MIXER2); if (reg & WM8400_RDRO) { printk(KERN_WARNING "Cannot set as Output Mixer 2 
RDRO Set\n"); ret = -1; } break; case WM8400_OUTPUT_MIXER1 | (WM8400_LDLO << 8): reg = wm8400_read(w->codec, WM8400_SPEAKER_MIXER); if (reg & WM8400_LDSPK) { printk(KERN_WARNING "Cannot set as Speaker Mixer LDSPK Set\n"); ret = -1; } break; case WM8400_OUTPUT_MIXER2 | (WM8400_RDRO << 8): reg = wm8400_read(w->codec, WM8400_SPEAKER_MIXER); if (reg & WM8400_RDSPK) { printk(KERN_WARNING "Cannot set as Speaker Mixer RDSPK Set\n"); ret = -1; } break; } return ret; } /* INMIX dB values */ static const unsigned int in_mix_tlv[] = { TLV_DB_RANGE_HEAD(1), 0,7, TLV_DB_SCALE_ITEM(-1200, 600, 0), }; /* Left In PGA Connections */ static const struct snd_kcontrol_new wm8400_dapm_lin12_pga_controls[] = { SOC_DAPM_SINGLE("LIN1 Switch", WM8400_INPUT_MIXER2, WM8400_LMN1_SHIFT, 1, 0), SOC_DAPM_SINGLE("LIN2 Switch", WM8400_INPUT_MIXER2, WM8400_LMP2_SHIFT, 1, 0), }; static const struct snd_kcontrol_new wm8400_dapm_lin34_pga_controls[] = { SOC_DAPM_SINGLE("LIN3 Switch", WM8400_INPUT_MIXER2, WM8400_LMN3_SHIFT, 1, 0), SOC_DAPM_SINGLE("LIN4 Switch", WM8400_INPUT_MIXER2, WM8400_LMP4_SHIFT, 1, 0), }; /* Right In PGA Connections */ static const struct snd_kcontrol_new wm8400_dapm_rin12_pga_controls[] = { SOC_DAPM_SINGLE("RIN1 Switch", WM8400_INPUT_MIXER2, WM8400_RMN1_SHIFT, 1, 0), SOC_DAPM_SINGLE("RIN2 Switch", WM8400_INPUT_MIXER2, WM8400_RMP2_SHIFT, 1, 0), }; static const struct snd_kcontrol_new wm8400_dapm_rin34_pga_controls[] = { SOC_DAPM_SINGLE("RIN3 Switch", WM8400_INPUT_MIXER2, WM8400_RMN3_SHIFT, 1, 0), SOC_DAPM_SINGLE("RIN4 Switch", WM8400_INPUT_MIXER2, WM8400_RMP4_SHIFT, 1, 0), }; /* INMIXL */ static const struct snd_kcontrol_new wm8400_dapm_inmixl_controls[] = { SOC_DAPM_SINGLE_TLV("Record Left Volume", WM8400_INPUT_MIXER3, WM8400_LDBVOL_SHIFT, WM8400_LDBVOL_MASK, 0, in_mix_tlv), SOC_DAPM_SINGLE_TLV("LIN2 Volume", WM8400_INPUT_MIXER5, WM8400_LI2BVOL_SHIFT, 7, 0, in_mix_tlv), SOC_DAPM_SINGLE("LINPGA12 Switch", WM8400_INPUT_MIXER3, WM8400_L12MNB_SHIFT, 1, 0), SOC_DAPM_SINGLE("LINPGA34 
Switch", WM8400_INPUT_MIXER3, WM8400_L34MNB_SHIFT, 1, 0), }; /* INMIXR */ static const struct snd_kcontrol_new wm8400_dapm_inmixr_controls[] = { SOC_DAPM_SINGLE_TLV("Record Right Volume", WM8400_INPUT_MIXER4, WM8400_RDBVOL_SHIFT, WM8400_RDBVOL_MASK, 0, in_mix_tlv), SOC_DAPM_SINGLE_TLV("RIN2 Volume", WM8400_INPUT_MIXER6, WM8400_RI2BVOL_SHIFT, 7, 0, in_mix_tlv), SOC_DAPM_SINGLE("RINPGA12 Switch", WM8400_INPUT_MIXER3, WM8400_L12MNB_SHIFT, 1, 0), SOC_DAPM_SINGLE("RINPGA34 Switch", WM8400_INPUT_MIXER3, WM8400_L34MNB_SHIFT, 1, 0), }; /* AINLMUX */ static const char *wm8400_ainlmux[] = {"INMIXL Mix", "RXVOICE Mix", "DIFFINL Mix"}; static const struct soc_enum wm8400_ainlmux_enum = SOC_ENUM_SINGLE( WM8400_INPUT_MIXER1, WM8400_AINLMODE_SHIFT, ARRAY_SIZE(wm8400_ainlmux), wm8400_ainlmux); static const struct snd_kcontrol_new wm8400_dapm_ainlmux_controls = SOC_DAPM_ENUM("Route", wm8400_ainlmux_enum); /* DIFFINL */ /* AINRMUX */ static const char *wm8400_ainrmux[] = {"INMIXR Mix", "RXVOICE Mix", "DIFFINR Mix"}; static const struct soc_enum wm8400_ainrmux_enum = SOC_ENUM_SINGLE( WM8400_INPUT_MIXER1, WM8400_AINRMODE_SHIFT, ARRAY_SIZE(wm8400_ainrmux), wm8400_ainrmux); static const struct snd_kcontrol_new wm8400_dapm_ainrmux_controls = SOC_DAPM_ENUM("Route", wm8400_ainrmux_enum); /* RXVOICE */ static const struct snd_kcontrol_new wm8400_dapm_rxvoice_controls[] = { SOC_DAPM_SINGLE_TLV("LIN4/RXN", WM8400_INPUT_MIXER5, WM8400_LR4BVOL_SHIFT, WM8400_LR4BVOL_MASK, 0, in_mix_tlv), SOC_DAPM_SINGLE_TLV("RIN4/RXP", WM8400_INPUT_MIXER6, WM8400_RL4BVOL_SHIFT, WM8400_RL4BVOL_MASK, 0, in_mix_tlv), }; /* LOMIX */ static const struct snd_kcontrol_new wm8400_dapm_lomix_controls[] = { SOC_DAPM_SINGLE("LOMIX Right ADC Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LRBLO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX Left ADC Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LLBLO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX RIN3 Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LRI3LO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX LIN3 
Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LLI3LO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX RIN12 PGA Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LR12LO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX LIN12 PGA Bypass Switch", WM8400_OUTPUT_MIXER1, WM8400_LL12LO_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOMIX Left DAC Switch", WM8400_OUTPUT_MIXER1, WM8400_LDLO_SHIFT, 1, 0), }; /* ROMIX */ static const struct snd_kcontrol_new wm8400_dapm_romix_controls[] = { SOC_DAPM_SINGLE("ROMIX Left ADC Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RLBRO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX Right ADC Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RRBRO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX LIN3 Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RLI3RO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX RIN3 Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RRI3RO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX LIN12 PGA Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RL12RO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX RIN12 PGA Bypass Switch", WM8400_OUTPUT_MIXER2, WM8400_RR12RO_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROMIX Right DAC Switch", WM8400_OUTPUT_MIXER2, WM8400_RDRO_SHIFT, 1, 0), }; /* LONMIX */ static const struct snd_kcontrol_new wm8400_dapm_lonmix_controls[] = { SOC_DAPM_SINGLE("LONMIX Left Mixer PGA Switch", WM8400_LINE_MIXER1, WM8400_LLOPGALON_SHIFT, 1, 0), SOC_DAPM_SINGLE("LONMIX Right Mixer PGA Switch", WM8400_LINE_MIXER1, WM8400_LROPGALON_SHIFT, 1, 0), SOC_DAPM_SINGLE("LONMIX Inverted LOP Switch", WM8400_LINE_MIXER1, WM8400_LOPLON_SHIFT, 1, 0), }; /* LOPMIX */ static const struct snd_kcontrol_new wm8400_dapm_lopmix_controls[] = { SOC_DAPM_SINGLE("LOPMIX Right Mic Bypass Switch", WM8400_LINE_MIXER1, WM8400_LR12LOP_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOPMIX Left Mic Bypass Switch", WM8400_LINE_MIXER1, WM8400_LL12LOP_SHIFT, 1, 0), SOC_DAPM_SINGLE("LOPMIX Left Mixer PGA Switch", WM8400_LINE_MIXER1, WM8400_LLOPGALOP_SHIFT, 1, 0), }; /* RONMIX */ static const struct snd_kcontrol_new wm8400_dapm_ronmix_controls[] = { SOC_DAPM_SINGLE("RONMIX Right Mixer PGA 
Switch", WM8400_LINE_MIXER2, WM8400_RROPGARON_SHIFT, 1, 0), SOC_DAPM_SINGLE("RONMIX Left Mixer PGA Switch", WM8400_LINE_MIXER2, WM8400_RLOPGARON_SHIFT, 1, 0), SOC_DAPM_SINGLE("RONMIX Inverted ROP Switch", WM8400_LINE_MIXER2, WM8400_ROPRON_SHIFT, 1, 0), }; /* ROPMIX */ static const struct snd_kcontrol_new wm8400_dapm_ropmix_controls[] = { SOC_DAPM_SINGLE("ROPMIX Left Mic Bypass Switch", WM8400_LINE_MIXER2, WM8400_RL12ROP_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROPMIX Right Mic Bypass Switch", WM8400_LINE_MIXER2, WM8400_RR12ROP_SHIFT, 1, 0), SOC_DAPM_SINGLE("ROPMIX Right Mixer PGA Switch", WM8400_LINE_MIXER2, WM8400_RROPGAROP_SHIFT, 1, 0), }; /* OUT3MIX */ static const struct snd_kcontrol_new wm8400_dapm_out3mix_controls[] = { SOC_DAPM_SINGLE("OUT3MIX LIN4/RXP Bypass Switch", WM8400_OUT3_4_MIXER, WM8400_LI4O3_SHIFT, 1, 0), SOC_DAPM_SINGLE("OUT3MIX Left Out PGA Switch", WM8400_OUT3_4_MIXER, WM8400_LPGAO3_SHIFT, 1, 0), }; /* OUT4MIX */ static const struct snd_kcontrol_new wm8400_dapm_out4mix_controls[] = { SOC_DAPM_SINGLE("OUT4MIX Right Out PGA Switch", WM8400_OUT3_4_MIXER, WM8400_RPGAO4_SHIFT, 1, 0), SOC_DAPM_SINGLE("OUT4MIX RIN4/RXP Bypass Switch", WM8400_OUT3_4_MIXER, WM8400_RI4O4_SHIFT, 1, 0), }; /* SPKMIX */ static const struct snd_kcontrol_new wm8400_dapm_spkmix_controls[] = { SOC_DAPM_SINGLE("SPKMIX LIN2 Bypass Switch", WM8400_SPEAKER_MIXER, WM8400_LI2SPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX LADC Bypass Switch", WM8400_SPEAKER_MIXER, WM8400_LB2SPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX Left Mixer PGA Switch", WM8400_SPEAKER_MIXER, WM8400_LOPGASPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX Left DAC Switch", WM8400_SPEAKER_MIXER, WM8400_LDSPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX Right DAC Switch", WM8400_SPEAKER_MIXER, WM8400_RDSPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX Right Mixer PGA Switch", WM8400_SPEAKER_MIXER, WM8400_ROPGASPK_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX RADC Bypass Switch", WM8400_SPEAKER_MIXER, WM8400_RL12ROP_SHIFT, 1, 0), SOC_DAPM_SINGLE("SPKMIX RIN2 Bypass 
Switch", WM8400_SPEAKER_MIXER, WM8400_RI2SPK_SHIFT, 1, 0), }; static const struct snd_soc_dapm_widget wm8400_dapm_widgets[] = { /* Input Side */ /* Input Lines */ SND_SOC_DAPM_INPUT("LIN1"), SND_SOC_DAPM_INPUT("LIN2"), SND_SOC_DAPM_INPUT("LIN3"), SND_SOC_DAPM_INPUT("LIN4/RXN"), SND_SOC_DAPM_INPUT("RIN3"), SND_SOC_DAPM_INPUT("RIN4/RXP"), SND_SOC_DAPM_INPUT("RIN1"), SND_SOC_DAPM_INPUT("RIN2"), SND_SOC_DAPM_INPUT("Internal ADC Source"), /* DACs */ SND_SOC_DAPM_ADC("Left ADC", "Left Capture", WM8400_POWER_MANAGEMENT_2, WM8400_ADCL_ENA_SHIFT, 0), SND_SOC_DAPM_ADC("Right ADC", "Right Capture", WM8400_POWER_MANAGEMENT_2, WM8400_ADCR_ENA_SHIFT, 0), /* Input PGAs */ SND_SOC_DAPM_MIXER("LIN12 PGA", WM8400_POWER_MANAGEMENT_2, WM8400_LIN12_ENA_SHIFT, 0, &wm8400_dapm_lin12_pga_controls[0], ARRAY_SIZE(wm8400_dapm_lin12_pga_controls)), SND_SOC_DAPM_MIXER("LIN34 PGA", WM8400_POWER_MANAGEMENT_2, WM8400_LIN34_ENA_SHIFT, 0, &wm8400_dapm_lin34_pga_controls[0], ARRAY_SIZE(wm8400_dapm_lin34_pga_controls)), SND_SOC_DAPM_MIXER("RIN12 PGA", WM8400_POWER_MANAGEMENT_2, WM8400_RIN12_ENA_SHIFT, 0, &wm8400_dapm_rin12_pga_controls[0], ARRAY_SIZE(wm8400_dapm_rin12_pga_controls)), SND_SOC_DAPM_MIXER("RIN34 PGA", WM8400_POWER_MANAGEMENT_2, WM8400_RIN34_ENA_SHIFT, 0, &wm8400_dapm_rin34_pga_controls[0], ARRAY_SIZE(wm8400_dapm_rin34_pga_controls)), /* INMIXL */ SND_SOC_DAPM_MIXER_E("INMIXL", WM8400_INTDRIVBITS, WM8400_INMIXL_PWR, 0, &wm8400_dapm_inmixl_controls[0], ARRAY_SIZE(wm8400_dapm_inmixl_controls), inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), /* AINLMUX */ SND_SOC_DAPM_MUX_E("AILNMUX", WM8400_INTDRIVBITS, WM8400_AINLMUX_PWR, 0, &wm8400_dapm_ainlmux_controls, inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), /* INMIXR */ SND_SOC_DAPM_MIXER_E("INMIXR", WM8400_INTDRIVBITS, WM8400_INMIXR_PWR, 0, &wm8400_dapm_inmixr_controls[0], ARRAY_SIZE(wm8400_dapm_inmixr_controls), inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), /* AINRMUX */ 
SND_SOC_DAPM_MUX_E("AIRNMUX", WM8400_INTDRIVBITS, WM8400_AINRMUX_PWR, 0, &wm8400_dapm_ainrmux_controls, inmixer_event, SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD), /* Output Side */ /* DACs */ SND_SOC_DAPM_DAC("Left DAC", "Left Playback", WM8400_POWER_MANAGEMENT_3, WM8400_DACL_ENA_SHIFT, 0), SND_SOC_DAPM_DAC("Right DAC", "Right Playback", WM8400_POWER_MANAGEMENT_3, WM8400_DACR_ENA_SHIFT, 0), /* LOMIX */ SND_SOC_DAPM_MIXER_E("LOMIX", WM8400_POWER_MANAGEMENT_3, WM8400_LOMIX_ENA_SHIFT, 0, &wm8400_dapm_lomix_controls[0], ARRAY_SIZE(wm8400_dapm_lomix_controls), outmixer_event, SND_SOC_DAPM_PRE_REG), /* LONMIX */ SND_SOC_DAPM_MIXER("LONMIX", WM8400_POWER_MANAGEMENT_3, WM8400_LON_ENA_SHIFT, 0, &wm8400_dapm_lonmix_controls[0], ARRAY_SIZE(wm8400_dapm_lonmix_controls)), /* LOPMIX */ SND_SOC_DAPM_MIXER("LOPMIX", WM8400_POWER_MANAGEMENT_3, WM8400_LOP_ENA_SHIFT, 0, &wm8400_dapm_lopmix_controls[0], ARRAY_SIZE(wm8400_dapm_lopmix_controls)), /* OUT3MIX */ SND_SOC_DAPM_MIXER("OUT3MIX", WM8400_POWER_MANAGEMENT_1, WM8400_OUT3_ENA_SHIFT, 0, &wm8400_dapm_out3mix_controls[0], ARRAY_SIZE(wm8400_dapm_out3mix_controls)), /* SPKMIX */ SND_SOC_DAPM_MIXER_E("SPKMIX", WM8400_POWER_MANAGEMENT_1, WM8400_SPK_ENA_SHIFT, 0, &wm8400_dapm_spkmix_controls[0], ARRAY_SIZE(wm8400_dapm_spkmix_controls), outmixer_event, SND_SOC_DAPM_PRE_REG), /* OUT4MIX */ SND_SOC_DAPM_MIXER("OUT4MIX", WM8400_POWER_MANAGEMENT_1, WM8400_OUT4_ENA_SHIFT, 0, &wm8400_dapm_out4mix_controls[0], ARRAY_SIZE(wm8400_dapm_out4mix_controls)), /* ROPMIX */ SND_SOC_DAPM_MIXER("ROPMIX", WM8400_POWER_MANAGEMENT_3, WM8400_ROP_ENA_SHIFT, 0, &wm8400_dapm_ropmix_controls[0], ARRAY_SIZE(wm8400_dapm_ropmix_controls)), /* RONMIX */ SND_SOC_DAPM_MIXER("RONMIX", WM8400_POWER_MANAGEMENT_3, WM8400_RON_ENA_SHIFT, 0, &wm8400_dapm_ronmix_controls[0], ARRAY_SIZE(wm8400_dapm_ronmix_controls)), /* ROMIX */ SND_SOC_DAPM_MIXER_E("ROMIX", WM8400_POWER_MANAGEMENT_3, WM8400_ROMIX_ENA_SHIFT, 0, &wm8400_dapm_romix_controls[0], 
ARRAY_SIZE(wm8400_dapm_romix_controls), outmixer_event, SND_SOC_DAPM_PRE_REG), /* LOUT PGA */ SND_SOC_DAPM_PGA("LOUT PGA", WM8400_POWER_MANAGEMENT_1, WM8400_LOUT_ENA_SHIFT, 0, NULL, 0), /* ROUT PGA */ SND_SOC_DAPM_PGA("ROUT PGA", WM8400_POWER_MANAGEMENT_1, WM8400_ROUT_ENA_SHIFT, 0, NULL, 0), /* LOPGA */ SND_SOC_DAPM_PGA("LOPGA", WM8400_POWER_MANAGEMENT_3, WM8400_LOPGA_ENA_SHIFT, 0, NULL, 0), /* ROPGA */ SND_SOC_DAPM_PGA("ROPGA", WM8400_POWER_MANAGEMENT_3, WM8400_ROPGA_ENA_SHIFT, 0, NULL, 0), /* MICBIAS */ SND_SOC_DAPM_SUPPLY("MICBIAS", WM8400_POWER_MANAGEMENT_1, WM8400_MIC1BIAS_ENA_SHIFT, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("LON"), SND_SOC_DAPM_OUTPUT("LOP"), SND_SOC_DAPM_OUTPUT("OUT3"), SND_SOC_DAPM_OUTPUT("LOUT"), SND_SOC_DAPM_OUTPUT("SPKN"), SND_SOC_DAPM_OUTPUT("SPKP"), SND_SOC_DAPM_OUTPUT("ROUT"), SND_SOC_DAPM_OUTPUT("OUT4"), SND_SOC_DAPM_OUTPUT("ROP"), SND_SOC_DAPM_OUTPUT("RON"), SND_SOC_DAPM_OUTPUT("Internal DAC Sink"), }; static const struct snd_soc_dapm_route wm8400_dapm_routes[] = { /* Make DACs turn on when playing even if not mixed into any outputs */ {"Internal DAC Sink", NULL, "Left DAC"}, {"Internal DAC Sink", NULL, "Right DAC"}, /* Make ADCs turn on when recording * even if not mixed from any inputs */ {"Left ADC", NULL, "Internal ADC Source"}, {"Right ADC", NULL, "Internal ADC Source"}, /* Input Side */ /* LIN12 PGA */ {"LIN12 PGA", "LIN1 Switch", "LIN1"}, {"LIN12 PGA", "LIN2 Switch", "LIN2"}, /* LIN34 PGA */ {"LIN34 PGA", "LIN3 Switch", "LIN3"}, {"LIN34 PGA", "LIN4 Switch", "LIN4/RXN"}, /* INMIXL */ {"INMIXL", "Record Left Volume", "LOMIX"}, {"INMIXL", "LIN2 Volume", "LIN2"}, {"INMIXL", "LINPGA12 Switch", "LIN12 PGA"}, {"INMIXL", "LINPGA34 Switch", "LIN34 PGA"}, /* AILNMUX */ {"AILNMUX", "INMIXL Mix", "INMIXL"}, {"AILNMUX", "DIFFINL Mix", "LIN12 PGA"}, {"AILNMUX", "DIFFINL Mix", "LIN34 PGA"}, {"AILNMUX", "RXVOICE Mix", "LIN4/RXN"}, {"AILNMUX", "RXVOICE Mix", "RIN4/RXP"}, /* ADC */ {"Left ADC", NULL, "AILNMUX"}, /* RIN12 PGA */ {"RIN12 PGA", "RIN1 
Switch", "RIN1"}, {"RIN12 PGA", "RIN2 Switch", "RIN2"}, /* RIN34 PGA */ {"RIN34 PGA", "RIN3 Switch", "RIN3"}, {"RIN34 PGA", "RIN4 Switch", "RIN4/RXP"}, /* INMIXL */ {"INMIXR", "Record Right Volume", "ROMIX"}, {"INMIXR", "RIN2 Volume", "RIN2"}, {"INMIXR", "RINPGA12 Switch", "RIN12 PGA"}, {"INMIXR", "RINPGA34 Switch", "RIN34 PGA"}, /* AIRNMUX */ {"AIRNMUX", "INMIXR Mix", "INMIXR"}, {"AIRNMUX", "DIFFINR Mix", "RIN12 PGA"}, {"AIRNMUX", "DIFFINR Mix", "RIN34 PGA"}, {"AIRNMUX", "RXVOICE Mix", "LIN4/RXN"}, {"AIRNMUX", "RXVOICE Mix", "RIN4/RXP"}, /* ADC */ {"Right ADC", NULL, "AIRNMUX"}, /* LOMIX */ {"LOMIX", "LOMIX RIN3 Bypass Switch", "RIN3"}, {"LOMIX", "LOMIX LIN3 Bypass Switch", "LIN3"}, {"LOMIX", "LOMIX LIN12 PGA Bypass Switch", "LIN12 PGA"}, {"LOMIX", "LOMIX RIN12 PGA Bypass Switch", "RIN12 PGA"}, {"LOMIX", "LOMIX Right ADC Bypass Switch", "AIRNMUX"}, {"LOMIX", "LOMIX Left ADC Bypass Switch", "AILNMUX"}, {"LOMIX", "LOMIX Left DAC Switch", "Left DAC"}, /* ROMIX */ {"ROMIX", "ROMIX RIN3 Bypass Switch", "RIN3"}, {"ROMIX", "ROMIX LIN3 Bypass Switch", "LIN3"}, {"ROMIX", "ROMIX LIN12 PGA Bypass Switch", "LIN12 PGA"}, {"ROMIX", "ROMIX RIN12 PGA Bypass Switch", "RIN12 PGA"}, {"ROMIX", "ROMIX Right ADC Bypass Switch", "AIRNMUX"}, {"ROMIX", "ROMIX Left ADC Bypass Switch", "AILNMUX"}, {"ROMIX", "ROMIX Right DAC Switch", "Right DAC"}, /* SPKMIX */ {"SPKMIX", "SPKMIX LIN2 Bypass Switch", "LIN2"}, {"SPKMIX", "SPKMIX RIN2 Bypass Switch", "RIN2"}, {"SPKMIX", "SPKMIX LADC Bypass Switch", "AILNMUX"}, {"SPKMIX", "SPKMIX RADC Bypass Switch", "AIRNMUX"}, {"SPKMIX", "SPKMIX Left Mixer PGA Switch", "LOPGA"}, {"SPKMIX", "SPKMIX Right Mixer PGA Switch", "ROPGA"}, {"SPKMIX", "SPKMIX Right DAC Switch", "Right DAC"}, {"SPKMIX", "SPKMIX Left DAC Switch", "Right DAC"}, /* LONMIX */ {"LONMIX", "LONMIX Left Mixer PGA Switch", "LOPGA"}, {"LONMIX", "LONMIX Right Mixer PGA Switch", "ROPGA"}, {"LONMIX", "LONMIX Inverted LOP Switch", "LOPMIX"}, /* LOPMIX */ {"LOPMIX", "LOPMIX Right Mic Bypass Switch", 
"RIN12 PGA"}, {"LOPMIX", "LOPMIX Left Mic Bypass Switch", "LIN12 PGA"}, {"LOPMIX", "LOPMIX Left Mixer PGA Switch", "LOPGA"}, /* OUT3MIX */ {"OUT3MIX", "OUT3MIX LIN4/RXP Bypass Switch", "LIN4/RXN"}, {"OUT3MIX", "OUT3MIX Left Out PGA Switch", "LOPGA"}, /* OUT4MIX */ {"OUT4MIX", "OUT4MIX Right Out PGA Switch", "ROPGA"}, {"OUT4MIX", "OUT4MIX RIN4/RXP Bypass Switch", "RIN4/RXP"}, /* RONMIX */ {"RONMIX", "RONMIX Right Mixer PGA Switch", "ROPGA"}, {"RONMIX", "RONMIX Left Mixer PGA Switch", "LOPGA"}, {"RONMIX", "RONMIX Inverted ROP Switch", "ROPMIX"}, /* ROPMIX */ {"ROPMIX", "ROPMIX Left Mic Bypass Switch", "LIN12 PGA"}, {"ROPMIX", "ROPMIX Right Mic Bypass Switch", "RIN12 PGA"}, {"ROPMIX", "ROPMIX Right Mixer PGA Switch", "ROPGA"}, /* Out Mixer PGAs */ {"LOPGA", NULL, "LOMIX"}, {"ROPGA", NULL, "ROMIX"}, {"LOUT PGA", NULL, "LOMIX"}, {"ROUT PGA", NULL, "ROMIX"}, /* Output Pins */ {"LON", NULL, "LONMIX"}, {"LOP", NULL, "LOPMIX"}, {"OUT3", NULL, "OUT3MIX"}, {"LOUT", NULL, "LOUT PGA"}, {"SPKN", NULL, "SPKMIX"}, {"ROUT", NULL, "ROUT PGA"}, {"OUT4", NULL, "OUT4MIX"}, {"ROP", NULL, "ROPMIX"}, {"RON", NULL, "RONMIX"}, }; /* * Clock after FLL and dividers */ static int wm8400_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec); wm8400->sysclk = freq; return 0; } struct fll_factors { u16 n; u16 k; u16 outdiv; u16 fratio; u16 freq_ref; }; #define FIXED_FLL_SIZE ((1 << 16) * 10) static int fll_factors(struct wm8400_priv *wm8400, struct fll_factors *factors, unsigned int Fref, unsigned int Fout) { u64 Kpart; unsigned int K, Nmod, target; factors->outdiv = 2; while (Fout * factors->outdiv < 90000000 || Fout * factors->outdiv > 100000000) { factors->outdiv *= 2; if (factors->outdiv > 32) { dev_err(wm8400->wm8400->dev, "Unsupported FLL output frequency %uHz\n", Fout); return -EINVAL; } } target = Fout * factors->outdiv; factors->outdiv = 
factors->outdiv >> 2; if (Fref < 48000) factors->freq_ref = 1; else factors->freq_ref = 0; if (Fref < 1000000) factors->fratio = 9; else factors->fratio = 0; /* Ensure we have a fractional part */ do { if (Fref < 1000000) factors->fratio--; else factors->fratio++; if (factors->fratio < 1 || factors->fratio > 8) { dev_err(wm8400->wm8400->dev, "Unable to calculate FRATIO\n"); return -EINVAL; } factors->n = target / (Fref * factors->fratio); Nmod = target % (Fref * factors->fratio); } while (Nmod == 0); /* Calculate fractional part - scale up so we can round. */ Kpart = FIXED_FLL_SIZE * (long long)Nmod; do_div(Kpart, (Fref * factors->fratio)); K = Kpart & 0xFFFFFFFF; if ((K % 10) >= 5) K += 5; /* Move down to proper range now rounding is done */ factors->k = K / 10; dev_dbg(wm8400->wm8400->dev, "FLL: Fref=%u Fout=%u N=%x K=%x, FRATIO=%x OUTDIV=%x\n", Fref, Fout, factors->n, factors->k, factors->fratio, factors->outdiv); return 0; } static int wm8400_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, int source, unsigned int freq_in, unsigned int freq_out) { struct snd_soc_codec *codec = codec_dai->codec; struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec); struct fll_factors factors; int ret; u16 reg; if (freq_in == wm8400->fll_in && freq_out == wm8400->fll_out) return 0; if (freq_out) { ret = fll_factors(wm8400, &factors, freq_in, freq_out); if (ret != 0) return ret; } else { /* Bodge GCC 4.4.0 uninitialised variable warning - it * doesn't seem capable of working out that we exit if * freq_out is 0 before any of the uses. 
*/ memset(&factors, 0, sizeof(factors)); } wm8400->fll_out = freq_out; wm8400->fll_in = freq_in; /* We *must* disable the FLL before any changes */ reg = wm8400_read(codec, WM8400_POWER_MANAGEMENT_2); reg &= ~WM8400_FLL_ENA; wm8400_write(codec, WM8400_POWER_MANAGEMENT_2, reg); reg = wm8400_read(codec, WM8400_FLL_CONTROL_1); reg &= ~WM8400_FLL_OSC_ENA; wm8400_write(codec, WM8400_FLL_CONTROL_1, reg); if (!freq_out) return 0; reg &= ~(WM8400_FLL_REF_FREQ | WM8400_FLL_FRATIO_MASK); reg |= WM8400_FLL_FRAC | factors.fratio; reg |= factors.freq_ref << WM8400_FLL_REF_FREQ_SHIFT; wm8400_write(codec, WM8400_FLL_CONTROL_1, reg); wm8400_write(codec, WM8400_FLL_CONTROL_2, factors.k); wm8400_write(codec, WM8400_FLL_CONTROL_3, factors.n); reg = wm8400_read(codec, WM8400_FLL_CONTROL_4); reg &= ~WM8400_FLL_OUTDIV_MASK; reg |= factors.outdiv; wm8400_write(codec, WM8400_FLL_CONTROL_4, reg); return 0; } /* * Sets ADC and Voice DAC format. */ static int wm8400_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct snd_soc_codec *codec = codec_dai->codec; u16 audio1, audio3; audio1 = wm8400_read(codec, WM8400_AUDIO_INTERFACE_1); audio3 = wm8400_read(codec, WM8400_AUDIO_INTERFACE_3); /* set master/slave audio interface */ switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBS_CFS: audio3 &= ~WM8400_AIF_MSTR1; break; case SND_SOC_DAIFMT_CBM_CFM: audio3 |= WM8400_AIF_MSTR1; break; default: return -EINVAL; } audio1 &= ~WM8400_AIF_FMT_MASK; /* interface format */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: audio1 |= WM8400_AIF_FMT_I2S; audio1 &= ~WM8400_AIF_LRCLK_INV; break; case SND_SOC_DAIFMT_RIGHT_J: audio1 |= WM8400_AIF_FMT_RIGHTJ; audio1 &= ~WM8400_AIF_LRCLK_INV; break; case SND_SOC_DAIFMT_LEFT_J: audio1 |= WM8400_AIF_FMT_LEFTJ; audio1 &= ~WM8400_AIF_LRCLK_INV; break; case SND_SOC_DAIFMT_DSP_A: audio1 |= WM8400_AIF_FMT_DSP; audio1 &= ~WM8400_AIF_LRCLK_INV; break; case SND_SOC_DAIFMT_DSP_B: audio1 |= WM8400_AIF_FMT_DSP | 
WM8400_AIF_LRCLK_INV; break; default: return -EINVAL; } wm8400_write(codec, WM8400_AUDIO_INTERFACE_1, audio1); wm8400_write(codec, WM8400_AUDIO_INTERFACE_3, audio3); return 0; } static int wm8400_set_dai_clkdiv(struct snd_soc_dai *codec_dai, int div_id, int div) { struct snd_soc_codec *codec = codec_dai->codec; u16 reg; switch (div_id) { case WM8400_MCLK_DIV: reg = wm8400_read(codec, WM8400_CLOCKING_2) & ~WM8400_MCLK_DIV_MASK; wm8400_write(codec, WM8400_CLOCKING_2, reg | div); break; case WM8400_DACCLK_DIV: reg = wm8400_read(codec, WM8400_CLOCKING_2) & ~WM8400_DAC_CLKDIV_MASK; wm8400_write(codec, WM8400_CLOCKING_2, reg | div); break; case WM8400_ADCCLK_DIV: reg = wm8400_read(codec, WM8400_CLOCKING_2) & ~WM8400_ADC_CLKDIV_MASK; wm8400_write(codec, WM8400_CLOCKING_2, reg | div); break; case WM8400_BCLK_DIV: reg = wm8400_read(codec, WM8400_CLOCKING_1) & ~WM8400_BCLK_DIV_MASK; wm8400_write(codec, WM8400_CLOCKING_1, reg | div); break; default: return -EINVAL; } return 0; } /* * Set PCM DAI bit size and sample rate. 
*/ static int wm8400_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; u16 audio1 = wm8400_read(codec, WM8400_AUDIO_INTERFACE_1); audio1 &= ~WM8400_AIF_WL_MASK; /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: break; case SNDRV_PCM_FORMAT_S20_3LE: audio1 |= WM8400_AIF_WL_20BITS; break; case SNDRV_PCM_FORMAT_S24_LE: audio1 |= WM8400_AIF_WL_24BITS; break; case SNDRV_PCM_FORMAT_S32_LE: audio1 |= WM8400_AIF_WL_32BITS; break; } wm8400_write(codec, WM8400_AUDIO_INTERFACE_1, audio1); return 0; } static int wm8400_mute(struct snd_soc_dai *dai, int mute) { struct snd_soc_codec *codec = dai->codec; u16 val = wm8400_read(codec, WM8400_DAC_CTRL) & ~WM8400_DAC_MUTE; if (mute) wm8400_write(codec, WM8400_DAC_CTRL, val | WM8400_DAC_MUTE); else wm8400_write(codec, WM8400_DAC_CTRL, val); return 0; } /* TODO: set bias for best performance at standby */ static int wm8400_set_bias_level(struct snd_soc_codec *codec, enum snd_soc_bias_level level) { struct wm8400_priv *wm8400 = snd_soc_codec_get_drvdata(codec); u16 val; int ret; switch (level) { case SND_SOC_BIAS_ON: break; case SND_SOC_BIAS_PREPARE: /* VMID=2*50k */ val = wm8400_read(codec, WM8400_POWER_MANAGEMENT_1) & ~WM8400_VMID_MODE_MASK; wm8400_write(codec, WM8400_POWER_MANAGEMENT_1, val | 0x2); break; case SND_SOC_BIAS_STANDBY: if (codec->dapm.bias_level == SND_SOC_BIAS_OFF) { ret = regulator_bulk_enable(ARRAY_SIZE(power), &power[0]); if (ret != 0) { dev_err(wm8400->wm8400->dev, "Failed to enable regulators: %d\n", ret); return ret; } wm8400_write(codec, WM8400_POWER_MANAGEMENT_1, WM8400_CODEC_ENA | WM8400_SYSCLK_ENA); /* Enable POBCTRL, SOFT_ST, VMIDTOG and BUFDCOPEN */ wm8400_write(codec, WM8400_ANTIPOP2, WM8400_SOFTST | WM8400_BUFDCOPEN | WM8400_POBCTRL); msleep(50); /* Enable VREF & VMID at 2x50k */ val = wm8400_read(codec, 
WM8400_POWER_MANAGEMENT_1); val |= 0x2 | WM8400_VREF_ENA; wm8400_write(codec, WM8400_POWER_MANAGEMENT_1, val); /* Enable BUFIOEN */ wm8400_write(codec, WM8400_ANTIPOP2, WM8400_SOFTST | WM8400_BUFDCOPEN | WM8400_POBCTRL | WM8400_BUFIOEN); /* disable POBCTRL, SOFT_ST and BUFDCOPEN */ wm8400_write(codec, WM8400_ANTIPOP2, WM8400_BUFIOEN); } /* VMID=2*300k */ val = wm8400_read(codec, WM8400_POWER_MANAGEMENT_1) & ~WM8400_VMID_MODE_MASK; wm8400_write(codec, WM8400_POWER_MANAGEMENT_1, val | 0x4); break; case SND_SOC_BIAS_OFF: /* Enable POBCTRL and SOFT_ST */ wm8400_write(codec, WM8400_ANTIPOP2, WM8400_SOFTST | WM8400_POBCTRL | WM8400_BUFIOEN); /* Enable POBCTRL, SOFT_ST and BUFDCOPEN */ wm8400_write(codec, WM8400_ANTIPOP2, WM8400_SOFTST | WM8400_BUFDCOPEN | WM8400_POBCTRL | WM8400_BUFIOEN); /* mute DAC */ val = wm8400_read(codec, WM8400_DAC_CTRL); wm8400_write(codec, WM8400_DAC_CTRL, val | WM8400_DAC_MUTE); /* Enable any disabled outputs */ val = wm8400_read(codec, WM8400_POWER_MANAGEMENT_1); val |= WM8400_SPK_ENA | WM8400_OUT3_ENA | WM8400_OUT4_ENA | WM8400_LOUT_ENA | WM8400_ROUT_ENA; wm8400_write(codec, WM8400_POWER_MANAGEMENT_1, val); /* Disable VMID */ val &= ~WM8400_VMID_MODE_MASK; wm8400_write(codec, WM8400_POWER_MANAGEMENT_1, val); msleep(300); /* Enable all output discharge bits */ wm8400_write(codec, WM8400_ANTIPOP1, WM8400_DIS_LLINE | WM8400_DIS_RLINE | WM8400_DIS_OUT3 | WM8400_DIS_OUT4 | WM8400_DIS_LOUT | WM8400_DIS_ROUT); /* Disable VREF */ val &= ~WM8400_VREF_ENA; wm8400_write(codec, WM8400_POWER_MANAGEMENT_1, val); /* disable POBCTRL, SOFT_ST and BUFDCOPEN */ wm8400_write(codec, WM8400_ANTIPOP2, 0x0); ret = regulator_bulk_disable(ARRAY_SIZE(power), &power[0]); if (ret != 0) return ret; break; } codec->dapm.bias_level = level; return 0; } #define WM8400_RATES SNDRV_PCM_RATE_8000_96000 #define WM8400_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE |\ SNDRV_PCM_FMTBIT_S24_LE) static const struct snd_soc_dai_ops wm8400_dai_ops = { .hw_params = 
wm8400_hw_params, .digital_mute = wm8400_mute, .set_fmt = wm8400_set_dai_fmt, .set_clkdiv = wm8400_set_dai_clkdiv, .set_sysclk = wm8400_set_dai_sysclk, .set_pll = wm8400_set_dai_pll, }; /* * The WM8400 supports 2 different and mutually exclusive DAI * configurations. * * 1. ADC/DAC on Primary Interface * 2. ADC on Primary Interface/DAC on secondary */ static struct snd_soc_dai_driver wm8400_dai = { /* ADC/DAC on primary */ .name = "wm8400-hifi", .playback = { .stream_name = "Playback", .channels_min = 1, .channels_max = 2, .rates = WM8400_RATES, .formats = WM8400_FORMATS, }, .capture = { .stream_name = "Capture", .channels_min = 1, .channels_max = 2, .rates = WM8400_RATES, .formats = WM8400_FORMATS, }, .ops = &wm8400_dai_ops, }; static int wm8400_suspend(struct snd_soc_codec *codec) { wm8400_set_bias_level(codec, SND_SOC_BIAS_OFF); return 0; } static int wm8400_resume(struct snd_soc_codec *codec) { wm8400_set_bias_level(codec, SND_SOC_BIAS_STANDBY); return 0; } static void wm8400_probe_deferred(struct work_struct *work) { struct wm8400_priv *priv = container_of(work, struct wm8400_priv, work); struct snd_soc_codec *codec = priv->codec; /* charge output caps */ wm8400_set_bias_level(codec, SND_SOC_BIAS_STANDBY); } static int wm8400_codec_probe(struct snd_soc_codec *codec) { struct wm8400 *wm8400 = dev_get_platdata(codec->dev); struct wm8400_priv *priv; int ret; u16 reg; priv = devm_kzalloc(codec->dev, sizeof(struct wm8400_priv), GFP_KERNEL); if (priv == NULL) return -ENOMEM; snd_soc_codec_set_drvdata(codec, priv); codec->control_data = priv->wm8400 = wm8400; priv->codec = codec; ret = regulator_bulk_get(wm8400->dev, ARRAY_SIZE(power), &power[0]); if (ret != 0) { dev_err(codec->dev, "Failed to get regulators: %d\n", ret); return ret; } INIT_WORK(&priv->work, wm8400_probe_deferred); wm8400_codec_reset(codec); reg = wm8400_read(codec, WM8400_POWER_MANAGEMENT_1); wm8400_write(codec, WM8400_POWER_MANAGEMENT_1, reg | WM8400_CODEC_ENA); /* Latch volume update bits */ reg = 
wm8400_read(codec, WM8400_LEFT_LINE_INPUT_1_2_VOLUME); wm8400_write(codec, WM8400_LEFT_LINE_INPUT_1_2_VOLUME, reg & WM8400_IPVU); reg = wm8400_read(codec, WM8400_RIGHT_LINE_INPUT_1_2_VOLUME); wm8400_write(codec, WM8400_RIGHT_LINE_INPUT_1_2_VOLUME, reg & WM8400_IPVU); wm8400_write(codec, WM8400_LEFT_OUTPUT_VOLUME, 0x50 | (1<<8)); wm8400_write(codec, WM8400_RIGHT_OUTPUT_VOLUME, 0x50 | (1<<8)); if (!schedule_work(&priv->work)) { ret = -EINVAL; goto err_regulator; } return 0; err_regulator: regulator_bulk_free(ARRAY_SIZE(power), power); return ret; } static int wm8400_codec_remove(struct snd_soc_codec *codec) { u16 reg; reg = wm8400_read(codec, WM8400_POWER_MANAGEMENT_1); wm8400_write(codec, WM8400_POWER_MANAGEMENT_1, reg & (~WM8400_CODEC_ENA)); regulator_bulk_free(ARRAY_SIZE(power), power); return 0; } static struct snd_soc_codec_driver soc_codec_dev_wm8400 = { .probe = wm8400_codec_probe, .remove = wm8400_codec_remove, .suspend = wm8400_suspend, .resume = wm8400_resume, .read = wm8400_read, .write = wm8400_write, .set_bias_level = wm8400_set_bias_level, .controls = wm8400_snd_controls, .num_controls = ARRAY_SIZE(wm8400_snd_controls), .dapm_widgets = wm8400_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(wm8400_dapm_widgets), .dapm_routes = wm8400_dapm_routes, .num_dapm_routes = ARRAY_SIZE(wm8400_dapm_routes), }; static int __devinit wm8400_probe(struct platform_device *pdev) { return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_wm8400, &wm8400_dai, 1); } static int __devexit wm8400_remove(struct platform_device *pdev) { snd_soc_unregister_codec(&pdev->dev); return 0; } static struct platform_driver wm8400_codec_driver = { .driver = { .name = "wm8400-codec", .owner = THIS_MODULE, }, .probe = wm8400_probe, .remove = __devexit_p(wm8400_remove), }; module_platform_driver(wm8400_codec_driver); MODULE_DESCRIPTION("ASoC WM8400 driver"); MODULE_AUTHOR("Mark Brown"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:wm8400-codec");
gpl-2.0
jfdsmabalot/kernel_samsung_msm8974ab
drivers/net/ethernet/8390/stnic.c
7458
7151
/* stnic.c : A SH7750 specific part of driver for NS DP83902A ST-NIC. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1999 kaz Kojima */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/delay.h> #include <asm/io.h> #include <mach-se/mach/se.h> #include <asm/machvec.h> #ifdef CONFIG_SH_STANDARD_BIOS #include <asm/sh_bios.h> #endif #include "8390.h" #define DRV_NAME "stnic" #define byte unsigned char #define half unsigned short #define word unsigned int #define vbyte volatile unsigned char #define vhalf volatile unsigned short #define vword volatile unsigned int #define STNIC_RUN 0x01 /* 1 == Run, 0 == reset. */ #define START_PG 0 /* First page of TX buffer */ #define STOP_PG 128 /* Last page +1 of RX ring */ /* Alias */ #define STNIC_CR E8390_CMD #define PG0_RSAR0 EN0_RSARLO #define PG0_RSAR1 EN0_RSARHI #define PG0_RBCR0 EN0_RCNTLO #define PG0_RBCR1 EN0_RCNTHI #define CR_RRD E8390_RREAD #define CR_RWR E8390_RWRITE #define CR_PG0 E8390_PAGE0 #define CR_STA E8390_START #define CR_RDMA E8390_NODMA /* FIXME! YOU MUST SET YOUR OWN ETHER ADDRESS. */ static byte stnic_eadr[6] = {0x00, 0xc0, 0x6e, 0x00, 0x00, 0x07}; static struct net_device *stnic_dev; static void stnic_reset (struct net_device *dev); static void stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void stnic_block_input (struct net_device *dev, int count, struct sk_buff *skb , int ring_offset); static void stnic_block_output (struct net_device *dev, int count, const unsigned char *buf, int start_page); static void stnic_init (struct net_device *dev); /* SH7750 specific read/write io. 
*/ static inline void STNIC_DELAY (void) { vword trash; trash = *(vword *) 0xa0000000; trash = *(vword *) 0xa0000000; trash = *(vword *) 0xa0000000; } static inline byte STNIC_READ (int reg) { byte val; val = (*(vhalf *) (PA_83902 + ((reg) << 1)) >> 8) & 0xff; STNIC_DELAY (); return val; } static inline void STNIC_WRITE (int reg, byte val) { *(vhalf *) (PA_83902 + ((reg) << 1)) = ((half) (val) << 8); STNIC_DELAY (); } static int __init stnic_probe(void) { struct net_device *dev; int i, err; /* If we are not running on a SolutionEngine, give up now */ if (! MACH_SE) return -ENODEV; /* New style probing API */ dev = alloc_ei_netdev(); if (!dev) return -ENOMEM; #ifdef CONFIG_SH_STANDARD_BIOS sh_bios_get_node_addr (stnic_eadr); #endif for (i = 0; i < ETH_ALEN; i++) dev->dev_addr[i] = stnic_eadr[i]; /* Set the base address to point to the NIC, not the "real" base! */ dev->base_addr = 0x1000; dev->irq = IRQ_STNIC; dev->netdev_ops = &ei_netdev_ops; /* Snarf the interrupt now. There's no point in waiting since we cannot share and the board will usually be enabled. 
*/ err = request_irq (dev->irq, ei_interrupt, 0, DRV_NAME, dev); if (err) { printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq); free_netdev(dev); return err; } ei_status.name = dev->name; ei_status.word16 = 1; #ifdef __LITTLE_ENDIAN__ ei_status.bigendian = 0; #else ei_status.bigendian = 1; #endif ei_status.tx_start_page = START_PG; ei_status.rx_start_page = START_PG + TX_PAGES; ei_status.stop_page = STOP_PG; ei_status.reset_8390 = &stnic_reset; ei_status.get_8390_hdr = &stnic_get_hdr; ei_status.block_input = &stnic_block_input; ei_status.block_output = &stnic_block_output; stnic_init (dev); err = register_netdev(dev); if (err) { free_irq(dev->irq, dev); free_netdev(dev); return err; } stnic_dev = dev; printk (KERN_INFO "NS ST-NIC 83902A\n"); return 0; } static void stnic_reset (struct net_device *dev) { *(vhalf *) PA_83902_RST = 0; udelay (5); if (ei_debug > 1) printk (KERN_WARNING "8390 reset done (%ld).\n", jiffies); *(vhalf *) PA_83902_RST = ~0; udelay (5); } static void stnic_get_hdr (struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { half buf[2]; STNIC_WRITE (PG0_RSAR0, 0); STNIC_WRITE (PG0_RSAR1, ring_page); STNIC_WRITE (PG0_RBCR0, 4); STNIC_WRITE (PG0_RBCR1, 0); STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); buf[0] = *(vhalf *) PA_83902_IF; STNIC_DELAY (); buf[1] = *(vhalf *) PA_83902_IF; STNIC_DELAY (); hdr->next = buf[0] >> 8; hdr->status = buf[0] & 0xff; #ifdef __LITTLE_ENDIAN__ hdr->count = buf[1]; #else hdr->count = ((buf[1] >> 8) & 0xff) | (buf[1] << 8); #endif if (ei_debug > 1) printk (KERN_DEBUG "ring %x status %02x next %02x count %04x.\n", ring_page, hdr->status, hdr->next, hdr->count); STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); } /* Block input and output, similar to the Crynwr packet driver. If you are porting to a new ethercard look at the packet driver source for hints. The HP LAN doesn't use shared memory -- we put the packet out through the "remote DMA" dataport. 
*/ static void stnic_block_input (struct net_device *dev, int length, struct sk_buff *skb, int offset) { char *buf = skb->data; half val; STNIC_WRITE (PG0_RSAR0, offset & 0xff); STNIC_WRITE (PG0_RSAR1, offset >> 8); STNIC_WRITE (PG0_RBCR0, length & 0xff); STNIC_WRITE (PG0_RBCR1, length >> 8); STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); if (length & 1) length++; while (length > 0) { val = *(vhalf *) PA_83902_IF; #ifdef __LITTLE_ENDIAN__ *buf++ = val & 0xff; *buf++ = val >> 8; #else *buf++ = val >> 8; *buf++ = val & 0xff; #endif STNIC_DELAY (); length -= sizeof (half); } STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); } static void stnic_block_output (struct net_device *dev, int length, const unsigned char *buf, int output_page) { STNIC_WRITE (PG0_RBCR0, 1); /* Write non-zero value */ STNIC_WRITE (STNIC_CR, CR_RRD | CR_PG0 | CR_STA); STNIC_DELAY (); STNIC_WRITE (PG0_RBCR0, length & 0xff); STNIC_WRITE (PG0_RBCR1, length >> 8); STNIC_WRITE (PG0_RSAR0, 0); STNIC_WRITE (PG0_RSAR1, output_page); STNIC_WRITE (STNIC_CR, CR_RWR | CR_PG0 | CR_STA); if (length & 1) length++; while (length > 0) { #ifdef __LITTLE_ENDIAN__ *(vhalf *) PA_83902_IF = ((half) buf[1] << 8) | buf[0]; #else *(vhalf *) PA_83902_IF = ((half) buf[0] << 8) | buf[1]; #endif STNIC_DELAY (); buf += sizeof (half); length -= sizeof (half); } STNIC_WRITE (STNIC_CR, CR_RDMA | CR_PG0 | CR_STA); } /* This function resets the STNIC if something screws up. */ static void stnic_init (struct net_device *dev) { stnic_reset (dev); NS8390_init (dev, 0); } static void __exit stnic_cleanup(void) { unregister_netdev(stnic_dev); free_irq(stnic_dev->irq, stnic_dev); free_netdev(stnic_dev); } module_init(stnic_probe); module_exit(stnic_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
p12tic/tf700-kernel
drivers/macintosh/therm_pm72.c
8226
63960
/* * Device driver for the thermostats & fan controller of the * Apple G5 "PowerMac7,2" desktop machines. * * (c) Copyright IBM Corp. 2003-2004 * * Maintained by: Benjamin Herrenschmidt * <benh@kernel.crashing.org> * * * The algorithm used is the PID control algorithm, used the same * way the published Darwin code does, using the same values that * are present in the Darwin 7.0 snapshot property lists. * * As far as the CPUs control loops are concerned, I use the * calibration & PID constants provided by the EEPROM, * I do _not_ embed any value from the property lists, as the ones * provided by Darwin 7.0 seem to always have an older version that * what I've seen on the actual computers. * It would be interesting to verify that though. Darwin has a * version code of 1.0.0d11 for all control loops it seems, while * so far, the machines EEPROMs contain a dataset versioned 1.0.0f * * Darwin doesn't provide source to all parts, some missing * bits like the AppleFCU driver or the actual scale of some * of the values returned by sensors had to be "guessed" some * way... or based on what Open Firmware does. * * I didn't yet figure out how to get the slots power consumption * out of the FCU, so that part has not been implemented yet and * the slots fan is set to a fixed 50% PWM, hoping this value is * safe enough ... * * Note: I have observed strange oscillations of the CPU control * loop on a dual G5 here. When idle, the CPU exhaust fan tend to * oscillates slowly (over several minutes) between the minimum * of 300RPMs and approx. 1000 RPMs. I don't know what is causing * this, it could be some incorrect constant or an error in the * way I ported the algorithm, or it could be just normal. I * don't have full understanding on the way Apple tweaked the PID * algorithm for the CPU control, it is definitely not a standard * implementation... 
* * TODO: - Check MPU structure version/signature * - Add things like /sbin/overtemp for non-critical * overtemp conditions so userland can take some policy * decisions, like slowing down CPUs * - Deal with fan and i2c failures in a better way * - Maybe do a generic PID based on params used for * U3 and Drives ? Definitely need to factor code a bit * better... also make sensor detection more robust using * the device-tree to probe for them * - Figure out how to get the slots consumption and set the * slots fan accordingly * * History: * * Nov. 13, 2003 : 0.5 * - First release * * Nov. 14, 2003 : 0.6 * - Read fan speed from FCU, low level fan routines now deal * with errors & check fan status, though higher level don't * do much. * - Move a bunch of definitions to .h file * * Nov. 18, 2003 : 0.7 * - Fix build on ppc64 kernel * - Move back statics definitions to .c file * - Avoid calling schedule_timeout with a negative number * * Dec. 18, 2003 : 0.8 * - Fix typo when reading back fan speed on 2 CPU machines * * Mar. 11, 2004 : 0.9 * - Rework code accessing the ADC chips, make it more robust and * closer to the chip spec. Also make sure it is configured properly, * I've seen yet unexplained cases where on startup, I would have stale * values in the configuration register * - Switch back to use of target fan speed for PID, thus lowering * pressure on i2c * * Oct. 20, 2004 : 1.1 * - Add device-tree lookup for fan IDs, should detect liquid cooling * pumps when present * - Enable driver for PowerMac7,3 machines * - Split the U3/Backside cooling on U3 & U3H versions as Darwin does * - Add new CPU cooling algorithm for machines with liquid cooling * - Workaround for some PowerMac7,3 with empty "fan" node in the devtree * - Fix a signed/unsigned compare issue in some PID loops * * Mar. 
10, 2005 : 1.2 * - Add basic support for Xserve G5 * - Retrieve pumps min/max from EEPROM image in device-tree (broken) * - Use min/max macros here or there * - Latest darwin updated U3H min fan speed to 20% PWM * * July. 06, 2006 : 1.3 * - Fix setting of RPM fans on Xserve G5 (they were going too fast) * - Add missing slots fan control loop for Xserve G5 * - Lower fixed slots fan speed from 50% to 40% on desktop G5s. We * still can't properly implement the control loop for these, so let's * reduce the noise a little bit, it appears that 40% still gives us * a pretty good air flow * - Add code to "tickle" the FCU regulary so it doesn't think that * we are gone while in fact, the machine just didn't need any fan * speed change lately * */ #include <linux/types.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/wait.h> #include <linux/reboot.h> #include <linux/kmod.h> #include <linux/i2c.h> #include <linux/kthread.h> #include <linux/mutex.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <asm/prom.h> #include <asm/machdep.h> #include <asm/io.h> #include <asm/sections.h> #include <asm/macio.h> #include "therm_pm72.h" #define VERSION "1.3" #undef DEBUG #ifdef DEBUG #define DBG(args...) printk(args) #else #define DBG(args...) 
do { } while(0)
#endif

/*
 * Driver statics
 */

static struct platform_device *		of_dev;
/* The three i2c buses the sensors and the FCU hang off */
static struct i2c_adapter *		u3_0;
static struct i2c_adapter *		u3_1;
static struct i2c_adapter *		k2;
static struct i2c_client *		fcu;
/* One PID state per CPU */
static struct cpu_pid_state		processor_state[2];
/* NOTE(review): "basckside" is a typo'd struct tag -- it must match
 * therm_pm72.h; confirm there before renaming. */
static struct basckside_pid_params	backside_params;
static struct backside_pid_state	backside_state;
static struct drives_pid_state		drives_state;
static struct dimm_pid_state		dimms_state;
static struct slots_pid_state		slots_state;
static int				state;
static int				cpu_count;
static int				cpu_pid_type;
static struct task_struct		*ctrl_task;
static struct completion		ctrl_complete;
static int				critical_state;
static int				rackmac;
static s32				dimm_output_clamp;
static int 				fcu_rpm_shift;
static int 				fcu_tickle_ticks;
/* Serializes control-loop state against the sysfs show functions */
static DEFINE_MUTEX(driver_lock);

/*
 * We have 3 types of CPU PID control. One is "split" old style control
 * for intake & exhaust fans, the other is "combined" control for both
 * CPUs that also deals with the pumps when present. To be "compatible"
 * with OS X at this point, we only use "COMBINED" on the machines that
 * are identified as having the pumps (though that identification is at
 * least dodgy). Ultimately, we could probably switch completely to this
 * algorithm provided we hack it to deal with the UP case
 */
#define CPU_PID_TYPE_SPLIT	0
#define CPU_PID_TYPE_COMBINED	1
#define CPU_PID_TYPE_RACKMAC	2

/*
 * This table describes all fans in the FCU. The "id" and "type" values
 * are defaults valid for all earlier machines. Newer machines will
 * eventually override the table content based on the device-tree
 */
struct fcu_fan_table
{
	char*	loc;	/* location code */
	int	type;	/* 0 = rpm, 1 = pwm, 2 = pump */
	int	id;	/* id or -1 */
};

#define FCU_FAN_RPM		0
#define FCU_FAN_PWM		1

#define FCU_FAN_ABSENT_ID	-1

#define FCU_FAN_COUNT		ARRAY_SIZE(fcu_fans)

struct fcu_fan_table	fcu_fans[] = {
	[BACKSIDE_FAN_PWM_INDEX] = {
		.loc	= "BACKSIDE,SYS CTRLR FAN",
		.type	= FCU_FAN_PWM,
		.id	= BACKSIDE_FAN_PWM_DEFAULT_ID,
	},
	[DRIVES_FAN_RPM_INDEX] = {
		.loc	= "DRIVE BAY",
		.type	= FCU_FAN_RPM,
		.id	= DRIVES_FAN_RPM_DEFAULT_ID,
	},
	[SLOTS_FAN_PWM_INDEX] = {
		.loc	= "SLOT,PCI FAN",
		.type	= FCU_FAN_PWM,
		.id	= SLOTS_FAN_PWM_DEFAULT_ID,
	},
	[CPUA_INTAKE_FAN_RPM_INDEX] = {
		.loc	= "CPU A INTAKE",
		.type	= FCU_FAN_RPM,
		.id	= CPUA_INTAKE_FAN_RPM_DEFAULT_ID,
	},
	[CPUA_EXHAUST_FAN_RPM_INDEX] = {
		.loc	= "CPU A EXHAUST",
		.type	= FCU_FAN_RPM,
		.id	= CPUA_EXHAUST_FAN_RPM_DEFAULT_ID,
	},
	[CPUB_INTAKE_FAN_RPM_INDEX] = {
		.loc	= "CPU B INTAKE",
		.type	= FCU_FAN_RPM,
		.id	= CPUB_INTAKE_FAN_RPM_DEFAULT_ID,
	},
	[CPUB_EXHAUST_FAN_RPM_INDEX] = {
		.loc	= "CPU B EXHAUST",
		.type	= FCU_FAN_RPM,
		.id	= CPUB_EXHAUST_FAN_RPM_DEFAULT_ID,
	},
	/* pumps aren't present by default, have to be looked up in the
	 * device-tree
	 */
	[CPUA_PUMP_RPM_INDEX] = {
		.loc	= "CPU A PUMP",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPUB_PUMP_RPM_INDEX] = {
		.loc	= "CPU B PUMP",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	/* Xserve fans */
	[CPU_A1_FAN_RPM_INDEX] = {
		.loc	= "CPU A 1",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_A2_FAN_RPM_INDEX] = {
		.loc	= "CPU A 2",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_A3_FAN_RPM_INDEX] = {
		.loc	= "CPU A 3",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_B1_FAN_RPM_INDEX] = {
		.loc	= "CPU B 1",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_B2_FAN_RPM_INDEX] = {
		.loc	= "CPU B 2",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
	[CPU_B3_FAN_RPM_INDEX] = {
		.loc	= "CPU B 3",
		.type	= FCU_FAN_RPM,
		.id	= FCU_FAN_ABSENT_ID,
	},
};

static struct i2c_driver therm_pm72_driver;

/*
 * Utility function to create an i2c_client structure and
 * attach it to one of u3 adapters.  The bus is selected by bits
 * 0x200 (k2) and 0x100 (u3_1) of 'id'; the 7-bit chip address is
 * (id >> 1) & 0x7f.  Returns NULL on failure.
 */
static struct i2c_client *attach_i2c_chip(int id, const char *name)
{
	struct i2c_client *clt;
	struct i2c_adapter *adap;
	struct i2c_board_info info;

	if (id & 0x200)
		adap = k2;
	else if (id & 0x100)
		adap = u3_1;
	else
		adap = u3_0;
	if (adap == NULL)
		return NULL;

	memset(&info, 0, sizeof(struct i2c_board_info));
	info.addr = (id >> 1) & 0x7f;
	strlcpy(info.type, "therm_pm72", I2C_NAME_SIZE);

	clt = i2c_new_device(adap, &info);
	if (!clt) {
		printk(KERN_ERR "therm_pm72: Failed to attach to i2c ID 0x%x\n", id);
		return NULL;
	}

	/*
	 * Let i2c-core delete that device on driver removal.
	 * This is safe because i2c-core holds the core_lock mutex for us.
	 */
	list_add_tail(&clt->detected, &therm_pm72_driver.clients);
	return clt;
}

/*
 * Here are the i2c chip access wrappers
 */

static void initialize_adc(struct cpu_pid_state *state)
{
	int rc;
	u8 buf[2];

	/* Read ADC the configuration register and cache it. We
	 * also make sure Config2 contains proper values, I've seen
	 * cases where we got stale grabage in there, thus preventing
	 * proper reading of conv.
values */

	/* Clear Config2 */
	buf[0] = 5;
	buf[1] = 0;
	i2c_master_send(state->monitor, buf, 2);

	/* Read & cache Config1 */
	buf[0] = 1;
	rc = i2c_master_send(state->monitor, buf, 1);
	if (rc > 0) {
		rc = i2c_master_recv(state->monitor, buf, 1);
		if (rc > 0) {
			state->adc_config = buf[0];
			DBG("ADC config reg: %02x\n", state->adc_config);
			/* Disable shutdown mode */
			state->adc_config &= 0xfe;
			buf[0] = 1;
			buf[1] = state->adc_config;
			rc = i2c_master_send(state->monitor, buf, 2);
		}
	}
	if (rc <= 0)
		printk(KERN_ERR "therm_pm72: Error reading ADC config"
		       " register !\n");
}

/* Read one conversion from channel 'chan' of the CPU supply-monitor
 * ADC.  Retries the whole select/convert/read sequence up to 10 times
 * before giving up; returns the 10-bit result (raw >> 6) or -1. */
static int read_smon_adc(struct cpu_pid_state *state, int chan)
{
	int rc, data, tries = 0;
	u8 buf[2];

	for (;;) {
		/* Set channel */
		buf[0] = 1;
		buf[1] = (state->adc_config & 0x1f) | (chan << 5);
		rc = i2c_master_send(state->monitor, buf, 2);
		if (rc <= 0)
			goto error;
		/* Wait for conversion */
		msleep(1);
		/* Switch to data register */
		buf[0] = 4;
		rc = i2c_master_send(state->monitor, buf, 1);
		if (rc <= 0)
			goto error;
		/* Read result */
		rc = i2c_master_recv(state->monitor, buf, 2);
		if (rc < 0)
			goto error;
		data = ((u16)buf[0]) << 8 | (u16)buf[1];
		return data >> 6;
	error:
		DBG("Error reading ADC, retrying...\n");
		if (++tries > 10) {
			printk(KERN_ERR "therm_pm72: Error reading ADC !\n");
			return -1;
		}
		msleep(10);
	}
}

/* Read one register of an LM87 hardware monitor, with the same
 * 10-attempt retry policy.  Returns the byte or -1 on failure. */
static int read_lm87_reg(struct i2c_client * chip, int reg)
{
	int rc, tries = 0;
	u8 buf;

	for (;;) {
		/* Set address */
		buf = (u8)reg;
		rc = i2c_master_send(chip, &buf, 1);
		if (rc <= 0)
			goto error;
		rc = i2c_master_recv(chip, &buf, 1);
		if (rc <= 0)
			goto error;
		return (int)buf;
	error:
		DBG("Error reading LM87, retrying...\n");
		if (++tries > 10) {
			printk(KERN_ERR "therm_pm72: Error reading LM87 !\n");
			return -1;
		}
		msleep(10);
	}
}

/* Read 'nb' bytes from FCU register 'reg' into buf.  Both the address
 * write and the data read are retried (up to 100 times, 10ms apart) on
 * the specific errors seen in practice.  Returns bytes read or -EIO. */
static int fan_read_reg(int reg, unsigned char *buf, int nb)
{
	int tries, nr, nw;

	buf[0] = reg;
	tries = 0;
	for (;;) {
		nw = i2c_master_send(fcu, buf, 1);
		if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
			break;
		msleep(10);
		++tries;
	}
	if (nw <= 0) {
		printk(KERN_ERR "Failure writing address to FCU: %d", nw);
		return -EIO;
	}
	tries = 0;
	for (;;) {
		nr = i2c_master_recv(fcu, buf, nb);
		if (nr > 0 || (nr < 0 && nr != -ENODEV) || tries >= 100)
			break;
		msleep(10);
		++tries;
	}
	if (nr <= 0)
		/* NOTE(review): message prints 'nw' (the successful address
		 * write count), not 'nr' -- looks like it should be nr;
		 * confirm before changing. */
		printk(KERN_ERR "Failure reading data from FCU: %d", nw);
	return nr;
}

/* Write 'nb' bytes from ptr to FCU register 'reg' (register number is
 * prepended in a local bounce buffer).  Same retry policy as reads. */
static int fan_write_reg(int reg, const unsigned char *ptr, int nb)
{
	int tries, nw;
	unsigned char buf[16];

	buf[0] = reg;
	memcpy(buf+1, ptr, nb);
	++nb;
	tries = 0;
	for (;;) {
		nw = i2c_master_send(fcu, buf, nb);
		if (nw > 0 || (nw < 0 && nw != -EIO) || tries >= 100)
			break;
		msleep(10);
		++tries;
	}
	if (nw < 0)
		printk(KERN_ERR "Failure writing to FCU: %d", nw);
	return nw;
}

/* Enable all fan outputs (regs 0xe and 0x2e) and read register 0 to
 * work out the RPM scale shift used by this FCU revision. */
static int start_fcu(void)
{
	unsigned char buf = 0xff;
	int rc;

	rc = fan_write_reg(0xe, &buf, 1);
	if (rc < 0)
		return -EIO;
	rc = fan_write_reg(0x2e, &buf, 1);
	if (rc < 0)
		return -EIO;
	rc = fan_read_reg(0, &buf, 1);
	if (rc < 0)
		return -EIO;
	fcu_rpm_shift = (buf == 1) ? 2 : 3;
	printk(KERN_DEBUG "FCU Initialized, RPM fan shift is %d\n",
	       fcu_rpm_shift);

	return 0;
}

/* Program an RPM fan target, clamped to the FCU's representable range
 * (2400..56000 RPM before shifting). */
static int set_rpm_fan(int fan_index, int rpm)
{
	unsigned char buf[2];
	int rc, id, min, max;

	if (fcu_fans[fan_index].type != FCU_FAN_RPM)
		return -EINVAL;
	id = fcu_fans[fan_index].id;
	if (id == FCU_FAN_ABSENT_ID)
		return -EINVAL;

	min = 2400 >> fcu_rpm_shift;
	max = 56000 >> fcu_rpm_shift;

	if (rpm < min)
		rpm = min;
	else if (rpm > max)
		rpm = max;
	buf[0] = rpm >> (8 - fcu_rpm_shift);
	buf[1] = rpm << fcu_rpm_shift;
	rc = fan_write_reg(0x10 + (id * 2), buf, 2);
	if (rc < 0)
		return -EIO;
	return 0;
}

/* Read an RPM fan back.  Checks the failure (0xb) and active (0xd)
 * bitmask registers first: -EFAULT = fan flagged failed by the FCU,
 * -ENXIO = fan not active. */
static int get_rpm_fan(int fan_index, int programmed)
{
	unsigned char failure;
	unsigned char active;
	unsigned char buf[2];
	int rc, id, reg_base;

	if (fcu_fans[fan_index].type != FCU_FAN_RPM)
		return -EINVAL;
	id = fcu_fans[fan_index].id;
	if (id == FCU_FAN_ABSENT_ID)
		return -EINVAL;
	rc = fan_read_reg(0xb, &failure, 1);
	if (rc != 1)
		return -EIO;
	if ((failure & (1 << id)) != 0)
		return -EFAULT;
	rc = fan_read_reg(0xd, &active, 1);
	if (rc != 1)
		return -EIO;
	if ((active & (1 << id)) == 0)
		return -ENXIO;

	/*
Programmed value or real current speed */
	reg_base = programmed ? 0x10 : 0x11;
	rc = fan_read_reg(reg_base + (id * 2), buf, 2);
	if (rc != 2)
		return -EIO;

	return (buf[0] << (8 - fcu_rpm_shift)) | buf[1] >> fcu_rpm_shift;
}

/* Program a PWM fan.  'pwm' is a percentage, clamped to 10..100, then
 * scaled to the FCU's 0..255 register range ((pwm * 2559) / 1000). */
static int set_pwm_fan(int fan_index, int pwm)
{
	unsigned char buf[2];
	int rc, id;

	if (fcu_fans[fan_index].type != FCU_FAN_PWM)
		return -EINVAL;
	id = fcu_fans[fan_index].id;
	if (id == FCU_FAN_ABSENT_ID)
		return -EINVAL;

	if (pwm < 10)
		pwm = 10;
	else if (pwm > 100)
		pwm = 100;
	pwm = (pwm * 2559) / 1000;
	buf[0] = pwm;

	rc = fan_write_reg(0x30 + (id * 2), buf, 1);
	if (rc < 0)
		return rc;

	return 0;
}

/* Read a PWM fan back as a percentage.  Same failure (0x2b) / active
 * (0x2d) bitmask checks as get_rpm_fan(). */
static int get_pwm_fan(int fan_index)
{
	unsigned char failure;
	unsigned char active;
	unsigned char buf[2];
	int rc, id;

	if (fcu_fans[fan_index].type != FCU_FAN_PWM)
		return -EINVAL;
	id = fcu_fans[fan_index].id;
	if (id == FCU_FAN_ABSENT_ID)
		return -EINVAL;
	rc = fan_read_reg(0x2b, &failure, 1);
	if (rc != 1)
		return -EIO;
	if ((failure & (1 << id)) != 0)
		return -EFAULT;
	rc = fan_read_reg(0x2d, &active, 1);
	if (rc != 1)
		return -EIO;
	if ((active & (1 << id)) == 0)
		return -ENXIO;

	/* Programmed value or real current speed */
	rc = fan_read_reg(0x30 + (id * 2), buf, 1);
	if (rc != 1)
		return -EIO;

	return (buf[0] * 1000) / 2559;
}

/* Re-write the slots fan setting so the FCU sees periodic activity
 * and doesn't decide the host is gone (see 1.3 changelog entry). */
static void tickle_fcu(void)
{
	int pwm;

	pwm = get_pwm_fan(SLOTS_FAN_PWM_INDEX);

	DBG("FCU Tickle, slots fan is: %d\n", pwm);
	if (pwm < 0)
		pwm = 100;

	if (!rackmac) {
		pwm = SLOTS_FAN_DEFAULT_PWM;
	} else if (pwm < SLOTS_PID_OUTPUT_MIN)
		pwm = SLOTS_PID_OUTPUT_MIN;

	/* That is hopefully enough to make the FCU happy */
	set_pwm_fan(SLOTS_FAN_PWM_INDEX, pwm);
}

/*
 * Utility routine to read the CPU calibration EEPROM data
 * from the device-tree
 */
static int read_eeprom(int cpu, struct mpu_data *out)
{
	struct device_node *np;
	char nodename[64];
	const u8 *data;
	int len;

	/* prom.c routine for finding a node by path is a bit brain dead
	 * and requires exact @xxx unit numbers. This is a bit ugly but
	 * will work for these machines
	 */
	sprintf(nodename, "/u3@0,f8000000/i2c@f8001000/cpuid@a%d", cpu ? 2 : 0);
	np = of_find_node_by_path(nodename);
	if (np == NULL) {
		printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid node from device-tree\n");
		return -ENODEV;
	}
	data = of_get_property(np, "cpuid", &len);
	if (data == NULL) {
		printk(KERN_ERR "therm_pm72: Failed to retrieve cpuid property from device-tree\n");
		of_node_put(np);
		return -ENODEV;
	}
	memcpy(out, data, sizeof(struct mpu_data));
	of_node_put(np);

	return 0;
}

/* Derive pump min/max speeds from 4 u16s overlaid on the EEPROM's
 * processor_part_num field, falling back to compile-time defaults
 * when the EEPROM content is degenerate. */
static void fetch_cpu_pumps_minmax(void)
{
	struct cpu_pid_state *state0 = &processor_state[0];
	struct cpu_pid_state *state1 = &processor_state[1];
	u16 pump_min = 0, pump_max = 0xffff;
	u16 tmp[4];

	/* Try to fetch pumps min/max infos from eeprom */
	memcpy(&tmp, &state0->mpu.processor_part_num, 8);
	if (tmp[0] != 0xffff && tmp[1] != 0xffff) {
		pump_min = max(pump_min, tmp[0]);
		pump_max = min(pump_max, tmp[1]);
	}
	if (tmp[2] != 0xffff && tmp[3] != 0xffff) {
		pump_min = max(pump_min, tmp[2]);
		pump_max = min(pump_max, tmp[3]);
	}

	/* Double check the values, this _IS_ needed as the EEPROM on
	 * some dual 2.5Ghz G5s seem, at least, to have both min & max
	 * same to the same value ... (grrrr)
	 */
	if (pump_min == pump_max || pump_min == 0 || pump_max == 0xffff) {
		pump_min = CPU_PUMP_OUTPUT_MIN;
		pump_max = CPU_PUMP_OUTPUT_MAX;
	}

	state0->pump_min = state1->pump_min = pump_min;
	state0->pump_max = state1->pump_max = pump_max;
}

/*
 * Now, unfortunately, sysfs doesn't give us a nice void * we could
 * pass around to the attribute functions, so we don't really have
 * choice but implement a bunch of them...
 *
 * That sucks a bit, we take the lock because FIX32TOPRINT evaluates
 * the input twice...
I accept patches :) */
/* Show a 16.16 fixed-point value as "int.milli" under driver_lock
 * (the lock is needed because FIX32TOPRINT evaluates 'data' twice). */
#define BUILD_SHOW_FUNC_FIX(name, data)				\
static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf)	\
{								\
	ssize_t r;						\
	mutex_lock(&driver_lock);				\
	r = sprintf(buf, "%d.%03d", FIX32TOPRINT(data));	\
	mutex_unlock(&driver_lock);				\
	return r;						\
}

/* Show a plain integer value (single read, no lock taken) */
#define BUILD_SHOW_FUNC_INT(name, data)				\
static ssize_t show_##name(struct device *dev, struct device_attribute *attr, char *buf)	\
{								\
	return sprintf(buf, "%d", data);			\
}

BUILD_SHOW_FUNC_FIX(cpu0_temperature, processor_state[0].last_temp)
BUILD_SHOW_FUNC_FIX(cpu0_voltage, processor_state[0].voltage)
BUILD_SHOW_FUNC_FIX(cpu0_current, processor_state[0].current_a)
BUILD_SHOW_FUNC_INT(cpu0_exhaust_fan_rpm, processor_state[0].rpm)
BUILD_SHOW_FUNC_INT(cpu0_intake_fan_rpm, processor_state[0].intake_rpm)

BUILD_SHOW_FUNC_FIX(cpu1_temperature, processor_state[1].last_temp)
BUILD_SHOW_FUNC_FIX(cpu1_voltage, processor_state[1].voltage)
BUILD_SHOW_FUNC_FIX(cpu1_current, processor_state[1].current_a)
BUILD_SHOW_FUNC_INT(cpu1_exhaust_fan_rpm, processor_state[1].rpm)
BUILD_SHOW_FUNC_INT(cpu1_intake_fan_rpm, processor_state[1].intake_rpm)

BUILD_SHOW_FUNC_FIX(backside_temperature, backside_state.last_temp)
BUILD_SHOW_FUNC_INT(backside_fan_pwm, backside_state.pwm)

BUILD_SHOW_FUNC_FIX(drives_temperature, drives_state.last_temp)
BUILD_SHOW_FUNC_INT(drives_fan_rpm, drives_state.rpm)

BUILD_SHOW_FUNC_FIX(slots_temperature, slots_state.last_temp)
BUILD_SHOW_FUNC_INT(slots_fan_pwm, slots_state.pwm)

BUILD_SHOW_FUNC_FIX(dimms_temperature, dimms_state.last_temp)

static DEVICE_ATTR(cpu0_temperature,S_IRUGO,show_cpu0_temperature,NULL);
static DEVICE_ATTR(cpu0_voltage,S_IRUGO,show_cpu0_voltage,NULL);
static DEVICE_ATTR(cpu0_current,S_IRUGO,show_cpu0_current,NULL);
static DEVICE_ATTR(cpu0_exhaust_fan_rpm,S_IRUGO,show_cpu0_exhaust_fan_rpm,NULL);
static DEVICE_ATTR(cpu0_intake_fan_rpm,S_IRUGO,show_cpu0_intake_fan_rpm,NULL);
static DEVICE_ATTR(cpu1_temperature,S_IRUGO,show_cpu1_temperature,NULL);
static DEVICE_ATTR(cpu1_voltage,S_IRUGO,show_cpu1_voltage,NULL);
static DEVICE_ATTR(cpu1_current,S_IRUGO,show_cpu1_current,NULL);
static DEVICE_ATTR(cpu1_exhaust_fan_rpm,S_IRUGO,show_cpu1_exhaust_fan_rpm,NULL);
static DEVICE_ATTR(cpu1_intake_fan_rpm,S_IRUGO,show_cpu1_intake_fan_rpm,NULL);
static DEVICE_ATTR(backside_temperature,S_IRUGO,show_backside_temperature,NULL);
static DEVICE_ATTR(backside_fan_pwm,S_IRUGO,show_backside_fan_pwm,NULL);
static DEVICE_ATTR(drives_temperature,S_IRUGO,show_drives_temperature,NULL);
static DEVICE_ATTR(drives_fan_rpm,S_IRUGO,show_drives_fan_rpm,NULL);
static DEVICE_ATTR(slots_temperature,S_IRUGO,show_slots_temperature,NULL);
static DEVICE_ATTR(slots_fan_pwm,S_IRUGO,show_slots_fan_pwm,NULL);
static DEVICE_ATTR(dimms_temperature,S_IRUGO,show_dimms_temperature,NULL);

/*
 * CPUs fans control loop
 */

/* Sample one CPU: fan RPM readback, diode temperature (calibrated via
 * the EEPROM mdiode/bdiode constants) and power (V*I from the ADC,
 * 16.16 fixed point).  Results go to *temp / *power and are cached in
 * 'state' for sysfs. */
static int do_read_one_cpu_values(struct cpu_pid_state *state, s32 *temp, s32 *power)
{
	s32 ltemp, volts, amps;
	int index, rc = 0;

	/* Default (in case of error) */
	*temp = state->cur_temp;
	*power = state->cur_power;

	if (cpu_pid_type == CPU_PID_TYPE_RACKMAC)
		index = (state->index == 0) ?
			CPU_A1_FAN_RPM_INDEX : CPU_B1_FAN_RPM_INDEX;
	else
		index = (state->index == 0) ?
			CPUA_EXHAUST_FAN_RPM_INDEX : CPUB_EXHAUST_FAN_RPM_INDEX;

	/* Read current fan status */
	rc = get_rpm_fan(index, !RPM_PID_USE_ACTUAL_SPEED);
	if (rc < 0) {
		/* XXX What do we do now ? Nothing for now, keep old value, but
		 * return error upstream
		 */
		DBG(" cpu %d, fan reading error !\n", state->index);
	} else {
		state->rpm = rc;
		DBG(" cpu %d, exhaust RPM: %d\n", state->index, state->rpm);
	}

	/* Get some sensor readings and scale it */
	ltemp = read_smon_adc(state, 1);
	if (ltemp == -1) {
		/* XXX What do we do now ? */
		state->overtemp++;
		if (rc == 0)
			rc = -EIO;
		DBG(" cpu %d, temp reading error !\n", state->index);
	} else {
		/* Fixup temperature according to diode calibration
		 */
		DBG(" cpu %d, temp raw: %04x, m_diode: %04x, b_diode: %04x\n",
		    state->index,
		    ltemp, state->mpu.mdiode, state->mpu.bdiode);
		*temp = ((s32)ltemp * (s32)state->mpu.mdiode + ((s32)state->mpu.bdiode << 12)) >> 2;
		state->last_temp = *temp;
		DBG(" temp: %d.%03d\n", FIX32TOPRINT((*temp)));
	}

	/*
	 * Read voltage & current and calculate power
	 */
	volts = read_smon_adc(state, 3);
	amps = read_smon_adc(state, 4);

	/* Scale voltage and current raw sensor values according to fixed scales
	 * obtained in Darwin and calculate power from I and V
	 */
	volts *= ADC_CPU_VOLTAGE_SCALE;
	amps *= ADC_CPU_CURRENT_SCALE;
	*power = (((u64)volts) * ((u64)amps)) >> 16;
	state->voltage = volts;
	state->current_a = amps;
	state->last_power = *power;

	DBG(" cpu %d, current: %d.%03d, voltage: %d.%03d, power: %d.%03d W\n",
	    state->index, FIX32TOPRINT(state->current_a),
	    FIX32TOPRINT(state->voltage), FIX32TOPRINT(*power));

	return 0;
}

/* Run one PID iteration for a CPU: integral on the power error
 * history, derivative on the temperature history, proportional on the
 * temperature vs the (integral-adjusted) target.  The scaled sum is
 * added to state->rpm; callers clamp it afterwards. */
static void do_cpu_pid(struct cpu_pid_state *state, s32 temp, s32 power)
{
	s32 power_target, integral, derivative, proportional, adj_in_target, sval;
	s64 integ_p, deriv_p, prop_p, sum;
	int i;

	/* Calculate power target value (could be done once for all)
	 * and convert to a 16.16 fp number
	 */
	power_target = ((u32)(state->mpu.pmaxh - state->mpu.padjmax)) << 16;

	DBG(" power target: %d.%03d, error: %d.%03d\n",
	    FIX32TOPRINT(power_target), FIX32TOPRINT(power_target - power));

	/* Store temperature and power in history array */
	state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
	state->temp_history[state->cur_temp] = temp;
	state->cur_power = (state->cur_power + 1) % state->count_power;
	state->power_history[state->cur_power] = power;
	state->error_history[state->cur_power] = power_target - power;

	/* If first loop, fill the history table */
	if (state->first) {
		for (i = 0; i < (state->count_power - 1); i++) {
			state->cur_power
= (state->cur_power + 1) % state->count_power;
			state->power_history[state->cur_power] = power;
			state->error_history[state->cur_power] = power_target - power;
		}
		for (i = 0; i < (CPU_TEMP_HISTORY_SIZE - 1); i++) {
			state->cur_temp = (state->cur_temp + 1) % CPU_TEMP_HISTORY_SIZE;
			state->temp_history[state->cur_temp] = temp;
		}
		state->first = 0;
	}

	/* Calculate the integral term normally based on the "power" values */
	sum = 0;
	integral = 0;
	for (i = 0; i < state->count_power; i++)
		integral += state->error_history[i];
	integral *= CPU_PID_INTERVAL;
	DBG(" integral: %08x\n", integral);

	/* Calculate the adjusted input (sense value).
	 * G_r is 12.20
	 * integ is 16.16
	 * so the result is 28.36
	 *
	 * input target is mpu.ttarget, input max is mpu.tmax
	 */
	integ_p = ((s64)state->mpu.pid_gr) * (s64)integral;
	DBG(" integ_p: %d\n", (int)(integ_p >> 36));
	sval = (state->mpu.tmax << 16) - ((integ_p >> 20) & 0xffffffff);
	adj_in_target = (state->mpu.ttarget << 16);
	if (adj_in_target > sval)
		adj_in_target = sval;
	DBG(" adj_in_target: %d.%03d, ttarget: %d\n", FIX32TOPRINT(adj_in_target),
	    state->mpu.ttarget);

	/* Calculate the derivative term */
	derivative = state->temp_history[state->cur_temp] -
		state->temp_history[(state->cur_temp + CPU_TEMP_HISTORY_SIZE - 1)
				    % CPU_TEMP_HISTORY_SIZE];
	derivative /= CPU_PID_INTERVAL;
	deriv_p = ((s64)state->mpu.pid_gd) * (s64)derivative;
	DBG(" deriv_p: %d\n", (int)(deriv_p >> 36));
	sum += deriv_p;

	/* Calculate the proportional term */
	proportional = temp - adj_in_target;
	prop_p = ((s64)state->mpu.pid_gp) * (s64)proportional;
	DBG(" prop_p: %d\n", (int)(prop_p >> 36));
	sum += prop_p;

	/* Scale sum */
	sum >>= 36;

	DBG(" sum: %d\n", (int)sum);
	state->rpm += (s32)sum;
}

/* COMBINED control loop: sample both CPUs, run a single PID on the
 * worst (max) temperature/power, and drive all four fans plus the
 * liquid cooling pumps (when present) from CPU 0's state. */
static void do_monitor_cpu_combined(void)
{
	struct cpu_pid_state *state0 = &processor_state[0];
	struct cpu_pid_state *state1 = &processor_state[1];
	s32 temp0, power0, temp1, power1;
	s32 temp_combi, power_combi;
	int rc, intake, pump;

	rc = do_read_one_cpu_values(state0, &temp0, &power0);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}
	state1->overtemp = 0;
	rc = do_read_one_cpu_values(state1, &temp1, &power1);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}
	/* CPU 1's overtemp events are folded into CPU 0's counter,
	 * which is the only one used below */
	if (state1->overtemp)
		state0->overtemp++;

	temp_combi = max(temp0, temp1);
	power_combi = max(power0, power1);

	/* Check tmax, increment overtemp if we are there. At tmax+8, we go
	 * full blown immediately and try to trigger a shutdown
	 */
	if (temp_combi >= ((state0->mpu.tmax + 8) << 16)) {
		printk(KERN_WARNING "Warning ! Temperature way above maximum (%d) !\n",
		       temp_combi >> 16);
		state0->overtemp += CPU_MAX_OVERTEMP / 4;
	} else if (temp_combi > (state0->mpu.tmax << 16)) {
		state0->overtemp++;
		printk(KERN_WARNING "Temperature %d above max %d. overtemp %d\n",
		       temp_combi >> 16, state0->mpu.tmax, state0->overtemp);
	} else {
		if (state0->overtemp)
			printk(KERN_WARNING "Temperature back down to %d\n",
			       temp_combi >> 16);
		state0->overtemp = 0;
	}

	if (state0->overtemp >= CPU_MAX_OVERTEMP)
		critical_state = 1;
	if (state0->overtemp > 0) {
		/* Overtemp: skip the PID and pin everything at maximum */
		state0->rpm = state0->mpu.rmaxn_exhaust_fan;
		state0->intake_rpm = intake = state0->mpu.rmaxn_intake_fan;
		pump = state0->pump_max;
		goto do_set_fans;
	}

	/* Do the PID */
	do_cpu_pid(state0, temp_combi, power_combi);

	/* Range check */
	state0->rpm = max(state0->rpm, (int)state0->mpu.rminn_exhaust_fan);
	state0->rpm = min(state0->rpm, (int)state0->mpu.rmaxn_exhaust_fan);

	/* Calculate intake fan speed */
	intake = (state0->rpm * CPU_INTAKE_SCALE) >> 16;
	intake = max(intake, (int)state0->mpu.rminn_intake_fan);
	intake = min(intake, (int)state0->mpu.rmaxn_intake_fan);
	state0->intake_rpm = intake;

	/* Calculate pump speed */
	pump = (state0->rpm * state0->pump_max) /
		state0->mpu.rmaxn_exhaust_fan;
	pump = min(pump, state0->pump_max);
	pump = max(pump, state0->pump_min);

 do_set_fans:
	/* We copy values from state 0 to state 1 for /sysfs */
	state1->rpm = state0->rpm;
	state1->intake_rpm = state0->intake_rpm;

	DBG("** CPU %d RPM: %d Ex, %d, Pump: %d, In, overtemp: %d\n",
	    state1->index, (int)state1->rpm, intake, pump, state1->overtemp);

	/* We should check for errors, shouldn't we ? But then, what
	 * do we do once the error occurs ? For FCU notified fan
	 * failures (-EFAULT) we probably want to notify userland
	 * some way...
	 */
	set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake);
	set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state0->rpm);
	set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake);
	set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state0->rpm);

	if (fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
		set_rpm_fan(CPUA_PUMP_RPM_INDEX, pump);
	if (fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID)
		set_rpm_fan(CPUB_PUMP_RPM_INDEX, pump);
}

/* SPLIT control loop: one PID per CPU, driving that CPU's own intake
 * and exhaust fans (older desktop machines, no pumps). */
static void do_monitor_cpu_split(struct cpu_pid_state *state)
{
	s32 temp, power;
	int rc, intake;

	/* Read current fan status */
	rc = do_read_one_cpu_values(state, &temp, &power);
	if (rc < 0) {
		/* XXX What do we do now ? */
	}

	/* Check tmax, increment overtemp if we are there. At tmax+8, we go
	 * full blown immediately and try to trigger a shutdown
	 */
	if (temp >= ((state->mpu.tmax + 8) << 16)) {
		printk(KERN_WARNING "Warning ! CPU %d temperature way above maximum"
		       " (%d) !\n",
		       state->index, temp >> 16);
		state->overtemp += CPU_MAX_OVERTEMP / 4;
	} else if (temp > (state->mpu.tmax << 16)) {
		state->overtemp++;
		printk(KERN_WARNING "CPU %d temperature %d above max %d. 
overtemp %d\n", state->index, temp >> 16, state->mpu.tmax, state->overtemp); } else { if (state->overtemp) printk(KERN_WARNING "CPU %d temperature back down to %d\n", state->index, temp >> 16); state->overtemp = 0; } if (state->overtemp >= CPU_MAX_OVERTEMP) critical_state = 1; if (state->overtemp > 0) { state->rpm = state->mpu.rmaxn_exhaust_fan; state->intake_rpm = intake = state->mpu.rmaxn_intake_fan; goto do_set_fans; } /* Do the PID */ do_cpu_pid(state, temp, power); /* Range check */ state->rpm = max(state->rpm, (int)state->mpu.rminn_exhaust_fan); state->rpm = min(state->rpm, (int)state->mpu.rmaxn_exhaust_fan); /* Calculate intake fan */ intake = (state->rpm * CPU_INTAKE_SCALE) >> 16; intake = max(intake, (int)state->mpu.rminn_intake_fan); intake = min(intake, (int)state->mpu.rmaxn_intake_fan); state->intake_rpm = intake; do_set_fans: DBG("** CPU %d RPM: %d Ex, %d In, overtemp: %d\n", state->index, (int)state->rpm, intake, state->overtemp); /* We should check for errors, shouldn't we ? But then, what * do we do once the error occurs ? For FCU notified fan * failures (-EFAULT) we probably want to notify userland * some way... */ if (state->index == 0) { set_rpm_fan(CPUA_INTAKE_FAN_RPM_INDEX, intake); set_rpm_fan(CPUA_EXHAUST_FAN_RPM_INDEX, state->rpm); } else { set_rpm_fan(CPUB_INTAKE_FAN_RPM_INDEX, intake); set_rpm_fan(CPUB_EXHAUST_FAN_RPM_INDEX, state->rpm); } } static void do_monitor_cpu_rack(struct cpu_pid_state *state) { s32 temp, power, fan_min; int rc; /* Read current fan status */ rc = do_read_one_cpu_values(state, &temp, &power); if (rc < 0) { /* XXX What do we do now ? */ } /* Check tmax, increment overtemp if we are there. At tmax+8, we go * full blown immediately and try to trigger a shutdown */ if (temp >= ((state->mpu.tmax + 8) << 16)) { printk(KERN_WARNING "Warning ! 
CPU %d temperature way above maximum" " (%d) !\n", state->index, temp >> 16); state->overtemp = CPU_MAX_OVERTEMP / 4; } else if (temp > (state->mpu.tmax << 16)) { state->overtemp++; printk(KERN_WARNING "CPU %d temperature %d above max %d. overtemp %d\n", state->index, temp >> 16, state->mpu.tmax, state->overtemp); } else { if (state->overtemp) printk(KERN_WARNING "CPU %d temperature back down to %d\n", state->index, temp >> 16); state->overtemp = 0; } if (state->overtemp >= CPU_MAX_OVERTEMP) critical_state = 1; if (state->overtemp > 0) { state->rpm = state->intake_rpm = state->mpu.rmaxn_intake_fan; goto do_set_fans; } /* Do the PID */ do_cpu_pid(state, temp, power); /* Check clamp from dimms */ fan_min = dimm_output_clamp; fan_min = max(fan_min, (int)state->mpu.rminn_intake_fan); DBG(" CPU min mpu = %d, min dimm = %d\n", state->mpu.rminn_intake_fan, dimm_output_clamp); state->rpm = max(state->rpm, (int)fan_min); state->rpm = min(state->rpm, (int)state->mpu.rmaxn_intake_fan); state->intake_rpm = state->rpm; do_set_fans: DBG("** CPU %d RPM: %d overtemp: %d\n", state->index, (int)state->rpm, state->overtemp); /* We should check for errors, shouldn't we ? But then, what * do we do once the error occurs ? For FCU notified fan * failures (-EFAULT) we probably want to notify userland * some way... */ if (state->index == 0) { set_rpm_fan(CPU_A1_FAN_RPM_INDEX, state->rpm); set_rpm_fan(CPU_A2_FAN_RPM_INDEX, state->rpm); set_rpm_fan(CPU_A3_FAN_RPM_INDEX, state->rpm); } else { set_rpm_fan(CPU_B1_FAN_RPM_INDEX, state->rpm); set_rpm_fan(CPU_B2_FAN_RPM_INDEX, state->rpm); set_rpm_fan(CPU_B3_FAN_RPM_INDEX, state->rpm); } } /* * Initialize the state structure for one CPU control loop */ static int init_processor_state(struct cpu_pid_state *state, int index) { int err; state->index = index; state->first = 1; state->rpm = (cpu_pid_type == CPU_PID_TYPE_RACKMAC) ? 
4000 : 1000; state->overtemp = 0; state->adc_config = 0x00; if (index == 0) state->monitor = attach_i2c_chip(SUPPLY_MONITOR_ID, "CPU0_monitor"); else if (index == 1) state->monitor = attach_i2c_chip(SUPPLY_MONITORB_ID, "CPU1_monitor"); if (state->monitor == NULL) goto fail; if (read_eeprom(index, &state->mpu)) goto fail; state->count_power = state->mpu.tguardband; if (state->count_power > CPU_POWER_HISTORY_SIZE) { printk(KERN_WARNING "Warning ! too many power history slots\n"); state->count_power = CPU_POWER_HISTORY_SIZE; } DBG("CPU %d Using %d power history entries\n", index, state->count_power); if (index == 0) { err = device_create_file(&of_dev->dev, &dev_attr_cpu0_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_voltage); err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_current); err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm); err |= device_create_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm); } else { err = device_create_file(&of_dev->dev, &dev_attr_cpu1_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_voltage); err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_current); err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm); err |= device_create_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm); } if (err) printk(KERN_WARNING "Failed to create some of the attribute" "files for CPU %d\n", index); return 0; fail: state->monitor = NULL; return -ENODEV; } /* * Dispose of the state data for one CPU control loop */ static void dispose_processor_state(struct cpu_pid_state *state) { if (state->monitor == NULL) return; if (state->index == 0) { device_remove_file(&of_dev->dev, &dev_attr_cpu0_temperature); device_remove_file(&of_dev->dev, &dev_attr_cpu0_voltage); device_remove_file(&of_dev->dev, &dev_attr_cpu0_current); device_remove_file(&of_dev->dev, &dev_attr_cpu0_exhaust_fan_rpm); device_remove_file(&of_dev->dev, &dev_attr_cpu0_intake_fan_rpm); } else { 
device_remove_file(&of_dev->dev, &dev_attr_cpu1_temperature); device_remove_file(&of_dev->dev, &dev_attr_cpu1_voltage); device_remove_file(&of_dev->dev, &dev_attr_cpu1_current); device_remove_file(&of_dev->dev, &dev_attr_cpu1_exhaust_fan_rpm); device_remove_file(&of_dev->dev, &dev_attr_cpu1_intake_fan_rpm); } state->monitor = NULL; } /* * Motherboard backside & U3 heatsink fan control loop */ static void do_monitor_backside(struct backside_pid_state *state) { s32 temp, integral, derivative, fan_min; s64 integ_p, deriv_p, prop_p, sum; int i, rc; if (--state->ticks != 0) return; state->ticks = backside_params.interval; DBG("backside:\n"); /* Check fan status */ rc = get_pwm_fan(BACKSIDE_FAN_PWM_INDEX); if (rc < 0) { printk(KERN_WARNING "Error %d reading backside fan !\n", rc); /* XXX What do we do now ? */ } else state->pwm = rc; DBG(" current pwm: %d\n", state->pwm); /* Get some sensor readings */ temp = i2c_smbus_read_byte_data(state->monitor, MAX6690_EXT_TEMP) << 16; state->last_temp = temp; DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(backside_params.input_target)); /* Store temperature and error in history array */ state->cur_sample = (state->cur_sample + 1) % BACKSIDE_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - backside_params.input_target; /* If first loop, fill the history table */ if (state->first) { for (i = 0; i < (BACKSIDE_PID_HISTORY_SIZE - 1); i++) { state->cur_sample = (state->cur_sample + 1) % BACKSIDE_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - backside_params.input_target; } state->first = 0; } /* Calculate the integral term */ sum = 0; integral = 0; for (i = 0; i < BACKSIDE_PID_HISTORY_SIZE; i++) integral += state->error_history[i]; integral *= backside_params.interval; DBG(" integral: %08x\n", integral); integ_p = ((s64)backside_params.G_r) * (s64)integral; DBG(" integ_p: 
%d\n", (int)(integ_p >> 36)); sum += integ_p; /* Calculate the derivative term */ derivative = state->error_history[state->cur_sample] - state->error_history[(state->cur_sample + BACKSIDE_PID_HISTORY_SIZE - 1) % BACKSIDE_PID_HISTORY_SIZE]; derivative /= backside_params.interval; deriv_p = ((s64)backside_params.G_d) * (s64)derivative; DBG(" deriv_p: %d\n", (int)(deriv_p >> 36)); sum += deriv_p; /* Calculate the proportional term */ prop_p = ((s64)backside_params.G_p) * (s64)(state->error_history[state->cur_sample]); DBG(" prop_p: %d\n", (int)(prop_p >> 36)); sum += prop_p; /* Scale sum */ sum >>= 36; DBG(" sum: %d\n", (int)sum); if (backside_params.additive) state->pwm += (s32)sum; else state->pwm = sum; /* Check for clamp */ fan_min = (dimm_output_clamp * 100) / 14000; fan_min = max(fan_min, backside_params.output_min); state->pwm = max(state->pwm, fan_min); state->pwm = min(state->pwm, backside_params.output_max); DBG("** BACKSIDE PWM: %d\n", (int)state->pwm); set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, state->pwm); } /* * Initialize the state structure for the backside fan control loop */ static int init_backside_state(struct backside_pid_state *state) { struct device_node *u3; int u3h = 1; /* conservative by default */ int err; /* * There are different PID params for machines with U3 and machines * with U3H, pick the right ones now */ u3 = of_find_node_by_path("/u3@0,f8000000"); if (u3 != NULL) { const u32 *vers = of_get_property(u3, "device-rev", NULL); if (vers) if (((*vers) & 0x3f) < 0x34) u3h = 0; of_node_put(u3); } if (rackmac) { backside_params.G_d = BACKSIDE_PID_RACK_G_d; backside_params.input_target = BACKSIDE_PID_RACK_INPUT_TARGET; backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN; backside_params.interval = BACKSIDE_PID_RACK_INTERVAL; backside_params.G_p = BACKSIDE_PID_RACK_G_p; backside_params.G_r = BACKSIDE_PID_G_r; backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX; backside_params.additive = 0; } else if (u3h) { backside_params.G_d = 
BACKSIDE_PID_U3H_G_d; backside_params.input_target = BACKSIDE_PID_U3H_INPUT_TARGET; backside_params.output_min = BACKSIDE_PID_U3H_OUTPUT_MIN; backside_params.interval = BACKSIDE_PID_INTERVAL; backside_params.G_p = BACKSIDE_PID_G_p; backside_params.G_r = BACKSIDE_PID_G_r; backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX; backside_params.additive = 1; } else { backside_params.G_d = BACKSIDE_PID_U3_G_d; backside_params.input_target = BACKSIDE_PID_U3_INPUT_TARGET; backside_params.output_min = BACKSIDE_PID_U3_OUTPUT_MIN; backside_params.interval = BACKSIDE_PID_INTERVAL; backside_params.G_p = BACKSIDE_PID_G_p; backside_params.G_r = BACKSIDE_PID_G_r; backside_params.output_max = BACKSIDE_PID_OUTPUT_MAX; backside_params.additive = 1; } state->ticks = 1; state->first = 1; state->pwm = 50; state->monitor = attach_i2c_chip(BACKSIDE_MAX_ID, "backside_temp"); if (state->monitor == NULL) return -ENODEV; err = device_create_file(&of_dev->dev, &dev_attr_backside_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_backside_fan_pwm); if (err) printk(KERN_WARNING "Failed to create attribute file(s)" " for backside fan\n"); return 0; } /* * Dispose of the state data for the backside control loop */ static void dispose_backside_state(struct backside_pid_state *state) { if (state->monitor == NULL) return; device_remove_file(&of_dev->dev, &dev_attr_backside_temperature); device_remove_file(&of_dev->dev, &dev_attr_backside_fan_pwm); state->monitor = NULL; } /* * Drives bay fan control loop */ static void do_monitor_drives(struct drives_pid_state *state) { s32 temp, integral, derivative; s64 integ_p, deriv_p, prop_p, sum; int i, rc; if (--state->ticks != 0) return; state->ticks = DRIVES_PID_INTERVAL; DBG("drives:\n"); /* Check fan status */ rc = get_rpm_fan(DRIVES_FAN_RPM_INDEX, !RPM_PID_USE_ACTUAL_SPEED); if (rc < 0) { printk(KERN_WARNING "Error %d reading drives fan !\n", rc); /* XXX What do we do now ? 
*/ } else state->rpm = rc; DBG(" current rpm: %d\n", state->rpm); /* Get some sensor readings */ temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor, DS1775_TEMP)) << 8; state->last_temp = temp; DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(DRIVES_PID_INPUT_TARGET)); /* Store temperature and error in history array */ state->cur_sample = (state->cur_sample + 1) % DRIVES_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - DRIVES_PID_INPUT_TARGET; /* If first loop, fill the history table */ if (state->first) { for (i = 0; i < (DRIVES_PID_HISTORY_SIZE - 1); i++) { state->cur_sample = (state->cur_sample + 1) % DRIVES_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - DRIVES_PID_INPUT_TARGET; } state->first = 0; } /* Calculate the integral term */ sum = 0; integral = 0; for (i = 0; i < DRIVES_PID_HISTORY_SIZE; i++) integral += state->error_history[i]; integral *= DRIVES_PID_INTERVAL; DBG(" integral: %08x\n", integral); integ_p = ((s64)DRIVES_PID_G_r) * (s64)integral; DBG(" integ_p: %d\n", (int)(integ_p >> 36)); sum += integ_p; /* Calculate the derivative term */ derivative = state->error_history[state->cur_sample] - state->error_history[(state->cur_sample + DRIVES_PID_HISTORY_SIZE - 1) % DRIVES_PID_HISTORY_SIZE]; derivative /= DRIVES_PID_INTERVAL; deriv_p = ((s64)DRIVES_PID_G_d) * (s64)derivative; DBG(" deriv_p: %d\n", (int)(deriv_p >> 36)); sum += deriv_p; /* Calculate the proportional term */ prop_p = ((s64)DRIVES_PID_G_p) * (s64)(state->error_history[state->cur_sample]); DBG(" prop_p: %d\n", (int)(prop_p >> 36)); sum += prop_p; /* Scale sum */ sum >>= 36; DBG(" sum: %d\n", (int)sum); state->rpm += (s32)sum; state->rpm = max(state->rpm, DRIVES_PID_OUTPUT_MIN); state->rpm = min(state->rpm, DRIVES_PID_OUTPUT_MAX); DBG("** DRIVES RPM: %d\n", (int)state->rpm); set_rpm_fan(DRIVES_FAN_RPM_INDEX, state->rpm); } /* 
* Initialize the state structure for the drives bay fan control loop */ static int init_drives_state(struct drives_pid_state *state) { int err; state->ticks = 1; state->first = 1; state->rpm = 1000; state->monitor = attach_i2c_chip(DRIVES_DALLAS_ID, "drives_temp"); if (state->monitor == NULL) return -ENODEV; err = device_create_file(&of_dev->dev, &dev_attr_drives_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_drives_fan_rpm); if (err) printk(KERN_WARNING "Failed to create attribute file(s)" " for drives bay fan\n"); return 0; } /* * Dispose of the state data for the drives control loop */ static void dispose_drives_state(struct drives_pid_state *state) { if (state->monitor == NULL) return; device_remove_file(&of_dev->dev, &dev_attr_drives_temperature); device_remove_file(&of_dev->dev, &dev_attr_drives_fan_rpm); state->monitor = NULL; } /* * DIMMs temp control loop */ static void do_monitor_dimms(struct dimm_pid_state *state) { s32 temp, integral, derivative, fan_min; s64 integ_p, deriv_p, prop_p, sum; int i; if (--state->ticks != 0) return; state->ticks = DIMM_PID_INTERVAL; DBG("DIMM:\n"); DBG(" current value: %d\n", state->output); temp = read_lm87_reg(state->monitor, LM87_INT_TEMP); if (temp < 0) return; temp <<= 16; state->last_temp = temp; DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(DIMM_PID_INPUT_TARGET)); /* Store temperature and error in history array */ state->cur_sample = (state->cur_sample + 1) % DIMM_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - DIMM_PID_INPUT_TARGET; /* If first loop, fill the history table */ if (state->first) { for (i = 0; i < (DIMM_PID_HISTORY_SIZE - 1); i++) { state->cur_sample = (state->cur_sample + 1) % DIMM_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - DIMM_PID_INPUT_TARGET; } state->first = 0; } /* Calculate the integral term */ sum = 0; 
integral = 0; for (i = 0; i < DIMM_PID_HISTORY_SIZE; i++) integral += state->error_history[i]; integral *= DIMM_PID_INTERVAL; DBG(" integral: %08x\n", integral); integ_p = ((s64)DIMM_PID_G_r) * (s64)integral; DBG(" integ_p: %d\n", (int)(integ_p >> 36)); sum += integ_p; /* Calculate the derivative term */ derivative = state->error_history[state->cur_sample] - state->error_history[(state->cur_sample + DIMM_PID_HISTORY_SIZE - 1) % DIMM_PID_HISTORY_SIZE]; derivative /= DIMM_PID_INTERVAL; deriv_p = ((s64)DIMM_PID_G_d) * (s64)derivative; DBG(" deriv_p: %d\n", (int)(deriv_p >> 36)); sum += deriv_p; /* Calculate the proportional term */ prop_p = ((s64)DIMM_PID_G_p) * (s64)(state->error_history[state->cur_sample]); DBG(" prop_p: %d\n", (int)(prop_p >> 36)); sum += prop_p; /* Scale sum */ sum >>= 36; DBG(" sum: %d\n", (int)sum); state->output = (s32)sum; state->output = max(state->output, DIMM_PID_OUTPUT_MIN); state->output = min(state->output, DIMM_PID_OUTPUT_MAX); dimm_output_clamp = state->output; DBG("** DIMM clamp value: %d\n", (int)state->output); /* Backside PID is only every 5 seconds, force backside fan clamping now */ fan_min = (dimm_output_clamp * 100) / 14000; fan_min = max(fan_min, backside_params.output_min); if (backside_state.pwm < fan_min) { backside_state.pwm = fan_min; DBG(" -> applying clamp to backside fan now: %d !\n", fan_min); set_pwm_fan(BACKSIDE_FAN_PWM_INDEX, fan_min); } } /* * Initialize the state structure for the DIMM temp control loop */ static int init_dimms_state(struct dimm_pid_state *state) { state->ticks = 1; state->first = 1; state->output = 4000; state->monitor = attach_i2c_chip(XSERVE_DIMMS_LM87, "dimms_temp"); if (state->monitor == NULL) return -ENODEV; if (device_create_file(&of_dev->dev, &dev_attr_dimms_temperature)) printk(KERN_WARNING "Failed to create attribute file" " for DIMM temperature\n"); return 0; } /* * Dispose of the state data for the DIMM control loop */ static void dispose_dimms_state(struct dimm_pid_state *state) { if 
(state->monitor == NULL) return; device_remove_file(&of_dev->dev, &dev_attr_dimms_temperature); state->monitor = NULL; } /* * Slots fan control loop */ static void do_monitor_slots(struct slots_pid_state *state) { s32 temp, integral, derivative; s64 integ_p, deriv_p, prop_p, sum; int i, rc; if (--state->ticks != 0) return; state->ticks = SLOTS_PID_INTERVAL; DBG("slots:\n"); /* Check fan status */ rc = get_pwm_fan(SLOTS_FAN_PWM_INDEX); if (rc < 0) { printk(KERN_WARNING "Error %d reading slots fan !\n", rc); /* XXX What do we do now ? */ } else state->pwm = rc; DBG(" current pwm: %d\n", state->pwm); /* Get some sensor readings */ temp = le16_to_cpu(i2c_smbus_read_word_data(state->monitor, DS1775_TEMP)) << 8; state->last_temp = temp; DBG(" temp: %d.%03d, target: %d.%03d\n", FIX32TOPRINT(temp), FIX32TOPRINT(SLOTS_PID_INPUT_TARGET)); /* Store temperature and error in history array */ state->cur_sample = (state->cur_sample + 1) % SLOTS_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - SLOTS_PID_INPUT_TARGET; /* If first loop, fill the history table */ if (state->first) { for (i = 0; i < (SLOTS_PID_HISTORY_SIZE - 1); i++) { state->cur_sample = (state->cur_sample + 1) % SLOTS_PID_HISTORY_SIZE; state->sample_history[state->cur_sample] = temp; state->error_history[state->cur_sample] = temp - SLOTS_PID_INPUT_TARGET; } state->first = 0; } /* Calculate the integral term */ sum = 0; integral = 0; for (i = 0; i < SLOTS_PID_HISTORY_SIZE; i++) integral += state->error_history[i]; integral *= SLOTS_PID_INTERVAL; DBG(" integral: %08x\n", integral); integ_p = ((s64)SLOTS_PID_G_r) * (s64)integral; DBG(" integ_p: %d\n", (int)(integ_p >> 36)); sum += integ_p; /* Calculate the derivative term */ derivative = state->error_history[state->cur_sample] - state->error_history[(state->cur_sample + SLOTS_PID_HISTORY_SIZE - 1) % SLOTS_PID_HISTORY_SIZE]; derivative /= SLOTS_PID_INTERVAL; deriv_p = ((s64)SLOTS_PID_G_d) * 
(s64)derivative; DBG(" deriv_p: %d\n", (int)(deriv_p >> 36)); sum += deriv_p; /* Calculate the proportional term */ prop_p = ((s64)SLOTS_PID_G_p) * (s64)(state->error_history[state->cur_sample]); DBG(" prop_p: %d\n", (int)(prop_p >> 36)); sum += prop_p; /* Scale sum */ sum >>= 36; DBG(" sum: %d\n", (int)sum); state->pwm = (s32)sum; state->pwm = max(state->pwm, SLOTS_PID_OUTPUT_MIN); state->pwm = min(state->pwm, SLOTS_PID_OUTPUT_MAX); DBG("** DRIVES PWM: %d\n", (int)state->pwm); set_pwm_fan(SLOTS_FAN_PWM_INDEX, state->pwm); } /* * Initialize the state structure for the slots bay fan control loop */ static int init_slots_state(struct slots_pid_state *state) { int err; state->ticks = 1; state->first = 1; state->pwm = 50; state->monitor = attach_i2c_chip(XSERVE_SLOTS_LM75, "slots_temp"); if (state->monitor == NULL) return -ENODEV; err = device_create_file(&of_dev->dev, &dev_attr_slots_temperature); err |= device_create_file(&of_dev->dev, &dev_attr_slots_fan_pwm); if (err) printk(KERN_WARNING "Failed to create attribute file(s)" " for slots bay fan\n"); return 0; } /* * Dispose of the state data for the slots control loop */ static void dispose_slots_state(struct slots_pid_state *state) { if (state->monitor == NULL) return; device_remove_file(&of_dev->dev, &dev_attr_slots_temperature); device_remove_file(&of_dev->dev, &dev_attr_slots_fan_pwm); state->monitor = NULL; } static int call_critical_overtemp(void) { char *argv[] = { critical_overtemp_path, NULL }; static char *envp[] = { "HOME=/", "TERM=linux", "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL }; return call_usermodehelper(critical_overtemp_path, argv, envp, UMH_WAIT_EXEC); } /* * Here's the kernel thread that calls the various control loops */ static int main_control_loop(void *x) { DBG("main_control_loop started\n"); mutex_lock(&driver_lock); if (start_fcu() < 0) { printk(KERN_ERR "kfand: failed to start FCU\n"); mutex_unlock(&driver_lock); goto out; } /* Set the PCI fan once for now on non-RackMac */ if (!rackmac) 
set_pwm_fan(SLOTS_FAN_PWM_INDEX, SLOTS_FAN_DEFAULT_PWM); /* Initialize ADCs */ initialize_adc(&processor_state[0]); if (processor_state[1].monitor != NULL) initialize_adc(&processor_state[1]); fcu_tickle_ticks = FCU_TICKLE_TICKS; mutex_unlock(&driver_lock); while (state == state_attached) { unsigned long elapsed, start; start = jiffies; mutex_lock(&driver_lock); /* Tickle the FCU just in case */ if (--fcu_tickle_ticks < 0) { fcu_tickle_ticks = FCU_TICKLE_TICKS; tickle_fcu(); } /* First, we always calculate the new DIMMs state on an Xserve */ if (rackmac) do_monitor_dimms(&dimms_state); /* Then, the CPUs */ if (cpu_pid_type == CPU_PID_TYPE_COMBINED) do_monitor_cpu_combined(); else if (cpu_pid_type == CPU_PID_TYPE_RACKMAC) { do_monitor_cpu_rack(&processor_state[0]); if (processor_state[1].monitor != NULL) do_monitor_cpu_rack(&processor_state[1]); // better deal with UP } else { do_monitor_cpu_split(&processor_state[0]); if (processor_state[1].monitor != NULL) do_monitor_cpu_split(&processor_state[1]); // better deal with UP } /* Then, the rest */ do_monitor_backside(&backside_state); if (rackmac) do_monitor_slots(&slots_state); else do_monitor_drives(&drives_state); mutex_unlock(&driver_lock); if (critical_state == 1) { printk(KERN_WARNING "Temperature control detected a critical condition\n"); printk(KERN_WARNING "Attempting to shut down...\n"); if (call_critical_overtemp()) { printk(KERN_WARNING "Can't call %s, power off now!\n", critical_overtemp_path); machine_power_off(); } } if (critical_state > 0) critical_state++; if (critical_state > MAX_CRITICAL_STATE) { printk(KERN_WARNING "Shutdown timed out, power off now !\n"); machine_power_off(); } // FIXME: Deal with signals elapsed = jiffies - start; if (elapsed < HZ) schedule_timeout_interruptible(HZ - elapsed); } out: DBG("main_control_loop ended\n"); ctrl_task = 0; complete_and_exit(&ctrl_complete, 0); } /* * Dispose the control loops when tearing down */ static void dispose_control_loops(void) { 
dispose_processor_state(&processor_state[0]); dispose_processor_state(&processor_state[1]); dispose_backside_state(&backside_state); dispose_drives_state(&drives_state); dispose_slots_state(&slots_state); dispose_dimms_state(&dimms_state); } /* * Create the control loops. U3-0 i2c bus is up, so we can now * get to the various sensors */ static int create_control_loops(void) { struct device_node *np; /* Count CPUs from the device-tree, we don't care how many are * actually used by Linux */ cpu_count = 0; for (np = NULL; NULL != (np = of_find_node_by_type(np, "cpu"));) cpu_count++; DBG("counted %d CPUs in the device-tree\n", cpu_count); /* Decide the type of PID algorithm to use based on the presence of * the pumps, though that may not be the best way, that is good enough * for now */ if (rackmac) cpu_pid_type = CPU_PID_TYPE_RACKMAC; else if (of_machine_is_compatible("PowerMac7,3") && (cpu_count > 1) && fcu_fans[CPUA_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID && fcu_fans[CPUB_PUMP_RPM_INDEX].id != FCU_FAN_ABSENT_ID) { printk(KERN_INFO "Liquid cooling pumps detected, using new algorithm !\n"); cpu_pid_type = CPU_PID_TYPE_COMBINED; } else cpu_pid_type = CPU_PID_TYPE_SPLIT; /* Create control loops for everything. 
If any fail, everything * fails */ if (init_processor_state(&processor_state[0], 0)) goto fail; if (cpu_pid_type == CPU_PID_TYPE_COMBINED) fetch_cpu_pumps_minmax(); if (cpu_count > 1 && init_processor_state(&processor_state[1], 1)) goto fail; if (init_backside_state(&backside_state)) goto fail; if (rackmac && init_dimms_state(&dimms_state)) goto fail; if (rackmac && init_slots_state(&slots_state)) goto fail; if (!rackmac && init_drives_state(&drives_state)) goto fail; DBG("all control loops up !\n"); return 0; fail: DBG("failure creating control loops, disposing\n"); dispose_control_loops(); return -ENODEV; } /* * Start the control loops after everything is up, that is create * the thread that will make them run */ static void start_control_loops(void) { init_completion(&ctrl_complete); ctrl_task = kthread_run(main_control_loop, NULL, "kfand"); } /* * Stop the control loops when tearing down */ static void stop_control_loops(void) { if (ctrl_task) wait_for_completion(&ctrl_complete); } /* * Attach to the i2c FCU after detecting U3-1 bus */ static int attach_fcu(void) { fcu = attach_i2c_chip(FAN_CTRLER_ID, "fcu"); if (fcu == NULL) return -ENODEV; DBG("FCU attached\n"); return 0; } /* * Detach from the i2c FCU when tearing down */ static void detach_fcu(void) { fcu = NULL; } /* * Attach to the i2c controller. 
We probe the various chips based * on the device-tree nodes and build everything for the driver to * run, we then kick the driver monitoring thread */ static int therm_pm72_attach(struct i2c_adapter *adapter) { mutex_lock(&driver_lock); /* Check state */ if (state == state_detached) state = state_attaching; if (state != state_attaching) { mutex_unlock(&driver_lock); return 0; } /* Check if we are looking for one of these */ if (u3_0 == NULL && !strcmp(adapter->name, "u3 0")) { u3_0 = adapter; DBG("found U3-0\n"); if (k2 || !rackmac) if (create_control_loops()) u3_0 = NULL; } else if (u3_1 == NULL && !strcmp(adapter->name, "u3 1")) { u3_1 = adapter; DBG("found U3-1, attaching FCU\n"); if (attach_fcu()) u3_1 = NULL; } else if (k2 == NULL && !strcmp(adapter->name, "mac-io 0")) { k2 = adapter; DBG("Found K2\n"); if (u3_0 && rackmac) if (create_control_loops()) k2 = NULL; } /* We got all we need, start control loops */ if (u3_0 != NULL && u3_1 != NULL && (k2 || !rackmac)) { DBG("everything up, starting control loops\n"); state = state_attached; start_control_loops(); } mutex_unlock(&driver_lock); return 0; } static int therm_pm72_probe(struct i2c_client *client, const struct i2c_device_id *id) { /* Always succeed, the real work was done in therm_pm72_attach() */ return 0; } /* * Called when any of the devices which participates into thermal management * is going away. 
*/ static int therm_pm72_remove(struct i2c_client *client) { struct i2c_adapter *adapter = client->adapter; mutex_lock(&driver_lock); if (state != state_detached) state = state_detaching; /* Stop control loops if any */ DBG("stopping control loops\n"); mutex_unlock(&driver_lock); stop_control_loops(); mutex_lock(&driver_lock); if (u3_0 != NULL && !strcmp(adapter->name, "u3 0")) { DBG("lost U3-0, disposing control loops\n"); dispose_control_loops(); u3_0 = NULL; } if (u3_1 != NULL && !strcmp(adapter->name, "u3 1")) { DBG("lost U3-1, detaching FCU\n"); detach_fcu(); u3_1 = NULL; } if (u3_0 == NULL && u3_1 == NULL) state = state_detached; mutex_unlock(&driver_lock); return 0; } /* * i2c_driver structure to attach to the host i2c controller */ static const struct i2c_device_id therm_pm72_id[] = { /* * Fake device name, thermal management is done by several * chips but we don't need to differentiate between them at * this point. */ { "therm_pm72", 0 }, { } }; static struct i2c_driver therm_pm72_driver = { .driver = { .name = "therm_pm72", }, .attach_adapter = therm_pm72_attach, .probe = therm_pm72_probe, .remove = therm_pm72_remove, .id_table = therm_pm72_id, }; static int fan_check_loc_match(const char *loc, int fan) { char tmp[64]; char *c, *e; strlcpy(tmp, fcu_fans[fan].loc, 64); c = tmp; for (;;) { e = strchr(c, ','); if (e) *e = 0; if (strcmp(loc, c) == 0) return 1; if (e == NULL) break; c = e + 1; } return 0; } static void fcu_lookup_fans(struct device_node *fcu_node) { struct device_node *np = NULL; int i; /* The table is filled by default with values that are suitable * for the old machines without device-tree informations. 
We scan * the device-tree and override those values with whatever is * there */ DBG("Looking up FCU controls in device-tree...\n"); while ((np = of_get_next_child(fcu_node, np)) != NULL) { int type = -1; const char *loc; const u32 *reg; DBG(" control: %s, type: %s\n", np->name, np->type); /* Detect control type */ if (!strcmp(np->type, "fan-rpm-control") || !strcmp(np->type, "fan-rpm")) type = FCU_FAN_RPM; if (!strcmp(np->type, "fan-pwm-control") || !strcmp(np->type, "fan-pwm")) type = FCU_FAN_PWM; /* Only care about fans for now */ if (type == -1) continue; /* Lookup for a matching location */ loc = of_get_property(np, "location", NULL); reg = of_get_property(np, "reg", NULL); if (loc == NULL || reg == NULL) continue; DBG(" matching location: %s, reg: 0x%08x\n", loc, *reg); for (i = 0; i < FCU_FAN_COUNT; i++) { int fan_id; if (!fan_check_loc_match(loc, i)) continue; DBG(" location match, index: %d\n", i); fcu_fans[i].id = FCU_FAN_ABSENT_ID; if (type != fcu_fans[i].type) { printk(KERN_WARNING "therm_pm72: Fan type mismatch " "in device-tree for %s\n", np->full_name); break; } if (type == FCU_FAN_RPM) fan_id = ((*reg) - 0x10) / 2; else fan_id = ((*reg) - 0x30) / 2; if (fan_id > 7) { printk(KERN_WARNING "therm_pm72: Can't parse " "fan ID in device-tree for %s\n", np->full_name); break; } DBG(" fan id -> %d, type -> %d\n", fan_id, type); fcu_fans[i].id = fan_id; } } /* Now dump the array */ printk(KERN_INFO "Detected fan controls:\n"); for (i = 0; i < FCU_FAN_COUNT; i++) { if (fcu_fans[i].id == FCU_FAN_ABSENT_ID) continue; printk(KERN_INFO " %d: %s fan, id %d, location: %s\n", i, fcu_fans[i].type == FCU_FAN_RPM ? 
"RPM" : "PWM", fcu_fans[i].id, fcu_fans[i].loc); } } static int fcu_of_probe(struct platform_device* dev) { state = state_detached; of_dev = dev; dev_info(&dev->dev, "PowerMac G5 Thermal control driver %s\n", VERSION); /* Lookup the fans in the device tree */ fcu_lookup_fans(dev->dev.of_node); /* Add the driver */ return i2c_add_driver(&therm_pm72_driver); } static int fcu_of_remove(struct platform_device* dev) { i2c_del_driver(&therm_pm72_driver); return 0; } static const struct of_device_id fcu_match[] = { { .type = "fcu", }, {}, }; MODULE_DEVICE_TABLE(of, fcu_match); static struct platform_driver fcu_of_platform_driver = { .driver = { .name = "temperature", .owner = THIS_MODULE, .of_match_table = fcu_match, }, .probe = fcu_of_probe, .remove = fcu_of_remove }; /* * Check machine type, attach to i2c controller */ static int __init therm_pm72_init(void) { rackmac = of_machine_is_compatible("RackMac3,1"); if (!of_machine_is_compatible("PowerMac7,2") && !of_machine_is_compatible("PowerMac7,3") && !rackmac) return -ENODEV; return platform_driver_register(&fcu_of_platform_driver); } static void __exit therm_pm72_exit(void) { platform_driver_unregister(&fcu_of_platform_driver); } module_init(therm_pm72_init); module_exit(therm_pm72_exit); MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>"); MODULE_DESCRIPTION("Driver for Apple's PowerMac G5 thermal control"); MODULE_LICENSE("GPL");
gpl-2.0
aatjitra/PR25
arch/powerpc/boot/of.c
11554
1965
/* * Copyright (C) Paul Mackerras 1997. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "of.h" /* Value picked to match that used by yaboot */ #define PROG_START 0x01400000 /* only used on 64-bit systems */ #define RAM_END (512<<20) /* Fixme: use OF */ #define ONE_MB 0x100000 static unsigned long claim_base; static void *of_try_claim(unsigned long size) { unsigned long addr = 0; if (claim_base == 0) claim_base = _ALIGN_UP((unsigned long)_end, ONE_MB); for(; claim_base < RAM_END; claim_base += ONE_MB) { #ifdef DEBUG printf(" trying: 0x%08lx\n\r", claim_base); #endif addr = (unsigned long)of_claim(claim_base, size, 0); if ((void *)addr != (void *)-1) break; } if (addr == 0) return NULL; claim_base = PAGE_ALIGN(claim_base + size); return (void *)addr; } static void of_image_hdr(const void *hdr) { const Elf64_Ehdr *elf64 = hdr; if (elf64->e_ident[EI_CLASS] == ELFCLASS64) { /* * Maintain a "magic" minimum address. This keeps some older * firmware platforms running. */ if (claim_base < PROG_START) claim_base = PROG_START; } } void platform_init(unsigned long a1, unsigned long a2, void *promptr) { platform_ops.image_hdr = of_image_hdr; platform_ops.malloc = of_try_claim; platform_ops.exit = of_exit; platform_ops.vmlinux_alloc = of_vmlinux_alloc; dt_ops.finddevice = of_finddevice; dt_ops.getprop = of_getprop; dt_ops.setprop = of_setprop; of_console_init(); of_init(promptr); loader_info.promptr = promptr; if (a1 && a2 && a2 != 0xdeadbeef) { loader_info.initrd_addr = a1; loader_info.initrd_size = a2; } }
gpl-2.0
Satius/pia-linux-kernel
drivers/platform/x86/acer-wmi.c
35
48573
/* * Acer WMI Laptop Extras * * Copyright (C) 2007-2009 Carlos Corbacho <carlos@strangeworlds.co.uk> * * Based on acer_acpi: * Copyright (C) 2005-2007 E.M. Smith * Copyright (C) 2007-2008 Carlos Corbacho <cathectic@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/dmi.h> #include <linux/fb.h> #include <linux/backlight.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/i8042.h> #include <linux/rfkill.h> #include <linux/workqueue.h> #include <linux/debugfs.h> #include <linux/slab.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <acpi/acpi_drivers.h> MODULE_AUTHOR("Carlos Corbacho"); MODULE_DESCRIPTION("Acer Laptop WMI Extras Driver"); MODULE_LICENSE("GPL"); /* * Magic Number * Meaning is unknown - this number is required for writing to ACPI for AMW0 * (it's also used in acerhk when directly accessing the BIOS) */ #define ACER_AMW0_WRITE 0x9610 /* * Bit masks for the AMW0 interface */ #define ACER_AMW0_WIRELESS_MASK 0x35 #define ACER_AMW0_BLUETOOTH_MASK 0x34 #define ACER_AMW0_MAILLED_MASK 0x31 /* * Method IDs for WMID interface */ #define ACER_WMID_GET_WIRELESS_METHODID 1 #define 
ACER_WMID_GET_BLUETOOTH_METHODID 2 #define ACER_WMID_GET_BRIGHTNESS_METHODID 3 #define ACER_WMID_SET_WIRELESS_METHODID 4 #define ACER_WMID_SET_BLUETOOTH_METHODID 5 #define ACER_WMID_SET_BRIGHTNESS_METHODID 6 #define ACER_WMID_GET_THREEG_METHODID 10 #define ACER_WMID_SET_THREEG_METHODID 11 /* * Acer ACPI method GUIDs */ #define AMW0_GUID1 "67C3371D-95A3-4C37-BB61-DD47B491DAAB" #define AMW0_GUID2 "431F16ED-0C2B-444C-B267-27DEB140CF9C" #define WMID_GUID1 "6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3" #define WMID_GUID2 "95764E09-FB56-4E83-B31A-37761F60994A" #define WMID_GUID3 "61EF69EA-865C-4BC3-A502-A0DEBA0CB531" /* * Acer ACPI event GUIDs */ #define ACERWMID_EVENT_GUID "676AA15E-6A47-4D9F-A2CC-1E6D18D14026" MODULE_ALIAS("wmi:67C3371D-95A3-4C37-BB61-DD47B491DAAB"); MODULE_ALIAS("wmi:6AF4F258-B401-42FD-BE91-3D4AC2D7C0D3"); MODULE_ALIAS("wmi:676AA15E-6A47-4D9F-A2CC-1E6D18D14026"); enum acer_wmi_event_ids { WMID_HOTKEY_EVENT = 0x1, }; static const struct key_entry acer_wmi_keymap[] = { {KE_KEY, 0x01, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x03, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x04, {KEY_WLAN} }, /* WiFi */ {KE_KEY, 0x12, {KEY_BLUETOOTH} }, /* BT */ {KE_KEY, 0x21, {KEY_PROG1} }, /* Backup */ {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */ {KE_IGNORE, 0x41, {KEY_MUTE} }, {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} }, {KE_IGNORE, 0x43, {KEY_NEXTSONG} }, {KE_IGNORE, 0x44, {KEY_PLAYPAUSE} }, {KE_IGNORE, 0x45, {KEY_STOP} }, {KE_IGNORE, 0x48, {KEY_VOLUMEUP} }, {KE_IGNORE, 0x49, {KEY_VOLUMEDOWN} }, {KE_IGNORE, 0x61, {KEY_SWITCHVIDEOMODE} }, {KE_IGNORE, 0x62, {KEY_BRIGHTNESSUP} }, {KE_IGNORE, 0x63, {KEY_BRIGHTNESSDOWN} }, {KE_KEY, 0x64, {KEY_SWITCHVIDEOMODE} }, /* Display Switch */ {KE_IGNORE, 0x81, {KEY_SLEEP} }, {KE_KEY, 0x82, {KEY_TOUCHPAD_TOGGLE} }, /* Touch Pad On/Off */ {KE_IGNORE, 0x83, {KEY_TOUCHPAD_TOGGLE} }, {KE_END, 0} }; static struct 
input_dev *acer_wmi_input_dev; struct event_return_value { u8 function; u8 key_num; u16 device_state; u32 reserved; } __attribute__((packed)); /* * GUID3 Get Device Status device flags */ #define ACER_WMID3_GDS_WIRELESS (1<<0) /* WiFi */ #define ACER_WMID3_GDS_THREEG (1<<6) /* 3G */ #define ACER_WMID3_GDS_WIMAX (1<<7) /* WiMAX */ #define ACER_WMID3_GDS_BLUETOOTH (1<<11) /* BT */ struct lm_input_params { u8 function_num; /* Function Number */ u16 commun_devices; /* Communication type devices default status */ u16 devices; /* Other type devices default status */ u8 lm_status; /* Launch Manager Status */ u16 reserved; } __attribute__((packed)); struct lm_return_value { u8 error_code; /* Error Code */ u8 ec_return_value; /* EC Return Value */ u16 reserved; } __attribute__((packed)); struct wmid3_gds_input_param { /* Get Device Status input parameter */ u8 function_num; /* Function Number */ u8 hotkey_number; /* Hotkey Number */ u16 devices; /* Get Device */ } __attribute__((packed)); struct wmid3_gds_return_value { /* Get Device Status return value*/ u8 error_code; /* Error Code */ u8 ec_return_value; /* EC Return Value */ u16 devices; /* Current Device Status */ u32 reserved; } __attribute__((packed)); struct hotkey_function_type_aa { u8 type; u8 length; u16 handle; u16 commun_func_bitmap; } __attribute__((packed)); /* * Interface capability flags */ #define ACER_CAP_MAILLED (1<<0) #define ACER_CAP_WIRELESS (1<<1) #define ACER_CAP_BLUETOOTH (1<<2) #define ACER_CAP_BRIGHTNESS (1<<3) #define ACER_CAP_THREEG (1<<4) #define ACER_CAP_ANY (0xFFFFFFFF) /* * Interface type flags */ enum interface_flags { ACER_AMW0, ACER_AMW0_V2, ACER_WMID, ACER_WMID_v2, }; #define ACER_DEFAULT_WIRELESS 0 #define ACER_DEFAULT_BLUETOOTH 0 #define ACER_DEFAULT_MAILLED 0 #define ACER_DEFAULT_THREEG 0 static int max_brightness = 0xF; static int mailled = -1; static int brightness = -1; static int threeg = -1; static int force_series; static bool ec_raw_mode; static bool has_type_aa; static u16 
commun_func_bitmap; module_param(mailled, int, 0444); module_param(brightness, int, 0444); module_param(threeg, int, 0444); module_param(force_series, int, 0444); module_param(ec_raw_mode, bool, 0444); MODULE_PARM_DESC(mailled, "Set initial state of Mail LED"); MODULE_PARM_DESC(brightness, "Set initial LCD backlight brightness"); MODULE_PARM_DESC(threeg, "Set initial state of 3G hardware"); MODULE_PARM_DESC(force_series, "Force a different laptop series"); MODULE_PARM_DESC(ec_raw_mode, "Enable EC raw mode"); struct acer_data { int mailled; int threeg; int brightness; }; struct acer_debug { struct dentry *root; struct dentry *devices; u32 wmid_devices; }; static struct rfkill *wireless_rfkill; static struct rfkill *bluetooth_rfkill; static struct rfkill *threeg_rfkill; static bool rfkill_inited; /* Each low-level interface must define at least some of the following */ struct wmi_interface { /* The WMI device type */ u32 type; /* The capabilities this interface provides */ u32 capability; /* Private data for the current interface */ struct acer_data data; /* debugfs entries associated with this interface */ struct acer_debug debug; }; /* The static interface pointer, points to the currently detected interface */ static struct wmi_interface *interface; /* * Embedded Controller quirks * Some laptops require us to directly access the EC to either enable or query * features that are not available through WMI. 
*/ struct quirk_entry { u8 wireless; u8 mailled; s8 brightness; u8 bluetooth; }; static struct quirk_entry *quirks; static void set_quirks(void) { if (!interface) return; if (quirks->mailled) interface->capability |= ACER_CAP_MAILLED; if (quirks->brightness) interface->capability |= ACER_CAP_BRIGHTNESS; } static int dmi_matched(const struct dmi_system_id *dmi) { quirks = dmi->driver_data; return 1; } static struct quirk_entry quirk_unknown = { }; static struct quirk_entry quirk_acer_aspire_1520 = { .brightness = -1, }; static struct quirk_entry quirk_acer_travelmate_2490 = { .mailled = 1, }; /* This AMW0 laptop has no bluetooth */ static struct quirk_entry quirk_medion_md_98300 = { .wireless = 1, }; static struct quirk_entry quirk_fujitsu_amilo_li_1718 = { .wireless = 2, }; static struct quirk_entry quirk_lenovo_ideapad_s205 = { .wireless = 3, }; /* The Aspire One has a dummy ACPI-WMI interface - disable it */ static struct dmi_system_id __devinitdata acer_blacklist[] = { { .ident = "Acer Aspire One (SSD)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "AOA110"), }, }, { .ident = "Acer Aspire One (HDD)", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "AOA150"), }, }, {} }; static struct dmi_system_id acer_quirks[] = { { .callback = dmi_matched, .ident = "Acer Aspire 1360", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1360"), }, .driver_data = &quirk_acer_aspire_1520, }, { .callback = dmi_matched, .ident = "Acer Aspire 1520", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 1520"), }, .driver_data = &quirk_acer_aspire_1520, }, { .callback = dmi_matched, .ident = "Acer Aspire 3100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3100"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 3610", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), 
DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 3610"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5100", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5100"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5610", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5610"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5630", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5630"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5650", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5650"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 5680", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 5680"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer Aspire 9110", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "Aspire 9110"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer TravelMate 2490", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 2490"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Acer TravelMate 4200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "Acer"), DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4200"), }, .driver_data = &quirk_acer_travelmate_2490, }, { .callback = dmi_matched, .ident = "Fujitsu Siemens Amilo Li 1718", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU SIEMENS"), DMI_MATCH(DMI_PRODUCT_NAME, "AMILO Li 1718"), }, .driver_data = &quirk_fujitsu_amilo_li_1718, }, { .callback = dmi_matched, .ident = "Medion MD 98300", .matches = { 
DMI_MATCH(DMI_SYS_VENDOR, "MEDION"), DMI_MATCH(DMI_PRODUCT_NAME, "WAM2030"), }, .driver_data = &quirk_medion_md_98300, }, { .callback = dmi_matched, .ident = "Lenovo Ideapad S205", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "10382LG"), }, .driver_data = &quirk_lenovo_ideapad_s205, }, { .callback = dmi_matched, .ident = "Lenovo 3000 N200", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"), DMI_MATCH(DMI_PRODUCT_NAME, "0687A31"), }, .driver_data = &quirk_fujitsu_amilo_li_1718, }, {} }; /* Find which quirks are needed for a particular vendor/ model pair */ static void find_quirks(void) { if (!force_series) { dmi_check_system(acer_quirks); } else if (force_series == 2490) { quirks = &quirk_acer_travelmate_2490; } if (quirks == NULL) quirks = &quirk_unknown; set_quirks(); } /* * General interface convenience methods */ static bool has_cap(u32 cap) { if ((interface->capability & cap) != 0) return 1; return 0; } /* * AMW0 (V1) interface */ struct wmab_args { u32 eax; u32 ebx; u32 ecx; u32 edx; }; struct wmab_ret { u32 eax; u32 ebx; u32 ecx; u32 edx; u32 eex; }; static acpi_status wmab_execute(struct wmab_args *regbuf, struct acpi_buffer *result) { struct acpi_buffer input; acpi_status status; input.length = sizeof(struct wmab_args); input.pointer = (u8 *)regbuf; status = wmi_evaluate_method(AMW0_GUID1, 1, 1, &input, result); return status; } static acpi_status AMW0_get_u32(u32 *value, u32 cap, struct wmi_interface *iface) { int err; u8 result; switch (cap) { case ACER_CAP_MAILLED: switch (quirks->mailled) { default: err = ec_read(0xA, &result); if (err) return AE_ERROR; *value = (result >> 7) & 0x1; return AE_OK; } break; case ACER_CAP_WIRELESS: switch (quirks->wireless) { case 1: err = ec_read(0x7B, &result); if (err) return AE_ERROR; *value = result & 0x1; return AE_OK; case 2: err = ec_read(0x71, &result); if (err) return AE_ERROR; *value = result & 0x1; return AE_OK; case 3: err = ec_read(0x78, &result); if (err) return AE_ERROR; 
*value = result & 0x1; return AE_OK; default: err = ec_read(0xA, &result); if (err) return AE_ERROR; *value = (result >> 2) & 0x1; return AE_OK; } break; case ACER_CAP_BLUETOOTH: switch (quirks->bluetooth) { default: err = ec_read(0xA, &result); if (err) return AE_ERROR; *value = (result >> 4) & 0x1; return AE_OK; } break; case ACER_CAP_BRIGHTNESS: switch (quirks->brightness) { default: err = ec_read(0x83, &result); if (err) return AE_ERROR; *value = result; return AE_OK; } break; default: return AE_ERROR; } return AE_OK; } static acpi_status AMW0_set_u32(u32 value, u32 cap, struct wmi_interface *iface) { struct wmab_args args; args.eax = ACER_AMW0_WRITE; args.ebx = value ? (1<<8) : 0; args.ecx = args.edx = 0; switch (cap) { case ACER_CAP_MAILLED: if (value > 1) return AE_BAD_PARAMETER; args.ebx |= ACER_AMW0_MAILLED_MASK; break; case ACER_CAP_WIRELESS: if (value > 1) return AE_BAD_PARAMETER; args.ebx |= ACER_AMW0_WIRELESS_MASK; break; case ACER_CAP_BLUETOOTH: if (value > 1) return AE_BAD_PARAMETER; args.ebx |= ACER_AMW0_BLUETOOTH_MASK; break; case ACER_CAP_BRIGHTNESS: if (value > max_brightness) return AE_BAD_PARAMETER; switch (quirks->brightness) { default: return ec_write(0x83, value); break; } default: return AE_ERROR; } /* Actually do the set */ return wmab_execute(&args, NULL); } static acpi_status AMW0_find_mailled(void) { struct wmab_args args; struct wmab_ret ret; acpi_status status = AE_OK; struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; args.eax = 0x86; args.ebx = args.ecx = args.edx = 0; status = wmab_execute(&args, &out); if (ACPI_FAILURE(status)) return status; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == sizeof(struct wmab_ret)) { ret = *((struct wmab_ret *) obj->buffer.pointer); } else { kfree(out.pointer); return AE_ERROR; } if (ret.eex & 0x1) interface->capability |= ACER_CAP_MAILLED; kfree(out.pointer); return AE_OK; } static int 
AMW0_set_cap_acpi_check_device_found; static acpi_status AMW0_set_cap_acpi_check_device_cb(acpi_handle handle, u32 level, void *context, void **retval) { AMW0_set_cap_acpi_check_device_found = 1; return AE_OK; } static const struct acpi_device_id norfkill_ids[] = { { "VPC2004", 0}, { "IBM0068", 0}, { "LEN0068", 0}, { "SNY5001", 0}, /* sony-laptop in charge */ { "", 0}, }; static int AMW0_set_cap_acpi_check_device(void) { const struct acpi_device_id *id; for (id = norfkill_ids; id->id[0]; id++) acpi_get_devices(id->id, AMW0_set_cap_acpi_check_device_cb, NULL, NULL); return AMW0_set_cap_acpi_check_device_found; } static acpi_status AMW0_set_capabilities(void) { struct wmab_args args; struct wmab_ret ret; acpi_status status; struct acpi_buffer out = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; /* * On laptops with this strange GUID (non Acer), normal probing doesn't * work. */ if (wmi_has_guid(AMW0_GUID2)) { if ((quirks != &quirk_unknown) || !AMW0_set_cap_acpi_check_device()) interface->capability |= ACER_CAP_WIRELESS; return AE_OK; } args.eax = ACER_AMW0_WRITE; args.ecx = args.edx = 0; args.ebx = 0xa2 << 8; args.ebx |= ACER_AMW0_WIRELESS_MASK; status = wmab_execute(&args, &out); if (ACPI_FAILURE(status)) return status; obj = out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == sizeof(struct wmab_ret)) { ret = *((struct wmab_ret *) obj->buffer.pointer); } else { status = AE_ERROR; goto out; } if (ret.eax & 0x1) interface->capability |= ACER_CAP_WIRELESS; args.ebx = 2 << 8; args.ebx |= ACER_AMW0_BLUETOOTH_MASK; /* * It's ok to use existing buffer for next wmab_execute call. * But we need to kfree(out.pointer) if next wmab_execute fail. 
*/ status = wmab_execute(&args, &out); if (ACPI_FAILURE(status)) goto out; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && obj->buffer.length == sizeof(struct wmab_ret)) { ret = *((struct wmab_ret *) obj->buffer.pointer); } else { status = AE_ERROR; goto out; } if (ret.eax & 0x1) interface->capability |= ACER_CAP_BLUETOOTH; /* * This appears to be safe to enable, since all Wistron based laptops * appear to use the same EC register for brightness, even if they * differ for wireless, etc */ if (quirks->brightness >= 0) interface->capability |= ACER_CAP_BRIGHTNESS; status = AE_OK; out: kfree(out.pointer); return status; } static struct wmi_interface AMW0_interface = { .type = ACER_AMW0, }; static struct wmi_interface AMW0_V2_interface = { .type = ACER_AMW0_V2, }; /* * New interface (The WMID interface) */ static acpi_status WMI_execute_u32(u32 method_id, u32 in, u32 *out) { struct acpi_buffer input = { (acpi_size) sizeof(u32), (void *)(&in) }; struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; u32 tmp; acpi_status status; status = wmi_evaluate_method(WMID_GUID1, 1, method_id, &input, &result); if (ACPI_FAILURE(status)) return status; obj = (union acpi_object *) result.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && (obj->buffer.length == sizeof(u32) || obj->buffer.length == sizeof(u64))) { tmp = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { tmp = (u32) obj->integer.value; } else { tmp = 0; } if (out) *out = tmp; kfree(result.pointer); return status; } static acpi_status WMID_get_u32(u32 *value, u32 cap, struct wmi_interface *iface) { acpi_status status; u8 tmp; u32 result, method_id = 0; switch (cap) { case ACER_CAP_WIRELESS: method_id = ACER_WMID_GET_WIRELESS_METHODID; break; case ACER_CAP_BLUETOOTH: method_id = ACER_WMID_GET_BLUETOOTH_METHODID; break; case ACER_CAP_BRIGHTNESS: method_id = ACER_WMID_GET_BRIGHTNESS_METHODID; break; case ACER_CAP_THREEG: method_id 
= ACER_WMID_GET_THREEG_METHODID; break; case ACER_CAP_MAILLED: if (quirks->mailled == 1) { ec_read(0x9f, &tmp); *value = tmp & 0x1; return 0; } default: return AE_ERROR; } status = WMI_execute_u32(method_id, 0, &result); if (ACPI_SUCCESS(status)) *value = (u8)result; return status; } static acpi_status WMID_set_u32(u32 value, u32 cap, struct wmi_interface *iface) { u32 method_id = 0; char param; switch (cap) { case ACER_CAP_BRIGHTNESS: if (value > max_brightness) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_BRIGHTNESS_METHODID; break; case ACER_CAP_WIRELESS: if (value > 1) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_WIRELESS_METHODID; break; case ACER_CAP_BLUETOOTH: if (value > 1) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_BLUETOOTH_METHODID; break; case ACER_CAP_THREEG: if (value > 1) return AE_BAD_PARAMETER; method_id = ACER_WMID_SET_THREEG_METHODID; break; case ACER_CAP_MAILLED: if (value > 1) return AE_BAD_PARAMETER; if (quirks->mailled == 1) { param = value ? 0x92 : 0x93; i8042_lock_chip(); i8042_command(&param, 0x1059); i8042_unlock_chip(); return 0; } break; default: return AE_ERROR; } return WMI_execute_u32(method_id, (u32)value, NULL); } static acpi_status wmid3_get_device_status(u32 *value, u16 device) { struct wmid3_gds_return_value return_value; acpi_status status; union acpi_object *obj; struct wmid3_gds_input_param params = { .function_num = 0x1, .hotkey_number = 0x01, .devices = device, }; struct acpi_buffer input = { sizeof(struct wmid3_gds_input_param), &params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 8) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } return_value = *((struct wmid3_gds_return_value 
*)obj->buffer.pointer); kfree(obj); if (return_value.error_code || return_value.ec_return_value) pr_warn("Get 0x%x Device Status failed: 0x%x - 0x%x\n", device, return_value.error_code, return_value.ec_return_value); else *value = !!(return_value.devices & device); return status; } static acpi_status wmid_v2_get_u32(u32 *value, u32 cap) { u16 device; switch (cap) { case ACER_CAP_WIRELESS: device = ACER_WMID3_GDS_WIRELESS; break; case ACER_CAP_BLUETOOTH: device = ACER_WMID3_GDS_BLUETOOTH; break; case ACER_CAP_THREEG: device = ACER_WMID3_GDS_THREEG; break; default: return AE_ERROR; } return wmid3_get_device_status(value, device); } static acpi_status wmid3_set_device_status(u32 value, u16 device) { struct wmid3_gds_return_value return_value; acpi_status status; union acpi_object *obj; u16 devices; struct wmid3_gds_input_param params = { .function_num = 0x1, .hotkey_number = 0x01, .devices = commun_func_bitmap, }; struct acpi_buffer input = { sizeof(struct wmid3_gds_input_param), &params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer output2 = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x2, &input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 8) { pr_warning("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer); kfree(obj); if (return_value.error_code || return_value.ec_return_value) { pr_warning("Get Current Device Status failed: " "0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); return status; } devices = return_value.devices; params.function_num = 0x2; params.hotkey_number = 0x01; params.devices = (value) ? 
(devices | device) : (devices & ~device); status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output2); if (ACPI_FAILURE(status)) return status; obj = output2.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 4) { pr_warning("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } return_value = *((struct wmid3_gds_return_value *)obj->buffer.pointer); kfree(obj); if (return_value.error_code || return_value.ec_return_value) pr_warning("Set Device Status failed: " "0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); return status; } static acpi_status wmid_v2_set_u32(u32 value, u32 cap) { u16 device; switch (cap) { case ACER_CAP_WIRELESS: device = ACER_WMID3_GDS_WIRELESS; break; case ACER_CAP_BLUETOOTH: device = ACER_WMID3_GDS_BLUETOOTH; break; case ACER_CAP_THREEG: device = ACER_WMID3_GDS_THREEG; break; default: return AE_ERROR; } return wmid3_set_device_status(value, device); } static void type_aa_dmi_decode(const struct dmi_header *header, void *dummy) { struct hotkey_function_type_aa *type_aa; /* We are looking for OEM-specific Type AAh */ if (header->type != 0xAA) return; has_type_aa = true; type_aa = (struct hotkey_function_type_aa *) header; pr_info("Function bitmap for Communication Button: 0x%x\n", type_aa->commun_func_bitmap); commun_func_bitmap = type_aa->commun_func_bitmap; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_WIRELESS) interface->capability |= ACER_CAP_WIRELESS; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_THREEG) interface->capability |= ACER_CAP_THREEG; if (type_aa->commun_func_bitmap & ACER_WMID3_GDS_BLUETOOTH) interface->capability |= ACER_CAP_BLUETOOTH; } static acpi_status WMID_set_capabilities(void) { struct acpi_buffer out = {ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; acpi_status status; u32 devices; status = wmi_query_block(WMID_GUID2, 1, &out); if (ACPI_FAILURE(status)) return status; 
obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && (obj->buffer.length == sizeof(u32) || obj->buffer.length == sizeof(u64))) { devices = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { devices = (u32) obj->integer.value; } else { kfree(out.pointer); return AE_ERROR; } pr_info("Function bitmap for Communication Device: 0x%x\n", devices); if (devices & 0x07) interface->capability |= ACER_CAP_WIRELESS; if (devices & 0x40) interface->capability |= ACER_CAP_THREEG; if (devices & 0x10) interface->capability |= ACER_CAP_BLUETOOTH; if (!(devices & 0x20)) max_brightness = 0x9; kfree(out.pointer); return status; } static struct wmi_interface wmid_interface = { .type = ACER_WMID, }; static struct wmi_interface wmid_v2_interface = { .type = ACER_WMID_v2, }; /* * Generic Device (interface-independent) */ static acpi_status get_u32(u32 *value, u32 cap) { acpi_status status = AE_ERROR; switch (interface->type) { case ACER_AMW0: status = AMW0_get_u32(value, cap, interface); break; case ACER_AMW0_V2: if (cap == ACER_CAP_MAILLED) { status = AMW0_get_u32(value, cap, interface); break; } case ACER_WMID: status = WMID_get_u32(value, cap, interface); break; case ACER_WMID_v2: if (cap & (ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) status = wmid_v2_get_u32(value, cap); else if (wmi_has_guid(WMID_GUID2)) status = WMID_get_u32(value, cap, interface); break; } return status; } static acpi_status set_u32(u32 value, u32 cap) { acpi_status status; if (interface->capability & cap) { switch (interface->type) { case ACER_AMW0: return AMW0_set_u32(value, cap, interface); case ACER_AMW0_V2: if (cap == ACER_CAP_MAILLED) return AMW0_set_u32(value, cap, interface); /* * On some models, some WMID methods don't toggle * properly. For those cases, we want to run the AMW0 * method afterwards to be certain we've really toggled * the device state. 
*/ if (cap == ACER_CAP_WIRELESS || cap == ACER_CAP_BLUETOOTH) { status = WMID_set_u32(value, cap, interface); if (ACPI_FAILURE(status)) return status; return AMW0_set_u32(value, cap, interface); } case ACER_WMID: return WMID_set_u32(value, cap, interface); case ACER_WMID_v2: if (cap & (ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) return wmid_v2_set_u32(value, cap); else if (wmi_has_guid(WMID_GUID2)) return WMID_set_u32(value, cap, interface); default: return AE_BAD_PARAMETER; } } return AE_BAD_PARAMETER; } static void __init acer_commandline_init(void) { /* * These will all fail silently if the value given is invalid, or the * capability isn't available on the given interface */ if (mailled >= 0) set_u32(mailled, ACER_CAP_MAILLED); if (!has_type_aa && threeg >= 0) set_u32(threeg, ACER_CAP_THREEG); if (brightness >= 0) set_u32(brightness, ACER_CAP_BRIGHTNESS); } /* * LED device (Mail LED only, no other LEDs known yet) */ static void mail_led_set(struct led_classdev *led_cdev, enum led_brightness value) { set_u32(value, ACER_CAP_MAILLED); } static struct led_classdev mail_led = { .name = "acer-wmi::mail", .brightness_set = mail_led_set, }; static int __devinit acer_led_init(struct device *dev) { return led_classdev_register(dev, &mail_led); } static void acer_led_exit(void) { set_u32(LED_OFF, ACER_CAP_MAILLED); led_classdev_unregister(&mail_led); } /* * Backlight device */ static struct backlight_device *acer_backlight_device; static int read_brightness(struct backlight_device *bd) { u32 value; get_u32(&value, ACER_CAP_BRIGHTNESS); return value; } static int update_bl_status(struct backlight_device *bd) { int intensity = bd->props.brightness; if (bd->props.power != FB_BLANK_UNBLANK) intensity = 0; if (bd->props.fb_blank != FB_BLANK_UNBLANK) intensity = 0; set_u32(intensity, ACER_CAP_BRIGHTNESS); return 0; } static const struct backlight_ops acer_bl_ops = { .get_brightness = read_brightness, .update_status = update_bl_status, }; static int __devinit 
acer_backlight_init(struct device *dev) { struct backlight_properties props; struct backlight_device *bd; memset(&props, 0, sizeof(struct backlight_properties)); props.type = BACKLIGHT_PLATFORM; props.max_brightness = max_brightness; bd = backlight_device_register("acer-wmi", dev, NULL, &acer_bl_ops, &props); if (IS_ERR(bd)) { pr_err("Could not register Acer backlight device\n"); acer_backlight_device = NULL; return PTR_ERR(bd); } acer_backlight_device = bd; bd->props.power = FB_BLANK_UNBLANK; bd->props.brightness = read_brightness(bd); backlight_update_status(bd); return 0; } static void acer_backlight_exit(void) { backlight_device_unregister(acer_backlight_device); } /* * Rfkill devices */ static void acer_rfkill_update(struct work_struct *ignored); static DECLARE_DELAYED_WORK(acer_rfkill_work, acer_rfkill_update); static void acer_rfkill_update(struct work_struct *ignored) { u32 state; acpi_status status; if (has_cap(ACER_CAP_WIRELESS)) { status = get_u32(&state, ACER_CAP_WIRELESS); if (ACPI_SUCCESS(status)) { if (quirks->wireless == 3) rfkill_set_hw_state(wireless_rfkill, !state); else rfkill_set_sw_state(wireless_rfkill, !state); } } if (has_cap(ACER_CAP_BLUETOOTH)) { status = get_u32(&state, ACER_CAP_BLUETOOTH); if (ACPI_SUCCESS(status)) rfkill_set_sw_state(bluetooth_rfkill, !state); } if (has_cap(ACER_CAP_THREEG) && wmi_has_guid(WMID_GUID3)) { status = get_u32(&state, ACER_WMID3_GDS_THREEG); if (ACPI_SUCCESS(status)) rfkill_set_sw_state(threeg_rfkill, !state); } schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); } static int acer_rfkill_set(void *data, bool blocked) { acpi_status status; u32 cap = (unsigned long)data; if (rfkill_inited) { status = set_u32(!blocked, cap); if (ACPI_FAILURE(status)) return -ENODEV; } return 0; } static const struct rfkill_ops acer_rfkill_ops = { .set_block = acer_rfkill_set, }; static struct rfkill *acer_rfkill_register(struct device *dev, enum rfkill_type type, char *name, u32 cap) { int err; struct rfkill 
*rfkill_dev; u32 state; acpi_status status; rfkill_dev = rfkill_alloc(name, dev, type, &acer_rfkill_ops, (void *)(unsigned long)cap); if (!rfkill_dev) return ERR_PTR(-ENOMEM); status = get_u32(&state, cap); err = rfkill_register(rfkill_dev); if (err) { rfkill_destroy(rfkill_dev); return ERR_PTR(err); } if (ACPI_SUCCESS(status)) rfkill_set_sw_state(rfkill_dev, !state); return rfkill_dev; } static int acer_rfkill_init(struct device *dev) { int err; if (has_cap(ACER_CAP_WIRELESS)) { wireless_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WLAN, "acer-wireless", ACER_CAP_WIRELESS); if (IS_ERR(wireless_rfkill)) { err = PTR_ERR(wireless_rfkill); goto error_wireless; } } if (has_cap(ACER_CAP_BLUETOOTH)) { bluetooth_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_BLUETOOTH, "acer-bluetooth", ACER_CAP_BLUETOOTH); if (IS_ERR(bluetooth_rfkill)) { err = PTR_ERR(bluetooth_rfkill); goto error_bluetooth; } } if (has_cap(ACER_CAP_THREEG)) { threeg_rfkill = acer_rfkill_register(dev, RFKILL_TYPE_WWAN, "acer-threeg", ACER_CAP_THREEG); if (IS_ERR(threeg_rfkill)) { err = PTR_ERR(threeg_rfkill); goto error_threeg; } } rfkill_inited = true; if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) && has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) schedule_delayed_work(&acer_rfkill_work, round_jiffies_relative(HZ)); return 0; error_threeg: if (has_cap(ACER_CAP_BLUETOOTH)) { rfkill_unregister(bluetooth_rfkill); rfkill_destroy(bluetooth_rfkill); } error_bluetooth: if (has_cap(ACER_CAP_WIRELESS)) { rfkill_unregister(wireless_rfkill); rfkill_destroy(wireless_rfkill); } error_wireless: return err; } static void acer_rfkill_exit(void) { if ((ec_raw_mode || !wmi_has_guid(ACERWMID_EVENT_GUID)) && has_cap(ACER_CAP_WIRELESS | ACER_CAP_BLUETOOTH | ACER_CAP_THREEG)) cancel_delayed_work_sync(&acer_rfkill_work); if (has_cap(ACER_CAP_WIRELESS)) { rfkill_unregister(wireless_rfkill); rfkill_destroy(wireless_rfkill); } if (has_cap(ACER_CAP_BLUETOOTH)) { rfkill_unregister(bluetooth_rfkill); 
rfkill_destroy(bluetooth_rfkill); } if (has_cap(ACER_CAP_THREEG)) { rfkill_unregister(threeg_rfkill); rfkill_destroy(threeg_rfkill); } return; } /* * sysfs interface */ static ssize_t show_bool_threeg(struct device *dev, struct device_attribute *attr, char *buf) { u32 result; \ acpi_status status; pr_info("This threeg sysfs will be removed in 2012" " - used by: %s\n", current->comm); status = get_u32(&result, ACER_CAP_THREEG); if (ACPI_SUCCESS(status)) return sprintf(buf, "%u\n", result); return sprintf(buf, "Read error\n"); } static ssize_t set_bool_threeg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u32 tmp = simple_strtoul(buf, NULL, 10); acpi_status status = set_u32(tmp, ACER_CAP_THREEG); pr_info("This threeg sysfs will be removed in 2012" " - used by: %s\n", current->comm); if (ACPI_FAILURE(status)) return -EINVAL; return count; } static DEVICE_ATTR(threeg, S_IRUGO | S_IWUSR, show_bool_threeg, set_bool_threeg); static ssize_t show_interface(struct device *dev, struct device_attribute *attr, char *buf) { pr_info("This interface sysfs will be removed in 2012" " - used by: %s\n", current->comm); switch (interface->type) { case ACER_AMW0: return sprintf(buf, "AMW0\n"); case ACER_AMW0_V2: return sprintf(buf, "AMW0 v2\n"); case ACER_WMID: return sprintf(buf, "WMID\n"); case ACER_WMID_v2: return sprintf(buf, "WMID v2\n"); default: return sprintf(buf, "Error!\n"); } } static DEVICE_ATTR(interface, S_IRUGO, show_interface, NULL); static void acer_wmi_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; struct event_return_value return_value; acpi_status status; u16 device_state; const struct key_entry *key; status = wmi_get_event_data(value, &response); if (status != AE_OK) { pr_warn("bad event status 0x%x\n", status); return; } obj = (union acpi_object *)response.pointer; if (!obj) return; if (obj->type != ACPI_TYPE_BUFFER) { pr_warn("Unknown response received 
%d\n", obj->type); kfree(obj); return; } if (obj->buffer.length != 8) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return; } return_value = *((struct event_return_value *)obj->buffer.pointer); kfree(obj); switch (return_value.function) { case WMID_HOTKEY_EVENT: device_state = return_value.device_state; pr_debug("device state: 0x%x\n", device_state); key = sparse_keymap_entry_from_scancode(acer_wmi_input_dev, return_value.key_num); if (!key) { pr_warn("Unknown key number - 0x%x\n", return_value.key_num); } else { switch (key->keycode) { case KEY_WLAN: case KEY_BLUETOOTH: if (has_cap(ACER_CAP_WIRELESS)) rfkill_set_sw_state(wireless_rfkill, !(device_state & ACER_WMID3_GDS_WIRELESS)); if (has_cap(ACER_CAP_THREEG)) rfkill_set_sw_state(threeg_rfkill, !(device_state & ACER_WMID3_GDS_THREEG)); if (has_cap(ACER_CAP_BLUETOOTH)) rfkill_set_sw_state(bluetooth_rfkill, !(device_state & ACER_WMID3_GDS_BLUETOOTH)); break; } sparse_keymap_report_entry(acer_wmi_input_dev, key, 1, true); } break; default: pr_warn("Unknown function number - %d - %d\n", return_value.function, return_value.key_num); break; } } static acpi_status wmid3_set_lm_mode(struct lm_input_params *params, struct lm_return_value *return_value) { acpi_status status; union acpi_object *obj; struct acpi_buffer input = { sizeof(struct lm_input_params), params }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; status = wmi_evaluate_method(WMID_GUID3, 0, 0x1, &input, &output); if (ACPI_FAILURE(status)) return status; obj = output.pointer; if (!obj) return AE_ERROR; else if (obj->type != ACPI_TYPE_BUFFER) { kfree(obj); return AE_ERROR; } if (obj->buffer.length != 4) { pr_warn("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return AE_ERROR; } *return_value = *((struct lm_return_value *)obj->buffer.pointer); kfree(obj); return status; } static int acer_wmi_enable_ec_raw(void) { struct lm_return_value return_value; acpi_status status; struct lm_input_params params = { 
.function_num = 0x1, .commun_devices = 0xFFFF, .devices = 0xFFFF, .lm_status = 0x00, /* Launch Manager Deactive */ }; status = wmid3_set_lm_mode(&params, &return_value); if (return_value.error_code || return_value.ec_return_value) pr_warn("Enabling EC raw mode failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); else pr_info("Enabled EC raw mode\n"); return status; } static int acer_wmi_enable_lm(void) { struct lm_return_value return_value; acpi_status status; struct lm_input_params params = { .function_num = 0x1, .commun_devices = 0xFFFF, .devices = 0xFFFF, .lm_status = 0x01, /* Launch Manager Active */ }; status = wmid3_set_lm_mode(&params, &return_value); if (return_value.error_code || return_value.ec_return_value) pr_warn("Enabling Launch Manager failed: 0x%x - 0x%x\n", return_value.error_code, return_value.ec_return_value); return status; } static int __init acer_wmi_input_setup(void) { acpi_status status; int err; acer_wmi_input_dev = input_allocate_device(); if (!acer_wmi_input_dev) return -ENOMEM; acer_wmi_input_dev->name = "Acer WMI hotkeys"; acer_wmi_input_dev->phys = "wmi/input0"; acer_wmi_input_dev->id.bustype = BUS_HOST; err = sparse_keymap_setup(acer_wmi_input_dev, acer_wmi_keymap, NULL); if (err) goto err_free_dev; status = wmi_install_notify_handler(ACERWMID_EVENT_GUID, acer_wmi_notify, NULL); if (ACPI_FAILURE(status)) { err = -EIO; goto err_free_keymap; } err = input_register_device(acer_wmi_input_dev); if (err) goto err_uninstall_notifier; return 0; err_uninstall_notifier: wmi_remove_notify_handler(ACERWMID_EVENT_GUID); err_free_keymap: sparse_keymap_free(acer_wmi_input_dev); err_free_dev: input_free_device(acer_wmi_input_dev); return err; } static void acer_wmi_input_destroy(void) { wmi_remove_notify_handler(ACERWMID_EVENT_GUID); sparse_keymap_free(acer_wmi_input_dev); input_unregister_device(acer_wmi_input_dev); } /* * debugfs functions */ static u32 get_wmid_devices(void) { struct acpi_buffer out = 
{ACPI_ALLOCATE_BUFFER, NULL}; union acpi_object *obj; acpi_status status; u32 devices = 0; status = wmi_query_block(WMID_GUID2, 1, &out); if (ACPI_FAILURE(status)) return 0; obj = (union acpi_object *) out.pointer; if (obj && obj->type == ACPI_TYPE_BUFFER && (obj->buffer.length == sizeof(u32) || obj->buffer.length == sizeof(u64))) { devices = *((u32 *) obj->buffer.pointer); } else if (obj->type == ACPI_TYPE_INTEGER) { devices = (u32) obj->integer.value; } kfree(out.pointer); return devices; } /* * Platform device */ static int __devinit acer_platform_probe(struct platform_device *device) { int err; if (has_cap(ACER_CAP_MAILLED)) { err = acer_led_init(&device->dev); if (err) goto error_mailled; } if (has_cap(ACER_CAP_BRIGHTNESS)) { err = acer_backlight_init(&device->dev); if (err) goto error_brightness; } err = acer_rfkill_init(&device->dev); if (err) goto error_rfkill; return err; error_rfkill: if (has_cap(ACER_CAP_BRIGHTNESS)) acer_backlight_exit(); error_brightness: if (has_cap(ACER_CAP_MAILLED)) acer_led_exit(); error_mailled: return err; } static int acer_platform_remove(struct platform_device *device) { if (has_cap(ACER_CAP_MAILLED)) acer_led_exit(); if (has_cap(ACER_CAP_BRIGHTNESS)) acer_backlight_exit(); acer_rfkill_exit(); return 0; } static int acer_platform_suspend(struct platform_device *dev, pm_message_t state) { u32 value; struct acer_data *data = &interface->data; if (!data) return -ENOMEM; if (has_cap(ACER_CAP_MAILLED)) { get_u32(&value, ACER_CAP_MAILLED); set_u32(LED_OFF, ACER_CAP_MAILLED); data->mailled = value; } if (has_cap(ACER_CAP_BRIGHTNESS)) { get_u32(&value, ACER_CAP_BRIGHTNESS); data->brightness = value; } return 0; } static int acer_platform_resume(struct platform_device *device) { struct acer_data *data = &interface->data; if (!data) return -ENOMEM; if (has_cap(ACER_CAP_MAILLED)) set_u32(data->mailled, ACER_CAP_MAILLED); if (has_cap(ACER_CAP_BRIGHTNESS)) set_u32(data->brightness, ACER_CAP_BRIGHTNESS); return 0; } static void 
acer_platform_shutdown(struct platform_device *device) { struct acer_data *data = &interface->data; if (!data) return; if (has_cap(ACER_CAP_MAILLED)) set_u32(LED_OFF, ACER_CAP_MAILLED); } static struct platform_driver acer_platform_driver = { .driver = { .name = "acer-wmi", .owner = THIS_MODULE, }, .probe = acer_platform_probe, .remove = acer_platform_remove, .suspend = acer_platform_suspend, .resume = acer_platform_resume, .shutdown = acer_platform_shutdown, }; static struct platform_device *acer_platform_device; static int remove_sysfs(struct platform_device *device) { if (has_cap(ACER_CAP_THREEG)) device_remove_file(&device->dev, &dev_attr_threeg); device_remove_file(&device->dev, &dev_attr_interface); return 0; } static int create_sysfs(void) { int retval = -ENOMEM; if (has_cap(ACER_CAP_THREEG)) { retval = device_create_file(&acer_platform_device->dev, &dev_attr_threeg); if (retval) goto error_sysfs; } retval = device_create_file(&acer_platform_device->dev, &dev_attr_interface); if (retval) goto error_sysfs; return 0; error_sysfs: remove_sysfs(acer_platform_device); return retval; } static void remove_debugfs(void) { debugfs_remove(interface->debug.devices); debugfs_remove(interface->debug.root); } static int create_debugfs(void) { interface->debug.root = debugfs_create_dir("acer-wmi", NULL); if (!interface->debug.root) { pr_err("Failed to create debugfs directory"); return -ENOMEM; } interface->debug.devices = debugfs_create_u32("devices", S_IRUGO, interface->debug.root, &interface->debug.wmid_devices); if (!interface->debug.devices) goto error_debugfs; return 0; error_debugfs: remove_debugfs(); return -ENOMEM; } static int __init acer_wmi_init(void) { int err; pr_info("Acer Laptop ACPI-WMI Extras\n"); if (dmi_check_system(acer_blacklist)) { pr_info("Blacklisted hardware detected - not loading\n"); return -ENODEV; } find_quirks(); /* * Detect which ACPI-WMI interface we're using. 
*/ if (wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1)) interface = &AMW0_V2_interface; if (!wmi_has_guid(AMW0_GUID1) && wmi_has_guid(WMID_GUID1)) interface = &wmid_interface; if (wmi_has_guid(WMID_GUID3)) interface = &wmid_v2_interface; if (interface) dmi_walk(type_aa_dmi_decode, NULL); if (wmi_has_guid(WMID_GUID2) && interface) { if (!has_type_aa && ACPI_FAILURE(WMID_set_capabilities())) { pr_err("Unable to detect available WMID devices\n"); return -ENODEV; } /* WMID always provides brightness methods */ interface->capability |= ACER_CAP_BRIGHTNESS; } else if (!wmi_has_guid(WMID_GUID2) && interface && !has_type_aa) { pr_err("No WMID device detection method found\n"); return -ENODEV; } if (wmi_has_guid(AMW0_GUID1) && !wmi_has_guid(WMID_GUID1)) { interface = &AMW0_interface; if (ACPI_FAILURE(AMW0_set_capabilities())) { pr_err("Unable to detect available AMW0 devices\n"); return -ENODEV; } } if (wmi_has_guid(AMW0_GUID1)) AMW0_find_mailled(); if (!interface) { pr_err("No or unsupported WMI interface, unable to load\n"); return -ENODEV; } set_quirks(); if (acpi_video_backlight_support()) { interface->capability &= ~ACER_CAP_BRIGHTNESS; pr_info("Brightness must be controlled by " "generic video driver\n"); } if (wmi_has_guid(WMID_GUID3)) { if (ec_raw_mode) { if (ACPI_FAILURE(acer_wmi_enable_ec_raw())) { pr_err("Cannot enable EC raw mode\n"); return -ENODEV; } } else if (ACPI_FAILURE(acer_wmi_enable_lm())) { pr_err("Cannot enable Launch Manager mode\n"); return -ENODEV; } } else if (ec_raw_mode) { pr_info("No WMID EC raw mode enable method\n"); } if (wmi_has_guid(ACERWMID_EVENT_GUID)) { err = acer_wmi_input_setup(); if (err) return err; } err = platform_driver_register(&acer_platform_driver); if (err) { pr_err("Unable to register platform driver.\n"); goto error_platform_register; } acer_platform_device = platform_device_alloc("acer-wmi", -1); if (!acer_platform_device) { err = -ENOMEM; goto error_device_alloc; } err = platform_device_add(acer_platform_device); if 
(err) goto error_device_add; err = create_sysfs(); if (err) goto error_create_sys; if (wmi_has_guid(WMID_GUID2)) { interface->debug.wmid_devices = get_wmid_devices(); err = create_debugfs(); if (err) goto error_create_debugfs; } /* Override any initial settings with values from the commandline */ acer_commandline_init(); return 0; error_create_debugfs: remove_sysfs(acer_platform_device); error_create_sys: platform_device_del(acer_platform_device); error_device_add: platform_device_put(acer_platform_device); error_device_alloc: platform_driver_unregister(&acer_platform_driver); error_platform_register: if (wmi_has_guid(ACERWMID_EVENT_GUID)) acer_wmi_input_destroy(); return err; } static void __exit acer_wmi_exit(void) { if (wmi_has_guid(ACERWMID_EVENT_GUID)) acer_wmi_input_destroy(); remove_sysfs(acer_platform_device); remove_debugfs(); platform_device_unregister(acer_platform_device); platform_driver_unregister(&acer_platform_driver); pr_info("Acer Laptop WMI Extras unloaded\n"); return; } module_init(acer_wmi_init); module_exit(acer_wmi_exit);
gpl-2.0