repo_name
string
path
string
copies
string
size
string
content
string
license
string
naufragoweb/android_kernel_samsung_kyleopen
sound/isa/sb/emu8000_callback.c
4890
13528
/*
 * synth callback routines for the emu8000 (AWE32/64)
 *
 * Copyright (C) 1999 Steve Ratcliffe
 * Copyright (C) 1999-2000 Takashi Iwai <tiwai@suse.de>
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */

#include "emu8000_local.h"
#include <sound/asoundef.h>

/*
 * prototypes
 */
static struct snd_emux_voice *get_voice(struct snd_emux *emu,
					struct snd_emux_port *port);
static int start_voice(struct snd_emux_voice *vp);
static void trigger_voice(struct snd_emux_voice *vp);
static void release_voice(struct snd_emux_voice *vp);
static void update_voice(struct snd_emux_voice *vp, int update);
static void reset_voice(struct snd_emux *emu, int ch);
static void terminate_voice(struct snd_emux_voice *vp);
static void sysex(struct snd_emux *emu, char *buf, int len, int parsed,
		  struct snd_midi_channel_set *chset);
#ifdef CONFIG_SND_SEQUENCER_OSS
static int oss_ioctl(struct snd_emux *emu, int cmd, int p1, int p2);
#endif
static int load_fx(struct snd_emux *emu, int type, int mode,
		   const void __user *buf, long len);

static void set_pitch(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_volume(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_pan(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_fmmod(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_tremfreq(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_fm2frq2(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void set_filterQ(struct snd_emu8000 *hw, struct snd_emux_voice *vp);
static void snd_emu8000_tweak_voice(struct snd_emu8000 *emu, int ch);

/*
 * Ensure a value is between two points
 * macro evaluates its args more than once, so changed to upper-case.
 */
#define LIMITVALUE(x, a, b) do { if ((x) < (a)) (x) = (a); else if ((x) > (b)) (x) = (b); } while (0)
#define LIMITMAX(x, a) do {if ((x) > (a)) (x) = (a); } while (0)

/*
 * set up operators
 */
static struct snd_emux_operators emu8000_ops = {
	.owner =	THIS_MODULE,
	.get_voice =	get_voice,
	.prepare =	start_voice,
	.trigger =	trigger_voice,
	.release =	release_voice,
	.update =	update_voice,
	.terminate =	terminate_voice,
	.reset =	reset_voice,
	.sample_new =	snd_emu8000_sample_new,
	.sample_free =	snd_emu8000_sample_free,
	.sample_reset = snd_emu8000_sample_reset,
	.load_fx =	load_fx,
	.sysex =	sysex,
#ifdef CONFIG_SND_SEQUENCER_OSS
	.oss_ioctl =	oss_ioctl,
#endif
};

/* Install the emu8000 operator table on the emux synth instance. */
void
snd_emu8000_ops_setup(struct snd_emu8000 *hw)
{
	hw->emu->ops = emu8000_ops;
}


/*
 * Terminate a voice
 *
 * Switches both the modulation and volume envelopes into their release
 * phase (0x8000 flag plus the per-voice release rate).
 */
static void
release_voice(struct snd_emux_voice *vp)
{
	int dcysusv;
	struct snd_emu8000 *hw;

	hw = vp->hw;
	dcysusv = 0x8000 | (unsigned char)vp->reg.parm.modrelease;
	EMU8000_DCYSUS_WRITE(hw, vp->ch, dcysusv);
	dcysusv = 0x8000 | (unsigned char)vp->reg.parm.volrelease;
	EMU8000_DCYSUSV_WRITE(hw, vp->ch, dcysusv);
}


/*
 * Terminate the voice immediately (0x807F = release flag with maximum
 * rate, same value reset_voice() uses).
 */
static void
terminate_voice(struct snd_emux_voice *vp)
{
	struct snd_emu8000 *hw;

	hw = vp->hw;
	EMU8000_DCYSUSV_WRITE(hw, vp->ch, 0x807F);
}


/*
 * Re-program only the parameters flagged in the update bitmask; pan is
 * additionally gated on the port's realtime-pan control.
 */
static void
update_voice(struct snd_emux_voice *vp, int update)
{
	struct snd_emu8000 *hw;

	hw = vp->hw;
	if (update & SNDRV_EMUX_UPDATE_VOLUME)
		set_volume(hw, vp);
	if (update & SNDRV_EMUX_UPDATE_PITCH)
		set_pitch(hw, vp);
	if ((update & SNDRV_EMUX_UPDATE_PAN) &&
	    vp->port->ctrls[EMUX_MD_REALTIME_PAN])
		set_pan(hw, vp);
	if (update & SNDRV_EMUX_UPDATE_FMMOD)
		set_fmmod(hw, vp);
	if (update & SNDRV_EMUX_UPDATE_TREMFREQ)
		set_tremfreq(hw, vp);
	if (update & SNDRV_EMUX_UPDATE_FM2FRQ2)
		set_fm2frq2(hw, vp);
	if (update & SNDRV_EMUX_UPDATE_Q)
		set_filterQ(hw, vp);
}


/*
 * Find a channel (voice) within the EMU that is not in use or at least
 * less in use than other channels.  Always returns a valid pointer
 * no matter what.  If there is a real shortage of voices then one
 * will be cut. Such is life.
 *
 * The channel index (vp->ch) must be initialized in this routine.
 * In Emu8k, it is identical with the array index.
 */
static struct snd_emux_voice *
get_voice(struct snd_emux *emu, struct snd_emux_port *port)
{
	int i;
	struct snd_emux_voice *vp;
	struct snd_emu8000 *hw;

	/* what we are looking for, in order of preference */
	enum {
		OFF=0, RELEASED, PLAYING, END
	};

	/* Keeps track of what we are finding */
	struct best {
		unsigned int time;
		int voice;
	} best[END];
	struct best *bp;

	hw = emu->hw;

	for (i = 0; i < END; i++) {
		best[i].time = (unsigned int)(-1); /* XXX MAX_?INT really */;
		best[i].voice = -1;
	}

	/*
	 * Go through them all and get a best one to use.
	 */
	for (i = 0; i < emu->max_voices; i++) {
		int state, val;

		vp = &emu->voices[i];
		state = vp->state;

		if (state == SNDRV_EMUX_ST_OFF)
			bp = best + OFF;
		else if (state == SNDRV_EMUX_ST_RELEASED ||
			 state == SNDRV_EMUX_ST_PENDING) {
			bp = best + RELEASED;
			/* current volume (CVCF upper 16 bits) == 0 means the
			 * release has finished; treat it as a free voice */
			val = (EMU8000_CVCF_READ(hw, vp->ch) >> 16) & 0xffff;
			if (! val)
				bp = best + OFF;
		}
		else if (state & SNDRV_EMUX_ST_ON)
			bp = best + PLAYING;
		else
			continue;

		/* check if sample is finished playing (non-looping only) */
		if (state != SNDRV_EMUX_ST_OFF &&
		    (vp->reg.sample_mode & SNDRV_SFNT_SAMPLE_SINGLESHOT)) {
			val = EMU8000_CCCA_READ(hw, vp->ch) & 0xffffff;
			if (val >= vp->reg.loopstart)
				bp = best + OFF;
		}

		if (vp->time < bp->time) {
			bp->time = vp->time;
			bp->voice = i;
		}
	}

	for (i = 0; i < END; i++) {
		if (best[i].voice >= 0) {
			vp = &emu->voices[best[i].voice];
			vp->ch = best[i].voice;
			return vp;
		}
	}

	/* not found */
	return NULL;
}


/*
 * Prepare a voice for playback: silence the channel, then program pitch,
 * envelopes, LFOs, pan, chorus/reverb sends and the start address.
 * The volume envelope's decay/sustain register is deliberately left for
 * trigger_voice().
 */
static int
start_voice(struct snd_emux_voice *vp)
{
	unsigned int temp;
	int ch;
	int addr;
	struct snd_midi_channel *chan;
	struct snd_emu8000 *hw;

	hw = vp->hw;
	ch = vp->ch;
	chan = vp->chan;

	/* channel to be silent and idle */
	EMU8000_DCYSUSV_WRITE(hw, ch, 0x0080);
	EMU8000_VTFT_WRITE(hw, ch, 0x0000FFFF);
	EMU8000_CVCF_WRITE(hw, ch, 0x0000FFFF);
	EMU8000_PTRX_WRITE(hw, ch, 0);
	EMU8000_CPF_WRITE(hw, ch, 0);

	/* set pitch offset */
	set_pitch(hw, vp);

	/* set envelope parameters */
	EMU8000_ENVVAL_WRITE(hw, ch, vp->reg.parm.moddelay);
	EMU8000_ATKHLD_WRITE(hw, ch, vp->reg.parm.modatkhld);
	EMU8000_DCYSUS_WRITE(hw, ch, vp->reg.parm.moddcysus);
	EMU8000_ENVVOL_WRITE(hw, ch, vp->reg.parm.voldelay);
	EMU8000_ATKHLDV_WRITE(hw, ch, vp->reg.parm.volatkhld);
	/* decay/sustain parameter for volume envelope is used
	   for triggering the voice */

	/* cutoff and volume */
	set_volume(hw, vp);

	/* modulation envelope heights */
	EMU8000_PEFE_WRITE(hw, ch, vp->reg.parm.pefe);

	/* lfo1/2 delay */
	EMU8000_LFO1VAL_WRITE(hw, ch, vp->reg.parm.lfo1delay);
	EMU8000_LFO2VAL_WRITE(hw, ch, vp->reg.parm.lfo2delay);

	/* lfo1 pitch & cutoff shift */
	set_fmmod(hw, vp);
	/* lfo1 volume & freq */
	set_tremfreq(hw, vp);
	/* lfo2 pitch & freq */
	set_fm2frq2(hw, vp);

	/* pan & loop start */
	set_pan(hw, vp);

	/* chorus & loop end (chorus 8bit, MSB) */
	addr = vp->reg.loopend - 1;
	temp = vp->reg.parm.chorus;
	temp += (int)chan->control[MIDI_CTL_E3_CHORUS_DEPTH] * 9 / 10;
	LIMITMAX(temp, 255);
	temp = (temp <<24) | (unsigned int)addr;
	EMU8000_CSL_WRITE(hw, ch, temp);

	/* Q & current address (Q 4bit value, MSB) */
	addr = vp->reg.start - 1;
	temp = vp->reg.parm.filterQ;
	temp = (temp<<28) | (unsigned int)addr;
	EMU8000_CCCA_WRITE(hw, ch, temp);

	/* clear unknown registers */
	EMU8000_00A0_WRITE(hw, ch, 0);
	EMU8000_0080_WRITE(hw, ch, 0);

	/* reset volume */
	temp = vp->vtarget << 16;
	EMU8000_VTFT_WRITE(hw, ch, temp | vp->ftarget);
	EMU8000_CVCF_WRITE(hw, ch, temp | 0xff00);

	return 0;
}

/*
 * Start envelope
 */
static void
trigger_voice(struct snd_emux_voice *vp)
{
	int ch = vp->ch;
	unsigned int temp;
	struct snd_emu8000 *hw;

	hw = vp->hw;

	/* set reverb and pitch target */
	temp = vp->reg.parm.reverb;
	temp += (int)vp->chan->control[MIDI_CTL_E1_REVERB_DEPTH] * 9 / 10;
	LIMITMAX(temp, 255);
	temp = (temp << 8) | (vp->ptarget << 16) | vp->aaux;
	EMU8000_PTRX_WRITE(hw, ch, temp);
	EMU8000_CPF_WRITE(hw, ch, vp->ptarget << 16);
	/* writing voldcysus here is what actually starts the note
	 * (see the comment in start_voice()) */
	EMU8000_DCYSUSV_WRITE(hw, ch, vp->reg.parm.voldcysus);
}

/*
 * reset voice parameters
 */
static void
reset_voice(struct snd_emux *emu, int ch)
{
	struct snd_emu8000 *hw;

	hw = emu->hw;
	EMU8000_DCYSUSV_WRITE(hw, ch, 0x807F); /* reset voice */
	snd_emu8000_tweak_voice(hw, ch);
}

/*
 * Set the pitch of a possibly playing note.
 */
static void
set_pitch(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
	EMU8000_IP_WRITE(hw, vp->ch, vp->apitch);
}

/*
 * Set the volume of a possibly already playing note
 * (initial filter cutoff in the high byte, attenuation in the low byte).
 */
static void
set_volume(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
	int  ifatn;

	ifatn = (unsigned char)vp->acutoff;
	ifatn = (ifatn << 8);
	ifatn |= (unsigned char)vp->avol;
	EMU8000_IFATN_WRITE(hw, vp->ch, ifatn);
}

/*
 * Set pan and loop start address.
 */
static void
set_pan(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
	unsigned int temp;

	temp = ((unsigned int)vp->apan<<24) | ((unsigned int)vp->reg.loopstart - 1);
	EMU8000_PSST_WRITE(hw, vp->ch, temp);
}

/* modulation-wheel pitch sensitivity, in the same units as the
 * (MOD_SENSE * modulation) / 1200 scaling below */
#define MOD_SENSE 18

/* set lfo1 pitch & cutoff shift, adding MIDI modulation/pressure */
static void
set_fmmod(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
	unsigned short fmmod;
	short pitch;
	unsigned char cutoff;
	int modulation;

	pitch = (char)(vp->reg.parm.fmmod>>8);
	cutoff = (vp->reg.parm.fmmod & 0xff);
	modulation = vp->chan->gm_modulation + vp->chan->midi_pressure;
	pitch += (MOD_SENSE * modulation) / 1200;
	LIMITVALUE(pitch, -128, 127);
	fmmod = ((unsigned char)pitch<<8) | cutoff;
	EMU8000_FMMOD_WRITE(hw, vp->ch, fmmod);
}

/* set tremolo (lfo1) volume & frequency */
static void
set_tremfreq(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
	EMU8000_TREMFRQ_WRITE(hw, vp->ch, vp->reg.parm.tremfrq);
}

/* set lfo2 pitch & frequency */
static void
set_fm2frq2(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
	unsigned short fm2frq2;
	short pitch;
	unsigned char freq;
	int modulation;

	pitch = (char)(vp->reg.parm.fm2frq2>>8);
	freq = vp->reg.parm.fm2frq2 & 0xff;
	modulation = vp->chan->gm_modulation + vp->chan->midi_pressure;
	pitch += (MOD_SENSE * modulation) / 1200;
	LIMITVALUE(pitch, -128, 127);
	fm2frq2 = ((unsigned char)pitch<<8) | freq;
	EMU8000_FM2FRQ2_WRITE(hw, vp->ch, fm2frq2);
}

/* set filterQ (read-modify-write of CCCA's top nibble) */
static void
set_filterQ(struct snd_emu8000 *hw, struct snd_emux_voice *vp)
{
	unsigned int addr;

	addr = EMU8000_CCCA_READ(hw, vp->ch) & 0xffffff;
	addr |= (vp->reg.parm.filterQ << 28);
	EMU8000_CCCA_WRITE(hw, vp->ch, addr);
}

/*
 * set the envelope & LFO parameters to the default values
 */
static void
snd_emu8000_tweak_voice(struct snd_emu8000 *emu, int i)
{
	/* set all mod/vol envelope shape to minimum */
	EMU8000_ENVVOL_WRITE(emu, i, 0x8000);
	EMU8000_ENVVAL_WRITE(emu, i, 0x8000);
	EMU8000_DCYSUS_WRITE(emu, i, 0x7F7F);
	EMU8000_ATKHLDV_WRITE(emu, i, 0x7F7F);
	EMU8000_ATKHLD_WRITE(emu, i, 0x7F7F);
	EMU8000_PEFE_WRITE(emu, i, 0);  /* mod envelope height to zero */
	EMU8000_LFO1VAL_WRITE(emu, i, 0x8000); /* no delay for LFO1 */
	EMU8000_LFO2VAL_WRITE(emu, i, 0x8000);
	EMU8000_IP_WRITE(emu, i, 0xE000); /* no pitch shift */
	EMU8000_IFATN_WRITE(emu, i, 0xFF00); /* volume to minimum */
	EMU8000_FMMOD_WRITE(emu, i, 0);
	EMU8000_TREMFRQ_WRITE(emu, i, 0);
	EMU8000_FM2FRQ2_WRITE(emu, i, 0);
}

/*
 * sysex callback - only GS chorus/reverb mode changes are handled here;
 * other parsed sysex types are ignored.
 */
static void
sysex(struct snd_emux *emu, char *buf, int len, int parsed,
      struct snd_midi_channel_set *chset)
{
	struct snd_emu8000 *hw;

	hw = emu->hw;

	switch (parsed) {
	case SNDRV_MIDI_SYSEX_GS_CHORUS_MODE:
		hw->chorus_mode = chset->gs_chorus_mode;
		snd_emu8000_update_chorus_mode(hw);
		break;

	case SNDRV_MIDI_SYSEX_GS_REVERB_MODE:
		hw->reverb_mode = chset->gs_reverb_mode;
		snd_emu8000_update_reverb_mode(hw);
		break;
	}
}


#ifdef CONFIG_SND_SEQUENCER_OSS
/*
 * OSS ioctl callback
 */
static int
oss_ioctl(struct snd_emux *emu, int cmd, int p1, int p2)
{
	struct snd_emu8000 *hw;

	hw = emu->hw;

	switch (cmd) {
	case _EMUX_OSS_REVERB_MODE:
		hw->reverb_mode = p1;
		snd_emu8000_update_reverb_mode(hw);
		break;

	case _EMUX_OSS_CHORUS_MODE:
		hw->chorus_mode = p1;
		snd_emu8000_update_chorus_mode(hw);
		break;

	case _EMUX_OSS_INITIALIZE_CHIP:
		/* snd_emu8000_init(hw); */ /*ignored*/
		break;

	case _EMUX_OSS_EQUALIZER:
		hw->bass_level = p1;
		hw->treble_level = p2;
		snd_emu8000_update_equalizer(hw);
		break;
	}
	return 0;
}
#endif


/*
 * additional patch keys
 */
#define SNDRV_EMU8000_LOAD_CHORUS_FX	0x10	/* optarg=mode */
#define SNDRV_EMU8000_LOAD_REVERB_FX	0x11	/* optarg=mode */

/*
 * callback routine: load user-supplied chorus/reverb effect parameters.
 * The 16-byte patch header is skipped before dispatching on type.
 */
static int
load_fx(struct snd_emux *emu, int type, int mode, const void __user *buf, long len)
{
	struct snd_emu8000 *hw;

	hw = emu->hw;

	/* skip header */
	buf += 16;
	len -= 16;

	switch (type) {
	case SNDRV_EMU8000_LOAD_CHORUS_FX:
		return snd_emu8000_load_chorus_fx(hw, mode, buf, len);
	case SNDRV_EMU8000_LOAD_REVERB_FX:
		return snd_emu8000_load_reverb_fx(hw, mode, buf, len);
	}
	return -EINVAL;
}
gpl-2.0
SlimDevs/kernel_lge_hammerhead
drivers/isdn/hysdn/hysdn_procconf.c
5146
13901
/* $Id: hysdn_procconf.c,v 1.8.6.4 2001/09/23 22:24:54 kai Exp $
 *
 * Linux driver for HYSDN cards, /proc/net filesystem dir and conf functions.
 *
 * written by Werner Cornelius (werner@titro.de) for Hypercope GmbH
 *
 * Copyright 1999 by Werner Cornelius (werner@titro.de)
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/cred.h>
#include <linux/module.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/mutex.h>
#include <net/net_namespace.h>

#include "hysdn_defs.h"

static DEFINE_MUTEX(hysdn_conf_mutex);

#define INFO_OUT_LEN 80		/* length of info line including lf */

/********************************************************/
/* defines and data structure for conf write operations */
/********************************************************/
#define CONF_STATE_DETECT 0	/* waiting for detect */
#define CONF_STATE_CONF 1	/* writing config data */
#define CONF_STATE_POF  2	/* writing pof data */
#define CONF_LINE_LEN 255	/* 255 chars max */

struct conf_writedata {
	hysdn_card *card;	/* card the device is connected to */
	int buf_size;		/* actual number of bytes in the buffer */
	int needed_size;	/* needed size when reading pof */
	int state;		/* actual interface states from above constants */
	unsigned char conf_line[CONF_LINE_LEN];	/* buffered conf line */
	unsigned short channel;	/* active channel number */
	unsigned char *pof_buffer;	/* buffer when writing pof */
};

/***********************************************************************/
/* process_line parses one config line and transfers it to the card if */
/* necessary. */
/* if the return value is negative an error occurred. */
/***********************************************************************/
static int
process_line(struct conf_writedata *cnf)
{
	unsigned char *cp = cnf->conf_line;
	int i;

	if (cnf->card->debug_flags & LOG_CNF_LINE)
		hysdn_addlog(cnf->card, "conf line: %s", cp);

	if (*cp == '-') {	/* option */
		cp++;		/* point to option char */

		if (*cp++ != 'c')
			return (0);	/* option unknown or used */
		i = 0;		/* start value for channel */
		while ((*cp <= '9') && (*cp >= '0'))
			i = i * 10 + *cp++ - '0';	/* get decimal number */
		if (i > 65535) {
			if (cnf->card->debug_flags & LOG_CNF_MISC)
				hysdn_addlog(cnf->card, "conf channel invalid  %d", i);
			return (-ERR_INV_CHAN);		/* invalid channel */
		}
		cnf->channel = i & 0xFFFF;	/* set new channel number */
		return (0);	/* success */
	}			/* option */
	if (*cp == '*') {	/* line to send */
		if (cnf->card->debug_flags & LOG_CNF_DATA)
			hysdn_addlog(cnf->card, "conf chan=%d %s", cnf->channel,
				     cp);
		/* send the line without * */
		return (hysdn_tx_cfgline(cnf->card, cnf->conf_line + 1,
					 cnf->channel));
	}			/* line to send */
	return (0);
}				/* process_line */

/***********************************/
/* conf file operations and tables */
/***********************************/

/****************************************************/
/* write conf file -> boot or send cfg line to card */
/****************************************************/
static ssize_t
hysdn_conf_write(struct file *file, const char __user *buf, size_t count, loff_t *off)
{
	struct conf_writedata *cnf;
	int i;
	unsigned char ch, *cp;

	if (!count)
		return (0);	/* nothing to handle */

	if (!(cnf = file->private_data))
		return (-EFAULT);	/* should never happen */

	if (cnf->state == CONF_STATE_DETECT) {	/* auto detect cnf or pof data */
		if (copy_from_user(&ch, buf, 1))	/* get first char for detect */
			return (-EFAULT);

		if (ch == 0x1A) {
			/* we detected a pof file */
			if ((cnf->needed_size = pof_write_open(cnf->card, &cnf->pof_buffer)) <= 0)
				return (cnf->needed_size);	/* an error occurred -> exit */
			cnf->buf_size = 0;	/* buffer is empty */
			cnf->state = CONF_STATE_POF;	/* new state */
		} else {
			/* conf data has been detected */
			cnf->buf_size = 0;	/* buffer is empty */
			cnf->state = CONF_STATE_CONF;	/* requested conf data write */
			if (cnf->card->state != CARD_STATE_RUN)
				return (-ERR_NOT_BOOTED);
			cnf->conf_line[CONF_LINE_LEN - 1] = 0;	/* limit string length */
			cnf->channel = 4098;	/* default channel for output */
		}
	}			/* state was auto detect */
	if (cnf->state == CONF_STATE_POF) {	/* pof write active */
		i = cnf->needed_size - cnf->buf_size;	/* bytes still missing for write */
		if (i <= 0)
			return (-EINVAL);	/* size error handling pof */

		if (i < count)
			count = i;	/* limit requested number of bytes */
		if (copy_from_user(cnf->pof_buffer + cnf->buf_size, buf, count))
			return (-EFAULT);	/* error while copying */
		cnf->buf_size += count;

		if (cnf->needed_size == cnf->buf_size) {
			cnf->needed_size = pof_write_buffer(cnf->card, cnf->buf_size);	/* write data */
			if (cnf->needed_size <= 0) {
				cnf->card->state = CARD_STATE_BOOTERR;	/* show boot error */
				return (cnf->needed_size);	/* an error occurred */
			}
			cnf->buf_size = 0;	/* buffer is empty again */
		}
	}			/* pof write active */
	else {			/* conf write active */

		if (cnf->card->state != CARD_STATE_RUN) {
			if (cnf->card->debug_flags & LOG_CNF_MISC)
				hysdn_addlog(cnf->card, "cnf write denied -> not booted");
			return (-ERR_NOT_BOOTED);
		}
		i = (CONF_LINE_LEN - 1) - cnf->buf_size;	/* bytes available in buffer */
		if (i > 0) {
			/* copy remaining bytes into buffer */

			if (count > i)
				count = i;	/* limit transfer */
			if (copy_from_user(cnf->conf_line + cnf->buf_size, buf, count))
				return (-EFAULT);	/* error while copying */

			i = count;	/* number of chars in buffer */
			cp = cnf->conf_line + cnf->buf_size;
			while (i) {
				/* search for end of line */
				if ((*cp < ' ') && (*cp != 9))
					break;	/* end of line found */
				cp++;
				i--;
			}	/* search for end of line */

			if (i) {
				/* delimiter found */
				*cp++ = 0;	/* string termination */
				count -= (i - 1);	/* subtract remaining bytes from count */
				while ((i) && (*cp < ' ') && (*cp != 9)) {
					i--;	/* discard next char */
					count++;	/* mark as read */
					cp++;	/* next char */
				}
				cnf->buf_size = 0;	/* buffer is empty after transfer */
				if ((i = process_line(cnf)) < 0)	/* handle the line */
					count = i;	/* return the error */
			}	/* delimiter found */
			else {
				cnf->buf_size += count;		/* add chars to string */
				if (cnf->buf_size >= CONF_LINE_LEN - 1) {
					if (cnf->card->debug_flags & LOG_CNF_MISC)
						hysdn_addlog(cnf->card, "cnf line too long %d chars pos %d", cnf->buf_size, count);
					return (-ERR_CONF_LONG);
				}
			}	/* not delimited */

		}		/* copy remaining bytes into buffer */
		else {
			if (cnf->card->debug_flags & LOG_CNF_MISC)
				hysdn_addlog(cnf->card, "cnf line too long");
			return (-ERR_CONF_LONG);
		}
	}			/* conf write active */

	return (count);
}				/* hysdn_conf_write */

/*******************************************/
/* read conf file -> output card info data */
/*******************************************/
static ssize_t
hysdn_conf_read(struct file *file, char __user *buf, size_t count, loff_t *off)
{
	char *cp;

	if (!(file->f_mode & FMODE_READ))
		return -EPERM;	/* no permission to read */

	if (!(cp = file->private_data))
		return -EFAULT;	/* should never happen */

	return simple_read_from_buffer(buf, count, off, cp, strlen(cp));
}				/* hysdn_conf_read */

/******************/
/* open conf file */
/******************/
static int
hysdn_conf_open(struct inode *ino, struct file *filep)
{
	hysdn_card *card;
	struct proc_dir_entry *pd;
	struct conf_writedata *cnf;
	char *cp, *tmp;

	/* now search the addressed card */
	mutex_lock(&hysdn_conf_mutex);
	card = card_root;
	while (card) {
		pd = card->procconf;
		if (pd == PDE(ino))
			break;
		card = card->next;	/* search next entry */
	}
	if (!card) {
		mutex_unlock(&hysdn_conf_mutex);
		return (-ENODEV);	/* device is unknown/invalid */
	}
	if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
		hysdn_addlog(card, "config open for uid=%d gid=%d mode=0x%x",
			     filep->f_cred->fsuid, filep->f_cred->fsgid,
			     filep->f_mode);

	if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
		/* write only access -> write boot file or conf line */

		/* NOTE(review): -ENOMEM would be the conventional error for a
		 * failed allocation; -EFAULT is what this driver historically
		 * returns here. */
		if (!(cnf = kmalloc(sizeof(struct conf_writedata), GFP_KERNEL))) {
			mutex_unlock(&hysdn_conf_mutex);
			return (-EFAULT);
		}
		cnf->card = card;
		cnf->buf_size = 0;	/* nothing buffered */
		cnf->state = CONF_STATE_DETECT;		/* start auto detect */
		filep->private_data = cnf;

	} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
		/* read access -> output card info data */

		if (!(tmp = kmalloc(INFO_OUT_LEN * 2 + 2, GFP_KERNEL))) {
			mutex_unlock(&hysdn_conf_mutex);
			return (-EFAULT);	/* out of memory */
		}
		filep->private_data = tmp;	/* start of string */

		/* first output a headline */
		sprintf(tmp, "id bus slot type irq iobase dp-mem b-chans fax-chans state device");
		cp = tmp;	/* start of string */
		while (*cp)
			cp++;
		/* pad both lines to a fixed width of INFO_OUT_LEN chars + lf */
		while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
			*cp++ = ' ';
		*cp++ = '\n';

		/* and now the data */
		sprintf(cp, "%d %3d %4d %4d %3d 0x%04x 0x%08lx %7d %9d %3d %s",
			card->myid,
			card->bus,
			PCI_SLOT(card->devfn),
			card->brdtype,
			card->irq,
			card->iobase,
			card->membase,
			card->bchans,
			card->faxchans,
			card->state,
			hysdn_net_getname(card));
		while (*cp)
			cp++;
		while (((cp - tmp) % (INFO_OUT_LEN + 1)) != INFO_OUT_LEN)
			*cp++ = ' ';
		*cp++ = '\n';
		*cp = 0;	/* end of string */
	} else {		/* simultaneous read/write access forbidden ! */
		mutex_unlock(&hysdn_conf_mutex);
		return (-EPERM);	/* no permission this time */
	}
	mutex_unlock(&hysdn_conf_mutex);
	return nonseekable_open(ino, filep);
}				/* hysdn_conf_open */

/***************************/
/* close a config file.    */
/***************************/
static int
hysdn_conf_close(struct inode *ino, struct file *filep)
{
	hysdn_card *card;
	struct conf_writedata *cnf;
	int retval = 0;
	struct proc_dir_entry *pd;

	mutex_lock(&hysdn_conf_mutex);
	/* search the addressed card */
	card = card_root;
	while (card) {
		pd = card->procconf;
		if (pd == PDE(ino))
			break;
		card = card->next;	/* search next entry */
	}
	if (!card) {
		mutex_unlock(&hysdn_conf_mutex);
		return (-ENODEV);	/* device is unknown/invalid */
	}
	if (card->debug_flags & (LOG_PROC_OPEN | LOG_PROC_ALL))
		hysdn_addlog(card, "config close for uid=%d gid=%d mode=0x%x",
			     filep->f_cred->fsuid, filep->f_cred->fsgid,
			     filep->f_mode);

	if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_WRITE) {
		/* write only access -> write boot file or conf line */
		if (filep->private_data) {
			cnf = filep->private_data;
			if (cnf->state == CONF_STATE_POF)
				retval = pof_write_close(cnf->card);	/* close the pof write */
			kfree(filep->private_data);	/* free allocated memory for buffer */

		}		/* handle write private data */
	} else if ((filep->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) {
		/* read access -> output card info data */
		kfree(filep->private_data);	/* release memory */
	}
	mutex_unlock(&hysdn_conf_mutex);
	return (retval);
}				/* hysdn_conf_close */

/******************************************************/
/* table for conf filesystem functions defined above. */
/******************************************************/
static const struct file_operations conf_fops =
{
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= hysdn_conf_read,
	.write		= hysdn_conf_write,
	.open		= hysdn_conf_open,
	.release	= hysdn_conf_close,
};

/*****************************/
/* hysdn subdir in /proc/net */
/*****************************/
struct proc_dir_entry *hysdn_proc_entry = NULL;

/*******************************************************************************/
/* hysdn_procconf_init is called when the module is loaded and after the cards */
/* have been detected. The needed proc dir and card config files are created.  */
/* The log init is called at last.                                             */
/*******************************************************************************/
int
hysdn_procconf_init(void)
{
	hysdn_card *card;
	unsigned char conf_name[20];

	hysdn_proc_entry = proc_mkdir(PROC_SUBDIR_NAME, init_net.proc_net);
	if (!hysdn_proc_entry) {
		printk(KERN_ERR "HYSDN: unable to create hysdn subdir\n");
		return (-1);
	}
	card = card_root;	/* point to first card */
	while (card) {

		sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
		if ((card->procconf = (void *) proc_create(conf_name,
							   S_IFREG | S_IRUGO | S_IWUSR,
							   hysdn_proc_entry,
							   &conf_fops)) != NULL) {
			hysdn_proclog_init(card);	/* init the log file entry */
		}
		card = card->next;	/* next entry */
	}

	printk(KERN_NOTICE "HYSDN: procfs initialised\n");
	return (0);
}				/* hysdn_procconf_init */

/*************************************************************************************/
/* hysdn_procconf_release is called when the module is unloaded and before the cards */
/* resources are released. The module counter is assumed to be 0 !                   */
/*************************************************************************************/
void
hysdn_procconf_release(void)
{
	hysdn_card *card;
	unsigned char conf_name[20];

	card = card_root;	/* start with first card */
	while (card) {

		sprintf(conf_name, "%s%d", PROC_CONF_BASENAME, card->myid);
		if (card->procconf)
			remove_proc_entry(conf_name, hysdn_proc_entry);

		hysdn_proclog_release(card);	/* init the log file entry */

		card = card->next;	/* point to next card */
	}

	remove_proc_entry(PROC_SUBDIR_NAME, init_net.proc_net);
}
gpl-2.0
atinm/android_kernel_samsung_manta
drivers/media/video/cx23885/cx23885-ioctl.c
9242
5225
/*
 *  Driver for the Conexant CX23885/7/8 PCIe bridge
 *
 *  Various common ioctl() support functions
 *
 *  Copyright (c) 2009 Andy Walls <awalls@md.metrocast.net>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include "cx23885.h"
#include <media/v4l2-chip-ident.h>

/*
 * VIDIOC_DBG_G_CHIP_IDENT handler: identify the bridge chip (host addr 0),
 * the CX23417 (host addr 1) or the CX23888 integrated IR controller
 * (host addr 2), or forward the query to the i2c subdevices.
 */
int cx23885_g_chip_ident(struct file *file, void *fh,
			 struct v4l2_dbg_chip_ident *chip)
{
	struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev;
	int err = 0;
	u8 rev;

	chip->ident = V4L2_IDENT_NONE;
	chip->revision = 0;
	switch (chip->match.type) {
	case V4L2_CHIP_MATCH_HOST:
		switch (chip->match.addr) {
		case 0:
			rev = cx_read(RDR_CFG2) & 0xff;
			switch (dev->pci->device) {
			case 0x8852:
				/* rev 0x04 could be '885 or '888.  Pick '888. */
				if (rev == 0x04)
					chip->ident = V4L2_IDENT_CX23888;
				else
					chip->ident = V4L2_IDENT_CX23885;
				break;
			case 0x8880:
				if (rev == 0x0e || rev == 0x0f)
					chip->ident = V4L2_IDENT_CX23887;
				else
					chip->ident = V4L2_IDENT_CX23888;
				break;
			default:
				chip->ident = V4L2_IDENT_UNKNOWN;
				break;
			}
			chip->revision = (dev->pci->device << 16) | (rev << 8) |
					 (dev->hwrevision & 0xff);
			break;
		case 1:
			if (dev->v4l_device != NULL) {
				chip->ident = V4L2_IDENT_CX23417;
				chip->revision = 0;
			}
			break;
		case 2:
			/*
			 * The integrated IR controller on the CX23888 is
			 * host chip 2.  It may not be used/initialized or sd_ir
			 * may be pointing at the cx25840 subdevice for the
			 * IR controller on the CX23885.  Thus we find it
			 * without using the dev->sd_ir pointer.
			 */
			call_hw(dev, CX23885_HW_888_IR, core, g_chip_ident,
				chip);
			break;
		default:
			err = -EINVAL; /* per V4L2 spec */
			break;
		}
		break;
	case V4L2_CHIP_MATCH_I2C_DRIVER:
		/* If needed, returns V4L2_IDENT_AMBIGUOUS without extra work */
		call_all(dev, core, g_chip_ident, chip);
		break;
	case V4L2_CHIP_MATCH_I2C_ADDR:
		/*
		 * We could return V4L2_IDENT_UNKNOWN, but we don't do the work
		 * to look if a chip is at the address with no driver.  That's a
		 * dangerous thing to do with EEPROMs anyway.
		 */
		call_all(dev, core, g_chip_ident, chip);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Read one 32-bit, word-aligned register from BAR0 of the bridge. */
static int cx23885_g_host_register(struct cx23885_dev *dev,
				   struct v4l2_dbg_register *reg)
{
	if ((reg->reg & 0x3) != 0 || reg->reg >= pci_resource_len(dev->pci, 0))
		return -EINVAL;

	reg->size = 4;
	reg->val = cx_read(reg->reg);
	return 0;
}

/* Read one 32-bit register from the CX23417 mailbox interface. */
static int cx23417_g_register(struct cx23885_dev *dev,
			      struct v4l2_dbg_register *reg)
{
	u32 value;

	if (dev->v4l_device == NULL)
		return -EINVAL;

	if ((reg->reg & 0x3) != 0 || reg->reg >= 0x10000)
		return -EINVAL;

	if (mc417_register_read(dev, (u16) reg->reg, &value))
		return -EINVAL; /* V4L2 spec, but -EREMOTEIO really */

	reg->size = 4;
	reg->val = value;
	return 0;
}

/* VIDIOC_DBG_G_REGISTER handler (CAP_SYS_ADMIN only). */
int cx23885_g_register(struct file *file, void *fh,
		       struct v4l2_dbg_register *reg)
{
	struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (reg->match.type == V4L2_CHIP_MATCH_HOST) {
		switch (reg->match.addr) {
		case 0:
			return cx23885_g_host_register(dev, reg);
		case 1:
			return cx23417_g_register(dev, reg);
		default:
			break;
		}
	}

	/* FIXME - any error returns should not be ignored */
	call_all(dev, core, g_register, reg);
	return 0;
}

/* Write one 32-bit, word-aligned register in BAR0 of the bridge. */
static int cx23885_s_host_register(struct cx23885_dev *dev,
				   struct v4l2_dbg_register *reg)
{
	if ((reg->reg & 0x3) != 0 || reg->reg >= pci_resource_len(dev->pci, 0))
		return -EINVAL;

	reg->size = 4;
	cx_write(reg->reg, reg->val);
	return 0;
}

/* Write one 32-bit register through the CX23417 mailbox interface. */
static int cx23417_s_register(struct cx23885_dev *dev,
			      struct v4l2_dbg_register *reg)
{
	if (dev->v4l_device == NULL)
		return -EINVAL;

	if ((reg->reg & 0x3) != 0 || reg->reg >= 0x10000)
		return -EINVAL;

	if (mc417_register_write(dev, (u16) reg->reg, (u32) reg->val))
		return -EINVAL; /* V4L2 spec, but -EREMOTEIO really */

	reg->size = 4;
	return 0;
}

/* VIDIOC_DBG_S_REGISTER handler (CAP_SYS_ADMIN only). */
int cx23885_s_register(struct file *file, void *fh,
		       struct v4l2_dbg_register *reg)
{
	struct cx23885_dev *dev = ((struct cx23885_fh *)fh)->dev;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (reg->match.type == V4L2_CHIP_MATCH_HOST) {
		switch (reg->match.addr) {
		case 0:
			return cx23885_s_host_register(dev, reg);
		case 1:
			return cx23417_s_register(dev, reg);
		default:
			break;
		}
	}

	/* FIXME - any error returns should not be ignored */
	call_all(dev, core, s_register, reg);
	return 0;
}
#endif
gpl-2.0
arter97/linaro-lsk
arch/arm/kernel/dma-isa.c
12570
5163
/*
 *  linux/arch/arm/kernel/dma-isa.c
 *
 *  Copyright (C) 1999-2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  ISA DMA primitives
 *  Taken from various sources, including:
 *   linux/include/asm/dma.h: Defines for using and allocating dma channels.
 *     Written by Hennus Bergman, 1992.
 *     High DMA channel support & info by Hannu Savolainen and John Boyd,
 *     Nov. 1992.
 *   arch/arm/kernel/dma-ebsa285.c
 *   Copyright (C) 1998 Phil Blundell
 */
#include <linux/ioport.h>
#include <linux/init.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>

#include <asm/dma.h>
#include <asm/mach/dma.h>

/* Column indices into isa_dma_port[chan][] below. */
#define ISA_DMA_MASK	0
#define ISA_DMA_MODE	1
#define ISA_DMA_CLRFF	2
#define ISA_DMA_PGHI	3
#define ISA_DMA_PGLO	4
#define ISA_DMA_ADDR	5
#define ISA_DMA_COUNT	6

/*
 * Per-channel I/O port map for the two cascaded ISA DMA controllers:
 * channels 0-3 live on the first controller, channels 4-7 on the
 * second (whose address/count registers are word-granular).
 */
static unsigned int isa_dma_port[8][7] = {
	/* MASK   MODE   CLRFF  PAGE_HI PAGE_LO ADDR COUNT */
	{  0x0a,  0x0b,  0x0c,  0x487,  0x087,  0x00, 0x01 },
	{  0x0a,  0x0b,  0x0c,  0x483,  0x083,  0x02, 0x03 },
	{  0x0a,  0x0b,  0x0c,  0x481,  0x081,  0x04, 0x05 },
	{  0x0a,  0x0b,  0x0c,  0x482,  0x082,  0x06, 0x07 },
	{  0xd4,  0xd6,  0xd8,  0x000,  0x000,  0xc0, 0xc2 },
	{  0xd4,  0xd6,  0xd8,  0x48b,  0x08b,  0xc4, 0xc6 },
	{  0xd4,  0xd6,  0xd8,  0x489,  0x089,  0xc8, 0xca },
	{  0xd4,  0xd6,  0xd8,  0x48a,  0x08a,  0xcc, 0xce }
};

/*
 * Read back the remaining transfer count for @chan.
 *
 * The count register is read twice: first inb() returns the low byte,
 * the second the high byte (NOTE(review): relies on the controller's
 * byte-pointer flip-flop sequencing — confirm against the 8237 data
 * sheet).  The register holds count-1, hence the "1 +".  Channels 4-7
 * count in 16-bit words, so the result is doubled to yield bytes.
 */
static int isa_get_dma_residue(unsigned int chan, dma_t *dma)
{
	unsigned int io_port = isa_dma_port[chan][ISA_DMA_COUNT];
	int count;

	count = 1 + inb(io_port);
	count |= inb(io_port) << 8;

	return chan < 4 ? count : (count << 1);
}

/*
 * Program and unmask DMA channel @chan.
 *
 * If the channel's parameters are marked invalid, (re)write the page,
 * address, count and mode registers from dma->buf; otherwise only the
 * mask register is touched to re-enable the previously programmed
 * transfer.
 */
static void isa_enable_dma(unsigned int chan, dma_t *dma)
{
	if (dma->invalid) {
		unsigned long address, length;
		unsigned int mode;
		enum dma_data_direction direction;

		/* Low two bits of the mode register select the channel. */
		mode = (chan & 3) | dma->dma_mode;
		switch (dma->dma_mode & DMA_MODE_MASK) {
		case DMA_MODE_READ:
			direction = DMA_FROM_DEVICE;
			break;

		case DMA_MODE_WRITE:
			direction = DMA_TO_DEVICE;
			break;

		case DMA_MODE_CASCADE:
			direction = DMA_BIDIRECTIONAL;
			break;

		default:
			direction = DMA_NONE;
			break;
		}

		if (!dma->sg) {
			/*
			 * Cope with ISA-style drivers which expect cache
			 * coherence.
			 */
			dma->sg = &dma->buf;
			dma->sgcount = 1;
			dma->buf.length = dma->count;
			dma->buf.dma_address = dma_map_single(NULL,
				dma->addr, dma->count,
				direction);
		}

		address = dma->buf.dma_address;
		length  = dma->buf.length - 1;	/* controller counts N-1 */

		/* Page registers hold address bits 16-31. */
		outb(address >> 16, isa_dma_port[chan][ISA_DMA_PGLO]);
		outb(address >> 24, isa_dma_port[chan][ISA_DMA_PGHI]);

		/* High channels are programmed in 16-bit words. */
		if (chan >= 4) {
			address >>= 1;
			length >>= 1;
		}

		/* Reset the byte flip-flop before the two-byte writes. */
		outb(0, isa_dma_port[chan][ISA_DMA_CLRFF]);

		outb(address, isa_dma_port[chan][ISA_DMA_ADDR]);
		outb(address >> 8, isa_dma_port[chan][ISA_DMA_ADDR]);

		outb(length, isa_dma_port[chan][ISA_DMA_COUNT]);
		outb(length >> 8, isa_dma_port[chan][ISA_DMA_COUNT]);

		outb(mode, isa_dma_port[chan][ISA_DMA_MODE]);
		dma->invalid = 0;
	}
	/* Unmask the channel (bit 2 clear = enable). */
	outb(chan & 3, isa_dma_port[chan][ISA_DMA_MASK]);
}

/* Mask (disable) DMA channel @chan; bit 2 set = mask. */
static void isa_disable_dma(unsigned int chan, dma_t *dma)
{
	outb(chan | 4, isa_dma_port[chan][ISA_DMA_MASK]);
}

static struct dma_ops isa_dma_ops = {
	.type		= "ISA",
	.enable		= isa_enable_dma,
	.disable	= isa_disable_dma,
	.residue	= isa_get_dma_residue,
};

/* Legacy I/O regions claimed when a controller is detected. */
static struct resource dma_resources[] = { {
	.name	= "dma1",
	.start	= 0x0000,
	.end	= 0x000f
}, {
	.name	= "dma low page",
	.start	= 0x0080,
	.end	= 0x008f
}, {
	.name	= "dma2",
	.start	= 0x00c0,
	.end	= 0x00df
}, {
	.name	= "dma high page",
	.start	= 0x0480,
	.end	= 0x048f
} };

static dma_t isa_dma[8];

/*
 * ISA DMA always starts at channel 0
 */
void __init isa_init_dma(void)
{
	/*
	 * Try to autodetect presence of an ISA DMA controller.
	 * We do some minimal initialisation, and check that
	 * channel 0's DMA address registers are writeable.
	 */
	outb(0xff, 0x0d);	/* master clear, controller 1 */
	outb(0xff, 0xda);	/* master clear, controller 2 */

	/*
	 * Write high and low address, and then read them back
	 * in the same order.
	 */
	outb(0x55, 0x00);
	outb(0xaa, 0x00);

	if (inb(0) == 0x55 && inb(0) == 0xaa) {
		unsigned int chan, i;

		for (chan = 0; chan < 8; chan++) {
			isa_dma[chan].d_ops = &isa_dma_ops;
			isa_disable_dma(chan, NULL);
		}

		/* Default all channels to single-transfer verify mode. */
		outb(0x40, 0x0b);
		outb(0x41, 0x0b);
		outb(0x42, 0x0b);
		outb(0x43, 0x0b);

		/* Channel 4 is the cascade channel on controller 2. */
		outb(0xc0, 0xd6);
		outb(0x41, 0xd6);
		outb(0x42, 0xd6);
		outb(0x43, 0xd6);

		/* Unmask the cascade channel on controller 2. */
		outb(0, 0xd4);

		/* Clear command registers of both controllers. */
		outb(0x10, 0x08);
		outb(0x10, 0xd0);

		/*
		 * Is this correct?  According to my documentation, it
		 * doesn't appear to be.  It should be:
		 *  outb(0x3f, 0x40b); outb(0x3f, 0x4d6);
		 */
		outb(0x30, 0x40b);
		outb(0x31, 0x40b);
		outb(0x32, 0x40b);
		outb(0x33, 0x40b);
		outb(0x31, 0x4d6);
		outb(0x32, 0x4d6);
		outb(0x33, 0x4d6);

		/* Best-effort: failures to claim regions are ignored. */
		for (i = 0; i < ARRAY_SIZE(dma_resources); i++)
			request_resource(&ioport_resource, dma_resources + i);

		for (chan = 0; chan < 8; chan++) {
			int ret = isa_dma_add(chan, &isa_dma[chan]);
			if (ret)
				printk(KERN_ERR "ISADMA%u: unable to register: %d\n",
				       chan, ret);
		}

		request_dma(DMA_ISA_CASCADE, "cascade");
	}
}
gpl-2.0
TroNit/BlackDome_New_supersonic
sound/pci/ctxfi/ctimap.c
14618
2516
/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * This source file is released under GPL v2 license (no other versions).
 * See the COPYING file included in the main directory of this source
 * distribution for the license terms and conditions.
 *
 * @File	ctimap.c
 *
 * @Brief
 * This file contains the implementation of generic input mapper operations
 * for input mapper management.
 *
 * @Author	Liu Chun
 * @Date	May 23 2008
 *
 */

#include "ctimap.h"
#include <linux/slab.h>

/*
 * Insert @entry into the list of mappers kept sorted by ->slot, then
 * push the affected hardware mappings via @map_op.
 *
 * The list is treated as circular for the purposes of ->next chaining:
 * the predecessor of the first element is the last element.  Two
 * hardware updates are issued in a deliberate order — the new entry
 * first (pointing at its successor), then its predecessor (re-pointed
 * at the new entry) — so the chain is never left pointing at an
 * unprogrammed entry.  Always returns 0.
 */
int input_mapper_add(struct list_head *mappers, struct imapper *entry,
		     int (*map_op)(void *, struct imapper *), void *data)
{
	struct list_head *pos, *pre, *head;
	struct imapper *pre_ent, *pos_ent;

	head = mappers;

	if (list_empty(head)) {
		/* Sole entry: chain it to itself. */
		entry->next = entry->addr;
		map_op(data, entry);
		list_add(&entry->list, head);
		return 0;
	}

	/* Find the first existing entry with a larger slot number. */
	list_for_each(pos, head) {
		pos_ent = list_entry(pos, struct imapper, list);
		if (pos_ent->slot > entry->slot) {
			/* found a position in list */
			break;
		}
	}

	if (pos != head) {
		pre = pos->prev;
		if (pre == head)
			/* Inserting at the front: circular predecessor
			 * is the current tail. */
			pre = head->prev;

		__list_add(&entry->list, pos->prev, pos);
	} else {
		/* No larger slot found: append; successor wraps to the
		 * first entry. */
		pre = head->prev;
		pos = head->next;
		list_add_tail(&entry->list, head);
	}

	pre_ent = list_entry(pre, struct imapper, list);
	pos_ent = list_entry(pos, struct imapper, list);

	entry->next = pos_ent->addr;
	map_op(data, entry);
	pre_ent->next = entry->addr;
	map_op(data, pre_ent);

	return 0;
}

/*
 * Remove @entry from the mapper list and splice its predecessor's
 * hardware ->next link around it via @map_op.  @entry itself is not
 * freed here — ownership stays with the caller.  Always returns 0.
 */
int input_mapper_delete(struct list_head *mappers, struct imapper *entry,
		     int (*map_op)(void *, struct imapper *), void *data)
{
	struct list_head *next, *pre, *head;
	struct imapper *pre_ent, *next_ent;

	head = mappers;

	if (list_empty(head))
		return 0;

	/* Circular neighbours: skip over the sentinel head. */
	pre = (entry->list.prev == head) ? head->prev : entry->list.prev;
	next = (entry->list.next == head) ? head->next : entry->list.next;

	if (pre == &entry->list) {
		/* entry is the only one node in mappers list */
		entry->next = entry->addr = entry->user = entry->slot = 0;
		map_op(data, entry);
		list_del(&entry->list);
		return 0;
	}

	pre_ent = list_entry(pre, struct imapper, list);
	next_ent = list_entry(next, struct imapper, list);

	/* Bypass the removed entry in hardware before unlinking it. */
	pre_ent->next = next_ent->addr;
	map_op(data, pre_ent);

	list_del(&entry->list);

	return 0;
}

/*
 * Free every imapper on @head.  Entries are detached before kfree();
 * no hardware update is performed here.
 */
void free_input_mapper_list(struct list_head *head)
{
	struct imapper *entry;
	struct list_head *pos;

	while (!list_empty(head)) {
		pos = head->next;
		list_del(pos);
		entry = list_entry(pos, struct imapper, list);
		kfree(entry);
	}
}
gpl-2.0
liaoch/gcc
libgomp/loop_ull.c
27
17655
/* Copyright (C) 2005-2013 Free Software Foundation, Inc.
   Contributed by Richard Henderson <rth@redhat.com>.

   This file is part of the GNU OpenMP Library (libgomp).

   Libgomp is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   Libgomp is distributed in the hope that it will be useful, but WITHOUT ANY
   WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
   FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
   more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

/* This file handles the LOOP (FOR/DO) construct with unsigned long long
   iteration variables.  */

#include <limits.h>
#include <stdlib.h>
#include "libgomp.h"

typedef unsigned long long gomp_ull;

/* Initialize the given work share construct from the given arguments.
   UP says whether the loop counts upwards; for GFS_DYNAMIC the chunk
   size is pre-multiplied by INCR and, on LP64 targets with atomic
   builtins, ws->mode is set when a cheap overflow-free fast path can
   be used by the iterators.  Bit 1 of ws->mode records !UP.  */

static inline void
gomp_loop_ull_init (struct gomp_work_share *ws, bool up, gomp_ull start,
		    gomp_ull end, gomp_ull incr, enum gomp_schedule_type sched,
		    gomp_ull chunk_size)
{
  ws->sched = sched;
  ws->chunk_size_ull = chunk_size;
  /* Canonicalize loops that have zero iterations to ->next == ->end.  */
  ws->end_ull = ((up && start > end) || (!up && start < end))
		? start : end;
  ws->incr_ull = incr;
  ws->next_ull = start;
  ws->mode = 0;
  if (sched == GFS_DYNAMIC)
    {
      ws->chunk_size_ull *= incr;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
      {
	/* For dynamic scheduling prepare things to make each iteration
	   faster.  */
	struct gomp_thread *thr = gomp_thread ();
	struct gomp_team *team = thr->ts.team;
	long nthreads = team ? team->nthreads : 1;

	if (__builtin_expect (up, 1))
	  {
	    /* Cheap overflow protection.  */
	    if (__builtin_expect ((nthreads | ws->chunk_size_ull)
				  < 1ULL << (sizeof (gomp_ull)
					     * __CHAR_BIT__ / 2 - 1), 1))
	      ws->mode = ws->end_ull < (__LONG_LONG_MAX__ * 2ULL + 1
					- (nthreads + 1) * ws->chunk_size_ull);
	  }
	/* Cheap overflow protection.  */
	else if (__builtin_expect ((nthreads | -ws->chunk_size_ull)
				   < 1ULL << (sizeof (gomp_ull)
					      * __CHAR_BIT__ / 2 - 1), 1))
	  ws->mode = ws->end_ull > ((nthreads + 1) * -ws->chunk_size_ull
				    - (__LONG_LONG_MAX__ * 2ULL + 1));
      }
#endif
    }
  if (!up)
    ws->mode |= 2;
}

/* The *_start routines are called when first encountering a loop construct
   that is not bound directly to a parallel construct.  The first thread
   that arrives will create the work-share construct; subsequent threads
   will see the construct exists and allocate work from it.

   START, END, INCR are the bounds of the loop; due to the restrictions of
   OpenMP, these values must be the same in every thread.  This is not
   verified (nor is it entirely verifiable, since START is not necessarily
   retained intact in the work-share data structure).  CHUNK_SIZE is the
   scheduling parameter; again this must be identical in all threads.

   Returns true if there's any work for this thread to perform.  If so,
   *ISTART and *IEND are filled with the bounds of the iteration block
   allocated to this thread.  Returns false if all work was assigned to
   other threads prior to this thread's arrival.  */

static bool
gomp_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
			    gomp_ull incr, gomp_ull chunk_size,
			    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
			  GFS_STATIC, chunk_size);
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
			     gomp_ull incr, gomp_ull chunk_size,
			     gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
			  GFS_DYNAMIC, chunk_size);
      gomp_work_share_init_done ();
    }

  /* Lock-free iterator when atomic builtins are usable; otherwise fall
     back to the work share's mutex.  */
#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
			    gomp_ull incr, gomp_ull chunk_size,
			    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (false))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
			  GFS_GUIDED, chunk_size);
      gomp_work_share_init_done ();
    }

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

/* Entry point for schedule(runtime): dispatch on the ICV-selected
   schedule.  GFS_AUTO currently maps to static.  */

bool
GOMP_loop_ull_runtime_start (bool up, gomp_ull start, gomp_ull end,
			     gomp_ull incr, gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_static_start (up, start, end, incr,
					 icv->run_sched_modifier,
					 istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_start (up, start, end, incr,
					  icv->run_sched_modifier,
					  istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_start (up, start, end, incr,
					 icv->run_sched_modifier,
					 istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
	 driven choice.  */
      return gomp_loop_ull_static_start (up, start, end, incr,
					 0, istart, iend);
    default:
      abort ();
    }
}

/* The *_ordered_*_start routines are similar.  The only difference is that
   this work-share construct is initialized to expect an ORDERED section.  */

static bool
gomp_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
				    gomp_ull incr, gomp_ull chunk_size,
				    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  thr->ts.static_trip = 0;
  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
			  GFS_STATIC, chunk_size);
      gomp_ordered_static_init ();
      gomp_work_share_init_done ();
    }

  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
				     gomp_ull incr, gomp_ull chunk_size,
				     gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  /* Note: the initializing thread takes the work-share lock *before*
     signalling init-done, so the first iteration grab below is always
     performed under the lock.  */
  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
			  GFS_DYNAMIC, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
				    gomp_ull incr, gomp_ull chunk_size,
				    gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  if (gomp_work_share_start (true))
    {
      gomp_loop_ull_init (thr->ts.work_share, up, start, end, incr,
			  GFS_GUIDED, chunk_size);
      gomp_mutex_lock (&thr->ts.work_share->lock);
      gomp_work_share_init_done ();
    }
  else
    gomp_mutex_lock (&thr->ts.work_share->lock);

  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_first ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_start (bool up, gomp_ull start, gomp_ull end,
				     gomp_ull incr, gomp_ull *istart,
				     gomp_ull *iend)
{
  struct gomp_task_icv *icv = gomp_icv (false);
  switch (icv->run_sched_var)
    {
    case GFS_STATIC:
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
						 icv->run_sched_modifier,
						 istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr,
						  icv->run_sched_modifier,
						  istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_start (up, start, end, incr,
						 icv->run_sched_modifier,
						 istart, iend);
    case GFS_AUTO:
      /* For now map to schedule(static), later on we could play with feedback
	 driven choice.  */
      return gomp_loop_ull_ordered_static_start (up, start, end, incr,
						 0, istart, iend);
    default:
      abort ();
    }
}

/* The *_next routines are called when the thread completes processing of
   the iteration block currently assigned to it.  If the work-share
   construct is bound directly to a parallel construct, then the iteration
   bounds may have been set up before the parallel.  In which case, this
   may be the first iteration for the thread.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return !gomp_iter_ull_static_next (istart, iend);
}

static bool
gomp_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_dynamic_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

static bool
gomp_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  bool ret;

#if defined HAVE_SYNC_BUILTINS && defined __LP64__
  ret = gomp_iter_ull_guided_next (istart, iend);
#else
  struct gomp_thread *thr = gomp_thread ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  gomp_mutex_unlock (&thr->ts.work_share->lock);
#endif

  return ret;
}

bool
GOMP_loop_ull_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* The *_ordered_*_next routines are called when the thread completes
   processing of the iteration block currently assigned to it.

   Returns true if there is work remaining to be performed; *ISTART and
   *IEND are filled with a new iteration block.  Returns false if all work
   has been assigned.  */

static bool
gomp_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  int test;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  test = gomp_iter_ull_static_next (istart, iend);
  if (test >= 0)
    gomp_ordered_static_next ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return test == 0;
}

static bool
gomp_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_dynamic_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

static bool
gomp_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();
  bool ret;

  gomp_ordered_sync ();
  gomp_mutex_lock (&thr->ts.work_share->lock);
  ret = gomp_iter_ull_guided_next_locked (istart, iend);
  if (ret)
    gomp_ordered_next ();
  else
    gomp_ordered_last ();
  gomp_mutex_unlock (&thr->ts.work_share->lock);

  return ret;
}

bool
GOMP_loop_ull_ordered_runtime_next (gomp_ull *istart, gomp_ull *iend)
{
  struct gomp_thread *thr = gomp_thread ();

  switch (thr->ts.work_share->sched)
    {
    case GFS_STATIC:
    case GFS_AUTO:
      return gomp_loop_ull_ordered_static_next (istart, iend);
    case GFS_DYNAMIC:
      return gomp_loop_ull_ordered_dynamic_next (istart, iend);
    case GFS_GUIDED:
      return gomp_loop_ull_ordered_guided_next (istart, iend);
    default:
      abort ();
    }
}

/* We use static functions above so that we're sure that the "runtime"
   function can defer to the proper routine without interposition.  We
   export the static function with a strong alias when possible, or with
   a wrapper function otherwise.  */

#ifdef HAVE_ATTRIBUTE_ALIAS
extern __typeof(gomp_loop_ull_static_start) GOMP_loop_ull_static_start
	__attribute__((alias ("gomp_loop_ull_static_start")));
extern __typeof(gomp_loop_ull_dynamic_start) GOMP_loop_ull_dynamic_start
	__attribute__((alias ("gomp_loop_ull_dynamic_start")));
extern __typeof(gomp_loop_ull_guided_start) GOMP_loop_ull_guided_start
	__attribute__((alias ("gomp_loop_ull_guided_start")));

extern __typeof(gomp_loop_ull_ordered_static_start) GOMP_loop_ull_ordered_static_start
	__attribute__((alias ("gomp_loop_ull_ordered_static_start")));
extern __typeof(gomp_loop_ull_ordered_dynamic_start) GOMP_loop_ull_ordered_dynamic_start
	__attribute__((alias ("gomp_loop_ull_ordered_dynamic_start")));
extern __typeof(gomp_loop_ull_ordered_guided_start) GOMP_loop_ull_ordered_guided_start
	__attribute__((alias ("gomp_loop_ull_ordered_guided_start")));

extern __typeof(gomp_loop_ull_static_next) GOMP_loop_ull_static_next
	__attribute__((alias ("gomp_loop_ull_static_next")));
extern __typeof(gomp_loop_ull_dynamic_next) GOMP_loop_ull_dynamic_next
	__attribute__((alias ("gomp_loop_ull_dynamic_next")));
extern __typeof(gomp_loop_ull_guided_next) GOMP_loop_ull_guided_next
	__attribute__((alias ("gomp_loop_ull_guided_next")));

extern __typeof(gomp_loop_ull_ordered_static_next) GOMP_loop_ull_ordered_static_next
	__attribute__((alias ("gomp_loop_ull_ordered_static_next")));
extern __typeof(gomp_loop_ull_ordered_dynamic_next) GOMP_loop_ull_ordered_dynamic_next
	__attribute__((alias ("gomp_loop_ull_ordered_dynamic_next")));
extern __typeof(gomp_loop_ull_ordered_guided_next) GOMP_loop_ull_ordered_guided_next
	__attribute__((alias ("gomp_loop_ull_ordered_guided_next")));
#else
bool
GOMP_loop_ull_static_start (bool up, gomp_ull start, gomp_ull end,
			    gomp_ull incr, gomp_ull chunk_size,
			    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_start (up, start, end, incr, chunk_size, istart,
				     iend);
}

bool
GOMP_loop_ull_dynamic_start (bool up, gomp_ull start, gomp_ull end,
			     gomp_ull incr, gomp_ull chunk_size,
			     gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_start (up, start, end, incr, chunk_size, istart,
				      iend);
}

bool
GOMP_loop_ull_guided_start (bool up, gomp_ull start, gomp_ull end,
			    gomp_ull incr, gomp_ull chunk_size,
			    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_start (up, start, end, incr, chunk_size, istart,
				     iend);
}

bool
GOMP_loop_ull_ordered_static_start (bool up, gomp_ull start, gomp_ull end,
				    gomp_ull incr, gomp_ull chunk_size,
				    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_start (up, start, end, incr, chunk_size,
					     istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_start (bool up, gomp_ull start, gomp_ull end,
				     gomp_ull incr, gomp_ull chunk_size,
				     gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_start (up, start, end, incr, chunk_size,
					      istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_start (bool up, gomp_ull start, gomp_ull end,
				    gomp_ull incr, gomp_ull chunk_size,
				    gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_start (up, start, end, incr, chunk_size,
					     istart, iend);
}

bool
GOMP_loop_ull_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_static_next (istart, iend);
}

bool
GOMP_loop_ull_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_guided_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_static_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_static_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_dynamic_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_dynamic_next (istart, iend);
}

bool
GOMP_loop_ull_ordered_guided_next (gomp_ull *istart, gomp_ull *iend)
{
  return gomp_loop_ull_ordered_guided_next (istart, iend);
}
#endif
gpl-2.0
jidongxiao/hyperpsonline
qemu-2.0.0/audio/audio.c
27
52627
/* * QEMU Audio subsystem * * Copyright (c) 2003-2005 Vassili Karpov (malc) * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include "hw/hw.h" #include "audio.h" #include "monitor/monitor.h" #include "qemu/timer.h" #include "sysemu/sysemu.h" #define AUDIO_CAP "audio" #include "audio_int.h" /* #define DEBUG_PLIVE */ /* #define DEBUG_LIVE */ /* #define DEBUG_OUT */ /* #define DEBUG_CAPTURE */ /* #define DEBUG_POLL */ #define SW_NAME(sw) (sw)->name ? (sw)->name : "unknown" /* Order of CONFIG_AUDIO_DRIVERS is import. The 1st one is the one used by default, that is the reason that we generate the list. 
*/ static struct audio_driver *drvtab[] = { #ifdef CONFIG_SPICE &spice_audio_driver, #endif CONFIG_AUDIO_DRIVERS &no_audio_driver, &wav_audio_driver }; struct fixed_settings { int enabled; int nb_voices; int greedy; struct audsettings settings; }; static struct { struct fixed_settings fixed_out; struct fixed_settings fixed_in; union { int hertz; int64_t ticks; } period; int plive; int log_to_monitor; int try_poll_in; int try_poll_out; } conf = { .fixed_out = { /* DAC fixed settings */ .enabled = 1, .nb_voices = 1, .greedy = 1, .settings = { .freq = 44100, .nchannels = 2, .fmt = AUD_FMT_S16, .endianness = AUDIO_HOST_ENDIANNESS, } }, .fixed_in = { /* ADC fixed settings */ .enabled = 1, .nb_voices = 1, .greedy = 1, .settings = { .freq = 44100, .nchannels = 2, .fmt = AUD_FMT_S16, .endianness = AUDIO_HOST_ENDIANNESS, } }, .period = { .hertz = 100 }, .plive = 0, .log_to_monitor = 0, .try_poll_in = 1, .try_poll_out = 1, }; static AudioState glob_audio_state; const struct mixeng_volume nominal_volume = { .mute = 0, #ifdef FLOAT_MIXENG .r = 1.0, .l = 1.0, #else .r = 1ULL << 32, .l = 1ULL << 32, #endif }; #ifdef AUDIO_IS_FLAWLESS_AND_NO_CHECKS_ARE_REQURIED #error No its not #else static void audio_print_options (const char *prefix, struct audio_option *opt); int audio_bug (const char *funcname, int cond) { if (cond) { static int shown; AUD_log (NULL, "A bug was just triggered in %s\n", funcname); if (!shown) { struct audio_driver *d; shown = 1; AUD_log (NULL, "Save all your work and restart without audio\n"); AUD_log (NULL, "Please send bug report to av1474@comtv.ru\n"); AUD_log (NULL, "I am sorry\n"); d = glob_audio_state.drv; if (d) { audio_print_options (d->name, d->options); } } AUD_log (NULL, "Context:\n"); #if defined AUDIO_BREAKPOINT_ON_BUG # if defined HOST_I386 # if defined __GNUC__ __asm__ ("int3"); # elif defined _MSC_VER _asm _emit 0xcc; # else abort (); # endif # else abort (); # endif #endif } return cond; } #endif static inline int audio_bits_to_index (int 
bits) { switch (bits) { case 8: return 0; case 16: return 1; case 32: return 2; default: audio_bug ("bits_to_index", 1); AUD_log (NULL, "invalid bits %d\n", bits); return 0; } } void *audio_calloc (const char *funcname, int nmemb, size_t size) { int cond; size_t len; len = nmemb * size; cond = !nmemb || !size; cond |= nmemb < 0; cond |= len < size; if (audio_bug ("audio_calloc", cond)) { AUD_log (NULL, "%s passed invalid arguments to audio_calloc\n", funcname); AUD_log (NULL, "nmemb=%d size=%zu (len=%zu)\n", nmemb, size, len); return NULL; } return g_malloc0 (len); } static char *audio_alloc_prefix (const char *s) { const char qemu_prefix[] = "QEMU_"; size_t len, i; char *r, *u; if (!s) { return NULL; } len = strlen (s); r = g_malloc (len + sizeof (qemu_prefix)); u = r + sizeof (qemu_prefix) - 1; pstrcpy (r, len + sizeof (qemu_prefix), qemu_prefix); pstrcat (r, len + sizeof (qemu_prefix), s); for (i = 0; i < len; ++i) { u[i] = qemu_toupper(u[i]); } return r; } static const char *audio_audfmt_to_string (audfmt_e fmt) { switch (fmt) { case AUD_FMT_U8: return "U8"; case AUD_FMT_U16: return "U16"; case AUD_FMT_S8: return "S8"; case AUD_FMT_S16: return "S16"; case AUD_FMT_U32: return "U32"; case AUD_FMT_S32: return "S32"; } dolog ("Bogus audfmt %d returning S16\n", fmt); return "S16"; } static audfmt_e audio_string_to_audfmt (const char *s, audfmt_e defval, int *defaultp) { if (!strcasecmp (s, "u8")) { *defaultp = 0; return AUD_FMT_U8; } else if (!strcasecmp (s, "u16")) { *defaultp = 0; return AUD_FMT_U16; } else if (!strcasecmp (s, "u32")) { *defaultp = 0; return AUD_FMT_U32; } else if (!strcasecmp (s, "s8")) { *defaultp = 0; return AUD_FMT_S8; } else if (!strcasecmp (s, "s16")) { *defaultp = 0; return AUD_FMT_S16; } else if (!strcasecmp (s, "s32")) { *defaultp = 0; return AUD_FMT_S32; } else { dolog ("Bogus audio format `%s' using %s\n", s, audio_audfmt_to_string (defval)); *defaultp = 1; return defval; } } static audfmt_e audio_get_conf_fmt (const char *envname, 
audfmt_e defval, int *defaultp) { const char *var = getenv (envname); if (!var) { *defaultp = 1; return defval; } return audio_string_to_audfmt (var, defval, defaultp); } static int audio_get_conf_int (const char *key, int defval, int *defaultp) { int val; char *strval; strval = getenv (key); if (strval) { *defaultp = 0; val = atoi (strval); return val; } else { *defaultp = 1; return defval; } } static const char *audio_get_conf_str (const char *key, const char *defval, int *defaultp) { const char *val = getenv (key); if (!val) { *defaultp = 1; return defval; } else { *defaultp = 0; return val; } } void AUD_vlog (const char *cap, const char *fmt, va_list ap) { if (conf.log_to_monitor) { if (cap) { monitor_printf(default_mon, "%s: ", cap); } monitor_vprintf(default_mon, fmt, ap); } else { if (cap) { fprintf (stderr, "%s: ", cap); } vfprintf (stderr, fmt, ap); } } void AUD_log (const char *cap, const char *fmt, ...) { va_list ap; va_start (ap, fmt); AUD_vlog (cap, fmt, ap); va_end (ap); } static void audio_print_options (const char *prefix, struct audio_option *opt) { char *uprefix; if (!prefix) { dolog ("No prefix specified\n"); return; } if (!opt) { dolog ("No options\n"); return; } uprefix = audio_alloc_prefix (prefix); for (; opt->name; opt++) { const char *state = "default"; printf (" %s_%s: ", uprefix, opt->name); if (opt->overriddenp && *opt->overriddenp) { state = "current"; } switch (opt->tag) { case AUD_OPT_BOOL: { int *intp = opt->valp; printf ("boolean, %s = %d\n", state, *intp ? 1 : 0); } break; case AUD_OPT_INT: { int *intp = opt->valp; printf ("integer, %s = %d\n", state, *intp); } break; case AUD_OPT_FMT: { audfmt_e *fmtp = opt->valp; printf ( "format, %s = %s, (one of: U8 S8 U16 S16 U32 S32)\n", state, audio_audfmt_to_string (*fmtp) ); } break; case AUD_OPT_STR: { const char **strp = opt->valp; printf ("string, %s = %s\n", state, *strp ? 
*strp : "(not set)"); } break; default: printf ("???\n"); dolog ("Bad value tag for option %s_%s %d\n", uprefix, opt->name, opt->tag); break; } printf (" %s\n", opt->descr); } g_free (uprefix); } static void audio_process_options (const char *prefix, struct audio_option *opt) { char *optname; const char qemu_prefix[] = "QEMU_"; size_t preflen, optlen; if (audio_bug (AUDIO_FUNC, !prefix)) { dolog ("prefix = NULL\n"); return; } if (audio_bug (AUDIO_FUNC, !opt)) { dolog ("opt = NULL\n"); return; } preflen = strlen (prefix); for (; opt->name; opt++) { size_t len, i; int def; if (!opt->valp) { dolog ("Option value pointer for `%s' is not set\n", opt->name); continue; } len = strlen (opt->name); /* len of opt->name + len of prefix + size of qemu_prefix * (includes trailing zero) + zero + underscore (on behalf of * sizeof) */ optlen = len + preflen + sizeof (qemu_prefix) + 1; optname = g_malloc (optlen); pstrcpy (optname, optlen, qemu_prefix); /* copy while upper-casing, including trailing zero */ for (i = 0; i <= preflen; ++i) { optname[i + sizeof (qemu_prefix) - 1] = qemu_toupper(prefix[i]); } pstrcat (optname, optlen, "_"); pstrcat (optname, optlen, opt->name); def = 1; switch (opt->tag) { case AUD_OPT_BOOL: case AUD_OPT_INT: { int *intp = opt->valp; *intp = audio_get_conf_int (optname, *intp, &def); } break; case AUD_OPT_FMT: { audfmt_e *fmtp = opt->valp; *fmtp = audio_get_conf_fmt (optname, *fmtp, &def); } break; case AUD_OPT_STR: { const char **strp = opt->valp; *strp = audio_get_conf_str (optname, *strp, &def); } break; default: dolog ("Bad value tag for option `%s' - %d\n", optname, opt->tag); break; } if (!opt->overriddenp) { opt->overriddenp = &opt->overridden; } *opt->overriddenp = !def; g_free (optname); } } static void audio_print_settings (struct audsettings *as) { dolog ("frequency=%d nchannels=%d fmt=", as->freq, as->nchannels); switch (as->fmt) { case AUD_FMT_S8: AUD_log (NULL, "S8"); break; case AUD_FMT_U8: AUD_log (NULL, "U8"); break; case AUD_FMT_S16: 
AUD_log (NULL, "S16"); break; case AUD_FMT_U16: AUD_log (NULL, "U16"); break; case AUD_FMT_S32: AUD_log (NULL, "S32"); break; case AUD_FMT_U32: AUD_log (NULL, "U32"); break; default: AUD_log (NULL, "invalid(%d)", as->fmt); break; } AUD_log (NULL, " endianness="); switch (as->endianness) { case 0: AUD_log (NULL, "little"); break; case 1: AUD_log (NULL, "big"); break; default: AUD_log (NULL, "invalid"); break; } AUD_log (NULL, "\n"); } static int audio_validate_settings (struct audsettings *as) { int invalid; invalid = as->nchannels != 1 && as->nchannels != 2; invalid |= as->endianness != 0 && as->endianness != 1; switch (as->fmt) { case AUD_FMT_S8: case AUD_FMT_U8: case AUD_FMT_S16: case AUD_FMT_U16: case AUD_FMT_S32: case AUD_FMT_U32: break; default: invalid = 1; break; } invalid |= as->freq <= 0; return invalid ? -1 : 0; } static int audio_pcm_info_eq (struct audio_pcm_info *info, struct audsettings *as) { int bits = 8, sign = 0; switch (as->fmt) { case AUD_FMT_S8: sign = 1; /* fall through */ case AUD_FMT_U8: break; case AUD_FMT_S16: sign = 1; /* fall through */ case AUD_FMT_U16: bits = 16; break; case AUD_FMT_S32: sign = 1; /* fall through */ case AUD_FMT_U32: bits = 32; break; } return info->freq == as->freq && info->nchannels == as->nchannels && info->sign == sign && info->bits == bits && info->swap_endianness == (as->endianness != AUDIO_HOST_ENDIANNESS); } void audio_pcm_init_info (struct audio_pcm_info *info, struct audsettings *as) { int bits = 8, sign = 0, shift = 0; switch (as->fmt) { case AUD_FMT_S8: sign = 1; case AUD_FMT_U8: break; case AUD_FMT_S16: sign = 1; case AUD_FMT_U16: bits = 16; shift = 1; break; case AUD_FMT_S32: sign = 1; case AUD_FMT_U32: bits = 32; shift = 2; break; } info->freq = as->freq; info->bits = bits; info->sign = sign; info->nchannels = as->nchannels; info->shift = (as->nchannels == 2) + shift; info->align = (1 << info->shift) - 1; info->bytes_per_second = info->freq << info->shift; info->swap_endianness = (as->endianness != 
AUDIO_HOST_ENDIANNESS); } void audio_pcm_info_clear_buf (struct audio_pcm_info *info, void *buf, int len) { if (!len) { return; } if (info->sign) { memset (buf, 0x00, len << info->shift); } else { switch (info->bits) { case 8: memset (buf, 0x80, len << info->shift); break; case 16: { int i; uint16_t *p = buf; int shift = info->nchannels - 1; short s = INT16_MAX; if (info->swap_endianness) { s = bswap16 (s); } for (i = 0; i < len << shift; i++) { p[i] = s; } } break; case 32: { int i; uint32_t *p = buf; int shift = info->nchannels - 1; int32_t s = INT32_MAX; if (info->swap_endianness) { s = bswap32 (s); } for (i = 0; i < len << shift; i++) { p[i] = s; } } break; default: AUD_log (NULL, "audio_pcm_info_clear_buf: invalid bits %d\n", info->bits); break; } } } /* * Capture */ static void noop_conv (struct st_sample *dst, const void *src, int samples) { (void) src; (void) dst; (void) samples; } static CaptureVoiceOut *audio_pcm_capture_find_specific ( struct audsettings *as ) { CaptureVoiceOut *cap; AudioState *s = &glob_audio_state; for (cap = s->cap_head.lh_first; cap; cap = cap->entries.le_next) { if (audio_pcm_info_eq (&cap->hw.info, as)) { return cap; } } return NULL; } static void audio_notify_capture (CaptureVoiceOut *cap, audcnotification_e cmd) { struct capture_callback *cb; #ifdef DEBUG_CAPTURE dolog ("notification %d sent\n", cmd); #endif for (cb = cap->cb_head.lh_first; cb; cb = cb->entries.le_next) { cb->ops.notify (cb->opaque, cmd); } } static void audio_capture_maybe_changed (CaptureVoiceOut *cap, int enabled) { if (cap->hw.enabled != enabled) { audcnotification_e cmd; cap->hw.enabled = enabled; cmd = enabled ? 
AUD_CNOTIFY_ENABLE : AUD_CNOTIFY_DISABLE; audio_notify_capture (cap, cmd); } } static void audio_recalc_and_notify_capture (CaptureVoiceOut *cap) { HWVoiceOut *hw = &cap->hw; SWVoiceOut *sw; int enabled = 0; for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { if (sw->active) { enabled = 1; break; } } audio_capture_maybe_changed (cap, enabled); } static void audio_detach_capture (HWVoiceOut *hw) { SWVoiceCap *sc = hw->cap_head.lh_first; while (sc) { SWVoiceCap *sc1 = sc->entries.le_next; SWVoiceOut *sw = &sc->sw; CaptureVoiceOut *cap = sc->cap; int was_active = sw->active; if (sw->rate) { st_rate_stop (sw->rate); sw->rate = NULL; } QLIST_REMOVE (sw, entries); QLIST_REMOVE (sc, entries); g_free (sc); if (was_active) { /* We have removed soft voice from the capture: this might have changed the overall status of the capture since this might have been the only active voice */ audio_recalc_and_notify_capture (cap); } sc = sc1; } } static int audio_attach_capture (HWVoiceOut *hw) { AudioState *s = &glob_audio_state; CaptureVoiceOut *cap; audio_detach_capture (hw); for (cap = s->cap_head.lh_first; cap; cap = cap->entries.le_next) { SWVoiceCap *sc; SWVoiceOut *sw; HWVoiceOut *hw_cap = &cap->hw; sc = audio_calloc (AUDIO_FUNC, 1, sizeof (*sc)); if (!sc) { dolog ("Could not allocate soft capture voice (%zu bytes)\n", sizeof (*sc)); return -1; } sc->cap = cap; sw = &sc->sw; sw->hw = hw_cap; sw->info = hw->info; sw->empty = 1; sw->active = hw->enabled; sw->conv = noop_conv; sw->ratio = ((int64_t) hw_cap->info.freq << 32) / sw->info.freq; sw->vol = nominal_volume; sw->rate = st_rate_start (sw->info.freq, hw_cap->info.freq); if (!sw->rate) { dolog ("Could not start rate conversion for `%s'\n", SW_NAME (sw)); g_free (sw); return -1; } QLIST_INSERT_HEAD (&hw_cap->sw_head, sw, entries); QLIST_INSERT_HEAD (&hw->cap_head, sc, entries); #ifdef DEBUG_CAPTURE sw->name = g_strdup_printf ("for %p %d,%d,%d", hw, sw->info.freq, sw->info.bits, sw->info.nchannels); dolog ("Added %s 
active = %d\n", sw->name, sw->active); #endif if (sw->active) { audio_capture_maybe_changed (cap, 1); } } return 0; } /* * Hard voice (capture) */ static int audio_pcm_hw_find_min_in (HWVoiceIn *hw) { SWVoiceIn *sw; int m = hw->total_samples_captured; for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { if (sw->active) { m = audio_MIN (m, sw->total_hw_samples_acquired); } } return m; } int audio_pcm_hw_get_live_in (HWVoiceIn *hw) { int live = hw->total_samples_captured - audio_pcm_hw_find_min_in (hw); if (audio_bug (AUDIO_FUNC, live < 0 || live > hw->samples)) { dolog ("live=%d hw->samples=%d\n", live, hw->samples); return 0; } return live; } int audio_pcm_hw_clip_out (HWVoiceOut *hw, void *pcm_buf, int live, int pending) { int left = hw->samples - pending; int len = audio_MIN (left, live); int clipped = 0; while (len) { struct st_sample *src = hw->mix_buf + hw->rpos; uint8_t *dst = advance (pcm_buf, hw->rpos << hw->info.shift); int samples_till_end_of_buf = hw->samples - hw->rpos; int samples_to_clip = audio_MIN (len, samples_till_end_of_buf); hw->clip (dst, src, samples_to_clip); hw->rpos = (hw->rpos + samples_to_clip) % hw->samples; len -= samples_to_clip; clipped += samples_to_clip; } return clipped; } /* * Soft voice (capture) */ static int audio_pcm_sw_get_rpos_in (SWVoiceIn *sw) { HWVoiceIn *hw = sw->hw; int live = hw->total_samples_captured - sw->total_hw_samples_acquired; int rpos; if (audio_bug (AUDIO_FUNC, live < 0 || live > hw->samples)) { dolog ("live=%d hw->samples=%d\n", live, hw->samples); return 0; } rpos = hw->wpos - live; if (rpos >= 0) { return rpos; } else { return hw->samples + rpos; } } int audio_pcm_sw_read (SWVoiceIn *sw, void *buf, int size) { HWVoiceIn *hw = sw->hw; int samples, live, ret = 0, swlim, isamp, osamp, rpos, total = 0; struct st_sample *src, *dst = sw->buf; rpos = audio_pcm_sw_get_rpos_in (sw) % hw->samples; live = hw->total_samples_captured - sw->total_hw_samples_acquired; if (audio_bug (AUDIO_FUNC, live < 0 || 
live > hw->samples)) { dolog ("live_in=%d hw->samples=%d\n", live, hw->samples); return 0; } samples = size >> sw->info.shift; if (!live) { return 0; } swlim = (live * sw->ratio) >> 32; swlim = audio_MIN (swlim, samples); while (swlim) { src = hw->conv_buf + rpos; isamp = hw->wpos - rpos; /* XXX: <= ? */ if (isamp <= 0) { isamp = hw->samples - rpos; } if (!isamp) { break; } osamp = swlim; if (audio_bug (AUDIO_FUNC, osamp < 0)) { dolog ("osamp=%d\n", osamp); return 0; } st_rate_flow (sw->rate, src, dst, &isamp, &osamp); swlim -= osamp; rpos = (rpos + isamp) % hw->samples; dst += osamp; ret += osamp; total += isamp; } if (!(hw->ctl_caps & VOICE_VOLUME_CAP)) { mixeng_volume (sw->buf, ret, &sw->vol); } sw->clip (buf, sw->buf, ret); sw->total_hw_samples_acquired += total; return ret << sw->info.shift; } /* * Hard voice (playback) */ static int audio_pcm_hw_find_min_out (HWVoiceOut *hw, int *nb_livep) { SWVoiceOut *sw; int m = INT_MAX; int nb_live = 0; for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { if (sw->active || !sw->empty) { m = audio_MIN (m, sw->total_hw_samples_mixed); nb_live += 1; } } *nb_livep = nb_live; return m; } static int audio_pcm_hw_get_live_out (HWVoiceOut *hw, int *nb_live) { int smin; int nb_live1; smin = audio_pcm_hw_find_min_out (hw, &nb_live1); if (nb_live) { *nb_live = nb_live1; } if (nb_live1) { int live = smin; if (audio_bug (AUDIO_FUNC, live < 0 || live > hw->samples)) { dolog ("live=%d hw->samples=%d\n", live, hw->samples); return 0; } return live; } return 0; } /* * Soft voice (playback) */ int audio_pcm_sw_write (SWVoiceOut *sw, void *buf, int size) { int hwsamples, samples, isamp, osamp, wpos, live, dead, left, swlim, blck; int ret = 0, pos = 0, total = 0; if (!sw) { return size; } hwsamples = sw->hw->samples; live = sw->total_hw_samples_mixed; if (audio_bug (AUDIO_FUNC, live < 0 || live > hwsamples)){ dolog ("live=%d hw->samples=%d\n", live, hwsamples); return 0; } if (live == hwsamples) { #ifdef DEBUG_OUT dolog ("%s is 
full %d\n", sw->name, live); #endif return 0; } wpos = (sw->hw->rpos + live) % hwsamples; samples = size >> sw->info.shift; dead = hwsamples - live; swlim = ((int64_t) dead << 32) / sw->ratio; swlim = audio_MIN (swlim, samples); if (swlim) { sw->conv (sw->buf, buf, swlim); if (!(sw->hw->ctl_caps & VOICE_VOLUME_CAP)) { mixeng_volume (sw->buf, swlim, &sw->vol); } } while (swlim) { dead = hwsamples - live; left = hwsamples - wpos; blck = audio_MIN (dead, left); if (!blck) { break; } isamp = swlim; osamp = blck; st_rate_flow_mix ( sw->rate, sw->buf + pos, sw->hw->mix_buf + wpos, &isamp, &osamp ); ret += isamp; swlim -= isamp; pos += isamp; live += osamp; wpos = (wpos + osamp) % hwsamples; total += osamp; } sw->total_hw_samples_mixed += total; sw->empty = sw->total_hw_samples_mixed == 0; #ifdef DEBUG_OUT dolog ( "%s: write size %d ret %d total sw %d\n", SW_NAME (sw), size >> sw->info.shift, ret, sw->total_hw_samples_mixed ); #endif return ret << sw->info.shift; } #ifdef DEBUG_AUDIO static void audio_pcm_print_info (const char *cap, struct audio_pcm_info *info) { dolog ("%s: bits %d, sign %d, freq %d, nchan %d\n", cap, info->bits, info->sign, info->freq, info->nchannels); } #endif #define DAC #include "audio_template.h" #undef DAC #include "audio_template.h" /* * Timer */ static int audio_is_timer_needed (void) { HWVoiceIn *hwi = NULL; HWVoiceOut *hwo = NULL; while ((hwo = audio_pcm_hw_find_any_enabled_out (hwo))) { if (!hwo->poll_mode) return 1; } while ((hwi = audio_pcm_hw_find_any_enabled_in (hwi))) { if (!hwi->poll_mode) return 1; } return 0; } static void audio_reset_timer (AudioState *s) { if (audio_is_timer_needed ()) { timer_mod (s->ts, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + conf.period.ticks); } else { timer_del (s->ts); } } static void audio_timer (void *opaque) { audio_run ("timer"); audio_reset_timer (opaque); } /* * Public API */ int AUD_write (SWVoiceOut *sw, void *buf, int size) { int bytes; if (!sw) { /* XXX: Consider options */ return size; } if 
(!sw->hw->enabled) { dolog ("Writing to disabled voice %s\n", SW_NAME (sw)); return 0; } bytes = sw->hw->pcm_ops->write (sw, buf, size); return bytes; } int AUD_read (SWVoiceIn *sw, void *buf, int size) { int bytes; if (!sw) { /* XXX: Consider options */ return size; } if (!sw->hw->enabled) { dolog ("Reading from disabled voice %s\n", SW_NAME (sw)); return 0; } bytes = sw->hw->pcm_ops->read (sw, buf, size); return bytes; } int AUD_get_buffer_size_out (SWVoiceOut *sw) { return sw->hw->samples << sw->hw->info.shift; } void AUD_set_active_out (SWVoiceOut *sw, int on) { HWVoiceOut *hw; if (!sw) { return; } hw = sw->hw; if (sw->active != on) { AudioState *s = &glob_audio_state; SWVoiceOut *temp_sw; SWVoiceCap *sc; if (on) { hw->pending_disable = 0; if (!hw->enabled) { hw->enabled = 1; if (s->vm_running) { hw->pcm_ops->ctl_out (hw, VOICE_ENABLE, conf.try_poll_out); audio_reset_timer (s); } } } else { if (hw->enabled) { int nb_active = 0; for (temp_sw = hw->sw_head.lh_first; temp_sw; temp_sw = temp_sw->entries.le_next) { nb_active += temp_sw->active != 0; } hw->pending_disable = nb_active == 1; } } for (sc = hw->cap_head.lh_first; sc; sc = sc->entries.le_next) { sc->sw.active = hw->enabled; if (hw->enabled) { audio_capture_maybe_changed (sc->cap, 1); } } sw->active = on; } } void AUD_set_active_in (SWVoiceIn *sw, int on) { HWVoiceIn *hw; if (!sw) { return; } hw = sw->hw; if (sw->active != on) { AudioState *s = &glob_audio_state; SWVoiceIn *temp_sw; if (on) { if (!hw->enabled) { hw->enabled = 1; if (s->vm_running) { hw->pcm_ops->ctl_in (hw, VOICE_ENABLE, conf.try_poll_in); audio_reset_timer (s); } } sw->total_hw_samples_acquired = hw->total_samples_captured; } else { if (hw->enabled) { int nb_active = 0; for (temp_sw = hw->sw_head.lh_first; temp_sw; temp_sw = temp_sw->entries.le_next) { nb_active += temp_sw->active != 0; } if (nb_active == 1) { hw->enabled = 0; hw->pcm_ops->ctl_in (hw, VOICE_DISABLE); } } } sw->active = on; } } static int audio_get_avail (SWVoiceIn *sw) { 
int live; if (!sw) { return 0; } live = sw->hw->total_samples_captured - sw->total_hw_samples_acquired; if (audio_bug (AUDIO_FUNC, live < 0 || live > sw->hw->samples)) { dolog ("live=%d sw->hw->samples=%d\n", live, sw->hw->samples); return 0; } ldebug ( "%s: get_avail live %d ret %" PRId64 "\n", SW_NAME (sw), live, (((int64_t) live << 32) / sw->ratio) << sw->info.shift ); return (((int64_t) live << 32) / sw->ratio) << sw->info.shift; } static int audio_get_free (SWVoiceOut *sw) { int live, dead; if (!sw) { return 0; } live = sw->total_hw_samples_mixed; if (audio_bug (AUDIO_FUNC, live < 0 || live > sw->hw->samples)) { dolog ("live=%d sw->hw->samples=%d\n", live, sw->hw->samples); return 0; } dead = sw->hw->samples - live; #ifdef DEBUG_OUT dolog ("%s: get_free live %d dead %d ret %" PRId64 "\n", SW_NAME (sw), live, dead, (((int64_t) dead << 32) / sw->ratio) << sw->info.shift); #endif return (((int64_t) dead << 32) / sw->ratio) << sw->info.shift; } static void audio_capture_mix_and_clear (HWVoiceOut *hw, int rpos, int samples) { int n; if (hw->enabled) { SWVoiceCap *sc; for (sc = hw->cap_head.lh_first; sc; sc = sc->entries.le_next) { SWVoiceOut *sw = &sc->sw; int rpos2 = rpos; n = samples; while (n) { int till_end_of_hw = hw->samples - rpos2; int to_write = audio_MIN (till_end_of_hw, n); int bytes = to_write << hw->info.shift; int written; sw->buf = hw->mix_buf + rpos2; written = audio_pcm_sw_write (sw, NULL, bytes); if (written - bytes) { dolog ("Could not mix %d bytes into a capture " "buffer, mixed %d\n", bytes, written); break; } n -= to_write; rpos2 = (rpos2 + to_write) % hw->samples; } } } n = audio_MIN (samples, hw->samples - rpos); mixeng_clear (hw->mix_buf + rpos, n); mixeng_clear (hw->mix_buf, samples - n); } static void audio_run_out (AudioState *s) { HWVoiceOut *hw = NULL; SWVoiceOut *sw; while ((hw = audio_pcm_hw_find_any_enabled_out (hw))) { int played; int live, free, nb_live, cleanup_required, prev_rpos; live = audio_pcm_hw_get_live_out (hw, &nb_live); 
if (!nb_live) { live = 0; } if (audio_bug (AUDIO_FUNC, live < 0 || live > hw->samples)) { dolog ("live=%d hw->samples=%d\n", live, hw->samples); continue; } if (hw->pending_disable && !nb_live) { SWVoiceCap *sc; #ifdef DEBUG_OUT dolog ("Disabling voice\n"); #endif hw->enabled = 0; hw->pending_disable = 0; hw->pcm_ops->ctl_out (hw, VOICE_DISABLE); for (sc = hw->cap_head.lh_first; sc; sc = sc->entries.le_next) { sc->sw.active = 0; audio_recalc_and_notify_capture (sc->cap); } continue; } if (!live) { for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { if (sw->active) { free = audio_get_free (sw); if (free > 0) { sw->callback.fn (sw->callback.opaque, free); } } } continue; } prev_rpos = hw->rpos; played = hw->pcm_ops->run_out (hw, live); if (audio_bug (AUDIO_FUNC, hw->rpos >= hw->samples)) { dolog ("hw->rpos=%d hw->samples=%d played=%d\n", hw->rpos, hw->samples, played); hw->rpos = 0; } #ifdef DEBUG_OUT dolog ("played=%d\n", played); #endif if (played) { hw->ts_helper += played; audio_capture_mix_and_clear (hw, prev_rpos, played); } cleanup_required = 0; for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { if (!sw->active && sw->empty) { continue; } if (audio_bug (AUDIO_FUNC, played > sw->total_hw_samples_mixed)) { dolog ("played=%d sw->total_hw_samples_mixed=%d\n", played, sw->total_hw_samples_mixed); played = sw->total_hw_samples_mixed; } sw->total_hw_samples_mixed -= played; if (!sw->total_hw_samples_mixed) { sw->empty = 1; cleanup_required |= !sw->active && !sw->callback.fn; } if (sw->active) { free = audio_get_free (sw); if (free > 0) { sw->callback.fn (sw->callback.opaque, free); } } } if (cleanup_required) { SWVoiceOut *sw1; sw = hw->sw_head.lh_first; while (sw) { sw1 = sw->entries.le_next; if (!sw->active && !sw->callback.fn) { #ifdef DEBUG_PLIVE dolog ("Finishing with old voice\n"); #endif audio_close_out (sw); } sw = sw1; } } } } static void audio_run_in (AudioState *s) { HWVoiceIn *hw = NULL; while ((hw = 
audio_pcm_hw_find_any_enabled_in (hw))) { SWVoiceIn *sw; int captured, min; captured = hw->pcm_ops->run_in (hw); min = audio_pcm_hw_find_min_in (hw); hw->total_samples_captured += captured - min; hw->ts_helper += captured; for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { sw->total_hw_samples_acquired -= min; if (sw->active) { int avail; avail = audio_get_avail (sw); if (avail > 0) { sw->callback.fn (sw->callback.opaque, avail); } } } } } static void audio_run_capture (AudioState *s) { CaptureVoiceOut *cap; for (cap = s->cap_head.lh_first; cap; cap = cap->entries.le_next) { int live, rpos, captured; HWVoiceOut *hw = &cap->hw; SWVoiceOut *sw; captured = live = audio_pcm_hw_get_live_out (hw, NULL); rpos = hw->rpos; while (live) { int left = hw->samples - rpos; int to_capture = audio_MIN (live, left); struct st_sample *src; struct capture_callback *cb; src = hw->mix_buf + rpos; hw->clip (cap->buf, src, to_capture); mixeng_clear (src, to_capture); for (cb = cap->cb_head.lh_first; cb; cb = cb->entries.le_next) { cb->ops.capture (cb->opaque, cap->buf, to_capture << hw->info.shift); } rpos = (rpos + to_capture) % hw->samples; live -= to_capture; } hw->rpos = rpos; for (sw = hw->sw_head.lh_first; sw; sw = sw->entries.le_next) { if (!sw->active && sw->empty) { continue; } if (audio_bug (AUDIO_FUNC, captured > sw->total_hw_samples_mixed)) { dolog ("captured=%d sw->total_hw_samples_mixed=%d\n", captured, sw->total_hw_samples_mixed); captured = sw->total_hw_samples_mixed; } sw->total_hw_samples_mixed -= captured; sw->empty = sw->total_hw_samples_mixed == 0; } } } void audio_run (const char *msg) { AudioState *s = &glob_audio_state; audio_run_out (s); audio_run_in (s); audio_run_capture (s); #ifdef DEBUG_POLL { static double prevtime; double currtime; struct timeval tv; if (gettimeofday (&tv, NULL)) { perror ("audio_run: gettimeofday"); return; } currtime = tv.tv_sec + tv.tv_usec * 1e-6; dolog ("Elapsed since last %s: %f\n", msg, currtime - prevtime); prevtime = 
currtime; } #endif } static struct audio_option audio_options[] = { /* DAC */ { .name = "DAC_FIXED_SETTINGS", .tag = AUD_OPT_BOOL, .valp = &conf.fixed_out.enabled, .descr = "Use fixed settings for host DAC" }, { .name = "DAC_FIXED_FREQ", .tag = AUD_OPT_INT, .valp = &conf.fixed_out.settings.freq, .descr = "Frequency for fixed host DAC" }, { .name = "DAC_FIXED_FMT", .tag = AUD_OPT_FMT, .valp = &conf.fixed_out.settings.fmt, .descr = "Format for fixed host DAC" }, { .name = "DAC_FIXED_CHANNELS", .tag = AUD_OPT_INT, .valp = &conf.fixed_out.settings.nchannels, .descr = "Number of channels for fixed DAC (1 - mono, 2 - stereo)" }, { .name = "DAC_VOICES", .tag = AUD_OPT_INT, .valp = &conf.fixed_out.nb_voices, .descr = "Number of voices for DAC" }, { .name = "DAC_TRY_POLL", .tag = AUD_OPT_BOOL, .valp = &conf.try_poll_out, .descr = "Attempt using poll mode for DAC" }, /* ADC */ { .name = "ADC_FIXED_SETTINGS", .tag = AUD_OPT_BOOL, .valp = &conf.fixed_in.enabled, .descr = "Use fixed settings for host ADC" }, { .name = "ADC_FIXED_FREQ", .tag = AUD_OPT_INT, .valp = &conf.fixed_in.settings.freq, .descr = "Frequency for fixed host ADC" }, { .name = "ADC_FIXED_FMT", .tag = AUD_OPT_FMT, .valp = &conf.fixed_in.settings.fmt, .descr = "Format for fixed host ADC" }, { .name = "ADC_FIXED_CHANNELS", .tag = AUD_OPT_INT, .valp = &conf.fixed_in.settings.nchannels, .descr = "Number of channels for fixed ADC (1 - mono, 2 - stereo)" }, { .name = "ADC_VOICES", .tag = AUD_OPT_INT, .valp = &conf.fixed_in.nb_voices, .descr = "Number of voices for ADC" }, { .name = "ADC_TRY_POLL", .tag = AUD_OPT_BOOL, .valp = &conf.try_poll_in, .descr = "Attempt using poll mode for ADC" }, /* Misc */ { .name = "TIMER_PERIOD", .tag = AUD_OPT_INT, .valp = &conf.period.hertz, .descr = "Timer period in HZ (0 - use lowest possible)" }, { .name = "PLIVE", .tag = AUD_OPT_BOOL, .valp = &conf.plive, .descr = "(undocumented)" }, { .name = "LOG_TO_MONITOR", .tag = AUD_OPT_BOOL, .valp = &conf.log_to_monitor, .descr = "Print 
logging messages to monitor instead of stderr" }, { /* End of list */ } }; static void audio_pp_nb_voices (const char *typ, int nb) { switch (nb) { case 0: printf ("Does not support %s\n", typ); break; case 1: printf ("One %s voice\n", typ); break; case INT_MAX: printf ("Theoretically supports many %s voices\n", typ); break; default: printf ("Theoretically supports up to %d %s voices\n", nb, typ); break; } } void AUD_help (void) { size_t i; audio_process_options ("AUDIO", audio_options); for (i = 0; i < ARRAY_SIZE (drvtab); i++) { struct audio_driver *d = drvtab[i]; if (d->options) { audio_process_options (d->name, d->options); } } printf ("Audio options:\n"); audio_print_options ("AUDIO", audio_options); printf ("\n"); printf ("Available drivers:\n"); for (i = 0; i < ARRAY_SIZE (drvtab); i++) { struct audio_driver *d = drvtab[i]; printf ("Name: %s\n", d->name); printf ("Description: %s\n", d->descr); audio_pp_nb_voices ("playback", d->max_voices_out); audio_pp_nb_voices ("capture", d->max_voices_in); if (d->options) { printf ("Options:\n"); audio_print_options (d->name, d->options); } else { printf ("No options\n"); } printf ("\n"); } printf ( "Options are settable through environment variables.\n" "Example:\n" #ifdef _WIN32 " set QEMU_AUDIO_DRV=wav\n" " set QEMU_WAV_PATH=c:\\tune.wav\n" #else " export QEMU_AUDIO_DRV=wav\n" " export QEMU_WAV_PATH=$HOME/tune.wav\n" "(for csh replace export with setenv in the above)\n" #endif " qemu ...\n\n" ); } static int audio_driver_init (AudioState *s, struct audio_driver *drv) { if (drv->options) { audio_process_options (drv->name, drv->options); } s->drv_opaque = drv->init (); if (s->drv_opaque) { audio_init_nb_voices_out (drv); audio_init_nb_voices_in (drv); s->drv = drv; return 0; } else { dolog ("Could not init `%s' audio driver\n", drv->name); return -1; } } static void audio_vm_change_state_handler (void *opaque, int running, RunState state) { AudioState *s = opaque; HWVoiceOut *hwo = NULL; HWVoiceIn *hwi = NULL; int op 
= running ? VOICE_ENABLE : VOICE_DISABLE; s->vm_running = running; while ((hwo = audio_pcm_hw_find_any_enabled_out (hwo))) { hwo->pcm_ops->ctl_out (hwo, op, conf.try_poll_out); } while ((hwi = audio_pcm_hw_find_any_enabled_in (hwi))) { hwi->pcm_ops->ctl_in (hwi, op, conf.try_poll_in); } audio_reset_timer (s); } static void audio_atexit (void) { AudioState *s = &glob_audio_state; HWVoiceOut *hwo = NULL; HWVoiceIn *hwi = NULL; while ((hwo = audio_pcm_hw_find_any_out (hwo))) { SWVoiceCap *sc; if (hwo->enabled) { hwo->pcm_ops->ctl_out (hwo, VOICE_DISABLE); } hwo->pcm_ops->fini_out (hwo); for (sc = hwo->cap_head.lh_first; sc; sc = sc->entries.le_next) { CaptureVoiceOut *cap = sc->cap; struct capture_callback *cb; for (cb = cap->cb_head.lh_first; cb; cb = cb->entries.le_next) { cb->ops.destroy (cb->opaque); } } } while ((hwi = audio_pcm_hw_find_any_in (hwi))) { if (hwi->enabled) { hwi->pcm_ops->ctl_in (hwi, VOICE_DISABLE); } hwi->pcm_ops->fini_in (hwi); } if (s->drv) { s->drv->fini (s->drv_opaque); } } static const VMStateDescription vmstate_audio = { .name = "audio", .version_id = 1, .minimum_version_id = 1, .minimum_version_id_old = 1, .fields = (VMStateField []) { VMSTATE_END_OF_LIST() } }; static void audio_init (void) { size_t i; int done = 0; const char *drvname; VMChangeStateEntry *e; AudioState *s = &glob_audio_state; if (s->drv) { return; } QLIST_INIT (&s->hw_head_out); QLIST_INIT (&s->hw_head_in); QLIST_INIT (&s->cap_head); atexit (audio_atexit); s->ts = timer_new_ns(QEMU_CLOCK_VIRTUAL, audio_timer, s); if (!s->ts) { hw_error("Could not create audio timer\n"); } audio_process_options ("AUDIO", audio_options); s->nb_hw_voices_out = conf.fixed_out.nb_voices; s->nb_hw_voices_in = conf.fixed_in.nb_voices; if (s->nb_hw_voices_out <= 0) { dolog ("Bogus number of playback voices %d, setting to 1\n", s->nb_hw_voices_out); s->nb_hw_voices_out = 1; } if (s->nb_hw_voices_in <= 0) { dolog ("Bogus number of capture voices %d, setting to 0\n", s->nb_hw_voices_in); 
s->nb_hw_voices_in = 0; } { int def; drvname = audio_get_conf_str ("QEMU_AUDIO_DRV", NULL, &def); } if (drvname) { int found = 0; for (i = 0; i < ARRAY_SIZE (drvtab); i++) { if (!strcmp (drvname, drvtab[i]->name)) { done = !audio_driver_init (s, drvtab[i]); found = 1; break; } } if (!found) { dolog ("Unknown audio driver `%s'\n", drvname); dolog ("Run with -audio-help to list available drivers\n"); } } if (!done) { for (i = 0; !done && i < ARRAY_SIZE (drvtab); i++) { if (drvtab[i]->can_be_default) { done = !audio_driver_init (s, drvtab[i]); } } } if (!done) { done = !audio_driver_init (s, &no_audio_driver); if (!done) { hw_error("Could not initialize audio subsystem\n"); } else { dolog ("warning: Using timer based audio emulation\n"); } } if (conf.period.hertz <= 0) { if (conf.period.hertz < 0) { dolog ("warning: Timer period is negative - %d " "treating as zero\n", conf.period.hertz); } conf.period.ticks = 1; } else { conf.period.ticks = muldiv64 (1, get_ticks_per_sec (), conf.period.hertz); } e = qemu_add_vm_change_state_handler (audio_vm_change_state_handler, s); if (!e) { dolog ("warning: Could not register change state handler\n" "(Audio can continue looping even after stopping the VM)\n"); } QLIST_INIT (&s->card_head); vmstate_register (NULL, 0, &vmstate_audio, s); } void AUD_register_card (const char *name, QEMUSoundCard *card) { audio_init (); card->name = g_strdup (name); memset (&card->entries, 0, sizeof (card->entries)); QLIST_INSERT_HEAD (&glob_audio_state.card_head, card, entries); } void AUD_remove_card (QEMUSoundCard *card) { QLIST_REMOVE (card, entries); g_free (card->name); } CaptureVoiceOut *AUD_add_capture ( struct audsettings *as, struct audio_capture_ops *ops, void *cb_opaque ) { AudioState *s = &glob_audio_state; CaptureVoiceOut *cap; struct capture_callback *cb; if (audio_validate_settings (as)) { dolog ("Invalid settings were passed when trying to add capture\n"); audio_print_settings (as); goto err0; } cb = audio_calloc (AUDIO_FUNC, 1, 
sizeof (*cb)); if (!cb) { dolog ("Could not allocate capture callback information, size %zu\n", sizeof (*cb)); goto err0; } cb->ops = *ops; cb->opaque = cb_opaque; cap = audio_pcm_capture_find_specific (as); if (cap) { QLIST_INSERT_HEAD (&cap->cb_head, cb, entries); return cap; } else { HWVoiceOut *hw; CaptureVoiceOut *cap; cap = audio_calloc (AUDIO_FUNC, 1, sizeof (*cap)); if (!cap) { dolog ("Could not allocate capture voice, size %zu\n", sizeof (*cap)); goto err1; } hw = &cap->hw; QLIST_INIT (&hw->sw_head); QLIST_INIT (&cap->cb_head); /* XXX find a more elegant way */ hw->samples = 4096 * 4; hw->mix_buf = audio_calloc (AUDIO_FUNC, hw->samples, sizeof (struct st_sample)); if (!hw->mix_buf) { dolog ("Could not allocate capture mix buffer (%d samples)\n", hw->samples); goto err2; } audio_pcm_init_info (&hw->info, as); cap->buf = audio_calloc (AUDIO_FUNC, hw->samples, 1 << hw->info.shift); if (!cap->buf) { dolog ("Could not allocate capture buffer " "(%d samples, each %d bytes)\n", hw->samples, 1 << hw->info.shift); goto err3; } hw->clip = mixeng_clip [hw->info.nchannels == 2] [hw->info.sign] [hw->info.swap_endianness] [audio_bits_to_index (hw->info.bits)]; QLIST_INSERT_HEAD (&s->cap_head, cap, entries); QLIST_INSERT_HEAD (&cap->cb_head, cb, entries); hw = NULL; while ((hw = audio_pcm_hw_find_any_out (hw))) { audio_attach_capture (hw); } return cap; err3: g_free (cap->hw.mix_buf); err2: g_free (cap); err1: g_free (cb); err0: return NULL; } } void AUD_del_capture (CaptureVoiceOut *cap, void *cb_opaque) { struct capture_callback *cb; for (cb = cap->cb_head.lh_first; cb; cb = cb->entries.le_next) { if (cb->opaque == cb_opaque) { cb->ops.destroy (cb_opaque); QLIST_REMOVE (cb, entries); g_free (cb); if (!cap->cb_head.lh_first) { SWVoiceOut *sw = cap->hw.sw_head.lh_first, *sw1; while (sw) { SWVoiceCap *sc = (SWVoiceCap *) sw; #ifdef DEBUG_CAPTURE dolog ("freeing %s\n", sw->name); #endif sw1 = sw->entries.le_next; if (sw->rate) { st_rate_stop (sw->rate); sw->rate = NULL; } 
QLIST_REMOVE (sw, entries); QLIST_REMOVE (sc, entries); g_free (sc); sw = sw1; } QLIST_REMOVE (cap, entries); g_free (cap); } return; } } } void AUD_set_volume_out (SWVoiceOut *sw, int mute, uint8_t lvol, uint8_t rvol) { if (sw) { HWVoiceOut *hw = sw->hw; sw->vol.mute = mute; sw->vol.l = nominal_volume.l * lvol / 255; sw->vol.r = nominal_volume.r * rvol / 255; if (hw->pcm_ops->ctl_out) { hw->pcm_ops->ctl_out (hw, VOICE_VOLUME, sw); } } } void AUD_set_volume_in (SWVoiceIn *sw, int mute, uint8_t lvol, uint8_t rvol) { if (sw) { HWVoiceIn *hw = sw->hw; sw->vol.mute = mute; sw->vol.l = nominal_volume.l * lvol / 255; sw->vol.r = nominal_volume.r * rvol / 255; if (hw->pcm_ops->ctl_in) { hw->pcm_ops->ctl_in (hw, VOICE_VOLUME, sw); } } }
gpl-2.0
bw-oss/linux
tools/testing/selftests/seccomp/seccomp_bpf.c
27
63594
/* * Copyright (c) 2012 The Chromium OS Authors. All rights reserved. * Use of this source code is governed by the GPLv2 license. * * Test code for seccomp bpf. */ #include <sys/types.h> #include <asm/siginfo.h> #define __have_siginfo_t 1 #define __have_sigval_t 1 #define __have_sigevent_t 1 #include <errno.h> #include <linux/filter.h> #include <sys/prctl.h> #include <sys/ptrace.h> #include <sys/user.h> #include <linux/prctl.h> #include <linux/ptrace.h> #include <linux/seccomp.h> #include <pthread.h> #include <semaphore.h> #include <signal.h> #include <stddef.h> #include <stdbool.h> #include <string.h> #include <time.h> #include <linux/elf.h> #include <sys/uio.h> #include <sys/utsname.h> #include <sys/fcntl.h> #include <sys/mman.h> #include <sys/times.h> #define _GNU_SOURCE #include <unistd.h> #include <sys/syscall.h> #include "../kselftest_harness.h" #ifndef PR_SET_PTRACER # define PR_SET_PTRACER 0x59616d61 #endif #ifndef PR_SET_NO_NEW_PRIVS #define PR_SET_NO_NEW_PRIVS 38 #define PR_GET_NO_NEW_PRIVS 39 #endif #ifndef PR_SECCOMP_EXT #define PR_SECCOMP_EXT 43 #endif #ifndef SECCOMP_EXT_ACT #define SECCOMP_EXT_ACT 1 #endif #ifndef SECCOMP_EXT_ACT_TSYNC #define SECCOMP_EXT_ACT_TSYNC 1 #endif #ifndef SECCOMP_MODE_STRICT #define SECCOMP_MODE_STRICT 1 #endif #ifndef SECCOMP_MODE_FILTER #define SECCOMP_MODE_FILTER 2 #endif #ifndef SECCOMP_RET_KILL #define SECCOMP_RET_KILL 0x00000000U /* kill the task immediately */ #define SECCOMP_RET_TRAP 0x00030000U /* disallow and force a SIGSYS */ #define SECCOMP_RET_ERRNO 0x00050000U /* returns an errno */ #define SECCOMP_RET_TRACE 0x7ff00000U /* pass to a tracer or disallow */ #define SECCOMP_RET_ALLOW 0x7fff0000U /* allow */ /* Masks for the return value sections. 
*/ #define SECCOMP_RET_ACTION 0x7fff0000U #define SECCOMP_RET_DATA 0x0000ffffU struct seccomp_data { int nr; __u32 arch; __u64 instruction_pointer; __u64 args[6]; }; #endif #if __BYTE_ORDER == __LITTLE_ENDIAN #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n])) #elif __BYTE_ORDER == __BIG_ENDIAN #define syscall_arg(_n) (offsetof(struct seccomp_data, args[_n]) + sizeof(__u32)) #else #error "wut? Unknown __BYTE_ORDER?!" #endif #define SIBLING_EXIT_UNKILLED 0xbadbeef #define SIBLING_EXIT_FAILURE 0xbadface #define SIBLING_EXIT_NEWPRIVS 0xbadfeed TEST(mode_strict_support) { long ret; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support CONFIG_SECCOMP"); } syscall(__NR_exit, 1); } TEST_SIGNAL(mode_strict_cannot_call_prctl, SIGKILL) { long ret; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, NULL, NULL); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support CONFIG_SECCOMP"); } syscall(__NR_prctl, PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL); EXPECT_FALSE(true) { TH_LOG("Unreachable!"); } } /* Note! This doesn't test no new privs behavior */ TEST(no_new_privs_support) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); EXPECT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } } /* Tests kernel support by checking for a copy_from_user() fault on * NULL. 
*/ TEST(mode_filter_support) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, NULL, NULL, NULL); EXPECT_EQ(-1, ret); EXPECT_EQ(EFAULT, errno) { TH_LOG("Kernel does not support CONFIG_SECCOMP_FILTER!"); } } TEST(mode_filter_without_nnp) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_GET_NO_NEW_PRIVS, 0, NULL, 0, 0); ASSERT_LE(0, ret) { TH_LOG("Expected 0 or unsupported for NO_NEW_PRIVS"); } errno = 0; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); /* Succeeds with CAP_SYS_ADMIN, fails without */ /* TODO(wad) check caps not euid */ if (geteuid()) { EXPECT_EQ(-1, ret); EXPECT_EQ(EACCES, errno); } else { EXPECT_EQ(0, ret); } } #define MAX_INSNS_PER_PATH 32768 TEST(filter_size_limits) { int i; int count = BPF_MAXINSNS + 1; struct sock_filter allow[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_filter *filter; struct sock_fprog prog = { }; long ret; filter = calloc(count, sizeof(*filter)); ASSERT_NE(NULL, filter); for (i = 0; i < count; i++) filter[i] = allow[0]; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); prog.filter = filter; prog.len = count; /* Too many filter instructions in a single filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_NE(0, ret) { TH_LOG("Installing %d insn filter was allowed", prog.len); } /* One less is okay, though. 
*/ prog.len -= 1; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Installing %d insn filter wasn't allowed", prog.len); } } TEST(filter_chain_limits) { int i; int count = BPF_MAXINSNS; struct sock_filter allow[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_filter *filter; struct sock_fprog prog = { }; long ret; filter = calloc(count, sizeof(*filter)); ASSERT_NE(NULL, filter); for (i = 0; i < count; i++) filter[i] = allow[0]; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); prog.filter = filter; prog.len = 1; ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); prog.len = count; /* Too many total filter instructions. */ for (i = 0; i < MAX_INSNS_PER_PATH; i++) { ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); if (ret != 0) break; } ASSERT_NE(0, ret) { TH_LOG("Allowed %d %d-insn filters (total with penalties:%d)", i, count, i * (count + 4)); } } TEST(mode_filter_cannot_move_to_strict) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, NULL, 0, 0); EXPECT_EQ(-1, ret); EXPECT_EQ(EINVAL, errno); } TEST(mode_filter_get_seccomp) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0); EXPECT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_GET_SECCOMP, 0, 0, 0, 0); EXPECT_EQ(2, ret); } TEST(ALLOW_all) { struct sock_filter filter[] = { 
BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); } TEST(empty_prog) { struct sock_filter filter[] = { }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); EXPECT_EQ(-1, ret); EXPECT_EQ(EINVAL, errno); } TEST_SIGNAL(unknown_ret_is_kill_inside, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, 0x10000000U), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(0, syscall(__NR_getpid)) { TH_LOG("getpid() shouldn't ever return"); } } /* return code >= 0x80000000 is unused. 
*/ TEST_SIGNAL(unknown_ret_is_kill_above_allow, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, 0x90000000U), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(0, syscall(__NR_getpid)) { TH_LOG("getpid() shouldn't ever return"); } } TEST_SIGNAL(KILL_all, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); } TEST_SIGNAL(KILL_one, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); /* getpid() should never return. */ EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_SIGNAL(KILL_one_arg_one, SIGSYS) { void *fatal_address; struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_times, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), /* Only both with lower 32-bit for now. 
*/ BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(0)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, (unsigned long)&fatal_address, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); struct tms timebuf; clock_t clock = times(&timebuf); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_LE(clock, syscall(__NR_times, &timebuf)); /* times() should never return. */ EXPECT_EQ(0, syscall(__NR_times, &fatal_address)); } TEST_SIGNAL(KILL_one_arg_six, SIGSYS) { #ifndef __NR_mmap2 int sysno = __NR_mmap; #else int sysno = __NR_mmap2; #endif struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, sysno, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), /* Only both with lower 32-bit for now. */ BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(5)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 0x0C0FFEE, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); int fd; void *map1, *map2; int page_size = sysconf(_SC_PAGESIZE); ASSERT_LT(0, page_size); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); fd = open("/dev/zero", O_RDONLY); ASSERT_NE(-1, fd); EXPECT_EQ(parent, syscall(__NR_getppid)); map1 = (void *)syscall(sysno, NULL, page_size, PROT_READ, MAP_PRIVATE, fd, page_size); EXPECT_NE(MAP_FAILED, map1); /* mmap2() should never return. 
*/ map2 = (void *)syscall(sysno, NULL, page_size, PROT_READ, MAP_PRIVATE, fd, 0x0C0FFEE); EXPECT_EQ(MAP_FAILED, map2); /* The test failed, so clean up the resources. */ munmap(map1, page_size); munmap(map2, page_size); close(fd); } /* TODO(wad) add 64-bit versus 32-bit arg tests. */ TEST(arg_out_of_range) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, syscall_arg(6)), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); EXPECT_EQ(-1, ret); EXPECT_EQ(EINVAL, errno); } TEST(ERRNO_valid) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | E2BIG), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_EQ(-1, read(0, NULL, 0)); EXPECT_EQ(E2BIG, errno); } TEST(ERRNO_zero) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); /* "errno" of 0 is ok. 
*/ EXPECT_EQ(0, read(0, NULL, 0)); } TEST(ERRNO_capped) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | 4096), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; pid_t parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog); ASSERT_EQ(0, ret); EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_EQ(-1, read(0, NULL, 0)); EXPECT_EQ(4095, errno); } FIXTURE_DATA(TRAP) { struct sock_fprog prog; }; FIXTURE_SETUP(TRAP) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; memset(&self->prog, 0, sizeof(self->prog)); self->prog.filter = malloc(sizeof(filter)); ASSERT_NE(NULL, self->prog.filter); memcpy(self->prog.filter, filter, sizeof(filter)); self->prog.len = (unsigned short)ARRAY_SIZE(filter); } FIXTURE_TEARDOWN(TRAP) { if (self->prog.filter) free(self->prog.filter); } TEST_F_SIGNAL(TRAP, dfl, SIGSYS) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog); ASSERT_EQ(0, ret); syscall(__NR_getpid); } /* Ensure that SIGSYS overrides SIG_IGN */ TEST_F_SIGNAL(TRAP, ign, SIGSYS) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); signal(SIGSYS, SIG_IGN); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog); ASSERT_EQ(0, ret); syscall(__NR_getpid); } static struct siginfo TRAP_info; static volatile int TRAP_nr; static void TRAP_action(int nr, siginfo_t *info, void *void_context) { memcpy(&TRAP_info, info, sizeof(TRAP_info)); TRAP_nr = nr; } TEST_F(TRAP, 
handler) { int ret, test; struct sigaction act; sigset_t mask; memset(&act, 0, sizeof(act)); sigemptyset(&mask); sigaddset(&mask, SIGSYS); act.sa_sigaction = &TRAP_action; act.sa_flags = SA_SIGINFO; ret = sigaction(SIGSYS, &act, NULL); ASSERT_EQ(0, ret) { TH_LOG("sigaction failed"); } ret = sigprocmask(SIG_UNBLOCK, &mask, NULL); ASSERT_EQ(0, ret) { TH_LOG("sigprocmask failed"); } ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog); ASSERT_EQ(0, ret); TRAP_nr = 0; memset(&TRAP_info, 0, sizeof(TRAP_info)); /* Expect the registers to be rolled back. (nr = error) may vary * based on arch. */ ret = syscall(__NR_getpid); /* Silence gcc warning about volatile. */ test = TRAP_nr; EXPECT_EQ(SIGSYS, test); struct local_sigsys { void *_call_addr; /* calling user insn */ int _syscall; /* triggering system call number */ unsigned int _arch; /* AUDIT_ARCH_* of syscall */ } *sigsys = (struct local_sigsys *) #ifdef si_syscall &(TRAP_info.si_call_addr); #else &TRAP_info.si_pid; #endif EXPECT_EQ(__NR_getpid, sigsys->_syscall); /* Make sure arch is non-zero. 
*/ EXPECT_NE(0, sigsys->_arch); EXPECT_NE(0, (unsigned long)sigsys->_call_addr); } FIXTURE_DATA(precedence) { struct sock_fprog allow; struct sock_fprog trace; struct sock_fprog error; struct sock_fprog trap; struct sock_fprog kill; }; FIXTURE_SETUP(precedence) { struct sock_filter allow_insns[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_filter trace_insns[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE), }; struct sock_filter error_insns[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO), }; struct sock_filter trap_insns[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRAP), }; struct sock_filter kill_insns[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 1, 0), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), }; memset(self, 0, sizeof(*self)); #define FILTER_ALLOC(_x) \ self->_x.filter = malloc(sizeof(_x##_insns)); \ ASSERT_NE(NULL, self->_x.filter); \ memcpy(self->_x.filter, &_x##_insns, sizeof(_x##_insns)); \ self->_x.len = (unsigned short)ARRAY_SIZE(_x##_insns) FILTER_ALLOC(allow); FILTER_ALLOC(trace); FILTER_ALLOC(error); FILTER_ALLOC(trap); FILTER_ALLOC(kill); } FIXTURE_TEARDOWN(precedence) { #define FILTER_FREE(_x) if (self->_x.filter) free(self->_x.filter) FILTER_FREE(allow); FILTER_FREE(trace); FILTER_FREE(error); FILTER_FREE(trap); FILTER_FREE(kill); } TEST_F(precedence, allow_ok) { pid_t parent, res = 0; long ret; parent = getppid(); ret = 
prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill); ASSERT_EQ(0, ret); /* Should work just fine. */ res = syscall(__NR_getppid); EXPECT_EQ(parent, res); } TEST_F_SIGNAL(precedence, kill_is_highest, SIGSYS) { pid_t parent, res = 0; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill); ASSERT_EQ(0, ret); /* Should work just fine. */ res = syscall(__NR_getppid); EXPECT_EQ(parent, res); /* getpid() should never return. */ res = syscall(__NR_getpid); EXPECT_EQ(0, res); } TEST_F_SIGNAL(precedence, kill_is_highest_in_any_order, SIGSYS) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->kill); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* getpid() should never return. 
*/ EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F_SIGNAL(precedence, trap_is_second, SIGSYS) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* getpid() should never return. */ EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F_SIGNAL(precedence, trap_is_second_in_any_order, SIGSYS) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trap); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* getpid() should never return. */ EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F(precedence, errno_is_third) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); /* Should work just fine. 
*/ EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F(precedence, errno_is_third_in_any_order) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->error); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); EXPECT_EQ(0, syscall(__NR_getpid)); } TEST_F(precedence, trace_is_fourth) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* No ptracer */ EXPECT_EQ(-1, syscall(__NR_getpid)); } TEST_F(precedence, trace_is_fourth_in_any_order) { pid_t parent; long ret; parent = getppid(); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->trace); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->allow); ASSERT_EQ(0, ret); /* Should work just fine. */ EXPECT_EQ(parent, syscall(__NR_getppid)); /* No ptracer */ EXPECT_EQ(-1, syscall(__NR_getpid)); } #ifndef PTRACE_O_TRACESECCOMP #define PTRACE_O_TRACESECCOMP 0x00000080 #endif /* Catch the Ubuntu 12.04 value error. 
*/ #if PTRACE_EVENT_SECCOMP != 7 #undef PTRACE_EVENT_SECCOMP #endif #ifndef PTRACE_EVENT_SECCOMP #define PTRACE_EVENT_SECCOMP 7 #endif #define IS_SECCOMP_EVENT(status) ((status >> 16) == PTRACE_EVENT_SECCOMP) bool tracer_running; void tracer_stop(int sig) { tracer_running = false; } typedef void tracer_func_t(struct __test_metadata *_metadata, pid_t tracee, int status, void *args); void start_tracer(struct __test_metadata *_metadata, int fd, pid_t tracee, tracer_func_t tracer_func, void *args, bool ptrace_syscall) { int ret = -1; struct sigaction action = { .sa_handler = tracer_stop, }; /* Allow external shutdown. */ tracer_running = true; ASSERT_EQ(0, sigaction(SIGUSR1, &action, NULL)); errno = 0; while (ret == -1 && errno != EINVAL) ret = ptrace(PTRACE_ATTACH, tracee, NULL, 0); ASSERT_EQ(0, ret) { kill(tracee, SIGKILL); } /* Wait for attach stop */ wait(NULL); ret = ptrace(PTRACE_SETOPTIONS, tracee, NULL, ptrace_syscall ? PTRACE_O_TRACESYSGOOD : PTRACE_O_TRACESECCOMP); ASSERT_EQ(0, ret) { TH_LOG("Failed to set PTRACE_O_TRACESECCOMP"); kill(tracee, SIGKILL); } ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT, tracee, NULL, 0); ASSERT_EQ(0, ret); /* Unblock the tracee */ ASSERT_EQ(1, write(fd, "A", 1)); ASSERT_EQ(0, close(fd)); /* Run until we're shut down. Must assert to stop execution. */ while (tracer_running) { int status; if (wait(&status) != tracee) continue; if (WIFSIGNALED(status) || WIFEXITED(status)) /* Child is dead. Time to go. */ return; /* Check if this is a seccomp event. */ ASSERT_EQ(!ptrace_syscall, IS_SECCOMP_EVENT(status)); tracer_func(_metadata, tracee, status, args); ret = ptrace(ptrace_syscall ? PTRACE_SYSCALL : PTRACE_CONT, tracee, NULL, 0); ASSERT_EQ(0, ret); } /* Directly report the status of our test harness results. */ syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); } /* Common tracer setup/teardown functions. 
*/ void cont_handler(int num) { } pid_t setup_trace_fixture(struct __test_metadata *_metadata, tracer_func_t func, void *args, bool ptrace_syscall) { char sync; int pipefd[2]; pid_t tracer_pid; pid_t tracee = getpid(); /* Setup a pipe for clean synchronization. */ ASSERT_EQ(0, pipe(pipefd)); /* Fork a child which we'll promote to tracer */ tracer_pid = fork(); ASSERT_LE(0, tracer_pid); signal(SIGALRM, cont_handler); if (tracer_pid == 0) { close(pipefd[0]); start_tracer(_metadata, pipefd[1], tracee, func, args, ptrace_syscall); syscall(__NR_exit, 0); } close(pipefd[1]); prctl(PR_SET_PTRACER, tracer_pid, 0, 0, 0); read(pipefd[0], &sync, 1); close(pipefd[0]); return tracer_pid; } void teardown_trace_fixture(struct __test_metadata *_metadata, pid_t tracer) { if (tracer) { int status; /* * Extract the exit code from the other process and * adopt it for ourselves in case its asserts failed. */ ASSERT_EQ(0, kill(tracer, SIGUSR1)); ASSERT_EQ(tracer, waitpid(tracer, &status, 0)); if (WEXITSTATUS(status)) _metadata->passed = 0; } } /* "poke" tracer arguments and function. */ struct tracer_args_poke_t { unsigned long poke_addr; }; void tracer_poke(struct __test_metadata *_metadata, pid_t tracee, int status, void *args) { int ret; unsigned long msg; struct tracer_args_poke_t *info = (struct tracer_args_poke_t *)args; ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); EXPECT_EQ(0, ret); /* If this fails, don't try to recover. */ ASSERT_EQ(0x1001, msg) { kill(tracee, SIGKILL); } /* * Poke in the message. * Registers are not touched to try to keep this relatively arch * agnostic. 
*/ ret = ptrace(PTRACE_POKEDATA, tracee, info->poke_addr, 0x1001); EXPECT_EQ(0, ret); } FIXTURE_DATA(TRACE_poke) { struct sock_fprog prog; pid_t tracer; long poked; struct tracer_args_poke_t tracer_args; }; FIXTURE_SETUP(TRACE_poke) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1001), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; self->poked = 0; memset(&self->prog, 0, sizeof(self->prog)); self->prog.filter = malloc(sizeof(filter)); ASSERT_NE(NULL, self->prog.filter); memcpy(self->prog.filter, filter, sizeof(filter)); self->prog.len = (unsigned short)ARRAY_SIZE(filter); /* Set up tracer args. */ self->tracer_args.poke_addr = (unsigned long)&self->poked; /* Launch tracer. */ self->tracer = setup_trace_fixture(_metadata, tracer_poke, &self->tracer_args, false); } FIXTURE_TEARDOWN(TRACE_poke) { teardown_trace_fixture(_metadata, self->tracer); if (self->prog.filter) free(self->prog.filter); } TEST_F(TRACE_poke, read_has_side_effects) { ssize_t ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); EXPECT_EQ(0, self->poked); ret = read(-1, NULL, 0); EXPECT_EQ(-1, ret); EXPECT_EQ(0x1001, self->poked); } TEST_F(TRACE_poke, getpid_runs_normally) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); EXPECT_EQ(0, self->poked); EXPECT_NE(0, syscall(__NR_getpid)); EXPECT_EQ(0, self->poked); } #if defined(__x86_64__) # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM orig_rax # define SYSCALL_RET rax #elif defined(__i386__) # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM orig_eax # define SYSCALL_RET eax #elif defined(__arm__) # define ARCH_REGS struct pt_regs # define SYSCALL_NUM ARM_r7 # 
define SYSCALL_RET ARM_r0 #elif defined(__aarch64__) # define ARCH_REGS struct user_pt_regs # define SYSCALL_NUM regs[8] # define SYSCALL_RET regs[0] #elif defined(__hppa__) # define ARCH_REGS struct user_regs_struct # define SYSCALL_NUM gr[20] # define SYSCALL_RET gr[28] #elif defined(__powerpc__) # define ARCH_REGS struct pt_regs # define SYSCALL_NUM gpr[0] # define SYSCALL_RET gpr[3] #elif defined(__s390__) # define ARCH_REGS s390_regs # define SYSCALL_NUM gprs[2] # define SYSCALL_RET gprs[2] #elif defined(__mips__) # define ARCH_REGS struct pt_regs # define SYSCALL_NUM regs[2] # define SYSCALL_SYSCALL_NUM regs[4] # define SYSCALL_RET regs[2] # define SYSCALL_NUM_RET_SHARE_REG #else # error "Do not know how to find your architecture's registers and syscalls" #endif /* Use PTRACE_GETREGS and PTRACE_SETREGS when available. This is useful for * architectures without HAVE_ARCH_TRACEHOOK (e.g. User-mode Linux). */ #if defined(__x86_64__) || defined(__i386__) || defined(__mips__) #define HAVE_GETREGS #endif /* Architecture-specific syscall fetching routine. */ int get_syscall(struct __test_metadata *_metadata, pid_t tracee) { ARCH_REGS regs; #ifdef HAVE_GETREGS EXPECT_EQ(0, ptrace(PTRACE_GETREGS, tracee, 0, &regs)) { TH_LOG("PTRACE_GETREGS failed"); return -1; } #else struct iovec iov; iov.iov_base = &regs; iov.iov_len = sizeof(regs); EXPECT_EQ(0, ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov)) { TH_LOG("PTRACE_GETREGSET failed"); return -1; } #endif #if defined(__mips__) if (regs.SYSCALL_NUM == __NR_O32_Linux) return regs.SYSCALL_SYSCALL_NUM; #endif return regs.SYSCALL_NUM; } /* Architecture-specific syscall changing routine. 
*/ void change_syscall(struct __test_metadata *_metadata, pid_t tracee, int syscall) { int ret; ARCH_REGS regs; #ifdef HAVE_GETREGS ret = ptrace(PTRACE_GETREGS, tracee, 0, &regs); #else struct iovec iov; iov.iov_base = &regs; iov.iov_len = sizeof(regs); ret = ptrace(PTRACE_GETREGSET, tracee, NT_PRSTATUS, &iov); #endif EXPECT_EQ(0, ret) {} #if defined(__x86_64__) || defined(__i386__) || defined(__powerpc__) || \ defined(__s390__) || defined(__hppa__) { regs.SYSCALL_NUM = syscall; } #elif defined(__mips__) { if (regs.SYSCALL_NUM == __NR_O32_Linux) regs.SYSCALL_SYSCALL_NUM = syscall; else regs.SYSCALL_NUM = syscall; } #elif defined(__arm__) # ifndef PTRACE_SET_SYSCALL # define PTRACE_SET_SYSCALL 23 # endif { ret = ptrace(PTRACE_SET_SYSCALL, tracee, NULL, syscall); EXPECT_EQ(0, ret); } #elif defined(__aarch64__) # ifndef NT_ARM_SYSTEM_CALL # define NT_ARM_SYSTEM_CALL 0x404 # endif { iov.iov_base = &syscall; iov.iov_len = sizeof(syscall); ret = ptrace(PTRACE_SETREGSET, tracee, NT_ARM_SYSTEM_CALL, &iov); EXPECT_EQ(0, ret); } #else ASSERT_EQ(1, 0) { TH_LOG("How is the syscall changed on this architecture?"); } #endif /* If syscall is skipped, change return value. */ if (syscall == -1) #ifdef SYSCALL_NUM_RET_SHARE_REG TH_LOG("Can't modify syscall return on this architecture"); #else regs.SYSCALL_RET = 1; #endif #ifdef HAVE_GETREGS ret = ptrace(PTRACE_SETREGS, tracee, 0, &regs); #else iov.iov_base = &regs; iov.iov_len = sizeof(regs); ret = ptrace(PTRACE_SETREGSET, tracee, NT_PRSTATUS, &iov); #endif EXPECT_EQ(0, ret); } void tracer_syscall(struct __test_metadata *_metadata, pid_t tracee, int status, void *args) { int ret; unsigned long msg; /* Make sure we got the right message. */ ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); EXPECT_EQ(0, ret); /* Validate and take action on expected syscalls. */ switch (msg) { case 0x1002: /* change getpid to getppid. 
*/ EXPECT_EQ(__NR_getpid, get_syscall(_metadata, tracee)); change_syscall(_metadata, tracee, __NR_getppid); break; case 0x1003: /* skip gettid. */ EXPECT_EQ(__NR_gettid, get_syscall(_metadata, tracee)); change_syscall(_metadata, tracee, -1); break; case 0x1004: /* do nothing (allow getppid) */ EXPECT_EQ(__NR_getppid, get_syscall(_metadata, tracee)); break; default: EXPECT_EQ(0, msg) { TH_LOG("Unknown PTRACE_GETEVENTMSG: 0x%lx", msg); kill(tracee, SIGKILL); } } } void tracer_ptrace(struct __test_metadata *_metadata, pid_t tracee, int status, void *args) { int ret, nr; unsigned long msg; static bool entry; /* Make sure we got an empty message. */ ret = ptrace(PTRACE_GETEVENTMSG, tracee, NULL, &msg); EXPECT_EQ(0, ret); EXPECT_EQ(0, msg); /* The only way to tell PTRACE_SYSCALL entry/exit is by counting. */ entry = !entry; if (!entry) return; nr = get_syscall(_metadata, tracee); if (nr == __NR_getpid) change_syscall(_metadata, tracee, __NR_getppid); } FIXTURE_DATA(TRACE_syscall) { struct sock_fprog prog; pid_t tracer, mytid, mypid, parent; }; FIXTURE_SETUP(TRACE_syscall) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getpid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1002), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_gettid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1003), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE | 0x1004), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; memset(&self->prog, 0, sizeof(self->prog)); self->prog.filter = malloc(sizeof(filter)); ASSERT_NE(NULL, self->prog.filter); memcpy(self->prog.filter, filter, sizeof(filter)); self->prog.len = (unsigned short)ARRAY_SIZE(filter); /* Prepare some testable syscall results. */ self->mytid = syscall(__NR_gettid); ASSERT_GT(self->mytid, 0); ASSERT_NE(self->mytid, 1) { TH_LOG("Running this test as init is not supported. 
:)"); } self->mypid = getpid(); ASSERT_GT(self->mypid, 0); ASSERT_EQ(self->mytid, self->mypid); self->parent = getppid(); ASSERT_GT(self->parent, 0); ASSERT_NE(self->parent, self->mypid); /* Launch tracer. */ self->tracer = setup_trace_fixture(_metadata, tracer_syscall, NULL, false); } FIXTURE_TEARDOWN(TRACE_syscall) { teardown_trace_fixture(_metadata, self->tracer); if (self->prog.filter) free(self->prog.filter); } TEST_F(TRACE_syscall, syscall_allowed) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); /* getppid works as expected (no changes). */ EXPECT_EQ(self->parent, syscall(__NR_getppid)); EXPECT_NE(self->mypid, syscall(__NR_getppid)); } TEST_F(TRACE_syscall, syscall_redirected) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); /* getpid has been redirected to getppid as expected. */ EXPECT_EQ(self->parent, syscall(__NR_getpid)); EXPECT_NE(self->mypid, syscall(__NR_getpid)); } TEST_F(TRACE_syscall, syscall_dropped) { long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); #ifdef SYSCALL_NUM_RET_SHARE_REG /* gettid has been skipped */ EXPECT_EQ(-1, syscall(__NR_gettid)); #else /* gettid has been skipped and an altered return value stored. 
*/ EXPECT_EQ(1, syscall(__NR_gettid)); #endif EXPECT_NE(self->mytid, syscall(__NR_gettid)); } TEST_F(TRACE_syscall, skip_after_RET_TRACE) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); /* Install fixture filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); /* Install "errno on getppid" filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); /* Tracer will redirect getpid to getppid, and we should see EPERM. */ EXPECT_EQ(-1, syscall(__NR_getpid)); EXPECT_EQ(EPERM, errno); } TEST_F_SIGNAL(TRACE_syscall, kill_after_RET_TRACE, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); /* Install fixture filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &self->prog, 0, 0); ASSERT_EQ(0, ret); /* Install "death on getppid" filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); /* Tracer will redirect getpid to getppid, and we should die. 
*/ EXPECT_NE(self->mypid, syscall(__NR_getpid)); } TEST_F(TRACE_syscall, skip_after_ptrace) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EPERM), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ teardown_trace_fixture(_metadata, self->tracer); self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, true); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); /* Install "errno on getppid" filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); /* Tracer will redirect getpid to getppid, and we should see EPERM. */ EXPECT_EQ(-1, syscall(__NR_getpid)); EXPECT_EQ(EPERM, errno); } TEST_F_SIGNAL(TRACE_syscall, kill_after_ptrace, SIGSYS) { struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_getppid, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; /* Swap SECCOMP_RET_TRACE tracer for PTRACE_SYSCALL tracer. */ teardown_trace_fixture(_metadata, self->tracer); self->tracer = setup_trace_fixture(_metadata, tracer_ptrace, NULL, true); ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret); /* Install "death on getppid" filter. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); ASSERT_EQ(0, ret); /* Tracer will redirect getpid to getppid, and we should die. 
*/ EXPECT_NE(self->mypid, syscall(__NR_getpid)); } #ifndef __NR_seccomp # if defined(__i386__) # define __NR_seccomp 354 # elif defined(__x86_64__) # define __NR_seccomp 317 # elif defined(__arm__) # define __NR_seccomp 383 # elif defined(__aarch64__) # define __NR_seccomp 277 # elif defined(__hppa__) # define __NR_seccomp 338 # elif defined(__powerpc__) # define __NR_seccomp 358 # elif defined(__s390__) # define __NR_seccomp 348 # else # warning "seccomp syscall number unknown for this architecture" # define __NR_seccomp 0xffff # endif #endif #ifndef SECCOMP_SET_MODE_STRICT #define SECCOMP_SET_MODE_STRICT 0 #endif #ifndef SECCOMP_SET_MODE_FILTER #define SECCOMP_SET_MODE_FILTER 1 #endif #ifndef SECCOMP_FILTER_FLAG_TSYNC #define SECCOMP_FILTER_FLAG_TSYNC 1 #endif #ifndef seccomp int seccomp(unsigned int op, unsigned int flags, void *args) { errno = 0; return syscall(__NR_seccomp, op, flags, args); } #endif TEST(seccomp_syscall) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } /* Reject insane operation. */ ret = seccomp(-1, 0, &prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } EXPECT_EQ(EINVAL, errno) { TH_LOG("Did not reject crazy op value!"); } /* Reject strict with flags or pointer. */ ret = seccomp(SECCOMP_SET_MODE_STRICT, -1, NULL); EXPECT_EQ(EINVAL, errno) { TH_LOG("Did not reject mode strict with flags!"); } ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, &prog); EXPECT_EQ(EINVAL, errno) { TH_LOG("Did not reject mode strict with uargs!"); } /* Reject insane args for filter. 
*/ ret = seccomp(SECCOMP_SET_MODE_FILTER, -1, &prog); EXPECT_EQ(EINVAL, errno) { TH_LOG("Did not reject crazy filter flags!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, NULL); EXPECT_EQ(EFAULT, errno) { TH_LOG("Did not reject NULL filter!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); EXPECT_EQ(0, errno) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER: %s", strerror(errno)); } } TEST(seccomp_syscall_mode_lock) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } EXPECT_EQ(0, ret) { TH_LOG("Could not install filter!"); } /* Make sure neither entry point will switch to strict. */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_STRICT, 0, 0, 0); EXPECT_EQ(EINVAL, errno) { TH_LOG("Switched to mode strict!"); } ret = seccomp(SECCOMP_SET_MODE_STRICT, 0, NULL); EXPECT_EQ(EINVAL, errno) { TH_LOG("Switched to mode strict!"); } } TEST(TSYNC_first) { struct sock_filter filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; long ret; ret = prctl(PR_SET_NO_NEW_PRIVS, 1, NULL, 0, 0); ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } EXPECT_EQ(0, ret) { TH_LOG("Could not install initial filter with TSYNC!"); } } #define TSYNC_SIBLINGS 2 struct tsync_sibling { pthread_t tid; pid_t system_tid; sem_t *started; pthread_cond_t *cond; pthread_mutex_t *mutex; int diverge; int num_waits; struct sock_fprog *prog; struct 
__test_metadata *metadata; }; /* * To avoid joining joined threads (which is not allowed by Bionic), * make sure we both successfully join and clear the tid to skip a * later join attempt during fixture teardown. Any remaining threads * will be directly killed during teardown. */ #define PTHREAD_JOIN(tid, status) \ do { \ int _rc = pthread_join(tid, status); \ if (_rc) { \ TH_LOG("pthread_join of tid %u failed: %d\n", \ (unsigned int)tid, _rc); \ } else { \ tid = 0; \ } \ } while (0) FIXTURE_DATA(TSYNC) { struct sock_fprog root_prog, apply_prog; struct tsync_sibling sibling[TSYNC_SIBLINGS]; sem_t started; pthread_cond_t cond; pthread_mutex_t mutex; int sibling_count; }; FIXTURE_SETUP(TSYNC) { struct sock_filter root_filter[] = { BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_filter apply_filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; memset(&self->root_prog, 0, sizeof(self->root_prog)); memset(&self->apply_prog, 0, sizeof(self->apply_prog)); memset(&self->sibling, 0, sizeof(self->sibling)); self->root_prog.filter = malloc(sizeof(root_filter)); ASSERT_NE(NULL, self->root_prog.filter); memcpy(self->root_prog.filter, &root_filter, sizeof(root_filter)); self->root_prog.len = (unsigned short)ARRAY_SIZE(root_filter); self->apply_prog.filter = malloc(sizeof(apply_filter)); ASSERT_NE(NULL, self->apply_prog.filter); memcpy(self->apply_prog.filter, &apply_filter, sizeof(apply_filter)); self->apply_prog.len = (unsigned short)ARRAY_SIZE(apply_filter); self->sibling_count = 0; pthread_mutex_init(&self->mutex, NULL); pthread_cond_init(&self->cond, NULL); sem_init(&self->started, 0, 0); self->sibling[0].tid = 0; self->sibling[0].cond = &self->cond; self->sibling[0].started = &self->started; self->sibling[0].mutex = &self->mutex; self->sibling[0].diverge = 0; self->sibling[0].num_waits = 1; 
self->sibling[0].prog = &self->root_prog; self->sibling[0].metadata = _metadata; self->sibling[1].tid = 0; self->sibling[1].cond = &self->cond; self->sibling[1].started = &self->started; self->sibling[1].mutex = &self->mutex; self->sibling[1].diverge = 0; self->sibling[1].prog = &self->root_prog; self->sibling[1].num_waits = 1; self->sibling[1].metadata = _metadata; } FIXTURE_TEARDOWN(TSYNC) { int sib = 0; if (self->root_prog.filter) free(self->root_prog.filter); if (self->apply_prog.filter) free(self->apply_prog.filter); for ( ; sib < self->sibling_count; ++sib) { struct tsync_sibling *s = &self->sibling[sib]; if (!s->tid) continue; /* * If a thread is still running, it may be stuck, so hit * it over the head really hard. */ pthread_kill(s->tid, 9); } pthread_mutex_destroy(&self->mutex); pthread_cond_destroy(&self->cond); sem_destroy(&self->started); } void *tsync_sibling(void *data) { long ret = 0; struct tsync_sibling *me = data; me->system_tid = syscall(__NR_gettid); pthread_mutex_lock(me->mutex); if (me->diverge) { /* Just re-apply the root prog to fork the tree */ ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, me->prog, 0, 0); } sem_post(me->started); /* Return outside of started so parent notices failures. 
*/ if (ret) { pthread_mutex_unlock(me->mutex); return (void *)SIBLING_EXIT_FAILURE; } do { pthread_cond_wait(me->cond, me->mutex); me->num_waits = me->num_waits - 1; } while (me->num_waits); pthread_mutex_unlock(me->mutex); ret = prctl(PR_GET_NO_NEW_PRIVS, 0, 0, 0, 0); if (!ret) return (void *)SIBLING_EXIT_NEWPRIVS; read(0, NULL, 0); return (void *)SIBLING_EXIT_UNKILLED; } void tsync_start_sibling(struct tsync_sibling *sibling) { pthread_create(&sibling->tid, NULL, tsync_sibling, (void *)sibling); } TEST_F(TSYNC, siblings_fail_prctl) { long ret; void *status; struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_prctl, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ERRNO | EINVAL), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } /* Check prctl failure detection by requesting sib 0 diverge. */ ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("setting filter failed"); } self->sibling[0].diverge = 1; tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } /* Signal the threads to clean up*/ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure diverging sibling failed to call prctl. 
*/ PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(SIBLING_EXIT_FAILURE, (long)status); PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); } TEST_F(TSYNC, two_siblings_with_ancestor) { long ret; void *status; ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(0, ret) { TH_LOG("Could install filter on all threads!"); } /* Tell the siblings to test the policy */ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure they are both killed and don't exit cleanly. */ PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(0x0, (long)status); PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(0x0, (long)status); } TEST_F(TSYNC, two_sibling_want_nnp) { void *status; /* start siblings before any prctl() operations */ tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } /* Tell the siblings to test no policy */ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure they are both upset about lacking nnp. 
*/ PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(SIBLING_EXIT_NEWPRIVS, (long)status); } TEST_F(TSYNC, two_siblings_with_no_filter) { long ret; void *status; /* start siblings before any prctl() operations */ tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("Could install filter on all threads!"); } /* Tell the siblings to test the policy */ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure they are both killed and don't exit cleanly. 
*/ PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(0x0, (long)status); PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(0x0, (long)status); } TEST_F(TSYNC, two_siblings_with_one_divergence) { long ret; void *status; ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } self->sibling[0].diverge = 1; tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(self->sibling[0].system_tid, ret) { TH_LOG("Did not fail on diverged sibling."); } /* Wake the threads */ pthread_mutex_lock(&self->mutex); ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); /* Ensure they are both unkilled. */ PTHREAD_JOIN(self->sibling[0].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); PTHREAD_JOIN(self->sibling[1].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); } TEST_F(TSYNC, two_siblings_not_under_filter) { long ret, sib; void *status; ASSERT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } /* * Sibling 0 will have its own seccomp policy * and Sibling 1 will not be under seccomp at * all. Sibling 1 will enter seccomp and 0 * will cause failure. 
*/ self->sibling[0].diverge = 1; tsync_start_sibling(&self->sibling[0]); tsync_start_sibling(&self->sibling[1]); while (self->sibling_count < TSYNC_SIBLINGS) { sem_wait(&self->started); self->sibling_count++; } ret = seccomp(SECCOMP_SET_MODE_FILTER, 0, &self->root_prog); ASSERT_NE(ENOSYS, errno) { TH_LOG("Kernel does not support seccomp syscall!"); } ASSERT_EQ(0, ret) { TH_LOG("Kernel does not support SECCOMP_SET_MODE_FILTER!"); } ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(ret, self->sibling[0].system_tid) { TH_LOG("Did not fail on diverged sibling."); } sib = 1; if (ret == self->sibling[0].system_tid) sib = 0; pthread_mutex_lock(&self->mutex); /* Increment the other siblings num_waits so we can clean up * the one we just saw. */ self->sibling[!sib].num_waits += 1; /* Signal the thread to clean up*/ ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); PTHREAD_JOIN(self->sibling[sib].tid, &status); EXPECT_EQ(SIBLING_EXIT_UNKILLED, (long)status); /* Poll for actual task death. pthread_join doesn't guarantee it. */ while (!kill(self->sibling[sib].system_tid, 0)) sleep(0.1); /* Switch to the remaining sibling */ sib = !sib; ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(0, ret) { TH_LOG("Expected the remaining sibling to sync"); }; pthread_mutex_lock(&self->mutex); /* If remaining sibling didn't have a chance to wake up during * the first broadcast, manually reduce the num_waits now. */ if (self->sibling[sib].num_waits > 1) self->sibling[sib].num_waits = 1; ASSERT_EQ(0, pthread_cond_broadcast(&self->cond)) { TH_LOG("cond broadcast non-zero"); } pthread_mutex_unlock(&self->mutex); PTHREAD_JOIN(self->sibling[sib].tid, &status); EXPECT_EQ(0, (long)status); /* Poll for actual task death. pthread_join doesn't guarantee it. 
*/ while (!kill(self->sibling[sib].system_tid, 0)) sleep(0.1); ret = seccomp(SECCOMP_SET_MODE_FILTER, SECCOMP_FILTER_FLAG_TSYNC, &self->apply_prog); ASSERT_EQ(0, ret); /* just us chickens */ } /* Make sure restarted syscalls are seen directly as "restart_syscall". */ TEST(syscall_restart) { long ret; unsigned long msg; pid_t child_pid; int pipefd[2]; int status; siginfo_t info = { }; struct sock_filter filter[] = { BPF_STMT(BPF_LD|BPF_W|BPF_ABS, offsetof(struct seccomp_data, nr)), #ifdef __NR_sigreturn BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_sigreturn, 6, 0), #endif BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_read, 5, 0), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_exit, 4, 0), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_rt_sigreturn, 3, 0), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_nanosleep, 4, 0), BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_restart_syscall, 4, 0), /* Allow __NR_write for easy logging. */ BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, __NR_write, 0, 1), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_ALLOW), BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_KILL), /* The nanosleep jump target. */ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x100), /* The restart_syscall jump target. */ BPF_STMT(BPF_RET|BPF_K, SECCOMP_RET_TRACE|0x200), }; struct sock_fprog prog = { .len = (unsigned short)ARRAY_SIZE(filter), .filter = filter, }; #if defined(__arm__) struct utsname utsbuf; #endif ASSERT_EQ(0, pipe(pipefd)); child_pid = fork(); ASSERT_LE(0, child_pid); if (child_pid == 0) { /* Child uses EXPECT not ASSERT to deliver status correctly. */ char buf = ' '; struct timespec timeout = { }; /* Attach parent as tracer and stop. 
*/ EXPECT_EQ(0, ptrace(PTRACE_TRACEME)); EXPECT_EQ(0, raise(SIGSTOP)); EXPECT_EQ(0, close(pipefd[1])); EXPECT_EQ(0, prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0)) { TH_LOG("Kernel does not support PR_SET_NO_NEW_PRIVS!"); } ret = prctl(PR_SET_SECCOMP, SECCOMP_MODE_FILTER, &prog, 0, 0); EXPECT_EQ(0, ret) { TH_LOG("Failed to install filter!"); } EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { TH_LOG("Failed to read() sync from parent"); } EXPECT_EQ('.', buf) { TH_LOG("Failed to get sync data from read()"); } /* Start nanosleep to be interrupted. */ timeout.tv_sec = 1; errno = 0; EXPECT_EQ(0, nanosleep(&timeout, NULL)) { TH_LOG("Call to nanosleep() failed (errno %d)", errno); } /* Read final sync from parent. */ EXPECT_EQ(1, read(pipefd[0], &buf, 1)) { TH_LOG("Failed final read() from parent"); } EXPECT_EQ('!', buf) { TH_LOG("Failed to get final data from read()"); } /* Directly report the status of our test harness results. */ syscall(__NR_exit, _metadata->passed ? EXIT_SUCCESS : EXIT_FAILURE); } EXPECT_EQ(0, close(pipefd[0])); /* Attach to child, setup options, and release. */ ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(0, ptrace(PTRACE_SETOPTIONS, child_pid, NULL, PTRACE_O_TRACESECCOMP)); ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); ASSERT_EQ(1, write(pipefd[1], ".", 1)); /* Wait for nanosleep() to start. */ ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); ASSERT_EQ(0x100, msg); EXPECT_EQ(__NR_nanosleep, get_syscall(_metadata, child_pid)); /* Might as well check siginfo for sanity while we're here. 
*/ ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); ASSERT_EQ(SIGTRAP, info.si_signo); ASSERT_EQ(SIGTRAP | (PTRACE_EVENT_SECCOMP << 8), info.si_code); EXPECT_EQ(0, info.si_errno); EXPECT_EQ(getuid(), info.si_uid); /* Verify signal delivery came from child (seccomp-triggered). */ EXPECT_EQ(child_pid, info.si_pid); /* Interrupt nanosleep with SIGSTOP (which we'll need to handle). */ ASSERT_EQ(0, kill(child_pid, SIGSTOP)); ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(SIGSTOP, WSTOPSIG(status)); /* Verify signal delivery came from parent now. */ ASSERT_EQ(0, ptrace(PTRACE_GETSIGINFO, child_pid, NULL, &info)); EXPECT_EQ(getpid(), info.si_pid); /* Restart nanosleep with SIGCONT, which triggers restart_syscall. */ ASSERT_EQ(0, kill(child_pid, SIGCONT)); ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(SIGCONT, WSTOPSIG(status)); ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); /* Wait for restart_syscall() to start. */ ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); ASSERT_EQ(true, WIFSTOPPED(status)); ASSERT_EQ(SIGTRAP, WSTOPSIG(status)); ASSERT_EQ(PTRACE_EVENT_SECCOMP, (status >> 16)); ASSERT_EQ(0, ptrace(PTRACE_GETEVENTMSG, child_pid, NULL, &msg)); ASSERT_EQ(0x200, msg); ret = get_syscall(_metadata, child_pid); #if defined(__arm__) /* * FIXME: * - native ARM registers do NOT expose true syscall. * - compat ARM registers on ARM64 DO expose true syscall. */ ASSERT_EQ(0, uname(&utsbuf)); if (strncmp(utsbuf.machine, "arm", 3) == 0) { EXPECT_EQ(__NR_nanosleep, ret); } else #endif { EXPECT_EQ(__NR_restart_syscall, ret); } /* Write again to end test. 
*/ ASSERT_EQ(0, ptrace(PTRACE_CONT, child_pid, NULL, 0)); ASSERT_EQ(1, write(pipefd[1], "!", 1)); EXPECT_EQ(0, close(pipefd[1])); ASSERT_EQ(child_pid, waitpid(child_pid, &status, 0)); if (WIFSIGNALED(status) || WEXITSTATUS(status)) _metadata->passed = 0; } /* * TODO: * - add microbenchmarks * - expand NNP testing * - better arch-specific TRACE and TRAP handlers. * - endianness checking when appropriate * - 64-bit arg prodding * - arch value testing (x86 modes especially) * - ... */ TEST_HARNESS_MAIN
gpl-2.0
randomblame/3.1.10_a50x
net/wireless/nl80211.c
27
188021
/* * This is the new netlink-based wireless configuration interface. * * Copyright 2006-2010 Johannes Berg <johannes@sipsolutions.net> */ #include <linux/if.h> #include <linux/module.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/if_ether.h> #include <linux/ieee80211.h> #include <linux/nl80211.h> #include <linux/rtnetlink.h> #include <linux/netlink.h> #include <linux/etherdevice.h> #include <net/net_namespace.h> #include <net/genetlink.h> #include <net/cfg80211.h> #include <net/sock.h> #include "core.h" #include "nl80211.h" #include "reg.h" static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info); /* the netlink family */ static struct genl_family nl80211_fam = { .id = GENL_ID_GENERATE, /* don't bother with a hardcoded ID */ .name = "nl80211", /* have users key off the name instead */ .hdrsize = 0, /* no private header */ .version = 1, /* no particular meaning now */ .maxattr = NL80211_ATTR_MAX, .netnsok = true, .pre_doit = nl80211_pre_doit, .post_doit = nl80211_post_doit, }; /* internal helper: get rdev and dev */ static int get_rdev_dev_by_info_ifindex(struct genl_info *info, struct cfg80211_registered_device **rdev, struct net_device **dev) { struct nlattr **attrs = info->attrs; int ifindex; if (!attrs[NL80211_ATTR_IFINDEX]) return -EINVAL; ifindex = nla_get_u32(attrs[NL80211_ATTR_IFINDEX]); *dev = dev_get_by_index(genl_info_net(info), ifindex); if (!*dev) return -ENODEV; *rdev = cfg80211_get_dev_from_ifindex(genl_info_net(info), ifindex); if (IS_ERR(*rdev)) { dev_put(*dev); return PTR_ERR(*rdev); } return 0; } /* policy for the attributes */ static const struct nla_policy nl80211_policy[NL80211_ATTR_MAX+1] = { [NL80211_ATTR_WIPHY] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_NAME] = { .type = NLA_NUL_STRING, .len = 20-1 }, [NL80211_ATTR_WIPHY_TXQ_PARAMS] = { .type = NLA_NESTED }, 
[NL80211_ATTR_WIPHY_FREQ] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_CHANNEL_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_RETRY_SHORT] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_RETRY_LONG] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_FRAG_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_RTS_THRESHOLD] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_COVERAGE_CLASS] = { .type = NLA_U8 }, [NL80211_ATTR_IFTYPE] = { .type = NLA_U32 }, [NL80211_ATTR_IFINDEX] = { .type = NLA_U32 }, [NL80211_ATTR_IFNAME] = { .type = NLA_NUL_STRING, .len = IFNAMSIZ-1 }, [NL80211_ATTR_MAC] = { .len = ETH_ALEN }, [NL80211_ATTR_PREV_BSSID] = { .len = ETH_ALEN }, [NL80211_ATTR_KEY] = { .type = NLA_NESTED, }, [NL80211_ATTR_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_ATTR_KEY_IDX] = { .type = NLA_U8 }, [NL80211_ATTR_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_ATTR_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, [NL80211_ATTR_KEY_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_BEACON_INTERVAL] = { .type = NLA_U32 }, [NL80211_ATTR_DTIM_PERIOD] = { .type = NLA_U32 }, [NL80211_ATTR_BEACON_HEAD] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_BEACON_TAIL] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_STA_AID] = { .type = NLA_U16 }, [NL80211_ATTR_STA_FLAGS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_LISTEN_INTERVAL] = { .type = NLA_U16 }, [NL80211_ATTR_STA_SUPPORTED_RATES] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_ATTR_STA_PLINK_ACTION] = { .type = NLA_U8 }, [NL80211_ATTR_STA_VLAN] = { .type = NLA_U32 }, [NL80211_ATTR_MNTR_FLAGS] = { /* NLA_NESTED can't be empty */ }, [NL80211_ATTR_MESH_ID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_MESH_ID_LEN }, [NL80211_ATTR_MPATH_NEXT_HOP] = { .type = NLA_U32 }, [NL80211_ATTR_REG_ALPHA2] = { .type = NLA_STRING, .len = 2 }, [NL80211_ATTR_REG_RULES] = { .type = NLA_NESTED }, [NL80211_ATTR_BSS_CTS_PROT] = { .type = NLA_U8 }, 
[NL80211_ATTR_BSS_SHORT_PREAMBLE] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_SHORT_SLOT_TIME] = { .type = NLA_U8 }, [NL80211_ATTR_BSS_BASIC_RATES] = { .type = NLA_BINARY, .len = NL80211_MAX_SUPP_RATES }, [NL80211_ATTR_BSS_HT_OPMODE] = { .type = NLA_U16 }, [NL80211_ATTR_MESH_CONFIG] = { .type = NLA_NESTED }, [NL80211_ATTR_SUPPORT_MESH_AUTH] = { .type = NLA_FLAG }, [NL80211_ATTR_HT_CAPABILITY] = { .len = NL80211_HT_CAPABILITY_LEN }, [NL80211_ATTR_MGMT_SUBTYPE] = { .type = NLA_U8 }, [NL80211_ATTR_IE] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_SCAN_FLAGS] = { .type = NLA_U32 }, [NL80211_ATTR_SCAN_FREQUENCIES] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SSIDS] = { .type = NLA_NESTED }, [NL80211_ATTR_SSID] = { .type = NLA_BINARY, .len = IEEE80211_MAX_SSID_LEN }, [NL80211_ATTR_AUTH_TYPE] = { .type = NLA_U32 }, [NL80211_ATTR_REASON_CODE] = { .type = NLA_U16 }, [NL80211_ATTR_FREQ_FIXED] = { .type = NLA_FLAG }, [NL80211_ATTR_TIMED_OUT] = { .type = NLA_FLAG }, [NL80211_ATTR_USE_MFP] = { .type = NLA_U32 }, [NL80211_ATTR_STA_FLAGS2] = { .len = sizeof(struct nl80211_sta_flag_update), }, [NL80211_ATTR_CONTROL_PORT] = { .type = NLA_FLAG }, [NL80211_ATTR_CONTROL_PORT_ETHERTYPE] = { .type = NLA_U16 }, [NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT] = { .type = NLA_FLAG }, [NL80211_ATTR_PRIVACY] = { .type = NLA_FLAG }, [NL80211_ATTR_CIPHER_SUITE_GROUP] = { .type = NLA_U32 }, [NL80211_ATTR_WPA_VERSIONS] = { .type = NLA_U32 }, [NL80211_ATTR_PID] = { .type = NLA_U32 }, [NL80211_ATTR_4ADDR] = { .type = NLA_U8 }, [NL80211_ATTR_PMKID] = { .type = NLA_BINARY, .len = WLAN_PMKID_LEN }, [NL80211_ATTR_DURATION] = { .type = NLA_U32 }, [NL80211_ATTR_COOKIE] = { .type = NLA_U64 }, [NL80211_ATTR_TX_RATES] = { .type = NLA_NESTED }, [NL80211_ATTR_FRAME] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_ATTR_FRAME_MATCH] = { .type = NLA_BINARY, }, [NL80211_ATTR_PS_STATE] = { .type = NLA_U32 }, [NL80211_ATTR_CQM] = { .type = NLA_NESTED, }, 
[NL80211_ATTR_LOCAL_STATE_CHANGE] = { .type = NLA_FLAG }, [NL80211_ATTR_AP_ISOLATE] = { .type = NLA_U8 }, [NL80211_ATTR_WIPHY_TX_POWER_SETTING] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_TX_POWER_LEVEL] = { .type = NLA_U32 }, [NL80211_ATTR_FRAME_TYPE] = { .type = NLA_U16 }, [NL80211_ATTR_WIPHY_ANTENNA_TX] = { .type = NLA_U32 }, [NL80211_ATTR_WIPHY_ANTENNA_RX] = { .type = NLA_U32 }, [NL80211_ATTR_MCAST_RATE] = { .type = NLA_U32 }, [NL80211_ATTR_OFFCHANNEL_TX_OK] = { .type = NLA_FLAG }, [NL80211_ATTR_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED }, [NL80211_ATTR_WOWLAN_TRIGGERS] = { .type = NLA_NESTED }, [NL80211_ATTR_STA_PLINK_STATE] = { .type = NLA_U8 }, [NL80211_ATTR_SCHED_SCAN_INTERVAL] = { .type = NLA_U32 }, [NL80211_ATTR_REKEY_DATA] = { .type = NLA_NESTED }, [NL80211_ATTR_SCAN_SUPP_RATES] = { .type = NLA_NESTED }, }; /* policy for the key attributes */ static const struct nla_policy nl80211_key_policy[NL80211_KEY_MAX + 1] = { [NL80211_KEY_DATA] = { .type = NLA_BINARY, .len = WLAN_MAX_KEY_LEN }, [NL80211_KEY_IDX] = { .type = NLA_U8 }, [NL80211_KEY_CIPHER] = { .type = NLA_U32 }, [NL80211_KEY_SEQ] = { .type = NLA_BINARY, .len = 8 }, [NL80211_KEY_DEFAULT] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_MGMT] = { .type = NLA_FLAG }, [NL80211_KEY_TYPE] = { .type = NLA_U32 }, [NL80211_KEY_DEFAULT_TYPES] = { .type = NLA_NESTED }, }; /* policy for the key default flags */ static const struct nla_policy nl80211_key_default_policy[NUM_NL80211_KEY_DEFAULT_TYPES] = { [NL80211_KEY_DEFAULT_TYPE_UNICAST] = { .type = NLA_FLAG }, [NL80211_KEY_DEFAULT_TYPE_MULTICAST] = { .type = NLA_FLAG }, }; /* policy for WoWLAN attributes */ static const struct nla_policy nl80211_wowlan_policy[NUM_NL80211_WOWLAN_TRIG] = { [NL80211_WOWLAN_TRIG_ANY] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_DISCONNECT] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_MAGIC_PKT] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_PKT_PATTERN] = { .type = NLA_NESTED }, [NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE] = { .type = NLA_FLAG 
}, [NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE] = { .type = NLA_FLAG }, [NL80211_WOWLAN_TRIG_RFKILL_RELEASE] = { .type = NLA_FLAG }, }; /* policy for GTK rekey offload attributes */ static const struct nla_policy nl80211_rekey_policy[NUM_NL80211_REKEY_DATA] = { [NL80211_REKEY_DATA_KEK] = { .len = NL80211_KEK_LEN }, [NL80211_REKEY_DATA_KCK] = { .len = NL80211_KCK_LEN }, [NL80211_REKEY_DATA_REPLAY_CTR] = { .len = NL80211_REPLAY_CTR_LEN }, }; /* ifidx get helper */ static int nl80211_get_ifidx(struct netlink_callback *cb) { int res; res = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, nl80211_fam.attrbuf, nl80211_fam.maxattr, nl80211_policy); if (res) return res; if (!nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]) return -EINVAL; res = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_IFINDEX]); if (!res) return -EINVAL; return res; } static int nl80211_prepare_netdev_dump(struct sk_buff *skb, struct netlink_callback *cb, struct cfg80211_registered_device **rdev, struct net_device **dev) { int ifidx = cb->args[0]; int err; if (!ifidx) ifidx = nl80211_get_ifidx(cb); if (ifidx < 0) return ifidx; cb->args[0] = ifidx; rtnl_lock(); *dev = __dev_get_by_index(sock_net(skb->sk), ifidx); if (!*dev) { err = -ENODEV; goto out_rtnl; } *rdev = cfg80211_get_dev_from_ifindex(sock_net(skb->sk), ifidx); if (IS_ERR(*rdev)) { err = PTR_ERR(*rdev); goto out_rtnl; } return 0; out_rtnl: rtnl_unlock(); return err; } static void nl80211_finish_netdev_dump(struct cfg80211_registered_device *rdev) { cfg80211_unlock_rdev(rdev); rtnl_unlock(); } /* IE validation */ static bool is_valid_ie_attr(const struct nlattr *attr) { const u8 *pos; int len; if (!attr) return true; pos = nla_data(attr); len = nla_len(attr); while (len) { u8 elemlen; if (len < 2) return false; len -= 2; elemlen = pos[1]; if (elemlen > len) return false; len -= elemlen; pos += 2 + elemlen; } return true; } /* message building helper */ static inline void 
*nl80211hdr_put(struct sk_buff *skb, u32 pid, u32 seq, int flags, u8 cmd) { /* since there is no private header just add the generic one */ return genlmsg_put(skb, pid, seq, &nl80211_fam, flags, cmd); } static int nl80211_msg_put_channel(struct sk_buff *msg, struct ieee80211_channel *chan) { NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_FREQ, chan->center_freq); if (chan->flags & IEEE80211_CHAN_DISABLED) NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_DISABLED); if (chan->flags & IEEE80211_CHAN_PASSIVE_SCAN) NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_PASSIVE_SCAN); if (chan->flags & IEEE80211_CHAN_NO_IBSS) NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_NO_IBSS); if (chan->flags & IEEE80211_CHAN_RADAR) NLA_PUT_FLAG(msg, NL80211_FREQUENCY_ATTR_RADAR); NLA_PUT_U32(msg, NL80211_FREQUENCY_ATTR_MAX_TX_POWER, DBM_TO_MBM(chan->max_power)); return 0; nla_put_failure: return -ENOBUFS; } /* netlink command implementations */ struct key_parse { struct key_params p; int idx; int type; bool def, defmgmt; bool def_uni, def_multi; }; static int nl80211_parse_key_new(struct nlattr *key, struct key_parse *k) { struct nlattr *tb[NL80211_KEY_MAX + 1]; int err = nla_parse_nested(tb, NL80211_KEY_MAX, key, nl80211_key_policy); if (err) return err; k->def = !!tb[NL80211_KEY_DEFAULT]; k->defmgmt = !!tb[NL80211_KEY_DEFAULT_MGMT]; if (k->def) { k->def_uni = true; k->def_multi = true; } if (k->defmgmt) k->def_multi = true; if (tb[NL80211_KEY_IDX]) k->idx = nla_get_u8(tb[NL80211_KEY_IDX]); if (tb[NL80211_KEY_DATA]) { k->p.key = nla_data(tb[NL80211_KEY_DATA]); k->p.key_len = nla_len(tb[NL80211_KEY_DATA]); } if (tb[NL80211_KEY_SEQ]) { k->p.seq = nla_data(tb[NL80211_KEY_SEQ]); k->p.seq_len = nla_len(tb[NL80211_KEY_SEQ]); } if (tb[NL80211_KEY_CIPHER]) k->p.cipher = nla_get_u32(tb[NL80211_KEY_CIPHER]); if (tb[NL80211_KEY_TYPE]) { k->type = nla_get_u32(tb[NL80211_KEY_TYPE]); if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) return -EINVAL; } if (tb[NL80211_KEY_DEFAULT_TYPES]) { struct nlattr 
*kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; int err = nla_parse_nested(kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, tb[NL80211_KEY_DEFAULT_TYPES], nl80211_key_default_policy); if (err) return err; k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST]; k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST]; } return 0; } static int nl80211_parse_key_old(struct genl_info *info, struct key_parse *k) { if (info->attrs[NL80211_ATTR_KEY_DATA]) { k->p.key = nla_data(info->attrs[NL80211_ATTR_KEY_DATA]); k->p.key_len = nla_len(info->attrs[NL80211_ATTR_KEY_DATA]); } if (info->attrs[NL80211_ATTR_KEY_SEQ]) { k->p.seq = nla_data(info->attrs[NL80211_ATTR_KEY_SEQ]); k->p.seq_len = nla_len(info->attrs[NL80211_ATTR_KEY_SEQ]); } if (info->attrs[NL80211_ATTR_KEY_IDX]) k->idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); if (info->attrs[NL80211_ATTR_KEY_CIPHER]) k->p.cipher = nla_get_u32(info->attrs[NL80211_ATTR_KEY_CIPHER]); k->def = !!info->attrs[NL80211_ATTR_KEY_DEFAULT]; k->defmgmt = !!info->attrs[NL80211_ATTR_KEY_DEFAULT_MGMT]; if (k->def) { k->def_uni = true; k->def_multi = true; } if (k->defmgmt) k->def_multi = true; if (info->attrs[NL80211_ATTR_KEY_TYPE]) { k->type = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); if (k->type < 0 || k->type >= NUM_NL80211_KEYTYPES) return -EINVAL; } if (info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES]) { struct nlattr *kdt[NUM_NL80211_KEY_DEFAULT_TYPES]; int err = nla_parse_nested( kdt, NUM_NL80211_KEY_DEFAULT_TYPES - 1, info->attrs[NL80211_ATTR_KEY_DEFAULT_TYPES], nl80211_key_default_policy); if (err) return err; k->def_uni = kdt[NL80211_KEY_DEFAULT_TYPE_UNICAST]; k->def_multi = kdt[NL80211_KEY_DEFAULT_TYPE_MULTICAST]; } return 0; } static int nl80211_parse_key(struct genl_info *info, struct key_parse *k) { int err; memset(k, 0, sizeof(*k)); k->idx = -1; k->type = -1; if (info->attrs[NL80211_ATTR_KEY]) err = nl80211_parse_key_new(info->attrs[NL80211_ATTR_KEY], k); else err = nl80211_parse_key_old(info, k); if (err) return err; if (k->def && k->defmgmt) 
	return -EINVAL;

	/* a mgmt-frame default key must be group (multicast) only */
	if (k->defmgmt) {
		if (k->def_uni || !k->def_multi)
			return -EINVAL;
	}

	if (k->idx != -1) {
		if (k->defmgmt) {
			/* IGTK (mgmt) keys live in slots 4-5 */
			if (k->idx < 4 || k->idx > 5)
				return -EINVAL;
		} else if (k->def) {
			/* WEP-style default TX keys live in slots 0-3 */
			if (k->idx < 0 || k->idx > 3)
				return -EINVAL;
		} else {
			if (k->idx < 0 || k->idx > 5)
				return -EINVAL;
		}
	}

	return 0;
}

/*
 * Parse the nested NL80211_ATTR_KEYS attribute (used by connect/IBSS)
 * into a freshly allocated cfg80211_cached_keys structure.  Returns
 * the keys on success or an ERR_PTR() on failure; on success the
 * caller owns, and must eventually free, the returned structure.
 */
static struct cfg80211_cached_keys *
nl80211_parse_connkeys(struct cfg80211_registered_device *rdev,
		       struct nlattr *keys)
{
	struct key_parse parse;
	struct nlattr *key;
	struct cfg80211_cached_keys *result;
	int rem, err, def = 0;

	result = kzalloc(sizeof(*result), GFP_KERNEL);
	if (!result)
		return ERR_PTR(-ENOMEM);

	/* -1 == no default key configured */
	result->def = -1;
	result->defmgmt = -1;

	nla_for_each_nested(key, keys, rem) {
		memset(&parse, 0, sizeof(parse));
		parse.idx = -1;

		err = nl80211_parse_key_new(key, &parse);
		if (err)
			goto error;
		err = -EINVAL;
		if (!parse.p.key)
			goto error;
		if (parse.idx < 0 || parse.idx > 4)
			goto error;
		if (parse.def) {
			/* at most one default key is allowed */
			if (def)
				goto error;
			def = 1;
			result->def = parse.idx;
			if (!parse.def_uni || !parse.def_multi)
				goto error;
		} else if (parse.defmgmt)
			goto error;
		err = cfg80211_validate_key_settings(rdev, &parse.p,
						     parse.idx, false, NULL);
		if (err)
			goto error;
		result->params[parse.idx].cipher = parse.p.cipher;
		result->params[parse.idx].key_len = parse.p.key_len;
		/* point at, and copy the key material into, our own storage */
		result->params[parse.idx].key = result->data[parse.idx];
		memcpy(result->data[parse.idx], parse.p.key, parse.p.key_len);
	}

	return result;

 error:
	kfree(result);
	return ERR_PTR(err);
}

/*
 * May keys be configured on this interface right now?  Must be called
 * with the wdev lock held.  Returns 0 when allowed, -ENOLINK when the
 * interface is not yet joined/connected, -EINVAL for interface types
 * that never take keys.
 */
static int nl80211_key_allowed(struct wireless_dev *wdev)
{
	ASSERT_WDEV_LOCK(wdev);

	switch (wdev->iftype) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_P2P_GO:
	case NL80211_IFTYPE_MESH_POINT:
		break;
	case NL80211_IFTYPE_ADHOC:
		if (!wdev->current_bss)
			return -ENOLINK;
		break;
	case NL80211_IFTYPE_STATION:
	case NL80211_IFTYPE_P2P_CLIENT:
		if (wdev->sme_state != CFG80211_SME_CONNECTED)
			return -ENOLINK;
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

/* emit a nested attribute containing one NLA_FLAG per bit set in ifmodes */
static int nl80211_put_iftypes(struct sk_buff *msg, u32 attr, u16 ifmodes)
{ struct nlattr *nl_modes = nla_nest_start(msg, attr); int i; if (!nl_modes) goto nla_put_failure; i = 0; while (ifmodes) { if (ifmodes & 1) NLA_PUT_FLAG(msg, i); ifmodes >>= 1; i++; } nla_nest_end(msg, nl_modes); return 0; nla_put_failure: return -ENOBUFS; } static int nl80211_put_iface_combinations(struct wiphy *wiphy, struct sk_buff *msg) { struct nlattr *nl_combis; int i, j; nl_combis = nla_nest_start(msg, NL80211_ATTR_INTERFACE_COMBINATIONS); if (!nl_combis) goto nla_put_failure; for (i = 0; i < wiphy->n_iface_combinations; i++) { const struct ieee80211_iface_combination *c; struct nlattr *nl_combi, *nl_limits; c = &wiphy->iface_combinations[i]; nl_combi = nla_nest_start(msg, i + 1); if (!nl_combi) goto nla_put_failure; nl_limits = nla_nest_start(msg, NL80211_IFACE_COMB_LIMITS); if (!nl_limits) goto nla_put_failure; for (j = 0; j < c->n_limits; j++) { struct nlattr *nl_limit; nl_limit = nla_nest_start(msg, j + 1); if (!nl_limit) goto nla_put_failure; NLA_PUT_U32(msg, NL80211_IFACE_LIMIT_MAX, c->limits[j].max); if (nl80211_put_iftypes(msg, NL80211_IFACE_LIMIT_TYPES, c->limits[j].types)) goto nla_put_failure; nla_nest_end(msg, nl_limit); } nla_nest_end(msg, nl_limits); if (c->beacon_int_infra_match) NLA_PUT_FLAG(msg, NL80211_IFACE_COMB_STA_AP_BI_MATCH); NLA_PUT_U32(msg, NL80211_IFACE_COMB_NUM_CHANNELS, c->num_different_channels); NLA_PUT_U32(msg, NL80211_IFACE_COMB_MAXNUM, c->max_interfaces); nla_nest_end(msg, nl_combi); } nla_nest_end(msg, nl_combis); return 0; nla_put_failure: return -ENOBUFS; } static int nl80211_send_wiphy(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct cfg80211_registered_device *dev) { void *hdr; struct nlattr *nl_bands, *nl_band; struct nlattr *nl_freqs, *nl_freq; struct nlattr *nl_rates, *nl_rate; struct nlattr *nl_cmds; enum ieee80211_band band; struct ieee80211_channel *chan; struct ieee80211_rate *rate; int i; const struct ieee80211_txrx_stypes *mgmt_stypes = dev->wiphy.mgmt_stypes; hdr = nl80211hdr_put(msg, pid, seq, flags, 
NL80211_CMD_NEW_WIPHY); if (!hdr) return -1; NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, dev->wiphy_idx); NLA_PUT_STRING(msg, NL80211_ATTR_WIPHY_NAME, wiphy_name(&dev->wiphy)); NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, cfg80211_rdev_list_generation); NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_SHORT, dev->wiphy.retry_short); NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_RETRY_LONG, dev->wiphy.retry_long); NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FRAG_THRESHOLD, dev->wiphy.frag_threshold); NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_RTS_THRESHOLD, dev->wiphy.rts_threshold); NLA_PUT_U8(msg, NL80211_ATTR_WIPHY_COVERAGE_CLASS, dev->wiphy.coverage_class); NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCAN_SSIDS, dev->wiphy.max_scan_ssids); NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_SCHED_SCAN_SSIDS, dev->wiphy.max_sched_scan_ssids); NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCAN_IE_LEN, dev->wiphy.max_scan_ie_len); NLA_PUT_U16(msg, NL80211_ATTR_MAX_SCHED_SCAN_IE_LEN, dev->wiphy.max_sched_scan_ie_len); if (dev->wiphy.flags & WIPHY_FLAG_IBSS_RSN) NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_IBSS_RSN); if (dev->wiphy.flags & WIPHY_FLAG_MESH_AUTH) NLA_PUT_FLAG(msg, NL80211_ATTR_SUPPORT_MESH_AUTH); NLA_PUT(msg, NL80211_ATTR_CIPHER_SUITES, sizeof(u32) * dev->wiphy.n_cipher_suites, dev->wiphy.cipher_suites); NLA_PUT_U8(msg, NL80211_ATTR_MAX_NUM_PMKIDS, dev->wiphy.max_num_pmkids); if (dev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) NLA_PUT_FLAG(msg, NL80211_ATTR_CONTROL_PORT_ETHERTYPE); NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX, dev->wiphy.available_antennas_tx); NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_AVAIL_RX, dev->wiphy.available_antennas_rx); if ((dev->wiphy.available_antennas_tx || dev->wiphy.available_antennas_rx) && dev->ops->get_antenna) { u32 tx_ant = 0, rx_ant = 0; int res; res = dev->ops->get_antenna(&dev->wiphy, &tx_ant, &rx_ant); if (!res) { NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_TX, tx_ant); NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_ANTENNA_RX, rx_ant); } } if (nl80211_put_iftypes(msg, 
NL80211_ATTR_SUPPORTED_IFTYPES, dev->wiphy.interface_modes)) goto nla_put_failure; nl_bands = nla_nest_start(msg, NL80211_ATTR_WIPHY_BANDS); if (!nl_bands) goto nla_put_failure; for (band = 0; band < IEEE80211_NUM_BANDS; band++) { if (!dev->wiphy.bands[band]) continue; nl_band = nla_nest_start(msg, band); if (!nl_band) goto nla_put_failure; /* add HT info */ if (dev->wiphy.bands[band]->ht_cap.ht_supported) { NLA_PUT(msg, NL80211_BAND_ATTR_HT_MCS_SET, sizeof(dev->wiphy.bands[band]->ht_cap.mcs), &dev->wiphy.bands[band]->ht_cap.mcs); NLA_PUT_U16(msg, NL80211_BAND_ATTR_HT_CAPA, dev->wiphy.bands[band]->ht_cap.cap); NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_FACTOR, dev->wiphy.bands[band]->ht_cap.ampdu_factor); NLA_PUT_U8(msg, NL80211_BAND_ATTR_HT_AMPDU_DENSITY, dev->wiphy.bands[band]->ht_cap.ampdu_density); } /* add frequencies */ nl_freqs = nla_nest_start(msg, NL80211_BAND_ATTR_FREQS); if (!nl_freqs) goto nla_put_failure; for (i = 0; i < dev->wiphy.bands[band]->n_channels; i++) { nl_freq = nla_nest_start(msg, i); if (!nl_freq) goto nla_put_failure; chan = &dev->wiphy.bands[band]->channels[i]; if (nl80211_msg_put_channel(msg, chan)) goto nla_put_failure; nla_nest_end(msg, nl_freq); } nla_nest_end(msg, nl_freqs); /* add bitrates */ nl_rates = nla_nest_start(msg, NL80211_BAND_ATTR_RATES); if (!nl_rates) goto nla_put_failure; for (i = 0; i < dev->wiphy.bands[band]->n_bitrates; i++) { nl_rate = nla_nest_start(msg, i); if (!nl_rate) goto nla_put_failure; rate = &dev->wiphy.bands[band]->bitrates[i]; NLA_PUT_U32(msg, NL80211_BITRATE_ATTR_RATE, rate->bitrate); if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE) NLA_PUT_FLAG(msg, NL80211_BITRATE_ATTR_2GHZ_SHORTPREAMBLE); nla_nest_end(msg, nl_rate); } nla_nest_end(msg, nl_rates); nla_nest_end(msg, nl_band); } nla_nest_end(msg, nl_bands); nl_cmds = nla_nest_start(msg, NL80211_ATTR_SUPPORTED_COMMANDS); if (!nl_cmds) goto nla_put_failure; i = 0; #define CMD(op, n) \ do { \ if (dev->ops->op) { \ i++; \ NLA_PUT_U32(msg, i, NL80211_CMD_ 
## n); \ } \ } while (0) CMD(add_virtual_intf, NEW_INTERFACE); CMD(change_virtual_intf, SET_INTERFACE); CMD(add_key, NEW_KEY); CMD(add_beacon, NEW_BEACON); CMD(add_station, NEW_STATION); CMD(add_mpath, NEW_MPATH); CMD(update_mesh_config, SET_MESH_CONFIG); CMD(change_bss, SET_BSS); CMD(auth, AUTHENTICATE); CMD(assoc, ASSOCIATE); CMD(deauth, DEAUTHENTICATE); CMD(disassoc, DISASSOCIATE); CMD(join_ibss, JOIN_IBSS); CMD(join_mesh, JOIN_MESH); CMD(set_pmksa, SET_PMKSA); CMD(del_pmksa, DEL_PMKSA); CMD(flush_pmksa, FLUSH_PMKSA); CMD(remain_on_channel, REMAIN_ON_CHANNEL); CMD(set_bitrate_mask, SET_TX_BITRATE_MASK); CMD(mgmt_tx, FRAME); CMD(mgmt_tx_cancel_wait, FRAME_WAIT_CANCEL); if (dev->wiphy.flags & WIPHY_FLAG_NETNS_OK) { i++; NLA_PUT_U32(msg, i, NL80211_CMD_SET_WIPHY_NETNS); } CMD(set_channel, SET_CHANNEL); CMD(set_wds_peer, SET_WDS_PEER); if (dev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) CMD(sched_scan_start, START_SCHED_SCAN); #undef CMD if (dev->ops->connect || dev->ops->auth) { i++; NLA_PUT_U32(msg, i, NL80211_CMD_CONNECT); } if (dev->ops->disconnect || dev->ops->deauth) { i++; NLA_PUT_U32(msg, i, NL80211_CMD_DISCONNECT); } nla_nest_end(msg, nl_cmds); if (dev->ops->remain_on_channel) NLA_PUT_U32(msg, NL80211_ATTR_MAX_REMAIN_ON_CHANNEL_DURATION, dev->wiphy.max_remain_on_channel_duration); /* for now at least assume all drivers have it */ if (dev->ops->mgmt_tx) NLA_PUT_FLAG(msg, NL80211_ATTR_OFFCHANNEL_TX_OK); if (mgmt_stypes) { u16 stypes; struct nlattr *nl_ftypes, *nl_ifs; enum nl80211_iftype ift; nl_ifs = nla_nest_start(msg, NL80211_ATTR_TX_FRAME_TYPES); if (!nl_ifs) goto nla_put_failure; for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { nl_ftypes = nla_nest_start(msg, ift); if (!nl_ftypes) goto nla_put_failure; i = 0; stypes = mgmt_stypes[ift].tx; while (stypes) { if (stypes & 1) NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, (i << 4) | IEEE80211_FTYPE_MGMT); stypes >>= 1; i++; } nla_nest_end(msg, nl_ftypes); } nla_nest_end(msg, nl_ifs); nl_ifs = 
nla_nest_start(msg, NL80211_ATTR_RX_FRAME_TYPES); if (!nl_ifs) goto nla_put_failure; for (ift = 0; ift < NUM_NL80211_IFTYPES; ift++) { nl_ftypes = nla_nest_start(msg, ift); if (!nl_ftypes) goto nla_put_failure; i = 0; stypes = mgmt_stypes[ift].rx; while (stypes) { if (stypes & 1) NLA_PUT_U16(msg, NL80211_ATTR_FRAME_TYPE, (i << 4) | IEEE80211_FTYPE_MGMT); stypes >>= 1; i++; } nla_nest_end(msg, nl_ftypes); } nla_nest_end(msg, nl_ifs); } if (dev->wiphy.wowlan.flags || dev->wiphy.wowlan.n_patterns) { struct nlattr *nl_wowlan; nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS_SUPPORTED); if (!nl_wowlan) goto nla_put_failure; if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_ANY) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY); if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_DISCONNECT) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT); if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_MAGIC_PKT) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT); if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_SUPPORTS_GTK_REKEY) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED); if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE); if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST); if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_4WAY_HANDSHAKE) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE); if (dev->wiphy.wowlan.flags & WIPHY_WOWLAN_RFKILL_RELEASE) NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_RFKILL_RELEASE); if (dev->wiphy.wowlan.n_patterns) { struct nl80211_wowlan_pattern_support pat = { .max_patterns = dev->wiphy.wowlan.n_patterns, .min_pattern_len = dev->wiphy.wowlan.pattern_min_len, .max_pattern_len = dev->wiphy.wowlan.pattern_max_len, }; NLA_PUT(msg, NL80211_WOWLAN_TRIG_PKT_PATTERN, sizeof(pat), &pat); } nla_nest_end(msg, nl_wowlan); } if (nl80211_put_iftypes(msg, NL80211_ATTR_SOFTWARE_IFTYPES, dev->wiphy.software_iftypes)) goto nla_put_failure; if 
(nl80211_put_iface_combinations(&dev->wiphy, msg))
		goto nla_put_failure;

	return genlmsg_end(msg, hdr);

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

/* dump callback for NL80211_CMD_GET_WIPHY: one message per wiphy */
static int nl80211_dump_wiphy(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx = 0;
	int start = cb->args[0];	/* resume point from the previous pass */
	struct cfg80211_registered_device *dev;

	mutex_lock(&cfg80211_mutex);
	list_for_each_entry(dev, &cfg80211_rdev_list, list) {
		/* only wiphys visible in the requester's netns */
		if (!net_eq(wiphy_net(&dev->wiphy), sock_net(skb->sk)))
			continue;
		if (++idx <= start)
			continue;
		if (nl80211_send_wiphy(skb, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, NLM_F_MULTI,
				       dev) < 0) {
			/* skb full: back up so this wiphy is retried */
			idx--;
			break;
		}
	}
	mutex_unlock(&cfg80211_mutex);

	cb->args[0] = idx;

	return skb->len;
}

/* doit for NL80211_CMD_GET_WIPHY: reply with a single wiphy description */
static int nl80211_get_wiphy(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	struct cfg80211_registered_device *dev = info->user_ptr[0];

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	if (nl80211_send_wiphy(msg, info->snd_pid, info->snd_seq, 0, dev) < 0) {
		nlmsg_free(msg);
		return -ENOBUFS;
	}

	return genlmsg_reply(msg, info);
}

static const struct nla_policy txq_params_policy[NL80211_TXQ_ATTR_MAX + 1] = {
	[NL80211_TXQ_ATTR_QUEUE] = { .type = NLA_U8 },
	[NL80211_TXQ_ATTR_TXOP] = { .type = NLA_U16 },
	[NL80211_TXQ_ATTR_CWMIN] = { .type = NLA_U16 },
	[NL80211_TXQ_ATTR_CWMAX] = { .type = NLA_U16 },
	[NL80211_TXQ_ATTR_AIFS] = { .type = NLA_U8 },
};

/*
 * Extract TX queue parameters from the parsed attribute table; all
 * five attributes are mandatory.
 */
static int parse_txq_params(struct nlattr *tb[],
			    struct ieee80211_txq_params *txq_params)
{
	if (!tb[NL80211_TXQ_ATTR_QUEUE] || !tb[NL80211_TXQ_ATTR_TXOP] ||
	    !tb[NL80211_TXQ_ATTR_CWMIN] || !tb[NL80211_TXQ_ATTR_CWMAX] ||
	    !tb[NL80211_TXQ_ATTR_AIFS])
		return -EINVAL;

	txq_params->queue = nla_get_u8(tb[NL80211_TXQ_ATTR_QUEUE]);
	txq_params->txop = nla_get_u16(tb[NL80211_TXQ_ATTR_TXOP]);
	txq_params->cwmin = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMIN]);
	txq_params->cwmax = nla_get_u16(tb[NL80211_TXQ_ATTR_CWMAX]);
	txq_params->aifs = nla_get_u8(tb[NL80211_TXQ_ATTR_AIFS]);

	return 0;
}

static bool
nl80211_can_set_dev_channel(struct wireless_dev *wdev) { /* * You can only set the channel explicitly for AP, mesh * and WDS type interfaces; all others have their channel * managed via their respective "establish a connection" * command (connect, join, ...) * * Monitors are special as they are normally slaved to * whatever else is going on, so they behave as though * you tried setting the wiphy channel itself. */ return !wdev || wdev->iftype == NL80211_IFTYPE_AP || wdev->iftype == NL80211_IFTYPE_WDS || wdev->iftype == NL80211_IFTYPE_MESH_POINT || wdev->iftype == NL80211_IFTYPE_MONITOR || wdev->iftype == NL80211_IFTYPE_P2P_GO; } static int __nl80211_set_channel(struct cfg80211_registered_device *rdev, struct wireless_dev *wdev, struct genl_info *info) { enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; u32 freq; int result; if (!info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; if (!nl80211_can_set_dev_channel(wdev)) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { channel_type = nla_get_u32(info->attrs[ NL80211_ATTR_WIPHY_CHANNEL_TYPE]); if (channel_type != NL80211_CHAN_NO_HT && channel_type != NL80211_CHAN_HT20 && channel_type != NL80211_CHAN_HT40PLUS && channel_type != NL80211_CHAN_HT40MINUS) return -EINVAL; } freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); mutex_lock(&rdev->devlist_mtx); if (wdev) { wdev_lock(wdev); result = cfg80211_set_freq(rdev, wdev, freq, channel_type); wdev_unlock(wdev); } else { result = cfg80211_set_freq(rdev, NULL, freq, channel_type); } mutex_unlock(&rdev->devlist_mtx); return result; } static int nl80211_set_channel(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *netdev = info->user_ptr[1]; return __nl80211_set_channel(rdev, netdev->ieee80211_ptr, info); } static int nl80211_set_wds_peer(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct 
net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; const u8 *bssid; if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (netif_running(dev)) return -EBUSY; if (!rdev->ops->set_wds_peer) return -EOPNOTSUPP; if (wdev->iftype != NL80211_IFTYPE_WDS) return -EOPNOTSUPP; bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); return rdev->ops->set_wds_peer(wdev->wiphy, dev, bssid); } static int nl80211_set_wiphy(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct net_device *netdev = NULL; struct wireless_dev *wdev; int result = 0, rem_txq_params = 0; struct nlattr *nl_txq_params; u32 changed; u8 retry_short = 0, retry_long = 0; u32 frag_threshold = 0, rts_threshold = 0; u8 coverage_class = 0; /* * Try to find the wiphy and netdev. Normally this * function shouldn't need the netdev, but this is * done for backward compatibility -- previously * setting the channel was done per wiphy, but now * it is per netdev. Previous userland like hostapd * also passed a netdev to set_wiphy, so that it is * possible to let that go to the right netdev! */ mutex_lock(&cfg80211_mutex); if (info->attrs[NL80211_ATTR_IFINDEX]) { int ifindex = nla_get_u32(info->attrs[NL80211_ATTR_IFINDEX]); netdev = dev_get_by_index(genl_info_net(info), ifindex); if (netdev && netdev->ieee80211_ptr) { rdev = wiphy_to_dev(netdev->ieee80211_ptr->wiphy); mutex_lock(&rdev->mtx); } else netdev = NULL; } if (!netdev) { rdev = __cfg80211_rdev_from_info(info); if (IS_ERR(rdev)) { mutex_unlock(&cfg80211_mutex); return PTR_ERR(rdev); } wdev = NULL; netdev = NULL; result = 0; mutex_lock(&rdev->mtx); } else if (netif_running(netdev) && nl80211_can_set_dev_channel(netdev->ieee80211_ptr)) wdev = netdev->ieee80211_ptr; else wdev = NULL; /* * end workaround code, by now the rdev is available * and locked, and wdev may or may not be NULL. 
*/ if (info->attrs[NL80211_ATTR_WIPHY_NAME]) result = cfg80211_dev_rename( rdev, nla_data(info->attrs[NL80211_ATTR_WIPHY_NAME])); mutex_unlock(&cfg80211_mutex); if (result) goto bad_res; if (info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS]) { struct ieee80211_txq_params txq_params; struct nlattr *tb[NL80211_TXQ_ATTR_MAX + 1]; if (!rdev->ops->set_txq_params) { result = -EOPNOTSUPP; goto bad_res; } nla_for_each_nested(nl_txq_params, info->attrs[NL80211_ATTR_WIPHY_TXQ_PARAMS], rem_txq_params) { nla_parse(tb, NL80211_TXQ_ATTR_MAX, nla_data(nl_txq_params), nla_len(nl_txq_params), txq_params_policy); result = parse_txq_params(tb, &txq_params); if (result) goto bad_res; result = rdev->ops->set_txq_params(&rdev->wiphy, &txq_params); if (result) goto bad_res; } } if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) { result = __nl80211_set_channel(rdev, wdev, info); if (result) goto bad_res; } if (info->attrs[NL80211_ATTR_WIPHY_TX_POWER_SETTING]) { enum nl80211_tx_power_setting type; int idx, mbm = 0; if (!rdev->ops->set_tx_power) { result = -EOPNOTSUPP; goto bad_res; } idx = NL80211_ATTR_WIPHY_TX_POWER_SETTING; type = nla_get_u32(info->attrs[idx]); if (!info->attrs[NL80211_ATTR_WIPHY_TX_POWER_LEVEL] && (type != NL80211_TX_POWER_AUTOMATIC)) { result = -EINVAL; goto bad_res; } if (type != NL80211_TX_POWER_AUTOMATIC) { idx = NL80211_ATTR_WIPHY_TX_POWER_LEVEL; mbm = nla_get_u32(info->attrs[idx]); } result = rdev->ops->set_tx_power(&rdev->wiphy, type, mbm); if (result) goto bad_res; } if (info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX] && info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]) { u32 tx_ant, rx_ant; if ((!rdev->wiphy.available_antennas_tx && !rdev->wiphy.available_antennas_rx) || !rdev->ops->set_antenna) { result = -EOPNOTSUPP; goto bad_res; } tx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_TX]); rx_ant = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_ANTENNA_RX]); /* reject antenna configurations which don't match the * available antenna masks, except for the "all" mask */ if ((~tx_ant 
&& (tx_ant & ~rdev->wiphy.available_antennas_tx)) || (~rx_ant && (rx_ant & ~rdev->wiphy.available_antennas_rx))) { result = -EINVAL; goto bad_res; } tx_ant = tx_ant & rdev->wiphy.available_antennas_tx; rx_ant = rx_ant & rdev->wiphy.available_antennas_rx; result = rdev->ops->set_antenna(&rdev->wiphy, tx_ant, rx_ant); if (result) goto bad_res; } changed = 0; if (info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]) { retry_short = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_SHORT]); if (retry_short == 0) { result = -EINVAL; goto bad_res; } changed |= WIPHY_PARAM_RETRY_SHORT; } if (info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]) { retry_long = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_RETRY_LONG]); if (retry_long == 0) { result = -EINVAL; goto bad_res; } changed |= WIPHY_PARAM_RETRY_LONG; } if (info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]) { frag_threshold = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_FRAG_THRESHOLD]); if (frag_threshold < 256) { result = -EINVAL; goto bad_res; } if (frag_threshold != (u32) -1) { /* * Fragments (apart from the last one) are required to * have even length. Make the fragmentation code * simpler by stripping LSB should someone try to use * odd threshold value. 
*/ frag_threshold &= ~0x1; } changed |= WIPHY_PARAM_FRAG_THRESHOLD; } if (info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]) { rts_threshold = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_RTS_THRESHOLD]); changed |= WIPHY_PARAM_RTS_THRESHOLD; } if (info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]) { coverage_class = nla_get_u8( info->attrs[NL80211_ATTR_WIPHY_COVERAGE_CLASS]); changed |= WIPHY_PARAM_COVERAGE_CLASS; } if (changed) { u8 old_retry_short, old_retry_long; u32 old_frag_threshold, old_rts_threshold; u8 old_coverage_class; if (!rdev->ops->set_wiphy_params) { result = -EOPNOTSUPP; goto bad_res; } old_retry_short = rdev->wiphy.retry_short; old_retry_long = rdev->wiphy.retry_long; old_frag_threshold = rdev->wiphy.frag_threshold; old_rts_threshold = rdev->wiphy.rts_threshold; old_coverage_class = rdev->wiphy.coverage_class; if (changed & WIPHY_PARAM_RETRY_SHORT) rdev->wiphy.retry_short = retry_short; if (changed & WIPHY_PARAM_RETRY_LONG) rdev->wiphy.retry_long = retry_long; if (changed & WIPHY_PARAM_FRAG_THRESHOLD) rdev->wiphy.frag_threshold = frag_threshold; if (changed & WIPHY_PARAM_RTS_THRESHOLD) rdev->wiphy.rts_threshold = rts_threshold; if (changed & WIPHY_PARAM_COVERAGE_CLASS) rdev->wiphy.coverage_class = coverage_class; result = rdev->ops->set_wiphy_params(&rdev->wiphy, changed); if (result) { rdev->wiphy.retry_short = old_retry_short; rdev->wiphy.retry_long = old_retry_long; rdev->wiphy.frag_threshold = old_frag_threshold; rdev->wiphy.rts_threshold = old_rts_threshold; rdev->wiphy.coverage_class = old_coverage_class; } } bad_res: mutex_unlock(&rdev->mtx); if (netdev) dev_put(netdev); return result; } static int nl80211_send_iface(struct sk_buff *msg, u32 pid, u32 seq, int flags, struct cfg80211_registered_device *rdev, struct net_device *dev) { void *hdr; hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_INTERFACE); if (!hdr) return -1; NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx); 
NLA_PUT_STRING(msg, NL80211_ATTR_IFNAME, dev->name); NLA_PUT_U32(msg, NL80211_ATTR_IFTYPE, dev->ieee80211_ptr->iftype); NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->devlist_generation ^ (cfg80211_rdev_list_generation << 2)); return genlmsg_end(msg, hdr); nla_put_failure: genlmsg_cancel(msg, hdr); return -EMSGSIZE; } static int nl80211_dump_interface(struct sk_buff *skb, struct netlink_callback *cb) { int wp_idx = 0; int if_idx = 0; int wp_start = cb->args[0]; int if_start = cb->args[1]; struct cfg80211_registered_device *rdev; struct wireless_dev *wdev; mutex_lock(&cfg80211_mutex); list_for_each_entry(rdev, &cfg80211_rdev_list, list) { if (!net_eq(wiphy_net(&rdev->wiphy), sock_net(skb->sk))) continue; if (wp_idx < wp_start) { wp_idx++; continue; } if_idx = 0; mutex_lock(&rdev->devlist_mtx); list_for_each_entry(wdev, &rdev->netdev_list, list) { if (if_idx < if_start) { if_idx++; continue; } if (nl80211_send_iface(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, rdev, wdev->netdev) < 0) { mutex_unlock(&rdev->devlist_mtx); goto out; } if_idx++; } mutex_unlock(&rdev->devlist_mtx); wp_idx++; } out: mutex_unlock(&cfg80211_mutex); cb->args[0] = wp_idx; cb->args[1] = if_idx; return skb->len; } static int nl80211_get_interface(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *msg; struct cfg80211_registered_device *dev = info->user_ptr[0]; struct net_device *netdev = info->user_ptr[1]; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; if (nl80211_send_iface(msg, info->snd_pid, info->snd_seq, 0, dev, netdev) < 0) { nlmsg_free(msg); return -ENOBUFS; } return genlmsg_reply(msg, info); } static const struct nla_policy mntr_flags_policy[NL80211_MNTR_FLAG_MAX + 1] = { [NL80211_MNTR_FLAG_FCSFAIL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_PLCPFAIL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_CONTROL] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_OTHER_BSS] = { .type = NLA_FLAG }, [NL80211_MNTR_FLAG_COOK_FRAMES] = { .type = 
NLA_FLAG },
};

/* translate the nested monitor-flag attributes into a bitmask */
static int parse_monitor_flags(struct nlattr *nla, u32 *mntrflags)
{
	struct nlattr *flags[NL80211_MNTR_FLAG_MAX + 1];
	int flag;

	*mntrflags = 0;

	if (!nla)
		return -EINVAL;

	if (nla_parse_nested(flags, NL80211_MNTR_FLAG_MAX,
			     nla, mntr_flags_policy))
		return -EINVAL;

	for (flag = 1; flag <= NL80211_MNTR_FLAG_MAX; flag++)
		if (flags[flag])
			*mntrflags |= (1<<flag);

	return 0;
}

/*
 * Validate a requested 4-address (WDS frame format) setting for the
 * given interface type against the wiphy's advertised capabilities.
 */
static int nl80211_valid_4addr(struct cfg80211_registered_device *rdev,
			       struct net_device *netdev, u8 use_4addr,
			       enum nl80211_iftype iftype)
{
	if (!use_4addr) {
		/* cannot turn 4addr off while enslaved to a bridge */
		if (netdev && (netdev->priv_flags & IFF_BRIDGE_PORT))
			return -EBUSY;
		return 0;
	}

	switch (iftype) {
	case NL80211_IFTYPE_AP_VLAN:
		if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_AP)
			return 0;
		break;
	case NL80211_IFTYPE_STATION:
		if (rdev->wiphy.flags & WIPHY_FLAG_4ADDR_STATION)
			return 0;
		break;
	default:
		break;
	}

	return -EOPNOTSUPP;
}

/*
 * NL80211_CMD_SET_INTERFACE handler: change interface type and/or
 * type-specific parameters (mesh ID, 4addr mode, monitor flags).
 */
static int nl80211_set_interface(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct vif_params params;
	int err;
	enum nl80211_iftype otype, ntype;
	struct net_device *dev = info->user_ptr[1];
	u32 _flags, *flags = NULL;
	bool change = false;

	memset(&params, 0, sizeof(params));

	otype = ntype = dev->ieee80211_ptr->iftype;

	if (info->attrs[NL80211_ATTR_IFTYPE]) {
		ntype = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]);
		if (otype != ntype)
			change = true;
		if (ntype > NL80211_IFTYPE_MAX)
			return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_MESH_ID]) {
		struct wireless_dev *wdev = dev->ieee80211_ptr;

		if (ntype != NL80211_IFTYPE_MESH_POINT)
			return -EINVAL;
		if (netif_running(dev))
			return -EBUSY;

		wdev_lock(wdev);
		/* the mesh ID is stored in the (same-sized) SSID buffer */
		BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN !=
			     IEEE80211_MAX_MESH_ID_LEN);
		wdev->mesh_id_up_len =
			nla_len(info->attrs[NL80211_ATTR_MESH_ID]);
		memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]),
		       wdev->mesh_id_up_len);
		wdev_unlock(wdev);
	}

	if (info->attrs[NL80211_ATTR_4ADDR]) {
		params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]);
		change = true;
err = nl80211_valid_4addr(rdev, dev, params.use_4addr, ntype); if (err) return err; } else { params.use_4addr = -1; } if (info->attrs[NL80211_ATTR_MNTR_FLAGS]) { if (ntype != NL80211_IFTYPE_MONITOR) return -EINVAL; err = parse_monitor_flags(info->attrs[NL80211_ATTR_MNTR_FLAGS], &_flags); if (err) return err; flags = &_flags; change = true; } if (change) err = cfg80211_change_iface(rdev, dev, ntype, flags, &params); else err = 0; if (!err && params.use_4addr != -1) dev->ieee80211_ptr->use_4addr = params.use_4addr; return err; } static int nl80211_new_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct vif_params params; struct net_device *dev; int err; enum nl80211_iftype type = NL80211_IFTYPE_UNSPECIFIED; u32 flags; memset(&params, 0, sizeof(params)); if (!info->attrs[NL80211_ATTR_IFNAME]) return -EINVAL; if (info->attrs[NL80211_ATTR_IFTYPE]) { type = nla_get_u32(info->attrs[NL80211_ATTR_IFTYPE]); if (type > NL80211_IFTYPE_MAX) return -EINVAL; } if (!rdev->ops->add_virtual_intf || !(rdev->wiphy.interface_modes & (1 << type))) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_4ADDR]) { params.use_4addr = !!nla_get_u8(info->attrs[NL80211_ATTR_4ADDR]); err = nl80211_valid_4addr(rdev, NULL, params.use_4addr, type); if (err) return err; } err = parse_monitor_flags(type == NL80211_IFTYPE_MONITOR ? info->attrs[NL80211_ATTR_MNTR_FLAGS] : NULL, &flags); dev = rdev->ops->add_virtual_intf(&rdev->wiphy, nla_data(info->attrs[NL80211_ATTR_IFNAME]), type, err ? 
NULL : &flags, &params); if (IS_ERR(dev)) return PTR_ERR(dev); if (type == NL80211_IFTYPE_MESH_POINT && info->attrs[NL80211_ATTR_MESH_ID]) { struct wireless_dev *wdev = dev->ieee80211_ptr; wdev_lock(wdev); BUILD_BUG_ON(IEEE80211_MAX_SSID_LEN != IEEE80211_MAX_MESH_ID_LEN); wdev->mesh_id_up_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]); memcpy(wdev->ssid, nla_data(info->attrs[NL80211_ATTR_MESH_ID]), wdev->mesh_id_up_len); wdev_unlock(wdev); } return 0; } static int nl80211_del_interface(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; if (!rdev->ops->del_virtual_intf) return -EOPNOTSUPP; return rdev->ops->del_virtual_intf(&rdev->wiphy, dev); } struct get_key_cookie { struct sk_buff *msg; int error; int idx; }; static void get_key_callback(void *c, struct key_params *params) { struct nlattr *key; struct get_key_cookie *cookie = c; if (params->key) NLA_PUT(cookie->msg, NL80211_ATTR_KEY_DATA, params->key_len, params->key); if (params->seq) NLA_PUT(cookie->msg, NL80211_ATTR_KEY_SEQ, params->seq_len, params->seq); if (params->cipher) NLA_PUT_U32(cookie->msg, NL80211_ATTR_KEY_CIPHER, params->cipher); key = nla_nest_start(cookie->msg, NL80211_ATTR_KEY); if (!key) goto nla_put_failure; if (params->key) NLA_PUT(cookie->msg, NL80211_KEY_DATA, params->key_len, params->key); if (params->seq) NLA_PUT(cookie->msg, NL80211_KEY_SEQ, params->seq_len, params->seq); if (params->cipher) NLA_PUT_U32(cookie->msg, NL80211_KEY_CIPHER, params->cipher); NLA_PUT_U8(cookie->msg, NL80211_ATTR_KEY_IDX, cookie->idx); nla_nest_end(cookie->msg, key); return; nla_put_failure: cookie->error = 1; } static int nl80211_get_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int err; struct net_device *dev = info->user_ptr[1]; u8 key_idx = 0; const u8 *mac_addr = NULL; bool pairwise; struct get_key_cookie cookie = { .error = 0, }; void *hdr; 
struct sk_buff *msg; if (info->attrs[NL80211_ATTR_KEY_IDX]) key_idx = nla_get_u8(info->attrs[NL80211_ATTR_KEY_IDX]); if (key_idx > 5) return -EINVAL; if (info->attrs[NL80211_ATTR_MAC]) mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]); pairwise = !!mac_addr; if (info->attrs[NL80211_ATTR_KEY_TYPE]) { u32 kt = nla_get_u32(info->attrs[NL80211_ATTR_KEY_TYPE]); if (kt >= NUM_NL80211_KEYTYPES) return -EINVAL; if (kt != NL80211_KEYTYPE_GROUP && kt != NL80211_KEYTYPE_PAIRWISE) return -EINVAL; pairwise = kt == NL80211_KEYTYPE_PAIRWISE; } if (!rdev->ops->get_key) return -EOPNOTSUPP; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, NL80211_CMD_NEW_KEY); if (IS_ERR(hdr)) return PTR_ERR(hdr); cookie.msg = msg; cookie.idx = key_idx; NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex); NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_idx); if (mac_addr) NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr); if (pairwise && mac_addr && !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN)) return -ENOENT; err = rdev->ops->get_key(&rdev->wiphy, dev, key_idx, pairwise, mac_addr, &cookie, get_key_callback); if (err) goto free_msg; if (cookie.error) goto nla_put_failure; genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_set_key(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct key_parse key; int err; struct net_device *dev = info->user_ptr[1]; err = nl80211_parse_key(info, &key); if (err) return err; if (key.idx < 0) return -EINVAL; /* only support setting default key */ if (!key.def && !key.defmgmt) return -EINVAL; wdev_lock(dev->ieee80211_ptr); if (key.def) { if (!rdev->ops->set_default_key) { err = -EOPNOTSUPP; goto out; } err = nl80211_key_allowed(dev->ieee80211_ptr); if (err) goto out; err = rdev->ops->set_default_key(&rdev->wiphy, dev, key.idx, 
						 key.def_uni, key.def_multi);
		if (err)
			goto out;

#ifdef CONFIG_CFG80211_WEXT
		/* keep the wext compat state in sync with the new default */
		dev->ieee80211_ptr->wext.default_key = key.idx;
#endif
	} else {
		/* a default management key must be group-addressed only */
		if (key.def_uni || !key.def_multi) {
			err = -EINVAL;
			goto out;
		}

		if (!rdev->ops->set_default_mgmt_key) {
			err = -EOPNOTSUPP;
			goto out;
		}

		err = nl80211_key_allowed(dev->ieee80211_ptr);
		if (err)
			goto out;

		err = rdev->ops->set_default_mgmt_key(&rdev->wiphy,
						      dev, key.idx);
		if (err)
			goto out;

#ifdef CONFIG_CFG80211_WEXT
		dev->ieee80211_ptr->wext.default_mgmt_key = key.idx;
#endif
	}

 out:
	wdev_unlock(dev->ieee80211_ptr);

	return err;
}

/* NL80211_CMD_NEW_KEY: install a new pairwise or group key. */
static int nl80211_new_key(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	int err;
	struct net_device *dev = info->user_ptr[1];
	struct key_parse key;
	const u8 *mac_addr = NULL;

	err = nl80211_parse_key(info, &key);
	if (err)
		return err;

	if (!key.p.key)
		return -EINVAL;

	if (info->attrs[NL80211_ATTR_MAC])
		mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);

	/* no explicit key type: infer from the presence of a MAC address */
	if (key.type == -1) {
		if (mac_addr)
			key.type = NL80211_KEYTYPE_PAIRWISE;
		else
			key.type = NL80211_KEYTYPE_GROUP;
	}

	/* for now */
	if (key.type != NL80211_KEYTYPE_PAIRWISE &&
	    key.type != NL80211_KEYTYPE_GROUP)
		return -EINVAL;

	if (!rdev->ops->add_key)
		return -EOPNOTSUPP;

	if (cfg80211_validate_key_settings(rdev, &key.p, key.idx,
					   key.type == NL80211_KEYTYPE_PAIRWISE,
					   mac_addr))
		return -EINVAL;

	wdev_lock(dev->ieee80211_ptr);
	err = nl80211_key_allowed(dev->ieee80211_ptr);
	if (!err)
		err = rdev->ops->add_key(&rdev->wiphy, dev, key.idx,
					 key.type == NL80211_KEYTYPE_PAIRWISE,
					 mac_addr, &key.p);
	wdev_unlock(dev->ieee80211_ptr);

	return err;
}

/* NL80211_CMD_DEL_KEY: remove a key; clears wext default-key bookkeeping. */
static int nl80211_del_key(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	int err;
	struct net_device *dev = info->user_ptr[1];
	u8 *mac_addr = NULL;
	struct key_parse key;

	err = nl80211_parse_key(info, &key);
	if (err)
		return err;

	if (info->attrs[NL80211_ATTR_MAC])
		mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);

	/* no explicit key type: infer from the presence of a MAC address */
	if (key.type == -1) {
		if (mac_addr)
			key.type = NL80211_KEYTYPE_PAIRWISE;
		else
			key.type = NL80211_KEYTYPE_GROUP;
	}

	/* for now */
	if (key.type != NL80211_KEYTYPE_PAIRWISE &&
	    key.type != NL80211_KEYTYPE_GROUP)
		return -EINVAL;

	if (!rdev->ops->del_key)
		return -EOPNOTSUPP;

	wdev_lock(dev->ieee80211_ptr);
	err = nl80211_key_allowed(dev->ieee80211_ptr);

	/* per-peer pairwise keys need IBSS RSN support in the wiphy */
	if (key.type == NL80211_KEYTYPE_PAIRWISE && mac_addr &&
	    !(rdev->wiphy.flags & WIPHY_FLAG_IBSS_RSN))
		err = -ENOENT;

	if (!err)
		err = rdev->ops->del_key(&rdev->wiphy, dev, key.idx,
					 key.type == NL80211_KEYTYPE_PAIRWISE,
					 mac_addr);

#ifdef CONFIG_CFG80211_WEXT
	if (!err) {
		if (key.idx == dev->ieee80211_ptr->wext.default_key)
			dev->ieee80211_ptr->wext.default_key = -1;
		else if (key.idx == dev->ieee80211_ptr->wext.default_mgmt_key)
			dev->ieee80211_ptr->wext.default_mgmt_key = -1;
	}
#endif
	wdev_unlock(dev->ieee80211_ptr);

	return err;
}

/* Shared handler for NL80211_CMD_NEW_BEACON and NL80211_CMD_SET_BEACON. */
static int nl80211_addset_beacon(struct sk_buff *skb, struct genl_info *info)
{
	int (*call)(struct wiphy *wiphy, struct net_device *dev,
		    struct beacon_parameters *info);
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct beacon_parameters params;
	int haveinfo = 0, err;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_BEACON_TAIL]))
		return -EINVAL;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
		return -EOPNOTSUPP;

	memset(&params, 0, sizeof(params));

	switch (info->genlhdr->cmd) {
	case NL80211_CMD_NEW_BEACON:
		/* these are required for NEW_BEACON */
		if (!info->attrs[NL80211_ATTR_BEACON_INTERVAL] ||
		    !info->attrs[NL80211_ATTR_DTIM_PERIOD] ||
		    !info->attrs[NL80211_ATTR_BEACON_HEAD])
			return -EINVAL;

		params.interval =
			nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
		params.dtim_period =
			nla_get_u32(info->attrs[NL80211_ATTR_DTIM_PERIOD]);

		err = cfg80211_validate_beacon_int(rdev, params.interval);
		if (err)
			return err;

		call = rdev->ops->add_beacon;
		break;
	case NL80211_CMD_SET_BEACON:
		call = rdev->ops->set_beacon;
		break;
	default:
		WARN_ON(1);
		return -EOPNOTSUPP;
	}

	if (!call)
		return -EOPNOTSUPP;

	if (info->attrs[NL80211_ATTR_BEACON_HEAD]) {
		params.head = nla_data(info->attrs[NL80211_ATTR_BEACON_HEAD]);
		params.head_len =
		    nla_len(info->attrs[NL80211_ATTR_BEACON_HEAD]);
		haveinfo = 1;
	}

	if (info->attrs[NL80211_ATTR_BEACON_TAIL]) {
		params.tail = nla_data(info->attrs[NL80211_ATTR_BEACON_TAIL]);
		params.tail_len =
		    nla_len(info->attrs[NL80211_ATTR_BEACON_TAIL]);
		haveinfo = 1;
	}

	/* a beacon change with neither head nor tail is a no-op: reject */
	if (!haveinfo)
		return -EINVAL;

	err = call(&rdev->wiphy, dev, &params);
	if (!err && params.interval)
		wdev->beacon_interval = params.interval;
	return err;
}

/* NL80211_CMD_DEL_BEACON: stop beaconing on an AP/P2P-GO interface. */
static int nl80211_del_beacon(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	int err;

	if (!rdev->ops->del_beacon)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
		return -EOPNOTSUPP;
	err = rdev->ops->del_beacon(&rdev->wiphy, dev);
	if (!err)
		wdev->beacon_interval = 0;
	return err;
}

static const struct nla_policy sta_flags_policy[NL80211_STA_FLAG_MAX + 1] = {
	[NL80211_STA_FLAG_AUTHORIZED] = { .type = NLA_FLAG },
	[NL80211_STA_FLAG_SHORT_PREAMBLE] = { .type = NLA_FLAG },
	[NL80211_STA_FLAG_WME] = { .type = NLA_FLAG },
	[NL80211_STA_FLAG_MFP] = { .type = NLA_FLAG },
	[NL80211_STA_FLAG_AUTHENTICATED] = { .type = NLA_FLAG },
};

/* Parse station flags from either new STA_FLAGS2 or the old STA_FLAGS. */
static int parse_station_flags(struct genl_info *info,
			       struct station_parameters *params)
{
	struct nlattr *flags[NL80211_STA_FLAG_MAX + 1];
	struct nlattr *nla;
	int flag;

	/*
	 * Try parsing the new attribute first so userspace
	 * can specify both for older kernels.
	 */
	nla = info->attrs[NL80211_ATTR_STA_FLAGS2];
	if (nla) {
		struct nl80211_sta_flag_update *sta_flags;

		sta_flags = nla_data(nla);
		params->sta_flags_mask = sta_flags->mask;
		params->sta_flags_set = sta_flags->set;
		if ((params->sta_flags_mask |
		     params->sta_flags_set) & BIT(__NL80211_STA_FLAG_INVALID))
			return -EINVAL;
		return 0;
	}

	/* if present, parse the old attribute */

	nla = info->attrs[NL80211_ATTR_STA_FLAGS];
	if (!nla)
		return 0;

	if (nla_parse_nested(flags, NL80211_STA_FLAG_MAX,
			     nla, sta_flags_policy))
		return -EINVAL;

	/* the old attribute implicitly covers all defined flags except bit 0 */
	params->sta_flags_mask = (1 << __NL80211_STA_FLAG_AFTER_LAST) - 1;
	params->sta_flags_mask &= ~1;

	for (flag = 1; flag <= NL80211_STA_FLAG_MAX; flag++)
		if (flags[flag])
			params->sta_flags_set |= (1<<flag);

	return 0;
}

/* Emit a nested rate_info attribute; false if the msg ran out of room. */
static bool nl80211_put_sta_rate(struct sk_buff *msg, struct rate_info *info,
				 int attr)
{
	struct nlattr *rate;
	u16 bitrate;

	rate = nla_nest_start(msg, attr);
	if (!rate)
		goto nla_put_failure;

	/* cfg80211_calculate_bitrate will return 0 for mcs >= 32 */
	bitrate = cfg80211_calculate_bitrate(info);
	if (bitrate > 0)
		NLA_PUT_U16(msg, NL80211_RATE_INFO_BITRATE, bitrate);

	if (info->flags & RATE_INFO_FLAGS_MCS)
		NLA_PUT_U8(msg, NL80211_RATE_INFO_MCS, info->mcs);
	if (info->flags & RATE_INFO_FLAGS_40_MHZ_WIDTH)
		NLA_PUT_FLAG(msg, NL80211_RATE_INFO_40_MHZ_WIDTH);
	if (info->flags & RATE_INFO_FLAGS_SHORT_GI)
		NLA_PUT_FLAG(msg, NL80211_RATE_INFO_SHORT_GI);

	nla_nest_end(msg, rate);

	return true;

nla_put_failure:
	return false;
}

/* Build one NL80211_CMD_NEW_STATION message for mac_addr/sinfo. */
static int nl80211_send_station(struct sk_buff *msg, u32 pid, u32 seq,
				int flags, struct net_device *dev,
				const u8 *mac_addr, struct station_info *sinfo)
{
	void *hdr;
	struct nlattr *sinfoattr, *bss_param;

	hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
	if (!hdr)
		return -1;

	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);
	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, sinfo->generation);

	sinfoattr = nla_nest_start(msg, NL80211_ATTR_STA_INFO);
	if (!sinfoattr)
		goto nla_put_failure;
	/* each field is only emitted when its STATION_INFO_* bit is set */
	if (sinfo->filled & STATION_INFO_CONNECTED_TIME)
		NLA_PUT_U32(msg, NL80211_STA_INFO_CONNECTED_TIME,
			    sinfo->connected_time);
	if (sinfo->filled & STATION_INFO_INACTIVE_TIME)
		NLA_PUT_U32(msg, NL80211_STA_INFO_INACTIVE_TIME,
			    sinfo->inactive_time);
	if (sinfo->filled & STATION_INFO_RX_BYTES)
		NLA_PUT_U32(msg, NL80211_STA_INFO_RX_BYTES,
			    sinfo->rx_bytes);
	if (sinfo->filled & STATION_INFO_TX_BYTES)
		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_BYTES,
			    sinfo->tx_bytes);
	if (sinfo->filled & STATION_INFO_LLID)
		NLA_PUT_U16(msg, NL80211_STA_INFO_LLID,
			    sinfo->llid);
	if (sinfo->filled & STATION_INFO_PLID)
		NLA_PUT_U16(msg, NL80211_STA_INFO_PLID,
			    sinfo->plid);
	if (sinfo->filled & STATION_INFO_PLINK_STATE)
		NLA_PUT_U8(msg, NL80211_STA_INFO_PLINK_STATE,
			   sinfo->plink_state);
	if (sinfo->filled & STATION_INFO_SIGNAL)
		NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL,
			   sinfo->signal);
	if (sinfo->filled & STATION_INFO_SIGNAL_AVG)
		NLA_PUT_U8(msg, NL80211_STA_INFO_SIGNAL_AVG,
			   sinfo->signal_avg);
	if (sinfo->filled & STATION_INFO_TX_BITRATE) {
		if (!nl80211_put_sta_rate(msg, &sinfo->txrate,
					  NL80211_STA_INFO_TX_BITRATE))
			goto nla_put_failure;
	}
	if (sinfo->filled & STATION_INFO_RX_BITRATE) {
		if (!nl80211_put_sta_rate(msg, &sinfo->rxrate,
					  NL80211_STA_INFO_RX_BITRATE))
			goto nla_put_failure;
	}
	if (sinfo->filled & STATION_INFO_RX_PACKETS)
		NLA_PUT_U32(msg, NL80211_STA_INFO_RX_PACKETS,
			    sinfo->rx_packets);
	if (sinfo->filled & STATION_INFO_TX_PACKETS)
		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_PACKETS,
			    sinfo->tx_packets);
	if (sinfo->filled & STATION_INFO_TX_RETRIES)
		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_RETRIES,
			    sinfo->tx_retries);
	if (sinfo->filled & STATION_INFO_TX_FAILED)
		NLA_PUT_U32(msg, NL80211_STA_INFO_TX_FAILED,
			    sinfo->tx_failed);
	if (sinfo->filled & STATION_INFO_BSS_PARAM) {
		bss_param = nla_nest_start(msg, NL80211_STA_INFO_BSS_PARAM);
		if (!bss_param)
			goto nla_put_failure;

		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_CTS_PROT)
			NLA_PUT_FLAG(msg, NL80211_STA_BSS_PARAM_CTS_PROT);
		if (sinfo->bss_param.flags &
		    BSS_PARAM_FLAGS_SHORT_PREAMBLE)
			NLA_PUT_FLAG(msg,
				     NL80211_STA_BSS_PARAM_SHORT_PREAMBLE);
		if (sinfo->bss_param.flags & BSS_PARAM_FLAGS_SHORT_SLOT_TIME)
			NLA_PUT_FLAG(msg,
				     NL80211_STA_BSS_PARAM_SHORT_SLOT_TIME);
		NLA_PUT_U8(msg, NL80211_STA_BSS_PARAM_DTIM_PERIOD,
			   sinfo->bss_param.dtim_period);
		NLA_PUT_U16(msg, NL80211_STA_BSS_PARAM_BEACON_INTERVAL,
			    sinfo->bss_param.beacon_interval);

		nla_nest_end(msg, bss_param);
	}
	nla_nest_end(msg, sinfoattr);
	if (sinfo->filled & STATION_INFO_ASSOC_REQ_IES)
		NLA_PUT(msg, NL80211_ATTR_IE, sinfo->assoc_req_ies_len,
			sinfo->assoc_req_ies);

	return genlmsg_end(msg, hdr);

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

/* Dump-callback: iterate the driver's station table one skb at a time. */
static int nl80211_dump_station(struct sk_buff *skb,
				struct netlink_callback *cb)
{
	struct station_info sinfo;
	struct cfg80211_registered_device *dev;
	struct net_device *netdev;
	u8 mac_addr[ETH_ALEN];
	int sta_idx = cb->args[1];	/* resume point across dump calls */
	int err;

	err = nl80211_prepare_netdev_dump(skb, cb, &dev, &netdev);
	if (err)
		return err;

	if (!dev->ops->dump_station) {
		err = -EOPNOTSUPP;
		goto out_err;
	}

	while (1) {
		memset(&sinfo, 0, sizeof(sinfo));
		err = dev->ops->dump_station(&dev->wiphy, netdev, sta_idx,
					     mac_addr, &sinfo);
		if (err == -ENOENT)
			break;	/* -ENOENT means "no more stations" */
		if (err)
			goto out_err;

		if (nl80211_send_station(skb,
				NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				netdev, mac_addr,
				&sinfo) < 0)
			goto out;	/* skb full; continue next call */

		sta_idx++;
	}


 out:
	cb->args[1] = sta_idx;
	err = skb->len;
 out_err:
	nl80211_finish_netdev_dump(dev);

	return err;
}

/* NL80211_CMD_GET_STATION: info about one station by MAC address. */
static int nl80211_get_station(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct station_info sinfo;
	struct sk_buff *msg;
	u8 *mac_addr = NULL;
	int err;

	memset(&sinfo, 0, sizeof(sinfo));

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);

	if (!rdev->ops->get_station)
		return -EOPNOTSUPP;

	err = rdev->ops->get_station(&rdev->wiphy, dev, mac_addr, &sinfo);
	if (err)
		return err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	if (nl80211_send_station(msg, info->snd_pid, info->snd_seq, 0,
				 dev, mac_addr, &sinfo) < 0) {
		nlmsg_free(msg);
		return -ENOBUFS;
	}

	return genlmsg_reply(msg, info);
}

/*
 * Get vlan interface making sure it is running and on the right wiphy.
 */
static int get_vlan(struct genl_info *info,
		    struct cfg80211_registered_device *rdev,
		    struct net_device **vlan)
{
	struct nlattr *vlanattr = info->attrs[NL80211_ATTR_STA_VLAN];
	*vlan = NULL;

	if (vlanattr) {
		/*
		 * dev_get_by_index() takes a reference; *vlan stays set
		 * even on the error paths so callers dev_put() it.
		 */
		*vlan = dev_get_by_index(genl_info_net(info),
					 nla_get_u32(vlanattr));
		if (!*vlan)
			return -ENODEV;
		if (!(*vlan)->ieee80211_ptr)
			return -EINVAL;
		if ((*vlan)->ieee80211_ptr->wiphy != &rdev->wiphy)
			return -EINVAL;
		if (!netif_running(*vlan))
			return -ENETDOWN;
	}
	return 0;
}

/* NL80211_CMD_SET_STATION: modify an existing station entry. */
static int nl80211_set_station(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	int err;
	struct net_device *dev = info->user_ptr[1];
	struct station_parameters params;
	u8 *mac_addr = NULL;

	memset(&params, 0, sizeof(params));

	/* -1 means "not supplied" for these two */
	params.listen_interval = -1;
	params.plink_state = -1;

	if (info->attrs[NL80211_ATTR_STA_AID])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);

	if (info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]) {
		params.supported_rates =
			nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
		params.supported_rates_len =
			nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
	}

	if (info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
		params.listen_interval =
		    nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);

	if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
		params.ht_capa =
			nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);

	if (parse_station_flags(info, &params))
		return -EINVAL;

	if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
		params.plink_action =
		    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);

	if (info->attrs[NL80211_ATTR_STA_PLINK_STATE])
		params.plink_state =
		    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_STATE]);

	err = get_vlan(info, rdev, &params.vlan);
	if (err)
		goto out;

	/* validate settings */
	err = 0;

	switch (dev->ieee80211_ptr->iftype) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_AP_VLAN:
	case NL80211_IFTYPE_P2P_GO:
		/* disallow mesh-specific things */
		if (params.plink_action)
			err = -EINVAL;
		break;
	case NL80211_IFTYPE_P2P_CLIENT:
	case NL80211_IFTYPE_STATION:
		/* disallow everything but AUTHORIZED flag */
		if (params.plink_action)
			err = -EINVAL;
		if (params.vlan)
			err = -EINVAL;
		if (params.supported_rates)
			err = -EINVAL;
		if (params.ht_capa)
			err = -EINVAL;
		if (params.listen_interval >= 0)
			err = -EINVAL;
		if (params.sta_flags_mask &
				~BIT(NL80211_STA_FLAG_AUTHORIZED))
			err = -EINVAL;
		break;
	case NL80211_IFTYPE_MESH_POINT:
		/* disallow things mesh doesn't support */
		if (params.vlan)
			err = -EINVAL;
		if (params.ht_capa)
			err = -EINVAL;
		if (params.listen_interval >= 0)
			err = -EINVAL;
		if (params.sta_flags_mask &
				~(BIT(NL80211_STA_FLAG_AUTHENTICATED) |
				  BIT(NL80211_STA_FLAG_MFP) |
				  BIT(NL80211_STA_FLAG_AUTHORIZED)))
			err = -EINVAL;
		break;
	default:
		err = -EINVAL;
	}

	if (err)
		goto out;

	if (!rdev->ops->change_station) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = rdev->ops->change_station(&rdev->wiphy, dev, mac_addr, &params);

 out:
	/* drop the reference get_vlan() took */
	if (params.vlan)
		dev_put(params.vlan);

	return err;
}

/* NL80211_CMD_NEW_STATION: add a station entry (AP/mesh interfaces). */
static int nl80211_new_station(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	int err;
	struct net_device *dev = info->user_ptr[1];
	struct station_parameters params;
	u8 *mac_addr = NULL;

	memset(&params, 0, sizeof(params));

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_STA_AID])
		return -EINVAL;

	mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);
	params.supported_rates =
		nla_data(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
	params.supported_rates_len =
		nla_len(info->attrs[NL80211_ATTR_STA_SUPPORTED_RATES]);
	params.listen_interval =
		nla_get_u16(info->attrs[NL80211_ATTR_STA_LISTEN_INTERVAL]);
	params.aid = nla_get_u16(info->attrs[NL80211_ATTR_STA_AID]);
	if (!params.aid || params.aid > IEEE80211_MAX_AID)
		return -EINVAL;

	if (info->attrs[NL80211_ATTR_HT_CAPABILITY])
		params.ht_capa =
			nla_data(info->attrs[NL80211_ATTR_HT_CAPABILITY]);

	if (info->attrs[NL80211_ATTR_STA_PLINK_ACTION])
		params.plink_action =
		    nla_get_u8(info->attrs[NL80211_ATTR_STA_PLINK_ACTION]);

	if (parse_station_flags(info, &params))
		return -EINVAL;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
		return -EINVAL;

	err = get_vlan(info, rdev, &params.vlan);
	if (err)
		goto out;

	/* validate settings */
	err = 0;

	if (!rdev->ops->add_station) {
		err = -EOPNOTSUPP;
		goto out;
	}

	err = rdev->ops->add_station(&rdev->wiphy, dev, mac_addr, &params);

 out:
	/* drop the reference get_vlan() took */
	if (params.vlan)
		dev_put(params.vlan);
	return err;
}

/* NL80211_CMD_DEL_STATION: remove one station (NULL MAC = all). */
static int nl80211_del_station(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	u8 *mac_addr = NULL;

	if (info->attrs[NL80211_ATTR_MAC])
		mac_addr = nla_data(info->attrs[NL80211_ATTR_MAC]);

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
		return -EINVAL;

	if (!rdev->ops->del_station)
		return -EOPNOTSUPP;

	return rdev->ops->del_station(&rdev->wiphy, dev, mac_addr);
}

/*
 * Build one mesh-path message.  NOTE(review): this deliberately reuses
 * NL80211_CMD_NEW_STATION as the command for mpath replies, matching
 * the original behaviour — confirm against userspace before changing.
 */
static int nl80211_send_mpath(struct sk_buff *msg, u32 pid, u32 seq,
				int flags, struct net_device *dev,
				u8 *dst, u8 *next_hop,
				struct mpath_info *pinfo)
{
	void *hdr;
	struct nlattr *pinfoattr;

	hdr = nl80211hdr_put(msg, pid, seq, flags, NL80211_CMD_NEW_STATION);
	if (!hdr)
		return -1;

	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
	NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, dst);
	NLA_PUT(msg, NL80211_ATTR_MPATH_NEXT_HOP, ETH_ALEN, next_hop);

	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, pinfo->generation);

	pinfoattr = nla_nest_start(msg, NL80211_ATTR_MPATH_INFO);
	if (!pinfoattr)
		goto nla_put_failure;
	/* each field is only emitted when its MPATH_INFO_* bit is set */
	if (pinfo->filled & MPATH_INFO_FRAME_QLEN)
		NLA_PUT_U32(msg, NL80211_MPATH_INFO_FRAME_QLEN,
			    pinfo->frame_qlen);
	if (pinfo->filled & MPATH_INFO_SN)
		NLA_PUT_U32(msg, NL80211_MPATH_INFO_SN,
			    pinfo->sn);
	if (pinfo->filled & MPATH_INFO_METRIC)
		NLA_PUT_U32(msg, NL80211_MPATH_INFO_METRIC,
			    pinfo->metric);
	if (pinfo->filled & MPATH_INFO_EXPTIME)
		NLA_PUT_U32(msg, NL80211_MPATH_INFO_EXPTIME,
			    pinfo->exptime);
	if (pinfo->filled & MPATH_INFO_FLAGS)
		NLA_PUT_U8(msg, NL80211_MPATH_INFO_FLAGS,
			   pinfo->flags);
	if (pinfo->filled & MPATH_INFO_DISCOVERY_TIMEOUT)
		NLA_PUT_U32(msg, NL80211_MPATH_INFO_DISCOVERY_TIMEOUT,
			    pinfo->discovery_timeout);
	if (pinfo->filled & MPATH_INFO_DISCOVERY_RETRIES)
		NLA_PUT_U8(msg, NL80211_MPATH_INFO_DISCOVERY_RETRIES,
			   pinfo->discovery_retries);

	nla_nest_end(msg, pinfoattr);

	return genlmsg_end(msg, hdr);

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

/* Dump-callback for mesh paths; resumes from cb->args[1]. */
static int nl80211_dump_mpath(struct sk_buff *skb,
			      struct netlink_callback *cb)
{
	struct mpath_info pinfo;
	struct cfg80211_registered_device *dev;
	struct net_device *netdev;
	u8 dst[ETH_ALEN];
	u8 next_hop[ETH_ALEN];
	int path_idx = cb->args[1];
	int err;

	err = nl80211_prepare_netdev_dump(skb, cb, &dev, &netdev);
	if (err)
		return err;

	if (!dev->ops->dump_mpath) {
		err = -EOPNOTSUPP;
		goto out_err;
	}

	if (netdev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT) {
		err = -EOPNOTSUPP;
		goto out_err;
	}

	while (1) {
		err = dev->ops->dump_mpath(&dev->wiphy, netdev, path_idx,
					   dst, next_hop, &pinfo);
		if (err == -ENOENT)
			break;	/* -ENOENT means "no more paths" */
		if (err)
			goto out_err;

		if (nl80211_send_mpath(skb, NETLINK_CB(cb->skb).pid,
				       cb->nlh->nlmsg_seq, NLM_F_MULTI,
				       netdev, dst, next_hop,
				       &pinfo) < 0)
			goto out;	/* skb full; continue next call */

		path_idx++;
	}


 out:
	cb->args[1] = path_idx;
	err = skb->len;
 out_err:
	nl80211_finish_netdev_dump(dev);
	return err;
}

/* NL80211_CMD_GET_MPATH: look up a single mesh path by destination. */
static int nl80211_get_mpath(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	int err;
	struct net_device *dev = info->user_ptr[1];
	struct mpath_info pinfo;
	struct sk_buff *msg;
	u8 *dst = NULL;
	u8 next_hop[ETH_ALEN];

	memset(&pinfo, 0, sizeof(pinfo));

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	dst = nla_data(info->attrs[NL80211_ATTR_MAC]);

	if (!rdev->ops->get_mpath)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
		return -EOPNOTSUPP;

	err = rdev->ops->get_mpath(&rdev->wiphy, dev, dst, next_hop, &pinfo);
	if (err)
		return err;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	if (nl80211_send_mpath(msg, info->snd_pid, info->snd_seq, 0,
				 dev, dst, next_hop, &pinfo) < 0) {
		nlmsg_free(msg);
		return -ENOBUFS;
	}

	return genlmsg_reply(msg, info);
}

/* NL80211_CMD_SET_MPATH: change the next hop of an existing mesh path. */
static int nl80211_set_mpath(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	u8 *dst = NULL;
	u8 *next_hop = NULL;

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP])
		return -EINVAL;

	dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
	next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]);

	if (!rdev->ops->change_mpath)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
		return -EOPNOTSUPP;

	return rdev->ops->change_mpath(&rdev->wiphy, dev, dst, next_hop);
}

/* NL80211_CMD_NEW_MPATH: create a new mesh path entry. */
static int nl80211_new_mpath(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	u8 *dst = NULL;
	u8 *next_hop = NULL;

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_MPATH_NEXT_HOP])
		return -EINVAL;

	dst = nla_data(info->attrs[NL80211_ATTR_MAC]);
	next_hop = nla_data(info->attrs[NL80211_ATTR_MPATH_NEXT_HOP]);

	if (!rdev->ops->add_mpath)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT)
		return -EOPNOTSUPP;

	return rdev->ops->add_mpath(&rdev->wiphy, dev, dst, next_hop);
}

/* NL80211_CMD_DEL_MPATH: delete one mesh path (NULL MAC = all). */
static int nl80211_del_mpath(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	u8 *dst = NULL;

	if (info->attrs[NL80211_ATTR_MAC])
		dst = nla_data(info->attrs[NL80211_ATTR_MAC]);

	if (!rdev->ops->del_mpath)
		return -EOPNOTSUPP;

	return rdev->ops->del_mpath(&rdev->wiphy, dev, dst);
}

/* NL80211_CMD_SET_BSS: update per-BSS parameters; -1 means "no change". */
static int nl80211_set_bss(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct bss_parameters params;

	memset(&params, 0, sizeof(params));
	/* default to not changing parameters */
	params.use_cts_prot = -1;
	params.use_short_preamble = -1;
	params.use_short_slot_time = -1;
	params.ap_isolate = -1;
	params.ht_opmode = -1;

	if (info->attrs[NL80211_ATTR_BSS_CTS_PROT])
		params.use_cts_prot =
		    nla_get_u8(info->attrs[NL80211_ATTR_BSS_CTS_PROT]);
	if (info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE])
		params.use_short_preamble =
		    nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_PREAMBLE]);
	if (info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME])
		params.use_short_slot_time =
		    nla_get_u8(info->attrs[NL80211_ATTR_BSS_SHORT_SLOT_TIME]);
	if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) {
		params.basic_rates =
			nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
		params.basic_rates_len =
			nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
	}
	if (info->attrs[NL80211_ATTR_AP_ISOLATE])
		params.ap_isolate =
			!!nla_get_u8(info->attrs[NL80211_ATTR_AP_ISOLATE]);
	if (info->attrs[NL80211_ATTR_BSS_HT_OPMODE])
		params.ht_opmode =
			nla_get_u16(info->attrs[NL80211_ATTR_BSS_HT_OPMODE]);

	if
	    (!rdev->ops->change_bss)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
		return -EOPNOTSUPP;

	return rdev->ops->change_bss(&rdev->wiphy, dev, &params);
}

static const struct nla_policy reg_rule_policy[NL80211_REG_RULE_ATTR_MAX + 1] = {
	[NL80211_ATTR_REG_RULE_FLAGS]		= { .type = NLA_U32 },
	[NL80211_ATTR_FREQ_RANGE_START]		= { .type = NLA_U32 },
	[NL80211_ATTR_FREQ_RANGE_END]		= { .type = NLA_U32 },
	[NL80211_ATTR_FREQ_RANGE_MAX_BW]	= { .type = NLA_U32 },
	[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]	= { .type = NLA_U32 },
	[NL80211_ATTR_POWER_RULE_MAX_EIRP]	= { .type = NLA_U32 },
};

/* Convert one nested regulatory-rule attribute set into ieee80211_reg_rule. */
static int parse_reg_rule(struct nlattr *tb[],
	struct ieee80211_reg_rule *reg_rule)
{
	struct ieee80211_freq_range *freq_range = &reg_rule->freq_range;
	struct ieee80211_power_rule *power_rule = &reg_rule->power_rule;

	if (!tb[NL80211_ATTR_REG_RULE_FLAGS])
		return -EINVAL;
	if (!tb[NL80211_ATTR_FREQ_RANGE_START])
		return -EINVAL;
	if (!tb[NL80211_ATTR_FREQ_RANGE_END])
		return -EINVAL;
	if (!tb[NL80211_ATTR_FREQ_RANGE_MAX_BW])
		return -EINVAL;
	if (!tb[NL80211_ATTR_POWER_RULE_MAX_EIRP])
		return -EINVAL;

	reg_rule->flags = nla_get_u32(tb[NL80211_ATTR_REG_RULE_FLAGS]);

	freq_range->start_freq_khz =
		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_START]);
	freq_range->end_freq_khz =
		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_END]);
	freq_range->max_bandwidth_khz =
		nla_get_u32(tb[NL80211_ATTR_FREQ_RANGE_MAX_BW]);

	power_rule->max_eirp =
		nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_EIRP]);

	/* antenna gain is the only optional field */
	if (tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN])
		power_rule->max_antenna_gain =
			nla_get_u32(tb[NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN]);

	return 0;
}

/* NL80211_CMD_REQ_SET_REG: userspace regulatory hint (ISO alpha2). */
static int nl80211_req_set_reg(struct sk_buff *skb, struct genl_info *info)
{
	int r;
	char *data = NULL;

	/*
	 * You should only get this when cfg80211 hasn't yet initialized
	 * completely when built-in to the kernel right between the time
	 * window between nl80211_init() and regulatory_init(), if that is
	 * even possible.
	 */
	mutex_lock(&cfg80211_mutex);
	if (unlikely(!cfg80211_regdomain)) {
		mutex_unlock(&cfg80211_mutex);
		return -EINPROGRESS;
	}
	mutex_unlock(&cfg80211_mutex);

	if (!info->attrs[NL80211_ATTR_REG_ALPHA2])
		return -EINVAL;

	data = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]);

	r = regulatory_hint_user(data);

	return r;
}

/* Report the current (or default) mesh configuration to userspace. */
static int nl80211_get_mesh_config(struct sk_buff *skb,
				   struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct mesh_config cur_params;
	int err = 0;
	void *hdr;
	struct nlattr *pinfoattr;
	struct sk_buff *msg;

	if (wdev->iftype != NL80211_IFTYPE_MESH_POINT)
		return -EOPNOTSUPP;

	if (!rdev->ops->get_mesh_config)
		return -EOPNOTSUPP;

	wdev_lock(wdev);
	/* If not connected, get default parameters */
	if (!wdev->mesh_id_len)
		memcpy(&cur_params, &default_mesh_config, sizeof(cur_params));
	else
		err = rdev->ops->get_mesh_config(&rdev->wiphy, dev,
						 &cur_params);
	wdev_unlock(wdev);

	if (err)
		return err;

	/* Draw up a netlink message to send back */
	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;
	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
			     NL80211_CMD_GET_MESH_CONFIG);
	if (!hdr)
		goto out;
	pinfoattr = nla_nest_start(msg, NL80211_ATTR_MESH_CONFIG);
	if (!pinfoattr)
		goto nla_put_failure;
	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
	NLA_PUT_U16(msg, NL80211_MESHCONF_RETRY_TIMEOUT,
			cur_params.dot11MeshRetryTimeout);
	NLA_PUT_U16(msg, NL80211_MESHCONF_CONFIRM_TIMEOUT,
			cur_params.dot11MeshConfirmTimeout);
	NLA_PUT_U16(msg, NL80211_MESHCONF_HOLDING_TIMEOUT,
			cur_params.dot11MeshHoldingTimeout);
	NLA_PUT_U16(msg, NL80211_MESHCONF_MAX_PEER_LINKS,
			cur_params.dot11MeshMaxPeerLinks);
	NLA_PUT_U8(msg, NL80211_MESHCONF_MAX_RETRIES,
			cur_params.dot11MeshMaxRetries);
	NLA_PUT_U8(msg, NL80211_MESHCONF_TTL,
			cur_params.dot11MeshTTL);
	NLA_PUT_U8(msg, NL80211_MESHCONF_ELEMENT_TTL,
			cur_params.element_ttl);
	NLA_PUT_U8(msg, NL80211_MESHCONF_AUTO_OPEN_PLINKS,
			cur_params.auto_open_plinks);
	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES,
			cur_params.dot11MeshHWMPmaxPREQretries);
	NLA_PUT_U32(msg, NL80211_MESHCONF_PATH_REFRESH_TIME,
			cur_params.path_refresh_time);
	NLA_PUT_U16(msg, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
			cur_params.min_discovery_timeout);
	NLA_PUT_U32(msg, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
			cur_params.dot11MeshHWMPactivePathTimeout);
	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
			cur_params.dot11MeshHWMPpreqMinInterval);
	NLA_PUT_U16(msg, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
			cur_params.dot11MeshHWMPnetDiameterTraversalTime);
	NLA_PUT_U8(msg, NL80211_MESHCONF_HWMP_ROOTMODE,
			cur_params.dot11MeshHWMPRootMode);
	nla_nest_end(msg, pinfoattr);
	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
 out:
	nlmsg_free(msg);
	return -ENOBUFS;
}

static const struct nla_policy
nl80211_meshconf_params_policy[NL80211_MESHCONF_ATTR_MAX+1] = {
	[NL80211_MESHCONF_RETRY_TIMEOUT] = { .type = NLA_U16 },
	[NL80211_MESHCONF_CONFIRM_TIMEOUT] = { .type = NLA_U16 },
	[NL80211_MESHCONF_HOLDING_TIMEOUT] = { .type = NLA_U16 },
	[NL80211_MESHCONF_MAX_PEER_LINKS] = { .type = NLA_U16 },
	[NL80211_MESHCONF_MAX_RETRIES] = { .type = NLA_U8 },
	[NL80211_MESHCONF_TTL] = { .type = NLA_U8 },
	[NL80211_MESHCONF_ELEMENT_TTL] = { .type = NLA_U8 },
	[NL80211_MESHCONF_AUTO_OPEN_PLINKS] = { .type = NLA_U8 },
	[NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES] = { .type = NLA_U8 },
	[NL80211_MESHCONF_PATH_REFRESH_TIME] = { .type = NLA_U32 },
	[NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT] = { .type = NLA_U16 },
	[NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT] = { .type = NLA_U32 },
	[NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL] = { .type = NLA_U16 },
	[NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME] = { .type = NLA_U16 },
};

static const struct nla_policy
	nl80211_mesh_setup_params_policy[NL80211_MESH_SETUP_ATTR_MAX+1] = {
	[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL] = { .type = NLA_U8 },
[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC] = { .type = NLA_U8 }, [NL80211_MESH_SETUP_USERSPACE_AUTH] = { .type = NLA_FLAG }, [NL80211_MESH_SETUP_IE] = { .type = NLA_BINARY, .len = IEEE80211_MAX_DATA_LEN }, [NL80211_MESH_SETUP_USERSPACE_AMPE] = { .type = NLA_FLAG }, }; static int nl80211_parse_mesh_config(struct genl_info *info, struct mesh_config *cfg, u32 *mask_out) { struct nlattr *tb[NL80211_MESHCONF_ATTR_MAX + 1]; u32 mask = 0; #define FILL_IN_MESH_PARAM_IF_SET(table, cfg, param, mask, attr_num, nla_fn) \ do {\ if (table[attr_num]) {\ cfg->param = nla_fn(table[attr_num]); \ mask |= (1 << (attr_num - 1)); \ } \ } while (0);\ if (!info->attrs[NL80211_ATTR_MESH_CONFIG]) return -EINVAL; if (nla_parse_nested(tb, NL80211_MESHCONF_ATTR_MAX, info->attrs[NL80211_ATTR_MESH_CONFIG], nl80211_meshconf_params_policy)) return -EINVAL; /* This makes sure that there aren't more than 32 mesh config * parameters (otherwise our bitfield scheme would not work.) */ BUILD_BUG_ON(NL80211_MESHCONF_ATTR_MAX > 32); /* Fill in the params struct */ FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshRetryTimeout, mask, NL80211_MESHCONF_RETRY_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshConfirmTimeout, mask, NL80211_MESHCONF_CONFIRM_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHoldingTimeout, mask, NL80211_MESHCONF_HOLDING_TIMEOUT, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxPeerLinks, mask, NL80211_MESHCONF_MAX_PEER_LINKS, nla_get_u16); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshMaxRetries, mask, NL80211_MESHCONF_MAX_RETRIES, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshTTL, mask, NL80211_MESHCONF_TTL, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, element_ttl, mask, NL80211_MESHCONF_ELEMENT_TTL, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, auto_open_plinks, mask, NL80211_MESHCONF_AUTO_OPEN_PLINKS, nla_get_u8); FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPmaxPREQretries, mask, NL80211_MESHCONF_HWMP_MAX_PREQ_RETRIES, 
				  nla_get_u8);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, path_refresh_time,
				  mask, NL80211_MESHCONF_PATH_REFRESH_TIME,
				  nla_get_u32);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, min_discovery_timeout,
				  mask, NL80211_MESHCONF_MIN_DISCOVERY_TIMEOUT,
				  nla_get_u16);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPactivePathTimeout,
				  mask, NL80211_MESHCONF_HWMP_ACTIVE_PATH_TIMEOUT,
				  nla_get_u32);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPpreqMinInterval,
				  mask, NL80211_MESHCONF_HWMP_PREQ_MIN_INTERVAL,
				  nla_get_u16);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPnetDiameterTraversalTime,
				  mask, NL80211_MESHCONF_HWMP_NET_DIAM_TRVS_TIME,
				  nla_get_u16);
	FILL_IN_MESH_PARAM_IF_SET(tb, cfg, dot11MeshHWMPRootMode,
				  mask, NL80211_MESHCONF_HWMP_ROOTMODE,
				  nla_get_u8);
	if (mask_out)
		*mask_out = mask;

	return 0;

#undef FILL_IN_MESH_PARAM_IF_SET
}

/*
 * Parse NL80211_ATTR_MESH_SETUP into @setup: path selection/metric
 * protocol choice, optional vendor IE (pointer into the nlattr, not
 * copied), and the userspace auth/AMPE flags.
 */
static int nl80211_parse_mesh_setup(struct genl_info *info,
				    struct mesh_setup *setup)
{
	struct nlattr *tb[NL80211_MESH_SETUP_ATTR_MAX + 1];

	if (!info->attrs[NL80211_ATTR_MESH_SETUP])
		return -EINVAL;
	if (nla_parse_nested(tb, NL80211_MESH_SETUP_ATTR_MAX,
			     info->attrs[NL80211_ATTR_MESH_SETUP],
			     nl80211_mesh_setup_params_policy))
		return -EINVAL;

	/* non-zero u8 selects the vendor protocol, zero the default */
	if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])
		setup->path_sel_proto =
			(nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_PATH_SEL])) ?
			 IEEE80211_PATH_PROTOCOL_VENDOR :
			 IEEE80211_PATH_PROTOCOL_HWMP;

	if (tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC])
		setup->path_metric =
			(nla_get_u8(tb[NL80211_MESH_SETUP_ENABLE_VENDOR_METRIC])) ?
			 IEEE80211_PATH_METRIC_VENDOR :
			 IEEE80211_PATH_METRIC_AIRTIME;

	if (tb[NL80211_MESH_SETUP_IE]) {
		struct nlattr *ieattr =
			tb[NL80211_MESH_SETUP_IE];
		if (!is_valid_ie_attr(ieattr))
			return -EINVAL;
		/* points into the received message; caller must consume it
		 * before the message is freed */
		setup->ie = nla_data(ieattr);
		setup->ie_len = nla_len(ieattr);
	}
	setup->is_authenticated = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AUTH]);
	setup->is_secure = nla_get_flag(tb[NL80211_MESH_SETUP_USERSPACE_AMPE]);

	return 0;
}

/*
 * SET_MESH_CONFIG handler: parse the new parameters and hand them, with
 * the "which fields changed" mask, to the driver.  Requires a mesh
 * interface that has already joined a mesh (mesh_id_len != 0).
 */
static int nl80211_update_mesh_config(struct sk_buff *skb,
				      struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct wireless_dev *wdev = dev->ieee80211_ptr;
	struct mesh_config cfg;
	u32 mask;
	int err;

	if (wdev->iftype != NL80211_IFTYPE_MESH_POINT)
		return -EOPNOTSUPP;
	if (!rdev->ops->update_mesh_config)
		return -EOPNOTSUPP;

	err = nl80211_parse_mesh_config(info, &cfg, &mask);
	if (err)
		return err;

	wdev_lock(wdev);
	if (!wdev->mesh_id_len)
		err = -ENOLINK;	/* not joined to any mesh */

	if (!err)
		err = rdev->ops->update_mesh_config(&rdev->wiphy, dev,
						    mask, &cfg);

	wdev_unlock(wdev);

	return err;
}

/*
 * GET_REG handler: reply with the current regulatory domain (alpha2 plus
 * the nested list of frequency/power rules), under cfg80211_mutex.
 */
static int nl80211_get_reg(struct sk_buff *skb, struct genl_info *info)
{
	struct sk_buff *msg;
	void *hdr = NULL;
	struct nlattr *nl_reg_rules;
	unsigned int i;
	int err = -EINVAL;

	mutex_lock(&cfg80211_mutex);

	if (!cfg80211_regdomain)
		goto out;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg) {
		err = -ENOBUFS;
		goto out;
	}

	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
			     NL80211_CMD_GET_REG);
	if (!hdr)
		goto put_failure;

	NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2,
		cfg80211_regdomain->alpha2);

	nl_reg_rules = nla_nest_start(msg, NL80211_ATTR_REG_RULES);
	if (!nl_reg_rules)
		goto nla_put_failure;

	for (i = 0; i < cfg80211_regdomain->n_reg_rules; i++) {
		struct nlattr *nl_reg_rule;
		const struct ieee80211_reg_rule *reg_rule;
		const struct ieee80211_freq_range *freq_range;
		const struct ieee80211_power_rule *power_rule;

		reg_rule = &cfg80211_regdomain->reg_rules[i];
		freq_range = &reg_rule->freq_range;
		power_rule =
&reg_rule->power_rule; nl_reg_rule = nla_nest_start(msg, i); if (!nl_reg_rule) goto nla_put_failure; NLA_PUT_U32(msg, NL80211_ATTR_REG_RULE_FLAGS, reg_rule->flags); NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_START, freq_range->start_freq_khz); NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_END, freq_range->end_freq_khz); NLA_PUT_U32(msg, NL80211_ATTR_FREQ_RANGE_MAX_BW, freq_range->max_bandwidth_khz); NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_ANT_GAIN, power_rule->max_antenna_gain); NLA_PUT_U32(msg, NL80211_ATTR_POWER_RULE_MAX_EIRP, power_rule->max_eirp); nla_nest_end(msg, nl_reg_rule); } nla_nest_end(msg, nl_reg_rules); genlmsg_end(msg, hdr); err = genlmsg_reply(msg, info); goto out; nla_put_failure: genlmsg_cancel(msg, hdr); put_failure: nlmsg_free(msg); err = -EMSGSIZE; out: mutex_unlock(&cfg80211_mutex); return err; } static int nl80211_set_reg(struct sk_buff *skb, struct genl_info *info) { struct nlattr *tb[NL80211_REG_RULE_ATTR_MAX + 1]; struct nlattr *nl_reg_rule; char *alpha2 = NULL; int rem_reg_rules = 0, r = 0; u32 num_rules = 0, rule_idx = 0, size_of_regd; struct ieee80211_regdomain *rd = NULL; if (!info->attrs[NL80211_ATTR_REG_ALPHA2]) return -EINVAL; if (!info->attrs[NL80211_ATTR_REG_RULES]) return -EINVAL; alpha2 = nla_data(info->attrs[NL80211_ATTR_REG_ALPHA2]); nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { num_rules++; if (num_rules > NL80211_MAX_SUPP_REG_RULES) return -EINVAL; } mutex_lock(&cfg80211_mutex); if (!reg_is_valid_request(alpha2)) { r = -EINVAL; goto bad_reg; } size_of_regd = sizeof(struct ieee80211_regdomain) + (num_rules * sizeof(struct ieee80211_reg_rule)); rd = kzalloc(size_of_regd, GFP_KERNEL); if (!rd) { r = -ENOMEM; goto bad_reg; } rd->n_reg_rules = num_rules; rd->alpha2[0] = alpha2[0]; rd->alpha2[1] = alpha2[1]; nla_for_each_nested(nl_reg_rule, info->attrs[NL80211_ATTR_REG_RULES], rem_reg_rules) { nla_parse(tb, NL80211_REG_RULE_ATTR_MAX, nla_data(nl_reg_rule), nla_len(nl_reg_rule), 
reg_rule_policy); r = parse_reg_rule(tb, &rd->reg_rules[rule_idx]); if (r) goto bad_reg; rule_idx++; if (rule_idx > NL80211_MAX_SUPP_REG_RULES) { r = -EINVAL; goto bad_reg; } } BUG_ON(rule_idx != num_rules); r = set_regdom(rd); mutex_unlock(&cfg80211_mutex); return r; bad_reg: mutex_unlock(&cfg80211_mutex); kfree(rd); return r; } static int validate_scan_freqs(struct nlattr *freqs) { struct nlattr *attr1, *attr2; int n_channels = 0, tmp1, tmp2; nla_for_each_nested(attr1, freqs, tmp1) { n_channels++; /* * Some hardware has a limited channel list for * scanning, and it is pretty much nonsensical * to scan for a channel twice, so disallow that * and don't require drivers to check that the * channel list they get isn't longer than what * they can scan, as long as they can scan all * the channels they registered at once. */ nla_for_each_nested(attr2, freqs, tmp2) if (attr1 != attr2 && nla_get_u32(attr1) == nla_get_u32(attr2)) return 0; } return n_channels; } static int nl80211_trigger_scan(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct cfg80211_scan_request *request; struct nlattr *attr; struct wiphy *wiphy; int err, tmp, n_ssids = 0, n_channels, i; size_t ie_len; if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE])) return -EINVAL; wiphy = &rdev->wiphy; if (!rdev->ops->scan) return -EOPNOTSUPP; if (rdev->scan_req) return -EBUSY; if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) { n_channels = validate_scan_freqs( info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]); if (!n_channels) return -EINVAL; } else { enum ieee80211_band band; n_channels = 0; for (band = 0; band < IEEE80211_NUM_BANDS; band++) if (wiphy->bands[band]) n_channels += wiphy->bands[band]->n_channels; } if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) n_ssids++; if (n_ssids > wiphy->max_scan_ssids) return -EINVAL; if 
	   (info->attrs[NL80211_ATTR_IE])
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	else
		ie_len = 0;

	if (ie_len > wiphy->max_scan_ie_len)
		return -EINVAL;

	/*
	 * One allocation carries the request struct plus, in order, the
	 * channel pointer array, the ssid array and the IE blob; the
	 * pointers set up below index into this single block.
	 */
	request = kzalloc(sizeof(*request)
			+ sizeof(*request->ssids) * n_ssids
			+ sizeof(*request->channels) * n_channels
			+ ie_len, GFP_KERNEL);
	if (!request)
		return -ENOMEM;

	if (n_ssids)
		request->ssids = (void *)&request->channels[n_channels];
	request->n_ssids = n_ssids;
	if (ie_len) {
		if (request->ssids)
			request->ie = (void *)(request->ssids + n_ssids);
		else
			request->ie = (void *)(request->channels + n_channels);
	}

	i = 0;
	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
		/* user specified, bail out if channel not found */
		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_FREQUENCIES], tmp) {
			struct ieee80211_channel *chan;

			chan = ieee80211_get_channel(wiphy, nla_get_u32(attr));

			if (!chan) {
				err = -EINVAL;
				goto out_free;
			}

			/* ignore disabled channels */
			if (chan->flags & IEEE80211_CHAN_DISABLED)
				continue;

			request->channels[i] = chan;
			i++;
		}
	} else {
		enum ieee80211_band band;

		/* all channels */
		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
			int j;
			if (!wiphy->bands[band])
				continue;
			for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
				struct ieee80211_channel *chan;

				chan = &wiphy->bands[band]->channels[j];

				if (chan->flags & IEEE80211_CHAN_DISABLED)
					continue;

				request->channels[i] = chan;
				i++;
			}
		}
	}

	/* every requested channel was disabled/unknown */
	if (!i) {
		err = -EINVAL;
		goto out_free;
	}

	request->n_channels = i;

	i = 0;
	if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS], tmp) {
			if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
				err = -EINVAL;
				goto out_free;
			}
			request->ssids[i].ssid_len = nla_len(attr);
			memcpy(request->ssids[i].ssid, nla_data(attr), nla_len(attr));
			i++;
		}
	}

	if (info->attrs[NL80211_ATTR_IE]) {
		request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
		memcpy((void *)request->ie,
		       nla_data(info->attrs[NL80211_ATTR_IE]),
		       request->ie_len);
	}

	if (info->attrs[NL80211_ATTR_SCAN_FLAGS])
		request->flags = nla_get_u32(
			info->attrs[NL80211_ATTR_SCAN_FLAGS]);

	/* default: all bitrates of each band the device supports */
	for (i = 0; i < IEEE80211_NUM_BANDS; i++)
		if (wiphy->bands[i])
			request->rates[i] =
				(1 << wiphy->bands[i]->n_bitrates) - 1;

	if (info->attrs[NL80211_ATTR_SCAN_SUPP_RATES]) {
		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SUPP_RATES], tmp) {
			/* the nla type encodes which band the mask is for */
			enum ieee80211_band band = nla_type(attr);

			if (band < 0 || band >= IEEE80211_NUM_BANDS) {
				err = -EINVAL;
				goto out_free;
			}
			err = ieee80211_get_ratemask(wiphy->bands[band],
						     nla_data(attr),
						     nla_len(attr),
						     &request->rates[band]);
			if (err)
				goto out_free;
		}
	}

	request->dev = dev;
	request->wiphy = &rdev->wiphy;

	rdev->scan_req = request;
	err = rdev->ops->scan(&rdev->wiphy, dev, request);

	if (!err) {
		nl80211_send_scan_start(rdev, dev);
		dev_hold(dev);	/* released when the scan finishes */
	} else {
		/* error paths above also land here to undo scan_req */
 out_free:
		rdev->scan_req = NULL;
		kfree(request);
	}
	return err;
}

/*
 * START_SCHED_SCAN handler (continues past this chunk): like
 * TRIGGER_SCAN but for periodic scans at @interval; requires driver
 * support via WIPHY_FLAG_SUPPORTS_SCHED_SCAN.
 */
static int nl80211_start_sched_scan(struct sk_buff *skb,
				    struct genl_info *info)
{
	struct cfg80211_sched_scan_request *request;
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct nlattr *attr;
	struct wiphy *wiphy;
	int err, tmp, n_ssids = 0, n_channels, i;
	u32 interval;
	enum ieee80211_band band;
	size_t ie_len;

	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
	    !rdev->ops->sched_scan_start)
		return -EOPNOTSUPP;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL])
		return -EINVAL;

	interval = nla_get_u32(info->attrs[NL80211_ATTR_SCHED_SCAN_INTERVAL]);
	if (interval == 0)
		return -EINVAL;

	wiphy = &rdev->wiphy;

	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
		n_channels = validate_scan_freqs(
				info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]);
		if (!n_channels)
			return -EINVAL;
	} else {
		n_channels = 0;

		for (band = 0; band < IEEE80211_NUM_BANDS; band++)
			if (wiphy->bands[band])
				n_channels += wiphy->bands[band]->n_channels;
	}

	if (info->attrs[NL80211_ATTR_SCAN_SSIDS])
		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
				    tmp)
			n_ssids++;
	if (n_ssids > wiphy->max_sched_scan_ssids)
		return -EINVAL;

	if (info->attrs[NL80211_ATTR_IE])
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	else
		ie_len = 0;

	if (ie_len > wiphy->max_sched_scan_ie_len)
		return -EINVAL;

	mutex_lock(&rdev->sched_scan_mtx);

	/* only one scheduled scan at a time */
	if (rdev->sched_scan_req) {
		err = -EINPROGRESS;
		goto out;
	}

	/* single block: request + channel ptrs + ssids + IE blob */
	request = kzalloc(sizeof(*request)
			+ sizeof(*request->ssids) * n_ssids
			+ sizeof(*request->channels) * n_channels
			+ ie_len, GFP_KERNEL);
	if (!request) {
		err = -ENOMEM;
		goto out;
	}

	if (n_ssids)
		request->ssids = (void *)&request->channels[n_channels];
	request->n_ssids = n_ssids;
	if (ie_len) {
		if (request->ssids)
			request->ie = (void *)(request->ssids + n_ssids);
		else
			request->ie = (void *)(request->channels + n_channels);
	}

	i = 0;
	if (info->attrs[NL80211_ATTR_SCAN_FREQUENCIES]) {
		/* user specified, bail out if channel not found */
		nla_for_each_nested(attr,
				    info->attrs[NL80211_ATTR_SCAN_FREQUENCIES],
				    tmp) {
			struct ieee80211_channel *chan;

			chan = ieee80211_get_channel(wiphy, nla_get_u32(attr));

			if (!chan) {
				err = -EINVAL;
				goto out_free;
			}

			/* ignore disabled channels */
			if (chan->flags & IEEE80211_CHAN_DISABLED)
				continue;

			request->channels[i] = chan;
			i++;
		}
	} else {
		/* all channels */
		for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
			int j;
			if (!wiphy->bands[band])
				continue;
			for (j = 0; j < wiphy->bands[band]->n_channels; j++) {
				struct ieee80211_channel *chan;

				chan = &wiphy->bands[band]->channels[j];

				if (chan->flags & IEEE80211_CHAN_DISABLED)
					continue;

				request->channels[i] = chan;
				i++;
			}
		}
	}

	if (!i) {
		err = -EINVAL;
		goto out_free;
	}

	request->n_channels = i;

	i = 0;
	if (info->attrs[NL80211_ATTR_SCAN_SSIDS]) {
		nla_for_each_nested(attr, info->attrs[NL80211_ATTR_SCAN_SSIDS],
				    tmp) {
			if (nla_len(attr) > IEEE80211_MAX_SSID_LEN) {
				err = -EINVAL;
				goto out_free;
			}
			request->ssids[i].ssid_len = nla_len(attr);
			memcpy(request->ssids[i].ssid, nla_data(attr),
			       nla_len(attr));
			i++;
		}
	}

	if (info->attrs[NL80211_ATTR_IE]) {
		request->ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
		memcpy((void *)request->ie,
		       nla_data(info->attrs[NL80211_ATTR_IE]),
		       request->ie_len);
	}

	request->dev = dev;
	request->wiphy = &rdev->wiphy;
	request->interval = interval;

	err = rdev->ops->sched_scan_start(&rdev->wiphy, dev, request);
	if (!err) {
		/* driver accepted: the request now lives on rdev */
		rdev->sched_scan_req = request;
		nl80211_send_sched_scan(rdev, dev,
					NL80211_CMD_START_SCHED_SCAN);
		goto out;
	}

out_free:
	kfree(request);
out:
	mutex_unlock(&rdev->sched_scan_mtx);
	return err;
}

/* STOP_SCHED_SCAN handler: cancel the running scheduled scan, if any. */
static int nl80211_stop_sched_scan(struct sk_buff *skb,
				   struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	int err;

	if (!(rdev->wiphy.flags & WIPHY_FLAG_SUPPORTS_SCHED_SCAN) ||
	    !rdev->ops->sched_scan_stop)
		return -EOPNOTSUPP;

	mutex_lock(&rdev->sched_scan_mtx);
	err = __cfg80211_stop_sched_scan(rdev, false);
	mutex_unlock(&rdev->sched_scan_mtx);

	return err;
}

/*
 * Emit one BSS entry of a scan-results dump (continues past this chunk).
 * Returns the genlmsg_end() result, or -1/-EMSGSIZE when the message is
 * full so the dump can resume later.  Caller holds the wdev lock.
 */
static int nl80211_send_bss(struct sk_buff *msg, struct netlink_callback *cb,
			    u32 seq, int flags,
			    struct cfg80211_registered_device *rdev,
			    struct wireless_dev *wdev,
			    struct cfg80211_internal_bss *intbss)
{
	struct cfg80211_bss *res = &intbss->pub;
	void *hdr;
	struct nlattr *bss;
	int i;

	ASSERT_WDEV_LOCK(wdev);

	hdr = nl80211hdr_put(msg, NETLINK_CB(cb->skb).pid, seq, flags,
			     NL80211_CMD_NEW_SCAN_RESULTS);
	if (!hdr)
		return -1;

	genl_dump_check_consistent(cb, hdr, &nl80211_fam);

	NLA_PUT_U32(msg, NL80211_ATTR_GENERATION, rdev->bss_generation);
	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, wdev->netdev->ifindex);

	bss = nla_nest_start(msg, NL80211_ATTR_BSS);
	if (!bss)
		goto nla_put_failure;
	if (!is_zero_ether_addr(res->bssid))
		NLA_PUT(msg, NL80211_BSS_BSSID, ETH_ALEN, res->bssid);
	if (res->information_elements && res->len_information_elements)
		NLA_PUT(msg, NL80211_BSS_INFORMATION_ELEMENTS,
			res->len_information_elements,
			res->information_elements);
	if (res->beacon_ies && res->len_beacon_ies &&
	    res->beacon_ies != res->information_elements)
		NLA_PUT(msg, NL80211_BSS_BEACON_IES, res->len_beacon_ies,
			res->beacon_ies);
	if (res->tsf)
		NLA_PUT_U64(msg, NL80211_BSS_TSF, res->tsf);
	if (res->beacon_interval)
		NLA_PUT_U16(msg, NL80211_BSS_BEACON_INTERVAL, res->beacon_interval);
	NLA_PUT_U16(msg, NL80211_BSS_CAPABILITY, res->capability);
	NLA_PUT_U32(msg, NL80211_BSS_FREQUENCY, res->channel->center_freq);
	NLA_PUT_U32(msg, NL80211_BSS_SEEN_MS_AGO,
		jiffies_to_msecs(jiffies - intbss->ts));

	/* signal is reported in whichever unit the wiphy registered */
	switch (rdev->wiphy.signal_type) {
	case CFG80211_SIGNAL_TYPE_MBM:
		NLA_PUT_U32(msg, NL80211_BSS_SIGNAL_MBM, res->signal);
		break;
	case CFG80211_SIGNAL_TYPE_UNSPEC:
		NLA_PUT_U8(msg, NL80211_BSS_SIGNAL_UNSPEC, res->signal);
		break;
	default:
		break;
	}

	/* annotate the BSS with our association/auth status, if any */
	switch (wdev->iftype) {
	case NL80211_IFTYPE_P2P_CLIENT:
	case NL80211_IFTYPE_STATION:
		if (intbss == wdev->current_bss)
			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
				    NL80211_BSS_STATUS_ASSOCIATED);
		else for (i = 0; i < MAX_AUTH_BSSES; i++) {
			if (intbss != wdev->auth_bsses[i])
				continue;
			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
				    NL80211_BSS_STATUS_AUTHENTICATED);
			break;
		}
		break;
	case NL80211_IFTYPE_ADHOC:
		if (intbss == wdev->current_bss)
			NLA_PUT_U32(msg, NL80211_BSS_STATUS,
				    NL80211_BSS_STATUS_IBSS_JOINED);
		break;
	default:
		break;
	}

	nla_nest_end(msg, bss);

	return genlmsg_end(msg, hdr);

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

/*
 * GET_SCAN dump: walk the device's BSS list (expiring stale entries
 * first) and emit entries from cb->args[1] onward until the skb fills.
 */
static int nl80211_dump_scan(struct sk_buff *skb,
			     struct netlink_callback *cb)
{
	struct cfg80211_registered_device *rdev;
	struct net_device *dev;
	struct cfg80211_internal_bss *scan;
	struct wireless_dev *wdev;
	int start = cb->args[1], idx = 0;
	int err;

	err = nl80211_prepare_netdev_dump(skb, cb, &rdev, &dev);
	if (err)
		return err;

	wdev = dev->ieee80211_ptr;

	wdev_lock(wdev);
	spin_lock_bh(&rdev->bss_lock);
	cfg80211_bss_expire(rdev);

	cb->seq = rdev->bss_generation;

	list_for_each_entry(scan, &rdev->bss_list, list) {
		if (++idx <= start)
			continue;
		if (nl80211_send_bss(skb, cb,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				rdev, wdev, scan) < 0) {
			/* message full: resume from this entry next call */
			idx--;
			break;
		}
	}

	spin_unlock_bh(&rdev->bss_lock);
	wdev_unlock(wdev);

	cb->args[1] = idx;
	nl80211_finish_netdev_dump(rdev);

	return skb->len;
}

/*
 * Emit one survey record; only the fields flagged in survey->filled are
 * included.  Returns genlmsg_end() or a negative error.
 */
static int nl80211_send_survey(struct sk_buff *msg, u32 pid, u32 seq,
				int flags, struct net_device *dev,
				struct survey_info *survey)
{
	void *hdr;
	struct nlattr *infoattr;

	hdr = nl80211hdr_put(msg, pid, seq, flags,
			     NL80211_CMD_NEW_SURVEY_RESULTS);
	if (!hdr)
		return -ENOMEM;

	NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);

	infoattr = nla_nest_start(msg, NL80211_ATTR_SURVEY_INFO);
	if (!infoattr)
		goto nla_put_failure;

	NLA_PUT_U32(msg, NL80211_SURVEY_INFO_FREQUENCY,
		    survey->channel->center_freq);
	if (survey->filled & SURVEY_INFO_NOISE_DBM)
		NLA_PUT_U8(msg, NL80211_SURVEY_INFO_NOISE,
			    survey->noise);
	if (survey->filled & SURVEY_INFO_IN_USE)
		NLA_PUT_FLAG(msg, NL80211_SURVEY_INFO_IN_USE);
	if (survey->filled & SURVEY_INFO_CHANNEL_TIME)
		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME,
			    survey->channel_time);
	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_BUSY)
		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_BUSY,
			    survey->channel_time_busy);
	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_EXT_BUSY)
		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_EXT_BUSY,
			    survey->channel_time_ext_busy);
	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_RX)
		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_RX,
			    survey->channel_time_rx);
	if (survey->filled & SURVEY_INFO_CHANNEL_TIME_TX)
		NLA_PUT_U64(msg, NL80211_SURVEY_INFO_CHANNEL_TIME_TX,
			    survey->channel_time_tx);

	nla_nest_end(msg, infoattr);

	return genlmsg_end(msg, hdr);

 nla_put_failure:
	genlmsg_cancel(msg, hdr);
	return -EMSGSIZE;
}

/*
 * GET_SURVEY dump: iterate the driver's survey records starting at
 * cb->args[1], skipping channels that are disabled or no longer known.
 */
static int nl80211_dump_survey(struct sk_buff *skb,
			struct netlink_callback *cb)
{
	struct survey_info survey;
	struct cfg80211_registered_device *dev;
	struct net_device *netdev;
	int survey_idx = cb->args[1];
	int res;

	res = nl80211_prepare_netdev_dump(skb, cb, &dev, &netdev);
	if (res)
		return res;

	if (!dev->ops->dump_survey) {
		res = -EOPNOTSUPP;
		goto out_err;
	}

	while (1) {
		struct ieee80211_channel *chan;

		res = dev->ops->dump_survey(&dev->wiphy, netdev, survey_idx,
					    &survey);
		if (res == -ENOENT)
			break;	/* no more records */
		if (res)
			goto out_err;

		/* Survey without a channel doesn't make sense */
		if (!survey.channel) {
			res = -EINVAL;
			goto out;
		}

		chan = ieee80211_get_channel(&dev->wiphy,
					     survey.channel->center_freq);
		if (!chan || chan->flags & IEEE80211_CHAN_DISABLED) {
			survey_idx++;
			continue;
		}

		if (nl80211_send_survey(skb,
				NETLINK_CB(cb->skb).pid,
				cb->nlh->nlmsg_seq, NLM_F_MULTI,
				netdev,
				&survey) < 0)
			goto out;	/* skb full: resume next call */
		survey_idx++;
	}

 out:
	cb->args[1] = survey_idx;
	res = skb->len;
 out_err:
	nl80211_finish_netdev_dump(dev);
	return res;
}

/* Userspace-supplied auth type must be one of the defined values. */
static bool nl80211_valid_auth_type(enum nl80211_auth_type auth_type)
{
	return auth_type <= NL80211_AUTHTYPE_MAX;
}

/* Only WPA version bits 1 and 2 are meaningful. */
static bool nl80211_valid_wpa_versions(u32 wpa_versions)
{
	return !(wpa_versions & ~(NL80211_WPA_VERSION_1 |
				  NL80211_WPA_VERSION_2));
}

/* Accepted AKM suites: 802.1X and PSK. */
static bool nl80211_valid_akm_suite(u32 akm)
{
	return akm == WLAN_AKM_SUITE_8021X ||
		akm == WLAN_AKM_SUITE_PSK;
}

/* Cipher suites cfg80211 understands. */
static bool nl80211_valid_cipher_suite(u32 cipher)
{
	return cipher == WLAN_CIPHER_SUITE_WEP40 ||
		cipher == WLAN_CIPHER_SUITE_WEP104 ||
		cipher == WLAN_CIPHER_SUITE_TKIP ||
		cipher == WLAN_CIPHER_SUITE_CCMP ||
		cipher == WLAN_CIPHER_SUITE_AES_CMAC;
}

/*
 * AUTHENTICATE handler (continues past this chunk): validate the target
 * BSS parameters and optional WEP key, then call cfg80211_mlme_auth().
 */
static int nl80211_authenticate(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct ieee80211_channel *chan;
	const u8 *bssid, *ssid, *ie = NULL;
	int err, ssid_len, ie_len = 0;
	enum nl80211_auth_type auth_type;
	struct key_parse key;
	bool local_state_change;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_AUTH_TYPE])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_SSID])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_WIPHY_FREQ])
		return -EINVAL;

	err = nl80211_parse_key(info, &key);
	if (err)
		return err;

	if (key.idx >= 0) {
		/* an explicit key must be a WEP group key of valid length */
		if (key.type != -1 && key.type != NL80211_KEYTYPE_GROUP)
			return -EINVAL;
		if (!key.p.key || !key.p.key_len)
			return -EINVAL;
		if ((key.p.cipher != WLAN_CIPHER_SUITE_WEP40 ||
		     key.p.key_len !=
		     WLAN_KEY_LEN_WEP40) &&
		    (key.p.cipher != WLAN_CIPHER_SUITE_WEP104 ||
		     key.p.key_len != WLAN_KEY_LEN_WEP104))
			return -EINVAL;
		/*
		 * NOTE(review): WEP key indices are 0..3; "> 4" also admits
		 * index 4 — confirm against nl80211_parse_key's accepted
		 * range whether this should be "> 3".
		 */
		if (key.idx > 4)
			return -EINVAL;
	} else {
		key.p.key_len = 0;
		key.p.key = NULL;
	}

	if (key.idx >= 0) {
		int i;
		bool ok = false;
		/* the key's cipher must be advertised by the hardware */
		for (i = 0; i < rdev->wiphy.n_cipher_suites; i++) {
			if (key.p.cipher == rdev->wiphy.cipher_suites[i]) {
				ok = true;
				break;
			}
		}
		if (!ok)
			return -EINVAL;
	}

	if (!rdev->ops->auth)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
		return -EOPNOTSUPP;

	bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
	chan = ieee80211_get_channel(&rdev->wiphy,
		nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
	if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
		return -EINVAL;

	ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
	ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);

	if (info->attrs[NL80211_ATTR_IE]) {
		ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	auth_type = nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
	if (!nl80211_valid_auth_type(auth_type))
		return -EINVAL;

	local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];

	return cfg80211_mlme_auth(rdev, dev, chan, auth_type, bssid,
				  ssid, ssid_len, ie, ie_len,
				  key.p.key, key.p.key_len, key.idx,
				  local_state_change);
}

/*
 * Collect the crypto attributes (control port, pairwise/group ciphers,
 * WPA versions, AKM suites) into @settings, validating each against the
 * nl80211_valid_* helpers.  @cipher_limit caps n_ciphers_pairwise.
 */
static int nl80211_crypto_settings(struct cfg80211_registered_device *rdev,
				   struct genl_info *info,
				   struct cfg80211_crypto_settings *settings,
				   int cipher_limit)
{
	memset(settings, 0, sizeof(*settings));

	settings->control_port = info->attrs[NL80211_ATTR_CONTROL_PORT];

	if (info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]) {
		u16 proto;
		proto = nla_get_u16(
			info->attrs[NL80211_ATTR_CONTROL_PORT_ETHERTYPE]);
		settings->control_port_ethertype = cpu_to_be16(proto);
		/* non-PAE ethertypes need explicit driver support */
		if (!(rdev->wiphy.flags & WIPHY_FLAG_CONTROL_PORT_PROTOCOL) &&
		    proto != ETH_P_PAE)
			return -EINVAL;
		if (info->attrs[NL80211_ATTR_CONTROL_PORT_NO_ENCRYPT])
			settings->control_port_no_encrypt = true;
	} else
		settings->control_port_ethertype = cpu_to_be16(ETH_P_PAE);

	if (info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]) {
		void *data;
		int len, i;

		data = nla_data(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]);
		len = nla_len(info->attrs[NL80211_ATTR_CIPHER_SUITES_PAIRWISE]);
		settings->n_ciphers_pairwise = len / sizeof(u32);

		if (len % sizeof(u32))
			return -EINVAL;

		if (settings->n_ciphers_pairwise > cipher_limit)
			return -EINVAL;

		memcpy(settings->ciphers_pairwise, data, len);

		for (i = 0; i < settings->n_ciphers_pairwise; i++)
			if (!nl80211_valid_cipher_suite(
					settings->ciphers_pairwise[i]))
				return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]) {
		settings->cipher_group =
			nla_get_u32(info->attrs[NL80211_ATTR_CIPHER_SUITE_GROUP]);
		if (!nl80211_valid_cipher_suite(settings->cipher_group))
			return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_WPA_VERSIONS]) {
		settings->wpa_versions =
			nla_get_u32(info->attrs[NL80211_ATTR_WPA_VERSIONS]);
		if (!nl80211_valid_wpa_versions(settings->wpa_versions))
			return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_AKM_SUITES]) {
		void *data;
		int len, i;

		data = nla_data(info->attrs[NL80211_ATTR_AKM_SUITES]);
		len = nla_len(info->attrs[NL80211_ATTR_AKM_SUITES]);
		settings->n_akm_suites = len / sizeof(u32);

		if (len % sizeof(u32))
			return -EINVAL;

		if (settings->n_akm_suites > NL80211_MAX_NR_AKM_SUITES)
			return -EINVAL;

		memcpy(settings->akm_suites, data, len);

		for (i = 0; i < settings->n_akm_suites; i++)
			if (!nl80211_valid_akm_suite(settings->akm_suites[i]))
				return -EINVAL;
	}

	return 0;
}

/*
 * ASSOCIATE handler (continues past this chunk): validate BSS/SSID/
 * channel/MFP/crypto attributes and call cfg80211_mlme_assoc().
 */
static int nl80211_associate(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct cfg80211_crypto_settings crypto;
	struct ieee80211_channel *chan;
	const u8 *bssid, *ssid, *ie = NULL, *prev_bssid = NULL;
	int err, ssid_len, ie_len = 0;
	bool use_mfp = false;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;
	if (!info->attrs[NL80211_ATTR_MAC] ||
	    !info->attrs[NL80211_ATTR_SSID] ||
	    !info->attrs[NL80211_ATTR_WIPHY_FREQ])
		return -EINVAL;

	if (!rdev->ops->assoc)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
		return -EOPNOTSUPP;

	bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);

	chan = ieee80211_get_channel(&rdev->wiphy,
		nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
	if (!chan || (chan->flags & IEEE80211_CHAN_DISABLED))
		return -EINVAL;

	ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
	ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);

	if (info->attrs[NL80211_ATTR_IE]) {
		ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	if (info->attrs[NL80211_ATTR_USE_MFP]) {
		/* only NO and REQUIRED are representable here */
		enum nl80211_mfp mfp =
			nla_get_u32(info->attrs[NL80211_ATTR_USE_MFP]);
		if (mfp == NL80211_MFP_REQUIRED)
			use_mfp = true;
		else if (mfp != NL80211_MFP_NO)
			return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_PREV_BSSID])
		prev_bssid = nla_data(info->attrs[NL80211_ATTR_PREV_BSSID]);

	/* cipher_limit of 1: associate takes one pairwise cipher */
	err = nl80211_crypto_settings(rdev, info, &crypto, 1);
	if (!err)
		err = cfg80211_mlme_assoc(rdev, dev, chan, bssid, prev_bssid,
					  ssid, ssid_len, ie, ie_len, use_mfp,
					  &crypto);

	return err;
}

/*
 * DEAUTHENTICATE handler: validate the target and reason code and call
 * cfg80211_mlme_deauth().  Station/P2P-client interfaces only.
 */
static int nl80211_deauthenticate(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	const u8 *ie = NULL, *bssid;
	int ie_len = 0;
	u16 reason_code;
	bool local_state_change;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_REASON_CODE])
		return -EINVAL;

	if (!rdev->ops->deauth)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
		return -EOPNOTSUPP;

	bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);

	reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
	if (reason_code == 0) {
		/* Reason Code 0 is reserved */
		return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_IE]) {
		ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];

	return cfg80211_mlme_deauth(rdev, dev, bssid, ie, ie_len, reason_code,
				    local_state_change);
}

/*
 * DISASSOCIATE handler: mirror image of deauthenticate, calling
 * cfg80211_mlme_disassoc().
 */
static int nl80211_disassociate(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	const u8 *ie = NULL, *bssid;
	int ie_len = 0;
	u16 reason_code;
	bool local_state_change;

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_MAC])
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_REASON_CODE])
		return -EINVAL;

	if (!rdev->ops->disassoc)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
		return -EOPNOTSUPP;

	bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);

	reason_code = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);
	if (reason_code == 0) {
		/* Reason Code 0 is reserved */
		return -EINVAL;
	}

	if (info->attrs[NL80211_ATTR_IE]) {
		ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	local_state_change = !!info->attrs[NL80211_ATTR_LOCAL_STATE_CHANGE];

	return cfg80211_mlme_disassoc(rdev, dev, bssid, ie, ie_len, reason_code,
				      local_state_change);
}

/*
 * Translate a multicast bitrate value into per-band rate indices
 * (stored as index + 1 so 0 can mean "unset"); returns false if no band
 * supports the rate.  Continues past this chunk.
 */
static bool
nl80211_parse_mcast_rate(struct cfg80211_registered_device *rdev,
			 int mcast_rate[IEEE80211_NUM_BANDS],
			 int rateval)
{
	struct wiphy *wiphy = &rdev->wiphy;
	bool found = false;
	int band, i;

	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = wiphy->bands[band];
		if (!sband)
			continue;

		for (i = 0; i < sband->n_bitrates; i++) {
			if (sband->bitrates[i].bitrate == rateval) {
				mcast_rate[band] = i + 1;
				found = true;
				break;
			}
		}
	}

	return found;
}

/*
 * JOIN_IBSS handler: assemble cfg80211_ibss_params (channel, ssid,
 * optional bssid/IEs/rates/keys) from the request and call
 * cfg80211_join_ibss().  Adhoc interfaces only.
 */
static int nl80211_join_ibss(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct cfg80211_ibss_params ibss;
	struct wiphy *wiphy;
	struct cfg80211_cached_keys *connkeys = NULL;
	int err;

	memset(&ibss, 0, sizeof(ibss));

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] ||
	    !info->attrs[NL80211_ATTR_SSID] ||
	    !nla_len(info->attrs[NL80211_ATTR_SSID]))
		return -EINVAL;

	/* default beacon interval when userspace doesn't specify one */
	ibss.beacon_interval = 100;

	if (info->attrs[NL80211_ATTR_BEACON_INTERVAL]) {
		ibss.beacon_interval =
			nla_get_u32(info->attrs[NL80211_ATTR_BEACON_INTERVAL]);
		if (ibss.beacon_interval < 1 || ibss.beacon_interval > 10000)
			return -EINVAL;
	}

	if (!rdev->ops->join_ibss)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC)
		return -EOPNOTSUPP;

	wiphy = &rdev->wiphy;

	if (info->attrs[NL80211_ATTR_MAC])
		ibss.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
	ibss.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
	ibss.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);

	if (info->attrs[NL80211_ATTR_IE]) {
		ibss.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		ibss.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	ibss.channel = ieee80211_get_channel(wiphy,
		nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
	if (!ibss.channel ||
	    ibss.channel->flags & IEEE80211_CHAN_NO_IBSS ||
	    ibss.channel->flags & IEEE80211_CHAN_DISABLED)
		return -EINVAL;

	ibss.channel_fixed = !!info->attrs[NL80211_ATTR_FREQ_FIXED];
	ibss.privacy = !!info->attrs[NL80211_ATTR_PRIVACY];

	if (info->attrs[NL80211_ATTR_BSS_BASIC_RATES]) {
		u8 *rates =
			nla_data(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
		int n_rates =
			nla_len(info->attrs[NL80211_ATTR_BSS_BASIC_RATES]);
		struct ieee80211_supported_band *sband =
			wiphy->bands[ibss.channel->band];
		/* NOTE(review): this inner err shadows the outer one */
		int err;

		err = ieee80211_get_ratemask(sband, rates, n_rates,
					     &ibss.basic_rates);
		if (err)
			return err;
	}

	if (info->attrs[NL80211_ATTR_MCAST_RATE] &&
	    !nl80211_parse_mcast_rate(rdev, ibss.mcast_rate,
			nla_get_u32(info->attrs[NL80211_ATTR_MCAST_RATE])))
		return -EINVAL;

	if (ibss.privacy && info->attrs[NL80211_ATTR_KEYS]) {
		connkeys = nl80211_parse_connkeys(rdev,
					info->attrs[NL80211_ATTR_KEYS]);
		if (IS_ERR(connkeys))
			return PTR_ERR(connkeys);
	}

	err = cfg80211_join_ibss(rdev, dev, &ibss, connkeys);
	if (err)
		kfree(connkeys);	/* on success ownership moved on */
	return err;
}

/* LEAVE_IBSS handler: tear down the current IBSS membership. */
static int nl80211_leave_ibss(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];

	if (!rdev->ops->leave_ibss)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC)
		return -EOPNOTSUPP;

	return cfg80211_leave_ibss(rdev, dev, false);
}

#ifdef CONFIG_NL80211_TESTMODE
/* multicast group used to broadcast testmode events */
static struct genl_multicast_group nl80211_testmode_mcgrp = {
	.name = "testmode",
};

/*
 * TESTMODE command: pass the opaque TESTDATA blob straight to the
 * driver; testmode_info is stashed so driver replies can find the
 * requester while the command runs.
 */
static int nl80211_testmode_do(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	int err;

	if (!info->attrs[NL80211_ATTR_TESTDATA])
		return -EINVAL;

	err = -EOPNOTSUPP;
	if (rdev->ops->testmode_cmd) {
		rdev->testmode_info = info;
		err = rdev->ops->testmode_cmd(&rdev->wiphy,
				nla_data(info->attrs[NL80211_ATTR_TESTDATA]),
				nla_len(info->attrs[NL80211_ATTR_TESTDATA]));
		rdev->testmode_info = NULL;
	}

	return err;
}

/*
 * TESTMODE dump (continues past this chunk): resolve the target wiphy on
 * the first call, then repeatedly invoke the driver's testmode_dump.
 */
static int nl80211_testmode_dump(struct sk_buff *skb,
				 struct netlink_callback *cb)
{
	struct cfg80211_registered_device *dev;
	int err;
	long phy_idx;
	void *data = NULL;
	int data_len = 0;

	if (cb->args[0]) {
		/*
		 * 0 is a valid index, but not valid for args[0],
		 * so we need to offset by 1.
*/ phy_idx = cb->args[0] - 1; } else { err = nlmsg_parse(cb->nlh, GENL_HDRLEN + nl80211_fam.hdrsize, nl80211_fam.attrbuf, nl80211_fam.maxattr, nl80211_policy); if (err) return err; if (!nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]) return -EINVAL; phy_idx = nla_get_u32(nl80211_fam.attrbuf[NL80211_ATTR_WIPHY]); if (nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA]) cb->args[1] = (long)nl80211_fam.attrbuf[NL80211_ATTR_TESTDATA]; } if (cb->args[1]) { data = nla_data((void *)cb->args[1]); data_len = nla_len((void *)cb->args[1]); } mutex_lock(&cfg80211_mutex); dev = cfg80211_rdev_by_wiphy_idx(phy_idx); if (!dev) { mutex_unlock(&cfg80211_mutex); return -ENOENT; } cfg80211_lock_rdev(dev); mutex_unlock(&cfg80211_mutex); if (!dev->ops->testmode_dump) { err = -EOPNOTSUPP; goto out_err; } while (1) { void *hdr = nl80211hdr_put(skb, NETLINK_CB(cb->skb).pid, cb->nlh->nlmsg_seq, NLM_F_MULTI, NL80211_CMD_TESTMODE); struct nlattr *tmdata; if (nla_put_u32(skb, NL80211_ATTR_WIPHY, dev->wiphy_idx) < 0) { genlmsg_cancel(skb, hdr); break; } tmdata = nla_nest_start(skb, NL80211_ATTR_TESTDATA); if (!tmdata) { genlmsg_cancel(skb, hdr); break; } err = dev->ops->testmode_dump(&dev->wiphy, skb, cb, data, data_len); nla_nest_end(skb, tmdata); if (err == -ENOBUFS || err == -ENOENT) { genlmsg_cancel(skb, hdr); break; } else if (err) { genlmsg_cancel(skb, hdr); goto out_err; } genlmsg_end(skb, hdr); } err = skb->len; /* see above */ cb->args[0] = phy_idx + 1; out_err: cfg80211_unlock_rdev(dev); return err; } static struct sk_buff * __cfg80211_testmode_alloc_skb(struct cfg80211_registered_device *rdev, int approxlen, u32 pid, u32 seq, gfp_t gfp) { struct sk_buff *skb; void *hdr; struct nlattr *data; skb = nlmsg_new(approxlen + 100, gfp); if (!skb) return NULL; hdr = nl80211hdr_put(skb, pid, seq, 0, NL80211_CMD_TESTMODE); if (!hdr) { kfree_skb(skb); return NULL; } NLA_PUT_U32(skb, NL80211_ATTR_WIPHY, rdev->wiphy_idx); data = nla_nest_start(skb, NL80211_ATTR_TESTDATA); ((void **)skb->cb)[0] = rdev; ((void 
**)skb->cb)[1] = hdr;
	((void **)skb->cb)[2] = data;

	return skb;

 nla_put_failure:
	kfree_skb(skb);
	return NULL;
}

/*
 * Exported helper: allocate a reply skb for the currently-running
 * testmode command (valid only inside a ->testmode_cmd callback,
 * hence the WARN_ON when testmode_info is unset).
 */
struct sk_buff *cfg80211_testmode_alloc_reply_skb(struct wiphy *wiphy,
						  int approxlen)
{
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

	if (WARN_ON(!rdev->testmode_info))
		return NULL;

	return __cfg80211_testmode_alloc_skb(rdev, approxlen,
				rdev->testmode_info->snd_pid,
				rdev->testmode_info->snd_seq,
				GFP_KERNEL);
}
EXPORT_SYMBOL(cfg80211_testmode_alloc_reply_skb);

/*
 * Close the TESTDATA nest / genl header stashed in skb->cb and send
 * the reply to the requesting socket.  Consumes the skb either way.
 */
int cfg80211_testmode_reply(struct sk_buff *skb)
{
	struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0];
	void *hdr = ((void **)skb->cb)[1];
	struct nlattr *data = ((void **)skb->cb)[2];

	if (WARN_ON(!rdev->testmode_info)) {
		kfree_skb(skb);
		return -EINVAL;
	}

	nla_nest_end(skb, data);
	genlmsg_end(skb, hdr);
	return genlmsg_reply(skb, rdev->testmode_info);
}
EXPORT_SYMBOL(cfg80211_testmode_reply);

/* Allocate an skb for an unsolicited testmode multicast event. */
struct sk_buff *cfg80211_testmode_alloc_event_skb(struct wiphy *wiphy,
						  int approxlen, gfp_t gfp)
{
	struct cfg80211_registered_device *rdev = wiphy_to_dev(wiphy);

	return __cfg80211_testmode_alloc_skb(rdev, approxlen, 0, 0, gfp);
}
EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb);

/* Finish and multicast a testmode event skb to the "testmode" group. */
void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
{
	void *hdr = ((void **)skb->cb)[1];
	struct nlattr *data = ((void **)skb->cb)[2];

	nla_nest_end(skb, data);
	genlmsg_end(skb, hdr);
	genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp);
}
EXPORT_SYMBOL(cfg80211_testmode_event);
#endif

/*
 * NL80211_CMD_CONNECT handler: build cfg80211_connect_params from the
 * netlink attributes and start an SME connection.
 */
static int nl80211_connect(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct cfg80211_connect_params connect;
	struct wiphy *wiphy;
	struct cfg80211_cached_keys *connkeys = NULL;
	int err;

	memset(&connect, 0, sizeof(connect));

	if (!is_valid_ie_attr(info->attrs[NL80211_ATTR_IE]))
		return -EINVAL;

	if (!info->attrs[NL80211_ATTR_SSID] ||
	    !nla_len(info->attrs[NL80211_ATTR_SSID]))
		return -EINVAL;

	if (info->attrs[NL80211_ATTR_AUTH_TYPE]) {
		connect.auth_type =
			nla_get_u32(info->attrs[NL80211_ATTR_AUTH_TYPE]);
		if (!nl80211_valid_auth_type(connect.auth_type))
			return -EINVAL;
	} else
		connect.auth_type = NL80211_AUTHTYPE_AUTOMATIC;

	/* flag attribute: non-NULL pointer converts to "true" */
	connect.privacy = info->attrs[NL80211_ATTR_PRIVACY];

	err = nl80211_crypto_settings(rdev, info, &connect.crypto,
				      NL80211_MAX_NR_CIPHER_SUITES);
	if (err)
		return err;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
		return -EOPNOTSUPP;

	wiphy = &rdev->wiphy;

	if (info->attrs[NL80211_ATTR_MAC])
		connect.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]);
	connect.ssid = nla_data(info->attrs[NL80211_ATTR_SSID]);
	connect.ssid_len = nla_len(info->attrs[NL80211_ATTR_SSID]);

	if (info->attrs[NL80211_ATTR_IE]) {
		connect.ie = nla_data(info->attrs[NL80211_ATTR_IE]);
		connect.ie_len = nla_len(info->attrs[NL80211_ATTR_IE]);
	}

	if (info->attrs[NL80211_ATTR_WIPHY_FREQ]) {
		connect.channel =
			ieee80211_get_channel(wiphy,
			    nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]));
		if (!connect.channel ||
		    connect.channel->flags & IEEE80211_CHAN_DISABLED)
			return -EINVAL;
	}

	if (connect.privacy && info->attrs[NL80211_ATTR_KEYS]) {
		connkeys = nl80211_parse_connkeys(rdev,
					info->attrs[NL80211_ATTR_KEYS]);
		if (IS_ERR(connkeys))
			return PTR_ERR(connkeys);
	}

	/* on success, ownership of connkeys passes to cfg80211 */
	err = cfg80211_connect(rdev, dev, &connect, connkeys);
	if (err)
		kfree(connkeys);
	return err;
}

/*
 * NL80211_CMD_DISCONNECT handler; defaults the reason code to
 * DEAUTH_LEAVING when none is supplied.
 */
static int nl80211_disconnect(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	u16 reason;

	if (!info->attrs[NL80211_ATTR_REASON_CODE])
		reason = WLAN_REASON_DEAUTH_LEAVING;
	else
		reason = nla_get_u16(info->attrs[NL80211_ATTR_REASON_CODE]);

	/* reason 0 is reserved/invalid per 802.11 */
	if (reason == 0)
		return -EINVAL;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT)
		return -EOPNOTSUPP;

	return cfg80211_disconnect(rdev, dev, reason, true);
}

static int
nl80211_wiphy_netns(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net *net; int err; u32 pid; if (!info->attrs[NL80211_ATTR_PID]) return -EINVAL; pid = nla_get_u32(info->attrs[NL80211_ATTR_PID]); net = get_net_ns_by_pid(pid); if (IS_ERR(net)) return PTR_ERR(net); err = 0; /* check if anything to do */ if (!net_eq(wiphy_net(&rdev->wiphy), net)) err = cfg80211_switch_netns(rdev, net); put_net(net); return err; } static int nl80211_setdel_pmksa(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; int (*rdev_ops)(struct wiphy *wiphy, struct net_device *dev, struct cfg80211_pmksa *pmksa) = NULL; struct net_device *dev = info->user_ptr[1]; struct cfg80211_pmksa pmksa; memset(&pmksa, 0, sizeof(struct cfg80211_pmksa)); if (!info->attrs[NL80211_ATTR_MAC]) return -EINVAL; if (!info->attrs[NL80211_ATTR_PMKID]) return -EINVAL; pmksa.pmkid = nla_data(info->attrs[NL80211_ATTR_PMKID]); pmksa.bssid = nla_data(info->attrs[NL80211_ATTR_MAC]); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; switch (info->genlhdr->cmd) { case NL80211_CMD_SET_PMKSA: rdev_ops = rdev->ops->set_pmksa; break; case NL80211_CMD_DEL_PMKSA: rdev_ops = rdev->ops->del_pmksa; break; default: WARN_ON(1); break; } if (!rdev_ops) return -EOPNOTSUPP; return rdev_ops(&rdev->wiphy, dev, &pmksa); } static int nl80211_flush_pmksa(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT) return -EOPNOTSUPP; if (!rdev->ops->flush_pmksa) return -EOPNOTSUPP; return rdev->ops->flush_pmksa(&rdev->wiphy, dev); } static int nl80211_remain_on_channel(struct sk_buff *skb, struct genl_info *info) { struct 
cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct ieee80211_channel *chan; struct sk_buff *msg; void *hdr; u64 cookie; enum nl80211_channel_type channel_type = NL80211_CHAN_NO_HT; u32 freq, duration; int err; if (!info->attrs[NL80211_ATTR_WIPHY_FREQ] || !info->attrs[NL80211_ATTR_DURATION]) return -EINVAL; duration = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); /* * We should be on that channel for at least one jiffie, * and more than 5 seconds seems excessive. */ if (!duration || !msecs_to_jiffies(duration) || duration > rdev->wiphy.max_remain_on_channel_duration) return -EINVAL; if (!rdev->ops->remain_on_channel) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { channel_type = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); if (channel_type != NL80211_CHAN_NO_HT && channel_type != NL80211_CHAN_HT20 && channel_type != NL80211_CHAN_HT40PLUS && channel_type != NL80211_CHAN_HT40MINUS) return -EINVAL; } freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); chan = rdev_freq_to_chan(rdev, freq, channel_type); if (chan == NULL) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, NL80211_CMD_REMAIN_ON_CHANNEL); if (IS_ERR(hdr)) { err = PTR_ERR(hdr); goto free_msg; } err = rdev->ops->remain_on_channel(&rdev->wiphy, dev, chan, channel_type, duration, &cookie); if (err) goto free_msg; NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int nl80211_cancel_remain_on_channel(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u64 cookie; if (!info->attrs[NL80211_ATTR_COOKIE]) return -EINVAL; if (!rdev->ops->cancel_remain_on_channel) return -EOPNOTSUPP; cookie 
= nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);

	return rdev->ops->cancel_remain_on_channel(&rdev->wiphy, dev, cookie);
}

/*
 * Convert a list of 802.11 rates (in 0.5 Mb/s units, basic-rate bit
 * masked off) into a bitmap indexed by the band's bitrate table.
 * Returns 0 if any requested rate is not supported by the band.
 */
static u32 rateset_to_mask(struct ieee80211_supported_band *sband,
			   u8 *rates, u8 rates_len)
{
	u8 i;
	u32 mask = 0;

	for (i = 0; i < rates_len; i++) {
		/* rates[i] is in 0.5 Mb/s units; bitrate is in 0.1 Mb/s */
		int rate = (rates[i] & 0x7f) * 5;
		int ridx;
		for (ridx = 0; ridx < sband->n_bitrates; ridx++) {
			struct ieee80211_rate *srate =
				&sband->bitrates[ridx];
			if (rate == srate->bitrate) {
				mask |= 1 << ridx;
				break;
			}
		}
		if (ridx == sband->n_bitrates)
			return 0; /* rate not found */
	}

	return mask;
}

/* policy for the nested per-band NL80211_ATTR_TX_RATES attribute */
static const struct nla_policy
nl80211_txattr_policy[NL80211_TXRATE_MAX + 1] = {
	[NL80211_TXRATE_LEGACY] = { .type = NLA_BINARY,
				    .len = NL80211_MAX_SUPP_RATES },
};

/*
 * NL80211_CMD_SET_TX_BITRATE_MASK handler: start from "all rates
 * enabled" per band, then restrict according to the nested attribute.
 */
static int nl80211_set_tx_bitrate_mask(struct sk_buff *skb,
				       struct genl_info *info)
{
	struct nlattr *tb[NL80211_TXRATE_MAX + 1];
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct cfg80211_bitrate_mask mask;
	int rem, i;
	struct net_device *dev = info->user_ptr[1];
	struct nlattr *tx_rates;
	struct ieee80211_supported_band *sband;

	if (info->attrs[NL80211_ATTR_TX_RATES] == NULL)
		return -EINVAL;

	if (!rdev->ops->set_bitrate_mask)
		return -EOPNOTSUPP;

	memset(&mask, 0, sizeof(mask));
	/* Default to all rates enabled */
	for (i = 0; i < IEEE80211_NUM_BANDS; i++) {
		sband = rdev->wiphy.bands[i];
		mask.control[i].legacy =
			sband ? (1 << sband->n_bitrates) - 1 : 0;
	}

	/*
	 * The nested attribute uses enum nl80211_band as the index. This maps
	 * directly to the enum ieee80211_band values used in cfg80211.
*/ nla_for_each_nested(tx_rates, info->attrs[NL80211_ATTR_TX_RATES], rem) { enum ieee80211_band band = nla_type(tx_rates); if (band < 0 || band >= IEEE80211_NUM_BANDS) return -EINVAL; sband = rdev->wiphy.bands[band]; if (sband == NULL) return -EINVAL; nla_parse(tb, NL80211_TXRATE_MAX, nla_data(tx_rates), nla_len(tx_rates), nl80211_txattr_policy); if (tb[NL80211_TXRATE_LEGACY]) { mask.control[band].legacy = rateset_to_mask( sband, nla_data(tb[NL80211_TXRATE_LEGACY]), nla_len(tb[NL80211_TXRATE_LEGACY])); if (mask.control[band].legacy == 0) return -EINVAL; } } return rdev->ops->set_bitrate_mask(&rdev->wiphy, dev, NULL, &mask); } static int nl80211_register_mgmt(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; u16 frame_type = IEEE80211_FTYPE_MGMT | IEEE80211_STYPE_ACTION; if (!info->attrs[NL80211_ATTR_FRAME_MATCH]) return -EINVAL; if (info->attrs[NL80211_ATTR_FRAME_TYPE]) frame_type = nla_get_u16(info->attrs[NL80211_ATTR_FRAME_TYPE]); if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; /* not much point in registering if we can't reply */ if (!rdev->ops->mgmt_tx) return -EOPNOTSUPP; return cfg80211_mlme_register_mgmt(dev->ieee80211_ptr, info->snd_pid, frame_type, nla_data(info->attrs[NL80211_ATTR_FRAME_MATCH]), nla_len(info->attrs[NL80211_ATTR_FRAME_MATCH])); } static int nl80211_tx_mgmt(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct ieee80211_channel *chan; enum nl80211_channel_type channel_type = 
NL80211_CHAN_NO_HT; bool channel_type_valid = false; u32 freq; int err; void *hdr; u64 cookie; struct sk_buff *msg; unsigned int wait = 0; bool offchan; if (!info->attrs[NL80211_ATTR_FRAME] || !info->attrs[NL80211_ATTR_WIPHY_FREQ]) return -EINVAL; if (!rdev->ops->mgmt_tx) return -EOPNOTSUPP; if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_MESH_POINT && dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO) return -EOPNOTSUPP; if (info->attrs[NL80211_ATTR_DURATION]) { if (!rdev->ops->mgmt_tx_cancel_wait) return -EINVAL; wait = nla_get_u32(info->attrs[NL80211_ATTR_DURATION]); } if (info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]) { channel_type = nla_get_u32( info->attrs[NL80211_ATTR_WIPHY_CHANNEL_TYPE]); if (channel_type != NL80211_CHAN_NO_HT && channel_type != NL80211_CHAN_HT20 && channel_type != NL80211_CHAN_HT40PLUS && channel_type != NL80211_CHAN_HT40MINUS) return -EINVAL; channel_type_valid = true; } offchan = info->attrs[NL80211_ATTR_OFFCHANNEL_TX_OK]; freq = nla_get_u32(info->attrs[NL80211_ATTR_WIPHY_FREQ]); chan = rdev_freq_to_chan(rdev, freq, channel_type); if (chan == NULL) return -EINVAL; msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL); if (!msg) return -ENOMEM; hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0, NL80211_CMD_FRAME); if (IS_ERR(hdr)) { err = PTR_ERR(hdr); goto free_msg; } err = cfg80211_mlme_mgmt_tx(rdev, dev, chan, offchan, channel_type, channel_type_valid, wait, nla_data(info->attrs[NL80211_ATTR_FRAME]), nla_len(info->attrs[NL80211_ATTR_FRAME]), &cookie); if (err) goto free_msg; NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie); genlmsg_end(msg, hdr); return genlmsg_reply(msg, info); nla_put_failure: err = -ENOBUFS; free_msg: nlmsg_free(msg); return err; } static int 
nl80211_tx_mgmt_cancel_wait(struct sk_buff *skb, struct genl_info *info)
{
	/* NL80211_CMD_FRAME_WAIT_CANCEL: abort a pending mgmt-TX wait
	 * period, identified by the cookie from nl80211_tx_mgmt(). */
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	u64 cookie;

	if (!info->attrs[NL80211_ATTR_COOKIE])
		return -EINVAL;

	if (!rdev->ops->mgmt_tx_cancel_wait)
		return -EOPNOTSUPP;

	if (dev->ieee80211_ptr->iftype != NL80211_IFTYPE_STATION &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_ADHOC &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_CLIENT &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_AP_VLAN &&
	    dev->ieee80211_ptr->iftype != NL80211_IFTYPE_P2P_GO)
		return -EOPNOTSUPP;

	cookie = nla_get_u64(info->attrs[NL80211_ATTR_COOKIE]);

	return rdev->ops->mgmt_tx_cancel_wait(&rdev->wiphy, dev, cookie);
}

/*
 * NL80211_CMD_SET_POWER_SAVE handler: toggle powersave and mirror the
 * result in wdev->ps on success.
 */
static int nl80211_set_power_save(struct sk_buff *skb,
				  struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct wireless_dev *wdev;
	struct net_device *dev = info->user_ptr[1];
	u8 ps_state;
	bool state;
	int err;

	if (!info->attrs[NL80211_ATTR_PS_STATE])
		return -EINVAL;

	ps_state = nla_get_u32(info->attrs[NL80211_ATTR_PS_STATE]);

	if (ps_state != NL80211_PS_DISABLED &&
	    ps_state != NL80211_PS_ENABLED)
		return -EINVAL;

	wdev = dev->ieee80211_ptr;

	if (!rdev->ops->set_power_mgmt)
		return -EOPNOTSUPP;

	state = (ps_state == NL80211_PS_ENABLED) ? true : false;

	/* no-op if the requested state is already set */
	if (state == wdev->ps)
		return 0;

	err = rdev->ops->set_power_mgmt(wdev->wiphy, dev, state,
					wdev->ps_timeout);
	if (!err)
		wdev->ps = state;
	return err;
}

/*
 * NL80211_CMD_GET_POWER_SAVE handler: report wdev->ps.
 * NOTE(review): the capability gate tests ->set_power_mgmt (not a
 * getter) — apparently the setter's presence implies PS support;
 * confirm before changing.
 */
static int nl80211_get_power_save(struct sk_buff *skb,
				  struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	enum nl80211_ps_state ps_state;
	struct wireless_dev *wdev;
	struct net_device *dev = info->user_ptr[1];
	struct sk_buff *msg;
	void *hdr;
	int err;

	wdev = dev->ieee80211_ptr;

	if (!rdev->ops->set_power_mgmt)
		return -EOPNOTSUPP;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
			     NL80211_CMD_GET_POWER_SAVE);
	if (!hdr) {
		err = -ENOBUFS;
		goto free_msg;
	}

	if (wdev->ps)
		ps_state = NL80211_PS_ENABLED;
	else
		ps_state = NL80211_PS_DISABLED;

	NLA_PUT_U32(msg, NL80211_ATTR_PS_STATE, ps_state);

	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);

 nla_put_failure:
	err = -ENOBUFS;
 free_msg:
	nlmsg_free(msg);
	return err;
}

/* policy for the nested NL80211_ATTR_CQM attribute */
static struct nla_policy
nl80211_attr_cqm_policy[NL80211_ATTR_CQM_MAX + 1] __read_mostly = {
	[NL80211_ATTR_CQM_RSSI_THOLD] = { .type = NLA_U32 },
	[NL80211_ATTR_CQM_RSSI_HYST] = { .type = NLA_U32 },
	[NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT] = { .type = NLA_U32 },
};

/*
 * Configure the RSSI connection-quality-monitor threshold (dBm,
 * must be <= 0) and hysteresis for a station-mode interface.
 */
static int nl80211_set_cqm_rssi(struct genl_info *info,
				s32 threshold, u32 hysteresis)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct wireless_dev *wdev;
	struct net_device *dev = info->user_ptr[1];

	if (threshold > 0)
		return -EINVAL;

	wdev = dev->ieee80211_ptr;

	if (!rdev->ops->set_cqm_rssi_config)
		return -EOPNOTSUPP;

	if (wdev->iftype != NL80211_IFTYPE_STATION &&
	    wdev->iftype != NL80211_IFTYPE_P2P_CLIENT)
		return -EOPNOTSUPP;

	return rdev->ops->set_cqm_rssi_config(wdev->wiphy, dev,
					      threshold, hysteresis);
}

/* NL80211_CMD_SET_CQM handler: unpack the nested CQM attribute. */
static int nl80211_set_cqm(struct sk_buff *skb, struct genl_info *info)
{
	struct nlattr *attrs[NL80211_ATTR_CQM_MAX + 1];
	struct nlattr *cqm;
	int err;

	cqm = info->attrs[NL80211_ATTR_CQM];
	if (!cqm) {
		err
= -EINVAL;
		goto out;
	}

	err = nla_parse_nested(attrs, NL80211_ATTR_CQM_MAX, cqm,
			       nl80211_attr_cqm_policy);
	if (err)
		goto out;

	/* only the RSSI threshold+hysteresis pair is supported here */
	if (attrs[NL80211_ATTR_CQM_RSSI_THOLD] &&
	    attrs[NL80211_ATTR_CQM_RSSI_HYST]) {
		s32 threshold;
		u32 hysteresis;
		threshold = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_THOLD]);
		hysteresis = nla_get_u32(attrs[NL80211_ATTR_CQM_RSSI_HYST]);
		err = nl80211_set_cqm_rssi(info, threshold, hysteresis);
	} else
		err = -EINVAL;

out:
	return err;
}

/*
 * NL80211_CMD_JOIN_MESH handler: start from the default mesh config
 * and setup, override from attributes, then join.
 */
static int nl80211_join_mesh(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];
	struct mesh_config cfg;
	struct mesh_setup setup;
	int err;

	/* start with default */
	memcpy(&cfg, &default_mesh_config, sizeof(cfg));
	memcpy(&setup, &default_mesh_setup, sizeof(setup));

	if (info->attrs[NL80211_ATTR_MESH_CONFIG]) {
		/* and parse parameters if given */
		err = nl80211_parse_mesh_config(info, &cfg, NULL);
		if (err)
			return err;
	}

	if (!info->attrs[NL80211_ATTR_MESH_ID] ||
	    !nla_len(info->attrs[NL80211_ATTR_MESH_ID]))
		return -EINVAL;

	setup.mesh_id = nla_data(info->attrs[NL80211_ATTR_MESH_ID]);
	setup.mesh_id_len = nla_len(info->attrs[NL80211_ATTR_MESH_ID]);

	if (info->attrs[NL80211_ATTR_MESH_SETUP]) {
		/* parse additional setup parameters if given */
		err = nl80211_parse_mesh_setup(info, &setup);
		if (err)
			return err;
	}

	return cfg80211_join_mesh(rdev, dev, &setup, &cfg);
}

/* NL80211_CMD_LEAVE_MESH handler. */
static int nl80211_leave_mesh(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct net_device *dev = info->user_ptr[1];

	return cfg80211_leave_mesh(rdev, dev);
}

/*
 * NL80211_CMD_GET_WOWLAN handler: dump the currently configured
 * wake-on-wireless triggers (if any) back to userspace.
 */
static int nl80211_get_wowlan(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct sk_buff *msg;
	void *hdr;

	/* device advertises no WoWLAN capability at all */
	if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
		return -EOPNOTSUPP;

	msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (!msg)
		return -ENOMEM;

	hdr = nl80211hdr_put(msg, info->snd_pid, info->snd_seq, 0,
			     NL80211_CMD_GET_WOWLAN);
	if (!hdr)
		goto nla_put_failure;

	if (rdev->wowlan) {
		struct nlattr *nl_wowlan;

		nl_wowlan = nla_nest_start(msg, NL80211_ATTR_WOWLAN_TRIGGERS);
		if (!nl_wowlan)
			goto nla_put_failure;

		if (rdev->wowlan->any)
			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_ANY);
		if (rdev->wowlan->disconnect)
			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_DISCONNECT);
		if (rdev->wowlan->magic_pkt)
			NLA_PUT_FLAG(msg, NL80211_WOWLAN_TRIG_MAGIC_PKT);
		if (rdev->wowlan->gtk_rekey_failure)
			NLA_PUT_FLAG(msg,
				     NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE);
		if (rdev->wowlan->eap_identity_req)
			NLA_PUT_FLAG(msg,
				     NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST);
		if (rdev->wowlan->four_way_handshake)
			NLA_PUT_FLAG(msg,
				     NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE);
		if (rdev->wowlan->rfkill_release)
			NLA_PUT_FLAG(msg,
				     NL80211_WOWLAN_TRIG_RFKILL_RELEASE);
		if (rdev->wowlan->n_patterns) {
			struct nlattr *nl_pats, *nl_pat;
			int i, pat_len;

			nl_pats = nla_nest_start(msg,
					NL80211_WOWLAN_TRIG_PKT_PATTERN);
			if (!nl_pats)
				goto nla_put_failure;

			for (i = 0; i < rdev->wowlan->n_patterns; i++) {
				nl_pat = nla_nest_start(msg, i + 1);
				if (!nl_pat)
					goto nla_put_failure;
				pat_len =
				  rdev->wowlan->patterns[i].pattern_len;
				/* mask is one bit per pattern byte */
				NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_MASK,
					DIV_ROUND_UP(pat_len, 8),
					rdev->wowlan->patterns[i].mask);
				NLA_PUT(msg, NL80211_WOWLAN_PKTPAT_PATTERN,
					pat_len,
					rdev->wowlan->patterns[i].pattern);
				nla_nest_end(msg, nl_pat);
			}
			nla_nest_end(msg, nl_pats);
		}

		nla_nest_end(msg, nl_wowlan);
	}

	genlmsg_end(msg, hdr);
	return genlmsg_reply(msg, info);

nla_put_failure:
	nlmsg_free(msg);
	return -ENOBUFS;
}

/*
 * NL80211_CMD_SET_WOWLAN handler: validate each requested trigger
 * against the device's advertised wowlan support, build a new trigger
 * set and swap it in (absence of the TRIGGERS attr clears everything).
 */
static int nl80211_set_wowlan(struct sk_buff *skb, struct genl_info *info)
{
	struct cfg80211_registered_device *rdev = info->user_ptr[0];
	struct nlattr *tb[NUM_NL80211_WOWLAN_TRIG];
	struct cfg80211_wowlan no_triggers = {};
	struct cfg80211_wowlan new_triggers = {};
	struct wiphy_wowlan_support *wowlan = &rdev->wiphy.wowlan;
	int err, i;

	if (!rdev->wiphy.wowlan.flags && !rdev->wiphy.wowlan.n_patterns)
		return -EOPNOTSUPP;

	if (!info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS])
		goto no_triggers;

	err = nla_parse(tb, MAX_NL80211_WOWLAN_TRIG,
			nla_data(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
			nla_len(info->attrs[NL80211_ATTR_WOWLAN_TRIGGERS]),
			nl80211_wowlan_policy);
	if (err)
		return err;

	if (tb[NL80211_WOWLAN_TRIG_ANY]) {
		if (!(wowlan->flags & WIPHY_WOWLAN_ANY))
			return -EINVAL;
		new_triggers.any = true;
	}

	if (tb[NL80211_WOWLAN_TRIG_DISCONNECT]) {
		if (!(wowlan->flags & WIPHY_WOWLAN_DISCONNECT))
			return -EINVAL;
		new_triggers.disconnect = true;
	}

	if (tb[NL80211_WOWLAN_TRIG_MAGIC_PKT]) {
		if (!(wowlan->flags & WIPHY_WOWLAN_MAGIC_PKT))
			return -EINVAL;
		new_triggers.magic_pkt = true;
	}

	/* GTK_REKEY_SUPPORTED is read-only capability info */
	if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_SUPPORTED])
		return -EINVAL;

	if (tb[NL80211_WOWLAN_TRIG_GTK_REKEY_FAILURE]) {
		if (!(wowlan->flags & WIPHY_WOWLAN_GTK_REKEY_FAILURE))
			return -EINVAL;
		new_triggers.gtk_rekey_failure = true;
	}

	if (tb[NL80211_WOWLAN_TRIG_EAP_IDENT_REQUEST]) {
		if (!(wowlan->flags & WIPHY_WOWLAN_EAP_IDENTITY_REQ))
			return -EINVAL;
		new_triggers.eap_identity_req = true;
	}

	if (tb[NL80211_WOWLAN_TRIG_4WAY_HANDSHAKE]) {
		if (!(wowlan->flags & WIPHY_WOWLAN_4WAY_HANDSHAKE))
			return -EINVAL;
		new_triggers.four_way_handshake = true;
	}

	if (tb[NL80211_WOWLAN_TRIG_RFKILL_RELEASE]) {
		if (!(wowlan->flags & WIPHY_WOWLAN_RFKILL_RELEASE))
			return -EINVAL;
		new_triggers.rfkill_release = true;
	}

	if (tb[NL80211_WOWLAN_TRIG_PKT_PATTERN]) {
		struct nlattr *pat;
		int n_patterns = 0;
		int rem, pat_len, mask_len;
		struct nlattr *pat_tb[NUM_NL80211_WOWLAN_PKTPAT];

		/* first pass: count patterns against the device limit */
		nla_for_each_nested(pat,
				    tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
				    rem)
			n_patterns++;
		if (n_patterns > wowlan->n_patterns)
			return -EINVAL;

		new_triggers.patterns = kcalloc(n_patterns,
					sizeof(new_triggers.patterns[0]),
					GFP_KERNEL);
		if (!new_triggers.patterns)
			return -ENOMEM;

		new_triggers.n_patterns = n_patterns;
		i = 0;

		/* second pass: validate and copy each mask+pattern */
		nla_for_each_nested(pat,
				    tb[NL80211_WOWLAN_TRIG_PKT_PATTERN],
				    rem) {
			nla_parse(pat_tb, MAX_NL80211_WOWLAN_PKTPAT,
				  nla_data(pat), nla_len(pat), NULL);
			err = -EINVAL;
			if
(!pat_tb[NL80211_WOWLAN_PKTPAT_MASK] || !pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]) goto error; pat_len = nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]); mask_len = DIV_ROUND_UP(pat_len, 8); if (nla_len(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]) != mask_len) goto error; if (pat_len > wowlan->pattern_max_len || pat_len < wowlan->pattern_min_len) goto error; new_triggers.patterns[i].mask = kmalloc(mask_len + pat_len, GFP_KERNEL); if (!new_triggers.patterns[i].mask) { err = -ENOMEM; goto error; } new_triggers.patterns[i].pattern = new_triggers.patterns[i].mask + mask_len; memcpy(new_triggers.patterns[i].mask, nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_MASK]), mask_len); new_triggers.patterns[i].pattern_len = pat_len; memcpy(new_triggers.patterns[i].pattern, nla_data(pat_tb[NL80211_WOWLAN_PKTPAT_PATTERN]), pat_len); i++; } } if (memcmp(&new_triggers, &no_triggers, sizeof(new_triggers))) { struct cfg80211_wowlan *ntrig; ntrig = kmemdup(&new_triggers, sizeof(new_triggers), GFP_KERNEL); if (!ntrig) { err = -ENOMEM; goto error; } cfg80211_rdev_free_wowlan(rdev); rdev->wowlan = ntrig; } else { no_triggers: cfg80211_rdev_free_wowlan(rdev); rdev->wowlan = NULL; } return 0; error: for (i = 0; i < new_triggers.n_patterns; i++) kfree(new_triggers.patterns[i].mask); kfree(new_triggers.patterns); return err; } static int nl80211_set_rekey_data(struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev = info->user_ptr[0]; struct net_device *dev = info->user_ptr[1]; struct wireless_dev *wdev = dev->ieee80211_ptr; struct nlattr *tb[NUM_NL80211_REKEY_DATA]; struct cfg80211_gtk_rekey_data rekey_data; int err; if (!info->attrs[NL80211_ATTR_REKEY_DATA]) return -EINVAL; err = nla_parse(tb, MAX_NL80211_REKEY_DATA, nla_data(info->attrs[NL80211_ATTR_REKEY_DATA]), nla_len(info->attrs[NL80211_ATTR_REKEY_DATA]), nl80211_rekey_policy); if (err) return err; if (nla_len(tb[NL80211_REKEY_DATA_REPLAY_CTR]) != NL80211_REPLAY_CTR_LEN) return -ERANGE; if 
(nla_len(tb[NL80211_REKEY_DATA_KEK]) != NL80211_KEK_LEN) return -ERANGE; if (nla_len(tb[NL80211_REKEY_DATA_KCK]) != NL80211_KCK_LEN) return -ERANGE; memcpy(rekey_data.kek, nla_data(tb[NL80211_REKEY_DATA_KEK]), NL80211_KEK_LEN); memcpy(rekey_data.kck, nla_data(tb[NL80211_REKEY_DATA_KCK]), NL80211_KCK_LEN); memcpy(rekey_data.replay_ctr, nla_data(tb[NL80211_REKEY_DATA_REPLAY_CTR]), NL80211_REPLAY_CTR_LEN); wdev_lock(wdev); if (!wdev->current_bss) { err = -ENOTCONN; goto out; } if (!rdev->ops->set_rekey_data) { err = -EOPNOTSUPP; goto out; } err = rdev->ops->set_rekey_data(&rdev->wiphy, dev, &rekey_data); out: wdev_unlock(wdev); return err; } #define NL80211_FLAG_NEED_WIPHY 0x01 #define NL80211_FLAG_NEED_NETDEV 0x02 #define NL80211_FLAG_NEED_RTNL 0x04 #define NL80211_FLAG_CHECK_NETDEV_UP 0x08 #define NL80211_FLAG_NEED_NETDEV_UP (NL80211_FLAG_NEED_NETDEV |\ NL80211_FLAG_CHECK_NETDEV_UP) static int nl80211_pre_doit(struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { struct cfg80211_registered_device *rdev; struct net_device *dev; int err; bool rtnl = ops->internal_flags & NL80211_FLAG_NEED_RTNL; if (rtnl) rtnl_lock(); if (ops->internal_flags & NL80211_FLAG_NEED_WIPHY) { rdev = cfg80211_get_dev_from_info(info); if (IS_ERR(rdev)) { if (rtnl) rtnl_unlock(); return PTR_ERR(rdev); } info->user_ptr[0] = rdev; } else if (ops->internal_flags & NL80211_FLAG_NEED_NETDEV) { err = get_rdev_dev_by_info_ifindex(info, &rdev, &dev); if (err) { if (rtnl) rtnl_unlock(); return err; } if (ops->internal_flags & NL80211_FLAG_CHECK_NETDEV_UP && !netif_running(dev)) { cfg80211_unlock_rdev(rdev); dev_put(dev); if (rtnl) rtnl_unlock(); return -ENETDOWN; } info->user_ptr[0] = rdev; info->user_ptr[1] = dev; } return 0; } static void nl80211_post_doit(struct genl_ops *ops, struct sk_buff *skb, struct genl_info *info) { if (info->user_ptr[0]) cfg80211_unlock_rdev(info->user_ptr[0]); if (info->user_ptr[1]) dev_put(info->user_ptr[1]); if (ops->internal_flags & NL80211_FLAG_NEED_RTNL) 
/* NOTE(review): the next two lines are the tail of a handler defined above
 * this chunk (it releases the RTNL taken by the pre-doit hook). */
rtnl_unlock();
}

/*
 * Table of every generic-netlink command nl80211 exports.  For each entry:
 *  - .doit/.dumpit  handler(s) for the command,
 *  - .policy        attribute validation policy (always nl80211_policy),
 *  - .flags         GENL_ADMIN_PERM where the caller must be privileged,
 *  - .internal_flags NL80211_FLAG_* consumed by the pre-doit hook to look up
 *    the wiphy/netdev (optionally requiring it to be UP) and take the RTNL.
 */
static struct genl_ops nl80211_ops[] = {
    { .cmd = NL80211_CMD_GET_WIPHY,
      .doit = nl80211_get_wiphy, .dumpit = nl80211_dump_wiphy,
      .policy = nl80211_policy,
      /* can be retrieved by unprivileged users */
      .internal_flags = NL80211_FLAG_NEED_WIPHY, },
    { .cmd = NL80211_CMD_SET_WIPHY,
      .doit = nl80211_set_wiphy, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_GET_INTERFACE,
      .doit = nl80211_get_interface, .dumpit = nl80211_dump_interface,
      .policy = nl80211_policy,
      /* can be retrieved by unprivileged users */
      .internal_flags = NL80211_FLAG_NEED_NETDEV, },
    { .cmd = NL80211_CMD_SET_INTERFACE,
      .doit = nl80211_set_interface, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_NEW_INTERFACE,
      .doit = nl80211_new_interface, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_DEL_INTERFACE,
      .doit = nl80211_del_interface, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_GET_KEY,
      .doit = nl80211_get_key, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_KEY,
      .doit = nl80211_set_key, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_NEW_KEY,
      .doit = nl80211_new_key, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_DEL_KEY,
      .doit = nl80211_del_key, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    /* SET_BEACON and NEW_BEACON share one add-or-set handler. */
    { .cmd = NL80211_CMD_SET_BEACON,
      .policy = nl80211_policy, .flags = GENL_ADMIN_PERM,
      .doit = nl80211_addset_beacon,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_NEW_BEACON,
      .policy = nl80211_policy, .flags = GENL_ADMIN_PERM,
      .doit = nl80211_addset_beacon,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_DEL_BEACON,
      .policy = nl80211_policy, .flags = GENL_ADMIN_PERM,
      .doit = nl80211_del_beacon,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_GET_STATION,
      .doit = nl80211_get_station, .dumpit = nl80211_dump_station,
      .policy = nl80211_policy,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_STATION,
      .doit = nl80211_set_station, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_NEW_STATION,
      .doit = nl80211_new_station, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_DEL_STATION,
      .doit = nl80211_del_station, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_GET_MPATH,
      .doit = nl80211_get_mpath, .dumpit = nl80211_dump_mpath,
      .policy = nl80211_policy, .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_MPATH,
      .doit = nl80211_set_mpath, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_NEW_MPATH,
      .doit = nl80211_new_mpath, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_DEL_MPATH,
      .doit = nl80211_del_mpath, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_BSS,
      .doit = nl80211_set_bss, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_GET_REG,
      .doit = nl80211_get_reg, .policy = nl80211_policy,
      /* can be retrieved by unprivileged users */ },
    { .cmd = NL80211_CMD_SET_REG,
      .doit = nl80211_set_reg, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM, },
    { .cmd = NL80211_CMD_REQ_SET_REG,
      .doit = nl80211_req_set_reg, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM, },
    { .cmd = NL80211_CMD_GET_MESH_CONFIG,
      .doit = nl80211_get_mesh_config, .policy = nl80211_policy,
      /* can be retrieved by unprivileged users */
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_MESH_CONFIG,
      .doit = nl80211_update_mesh_config, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_TRIGGER_SCAN,
      .doit = nl80211_trigger_scan, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_GET_SCAN,
      .policy = nl80211_policy, .dumpit = nl80211_dump_scan, },
    { .cmd = NL80211_CMD_START_SCHED_SCAN,
      .doit = nl80211_start_sched_scan, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_STOP_SCHED_SCAN,
      .doit = nl80211_stop_sched_scan, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_AUTHENTICATE,
      .doit = nl80211_authenticate, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_ASSOCIATE,
      .doit = nl80211_associate, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_DEAUTHENTICATE,
      .doit = nl80211_deauthenticate, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_DISASSOCIATE,
      .doit = nl80211_disassociate, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_JOIN_IBSS,
      .doit = nl80211_join_ibss, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_LEAVE_IBSS,
      .doit = nl80211_leave_ibss, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
#ifdef CONFIG_NL80211_TESTMODE
    { .cmd = NL80211_CMD_TESTMODE,
      .doit = nl80211_testmode_do, .dumpit = nl80211_testmode_dump,
      .policy = nl80211_policy, .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, },
#endif
    { .cmd = NL80211_CMD_CONNECT,
      .doit = nl80211_connect, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_DISCONNECT,
      .doit = nl80211_disconnect, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_WIPHY_NETNS,
      .doit = nl80211_wiphy_netns, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_GET_SURVEY,
      .policy = nl80211_policy, .dumpit = nl80211_dump_survey, },
    /* SET_PMKSA and DEL_PMKSA share one set-or-delete handler. */
    { .cmd = NL80211_CMD_SET_PMKSA,
      .doit = nl80211_setdel_pmksa, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_DEL_PMKSA,
      .doit = nl80211_setdel_pmksa, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_FLUSH_PMKSA,
      .doit = nl80211_flush_pmksa, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_REMAIN_ON_CHANNEL,
      .doit = nl80211_remain_on_channel, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
      .doit = nl80211_cancel_remain_on_channel, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_TX_BITRATE_MASK,
      .doit = nl80211_set_tx_bitrate_mask, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_REGISTER_FRAME,
      .doit = nl80211_register_mgmt, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_FRAME,
      .doit = nl80211_tx_mgmt, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_FRAME_WAIT_CANCEL,
      .doit = nl80211_tx_mgmt_cancel_wait, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_POWER_SAVE,
      .doit = nl80211_set_power_save, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_GET_POWER_SAVE,
      .doit = nl80211_get_power_save, .policy = nl80211_policy,
      /* can be retrieved by unprivileged users */
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_CQM,
      .doit = nl80211_set_cqm, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_CHANNEL,
      .doit = nl80211_set_channel, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_WDS_PEER,
      .doit = nl80211_set_wds_peer, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_JOIN_MESH,
      .doit = nl80211_join_mesh, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_LEAVE_MESH,
      .doit = nl80211_leave_mesh, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_GET_WOWLAN,
      .doit = nl80211_get_wowlan, .policy = nl80211_policy,
      /* can be retrieved by unprivileged users */
      .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_WOWLAN,
      .doit = nl80211_set_wowlan, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_WIPHY | NL80211_FLAG_NEED_RTNL, },
    { .cmd = NL80211_CMD_SET_REKEY_OFFLOAD,
      .doit = nl80211_set_rekey_data, .policy = nl80211_policy,
      .flags = GENL_ADMIN_PERM,
      .internal_flags = NL80211_FLAG_NEED_NETDEV_UP | NL80211_FLAG_NEED_RTNL, },
};

static struct genl_multicast_group nl80211_mlme_mcgrp = {
    .name = "mlme",
};

/* multicast groups */
static struct genl_multicast_group nl80211_config_mcgrp = {
    .name = "config",
};
static struct genl_multicast_group nl80211_scan_mcgrp = {
    .name = "scan",
};
static struct genl_multicast_group nl80211_regulatory_mcgrp = {
    .name = "regulatory",
};

/* notification functions */

/*
 * Broadcast the (renamed) wiphy's full description to the "config"
 * multicast group in the wiphy's own network namespace.
 */
void nl80211_notify_dev_rename(struct cfg80211_registered_device *rdev)
{
    struct sk_buff *msg;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
    if (!msg)
        return;    /* best-effort notification: silently drop on OOM */

    if (nl80211_send_wiphy(msg, 0, 0, 0, rdev) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_config_mcgrp.id, GFP_KERNEL);
}

/*
 * Append the current scan request (SSIDs, frequencies, IEs, flags) to @msg.
 * Returns 0 on success, -ENOBUFS if the skb ran out of tailroom (the
 * NLA_PUT* macros jump to the nla_put_failure label on overflow).
 */
static int nl80211_add_scan_req(struct sk_buff *msg,
                                struct cfg80211_registered_device *rdev)
{
    struct cfg80211_scan_request *req = rdev->scan_req;
    struct nlattr *nest;
    int i;

    ASSERT_RDEV_LOCK(rdev);

    if (WARN_ON(!req))
        return 0;

    nest = nla_nest_start(msg, NL80211_ATTR_SCAN_SSIDS);
    if (!nest)
        goto nla_put_failure;
    /* the loop index doubles as the nested attribute type */
    for (i = 0; i < req->n_ssids; i++)
        NLA_PUT(msg, i, req->ssids[i].ssid_len, req->ssids[i].ssid);
    nla_nest_end(msg, nest);

    nest = nla_nest_start(msg, NL80211_ATTR_SCAN_FREQUENCIES);
    if (!nest)
        goto nla_put_failure;
    for (i = 0; i < req->n_channels; i++)
        NLA_PUT_U32(msg, i, req->channels[i]->center_freq);
    nla_nest_end(msg, nest);

    if (req->ie)
        NLA_PUT(msg, NL80211_ATTR_IE, req->ie_len, req->ie);

    NLA_PUT_U8(msg, NL80211_ATTR_SCAN_FLAGS, req->flags);

    return 0;
 nla_put_failure:
    return -ENOBUFS;
}

/*
 * Fill a scan event message (@cmd) identifying the wiphy and netdev and,
 * best-effort, the scan request itself.  Returns the genlmsg_end() result
 * or -1 if no header could be placed.
 */
static int nl80211_send_scan_msg(struct sk_buff *msg,
                                 struct cfg80211_registered_device *rdev,
                                 struct net_device *netdev,
                                 u32 pid, u32 seq, int flags,
                                 u32 cmd)
{
    void *hdr;

    hdr = nl80211hdr_put(msg, pid, seq, flags, cmd);
    if (!hdr)
        return -1;

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);

    /* ignore errors and send incomplete event anyway */
    nl80211_add_scan_req(msg, rdev);

    return genlmsg_end(msg, hdr);

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    return -EMSGSIZE;
}

/* Like nl80211_send_scan_msg() but without the scan-request payload. */
static int
nl80211_send_sched_scan_msg(struct sk_buff *msg,
                            struct cfg80211_registered_device *rdev,
                            struct net_device *netdev,
                            u32 pid, u32 seq, int flags, u32 cmd)
{
    void *hdr;

    hdr = nl80211hdr_put(msg, pid, seq, flags, cmd);
    if (!hdr)
        return -1;

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);

    return genlmsg_end(msg, hdr);

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    return -EMSGSIZE;
}

/* Multicast a TRIGGER_SCAN event to the "scan" group. */
void nl80211_send_scan_start(struct cfg80211_registered_device *rdev,
                             struct net_device *netdev)
{
    struct sk_buff *msg;

    msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!msg)
        return;

    if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
                              NL80211_CMD_TRIGGER_SCAN) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_scan_mcgrp.id, GFP_KERNEL);
}

/* Multicast a NEW_SCAN_RESULTS event to the "scan" group. */
void nl80211_send_scan_done(struct cfg80211_registered_device *rdev,
                            struct net_device *netdev)
{
    struct sk_buff *msg;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
    if (!msg)
        return;

    if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
                              NL80211_CMD_NEW_SCAN_RESULTS) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_scan_mcgrp.id, GFP_KERNEL);
}

/* Multicast a SCAN_ABORTED event to the "scan" group. */
void nl80211_send_scan_aborted(struct cfg80211_registered_device *rdev,
                               struct net_device *netdev)
{
    struct sk_buff *msg;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
    if (!msg)
        return;

    if (nl80211_send_scan_msg(msg, rdev, netdev, 0, 0, 0,
                              NL80211_CMD_SCAN_ABORTED) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_scan_mcgrp.id, GFP_KERNEL);
}

/* Multicast a SCHED_SCAN_RESULTS event to the "scan" group. */
void nl80211_send_sched_scan_results(struct cfg80211_registered_device *rdev,
                                     struct net_device *netdev)
{
    struct sk_buff *msg;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
    if (!msg)
        return;

    if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0,
                                    NL80211_CMD_SCHED_SCAN_RESULTS) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_scan_mcgrp.id, GFP_KERNEL);
}

/* Multicast an arbitrary sched-scan event (@cmd) to the "scan" group. */
void nl80211_send_sched_scan(struct cfg80211_registered_device *rdev,
                             struct net_device *netdev, u32 cmd)
{
    struct sk_buff *msg;

    msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!msg)
        return;

    if (nl80211_send_sched_scan_msg(msg, rdev, netdev, 0, 0, 0, cmd) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_scan_mcgrp.id, GFP_KERNEL);
}

/*
 * This can happen on global regulatory changes or device specific settings
 * based on custom world regulatory domains.
 *
 * Broadcasts a REG_CHANGE event across all network namespaces; the regdomain
 * type is inferred from the magic alpha2 codes "00" (world), "99" (custom
 * world) and "98" (intersection).
 */
void nl80211_send_reg_change_event(struct regulatory_request *request)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_CHANGE);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    /* Userspace can always count this one always being set */
    NLA_PUT_U8(msg, NL80211_ATTR_REG_INITIATOR, request->initiator);

    if (request->alpha2[0] == '0' && request->alpha2[1] == '0')
        NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
                   NL80211_REGDOM_TYPE_WORLD);
    else if (request->alpha2[0] == '9' && request->alpha2[1] == '9')
        NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
                   NL80211_REGDOM_TYPE_CUSTOM_WORLD);
    else if ((request->alpha2[0] == '9' && request->alpha2[1] == '8') ||
             request->intersect)
        NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
                   NL80211_REGDOM_TYPE_INTERSECTION);
    else {
        NLA_PUT_U8(msg, NL80211_ATTR_REG_TYPE,
                   NL80211_REGDOM_TYPE_COUNTRY);
        NLA_PUT_STRING(msg, NL80211_ATTR_REG_ALPHA2, request->alpha2);
    }

    if (wiphy_idx_valid(request->wiphy_idx))
        NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, request->wiphy_idx);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    rcu_read_lock();
    genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
                            GFP_ATOMIC);
    rcu_read_unlock();

    return;

nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/*
 * Common helper: wrap a raw MLME frame (@buf/@len) in an event of type
 * @cmd and multicast it to the "mlme" group in the wiphy's namespace.
 */
static void nl80211_send_mlme_event(struct cfg80211_registered_device *rdev,
                                    struct net_device *netdev,
                                    const u8 *buf, size_t len,
                                    enum nl80211_commands cmd, gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/* The following six wrappers tag a received MLME frame with its command. */

void nl80211_send_rx_auth(struct cfg80211_registered_device *rdev,
                          struct net_device *netdev, const u8 *buf,
                          size_t len, gfp_t gfp)
{
    nl80211_send_mlme_event(rdev, netdev, buf, len,
                            NL80211_CMD_AUTHENTICATE, gfp);
}

void nl80211_send_rx_assoc(struct cfg80211_registered_device *rdev,
                           struct net_device *netdev, const u8 *buf,
                           size_t len, gfp_t gfp)
{
    nl80211_send_mlme_event(rdev, netdev, buf, len,
                            NL80211_CMD_ASSOCIATE, gfp);
}

void nl80211_send_deauth(struct cfg80211_registered_device *rdev,
                         struct net_device *netdev, const u8 *buf,
                         size_t len, gfp_t gfp)
{
    nl80211_send_mlme_event(rdev, netdev, buf, len,
                            NL80211_CMD_DEAUTHENTICATE, gfp);
}

void nl80211_send_disassoc(struct cfg80211_registered_device *rdev,
                           struct net_device *netdev, const u8 *buf,
                           size_t len, gfp_t gfp)
{
    nl80211_send_mlme_event(rdev, netdev, buf, len,
                            NL80211_CMD_DISASSOCIATE, gfp);
}

void nl80211_send_unprot_deauth(struct cfg80211_registered_device *rdev,
                                struct net_device *netdev, const u8 *buf,
                                size_t len, gfp_t gfp)
{
    nl80211_send_mlme_event(rdev, netdev, buf, len,
                            NL80211_CMD_UNPROT_DEAUTHENTICATE, gfp);
}

void nl80211_send_unprot_disassoc(struct cfg80211_registered_device *rdev,
                                  struct net_device *netdev, const u8 *buf,
                                  size_t len, gfp_t gfp)
{
    nl80211_send_mlme_event(rdev, netdev, buf, len,
                            NL80211_CMD_UNPROT_DISASSOCIATE, gfp);
}

/*
 * Common helper for auth/assoc timeouts: emits @cmd with the TIMED_OUT
 * flag and the peer MAC address to the "mlme" group.
 */
static void nl80211_send_mlme_timeout(struct cfg80211_registered_device *rdev,
                                      struct net_device *netdev, int cmd,
                                      const u8 *addr, gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT_FLAG(msg, NL80211_ATTR_TIMED_OUT);
    NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

void nl80211_send_auth_timeout(struct cfg80211_registered_device *rdev,
                               struct net_device *netdev, const u8 *addr,
                               gfp_t gfp)
{
    nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_AUTHENTICATE,
                              addr, gfp);
}

void nl80211_send_assoc_timeout(struct cfg80211_registered_device *rdev,
                                struct net_device *netdev, const u8 *addr,
                                gfp_t gfp)
{
    nl80211_send_mlme_timeout(rdev, netdev, NL80211_CMD_ASSOCIATE,
                              addr, gfp);
}

/*
 * Report the outcome of a connect attempt (CMD_CONNECT event): optional
 * BSSID, 802.11 status code and request/response IEs.
 */
void nl80211_send_connect_result(struct cfg80211_registered_device *rdev,
                                 struct net_device *netdev, const u8 *bssid,
                                 const u8 *req_ie, size_t req_ie_len,
                                 const u8 *resp_ie, size_t resp_ie_len,
                                 u16 status, gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_CONNECT);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    if (bssid)
        NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
    NLA_PUT_U16(msg, NL80211_ATTR_STATUS_CODE, status);
    if (req_ie)
        NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
    if (resp_ie)
        NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/* Report a roam to a new BSSID (CMD_ROAM event). */
void nl80211_send_roamed(struct cfg80211_registered_device *rdev,
                         struct net_device *netdev, const u8 *bssid,
                         const u8 *req_ie, size_t req_ie_len,
                         const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_ROAM);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);
    if (req_ie)
        NLA_PUT(msg, NL80211_ATTR_REQ_IE, req_ie_len, req_ie);
    if (resp_ie)
        NLA_PUT(msg, NL80211_ATTR_RESP_IE, resp_ie_len, resp_ie);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/*
 * Report a disconnection (CMD_DISCONNECT event).  The reason code and the
 * DISCONNECTED_BY_AP flag are only included when the AP initiated it.
 */
void nl80211_send_disconnected(struct cfg80211_registered_device *rdev,
                               struct net_device *netdev, u16 reason,
                               const u8 *ie, size_t ie_len, bool from_ap)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_GOODSIZE, GFP_KERNEL);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DISCONNECT);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    if (from_ap && reason)
        NLA_PUT_U16(msg, NL80211_ATTR_REASON_CODE, reason);
    if (from_ap)
        NLA_PUT_FLAG(msg, NL80211_ATTR_DISCONNECTED_BY_AP);
    if (ie)
        NLA_PUT(msg, NL80211_ATTR_IE, ie_len, ie);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, GFP_KERNEL);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/* Report the BSSID joined in IBSS mode (CMD_JOIN_IBSS event). */
void nl80211_send_ibss_bssid(struct cfg80211_registered_device *rdev,
                             struct net_device *netdev, const u8 *bssid,
                             gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_JOIN_IBSS);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/* Report a candidate mesh peer (CMD_NEW_PEER_CANDIDATE event). */
void nl80211_send_new_peer_candidate(struct cfg80211_registered_device *rdev,
                                     struct net_device *netdev,
                                     const u8 *macaddr, const u8* ie,
                                     u8 ie_len, gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NEW_PEER_CANDIDATE);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, macaddr);
    if (ie_len && ie)
        NLA_PUT(msg, NL80211_ATTR_IE, ie_len , ie);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/*
 * Report a Michael MIC (TKIP) failure.  key_id == -1 means "unknown key
 * index"; @tsc, if given, is the 6-byte TKIP sequence counter.
 */
void nl80211_michael_mic_failure(struct cfg80211_registered_device *rdev,
                                 struct net_device *netdev, const u8 *addr,
                                 enum nl80211_key_type key_type, int key_id,
                                 const u8 *tsc, gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_MICHAEL_MIC_FAILURE);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    if (addr)
        NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, addr);
    NLA_PUT_U32(msg, NL80211_ATTR_KEY_TYPE, key_type);
    if (key_id != -1)
        NLA_PUT_U8(msg, NL80211_ATTR_KEY_IDX, key_id);
    if (tsc)
        NLA_PUT(msg, NL80211_ATTR_KEY_SEQ, 6, tsc);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/*
 * Broadcast a REG_BEACON_HINT event (channel attributes before and after
 * the hint) to the "regulatory" group in all namespaces.  GFP_ATOMIC
 * because this can be called from atomic context.
 */
void nl80211_send_beacon_hint_event(struct wiphy *wiphy,
                                    struct ieee80211_channel *channel_before,
                                    struct ieee80211_channel *channel_after)
{
    struct sk_buff *msg;
    void *hdr;
    struct nlattr *nl_freq;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_ATOMIC);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_REG_BEACON_HINT);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    /*
     * Since we are applying the beacon hint to a wiphy we know its
     * wiphy_idx is valid
     */
    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, get_wiphy_idx(wiphy));

    /* Before */
    nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_BEFORE);
    if (!nl_freq)
        goto nla_put_failure;
    if (nl80211_msg_put_channel(msg, channel_before))
        goto nla_put_failure;
    nla_nest_end(msg, nl_freq);

    /* After */
    nl_freq = nla_nest_start(msg, NL80211_ATTR_FREQ_AFTER);
    if (!nl_freq)
        goto nla_put_failure;
    if (nl80211_msg_put_channel(msg, channel_after))
        goto nla_put_failure;
    nla_nest_end(msg, nl_freq);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    rcu_read_lock();
    genlmsg_multicast_allns(msg, 0, nl80211_regulatory_mcgrp.id,
                            GFP_ATOMIC);
    rcu_read_unlock();

    return;

nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/*
 * Common helper for remain-on-channel start/cancel events; the duration
 * attribute is only meaningful (and only sent) for the start event.
 */
static void nl80211_send_remain_on_chan_event(
    int cmd, struct cfg80211_registered_device *rdev,
    struct net_device *netdev, u64 cookie,
    struct ieee80211_channel *chan,
    enum nl80211_channel_type channel_type,
    unsigned int duration, gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, cmd);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, chan->center_freq);
    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_CHANNEL_TYPE, channel_type);
    NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);

    if (cmd == NL80211_CMD_REMAIN_ON_CHANNEL)
        NLA_PUT_U32(msg, NL80211_ATTR_DURATION, duration);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

void nl80211_send_remain_on_channel(struct cfg80211_registered_device *rdev,
                                    struct net_device *netdev, u64 cookie,
                                    struct ieee80211_channel *chan,
                                    enum nl80211_channel_type channel_type,
                                    unsigned int duration, gfp_t gfp)
{
    nl80211_send_remain_on_chan_event(NL80211_CMD_REMAIN_ON_CHANNEL,
                                      rdev, netdev, cookie, chan,
                                      channel_type, duration, gfp);
}

void nl80211_send_remain_on_channel_cancel(
    struct cfg80211_registered_device *rdev, struct net_device *netdev,
    u64 cookie, struct ieee80211_channel *chan,
    enum nl80211_channel_type channel_type, gfp_t gfp)
{
    nl80211_send_remain_on_chan_event(NL80211_CMD_CANCEL_REMAIN_ON_CHANNEL,
                                      rdev, netdev, cookie, chan,
                                      channel_type, 0, gfp);
}

/* Multicast a full station-info dump for one station to the "mlme" group. */
void nl80211_send_sta_event(struct cfg80211_registered_device *rdev,
                            struct net_device *dev, const u8 *mac_addr,
                            struct station_info *sinfo, gfp_t gfp)
{
    struct sk_buff *msg;

    msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
    if (!msg)
        return;

    if (nl80211_send_station(msg, 0, 0, 0, dev, mac_addr, sinfo) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
}

/* Multicast a DEL_STATION event (ifindex + MAC only) to the "mlme" group. */
void nl80211_send_sta_del_event(struct cfg80211_registered_device *rdev,
                                struct net_device *dev, const u8 *mac_addr,
                                gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_DEL_STATION);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, dev->ifindex);
    NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, mac_addr);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/*
 * Unicast a received management frame (CMD_FRAME) to the socket @nlpid
 * that registered for it.  Returns 0 or a negative errno.
 */
int nl80211_send_mgmt(struct cfg80211_registered_device *rdev,
                      struct net_device *netdev, u32 nlpid,
                      int freq, const u8 *buf, size_t len, gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;
    int err;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
    if (!msg)
        return -ENOMEM;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME);
    if (!hdr) {
        nlmsg_free(msg);
        return -ENOMEM;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY_FREQ, freq);
    NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);

    err = genlmsg_end(msg, hdr);
    if (err < 0) {
        nlmsg_free(msg);
        return err;
    }

    err = genlmsg_unicast(wiphy_net(&rdev->wiphy), msg, nlpid);
    if (err < 0)
        return err;
    return 0;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
    return -ENOBUFS;
}

/*
 * Report TX status (ACKed or not) of a previously submitted management
 * frame, matched by @cookie.
 */
void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev,
                                 struct net_device *netdev, u64 cookie,
                                 const u8 *buf, size_t len, bool ack,
                                 gfp_t gfp)
{
    struct sk_buff *msg;
    void *hdr;

    msg = nlmsg_new(NLMSG_DEFAULT_SIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_FRAME_TX_STATUS);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT(msg, NL80211_ATTR_FRAME, len, buf);
    NLA_PUT_U64(msg, NL80211_ATTR_COOKIE, cookie);
    if (ack)
        NLA_PUT_FLAG(msg, NL80211_ATTR_ACK);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    /* NOTE(review): plain genlmsg_multicast here (not the _netns variant
     * used by the other events in this file) — confirm this is intended. */
    genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/* Report an RSSI threshold crossing inside a nested CQM attribute. */
void
nl80211_send_cqm_rssi_notify(struct cfg80211_registered_device *rdev,
                             struct net_device *netdev,
                             enum nl80211_cqm_rssi_threshold_event rssi_event,
                             gfp_t gfp)
{
    struct sk_buff *msg;
    struct nlattr *pinfoattr;
    void *hdr;

    msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);

    pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
    if (!pinfoattr)
        goto nla_put_failure;

    NLA_PUT_U32(msg, NL80211_ATTR_CQM_RSSI_THRESHOLD_EVENT,
                rssi_event);

    nla_nest_end(msg, pinfoattr);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/* Report a GTK rekey performed by the device (new replay counter). */
void nl80211_gtk_rekey_notify(struct cfg80211_registered_device *rdev,
                              struct net_device *netdev, const u8 *bssid,
                              const u8 *replay_ctr, gfp_t gfp)
{
    struct sk_buff *msg;
    struct nlattr *rekey_attr;
    void *hdr;

    msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_SET_REKEY_OFFLOAD);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, bssid);

    rekey_attr = nla_nest_start(msg, NL80211_ATTR_REKEY_DATA);
    if (!rekey_attr)
        goto nla_put_failure;

    NLA_PUT(msg, NL80211_REKEY_DATA_REPLAY_CTR,
            NL80211_REPLAY_CTR_LEN, replay_ctr);

    nla_nest_end(msg, rekey_attr);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/* Report packet loss to @peer inside a nested CQM attribute. */
void
nl80211_send_cqm_pktloss_notify(struct cfg80211_registered_device *rdev,
                                struct net_device *netdev, const u8 *peer,
                                u32 num_packets, gfp_t gfp)
{
    struct sk_buff *msg;
    struct nlattr *pinfoattr;
    void *hdr;

    msg = nlmsg_new(NLMSG_GOODSIZE, gfp);
    if (!msg)
        return;

    hdr = nl80211hdr_put(msg, 0, 0, 0, NL80211_CMD_NOTIFY_CQM);
    if (!hdr) {
        nlmsg_free(msg);
        return;
    }

    NLA_PUT_U32(msg, NL80211_ATTR_WIPHY, rdev->wiphy_idx);
    NLA_PUT_U32(msg, NL80211_ATTR_IFINDEX, netdev->ifindex);
    NLA_PUT(msg, NL80211_ATTR_MAC, ETH_ALEN, peer);

    pinfoattr = nla_nest_start(msg, NL80211_ATTR_CQM);
    if (!pinfoattr)
        goto nla_put_failure;

    NLA_PUT_U32(msg, NL80211_ATTR_CQM_PKT_LOSS_EVENT, num_packets);

    nla_nest_end(msg, pinfoattr);

    if (genlmsg_end(msg, hdr) < 0) {
        nlmsg_free(msg);
        return;
    }

    genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0,
                            nl80211_mlme_mcgrp.id, gfp);
    return;

 nla_put_failure:
    genlmsg_cancel(msg, hdr);
    nlmsg_free(msg);
}

/*
 * Netlink notifier: when a netlink socket is released, drop any management
 * frame registrations that socket held on any wireless device.
 */
static int nl80211_netlink_notify(struct notifier_block * nb,
                                  unsigned long state,
                                  void *_notify)
{
    struct netlink_notify *notify = _notify;
    struct cfg80211_registered_device *rdev;
    struct wireless_dev *wdev;

    if (state != NETLINK_URELEASE)
        return NOTIFY_DONE;

    rcu_read_lock();

    list_for_each_entry_rcu(rdev, &cfg80211_rdev_list, list)
        list_for_each_entry_rcu(wdev, &rdev->netdev_list, list)
            cfg80211_mlme_unregister_socket(wdev, notify->pid);

    rcu_read_unlock();

    return NOTIFY_DONE;
}

static struct notifier_block nl80211_netlink_notifier = {
    .notifier_call = nl80211_netlink_notify,
};

/* initialisation/exit functions */

/*
 * Register the nl80211 generic netlink family, all multicast groups and
 * the socket-release notifier.  On any failure the whole family is torn
 * down again (which also removes already-registered mc groups).
 */
int nl80211_init(void)
{
    int err;

    err = genl_register_family_with_ops(&nl80211_fam,
                                        nl80211_ops, ARRAY_SIZE(nl80211_ops));
    if (err)
        return err;

    err = genl_register_mc_group(&nl80211_fam, &nl80211_config_mcgrp);
    if (err)
        goto err_out;

    err = genl_register_mc_group(&nl80211_fam, &nl80211_scan_mcgrp);
    if (err)
        goto err_out;

    err = genl_register_mc_group(&nl80211_fam, &nl80211_regulatory_mcgrp);
    if (err)
        goto err_out;

    err = genl_register_mc_group(&nl80211_fam, &nl80211_mlme_mcgrp);
    if (err)
        goto err_out;

#ifdef CONFIG_NL80211_TESTMODE
    err = genl_register_mc_group(&nl80211_fam, &nl80211_testmode_mcgrp);
    if (err)
        goto err_out;
#endif

    err = netlink_register_notifier(&nl80211_netlink_notifier);
    if (err)
        goto err_out;

    return 0;
 err_out:
    genl_unregister_family(&nl80211_fam);
    return err;
}

/* Unregister the notifier and the whole nl80211 genetlink family. */
void nl80211_exit(void)
{
    netlink_unregister_notifier(&nl80211_netlink_notifier);
    genl_unregister_family(&nl80211_fam);
}
gpl-2.0
hyunokoh/s4_qemu
roms/u-boot/board/bf537-stamp/bf537-stamp.c
27
1682
/*
 * U-boot - main board file
 *
 * Copyright (c) 2005-2008 Analog Devices Inc.
 *
 * (C) Copyright 2000-2004
 * Wolfgang Denk, DENX Software Engineering, wd@denx.de.
 *
 * SPDX-License-Identifier: GPL-2.0+
 */

#include <common.h>
#include <config.h>
#include <command.h>
#include <asm/blackfin.h>
#include <net.h>
#include <asm/mach-common/bits/bootrom.h>
#include <netdev.h>

DECLARE_GLOBAL_DATA_PTR;

/* Print board identification during boot; always succeeds. */
int checkboard(void)
{
    printf("Board: ADI BF537 stamp board\n");
    printf("       Support: http://blackfin.uclinux.org/\n");
    return 0;
}

#ifdef CONFIG_BFIN_MAC
/*
 * Populate @mac_addr: prefer the MAC stored in the last flash sector
 * (when flash is available and the stored value is a valid unicast
 * address), otherwise fall back to a randomly generated one.  The result
 * is written to the "ethaddr" environment variable either way.
 */
static void board_init_enetaddr(uchar *mac_addr)
{
#ifdef CONFIG_SYS_NO_FLASH
# define USE_MAC_IN_FLASH 0
#else
# define USE_MAC_IN_FLASH 1
#endif
    bool valid_mac = false;

    if (USE_MAC_IN_FLASH) {
        /* we cram the MAC in the last flash sector */
        uchar *board_mac_addr = (uchar *)0x203F0000;
        if (is_valid_ether_addr(board_mac_addr)) {
            memcpy(mac_addr, board_mac_addr, 6);
            valid_mac = true;
        }
    }

    if (!valid_mac) {
        puts("Warning: Generating 'random' MAC address\n");
        eth_random_addr(mac_addr);
    }

    eth_setenv_enetaddr("ethaddr", mac_addr);
}

/* Register the on-chip Blackfin EMAC with the U-Boot network stack. */
int board_eth_init(bd_t *bis)
{
    return bfin_EMAC_initialize(bis);
}
#endif

/* miscellaneous platform dependent initialisations */
int misc_init_r(void)
{
#ifdef CONFIG_BFIN_MAC
    uchar enetaddr[6];
    /* only generate/store a MAC if the environment has none yet */
    if (!eth_getenv_enetaddr("ethaddr", enetaddr))
        board_init_enetaddr(enetaddr);
#endif

#ifndef CONFIG_SYS_NO_FLASH
    /* we use the last sector for the MAC address / POST LDR */
    extern flash_info_t flash_info[];
    flash_protect(FLAG_PROTECT_SET, 0x203F0000, 0x203FFFFF, &flash_info[0]);
#endif

#ifdef CONFIG_BFIN_IDE
    cf_ide_init();
#endif

    return 0;
}
gpl-2.0
nitinkamble/x32-glibc
sysdeps/ieee754/ldbl-96/s_tanl.c
27
2091
/* s_tanl.c -- long double version of s_tan.c.
 * Conversion to long double by Ulrich Drepper,
 * Cygnus Support, drepper@cygnus.com.
 */

/*
 * ====================================================
 * Copyright (C) 1993 by Sun Microsystems, Inc. All rights reserved.
 *
 * Developed at SunPro, a Sun Microsystems, Inc. business.
 * Permission to use, copy, modify, and distribute this
 * software is freely granted, provided that this notice
 * is preserved.
 * ====================================================
 */

#if defined(LIBM_SCCS) && !defined(lint)
static char rcsid[] = "$NetBSD: $";
#endif

/* tanl(x)
 * Return tangent function of x.
 *
 * kernel function:
 *	__kernel_tanl		... tangent function on [-pi/4,pi/4]
 *	__ieee754_rem_pio2l	... argument reduction routine
 *
 * Method.
 *      Let S,C and T denote the sin, cos and tan respectively on
 *	[-PI/4, +PI/4]. Reduce the argument x to y1+y2 = x-k*pi/2
 *	in [-pi/4 , +pi/4], and let n = k mod 4.
 *	We have
 *
 *          n        sin(x)      cos(x)        tan(x)
 *     ----------------------------------------------------------
 *	    0	       S	   C		 T
 *	    1	       C	  -S		-1/T
 *	    2	      -S	  -C		 T
 *	    3	      -C	   S		-1/T
 *     ----------------------------------------------------------
 *
 * Special cases:
 *      Let trig be any of sin, cos, or tan.
 *      trig(+-INF)  is NaN, with signals;
 *      trig(NaN)    is that NaN;
 *
 * Accuracy:
 *	TRIG(x) returns trig(x) nearly rounded
 */

#include "math.h"
#include "math_private.h"

#ifdef __STDC__
	long double __tanl(long double x)
#else
	long double __tanl(x)
	long double x;
#endif
{
	long double y[2],z=0.0;
	int32_t n, se;

	/* High word of x. */
	GET_LDOUBLE_EXP(se,x);

	/* Mask off the sign bit, leaving only the biased exponent. */
	/* |x| ~< pi/4 */
	se &= 0x7fff;
	/* Small arguments need no reduction: tan(x) on [-pi/4, pi/4]. */
	if(se <= 0x3ffe) return __kernel_tanl(x,z,1);

	/* tan(Inf or NaN) is NaN */
	else if (se==0x7fff) return x-x;	/* NaN */

	/* argument reduction needed */
	else {
	    /* Reduce x to y[0]+y[1] in [-pi/4, pi/4]; n = k mod 4 selects
	     * whether the result is T or -1/T (see the table above). */
	    n = __ieee754_rem_pio2l(x,y);
	    return __kernel_tanl(y[0],y[1],1-((n&1)<<1)); /*   1 -- n even
							      -1 -- n odd */
	}
}
weak_alias (__tanl, tanl)
gpl-2.0
xsacha/SymbianGCC
libgfortran/generated/pow_r10_i16.c
27
1961
/* Support routines for the intrinsic power (**) operator.
   Copyright (C) 2004-2013 Free Software Foundation, Inc.
   Contributed by Paul Brook

   This file is part of the GNU Fortran 95 runtime library (libgfortran).

   Libgfortran is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public
   License as published by the Free Software Foundation; either
   version 3 of the License, or (at your option) any later version.

   Libgfortran is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   Under Section 7 of GPL version 3, you are granted additional
   permissions described in the GCC Runtime Library Exception, version
   3.1, as published by the Free Software Foundation.

   You should have received a copy of the GNU General Public License and
   a copy of the GCC Runtime Library Exception along with this program;
   see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
   <http://www.gnu.org/licenses/>.  */

#include "libgfortran.h"

/* Use Binary Method to calculate the powi. This is not an optimal but
   a simple and reasonable arithmetic. See section 4.6.3, "Evaluation of
   Powers" of Donald E. Knuth, "Seminumerical Algorithms", Vol. 2, "The Art
   of Computer Programming", 3rd Edition, 1998.  */

#if defined (HAVE_GFC_REAL_10) && defined (HAVE_GFC_INTEGER_16)

GFC_REAL_10 pow_r10_i16 (GFC_REAL_10 a, GFC_INTEGER_16 b);
export_proto(pow_r10_i16);

/* Return A raised to the integer power B, using binary (square-and-
   multiply) exponentiation.  A negative exponent is handled by first
   inverting the base.  */
GFC_REAL_10
pow_r10_i16 (GFC_REAL_10 a, GFC_INTEGER_16 b)
{
  GFC_REAL_10 pow, x;
  GFC_INTEGER_16 n;
  GFC_UINTEGER_16 u;

  n = b;
  x = a;
  pow = 1;
  if (n != 0)
    {
      if (n < 0)
	{
	  /* Negate in the unsigned type: plain `-n' is undefined
	     behavior (signed overflow) when n is the most negative
	     GFC_INTEGER_16 value.  Unsigned negation wraps and yields
	     the correct magnitude for every n.  */
	  u = -(GFC_UINTEGER_16) n;
	  x = pow / x;	/* pow is still 1 here, so this is 1/x.  */
	}
      else
	{
	  u = n;
	}
      /* Square-and-multiply: consume the exponent bit by bit, squaring
	 the base for each position and multiplying it in when the bit
	 is set.  */
      for (;;)
	{
	  if (u & 1)
	    pow *= x;
	  u >>= 1;
	  if (u)
	    x *= x;
	  else
	    break;
	}
    }
  return pow;
}

#endif
gpl-2.0
saeedhadi/ilum9260
drivers/usb/host/uhci-q.c
27
46464
/*
 * Universal Host Controller Interface driver for USB.
 *
 * Maintainer: Alan Stern <stern@rowland.harvard.edu>
 *
 * (C) Copyright 1999 Linus Torvalds
 * (C) Copyright 1999-2002 Johannes Erdfelt, johannes@erdfelt.com
 * (C) Copyright 1999 Randy Dunlap
 * (C) Copyright 1999 Georg Acher, acher@in.tum.de
 * (C) Copyright 1999 Deti Fliegl, deti@fliegl.de
 * (C) Copyright 1999 Thomas Sailer, sailer@ife.ee.ethz.ch
 * (C) Copyright 1999 Roman Weissgaerber, weissg@vienna.at
 * (C) Copyright 2000 Yggdrasil Computing, Inc. (port of new PCI interface
 *               support from usb-ohci.c by Adam Richter, adam@yggdrasil.com).
 * (C) Copyright 1999 Gregory P. Smith (from usb-ohci.c)
 * (C) Copyright 2004-2007 Alan Stern, stern@rowland.harvard.edu
 */


/*
 * Technically, updating td->status here is a race, but it's not really a
 * problem. The worst that can happen is that we set the IOC bit again
 * generating a spurious interrupt. We could fix this by creating another
 * QH and leaving the IOC bit always set, but then we would have to play
 * games with the FSBR code to make sure we get the correct order in all
 * the cases. I don't think it's worth the effort
 */
/* Request an interrupt at the next frame by setting IOC on the terminating
 * TD; also kick the root-hub timer if the controller is stopped. */
static void uhci_set_next_interrupt(struct uhci_hcd *uhci)
{
	if (uhci->is_stopped)
		mod_timer(&uhci_to_hcd(uhci)->rh_timer, jiffies);
	uhci->term_td->status |= cpu_to_le32(TD_CTRL_IOC);
}

/* Clear the IOC request set by uhci_set_next_interrupt(). */
static inline void uhci_clear_next_interrupt(struct uhci_hcd *uhci)
{
	uhci->term_td->status &= ~cpu_to_le32(TD_CTRL_IOC);
}


/*
 * Full-Speed Bandwidth Reclamation (FSBR).
 * We turn on FSBR whenever a queue that wants it is advancing,
 * and leave it on for a short time thereafter.
 */
static void uhci_fsbr_on(struct uhci_hcd *uhci)
{
	struct uhci_qh *lqh;

	/* The terminating skeleton QH always points back to the first
	 * FSBR QH.  Make the last async QH point to the terminating
	 * skeleton QH.
*/ uhci->fsbr_is_on = 1; lqh = list_entry(uhci->skel_async_qh->node.prev, struct uhci_qh, node); lqh->link = LINK_TO_QH(uhci->skel_term_qh); } static void uhci_fsbr_off(struct uhci_hcd *uhci) { struct uhci_qh *lqh; /* Remove the link from the last async QH to the terminating * skeleton QH. */ uhci->fsbr_is_on = 0; lqh = list_entry(uhci->skel_async_qh->node.prev, struct uhci_qh, node); lqh->link = UHCI_PTR_TERM; } static void uhci_add_fsbr(struct uhci_hcd *uhci, struct urb *urb) { struct urb_priv *urbp = urb->hcpriv; if (!(urb->transfer_flags & URB_NO_FSBR)) urbp->fsbr = 1; } static void uhci_urbp_wants_fsbr(struct uhci_hcd *uhci, struct urb_priv *urbp) { if (urbp->fsbr) { uhci->fsbr_is_wanted = 1; if (!uhci->fsbr_is_on) uhci_fsbr_on(uhci); else if (uhci->fsbr_expiring) { uhci->fsbr_expiring = 0; del_timer(&uhci->fsbr_timer); } } } static void uhci_fsbr_timeout(unsigned long _uhci) { struct uhci_hcd *uhci = (struct uhci_hcd *) _uhci; unsigned long flags; spin_lock_irqsave(&uhci->lock, flags); if (uhci->fsbr_expiring) { uhci->fsbr_expiring = 0; uhci_fsbr_off(uhci); } spin_unlock_irqrestore(&uhci->lock, flags); } static struct uhci_td *uhci_alloc_td(struct uhci_hcd *uhci) { dma_addr_t dma_handle; struct uhci_td *td; td = dma_pool_alloc(uhci->td_pool, GFP_ATOMIC, &dma_handle); if (!td) return NULL; td->dma_handle = dma_handle; td->frame = -1; INIT_LIST_HEAD(&td->list); INIT_LIST_HEAD(&td->fl_list); return td; } static void uhci_free_td(struct uhci_hcd *uhci, struct uhci_td *td) { if (!list_empty(&td->list)) dev_WARN(uhci_dev(uhci), "td %p still in list!\n", td); if (!list_empty(&td->fl_list)) dev_WARN(uhci_dev(uhci), "td %p still in fl_list!\n", td); dma_pool_free(uhci->td_pool, td, td->dma_handle); } static inline void uhci_fill_td(struct uhci_td *td, u32 status, u32 token, u32 buffer) { td->status = cpu_to_le32(status); td->token = cpu_to_le32(token); td->buffer = cpu_to_le32(buffer); } static void uhci_add_td_to_urbp(struct uhci_td *td, struct urb_priv *urbp) { 
list_add_tail(&td->list, &urbp->td_list); } static void uhci_remove_td_from_urbp(struct uhci_td *td) { list_del_init(&td->list); } /* * We insert Isochronous URBs directly into the frame list at the beginning */ static inline void uhci_insert_td_in_frame_list(struct uhci_hcd *uhci, struct uhci_td *td, unsigned framenum) { framenum &= (UHCI_NUMFRAMES - 1); td->frame = framenum; /* Is there a TD already mapped there? */ if (uhci->frame_cpu[framenum]) { struct uhci_td *ftd, *ltd; ftd = uhci->frame_cpu[framenum]; ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list); list_add_tail(&td->fl_list, &ftd->fl_list); td->link = ltd->link; wmb(); ltd->link = LINK_TO_TD(td); } else { td->link = uhci->frame[framenum]; wmb(); uhci->frame[framenum] = LINK_TO_TD(td); uhci->frame_cpu[framenum] = td; } } static inline void uhci_remove_td_from_frame_list(struct uhci_hcd *uhci, struct uhci_td *td) { /* If it's not inserted, don't remove it */ if (td->frame == -1) { WARN_ON(!list_empty(&td->fl_list)); return; } if (uhci->frame_cpu[td->frame] == td) { if (list_empty(&td->fl_list)) { uhci->frame[td->frame] = td->link; uhci->frame_cpu[td->frame] = NULL; } else { struct uhci_td *ntd; ntd = list_entry(td->fl_list.next, struct uhci_td, fl_list); uhci->frame[td->frame] = LINK_TO_TD(ntd); uhci->frame_cpu[td->frame] = ntd; } } else { struct uhci_td *ptd; ptd = list_entry(td->fl_list.prev, struct uhci_td, fl_list); ptd->link = td->link; } list_del_init(&td->fl_list); td->frame = -1; } static inline void uhci_remove_tds_from_frame(struct uhci_hcd *uhci, unsigned int framenum) { struct uhci_td *ftd, *ltd; framenum &= (UHCI_NUMFRAMES - 1); ftd = uhci->frame_cpu[framenum]; if (ftd) { ltd = list_entry(ftd->fl_list.prev, struct uhci_td, fl_list); uhci->frame[framenum] = ltd->link; uhci->frame_cpu[framenum] = NULL; while (!list_empty(&ftd->fl_list)) list_del_init(ftd->fl_list.prev); } } /* * Remove all the TDs for an Isochronous URB from the frame list */ static void 
uhci_unlink_isochronous_tds(struct uhci_hcd *uhci, struct urb *urb) { struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; struct uhci_td *td; list_for_each_entry(td, &urbp->td_list, list) uhci_remove_td_from_frame_list(uhci, td); } static struct uhci_qh *uhci_alloc_qh(struct uhci_hcd *uhci, struct usb_device *udev, struct usb_host_endpoint *hep) { dma_addr_t dma_handle; struct uhci_qh *qh; qh = dma_pool_alloc(uhci->qh_pool, GFP_ATOMIC, &dma_handle); if (!qh) return NULL; memset(qh, 0, sizeof(*qh)); qh->dma_handle = dma_handle; qh->element = UHCI_PTR_TERM; qh->link = UHCI_PTR_TERM; INIT_LIST_HEAD(&qh->queue); INIT_LIST_HEAD(&qh->node); if (udev) { /* Normal QH */ qh->type = hep->desc.bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; if (qh->type != USB_ENDPOINT_XFER_ISOC) { qh->dummy_td = uhci_alloc_td(uhci); if (!qh->dummy_td) { dma_pool_free(uhci->qh_pool, qh, dma_handle); return NULL; } } qh->state = QH_STATE_IDLE; qh->hep = hep; qh->udev = udev; hep->hcpriv = qh; if (qh->type == USB_ENDPOINT_XFER_INT || qh->type == USB_ENDPOINT_XFER_ISOC) qh->load = usb_calc_bus_time(udev->speed, usb_endpoint_dir_in(&hep->desc), qh->type == USB_ENDPOINT_XFER_ISOC, le16_to_cpu(hep->desc.wMaxPacketSize)) / 1000 + 1; } else { /* Skeleton QH */ qh->state = QH_STATE_ACTIVE; qh->type = -1; } return qh; } static void uhci_free_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) { WARN_ON(qh->state != QH_STATE_IDLE && qh->udev); if (!list_empty(&qh->queue)) dev_WARN(uhci_dev(uhci), "qh %p list not empty!\n", qh); list_del(&qh->node); if (qh->udev) { qh->hep->hcpriv = NULL; if (qh->dummy_td) uhci_free_td(uhci, qh->dummy_td); } dma_pool_free(uhci->qh_pool, qh, qh->dma_handle); } /* * When a queue is stopped and a dequeued URB is given back, adjust * the previous TD link (if the URB isn't first on the queue) or * save its toggle value (if it is first and is currently executing). * * Returns 0 if the URB should not yet be given back, 1 otherwise. 
*/ static int uhci_cleanup_queue(struct uhci_hcd *uhci, struct uhci_qh *qh, struct urb *urb) { struct urb_priv *urbp = urb->hcpriv; struct uhci_td *td; int ret = 1; /* Isochronous pipes don't use toggles and their TD link pointers * get adjusted during uhci_urb_dequeue(). But since their queues * cannot truly be stopped, we have to watch out for dequeues * occurring after the nominal unlink frame. */ if (qh->type == USB_ENDPOINT_XFER_ISOC) { ret = (uhci->frame_number + uhci->is_stopped != qh->unlink_frame); goto done; } /* If the URB isn't first on its queue, adjust the link pointer * of the last TD in the previous URB. The toggle doesn't need * to be saved since this URB can't be executing yet. */ if (qh->queue.next != &urbp->node) { struct urb_priv *purbp; struct uhci_td *ptd; purbp = list_entry(urbp->node.prev, struct urb_priv, node); WARN_ON(list_empty(&purbp->td_list)); ptd = list_entry(purbp->td_list.prev, struct uhci_td, list); td = list_entry(urbp->td_list.prev, struct uhci_td, list); ptd->link = td->link; goto done; } /* If the QH element pointer is UHCI_PTR_TERM then then currently * executing URB has already been unlinked, so this one isn't it. */ if (qh_element(qh) == UHCI_PTR_TERM) goto done; qh->element = UHCI_PTR_TERM; /* Control pipes don't have to worry about toggles */ if (qh->type == USB_ENDPOINT_XFER_CONTROL) goto done; /* Save the next toggle value */ WARN_ON(list_empty(&urbp->td_list)); td = list_entry(urbp->td_list.next, struct uhci_td, list); qh->needs_fixup = 1; qh->initial_toggle = uhci_toggle(td_token(td)); done: return ret; } /* * Fix up the data toggles for URBs in a queue, when one of them * terminates early (short transfer, error, or dequeued). */ static void uhci_fixup_toggles(struct uhci_qh *qh, int skip_first) { struct urb_priv *urbp = NULL; struct uhci_td *td; unsigned int toggle = qh->initial_toggle; unsigned int pipe; /* Fixups for a short transfer start with the second URB in the * queue (the short URB is the first). 
*/ if (skip_first) urbp = list_entry(qh->queue.next, struct urb_priv, node); /* When starting with the first URB, if the QH element pointer is * still valid then we know the URB's toggles are okay. */ else if (qh_element(qh) != UHCI_PTR_TERM) toggle = 2; /* Fix up the toggle for the URBs in the queue. Normally this * loop won't run more than once: When an error or short transfer * occurs, the queue usually gets emptied. */ urbp = list_prepare_entry(urbp, &qh->queue, node); list_for_each_entry_continue(urbp, &qh->queue, node) { /* If the first TD has the right toggle value, we don't * need to change any toggles in this URB */ td = list_entry(urbp->td_list.next, struct uhci_td, list); if (toggle > 1 || uhci_toggle(td_token(td)) == toggle) { td = list_entry(urbp->td_list.prev, struct uhci_td, list); toggle = uhci_toggle(td_token(td)) ^ 1; /* Otherwise all the toggles in the URB have to be switched */ } else { list_for_each_entry(td, &urbp->td_list, list) { td->token ^= cpu_to_le32( TD_TOKEN_TOGGLE); toggle ^= 1; } } } wmb(); pipe = list_entry(qh->queue.next, struct urb_priv, node)->urb->pipe; usb_settoggle(qh->udev, usb_pipeendpoint(pipe), usb_pipeout(pipe), toggle); qh->needs_fixup = 0; } /* * Link an Isochronous QH into its skeleton's list */ static inline void link_iso(struct uhci_hcd *uhci, struct uhci_qh *qh) { list_add_tail(&qh->node, &uhci->skel_iso_qh->node); /* Isochronous QHs aren't linked by the hardware */ } /* * Link a high-period interrupt QH into the schedule at the end of its * skeleton's list */ static void link_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct uhci_qh *pqh; list_add_tail(&qh->node, &uhci->skelqh[qh->skel]->node); pqh = list_entry(qh->node.prev, struct uhci_qh, node); qh->link = pqh->link; wmb(); pqh->link = LINK_TO_QH(qh); } /* * Link a period-1 interrupt or async QH into the schedule at the * correct spot in the async skeleton's list, and update the FSBR link */ static void link_async(struct uhci_hcd *uhci, struct 
uhci_qh *qh) { struct uhci_qh *pqh; __le32 link_to_new_qh; /* Find the predecessor QH for our new one and insert it in the list. * The list of QHs is expected to be short, so linear search won't * take too long. */ list_for_each_entry_reverse(pqh, &uhci->skel_async_qh->node, node) { if (pqh->skel <= qh->skel) break; } list_add(&qh->node, &pqh->node); /* Link it into the schedule */ qh->link = pqh->link; wmb(); link_to_new_qh = LINK_TO_QH(qh); pqh->link = link_to_new_qh; /* If this is now the first FSBR QH, link the terminating skeleton * QH to it. */ if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR) uhci->skel_term_qh->link = link_to_new_qh; } /* * Put a QH on the schedule in both hardware and software */ static void uhci_activate_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) { WARN_ON(list_empty(&qh->queue)); /* Set the element pointer if it isn't set already. * This isn't needed for Isochronous queues, but it doesn't hurt. */ if (qh_element(qh) == UHCI_PTR_TERM) { struct urb_priv *urbp = list_entry(qh->queue.next, struct urb_priv, node); struct uhci_td *td = list_entry(urbp->td_list.next, struct uhci_td, list); qh->element = LINK_TO_TD(td); } /* Treat the queue as if it has just advanced */ qh->wait_expired = 0; qh->advance_jiffies = jiffies; if (qh->state == QH_STATE_ACTIVE) return; qh->state = QH_STATE_ACTIVE; /* Move the QH from its old list to the correct spot in the appropriate * skeleton's list */ if (qh == uhci->next_qh) uhci->next_qh = list_entry(qh->node.next, struct uhci_qh, node); list_del(&qh->node); if (qh->skel == SKEL_ISO) link_iso(uhci, qh); else if (qh->skel < SKEL_ASYNC) link_interrupt(uhci, qh); else link_async(uhci, qh); } /* * Unlink a high-period interrupt QH from the schedule */ static void unlink_interrupt(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct uhci_qh *pqh; pqh = list_entry(qh->node.prev, struct uhci_qh, node); pqh->link = qh->link; mb(); } /* * Unlink a period-1 interrupt or async QH from the schedule */ static void 
unlink_async(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct uhci_qh *pqh; __le32 link_to_next_qh = qh->link; pqh = list_entry(qh->node.prev, struct uhci_qh, node); pqh->link = link_to_next_qh; /* If this was the old first FSBR QH, link the terminating skeleton * QH to the next (new first FSBR) QH. */ if (pqh->skel < SKEL_FSBR && qh->skel >= SKEL_FSBR) uhci->skel_term_qh->link = link_to_next_qh; mb(); } /* * Take a QH off the hardware schedule */ static void uhci_unlink_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) { if (qh->state == QH_STATE_UNLINKING) return; WARN_ON(qh->state != QH_STATE_ACTIVE || !qh->udev); qh->state = QH_STATE_UNLINKING; /* Unlink the QH from the schedule and record when we did it */ if (qh->skel == SKEL_ISO) ; else if (qh->skel < SKEL_ASYNC) unlink_interrupt(uhci, qh); else unlink_async(uhci, qh); uhci_get_current_frame_number(uhci); qh->unlink_frame = uhci->frame_number; /* Force an interrupt so we know when the QH is fully unlinked */ if (list_empty(&uhci->skel_unlink_qh->node)) uhci_set_next_interrupt(uhci); /* Move the QH from its old list to the end of the unlinking list */ if (qh == uhci->next_qh) uhci->next_qh = list_entry(qh->node.next, struct uhci_qh, node); list_move_tail(&qh->node, &uhci->skel_unlink_qh->node); } /* * When we and the controller are through with a QH, it becomes IDLE. * This happens when a QH has been off the schedule (on the unlinking * list) for more than one frame, or when an error occurs while adding * the first URB onto a new QH. 
*/ static void uhci_make_qh_idle(struct uhci_hcd *uhci, struct uhci_qh *qh) { WARN_ON(qh->state == QH_STATE_ACTIVE); if (qh == uhci->next_qh) uhci->next_qh = list_entry(qh->node.next, struct uhci_qh, node); list_move(&qh->node, &uhci->idle_qh_list); qh->state = QH_STATE_IDLE; /* Now that the QH is idle, its post_td isn't being used */ if (qh->post_td) { uhci_free_td(uhci, qh->post_td); qh->post_td = NULL; } /* If anyone is waiting for a QH to become idle, wake them up */ if (uhci->num_waiting) wake_up_all(&uhci->waitqh); } /* * Find the highest existing bandwidth load for a given phase and period. */ static int uhci_highest_load(struct uhci_hcd *uhci, int phase, int period) { int highest_load = uhci->load[phase]; for (phase += period; phase < MAX_PHASE; phase += period) highest_load = max_t(int, highest_load, uhci->load[phase]); return highest_load; } /* * Set qh->phase to the optimal phase for a periodic transfer and * check whether the bandwidth requirement is acceptable. */ static int uhci_check_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh) { int minimax_load; /* Find the optimal phase (unless it is already set) and get * its load value. 
*/ if (qh->phase >= 0) minimax_load = uhci_highest_load(uhci, qh->phase, qh->period); else { int phase, load; int max_phase = min_t(int, MAX_PHASE, qh->period); qh->phase = 0; minimax_load = uhci_highest_load(uhci, qh->phase, qh->period); for (phase = 1; phase < max_phase; ++phase) { load = uhci_highest_load(uhci, phase, qh->period); if (load < minimax_load) { minimax_load = load; qh->phase = phase; } } } /* Maximum allowable periodic bandwidth is 90%, or 900 us per frame */ if (minimax_load + qh->load > 900) { dev_dbg(uhci_dev(uhci), "bandwidth allocation failed: " "period %d, phase %d, %d + %d us\n", qh->period, qh->phase, minimax_load, qh->load); return -ENOSPC; } return 0; } /* * Reserve a periodic QH's bandwidth in the schedule */ static void uhci_reserve_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh) { int i; int load = qh->load; char *p = "??"; for (i = qh->phase; i < MAX_PHASE; i += qh->period) { uhci->load[i] += load; uhci->total_load += load; } uhci_to_hcd(uhci)->self.bandwidth_allocated = uhci->total_load / MAX_PHASE; switch (qh->type) { case USB_ENDPOINT_XFER_INT: ++uhci_to_hcd(uhci)->self.bandwidth_int_reqs; p = "INT"; break; case USB_ENDPOINT_XFER_ISOC: ++uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs; p = "ISO"; break; } qh->bandwidth_reserved = 1; dev_dbg(uhci_dev(uhci), "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n", "reserve", qh->udev->devnum, qh->hep->desc.bEndpointAddress, p, qh->period, qh->phase, load); } /* * Release a periodic QH's bandwidth reservation */ static void uhci_release_bandwidth(struct uhci_hcd *uhci, struct uhci_qh *qh) { int i; int load = qh->load; char *p = "??"; for (i = qh->phase; i < MAX_PHASE; i += qh->period) { uhci->load[i] -= load; uhci->total_load -= load; } uhci_to_hcd(uhci)->self.bandwidth_allocated = uhci->total_load / MAX_PHASE; switch (qh->type) { case USB_ENDPOINT_XFER_INT: --uhci_to_hcd(uhci)->self.bandwidth_int_reqs; p = "INT"; break; case USB_ENDPOINT_XFER_ISOC: 
--uhci_to_hcd(uhci)->self.bandwidth_isoc_reqs; p = "ISO"; break; } qh->bandwidth_reserved = 0; dev_dbg(uhci_dev(uhci), "%s dev %d ep%02x-%s, period %d, phase %d, %d us\n", "release", qh->udev->devnum, qh->hep->desc.bEndpointAddress, p, qh->period, qh->phase, load); } static inline struct urb_priv *uhci_alloc_urb_priv(struct uhci_hcd *uhci, struct urb *urb) { struct urb_priv *urbp; urbp = kmem_cache_zalloc(uhci_up_cachep, GFP_ATOMIC); if (!urbp) return NULL; urbp->urb = urb; urb->hcpriv = urbp; INIT_LIST_HEAD(&urbp->node); INIT_LIST_HEAD(&urbp->td_list); return urbp; } static void uhci_free_urb_priv(struct uhci_hcd *uhci, struct urb_priv *urbp) { struct uhci_td *td, *tmp; if (!list_empty(&urbp->node)) dev_WARN(uhci_dev(uhci), "urb %p still on QH's list!\n", urbp->urb); list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { uhci_remove_td_from_urbp(td); uhci_free_td(uhci, td); } kmem_cache_free(uhci_up_cachep, urbp); } /* * Map status to standard result codes * * <status> is (td_status(td) & 0xF60000), a.k.a. * uhci_status_bits(td_status(td)). * Note: <status> does not include the TD_CTRL_NAK bit. * <dir_out> is True for output TDs and False for input TDs. 
*/ static int uhci_map_status(int status, int dir_out) { if (!status) return 0; if (status & TD_CTRL_BITSTUFF) /* Bitstuff error */ return -EPROTO; if (status & TD_CTRL_CRCTIMEO) { /* CRC/Timeout */ if (dir_out) return -EPROTO; else return -EILSEQ; } if (status & TD_CTRL_BABBLE) /* Babble */ return -EOVERFLOW; if (status & TD_CTRL_DBUFERR) /* Buffer error */ return -ENOSR; if (status & TD_CTRL_STALLED) /* Stalled */ return -EPIPE; return 0; } /* * Control transfers */ static int uhci_submit_control(struct uhci_hcd *uhci, struct urb *urb, struct uhci_qh *qh) { struct uhci_td *td; unsigned long destination, status; int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); int len = urb->transfer_buffer_length; dma_addr_t data = urb->transfer_dma; __le32 *plink; struct urb_priv *urbp = urb->hcpriv; int skel; /* The "pipe" thing contains the destination in bits 8--18 */ destination = (urb->pipe & PIPE_DEVEP_MASK) | USB_PID_SETUP; /* 3 errors, dummy TD remains inactive */ status = uhci_maxerr(3); if (urb->dev->speed == USB_SPEED_LOW) status |= TD_CTRL_LS; /* * Build the TD for the control request setup packet */ td = qh->dummy_td; uhci_add_td_to_urbp(td, urbp); uhci_fill_td(td, status, destination | uhci_explen(8), urb->setup_dma); plink = &td->link; status |= TD_CTRL_ACTIVE; /* * If direction is "send", change the packet ID from SETUP (0x2D) * to OUT (0xE1). Else change it from SETUP to IN (0x69) and * set Short Packet Detect (SPD) for all data packets. * * 0-length transfers always get treated as "send". 
*/ if (usb_pipeout(urb->pipe) || len == 0) destination ^= (USB_PID_SETUP ^ USB_PID_OUT); else { destination ^= (USB_PID_SETUP ^ USB_PID_IN); status |= TD_CTRL_SPD; } /* * Build the DATA TDs */ while (len > 0) { int pktsze = maxsze; if (len <= pktsze) { /* The last data packet */ pktsze = len; status &= ~TD_CTRL_SPD; } td = uhci_alloc_td(uhci); if (!td) goto nomem; *plink = LINK_TO_TD(td); /* Alternate Data0/1 (start with Data1) */ destination ^= TD_TOKEN_TOGGLE; uhci_add_td_to_urbp(td, urbp); uhci_fill_td(td, status, destination | uhci_explen(pktsze), data); plink = &td->link; data += pktsze; len -= pktsze; } /* * Build the final TD for control status */ td = uhci_alloc_td(uhci); if (!td) goto nomem; *plink = LINK_TO_TD(td); /* Change direction for the status transaction */ destination ^= (USB_PID_IN ^ USB_PID_OUT); destination |= TD_TOKEN_TOGGLE; /* End in Data1 */ uhci_add_td_to_urbp(td, urbp); uhci_fill_td(td, status | TD_CTRL_IOC, destination | uhci_explen(0), 0); plink = &td->link; /* * Build the new dummy TD and activate the old one */ td = uhci_alloc_td(uhci); if (!td) goto nomem; *plink = LINK_TO_TD(td); uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); wmb(); qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE); qh->dummy_td = td; /* Low-speed transfers get a different queue, and won't hog the bus. * Also, some devices enumerate better without FSBR; the easiest way * to do that is to put URBs on the low-speed queue while the device * isn't in the CONFIGURED state. 
*/ if (urb->dev->speed == USB_SPEED_LOW || urb->dev->state != USB_STATE_CONFIGURED) skel = SKEL_LS_CONTROL; else { skel = SKEL_FS_CONTROL; uhci_add_fsbr(uhci, urb); } if (qh->state != QH_STATE_ACTIVE) qh->skel = skel; return 0; nomem: /* Remove the dummy TD from the td_list so it doesn't get freed */ uhci_remove_td_from_urbp(qh->dummy_td); return -ENOMEM; } /* * Common submit for bulk and interrupt */ static int uhci_submit_common(struct uhci_hcd *uhci, struct urb *urb, struct uhci_qh *qh) { struct uhci_td *td; unsigned long destination, status; int maxsze = le16_to_cpu(qh->hep->desc.wMaxPacketSize); int len = urb->transfer_buffer_length; dma_addr_t data = urb->transfer_dma; __le32 *plink; struct urb_priv *urbp = urb->hcpriv; unsigned int toggle; if (len < 0) return -EINVAL; /* The "pipe" thing contains the destination in bits 8--18 */ destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); toggle = usb_gettoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe)); /* 3 errors, dummy TD remains inactive */ status = uhci_maxerr(3); if (urb->dev->speed == USB_SPEED_LOW) status |= TD_CTRL_LS; if (usb_pipein(urb->pipe)) status |= TD_CTRL_SPD; /* * Build the DATA TDs */ plink = NULL; td = qh->dummy_td; do { /* Allow zero length packets */ int pktsze = maxsze; if (len <= pktsze) { /* The last packet */ pktsze = len; if (!(urb->transfer_flags & URB_SHORT_NOT_OK)) status &= ~TD_CTRL_SPD; } if (plink) { td = uhci_alloc_td(uhci); if (!td) goto nomem; *plink = LINK_TO_TD(td); } uhci_add_td_to_urbp(td, urbp); uhci_fill_td(td, status, destination | uhci_explen(pktsze) | (toggle << TD_TOKEN_TOGGLE_SHIFT), data); plink = &td->link; status |= TD_CTRL_ACTIVE; data += pktsze; len -= maxsze; toggle ^= 1; } while (len > 0); /* * URB_ZERO_PACKET means adding a 0-length packet, if direction * is OUT and the transfer_length was an exact multiple of maxsze, * hence (len = transfer_length - N * maxsze) == 0 * however, if transfer_length == 0, the zero packet was 
already * prepared above. */ if ((urb->transfer_flags & URB_ZERO_PACKET) && usb_pipeout(urb->pipe) && len == 0 && urb->transfer_buffer_length > 0) { td = uhci_alloc_td(uhci); if (!td) goto nomem; *plink = LINK_TO_TD(td); uhci_add_td_to_urbp(td, urbp); uhci_fill_td(td, status, destination | uhci_explen(0) | (toggle << TD_TOKEN_TOGGLE_SHIFT), data); plink = &td->link; toggle ^= 1; } /* Set the interrupt-on-completion flag on the last packet. * A more-or-less typical 4 KB URB (= size of one memory page) * will require about 3 ms to transfer; that's a little on the * fast side but not enough to justify delaying an interrupt * more than 2 or 3 URBs, so we will ignore the URB_NO_INTERRUPT * flag setting. */ td->status |= cpu_to_le32(TD_CTRL_IOC); /* * Build the new dummy TD and activate the old one */ td = uhci_alloc_td(uhci); if (!td) goto nomem; *plink = LINK_TO_TD(td); uhci_fill_td(td, 0, USB_PID_OUT | uhci_explen(0), 0); wmb(); qh->dummy_td->status |= cpu_to_le32(TD_CTRL_ACTIVE); qh->dummy_td = td; usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe), toggle); return 0; nomem: /* Remove the dummy TD from the td_list so it doesn't get freed */ uhci_remove_td_from_urbp(qh->dummy_td); return -ENOMEM; } static int uhci_submit_bulk(struct uhci_hcd *uhci, struct urb *urb, struct uhci_qh *qh) { int ret; /* Can't have low-speed bulk transfers */ if (urb->dev->speed == USB_SPEED_LOW) return -EINVAL; if (qh->state != QH_STATE_ACTIVE) qh->skel = SKEL_BULK; ret = uhci_submit_common(uhci, urb, qh); if (ret == 0) uhci_add_fsbr(uhci, urb); return ret; } static int uhci_submit_interrupt(struct uhci_hcd *uhci, struct urb *urb, struct uhci_qh *qh) { int ret; /* USB 1.1 interrupt transfers only involve one packet per interval. * Drivers can submit URBs of any length, but longer ones will need * multiple intervals to complete. 
*/ if (!qh->bandwidth_reserved) { int exponent; /* Figure out which power-of-two queue to use */ for (exponent = 7; exponent >= 0; --exponent) { if ((1 << exponent) <= urb->interval) break; } if (exponent < 0) return -EINVAL; /* If the slot is full, try a lower period */ do { qh->period = 1 << exponent; qh->skel = SKEL_INDEX(exponent); /* For now, interrupt phase is fixed by the layout * of the QH lists. */ qh->phase = (qh->period / 2) & (MAX_PHASE - 1); ret = uhci_check_bandwidth(uhci, qh); } while (ret != 0 && --exponent >= 0); if (ret) return ret; } else if (qh->period > urb->interval) return -EINVAL; /* Can't decrease the period */ ret = uhci_submit_common(uhci, urb, qh); if (ret == 0) { urb->interval = qh->period; if (!qh->bandwidth_reserved) uhci_reserve_bandwidth(uhci, qh); } return ret; } /* * Fix up the data structures following a short transfer */ static int uhci_fixup_short_transfer(struct uhci_hcd *uhci, struct uhci_qh *qh, struct urb_priv *urbp) { struct uhci_td *td; struct list_head *tmp; int ret; td = list_entry(urbp->td_list.prev, struct uhci_td, list); if (qh->type == USB_ENDPOINT_XFER_CONTROL) { /* When a control transfer is short, we have to restart * the queue at the status stage transaction, which is * the last TD. */ WARN_ON(list_empty(&urbp->td_list)); qh->element = LINK_TO_TD(td); tmp = td->list.prev; ret = -EINPROGRESS; } else { /* When a bulk/interrupt transfer is short, we have to * fix up the toggles of the following URBs on the queue * before restarting the queue at the next URB. 
*/ qh->initial_toggle = uhci_toggle(td_token(qh->post_td)) ^ 1; uhci_fixup_toggles(qh, 1); if (list_empty(&urbp->td_list)) td = qh->post_td; qh->element = td->link; tmp = urbp->td_list.prev; ret = 0; } /* Remove all the TDs we skipped over, from tmp back to the start */ while (tmp != &urbp->td_list) { td = list_entry(tmp, struct uhci_td, list); tmp = tmp->prev; uhci_remove_td_from_urbp(td); uhci_free_td(uhci, td); } return ret; } /* * Common result for control, bulk, and interrupt */ static int uhci_result_common(struct uhci_hcd *uhci, struct urb *urb) { struct urb_priv *urbp = urb->hcpriv; struct uhci_qh *qh = urbp->qh; struct uhci_td *td, *tmp; unsigned status; int ret = 0; list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { unsigned int ctrlstat; int len; ctrlstat = td_status(td); status = uhci_status_bits(ctrlstat); if (status & TD_CTRL_ACTIVE) return -EINPROGRESS; len = uhci_actual_length(ctrlstat); urb->actual_length += len; if (status) { ret = uhci_map_status(status, uhci_packetout(td_token(td))); if ((debug == 1 && ret != -EPIPE) || debug > 1) { /* Some debugging code */ dev_dbg(&urb->dev->dev, "%s: failed with status %x\n", __func__, status); if (debug > 1 && errbuf) { /* Print the chain for debugging */ uhci_show_qh(uhci, urbp->qh, errbuf, ERRBUF_LEN, 0); lprintk(errbuf); } } /* Did we receive a short packet? 
*/ } else if (len < uhci_expected_length(td_token(td))) { /* For control transfers, go to the status TD if * this isn't already the last data TD */ if (qh->type == USB_ENDPOINT_XFER_CONTROL) { if (td->list.next != urbp->td_list.prev) ret = 1; } /* For bulk and interrupt, this may be an error */ else if (urb->transfer_flags & URB_SHORT_NOT_OK) ret = -EREMOTEIO; /* Fixup needed only if this isn't the URB's last TD */ else if (&td->list != urbp->td_list.prev) ret = 1; } uhci_remove_td_from_urbp(td); if (qh->post_td) uhci_free_td(uhci, qh->post_td); qh->post_td = td; if (ret != 0) goto err; } return ret; err: if (ret < 0) { /* Note that the queue has stopped and save * the next toggle value */ qh->element = UHCI_PTR_TERM; qh->is_stopped = 1; qh->needs_fixup = (qh->type != USB_ENDPOINT_XFER_CONTROL); qh->initial_toggle = uhci_toggle(td_token(td)) ^ (ret == -EREMOTEIO); } else /* Short packet received */ ret = uhci_fixup_short_transfer(uhci, qh, urbp); return ret; } /* * Isochronous transfers */ static int uhci_submit_isochronous(struct uhci_hcd *uhci, struct urb *urb, struct uhci_qh *qh) { struct uhci_td *td = NULL; /* Since urb->number_of_packets > 0 */ int i, frame; unsigned long destination, status; struct urb_priv *urbp = (struct urb_priv *) urb->hcpriv; /* Values must not be too big (could overflow below) */ if (urb->interval >= UHCI_NUMFRAMES || urb->number_of_packets >= UHCI_NUMFRAMES) return -EFBIG; /* Check the period and figure out the starting frame number */ if (!qh->bandwidth_reserved) { qh->period = urb->interval; if (urb->transfer_flags & URB_ISO_ASAP) { qh->phase = -1; /* Find the best phase */ i = uhci_check_bandwidth(uhci, qh); if (i) return i; /* Allow a little time to allocate the TDs */ uhci_get_current_frame_number(uhci); frame = uhci->frame_number + 10; /* Move forward to the first frame having the * correct phase */ urb->start_frame = frame + ((qh->phase - frame) & (qh->period - 1)); } else { i = urb->start_frame - uhci->last_iso_frame; if (i <= 
0 || i >= UHCI_NUMFRAMES) return -EINVAL; qh->phase = urb->start_frame & (qh->period - 1); i = uhci_check_bandwidth(uhci, qh); if (i) return i; } } else if (qh->period != urb->interval) { return -EINVAL; /* Can't change the period */ } else { /* Find the next unused frame */ if (list_empty(&qh->queue)) { frame = qh->iso_frame; } else { struct urb *lurb; lurb = list_entry(qh->queue.prev, struct urb_priv, node)->urb; frame = lurb->start_frame + lurb->number_of_packets * lurb->interval; } if (urb->transfer_flags & URB_ISO_ASAP) { /* Skip some frames if necessary to insure * the start frame is in the future. */ uhci_get_current_frame_number(uhci); if (uhci_frame_before_eq(frame, uhci->frame_number)) { frame = uhci->frame_number + 1; frame += ((qh->phase - frame) & (qh->period - 1)); } } /* Otherwise pick up where the last URB leaves off */ urb->start_frame = frame; } /* Make sure we won't have to go too far into the future */ if (uhci_frame_before_eq(uhci->last_iso_frame + UHCI_NUMFRAMES, urb->start_frame + urb->number_of_packets * urb->interval)) return -EFBIG; status = TD_CTRL_ACTIVE | TD_CTRL_IOS; destination = (urb->pipe & PIPE_DEVEP_MASK) | usb_packetid(urb->pipe); for (i = 0; i < urb->number_of_packets; i++) { td = uhci_alloc_td(uhci); if (!td) return -ENOMEM; uhci_add_td_to_urbp(td, urbp); uhci_fill_td(td, status, destination | uhci_explen(urb->iso_frame_desc[i].length), urb->transfer_dma + urb->iso_frame_desc[i].offset); } /* Set the interrupt-on-completion flag on the last packet. 
*/ td->status |= cpu_to_le32(TD_CTRL_IOC); /* Add the TDs to the frame list */ frame = urb->start_frame; list_for_each_entry(td, &urbp->td_list, list) { uhci_insert_td_in_frame_list(uhci, td, frame); frame += qh->period; } if (list_empty(&qh->queue)) { qh->iso_packet_desc = &urb->iso_frame_desc[0]; qh->iso_frame = urb->start_frame; } qh->skel = SKEL_ISO; if (!qh->bandwidth_reserved) uhci_reserve_bandwidth(uhci, qh); return 0; } static int uhci_result_isochronous(struct uhci_hcd *uhci, struct urb *urb) { struct uhci_td *td, *tmp; struct urb_priv *urbp = urb->hcpriv; struct uhci_qh *qh = urbp->qh; list_for_each_entry_safe(td, tmp, &urbp->td_list, list) { unsigned int ctrlstat; int status; int actlength; if (uhci_frame_before_eq(uhci->cur_iso_frame, qh->iso_frame)) return -EINPROGRESS; uhci_remove_tds_from_frame(uhci, qh->iso_frame); ctrlstat = td_status(td); if (ctrlstat & TD_CTRL_ACTIVE) { status = -EXDEV; /* TD was added too late? */ } else { status = uhci_map_status(uhci_status_bits(ctrlstat), usb_pipeout(urb->pipe)); actlength = uhci_actual_length(ctrlstat); urb->actual_length += actlength; qh->iso_packet_desc->actual_length = actlength; qh->iso_packet_desc->status = status; } if (status) urb->error_count++; uhci_remove_td_from_urbp(td); uhci_free_td(uhci, td); qh->iso_frame += qh->period; ++qh->iso_packet_desc; } return 0; } static int uhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags) { int ret; struct uhci_hcd *uhci = hcd_to_uhci(hcd); unsigned long flags; struct urb_priv *urbp; struct uhci_qh *qh; spin_lock_irqsave(&uhci->lock, flags); ret = usb_hcd_link_urb_to_ep(hcd, urb); if (ret) goto done_not_linked; ret = -ENOMEM; urbp = uhci_alloc_urb_priv(uhci, urb); if (!urbp) goto done; if (urb->ep->hcpriv) qh = urb->ep->hcpriv; else { qh = uhci_alloc_qh(uhci, urb->dev, urb->ep); if (!qh) goto err_no_qh; } urbp->qh = qh; switch (qh->type) { case USB_ENDPOINT_XFER_CONTROL: ret = uhci_submit_control(uhci, urb, qh); break; case 
USB_ENDPOINT_XFER_BULK: ret = uhci_submit_bulk(uhci, urb, qh); break; case USB_ENDPOINT_XFER_INT: ret = uhci_submit_interrupt(uhci, urb, qh); break; case USB_ENDPOINT_XFER_ISOC: urb->error_count = 0; ret = uhci_submit_isochronous(uhci, urb, qh); break; } if (ret != 0) goto err_submit_failed; /* Add this URB to the QH */ urbp->qh = qh; list_add_tail(&urbp->node, &qh->queue); /* If the new URB is the first and only one on this QH then either * the QH is new and idle or else it's unlinked and waiting to * become idle, so we can activate it right away. But only if the * queue isn't stopped. */ if (qh->queue.next == &urbp->node && !qh->is_stopped) { uhci_activate_qh(uhci, qh); uhci_urbp_wants_fsbr(uhci, urbp); } goto done; err_submit_failed: if (qh->state == QH_STATE_IDLE) uhci_make_qh_idle(uhci, qh); /* Reclaim unused QH */ err_no_qh: uhci_free_urb_priv(uhci, urbp); done: if (ret) usb_hcd_unlink_urb_from_ep(hcd, urb); done_not_linked: spin_unlock_irqrestore(&uhci->lock, flags); return ret; } static int uhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) { struct uhci_hcd *uhci = hcd_to_uhci(hcd); unsigned long flags; struct uhci_qh *qh; int rc; spin_lock_irqsave(&uhci->lock, flags); rc = usb_hcd_check_unlink_urb(hcd, urb, status); if (rc) goto done; qh = ((struct urb_priv *) urb->hcpriv)->qh; /* Remove Isochronous TDs from the frame list ASAP */ if (qh->type == USB_ENDPOINT_XFER_ISOC) { uhci_unlink_isochronous_tds(uhci, urb); mb(); /* If the URB has already started, update the QH unlink time */ uhci_get_current_frame_number(uhci); if (uhci_frame_before_eq(urb->start_frame, uhci->frame_number)) qh->unlink_frame = uhci->frame_number; } uhci_unlink_qh(uhci, qh); done: spin_unlock_irqrestore(&uhci->lock, flags); return rc; } /* * Finish unlinking an URB and give it back */ static void uhci_giveback_urb(struct uhci_hcd *uhci, struct uhci_qh *qh, struct urb *urb, int status) __releases(uhci->lock) __acquires(uhci->lock) { struct urb_priv *urbp = (struct 
urb_priv *) urb->hcpriv; if (qh->type == USB_ENDPOINT_XFER_CONTROL) { /* Subtract off the length of the SETUP packet from * urb->actual_length. */ urb->actual_length -= min_t(u32, 8, urb->actual_length); } /* When giving back the first URB in an Isochronous queue, * reinitialize the QH's iso-related members for the next URB. */ else if (qh->type == USB_ENDPOINT_XFER_ISOC && urbp->node.prev == &qh->queue && urbp->node.next != &qh->queue) { struct urb *nurb = list_entry(urbp->node.next, struct urb_priv, node)->urb; qh->iso_packet_desc = &nurb->iso_frame_desc[0]; qh->iso_frame = nurb->start_frame; } /* Take the URB off the QH's queue. If the queue is now empty, * this is a perfect time for a toggle fixup. */ list_del_init(&urbp->node); if (list_empty(&qh->queue) && qh->needs_fixup) { usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe), qh->initial_toggle); qh->needs_fixup = 0; } uhci_free_urb_priv(uhci, urbp); usb_hcd_unlink_urb_from_ep(uhci_to_hcd(uhci), urb); spin_unlock(&uhci->lock); usb_hcd_giveback_urb(uhci_to_hcd(uhci), urb, status); spin_lock(&uhci->lock); /* If the queue is now empty, we can unlink the QH and give up its * reserved bandwidth. */ if (list_empty(&qh->queue)) { uhci_unlink_qh(uhci, qh); if (qh->bandwidth_reserved) uhci_release_bandwidth(uhci, qh); } } /* * Scan the URBs in a QH's queue */ #define QH_FINISHED_UNLINKING(qh) \ (qh->state == QH_STATE_UNLINKING && \ uhci->frame_number + uhci->is_stopped != qh->unlink_frame) static void uhci_scan_qh(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct urb_priv *urbp; struct urb *urb; int status; while (!list_empty(&qh->queue)) { urbp = list_entry(qh->queue.next, struct urb_priv, node); urb = urbp->urb; if (qh->type == USB_ENDPOINT_XFER_ISOC) status = uhci_result_isochronous(uhci, urb); else status = uhci_result_common(uhci, urb); if (status == -EINPROGRESS) break; /* Dequeued but completed URBs can't be given back unless * the QH is stopped or has finished unlinking. 
*/ if (urb->unlinked) { if (QH_FINISHED_UNLINKING(qh)) qh->is_stopped = 1; else if (!qh->is_stopped) return; } uhci_giveback_urb(uhci, qh, urb, status); if (status < 0) break; } /* If the QH is neither stopped nor finished unlinking (normal case), * our work here is done. */ if (QH_FINISHED_UNLINKING(qh)) qh->is_stopped = 1; else if (!qh->is_stopped) return; /* Otherwise give back each of the dequeued URBs */ restart: list_for_each_entry(urbp, &qh->queue, node) { urb = urbp->urb; if (urb->unlinked) { /* Fix up the TD links and save the toggles for * non-Isochronous queues. For Isochronous queues, * test for too-recent dequeues. */ if (!uhci_cleanup_queue(uhci, qh, urb)) { qh->is_stopped = 0; return; } uhci_giveback_urb(uhci, qh, urb, 0); goto restart; } } qh->is_stopped = 0; /* There are no more dequeued URBs. If there are still URBs on the * queue, the QH can now be re-activated. */ if (!list_empty(&qh->queue)) { if (qh->needs_fixup) uhci_fixup_toggles(qh, 0); /* If the first URB on the queue wants FSBR but its time * limit has expired, set the next TD to interrupt on * completion before reactivating the QH. */ urbp = list_entry(qh->queue.next, struct urb_priv, node); if (urbp->fsbr && qh->wait_expired) { struct uhci_td *td = list_entry(urbp->td_list.next, struct uhci_td, list); td->status |= __cpu_to_le32(TD_CTRL_IOC); } uhci_activate_qh(uhci, qh); } /* The queue is empty. The QH can become idle if it is fully * unlinked. */ else if (QH_FINISHED_UNLINKING(qh)) uhci_make_qh_idle(uhci, qh); } /* * Check for queues that have made some forward progress. * Returns 0 if the queue is not Isochronous, is ACTIVE, and * has not advanced since last examined; 1 otherwise. * * Early Intel controllers have a bug which causes qh->element sometimes * not to advance when a TD completes successfully. The queue remains * stuck on the inactive completed TD. We detect such cases and advance * the element pointer by hand. 
*/ static int uhci_advance_check(struct uhci_hcd *uhci, struct uhci_qh *qh) { struct urb_priv *urbp = NULL; struct uhci_td *td; int ret = 1; unsigned status; if (qh->type == USB_ENDPOINT_XFER_ISOC) goto done; /* Treat an UNLINKING queue as though it hasn't advanced. * This is okay because reactivation will treat it as though * it has advanced, and if it is going to become IDLE then * this doesn't matter anyway. Furthermore it's possible * for an UNLINKING queue not to have any URBs at all, or * for its first URB not to have any TDs (if it was dequeued * just as it completed). So it's not easy in any case to * test whether such queues have advanced. */ if (qh->state != QH_STATE_ACTIVE) { urbp = NULL; status = 0; } else { urbp = list_entry(qh->queue.next, struct urb_priv, node); td = list_entry(urbp->td_list.next, struct uhci_td, list); status = td_status(td); if (!(status & TD_CTRL_ACTIVE)) { /* We're okay, the queue has advanced */ qh->wait_expired = 0; qh->advance_jiffies = jiffies; goto done; } ret = 0; } /* The queue hasn't advanced; check for timeout */ if (qh->wait_expired) goto done; if (time_after(jiffies, qh->advance_jiffies + QH_WAIT_TIMEOUT)) { /* Detect the Intel bug and work around it */ if (qh->post_td && qh_element(qh) == LINK_TO_TD(qh->post_td)) { qh->element = qh->post_td->link; qh->advance_jiffies = jiffies; ret = 1; goto done; } qh->wait_expired = 1; /* If the current URB wants FSBR, unlink it temporarily * so that we can safely set the next TD to interrupt on * completion. That way we'll know as soon as the queue * starts moving again. 
*/ if (urbp && urbp->fsbr && !(status & TD_CTRL_IOC)) uhci_unlink_qh(uhci, qh); } else { /* Unmoving but not-yet-expired queues keep FSBR alive */ if (urbp) uhci_urbp_wants_fsbr(uhci, urbp); } done: return ret; } /* * Process events in the schedule, but only in one thread at a time */ static void uhci_scan_schedule(struct uhci_hcd *uhci) { int i; struct uhci_qh *qh; /* Don't allow re-entrant calls */ if (uhci->scan_in_progress) { uhci->need_rescan = 1; return; } uhci->scan_in_progress = 1; rescan: uhci->need_rescan = 0; uhci->fsbr_is_wanted = 0; uhci_clear_next_interrupt(uhci); uhci_get_current_frame_number(uhci); uhci->cur_iso_frame = uhci->frame_number; /* Go through all the QH queues and process the URBs in each one */ for (i = 0; i < UHCI_NUM_SKELQH - 1; ++i) { uhci->next_qh = list_entry(uhci->skelqh[i]->node.next, struct uhci_qh, node); while ((qh = uhci->next_qh) != uhci->skelqh[i]) { uhci->next_qh = list_entry(qh->node.next, struct uhci_qh, node); if (uhci_advance_check(uhci, qh)) { uhci_scan_qh(uhci, qh); if (qh->state == QH_STATE_ACTIVE) { uhci_urbp_wants_fsbr(uhci, list_entry(qh->queue.next, struct urb_priv, node)); } } } } uhci->last_iso_frame = uhci->cur_iso_frame; if (uhci->need_rescan) goto rescan; uhci->scan_in_progress = 0; if (uhci->fsbr_is_on && !uhci->fsbr_is_wanted && !uhci->fsbr_expiring) { uhci->fsbr_expiring = 1; mod_timer(&uhci->fsbr_timer, jiffies + FSBR_OFF_DELAY); } if (list_empty(&uhci->skel_unlink_qh->node)) uhci_clear_next_interrupt(uhci); else uhci_set_next_interrupt(uhci); }
gpl-2.0
dh-electronics/linux-am33x
drivers/platform/x86/toshiba_bluetooth.c
539
7115
/* * Toshiba Bluetooth Enable Driver * * Copyright (C) 2009 Jes Sorensen <Jes.Sorensen@gmail.com> * Copyright (C) 2015 Azael Avalos <coproscefalo@gmail.com> * * Thanks to Matthew Garrett for background info on ACPI innards which * normal people aren't meant to understand :-) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/acpi.h> #include <linux/rfkill.h> #define BT_KILLSWITCH_MASK 0x01 #define BT_PLUGGED_MASK 0x40 #define BT_POWER_MASK 0x80 MODULE_AUTHOR("Jes Sorensen <Jes.Sorensen@gmail.com>"); MODULE_DESCRIPTION("Toshiba Laptop ACPI Bluetooth Enable Driver"); MODULE_LICENSE("GPL"); struct toshiba_bluetooth_dev { struct acpi_device *acpi_dev; struct rfkill *rfk; bool killswitch; bool plugged; bool powered; }; static int toshiba_bt_rfkill_add(struct acpi_device *device); static int toshiba_bt_rfkill_remove(struct acpi_device *device); static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event); static const struct acpi_device_id bt_device_ids[] = { { "TOS6205", 0}, { "", 0}, }; MODULE_DEVICE_TABLE(acpi, bt_device_ids); #ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev); #endif static SIMPLE_DEV_PM_OPS(toshiba_bt_pm, NULL, toshiba_bt_resume); static struct acpi_driver toshiba_bt_rfkill_driver = { .name = "Toshiba BT", .class = "Toshiba", .ids = bt_device_ids, .ops = { .add = toshiba_bt_rfkill_add, .remove = toshiba_bt_rfkill_remove, .notify = toshiba_bt_rfkill_notify, }, .owner = THIS_MODULE, .drv.pm = &toshiba_bt_pm, }; static int toshiba_bluetooth_present(acpi_handle handle) { acpi_status result; u64 bt_present; /* * Some Toshiba laptops may have a fake TOS6205 device in * their ACPI BIOS, so query the _STA method to see if there 
* is really anything there. */ result = acpi_evaluate_integer(handle, "_STA", NULL, &bt_present); if (ACPI_FAILURE(result)) { pr_err("ACPI call to query Bluetooth presence failed"); return -ENXIO; } else if (!bt_present) { pr_info("Bluetooth device not present\n"); return -ENODEV; } return 0; } static int toshiba_bluetooth_status(acpi_handle handle) { acpi_status result; u64 status; result = acpi_evaluate_integer(handle, "BTST", NULL, &status); if (ACPI_FAILURE(result)) { pr_err("Could not get Bluetooth device status\n"); return -ENXIO; } return status; } static int toshiba_bluetooth_enable(acpi_handle handle) { acpi_status result; result = acpi_evaluate_object(handle, "AUSB", NULL, NULL); if (ACPI_FAILURE(result)) { pr_err("Could not attach USB Bluetooth device\n"); return -ENXIO; } result = acpi_evaluate_object(handle, "BTPO", NULL, NULL); if (ACPI_FAILURE(result)) { pr_err("Could not power ON Bluetooth device\n"); return -ENXIO; } return 0; } static int toshiba_bluetooth_disable(acpi_handle handle) { acpi_status result; result = acpi_evaluate_object(handle, "BTPF", NULL, NULL); if (ACPI_FAILURE(result)) { pr_err("Could not power OFF Bluetooth device\n"); return -ENXIO; } result = acpi_evaluate_object(handle, "DUSB", NULL, NULL); if (ACPI_FAILURE(result)) { pr_err("Could not detach USB Bluetooth device\n"); return -ENXIO; } return 0; } /* Helper function */ static int toshiba_bluetooth_sync_status(struct toshiba_bluetooth_dev *bt_dev) { int status; status = toshiba_bluetooth_status(bt_dev->acpi_dev->handle); if (status < 0) { pr_err("Could not sync bluetooth device status\n"); return status; } bt_dev->killswitch = (status & BT_KILLSWITCH_MASK) ? true : false; bt_dev->plugged = (status & BT_PLUGGED_MASK) ? true : false; bt_dev->powered = (status & BT_POWER_MASK) ? 
true : false; pr_debug("Bluetooth status %d killswitch %d plugged %d powered %d\n", status, bt_dev->killswitch, bt_dev->plugged, bt_dev->powered); return 0; } /* RFKill handlers */ static int bt_rfkill_set_block(void *data, bool blocked) { struct toshiba_bluetooth_dev *bt_dev = data; int ret; ret = toshiba_bluetooth_sync_status(bt_dev); if (ret) return ret; if (!bt_dev->killswitch) return 0; if (blocked) ret = toshiba_bluetooth_disable(bt_dev->acpi_dev->handle); else ret = toshiba_bluetooth_enable(bt_dev->acpi_dev->handle); return ret; } static void bt_rfkill_poll(struct rfkill *rfkill, void *data) { struct toshiba_bluetooth_dev *bt_dev = data; if (toshiba_bluetooth_sync_status(bt_dev)) return; /* * Note the Toshiba Bluetooth RFKill switch seems to be a strange * fish. It only provides a BT event when the switch is flipped to * the 'on' position. When flipping it to 'off', the USB device is * simply pulled away underneath us, without any BT event being * delivered. */ rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch); } static const struct rfkill_ops rfk_ops = { .set_block = bt_rfkill_set_block, .poll = bt_rfkill_poll, }; /* ACPI driver functions */ static void toshiba_bt_rfkill_notify(struct acpi_device *device, u32 event) { struct toshiba_bluetooth_dev *bt_dev = acpi_driver_data(device); if (toshiba_bluetooth_sync_status(bt_dev)) return; rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch); } #ifdef CONFIG_PM_SLEEP static int toshiba_bt_resume(struct device *dev) { struct toshiba_bluetooth_dev *bt_dev; int ret; bt_dev = acpi_driver_data(to_acpi_device(dev)); ret = toshiba_bluetooth_sync_status(bt_dev); if (ret) return ret; rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch); return 0; } #endif static int toshiba_bt_rfkill_add(struct acpi_device *device) { struct toshiba_bluetooth_dev *bt_dev; int result; result = toshiba_bluetooth_present(device->handle); if (result) return result; pr_info("Toshiba ACPI Bluetooth device driver\n"); bt_dev = 
kzalloc(sizeof(*bt_dev), GFP_KERNEL); if (!bt_dev) return -ENOMEM; bt_dev->acpi_dev = device; device->driver_data = bt_dev; dev_set_drvdata(&device->dev, bt_dev); result = toshiba_bluetooth_sync_status(bt_dev); if (result) { kfree(bt_dev); return result; } bt_dev->rfk = rfkill_alloc("Toshiba Bluetooth", &device->dev, RFKILL_TYPE_BLUETOOTH, &rfk_ops, bt_dev); if (!bt_dev->rfk) { pr_err("Unable to allocate rfkill device\n"); kfree(bt_dev); return -ENOMEM; } rfkill_set_hw_state(bt_dev->rfk, !bt_dev->killswitch); result = rfkill_register(bt_dev->rfk); if (result) { pr_err("Unable to register rfkill device\n"); rfkill_destroy(bt_dev->rfk); kfree(bt_dev); } return result; } static int toshiba_bt_rfkill_remove(struct acpi_device *device) { struct toshiba_bluetooth_dev *bt_dev = acpi_driver_data(device); /* clean up */ if (bt_dev->rfk) { rfkill_unregister(bt_dev->rfk); rfkill_destroy(bt_dev->rfk); } kfree(bt_dev); return toshiba_bluetooth_disable(device->handle); } module_acpi_driver(toshiba_bt_rfkill_driver);
gpl-2.0
balika011/android_kernel_lenovo_spark
fs/nfs/client.c
2075
37777
/* client.c: NFS client sharing and management code * * Copyright (C) 2006 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/string.h> #include <linux/stat.h> #include <linux/errno.h> #include <linux/unistd.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/metrics.h> #include <linux/sunrpc/xprtsock.h> #include <linux/sunrpc/xprtrdma.h> #include <linux/nfs_fs.h> #include <linux/nfs_mount.h> #include <linux/nfs4_mount.h> #include <linux/lockd/bind.h> #include <linux/seq_file.h> #include <linux/mount.h> #include <linux/nfs_idmap.h> #include <linux/vfs.h> #include <linux/inet.h> #include <linux/in6.h> #include <linux/slab.h> #include <linux/idr.h> #include <net/ipv6.h> #include <linux/nfs_xdr.h> #include <linux/sunrpc/bc_xprt.h> #include <linux/nsproxy.h> #include <linux/pid_namespace.h> #include "nfs4_fs.h" #include "callback.h" #include "delegation.h" #include "iostat.h" #include "internal.h" #include "fscache.h" #include "pnfs.h" #include "nfs.h" #include "netns.h" #define NFSDBG_FACILITY NFSDBG_CLIENT static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq); static DEFINE_SPINLOCK(nfs_version_lock); static DEFINE_MUTEX(nfs_version_mutex); static LIST_HEAD(nfs_versions); /* * RPC cruft for NFS */ static const struct rpc_version *nfs_version[5] = { [2] = NULL, [3] = NULL, [4] = NULL, }; const struct rpc_program nfs_program = { .name = "nfs", .number = NFS_PROGRAM, .nrvers = ARRAY_SIZE(nfs_version), .version = nfs_version, .stats = &nfs_rpcstat, .pipe_dir_name = NFS_PIPE_DIRNAME, }; struct 
rpc_stat nfs_rpcstat = { .program = &nfs_program }; static struct nfs_subversion *find_nfs_version(unsigned int version) { struct nfs_subversion *nfs; spin_lock(&nfs_version_lock); list_for_each_entry(nfs, &nfs_versions, list) { if (nfs->rpc_ops->version == version) { spin_unlock(&nfs_version_lock); return nfs; } } spin_unlock(&nfs_version_lock); return ERR_PTR(-EPROTONOSUPPORT); } struct nfs_subversion *get_nfs_version(unsigned int version) { struct nfs_subversion *nfs = find_nfs_version(version); if (IS_ERR(nfs)) { mutex_lock(&nfs_version_mutex); request_module("nfsv%d", version); nfs = find_nfs_version(version); mutex_unlock(&nfs_version_mutex); } if (!IS_ERR(nfs)) try_module_get(nfs->owner); return nfs; } void put_nfs_version(struct nfs_subversion *nfs) { module_put(nfs->owner); } void register_nfs_version(struct nfs_subversion *nfs) { spin_lock(&nfs_version_lock); list_add(&nfs->list, &nfs_versions); nfs_version[nfs->rpc_ops->version] = nfs->rpc_vers; spin_unlock(&nfs_version_lock); } EXPORT_SYMBOL_GPL(register_nfs_version); void unregister_nfs_version(struct nfs_subversion *nfs) { spin_lock(&nfs_version_lock); nfs_version[nfs->rpc_ops->version] = NULL; list_del(&nfs->list); spin_unlock(&nfs_version_lock); } EXPORT_SYMBOL_GPL(unregister_nfs_version); /* * Allocate a shared client record * * Since these are allocated/deallocated very rarely, we don't * bother putting them in a slab cache... 
*/ struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_init) { struct nfs_client *clp; struct rpc_cred *cred; int err = -ENOMEM; if ((clp = kzalloc(sizeof(*clp), GFP_KERNEL)) == NULL) goto error_0; clp->cl_nfs_mod = cl_init->nfs_mod; try_module_get(clp->cl_nfs_mod->owner); clp->rpc_ops = clp->cl_nfs_mod->rpc_ops; atomic_set(&clp->cl_count, 1); clp->cl_cons_state = NFS_CS_INITING; memcpy(&clp->cl_addr, cl_init->addr, cl_init->addrlen); clp->cl_addrlen = cl_init->addrlen; if (cl_init->hostname) { err = -ENOMEM; clp->cl_hostname = kstrdup(cl_init->hostname, GFP_KERNEL); if (!clp->cl_hostname) goto error_cleanup; } INIT_LIST_HEAD(&clp->cl_superblocks); clp->cl_rpcclient = ERR_PTR(-EINVAL); clp->cl_proto = cl_init->proto; clp->cl_net = get_net(cl_init->net); cred = rpc_lookup_machine_cred("*"); if (!IS_ERR(cred)) clp->cl_machine_cred = cred; nfs_fscache_get_client_cookie(clp); return clp; error_cleanup: put_nfs_version(clp->cl_nfs_mod); kfree(clp); error_0: return ERR_PTR(err); } EXPORT_SYMBOL_GPL(nfs_alloc_client); #if IS_ENABLED(CONFIG_NFS_V4) void nfs_cleanup_cb_ident_idr(struct net *net) { struct nfs_net *nn = net_generic(net, nfs_net_id); idr_destroy(&nn->cb_ident_idr); } /* nfs_client_lock held */ static void nfs_cb_idr_remove_locked(struct nfs_client *clp) { struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id); if (clp->cl_cb_ident) idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident); } static void pnfs_init_server(struct nfs_server *server) { rpc_init_wait_queue(&server->roc_rpcwaitq, "pNFS ROC"); } #else void nfs_cleanup_cb_ident_idr(struct net *net) { } static void nfs_cb_idr_remove_locked(struct nfs_client *clp) { } static void pnfs_init_server(struct nfs_server *server) { } #endif /* CONFIG_NFS_V4 */ /* * Destroy a shared client record */ void nfs_free_client(struct nfs_client *clp) { dprintk("--> nfs_free_client(%u)\n", clp->rpc_ops->version); nfs_fscache_release_client_cookie(clp); /* -EIO all pending I/O */ if 
(!IS_ERR(clp->cl_rpcclient)) rpc_shutdown_client(clp->cl_rpcclient); if (clp->cl_machine_cred != NULL) put_rpccred(clp->cl_machine_cred); put_net(clp->cl_net); put_nfs_version(clp->cl_nfs_mod); kfree(clp->cl_hostname); kfree(clp); dprintk("<-- nfs_free_client()\n"); } EXPORT_SYMBOL_GPL(nfs_free_client); /* * Release a reference to a shared client record */ void nfs_put_client(struct nfs_client *clp) { struct nfs_net *nn; if (!clp) return; dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count)); nn = net_generic(clp->cl_net, nfs_net_id); if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) { list_del(&clp->cl_share_link); nfs_cb_idr_remove_locked(clp); spin_unlock(&nn->nfs_client_lock); WARN_ON_ONCE(!list_empty(&clp->cl_superblocks)); clp->rpc_ops->free_client(clp); } } EXPORT_SYMBOL_GPL(nfs_put_client); #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) /* * Test if two ip6 socket addresses refer to the same socket by * comparing relevant fields. The padding bytes specifically, are not * compared. sin6_flowinfo is not compared because it only affects QoS * and sin6_scope_id is only compared if the address is "link local" * because "link local" addresses need only be unique to a specific * link. Conversely, ordinary unicast addresses might have different * sin6_scope_id. * * The caller should ensure both socket addresses are AF_INET6. 
*/ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, const struct sockaddr *sa2) { const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1; const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2; if (!ipv6_addr_equal(&sin1->sin6_addr, &sin2->sin6_addr)) return 0; else if (ipv6_addr_type(&sin1->sin6_addr) & IPV6_ADDR_LINKLOCAL) return sin1->sin6_scope_id == sin2->sin6_scope_id; return 1; } #else /* !defined(CONFIG_IPV6) && !defined(CONFIG_IPV6_MODULE) */ static int nfs_sockaddr_match_ipaddr6(const struct sockaddr *sa1, const struct sockaddr *sa2) { return 0; } #endif /* * Test if two ip4 socket addresses refer to the same socket, by * comparing relevant fields. The padding bytes specifically, are * not compared. * * The caller should ensure both socket addresses are AF_INET. */ static int nfs_sockaddr_match_ipaddr4(const struct sockaddr *sa1, const struct sockaddr *sa2) { const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1; const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2; return sin1->sin_addr.s_addr == sin2->sin_addr.s_addr; } static int nfs_sockaddr_cmp_ip6(const struct sockaddr *sa1, const struct sockaddr *sa2) { const struct sockaddr_in6 *sin1 = (const struct sockaddr_in6 *)sa1; const struct sockaddr_in6 *sin2 = (const struct sockaddr_in6 *)sa2; return nfs_sockaddr_match_ipaddr6(sa1, sa2) && (sin1->sin6_port == sin2->sin6_port); } static int nfs_sockaddr_cmp_ip4(const struct sockaddr *sa1, const struct sockaddr *sa2) { const struct sockaddr_in *sin1 = (const struct sockaddr_in *)sa1; const struct sockaddr_in *sin2 = (const struct sockaddr_in *)sa2; return nfs_sockaddr_match_ipaddr4(sa1, sa2) && (sin1->sin_port == sin2->sin_port); } #if defined(CONFIG_NFS_V4_1) /* * Test if two socket addresses represent the same actual socket, * by comparing (only) relevant fields, excluding the port number. 
*/ int nfs_sockaddr_match_ipaddr(const struct sockaddr *sa1, const struct sockaddr *sa2) { if (sa1->sa_family != sa2->sa_family) return 0; switch (sa1->sa_family) { case AF_INET: return nfs_sockaddr_match_ipaddr4(sa1, sa2); case AF_INET6: return nfs_sockaddr_match_ipaddr6(sa1, sa2); } return 0; } EXPORT_SYMBOL_GPL(nfs_sockaddr_match_ipaddr); #endif /* CONFIG_NFS_V4_1 */ /* * Test if two socket addresses represent the same actual socket, * by comparing (only) relevant fields, including the port number. */ static int nfs_sockaddr_cmp(const struct sockaddr *sa1, const struct sockaddr *sa2) { if (sa1->sa_family != sa2->sa_family) return 0; switch (sa1->sa_family) { case AF_INET: return nfs_sockaddr_cmp_ip4(sa1, sa2); case AF_INET6: return nfs_sockaddr_cmp_ip6(sa1, sa2); } return 0; } /* * Find an nfs_client on the list that matches the initialisation data * that is supplied. */ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *data) { struct nfs_client *clp; const struct sockaddr *sap = data->addr; struct nfs_net *nn = net_generic(data->net, nfs_net_id); list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { const struct sockaddr *clap = (struct sockaddr *)&clp->cl_addr; /* Don't match clients that failed to initialise properly */ if (clp->cl_cons_state < 0) continue; /* Different NFS versions cannot share the same nfs_client */ if (clp->rpc_ops != data->nfs_mod->rpc_ops) continue; if (clp->cl_proto != data->proto) continue; /* Match nfsv4 minorversion */ if (clp->cl_minorversion != data->minorversion) continue; /* Match the full socket address */ if (!nfs_sockaddr_cmp(sap, clap)) continue; atomic_inc(&clp->cl_count); return clp; } return NULL; } static bool nfs_client_init_is_complete(const struct nfs_client *clp) { return clp->cl_cons_state != NFS_CS_INITING; } int nfs_wait_client_init_complete(const struct nfs_client *clp) { return wait_event_killable(nfs_client_active_wq, nfs_client_init_is_complete(clp)); } 
EXPORT_SYMBOL_GPL(nfs_wait_client_init_complete); /* * Found an existing client. Make sure it's ready before returning. */ static struct nfs_client * nfs_found_client(const struct nfs_client_initdata *cl_init, struct nfs_client *clp) { int error; error = nfs_wait_client_init_complete(clp); if (error < 0) { nfs_put_client(clp); return ERR_PTR(-ERESTARTSYS); } if (clp->cl_cons_state < NFS_CS_READY) { error = clp->cl_cons_state; nfs_put_client(clp); return ERR_PTR(error); } smp_rmb(); dprintk("<-- %s found nfs_client %p for %s\n", __func__, clp, cl_init->hostname ?: ""); return clp; } /* * Look up a client by IP address and protocol version * - creates a new record if one doesn't yet exist */ struct nfs_client * nfs_get_client(const struct nfs_client_initdata *cl_init, const struct rpc_timeout *timeparms, const char *ip_addr, rpc_authflavor_t authflavour) { struct nfs_client *clp, *new = NULL; struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id); const struct nfs_rpc_ops *rpc_ops = cl_init->nfs_mod->rpc_ops; dprintk("--> nfs_get_client(%s,v%u)\n", cl_init->hostname ?: "", rpc_ops->version); /* see if the client already exists */ do { spin_lock(&nn->nfs_client_lock); clp = nfs_match_client(cl_init); if (clp) { spin_unlock(&nn->nfs_client_lock); if (new) new->rpc_ops->free_client(new); return nfs_found_client(cl_init, clp); } if (new) { list_add_tail(&new->cl_share_link, &nn->nfs_client_list); spin_unlock(&nn->nfs_client_lock); new->cl_flags = cl_init->init_flags; return rpc_ops->init_client(new, timeparms, ip_addr, authflavour); } spin_unlock(&nn->nfs_client_lock); new = rpc_ops->alloc_client(cl_init); } while (!IS_ERR(new)); dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n", cl_init->hostname ?: "", PTR_ERR(new)); return new; } EXPORT_SYMBOL_GPL(nfs_get_client); /* * Mark a server as ready or failed */ void nfs_mark_client_ready(struct nfs_client *clp, int state) { smp_wmb(); clp->cl_cons_state = state; wake_up_all(&nfs_client_active_wq); } 
EXPORT_SYMBOL_GPL(nfs_mark_client_ready);

/*
 * Initialise the timeout values for a connection.
 * @timeo and @retrans come from the mount options, with @timeo in
 * tenths of a second; 0 selects the per-transport default.
 */
void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
				    unsigned int timeo, unsigned int retrans)
{
	to->to_initval = timeo * HZ / 10;
	to->to_retries = retrans;

	switch (proto) {
	case XPRT_TRANSPORT_TCP:
	case XPRT_TRANSPORT_RDMA:
		if (to->to_retries == 0)
			to->to_retries = NFS_DEF_TCP_RETRANS;
		if (to->to_initval == 0)
			to->to_initval = NFS_DEF_TCP_TIMEO * HZ / 10;
		if (to->to_initval > NFS_MAX_TCP_TIMEOUT)
			to->to_initval = NFS_MAX_TCP_TIMEOUT;
		/* Linear backoff, clamped to NFS_MAX_TCP_TIMEOUT. */
		to->to_increment = to->to_initval;
		to->to_maxval = to->to_initval + (to->to_increment * to->to_retries);
		if (to->to_maxval > NFS_MAX_TCP_TIMEOUT)
			to->to_maxval = NFS_MAX_TCP_TIMEOUT;
		if (to->to_maxval < to->to_initval)
			to->to_maxval = to->to_initval;
		to->to_exponential = 0;
		break;
	case XPRT_TRANSPORT_UDP:
		if (to->to_retries == 0)
			to->to_retries = NFS_DEF_UDP_RETRANS;
		if (!to->to_initval)
			to->to_initval = NFS_DEF_UDP_TIMEO * HZ / 10;
		if (to->to_initval > NFS_MAX_UDP_TIMEOUT)
			to->to_initval = NFS_MAX_UDP_TIMEOUT;
		/* Exponential backoff for the lossy UDP transport. */
		to->to_maxval = NFS_MAX_UDP_TIMEOUT;
		to->to_exponential = 1;
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL_GPL(nfs_init_timeout_values);

/*
 * Create an RPC client handle.  No-op when clp->cl_rpcclient has
 * already been set up.
 */
int nfs_create_rpc_client(struct nfs_client *clp,
			  const struct rpc_timeout *timeparms,
			  rpc_authflavor_t flavor)
{
	struct rpc_clnt		*clnt = NULL;
	struct rpc_create_args args = {
		.net		= clp->cl_net,
		.protocol	= clp->cl_proto,
		.address	= (struct sockaddr *)&clp->cl_addr,
		.addrsize	= clp->cl_addrlen,
		.timeout	= timeparms,
		.servername	= clp->cl_hostname,
		.program	= &nfs_program,
		.version	= clp->rpc_ops->version,
		.authflavor	= flavor,
	};

	if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
		args.flags |= RPC_CLNT_CREATE_DISCRTRY;
	if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
		args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
	if (test_bit(NFS_CS_INFINITE_SLOTS, &clp->cl_flags))
		args.flags |= RPC_CLNT_CREATE_INFINITE_SLOTS;

	/* Already created? */
	if (!IS_ERR(clp->cl_rpcclient))
		return 0;

	clnt = rpc_create(&args);
	if (IS_ERR(clnt)) {
		dprintk("%s: cannot create RPC client. Error = %ld\n",
				__func__, PTR_ERR(clnt));
		return PTR_ERR(clnt);
	}

	clp->cl_rpcclient = clnt;
	return 0;
}
EXPORT_SYMBOL_GPL(nfs_create_rpc_client);

/*
 * Version 2 or 3 client destruction
 */
static void nfs_destroy_server(struct nfs_server *server)
{
	if (server->nlm_host)
		nlmclnt_done(server->nlm_host);
}

/*
 * Version 2 or 3 lockd setup
 */
static int nfs_start_lockd(struct nfs_server *server)
{
	struct nlm_host *host;
	struct nfs_client *clp = server->nfs_client;
	struct nlmclnt_initdata nlm_init = {
		.hostname	= clp->cl_hostname,
		.address	= (struct sockaddr *)&clp->cl_addr,
		.addrlen	= clp->cl_addrlen,
		.nfs_version	= clp->rpc_ops->version,
		.noresvport	= server->flags & NFS_MOUNT_NORESVPORT ?
					1 : 0,
		.net		= clp->cl_net,
	};

	/* NFSv4 has its own locking; lockd is only for v2/v3. */
	if (nlm_init.nfs_version > 3)
		return 0;
	/* Both lock styles handled locally: no lockd needed. */
	if ((server->flags & NFS_MOUNT_LOCAL_FLOCK) &&
			(server->flags & NFS_MOUNT_LOCAL_FCNTL))
		return 0;

	switch (clp->cl_proto) {
		default:
			nlm_init.protocol = IPPROTO_TCP;
			break;
		case XPRT_TRANSPORT_UDP:
			nlm_init.protocol = IPPROTO_UDP;
	}

	host = nlmclnt_init(&nlm_init);
	if (IS_ERR(host))
		return PTR_ERR(host);

	server->nlm_host = host;
	server->destroy = nfs_destroy_server;
	return 0;
}

/*
 * Create a general RPC client by cloning the client's bootstrap
 * rpc_clnt with the mount's auth flavour and timeout values.
 */
int nfs_init_server_rpcclient(struct nfs_server *server,
		const struct rpc_timeout *timeo,
		rpc_authflavor_t pseudoflavour)
{
	struct nfs_client *clp = server->nfs_client;

	server->client = rpc_clone_client_set_auth(clp->cl_rpcclient,
							pseudoflavour);
	if (IS_ERR(server->client)) {
		dprintk("%s: couldn't create rpc_client!\n", __func__);
		return PTR_ERR(server->client);
	}

	memcpy(&server->client->cl_timeout_default,
			timeo,
			sizeof(server->client->cl_timeout_default));
	server->client->cl_timeout = &server->client->cl_timeout_default;
	server->client->cl_softrtry = 0;
	if (server->flags & NFS_MOUNT_SOFT)
		server->client->cl_softrtry = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(nfs_init_server_rpcclient);

/**
 * nfs_init_client - Initialise an NFS2 or NFS3 client
 *
 * @clp: nfs_client to initialise
 * @timeparms: timeout parameters for underlying RPC transport
 * @ip_addr: IP presentation address (not used)
 * @authflavour: authentication flavor for underlying RPC transport
 *
 * Returns pointer to an NFS client, or an ERR_PTR value.
 */
struct nfs_client *nfs_init_client(struct nfs_client *clp,
		    const struct rpc_timeout *timeparms,
		    const char *ip_addr, rpc_authflavor_t authflavour)
{
	int error;

	if (clp->cl_cons_state == NFS_CS_READY) {
		/* the client is already initialised */
		dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp);
		return clp;
	}

	/*
	 * Create a client RPC handle for doing FSSTAT with UNIX auth only
	 * - RFC 2623, sec 2.3.2
	 */
	error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
	if (error < 0)
		goto error;
	nfs_mark_client_ready(clp, NFS_CS_READY);
	return clp;

error:
	nfs_mark_client_ready(clp, error);
	nfs_put_client(clp);
	dprintk("<-- nfs_init_client() = xerror %d\n", error);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(nfs_init_client);

/*
 * Create a version 2 or 3 client: find or create the shared
 * nfs_client, then fill in the nfs_server from the parsed mount data.
 */
static int nfs_init_server(struct nfs_server *server,
			   const struct nfs_parsed_mount_data *data,
			   struct nfs_subversion *nfs_mod)
{
	struct nfs_client_initdata cl_init = {
		.hostname = data->nfs_server.hostname,
		.addr = (const struct sockaddr *)&data->nfs_server.address,
		.addrlen = data->nfs_server.addrlen,
		.nfs_mod = nfs_mod,
		.proto = data->nfs_server.protocol,
		.net = data->net,
	};
	struct rpc_timeout timeparms;
	struct nfs_client *clp;
	int error;

	dprintk("--> nfs_init_server()\n");

	nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
			data->timeo, data->retrans);
	if (data->flags & NFS_MOUNT_NORESVPORT)
		set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
	if (server->options & NFS_OPTION_MIGRATION)
		set_bit(NFS_CS_MIGRATION, &cl_init.init_flags);

	/* Allocate or find a client reference we can use */
	clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX);
	if (IS_ERR(clp)) {
		dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
		return PTR_ERR(clp);
	}

	server->nfs_client = clp;

	/* Initialise the client representation from the mount data */
	server->flags = data->flags;
	server->options = data->options;
	server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
		NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
		NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;

	if (data->rsize)
		server->rsize = nfs_block_size(data->rsize, NULL);
	if (data->wsize)
		server->wsize = nfs_block_size(data->wsize, NULL);

	/* Attribute cache timeouts: mount data is in seconds. */
	server->acregmin = data->acregmin * HZ;
	server->acregmax = data->acregmax * HZ;
	server->acdirmin = data->acdirmin * HZ;
	server->acdirmax = data->acdirmax * HZ;

	/* Start lockd here, before we might error out */
	error = nfs_start_lockd(server);
	if (error < 0)
		goto error;

	server->port = data->nfs_server.port;

	error = nfs_init_server_rpcclient(server, &timeparms, data->auth_flavors[0]);
	if (error < 0)
		goto error;

	/* Preserve the values of mount_server-related mount options */
	if (data->mount_server.addrlen) {
		memcpy(&server->mountd_address, &data->mount_server.address,
			data->mount_server.addrlen);
		server->mountd_addrlen = data->mount_server.addrlen;
	}
	server->mountd_version = data->mount_server.version;
	server->mountd_port = data->mount_server.port;
	server->mountd_protocol = data->mount_server.protocol;

	server->namelen = data->namlen;
	dprintk("<-- nfs_init_server() = 0 [new %p]\n", clp);
	return 0;

error:
	server->nfs_client = NULL;
	nfs_put_client(clp);
	dprintk("<-- nfs_init_server() = xerror %d\n", error);
	return error;
}

/*
 * Load up the server record from information gained in an fsinfo record
 * (clamps rsize/wsize/dtsize against server and transport limits).
 */
static void nfs_server_set_fsinfo(struct nfs_server *server,
				  struct nfs_fh *mntfh,
				  struct nfs_fsinfo *fsinfo)
{
	unsigned long max_rpc_payload;

	/* Work out a lot of parameters */
	if (server->rsize == 0)
		server->rsize = nfs_block_size(fsinfo->rtpref, NULL);
	if (server->wsize == 0)
		server->wsize = nfs_block_size(fsinfo->wtpref, NULL);

	if (fsinfo->rtmax >= 512 && server->rsize > fsinfo->rtmax)
		server->rsize = nfs_block_size(fsinfo->rtmax, NULL);
	if (fsinfo->wtmax >= 512 && server->wsize > fsinfo->wtmax)
		server->wsize = nfs_block_size(fsinfo->wtmax, NULL);

	max_rpc_payload = nfs_block_size(rpc_max_payload(server->client), NULL);
	if (server->rsize > max_rpc_payload)
		server->rsize = max_rpc_payload;
	if (server->rsize > NFS_MAX_FILE_IO_SIZE)
		server->rsize = NFS_MAX_FILE_IO_SIZE;
	server->rpages = (server->rsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	server->backing_dev_info.name = "nfs";
	server->backing_dev_info.ra_pages = server->rpages * NFS_MAX_READAHEAD;

	if (server->wsize > max_rpc_payload)
		server->wsize = max_rpc_payload;
	if (server->wsize > NFS_MAX_FILE_IO_SIZE)
		server->wsize = NFS_MAX_FILE_IO_SIZE;
	server->wpages = (server->wsize + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;

	server->wtmult = nfs_block_bits(fsinfo->wtmult, NULL);

	server->dtsize = nfs_block_size(fsinfo->dtpref, NULL);
	if (server->dtsize > PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES)
		server->dtsize = PAGE_CACHE_SIZE * NFS_MAX_READDIR_PAGES;
	if (server->dtsize > server->rsize)
		server->dtsize = server->rsize;

	if (server->flags & NFS_MOUNT_NOAC) {
		server->acregmin = server->acregmax = 0;
		server->acdirmin = server->acdirmax = 0;
	}

	server->maxfilesize = fsinfo->maxfilesize;

	server->time_delta = fsinfo->time_delta;

	/* We're airborne!  Set the socket buffer sizes. */
	rpc_setbufsize(server->client, server->wsize + 100, server->rsize + 100);
}

/*
 * Probe filesystem information, including the FSID on v2/v3
 */
int nfs_probe_fsinfo(struct nfs_server *server, struct nfs_fh *mntfh,
		     struct nfs_fattr *fattr)
{
	struct nfs_fsinfo fsinfo;
	struct nfs_client *clp = server->nfs_client;
	int error;

	dprintk("--> nfs_probe_fsinfo()\n");

	if (clp->rpc_ops->set_capabilities != NULL) {
		error = clp->rpc_ops->set_capabilities(server, mntfh);
		if (error < 0)
			goto out_error;
	}

	fsinfo.fattr = fattr;
	fsinfo.layouttype = 0;
	error = clp->rpc_ops->fsinfo(server, mntfh, &fsinfo);
	if (error < 0)
		goto out_error;

	nfs_server_set_fsinfo(server, mntfh, &fsinfo);

	/* Get some general file system info */
	if (server->namelen == 0) {
		struct nfs_pathconf pathinfo;

		pathinfo.fattr = fattr;
		nfs_fattr_init(fattr);

		if (clp->rpc_ops->pathconf(server, mntfh, &pathinfo) >= 0)
			server->namelen = pathinfo.max_namelen;
	}

	dprintk("<-- nfs_probe_fsinfo() = 0\n");
	return 0;

out_error:
	dprintk("nfs_probe_fsinfo: error = %d\n", -error);
	return error;
}
EXPORT_SYMBOL_GPL(nfs_probe_fsinfo);

/*
 * Copy useful information when duplicating a server record
 */
void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_server *source)
{
	target->flags = source->flags;
	target->rsize = source->rsize;
	target->wsize = source->wsize;
	target->acregmin = source->acregmin;
	target->acregmax = source->acregmax;
	target->acdirmin = source->acdirmin;
	target->acdirmax = source->acdirmax;
	target->caps = source->caps;
	target->options = source->options;
}
EXPORT_SYMBOL_GPL(nfs_server_copy_userdata);

/*
 * Link the server onto the per-client superblock list and the per-net
 * volume list, re-enabling lease renewal for the client.
 */
void nfs_server_insert_lists(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);

	spin_lock(&nn->nfs_client_lock);
	list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
	list_add_tail(&server->master_link, &nn->nfs_volume_list);
	clear_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
	spin_unlock(&nn->nfs_client_lock);
}
EXPORT_SYMBOL_GPL(nfs_server_insert_lists);

/*
 * Reverse of nfs_server_insert_lists().  If this was the last
 * superblock of the client, stop lease renewal.  Waits for a grace
 * period so RCU readers of cl_superblocks see a consistent list.
 */
static void nfs_server_remove_lists(struct nfs_server *server)
{
	struct nfs_client *clp = server->nfs_client;
	struct nfs_net *nn;

	if (clp == NULL)
		return;
	nn = net_generic(clp->cl_net, nfs_net_id);
	spin_lock(&nn->nfs_client_lock);
	list_del_rcu(&server->client_link);
	if (list_empty(&clp->cl_superblocks))
		set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
	list_del(&server->master_link);
	spin_unlock(&nn->nfs_client_lock);

	synchronize_rcu();
}

/*
 * Allocate and initialise a server record
 */
struct nfs_server *nfs_alloc_server(void)
{
	struct nfs_server *server;

	server = kzalloc(sizeof(struct nfs_server), GFP_KERNEL);
	if (!server)
		return NULL;

	server->client = server->client_acl = ERR_PTR(-EINVAL);

	/* Zero out the NFS state stuff */
	INIT_LIST_HEAD(&server->client_link);
	INIT_LIST_HEAD(&server->master_link);
	INIT_LIST_HEAD(&server->delegations);
	INIT_LIST_HEAD(&server->layouts);
	INIT_LIST_HEAD(&server->state_owners_lru);

	atomic_set(&server->active, 0);

	server->io_stats = nfs_alloc_iostats();
	if (!server->io_stats) {
		kfree(server);
		return NULL;
	}

	if (bdi_init(&server->backing_dev_info)) {
		nfs_free_iostats(server->io_stats);
		kfree(server);
		return NULL;
	}

	ida_init(&server->openowner_id);
	ida_init(&server->lockowner_id);
	pnfs_init_server(server);

	return server;
}
EXPORT_SYMBOL_GPL(nfs_alloc_server);

/*
 * Free up a server record
 */
void nfs_free_server(struct nfs_server *server)
{
	dprintk("--> nfs_free_server()\n");

	nfs_server_remove_lists(server);

	if (server->destroy != NULL)
		server->destroy(server);

	if (!IS_ERR(server->client_acl))
		rpc_shutdown_client(server->client_acl);
	if (!IS_ERR(server->client))
		rpc_shutdown_client(server->client);

	nfs_put_client(server->nfs_client);

	ida_destroy(&server->lockowner_id);
	ida_destroy(&server->openowner_id);
	nfs_free_iostats(server->io_stats);
	bdi_destroy(&server->backing_dev_info);
	kfree(server);
	nfs_release_automount_timer();
	dprintk("<-- nfs_free_server()\n");
}
EXPORT_SYMBOL_GPL(nfs_free_server);

/*
 * Create a version 2 or 3 volume record
 * - keyed on server and FSID
 */
struct nfs_server *nfs_create_server(struct nfs_mount_info *mount_info,
				     struct nfs_subversion *nfs_mod)
{
	struct nfs_server *server;
	struct nfs_fattr *fattr;
	int error;

	server = nfs_alloc_server();
	if (!server)
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	fattr = nfs_alloc_fattr();
	if (fattr == NULL)
		goto error;

	/* Get a client representation */
	error = nfs_init_server(server, mount_info->parsed, nfs_mod);
	if (error < 0)
		goto error;

	/* Probe the root fh to retrieve its FSID */
	error = nfs_probe_fsinfo(server, mount_info->mntfh, fattr);
	if (error < 0)
		goto error;
	if (server->nfs_client->rpc_ops->version == 3) {
		if (server->namelen == 0 || server->namelen > NFS3_MAXNAMLEN)
			server->namelen = NFS3_MAXNAMLEN;
		if (!(mount_info->parsed->flags & NFS_MOUNT_NORDIRPLUS))
			server->caps |= NFS_CAP_READDIRPLUS;
	} else {
		if (server->namelen == 0 || server->namelen > NFS2_MAXNAMLEN)
			server->namelen = NFS2_MAXNAMLEN;
	}

	/* fsinfo may not have returned attributes; fetch them explicitly. */
	if (!(fattr->valid & NFS_ATTR_FATTR)) {
		error = nfs_mod->rpc_ops->getattr(server, mount_info->mntfh, fattr);
		if (error < 0) {
			dprintk("nfs_create_server: getattr error = %d\n", -error);
			goto error;
		}
	}
	memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));

	dprintk("Server FSID: %llx:%llx\n",
		(unsigned long long) server->fsid.major,
		(unsigned long long) server->fsid.minor);

	nfs_server_insert_lists(server);
	server->mount_time = jiffies;
	nfs_free_fattr(fattr);
	return server;

error:
	nfs_free_fattr(fattr);
	nfs_free_server(server);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(nfs_create_server);

/*
 * Clone an NFS2, NFS3 or NFS4 server record
 */
struct nfs_server *nfs_clone_server(struct nfs_server *source,
				    struct nfs_fh *fh,
				    struct nfs_fattr *fattr,
				    rpc_authflavor_t flavor)
{
	struct nfs_server *server;
	struct nfs_fattr *fattr_fsinfo;
	int error;

	dprintk("--> nfs_clone_server(,%llx:%llx,)\n",
		(unsigned long long) fattr->fsid.major,
		(unsigned long long) fattr->fsid.minor);

	server = nfs_alloc_server();
	if (!server)
		return ERR_PTR(-ENOMEM);

	error = -ENOMEM;
	fattr_fsinfo = nfs_alloc_fattr();
	if (fattr_fsinfo == NULL)
		goto out_free_server;

	/* Copy data from the source */
	server->nfs_client = source->nfs_client;
	server->destroy = source->destroy;
	atomic_inc(&server->nfs_client->cl_count);
	nfs_server_copy_userdata(server, source);

	server->fsid = fattr->fsid;

	error = nfs_init_server_rpcclient(server,
			source->client->cl_timeout,
			flavor);
	if (error < 0)
		goto out_free_server;

	/* probe the filesystem info for this server filesystem */
	error = nfs_probe_fsinfo(server, fh, fattr_fsinfo);
	if (error < 0)
		goto out_free_server;

	if (server->namelen == 0 || server->namelen > NFS4_MAXNAMLEN)
		server->namelen = NFS4_MAXNAMLEN;

	dprintk("Cloned FSID: %llx:%llx\n",
		(unsigned long long) server->fsid.major,
		(unsigned long long) server->fsid.minor);

	error = nfs_start_lockd(server);
	if (error < 0)
		goto out_free_server;

	nfs_server_insert_lists(server);
	server->mount_time = jiffies;

	nfs_free_fattr(fattr_fsinfo);
	dprintk("<-- nfs_clone_server() = %p\n", server);
	return server;

out_free_server:
	nfs_free_fattr(fattr_fsinfo);
	nfs_free_server(server);
	dprintk("<-- nfs_clone_server() = error %d\n", error);
	return ERR_PTR(error);
}
EXPORT_SYMBOL_GPL(nfs_clone_server);

/* Per-network-namespace NFS client state initialisation. */
void nfs_clients_init(struct net *net)
{
	struct nfs_net *nn = net_generic(net, nfs_net_id);

	INIT_LIST_HEAD(&nn->nfs_client_list);
	INIT_LIST_HEAD(&nn->nfs_volume_list);
#if IS_ENABLED(CONFIG_NFS_V4)
	idr_init(&nn->cb_ident_idr);
#endif
	spin_lock_init(&nn->nfs_client_lock);
	nn->boot_time = CURRENT_TIME;
}

#ifdef CONFIG_PROC_FS
static struct proc_dir_entry *proc_fs_nfs;

static int nfs_server_list_open(struct inode *inode, struct file *file);
static void *nfs_server_list_start(struct seq_file *p, loff_t *pos);
static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos);
static void nfs_server_list_stop(struct seq_file *p, void *v);
static int nfs_server_list_show(struct seq_file *m, void *v);

static const struct seq_operations nfs_server_list_ops = {
	.start	= nfs_server_list_start,
	.next	= nfs_server_list_next,
	.stop	= nfs_server_list_stop,
	.show	= nfs_server_list_show,
};

static const struct file_operations nfs_server_list_fops = {
	.open		= nfs_server_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.owner		= THIS_MODULE,
};

static int nfs_volume_list_open(struct inode *inode, struct file *file);
static void *nfs_volume_list_start(struct seq_file *p, loff_t *pos);
static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos);
static void nfs_volume_list_stop(struct seq_file *p, void *v);
static int nfs_volume_list_show(struct seq_file *m, void *v);

static const struct seq_operations nfs_volume_list_ops = {
	.start	= nfs_volume_list_start,
	.next	= nfs_volume_list_next,
	.stop	= nfs_volume_list_stop,
	.show	= nfs_volume_list_show,
};

static const struct file_operations nfs_volume_list_fops = {
	.open		= nfs_volume_list_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release,
	.owner		= THIS_MODULE,
};

/*
 * open "/proc/fs/nfsfs/servers" which provides a summary of servers with which
 * we're dealing
 */
static int nfs_server_list_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;
	/* Derive the net namespace from the procfs mount's pid namespace. */
	struct pid_namespace *pid_ns = file->f_dentry->d_sb->s_fs_info;
	struct net *net = pid_ns->child_reaper->nsproxy->net_ns;

	ret = seq_open(file, &nfs_server_list_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = net;

	return 0;
}

/*
 * set up the iterator to start reading from the server list and return the first item
 */
static void *nfs_server_list_start(struct seq_file *m, loff_t *_pos)
{
	struct nfs_net *nn = net_generic(m->private, nfs_net_id);

	/* lock the list against modification */
	spin_lock(&nn->nfs_client_lock);
	return seq_list_start_head(&nn->nfs_client_list, *_pos);
}

/*
 * move to next server
 */
static void *nfs_server_list_next(struct seq_file *p, void *v, loff_t *pos)
{
	struct nfs_net *nn = net_generic(p->private, nfs_net_id);

	return seq_list_next(v, &nn->nfs_client_list, pos);
}

/*
 * clean up after reading from the transports list
 */
static void nfs_server_list_stop(struct seq_file *p, void *v)
{
	struct nfs_net *nn = net_generic(p->private, nfs_net_id);

	spin_unlock(&nn->nfs_client_lock);
}

/*
 * display a header line followed by a load of call lines
 */
static int nfs_server_list_show(struct seq_file *m, void *v)
{
	struct nfs_client *clp;
	struct nfs_net *nn = net_generic(m->private, nfs_net_id);

	/* display header on line 1 */
	if (v == &nn->nfs_client_list) {
		seq_puts(m, "NV SERVER PORT USE HOSTNAME\n");
		return 0;
	}

	/* display one transport per line on subsequent lines */
	clp = list_entry(v, struct nfs_client, cl_share_link);

	/* Check if the client is initialized */
	if (clp->cl_cons_state != NFS_CS_READY)
		return 0;

	rcu_read_lock();
	seq_printf(m, "v%u %s %s %3d %s\n",
		   clp->rpc_ops->version,
		   rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
		   rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
		   atomic_read(&clp->cl_count),
		   clp->cl_hostname);
	rcu_read_unlock();

	return 0;
}

/*
 * open "/proc/fs/nfsfs/volumes" which provides a summary of extant volumes
 */
static int nfs_volume_list_open(struct inode *inode, struct file *file)
{
	struct seq_file *m;
	int ret;
	/* Derive the net namespace from the procfs mount's pid namespace. */
	struct pid_namespace *pid_ns = file->f_dentry->d_sb->s_fs_info;
	struct net *net = pid_ns->child_reaper->nsproxy->net_ns;

	ret = seq_open(file, &nfs_volume_list_ops);
	if (ret < 0)
		return ret;

	m = file->private_data;
	m->private = net;

	return 0;
}

/*
 * set up the iterator to start reading from the volume list and return the first item
 */
static void *nfs_volume_list_start(struct seq_file *m, loff_t *_pos)
{
	struct nfs_net *nn = net_generic(m->private, nfs_net_id);

	/* lock the list against modification */
	spin_lock(&nn->nfs_client_lock);
	return seq_list_start_head(&nn->nfs_volume_list, *_pos);
}

/*
 * move to next volume
 */
static void *nfs_volume_list_next(struct seq_file *p, void *v, loff_t *pos)
{
	struct nfs_net *nn = net_generic(p->private, nfs_net_id);

	return seq_list_next(v, &nn->nfs_volume_list, pos);
}

/*
 * clean up after reading from the transports list
 */
static void nfs_volume_list_stop(struct seq_file *p, void *v)
{
	struct nfs_net *nn = net_generic(p->private, nfs_net_id);

	spin_unlock(&nn->nfs_client_lock);
}

/*
 * display a header line followed by a load of call lines
 */
static int nfs_volume_list_show(struct seq_file *m, void *v)
{
	struct nfs_server *server;
	struct nfs_client *clp;
	char dev[8], fsid[17];
	struct nfs_net *nn = net_generic(m->private, nfs_net_id);

	/* display header on line 1 */
	if (v == &nn->nfs_volume_list) {
		seq_puts(m, "NV SERVER PORT DEV FSID FSC\n");
		return 0;
	}
	/* display one transport per line on subsequent lines */
	server = list_entry(v, struct nfs_server, master_link);
	clp = server->nfs_client;

	snprintf(dev, 8, "%u:%u",
		 MAJOR(server->s_dev), MINOR(server->s_dev));
	snprintf(fsid, 17, "%llx:%llx",
		 (unsigned long long) server->fsid.major,
		 (unsigned long long) server->fsid.minor);
	rcu_read_lock();
	seq_printf(m, "v%u %s %s %-7s %-17s %s\n",
		   clp->rpc_ops->version,
		   rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
		   rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
		   dev,
		   fsid,
		   nfs_server_fscache_state(server));
	rcu_read_unlock();

	return 0;
}

/*
 * initialise the /proc/fs/nfsfs/ directory
 */
int __init nfs_fs_proc_init(void)
{
	struct proc_dir_entry *p;

	proc_fs_nfs = proc_mkdir("fs/nfsfs", NULL);
	if (!proc_fs_nfs)
		goto error_0;

	/* a file of servers with which we're dealing */
	p = proc_create("servers", S_IFREG|S_IRUGO,
			proc_fs_nfs, &nfs_server_list_fops);
	if (!p)
		goto error_1;

	/* a file of volumes that we have mounted */
	p = proc_create("volumes", S_IFREG|S_IRUGO,
			proc_fs_nfs, &nfs_volume_list_fops);
	if (!p)
		goto error_2;
	return 0;

error_2:
	remove_proc_entry("servers", proc_fs_nfs);
error_1:
	remove_proc_entry("fs/nfsfs", NULL);
error_0:
	return -ENOMEM;
}

/*
 * clean up the /proc/fs/nfsfs/ directory
 */
void nfs_fs_proc_exit(void)
{
	remove_proc_entry("volumes", proc_fs_nfs);
	remove_proc_entry("servers", proc_fs_nfs);
	remove_proc_entry("fs/nfsfs", NULL);
}

#endif /* CONFIG_PROC_FS */
gpl-2.0
garwynn/D710VMUB_GB28_Kernel
arch/sparc/kernel/auxio_32.c
2587
3751
/* auxio.c: Probing for the Sparc AUXIO register at boot time. * * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/stddef.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/oplib.h> #include <asm/io.h> #include <asm/auxio.h> #include <asm/string.h> /* memset(), Linux has no bzero() */ /* Probe and map in the Auxiliary I/O register */ /* auxio_register is not static because it is referenced * in entry.S::floppy_tdone */ void __iomem *auxio_register = NULL; static DEFINE_SPINLOCK(auxio_lock); void __init auxio_probe(void) { phandle node, auxio_nd; struct linux_prom_registers auxregs[1]; struct resource r; switch (sparc_cpu_model) { case sparc_leon: case sun4d: case sun4: return; default: break; } node = prom_getchild(prom_root_node); auxio_nd = prom_searchsiblings(node, "auxiliary-io"); if(!auxio_nd) { node = prom_searchsiblings(node, "obio"); node = prom_getchild(node); auxio_nd = prom_searchsiblings(node, "auxio"); if(!auxio_nd) { #ifdef CONFIG_PCI /* There may be auxio on Ebus */ return; #else if(prom_searchsiblings(node, "leds")) { /* VME chassis sun4m machine, no auxio exists. */ return; } prom_printf("Cannot find auxio node, cannot continue...\n"); prom_halt(); #endif } } if(prom_getproperty(auxio_nd, "reg", (char *) auxregs, sizeof(auxregs)) <= 0) return; prom_apply_obio_ranges(auxregs, 0x1); /* Map the register both read and write */ r.flags = auxregs[0].which_io & 0xF; r.start = auxregs[0].phys_addr; r.end = auxregs[0].phys_addr + auxregs[0].reg_size - 1; auxio_register = of_ioremap(&r, 0, auxregs[0].reg_size, "auxio"); /* Fix the address on sun4m and sun4c. 
*/ if((((unsigned long) auxregs[0].phys_addr) & 3) == 3 || sparc_cpu_model == sun4c) auxio_register += (3 - ((unsigned long)auxio_register & 3)); set_auxio(AUXIO_LED, 0); } unsigned char get_auxio(void) { if(auxio_register) return sbus_readb(auxio_register); return 0; } EXPORT_SYMBOL(get_auxio); void set_auxio(unsigned char bits_on, unsigned char bits_off) { unsigned char regval; unsigned long flags; spin_lock_irqsave(&auxio_lock, flags); switch(sparc_cpu_model) { case sun4c: regval = sbus_readb(auxio_register); sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN, auxio_register); break; case sun4m: if(!auxio_register) break; /* VME chassis sun4m, no auxio. */ regval = sbus_readb(auxio_register); sbus_writeb(((regval | bits_on) & ~bits_off) | AUXIO_ORMEIN4M, auxio_register); break; case sun4d: break; default: panic("Can't set AUXIO register on this machine."); } spin_unlock_irqrestore(&auxio_lock, flags); } EXPORT_SYMBOL(set_auxio); /* sun4m power control register (AUXIO2) */ volatile unsigned char * auxio_power_register = NULL; void __init auxio_power_probe(void) { struct linux_prom_registers regs; phandle node; struct resource r; /* Attempt to find the sun4m power control node. */ node = prom_getchild(prom_root_node); node = prom_searchsiblings(node, "obio"); node = prom_getchild(node); node = prom_searchsiblings(node, "power"); if (node == 0 || (s32)node == -1) return; /* Map the power control register. */ if (prom_getproperty(node, "reg", (char *)&regs, sizeof(regs)) <= 0) return; prom_apply_obio_ranges(&regs, 1); memset(&r, 0, sizeof(r)); r.flags = regs.which_io & 0xF; r.start = regs.phys_addr; r.end = regs.phys_addr + regs.reg_size - 1; auxio_power_register = (unsigned char *) of_ioremap(&r, 0, regs.reg_size, "auxpower"); /* Display a quick message on the console. */ if (auxio_power_register) printk(KERN_INFO "Power off control detected.\n"); }
gpl-2.0
Dm47021/Holo-a200
net/sched/sch_multiq.c
3099
9468
/* * Copyright (c) 2008, Intel Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Author: Alexander Duyck <alexander.h.duyck@intel.com> */ #include <linux/module.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/skbuff.h> #include <net/netlink.h> #include <net/pkt_sched.h> struct multiq_sched_data { u16 bands; u16 max_bands; u16 curband; struct tcf_proto *filter_list; struct Qdisc **queues; }; static struct Qdisc * multiq_classify(struct sk_buff *skb, struct Qdisc *sch, int *qerr) { struct multiq_sched_data *q = qdisc_priv(sch); u32 band; struct tcf_result res; int err; *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS; err = tc_classify(skb, q->filter_list, &res); #ifdef CONFIG_NET_CLS_ACT switch (err) { case TC_ACT_STOLEN: case TC_ACT_QUEUED: *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN; case TC_ACT_SHOT: return NULL; } #endif band = skb_get_queue_mapping(skb); if (band >= q->bands) return q->queues[0]; return q->queues[band]; } static int multiq_enqueue(struct sk_buff *skb, struct Qdisc *sch) { struct Qdisc *qdisc; int ret; qdisc = multiq_classify(skb, sch, &ret); #ifdef CONFIG_NET_CLS_ACT if (qdisc == NULL) { if (ret & __NET_XMIT_BYPASS) sch->qstats.drops++; kfree_skb(skb); return ret; } #endif ret = qdisc_enqueue(skb, qdisc); if (ret == NET_XMIT_SUCCESS) { 
sch->q.qlen++; return NET_XMIT_SUCCESS; } if (net_xmit_drop_count(ret)) sch->qstats.drops++; return ret; } static struct sk_buff *multiq_dequeue(struct Qdisc *sch) { struct multiq_sched_data *q = qdisc_priv(sch); struct Qdisc *qdisc; struct sk_buff *skb; int band; for (band = 0; band < q->bands; band++) { /* cycle through bands to ensure fairness */ q->curband++; if (q->curband >= q->bands) q->curband = 0; /* Check that target subqueue is available before * pulling an skb to avoid head-of-line blocking. */ if (!__netif_subqueue_stopped(qdisc_dev(sch), q->curband)) { qdisc = q->queues[q->curband]; skb = qdisc->dequeue(qdisc); if (skb) { qdisc_bstats_update(sch, skb); sch->q.qlen--; return skb; } } } return NULL; } static struct sk_buff *multiq_peek(struct Qdisc *sch) { struct multiq_sched_data *q = qdisc_priv(sch); unsigned int curband = q->curband; struct Qdisc *qdisc; struct sk_buff *skb; int band; for (band = 0; band < q->bands; band++) { /* cycle through bands to ensure fairness */ curband++; if (curband >= q->bands) curband = 0; /* Check that target subqueue is available before * pulling an skb to avoid head-of-line blocking. 
*/ if (!__netif_subqueue_stopped(qdisc_dev(sch), curband)) { qdisc = q->queues[curband]; skb = qdisc->ops->peek(qdisc); if (skb) return skb; } } return NULL; } static unsigned int multiq_drop(struct Qdisc *sch) { struct multiq_sched_data *q = qdisc_priv(sch); int band; unsigned int len; struct Qdisc *qdisc; for (band = q->bands - 1; band >= 0; band--) { qdisc = q->queues[band]; if (qdisc->ops->drop) { len = qdisc->ops->drop(qdisc); if (len != 0) { sch->q.qlen--; return len; } } } return 0; } static void multiq_reset(struct Qdisc *sch) { u16 band; struct multiq_sched_data *q = qdisc_priv(sch); for (band = 0; band < q->bands; band++) qdisc_reset(q->queues[band]); sch->q.qlen = 0; q->curband = 0; } static void multiq_destroy(struct Qdisc *sch) { int band; struct multiq_sched_data *q = qdisc_priv(sch); tcf_destroy_chain(&q->filter_list); for (band = 0; band < q->bands; band++) qdisc_destroy(q->queues[band]); kfree(q->queues); } static int multiq_tune(struct Qdisc *sch, struct nlattr *opt) { struct multiq_sched_data *q = qdisc_priv(sch); struct tc_multiq_qopt *qopt; int i; if (!netif_is_multiqueue(qdisc_dev(sch))) return -EOPNOTSUPP; if (nla_len(opt) < sizeof(*qopt)) return -EINVAL; qopt = nla_data(opt); qopt->bands = qdisc_dev(sch)->real_num_tx_queues; sch_tree_lock(sch); q->bands = qopt->bands; for (i = q->bands; i < q->max_bands; i++) { if (q->queues[i] != &noop_qdisc) { struct Qdisc *child = q->queues[i]; q->queues[i] = &noop_qdisc; qdisc_tree_decrease_qlen(child, child->q.qlen); qdisc_destroy(child); } } sch_tree_unlock(sch); for (i = 0; i < q->bands; i++) { if (q->queues[i] == &noop_qdisc) { struct Qdisc *child, *old; child = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, TC_H_MAKE(sch->handle, i + 1)); if (child) { sch_tree_lock(sch); old = q->queues[i]; q->queues[i] = child; if (old != &noop_qdisc) { qdisc_tree_decrease_qlen(old, old->q.qlen); qdisc_destroy(old); } sch_tree_unlock(sch); } } } return 0; } static int multiq_init(struct Qdisc *sch, struct 
nlattr *opt) { struct multiq_sched_data *q = qdisc_priv(sch); int i, err; q->queues = NULL; if (opt == NULL) return -EINVAL; q->max_bands = qdisc_dev(sch)->num_tx_queues; q->queues = kcalloc(q->max_bands, sizeof(struct Qdisc *), GFP_KERNEL); if (!q->queues) return -ENOBUFS; for (i = 0; i < q->max_bands; i++) q->queues[i] = &noop_qdisc; err = multiq_tune(sch, opt); if (err) kfree(q->queues); return err; } static int multiq_dump(struct Qdisc *sch, struct sk_buff *skb) { struct multiq_sched_data *q = qdisc_priv(sch); unsigned char *b = skb_tail_pointer(skb); struct tc_multiq_qopt opt; opt.bands = q->bands; opt.max_bands = q->max_bands; NLA_PUT(skb, TCA_OPTIONS, sizeof(opt), &opt); return skb->len; nla_put_failure: nlmsg_trim(skb, b); return -1; } static int multiq_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new, struct Qdisc **old) { struct multiq_sched_data *q = qdisc_priv(sch); unsigned long band = arg - 1; if (new == NULL) new = &noop_qdisc; sch_tree_lock(sch); *old = q->queues[band]; q->queues[band] = new; qdisc_tree_decrease_qlen(*old, (*old)->q.qlen); qdisc_reset(*old); sch_tree_unlock(sch); return 0; } static struct Qdisc * multiq_leaf(struct Qdisc *sch, unsigned long arg) { struct multiq_sched_data *q = qdisc_priv(sch); unsigned long band = arg - 1; return q->queues[band]; } static unsigned long multiq_get(struct Qdisc *sch, u32 classid) { struct multiq_sched_data *q = qdisc_priv(sch); unsigned long band = TC_H_MIN(classid); if (band - 1 >= q->bands) return 0; return band; } static unsigned long multiq_bind(struct Qdisc *sch, unsigned long parent, u32 classid) { return multiq_get(sch, classid); } static void multiq_put(struct Qdisc *q, unsigned long cl) { } static int multiq_dump_class(struct Qdisc *sch, unsigned long cl, struct sk_buff *skb, struct tcmsg *tcm) { struct multiq_sched_data *q = qdisc_priv(sch); tcm->tcm_handle |= TC_H_MIN(cl); tcm->tcm_info = q->queues[cl - 1]->handle; return 0; } static int multiq_dump_class_stats(struct Qdisc 
*sch, unsigned long cl, struct gnet_dump *d) { struct multiq_sched_data *q = qdisc_priv(sch); struct Qdisc *cl_q; cl_q = q->queues[cl - 1]; cl_q->qstats.qlen = cl_q->q.qlen; if (gnet_stats_copy_basic(d, &cl_q->bstats) < 0 || gnet_stats_copy_queue(d, &cl_q->qstats) < 0) return -1; return 0; } static void multiq_walk(struct Qdisc *sch, struct qdisc_walker *arg) { struct multiq_sched_data *q = qdisc_priv(sch); int band; if (arg->stop) return; for (band = 0; band < q->bands; band++) { if (arg->count < arg->skip) { arg->count++; continue; } if (arg->fn(sch, band + 1, arg) < 0) { arg->stop = 1; break; } arg->count++; } } static struct tcf_proto **multiq_find_tcf(struct Qdisc *sch, unsigned long cl) { struct multiq_sched_data *q = qdisc_priv(sch); if (cl) return NULL; return &q->filter_list; } static const struct Qdisc_class_ops multiq_class_ops = { .graft = multiq_graft, .leaf = multiq_leaf, .get = multiq_get, .put = multiq_put, .walk = multiq_walk, .tcf_chain = multiq_find_tcf, .bind_tcf = multiq_bind, .unbind_tcf = multiq_put, .dump = multiq_dump_class, .dump_stats = multiq_dump_class_stats, }; static struct Qdisc_ops multiq_qdisc_ops __read_mostly = { .next = NULL, .cl_ops = &multiq_class_ops, .id = "multiq", .priv_size = sizeof(struct multiq_sched_data), .enqueue = multiq_enqueue, .dequeue = multiq_dequeue, .peek = multiq_peek, .drop = multiq_drop, .init = multiq_init, .reset = multiq_reset, .destroy = multiq_destroy, .change = multiq_tune, .dump = multiq_dump, .owner = THIS_MODULE, }; static int __init multiq_module_init(void) { return register_qdisc(&multiq_qdisc_ops); } static void __exit multiq_module_exit(void) { unregister_qdisc(&multiq_qdisc_ops); } module_init(multiq_module_init) module_exit(multiq_module_exit) MODULE_LICENSE("GPL");
gpl-2.0
hanjin1987/hw_msm8x25_kernel
arch/mips/kernel/irq.c
4379
3345
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Code to handle x86 style IRQs plus some generic interrupt stuff. * * Copyright (C) 1992 Linus Torvalds * Copyright (C) 1994 - 2000 Ralf Baechle */ #include <linux/kernel.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/proc_fs.h> #include <linux/mm.h> #include <linux/random.h> #include <linux/sched.h> #include <linux/seq_file.h> #include <linux/kallsyms.h> #include <linux/kgdb.h> #include <linux/ftrace.h> #include <linux/atomic.h> #include <asm/uaccess.h> #ifdef CONFIG_KGDB int kgdb_early_setup; #endif static unsigned long irq_map[NR_IRQS / BITS_PER_LONG]; int allocate_irqno(void) { int irq; again: irq = find_first_zero_bit(irq_map, NR_IRQS); if (irq >= NR_IRQS) return -ENOSPC; if (test_and_set_bit(irq, irq_map)) goto again; return irq; } /* * Allocate the 16 legacy interrupts for i8259 devices. This happens early * in the kernel initialization so treating allocation failure as BUG() is * ok. */ void __init alloc_legacy_irqno(void) { int i; for (i = 0; i <= 16; i++) BUG_ON(test_and_set_bit(i, irq_map)); } void free_irqno(unsigned int irq) { smp_mb__before_clear_bit(); clear_bit(irq, irq_map); smp_mb__after_clear_bit(); } /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. 
*/ void ack_bad_irq(unsigned int irq) { smtc_im_ack_irq(irq); printk("unexpected IRQ # %d\n", irq); } atomic_t irq_err_count; int arch_show_interrupts(struct seq_file *p, int prec) { seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); return 0; } asmlinkage void spurious_interrupt(void) { atomic_inc(&irq_err_count); } void __init init_IRQ(void) { int i; #ifdef CONFIG_KGDB if (kgdb_early_setup) return; #endif for (i = 0; i < NR_IRQS; i++) irq_set_noprobe(i); arch_init_irq(); #ifdef CONFIG_KGDB if (!kgdb_early_setup) kgdb_early_setup = 1; #endif } #ifdef DEBUG_STACKOVERFLOW static inline void check_stack_overflow(void) { unsigned long sp; __asm__ __volatile__("move %0, $sp" : "=r" (sp)); sp &= THREAD_MASK; /* * Check for stack overflow: is there less than STACK_WARN free? * STACK_WARN is defined as 1/8 of THREAD_SIZE by default. */ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { printk("do_IRQ: stack overflow: %ld\n", sp - sizeof(struct thread_info)); dump_stack(); } } #else static inline void check_stack_overflow(void) {} #endif /* * do_IRQ handles all normal device IRQ's (the special * SMP cross-CPU interrupts have their own specific * handlers). */ void __irq_entry do_IRQ(unsigned int irq) { irq_enter(); check_stack_overflow(); if (!smtc_handle_on_other_cpu(irq)) generic_handle_irq(irq); irq_exit(); } #ifdef CONFIG_MIPS_MT_SMTC_IRQAFF /* * To avoid inefficient and in some cases pathological re-checking of * IRQ affinity, we have this variant that skips the affinity check. */ void __irq_entry do_IRQ_no_affinity(unsigned int irq) { irq_enter(); smtc_im_backstop(irq); generic_handle_irq(irq); irq_exit(); } #endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
gpl-2.0
bensonhsu2013/old_samsung-lt02wifi-kernel
drivers/net/usb/catc.c
4891
23697
/* * Copyright (c) 2001 Vojtech Pavlik * * CATC EL1210A NetMate USB Ethernet driver * * Sponsored by SuSE * * Based on the work of * Donald Becker * * Old chipset support added by Simon Evans <spse@secret.org.uk> 2002 * - adds support for Belkin F5U011 */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@suse.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/skbuff.h> #include <linux/spinlock.h> #include <linux/ethtool.h> #include <linux/crc32.h> #include <linux/bitops.h> #include <linux/gfp.h> #include <asm/uaccess.h> #undef DEBUG #include <linux/usb.h> /* * Version information. */ #define DRIVER_VERSION "v2.8" #define DRIVER_AUTHOR "Vojtech Pavlik <vojtech@suse.cz>" #define DRIVER_DESC "CATC EL1210A NetMate USB Ethernet driver" #define SHORT_DRIVER_DESC "EL1210A NetMate USB Ethernet" MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL"); static const char driver_name[] = "catc"; /* * Some defines. 
*/ #define STATS_UPDATE (HZ) /* Time between stats updates */ #define TX_TIMEOUT (5*HZ) /* Max time the queue can be stopped */ #define PKT_SZ 1536 /* Max Ethernet packet size */ #define RX_MAX_BURST 15 /* Max packets per rx buffer (> 0, < 16) */ #define TX_MAX_BURST 15 /* Max full sized packets per tx buffer (> 0) */ #define CTRL_QUEUE 16 /* Max control requests in flight (power of two) */ #define RX_PKT_SZ 1600 /* Max size of receive packet for F5U011 */ /* * Control requests. */ enum control_requests { ReadMem = 0xf1, GetMac = 0xf2, Reset = 0xf4, SetMac = 0xf5, SetRxMode = 0xf5, /* F5U011 only */ WriteROM = 0xf8, SetReg = 0xfa, GetReg = 0xfb, WriteMem = 0xfc, ReadROM = 0xfd, }; /* * Registers. */ enum register_offsets { TxBufCount = 0x20, RxBufCount = 0x21, OpModes = 0x22, TxQed = 0x23, RxQed = 0x24, MaxBurst = 0x25, RxUnit = 0x60, EthStatus = 0x61, StationAddr0 = 0x67, EthStats = 0x69, LEDCtrl = 0x81, }; enum eth_stats { TxSingleColl = 0x00, TxMultiColl = 0x02, TxExcessColl = 0x04, RxFramErr = 0x06, }; enum op_mode_bits { Op3MemWaits = 0x03, OpLenInclude = 0x08, OpRxMerge = 0x10, OpTxMerge = 0x20, OpWin95bugfix = 0x40, OpLoopback = 0x80, }; enum rx_filter_bits { RxEnable = 0x01, RxPolarity = 0x02, RxForceOK = 0x04, RxMultiCast = 0x08, RxPromisc = 0x10, AltRxPromisc = 0x20, /* F5U011 uses different bit */ }; enum led_values { LEDFast = 0x01, LEDSlow = 0x02, LEDFlash = 0x03, LEDPulse = 0x04, LEDLink = 0x08, }; enum link_status { LinkNoChange = 0, LinkGood = 1, LinkBad = 2 }; /* * The catc struct. 
*/ #define CTRL_RUNNING 0 #define RX_RUNNING 1 #define TX_RUNNING 2 struct catc { struct net_device *netdev; struct usb_device *usbdev; unsigned long flags; unsigned int tx_ptr, tx_idx; unsigned int ctrl_head, ctrl_tail; spinlock_t tx_lock, ctrl_lock; u8 tx_buf[2][TX_MAX_BURST * (PKT_SZ + 2)]; u8 rx_buf[RX_MAX_BURST * (PKT_SZ + 2)]; u8 irq_buf[2]; u8 ctrl_buf[64]; struct usb_ctrlrequest ctrl_dr; struct timer_list timer; u8 stats_buf[8]; u16 stats_vals[4]; unsigned long last_stats; u8 multicast[64]; struct ctrl_queue { u8 dir; u8 request; u16 value; u16 index; void *buf; int len; void (*callback)(struct catc *catc, struct ctrl_queue *q); } ctrl_queue[CTRL_QUEUE]; struct urb *tx_urb, *rx_urb, *irq_urb, *ctrl_urb; u8 is_f5u011; /* Set if device is an F5U011 */ u8 rxmode[2]; /* Used for F5U011 */ atomic_t recq_sz; /* Used for F5U011 - counter of waiting rx packets */ }; /* * Useful macros. */ #define catc_get_mac(catc, mac) catc_ctrl_msg(catc, USB_DIR_IN, GetMac, 0, 0, mac, 6) #define catc_reset(catc) catc_ctrl_msg(catc, USB_DIR_OUT, Reset, 0, 0, NULL, 0) #define catc_set_reg(catc, reg, val) catc_ctrl_msg(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0) #define catc_get_reg(catc, reg, buf) catc_ctrl_msg(catc, USB_DIR_IN, GetReg, 0, reg, buf, 1) #define catc_write_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size) #define catc_read_mem(catc, addr, buf, size) catc_ctrl_msg(catc, USB_DIR_IN, ReadMem, 0, addr, buf, size) #define f5u011_rxmode(catc, rxmode) catc_ctrl_msg(catc, USB_DIR_OUT, SetRxMode, 0, 1, rxmode, 2) #define f5u011_rxmode_async(catc, rxmode) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 1, &rxmode, 2, NULL) #define f5u011_mchash_async(catc, hash) catc_ctrl_async(catc, USB_DIR_OUT, SetRxMode, 0, 2, &hash, 8, NULL) #define catc_set_reg_async(catc, reg, val) catc_ctrl_async(catc, USB_DIR_OUT, SetReg, val, reg, NULL, 0, NULL) #define catc_get_reg_async(catc, reg, cb) catc_ctrl_async(catc, USB_DIR_IN, GetReg, 0, reg, NULL, 
1, cb) #define catc_write_mem_async(catc, addr, buf, size) catc_ctrl_async(catc, USB_DIR_OUT, WriteMem, 0, addr, buf, size, NULL) /* * Receive routines. */ static void catc_rx_done(struct urb *urb) { struct catc *catc = urb->context; u8 *pkt_start = urb->transfer_buffer; struct sk_buff *skb; int pkt_len, pkt_offset = 0; int status = urb->status; if (!catc->is_f5u011) { clear_bit(RX_RUNNING, &catc->flags); pkt_offset = 2; } if (status) { dbg("rx_done, status %d, length %d", status, urb->actual_length); return; } do { if(!catc->is_f5u011) { pkt_len = le16_to_cpup((__le16*)pkt_start); if (pkt_len > urb->actual_length) { catc->netdev->stats.rx_length_errors++; catc->netdev->stats.rx_errors++; break; } } else { pkt_len = urb->actual_length; } if (!(skb = dev_alloc_skb(pkt_len))) return; skb_copy_to_linear_data(skb, pkt_start + pkt_offset, pkt_len); skb_put(skb, pkt_len); skb->protocol = eth_type_trans(skb, catc->netdev); netif_rx(skb); catc->netdev->stats.rx_packets++; catc->netdev->stats.rx_bytes += pkt_len; /* F5U011 only does one packet per RX */ if (catc->is_f5u011) break; pkt_start += (((pkt_len + 1) >> 6) + 1) << 6; } while (pkt_start - (u8 *) urb->transfer_buffer < urb->actual_length); if (catc->is_f5u011) { if (atomic_read(&catc->recq_sz)) { int state; atomic_dec(&catc->recq_sz); dbg("getting extra packet"); urb->dev = catc->usbdev; if ((state = usb_submit_urb(urb, GFP_ATOMIC)) < 0) { dbg("submit(rx_urb) status %d", state); } } else { clear_bit(RX_RUNNING, &catc->flags); } } } static void catc_irq_done(struct urb *urb) { struct catc *catc = urb->context; u8 *data = urb->transfer_buffer; int status = urb->status; unsigned int hasdata = 0, linksts = LinkNoChange; int res; if (!catc->is_f5u011) { hasdata = data[1] & 0x80; if (data[1] & 0x40) linksts = LinkGood; else if (data[1] & 0x20) linksts = LinkBad; } else { hasdata = (unsigned int)(be16_to_cpup((__be16*)data) & 0x0fff); if (data[0] == 0x90) linksts = LinkGood; else if (data[0] == 0xA0) linksts = LinkBad; } 
switch (status) { case 0: /* success */ break; case -ECONNRESET: /* unlink */ case -ENOENT: case -ESHUTDOWN: return; /* -EPIPE: should clear the halt */ default: /* error */ dbg("irq_done, status %d, data %02x %02x.", status, data[0], data[1]); goto resubmit; } if (linksts == LinkGood) { netif_carrier_on(catc->netdev); dbg("link ok"); } if (linksts == LinkBad) { netif_carrier_off(catc->netdev); dbg("link bad"); } if (hasdata) { if (test_and_set_bit(RX_RUNNING, &catc->flags)) { if (catc->is_f5u011) atomic_inc(&catc->recq_sz); } else { catc->rx_urb->dev = catc->usbdev; if ((res = usb_submit_urb(catc->rx_urb, GFP_ATOMIC)) < 0) { err("submit(rx_urb) status %d", res); } } } resubmit: res = usb_submit_urb (urb, GFP_ATOMIC); if (res) err ("can't resubmit intr, %s-%s, status %d", catc->usbdev->bus->bus_name, catc->usbdev->devpath, res); } /* * Transmit routines. */ static int catc_tx_run(struct catc *catc) { int status; if (catc->is_f5u011) catc->tx_ptr = (catc->tx_ptr + 63) & ~63; catc->tx_urb->transfer_buffer_length = catc->tx_ptr; catc->tx_urb->transfer_buffer = catc->tx_buf[catc->tx_idx]; catc->tx_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->tx_urb, GFP_ATOMIC)) < 0) err("submit(tx_urb), status %d", status); catc->tx_idx = !catc->tx_idx; catc->tx_ptr = 0; catc->netdev->trans_start = jiffies; return status; } static void catc_tx_done(struct urb *urb) { struct catc *catc = urb->context; unsigned long flags; int r, status = urb->status; if (status == -ECONNRESET) { dbg("Tx Reset."); urb->status = 0; catc->netdev->trans_start = jiffies; catc->netdev->stats.tx_errors++; clear_bit(TX_RUNNING, &catc->flags); netif_wake_queue(catc->netdev); return; } if (status) { dbg("tx_done, status %d, length %d", status, urb->actual_length); return; } spin_lock_irqsave(&catc->tx_lock, flags); if (catc->tx_ptr) { r = catc_tx_run(catc); if (unlikely(r < 0)) clear_bit(TX_RUNNING, &catc->flags); } else { clear_bit(TX_RUNNING, &catc->flags); } netif_wake_queue(catc->netdev); 
spin_unlock_irqrestore(&catc->tx_lock, flags); } static netdev_tx_t catc_start_xmit(struct sk_buff *skb, struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); unsigned long flags; int r = 0; char *tx_buf; spin_lock_irqsave(&catc->tx_lock, flags); catc->tx_ptr = (((catc->tx_ptr - 1) >> 6) + 1) << 6; tx_buf = catc->tx_buf[catc->tx_idx] + catc->tx_ptr; if (catc->is_f5u011) *(__be16 *)tx_buf = cpu_to_be16(skb->len); else *(__le16 *)tx_buf = cpu_to_le16(skb->len); skb_copy_from_linear_data(skb, tx_buf + 2, skb->len); catc->tx_ptr += skb->len + 2; if (!test_and_set_bit(TX_RUNNING, &catc->flags)) { r = catc_tx_run(catc); if (r < 0) clear_bit(TX_RUNNING, &catc->flags); } if ((catc->is_f5u011 && catc->tx_ptr) || (catc->tx_ptr >= ((TX_MAX_BURST - 1) * (PKT_SZ + 2)))) netif_stop_queue(netdev); spin_unlock_irqrestore(&catc->tx_lock, flags); if (r >= 0) { catc->netdev->stats.tx_bytes += skb->len; catc->netdev->stats.tx_packets++; } dev_kfree_skb(skb); return NETDEV_TX_OK; } static void catc_tx_timeout(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); dev_warn(&netdev->dev, "Transmit timed out.\n"); usb_unlink_urb(catc->tx_urb); } /* * Control messages. */ static int catc_ctrl_msg(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len) { int retval = usb_control_msg(catc->usbdev, dir ? usb_rcvctrlpipe(catc->usbdev, 0) : usb_sndctrlpipe(catc->usbdev, 0), request, 0x40 | dir, value, index, buf, len, 1000); return retval < 0 ? retval : 0; } static void catc_ctrl_run(struct catc *catc) { struct ctrl_queue *q = catc->ctrl_queue + catc->ctrl_tail; struct usb_device *usbdev = catc->usbdev; struct urb *urb = catc->ctrl_urb; struct usb_ctrlrequest *dr = &catc->ctrl_dr; int status; dr->bRequest = q->request; dr->bRequestType = 0x40 | q->dir; dr->wValue = cpu_to_le16(q->value); dr->wIndex = cpu_to_le16(q->index); dr->wLength = cpu_to_le16(q->len); urb->pipe = q->dir ? 
usb_rcvctrlpipe(usbdev, 0) : usb_sndctrlpipe(usbdev, 0); urb->transfer_buffer_length = q->len; urb->transfer_buffer = catc->ctrl_buf; urb->setup_packet = (void *) dr; urb->dev = usbdev; if (!q->dir && q->buf && q->len) memcpy(catc->ctrl_buf, q->buf, q->len); if ((status = usb_submit_urb(catc->ctrl_urb, GFP_ATOMIC))) err("submit(ctrl_urb) status %d", status); } static void catc_ctrl_done(struct urb *urb) { struct catc *catc = urb->context; struct ctrl_queue *q; unsigned long flags; int status = urb->status; if (status) dbg("ctrl_done, status %d, len %d.", status, urb->actual_length); spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_tail; if (q->dir) { if (q->buf && q->len) memcpy(q->buf, catc->ctrl_buf, q->len); else q->buf = catc->ctrl_buf; } if (q->callback) q->callback(catc, q); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head != catc->ctrl_tail) catc_ctrl_run(catc); else clear_bit(CTRL_RUNNING, &catc->flags); spin_unlock_irqrestore(&catc->ctrl_lock, flags); } static int catc_ctrl_async(struct catc *catc, u8 dir, u8 request, u16 value, u16 index, void *buf, int len, void (*callback)(struct catc *catc, struct ctrl_queue *q)) { struct ctrl_queue *q; int retval = 0; unsigned long flags; spin_lock_irqsave(&catc->ctrl_lock, flags); q = catc->ctrl_queue + catc->ctrl_head; q->dir = dir; q->request = request; q->value = value; q->index = index; q->buf = buf; q->len = len; q->callback = callback; catc->ctrl_head = (catc->ctrl_head + 1) & (CTRL_QUEUE - 1); if (catc->ctrl_head == catc->ctrl_tail) { err("ctrl queue full"); catc->ctrl_tail = (catc->ctrl_tail + 1) & (CTRL_QUEUE - 1); retval = -1; } if (!test_and_set_bit(CTRL_RUNNING, &catc->flags)) catc_ctrl_run(catc); spin_unlock_irqrestore(&catc->ctrl_lock, flags); return retval; } /* * Statistics. 
*/ static void catc_stats_done(struct catc *catc, struct ctrl_queue *q) { int index = q->index - EthStats; u16 data, last; catc->stats_buf[index] = *((char *)q->buf); if (index & 1) return; data = ((u16)catc->stats_buf[index] << 8) | catc->stats_buf[index + 1]; last = catc->stats_vals[index >> 1]; switch (index) { case TxSingleColl: case TxMultiColl: catc->netdev->stats.collisions += data - last; break; case TxExcessColl: catc->netdev->stats.tx_aborted_errors += data - last; catc->netdev->stats.tx_errors += data - last; break; case RxFramErr: catc->netdev->stats.rx_frame_errors += data - last; catc->netdev->stats.rx_errors += data - last; break; } catc->stats_vals[index >> 1] = data; } static void catc_stats_timer(unsigned long data) { struct catc *catc = (void *) data; int i; for (i = 0; i < 8; i++) catc_get_reg_async(catc, EthStats + 7 - i, catc_stats_done); mod_timer(&catc->timer, jiffies + STATS_UPDATE); } /* * Receive modes. Broadcast, Multicast, Promisc. */ static void catc_multicast(unsigned char *addr, u8 *multicast) { u32 crc; crc = ether_crc_le(6, addr); multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } static void catc_set_multicast_list(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); struct netdev_hw_addr *ha; u8 broadcast[6]; u8 rx = RxEnable | RxPolarity | RxMultiCast; memset(broadcast, 0xff, 6); memset(catc->multicast, 0, 64); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); if (netdev->flags & IFF_PROMISC) { memset(catc->multicast, 0xff, 64); rx |= (!catc->is_f5u011) ? 
RxPromisc : AltRxPromisc; } if (netdev->flags & IFF_ALLMULTI) { memset(catc->multicast, 0xff, 64); } else { netdev_for_each_mc_addr(ha, netdev) { u32 crc = ether_crc_le(6, ha->addr); if (!catc->is_f5u011) { catc->multicast[(crc >> 3) & 0x3f] |= 1 << (crc & 7); } else { catc->multicast[7-(crc >> 29)] |= 1 << ((crc >> 26) & 7); } } } if (!catc->is_f5u011) { catc_set_reg_async(catc, RxUnit, rx); catc_write_mem_async(catc, 0xfa80, catc->multicast, 64); } else { f5u011_mchash_async(catc, catc->multicast); if (catc->rxmode[0] != rx) { catc->rxmode[0] = rx; dbg("Setting RX mode to %2.2X %2.2X", catc->rxmode[0], catc->rxmode[1]); f5u011_rxmode_async(catc, catc->rxmode); } } } static void catc_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct catc *catc = netdev_priv(dev); strncpy(info->driver, driver_name, ETHTOOL_BUSINFO_LEN); strncpy(info->version, DRIVER_VERSION, ETHTOOL_BUSINFO_LEN); usb_make_path (catc->usbdev, info->bus_info, sizeof info->bus_info); } static int catc_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct catc *catc = netdev_priv(dev); if (!catc->is_f5u011) return -EOPNOTSUPP; cmd->supported = SUPPORTED_10baseT_Half | SUPPORTED_TP; cmd->advertising = ADVERTISED_10baseT_Half | ADVERTISED_TP; ethtool_cmd_speed_set(cmd, SPEED_10); cmd->duplex = DUPLEX_HALF; cmd->port = PORT_TP; cmd->phy_address = 0; cmd->transceiver = XCVR_INTERNAL; cmd->autoneg = AUTONEG_DISABLE; cmd->maxtxpkt = 1; cmd->maxrxpkt = 1; return 0; } static const struct ethtool_ops ops = { .get_drvinfo = catc_get_drvinfo, .get_settings = catc_get_settings, .get_link = ethtool_op_get_link }; /* * Open, close. 
*/ static int catc_open(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); int status; catc->irq_urb->dev = catc->usbdev; if ((status = usb_submit_urb(catc->irq_urb, GFP_KERNEL)) < 0) { err("submit(irq_urb) status %d", status); return -1; } netif_start_queue(netdev); if (!catc->is_f5u011) mod_timer(&catc->timer, jiffies + STATS_UPDATE); return 0; } static int catc_stop(struct net_device *netdev) { struct catc *catc = netdev_priv(netdev); netif_stop_queue(netdev); if (!catc->is_f5u011) del_timer_sync(&catc->timer); usb_kill_urb(catc->rx_urb); usb_kill_urb(catc->tx_urb); usb_kill_urb(catc->irq_urb); usb_kill_urb(catc->ctrl_urb); return 0; } static const struct net_device_ops catc_netdev_ops = { .ndo_open = catc_open, .ndo_stop = catc_stop, .ndo_start_xmit = catc_start_xmit, .ndo_tx_timeout = catc_tx_timeout, .ndo_set_rx_mode = catc_set_multicast_list, .ndo_change_mtu = eth_change_mtu, .ndo_set_mac_address = eth_mac_addr, .ndo_validate_addr = eth_validate_addr, }; /* * USB probe, disconnect. 
*/ static int catc_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *usbdev = interface_to_usbdev(intf); struct net_device *netdev; struct catc *catc; u8 broadcast[6]; int i, pktsz; if (usb_set_interface(usbdev, intf->altsetting->desc.bInterfaceNumber, 1)) { err("Can't set altsetting 1."); return -EIO; } netdev = alloc_etherdev(sizeof(struct catc)); if (!netdev) return -ENOMEM; catc = netdev_priv(netdev); netdev->netdev_ops = &catc_netdev_ops; netdev->watchdog_timeo = TX_TIMEOUT; SET_ETHTOOL_OPS(netdev, &ops); catc->usbdev = usbdev; catc->netdev = netdev; spin_lock_init(&catc->tx_lock); spin_lock_init(&catc->ctrl_lock); init_timer(&catc->timer); catc->timer.data = (long) catc; catc->timer.function = catc_stats_timer; catc->ctrl_urb = usb_alloc_urb(0, GFP_KERNEL); catc->tx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->rx_urb = usb_alloc_urb(0, GFP_KERNEL); catc->irq_urb = usb_alloc_urb(0, GFP_KERNEL); if ((!catc->ctrl_urb) || (!catc->tx_urb) || (!catc->rx_urb) || (!catc->irq_urb)) { err("No free urbs available."); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return -ENOMEM; } /* The F5U011 has the same vendor/product as the netmate but a device version of 0x130 */ if (le16_to_cpu(usbdev->descriptor.idVendor) == 0x0423 && le16_to_cpu(usbdev->descriptor.idProduct) == 0xa && le16_to_cpu(catc->usbdev->descriptor.bcdDevice) == 0x0130) { dbg("Testing for f5u011"); catc->is_f5u011 = 1; atomic_set(&catc->recq_sz, 0); pktsz = RX_PKT_SZ; } else { pktsz = RX_MAX_BURST * (PKT_SZ + 2); } usb_fill_control_urb(catc->ctrl_urb, usbdev, usb_sndctrlpipe(usbdev, 0), NULL, NULL, 0, catc_ctrl_done, catc); usb_fill_bulk_urb(catc->tx_urb, usbdev, usb_sndbulkpipe(usbdev, 1), NULL, 0, catc_tx_done, catc); usb_fill_bulk_urb(catc->rx_urb, usbdev, usb_rcvbulkpipe(usbdev, 1), catc->rx_buf, pktsz, catc_rx_done, catc); usb_fill_int_urb(catc->irq_urb, usbdev, 
usb_rcvintpipe(usbdev, 2), catc->irq_buf, 2, catc_irq_done, catc, 1); if (!catc->is_f5u011) { dbg("Checking memory size\n"); i = 0x12345678; catc_write_mem(catc, 0x7a80, &i, 4); i = 0x87654321; catc_write_mem(catc, 0xfa80, &i, 4); catc_read_mem(catc, 0x7a80, &i, 4); switch (i) { case 0x12345678: catc_set_reg(catc, TxBufCount, 8); catc_set_reg(catc, RxBufCount, 32); dbg("64k Memory\n"); break; default: dev_warn(&intf->dev, "Couldn't detect memory size, assuming 32k\n"); case 0x87654321: catc_set_reg(catc, TxBufCount, 4); catc_set_reg(catc, RxBufCount, 16); dbg("32k Memory\n"); break; } dbg("Getting MAC from SEEROM."); catc_get_mac(catc, netdev->dev_addr); dbg("Setting MAC into registers."); for (i = 0; i < 6; i++) catc_set_reg(catc, StationAddr0 - i, netdev->dev_addr[i]); dbg("Filling the multicast list."); memset(broadcast, 0xff, 6); catc_multicast(broadcast, catc->multicast); catc_multicast(netdev->dev_addr, catc->multicast); catc_write_mem(catc, 0xfa80, catc->multicast, 64); dbg("Clearing error counters."); for (i = 0; i < 8; i++) catc_set_reg(catc, EthStats + i, 0); catc->last_stats = jiffies; dbg("Enabling."); catc_set_reg(catc, MaxBurst, RX_MAX_BURST); catc_set_reg(catc, OpModes, OpTxMerge | OpRxMerge | OpLenInclude | Op3MemWaits); catc_set_reg(catc, LEDCtrl, LEDLink); catc_set_reg(catc, RxUnit, RxEnable | RxPolarity | RxMultiCast); } else { dbg("Performing reset\n"); catc_reset(catc); catc_get_mac(catc, netdev->dev_addr); dbg("Setting RX Mode"); catc->rxmode[0] = RxEnable | RxPolarity | RxMultiCast; catc->rxmode[1] = 0; f5u011_rxmode(catc, catc->rxmode); } dbg("Init done."); printk(KERN_INFO "%s: %s USB Ethernet at usb-%s-%s, %pM.\n", netdev->name, (catc->is_f5u011) ? 
"Belkin F5U011" : "CATC EL1210A NetMate", usbdev->bus->bus_name, usbdev->devpath, netdev->dev_addr); usb_set_intfdata(intf, catc); SET_NETDEV_DEV(netdev, &intf->dev); if (register_netdev(netdev) != 0) { usb_set_intfdata(intf, NULL); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(netdev); return -EIO; } return 0; } static void catc_disconnect(struct usb_interface *intf) { struct catc *catc = usb_get_intfdata(intf); usb_set_intfdata(intf, NULL); if (catc) { unregister_netdev(catc->netdev); usb_free_urb(catc->ctrl_urb); usb_free_urb(catc->tx_urb); usb_free_urb(catc->rx_urb); usb_free_urb(catc->irq_urb); free_netdev(catc->netdev); } } /* * Module functions and tables. */ static struct usb_device_id catc_id_table [] = { { USB_DEVICE(0x0423, 0xa) }, /* CATC Netmate, Belkin F5U011 */ { USB_DEVICE(0x0423, 0xc) }, /* CATC Netmate II, Belkin F5U111 */ { USB_DEVICE(0x08d1, 0x1) }, /* smartBridges smartNIC */ { } }; MODULE_DEVICE_TABLE(usb, catc_id_table); static struct usb_driver catc_driver = { .name = driver_name, .probe = catc_probe, .disconnect = catc_disconnect, .id_table = catc_id_table, }; module_usb_driver(catc_driver);
gpl-2.0
kumajaya/android_kernel_samsung_lt01
drivers/gpu/drm/gma500/intel_gmbus.c
5403
12546
/* * Copyright (c) 2006 Dave Airlie <airlied@linux.ie> * Copyright © 2006-2008,2010 Intel Corporation * Jesse Barnes <jesse.barnes@intel.com> * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * Eric Anholt <eric@anholt.net> * Chris Wilson <chris@chris-wilson.co.uk> */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/i2c-algo-bit.h> #include "drmP.h" #include "drm.h" #include "psb_intel_drv.h" #include "gma_drm.h" #include "psb_drv.h" #include "psb_intel_reg.h" #define _wait_for(COND, MS, W) ({ \ unsigned long timeout__ = jiffies + msecs_to_jiffies(MS); \ int ret__ = 0; \ while (! 
(COND)) { \ if (time_after(jiffies, timeout__)) { \ ret__ = -ETIMEDOUT; \ break; \ } \ if (W && !(in_atomic() || in_dbg_master())) msleep(W); \ } \ ret__; \ }) #define wait_for(COND, MS) _wait_for(COND, MS, 1) #define wait_for_atomic(COND, MS) _wait_for(COND, MS, 0) /* Intel GPIO access functions */ #define I2C_RISEFALL_TIME 20 static inline struct intel_gmbus * to_intel_gmbus(struct i2c_adapter *i2c) { return container_of(i2c, struct intel_gmbus, adapter); } struct intel_gpio { struct i2c_adapter adapter; struct i2c_algo_bit_data algo; struct drm_psb_private *dev_priv; u32 reg; }; void gma_intel_i2c_reset(struct drm_device *dev) { REG_WRITE(GMBUS0, 0); } static void intel_i2c_quirk_set(struct drm_psb_private *dev_priv, bool enable) { /* When using bit bashing for I2C, this bit needs to be set to 1 */ /* FIXME: We are never Pineview, right? u32 val; if (!IS_PINEVIEW(dev_priv->dev)) return; val = REG_READ(DSPCLK_GATE_D); if (enable) val |= DPCUNIT_CLOCK_GATE_DISABLE; else val &= ~DPCUNIT_CLOCK_GATE_DISABLE; REG_WRITE(DSPCLK_GATE_D, val); return; */ } static u32 get_reserved(struct intel_gpio *gpio) { struct drm_psb_private *dev_priv = gpio->dev_priv; struct drm_device *dev = dev_priv->dev; u32 reserved = 0; /* On most chips, these bits must be preserved in software. 
*/ reserved = REG_READ(gpio->reg) & (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE); return reserved; } static int get_clock(void *data) { struct intel_gpio *gpio = data; struct drm_psb_private *dev_priv = gpio->dev_priv; struct drm_device *dev = dev_priv->dev; u32 reserved = get_reserved(gpio); REG_WRITE(gpio->reg, reserved | GPIO_CLOCK_DIR_MASK); REG_WRITE(gpio->reg, reserved); return (REG_READ(gpio->reg) & GPIO_CLOCK_VAL_IN) != 0; } static int get_data(void *data) { struct intel_gpio *gpio = data; struct drm_psb_private *dev_priv = gpio->dev_priv; struct drm_device *dev = dev_priv->dev; u32 reserved = get_reserved(gpio); REG_WRITE(gpio->reg, reserved | GPIO_DATA_DIR_MASK); REG_WRITE(gpio->reg, reserved); return (REG_READ(gpio->reg) & GPIO_DATA_VAL_IN) != 0; } static void set_clock(void *data, int state_high) { struct intel_gpio *gpio = data; struct drm_psb_private *dev_priv = gpio->dev_priv; struct drm_device *dev = dev_priv->dev; u32 reserved = get_reserved(gpio); u32 clock_bits; if (state_high) clock_bits = GPIO_CLOCK_DIR_IN | GPIO_CLOCK_DIR_MASK; else clock_bits = GPIO_CLOCK_DIR_OUT | GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK; REG_WRITE(gpio->reg, reserved | clock_bits); REG_READ(gpio->reg); /* Posting */ } static void set_data(void *data, int state_high) { struct intel_gpio *gpio = data; struct drm_psb_private *dev_priv = gpio->dev_priv; struct drm_device *dev = dev_priv->dev; u32 reserved = get_reserved(gpio); u32 data_bits; if (state_high) data_bits = GPIO_DATA_DIR_IN | GPIO_DATA_DIR_MASK; else data_bits = GPIO_DATA_DIR_OUT | GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; REG_WRITE(gpio->reg, reserved | data_bits); REG_READ(gpio->reg); } static struct i2c_adapter * intel_gpio_create(struct drm_psb_private *dev_priv, u32 pin) { static const int map_pin_to_reg[] = { 0, GPIOB, GPIOA, GPIOC, GPIOD, GPIOE, 0, GPIOF, }; struct intel_gpio *gpio; if (pin >= ARRAY_SIZE(map_pin_to_reg) || !map_pin_to_reg[pin]) return NULL; gpio = kzalloc(sizeof(struct intel_gpio), 
GFP_KERNEL); if (gpio == NULL) return NULL; gpio->reg = map_pin_to_reg[pin]; gpio->dev_priv = dev_priv; snprintf(gpio->adapter.name, sizeof(gpio->adapter.name), "gma500 GPIO%c", "?BACDE?F"[pin]); gpio->adapter.owner = THIS_MODULE; gpio->adapter.algo_data = &gpio->algo; gpio->adapter.dev.parent = &dev_priv->dev->pdev->dev; gpio->algo.setsda = set_data; gpio->algo.setscl = set_clock; gpio->algo.getsda = get_data; gpio->algo.getscl = get_clock; gpio->algo.udelay = I2C_RISEFALL_TIME; gpio->algo.timeout = usecs_to_jiffies(2200); gpio->algo.data = gpio; if (i2c_bit_add_bus(&gpio->adapter)) goto out_free; return &gpio->adapter; out_free: kfree(gpio); return NULL; } static int intel_i2c_quirk_xfer(struct drm_psb_private *dev_priv, struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_gpio *gpio = container_of(adapter, struct intel_gpio, adapter); int ret; gma_intel_i2c_reset(dev_priv->dev); intel_i2c_quirk_set(dev_priv, true); set_data(gpio, 1); set_clock(gpio, 1); udelay(I2C_RISEFALL_TIME); ret = adapter->algo->master_xfer(adapter, msgs, num); set_data(gpio, 1); set_clock(gpio, 1); intel_i2c_quirk_set(dev_priv, false); return ret; } static int gmbus_xfer(struct i2c_adapter *adapter, struct i2c_msg *msgs, int num) { struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, adapter); struct drm_psb_private *dev_priv = adapter->algo_data; struct drm_device *dev = dev_priv->dev; int i, reg_offset; if (bus->force_bit) return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num); reg_offset = 0; REG_WRITE(GMBUS0 + reg_offset, bus->reg0); for (i = 0; i < num; i++) { u16 len = msgs[i].len; u8 *buf = msgs[i].buf; if (msgs[i].flags & I2C_M_RD) { REG_WRITE(GMBUS1 + reg_offset, GMBUS_CYCLE_WAIT | (i + 1 == num ? 
GMBUS_CYCLE_STOP : 0) | (len << GMBUS_BYTE_COUNT_SHIFT) | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_READ | GMBUS_SW_RDY); REG_READ(GMBUS2+reg_offset); do { u32 val, loop = 0; if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) goto timeout; if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) goto clear_err; val = REG_READ(GMBUS3 + reg_offset); do { *buf++ = val & 0xff; val >>= 8; } while (--len && ++loop < 4); } while (len); } else { u32 val, loop; val = loop = 0; do { val |= *buf++ << (8 * loop); } while (--len && ++loop < 4); REG_WRITE(GMBUS3 + reg_offset, val); REG_WRITE(GMBUS1 + reg_offset, (i + 1 == num ? GMBUS_CYCLE_STOP : GMBUS_CYCLE_WAIT) | (msgs[i].len << GMBUS_BYTE_COUNT_SHIFT) | (msgs[i].addr << GMBUS_SLAVE_ADDR_SHIFT) | GMBUS_SLAVE_WRITE | GMBUS_SW_RDY); REG_READ(GMBUS2+reg_offset); while (len) { if (wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_RDY), 50)) goto timeout; if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) goto clear_err; val = loop = 0; do { val |= *buf++ << (8 * loop); } while (--len && ++loop < 4); REG_WRITE(GMBUS3 + reg_offset, val); REG_READ(GMBUS2+reg_offset); } } if (i + 1 < num && wait_for(REG_READ(GMBUS2 + reg_offset) & (GMBUS_SATOER | GMBUS_HW_WAIT_PHASE), 50)) goto timeout; if (REG_READ(GMBUS2 + reg_offset) & GMBUS_SATOER) goto clear_err; } goto done; clear_err: /* Toggle the Software Clear Interrupt bit. This has the effect * of resetting the GMBUS controller and so clearing the * BUS_ERROR raised by the slave's NAK. */ REG_WRITE(GMBUS1 + reg_offset, GMBUS_SW_CLR_INT); REG_WRITE(GMBUS1 + reg_offset, 0); done: /* Mark the GMBUS interface as disabled. We will re-enable it at the * start of the next xfer, till then let it sleep. 
*/ REG_WRITE(GMBUS0 + reg_offset, 0); return i; timeout: DRM_INFO("GMBUS timed out, falling back to bit banging on pin %d [%s]\n", bus->reg0 & 0xff, bus->adapter.name); REG_WRITE(GMBUS0 + reg_offset, 0); /* Hardware may not support GMBUS over these pins? Try GPIO bitbanging instead. */ bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff); if (!bus->force_bit) return -ENOMEM; return intel_i2c_quirk_xfer(dev_priv, bus->force_bit, msgs, num); } static u32 gmbus_func(struct i2c_adapter *adapter) { struct intel_gmbus *bus = container_of(adapter, struct intel_gmbus, adapter); if (bus->force_bit) bus->force_bit->algo->functionality(bus->force_bit); return (I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | /* I2C_FUNC_10BIT_ADDR | */ I2C_FUNC_SMBUS_READ_BLOCK_DATA | I2C_FUNC_SMBUS_BLOCK_PROC_CALL); } static const struct i2c_algorithm gmbus_algorithm = { .master_xfer = gmbus_xfer, .functionality = gmbus_func }; /** * intel_gmbus_setup - instantiate all Intel i2c GMBuses * @dev: DRM device */ int gma_intel_setup_gmbus(struct drm_device *dev) { static const char *names[GMBUS_NUM_PORTS] = { "disabled", "ssc", "vga", "panel", "dpc", "dpb", "reserved", "dpd", }; struct drm_psb_private *dev_priv = dev->dev_private; int ret, i; dev_priv->gmbus = kcalloc(GMBUS_NUM_PORTS, sizeof(struct intel_gmbus), GFP_KERNEL); if (dev_priv->gmbus == NULL) return -ENOMEM; for (i = 0; i < GMBUS_NUM_PORTS; i++) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; bus->adapter.owner = THIS_MODULE; bus->adapter.class = I2C_CLASS_DDC; snprintf(bus->adapter.name, sizeof(bus->adapter.name), "gma500 gmbus %s", names[i]); bus->adapter.dev.parent = &dev->pdev->dev; bus->adapter.algo_data = dev_priv; bus->adapter.algo = &gmbus_algorithm; ret = i2c_add_adapter(&bus->adapter); if (ret) goto err; /* By default use a conservative clock rate */ bus->reg0 = i | GMBUS_RATE_100KHZ; /* XXX force bit banging until GMBUS is fully debugged */ bus->force_bit = intel_gpio_create(dev_priv, i); } gma_intel_i2c_reset(dev_priv->dev); 
return 0; err: while (--i) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; i2c_del_adapter(&bus->adapter); } kfree(dev_priv->gmbus); dev_priv->gmbus = NULL; return ret; } void gma_intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed) { struct intel_gmbus *bus = to_intel_gmbus(adapter); /* speed: * 0x0 = 100 KHz * 0x1 = 50 KHz * 0x2 = 400 KHz * 0x3 = 1000 Khz */ bus->reg0 = (bus->reg0 & ~(0x3 << 8)) | (speed << 8); } void gma_intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit) { struct intel_gmbus *bus = to_intel_gmbus(adapter); if (force_bit) { if (bus->force_bit == NULL) { struct drm_psb_private *dev_priv = adapter->algo_data; bus->force_bit = intel_gpio_create(dev_priv, bus->reg0 & 0xff); } } else { if (bus->force_bit) { i2c_del_adapter(bus->force_bit); kfree(bus->force_bit); bus->force_bit = NULL; } } } void gma_intel_teardown_gmbus(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; int i; if (dev_priv->gmbus == NULL) return; for (i = 0; i < GMBUS_NUM_PORTS; i++) { struct intel_gmbus *bus = &dev_priv->gmbus[i]; if (bus->force_bit) { i2c_del_adapter(bus->force_bit); kfree(bus->force_bit); } i2c_del_adapter(&bus->adapter); } kfree(dev_priv->gmbus); dev_priv->gmbus = NULL; }
gpl-2.0
arnavgosain/android_kernel_sony_msm8x27
arch/mips/bcm47xx/irq.c
7195
2174
/* * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/irq_cpu.h> #include <bcm47xx.h> void plat_irq_dispatch(void) { u32 cause; cause = read_c0_cause() & read_c0_status() & CAUSEF_IP; clear_c0_status(cause); if (cause & CAUSEF_IP7) do_IRQ(7); if (cause & CAUSEF_IP2) do_IRQ(2); if (cause & CAUSEF_IP3) do_IRQ(3); if (cause & CAUSEF_IP4) do_IRQ(4); if (cause & CAUSEF_IP5) do_IRQ(5); if (cause & CAUSEF_IP6) do_IRQ(6); } void __init arch_init_irq(void) { #ifdef CONFIG_BCM47XX_BCMA if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { bcma_write32(bcm47xx_bus.bcma.bus.drv_mips.core, BCMA_MIPS_MIPS74K_INTMASK(5), 1 << 31); /* * the kernel reads the timer irq from some register and thinks * it's #5, but we offset it by 2 and route to #7 */ cp0_compare_irq = 7; } #endif mips_cpu_irq_init(); }
gpl-2.0
garwedgess/android_kernel_lge_g4
arch/mips/bcm47xx/irq.c
7195
2174
/* * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/types.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <asm/irq_cpu.h> #include <bcm47xx.h> void plat_irq_dispatch(void) { u32 cause; cause = read_c0_cause() & read_c0_status() & CAUSEF_IP; clear_c0_status(cause); if (cause & CAUSEF_IP7) do_IRQ(7); if (cause & CAUSEF_IP2) do_IRQ(2); if (cause & CAUSEF_IP3) do_IRQ(3); if (cause & CAUSEF_IP4) do_IRQ(4); if (cause & CAUSEF_IP5) do_IRQ(5); if (cause & CAUSEF_IP6) do_IRQ(6); } void __init arch_init_irq(void) { #ifdef CONFIG_BCM47XX_BCMA if (bcm47xx_bus_type == BCM47XX_BUS_TYPE_BCMA) { bcma_write32(bcm47xx_bus.bcma.bus.drv_mips.core, BCMA_MIPS_MIPS74K_INTMASK(5), 1 << 31); /* * the kernel reads the timer irq from some register and thinks * it's #5, but we offset it by 2 and route to #7 */ cp0_compare_irq = 7; } #endif mips_cpu_irq_init(); }
gpl-2.0
MoKee/android_kernel_amazon_otter-common
samples/tracepoints/tracepoint-sample.c
8475
1212
/* tracepoint-sample.c * * Executes a tracepoint when /proc/tracepoint-sample is opened. * * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> * * This file is released under the GPLv2. * See the file COPYING for more details. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/proc_fs.h> #include "tp-samples-trace.h" DEFINE_TRACE(subsys_event); DEFINE_TRACE(subsys_eventb); struct proc_dir_entry *pentry_sample; static int my_open(struct inode *inode, struct file *file) { int i; trace_subsys_event(inode, file); for (i = 0; i < 10; i++) trace_subsys_eventb(); return -EPERM; } static const struct file_operations mark_ops = { .open = my_open, .llseek = noop_llseek, }; static int __init sample_init(void) { printk(KERN_ALERT "sample init\n"); pentry_sample = proc_create("tracepoint-sample", 0444, NULL, &mark_ops); if (!pentry_sample) return -EPERM; return 0; } static void __exit sample_exit(void) { printk(KERN_ALERT "sample exit\n"); remove_proc_entry("tracepoint-sample", NULL); } module_init(sample_init) module_exit(sample_exit) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mathieu Desnoyers"); MODULE_DESCRIPTION("Tracepoint sample");
gpl-2.0
cr1exe/android_kernel_sony_taoshan
samples/tracepoints/tracepoint-sample.c
8475
1212
/* tracepoint-sample.c * * Executes a tracepoint when /proc/tracepoint-sample is opened. * * (C) Copyright 2007 Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca> * * This file is released under the GPLv2. * See the file COPYING for more details. */ #include <linux/module.h> #include <linux/sched.h> #include <linux/proc_fs.h> #include "tp-samples-trace.h" DEFINE_TRACE(subsys_event); DEFINE_TRACE(subsys_eventb); struct proc_dir_entry *pentry_sample; static int my_open(struct inode *inode, struct file *file) { int i; trace_subsys_event(inode, file); for (i = 0; i < 10; i++) trace_subsys_eventb(); return -EPERM; } static const struct file_operations mark_ops = { .open = my_open, .llseek = noop_llseek, }; static int __init sample_init(void) { printk(KERN_ALERT "sample init\n"); pentry_sample = proc_create("tracepoint-sample", 0444, NULL, &mark_ops); if (!pentry_sample) return -EPERM; return 0; } static void __exit sample_exit(void) { printk(KERN_ALERT "sample exit\n"); remove_proc_entry("tracepoint-sample", NULL); } module_init(sample_init) module_exit(sample_exit) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mathieu Desnoyers"); MODULE_DESCRIPTION("Tracepoint sample");
gpl-2.0
xdatravelbug/N909D_Kernel_JB_4.1.2
drivers/staging/cxt1e1/pmc93x6_eeprom.c
10779
17136
/* pmc93x6_eeprom.c - PMC's 93LC46 EEPROM Device * * The 93LC46 is a low-power, serial Electrically Erasable and * Programmable Read Only Memory organized as 128 8-bit bytes. * * Accesses to the 93LC46 are done in a bit serial stream, organized * in a 3 wire format. Writes are internally timed by the device * (the In data bit is pulled low until the write is complete and * then is pulled high) and take about 6 milliseconds. * * Copyright (C) 2003-2005 SBE, Inc. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/types.h> #include "pmcc4_sysdep.h" #include "sbecom_inline_linux.h" #include "pmcc4.h" #include "sbe_promformat.h" #ifndef TRUE #define TRUE 1 #define FALSE 0 #endif #ifdef SBE_INCLUDE_SYMBOLS #define STATIC #else #define STATIC static #endif /*------------------------------------------------------------------------ * EEPROM address definitions *------------------------------------------------------------------------ * * The offset in the definitions below allows the test to skip over * areas of the EEPROM that other programs (such a VxWorks) are * using. 
*/ #define EE_MFG (long)0 /* Index to manufacturing record */ #define EE_FIRST 0x28 /* Index to start testing at */ #define EE_LIMIT 128 /* Index to end testing at */ /* Bit Ordering for Instructions ** ** A0, A1, A2, A3, A4, A5, A6, OP0, OP1, SB (lsb, or 1st bit out) ** */ #define EPROM_EWEN 0x0019 /* Erase/Write enable (reversed) */ #define EPROM_EWDS 0x0001 /* Erase/Write disable (reversed) */ #define EPROM_READ 0x0003 /* Read (reversed) */ #define EPROM_WRITE 0x0005 /* Write (reversed) */ #define EPROM_ERASE 0x0007 /* Erase (reversed) */ #define EPROM_ERAL 0x0009 /* Erase All (reversed) */ #define EPROM_WRAL 0x0011 /* Write All (reversed) */ #define EPROM_ADR_SZ 7 /* Number of bits in offset address */ #define EPROM_OP_SZ 3 /* Number of bits in command */ #define SIZE_ADDR_OP (EPROM_ADR_SZ + EPROM_OP_SZ) #define LC46A_MAX_OPS 10 /* Number of bits in Instruction */ #define NUM_OF_BITS 8 /* Number of bits in data */ /* EEPROM signal bits */ #define EPROM_ACTIVE_OUT_BIT 0x0001 /* Out data bit */ #define EPROM_ACTIVE_IN_BIT 0x0002 /* In data bit */ #define ACTIVE_IN_BIT_SHIFT 0x0001 /* Shift In data bit to LSB */ #define EPROM_ENCS 0x0004 /* Set EEPROM CS during operation */ /*------------------------------------------------------------------------ * The ByteReverse table is used to reverses the 8 bits within a byte *------------------------------------------------------------------------ */ static unsigned char ByteReverse[256]; static int ByteReverseBuilt = FALSE; /*------------------------------------------------------------------------ * mfg_template - initial serial EEPROM data structure *------------------------------------------------------------------------ */ short mfg_template[sizeof (FLD_TYPE2)] = { PROM_FORMAT_TYPE2, /* type; */ 0x00, 0x1A, /* length[2]; */ 0x00, 0x00, 0x00, 0x00, /* Crc32[4]; */ 0x11, 0x76, /* Id[2]; */ 0x07, 0x05, /* SubId[2] E1; */ 0x00, 0xA0, 0xD6, 0x00, 0x00, 0x00, /* Serial[6]; */ 0x00, 0x00, 0x00, 0x00, /* CreateTime[4]; */ 0x00, 
0x00, 0x00, 0x00, /* HeatRunTime[4]; */ 0x00, 0x00, 0x00, 0x00, /* HeatRunIterations[4]; */ 0x00, 0x00, 0x00, 0x00, /* HeatRunErrors[4]; */ }; /*------------------------------------------------------------------------ * BuildByteReverse - build the 8-bit reverse table *------------------------------------------------------------------------ * * The 'ByteReverse' table reverses the 8 bits within a byte * (the MSB becomes the LSB etc.). */ STATIC void BuildByteReverse (void) { long half; /* Used to build by powers to 2 */ int i; ByteReverse[0] = 0; for (half = 1; half < sizeof (ByteReverse); half <<= 1) for (i = 0; i < half; i++) ByteReverse[half + i] = (char) (ByteReverse[i] | (0x80 / half)); ByteReverseBuilt = TRUE; } /*------------------------------------------------------------------------ * eeprom_delay - small delay for EEPROM timing *------------------------------------------------------------------------ */ STATIC void eeprom_delay (void) { int timeout; for (timeout = 20; timeout; --timeout) { OS_uwait_dummy (); } } /*------------------------------------------------------------------------ * eeprom_put_byte - Send a byte to the EEPROM serially *------------------------------------------------------------------------ * * Given the PCI address and the data, this routine serially sends * the data to the EEPROM. */ void eeprom_put_byte (long addr, long data, int count) { u_int32_t output; while (--count >= 0) { output = (data & EPROM_ACTIVE_OUT_BIT) ? 1 : 0; /* Get next data bit */ output |= EPROM_ENCS; /* Add Chip Select */ data >>= 1; eeprom_delay (); pci_write_32 ((u_int32_t *) addr, output); /* Output it */ } } /*------------------------------------------------------------------------ * eeprom_get_byte - Receive a byte from the EEPROM serially *------------------------------------------------------------------------ * * Given the PCI address, this routine serially fetches the data * from the EEPROM. 
*/ u_int32_t eeprom_get_byte (long addr) { u_int32_t input; u_int32_t data; int count; /* Start the Reading of DATA ** ** The first read is a dummy as the data is latched in the ** EPLD and read on the next read access to the EEPROM. */ input = pci_read_32 ((u_int32_t *) addr); data = 0; count = NUM_OF_BITS; while (--count >= 0) { eeprom_delay (); input = pci_read_32 ((u_int32_t *) addr); data <<= 1; /* Shift data over */ data |= (input & EPROM_ACTIVE_IN_BIT) ? 1 : 0; } return data; } /*------------------------------------------------------------------------ * disable_pmc_eeprom - Disable writes to the EEPROM *------------------------------------------------------------------------ * * Issue the EEPROM command to disable writes. */ STATIC void disable_pmc_eeprom (long addr) { eeprom_put_byte (addr, EPROM_EWDS, SIZE_ADDR_OP); pci_write_32 ((u_int32_t *) addr, 0); /* this removes Chip Select * from EEPROM */ } /*------------------------------------------------------------------------ * enable_pmc_eeprom - Enable writes to the EEPROM *------------------------------------------------------------------------ * * Issue the EEPROM command to enable writes. */ STATIC void enable_pmc_eeprom (long addr) { eeprom_put_byte (addr, EPROM_EWEN, SIZE_ADDR_OP); pci_write_32 ((u_int32_t *) addr, 0); /* this removes Chip Select * from EEPROM */ } /*------------------------------------------------------------------------ * pmc_eeprom_read - EEPROM location read *------------------------------------------------------------------------ * * Given a EEPROM PCI address and location offset, this routine returns * the contents of the specified location to the calling routine. */ u_int32_t pmc_eeprom_read (long addr, long mem_offset) { u_int32_t data; /* Data from chip */ if (!ByteReverseBuilt) BuildByteReverse (); mem_offset = ByteReverse[0x7F & mem_offset]; /* Reverse address */ /* * NOTE: The max offset address is 128 or half the reversal table. 
So the * LSB is always zero and counts as a built in shift of one bit. So even * though we need to shift 3 bits to make room for the command, we only * need to shift twice more because of the built in shift. */ mem_offset <<= 2; /* Shift for command */ mem_offset |= EPROM_READ; /* Add command */ eeprom_put_byte (addr, mem_offset, SIZE_ADDR_OP); /* Output chip address */ data = eeprom_get_byte (addr); /* Read chip data */ pci_write_32 ((u_int32_t *) addr, 0); /* Remove Chip Select from * EEPROM */ return (data & 0x000000FF); } /*------------------------------------------------------------------------ * pmc_eeprom_write - EEPROM location write *------------------------------------------------------------------------ * * Given a EEPROM PCI address, location offset and value, this * routine writes the value to the specified location. * * Note: it is up to the caller to determine if the write * operation succeeded. */ int pmc_eeprom_write (long addr, long mem_offset, u_int32_t data) { volatile u_int32_t temp; int count; if (!ByteReverseBuilt) BuildByteReverse (); mem_offset = ByteReverse[0x7F & mem_offset]; /* Reverse address */ /* * NOTE: The max offset address is 128 or half the reversal table. So the * LSB is always zero and counts as a built in shift of one bit. So even * though we need to shift 3 bits to make room for the command, we only * need to shift twice more because of the built in shift. */ mem_offset <<= 2; /* Shift for command */ mem_offset |= EPROM_WRITE; /* Add command */ eeprom_put_byte (addr, mem_offset, SIZE_ADDR_OP); /* Output chip address */ data = ByteReverse[0xFF & data];/* Reverse data */ eeprom_put_byte (addr, data, NUM_OF_BITS); /* Output chip data */ pci_write_32 ((u_int32_t *) addr, 0); /* Remove Chip Select from * EEPROM */ /* ** Must see Data In at a low state before completing this transaction. ** ** Afterwards, the data bit will return to a high state, ~6 ms, terminating ** the operation. 
*/ pci_write_32 ((u_int32_t *) addr, EPROM_ENCS); /* Re-enable Chip Select */ temp = pci_read_32 ((u_int32_t *) addr); /* discard first read */ temp = pci_read_32 ((u_int32_t *) addr); if (temp & EPROM_ACTIVE_IN_BIT) { temp = pci_read_32 ((u_int32_t *) addr); if (temp & EPROM_ACTIVE_IN_BIT) { pci_write_32 ((u_int32_t *) addr, 0); /* Remove Chip Select * from EEPROM */ return (1); } } count = 1000; while (count--) { for (temp = 0; temp < 0x10; temp++) OS_uwait_dummy (); if (pci_read_32 ((u_int32_t *) addr) & EPROM_ACTIVE_IN_BIT) break; } if (count == -1) return (2); return (0); } /*------------------------------------------------------------------------ * pmcGetBuffValue - read the specified value from buffer *------------------------------------------------------------------------ */ long pmcGetBuffValue (char *ptr, int size) { long value = 0; int index; for (index = 0; index < size; ++index) { value <<= 8; value |= ptr[index] & 0xFF; } return value; } /*------------------------------------------------------------------------ * pmcSetBuffValue - save the specified value to buffer *------------------------------------------------------------------------ */ void pmcSetBuffValue (char *ptr, long value, int size) { int index = size; while (--index >= 0) { ptr[index] = (char) (value & 0xFF); value >>= 8; } } /*------------------------------------------------------------------------ * pmc_eeprom_read_buffer - read EEPROM data into specified buffer *------------------------------------------------------------------------ */ void pmc_eeprom_read_buffer (long addr, long mem_offset, char *dest_ptr, int size) { while (--size >= 0) *dest_ptr++ = (char) pmc_eeprom_read (addr, mem_offset++); } /*------------------------------------------------------------------------ * pmc_eeprom_write_buffer - write EEPROM data from specified buffer *------------------------------------------------------------------------ */ void pmc_eeprom_write_buffer (long addr, long mem_offset, char 
*dest_ptr, int size) { enable_pmc_eeprom (addr); while (--size >= 0) pmc_eeprom_write (addr, mem_offset++, *dest_ptr++); disable_pmc_eeprom (addr); } /*------------------------------------------------------------------------ * pmcCalcCrc - calculate the CRC for the serial EEPROM structure *------------------------------------------------------------------------ */ u_int32_t pmcCalcCrc_T01 (void *bufp) { FLD_TYPE2 *buf = bufp; u_int32_t crc; /* CRC of the structure */ /* Calc CRC for type and length fields */ sbeCrc ( (u_int8_t *) &buf->type, (u_int32_t) STRUCT_OFFSET (FLD_TYPE1, Crc32), (u_int32_t) 0, (u_int32_t *) &crc); #ifdef EEPROM_TYPE_DEBUG pr_info("sbeCrc: crc 1 calculated as %08x\n", crc); /* RLD DEBUG */ #endif return ~crc; } u_int32_t pmcCalcCrc_T02 (void *bufp) { FLD_TYPE2 *buf = bufp; u_int32_t crc; /* CRC of the structure */ /* Calc CRC for type and length fields */ sbeCrc ( (u_int8_t *) &buf->type, (u_int32_t) STRUCT_OFFSET (FLD_TYPE2, Crc32), (u_int32_t) 0, (u_int32_t *) &crc); /* Calc CRC for remaining fields */ sbeCrc ( (u_int8_t *) &buf->Id[0], (u_int32_t) (sizeof (FLD_TYPE2) - STRUCT_OFFSET (FLD_TYPE2, Id)), (u_int32_t) crc, (u_int32_t *) &crc); #ifdef EEPROM_TYPE_DEBUG pr_info("sbeCrc: crc 2 calculated as %08x\n", crc); /* RLD DEBUG */ #endif return crc; } /*------------------------------------------------------------------------ * pmc_init_seeprom - initialize the serial EEPROM structure *------------------------------------------------------------------------ * * At the front of the serial EEPROM there is a record that contains * manufacturing information. If the info does not already exist, it * is created. The only field modifiable by the operator is the * serial number field. 
*/ void pmc_init_seeprom (u_int32_t addr, u_int32_t serialNum) { PROMFORMAT buffer; /* Memory image of structure */ u_int32_t crc; /* CRC of structure */ time_t createTime; int i; createTime = get_seconds (); /* use template data */ for (i = 0; i < sizeof (FLD_TYPE2); ++i) buffer.bytes[i] = mfg_template[i]; /* Update serial number field in buffer */ pmcSetBuffValue (&buffer.fldType2.Serial[3], serialNum, 3); /* Update create time field in buffer */ pmcSetBuffValue (&buffer.fldType2.CreateTime[0], createTime, 4); /* Update CRC field in buffer */ crc = pmcCalcCrc_T02 (&buffer); pmcSetBuffValue (&buffer.fldType2.Crc32[0], crc, 4); #ifdef DEBUG for (i = 0; i < sizeof (FLD_TYPE2); ++i) pr_info("[%02X] = %02X\n", i, buffer.bytes[i] & 0xFF); #endif /* Write structure to serial EEPROM */ pmc_eeprom_write_buffer (addr, EE_MFG, (char *) &buffer, sizeof (FLD_TYPE2)); } char pmc_verify_cksum (void *bufp) { FLD_TYPE1 *buf1 = bufp; FLD_TYPE2 *buf2 = bufp; u_int32_t crc1, crc2; /* CRC read from EEPROM */ /* Retrieve contents of CRC field */ crc1 = pmcGetBuffValue (&buf1->Crc32[0], sizeof (buf1->Crc32)); #ifdef EEPROM_TYPE_DEBUG pr_info("EEPROM: chksum 1 reads as %08x\n", crc1); /* RLD DEBUG */ #endif if ((buf1->type == PROM_FORMAT_TYPE1) && (pmcCalcCrc_T01 ((void *) buf1) == crc1)) return PROM_FORMAT_TYPE1; /* checksum type 1 verified */ crc2 = pmcGetBuffValue (&buf2->Crc32[0], sizeof (buf2->Crc32)); #ifdef EEPROM_TYPE_DEBUG pr_info("EEPROM: chksum 2 reads as %08x\n", crc2); /* RLD DEBUG */ #endif if ((buf2->type == PROM_FORMAT_TYPE2) && (pmcCalcCrc_T02 ((void *) buf2) == crc2)) return PROM_FORMAT_TYPE2; /* checksum type 2 verified */ return PROM_FORMAT_Unk; /* failed to validate */ } /*** End-of-File ***/
gpl-2.0
slebit/android_kernel_lge_v500
arch/parisc/math-emu/fcnvuf.c
14107
8168
/* * Linux/PA-RISC Project (http://www.parisc-linux.org/) * * Floating-point emulation code * Copyright (C) 2001 Hewlett-Packard (Paul Bame) <bame@debian.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /* * BEGIN_DESC * * File: * @(#) pa/spmath/fcnvuf.c $Revision: 1.1 $ * * Purpose: * Fixed point to Floating-point Converts * * External Interfaces: * dbl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status) * dbl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status) * sgl_to_dbl_fcnvuf(srcptr,nullptr,dstptr,status) * sgl_to_sgl_fcnvuf(srcptr,nullptr,dstptr,status) * * Internal Interfaces: * * Theory: * <<please update with a overview of the operation of this file>> * * END_DESC */ #include "float.h" #include "sgl_float.h" #include "dbl_float.h" #include "cnv_float.h" /************************************************************************ * Fixed point to Floating-point Converts * ************************************************************************/ /* * Convert Single Unsigned Fixed to Single Floating-point format */ int sgl_to_sgl_fcnvuf( unsigned int *srcptr, unsigned int *nullptr, sgl_floating_point *dstptr, unsigned int *status) { register unsigned int src, result = 0; register int dst_exponent; src = *srcptr; /* Check for zero */ if (src == 0) { Sgl_setzero(result); *dstptr = result; return(NOEXCEPTION); } /* * Generate exponent and normalized 
mantissa */ dst_exponent = 16; /* initialize for normalization */ /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. */ Find_ms_one_bit(src,dst_exponent); /* left justify source, with msb at bit position 0 */ src <<= dst_exponent+1; Sgl_set_mantissa(result, src >> SGL_EXP_LENGTH); Sgl_set_exponent(result, 30+SGL_BIAS - dst_exponent); /* check for inexact */ if (Suint_isinexact_to_sgl(src)) { switch (Rounding_mode()) { case ROUNDPLUS: Sgl_increment(result); break; case ROUNDMINUS: /* never negative */ break; case ROUNDNEAREST: Sgl_roundnearest_from_suint(src,result); break; } if (Is_inexacttrap_enabled()) { *dstptr = result; return(INEXACTEXCEPTION); } else Set_inexactflag(); } *dstptr = result; return(NOEXCEPTION); } /* * Single Unsigned Fixed to Double Floating-point */ int sgl_to_dbl_fcnvuf( unsigned int *srcptr, unsigned int *nullptr, dbl_floating_point *dstptr, unsigned int *status) { register int dst_exponent; register unsigned int src, resultp1 = 0, resultp2 = 0; src = *srcptr; /* Check for zero */ if (src == 0) { Dbl_setzero(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate exponent and normalized mantissa */ dst_exponent = 16; /* initialize for normalization */ /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. 
*/ Find_ms_one_bit(src,dst_exponent); /* left justify source, with msb at bit position 0 */ src <<= dst_exponent+1; Dbl_set_mantissap1(resultp1, src >> DBL_EXP_LENGTH); Dbl_set_mantissap2(resultp2, src << (32-DBL_EXP_LENGTH)); Dbl_set_exponent(resultp1, (30+DBL_BIAS) - dst_exponent); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Double Unsigned Fixed to Single Floating-point */ int dbl_to_sgl_fcnvuf( dbl_unsigned *srcptr, unsigned int *nullptr, sgl_floating_point *dstptr, unsigned int *status) { int dst_exponent; unsigned int srcp1, srcp2, result = 0; Duint_copyfromptr(srcptr,srcp1,srcp2); /* Check for zero */ if (srcp1 == 0 && srcp2 == 0) { Sgl_setzero(result); *dstptr = result; return(NOEXCEPTION); } /* * Generate exponent and normalized mantissa */ dst_exponent = 16; /* initialize for normalization */ if (srcp1 == 0) { /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. */ Find_ms_one_bit(srcp2,dst_exponent); /* left justify source, with msb at bit position 0 */ srcp1 = srcp2 << dst_exponent+1; srcp2 = 0; /* * since msb set is in second word, need to * adjust bit position count */ dst_exponent += 32; } else { /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. 
* */ Find_ms_one_bit(srcp1,dst_exponent); /* left justify source, with msb at bit position 0 */ if (dst_exponent >= 0) { Variable_shift_double(srcp1,srcp2,(31-dst_exponent), srcp1); srcp2 <<= dst_exponent+1; } } Sgl_set_mantissa(result, srcp1 >> SGL_EXP_LENGTH); Sgl_set_exponent(result, (62+SGL_BIAS) - dst_exponent); /* check for inexact */ if (Duint_isinexact_to_sgl(srcp1,srcp2)) { switch (Rounding_mode()) { case ROUNDPLUS: Sgl_increment(result); break; case ROUNDMINUS: /* never negative */ break; case ROUNDNEAREST: Sgl_roundnearest_from_duint(srcp1,srcp2,result); break; } if (Is_inexacttrap_enabled()) { *dstptr = result; return(INEXACTEXCEPTION); } else Set_inexactflag(); } *dstptr = result; return(NOEXCEPTION); } /* * Double Unsigned Fixed to Double Floating-point */ int dbl_to_dbl_fcnvuf( dbl_unsigned *srcptr, unsigned int *nullptr, dbl_floating_point *dstptr, unsigned int *status) { register int dst_exponent; register unsigned int srcp1, srcp2, resultp1 = 0, resultp2 = 0; Duint_copyfromptr(srcptr,srcp1,srcp2); /* Check for zero */ if (srcp1 == 0 && srcp2 ==0) { Dbl_setzero(resultp1,resultp2); Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); } /* * Generate exponent and normalized mantissa */ dst_exponent = 16; /* initialize for normalization */ if (srcp1 == 0) { /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. */ Find_ms_one_bit(srcp2,dst_exponent); /* left justify source, with msb at bit position 0 */ srcp1 = srcp2 << dst_exponent+1; srcp2 = 0; /* * since msb set is in second word, need to * adjust bit position count */ dst_exponent += 32; } else { /* * Check word for most significant bit set. Returns * a value in dst_exponent indicating the bit position, * between -1 and 30. 
*/ Find_ms_one_bit(srcp1,dst_exponent); /* left justify source, with msb at bit position 0 */ if (dst_exponent >= 0) { Variable_shift_double(srcp1,srcp2,(31-dst_exponent), srcp1); srcp2 <<= dst_exponent+1; } } Dbl_set_mantissap1(resultp1, srcp1 >> DBL_EXP_LENGTH); Shiftdouble(srcp1,srcp2,DBL_EXP_LENGTH,resultp2); Dbl_set_exponent(resultp1, (62+DBL_BIAS) - dst_exponent); /* check for inexact */ if (Duint_isinexact_to_dbl(srcp2)) { switch (Rounding_mode()) { case ROUNDPLUS: Dbl_increment(resultp1,resultp2); break; case ROUNDMINUS: /* never negative */ break; case ROUNDNEAREST: Dbl_roundnearest_from_duint(srcp2,resultp1, resultp2); break; } if (Is_inexacttrap_enabled()) { Dbl_copytoptr(resultp1,resultp2,dstptr); return(INEXACTEXCEPTION); } else Set_inexactflag(); } Dbl_copytoptr(resultp1,resultp2,dstptr); return(NOEXCEPTION); }
gpl-2.0
DC07/spirit_msm8226
fs/dlm/util.c
14875
4610
/******************************************************************************
*******************************************************************************
**
**  Copyright (C) 2005-2008 Red Hat, Inc.  All rights reserved.
**
**  This copyrighted material is made available to anyone wishing to use,
**  modify, copy, or redistribute it subject to the terms and conditions
**  of the GNU General Public License v.2.
**
*******************************************************************************
******************************************************************************/

#include "dlm_internal.h"
#include "rcom.h"
#include "util.h"

#define DLM_ERRNO_EDEADLK		35
#define DLM_ERRNO_EBADR			53
#define DLM_ERRNO_EBADSLT		57
#define DLM_ERRNO_EPROTO		71
#define DLM_ERRNO_EOPNOTSUPP		95
#define DLM_ERRNO_ETIMEDOUT	       110
#define DLM_ERRNO_EINPROGRESS	       115

/* Byte-swap the common dlm_header for transmission on the wire. */

static void header_out(struct dlm_header *hd)
{
	hd->h_version		= cpu_to_le32(hd->h_version);
	hd->h_lockspace		= cpu_to_le32(hd->h_lockspace);
	hd->h_nodeid		= cpu_to_le32(hd->h_nodeid);
	hd->h_length		= cpu_to_le16(hd->h_length);
}

/* Byte-swap a received dlm_header back to host order. */

static void header_in(struct dlm_header *hd)
{
	hd->h_version		= le32_to_cpu(hd->h_version);
	hd->h_lockspace		= le32_to_cpu(hd->h_lockspace);
	hd->h_nodeid		= le32_to_cpu(hd->h_nodeid);
	hd->h_length		= le16_to_cpu(hd->h_length);
}

/* higher errno values are inconsistent across architectures, so select
   one set of values for on the wire */

static const struct {
	int local;	/* -E* value used inside the kernel */
	int wire;	/* fixed -DLM_ERRNO_* value carried on the wire */
} dlm_errno_map[] = {
	{ -EDEADLK,	-DLM_ERRNO_EDEADLK },
	{ -EBADR,	-DLM_ERRNO_EBADR },
	{ -EBADSLT,	-DLM_ERRNO_EBADSLT },
	{ -EPROTO,	-DLM_ERRNO_EPROTO },
	{ -EOPNOTSUPP,	-DLM_ERRNO_EOPNOTSUPP },
	{ -ETIMEDOUT,	-DLM_ERRNO_ETIMEDOUT },
	{ -EINPROGRESS,	-DLM_ERRNO_EINPROGRESS },
};

static int to_dlm_errno(int err)
{
	int i;

	for (i = 0; i < (int)(sizeof(dlm_errno_map) / sizeof(dlm_errno_map[0])); i++) {
		if (dlm_errno_map[i].local == err)
			return dlm_errno_map[i].wire;
	}
	/* values not in the table are consistent enough to pass through */
	return err;
}

static int from_dlm_errno(int err)
{
	int i;

	for (i = 0; i < (int)(sizeof(dlm_errno_map) / sizeof(dlm_errno_map[0])); i++) {
		if (dlm_errno_map[i].wire == err)
			return dlm_errno_map[i].local;
	}
	return err;
}

/* Convert an outgoing dlm_message to little-endian wire order,
   translating the result code to its architecture-neutral value. */

void dlm_message_out(struct dlm_message *ms)
{
	header_out(&ms->m_header);

	ms->m_type		= cpu_to_le32(ms->m_type);
	ms->m_nodeid		= cpu_to_le32(ms->m_nodeid);
	ms->m_pid		= cpu_to_le32(ms->m_pid);
	ms->m_lkid		= cpu_to_le32(ms->m_lkid);
	ms->m_remid		= cpu_to_le32(ms->m_remid);
	ms->m_parent_lkid	= cpu_to_le32(ms->m_parent_lkid);
	ms->m_parent_remid	= cpu_to_le32(ms->m_parent_remid);
	ms->m_exflags		= cpu_to_le32(ms->m_exflags);
	ms->m_sbflags		= cpu_to_le32(ms->m_sbflags);
	ms->m_flags		= cpu_to_le32(ms->m_flags);
	ms->m_lvbseq		= cpu_to_le32(ms->m_lvbseq);
	ms->m_hash		= cpu_to_le32(ms->m_hash);
	ms->m_status		= cpu_to_le32(ms->m_status);
	ms->m_grmode		= cpu_to_le32(ms->m_grmode);
	ms->m_rqmode		= cpu_to_le32(ms->m_rqmode);
	ms->m_bastmode		= cpu_to_le32(ms->m_bastmode);
	ms->m_asts		= cpu_to_le32(ms->m_asts);
	ms->m_result		= cpu_to_le32(to_dlm_errno(ms->m_result));
}

/* Convert a received dlm_message back to host order, translating the
   wire result code to the local errno value. */

void dlm_message_in(struct dlm_message *ms)
{
	header_in(&ms->m_header);

	ms->m_type		= le32_to_cpu(ms->m_type);
	ms->m_nodeid		= le32_to_cpu(ms->m_nodeid);
	ms->m_pid		= le32_to_cpu(ms->m_pid);
	ms->m_lkid		= le32_to_cpu(ms->m_lkid);
	ms->m_remid		= le32_to_cpu(ms->m_remid);
	ms->m_parent_lkid	= le32_to_cpu(ms->m_parent_lkid);
	ms->m_parent_remid	= le32_to_cpu(ms->m_parent_remid);
	ms->m_exflags		= le32_to_cpu(ms->m_exflags);
	ms->m_sbflags		= le32_to_cpu(ms->m_sbflags);
	ms->m_flags		= le32_to_cpu(ms->m_flags);
	ms->m_lvbseq		= le32_to_cpu(ms->m_lvbseq);
	ms->m_hash		= le32_to_cpu(ms->m_hash);
	ms->m_status		= le32_to_cpu(ms->m_status);
	ms->m_grmode		= le32_to_cpu(ms->m_grmode);
	ms->m_rqmode		= le32_to_cpu(ms->m_rqmode);
	ms->m_bastmode		= le32_to_cpu(ms->m_bastmode);
	ms->m_asts		= le32_to_cpu(ms->m_asts);
	ms->m_result		= from_dlm_errno(le32_to_cpu(ms->m_result));
}

/* Convert an outgoing recovery message to little-endian wire order. */

void dlm_rcom_out(struct dlm_rcom *rc)
{
	header_out(&rc->rc_header);

	rc->rc_type		= cpu_to_le32(rc->rc_type);
	rc->rc_result		= cpu_to_le32(rc->rc_result);
	rc->rc_id		= cpu_to_le64(rc->rc_id);
	rc->rc_seq		= cpu_to_le64(rc->rc_seq);
	rc->rc_seq_reply	= cpu_to_le64(rc->rc_seq_reply);
}

/* Convert a received recovery message back to host order. */

void dlm_rcom_in(struct dlm_rcom *rc)
{
	header_in(&rc->rc_header);

	rc->rc_type		= le32_to_cpu(rc->rc_type);
	rc->rc_result		= le32_to_cpu(rc->rc_result);
	rc->rc_id		= le64_to_cpu(rc->rc_id);
	rc->rc_seq		= le64_to_cpu(rc->rc_seq);
	rc->rc_seq_reply	= le64_to_cpu(rc->rc_seq_reply);
}
gpl-2.0
udhos/qpimd
ospf6d/ospf6d.c
28
56724
/* * Copyright (C) 2003 Yasuhiro Ohara * * This file is part of GNU Zebra. * * GNU Zebra is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * GNU Zebra is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with GNU Zebra; see the file COPYING. If not, write to the * Free Software Foundation, Inc., 59 Temple Place - Suite 330, * Boston, MA 02111-1307, USA. */ #include <zebra.h> #include "thread.h" #include "linklist.h" #include "vty.h" #include "command.h" #include "ospf6_proto.h" #include "ospf6_network.h" #include "ospf6_lsa.h" #include "ospf6_lsdb.h" #include "ospf6_message.h" #include "ospf6_route.h" #include "ospf6_zebra.h" #include "ospf6_spf.h" #include "ospf6_top.h" #include "ospf6_area.h" #include "ospf6_interface.h" #include "ospf6_neighbor.h" #include "ospf6_intra.h" #include "ospf6_asbr.h" #include "ospf6_abr.h" #include "ospf6_flood.h" #include "ospf6d.h" #ifdef HAVE_SNMP #include "ospf6_snmp.h" #endif /*HAVE_SNMP*/ char ospf6_daemon_version[] = OSPF6_DAEMON_VERSION; struct route_node * route_prev (struct route_node *node) { struct route_node *end; struct route_node *prev = NULL; end = node; node = node->parent; if (node) route_lock_node (node); while (node) { prev = node; node = route_next (node); if (node == end) { route_unlock_node (node); node = NULL; } } route_unlock_node (end); if (prev) route_lock_node (prev); return prev; } /* show database functions */ DEFUN (show_version_ospf6, show_version_ospf6_cmd, "show version ospf6", SHOW_STR "Displays ospf6d version\n" ) { vty_out (vty, "Zebra OSPF6d Version: %s%s", ospf6_daemon_version, 
VNL); return CMD_SUCCESS; } static struct cmd_node debug_node = { DEBUG_NODE, "", 1 /* VTYSH */ }; static int config_write_ospf6_debug (struct vty *vty) { config_write_ospf6_debug_message (vty); config_write_ospf6_debug_lsa (vty); config_write_ospf6_debug_zebra (vty); config_write_ospf6_debug_interface (vty); config_write_ospf6_debug_neighbor (vty); config_write_ospf6_debug_spf (vty); config_write_ospf6_debug_route (vty); config_write_ospf6_debug_brouter (vty); config_write_ospf6_debug_asbr (vty); config_write_ospf6_debug_abr (vty); config_write_ospf6_debug_flood (vty); vty_out (vty, "!%s", VNL); return 0; } #define AREA_LSDB_TITLE_FORMAT \ "%s Area Scoped Link State Database (Area %s)%s%s" #define IF_LSDB_TITLE_FORMAT \ "%s I/F Scoped Link State Database (I/F %s in Area %s)%s%s" #define AS_LSDB_TITLE_FORMAT \ "%s AS Scoped Link State Database%s%s" static int parse_show_level (int argc, const char *argv[]) { int level = 0; if (argc) { if (! strncmp (argv[0], "de", 2)) level = OSPF6_LSDB_SHOW_LEVEL_DETAIL; else if (! strncmp (argv[0], "du", 2)) level = OSPF6_LSDB_SHOW_LEVEL_DUMP; else if (! strncmp (argv[0], "in", 2)) level = OSPF6_LSDB_SHOW_LEVEL_INTERNAL; } else level = OSPF6_LSDB_SHOW_LEVEL_NORMAL; return level; } static u_int16_t parse_type_spec (int argc, const char *argv[]) { u_int16_t type = 0; assert (argc); if (! strcmp (argv[0], "router")) type = htons (OSPF6_LSTYPE_ROUTER); else if (! strcmp (argv[0], "network")) type = htons (OSPF6_LSTYPE_NETWORK); else if (! strcmp (argv[0], "as-external")) type = htons (OSPF6_LSTYPE_AS_EXTERNAL); else if (! strcmp (argv[0], "intra-prefix")) type = htons (OSPF6_LSTYPE_INTRA_PREFIX); else if (! strcmp (argv[0], "inter-router")) type = htons (OSPF6_LSTYPE_INTER_ROUTER); else if (! strcmp (argv[0], "inter-prefix")) type = htons (OSPF6_LSTYPE_INTER_PREFIX); else if (! 
strcmp (argv[0], "link")) type = htons (OSPF6_LSTYPE_LINK); return type; } DEFUN (show_ipv6_ospf6_database, show_ipv6_ospf6_database_cmd, "show ipv6 ospf6 database", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; OSPF6_CMD_CHECK_RUNNING (); level = parse_show_level (argc, argv); for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, NULL, NULL, oa->lsdb); } for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, NULL, NULL, oi->lsdb); } } vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, NULL, NULL, o->lsdb); vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database, show_ipv6_ospf6_database_detail_cmd, "show ipv6 ospf6 database (detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_type, show_ipv6_ospf6_database_type_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int16_t type = 0; OSPF6_CMD_CHECK_RUNNING (); type = parse_type_spec (argc, argv); argc--; argv++; level 
= parse_show_level (argc, argv); switch (OSPF6_LSA_SCOPE (type)) { case OSPF6_SCOPE_AREA: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, NULL, NULL, oa->lsdb); } break; case OSPF6_SCOPE_LINKLOCAL: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, NULL, NULL, oi->lsdb); } } break; case OSPF6_SCOPE_AS: vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, &type, NULL, NULL, o->lsdb); break; default: assert (0); break; } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_type, show_ipv6_ospf6_database_type_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_id, show_ipv6_ospf6_database_id_cmd, "show ipv6 ospf6 database * A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Any Link state Type\n" "Specify Link state ID as IPv4 address notation\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int32_t id = 0; OSPF6_CMD_CHECK_RUNNING (); if ((inet_pton (AF_INET, argv[0], &id)) != 1) { vty_out (vty, "Link State ID is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level 
(argc, argv); for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, &id, NULL, oa->lsdb); } for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, &id, NULL, oi->lsdb); } } vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, &id, NULL, o->lsdb); vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_id, show_ipv6_ospf6_database_id_detail_cmd, "show ipv6 ospf6 database * A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Any Link state Type\n" "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) ALIAS (show_ipv6_ospf6_database_id, show_ipv6_ospf6_database_linkstate_id_cmd, "show ipv6 ospf6 database linkstate-id A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" ) ALIAS (show_ipv6_ospf6_database_id, show_ipv6_ospf6_database_linkstate_id_detail_cmd, "show ipv6 ospf6 database linkstate-id A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_router, show_ipv6_ospf6_database_router_cmd, "show ipv6 ospf6 database * * A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Any Link state Type\n" "Any Link state ID\n" "Specify Advertising Router as IPv4 address notation\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; 
u_int32_t adv_router = 0; OSPF6_CMD_CHECK_RUNNING (); if ((inet_pton (AF_INET, argv[0], &adv_router)) != 1) { vty_out (vty, "Advertising Router is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level (argc, argv); for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, NULL, &adv_router, oa->lsdb); } for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, NULL, &adv_router, oi->lsdb); } } vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, NULL, &adv_router, o->lsdb); vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_router, show_ipv6_ospf6_database_router_detail_cmd, "show ipv6 ospf6 database * * A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Any Link state Type\n" "Any Link state ID\n" "Specify Advertising Router as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) ALIAS (show_ipv6_ospf6_database_router, show_ipv6_ospf6_database_adv_router_cmd, "show ipv6 ospf6 database adv-router A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Search by Advertising Router\n" "Specify Advertising Router as IPv4 address notation\n" ) ALIAS (show_ipv6_ospf6_database_router, show_ipv6_ospf6_database_adv_router_detail_cmd, "show ipv6 ospf6 database adv-router A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Search by Advertising Router\n" "Specify Advertising Router as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_type_id, 
show_ipv6_ospf6_database_type_id_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Specify Link state ID as IPv4 address notation\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int16_t type = 0; u_int32_t id = 0; OSPF6_CMD_CHECK_RUNNING (); type = parse_type_spec (argc, argv); argc--; argv++; if ((inet_pton (AF_INET, argv[0], &id)) != 1) { vty_out (vty, "Link state ID is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level (argc, argv); switch (OSPF6_LSA_SCOPE (type)) { case OSPF6_SCOPE_AREA: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, NULL, oa->lsdb); } break; case OSPF6_SCOPE_LINKLOCAL: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, NULL, oi->lsdb); } } break; case OSPF6_SCOPE_AS: vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, NULL, o->lsdb); break; default: assert (0); break; } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_type_id, show_ipv6_ospf6_database_type_id_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR 
"Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) ALIAS (show_ipv6_ospf6_database_type_id, show_ipv6_ospf6_database_type_linkstate_id_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) linkstate-id A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" ) ALIAS (show_ipv6_ospf6_database_type_id, show_ipv6_ospf6_database_type_linkstate_id_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) linkstate-id A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_type_router, show_ipv6_ospf6_database_type_router_cmd, "show ipv6 ospf6 database " 
"(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) * A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Any Link state ID\n" "Specify Advertising Router as IPv4 address notation\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int16_t type = 0; u_int32_t adv_router = 0; OSPF6_CMD_CHECK_RUNNING (); type = parse_type_spec (argc, argv); argc--; argv++; if ((inet_pton (AF_INET, argv[0], &adv_router)) != 1) { vty_out (vty, "Advertising Router is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level (argc, argv); switch (OSPF6_LSA_SCOPE (type)) { case OSPF6_SCOPE_AREA: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, NULL, &adv_router, oa->lsdb); } break; case OSPF6_SCOPE_LINKLOCAL: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, NULL, &adv_router, oi->lsdb); } } break; case OSPF6_SCOPE_AS: vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, &type, NULL, &adv_router, o->lsdb); break; default: assert (0); break; } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_type_router, show_ipv6_ospf6_database_type_router_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) * A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR 
OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Any Link state ID\n" "Specify Advertising Router as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) ALIAS (show_ipv6_ospf6_database_type_router, show_ipv6_ospf6_database_type_adv_router_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) adv-router A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Search by Advertising Router\n" "Specify Advertising Router as IPv4 address notation\n" ) ALIAS (show_ipv6_ospf6_database_type_router, show_ipv6_ospf6_database_type_adv_router_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) adv-router A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Search by Advertising Router\n" "Specify Advertising Router as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_id_router, show_ipv6_ospf6_database_id_router_cmd, "show ipv6 ospf6 database 
* A.B.C.D A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Any Link state Type\n" "Specify Link state ID as IPv4 address notation\n" "Specify Advertising Router as IPv4 address notation\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int32_t id = 0; u_int32_t adv_router = 0; OSPF6_CMD_CHECK_RUNNING (); if ((inet_pton (AF_INET, argv[0], &id)) != 1) { vty_out (vty, "Link state ID is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; if ((inet_pton (AF_INET, argv[0], &adv_router)) != 1) { vty_out (vty, "Advertising Router is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level (argc, argv); for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, &id, &adv_router, oa->lsdb); } for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, &id, &adv_router, oi->lsdb); } } vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, &id, &adv_router, o->lsdb); vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_id_router, show_ipv6_ospf6_database_id_router_detail_cmd, "show ipv6 ospf6 database * A.B.C.D A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Any Link state Type\n" "Specify Link state ID as IPv4 address notation\n" "Specify Advertising Router as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_adv_router_linkstate_id, show_ipv6_ospf6_database_adv_router_linkstate_id_cmd, "show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR 
"Display Link state database\n" "Search by Advertising Router\n" "Specify Advertising Router as IPv4 address notation\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int32_t id = 0; u_int32_t adv_router = 0; OSPF6_CMD_CHECK_RUNNING (); if ((inet_pton (AF_INET, argv[0], &adv_router)) != 1) { vty_out (vty, "Advertising Router is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; if ((inet_pton (AF_INET, argv[0], &id)) != 1) { vty_out (vty, "Link state ID is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level (argc, argv); for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, &id, &adv_router, oa->lsdb); } for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, &id, &adv_router, oi->lsdb); } } vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, &id, &adv_router, o->lsdb); vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_adv_router_linkstate_id, show_ipv6_ospf6_database_adv_router_linkstate_id_detail_cmd, "show ipv6 ospf6 database adv-router A.B.C.D linkstate-id A.B.C.D " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Search by Advertising Router\n" "Specify Advertising Router as IPv4 address notation\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_type_id_router, show_ipv6_ospf6_database_type_id_router_cmd, "show ipv6 ospf6 database " 
"(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) A.B.C.D A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Specify Link state ID as IPv4 address notation\n" "Specify Advertising Router as IPv4 address notation\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int16_t type = 0; u_int32_t id = 0; u_int32_t adv_router = 0; OSPF6_CMD_CHECK_RUNNING (); type = parse_type_spec (argc, argv); argc--; argv++; if ((inet_pton (AF_INET, argv[0], &id)) != 1) { vty_out (vty, "Link state ID is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; if ((inet_pton (AF_INET, argv[0], &adv_router)) != 1) { vty_out (vty, "Advertising Router is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level (argc, argv); switch (OSPF6_LSA_SCOPE (type)) { case OSPF6_SCOPE_AREA: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, oa->lsdb); } break; case OSPF6_SCOPE_LINKLOCAL: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, oi->lsdb); } } break; case OSPF6_SCOPE_AS: vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, o->lsdb); break; default: assert (0); break; } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_type_id_router, 
show_ipv6_ospf6_database_type_id_router_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) A.B.C.D A.B.C.D " "(dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Specify Link state ID as IPv4 address notation\n" "Specify Advertising Router as IPv4 address notation\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_type_adv_router_linkstate_id, show_ipv6_ospf6_database_type_adv_router_linkstate_id_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) " "adv-router A.B.C.D linkstate-id A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Search by Advertising Router\n" "Specify Advertising Router as IPv4 address notation\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int16_t type = 0; u_int32_t id = 0; u_int32_t adv_router = 0; OSPF6_CMD_CHECK_RUNNING (); type = parse_type_spec (argc, argv); argc--; argv++; if ((inet_pton (AF_INET, argv[0], &adv_router)) != 1) { vty_out (vty, "Advertising Router is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; if ((inet_pton (AF_INET, argv[0], &id)) != 1) { vty_out (vty, "Link state ID is not 
parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level (argc, argv); switch (OSPF6_LSA_SCOPE (type)) { case OSPF6_SCOPE_AREA: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, oa->lsdb); } break; case OSPF6_SCOPE_LINKLOCAL: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, oi->lsdb); } } break; case OSPF6_SCOPE_AS: vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, o->lsdb); break; default: assert (0); break; } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_type_adv_router_linkstate_id, show_ipv6_ospf6_database_type_adv_router_linkstate_id_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) " "adv-router A.B.C.D linkstate-id A.B.C.D " "(dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Search by Advertising Router\n" "Specify Advertising Router as IPv4 address notation\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_self_originated, show_ipv6_ospf6_database_self_originated_cmd, "show ipv6 ospf6 database self-originated", SHOW_STR IPV6_STR OSPF6_STR "Display Self-originated LSAs\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; 
struct ospf6_area *oa; struct ospf6_interface *oi; u_int32_t adv_router = 0; OSPF6_CMD_CHECK_RUNNING (); level = parse_show_level (argc, argv); adv_router = o->router_id; for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, NULL, &adv_router, oa->lsdb); } for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, NULL, &adv_router, oi->lsdb); } } vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, NULL, NULL, &adv_router, o->lsdb); vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_self_originated, show_ipv6_ospf6_database_self_originated_detail_cmd, "show ipv6 ospf6 database self-originated " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Self-originated LSAs\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_type_self_originated, show_ipv6_ospf6_database_type_self_originated_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) self-originated", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Display Self-originated LSAs\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int16_t type = 0; u_int32_t adv_router = 0; OSPF6_CMD_CHECK_RUNNING (); type = parse_type_spec (argc, argv); argc--; argv++; level = parse_show_level (argc, argv); adv_router = 
o->router_id; switch (OSPF6_LSA_SCOPE (type)) { case OSPF6_SCOPE_AREA: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, NULL, &adv_router, oa->lsdb); } break; case OSPF6_SCOPE_LINKLOCAL: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, NULL, &adv_router, oi->lsdb); } } break; case OSPF6_SCOPE_AS: vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, &type, NULL, &adv_router, o->lsdb); break; default: assert (0); break; } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_type_self_originated, show_ipv6_ospf6_database_type_self_originated_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) self-originated " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Display Self-originated LSAs\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_type_self_originated_linkstate_id, show_ipv6_ospf6_database_type_self_originated_linkstate_id_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) self-originated " "linkstate-id A.B.C.D", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External 
LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Display Self-originated LSAs\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int16_t type = 0; u_int32_t adv_router = 0; u_int32_t id = 0; OSPF6_CMD_CHECK_RUNNING (); type = parse_type_spec (argc, argv); argc--; argv++; if ((inet_pton (AF_INET, argv[0], &id)) != 1) { vty_out (vty, "Link State ID is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level (argc, argv); adv_router = o->router_id; switch (OSPF6_LSA_SCOPE (type)) { case OSPF6_SCOPE_AREA: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, oa->lsdb); } break; case OSPF6_SCOPE_LINKLOCAL: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, oi->lsdb); } } break; case OSPF6_SCOPE_AS: vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, o->lsdb); break; default: assert (0); break; } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_type_self_originated_linkstate_id, show_ipv6_ospf6_database_type_self_originated_linkstate_id_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) self-originated " "linkstate-id A.B.C.D (detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display 
As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Display Self-originated LSAs\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_database_type_id_self_originated, show_ipv6_ospf6_database_type_id_self_originated_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) A.B.C.D self-originated", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Specify Link state ID as IPv4 address notation\n" "Display Self-originated LSAs\n" ) { int level; struct listnode *i, *j; struct ospf6 *o = ospf6; struct ospf6_area *oa; struct ospf6_interface *oi; u_int16_t type = 0; u_int32_t adv_router = 0; u_int32_t id = 0; OSPF6_CMD_CHECK_RUNNING (); type = parse_type_spec (argc, argv); argc--; argv++; if ((inet_pton (AF_INET, argv[0], &id)) != 1) { vty_out (vty, "Link State ID is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } argc--; argv++; level = parse_show_level (argc, argv); adv_router = o->router_id; switch (OSPF6_LSA_SCOPE (type)) { case OSPF6_SCOPE_AREA: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { vty_out (vty, AREA_LSDB_TITLE_FORMAT, VNL, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, oa->lsdb); } break; case OSPF6_SCOPE_LINKLOCAL: for (ALL_LIST_ELEMENTS_RO (o->area_list, i, oa)) { for (ALL_LIST_ELEMENTS_RO (oa->if_list, j, oi)) { vty_out (vty, IF_LSDB_TITLE_FORMAT, VNL, oi->interface->name, oa->name, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, 
oi->lsdb); } } break; case OSPF6_SCOPE_AS: vty_out (vty, AS_LSDB_TITLE_FORMAT, VNL, VNL, VNL); ospf6_lsdb_show (vty, level, &type, &id, &adv_router, o->lsdb); break; default: assert (0); break; } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_database_type_id_self_originated, show_ipv6_ospf6_database_type_id_self_originated_detail_cmd, "show ipv6 ospf6 database " "(router|network|inter-prefix|inter-router|as-external|" "group-membership|type-7|link|intra-prefix) A.B.C.D self-originated " "(detail|dump|internal)", SHOW_STR IPV6_STR OSPF6_STR "Display Link state database\n" "Display Router LSAs\n" "Display Network LSAs\n" "Display Inter-Area-Prefix LSAs\n" "Display Inter-Area-Router LSAs\n" "Display As-External LSAs\n" "Display Group-Membership LSAs\n" "Display Type-7 LSAs\n" "Display Link LSAs\n" "Display Intra-Area-Prefix LSAs\n" "Display Self-originated LSAs\n" "Search by Link state ID\n" "Specify Link state ID as IPv4 address notation\n" "Display details of LSAs\n" "Dump LSAs\n" "Display LSA's internal information\n" ) DEFUN (show_ipv6_ospf6_border_routers, show_ipv6_ospf6_border_routers_cmd, "show ipv6 ospf6 border-routers", SHOW_STR IP6_STR OSPF6_STR "Display routing table for ABR and ASBR\n" ) { u_int32_t adv_router; void (*showfunc) (struct vty *, struct ospf6_route *); struct ospf6_route *ro; struct prefix prefix; OSPF6_CMD_CHECK_RUNNING (); if (argc && ! 
strcmp ("detail", argv[0])) { showfunc = ospf6_route_show_detail; argc--; argv++; } else showfunc = ospf6_brouter_show; if (argc) { if ((inet_pton (AF_INET, argv[0], &adv_router)) != 1) { vty_out (vty, "Router ID is not parsable: %s%s", argv[0], VNL); return CMD_SUCCESS; } ospf6_linkstate_prefix (adv_router, 0, &prefix); ro = ospf6_route_lookup (&prefix, ospf6->brouter_table); if (!ro) { vty_out (vty, "No Route found for Router ID: %s%s", argv[0], VNL); return CMD_SUCCESS; } ospf6_route_show_detail (vty, ro); return CMD_SUCCESS; } if (showfunc == ospf6_brouter_show) ospf6_brouter_show_header (vty); for (ro = ospf6_route_head (ospf6->brouter_table); ro; ro = ospf6_route_next (ro)) (*showfunc) (vty, ro); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_border_routers, show_ipv6_ospf6_border_routers_detail_cmd, "show ipv6 ospf6 border-routers (A.B.C.D|detail)", SHOW_STR IP6_STR OSPF6_STR "Display routing table for ABR and ASBR\n" "Specify Router-ID\n" "Display Detail\n" ) DEFUN (show_ipv6_ospf6_linkstate, show_ipv6_ospf6_linkstate_cmd, "show ipv6 ospf6 linkstate", SHOW_STR IP6_STR OSPF6_STR "Display linkstate routing table\n" ) { struct listnode *node; struct ospf6_area *oa; for (ALL_LIST_ELEMENTS_RO (ospf6->area_list, node, oa)) { vty_out (vty, "%s SPF Result in Area %s%s%s", VNL, oa->name, VNL, VNL); ospf6_linkstate_table_show (vty, argc, argv, oa->spf_table); } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } ALIAS (show_ipv6_ospf6_linkstate, show_ipv6_ospf6_linkstate_router_cmd, "show ipv6 ospf6 linkstate router A.B.C.D", SHOW_STR IP6_STR OSPF6_STR "Display linkstate routing table\n" "Display Router Entry\n" "Specify Router ID as IPv4 address notation\n" ) ALIAS (show_ipv6_ospf6_linkstate, show_ipv6_ospf6_linkstate_network_cmd, "show ipv6 ospf6 linkstate network A.B.C.D A.B.C.D", SHOW_STR IP6_STR OSPF6_STR "Display linkstate routing table\n" "Display Network Entry\n" "Specify Router ID as IPv4 address notation\n" "Specify Link state ID as IPv4 address notation\n" ) 
DEFUN (show_ipv6_ospf6_linkstate_detail, show_ipv6_ospf6_linkstate_detail_cmd, "show ipv6 ospf6 linkstate detail", SHOW_STR IP6_STR OSPF6_STR "Display linkstate routing table\n" ) { const char *sargv[CMD_ARGC_MAX]; int i, sargc; struct listnode *node; struct ospf6_area *oa; /* copy argv to sargv and then append "detail" */ for (i = 0; i < argc; i++) sargv[i] = argv[i]; sargc = argc; sargv[sargc++] = "detail"; sargv[sargc] = NULL; for (ALL_LIST_ELEMENTS_RO (ospf6->area_list, node, oa)) { vty_out (vty, "%s SPF Result in Area %s%s%s", VNL, oa->name, VNL, VNL); ospf6_linkstate_table_show (vty, sargc, sargv, oa->spf_table); } vty_out (vty, "%s", VNL); return CMD_SUCCESS; } /* Install ospf related commands. */ void ospf6_init (void) { ospf6_top_init (); ospf6_area_init (); ospf6_interface_init (); ospf6_neighbor_init (); ospf6_zebra_init (); ospf6_lsa_init (); ospf6_spf_init (); ospf6_intra_init (); ospf6_asbr_init (); ospf6_abr_init (); #ifdef HAVE_SNMP ospf6_snmp_init (master); #endif /*HAVE_SNMP*/ install_node (&debug_node, config_write_ospf6_debug); install_element_ospf6_debug_message (); install_element_ospf6_debug_lsa (); install_element_ospf6_debug_interface (); install_element_ospf6_debug_neighbor (); install_element_ospf6_debug_zebra (); install_element_ospf6_debug_spf (); install_element_ospf6_debug_route (); install_element_ospf6_debug_brouter (); install_element_ospf6_debug_asbr (); install_element_ospf6_debug_abr (); install_element_ospf6_debug_flood (); install_element (VIEW_NODE, &show_version_ospf6_cmd); install_element (ENABLE_NODE, &show_version_ospf6_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_border_routers_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_border_routers_detail_cmd); install_element (ENABLE_NODE, &show_ipv6_ospf6_border_routers_cmd); install_element (ENABLE_NODE, &show_ipv6_ospf6_border_routers_detail_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_linkstate_cmd); install_element (VIEW_NODE, 
&show_ipv6_ospf6_linkstate_router_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_linkstate_network_cmd); install_element (VIEW_NODE, &show_ipv6_ospf6_linkstate_detail_cmd); install_element (ENABLE_NODE, &show_ipv6_ospf6_linkstate_cmd); install_element (ENABLE_NODE, &show_ipv6_ospf6_linkstate_router_cmd); install_element (ENABLE_NODE, &show_ipv6_ospf6_linkstate_network_cmd); install_element (ENABLE_NODE, &show_ipv6_ospf6_linkstate_detail_cmd); #define INSTALL(n,c) \ install_element (n ## _NODE, &show_ipv6_ospf6_ ## c) INSTALL (VIEW, database_cmd); INSTALL (VIEW, database_detail_cmd); INSTALL (VIEW, database_type_cmd); INSTALL (VIEW, database_type_detail_cmd); INSTALL (VIEW, database_id_cmd); INSTALL (VIEW, database_id_detail_cmd); INSTALL (VIEW, database_linkstate_id_cmd); INSTALL (VIEW, database_linkstate_id_detail_cmd); INSTALL (VIEW, database_router_cmd); INSTALL (VIEW, database_router_detail_cmd); INSTALL (VIEW, database_adv_router_cmd); INSTALL (VIEW, database_adv_router_detail_cmd); INSTALL (VIEW, database_type_id_cmd); INSTALL (VIEW, database_type_id_detail_cmd); INSTALL (VIEW, database_type_linkstate_id_cmd); INSTALL (VIEW, database_type_linkstate_id_detail_cmd); INSTALL (VIEW, database_type_router_cmd); INSTALL (VIEW, database_type_router_detail_cmd); INSTALL (VIEW, database_type_adv_router_cmd); INSTALL (VIEW, database_type_adv_router_detail_cmd); INSTALL (VIEW, database_adv_router_linkstate_id_cmd); INSTALL (VIEW, database_adv_router_linkstate_id_detail_cmd); INSTALL (VIEW, database_id_router_cmd); INSTALL (VIEW, database_id_router_detail_cmd); INSTALL (VIEW, database_type_id_router_cmd); INSTALL (VIEW, database_type_id_router_detail_cmd); INSTALL (VIEW, database_type_adv_router_linkstate_id_cmd); INSTALL (VIEW, database_type_adv_router_linkstate_id_detail_cmd); INSTALL (VIEW, database_self_originated_cmd); INSTALL (VIEW, database_self_originated_detail_cmd); INSTALL (VIEW, database_type_self_originated_cmd); INSTALL (VIEW, 
database_type_self_originated_detail_cmd); INSTALL (VIEW, database_type_id_self_originated_cmd); INSTALL (VIEW, database_type_id_self_originated_detail_cmd); INSTALL (VIEW, database_type_self_originated_linkstate_id_cmd); INSTALL (VIEW, database_type_self_originated_linkstate_id_detail_cmd); INSTALL (ENABLE, database_cmd); INSTALL (ENABLE, database_detail_cmd); INSTALL (ENABLE, database_type_cmd); INSTALL (ENABLE, database_type_detail_cmd); INSTALL (ENABLE, database_id_cmd); INSTALL (ENABLE, database_id_detail_cmd); INSTALL (ENABLE, database_linkstate_id_cmd); INSTALL (ENABLE, database_linkstate_id_detail_cmd); INSTALL (ENABLE, database_router_cmd); INSTALL (ENABLE, database_router_detail_cmd); INSTALL (ENABLE, database_adv_router_cmd); INSTALL (ENABLE, database_adv_router_detail_cmd); INSTALL (ENABLE, database_type_id_cmd); INSTALL (ENABLE, database_type_id_detail_cmd); INSTALL (ENABLE, database_type_linkstate_id_cmd); INSTALL (ENABLE, database_type_linkstate_id_detail_cmd); INSTALL (ENABLE, database_type_router_cmd); INSTALL (ENABLE, database_type_router_detail_cmd); INSTALL (ENABLE, database_type_adv_router_cmd); INSTALL (ENABLE, database_type_adv_router_detail_cmd); INSTALL (ENABLE, database_adv_router_linkstate_id_cmd); INSTALL (ENABLE, database_adv_router_linkstate_id_detail_cmd); INSTALL (ENABLE, database_id_router_cmd); INSTALL (ENABLE, database_id_router_detail_cmd); INSTALL (ENABLE, database_type_id_router_cmd); INSTALL (ENABLE, database_type_id_router_detail_cmd); INSTALL (ENABLE, database_type_adv_router_linkstate_id_cmd); INSTALL (ENABLE, database_type_adv_router_linkstate_id_detail_cmd); INSTALL (ENABLE, database_self_originated_cmd); INSTALL (ENABLE, database_self_originated_detail_cmd); INSTALL (ENABLE, database_type_self_originated_cmd); INSTALL (ENABLE, database_type_self_originated_detail_cmd); INSTALL (ENABLE, database_type_id_self_originated_cmd); INSTALL (ENABLE, database_type_id_self_originated_detail_cmd); INSTALL (ENABLE, 
database_type_self_originated_linkstate_id_cmd); INSTALL (ENABLE, database_type_self_originated_linkstate_id_detail_cmd); /* Make ospf protocol socket. */ ospf6_serv_sock (); thread_add_read (master, ospf6_receive, NULL, ospf6_sock); } void ospf6_clean (void) { if (ospf6->route_table) ospf6_route_remove_all (ospf6->route_table); if (ospf6->brouter_table) ospf6_route_remove_all (ospf6->brouter_table); }
gpl-2.0
alexfeinman/nv_tegra.19.2
arch/cris/arch-v10/kernel/kgdb.c
28
53171
/*!************************************************************************** *! *! FILE NAME : kgdb.c *! *! DESCRIPTION: Implementation of the gdb stub with respect to ETRAX 100. *! It is a mix of arch/m68k/kernel/kgdb.c and cris_stub.c. *! *!--------------------------------------------------------------------------- *! HISTORY *! *! DATE NAME CHANGES *! ---- ---- ------- *! Apr 26 1999 Hendrik Ruijter Initial version. *! May 6 1999 Hendrik Ruijter Removed call to strlen in libc and removed *! struct assignment as it generates calls to *! memcpy in libc. *! Jun 17 1999 Hendrik Ruijter Added gdb 4.18 support. 'X', 'qC' and 'qL'. *! Jul 21 1999 Bjorn Wesen eLinux port *! *! $Log: kgdb.c,v $ *! Revision 1.5 2004/10/07 13:59:08 starvik *! Corrected call to set_int_vector *! *! Revision 1.4 2003/04/09 05:20:44 starvik *! Merge of Linux 2.5.67 *! *! Revision 1.3 2003/01/21 19:11:08 starvik *! Modified include path for new dir layout *! *! Revision 1.2 2002/11/19 14:35:24 starvik *! Changes from linux 2.4 *! Changed struct initializer syntax to the currently prefered notation *! *! Revision 1.1 2001/12/17 13:59:27 bjornw *! Initial revision *! *! Revision 1.6 2001/10/09 13:10:03 matsfg *! Added $ on registers and removed some underscores *! *! Revision 1.5 2001/04/17 13:58:39 orjanf *! * Renamed CONFIG_KGDB to CONFIG_ETRAX_KGDB. *! *! Revision 1.4 2001/02/23 13:45:19 bjornw *! config.h check *! *! Revision 1.3 2001/01/31 18:08:23 orjanf *! Removed kgdb_handle_breakpoint from being the break 8 handler. *! *! Revision 1.2 2001/01/12 14:22:25 orjanf *! Updated kernel debugging support to work with ETRAX 100LX. *! *! Revision 1.1 2000/07/10 16:25:21 bjornw *! Initial revision *! *! Revision 1.1.1.1 1999/12/03 14:57:31 bjornw *! * Initial version of arch/cris, the latest CRIS architecture with an MMU. *! Mostly copied from arch/etrax100 with appropriate renames of files. *! The mm/ subdir is copied from arch/i386. *! This does not compile yet at all. *! *! *! 
Revision 1.4 1999/07/22 17:25:25 bjornw *! Dont wait for + in putpacket if we havent hit the initial breakpoint yet. Added a kgdb_init function which sets up the break and irq vectors. *! *! Revision 1.3 1999/07/21 19:51:18 bjornw *! Check if the interrupting char is a ctrl-C, ignore otherwise. *! *! Revision 1.2 1999/07/21 18:09:39 bjornw *! Ported to eLinux architecture, and added some kgdb documentation. *! *! *!--------------------------------------------------------------------------- *! *! $Id: kgdb.c,v 1.5 2004/10/07 13:59:08 starvik Exp $ *! *! (C) Copyright 1999, Axis Communications AB, LUND, SWEDEN *! *!**************************************************************************/ /* @(#) cris_stub.c 1.3 06/17/99 */ /* * kgdb usage notes: * ----------------- * * If you select CONFIG_ETRAX_KGDB in the configuration, the kernel will be * built with different gcc flags: "-g" is added to get debug infos, and * "-fomit-frame-pointer" is omitted to make debugging easier. Since the * resulting kernel will be quite big (approx. > 7 MB), it will be stripped * before compresion. Such a kernel will behave just as usually, except if * given a "debug=<device>" command line option. (Only serial devices are * allowed for <device>, i.e. no printers or the like; possible values are * machine depedend and are the same as for the usual debug device, the one * for logging kernel messages.) If that option is given and the device can be * initialized, the kernel will connect to the remote gdb in trap_init(). The * serial parameters are fixed to 8N1 and 115200 bps, for easyness of * implementation. * * To start a debugging session, start that gdb with the debugging kernel * image (the one with the symbols, vmlinux.debug) named on the command line. * This file will be used by gdb to get symbol and debugging infos about the * kernel. 
Next, select remote debug mode by * target remote <device> * where <device> is the name of the serial device over which the debugged * machine is connected. Maybe you have to adjust the baud rate by * set remotebaud <rate> * or also other parameters with stty: * shell stty ... </dev/... * If the kernel to debug has already booted, it waited for gdb and now * connects, and you'll see a breakpoint being reported. If the kernel isn't * running yet, start it now. The order of gdb and the kernel doesn't matter. * Another thing worth knowing about in the getting-started phase is how to * debug the remote protocol itself. This is activated with * set remotedebug 1 * gdb will then print out each packet sent or received. You'll also get some * messages about the gdb stub on the console of the debugged machine. * * If all that works, you can use lots of the usual debugging techniques on * the kernel, e.g. inspecting and changing variables/memory, setting * breakpoints, single stepping and so on. It's also possible to interrupt the * debugged kernel by pressing C-c in gdb. Have fun! :-) * * The gdb stub is entered (and thus the remote gdb gets control) in the * following situations: * * - If breakpoint() is called. This is just after kgdb initialization, or if * a breakpoint() call has been put somewhere into the kernel source. * (Breakpoints can of course also be set the usual way in gdb.) * In eLinux, we call breakpoint() in init/main.c after IRQ initialization. * * - If there is a kernel exception, i.e. bad_super_trap() or die_if_kernel() * are entered. All the CPU exceptions are mapped to (more or less..., see * the hard_trap_info array below) appropriate signal, which are reported * to gdb. die_if_kernel() is usually called after some kind of access * error and thus is reported as SIGSEGV. * * - When panic() is called. This is reported as SIGABRT. * * - If C-c is received over the serial line, which is treated as * SIGINT. 
* * Of course, all these signals are just faked for gdb, since there is no * signal concept as such for the kernel. It also isn't possible --obviously-- * to set signal handlers from inside gdb, or restart the kernel with a * signal. * * Current limitations: * * - While the kernel is stopped, interrupts are disabled for safety reasons * (i.e., variables not changing magically or the like). But this also * means that the clock isn't running anymore, and that interrupts from the * hardware may get lost/not be served in time. This can cause some device * errors... * * - When single-stepping, only one instruction of the current thread is * executed, but interrupts are allowed for that time and will be serviced * if pending. Be prepared for that. * * - All debugging happens in kernel virtual address space. There's no way to * access physical memory not mapped in kernel space, or to access user * space. A way to work around this is using get_user_long & Co. in gdb * expressions, but only for the current process. * * - Interrupting the kernel only works if interrupts are currently allowed, * and the interrupt of the serial line isn't blocked by some other means * (IPL too high, disabled, ...) * * - The gdb stub is currently not reentrant, i.e. errors that happen therein * (e.g. accessing invalid memory) may not be caught correctly. This could * be removed in future by introducing a stack of struct registers. * */ /* * To enable debugger support, two things need to happen. One, a * call to kgdb_init() is necessary in order to allow any breakpoints * or error conditions to be properly intercepted and reported to gdb. * Two, a breakpoint needs to be generated to begin communication. This * is most easily accomplished by a call to breakpoint(). 
* * The following gdb commands are supported: * * command function Return value * * g return the value of the CPU registers hex data or ENN * G set the value of the CPU registers OK or ENN * * mAA..AA,LLLL Read LLLL bytes at address AA..AA hex data or ENN * MAA..AA,LLLL: Write LLLL bytes at address AA.AA OK or ENN * * c Resume at current address SNN ( signal NN) * cAA..AA Continue at address AA..AA SNN * * s Step one instruction SNN * sAA..AA Step one instruction from AA..AA SNN * * k kill * * ? What was the last sigval ? SNN (signal NN) * * bBB..BB Set baud rate to BB..BB OK or BNN, then sets * baud rate * * All commands and responses are sent with a packet which includes a * checksum. A packet consists of * * $<packet info>#<checksum>. * * where * <packet info> :: <characters representing the command or response> * <checksum> :: < two hex digits computed as modulo 256 sum of <packetinfo>> * * When a packet is received, it is first acknowledged with either '+' or '-'. * '+' indicates a successful transfer. '-' indicates a failed transfer. * * Example: * * Host: Reply: * $m0,10#2a +$00010203040506070809101112131415#42 * */ #include <linux/string.h> #include <linux/signal.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/linkage.h> #include <asm/setup.h> #include <asm/ptrace.h> #include <asm/arch/svinto.h> #include <asm/irq.h> static int kgdb_started = 0; /********************************* Register image ****************************/ /* Use the order of registers as defined in "AXIS ETRAX CRIS Programmer's Reference", p. 1-1, with the additional register definitions of the ETRAX 100LX in cris-opc.h. There are 16 general 32-bit registers, R0-R15, where R14 is the stack pointer, SP, and R15 is the program counter, PC. There are 16 special registers, P0-P15, where three of the unimplemented registers, P0, P4 and P8, are reserved as zero-registers. A read from any of these registers returns zero and a write has no effect. 
*/

/*
 * Snapshot of the CPU state, filled in and consumed by the assembler entry
 * code, which addresses the fields by the literal byte offsets noted below.
 * Those offsets leave no gaps between members (e.g. the 32-bit mof at 0x46
 * directly follows the 16-bit ccr at 0x44), so the layout relies on the
 * compiler inserting no alignment padding.  NOTE(review): assumed fine for
 * this target since the inline asm uses these exact offsets; confirm if the
 * compiler or ABI ever changes.
 */
typedef
struct register_image
{
	                            /* Offset */
	unsigned int   r0;          /* 0x00 */
	unsigned int   r1;          /* 0x04 */
	unsigned int   r2;          /* 0x08 */
	unsigned int   r3;          /* 0x0C */
	unsigned int   r4;          /* 0x10 */
	unsigned int   r5;          /* 0x14 */
	unsigned int   r6;          /* 0x18 */
	unsigned int   r7;          /* 0x1C */
	unsigned int   r8;          /* 0x20  Frame pointer */
	unsigned int   r9;          /* 0x24 */
	unsigned int   r10;         /* 0x28 */
	unsigned int   r11;         /* 0x2C */
	unsigned int   r12;         /* 0x30 */
	unsigned int   r13;         /* 0x34 */
	unsigned int   sp;          /* 0x38  Stack pointer (R14) */
	unsigned int   pc;          /* 0x3C  Program counter (R15) */

	unsigned char  p0;          /* 0x40  8-bit zero-register */
	unsigned char  vr;          /* 0x41  Version register */

	unsigned short p4;          /* 0x42  16-bit zero-register */
	unsigned short ccr;         /* 0x44  Condition code register */

	unsigned int   mof;         /* 0x46  Multiply overflow register */

	unsigned int   p8;          /* 0x4A  32-bit zero-register */
	unsigned int   ibr;         /* 0x4E  Interrupt base register */
	unsigned int   irp;         /* 0x52  Interrupt return pointer */
	unsigned int   srp;         /* 0x56  Subroutine return pointer */
	unsigned int   bar;         /* 0x5A  Breakpoint address register */
	unsigned int   dccr;        /* 0x5E  Double condition code register */
	unsigned int   brp;         /* 0x62  Breakpoint return pointer (pc in caller) */
	unsigned int   usp;         /* 0x66  User mode stack pointer */
} registers;

/************** Prototypes for local library functions ***********************/

/* Copy of strcpy from libc. */
static char *gdb_cris_strcpy (char *s1, const char *s2);

/* Copy of strlen from libc. */
static int gdb_cris_strlen (const char *s);

/* Copy of memchr from libc. */
static void *gdb_cris_memchr (const void *s, int c, int n);

/* Copy of strtol from libc. Does only support base 16. */
static int gdb_cris_strtol (const char *s, char **endptr, int base);

/********************** Prototypes for local functions. **********************/

/* Copy the content of a register image into another. The size n is the size
   of the register image. Due to struct assignment generation of memcpy in
   libc.
*/ static void copy_registers (registers *dptr, registers *sptr, int n); /* Copy the stored registers from the stack. Put the register contents of thread thread_id in the struct reg. */ static void copy_registers_from_stack (int thread_id, registers *reg); /* Copy the registers to the stack. Put the register contents of thread thread_id from struct reg to the stack. */ static void copy_registers_to_stack (int thread_id, registers *reg); /* Write a value to a specified register regno in the register image of the current thread. */ static int write_register (int regno, char *val); /* Write a value to a specified register in the stack of a thread other than the current thread. */ static write_stack_register (int thread_id, int regno, char *valptr); /* Read a value from a specified register in the register image. Returns the status of the read operation. The register value is returned in valptr. */ static int read_register (char regno, unsigned int *valptr); /* Serial port, reads one character. ETRAX 100 specific. from debugport.c */ int getDebugChar (void); /* Serial port, writes one character. ETRAX 100 specific. from debugport.c */ void putDebugChar (int val); void enableDebugIRQ (void); /* Returns the character equivalent of a nibble, bit 7, 6, 5, and 4 of a byte, represented by int x. */ static char highhex (int x); /* Returns the character equivalent of a nibble, bit 3, 2, 1, and 0 of a byte, represented by int x. */ static char lowhex (int x); /* Returns the integer equivalent of a hexadecimal character. */ static int hex (char ch); /* Convert the memory, pointed to by mem into hexadecimal representation. Put the result in buf, and return a pointer to the last character in buf (null). */ static char *mem2hex (char *buf, unsigned char *mem, int count); /* Convert the array, in hexadecimal representation, pointed to by buf into binary representation. Put the result in mem, and return a pointer to the character after the last byte written. 
*/
static unsigned char *hex2mem (unsigned char *mem, char *buf, int count);

/* Put the content of the array, in binary representation, pointed to by buf
   into memory pointed to by mem, and return a pointer to the character after
   the last byte written. */
static unsigned char *bin2mem (unsigned char *mem, unsigned char *buf, int count);

/* Await the sequence $<data>#<checksum> and store <data> in the array buffer
   returned. */
static void getpacket (char *buffer);

/* Send $<data>#<checksum> from the <data> in the array buffer. */
static void putpacket (char *buffer);

/* Build and send a response packet in order to inform the host the
   stub is stopped. */
static void stub_is_stopped (int sigval);

/* All expected commands are sent from remote.c. Send a response according
   to the description in remote.c. */
static void handle_exception (int sigval);

/* Performs a complete re-start from scratch. ETRAX specific. */
static void kill_restart (void);

/******************** Prototypes for global functions. ***********************/

/* The string str is prepended with the GDB printout token and sent. */
void putDebugString (const unsigned char *str, int length); /* used by etrax100ser.c */

/* The hook for both static (compiled) and dynamic breakpoints set by GDB.
   ETRAX 100 specific. */
void handle_breakpoint (void); /* used by irq.c */

/* The hook for an interrupt generated by GDB. ETRAX 100 specific. */
void handle_interrupt (void); /* used by irq.c */

/* A static breakpoint to be used at startup. */
void breakpoint (void); /* called by init/main.c */

/* From osys_int.c, executing_task contains the number of the current
   executing task in osys. Does not know of object-oriented threads. */
extern unsigned char executing_task;

/* The number of characters used for a 64 bit thread identifier. */
#define HEXCHARS_IN_THREAD_ID 16

/* Avoid warning as the internal_stack is not used in the C-code.
*/

/* Reference otherwise-unused objects so the compiler does not warn about
   them (internal_stack, for instance, is only touched from inline asm). */
#define USEDVAR(name)    { if (name) { ; } }
#define USEDFUN(name)    { void (*pf)(void) = (void *)name; USEDVAR(pf) }

/********************************** Packet I/O ******************************/
/* BUFMAX defines the maximum number of characters in
   inbound/outbound buffers */
#define BUFMAX 512

/* Run-length encoding maximum length. Send 64 at most. */
#define RUNLENMAX 64

/* Definition of all valid hexadecimal characters */
static const char hexchars[] = "0123456789abcdef";

/* The inbound/outbound buffers used in packet I/O */
static char remcomInBuffer[BUFMAX];
static char remcomOutBuffer[BUFMAX];

/* Error and warning messages.  The enum values index error_message[]. */
enum error_type
{
	SUCCESS, E01, E02, E03, E04, E05, E06, E07
};
static char *error_message[] =
{
	"",
	"E01 Set current or general thread - H[c,g] - internal error.",
	"E02 Change register content - P - cannot change read-only register.",
	"E03 Thread is not alive.", /* T, not used. */
	"E04 The command is not supported - [s,C,S,!,R,d,r] - internal error.",
	"E05 Change register content - P - the register is not implemented..",
	"E06 Change memory content - M - internal error.",
	"E07 Change register content - P - the register is not stored on the stack"
};

/********************************* Register image ****************************/
/* Use the order of registers as defined in "AXIS ETRAX CRIS Programmer's
   Reference", p. 1-1, with the additional register definitions of the
   ETRAX 100LX in cris-opc.h. There are 16 general 32-bit registers, R0-R15,
   where R14 is the stack pointer, SP, and R15 is the program counter, PC.
   There are 16 special registers, P0-P15, where three of the unimplemented
   registers, P0, P4 and P8, are reserved as zero-registers. A read from
   any of these registers returns zero and a write has no effect. */
enum register_name
{
	R0, R1, R2, R3,
	R4, R5, R6, R7,
	R8, R9, R10, R11,
	R12, R13, SP, PC,
	P0, VR, P2, P3,
	P4, CCR, P6, MOF,
	P8, IBR, IRP, SRP,
	BAR, DCCR, BRP, USP
};

/* The register sizes of the registers in register_name.
   An unimplemented register is designated by size 0 in this array. */
static int register_size[] =
{
	4, 4, 4, 4,
	4, 4, 4, 4,
	4, 4, 4, 4,
	4, 4, 4, 4,
	1, 1, 0, 0,
	2, 2, 0, 4,
	4, 4, 4, 4,
	4, 4, 4, 4
};

/* Contains the register image of the executing thread in the assembler
   part of the code in order to avoid horrible addressing modes. */
static registers reg;

/* FIXME: Should this be used? Delete otherwise. */
/* Contains the assumed consistency state of the register image. Uses the
   enum error_type for state information. */
static int consistency_status = SUCCESS;

/********************************** Handle exceptions ************************/
/* The variable reg contains the register image associated with the
   current_thread_c variable. It is a complete register image created at
   entry. The reg_g contains a register image of a task where the general
   registers are taken from the stack and all special registers are taken
   from the executing task. It is associated with current_thread_g and used
   in order to provide access mainly for 'g', 'G' and 'P'.
*/

/* Need two task id pointers in order to handle Hct and Hgt commands. */
static int current_thread_c = 0;
static int current_thread_g = 0;

/* Need two register images in order to handle Hct and Hgt commands. The
   variable reg_g is in addition to reg above. */
static registers reg_g;

/********************************** Breakpoint *******************************/
/* Use an internal stack in the breakpoint and interrupt response routines.
   (The asm entry code sets SP to internal_stack + 1020.) */
#define INTERNAL_STACK_SIZE 1024
static char internal_stack[INTERNAL_STACK_SIZE];

/* Due to the breakpoint return pointer, a state variable is needed to keep
   track of whether it is a static (compiled) or dynamic (gdb-invoked)
   breakpoint to be handled. A static breakpoint uses the content of register
   BRP as it is whereas a dynamic breakpoint requires subtraction with 2 in
   order to execute the instruction. The first breakpoint is static.
*/ static unsigned char is_dyn_brkp = 0; /********************************* String library ****************************/ /* Single-step over library functions creates trap loops. */ /* Copy char s2[] to s1[]. */ static char* gdb_cris_strcpy (char *s1, const char *s2) { char *s = s1; for (s = s1; (*s++ = *s2++) != '\0'; ) ; return (s1); } /* Find length of s[]. */ static int gdb_cris_strlen (const char *s) { const char *sc; for (sc = s; *sc != '\0'; sc++) ; return (sc - s); } /* Find first occurrence of c in s[n]. */ static void* gdb_cris_memchr (const void *s, int c, int n) { const unsigned char uc = c; const unsigned char *su; for (su = s; 0 < n; ++su, --n) if (*su == uc) return ((void *)su); return (NULL); } /******************************* Standard library ****************************/ /* Single-step over library functions creates trap loops. */ /* Convert string to long. */ static int gdb_cris_strtol (const char *s, char **endptr, int base) { char *s1; char *sd; int x = 0; for (s1 = (char*)s; (sd = gdb_cris_memchr(hexchars, *s1, base)) != NULL; ++s1) x = x * base + (sd - hexchars); if (endptr) { /* Unconverted suffix is stored in endptr unless endptr is NULL. */ *endptr = s1; } return x; } int double_this(int x) { return 2 * x; } /********************************* Register image ****************************/ /* Copy the content of a register image into another. The size n is the size of the register image. Due to struct assignment generation of memcpy in libc. */ static void copy_registers (registers *dptr, registers *sptr, int n) { unsigned char *dreg; unsigned char *sreg; for (dreg = (unsigned char*)dptr, sreg = (unsigned char*)sptr; n > 0; n--) *dreg++ = *sreg++; } #ifdef PROCESS_SUPPORT /* Copy the stored registers from the stack. Put the register contents of thread thread_id in the struct reg. 
*/

/* Fill *regptr from the saved state on thread thread_id's stack.  SP is
   reconstructed as the saved stack location itself; PC/DCCR/SRP come from
   the stack frame.
   NOTE(review): this loop copies s->r[13] into the image's r0 slot and so
   on downward, while copy_registers_to_stack below copies the image's r0
   into d->r[0] and so on upward -- the two orderings disagree, so a
   round-trip would scramble R0-R13.  One of them looks wrong; confirm the
   intended layout of stack_registers.r[] (declared elsewhere, only under
   PROCESS_SUPPORT). */
static void
copy_registers_from_stack (int thread_id, registers *regptr)
{
	int j;
	stack_registers *s = (stack_registers *)stack_list[thread_id];
	unsigned int *d = (unsigned int *)regptr;

	for (j = 13; j >= 0; j--)
		*d++ = s->r[j];

	regptr->sp = (unsigned int)stack_list[thread_id];
	regptr->pc = s->pc;
	regptr->dccr = s->dccr;
	regptr->srp = s->srp;
}

/* Copy the registers to the stack. Put the register contents of thread
   thread_id from struct reg to the stack.  (SP itself is not written
   back.)  See the ordering note on copy_registers_from_stack above. */
static void
copy_registers_to_stack (int thread_id, registers *regptr)
{
	int i;
	stack_registers *d = (stack_registers *)stack_list[thread_id];
	unsigned int *s = (unsigned int *)regptr;

	for (i = 0; i < 14; i++) {
		d->r[i] = *s++;
	}
	d->pc = regptr->pc;
	d->dccr = regptr->dccr;
	d->srp = regptr->srp;
}
#endif

/* Write a value to a specified register in the register image of the current
   thread. Returns status code SUCCESS, E02 or E05.  val points at the hex
   string to decode.  The pointer arithmetic below relies on struct
   register_image having no padding (see its offset comments). */
static int
write_register (int regno, char *val)
{
	int status = SUCCESS;
	registers *current_reg = &reg;

	if (regno >= R0 && regno <= PC) {
		/* 32-bit register with simple offset. */
		hex2mem ((unsigned char *)current_reg + regno * sizeof(unsigned int),
			 val, sizeof(unsigned int));
	}
	else if (regno == P0 || regno == VR || regno == P4 || regno == P8) {
		/* Do not support read-only registers. */
		status = E02;
	}
	else if (regno == CCR) {
		/* 16 bit register with complex offset.  (P4 is read-only, P6 is
		   not implemented, and P7 (MOF) is 32 bits in ETRAX 100LX.) */
		hex2mem ((unsigned char *)&(current_reg->ccr) + (regno-CCR) * sizeof(unsigned short),
			 val, sizeof(unsigned short));
	}
	else if (regno >= MOF && regno <= USP) {
		/* 32 bit register with complex offset.  (P8 has been taken care
		   of above.)  For regno == MOF this addresses
		   &ibr + (MOF-IBR)*4, i.e. two words below ibr, which is mof in
		   the padding-free layout. */
		hex2mem ((unsigned char *)&(current_reg->ibr) + (regno-IBR) * sizeof(unsigned int),
			 val, sizeof(unsigned int));
	}
	else {
		/* Do not support nonexisting or unimplemented registers
		   (P2, P3, and P6). */
		status = E05;
	}
	return status;
}

#ifdef PROCESS_SUPPORT
/* Write a value to a specified register in the stack of a thread other
   than the current thread. Returns status code SUCCESS or E07. */
static int
write_stack_register (int thread_id, int regno, char *valptr)
{
	int status = SUCCESS;
	stack_registers *d = (stack_registers *)stack_list[thread_id];
	unsigned int val;

	/* Decode the hex value once up front. */
	hex2mem ((unsigned char *)&val, valptr, sizeof(unsigned int));
	if (regno >= R0 && regno < SP) {
		d->r[regno] = val;
	}
	else if (regno == SP) {
		stack_list[thread_id] = val;
	}
	else if (regno == PC) {
		d->pc = val;
	}
	else if (regno == SRP) {
		d->srp = val;
	}
	else if (regno == DCCR) {
		d->dccr = val;
	}
	else {
		/* Do not support registers in the current thread. */
		status = E07;
	}
	return status;
}
#endif

/* Read a value from a specified register in the register image. Returns the
   status of the read; the register value is stored through valptr. Should
   check consistency_status after a call which may be E05 after changes in
   the implementation.
   NOTE(review): regno is declared char here but int in write_register;
   callers pass small enum values so this is harmless, but the types could
   be unified. */
static int
read_register (char regno, unsigned int *valptr)
{
	registers *current_reg = &reg;

	if (regno >= R0 && regno <= PC) {
		/* 32-bit register with simple offset. */
		*valptr = *(unsigned int *)((char *)current_reg
					    + regno * sizeof(unsigned int));
		return SUCCESS;
	}
	else if (regno == P0 || regno == VR) {
		/* 8 bit register with complex offset. */
		*valptr = (unsigned int)(*(unsigned char *)
					 ((char *)&(current_reg->p0) + (regno-P0) * sizeof(char)));
		return SUCCESS;
	}
	else if (regno == P4 || regno == CCR) {
		/* 16 bit register with complex offset. */
		*valptr = (unsigned int)(*(unsigned short *)
					 ((char *)&(current_reg->p4) + (regno-P4) * sizeof(unsigned short)));
		return SUCCESS;
	}
	else if (regno >= MOF && regno <= USP) {
		/* 32 bit register with complex offset; for regno == MOF this is
		   &p8 - 4, i.e. the mof member in the padding-free layout. */
		*valptr = *(unsigned int *)((char *)&(current_reg->p8)
					    + (regno-P8) * sizeof(unsigned int));
		return SUCCESS;
	}
	else {
		/* Do not support nonexisting or unimplemented registers
		   (P2, P3, and P6). */
		consistency_status = E05;
		return E05;
	}
}

/********************************** Packet I/O ******************************/
/* Returns the character equivalent of a nibble, bit 7, 6, 5, and 4 of a byte,
   represented by int x. */
static inline char
highhex(int x)
{
	return hexchars[(x >> 4) & 0xf];
}

/* Returns the character equivalent of a nibble, bit 3, 2, 1, and 0 of a byte,
   represented by int x. */
static inline char
lowhex(int x)
{
	return hexchars[x & 0xf];
}

/* Returns the integer equivalent of a hexadecimal character, or -1 when ch
   is not a hex digit. */
static int
hex (char ch)
{
	if ((ch >= 'a') && (ch <= 'f'))
		return (ch - 'a' + 10);
	if ((ch >= '0') && (ch <= '9'))
		return (ch - '0');
	if ((ch >= 'A') && (ch <= 'F'))
		return (ch - 'A' + 10);
	return (-1);
}

/* Convert the memory, pointed to by mem into hexadecimal representation.
   Put the result in buf, and return a pointer to the last character
   in buf (null). */

/* FIXME: apparently unused debug switch in the visible code; confirm
   before deleting. */
static int do_printk = 0;

static char *
mem2hex(char *buf, unsigned char *mem, int count)
{
	int i;
	int ch;

	if (mem == NULL) {
		/* Bogus read from m0. FIXME: What constitutes a valid address? */
		for (i = 0; i < count; i++) {
			*buf++ = '0';
			*buf++ = '0';
		}
	} else {
		/* Valid mem address. */
		for (i = 0; i < count; i++) {
			ch = *mem++;
			*buf++ = highhex (ch);
			*buf++ = lowhex (ch);
		}
	}

	/* Terminate properly. */
	*buf = '\0';
	return (buf);
}

/* Convert the array, in hexadecimal representation, pointed to by buf into
   binary representation. Put the result in mem, and return a pointer to
   the character after the last byte written.  No bounds check on mem. */
static unsigned char*
hex2mem (unsigned char *mem, char *buf, int count)
{
	int i;
	unsigned char ch;

	for (i = 0; i < count; i++) {
		ch = hex (*buf++) << 4;
		ch = ch + hex (*buf++);
		*mem++ = ch;
	}
	return (mem);
}

/* Put the content of the array, in binary representation, pointed to by buf
   into memory pointed to by mem, and return a pointer to the character after
   the last byte written.
   Gdb will escape $, #, and the escape char (0x7d).
*/
static unsigned char*
bin2mem (unsigned char *mem, unsigned char *buf, int count)
{
	int i;
	unsigned char *next;

	/* count is the number of OUTPUT bytes; escaped input pairs consume
	   two input bytes per output byte, so buf may advance further. */
	for (i = 0; i < count; i++) {
		/* Check for any escaped characters. Be paranoid and
		   only unescape chars that should be escaped. */
		if (*buf == 0x7d) {
			next = buf + 1;
			if (*next == 0x3 || *next == 0x4 || *next == 0x5D) {
				/* #, $, ESC -- gdb transmits these as 0x7d
				   followed by (char XOR 0x20); adding 0x20
				   restores 0x23 '#', 0x24 '$' and 0x7d. */
				buf++;
				*buf += 0x20;
			}
		}
		*mem++ = *buf++;
	}
	return (mem);
}

/* Await the sequence $<data>#<checksum> and store <data> in the array
   buffer.  Acknowledges each attempt with '+' (good checksum) or '-'
   (bad checksum) and loops until a packet is received intact. */
static void
getpacket (char *buffer)
{
	unsigned char checksum;
	unsigned char xmitcsum;
	int i;
	int count;
	char ch;

	do {
		while ((ch = getDebugChar ()) != '$')
			/* Wait for the start character $ and
			   ignore all other characters */;
		checksum = 0;
		/* Start with an impossible transmitted checksum so a packet
		   with no '#' terminator forces another iteration. */
		xmitcsum = -1;
		count = 0;

		/* Read until a # or the end of the buffer is reached */
		while (count < BUFMAX) {
			ch = getDebugChar ();
			if (ch == '#')
				break;
			checksum = checksum + ch;
			buffer[count] = ch;
			count = count + 1;
		}
		buffer[count] = '\0';

		if (ch == '#') {
			xmitcsum = hex (getDebugChar ()) << 4;
			xmitcsum += hex (getDebugChar ());
			if (checksum != xmitcsum) {
				/* Wrong checksum */
				putDebugChar ('-');
			}
			else {
				/* Correct checksum */
				putDebugChar ('+');
				/* If sequence characters are received,
				   reply with them */
				if (buffer[2] == ':') {
					putDebugChar (buffer[0]);
					putDebugChar (buffer[1]);
					/* Remove the sequence characters
					   from the buffer */
					count = gdb_cris_strlen (buffer);
					for (i = 3; i <= count; i++)
						buffer[i - 3] = buffer[i];
				}
			}
		}
	} while (checksum != xmitcsum);
}

/* Send $<data>#<checksum> from the <data> in the array buffer.
*/ static void putpacket(char *buffer) { int checksum; int runlen; int encode; do { char *src = buffer; putDebugChar ('$'); checksum = 0; while (*src) { /* Do run length encoding */ putDebugChar (*src); checksum += *src; runlen = 0; while (runlen < RUNLENMAX && *src == src[runlen]) { runlen++; } if (runlen > 3) { /* Got a useful amount */ putDebugChar ('*'); checksum += '*'; encode = runlen + ' ' - 4; putDebugChar (encode); checksum += encode; src += runlen; } else { src++; } } putDebugChar ('#'); putDebugChar (highhex (checksum)); putDebugChar (lowhex (checksum)); } while(kgdb_started && (getDebugChar() != '+')); } /* The string str is prepended with the GDB printout token and sent. Required in traditional implementations. */ void putDebugString (const unsigned char *str, int length) { remcomOutBuffer[0] = 'O'; mem2hex(&remcomOutBuffer[1], (unsigned char *)str, length); putpacket(remcomOutBuffer); } /********************************** Handle exceptions ************************/ /* Build and send a response packet in order to inform the host the stub is stopped. TAAn...:r...;n...:r...;n...:r...; AA = signal number n... = register number (hex) r... = register contents n... = `thread' r... = thread process ID. This is a hex integer. n... = other string not starting with valid hex digit. gdb should ignore this n,r pair and go on to the next. This way we can extend the protocol. */ static void stub_is_stopped(int sigval) { char *ptr = remcomOutBuffer; int regno; unsigned int reg_cont; int status; /* Send trap type (converted to signal) */ *ptr++ = 'T'; *ptr++ = highhex (sigval); *ptr++ = lowhex (sigval); /* Send register contents. We probably only need to send the * PC, frame pointer and stack pointer here. Other registers will be * explicitely asked for. But for now, send all. */ for (regno = R0; regno <= USP; regno++) { /* Store n...:r...; for the registers in the buffer. 
*/ status = read_register (regno, &reg_cont); if (status == SUCCESS) { *ptr++ = highhex (regno); *ptr++ = lowhex (regno); *ptr++ = ':'; ptr = mem2hex(ptr, (unsigned char *)&reg_cont, register_size[regno]); *ptr++ = ';'; } } #ifdef PROCESS_SUPPORT /* Store the registers of the executing thread. Assume that both step, continue, and register content requests are with respect to this thread. The executing task is from the operating system scheduler. */ current_thread_c = executing_task; current_thread_g = executing_task; /* A struct assignment translates into a libc memcpy call. Avoid all libc functions in order to prevent recursive break points. */ copy_registers (&reg_g, &reg, sizeof(registers)); /* Store thread:r...; with the executing task TID. */ gdb_cris_strcpy (&remcomOutBuffer[pos], "thread:"); pos += gdb_cris_strlen ("thread:"); remcomOutBuffer[pos++] = highhex (executing_task); remcomOutBuffer[pos++] = lowhex (executing_task); gdb_cris_strcpy (&remcomOutBuffer[pos], ";"); #endif /* null-terminate and send it off */ *ptr = 0; putpacket (remcomOutBuffer); } /* All expected commands are sent from remote.c. Send a response according to the description in remote.c. */ static void handle_exception (int sigval) { /* Avoid warning of not used. */ USEDFUN(handle_exception); USEDVAR(internal_stack[0]); /* Send response. */ stub_is_stopped (sigval); for (;;) { remcomOutBuffer[0] = '\0'; getpacket (remcomInBuffer); switch (remcomInBuffer[0]) { case 'g': /* Read registers: g Success: Each byte of register data is described by two hex digits. Registers are in the internal order for GDB, and the bytes in a register are in the same order the machine uses. Failure: void. */ { #ifdef PROCESS_SUPPORT /* Use the special register content in the executing thread. */ copy_registers (&reg_g, &reg, sizeof(registers)); /* Replace the content available on the stack. 
*/ if (current_thread_g != executing_task) { copy_registers_from_stack (current_thread_g, &reg_g); } mem2hex ((unsigned char *)remcomOutBuffer, (unsigned char *)&reg_g, sizeof(registers)); #else mem2hex(remcomOutBuffer, (char *)&reg, sizeof(registers)); #endif } break; case 'G': /* Write registers. GXX..XX Each byte of register data is described by two hex digits. Success: OK Failure: void. */ #ifdef PROCESS_SUPPORT hex2mem ((unsigned char *)&reg_g, &remcomInBuffer[1], sizeof(registers)); if (current_thread_g == executing_task) { copy_registers (&reg, &reg_g, sizeof(registers)); } else { copy_registers_to_stack(current_thread_g, &reg_g); } #else hex2mem((char *)&reg, &remcomInBuffer[1], sizeof(registers)); #endif gdb_cris_strcpy (remcomOutBuffer, "OK"); break; case 'P': /* Write register. Pn...=r... Write register n..., hex value without 0x, with value r..., which contains a hex value without 0x and two hex digits for each byte in the register (target byte order). P1f=11223344 means set register 31 to 44332211. Success: OK Failure: E02, E05 */ { char *suffix; int regno = gdb_cris_strtol (&remcomInBuffer[1], &suffix, 16); int status; #ifdef PROCESS_SUPPORT if (current_thread_g != executing_task) status = write_stack_register (current_thread_g, regno, suffix+1); else #endif status = write_register (regno, suffix+1); switch (status) { case E02: /* Do not support read-only registers. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E02]); break; case E05: /* Do not support non-existing registers. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E05]); break; case E07: /* Do not support non-existing registers on the stack. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E07]); break; default: /* Valid register number. */ gdb_cris_strcpy (remcomOutBuffer, "OK"); break; } } break; case 'm': /* Read from memory. mAA..AA,LLLL AA..AA is the address and LLLL is the length. Success: XX..XX is the memory content. 
Can be fewer bytes than requested if only part of the data may be read. m6000120a,6c means retrieve 108 byte from base address 6000120a. Failure: void. */ { char *suffix; unsigned char *addr = (unsigned char *)gdb_cris_strtol(&remcomInBuffer[1], &suffix, 16); int length = gdb_cris_strtol(suffix+1, 0, 16); mem2hex(remcomOutBuffer, addr, length); } break; case 'X': /* Write to memory. XAA..AA,LLLL:XX..XX AA..AA is the start address, LLLL is the number of bytes, and XX..XX is the binary data. Success: OK Failure: void. */ case 'M': /* Write to memory. MAA..AA,LLLL:XX..XX AA..AA is the start address, LLLL is the number of bytes, and XX..XX is the hexadecimal data. Success: OK Failure: void. */ { char *lenptr; char *dataptr; unsigned char *addr = (unsigned char *)gdb_cris_strtol(&remcomInBuffer[1], &lenptr, 16); int length = gdb_cris_strtol(lenptr+1, &dataptr, 16); if (*lenptr == ',' && *dataptr == ':') { if (remcomInBuffer[0] == 'M') { hex2mem(addr, dataptr + 1, length); } else /* X */ { bin2mem(addr, dataptr + 1, length); } gdb_cris_strcpy (remcomOutBuffer, "OK"); } else { gdb_cris_strcpy (remcomOutBuffer, error_message[E06]); } } break; case 'c': /* Continue execution. cAA..AA AA..AA is the address where execution is resumed. If AA..AA is omitted, resume at the present address. Success: return to the executing thread. Failure: will never know. */ if (remcomInBuffer[1] != '\0') { reg.pc = gdb_cris_strtol (&remcomInBuffer[1], 0, 16); } enableDebugIRQ(); return; case 's': /* Step. sAA..AA AA..AA is the address where execution is resumed. If AA..AA is omitted, resume at the present address. Success: return to the executing thread. Failure: will never know. Should never be invoked. The single-step is implemented on the host side. If ever invoked, it is an internal error E04. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E04]); putpacket (remcomOutBuffer); return; case '?': /* The last signal which caused a stop. ? Success: SAA, where AA is the signal number. 
Failure: void. */ remcomOutBuffer[0] = 'S'; remcomOutBuffer[1] = highhex (sigval); remcomOutBuffer[2] = lowhex (sigval); remcomOutBuffer[3] = 0; break; case 'D': /* Detach from host. D Success: OK, and return to the executing thread. Failure: will never know */ putpacket ("OK"); return; case 'k': case 'r': /* kill request or reset request. Success: restart of target. Failure: will never know. */ kill_restart (); break; case 'C': case 'S': case '!': case 'R': case 'd': /* Continue with signal sig. Csig;AA..AA Step with signal sig. Ssig;AA..AA Use the extended remote protocol. ! Restart the target system. R0 Toggle debug flag. d Search backwards. tAA:PP,MM Not supported: E04 */ gdb_cris_strcpy (remcomOutBuffer, error_message[E04]); break; #ifdef PROCESS_SUPPORT case 'T': /* Thread alive. TXX Is thread XX alive? Success: OK, thread XX is alive. Failure: E03, thread XX is dead. */ { int thread_id = (int)gdb_cris_strtol (&remcomInBuffer[1], 0, 16); /* Cannot tell whether it is alive or not. */ if (thread_id >= 0 && thread_id < number_of_tasks) gdb_cris_strcpy (remcomOutBuffer, "OK"); } break; case 'H': /* Set thread for subsequent operations: Hct c = 'c' for thread used in step and continue; t can be -1 for all threads. c = 'g' for thread used in other operations. t = 0 means pick any thread. Success: OK Failure: E01 */ { int thread_id = gdb_cris_strtol (&remcomInBuffer[2], 0, 16); if (remcomInBuffer[1] == 'c') { /* c = 'c' for thread used in step and continue */ /* Do not change current_thread_c here. It would create a mess in the scheduler. */ gdb_cris_strcpy (remcomOutBuffer, "OK"); } else if (remcomInBuffer[1] == 'g') { /* c = 'g' for thread used in other operations. t = 0 means pick any thread. Impossible since the scheduler does not allow that. */ if (thread_id >= 0 && thread_id < number_of_tasks) { current_thread_g = thread_id; gdb_cris_strcpy (remcomOutBuffer, "OK"); } else { /* Not expected - send an error message. 
*/ gdb_cris_strcpy (remcomOutBuffer, error_message[E01]); } } else { /* Not expected - send an error message. */ gdb_cris_strcpy (remcomOutBuffer, error_message[E01]); } } break; case 'q': case 'Q': /* Query of general interest. qXXXX Set general value XXXX. QXXXX=yyyy */ { int pos; int nextpos; int thread_id; switch (remcomInBuffer[1]) { case 'C': /* Identify the remote current thread. */ gdb_cris_strcpy (&remcomOutBuffer[0], "QC"); remcomOutBuffer[2] = highhex (current_thread_c); remcomOutBuffer[3] = lowhex (current_thread_c); remcomOutBuffer[4] = '\0'; break; case 'L': gdb_cris_strcpy (&remcomOutBuffer[0], "QM"); /* Reply with number of threads. */ if (os_is_started()) { remcomOutBuffer[2] = highhex (number_of_tasks); remcomOutBuffer[3] = lowhex (number_of_tasks); } else { remcomOutBuffer[2] = highhex (0); remcomOutBuffer[3] = lowhex (1); } /* Done with the reply. */ remcomOutBuffer[4] = lowhex (1); pos = 5; /* Expects the argument thread id. */ for (; pos < (5 + HEXCHARS_IN_THREAD_ID); pos++) remcomOutBuffer[pos] = remcomInBuffer[pos]; /* Reply with the thread identifiers. */ if (os_is_started()) { /* Store the thread identifiers of all tasks. */ for (thread_id = 0; thread_id < number_of_tasks; thread_id++) { nextpos = pos + HEXCHARS_IN_THREAD_ID - 1; for (; pos < nextpos; pos ++) remcomOutBuffer[pos] = lowhex (0); remcomOutBuffer[pos++] = lowhex (thread_id); } } else { /* Store the thread identifier of the boot task. */ nextpos = pos + HEXCHARS_IN_THREAD_ID - 1; for (; pos < nextpos; pos ++) remcomOutBuffer[pos] = lowhex (0); remcomOutBuffer[pos++] = lowhex (current_thread_c); } remcomOutBuffer[pos] = '\0'; break; default: /* Not supported: "" */ /* Request information about section offsets: qOffsets. */ remcomOutBuffer[0] = 0; break; } } break; #endif /* PROCESS_SUPPORT */ default: /* The stub should ignore other request and send an empty response ($#<checksum>). 
This way we can extend the protocol and GDB can tell whether the stub it is talking to uses the old or the new. */ remcomOutBuffer[0] = 0; break; } putpacket(remcomOutBuffer); } } /* The jump is to the address 0x00000002. Performs a complete re-start from scratch. */ static void kill_restart () { __asm__ volatile ("jump 2"); } /********************************** Breakpoint *******************************/ /* The hook for both a static (compiled) and a dynamic breakpoint set by GDB. An internal stack is used by the stub. The register image of the caller is stored in the structure register_image. Interactive communication with the host is handled by handle_exception and finally the register image is restored. */ void kgdb_handle_breakpoint(void); asm (" .global kgdb_handle_breakpoint kgdb_handle_breakpoint: ;; ;; Response to the break-instruction ;; ;; Create a register image of the caller ;; move $dccr,[reg+0x5E] ; Save the flags in DCCR before disable interrupts di ; Disable interrupts move.d $r0,[reg] ; Save R0 move.d $r1,[reg+0x04] ; Save R1 move.d $r2,[reg+0x08] ; Save R2 move.d $r3,[reg+0x0C] ; Save R3 move.d $r4,[reg+0x10] ; Save R4 move.d $r5,[reg+0x14] ; Save R5 move.d $r6,[reg+0x18] ; Save R6 move.d $r7,[reg+0x1C] ; Save R7 move.d $r8,[reg+0x20] ; Save R8 move.d $r9,[reg+0x24] ; Save R9 move.d $r10,[reg+0x28] ; Save R10 move.d $r11,[reg+0x2C] ; Save R11 move.d $r12,[reg+0x30] ; Save R12 move.d $r13,[reg+0x34] ; Save R13 move.d $sp,[reg+0x38] ; Save SP (R14) ;; Due to the old assembler-versions BRP might not be recognized .word 0xE670 ; move brp,$r0 subq 2,$r0 ; Set to address of previous instruction. 
move.d $r0,[reg+0x3c] ; Save the address in PC (R15) clear.b [reg+0x40] ; Clear P0 move $vr,[reg+0x41] ; Save special register P1 clear.w [reg+0x42] ; Clear P4 move $ccr,[reg+0x44] ; Save special register CCR move $mof,[reg+0x46] ; P7 clear.d [reg+0x4A] ; Clear P8 move $ibr,[reg+0x4E] ; P9, move $irp,[reg+0x52] ; P10, move $srp,[reg+0x56] ; P11, move $dtp0,[reg+0x5A] ; P12, register BAR, assembler might not know BAR ; P13, register DCCR already saved ;; Due to the old assembler-versions BRP might not be recognized .word 0xE670 ; move brp,r0 ;; Static (compiled) breakpoints must return to the next instruction in order ;; to avoid infinite loops. Dynamic (gdb-invoked) must restore the instruction ;; in order to execute it when execution is continued. test.b [is_dyn_brkp] ; Is this a dynamic breakpoint? beq is_static ; No, a static breakpoint nop subq 2,$r0 ; rerun the instruction the break replaced is_static: moveq 1,$r1 move.b $r1,[is_dyn_brkp] ; Set the state variable to dynamic breakpoint move.d $r0,[reg+0x62] ; Save the return address in BRP move $usp,[reg+0x66] ; USP ;; ;; Handle the communication ;; move.d internal_stack+1020,$sp ; Use the internal stack which grows upward moveq 5,$r10 ; SIGTRAP jsr handle_exception ; Interactive routine ;; ;; Return to the caller ;; move.d [reg],$r0 ; Restore R0 move.d [reg+0x04],$r1 ; Restore R1 move.d [reg+0x08],$r2 ; Restore R2 move.d [reg+0x0C],$r3 ; Restore R3 move.d [reg+0x10],$r4 ; Restore R4 move.d [reg+0x14],$r5 ; Restore R5 move.d [reg+0x18],$r6 ; Restore R6 move.d [reg+0x1C],$r7 ; Restore R7 move.d [reg+0x20],$r8 ; Restore R8 move.d [reg+0x24],$r9 ; Restore R9 move.d [reg+0x28],$r10 ; Restore R10 move.d [reg+0x2C],$r11 ; Restore R11 move.d [reg+0x30],$r12 ; Restore R12 move.d [reg+0x34],$r13 ; Restore R13 ;; ;; FIXME: Which registers should be restored? ;; move.d [reg+0x38],$sp ; Restore SP (R14) move [reg+0x56],$srp ; Restore the subroutine return pointer. 
move [reg+0x5E],$dccr ; Restore DCCR move [reg+0x66],$usp ; Restore USP jump [reg+0x62] ; A jump to the content in register BRP works. nop ; "); /* The hook for an interrupt generated by GDB. An internal stack is used by the stub. The register image of the caller is stored in the structure register_image. Interactive communication with the host is handled by handle_exception and finally the register image is restored. Due to the old assembler which does not recognise the break instruction and the breakpoint return pointer hex-code is used. */ void kgdb_handle_serial(void); asm (" .global kgdb_handle_serial kgdb_handle_serial: ;; ;; Response to a serial interrupt ;; move $dccr,[reg+0x5E] ; Save the flags in DCCR di ; Disable interrupts move.d $r0,[reg] ; Save R0 move.d $r1,[reg+0x04] ; Save R1 move.d $r2,[reg+0x08] ; Save R2 move.d $r3,[reg+0x0C] ; Save R3 move.d $r4,[reg+0x10] ; Save R4 move.d $r5,[reg+0x14] ; Save R5 move.d $r6,[reg+0x18] ; Save R6 move.d $r7,[reg+0x1C] ; Save R7 move.d $r8,[reg+0x20] ; Save R8 move.d $r9,[reg+0x24] ; Save R9 move.d $r10,[reg+0x28] ; Save R10 move.d $r11,[reg+0x2C] ; Save R11 move.d $r12,[reg+0x30] ; Save R12 move.d $r13,[reg+0x34] ; Save R13 move.d $sp,[reg+0x38] ; Save SP (R14) move $irp,[reg+0x3c] ; Save the address in PC (R15) clear.b [reg+0x40] ; Clear P0 move $vr,[reg+0x41] ; Save special register P1, clear.w [reg+0x42] ; Clear P4 move $ccr,[reg+0x44] ; Save special register CCR move $mof,[reg+0x46] ; P7 clear.d [reg+0x4A] ; Clear P8 move $ibr,[reg+0x4E] ; P9, move $irp,[reg+0x52] ; P10, move $srp,[reg+0x56] ; P11, move $dtp0,[reg+0x5A] ; P12, register BAR, assembler might not know BAR ; P13, register DCCR already saved ;; Due to the old assembler-versions BRP might not be recognized .word 0xE670 ; move brp,r0 move.d $r0,[reg+0x62] ; Save the return address in BRP move $usp,[reg+0x66] ; USP ;; get the serial character (from debugport.c) and check if it is a ctrl-c jsr getDebugChar cmp.b 3, $r10 bne goback nop ;; ;; Handle 
the communication ;; move.d internal_stack+1020,$sp ; Use the internal stack moveq 2,$r10 ; SIGINT jsr handle_exception ; Interactive routine goback: ;; ;; Return to the caller ;; move.d [reg],$r0 ; Restore R0 move.d [reg+0x04],$r1 ; Restore R1 move.d [reg+0x08],$r2 ; Restore R2 move.d [reg+0x0C],$r3 ; Restore R3 move.d [reg+0x10],$r4 ; Restore R4 move.d [reg+0x14],$r5 ; Restore R5 move.d [reg+0x18],$r6 ; Restore R6 move.d [reg+0x1C],$r7 ; Restore R7 move.d [reg+0x20],$r8 ; Restore R8 move.d [reg+0x24],$r9 ; Restore R9 move.d [reg+0x28],$r10 ; Restore R10 move.d [reg+0x2C],$r11 ; Restore R11 move.d [reg+0x30],$r12 ; Restore R12 move.d [reg+0x34],$r13 ; Restore R13 ;; ;; FIXME: Which registers should be restored? ;; move.d [reg+0x38],$sp ; Restore SP (R14) move [reg+0x56],$srp ; Restore the subroutine return pointer. move [reg+0x5E],$dccr ; Restore DCCR move [reg+0x66],$usp ; Restore USP reti ; Return from the interrupt routine nop "); /* Use this static breakpoint in the start-up only. */ void breakpoint(void) { kgdb_started = 1; is_dyn_brkp = 0; /* This is a static, not a dynamic breakpoint. */ __asm__ volatile ("break 8"); /* Jump to handle_breakpoint. */ } /* initialize kgdb. doesn't break into the debugger, but sets up irq and ports */ void kgdb_init(void) { /* could initialize debug port as well but it's done in head.S already... */ /* breakpoint handler is now set in irq.c */ set_int_vector(8, kgdb_handle_serial); enableDebugIRQ(); } /****************************** End of file **********************************/
gpl-2.0
ShinySide/SM-G360T1_kernel
drivers/nfc/sec_nfc.c
28
23960
/* * SAMSUNG NFC Controller * * Copyright (C) 2013 Samsung Electronics Co.Ltd * Author: Woonki Lee <woonki84.lee@samsung.com> * Heejae Kim <heejae12.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Last update: 2014-07-15 * */ #ifdef CONFIG_SEC_NFC_IF_I2C_GPIO #define CONFIG_SEC_NFC_IF_I2C #endif #include <linux/wait.h> #include <linux/delay.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/module.h> #include <linux/miscdevice.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/uaccess.h> #include <linux/sec_nfc.h> #include <linux/wakelock.h> #include <linux/of_gpio.h> #include <linux/regulator/consumer.h> #ifdef CONFIG_SEC_NFC_CLK_REQ #include <linux/interrupt.h> #include <linux/clk.h> #endif #ifdef CONFIG_SEC_NFC_USE_8916_BBCLK2 #include <linux/clk.h> #endif #ifndef CONFIG_SEC_NFC_IF_I2C struct sec_nfc_i2c_info {}; #define sec_nfc_read NULL #define sec_nfc_write NULL #define sec_nfc_poll NULL #define sec_nfc_i2c_irq_clear(x) #define SEC_NFC_GET_INFO(dev) platform_get_drvdata(to_platform_device(dev)) #else /* CONFIG_SEC_NFC_IF_I2C */ #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/i2c.h> #define SEC_NFC_GET_INFO(dev) i2c_get_clientdata(to_i2c_client(dev)) enum 
sec_nfc_irq { SEC_NFC_NONE, SEC_NFC_INT, SEC_NFC_SKIP, }; struct sec_nfc_i2c_info { struct i2c_client *i2c_dev; struct mutex read_mutex; enum sec_nfc_irq read_irq; wait_queue_head_t read_wait; size_t buflen; u8 *buf; }; #endif struct sec_nfc_info { struct miscdevice miscdev; struct mutex mutex; enum sec_nfc_mode mode; struct device *dev; struct sec_nfc_platform_data *pdata; struct sec_nfc_i2c_info i2c_info; #ifdef CONFIG_SEC_NFC_CLK_REQ bool clk_ctl; bool clk_state; #endif struct wake_lock wake_lock; }; #ifdef CONFIG_SEC_NFC_IF_I2C static irqreturn_t sec_nfc_irq_thread_fn(int irq, void *dev_id) { struct sec_nfc_info *info = dev_id; struct sec_nfc_platform_data *pdata = info->pdata; dev_dbg(info->dev, "[NFC] Read Interrupt is occurred!\n"); if(gpio_get_value(pdata->irq) == 0) { dev_err(info->dev, "[NFC] Warning,irq-gpio state is low!\n"); return IRQ_HANDLED; } mutex_lock(&info->i2c_info.read_mutex); /* Skip interrupt during power switching * It is released after first write */ if (info->i2c_info.read_irq == SEC_NFC_SKIP) { dev_dbg(info->dev, "%s: Now power swiching. Skip this IRQ\n", __func__); mutex_unlock(&info->i2c_info.read_mutex); return IRQ_HANDLED; } info->i2c_info.read_irq = SEC_NFC_INT; mutex_unlock(&info->i2c_info.read_mutex); wake_up_interruptible(&info->i2c_info.read_wait); if(!wake_lock_active(&info->wake_lock)) { dev_dbg(info->dev, "%s: Set wake_lock_timeout for 2 sec. 
!!!\n", __func__); wake_lock_timeout(&info->wake_lock, 2 * HZ); } return IRQ_HANDLED; } static ssize_t sec_nfc_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct sec_nfc_info *info = container_of(file->private_data, struct sec_nfc_info, miscdev); enum sec_nfc_irq irq; int ret = 0; dev_dbg(info->dev, "[NFC] %s: info: %p, count: %zu\n", __func__, info, count); mutex_lock(&info->mutex); if (info->mode == SEC_NFC_MODE_OFF) { dev_err(info->dev, "[NFC] sec_nfc is not enabled\n"); ret = -ENODEV; goto out; } mutex_lock(&info->i2c_info.read_mutex); irq = info->i2c_info.read_irq; mutex_unlock(&info->i2c_info.read_mutex); if (irq == SEC_NFC_NONE) { if (file->f_flags & O_NONBLOCK) { dev_err(info->dev, "[NFC] it is nonblock\n"); ret = -EAGAIN; goto out; } } /* i2c recv */ if (count > info->i2c_info.buflen) count = info->i2c_info.buflen; if (count > SEC_NFC_MSG_MAX_SIZE) { dev_err(info->dev, "[NFC] user required wrong size :%d\n", count); ret = -EINVAL; goto out; } mutex_lock(&info->i2c_info.read_mutex); memset(info->i2c_info.buf, 0, count); ret = i2c_master_recv(info->i2c_info.i2c_dev, info->i2c_info.buf, count); dev_dbg(info->dev, "%s: recv size : %d\n", __func__, ret); if (ret == -EREMOTEIO) { ret = -ERESTART; goto read_error; } else if (ret != count) { dev_err(info->dev, "[NFC] read failed: return: %d count: %d\n", ret, count); /* ret = -EREMOTEIO; */ goto read_error; } info->i2c_info.read_irq = SEC_NFC_NONE; mutex_unlock(&info->i2c_info.read_mutex); if (copy_to_user(buf, info->i2c_info.buf, ret)) { dev_err(info->dev, "[NFC] copy failed to user\n"); ret = -EFAULT; } goto out; read_error: info->i2c_info.read_irq = SEC_NFC_NONE; mutex_unlock(&info->i2c_info.read_mutex); out: mutex_unlock(&info->mutex); return ret; } static ssize_t sec_nfc_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct sec_nfc_info *info = container_of(file->private_data, struct sec_nfc_info, miscdev); int ret = 0; dev_dbg(info->dev, "[NFC] %s: 
info: %p, count %zu\n", __func__, info, count); mutex_lock(&info->mutex); if (info->mode == SEC_NFC_MODE_OFF) { dev_err(info->dev, "[NFC] sec_nfc is not enabled\n"); ret = -ENODEV; goto out; } if (count > info->i2c_info.buflen) count = info->i2c_info.buflen; if (count > SEC_NFC_MSG_MAX_SIZE) { dev_err(info->dev, "[NFC] user required wrong size :%d\n", count); ret = -EINVAL; goto out; } if (copy_from_user(info->i2c_info.buf, buf, count)) { dev_err(info->dev, "[NFC] copy failed from user\n"); ret = -EFAULT; goto out; } /* Skip interrupt during power switching * It is released after first write */ mutex_lock(&info->i2c_info.read_mutex); ret = i2c_master_send(info->i2c_info.i2c_dev, info->i2c_info.buf, count); if (info->i2c_info.read_irq == SEC_NFC_SKIP) info->i2c_info.read_irq = SEC_NFC_NONE; mutex_unlock(&info->i2c_info.read_mutex); if (ret == -EREMOTEIO) { dev_err(info->dev, "[NFC] send failed: return: %d count: %d\n", ret, count); ret = -ERESTART; goto out; } if (ret != count) { dev_err(info->dev, "[NFC] send failed: return: %d count: %d\n", ret, count); ret = -EREMOTEIO; } out: mutex_unlock(&info->mutex); return ret; } static unsigned int sec_nfc_poll(struct file *file, poll_table *wait) { struct sec_nfc_info *info = container_of(file->private_data, struct sec_nfc_info, miscdev); enum sec_nfc_irq irq; int ret = 0; dev_dbg(info->dev, "[NFC] %s: info: %p\n", __func__, info); mutex_lock(&info->mutex); if (info->mode == SEC_NFC_MODE_OFF) { dev_err(info->dev, "[NFC] sec_nfc is not enabled\n"); ret = -ENODEV; goto out; } poll_wait(file, &info->i2c_info.read_wait, wait); mutex_lock(&info->i2c_info.read_mutex); irq = info->i2c_info.read_irq; if (irq == SEC_NFC_INT) ret = (POLLIN | POLLRDNORM); mutex_unlock(&info->i2c_info.read_mutex); out: mutex_unlock(&info->mutex); return ret; } void sec_nfc_i2c_irq_clear(struct sec_nfc_info *info) { /* clear interrupt. 
Interrupt will be occured at power off */ mutex_lock(&info->i2c_info.read_mutex); info->i2c_info.read_irq = SEC_NFC_NONE; mutex_unlock(&info->i2c_info.read_mutex); } int sec_nfc_i2c_probe(struct i2c_client *client) { struct device *dev = &client->dev; struct sec_nfc_info *info = dev_get_drvdata(dev); struct sec_nfc_platform_data *pdata = info->pdata; int ret; pr_info("%s start\n", __func__); pr_info("%s : info : %p\n", __func__, info); pr_info("%s : pdata : %p\n", __func__, pdata); pr_info("%s : pdata->irq : %d\n", __func__, pdata->irq); pr_info("%s : client->irq: %d\n", __func__, client->irq); /* pdata->irq = client->irq;*/ info->i2c_info.buflen = SEC_NFC_MAX_BUFFER_SIZE; info->i2c_info.buf = kzalloc(SEC_NFC_MAX_BUFFER_SIZE, GFP_KERNEL); if (!info->i2c_info.buf) { dev_err(dev, "[NFC] failed to allocate memory for sec_nfc_info->buf\n"); return -ENOMEM; } info->i2c_info.i2c_dev = client; info->i2c_info.read_irq = SEC_NFC_NONE; mutex_init(&info->i2c_info.read_mutex); init_waitqueue_head(&info->i2c_info.read_wait); i2c_set_clientdata(client, info); info->dev = dev; ret = gpio_request(pdata->irq, "nfc_int"); if (ret) { dev_err(dev, "GPIO request is failed to register IRQ\n"); goto err_irq_req; } gpio_direction_input(pdata->irq); ret = request_threaded_irq(client->irq, NULL, sec_nfc_irq_thread_fn, IRQF_TRIGGER_RISING | IRQF_ONESHOT, SEC_NFC_DRIVER_NAME, info); if (ret < 0) { dev_err(dev, "[NFC] failed to register IRQ handler\n"); kfree(info->i2c_info.buf); return ret; } pr_info("%s success\n", __func__); return 0; err_irq_req: return ret; } void sec_nfc_i2c_remove(struct device *dev) { struct sec_nfc_info *info = dev_get_drvdata(dev); struct i2c_client *client = info->i2c_info.i2c_dev; struct sec_nfc_platform_data *pdata = info->pdata; free_irq(client->irq, info); gpio_free(pdata->irq); } #endif /* CONFIG_SEC_NFC_IF_I2C */ #ifdef CONFIG_SEC_NFC_CLK_REQ #ifdef CONFIG_SEC_NFC_USE_8916_BBCLK2 /* PMIC */ #define sec_nfc_clk_on(clk) clk_prepare_enable(clk) #define 
sec_nfc_clk_off(clk) clk_disable_unprepare(clk) #else /* default GPIO */ #define sec_nfc_clk_on(clk) gpio_set_value(clk, 1) #define sec_nfc_clk_off(clk) gpio_set_value(clk, 0) #endif static irqreturn_t sec_nfc_clk_irq_thread(int irq, void *dev_id) { struct sec_nfc_info *info = dev_id; struct sec_nfc_platform_data *pdata = info->pdata; bool value; value = gpio_get_value(pdata->clk_req) > 0 ? false : true; if (value == info->clk_state) return IRQ_HANDLED; /* pr_info("[NFC] %s: %s\n", __func__, value ? "True" : "False");*/ if (value) sec_nfc_clk_on(pdata->clk_enable); else sec_nfc_clk_off(pdata->clk_enable); info->clk_state = value; return IRQ_HANDLED; } void sec_nfc_clk_ctl_enable(struct sec_nfc_info *info) { struct sec_nfc_platform_data *pdata = info->pdata; int ret; pr_info("%s : start\n", __func__); if (info->clk_ctl) return; ret = gpio_request(pdata->clk_req, "nfc-ex-clk"); if (ret) { dev_err(info->dev, "failed to get gpio ven\n"); return; } info->clk_state = false; pdata->clk_irq = gpio_to_irq(pdata->clk_req); pr_info("%s : goio to irq = %d\n", __func__, pdata->clk_irq); ret = request_threaded_irq(pdata->clk_irq, NULL, sec_nfc_clk_irq_thread, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT, SEC_NFC_DRIVER_NAME, info); if (ret < 0) dev_err(info->dev, "[NFC] failed to register CLK REQ IRQ handler\n"); info->clk_ctl = true; } void sec_nfc_clk_ctl_disable(struct sec_nfc_info *info) { struct sec_nfc_platform_data *pdata = info->pdata; pr_info("%s : start\n", __func__); if (!info->clk_ctl) return; free_irq(pdata->clk_irq, info); gpio_free(pdata->clk_req); pr_info("%s : free irq = %d\n", __func__, pdata->clk_irq); if (info->clk_state) sec_nfc_clk_off(pdata->clk_enable); info->clk_state = false; info->clk_ctl = false; } #else #define sec_nfc_clk_ctl_enable(x) #define sec_nfc_clk_ctl_disable(x) #endif /* CONFIG_SEC_NFC_CLK_REQ */ static void sec_nfc_set_mode(struct sec_nfc_info *info, enum sec_nfc_mode mode) { struct sec_nfc_platform_data *pdata = info->pdata; 
pr_info("%s : start %d\n", __func__, mode); /* intfo lock is aleady gotten before calling this function */ if (info->mode == mode) { dev_dbg(info->dev, "Power mode is already %d", mode); return; } info->mode = mode; #ifdef CONFIG_SEC_NFC_IF_I2C /* Skip interrupt during power switching * It is released after first write */ mutex_lock(&info->i2c_info.read_mutex); info->i2c_info.read_irq = SEC_NFC_SKIP; mutex_unlock(&info->i2c_info.read_mutex); #endif gpio_set_value_cansleep(pdata->ven, 0); gpio_set_value(pdata->ven, SEC_NFC_PW_OFF); if (pdata->firm) gpio_set_value(pdata->firm, SEC_NFC_FW_OFF); if (mode == SEC_NFC_MODE_BOOTLOADER) if (pdata->firm) gpio_set_value(pdata->firm, SEC_NFC_FW_ON); if (mode != SEC_NFC_MODE_OFF) { msleep(SEC_NFC_VEN_WAIT_TIME); gpio_set_value_cansleep(pdata->ven, 1); gpio_set_value(pdata->ven, SEC_NFC_PW_ON); sec_nfc_clk_ctl_enable(info); msleep(SEC_NFC_VEN_WAIT_TIME/2); #ifdef CONFIG_SEC_NFC_IF_I2C enable_irq_wake(info->i2c_info.i2c_dev->irq); #endif } else { sec_nfc_clk_ctl_disable(info); #ifdef CONFIG_SEC_NFC_IF_I2C disable_irq_wake(info->i2c_info.i2c_dev->irq); #endif } if(wake_lock_active(&info->wake_lock)) wake_unlock(&info->wake_lock); dev_dbg(info->dev, "[NFC] Power mode is : %d\n", mode); } static long sec_nfc_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct sec_nfc_info *info = container_of(file->private_data, struct sec_nfc_info, miscdev); #ifdef CONFIG_SEC_NFC_PRODUCT_N5 struct sec_nfc_platform_data *pdata = info->pdata; #endif unsigned int new = (unsigned int)arg; int ret = 0; dev_dbg(info->dev, "[NFC] %s: info: %p, cmd: 0x%x\n", __func__, info, cmd); mutex_lock(&info->mutex); switch (cmd) { case SEC_NFC_SET_MODE: dev_dbg(info->dev, "[NFC] %s: SEC_NFC_SET_MODE\n", __func__); if (info->mode == new) break; if (new >= SEC_NFC_MODE_COUNT) { dev_err(info->dev, "[NFC] wrong mode (%d)\n", new); ret = -EFAULT; break; } sec_nfc_set_mode(info, new); break; #ifdef CONFIG_SEC_NFC_PRODUCT_N3 case SEC_NFC_SLEEP: case 
SEC_NFC_WAKEUP: break; #elif defined(CONFIG_SEC_NFC_PRODUCT_N5) case SEC_NFC_SLEEP: if (info->mode != SEC_NFC_MODE_BOOTLOADER) { if(wake_lock_active(&info->wake_lock)) wake_unlock(&info->wake_lock); gpio_set_value(pdata->wake, SEC_NFC_WAKE_SLEEP); } break; case SEC_NFC_WAKEUP: if (info->mode != SEC_NFC_MODE_BOOTLOADER) { gpio_set_value(pdata->wake, SEC_NFC_WAKE_UP); if(!wake_lock_active(&info->wake_lock)) wake_lock(&info->wake_lock); } break; #endif default: dev_err(info->dev, "[NFC] Unknow ioctl 0x%x\n", cmd); ret = -ENOIOCTLCMD; break; } mutex_unlock(&info->mutex); return ret; } static int sec_nfc_open(struct inode *inode, struct file *file) { struct sec_nfc_info *info = container_of(file->private_data, struct sec_nfc_info, miscdev); int ret = 0; pr_info("%s : start\n", __func__); dev_dbg(info->dev, "[NFC] %s: info : %p\n" , __func__, info); mutex_lock(&info->mutex); if (info->mode != SEC_NFC_MODE_OFF) { dev_err(info->dev, "[NFC] sec_nfc is busy\n"); ret = -EBUSY; goto out; } sec_nfc_set_mode(info, SEC_NFC_MODE_OFF); out: mutex_unlock(&info->mutex); return ret; } static int sec_nfc_close(struct inode *inode, struct file *file) { struct sec_nfc_info *info = container_of(file->private_data, struct sec_nfc_info, miscdev); pr_info("%s : start\n", __func__); dev_dbg(info->dev, "[NFC] %s: info : %p\n" , __func__, info); mutex_lock(&info->mutex); sec_nfc_set_mode(info, SEC_NFC_MODE_OFF); mutex_unlock(&info->mutex); return 0; } static const struct file_operations sec_nfc_fops = { .owner = THIS_MODULE, .read = sec_nfc_read, .write = sec_nfc_write, .poll = sec_nfc_poll, .open = sec_nfc_open, .release = sec_nfc_close, .unlocked_ioctl = sec_nfc_ioctl, }; #ifdef CONFIG_PM static int sec_nfc_suspend(struct device *dev) { struct sec_nfc_info *info = SEC_NFC_GET_INFO(dev); int ret = 0; mutex_lock(&info->mutex); if (info->mode == SEC_NFC_MODE_BOOTLOADER) ret = -EPERM; mutex_unlock(&info->mutex); return ret; } static int sec_nfc_resume(struct device *dev) { return 0; } static 
SIMPLE_DEV_PM_OPS(sec_nfc_pm_ops, sec_nfc_suspend, sec_nfc_resume); #endif #ifdef CONFIG_OF /*device tree parsing*/ static int sec_nfc_parse_dt(struct device *dev, struct sec_nfc_platform_data *pdata) { //int gpio; #ifdef CONFIG_SEC_NFC_CLK_REQ enum of_gpio_flags flags; #endif struct device_node *np = dev->of_node; pdata->ven = of_get_named_gpio_flags(np, "sec-nfc,ven-gpio", 0, &pdata->ven_gpio_flags); pdata->firm = of_get_named_gpio_flags(np, "sec-nfc,firm-gpio", 0, &pdata->firm_gpio_flags); pdata->wake = pdata->firm; #ifdef CONFIG_SEC_NFC_IO_EN pdata->pvdd = of_get_named_gpio_flags(np, "sec-nfc,pvdd-gpio", 0, &pdata->pvdd_gpio_flags); #endif #ifdef CONFIG_SEC_NFC_IF_I2C pdata->irq = of_get_named_gpio_flags(np, "sec-nfc,irq-gpio", 0, &pdata->irq_gpio_flags); #endif #ifdef CONFIG_SEC_NFC_CLK_REQ pdata->clk_enable = of_get_named_gpio_flags(np, "sec-nfc,ext_clk-gpio", 0, &flags); #endif #ifdef CONFIG_SEC_NFC_USE_8916_BBCLK2 pdata->nfc_clk = clk_get(dev, "nfc_clock"); if (IS_ERR(pdata->nfc_clk)) { pr_err("[NFC] %s: Couldn't get D1)\n", __func__); } else { if (clk_prepare_enable(pdata->nfc_clk)) pr_err("[NFC] %s: Couldn't prepare D1\n", __func__); } #endif pr_info("%s : ven-gpio\t= %d\n", __func__, pdata->ven); pr_info("%s : firm-gpio\t= %d\n", __func__, pdata->firm); #ifdef CONFIG_SEC_NFC_IO_EN pr_info("%s : pvdd-gpio\t= %d\n", __func__, pdata->pvdd); #endif pr_info("%s : wake-gpio\t= %d\n", __func__, pdata->wake); pr_info("%s : irq num\t= %d\n", __func__, pdata->irq); pr_info("%s : done\n", __func__); return 0; } static int sec_nfc_pinctrl(struct device *dev) { int ret = 0; struct pinctrl *nfc_pinctrl; struct pinctrl_state *nfc_suspend; struct pinctrl_state *nfc_active; /* Get pinctrl if target uses pinctrl */ nfc_pinctrl = devm_pinctrl_get(dev); if (IS_ERR(nfc_pinctrl)) { pr_debug("Target does not use pinctrl\n"); nfc_pinctrl = NULL; } else { nfc_suspend = pinctrl_lookup_state(nfc_pinctrl, "nfc_suspend"); nfc_active = pinctrl_lookup_state(nfc_pinctrl, "nfc_active"); 
if (IS_ERR(nfc_suspend)) { pr_info("%s fail to suspend lookup_state\n", __func__); goto err_exit; } if (IS_ERR(nfc_active)) { pr_info("%s fail to active lookup_state\n", __func__); goto err_exit; } ret = pinctrl_select_state(nfc_pinctrl, nfc_suspend); if (ret != 0) { pr_err("%s: fail to select_state suspend\n", __func__); goto err_exit; } ret = pinctrl_select_state(nfc_pinctrl, nfc_active); if (ret != 0) { pr_err("%s: fail to select_state active\n", __func__); goto err_exit; } devm_pinctrl_put(nfc_pinctrl); } err_exit: return ret; } #else static int sec_nfc_parse_dt(struct device *dev, struct sec_nfc_platform_data *pdata) { return -ENODEV; } static int sec_nfc_pinctrl(struct device *dev) { return 0; } #endif static int __sec_nfc_probe(struct device *dev) { struct sec_nfc_info *info; struct sec_nfc_platform_data *pdata = NULL; int ret = 0; pr_info("%s : start\n", __func__); if (dev->of_node) { pdata = devm_kzalloc(dev, sizeof(struct sec_nfc_platform_data), GFP_KERNEL); if (!pdata) { dev_err(dev, "[NFC] Failed to allocate memory\n"); return -ENOMEM; } ret = sec_nfc_parse_dt(dev, pdata); if (ret) return ret; } else pdata = dev->platform_data; if (!pdata) { dev_err(dev, "[NFC] No platform data\n"); ret = -ENOMEM; goto err_pdata; } ret = sec_nfc_pinctrl(dev); if (ret) { dev_err(dev, "[NFC] failed to nfc pinctrl\n"); goto err_pinctrl; } info = kzalloc(sizeof(struct sec_nfc_info), GFP_KERNEL); if (!info) { dev_err(dev, "[NFC] failed to allocate memory for sec_nfc_info\n"); ret = -ENOMEM; goto err_info_alloc; } info->dev = dev; info->pdata = pdata; info->mode = SEC_NFC_MODE_OFF; mutex_init(&info->mutex); dev_set_drvdata(dev, info); info->miscdev.minor = MISC_DYNAMIC_MINOR; info->miscdev.name = SEC_NFC_DRIVER_NAME; info->miscdev.fops = &sec_nfc_fops; info->miscdev.parent = dev; ret = misc_register(&info->miscdev); if (ret < 0) { dev_err(dev, "[NFC] failed to register Device\n"); goto err_dev_reg; } #ifdef CONFIG_SEC_NFC_IO_EN ret = gpio_request(pdata->pvdd, "pvdd-gpio"); if 
(ret) { dev_err(dev, "[NFC] failed to get gpio pvdd\n"); } gpio_direction_output(pdata->pvdd, SEC_NFC_PVDD_ON); pr_info("%s : pvdd-gpio value\t= %d\n", __func__, gpio_get_value(pdata->pvdd)); #endif ret = gpio_request(pdata->ven, "ven-gpio"); if (ret) { dev_err(dev, "[NFC] failed to get gpio ven\n"); goto err_gpio_ven; } gpio_direction_output(pdata->ven, SEC_NFC_PW_OFF); if (pdata->firm) { ret = gpio_request(pdata->firm, "firm-gpio"); if (ret) { dev_err(dev, "[NFC] failed to get gpio firm\n"); goto err_gpio_firm; } gpio_direction_output(pdata->firm, SEC_NFC_FW_OFF); } wake_lock_init(&info->wake_lock, WAKE_LOCK_SUSPEND, "NFCWAKE"); dev_dbg(dev, "[NFC] %s: info: %p, pdata %p\n", __func__, info, pdata); pr_info("%s : success\n", __func__); return 0; err_gpio_firm: gpio_free(pdata->ven); err_gpio_ven: err_dev_reg: err_pinctrl: err_info_alloc: kfree(info); err_pdata: return ret; } static int __sec_nfc_remove(struct device *dev) { struct sec_nfc_info *info = dev_get_drvdata(dev); struct sec_nfc_platform_data *pdata = info->pdata; dev_dbg(info->dev, "%s\n", __func__); #ifdef CONFIG_SEC_NFC_USE_8916_BBCLK2 if (pdata->nfc_clk) clk_unprepare(pdata->nfc_clk); #endif misc_deregister(&info->miscdev); sec_nfc_set_mode(info, SEC_NFC_MODE_OFF); gpio_set_value(pdata->firm, 0); gpio_set_value_cansleep(pdata->ven, 0); gpio_free(pdata->ven); if (pdata->firm) gpio_free(pdata->firm); wake_lock_destroy(&info->wake_lock); kfree(info); return 0; } #ifdef CONFIG_SEC_NFC_IF_I2C MODULE_DEVICE_TABLE(i2c, sec_nfc_id_table); static int sec_nfc_probe(struct i2c_client *client, const struct i2c_device_id *id) { int ret = 0; ret = __sec_nfc_probe(&client->dev); if (ret) return ret; if (sec_nfc_i2c_probe(client)) __sec_nfc_remove(&client->dev); return ret; } static int sec_nfc_remove(struct i2c_client *client) { sec_nfc_i2c_remove(&client->dev); return __sec_nfc_remove(&client->dev); } static struct i2c_device_id sec_nfc_id_table[] = { { SEC_NFC_DRIVER_NAME, 0 }, { } }; #else /* 
CONFIG_SEC_NFC_IF_I2C */ MODULE_DEVICE_TABLE(platform, sec_nfc_id_table); static int sec_nfc_probe(struct platform_device *pdev) { return __sec_nfc_probe(&pdev->dev); } static int sec_nfc_remove(struct platform_device *pdev) { return __sec_nfc_remove(&pdev->dev); } static struct platform_device_id sec_nfc_id_table[] = { { SEC_NFC_DRIVER_NAME, 0 }, { } }; #endif /* CONFIG_SEC_NFC_IF_I2C */ #ifdef CONFIG_OF static struct of_device_id nfc_match_table[] = { { .compatible = SEC_NFC_DRIVER_NAME,}, {}, }; #else #define nfc_match_table NULL #endif #ifdef CONFIG_SEC_NFC_IF_I2C static struct i2c_driver sec_nfc_driver = { .probe = sec_nfc_probe, .id_table = sec_nfc_id_table, .remove = sec_nfc_remove, .driver = { .name = SEC_NFC_DRIVER_NAME, #ifdef CONFIG_PM .pm = &sec_nfc_pm_ops, #endif .of_match_table = nfc_match_table, }, }; #else /* CONFIG_SEC_NFC_IF_I2C */ static struct platform_driver sec_nfc_driver = { .probe = sec_nfc_probe, .id_table = sec_nfc_id_table, .remove = sec_nfc_remove, .driver = { .name = SEC_NFC_DRIVER_NAME, #ifdef CONFIG_PM .pm = &sec_nfc_pm_ops, #endif .of_match_table = nfc_match_table, }, }; #endif /* CONFIG_SEC_NFC_IF_I2C */ static int __init sec_nfc_init(void) { #ifdef CONFIG_SEC_NFC_IF_I2C return i2c_add_driver(&sec_nfc_driver); #else return platform_driver_register(&sec_nfc_driver); #endif } static void __exit sec_nfc_exit(void) { #ifdef CONFIG_SEC_NFC_IF_I2C return i2c_del_driver(&sec_nfc_driver); #else return platform_driver_unregister(&sec_nfc_driver); #endif } module_init(sec_nfc_init); module_exit(sec_nfc_exit); MODULE_DESCRIPTION("Samsung sec_nfc driver"); MODULE_LICENSE("GPL");
gpl-2.0
myran2/MassChaosCore
dep/acelite/ace/DEV_Addr.cpp
540
2242
// $Id: DEV_Addr.cpp 91368 2010-08-16 13:03:34Z mhengstmengel $
//
// ACE_DEV_Addr wraps a device pathname in the ACE_Addr address
// hierarchy (address family AF_DEV).  The "address" is simply the
// fixed-size devname_ character buffer declared in DEV_Addr.h.

#include "ace/DEV_Addr.h"

#if !defined (__ACE_INLINE__)
#include "ace/DEV_Addr.inl"
#endif /* __ACE_INLINE__ */

#include "ace/Log_Msg.h"
#include "ace/OS_NS_string.h"

ACE_BEGIN_VERSIONED_NAMESPACE_DECL

ACE_ALLOC_HOOK_DEFINE(ACE_DEV_Addr)

// Transform the current address into string format.
// Copies the stored device name into <s>, bounded by <len>
// (ACE_OS::strsncpy NUL-terminates the destination).  Always
// returns 0.
int ACE_DEV_Addr::addr_to_string (ACE_TCHAR *s, size_t len) const
{
  ACE_TRACE ("ACE_DEV_Addr::addr_to_string");

  ACE_OS::strsncpy (s, this->devname_, len);
  return 0;
}

// Return a pointer to the address.
// Exposes the internal devname_ buffer; the caller must not outlive
// this object while holding the pointer.
void *
ACE_DEV_Addr::get_addr (void) const
{
  ACE_TRACE ("ACE_DEV_Addr::get_addr");

  return (void *) &this->devname_;
}

// Dump the device name for debugging (compiled in only when
// ACE_HAS_DUMP is defined; otherwise a no-op).
void
ACE_DEV_Addr::dump (void) const
{
#if defined (ACE_HAS_DUMP)
  ACE_TRACE ("ACE_DEV_Addr::dump");

  ACE_DEBUG ((LM_DEBUG, ACE_BEGIN_DUMP, this));
  ACE_DEBUG ((LM_DEBUG, ACE_TEXT ("devname_ = %s"), this->devname_));
  ACE_DEBUG ((LM_DEBUG, ACE_END_DUMP));
#endif /* ACE_HAS_DUMP */
}

// Do nothing constructor.
// Registers as AF_DEV with the base class and zero-fills the name
// buffer so the address starts out empty.
ACE_DEV_Addr::ACE_DEV_Addr (void)
  : ACE_Addr (AF_DEV, sizeof this->devname_)
{
  ACE_TRACE ("ACE_DEV_Addr::ACE_DEV_Addr");

  (void) ACE_OS::memset ((void *) &this->devname_, 0,
                         sizeof this->devname_);
}

// Copy the type/size and device name from <sa>.  An AF_ANY source is
// treated as "no address": the buffer is cleared instead of copied.
// Always returns 0.
int
ACE_DEV_Addr::set (const ACE_DEV_Addr &sa)
{
  this->base_set (sa.get_type (), sa.get_size ());

  if (sa.get_type () == AF_ANY)
    (void) ACE_OS::memset ((void *) &this->devname_, 0,
                           sizeof this->devname_);
  else
    (void) ACE_OS::strsncpy (this->devname_, sa.devname_,
                             ACE_DEV_Addr::DEVNAME_LENGTH);

  return 0;
}

// Copy constructor.
ACE_DEV_Addr::ACE_DEV_Addr (const ACE_DEV_Addr &sa)
  : ACE_Addr (AF_DEV, sizeof this->devname_)
{
  ACE_TRACE ("ACE_DEV_Addr::ACE_DEV_Addr");

  this->set (sa);
}

// Construct from a device pathname (e.g. "/dev/ttyS0"); delegates to
// the set (const ACE_TCHAR *) overload declared in the header.
ACE_DEV_Addr::ACE_DEV_Addr (const ACE_TCHAR *devname)
  : ACE_Addr (AF_DEV, sizeof this->devname_)
{
  ACE_TRACE ("ACE_DEV_Addr::ACE_DEV_Addr");

  this->set (devname);
}

// Assignment operator; self-assignment safe.
ACE_DEV_Addr &
ACE_DEV_Addr::operator= (const ACE_DEV_Addr &sa)
{
  ACE_TRACE ("ACE_DEV_Addr::operator=");

  if (this != &sa)
    this->set (sa);

  return *this;
}

ACE_END_VERSIONED_NAMESPACE_DECL
gpl-2.0
Admetric/android_kernel_s5pv210
drivers/parisc/eisa.c
540
11895
/* * eisa.c - provide support for EISA adapters in PA-RISC machines * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * * Copyright (c) 2001 Matthew Wilcox for Hewlett Packard * Copyright (c) 2001 Daniel Engstrom <5116@telia.com> * * There are two distinct EISA adapters. Mongoose is found in machines * before the 712; then the Wax ASIC is used. To complicate matters, the * Wax ASIC also includes a PS/2 and RS-232 controller, but those are * dealt with elsewhere; this file is concerned only with the EISA portions * of Wax. * * * HINT: * ----- * To allow an ISA card to work properly in the EISA slot you need to * set an edge trigger level. This may be done on the palo command line * by adding the kernel parameter "eisa_irq_edge=n,n2,[...]]", with * n and n2 as the irq levels you want to use. * * Example: "eisa_irq_edge=10,11" allows ISA cards to operate at * irq levels 10 and 11. */ #include <linux/init.h> #include <linux/ioport.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/spinlock.h> #include <linux/eisa.h> #include <asm/byteorder.h> #include <asm/io.h> #include <asm/hardware.h> #include <asm/processor.h> #include <asm/parisc-device.h> #include <asm/delay.h> #include <asm/eisa_bus.h> #include <asm/eisa_eeprom.h> #if 0 #define EISA_DBG(msg, arg... ) printk(KERN_DEBUG "eisa: " msg , ## arg ) #else #define EISA_DBG(msg, arg... ) #endif #define SNAKES_EEPROM_BASE_ADDR 0xF0810400 #define MIRAGE_EEPROM_BASE_ADDR 0xF00C0400 static DEFINE_SPINLOCK(eisa_irq_lock); void __iomem *eisa_eeprom_addr __read_mostly; /* We can only have one EISA adapter in the system because neither * implementation can be flexed. 
*/ static struct eisa_ba { struct pci_hba_data hba; unsigned long eeprom_addr; struct eisa_root_device root; } eisa_dev; /* Port ops */ static inline unsigned long eisa_permute(unsigned short port) { if (port & 0x300) { return 0xfc000000 | ((port & 0xfc00) >> 6) | ((port & 0x3f8) << 9) | (port & 7); } else { return 0xfc000000 | port; } } unsigned char eisa_in8(unsigned short port) { if (EISA_bus) return gsc_readb(eisa_permute(port)); return 0xff; } unsigned short eisa_in16(unsigned short port) { if (EISA_bus) return le16_to_cpu(gsc_readw(eisa_permute(port))); return 0xffff; } unsigned int eisa_in32(unsigned short port) { if (EISA_bus) return le32_to_cpu(gsc_readl(eisa_permute(port))); return 0xffffffff; } void eisa_out8(unsigned char data, unsigned short port) { if (EISA_bus) gsc_writeb(data, eisa_permute(port)); } void eisa_out16(unsigned short data, unsigned short port) { if (EISA_bus) gsc_writew(cpu_to_le16(data), eisa_permute(port)); } void eisa_out32(unsigned int data, unsigned short port) { if (EISA_bus) gsc_writel(cpu_to_le32(data), eisa_permute(port)); } #ifndef CONFIG_PCI /* We call these directly without PCI. See asm/io.h. */ EXPORT_SYMBOL(eisa_in8); EXPORT_SYMBOL(eisa_in16); EXPORT_SYMBOL(eisa_in32); EXPORT_SYMBOL(eisa_out8); EXPORT_SYMBOL(eisa_out16); EXPORT_SYMBOL(eisa_out32); #endif /* Interrupt handling */ /* cached interrupt mask registers */ static int master_mask; static int slave_mask; /* the trig level can be set with the * eisa_irq_edge=n,n,n commandline parameter * We should really read this from the EEPROM * in the furure. 
*/ /* irq 13,8,2,1,0 must be edge */ static unsigned int eisa_irq_level __read_mostly; /* default to edge triggered */ /* called by free irq */ static void eisa_disable_irq(unsigned int irq) { unsigned long flags; EISA_DBG("disable irq %d\n", irq); /* just mask for now */ spin_lock_irqsave(&eisa_irq_lock, flags); if (irq & 8) { slave_mask |= (1 << (irq&7)); eisa_out8(slave_mask, 0xa1); } else { master_mask |= (1 << (irq&7)); eisa_out8(master_mask, 0x21); } spin_unlock_irqrestore(&eisa_irq_lock, flags); EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); } /* called by request irq */ static void eisa_enable_irq(unsigned int irq) { unsigned long flags; EISA_DBG("enable irq %d\n", irq); spin_lock_irqsave(&eisa_irq_lock, flags); if (irq & 8) { slave_mask &= ~(1 << (irq&7)); eisa_out8(slave_mask, 0xa1); } else { master_mask &= ~(1 << (irq&7)); eisa_out8(master_mask, 0x21); } spin_unlock_irqrestore(&eisa_irq_lock, flags); EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); } static unsigned int eisa_startup_irq(unsigned int irq) { eisa_enable_irq(irq); return 0; } static struct irq_chip eisa_interrupt_type = { .typename = "EISA", .startup = eisa_startup_irq, .shutdown = eisa_disable_irq, .enable = eisa_enable_irq, .disable = eisa_disable_irq, .ack = no_ack_irq, .end = no_end_irq, }; static irqreturn_t eisa_irq(int wax_irq, void *intr_dev) { int irq = gsc_readb(0xfc01f000); /* EISA supports 16 irqs */ unsigned long flags; spin_lock_irqsave(&eisa_irq_lock, flags); /* read IRR command */ eisa_out8(0x0a, 0x20); eisa_out8(0x0a, 0xa0); EISA_DBG("irq IAR %02x 8259-1 irr %02x 8259-2 irr %02x\n", irq, eisa_in8(0x20), eisa_in8(0xa0)); /* read ISR command */ eisa_out8(0x0a, 0x20); eisa_out8(0x0a, 0xa0); EISA_DBG("irq 8259-1 isr %02x imr %02x 8259-2 isr %02x imr %02x\n", eisa_in8(0x20), eisa_in8(0x21), eisa_in8(0xa0), eisa_in8(0xa1)); irq &= 0xf; /* mask irq and write eoi */ if (irq & 8) { slave_mask |= 
(1 << (irq&7)); eisa_out8(slave_mask, 0xa1); eisa_out8(0x60 | (irq&7),0xa0);/* 'Specific EOI' to slave */ eisa_out8(0x62,0x20); /* 'Specific EOI' to master-IRQ2 */ } else { master_mask |= (1 << (irq&7)); eisa_out8(master_mask, 0x21); eisa_out8(0x60|irq,0x20); /* 'Specific EOI' to master */ } spin_unlock_irqrestore(&eisa_irq_lock, flags); __do_IRQ(irq); spin_lock_irqsave(&eisa_irq_lock, flags); /* unmask */ if (irq & 8) { slave_mask &= ~(1 << (irq&7)); eisa_out8(slave_mask, 0xa1); } else { master_mask &= ~(1 << (irq&7)); eisa_out8(master_mask, 0x21); } spin_unlock_irqrestore(&eisa_irq_lock, flags); return IRQ_HANDLED; } static irqreturn_t dummy_irq2_handler(int _, void *dev) { printk(KERN_ALERT "eisa: uhh, irq2?\n"); return IRQ_HANDLED; } static struct irqaction irq2_action = { .handler = dummy_irq2_handler, .name = "cascade", }; static void init_eisa_pic(void) { unsigned long flags; spin_lock_irqsave(&eisa_irq_lock, flags); eisa_out8(0xff, 0x21); /* mask during init */ eisa_out8(0xff, 0xa1); /* mask during init */ /* master pic */ eisa_out8(0x11,0x20); /* ICW1 */ eisa_out8(0x00,0x21); /* ICW2 */ eisa_out8(0x04,0x21); /* ICW3 */ eisa_out8(0x01,0x21); /* ICW4 */ eisa_out8(0x40,0x20); /* OCW2 */ /* slave pic */ eisa_out8(0x11,0xa0); /* ICW1 */ eisa_out8(0x08,0xa1); /* ICW2 */ eisa_out8(0x02,0xa1); /* ICW3 */ eisa_out8(0x01,0xa1); /* ICW4 */ eisa_out8(0x40,0xa0); /* OCW2 */ udelay(100); slave_mask = 0xff; master_mask = 0xfb; eisa_out8(slave_mask, 0xa1); /* OCW1 */ eisa_out8(master_mask, 0x21); /* OCW1 */ /* setup trig level */ EISA_DBG("EISA edge/level %04x\n", eisa_irq_level); eisa_out8(eisa_irq_level&0xff, 0x4d0); /* Set all irq's to edge */ eisa_out8((eisa_irq_level >> 8) & 0xff, 0x4d1); EISA_DBG("pic0 mask %02x\n", eisa_in8(0x21)); EISA_DBG("pic1 mask %02x\n", eisa_in8(0xa1)); EISA_DBG("pic0 edge/level %02x\n", eisa_in8(0x4d0)); EISA_DBG("pic1 edge/level %02x\n", eisa_in8(0x4d1)); spin_unlock_irqrestore(&eisa_irq_lock, flags); } /* Device initialisation */ #define 
is_mongoose(dev) (dev->id.sversion == 0x00076) static int __init eisa_probe(struct parisc_device *dev) { int i, result; char *name = is_mongoose(dev) ? "Mongoose" : "Wax"; printk(KERN_INFO "%s EISA Adapter found at 0x%08lx\n", name, (unsigned long)dev->hpa.start); eisa_dev.hba.dev = dev; eisa_dev.hba.iommu = ccio_get_iommu(dev); eisa_dev.hba.lmmio_space.name = "EISA"; eisa_dev.hba.lmmio_space.start = F_EXTEND(0xfc000000); eisa_dev.hba.lmmio_space.end = F_EXTEND(0xffbfffff); eisa_dev.hba.lmmio_space.flags = IORESOURCE_MEM; result = ccio_request_resource(dev, &eisa_dev.hba.lmmio_space); if (result < 0) { printk(KERN_ERR "EISA: failed to claim EISA Bus address space!\n"); return result; } eisa_dev.hba.io_space.name = "EISA"; eisa_dev.hba.io_space.start = 0; eisa_dev.hba.io_space.end = 0xffff; eisa_dev.hba.lmmio_space.flags = IORESOURCE_IO; result = request_resource(&ioport_resource, &eisa_dev.hba.io_space); if (result < 0) { printk(KERN_ERR "EISA: failed to claim EISA Bus port space!\n"); return result; } pcibios_register_hba(&eisa_dev.hba); result = request_irq(dev->irq, eisa_irq, IRQF_SHARED, "EISA", &eisa_dev); if (result) { printk(KERN_ERR "EISA: request_irq failed!\n"); return result; } /* Reserve IRQ2 */ irq_to_desc(2)->action = &irq2_action; for (i = 0; i < 16; i++) { irq_to_desc(i)->chip = &eisa_interrupt_type; } EISA_bus = 1; if (dev->num_addrs) { /* newer firmware hand out the eeprom address */ eisa_dev.eeprom_addr = dev->addr[0]; } else { /* old firmware, need to figure out the box */ if (is_mongoose(dev)) { eisa_dev.eeprom_addr = SNAKES_EEPROM_BASE_ADDR; } else { eisa_dev.eeprom_addr = MIRAGE_EEPROM_BASE_ADDR; } } eisa_eeprom_addr = ioremap_nocache(eisa_dev.eeprom_addr, HPEE_MAX_LENGTH); result = eisa_enumerator(eisa_dev.eeprom_addr, &eisa_dev.hba.io_space, &eisa_dev.hba.lmmio_space); init_eisa_pic(); if (result >= 0) { /* FIXME : Don't enumerate the bus twice. 
*/ eisa_dev.root.dev = &dev->dev; dev_set_drvdata(&dev->dev, &eisa_dev.root); eisa_dev.root.bus_base_addr = 0; eisa_dev.root.res = &eisa_dev.hba.io_space; eisa_dev.root.slots = result; eisa_dev.root.dma_mask = 0xffffffff; /* wild guess */ if (eisa_root_register (&eisa_dev.root)) { printk(KERN_ERR "EISA: Failed to register EISA root\n"); return -1; } } return 0; } static const struct parisc_device_id eisa_tbl[] = { { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00076 }, /* Mongoose */ { HPHW_BA, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, 0x00090 }, /* Wax EISA */ { 0, } }; MODULE_DEVICE_TABLE(parisc, eisa_tbl); static struct parisc_driver eisa_driver = { .name = "eisa_ba", .id_table = eisa_tbl, .probe = eisa_probe, }; void __init eisa_init(void) { register_parisc_driver(&eisa_driver); } static unsigned int eisa_irq_configured; void eisa_make_irq_level(int num) { if (eisa_irq_configured& (1<<num)) { printk(KERN_WARNING "IRQ %d polarity configured twice (last to level)\n", num); } eisa_irq_level |= (1<<num); /* set the corresponding bit */ eisa_irq_configured |= (1<<num); /* set the corresponding bit */ } void eisa_make_irq_edge(int num) { if (eisa_irq_configured& (1<<num)) { printk(KERN_WARNING "IRQ %d polarity configured twice (last to edge)\n", num); } eisa_irq_level &= ~(1<<num); /* clear the corresponding bit */ eisa_irq_configured |= (1<<num); /* set the corresponding bit */ } static int __init eisa_irq_setup(char *str) { char *cur = str; int val; EISA_DBG("IRQ setup\n"); while (cur != NULL) { char *pe; val = (int) simple_strtoul(cur, &pe, 0); if (val > 15 || val < 0) { printk(KERN_ERR "eisa: EISA irq value are 0-15\n"); continue; } if (val == 2) { val = 9; } eisa_make_irq_edge(val); /* clear the corresponding bit */ EISA_DBG("setting IRQ %d to edge-triggered mode\n", val); if ((cur = strchr(cur, ','))) { cur++; } else { break; } } return 1; } __setup("eisa_irq_edge=", eisa_irq_setup);
gpl-2.0
KenYang/2.6.29
drivers/net/lne390.c
796
12830
/* lne390.c Linux driver for Mylex LNE390 EISA Network Adapter Copyright (C) 1996-1998, Paul Gortmaker. This software may be used and distributed according to the terms of the GNU General Public License, incorporated herein by reference. Information and Code Sources: 1) Based upon framework of es3210 driver. 2) The existing myriad of other Linux 8390 drivers by Donald Becker. 3) Russ Nelson's asm packet driver provided additional info. 4) Info for getting IRQ and sh-mem gleaned from the EISA cfg files. The LNE390 is an EISA shared memory NS8390 implementation. Note that all memory copies to/from the board must be 32bit transfers. There are two versions of the card: the lne390a and the lne390b. Going by the EISA cfg files, the "a" has jumpers to select between BNC/AUI, but the "b" also has RJ-45 and selection is via the SCU. The shared memory address selection is also slightly different. Note that shared memory address > 1MB are supported with this driver. You can try <http://www.mylex.com> if you want more info, as I've never even seen one of these cards. 
:) Arnaldo Carvalho de Melo <acme@conectiva.com.br> - 2000/09/01 - get rid of check_region - no need to check if dev == NULL in lne390_probe1 */ static const char *version = "lne390.c: Driver revision v0.99.1, 01/09/2000\n"; #include <linux/module.h> #include <linux/eisa.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <asm/io.h> #include <asm/system.h> #include "8390.h" #define DRV_NAME "lne390" static int lne390_probe1(struct net_device *dev, int ioaddr); static void lne390_reset_8390(struct net_device *dev); static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page); static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset); static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page); #define LNE390_START_PG 0x00 /* First page of TX buffer */ #define LNE390_STOP_PG 0x80 /* Last page +1 of RX ring */ #define LNE390_ID_PORT 0xc80 /* Same for all EISA cards */ #define LNE390_IO_EXTENT 0x20 #define LNE390_SA_PROM 0x16 /* Start of e'net addr. */ #define LNE390_RESET_PORT 0xc84 /* From the pkt driver source */ #define LNE390_NIC_OFFSET 0x00 /* Hello, the 8390 is *here* */ #define LNE390_ADDR0 0x00 /* 3 byte vendor prefix */ #define LNE390_ADDR1 0x80 #define LNE390_ADDR2 0xe5 #define LNE390_ID0 0x10009835 /* 0x3598 = 01101 01100 11000 = mlx */ #define LNE390_ID1 0x11009835 /* above is the 390A, this is 390B */ #define LNE390_CFG1 0xc84 /* NB: 0xc84 is also "reset" port. */ #define LNE390_CFG2 0xc90 /* * You can OR any of the following bits together and assign it * to LNE390_DEBUG to get verbose driver info during operation. * Currently only the probe one is implemented. 
*/ #define LNE390_D_PROBE 0x01 #define LNE390_D_RX_PKT 0x02 #define LNE390_D_TX_PKT 0x04 #define LNE390_D_IRQ 0x08 #define LNE390_DEBUG 0 static unsigned char irq_map[] __initdata = {15, 12, 11, 10, 9, 7, 5, 3}; static unsigned int shmem_mapA[] __initdata = {0xff, 0xfe, 0xfd, 0xfff, 0xffe, 0xffc, 0x0d, 0x0}; static unsigned int shmem_mapB[] __initdata = {0xff, 0xfe, 0x0e, 0xfff, 0xffe, 0xffc, 0x0d, 0x0}; /* * Probe for the card. The best way is to read the EISA ID if it * is known. Then we can check the prefix of the station address * PROM for a match against the value assigned to Mylex. */ static int __init do_lne390_probe(struct net_device *dev) { unsigned short ioaddr = dev->base_addr; int irq = dev->irq; int mem_start = dev->mem_start; int ret; if (ioaddr > 0x1ff) { /* Check a single specified location. */ if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) return -EBUSY; ret = lne390_probe1(dev, ioaddr); if (ret) release_region(ioaddr, LNE390_IO_EXTENT); return ret; } else if (ioaddr > 0) /* Don't probe at all. */ return -ENXIO; if (!EISA_bus) { #if LNE390_DEBUG & LNE390_D_PROBE printk("lne390-debug: Not an EISA bus. Not probing high ports.\n"); #endif return -ENXIO; } /* EISA spec allows for up to 16 slots, but 8 is typical. 
*/ for (ioaddr = 0x1000; ioaddr < 0x9000; ioaddr += 0x1000) { if (!request_region(ioaddr, LNE390_IO_EXTENT, DRV_NAME)) continue; if (lne390_probe1(dev, ioaddr) == 0) return 0; release_region(ioaddr, LNE390_IO_EXTENT); dev->irq = irq; dev->mem_start = mem_start; } return -ENODEV; } #ifndef MODULE struct net_device * __init lne390_probe(int unit) { struct net_device *dev = alloc_ei_netdev(); int err; if (!dev) return ERR_PTR(-ENOMEM); sprintf(dev->name, "eth%d", unit); netdev_boot_setup_check(dev); err = do_lne390_probe(dev); if (err) goto out; return dev; out: free_netdev(dev); return ERR_PTR(err); } #endif static int __init lne390_probe1(struct net_device *dev, int ioaddr) { int i, revision, ret; unsigned long eisa_id; if (inb_p(ioaddr + LNE390_ID_PORT) == 0xff) return -ENODEV; #if LNE390_DEBUG & LNE390_D_PROBE printk("lne390-debug: probe at %#x, ID %#8x\n", ioaddr, inl(ioaddr + LNE390_ID_PORT)); printk("lne390-debug: config regs: %#x %#x\n", inb(ioaddr + LNE390_CFG1), inb(ioaddr + LNE390_CFG2)); #endif /* Check the EISA ID of the card. */ eisa_id = inl(ioaddr + LNE390_ID_PORT); if ((eisa_id != LNE390_ID0) && (eisa_id != LNE390_ID1)) { return -ENODEV; } revision = (eisa_id >> 24) & 0x01; /* 0 = rev A, 1 rev B */ #if 0 /* Check the Mylex vendor ID as well. Not really required. */ if (inb(ioaddr + LNE390_SA_PROM + 0) != LNE390_ADDR0 || inb(ioaddr + LNE390_SA_PROM + 1) != LNE390_ADDR1 || inb(ioaddr + LNE390_SA_PROM + 2) != LNE390_ADDR2 ) { printk("lne390.c: card not found"); for(i = 0; i < ETHER_ADDR_LEN; i++) printk(" %02x", inb(ioaddr + LNE390_SA_PROM + i)); printk(" (invalid prefix).\n"); return -ENODEV; } #endif for(i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = inb(ioaddr + LNE390_SA_PROM + i); printk("lne390.c: LNE390%X in EISA slot %d, address %pM.\n", 0xa+revision, ioaddr/0x1000, dev->dev_addr); printk("lne390.c: "); /* Snarf the interrupt now. 
CFG file has them all listed as `edge' with share=NO */ if (dev->irq == 0) { unsigned char irq_reg = inb(ioaddr + LNE390_CFG2) >> 3; dev->irq = irq_map[irq_reg & 0x07]; printk("using"); } else { /* This is useless unless we reprogram the card here too */ if (dev->irq == 2) dev->irq = 9; /* Doh! */ printk("assigning"); } printk(" IRQ %d,", dev->irq); if ((ret = request_irq(dev->irq, ei_interrupt, 0, DRV_NAME, dev))) { printk (" unable to get IRQ %d.\n", dev->irq); return ret; } if (dev->mem_start == 0) { unsigned char mem_reg = inb(ioaddr + LNE390_CFG2) & 0x07; if (revision) /* LNE390B */ dev->mem_start = shmem_mapB[mem_reg] * 0x10000; else /* LNE390A */ dev->mem_start = shmem_mapA[mem_reg] * 0x10000; printk(" using "); } else { /* Should check for value in shmem_map and reprogram the card to use it */ dev->mem_start &= 0xfff0000; printk(" assigning "); } printk("%dkB memory at physical address %#lx\n", LNE390_STOP_PG/4, dev->mem_start); /* BEWARE!! Some dain-bramaged EISA SCUs will allow you to put the card mem within the region covered by `normal' RAM !!! ioremap() will fail in that case. 
*/ ei_status.mem = ioremap(dev->mem_start, LNE390_STOP_PG*0x100); if (!ei_status.mem) { printk(KERN_ERR "lne390.c: Unable to remap card memory above 1MB !!\n"); printk(KERN_ERR "lne390.c: Try using EISA SCU to set memory below 1MB.\n"); printk(KERN_ERR "lne390.c: Driver NOT installed.\n"); ret = -EAGAIN; goto cleanup; } printk("lne390.c: remapped %dkB card memory to virtual address %p\n", LNE390_STOP_PG/4, ei_status.mem); dev->mem_start = (unsigned long)ei_status.mem; dev->mem_end = dev->mem_start + (LNE390_STOP_PG - LNE390_START_PG)*256; /* The 8390 offset is zero for the LNE390 */ dev->base_addr = ioaddr; ei_status.name = "LNE390"; ei_status.tx_start_page = LNE390_START_PG; ei_status.rx_start_page = LNE390_START_PG + TX_PAGES; ei_status.stop_page = LNE390_STOP_PG; ei_status.word16 = 1; if (ei_debug > 0) printk(version); ei_status.reset_8390 = &lne390_reset_8390; ei_status.block_input = &lne390_block_input; ei_status.block_output = &lne390_block_output; ei_status.get_8390_hdr = &lne390_get_8390_hdr; dev->netdev_ops = &ei_netdev_ops; NS8390_init(dev, 0); ret = register_netdev(dev); if (ret) goto unmap; return 0; unmap: if (ei_status.reg0) iounmap(ei_status.mem); cleanup: free_irq(dev->irq, dev); return ret; } /* * Reset as per the packet driver method. Judging by the EISA cfg * file, this just toggles the "Board Enable" bits (bit 2 and 0). */ static void lne390_reset_8390(struct net_device *dev) { unsigned short ioaddr = dev->base_addr; outb(0x04, ioaddr + LNE390_RESET_PORT); if (ei_debug > 1) printk("%s: resetting the LNE390...", dev->name); mdelay(2); ei_status.txing = 0; outb(0x01, ioaddr + LNE390_RESET_PORT); if (ei_debug > 1) printk("reset done\n"); return; } /* * Note: In the following three functions is the implicit assumption * that the associated memcpy will only use "rep; movsl" as long as * we keep the counts as some multiple of doublewords. 
This is a * requirement of the hardware, and also prevents us from using * eth_io_copy_and_sum() since we can't guarantee it will limit * itself to doubleword access. */ /* * Grab the 8390 specific header. Similar to the block_input routine, but * we don't need to be concerned with ring wrap as the header will be at * the start of a page, so we optimize accordingly. (A single doubleword.) */ static void lne390_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { void __iomem *hdr_start = ei_status.mem + ((ring_page - LNE390_START_PG)<<8); memcpy_fromio(hdr, hdr_start, sizeof(struct e8390_pkt_hdr)); hdr->count = (hdr->count + 3) & ~3; /* Round up allocation. */ } /* * Block input and output are easy on shared memory ethercards, the only * complication is when the ring buffer wraps. The count will already * be rounded up to a doubleword value via lne390_get_8390_hdr() above. */ static void lne390_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { void __iomem *xfer_start = ei_status.mem + ring_offset - (LNE390_START_PG<<8); if (ring_offset + count > (LNE390_STOP_PG<<8)) { /* Packet wraps over end of ring buffer. */ int semi_count = (LNE390_STOP_PG<<8) - ring_offset; memcpy_fromio(skb->data, xfer_start, semi_count); count -= semi_count; memcpy_fromio(skb->data + semi_count, ei_status.mem + (TX_PAGES<<8), count); } else { /* Packet is in one chunk. 
*/ memcpy_fromio(skb->data, xfer_start, count); } } static void lne390_block_output(struct net_device *dev, int count, const unsigned char *buf, int start_page) { void __iomem *shmem = ei_status.mem + ((start_page - LNE390_START_PG)<<8); count = (count + 3) & ~3; /* Round up to doubleword */ memcpy_toio(shmem, buf, count); } #ifdef MODULE #define MAX_LNE_CARDS 4 /* Max number of LNE390 cards per module */ static struct net_device *dev_lne[MAX_LNE_CARDS]; static int io[MAX_LNE_CARDS]; static int irq[MAX_LNE_CARDS]; static int mem[MAX_LNE_CARDS]; module_param_array(io, int, NULL, 0); module_param_array(irq, int, NULL, 0); module_param_array(mem, int, NULL, 0); MODULE_PARM_DESC(io, "I/O base address(es)"); MODULE_PARM_DESC(irq, "IRQ number(s)"); MODULE_PARM_DESC(mem, "memory base address(es)"); MODULE_DESCRIPTION("Mylex LNE390A/B EISA Ethernet driver"); MODULE_LICENSE("GPL"); int __init init_module(void) { struct net_device *dev; int this_dev, found = 0; for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) { if (io[this_dev] == 0 && this_dev != 0) break; dev = alloc_ei_netdev(); if (!dev) break; dev->irq = irq[this_dev]; dev->base_addr = io[this_dev]; dev->mem_start = mem[this_dev]; if (do_lne390_probe(dev) == 0) { dev_lne[found++] = dev; continue; } free_netdev(dev); printk(KERN_WARNING "lne390.c: No LNE390 card found (i/o = 0x%x).\n", io[this_dev]); break; } if (found) return 0; return -ENXIO; } static void cleanup_card(struct net_device *dev) { free_irq(dev->irq, dev); release_region(dev->base_addr, LNE390_IO_EXTENT); iounmap(ei_status.mem); } void __exit cleanup_module(void) { int this_dev; for (this_dev = 0; this_dev < MAX_LNE_CARDS; this_dev++) { struct net_device *dev = dev_lne[this_dev]; if (dev) { unregister_netdev(dev); cleanup_card(dev); free_netdev(dev); } } } #endif /* MODULE */
gpl-2.0
ryanli/kernel_huawei_c8650
drivers/ata/sata_svw.c
1052
15474
/* * sata_svw.c - ServerWorks / Apple K2 SATA * * Maintained by: Benjamin Herrenschmidt <benh@kernel.crashing.org> and * Jeff Garzik <jgarzik@pobox.com> * Please ALWAYS copy linux-ide@vger.kernel.org * on emails. * * Copyright 2003 Benjamin Herrenschmidt <benh@kernel.crashing.org> * * Bits from Jeff Garzik, Copyright RedHat, Inc. * * This driver probably works with non-Apple versions of the * Broadcom chipset... * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * * libata documentation is available via 'make {ps|pdf}docs', * as Documentation/DocBook/libata.* * * Hardware documentation available under NDA. 
* */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/device.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi.h> #include <linux/libata.h> #ifdef CONFIG_PPC_OF #include <asm/prom.h> #include <asm/pci-bridge.h> #endif /* CONFIG_PPC_OF */ #define DRV_NAME "sata_svw" #define DRV_VERSION "2.3" enum { /* ap->flags bits */ K2_FLAG_SATA_8_PORTS = (1 << 24), K2_FLAG_NO_ATAPI_DMA = (1 << 25), K2_FLAG_BAR_POS_3 = (1 << 26), /* Taskfile registers offsets */ K2_SATA_TF_CMD_OFFSET = 0x00, K2_SATA_TF_DATA_OFFSET = 0x00, K2_SATA_TF_ERROR_OFFSET = 0x04, K2_SATA_TF_NSECT_OFFSET = 0x08, K2_SATA_TF_LBAL_OFFSET = 0x0c, K2_SATA_TF_LBAM_OFFSET = 0x10, K2_SATA_TF_LBAH_OFFSET = 0x14, K2_SATA_TF_DEVICE_OFFSET = 0x18, K2_SATA_TF_CMDSTAT_OFFSET = 0x1c, K2_SATA_TF_CTL_OFFSET = 0x20, /* DMA base */ K2_SATA_DMA_CMD_OFFSET = 0x30, /* SCRs base */ K2_SATA_SCR_STATUS_OFFSET = 0x40, K2_SATA_SCR_ERROR_OFFSET = 0x44, K2_SATA_SCR_CONTROL_OFFSET = 0x48, /* Others */ K2_SATA_SICR1_OFFSET = 0x80, K2_SATA_SICR2_OFFSET = 0x84, K2_SATA_SIM_OFFSET = 0x88, /* Port stride */ K2_SATA_PORT_OFFSET = 0x100, chip_svw4 = 0, chip_svw8 = 1, chip_svw42 = 2, /* bar 3 */ chip_svw43 = 3, /* bar 5 */ }; static u8 k2_stat_check_status(struct ata_port *ap); static int k2_sata_check_atapi_dma(struct ata_queued_cmd *qc) { u8 cmnd = qc->scsicmd->cmnd[0]; if (qc->ap->flags & K2_FLAG_NO_ATAPI_DMA) return -1; /* ATAPI DMA not supported */ else { switch (cmnd) { case READ_10: case READ_12: case READ_16: case WRITE_10: case WRITE_12: case WRITE_16: return 0; default: return -1; } } } static int k2_sata_scr_read(struct ata_link *link, unsigned int sc_reg, u32 *val) { if (sc_reg > SCR_CONTROL) return -EINVAL; *val = readl(link->ap->ioaddr.scr_addr + (sc_reg * 4)); return 0; } static int k2_sata_scr_write(struct ata_link *link, unsigned int sc_reg, u32 val) { if 
(sc_reg > SCR_CONTROL) return -EINVAL; writel(val, link->ap->ioaddr.scr_addr + (sc_reg * 4)); return 0; } static void k2_sata_tf_load(struct ata_port *ap, const struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR; if (tf->ctl != ap->last_ctl) { writeb(tf->ctl, ioaddr->ctl_addr); ap->last_ctl = tf->ctl; ata_wait_idle(ap); } if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) { writew(tf->feature | (((u16)tf->hob_feature) << 8), ioaddr->feature_addr); writew(tf->nsect | (((u16)tf->hob_nsect) << 8), ioaddr->nsect_addr); writew(tf->lbal | (((u16)tf->hob_lbal) << 8), ioaddr->lbal_addr); writew(tf->lbam | (((u16)tf->hob_lbam) << 8), ioaddr->lbam_addr); writew(tf->lbah | (((u16)tf->hob_lbah) << 8), ioaddr->lbah_addr); } else if (is_addr) { writew(tf->feature, ioaddr->feature_addr); writew(tf->nsect, ioaddr->nsect_addr); writew(tf->lbal, ioaddr->lbal_addr); writew(tf->lbam, ioaddr->lbam_addr); writew(tf->lbah, ioaddr->lbah_addr); } if (tf->flags & ATA_TFLAG_DEVICE) writeb(tf->device, ioaddr->device_addr); ata_wait_idle(ap); } static void k2_sata_tf_read(struct ata_port *ap, struct ata_taskfile *tf) { struct ata_ioports *ioaddr = &ap->ioaddr; u16 nsect, lbal, lbam, lbah, feature; tf->command = k2_stat_check_status(ap); tf->device = readw(ioaddr->device_addr); feature = readw(ioaddr->error_addr); nsect = readw(ioaddr->nsect_addr); lbal = readw(ioaddr->lbal_addr); lbam = readw(ioaddr->lbam_addr); lbah = readw(ioaddr->lbah_addr); tf->feature = feature; tf->nsect = nsect; tf->lbal = lbal; tf->lbam = lbam; tf->lbah = lbah; if (tf->flags & ATA_TFLAG_LBA48) { tf->hob_feature = feature >> 8; tf->hob_nsect = nsect >> 8; tf->hob_lbal = lbal >> 8; tf->hob_lbam = lbam >> 8; tf->hob_lbah = lbah >> 8; } } /** * k2_bmdma_setup_mmio - Set up PCI IDE BMDMA transaction (MMIO) * @qc: Info associated with this ATA transaction. 
* * LOCKING: * spin_lock_irqsave(host lock) */ static void k2_bmdma_setup_mmio(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE); u8 dmactl; void __iomem *mmio = ap->ioaddr.bmdma_addr; /* load PRD table addr. */ mb(); /* make sure PRD table writes are visible to controller */ writel(ap->bmdma_prd_dma, mmio + ATA_DMA_TABLE_OFS); /* specify data direction, triple-check start bit is clear */ dmactl = readb(mmio + ATA_DMA_CMD); dmactl &= ~(ATA_DMA_WR | ATA_DMA_START); if (!rw) dmactl |= ATA_DMA_WR; writeb(dmactl, mmio + ATA_DMA_CMD); /* issue r/w command if this is not a ATA DMA command*/ if (qc->tf.protocol != ATA_PROT_DMA) ap->ops->sff_exec_command(ap, &qc->tf); } /** * k2_bmdma_start_mmio - Start a PCI IDE BMDMA transaction (MMIO) * @qc: Info associated with this ATA transaction. * * LOCKING: * spin_lock_irqsave(host lock) */ static void k2_bmdma_start_mmio(struct ata_queued_cmd *qc) { struct ata_port *ap = qc->ap; void __iomem *mmio = ap->ioaddr.bmdma_addr; u8 dmactl; /* start host DMA transaction */ dmactl = readb(mmio + ATA_DMA_CMD); writeb(dmactl | ATA_DMA_START, mmio + ATA_DMA_CMD); /* This works around possible data corruption. On certain SATA controllers that can be seen when the r/w command is given to the controller before the host DMA is started. On a Read command, the controller would initiate the command to the drive even before it sees the DMA start. When there are very fast drives connected to the controller, or when the data request hits in the drive cache, there is the possibility that the drive returns a part or all of the requested data to the controller before the DMA start is issued. In this case, the controller would become confused as to what to do with the data. In the worst case when all the data is returned back to the controller, the controller could hang. In other cases it could return partial data returning in data corruption. 
This problem has been seen in PPC systems and can also appear on an system with very fast disks, where the SATA controller is sitting behind a number of bridges, and hence there is significant latency between the r/w command and the start command. */ /* issue r/w command if the access is to ATA */ if (qc->tf.protocol == ATA_PROT_DMA) ap->ops->sff_exec_command(ap, &qc->tf); } static u8 k2_stat_check_status(struct ata_port *ap) { return readl(ap->ioaddr.status_addr); } #ifdef CONFIG_PPC_OF /* * k2_sata_proc_info * inout : decides on the direction of the dataflow and the meaning of the * variables * buffer: If inout==FALSE data is being written to it else read from it * *start: If inout==FALSE start of the valid data in the buffer * offset: If inout==FALSE offset from the beginning of the imaginary file * from which we start writing into the buffer * length: If inout==FALSE max number of bytes to be written into the buffer * else number of bytes in the buffer */ static int k2_sata_proc_info(struct Scsi_Host *shost, char *page, char **start, off_t offset, int count, int inout) { struct ata_port *ap; struct device_node *np; int len, index; /* Find the ata_port */ ap = ata_shost_to_port(shost); if (ap == NULL) return 0; /* Find the OF node for the PCI device proper */ np = pci_device_to_OF_node(to_pci_dev(ap->host->dev)); if (np == NULL) return 0; /* Match it to a port node */ index = (ap == ap->host->ports[0]) ? 
0 : 1; for (np = np->child; np != NULL; np = np->sibling) { const u32 *reg = of_get_property(np, "reg", NULL); if (!reg) continue; if (index == *reg) break; } if (np == NULL) return 0; len = sprintf(page, "devspec: %s\n", np->full_name); return len; } #endif /* CONFIG_PPC_OF */ static struct scsi_host_template k2_sata_sht = { ATA_BMDMA_SHT(DRV_NAME), #ifdef CONFIG_PPC_OF .proc_info = k2_sata_proc_info, #endif }; static struct ata_port_operations k2_sata_ops = { .inherits = &ata_bmdma_port_ops, .sff_tf_load = k2_sata_tf_load, .sff_tf_read = k2_sata_tf_read, .sff_check_status = k2_stat_check_status, .check_atapi_dma = k2_sata_check_atapi_dma, .bmdma_setup = k2_bmdma_setup_mmio, .bmdma_start = k2_bmdma_start_mmio, .scr_read = k2_sata_scr_read, .scr_write = k2_sata_scr_write, }; static const struct ata_port_info k2_port_info[] = { /* chip_svw4 */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &k2_sata_ops, }, /* chip_svw8 */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | K2_FLAG_NO_ATAPI_DMA | K2_FLAG_SATA_8_PORTS, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &k2_sata_ops, }, /* chip_svw42 */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO | K2_FLAG_BAR_POS_3, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &k2_sata_ops, }, /* chip_svw43 */ { .flags = ATA_FLAG_SATA | ATA_FLAG_NO_LEGACY | ATA_FLAG_MMIO, .pio_mask = ATA_PIO4, .mwdma_mask = ATA_MWDMA2, .udma_mask = ATA_UDMA6, .port_ops = &k2_sata_ops, }, }; static void k2_sata_setup_port(struct ata_ioports *port, void __iomem *base) { port->cmd_addr = base + K2_SATA_TF_CMD_OFFSET; port->data_addr = base + K2_SATA_TF_DATA_OFFSET; port->feature_addr = port->error_addr = base + K2_SATA_TF_ERROR_OFFSET; port->nsect_addr = base + K2_SATA_TF_NSECT_OFFSET; port->lbal_addr = base + K2_SATA_TF_LBAL_OFFSET; 
port->lbam_addr = base + K2_SATA_TF_LBAM_OFFSET; port->lbah_addr = base + K2_SATA_TF_LBAH_OFFSET; port->device_addr = base + K2_SATA_TF_DEVICE_OFFSET; port->command_addr = port->status_addr = base + K2_SATA_TF_CMDSTAT_OFFSET; port->altstatus_addr = port->ctl_addr = base + K2_SATA_TF_CTL_OFFSET; port->bmdma_addr = base + K2_SATA_DMA_CMD_OFFSET; port->scr_addr = base + K2_SATA_SCR_STATUS_OFFSET; } static int k2_sata_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { static int printed_version; const struct ata_port_info *ppi[] = { &k2_port_info[ent->driver_data], NULL }; struct ata_host *host; void __iomem *mmio_base; int n_ports, i, rc, bar_pos; if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); /* allocate host */ n_ports = 4; if (ppi[0]->flags & K2_FLAG_SATA_8_PORTS) n_ports = 8; host = ata_host_alloc_pinfo(&pdev->dev, ppi, n_ports); if (!host) return -ENOMEM; bar_pos = 5; if (ppi[0]->flags & K2_FLAG_BAR_POS_3) bar_pos = 3; /* * If this driver happens to only be useful on Apple's K2, then * we should check that here as it has a normal Serverworks ID */ rc = pcim_enable_device(pdev); if (rc) return rc; /* * Check if we have resources mapped at all (second function may * have been disabled by firmware) */ if (pci_resource_len(pdev, bar_pos) == 0) { /* In IDE mode we need to pin the device to ensure that pcim_release does not clear the busmaster bit in config space, clearing causes busmaster DMA to fail on ports 3 & 4 */ pcim_pin_device(pdev); return -ENODEV; } /* Request and iomap PCI regions */ rc = pcim_iomap_regions(pdev, 1 << bar_pos, DRV_NAME); if (rc == -EBUSY) pcim_pin_device(pdev); if (rc) return rc; host->iomap = pcim_iomap_table(pdev); mmio_base = host->iomap[bar_pos]; /* different controllers have different number of ports - currently 4 or 8 */ /* All ports are on the same function. Multi-function device is no * longer available. This should not be seen in any system. 
*/ for (i = 0; i < host->n_ports; i++) { struct ata_port *ap = host->ports[i]; unsigned int offset = i * K2_SATA_PORT_OFFSET; k2_sata_setup_port(&ap->ioaddr, mmio_base + offset); ata_port_pbar_desc(ap, 5, -1, "mmio"); ata_port_pbar_desc(ap, 5, offset, "port"); } rc = pci_set_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; rc = pci_set_consistent_dma_mask(pdev, ATA_DMA_MASK); if (rc) return rc; /* Clear a magic bit in SCR1 according to Darwin, those help * some funky seagate drives (though so far, those were already * set by the firmware on the machines I had access to) */ writel(readl(mmio_base + K2_SATA_SICR1_OFFSET) & ~0x00040000, mmio_base + K2_SATA_SICR1_OFFSET); /* Clear SATA error & interrupts we don't use */ writel(0xffffffff, mmio_base + K2_SATA_SCR_ERROR_OFFSET); writel(0x0, mmio_base + K2_SATA_SIM_OFFSET); pci_set_master(pdev); return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt, IRQF_SHARED, &k2_sata_sht); } /* 0x240 is device ID for Apple K2 device * 0x241 is device ID for Serverworks Frodo4 * 0x242 is device ID for Serverworks Frodo8 * 0x24a is device ID for BCM5785 (aka HT1000) HT southbridge integrated SATA * controller * */ static const struct pci_device_id k2_sata_pci_tbl[] = { { PCI_VDEVICE(SERVERWORKS, 0x0240), chip_svw4 }, { PCI_VDEVICE(SERVERWORKS, 0x0241), chip_svw8 }, { PCI_VDEVICE(SERVERWORKS, 0x0242), chip_svw4 }, { PCI_VDEVICE(SERVERWORKS, 0x024a), chip_svw4 }, { PCI_VDEVICE(SERVERWORKS, 0x024b), chip_svw4 }, { PCI_VDEVICE(SERVERWORKS, 0x0410), chip_svw42 }, { PCI_VDEVICE(SERVERWORKS, 0x0411), chip_svw43 }, { } }; static struct pci_driver k2_sata_pci_driver = { .name = DRV_NAME, .id_table = k2_sata_pci_tbl, .probe = k2_sata_init_one, .remove = ata_pci_remove_one, }; static int __init k2_sata_init(void) { return pci_register_driver(&k2_sata_pci_driver); } static void __exit k2_sata_exit(void) { pci_unregister_driver(&k2_sata_pci_driver); } MODULE_AUTHOR("Benjamin Herrenschmidt"); MODULE_DESCRIPTION("low-level driver for K2 SATA 
controller"); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(pci, k2_sata_pci_tbl); MODULE_VERSION(DRV_VERSION); module_init(k2_sata_init); module_exit(k2_sata_exit);
gpl-2.0
nimengyu2/dm3730-android-gingerbread-2.3-dk2.1-kernel
arch/microblaze/kernel/ftrace.c
1052
6137
/*
 * Ftrace support for Microblaze.
 *
 * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2009 PetaLogix
 *
 * Based on MIPS and PowerPC ftrace code
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <asm/cacheflush.h>
#include <linux/ftrace.h>

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
/*
 * Hook the return address and push it in the stack of return addrs
 * in current thread info.
 *
 * @parent:    location of the caller's saved return address (patched in place
 *             to point at return_to_handler)
 * @self_addr: address of the traced function itself
 */
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
{
	unsigned long old;
	int faulted, err;
	struct ftrace_graph_ent trace;
	unsigned long return_hooker = (unsigned long) &return_to_handler;

	if (unlikely(atomic_read(&current->tracing_graph_pause)))
		return;

	/*
	 * Protect against fault, even if it shouldn't
	 * happen. This tool is too much intrusive to
	 * ignore such a protection.
	 *
	 * Reads the old return address from *parent (%0) and stores the
	 * hooker address into it (%3); a fault in either access branches
	 * to the fixup, which sets "faulted" (%1) to 1 instead.
	 */
	asm volatile("	1:	lwi	%0, %2, 0;		\
			2:	swi	%3, %2, 0;		\
				addik	%1, r0, 0;		\
			3:					\
				.section .fixup, \"ax\";	\
			4:	brid	3b;			\
				addik	%1, r0, 1;		\
				.previous;			\
				.section __ex_table,\"a\";	\
				.word	1b,4b;			\
				.word	2b,4b;			\
				.previous;"			\
			: "=&r" (old), "=r" (faulted)
			: "r" (parent), "r" (return_hooker)
	);

	if (unlikely(faulted)) {
		ftrace_graph_stop();
		WARN_ON(1);
		return;
	}

	err = ftrace_push_return_trace(old, self_addr, &trace.depth, 0);
	if (err == -EBUSY) {
		/* return stack is full - restore the original return address */
		*parent = old;
		return;
	}

	trace.func = self_addr;
	/* Only trace if the calling function expects to */
	if (!ftrace_graph_entry(&trace)) {
		current->curr_ret_stack--;
		*parent = old;
	}
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_DYNAMIC_FTRACE
/*
 * Store instruction word @value at kernel text address @addr - it is safe
 * to do it in asm: a faulting store is caught via the exception table and
 * reported instead of oopsing.  Returns 0 on success, -EFAULT on fault.
 */
static int ftrace_modify_code(unsigned long addr, unsigned int value)
{
	int faulted = 0;

	__asm__ __volatile__("	1:	swi	%2, %1, 0;		\
					addik	%0, r0, 0;		\
				2:					\
					.section .fixup, \"ax\";	\
				3:	brid	2b;			\
					addik	%0, r0, 1;		\
					.previous;			\
					.section __ex_table,\"a\";	\
					.word	1b,3b;			\
					.previous;"			\
		: "=r" (faulted)
		: "r" (addr), "r" (value)
	);

	if (unlikely(faulted))
		return -EFAULT;

	return 0;
}

#define MICROBLAZE_NOP 0x80000000
#define MICROBLAZE_BRI 0xb800000C

static unsigned int recorded; /* whether the original instructions were saved */
static unsigned int imm; /* saved first (imm) instruction of the call site */

/* There are two approaches how to solve ftrace_make nop function - look below */
#undef USE_FTRACE_NOP

#ifdef USE_FTRACE_NOP
static unsigned int bralid; /* saved second (bralid) instruction of the call site */
#endif

int ftrace_make_nop(struct module *mod,
			struct dyn_ftrace *rec, unsigned long addr)
{
	/* we have this part of code which we are working with
	 * b000c000	imm	-16384
	 * b9fc8e30	bralid	r15, -29136	// c0008e30 <_mcount>
	 * 80000000	or	r0, r0, r0
	 *
	 * The first solution (!USE_FTRACE_NOP-could be called branch solution)
	 * b000c000	bri	12 (0xC - jump to any other instruction)
	 * b9fc8e30	bralid	r15, -29136	// c0008e30 <_mcount>
	 * 80000000	or	r0, r0, r0
	 * any other instruction
	 *
	 * The second solution (USE_FTRACE_NOP) - no jump just nops
	 * 80000000	or	r0, r0, r0
	 * 80000000	or	r0, r0, r0
	 * 80000000	or	r0, r0, r0
	 */
	int ret = 0;

	if (recorded == 0) {
		/* save the original instruction(s) once so ftrace_make_call
		 * can restore them later */
		recorded = 1;
		imm = *(unsigned int *)rec->ip;
		pr_debug("%s: imm:0x%x\n", __func__, imm);
#ifdef USE_FTRACE_NOP
		bralid = *(unsigned int *)(rec->ip + 4);
		pr_debug("%s: bralid 0x%x\n", __func__, bralid);
#endif /* USE_FTRACE_NOP */
	}

#ifdef USE_FTRACE_NOP
	ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP);
	ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP);
#else /* USE_FTRACE_NOP */
	ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI);
#endif /* USE_FTRACE_NOP */
	return ret;
}

/*
 * Re-enable the mcount call site by restoring the saved instruction(s).
 * I believe that ftrace_make_nop is called first, before this function
 * (it is what populates "imm"/"bralid").
 */
int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
{
	int ret;
	pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
		__func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
	ret = ftrace_modify_code(rec->ip, imm);
#ifdef USE_FTRACE_NOP
	pr_debug("%s: bralid:0x%x\n", __func__, bralid);
	ret += ftrace_modify_code(rec->ip + 4, bralid);
#endif /* USE_FTRACE_NOP */
	return ret;
}

int __init ftrace_dyn_arch_init(void *data)
{
	/* The return code is returned via data */
	*(unsigned long *)data = 0;

	return 0;
}

/*
 * Point the ftrace trampoline at @func: patch the imm/addik pair at
 * ftrace_call to load the new handler address into r20, and NOP the
 * early "rtsd r15, 8" in ftrace_caller so the handler is reached.
 */
int ftrace_update_ftrace_func(ftrace_func_t func)
{
	unsigned long ip = (unsigned long)(&ftrace_call);
	unsigned int upper = (unsigned int)func;
	unsigned int lower = (unsigned int)func;
	int ret = 0;

	/* create proper saving to ftrace_call poll */
	upper = 0xb0000000 + (upper >> 16); /* imm func_upper */
	lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */

	pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n",
		__func__, (unsigned int)func, (unsigned int)ip, upper, lower);

	/* save upper and lower code */
	ret = ftrace_modify_code(ip, upper);
	ret += ftrace_modify_code(ip + 4, lower);

	/* We just need to replace the rtsd r15, 8 with NOP */
	ret += ftrace_modify_code((unsigned long)&ftrace_caller,
				  MICROBLAZE_NOP);

	/* All changes are done - lets do caches consistent */
	flush_icache();
	return ret;
}

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
unsigned int old_jump; /* saving place for jump instruction */

/* Patch out the jump at ftrace_call_graph so graph tracing code runs. */
int ftrace_enable_ftrace_graph_caller(void)
{
	unsigned int ret;
	unsigned long ip = (unsigned long)(&ftrace_call_graph);

	old_jump = *(unsigned int *)ip; /* save jump over instruction */
	ret = ftrace_modify_code(ip, MICROBLAZE_NOP);
	flush_icache();

	pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump);
	return ret;
}

/* Restore the saved jump, disabling the graph-tracer entry path. */
int ftrace_disable_ftrace_graph_caller(void)
{
	unsigned int ret;
	unsigned long ip = (unsigned long)(&ftrace_call_graph);

	ret = ftrace_modify_code(ip, old_jump);
	flush_icache();

	pr_debug("%s\n", __func__);
	return ret;
}
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_DYNAMIC_FTRACE */
gpl-2.0
YAOSP/kernel_huawei_angler
tools/perf/builtin-kvm.c
2076
24509
#include "builtin.h"
#include "perf.h"

#include "util/evsel.h"
#include "util/util.h"
#include "util/cache.h"
#include "util/symbol.h"
#include "util/thread.h"
#include "util/header.h"
#include "util/session.h"

#include "util/parse-options.h"
#include "util/trace-event.h"
#include "util/debug.h"
#include <lk/debugfs.h>
#include "util/tool.h"
#include "util/stat.h"

#include <sys/prctl.h>

#include <semaphore.h>
#include <pthread.h>
#include <math.h>

#if defined(__i386__) || defined(__x86_64__)
#include <asm/svm.h>
#include <asm/vmx.h>
#include <asm/kvm.h>

/* Identity of one observed guest event (e.g. exit reason, mmio gpa+type). */
struct event_key {
	#define INVALID_KEY     (~0ULL)
	u64 key;
	int info;
};

/* Accumulated timing for one event kind: total time plus running stats. */
struct kvm_event_stats {
	u64 time;
	struct stats stats;
};

struct kvm_event {
	struct list_head hash_entry;	/* link in kvm_events_cache bucket */
	struct rb_node rb;		/* link in the sorted result tree */

	struct event_key key;

	struct kvm_event_stats total;	/* stats over all vcpus */

	#define DEFAULT_VCPU_NUM 8
	int max_vcpu;			/* capacity of the vcpu[] array */
	struct kvm_event_stats *vcpu;	/* per-vcpu stats, grown on demand */
};

typedef int (*key_cmp_fun)(struct kvm_event*, struct kvm_event*, int);

/* Name -> comparison function pair used for "--key" sorting. */
struct kvm_event_key {
	const char *name;
	key_cmp_fun key;
};

struct perf_kvm_stat;

/*
 * Per-report-type callbacks: recognize the begin/end tracepoints of an
 * event and render its key for printing.
 */
struct kvm_events_ops {
	bool (*is_begin_event)(struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct event_key *key);
	bool (*is_end_event)(struct perf_evsel *evsel,
			     struct perf_sample *sample, struct event_key *key);
	void (*decode_key)(struct perf_kvm_stat *kvm, struct event_key *key,
			   char decode[20]);
	const char *name;
};

struct exit_reasons_table {
	unsigned long exit_code;
	const char *reason;
};

#define EVENTS_BITS		12
#define EVENTS_CACHE_SIZE	(1UL << EVENTS_BITS)

/* Top-level state for "perf kvm stat": options, session, and results. */
struct perf_kvm_stat {
	struct perf_tool    tool;
	struct perf_session *session;

	const char *file_name;
	const char *report_event;
	const char *sort_key;
	int trace_vcpu;			/* -1 == all vcpus */

	struct exit_reasons_table *exit_reasons;
	int exit_reasons_size;
	const char *exit_reasons_isa;

	struct kvm_events_ops *events_ops;
	key_cmp_fun compare;
	struct list_head kvm_events_cache[EVENTS_CACHE_SIZE];

	u64 total_time;
	u64 total_count;

	struct rb_root result;
};

static void exit_event_get_key(struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct event_key *key)
{
	key->info = 0;
	key->key = perf_evsel__intval(evsel, sample, "exit_reason");
}

static bool kvm_exit_event(struct perf_evsel *evsel)
{
	return !strcmp(evsel->name, "kvm:kvm_exit");
}

static bool exit_event_begin(struct perf_evsel *evsel,
			     struct perf_sample *sample, struct event_key *key)
{
	if (kvm_exit_event(evsel)) {
		exit_event_get_key(evsel, sample, key);
		return true;
	}

	return false;
}

static bool kvm_entry_event(struct perf_evsel *evsel)
{
	return !strcmp(evsel->name, "kvm:kvm_entry");
}

static bool exit_event_end(struct perf_evsel *evsel,
			   struct perf_sample *sample __maybe_unused,
			   struct event_key *key __maybe_unused)
{
	return kvm_entry_event(evsel);
}

static struct exit_reasons_table vmx_exit_reasons[] = {
	VMX_EXIT_REASONS
};

static struct exit_reasons_table svm_exit_reasons[] = {
	SVM_EXIT_REASONS
};

/* Linear-scan the active ISA's table; returns "UNKNOWN" on miss. */
static const char *get_exit_reason(struct perf_kvm_stat *kvm, u64 exit_code)
{
	int i = kvm->exit_reasons_size;
	struct exit_reasons_table *tbl = kvm->exit_reasons;

	while (i--) {
		if (tbl->exit_code == exit_code)
			return tbl->reason;
		tbl++;
	}

	pr_err("unknown kvm exit code:%lld on %s\n",
		(unsigned long long)exit_code, kvm->exit_reasons_isa);
	return "UNKNOWN";
}

static void exit_event_decode_key(struct perf_kvm_stat *kvm,
				  struct event_key *key,
				  char decode[20])
{
	const char *exit_reason = get_exit_reason(kvm, key->key);

	scnprintf(decode, 20, "%s", exit_reason);
}

static struct kvm_events_ops exit_events = {
	.is_begin_event = exit_event_begin,
	.is_end_event = exit_event_end,
	.decode_key = exit_event_decode_key,
	.name = "VM-EXIT"
};

/*
 * For the mmio events, we treat:
 * the time of MMIO write: kvm_mmio(KVM_TRACE_MMIO_WRITE...) -> kvm_entry
 * the time of MMIO read: kvm_exit -> kvm_mmio(KVM_TRACE_MMIO_READ...).
 */
static void mmio_event_get_key(struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct event_key *key)
{
	key->key  = perf_evsel__intval(evsel, sample, "gpa");
	key->info = perf_evsel__intval(evsel, sample, "type");
}

#define KVM_TRACE_MMIO_READ_UNSATISFIED 0
#define KVM_TRACE_MMIO_READ 1
#define KVM_TRACE_MMIO_WRITE 2

static bool mmio_event_begin(struct perf_evsel *evsel,
			     struct perf_sample *sample, struct event_key *key)
{
	/* MMIO read begin event in kernel. */
	if (kvm_exit_event(evsel))
		return true;

	/* MMIO write begin event in kernel. */
	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_WRITE) {
		mmio_event_get_key(evsel, sample, key);
		return true;
	}

	return false;
}

static bool mmio_event_end(struct perf_evsel *evsel, struct perf_sample *sample,
			   struct event_key *key)
{
	/* MMIO write end event in kernel. */
	if (kvm_entry_event(evsel))
		return true;

	/* MMIO read end event in kernel.*/
	if (!strcmp(evsel->name, "kvm:kvm_mmio") &&
	    perf_evsel__intval(evsel, sample, "type") == KVM_TRACE_MMIO_READ) {
		mmio_event_get_key(evsel, sample, key);
		return true;
	}

	return false;
}

static void mmio_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
				  struct event_key *key,
				  char decode[20])
{
	scnprintf(decode, 20, "%#lx:%s", (unsigned long)key->key,
		  key->info == KVM_TRACE_MMIO_WRITE ? "W" : "R");
}

static struct kvm_events_ops mmio_events = {
	.is_begin_event = mmio_event_begin,
	.is_end_event = mmio_event_end,
	.decode_key = mmio_event_decode_key,
	.name = "MMIO Access"
};

/* The time of emulation pio access is from kvm_pio to kvm_entry. */
static void ioport_event_get_key(struct perf_evsel *evsel,
				 struct perf_sample *sample,
				 struct event_key *key)
{
	key->key  = perf_evsel__intval(evsel, sample, "port");
	key->info = perf_evsel__intval(evsel, sample, "rw");
}

static bool ioport_event_begin(struct perf_evsel *evsel,
			       struct perf_sample *sample,
			       struct event_key *key)
{
	if (!strcmp(evsel->name, "kvm:kvm_pio")) {
		ioport_event_get_key(evsel, sample, key);
		return true;
	}

	return false;
}

static bool ioport_event_end(struct perf_evsel *evsel,
			     struct perf_sample *sample __maybe_unused,
			     struct event_key *key __maybe_unused)
{
	return kvm_entry_event(evsel);
}

static void ioport_event_decode_key(struct perf_kvm_stat *kvm __maybe_unused,
				    struct event_key *key,
				    char decode[20])
{
	scnprintf(decode, 20, "%#llx:%s", (unsigned long long)key->key,
		  key->info ? "POUT" : "PIN");
}

static struct kvm_events_ops ioport_events = {
	.is_begin_event = ioport_event_begin,
	.is_end_event = ioport_event_end,
	.decode_key = ioport_event_decode_key,
	.name = "IO Port Access"
};

/* Bind the callback set matching --event (vmexit/mmio/ioport). */
static bool register_kvm_events_ops(struct perf_kvm_stat *kvm)
{
	bool ret = true;

	if (!strcmp(kvm->report_event, "vmexit"))
		kvm->events_ops = &exit_events;
	else if (!strcmp(kvm->report_event, "mmio"))
		kvm->events_ops = &mmio_events;
	else if (!strcmp(kvm->report_event, "ioport"))
		kvm->events_ops = &ioport_events;
	else {
		pr_err("Unknown report event:%s\n", kvm->report_event);
		ret = false;
	}

	return ret;
}

/* Per-vcpu in-flight state: the last begin event and its timestamp. */
struct vcpu_event_record {
	int vcpu_id;
	u64 start_time;
	struct kvm_event *last_event;
};

static void init_kvm_event_record(struct perf_kvm_stat *kvm)
{
	unsigned int i;

	for (i = 0; i < EVENTS_CACHE_SIZE; i++)
		INIT_LIST_HEAD(&kvm->kvm_events_cache[i]);
}

/* Map a key onto one of the EVENTS_CACHE_SIZE hash buckets. */
static int kvm_events_hash_fn(u64 key)
{
	return key & (EVENTS_CACHE_SIZE - 1);
}

/*
 * Grow event->vcpu so that vcpu_id is in range, in DEFAULT_VCPU_NUM
 * increments, zero-filling the newly added slots.
 */
static bool kvm_event_expand(struct kvm_event *event, int vcpu_id)
{
	int old_max_vcpu = event->max_vcpu;

	if (vcpu_id < event->max_vcpu)
		return true;

	while (event->max_vcpu <= vcpu_id)
		event->max_vcpu += DEFAULT_VCPU_NUM;

	event->vcpu = realloc(event->vcpu,
			      event->max_vcpu * sizeof(*event->vcpu));
	if (!event->vcpu) {
		pr_err("Not enough memory\n");
		return false;
	}

	memset(event->vcpu + old_max_vcpu, 0,
	       (event->max_vcpu - old_max_vcpu) * sizeof(*event->vcpu));
	return true;
}

static struct kvm_event *kvm_alloc_init_event(struct event_key *key)
{
	struct kvm_event *event;

	event = zalloc(sizeof(*event));
	if (!event) {
		pr_err("Not enough memory\n");
		return NULL;
	}

	event->key = *key;
	return event;
}

/* Look up @key in the hash cache; allocate and insert on miss. */
static struct kvm_event *find_create_kvm_event(struct perf_kvm_stat *kvm,
					       struct event_key *key)
{
	struct kvm_event *event;
	struct list_head *head;

	BUG_ON(key->key == INVALID_KEY);

	head = &kvm->kvm_events_cache[kvm_events_hash_fn(key->key)];
	list_for_each_entry(event, head, hash_entry) {
		if (event->key.key == key->key && event->key.info == key->info)
			return event;
	}

	event = kvm_alloc_init_event(key);
	if (!event)
		return NULL;

	list_add(&event->hash_entry, head);
	return event;
}

/* Remember the begin event (if keyed) and its timestamp on this vcpu. */
static bool handle_begin_event(struct perf_kvm_stat *kvm,
			       struct vcpu_event_record *vcpu_record,
			       struct event_key *key, u64 timestamp)
{
	struct kvm_event *event = NULL;

	if (key->key != INVALID_KEY)
		event = find_create_kvm_event(kvm, key);

	vcpu_record->last_event = event;
	vcpu_record->start_time = timestamp;
	return true;
}

static void
kvm_update_event_stats(struct kvm_event_stats *kvm_stats, u64 time_diff)
{
	kvm_stats->time += time_diff;
	update_stats(&kvm_stats->stats, time_diff);
}

/* Relative stddev (%) of the event's duration, overall or per vcpu. */
static double kvm_event_rel_stddev(int vcpu_id, struct kvm_event *event)
{
	struct kvm_event_stats *kvm_stats = &event->total;

	if (vcpu_id != -1)
		kvm_stats = &event->vcpu[vcpu_id];

	return rel_stddev_stats(stddev_stats(&kvm_stats->stats),
				avg_stats(&kvm_stats->stats));
}

/* Fold one measured duration into the total or the per-vcpu bucket. */
static bool update_kvm_event(struct kvm_event *event, int vcpu_id,
			     u64 time_diff)
{
	if (vcpu_id == -1) {
		kvm_update_event_stats(&event->total, time_diff);
		return true;
	}

	if (!kvm_event_expand(event, vcpu_id))
		return false;

	kvm_update_event_stats(&event->vcpu[vcpu_id], time_diff);
	return true;
}
/* Close the interval opened by handle_begin_event and record its duration. */
static bool handle_end_event(struct perf_kvm_stat *kvm,
			     struct vcpu_event_record *vcpu_record,
			     struct event_key *key,
			     u64 timestamp)
{
	struct kvm_event *event;
	u64 time_begin, time_diff;
	int vcpu;

	if (kvm->trace_vcpu == -1)
		vcpu = -1;
	else
		vcpu = vcpu_record->vcpu_id;

	event = vcpu_record->last_event;
	time_begin = vcpu_record->start_time;

	/* The begin event is not caught. */
	if (!time_begin)
		return true;

	/*
	 * In some case, the 'begin event' only records the start timestamp,
	 * the actual event is recognized in the 'end event' (e.g. mmio-event).
	 */

	/* Both begin and end events did not get the key. */
	if (!event && key->key == INVALID_KEY)
		return true;

	if (!event)
		event = find_create_kvm_event(kvm, key);

	if (!event)
		return false;

	vcpu_record->last_event = NULL;
	vcpu_record->start_time = 0;

	BUG_ON(timestamp < time_begin);

	time_diff = timestamp - time_begin;
	return update_kvm_event(event, vcpu, time_diff);
}

/* Lazily attach a vcpu_event_record to the thread (stored in thread->priv). */
static
struct vcpu_event_record *per_vcpu_record(struct thread *thread,
					  struct perf_evsel *evsel,
					  struct perf_sample *sample)
{
	/* Only kvm_entry records vcpu id. */
	if (!thread->priv && kvm_entry_event(evsel)) {
		struct vcpu_event_record *vcpu_record;

		vcpu_record = zalloc(sizeof(*vcpu_record));
		if (!vcpu_record) {
			pr_err("%s: Not enough memory\n", __func__);
			return NULL;
		}

		vcpu_record->vcpu_id = perf_evsel__intval(evsel, sample, "vcpu_id");
		thread->priv = vcpu_record;
	}

	return thread->priv;
}

/* Dispatch one sample to the begin/end handler of the active report type. */
static bool handle_kvm_event(struct perf_kvm_stat *kvm,
			     struct thread *thread,
			     struct perf_evsel *evsel,
			     struct perf_sample *sample)
{
	struct vcpu_event_record *vcpu_record;
	struct event_key key = {.key = INVALID_KEY};

	vcpu_record = per_vcpu_record(thread, evsel, sample);
	if (!vcpu_record)
		return true;

	/* only process events for vcpus user cares about */
	if ((kvm->trace_vcpu != -1) &&
	    (kvm->trace_vcpu != vcpu_record->vcpu_id))
		return true;

	if (kvm->events_ops->is_begin_event(evsel, sample, &key))
		return handle_begin_event(kvm, vcpu_record, &key, sample->time);

	if (kvm->events_ops->is_end_event(evsel, sample, &key))
		return handle_end_event(kvm, vcpu_record, &key, sample->time);

	return true;
}

/* Generate get_event_<func>(): total value for vcpu==-1, else per-vcpu. */
#define GET_EVENT_KEY(func, field)					\
static u64 get_event_ ##func(struct kvm_event *event, int vcpu)		\
{									\
	if (vcpu == -1)							\
		return event->total.field;				\
									\
	if (vcpu >= event->max_vcpu)					\
		return 0;						\
									\
	return event->vcpu[vcpu].field;					\
}

#define COMPARE_EVENT_KEY(func, field)					\
GET_EVENT_KEY(func, field)						\
static int compare_kvm_event_ ## func(struct kvm_event *one,		\
					struct kvm_event *two, int vcpu)\
{									\
	return get_event_ ##func(one, vcpu) >				\
				get_event_ ##func(two, vcpu);		\
}

GET_EVENT_KEY(time, time);
COMPARE_EVENT_KEY(count, stats.n);
COMPARE_EVENT_KEY(mean, stats.mean);

#define DEF_SORT_NAME_KEY(name, compare_key)				\
	{ #name, compare_kvm_event_ ## compare_key }

static struct kvm_event_key keys[] = {
	DEF_SORT_NAME_KEY(sample, count),
	DEF_SORT_NAME_KEY(time, mean),
	{ NULL, NULL }
};

/* Resolve --key into kvm->compare; false (with error) on unknown names. */
static bool select_key(struct perf_kvm_stat *kvm)
{
	int i;

	for (i = 0; keys[i].name; i++) {
		if (!strcmp(keys[i].name, kvm->sort_key)) {
			kvm->compare = keys[i].key;
			return true;
		}
	}

	pr_err("Unknown compare key:%s\n", kvm->sort_key);
	return false;
}

/* Insert into the rb-tree ordered by the chosen comparison (biggest left). */
static void insert_to_result(struct rb_root *result, struct kvm_event *event,
			     key_cmp_fun bigger, int vcpu)
{
	struct rb_node **rb = &result->rb_node;
	struct rb_node *parent = NULL;
	struct kvm_event *p;

	while (*rb) {
		p = container_of(*rb, struct kvm_event, rb);
		parent = *rb;

		if (bigger(event, p, vcpu))
			rb = &(*rb)->rb_left;
		else
			rb = &(*rb)->rb_right;
	}

	rb_link_node(&event->rb, parent, rb);
	rb_insert_color(&event->rb, result);
}

static void update_total_count(struct perf_kvm_stat *kvm,
			       struct kvm_event *event)
{
	int vcpu = kvm->trace_vcpu;

	kvm->total_count += get_event_count(event, vcpu);
	kvm->total_time += get_event_time(event, vcpu);
}

static bool event_is_valid(struct kvm_event *event, int vcpu)
{
	return !!get_event_count(event, vcpu);
}

/* Walk all hash buckets, accumulate totals and build the sorted tree. */
static void sort_result(struct perf_kvm_stat *kvm)
{
	unsigned int i;
	int vcpu = kvm->trace_vcpu;
	struct kvm_event *event;

	for (i = 0; i < EVENTS_CACHE_SIZE; i++) {
		list_for_each_entry(event, &kvm->kvm_events_cache[i], hash_entry) {
			if (event_is_valid(event, vcpu)) {
				update_total_count(kvm, event);
				insert_to_result(&kvm->result, event,
						 kvm->compare, vcpu);
			}
		}
	}
}

/* returns left most element of result, and erase it */
static struct kvm_event *pop_from_result(struct rb_root *result)
{
	struct rb_node *node = rb_first(result);

	if (!node)
		return NULL;

	rb_erase(node, result);
	return container_of(node, struct kvm_event, rb);
}

static void print_vcpu_info(int vcpu)
{
	pr_info("Analyze events for ");

	if (vcpu == -1)
		pr_info("all VCPUs:\n\n");
	else
		pr_info("VCPU %d:\n\n", vcpu);
}

/* Drain the sorted tree and print one formatted row per event kind. */
static void print_result(struct perf_kvm_stat *kvm)
{
	char decode[20];
	struct kvm_event *event;
	int vcpu = kvm->trace_vcpu;

	pr_info("\n\n");
	print_vcpu_info(vcpu);
	pr_info("%20s ", kvm->events_ops->name);
	pr_info("%10s ", "Samples");
	pr_info("%9s ", "Samples%");
	pr_info("%9s ", "Time%");
	pr_info("%16s ", "Avg time");
	pr_info("\n\n");

	while ((event = pop_from_result(&kvm->result))) {
		u64 ecount, etime;

		ecount = get_event_count(event, vcpu);
		etime = get_event_time(event, vcpu);

		kvm->events_ops->decode_key(kvm, &event->key, decode);
		pr_info("%20s ", decode);
		pr_info("%10llu ", (unsigned long long)ecount);
		pr_info("%8.2f%% ", (double)ecount / kvm->total_count * 100);
		pr_info("%8.2f%% ", (double)etime / kvm->total_time * 100);
		pr_info("%9.2fus ( +-%7.2f%% )", (double)etime / ecount/1e3,
			kvm_event_rel_stddev(vcpu, event));
		pr_info("\n");
	}

	pr_info("\nTotal Samples:%" PRIu64 ", Total events handled time:%.2fus.\n\n",
		kvm->total_count, kvm->total_time / 1e3);
}

/* perf_tool sample callback: route each sample into handle_kvm_event. */
static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct thread *thread = machine__findnew_thread(machine, sample->tid);
	struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
						 tool);

	if (thread == NULL) {
		pr_debug("problem processing %d event, skipping it.\n",
			event->header.type);
		return -1;
	}

	if (!handle_kvm_event(kvm, thread, evsel, sample))
		return -1;

	return 0;
}

/* 1 = Intel (VMX), 0 = AMD (SVM), negative errno on unknown cpuid. */
static int get_cpu_isa(struct perf_session *session)
{
	char *cpuid = session->header.env.cpuid;
	int isa;

	if (strstr(cpuid, "Intel"))
		isa = 1;
	else if (strstr(cpuid, "AMD"))
		isa = 0;
	else {
		pr_err("CPU %s is not supported.\n", cpuid);
		isa = -ENOTSUP;
	}

	return isa;
}

/* Open the perf.data session, pick the ISA exit-reason table, process events. */
static int read_events(struct perf_kvm_stat *kvm)
{
	int ret;

	struct perf_tool eops = {
		.sample			= process_sample_event,
		.comm			= perf_event__process_comm,
		.ordered_samples	= true,
	};

	kvm->tool = eops;
	kvm->session = perf_session__new(kvm->file_name, O_RDONLY, 0, false,
					 &kvm->tool);
	if (!kvm->session) {
		pr_err("Initializing perf session failed\n");
		return -EINVAL;
	}

	if (!perf_session__has_traces(kvm->session, "kvm record"))
		return -EINVAL;

	/*
	 * Do not use 'isa' recorded in kvm_exit tracepoint since it is not
	 * traced in the old kernel.
	 */
	ret = get_cpu_isa(kvm->session);

	if (ret < 0)
		return ret;

	if (ret == 1) {
		kvm->exit_reasons = vmx_exit_reasons;
		kvm->exit_reasons_size = ARRAY_SIZE(vmx_exit_reasons);
		kvm->exit_reasons_isa = "VMX";
	}

	return perf_session__process_events(kvm->session, &kvm->tool);
}

static bool verify_vcpu(int vcpu)
{
	if (vcpu != -1 && vcpu < 0) {
		pr_err("Invalid vcpu:%d.\n", vcpu);
		return false;
	}

	return true;
}

/* Full "stat report" pipeline: validate, read, aggregate, print. */
static int kvm_events_report_vcpu(struct perf_kvm_stat *kvm)
{
	int ret = -EINVAL;
	int vcpu = kvm->trace_vcpu;

	if (!verify_vcpu(vcpu))
		goto exit;

	if (!select_key(kvm))
		goto exit;

	if (!register_kvm_events_ops(kvm))
		goto exit;

	init_kvm_event_record(kvm);
	setup_pager();

	ret = read_events(kvm);
	if (ret)
		goto exit;

	sort_result(kvm);
	print_result(kvm);

exit:
	return ret;
}

static const char * const record_args[] = {
	"record",
	"-R",
	"-f",
	"-m", "1024",
	"-c", "1",
	"-e", "kvm:kvm_entry",
	"-e", "kvm:kvm_exit",
	"-e", "kvm:kvm_mmio",
	"-e", "kvm:kvm_pio",
};

#define STRDUP_FAIL_EXIT(s)		\
	({	char *_p;		\
	_p = strdup(s);			\
		if (!_p)		\
			return -ENOMEM;	\
		_p;			\
	})

/* Build an argv around record_args plus "-o <file>" and exec perf record. */
static int kvm_events_record(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	unsigned int rec_argc, i, j;
	const char **rec_argv;

	rec_argc = ARRAY_SIZE(record_args) + argc + 2;
	rec_argv = calloc(rec_argc + 1, sizeof(char *));

	if (rec_argv == NULL)
		return -ENOMEM;

	for (i = 0; i < ARRAY_SIZE(record_args); i++)
		rec_argv[i] = STRDUP_FAIL_EXIT(record_args[i]);

	rec_argv[i++] = STRDUP_FAIL_EXIT("-o");
	rec_argv[i++] = STRDUP_FAIL_EXIT(kvm->file_name);

	for (j = 1; j < (unsigned int)argc; j++, i++)
		rec_argv[i] = argv[j];

	return cmd_record(i, rec_argv, NULL);
}

/* Parse "stat report" options and run the report pipeline. */
static int kvm_events_report(struct perf_kvm_stat *kvm, int argc, const char **argv)
{
	const struct option kvm_events_report_options[] = {
		OPT_STRING(0, "event", &kvm->report_event, "report event",
			   "event for reporting: vmexit, mmio, ioport"),
		OPT_INTEGER(0, "vcpu", &kvm->trace_vcpu,
			    "vcpu id to report"),
		OPT_STRING('k', "key", &kvm->sort_key, "sort-key",
			    "key for sorting: sample(sort by samples number)"
			    " time (sort by avg time)"),
		OPT_END()
	};

	const char * const kvm_events_report_usage[] = {
		"perf kvm stat report [<options>]",
		NULL
	};

	symbol__init();

	if (argc) {
		argc = parse_options(argc, argv,
				     kvm_events_report_options,
				     kvm_events_report_usage, 0);
		if (argc)
			usage_with_options(kvm_events_report_usage,
					   kvm_events_report_options);
	}

	return kvm_events_report_vcpu(kvm);
}

static void print_kvm_stat_usage(void)
{
	printf("Usage: perf kvm stat <command>\n\n");

	printf("# Available commands:\n");
	printf("\trecord: record kvm events\n");
	printf("\treport: report statistical data of kvm events\n");

	printf("\nOtherwise, it is the alias of 'perf stat':\n");
}

/* Entry point of "perf kvm stat": dispatch to record/report or perf stat. */
static int kvm_cmd_stat(const char *file_name, int argc, const char **argv)
{
	struct perf_kvm_stat kvm = {
		.file_name = file_name,

		.trace_vcpu	= -1,
		.report_event	= "vmexit",
		.sort_key	= "sample",

		.exit_reasons = svm_exit_reasons,
		.exit_reasons_size = ARRAY_SIZE(svm_exit_reasons),
		.exit_reasons_isa = "SVM",
	};

	if (argc == 1) {
		print_kvm_stat_usage();
		goto perf_stat;
	}

	if (!strncmp(argv[1], "rec", 3))
		return kvm_events_record(&kvm, argc - 1, argv + 1);

	if (!strncmp(argv[1], "rep", 3))
		return kvm_events_report(&kvm, argc - 1 , argv + 1);

perf_stat:
	return cmd_stat(argc, argv, NULL);
}
#endif

/* Forward to "perf record -o <file> ..." for guest/host data collection. */
static int __cmd_record(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	rec_argc = argc + 2;
	/* NOTE(review): calloc result is not checked before use here */
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	rec_argv[i++] = strdup("record");
	rec_argv[i++] = strdup("-o");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_record(i, rec_argv, NULL);
}

/* Forward to "perf report -i <file> ...". */
static int __cmd_report(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	rec_argc = argc + 2;
	/* NOTE(review): calloc result is not checked before use here */
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	rec_argv[i++] = strdup("report");
	rec_argv[i++] = strdup("-i");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_report(i, rec_argv, NULL);
}

/* Forward to "perf buildid-list -i <file> ...". */
static int __cmd_buildid_list(const char *file_name, int argc, const char **argv)
{
	int rec_argc, i = 0, j;
	const char **rec_argv;

	rec_argc = argc + 2;
	/* NOTE(review): calloc result is not checked before use here */
	rec_argv = calloc(rec_argc + 1, sizeof(char *));
	rec_argv[i++] = strdup("buildid-list");
	rec_argv[i++] = strdup("-i");
	rec_argv[i++] = strdup(file_name);
	for (j = 1; j < argc; j++, i++)
		rec_argv[i] = argv[j];

	BUG_ON(i != rec_argc);

	return cmd_buildid_list(i, rec_argv, NULL);
}

/* Top-level "perf kvm" command: option parsing and sub-command dispatch. */
int cmd_kvm(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char *file_name = NULL;
	const struct option kvm_options[] = {
		OPT_STRING('i', "input", &file_name, "file",
			   "Input file name"),
		OPT_STRING('o', "output", &file_name, "file",
			   "Output file name"),
		OPT_BOOLEAN(0, "guest", &perf_guest,
			    "Collect guest os data"),
		OPT_BOOLEAN(0, "host", &perf_host,
			    "Collect host os data"),
		OPT_STRING(0, "guestmount", &symbol_conf.guestmount, "directory",
			   "guest mount directory under which every guest os"
			   " instance has a subdir"),
		OPT_STRING(0, "guestvmlinux", &symbol_conf.default_guest_vmlinux_name,
			   "file", "file saving guest os vmlinux"),
		OPT_STRING(0, "guestkallsyms", &symbol_conf.default_guest_kallsyms,
			   "file", "file saving guest os /proc/kallsyms"),
		OPT_STRING(0, "guestmodules", &symbol_conf.default_guest_modules,
			   "file", "file saving guest os /proc/modules"),
		OPT_END()
	};

	const char * const kvm_usage[] = {
		"perf kvm [<options>] {top|record|report|diff|buildid-list|stat}",
		NULL
	};

	perf_host  = 0;
	perf_guest = 1;

	argc = parse_options(argc, argv, kvm_options, kvm_usage,
			PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc)
		usage_with_options(kvm_usage, kvm_options);

	if (!perf_host)
		perf_guest = 1;

	if (!file_name) {
		/* pick a default data file name based on guest/host selection */
		if (perf_host && !perf_guest)
			file_name = strdup("perf.data.host");
		else if (!perf_host && perf_guest)
			file_name = strdup("perf.data.guest");
		else
			file_name = strdup("perf.data.kvm");

		if (!file_name) {
			pr_err("Failed to allocate memory for filename\n");
			return -ENOMEM;
		}
	}

	if (!strncmp(argv[0], "rec", 3))
		return __cmd_record(file_name, argc, argv);
	else if (!strncmp(argv[0], "rep", 3))
		return __cmd_report(file_name, argc, argv);
	else if (!strncmp(argv[0], "diff", 4))
		return cmd_diff(argc, argv, NULL);
	else if (!strncmp(argv[0], "top", 3))
		return cmd_top(argc, argv, NULL);
	else if (!strncmp(argv[0], "buildid-list", 12))
		return __cmd_buildid_list(file_name, argc, argv);
#if defined(__i386__) || defined(__x86_64__)
	else if (!strncmp(argv[0], "stat", 4))
		return kvm_cmd_stat(file_name, argc, argv);
#endif
	else
		usage_with_options(kvm_usage, kvm_options);

	return 0;
}
gpl-2.0
chaubeyprateek/Hyper_Kernel_Redmi2
drivers/dma/timb_dma.c
2076
21145
/* * timb_dma.c timberdale FPGA DMA driver * Copyright (c) 2010 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Supports: * Timberdale FPGA DMA engine */ #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/timb_dma.h> #include "dmaengine.h" #define DRIVER_NAME "timb-dma" /* Global DMA registers */ #define TIMBDMA_ACR 0x34 #define TIMBDMA_32BIT_ADDR 0x01 #define TIMBDMA_ISR 0x080000 #define TIMBDMA_IPR 0x080004 #define TIMBDMA_IER 0x080008 /* Channel specific registers */ /* RX instances base addresses are 0x00, 0x40, 0x80 ... * TX instances base addresses are 0x18, 0x58, 0x98 ... */ #define TIMBDMA_INSTANCE_OFFSET 0x40 #define TIMBDMA_INSTANCE_TX_OFFSET 0x18 /* RX registers, relative the instance base */ #define TIMBDMA_OFFS_RX_DHAR 0x00 #define TIMBDMA_OFFS_RX_DLAR 0x04 #define TIMBDMA_OFFS_RX_LR 0x0C #define TIMBDMA_OFFS_RX_BLR 0x10 #define TIMBDMA_OFFS_RX_ER 0x14 #define TIMBDMA_RX_EN 0x01 /* bytes per Row, video specific register * which is placed after the TX registers... 
*/ #define TIMBDMA_OFFS_RX_BPRR 0x30 /* TX registers, relative the instance base */ #define TIMBDMA_OFFS_TX_DHAR 0x00 #define TIMBDMA_OFFS_TX_DLAR 0x04 #define TIMBDMA_OFFS_TX_BLR 0x0C #define TIMBDMA_OFFS_TX_LR 0x14 #define TIMB_DMA_DESC_SIZE 8 struct timb_dma_desc { struct list_head desc_node; struct dma_async_tx_descriptor txd; u8 *desc_list; unsigned int desc_list_len; bool interrupt; }; struct timb_dma_chan { struct dma_chan chan; void __iomem *membase; spinlock_t lock; /* Used to protect data structures, especially the lists and descriptors, from races between the tasklet and calls from above */ bool ongoing; struct list_head active_list; struct list_head queue; struct list_head free_list; unsigned int bytes_per_line; enum dma_transfer_direction direction; unsigned int descs; /* Descriptors to allocate */ unsigned int desc_elems; /* number of elems per descriptor */ }; struct timb_dma { struct dma_device dma; void __iomem *membase; struct tasklet_struct tasklet; struct timb_dma_chan channels[0]; }; static struct device *chan2dev(struct dma_chan *chan) { return &chan->dev->device; } static struct device *chan2dmadev(struct dma_chan *chan) { return chan2dev(chan)->parent->parent; } static struct timb_dma *tdchantotd(struct timb_dma_chan *td_chan) { int id = td_chan->chan.chan_id; return (struct timb_dma *)((u8 *)td_chan - id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma)); } /* Must be called with the spinlock held */ static void __td_enable_chan_irq(struct timb_dma_chan *td_chan) { int id = td_chan->chan.chan_id; struct timb_dma *td = tdchantotd(td_chan); u32 ier; /* enable interrupt for this channel */ ier = ioread32(td->membase + TIMBDMA_IER); ier |= 1 << id; dev_dbg(chan2dev(&td_chan->chan), "Enabling irq: %d, IER: 0x%x\n", id, ier); iowrite32(ier, td->membase + TIMBDMA_IER); } /* Should be called with the spinlock held */ static bool __td_dma_done_ack(struct timb_dma_chan *td_chan) { int id = td_chan->chan.chan_id; struct timb_dma *td = (struct 
timb_dma *)((u8 *)td_chan - id * sizeof(struct timb_dma_chan) - sizeof(struct timb_dma)); u32 isr; bool done = false; dev_dbg(chan2dev(&td_chan->chan), "Checking irq: %d, td: %p\n", id, td); isr = ioread32(td->membase + TIMBDMA_ISR) & (1 << id); if (isr) { iowrite32(isr, td->membase + TIMBDMA_ISR); done = true; } return done; } static void __td_unmap_desc(struct timb_dma_chan *td_chan, const u8 *dma_desc, bool single) { dma_addr_t addr; int len; addr = (dma_desc[7] << 24) | (dma_desc[6] << 16) | (dma_desc[5] << 8) | dma_desc[4]; len = (dma_desc[3] << 8) | dma_desc[2]; if (single) dma_unmap_single(chan2dev(&td_chan->chan), addr, len, DMA_TO_DEVICE); else dma_unmap_page(chan2dev(&td_chan->chan), addr, len, DMA_TO_DEVICE); } static void __td_unmap_descs(struct timb_dma_desc *td_desc, bool single) { struct timb_dma_chan *td_chan = container_of(td_desc->txd.chan, struct timb_dma_chan, chan); u8 *descs; for (descs = td_desc->desc_list; ; descs += TIMB_DMA_DESC_SIZE) { __td_unmap_desc(td_chan, descs, single); if (descs[0] & 0x02) break; } } static int td_fill_desc(struct timb_dma_chan *td_chan, u8 *dma_desc, struct scatterlist *sg, bool last) { if (sg_dma_len(sg) > USHRT_MAX) { dev_err(chan2dev(&td_chan->chan), "Too big sg element\n"); return -EINVAL; } /* length must be word aligned */ if (sg_dma_len(sg) % sizeof(u32)) { dev_err(chan2dev(&td_chan->chan), "Incorrect length: %d\n", sg_dma_len(sg)); return -EINVAL; } dev_dbg(chan2dev(&td_chan->chan), "desc: %p, addr: 0x%llx\n", dma_desc, (unsigned long long)sg_dma_address(sg)); dma_desc[7] = (sg_dma_address(sg) >> 24) & 0xff; dma_desc[6] = (sg_dma_address(sg) >> 16) & 0xff; dma_desc[5] = (sg_dma_address(sg) >> 8) & 0xff; dma_desc[4] = (sg_dma_address(sg) >> 0) & 0xff; dma_desc[3] = (sg_dma_len(sg) >> 8) & 0xff; dma_desc[2] = (sg_dma_len(sg) >> 0) & 0xff; dma_desc[1] = 0x00; dma_desc[0] = 0x21 | (last ? 
0x02 : 0); /* tran, valid */ return 0; } /* Must be called with the spinlock held */ static void __td_start_dma(struct timb_dma_chan *td_chan) { struct timb_dma_desc *td_desc; if (td_chan->ongoing) { dev_err(chan2dev(&td_chan->chan), "Transfer already ongoing\n"); return; } td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc, desc_node); dev_dbg(chan2dev(&td_chan->chan), "td_chan: %p, chan: %d, membase: %p\n", td_chan, td_chan->chan.chan_id, td_chan->membase); if (td_chan->direction == DMA_DEV_TO_MEM) { /* descriptor address */ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_DHAR); iowrite32(td_desc->txd.phys, td_chan->membase + TIMBDMA_OFFS_RX_DLAR); /* Bytes per line */ iowrite32(td_chan->bytes_per_line, td_chan->membase + TIMBDMA_OFFS_RX_BPRR); /* enable RX */ iowrite32(TIMBDMA_RX_EN, td_chan->membase + TIMBDMA_OFFS_RX_ER); } else { /* address high */ iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DHAR); iowrite32(td_desc->txd.phys, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); } td_chan->ongoing = true; if (td_desc->interrupt) __td_enable_chan_irq(td_chan); } static void __td_finish(struct timb_dma_chan *td_chan) { dma_async_tx_callback callback; void *param; struct dma_async_tx_descriptor *txd; struct timb_dma_desc *td_desc; /* can happen if the descriptor is canceled */ if (list_empty(&td_chan->active_list)) return; td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc, desc_node); txd = &td_desc->txd; dev_dbg(chan2dev(&td_chan->chan), "descriptor %u complete\n", txd->cookie); /* make sure to stop the transfer */ if (td_chan->direction == DMA_DEV_TO_MEM) iowrite32(0, td_chan->membase + TIMBDMA_OFFS_RX_ER); /* Currently no support for stopping DMA transfers else iowrite32(0, td_chan->membase + TIMBDMA_OFFS_TX_DLAR); */ dma_cookie_complete(txd); td_chan->ongoing = false; callback = txd->callback; param = txd->callback_param; list_move(&td_desc->desc_node, &td_chan->free_list); if (!(txd->flags & DMA_COMPL_SKIP_SRC_UNMAP)) 
__td_unmap_descs(td_desc, txd->flags & DMA_COMPL_SRC_UNMAP_SINGLE); /* * The API requires that no submissions are done from a * callback, so we don't need to drop the lock here */ if (callback) callback(param); } static u32 __td_ier_mask(struct timb_dma *td) { int i; u32 ret = 0; for (i = 0; i < td->dma.chancnt; i++) { struct timb_dma_chan *td_chan = td->channels + i; if (td_chan->ongoing) { struct timb_dma_desc *td_desc = list_entry(td_chan->active_list.next, struct timb_dma_desc, desc_node); if (td_desc->interrupt) ret |= 1 << i; } } return ret; } static void __td_start_next(struct timb_dma_chan *td_chan) { struct timb_dma_desc *td_desc; BUG_ON(list_empty(&td_chan->queue)); BUG_ON(td_chan->ongoing); td_desc = list_entry(td_chan->queue.next, struct timb_dma_desc, desc_node); dev_dbg(chan2dev(&td_chan->chan), "%s: started %u\n", __func__, td_desc->txd.cookie); list_move(&td_desc->desc_node, &td_chan->active_list); __td_start_dma(td_chan); } static dma_cookie_t td_tx_submit(struct dma_async_tx_descriptor *txd) { struct timb_dma_desc *td_desc = container_of(txd, struct timb_dma_desc, txd); struct timb_dma_chan *td_chan = container_of(txd->chan, struct timb_dma_chan, chan); dma_cookie_t cookie; spin_lock_bh(&td_chan->lock); cookie = dma_cookie_assign(txd); if (list_empty(&td_chan->active_list)) { dev_dbg(chan2dev(txd->chan), "%s: started %u\n", __func__, txd->cookie); list_add_tail(&td_desc->desc_node, &td_chan->active_list); __td_start_dma(td_chan); } else { dev_dbg(chan2dev(txd->chan), "tx_submit: queued %u\n", txd->cookie); list_add_tail(&td_desc->desc_node, &td_chan->queue); } spin_unlock_bh(&td_chan->lock); return cookie; } static struct timb_dma_desc *td_alloc_init_desc(struct timb_dma_chan *td_chan) { struct dma_chan *chan = &td_chan->chan; struct timb_dma_desc *td_desc; int err; td_desc = kzalloc(sizeof(struct timb_dma_desc), GFP_KERNEL); if (!td_desc) { dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); goto out; } td_desc->desc_list_len = 
td_chan->desc_elems * TIMB_DMA_DESC_SIZE; td_desc->desc_list = kzalloc(td_desc->desc_list_len, GFP_KERNEL); if (!td_desc->desc_list) { dev_err(chan2dev(chan), "Failed to alloc descriptor\n"); goto err; } dma_async_tx_descriptor_init(&td_desc->txd, chan); td_desc->txd.tx_submit = td_tx_submit; td_desc->txd.flags = DMA_CTRL_ACK; td_desc->txd.phys = dma_map_single(chan2dmadev(chan), td_desc->desc_list, td_desc->desc_list_len, DMA_TO_DEVICE); err = dma_mapping_error(chan2dmadev(chan), td_desc->txd.phys); if (err) { dev_err(chan2dev(chan), "DMA mapping error: %d\n", err); goto err; } return td_desc; err: kfree(td_desc->desc_list); kfree(td_desc); out: return NULL; } static void td_free_desc(struct timb_dma_desc *td_desc) { dev_dbg(chan2dev(td_desc->txd.chan), "Freeing desc: %p\n", td_desc); dma_unmap_single(chan2dmadev(td_desc->txd.chan), td_desc->txd.phys, td_desc->desc_list_len, DMA_TO_DEVICE); kfree(td_desc->desc_list); kfree(td_desc); } static void td_desc_put(struct timb_dma_chan *td_chan, struct timb_dma_desc *td_desc) { dev_dbg(chan2dev(&td_chan->chan), "Putting desc: %p\n", td_desc); spin_lock_bh(&td_chan->lock); list_add(&td_desc->desc_node, &td_chan->free_list); spin_unlock_bh(&td_chan->lock); } static struct timb_dma_desc *td_desc_get(struct timb_dma_chan *td_chan) { struct timb_dma_desc *td_desc, *_td_desc; struct timb_dma_desc *ret = NULL; spin_lock_bh(&td_chan->lock); list_for_each_entry_safe(td_desc, _td_desc, &td_chan->free_list, desc_node) { if (async_tx_test_ack(&td_desc->txd)) { list_del(&td_desc->desc_node); ret = td_desc; break; } dev_dbg(chan2dev(&td_chan->chan), "desc %p not ACKed\n", td_desc); } spin_unlock_bh(&td_chan->lock); return ret; } static int td_alloc_chan_resources(struct dma_chan *chan) { struct timb_dma_chan *td_chan = container_of(chan, struct timb_dma_chan, chan); int i; dev_dbg(chan2dev(chan), "%s: entry\n", __func__); BUG_ON(!list_empty(&td_chan->free_list)); for (i = 0; i < td_chan->descs; i++) { struct timb_dma_desc *td_desc = 
td_alloc_init_desc(td_chan); if (!td_desc) { if (i) break; else { dev_err(chan2dev(chan), "Couldnt allocate any descriptors\n"); return -ENOMEM; } } td_desc_put(td_chan, td_desc); } spin_lock_bh(&td_chan->lock); dma_cookie_init(chan); spin_unlock_bh(&td_chan->lock); return 0; } static void td_free_chan_resources(struct dma_chan *chan) { struct timb_dma_chan *td_chan = container_of(chan, struct timb_dma_chan, chan); struct timb_dma_desc *td_desc, *_td_desc; LIST_HEAD(list); dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); /* check that all descriptors are free */ BUG_ON(!list_empty(&td_chan->active_list)); BUG_ON(!list_empty(&td_chan->queue)); spin_lock_bh(&td_chan->lock); list_splice_init(&td_chan->free_list, &list); spin_unlock_bh(&td_chan->lock); list_for_each_entry_safe(td_desc, _td_desc, &list, desc_node) { dev_dbg(chan2dev(chan), "%s: Freeing desc: %p\n", __func__, td_desc); td_free_desc(td_desc); } } static enum dma_status td_tx_status(struct dma_chan *chan, dma_cookie_t cookie, struct dma_tx_state *txstate) { enum dma_status ret; dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); ret = dma_cookie_status(chan, cookie, txstate); dev_dbg(chan2dev(chan), "%s: exit, ret: %d\n", __func__, ret); return ret; } static void td_issue_pending(struct dma_chan *chan) { struct timb_dma_chan *td_chan = container_of(chan, struct timb_dma_chan, chan); dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); spin_lock_bh(&td_chan->lock); if (!list_empty(&td_chan->active_list)) /* transfer ongoing */ if (__td_dma_done_ack(td_chan)) __td_finish(td_chan); if (list_empty(&td_chan->active_list) && !list_empty(&td_chan->queue)) __td_start_next(td_chan); spin_unlock_bh(&td_chan->lock); } static struct dma_async_tx_descriptor *td_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, unsigned int sg_len, enum dma_transfer_direction direction, unsigned long flags, void *context) { struct timb_dma_chan *td_chan = container_of(chan, struct timb_dma_chan, chan); struct timb_dma_desc 
*td_desc; struct scatterlist *sg; unsigned int i; unsigned int desc_usage = 0; if (!sgl || !sg_len) { dev_err(chan2dev(chan), "%s: No SG list\n", __func__); return NULL; } /* even channels are for RX, odd for TX */ if (td_chan->direction != direction) { dev_err(chan2dev(chan), "Requesting channel in wrong direction\n"); return NULL; } td_desc = td_desc_get(td_chan); if (!td_desc) { dev_err(chan2dev(chan), "Not enough descriptors available\n"); return NULL; } td_desc->interrupt = (flags & DMA_PREP_INTERRUPT) != 0; for_each_sg(sgl, sg, sg_len, i) { int err; if (desc_usage > td_desc->desc_list_len) { dev_err(chan2dev(chan), "No descriptor space\n"); return NULL; } err = td_fill_desc(td_chan, td_desc->desc_list + desc_usage, sg, i == (sg_len - 1)); if (err) { dev_err(chan2dev(chan), "Failed to update desc: %d\n", err); td_desc_put(td_chan, td_desc); return NULL; } desc_usage += TIMB_DMA_DESC_SIZE; } dma_sync_single_for_device(chan2dmadev(chan), td_desc->txd.phys, td_desc->desc_list_len, DMA_MEM_TO_DEV); return &td_desc->txd; } static int td_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd, unsigned long arg) { struct timb_dma_chan *td_chan = container_of(chan, struct timb_dma_chan, chan); struct timb_dma_desc *td_desc, *_td_desc; dev_dbg(chan2dev(chan), "%s: Entry\n", __func__); if (cmd != DMA_TERMINATE_ALL) return -ENXIO; /* first the easy part, put the queue into the free list */ spin_lock_bh(&td_chan->lock); list_for_each_entry_safe(td_desc, _td_desc, &td_chan->queue, desc_node) list_move(&td_desc->desc_node, &td_chan->free_list); /* now tear down the running */ __td_finish(td_chan); spin_unlock_bh(&td_chan->lock); return 0; } static void td_tasklet(unsigned long data) { struct timb_dma *td = (struct timb_dma *)data; u32 isr; u32 ipr; u32 ier; int i; isr = ioread32(td->membase + TIMBDMA_ISR); ipr = isr & __td_ier_mask(td); /* ack the interrupts */ iowrite32(ipr, td->membase + TIMBDMA_ISR); for (i = 0; i < td->dma.chancnt; i++) if (ipr & (1 << i)) { struct 
timb_dma_chan *td_chan = td->channels + i; spin_lock(&td_chan->lock); __td_finish(td_chan); if (!list_empty(&td_chan->queue)) __td_start_next(td_chan); spin_unlock(&td_chan->lock); } ier = __td_ier_mask(td); iowrite32(ier, td->membase + TIMBDMA_IER); } static irqreturn_t td_irq(int irq, void *devid) { struct timb_dma *td = devid; u32 ipr = ioread32(td->membase + TIMBDMA_IPR); if (ipr) { /* disable interrupts, will be re-enabled in tasklet */ iowrite32(0, td->membase + TIMBDMA_IER); tasklet_schedule(&td->tasklet); return IRQ_HANDLED; } else return IRQ_NONE; } static int td_probe(struct platform_device *pdev) { struct timb_dma_platform_data *pdata = pdev->dev.platform_data; struct timb_dma *td; struct resource *iomem; int irq; int err; int i; if (!pdata) { dev_err(&pdev->dev, "No platform data\n"); return -EINVAL; } iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!iomem) return -EINVAL; irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; if (!request_mem_region(iomem->start, resource_size(iomem), DRIVER_NAME)) return -EBUSY; td = kzalloc(sizeof(struct timb_dma) + sizeof(struct timb_dma_chan) * pdata->nr_channels, GFP_KERNEL); if (!td) { err = -ENOMEM; goto err_release_region; } dev_dbg(&pdev->dev, "Allocated TD: %p\n", td); td->membase = ioremap(iomem->start, resource_size(iomem)); if (!td->membase) { dev_err(&pdev->dev, "Failed to remap I/O memory\n"); err = -ENOMEM; goto err_free_mem; } /* 32bit addressing */ iowrite32(TIMBDMA_32BIT_ADDR, td->membase + TIMBDMA_ACR); /* disable and clear any interrupts */ iowrite32(0x0, td->membase + TIMBDMA_IER); iowrite32(0xFFFFFFFF, td->membase + TIMBDMA_ISR); tasklet_init(&td->tasklet, td_tasklet, (unsigned long)td); err = request_irq(irq, td_irq, IRQF_SHARED, DRIVER_NAME, td); if (err) { dev_err(&pdev->dev, "Failed to request IRQ\n"); goto err_tasklet_kill; } td->dma.device_alloc_chan_resources = td_alloc_chan_resources; td->dma.device_free_chan_resources = td_free_chan_resources; td->dma.device_tx_status = 
td_tx_status; td->dma.device_issue_pending = td_issue_pending; dma_cap_set(DMA_SLAVE, td->dma.cap_mask); dma_cap_set(DMA_PRIVATE, td->dma.cap_mask); td->dma.device_prep_slave_sg = td_prep_slave_sg; td->dma.device_control = td_control; td->dma.dev = &pdev->dev; INIT_LIST_HEAD(&td->dma.channels); for (i = 0; i < pdata->nr_channels; i++) { struct timb_dma_chan *td_chan = &td->channels[i]; struct timb_dma_platform_data_channel *pchan = pdata->channels + i; /* even channels are RX, odd are TX */ if ((i % 2) == pchan->rx) { dev_err(&pdev->dev, "Wrong channel configuration\n"); err = -EINVAL; goto err_free_irq; } td_chan->chan.device = &td->dma; dma_cookie_init(&td_chan->chan); spin_lock_init(&td_chan->lock); INIT_LIST_HEAD(&td_chan->active_list); INIT_LIST_HEAD(&td_chan->queue); INIT_LIST_HEAD(&td_chan->free_list); td_chan->descs = pchan->descriptors; td_chan->desc_elems = pchan->descriptor_elements; td_chan->bytes_per_line = pchan->bytes_per_line; td_chan->direction = pchan->rx ? DMA_DEV_TO_MEM : DMA_MEM_TO_DEV; td_chan->membase = td->membase + (i / 2) * TIMBDMA_INSTANCE_OFFSET + (pchan->rx ? 
0 : TIMBDMA_INSTANCE_TX_OFFSET); dev_dbg(&pdev->dev, "Chan: %d, membase: %p\n", i, td_chan->membase); list_add_tail(&td_chan->chan.device_node, &td->dma.channels); } err = dma_async_device_register(&td->dma); if (err) { dev_err(&pdev->dev, "Failed to register async device\n"); goto err_free_irq; } platform_set_drvdata(pdev, td); dev_dbg(&pdev->dev, "Probe result: %d\n", err); return err; err_free_irq: free_irq(irq, td); err_tasklet_kill: tasklet_kill(&td->tasklet); iounmap(td->membase); err_free_mem: kfree(td); err_release_region: release_mem_region(iomem->start, resource_size(iomem)); return err; } static int td_remove(struct platform_device *pdev) { struct timb_dma *td = platform_get_drvdata(pdev); struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); int irq = platform_get_irq(pdev, 0); dma_async_device_unregister(&td->dma); free_irq(irq, td); tasklet_kill(&td->tasklet); iounmap(td->membase); kfree(td); release_mem_region(iomem->start, resource_size(iomem)); platform_set_drvdata(pdev, NULL); dev_dbg(&pdev->dev, "Removed...\n"); return 0; } static struct platform_driver td_driver = { .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, .probe = td_probe, .remove = td_remove, }; module_platform_driver(td_driver); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("Timberdale DMA controller driver"); MODULE_AUTHOR("Pelagicore AB <info@pelagicore.com>"); MODULE_ALIAS("platform:"DRIVER_NAME);
gpl-2.0
kannu1994/sgs2_kernel
drivers/gpu/drm/nouveau/nv50_grctx.c
2332
127204
/* * Copyright 2009 Marcin Kościelnicki * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
*/ #define CP_FLAG_CLEAR 0 #define CP_FLAG_SET 1 #define CP_FLAG_SWAP_DIRECTION ((0 * 32) + 0) #define CP_FLAG_SWAP_DIRECTION_LOAD 0 #define CP_FLAG_SWAP_DIRECTION_SAVE 1 #define CP_FLAG_UNK01 ((0 * 32) + 1) #define CP_FLAG_UNK01_CLEAR 0 #define CP_FLAG_UNK01_SET 1 #define CP_FLAG_UNK03 ((0 * 32) + 3) #define CP_FLAG_UNK03_CLEAR 0 #define CP_FLAG_UNK03_SET 1 #define CP_FLAG_USER_SAVE ((0 * 32) + 5) #define CP_FLAG_USER_SAVE_NOT_PENDING 0 #define CP_FLAG_USER_SAVE_PENDING 1 #define CP_FLAG_USER_LOAD ((0 * 32) + 6) #define CP_FLAG_USER_LOAD_NOT_PENDING 0 #define CP_FLAG_USER_LOAD_PENDING 1 #define CP_FLAG_UNK0B ((0 * 32) + 0xb) #define CP_FLAG_UNK0B_CLEAR 0 #define CP_FLAG_UNK0B_SET 1 #define CP_FLAG_UNK1D ((0 * 32) + 0x1d) #define CP_FLAG_UNK1D_CLEAR 0 #define CP_FLAG_UNK1D_SET 1 #define CP_FLAG_UNK20 ((1 * 32) + 0) #define CP_FLAG_UNK20_CLEAR 0 #define CP_FLAG_UNK20_SET 1 #define CP_FLAG_STATUS ((2 * 32) + 0) #define CP_FLAG_STATUS_BUSY 0 #define CP_FLAG_STATUS_IDLE 1 #define CP_FLAG_AUTO_SAVE ((2 * 32) + 4) #define CP_FLAG_AUTO_SAVE_NOT_PENDING 0 #define CP_FLAG_AUTO_SAVE_PENDING 1 #define CP_FLAG_AUTO_LOAD ((2 * 32) + 5) #define CP_FLAG_AUTO_LOAD_NOT_PENDING 0 #define CP_FLAG_AUTO_LOAD_PENDING 1 #define CP_FLAG_NEWCTX ((2 * 32) + 10) #define CP_FLAG_NEWCTX_BUSY 0 #define CP_FLAG_NEWCTX_DONE 1 #define CP_FLAG_XFER ((2 * 32) + 11) #define CP_FLAG_XFER_IDLE 0 #define CP_FLAG_XFER_BUSY 1 #define CP_FLAG_ALWAYS ((2 * 32) + 13) #define CP_FLAG_ALWAYS_FALSE 0 #define CP_FLAG_ALWAYS_TRUE 1 #define CP_FLAG_INTR ((2 * 32) + 15) #define CP_FLAG_INTR_NOT_PENDING 0 #define CP_FLAG_INTR_PENDING 1 #define CP_CTX 0x00100000 #define CP_CTX_COUNT 0x000f0000 #define CP_CTX_COUNT_SHIFT 16 #define CP_CTX_REG 0x00003fff #define CP_LOAD_SR 0x00200000 #define CP_LOAD_SR_VALUE 0x000fffff #define CP_BRA 0x00400000 #define CP_BRA_IP 0x0001ff00 #define CP_BRA_IP_SHIFT 8 #define CP_BRA_IF_CLEAR 0x00000080 #define CP_BRA_FLAG 0x0000007f #define CP_WAIT 0x00500000 #define CP_WAIT_SET 
0x00000080 #define CP_WAIT_FLAG 0x0000007f #define CP_SET 0x00700000 #define CP_SET_1 0x00000080 #define CP_SET_FLAG 0x0000007f #define CP_NEWCTX 0x00600004 #define CP_NEXT_TO_SWAP 0x00600005 #define CP_SET_CONTEXT_POINTER 0x00600006 #define CP_SET_XFER_POINTER 0x00600007 #define CP_ENABLE 0x00600009 #define CP_END 0x0060000c #define CP_NEXT_TO_CURRENT 0x0060000d #define CP_DISABLE1 0x0090ffff #define CP_DISABLE2 0x0091ffff #define CP_XFER_1 0x008000ff #define CP_XFER_2 0x008800ff #define CP_SEEK_1 0x00c000ff #define CP_SEEK_2 0x00c800ff #include "drmP.h" #include "nouveau_drv.h" #include "nouveau_grctx.h" #define IS_NVA3F(x) (((x) > 0xa0 && (x) < 0xaa) || (x) == 0xaf) #define IS_NVAAF(x) ((x) >= 0xaa && (x) <= 0xac) /* * This code deals with PGRAPH contexts on NV50 family cards. Like NV40, it's * the GPU itself that does context-switching, but it needs a special * microcode to do it. And it's the driver's task to supply this microcode, * further known as ctxprog, as well as the initial context values, known * as ctxvals. * * Without ctxprog, you cannot switch contexts. Not even in software, since * the majority of context [xfer strands] isn't accessible directly. You're * stuck with a single channel, and you also suffer all the problems resulting * from missing ctxvals, since you cannot load them. * * Without ctxvals, you're stuck with PGRAPH's default context. It's enough to * run 2d operations, but trying to utilise 3d or CUDA will just lock you up, * since you don't have... some sort of needed setup. * * Nouveau will just disable acceleration if not given ctxprog + ctxvals, since * it's too much hassle to handle no-ctxprog as a special case. */ /* * How ctxprogs work. * * The ctxprog is written in its own kind of microcode, with very small and * crappy set of available commands. You upload it to a small [512 insns] * area of memory on PGRAPH, and it'll be run when PFIFO wants PGRAPH to * switch channel. or when the driver explicitely requests it. 
Stuff visible * to ctxprog consists of: PGRAPH MMIO registers, PGRAPH context strands, * the per-channel context save area in VRAM [known as ctxvals or grctx], * 4 flags registers, a scratch register, two grctx pointers, plus many * random poorly-understood details. * * When ctxprog runs, it's supposed to check what operations are asked of it, * save old context if requested, optionally reset PGRAPH and switch to the * new channel, and load the new context. Context consists of three major * parts: subset of MMIO registers and two "xfer areas". */ /* TODO: * - document unimplemented bits compared to nvidia * - NVAx: make a TP subroutine, use it. * - use 0x4008fc instead of 0x1540? */ enum cp_label { cp_check_load = 1, cp_setup_auto_load, cp_setup_load, cp_setup_save, cp_swap_state, cp_prepare_exit, cp_exit, }; static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx); static void nv50_graph_construct_xfer1(struct nouveau_grctx *ctx); static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx); /* Main function: construct the ctxprog skeleton, call the other functions. */ int nv50_grctx_init(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; switch (dev_priv->chipset) { case 0x50: case 0x84: case 0x86: case 0x92: case 0x94: case 0x96: case 0x98: case 0xa0: case 0xa3: case 0xa5: case 0xa8: case 0xaa: case 0xac: case 0xaf: break; default: NV_ERROR(ctx->dev, "I don't know how to make a ctxprog for " "your NV%x card.\n", dev_priv->chipset); NV_ERROR(ctx->dev, "Disabling acceleration. 
Please contact " "the devs.\n"); return -ENOSYS; } /* decide whether we're loading/unloading the context */ cp_bra (ctx, AUTO_SAVE, PENDING, cp_setup_save); cp_bra (ctx, USER_SAVE, PENDING, cp_setup_save); cp_name(ctx, cp_check_load); cp_bra (ctx, AUTO_LOAD, PENDING, cp_setup_auto_load); cp_bra (ctx, USER_LOAD, PENDING, cp_setup_load); cp_bra (ctx, ALWAYS, TRUE, cp_exit); /* setup for context load */ cp_name(ctx, cp_setup_auto_load); cp_out (ctx, CP_DISABLE1); cp_out (ctx, CP_DISABLE2); cp_out (ctx, CP_ENABLE); cp_out (ctx, CP_NEXT_TO_SWAP); cp_set (ctx, UNK01, SET); cp_name(ctx, cp_setup_load); cp_out (ctx, CP_NEWCTX); cp_wait(ctx, NEWCTX, BUSY); cp_set (ctx, UNK1D, CLEAR); cp_set (ctx, SWAP_DIRECTION, LOAD); cp_bra (ctx, UNK0B, SET, cp_prepare_exit); cp_bra (ctx, ALWAYS, TRUE, cp_swap_state); /* setup for context save */ cp_name(ctx, cp_setup_save); cp_set (ctx, UNK1D, SET); cp_wait(ctx, STATUS, BUSY); cp_wait(ctx, INTR, PENDING); cp_bra (ctx, STATUS, BUSY, cp_setup_save); cp_set (ctx, UNK01, SET); cp_set (ctx, SWAP_DIRECTION, SAVE); /* general PGRAPH state */ cp_name(ctx, cp_swap_state); cp_set (ctx, UNK03, SET); cp_pos (ctx, 0x00004/4); cp_ctx (ctx, 0x400828, 1); /* needed. otherwise, flickering happens. */ cp_pos (ctx, 0x00100/4); nv50_graph_construct_mmio(ctx); nv50_graph_construct_xfer1(ctx); nv50_graph_construct_xfer2(ctx); cp_bra (ctx, SWAP_DIRECTION, SAVE, cp_check_load); cp_set (ctx, UNK20, SET); cp_set (ctx, SWAP_DIRECTION, SAVE); /* no idea why this is needed, but fixes at least one lockup. 
*/ cp_lsr (ctx, ctx->ctxvals_base); cp_out (ctx, CP_SET_XFER_POINTER); cp_lsr (ctx, 4); cp_out (ctx, CP_SEEK_1); cp_out (ctx, CP_XFER_1); cp_wait(ctx, XFER, BUSY); /* pre-exit state updates */ cp_name(ctx, cp_prepare_exit); cp_set (ctx, UNK01, CLEAR); cp_set (ctx, UNK03, CLEAR); cp_set (ctx, UNK1D, CLEAR); cp_bra (ctx, USER_SAVE, PENDING, cp_exit); cp_out (ctx, CP_NEXT_TO_CURRENT); cp_name(ctx, cp_exit); cp_set (ctx, USER_SAVE, NOT_PENDING); cp_set (ctx, USER_LOAD, NOT_PENDING); cp_out (ctx, CP_END); ctx->ctxvals_pos += 0x400; /* padding... no idea why you need it */ return 0; } /* * Constructs MMIO part of ctxprog and ctxvals. Just a matter of knowing which * registers to save/restore and the default values for them. */ static void nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx); static void nv50_graph_construct_mmio(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; int i, j; int offset, base; uint32_t units = nv_rd32 (ctx->dev, 0x1540); /* 0800: DISPATCH */ cp_ctx(ctx, 0x400808, 7); gr_def(ctx, 0x400814, 0x00000030); cp_ctx(ctx, 0x400834, 0x32); if (dev_priv->chipset == 0x50) { gr_def(ctx, 0x400834, 0xff400040); gr_def(ctx, 0x400838, 0xfff00080); gr_def(ctx, 0x40083c, 0xfff70090); gr_def(ctx, 0x400840, 0xffe806a8); } gr_def(ctx, 0x400844, 0x00000002); if (IS_NVA3F(dev_priv->chipset)) gr_def(ctx, 0x400894, 0x00001000); gr_def(ctx, 0x4008e8, 0x00000003); gr_def(ctx, 0x4008ec, 0x00001000); if (dev_priv->chipset == 0x50) cp_ctx(ctx, 0x400908, 0xb); else if (dev_priv->chipset < 0xa0) cp_ctx(ctx, 0x400908, 0xc); else cp_ctx(ctx, 0x400908, 0xe); if (dev_priv->chipset >= 0xa0) cp_ctx(ctx, 0x400b00, 0x1); if (IS_NVA3F(dev_priv->chipset)) { cp_ctx(ctx, 0x400b10, 0x1); gr_def(ctx, 0x400b10, 0x0001629d); cp_ctx(ctx, 0x400b20, 0x1); gr_def(ctx, 0x400b20, 0x0001629d); } nv50_graph_construct_mmio_ddata(ctx); /* 0C00: VFETCH */ cp_ctx(ctx, 0x400c08, 0x2); gr_def(ctx, 0x400c08, 0x0000fe0c); /* 1000 */ if (dev_priv->chipset < 
0xa0) { cp_ctx(ctx, 0x401008, 0x4); gr_def(ctx, 0x401014, 0x00001000); } else if (!IS_NVA3F(dev_priv->chipset)) { cp_ctx(ctx, 0x401008, 0x5); gr_def(ctx, 0x401018, 0x00001000); } else { cp_ctx(ctx, 0x401008, 0x5); gr_def(ctx, 0x401018, 0x00004000); } /* 1400 */ cp_ctx(ctx, 0x401400, 0x8); cp_ctx(ctx, 0x401424, 0x3); if (dev_priv->chipset == 0x50) gr_def(ctx, 0x40142c, 0x0001fd87); else gr_def(ctx, 0x40142c, 0x00000187); cp_ctx(ctx, 0x401540, 0x5); gr_def(ctx, 0x401550, 0x00001018); /* 1800: STREAMOUT */ cp_ctx(ctx, 0x401814, 0x1); gr_def(ctx, 0x401814, 0x000000ff); if (dev_priv->chipset == 0x50) { cp_ctx(ctx, 0x40181c, 0xe); gr_def(ctx, 0x401850, 0x00000004); } else if (dev_priv->chipset < 0xa0) { cp_ctx(ctx, 0x40181c, 0xf); gr_def(ctx, 0x401854, 0x00000004); } else { cp_ctx(ctx, 0x40181c, 0x13); gr_def(ctx, 0x401864, 0x00000004); } /* 1C00 */ cp_ctx(ctx, 0x401c00, 0x1); switch (dev_priv->chipset) { case 0x50: gr_def(ctx, 0x401c00, 0x0001005f); break; case 0x84: case 0x86: case 0x94: gr_def(ctx, 0x401c00, 0x044d00df); break; case 0x92: case 0x96: case 0x98: case 0xa0: case 0xaa: case 0xac: gr_def(ctx, 0x401c00, 0x042500df); break; case 0xa3: case 0xa5: case 0xa8: case 0xaf: gr_def(ctx, 0x401c00, 0x142500df); break; } /* 2000 */ /* 2400 */ cp_ctx(ctx, 0x402400, 0x1); if (dev_priv->chipset == 0x50) cp_ctx(ctx, 0x402408, 0x1); else cp_ctx(ctx, 0x402408, 0x2); gr_def(ctx, 0x402408, 0x00000600); /* 2800: CSCHED */ cp_ctx(ctx, 0x402800, 0x1); if (dev_priv->chipset == 0x50) gr_def(ctx, 0x402800, 0x00000006); /* 2C00: ZCULL */ cp_ctx(ctx, 0x402c08, 0x6); if (dev_priv->chipset != 0x50) gr_def(ctx, 0x402c14, 0x01000000); gr_def(ctx, 0x402c18, 0x000000ff); if (dev_priv->chipset == 0x50) cp_ctx(ctx, 0x402ca0, 0x1); else cp_ctx(ctx, 0x402ca0, 0x2); if (dev_priv->chipset < 0xa0) gr_def(ctx, 0x402ca0, 0x00000400); else if (!IS_NVA3F(dev_priv->chipset)) gr_def(ctx, 0x402ca0, 0x00000800); else gr_def(ctx, 0x402ca0, 0x00000400); cp_ctx(ctx, 0x402cac, 0x4); /* 3000: ENG2D */ 
/* continuation of nv50_graph_construct_mmio(): ENG2D onwards */
    cp_ctx(ctx, 0x403004, 0x1);
    gr_def(ctx, 0x403004, 0x00000001);

    /* 3400 */
    if (dev_priv->chipset >= 0xa0) {
        cp_ctx(ctx, 0x403404, 0x1);
        gr_def(ctx, 0x403404, 0x00000001);
    }

    /* 5000: CCACHE */
    cp_ctx(ctx, 0x405000, 0x1);
    switch (dev_priv->chipset) {
    case 0x50:
        gr_def(ctx, 0x405000, 0x00300080);
        break;
    case 0x84:
    case 0xa0:
    case 0xa3:
    case 0xa5:
    case 0xa8:
    case 0xaa:
    case 0xac:
    case 0xaf:
        gr_def(ctx, 0x405000, 0x000e0080);
        break;
    case 0x86:
    case 0x92:
    case 0x94:
    case 0x96:
    case 0x98:
        gr_def(ctx, 0x405000, 0x00000080);
        break;
    }
    cp_ctx(ctx, 0x405014, 0x1);
    gr_def(ctx, 0x405014, 0x00000004);
    cp_ctx(ctx, 0x40501c, 0x1);
    cp_ctx(ctx, 0x405024, 0x1);
    cp_ctx(ctx, 0x40502c, 0x1);

    /* 6000? */
    if (dev_priv->chipset == 0x50)
        cp_ctx(ctx, 0x4063e0, 0x1);

    /* 6800: M2MF */
    if (dev_priv->chipset < 0x90) {
        cp_ctx(ctx, 0x406814, 0x2b);
        gr_def(ctx, 0x406818, 0x00000f80);
        gr_def(ctx, 0x406860, 0x007f0080);
        gr_def(ctx, 0x40689c, 0x007f0080);
    } else {
        cp_ctx(ctx, 0x406814, 0x4);
        if (dev_priv->chipset == 0x98)
            gr_def(ctx, 0x406818, 0x00000f80);
        else
            gr_def(ctx, 0x406818, 0x00001f80);
        if (IS_NVA3F(dev_priv->chipset))
            gr_def(ctx, 0x40681c, 0x00000030);
        cp_ctx(ctx, 0x406830, 0x3);
    }

    /* 7000: per-ROP group state; one 0x100-byte register stride per ROP,
     * skipped when the unit is absent from the 'units' mask */
    for (i = 0; i < 8; i++) {
        if (units & (1<<(i+16))) {
            cp_ctx(ctx, 0x407000 + (i<<8), 3);
            if (dev_priv->chipset == 0x50)
                gr_def(ctx, 0x407000 + (i<<8), 0x1b74f820);
            else if (dev_priv->chipset != 0xa5)
                gr_def(ctx, 0x407000 + (i<<8), 0x3b74f821);
            else
                gr_def(ctx, 0x407000 + (i<<8), 0x7b74f821);
            gr_def(ctx, 0x407004 + (i<<8), 0x89058001);

            if (dev_priv->chipset == 0x50) {
                cp_ctx(ctx, 0x407010 + (i<<8), 1);
            } else if (dev_priv->chipset < 0xa0) {
                cp_ctx(ctx, 0x407010 + (i<<8), 2);
                gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
                gr_def(ctx, 0x407014 + (i<<8), 0x0000001f);
            } else {
                cp_ctx(ctx, 0x407010 + (i<<8), 3);
                gr_def(ctx, 0x407010 + (i<<8), 0x00001000);
                if (dev_priv->chipset != 0xa5)
                    gr_def(ctx, 0x407014 + (i<<8), 0x000000ff);
                else
                    gr_def(ctx, 0x407014 + (i<<8), 0x000001ff);
            }

            cp_ctx(ctx, 0x407080 + (i<<8), 4);
            if (dev_priv->chipset != 0xa5)
                gr_def(ctx, 0x407080 + (i<<8), 0x027c10fa);
            else
                gr_def(ctx, 0x407080 + (i<<8), 0x827c10fa);
            if (dev_priv->chipset == 0x50)
                gr_def(ctx, 0x407084 + (i<<8), 0x000000c0);
            else
                gr_def(ctx, 0x407084 + (i<<8), 0x400000c0);
            gr_def(ctx, 0x407088 + (i<<8), 0xb7892080);

            if (dev_priv->chipset < 0xa0)
                cp_ctx(ctx, 0x407094 + (i<<8), 1);
            else if (!IS_NVA3F(dev_priv->chipset))
                cp_ctx(ctx, 0x407094 + (i<<8), 3);
            else {
                cp_ctx(ctx, 0x407094 + (i<<8), 4);
                gr_def(ctx, 0x4070a0 + (i<<8), 1);
            }
        }
    }

    cp_ctx(ctx, 0x407c00, 0x3);
    if (dev_priv->chipset < 0x90)
        gr_def(ctx, 0x407c00, 0x00010040);
    else if (dev_priv->chipset < 0xa0)
        gr_def(ctx, 0x407c00, 0x00390040);
    else
        gr_def(ctx, 0x407c00, 0x003d0040);
    gr_def(ctx, 0x407c08, 0x00000022);
    if (dev_priv->chipset >= 0xa0) {
        cp_ctx(ctx, 0x407c10, 0x3);
        cp_ctx(ctx, 0x407c20, 0x1);
        cp_ctx(ctx, 0x407c2c, 0x1);
    }

    if (dev_priv->chipset < 0xa0) {
        cp_ctx(ctx, 0x407d00, 0x9);
    } else {
        cp_ctx(ctx, 0x407d00, 0x15);
    }
    if (dev_priv->chipset == 0x98)
        gr_def(ctx, 0x407d08, 0x00380040);
    else {
        if (dev_priv->chipset < 0x90)
            gr_def(ctx, 0x407d08, 0x00010040);
        else if (dev_priv->chipset < 0xa0)
            gr_def(ctx, 0x407d08, 0x00390040);
        else
            gr_def(ctx, 0x407d08, 0x003d0040);
        gr_def(ctx, 0x407d0c, 0x00000022);
    }

    /* 8000+: per-TP state; TP register stride is 0x1000 pre-NVA0 and
     * 0x800 from NVA0 on, hence the i<<12 / i<<11 split */
    for (i = 0; i < 10; i++) {
        if (units & (1<<i)) {
            if (dev_priv->chipset < 0xa0)
                base = 0x408000 + (i<<12);
            else
                base = 0x408000 + (i<<11);
            if (dev_priv->chipset < 0xa0)
                offset = base + 0xc00;
            else
                offset = base + 0x80;
            cp_ctx(ctx, offset + 0x00, 1);
            gr_def(ctx, offset + 0x00, 0x0000ff0a);
            cp_ctx(ctx, offset + 0x08, 1);

            /* per-MP state: 2 MPs per TP pre-NVA0, 4 after */
            for (j = 0; j < (dev_priv->chipset < 0xa0 ? 2 : 4); j++) {
                if (!(units & (1 << (j+24))))
                    continue;
                if (dev_priv->chipset < 0xa0)
                    offset = base + 0x200 + (j<<7);
                else
                    offset = base + 0x100 + (j<<7);
                cp_ctx(ctx, offset, 0x20);
                gr_def(ctx, offset + 0x00, 0x01800000);
                gr_def(ctx, offset + 0x04, 0x00160000);
                gr_def(ctx, offset + 0x08, 0x01800000);
                gr_def(ctx, offset + 0x18, 0x0003ffff);
                switch (dev_priv->chipset) {
                case 0x50:
                    gr_def(ctx, offset + 0x1c, 0x00080000);
                    break;
                case 0x84:
                    gr_def(ctx, offset + 0x1c, 0x00880000);
                    break;
                case 0x86:
                    gr_def(ctx, offset + 0x1c, 0x008c0000);
                    break;
                case 0x92:
                case 0x96:
                case 0x98:
                    gr_def(ctx, offset + 0x1c, 0x118c0000);
                    break;
                case 0x94:
                    gr_def(ctx, offset + 0x1c, 0x10880000);
                    break;
                case 0xa0:
                case 0xa5:
                    gr_def(ctx, offset + 0x1c, 0x310c0000);
                    break;
                case 0xa3:
                case 0xa8:
                case 0xaa:
                case 0xac:
                case 0xaf:
                    gr_def(ctx, offset + 0x1c, 0x300c0000);
                    break;
                }
                gr_def(ctx, offset + 0x40, 0x00010401);
                if (dev_priv->chipset == 0x50)
                    gr_def(ctx, offset + 0x48, 0x00000040);
                else
                    gr_def(ctx, offset + 0x48, 0x00000078);
                gr_def(ctx, offset + 0x50, 0x000000bf);
                gr_def(ctx, offset + 0x58, 0x00001210);
                if (dev_priv->chipset == 0x50)
                    gr_def(ctx, offset + 0x5c, 0x00000080);
                else
                    gr_def(ctx, offset + 0x5c, 0x08000080);
                if (dev_priv->chipset >= 0xa0)
                    gr_def(ctx, offset + 0x68, 0x0000003e);
            }

            if (dev_priv->chipset < 0xa0)
                cp_ctx(ctx, base + 0x300, 0x4);
            else
                cp_ctx(ctx, base + 0x300, 0x5);
            if (dev_priv->chipset == 0x50)
                gr_def(ctx, base + 0x304, 0x00007070);
            else if (dev_priv->chipset < 0xa0)
                gr_def(ctx, base + 0x304, 0x00027070);
            else if (!IS_NVA3F(dev_priv->chipset))
                gr_def(ctx, base + 0x304, 0x01127070);
            else
                gr_def(ctx, base + 0x304, 0x05127070);

            if (dev_priv->chipset < 0xa0)
                cp_ctx(ctx, base + 0x318, 1);
            else
                cp_ctx(ctx, base + 0x320, 1);
            if (dev_priv->chipset == 0x50)
                gr_def(ctx, base + 0x318, 0x0003ffff);
            else if (dev_priv->chipset < 0xa0)
                gr_def(ctx, base + 0x318, 0x03ffffff);
            else
                gr_def(ctx, base + 0x320, 0x07ffffff);

            if (dev_priv->chipset < 0xa0)
                cp_ctx(ctx, base + 0x324, 5);
            else
                cp_ctx(ctx, base + 0x328, 4);

            if (dev_priv->chipset < 0xa0) {
                cp_ctx(ctx, base + 0x340, 9);
                offset = base + 0x340;
            } else if (!IS_NVA3F(dev_priv->chipset)) {
                cp_ctx(ctx, base + 0x33c, 0xb);
                offset = base + 0x344;
            } else {
                cp_ctx(ctx, base + 0x33c, 0xd);
                offset = base + 0x344;
            }
            gr_def(ctx, offset + 0x0, 0x00120407);
            gr_def(ctx, offset + 0x4, 0x05091507);
            if (dev_priv->chipset == 0x84)
                gr_def(ctx, offset + 0x8, 0x05100202);
            else
                gr_def(ctx, offset + 0x8, 0x05010202);
            gr_def(ctx, offset + 0xc, 0x00030201);
            if (dev_priv->chipset == 0xa3)
                cp_ctx(ctx, base + 0x36c, 1);

            cp_ctx(ctx, base + 0x400, 2);
            gr_def(ctx, base + 0x404, 0x00000040);
            cp_ctx(ctx, base + 0x40c, 2);
            gr_def(ctx, base + 0x40c, 0x0d0c0b0a);
            gr_def(ctx, base + 0x410, 0x00141210);

            if (dev_priv->chipset < 0xa0)
                offset = base + 0x800;
            else
                offset = base + 0x500;
            cp_ctx(ctx, offset, 6);
            gr_def(ctx, offset + 0x0, 0x000001f0);
            gr_def(ctx, offset + 0x4, 0x00000001);
            gr_def(ctx, offset + 0x8, 0x00000003);
            if (dev_priv->chipset == 0x50 || IS_NVAAF(dev_priv->chipset))
                gr_def(ctx, offset + 0xc, 0x00008000);
            gr_def(ctx, offset + 0x14, 0x00039e00);
            cp_ctx(ctx, offset + 0x1c, 2);
            if (dev_priv->chipset == 0x50)
                gr_def(ctx, offset + 0x1c, 0x00000040);
            else
                gr_def(ctx, offset + 0x1c, 0x00000100);
            gr_def(ctx, offset + 0x20, 0x00003800);

            if (dev_priv->chipset >= 0xa0) {
                cp_ctx(ctx, base + 0x54c, 2);
                if (!IS_NVA3F(dev_priv->chipset))
                    gr_def(ctx, base + 0x54c, 0x003fe006);
                else
                    gr_def(ctx, base + 0x54c, 0x003fe007);
                gr_def(ctx, base + 0x550, 0x003fe000);
            }

            if (dev_priv->chipset < 0xa0)
                offset = base + 0xa00;
            else
                offset = base + 0x680;
            cp_ctx(ctx, offset, 1);
            gr_def(ctx, offset, 0x00404040);

            if (dev_priv->chipset < 0xa0)
                offset = base + 0xe00;
            else
                offset = base + 0x700;
            cp_ctx(ctx, offset, 2);
            if (dev_priv->chipset < 0xa0)
                gr_def(ctx, offset, 0x0077f005);
            else if (dev_priv->chipset == 0xa5)
                gr_def(ctx, offset, 0x6cf7f007);
            else if (dev_priv->chipset == 0xa8)
                gr_def(ctx, offset, 0x6cfff007);
            else if (dev_priv->chipset == 0xac)
                gr_def(ctx, offset, 0x0cfff007);
            else
                gr_def(ctx, offset, 0x0cf7f007);
            if (dev_priv->chipset == 0x50)
                gr_def(ctx, offset + 0x4, 0x00007fff);
            else if (dev_priv->chipset < 0xa0)
                gr_def(ctx, offset + 0x4, 0x003f7fff);
            else
                gr_def(ctx, offset + 0x4, 0x02bf7fff);
            cp_ctx(ctx, offset + 0x2c, 1);

            if (dev_priv->chipset == 0x50) {
                cp_ctx(ctx, offset + 0x50, 9);
                gr_def(ctx, offset + 0x54, 0x000003ff);
                gr_def(ctx, offset + 0x58, 0x00000003);
                gr_def(ctx, offset + 0x5c, 0x00000003);
                gr_def(ctx, offset + 0x60, 0x000001ff);
                gr_def(ctx, offset + 0x64, 0x0000001f);
                gr_def(ctx, offset + 0x68, 0x0000000f);
                gr_def(ctx, offset + 0x6c, 0x0000000f);
            } else if (dev_priv->chipset < 0xa0) {
                cp_ctx(ctx, offset + 0x50, 1);
                cp_ctx(ctx, offset + 0x70, 1);
            } else {
                cp_ctx(ctx, offset + 0x50, 1);
                cp_ctx(ctx, offset + 0x60, 5);
            }
        }
    }
}

/*
 * Emit 'num' consecutive default words of value 'val' into the ctxvals
 * image.  Words are only actually written in VALS mode and only when 'val'
 * is nonzero (the image starts zeroed); the position always advances so
 * the layout stays in sync in all modes.
 */
static void
dd_emit(struct nouveau_grctx *ctx, int num, uint32_t val)
{
    int i;
    if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
        for (i = 0; i < num; i++)
            nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + i), val);
    ctx->ctxvals_pos += num;
}

/*
 * Default values for the dispatcher data area.  The dd_emit() sequence below
 * is a strictly ordered list of saved words: the comment on each entry gives
 * the observed bitmask and, where known, the state it shadows.  Do not
 * reorder or re-count entries without adjusting for every chipset branch.
 */
static void
nv50_graph_construct_mmio_ddata(struct nouveau_grctx *ctx)
{
    struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
    int base, num;
    base = ctx->ctxvals_pos;

    /* tesla state */
    dd_emit(ctx, 1, 0);     /* 00000001 UNK0F90 */
    dd_emit(ctx, 1, 0);     /* 00000001 UNK135C */

    /* SRC_TIC state */
    dd_emit(ctx, 1, 0);     /* 00000007 SRC_TILE_MODE_Z */
    dd_emit(ctx, 1, 2);     /* 00000007 SRC_TILE_MODE_Y */
    dd_emit(ctx, 1, 1);     /* 00000001 SRC_LINEAR #1 */
    dd_emit(ctx, 1, 0);     /* 000000ff SRC_ADDRESS_HIGH */
    dd_emit(ctx, 1, 0);     /* 00000001 SRC_SRGB */
    if (dev_priv->chipset >= 0x94)
        dd_emit(ctx, 1, 0); /* 00000003 eng2d UNK0258 */
    dd_emit(ctx, 1, 1);     /* 00000fff SRC_DEPTH */
    dd_emit(ctx, 1, 0x100); /* 0000ffff SRC_HEIGHT */

    /* turing state */
    dd_emit(ctx, 1, 0);     /* 0000000f TEXTURES_LOG2 */
    dd_emit(ctx, 1, 0);     /* 0000000f SAMPLERS_LOG2 */
    dd_emit(ctx, 1, 0);     /* 000000ff CB_DEF_ADDRESS_HIGH */
    dd_emit(ctx, 1, 0);     /* ffffffff CB_DEF_ADDRESS_LOW */
/* continuation of nv50_graph_construct_mmio_ddata(): turing/compat-2d/m2mf */
    dd_emit(ctx, 1, 0);         /* ffffffff SHARED_SIZE */
    dd_emit(ctx, 1, 2);         /* ffffffff REG_MODE */
    dd_emit(ctx, 1, 1);         /* 0000ffff BLOCK_ALLOC_THREADS */
    dd_emit(ctx, 1, 1);         /* 00000001 LANES32 */
    dd_emit(ctx, 1, 0);         /* 000000ff UNK370 */
    dd_emit(ctx, 1, 0);         /* 000000ff USER_PARAM_UNK */
    dd_emit(ctx, 1, 0);         /* 000000ff USER_PARAM_COUNT */
    dd_emit(ctx, 1, 1);         /* 000000ff UNK384 bits 8-15 */
    dd_emit(ctx, 1, 0x3fffff);  /* 003fffff TIC_LIMIT */
    dd_emit(ctx, 1, 0x1fff);    /* 000fffff TSC_LIMIT */
    dd_emit(ctx, 1, 0);         /* 0000ffff CB_ADDR_INDEX */
    dd_emit(ctx, 1, 1);         /* 000007ff BLOCKDIM_X */
    dd_emit(ctx, 1, 1);         /* 000007ff BLOCKDIM_XMY */
    dd_emit(ctx, 1, 0);         /* 00000001 BLOCKDIM_XMY_OVERFLOW */
    dd_emit(ctx, 1, 1);         /* 0003ffff BLOCKDIM_XMYMZ */
    dd_emit(ctx, 1, 1);         /* 000007ff BLOCKDIM_Y */
    dd_emit(ctx, 1, 1);         /* 0000007f BLOCKDIM_Z */
    dd_emit(ctx, 1, 4);         /* 000000ff CP_REG_ALLOC_TEMP */
    dd_emit(ctx, 1, 1);         /* 00000001 BLOCKDIM_DIRTY */
    if (IS_NVA3F(dev_priv->chipset))
        dd_emit(ctx, 1, 0);     /* 00000003 UNK03E8 */
    dd_emit(ctx, 1, 1);         /* 0000007f BLOCK_ALLOC_HALFWARPS */
    dd_emit(ctx, 1, 1);         /* 00000007 LOCAL_WARPS_NO_CLAMP */
    dd_emit(ctx, 1, 7);         /* 00000007 LOCAL_WARPS_LOG_ALLOC */
    dd_emit(ctx, 1, 1);         /* 00000007 STACK_WARPS_NO_CLAMP */
    dd_emit(ctx, 1, 7);         /* 00000007 STACK_WARPS_LOG_ALLOC */
    dd_emit(ctx, 1, 1);         /* 00001fff BLOCK_ALLOC_REGSLOTS_PACKED */
    dd_emit(ctx, 1, 1);         /* 00001fff BLOCK_ALLOC_REGSLOTS_STRIDED */
    dd_emit(ctx, 1, 1);         /* 000007ff BLOCK_ALLOC_THREADS */

    /* compat 2d state (NV50 only) */
    if (dev_priv->chipset == 0x50) {
        dd_emit(ctx, 4, 0);         /* 0000ffff clip X, Y, W, H */
        dd_emit(ctx, 1, 1);         /* ffffffff chroma COLOR_FORMAT */
        dd_emit(ctx, 1, 1);         /* ffffffff pattern COLOR_FORMAT */
        dd_emit(ctx, 1, 0);         /* ffffffff pattern SHAPE */
        dd_emit(ctx, 1, 1);         /* ffffffff pattern PATTERN_SELECT */
        dd_emit(ctx, 1, 0xa);       /* ffffffff surf2d SRC_FORMAT */
        dd_emit(ctx, 1, 0);         /* ffffffff surf2d DMA_SRC */
        dd_emit(ctx, 1, 0);         /* 000000ff surf2d SRC_ADDRESS_HIGH */
        dd_emit(ctx, 1, 0);         /* ffffffff surf2d SRC_ADDRESS_LOW */
        dd_emit(ctx, 1, 0x40);      /* 0000ffff surf2d SRC_PITCH */
        dd_emit(ctx, 1, 0);         /* 0000000f surf2d SRC_TILE_MODE_Z */
        dd_emit(ctx, 1, 2);         /* 0000000f surf2d SRC_TILE_MODE_Y */
        dd_emit(ctx, 1, 0x100);     /* ffffffff surf2d SRC_HEIGHT */
        dd_emit(ctx, 1, 1);         /* 00000001 surf2d SRC_LINEAR */
        dd_emit(ctx, 1, 0x100);     /* ffffffff surf2d SRC_WIDTH */
        dd_emit(ctx, 1, 0);         /* 0000ffff gdirect CLIP_B_X */
        dd_emit(ctx, 1, 0);         /* 0000ffff gdirect CLIP_B_Y */
        dd_emit(ctx, 1, 0);         /* 0000ffff gdirect CLIP_C_X */
        dd_emit(ctx, 1, 0);         /* 0000ffff gdirect CLIP_C_Y */
        dd_emit(ctx, 1, 0);         /* 0000ffff gdirect CLIP_D_X */
        dd_emit(ctx, 1, 0);         /* 0000ffff gdirect CLIP_D_Y */
        dd_emit(ctx, 1, 1);         /* ffffffff gdirect COLOR_FORMAT */
        dd_emit(ctx, 1, 0);         /* ffffffff gdirect OPERATION */
        dd_emit(ctx, 1, 0);         /* 0000ffff gdirect POINT_X */
        dd_emit(ctx, 1, 0);         /* 0000ffff gdirect POINT_Y */
        dd_emit(ctx, 1, 0);         /* 0000ffff blit SRC_Y */
        dd_emit(ctx, 1, 0);         /* ffffffff blit OPERATION */
        dd_emit(ctx, 1, 0);         /* ffffffff ifc OPERATION */
        dd_emit(ctx, 1, 0);         /* ffffffff iifc INDEX_FORMAT */
        dd_emit(ctx, 1, 0);         /* ffffffff iifc LUT_OFFSET */
        dd_emit(ctx, 1, 4);         /* ffffffff iifc COLOR_FORMAT */
        dd_emit(ctx, 1, 0);         /* ffffffff iifc OPERATION */
    }

    /* m2mf state */
    dd_emit(ctx, 1, 0);         /* ffffffff m2mf LINE_COUNT */
    dd_emit(ctx, 1, 0);         /* ffffffff m2mf LINE_LENGTH_IN */
    dd_emit(ctx, 2, 0);         /* ffffffff m2mf OFFSET_IN, OFFSET_OUT */
    dd_emit(ctx, 1, 1);         /* ffffffff m2mf TILING_DEPTH_OUT */
    dd_emit(ctx, 1, 0x100);     /* ffffffff m2mf TILING_HEIGHT_OUT */
    dd_emit(ctx, 1, 0);         /* ffffffff m2mf TILING_POSITION_OUT_Z */
    dd_emit(ctx, 1, 1);         /* 00000001 m2mf LINEAR_OUT */
    dd_emit(ctx, 2, 0);         /* 0000ffff m2mf TILING_POSITION_OUT_X, Y */
    dd_emit(ctx, 1, 0x100);     /* ffffffff m2mf TILING_PITCH_OUT */
    dd_emit(ctx, 1, 1);         /* ffffffff m2mf TILING_DEPTH_IN */
    dd_emit(ctx, 1, 0x100);     /* ffffffff m2mf TILING_HEIGHT_IN */
    dd_emit(ctx, 1, 0);         /* ffffffff m2mf TILING_POSITION_IN_Z */
    dd_emit(ctx, 1, 1);         /* 00000001 m2mf LINEAR_IN */
    dd_emit(ctx, 2, 0);         /* 0000ffff m2mf TILING_POSITION_IN_X, Y */
    dd_emit(ctx, 1, 0x100);     /* ffffffff m2mf TILING_PITCH_IN */

    /* more compat 2d state (NV50 only) */
    if (dev_priv->chipset == 0x50) {
        dd_emit(ctx, 1, 1);         /* ffffffff line COLOR_FORMAT */
        dd_emit(ctx, 1, 0);         /* ffffffff line OPERATION */
        dd_emit(ctx, 1, 1);         /* ffffffff triangle COLOR_FORMAT */
        dd_emit(ctx, 1, 0);         /* ffffffff triangle OPERATION */
        dd_emit(ctx, 1, 0);         /* 0000000f sifm TILE_MODE_Z */
        dd_emit(ctx, 1, 2);         /* 0000000f sifm TILE_MODE_Y */
        dd_emit(ctx, 1, 0);         /* 000000ff sifm FORMAT_FILTER */
        dd_emit(ctx, 1, 1);         /* 000000ff sifm FORMAT_ORIGIN */
        dd_emit(ctx, 1, 0);         /* 0000ffff sifm SRC_PITCH */
        dd_emit(ctx, 1, 1);         /* 00000001 sifm SRC_LINEAR */
        dd_emit(ctx, 1, 0);         /* 000000ff sifm SRC_OFFSET_HIGH */
        dd_emit(ctx, 1, 0);         /* ffffffff sifm SRC_OFFSET */
        dd_emit(ctx, 1, 0);         /* 0000ffff sifm SRC_HEIGHT */
        dd_emit(ctx, 1, 0);         /* 0000ffff sifm SRC_WIDTH */
        dd_emit(ctx, 1, 3);         /* ffffffff sifm COLOR_FORMAT */
        dd_emit(ctx, 1, 0);         /* ffffffff sifm OPERATION */
        dd_emit(ctx, 1, 0);         /* ffffffff sifc OPERATION */
    }

    /* tesla state */
    dd_emit(ctx, 1, 0);         /* 0000000f GP_TEXTURES_LOG2 */
    dd_emit(ctx, 1, 0);         /* 0000000f GP_SAMPLERS_LOG2 */
    dd_emit(ctx, 1, 0);         /* 000000ff */
    dd_emit(ctx, 1, 0);         /* ffffffff */
    dd_emit(ctx, 1, 4);         /* 000000ff UNK12B0_0 */
    dd_emit(ctx, 1, 0x70);      /* 000000ff UNK12B0_1 */
    dd_emit(ctx, 1, 0x80);      /* 000000ff UNK12B0_3 */
    dd_emit(ctx, 1, 0);         /* 000000ff UNK12B0_2 */
    dd_emit(ctx, 1, 0);         /* 0000000f FP_TEXTURES_LOG2 */
    dd_emit(ctx, 1, 0);         /* 0000000f FP_SAMPLERS_LOG2 */
    if (IS_NVA3F(dev_priv->chipset)) {
        dd_emit(ctx, 1, 0);     /* ffffffff */
        dd_emit(ctx, 1, 0);     /* 0000007f MULTISAMPLE_SAMPLES_LOG2 */
    } else {
        dd_emit(ctx, 1, 0);     /* 0000000f MULTISAMPLE_SAMPLES_LOG2 */
    }
    dd_emit(ctx, 1, 0xc);       /* 000000ff SEMANTIC_COLOR.BFC0_ID */
    if (dev_priv->chipset != 0x50)
        dd_emit(ctx, 1, 0);     /* 00000001 SEMANTIC_COLOR.CLMP_EN */
    dd_emit(ctx, 1, 8);         /* 000000ff SEMANTIC_COLOR.COLR_NR */
    dd_emit(ctx, 1, 0x14);
/* 000000ff SEMANTIC_COLOR.FFC0_ID */
    if (dev_priv->chipset == 0x50) {
        dd_emit(ctx, 1, 0);     /* 000000ff SEMANTIC_LAYER */
        dd_emit(ctx, 1, 0);     /* 00000001 */
    } else {
        dd_emit(ctx, 1, 0);     /* 00000001 SEMANTIC_PTSZ.ENABLE */
        dd_emit(ctx, 1, 0x29);  /* 000000ff SEMANTIC_PTSZ.PTSZ_ID */
        dd_emit(ctx, 1, 0x27);  /* 000000ff SEMANTIC_PRIM */
        dd_emit(ctx, 1, 0x26);  /* 000000ff SEMANTIC_LAYER */
        dd_emit(ctx, 1, 8);     /* 0000000f SEMANTIC_CLIP.CLIP_HIGH */
        dd_emit(ctx, 1, 4);     /* 000000ff SEMANTIC_CLIP.CLIP_LO */
        dd_emit(ctx, 1, 0x27);  /* 000000ff UNK0FD4 */
        dd_emit(ctx, 1, 0);     /* 00000001 UNK1900 */
    }
    dd_emit(ctx, 1, 0);     /* 00000007 RT_CONTROL_MAP0 */
    dd_emit(ctx, 1, 1);     /* 00000007 RT_CONTROL_MAP1 */
    dd_emit(ctx, 1, 2);     /* 00000007 RT_CONTROL_MAP2 */
    dd_emit(ctx, 1, 3);     /* 00000007 RT_CONTROL_MAP3 */
    dd_emit(ctx, 1, 4);     /* 00000007 RT_CONTROL_MAP4 */
    dd_emit(ctx, 1, 5);     /* 00000007 RT_CONTROL_MAP5 */
    dd_emit(ctx, 1, 6);     /* 00000007 RT_CONTROL_MAP6 */
    dd_emit(ctx, 1, 7);     /* 00000007 RT_CONTROL_MAP7 */
    dd_emit(ctx, 1, 1);     /* 0000000f RT_CONTROL_COUNT */
    dd_emit(ctx, 8, 0);     /* 00000001 RT_HORIZ_UNK */
    dd_emit(ctx, 8, 0);     /* ffffffff RT_ADDRESS_LOW */
    dd_emit(ctx, 1, 0xcf);  /* 000000ff RT_FORMAT */
    dd_emit(ctx, 7, 0);     /* 000000ff RT_FORMAT */
    if (dev_priv->chipset != 0x50)
        dd_emit(ctx, 3, 0); /* 1, 1, 1 */
    else
        dd_emit(ctx, 2, 0); /* 1, 1 */
    dd_emit(ctx, 1, 0);     /* ffffffff GP_ENABLE */
    dd_emit(ctx, 1, 0x80);  /* 0000ffff GP_VERTEX_OUTPUT_COUNT*/
    dd_emit(ctx, 1, 4);     /* 000000ff GP_REG_ALLOC_RESULT */
    dd_emit(ctx, 1, 4);     /* 000000ff GP_RESULT_MAP_SIZE */
    if (IS_NVA3F(dev_priv->chipset)) {
        dd_emit(ctx, 1, 3); /* 00000003 */
        dd_emit(ctx, 1, 0); /* 00000001 UNK1418. Alone. */
    }
    if (dev_priv->chipset != 0x50)
        dd_emit(ctx, 1, 3); /* 00000003 UNK15AC */
    dd_emit(ctx, 1, 1);     /* ffffffff RASTERIZE_ENABLE */
    dd_emit(ctx, 1, 0);     /* 00000001 FP_CONTROL.EXPORTS_Z */
    if (dev_priv->chipset != 0x50)
        dd_emit(ctx, 1, 0); /* 00000001 FP_CONTROL.MULTIPLE_RESULTS */
    dd_emit(ctx, 1, 0x12);  /* 000000ff FP_INTERPOLANT_CTRL.COUNT */
    dd_emit(ctx, 1, 0x10);  /* 000000ff FP_INTERPOLANT_CTRL.COUNT_NONFLAT */
    dd_emit(ctx, 1, 0xc);   /* 000000ff FP_INTERPOLANT_CTRL.OFFSET */
    dd_emit(ctx, 1, 1);     /* 00000001 FP_INTERPOLANT_CTRL.UMASK.W */
    dd_emit(ctx, 1, 0);     /* 00000001 FP_INTERPOLANT_CTRL.UMASK.X */
    dd_emit(ctx, 1, 0);     /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Y */
    dd_emit(ctx, 1, 0);     /* 00000001 FP_INTERPOLANT_CTRL.UMASK.Z */
    dd_emit(ctx, 1, 4);     /* 000000ff FP_RESULT_COUNT */
    dd_emit(ctx, 1, 2);     /* ffffffff REG_MODE */
    dd_emit(ctx, 1, 4);     /* 000000ff FP_REG_ALLOC_TEMP */
    if (dev_priv->chipset >= 0xa0)
        dd_emit(ctx, 1, 0); /* ffffffff */
    dd_emit(ctx, 1, 0);     /* 00000001 GP_BUILTIN_RESULT_EN.LAYER_IDX */
    dd_emit(ctx, 1, 0);     /* ffffffff STRMOUT_ENABLE */
    dd_emit(ctx, 1, 0x3fffff); /* 003fffff TIC_LIMIT */
    dd_emit(ctx, 1, 0x1fff); /* 000fffff TSC_LIMIT */
    dd_emit(ctx, 1, 0);     /* 00000001 VERTEX_TWO_SIDE_ENABLE*/
    if (dev_priv->chipset != 0x50)
        dd_emit(ctx, 8, 0); /* 00000001 */
    if (dev_priv->chipset >= 0xa0) {
        dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.COMP */
        dd_emit(ctx, 1, 1); /* 00000007 VTX_ATTR_DEFINE.SIZE */
        dd_emit(ctx, 1, 2); /* 00000007 VTX_ATTR_DEFINE.TYPE */
        dd_emit(ctx, 1, 0); /* 000000ff VTX_ATTR_DEFINE.ATTR */
    }
    dd_emit(ctx, 1, 4);     /* 0000007f VP_RESULT_MAP_SIZE */
    dd_emit(ctx, 1, 0x14);  /* 0000001f ZETA_FORMAT */
    dd_emit(ctx, 1, 1);     /* 00000001 ZETA_ENABLE */
    dd_emit(ctx, 1, 0);     /* 0000000f VP_TEXTURES_LOG2 */
    dd_emit(ctx, 1, 0);     /* 0000000f VP_SAMPLERS_LOG2 */
    if (IS_NVA3F(dev_priv->chipset))
        dd_emit(ctx, 1, 0); /* 00000001 */
    dd_emit(ctx, 1, 2);     /* 00000003 POLYGON_MODE_BACK */
    if (dev_priv->chipset >= 0xa0)
        dd_emit(ctx, 1, 0); /* 00000003 VTX_ATTR_DEFINE.SIZE - 1 */
    dd_emit(ctx, 1, 0);     /* 0000ffff CB_ADDR_INDEX */
    if (dev_priv->chipset >= 0xa0)
        dd_emit(ctx, 1, 0); /* 00000003 */
    dd_emit(ctx, 1, 0);     /* 00000001 CULL_FACE_ENABLE */
    dd_emit(ctx, 1, 1);     /* 00000003 CULL_FACE */
    dd_emit(ctx, 1, 0);     /* 00000001 FRONT_FACE */
    dd_emit(ctx, 1, 2);     /* 00000003 POLYGON_MODE_FRONT */
    dd_emit(ctx, 1, 0x1000); /* 00007fff UNK141C */
    if (dev_priv->chipset != 0x50) {
        dd_emit(ctx, 1, 0xe00);  /* 7fff */
        dd_emit(ctx, 1, 0x1000); /* 7fff */
        dd_emit(ctx, 1, 0x1e00); /* 7fff */
    }
    dd_emit(ctx, 1, 0);     /* 00000001 BEGIN_END_ACTIVE */
    dd_emit(ctx, 1, 1);     /* 00000001 POLYGON_MODE_??? */
    dd_emit(ctx, 1, 1);     /* 000000ff GP_REG_ALLOC_TEMP / 4 rounded up */
    dd_emit(ctx, 1, 1);     /* 000000ff FP_REG_ALLOC_TEMP... without /4? */
    dd_emit(ctx, 1, 1);     /* 000000ff VP_REG_ALLOC_TEMP / 4 rounded up */
    dd_emit(ctx, 1, 1);     /* 00000001 */
    dd_emit(ctx, 1, 0);     /* 00000001 */
    dd_emit(ctx, 1, 0);     /* 00000001 VTX_ATTR_MASK_UNK0 nonempty */
    dd_emit(ctx, 1, 0);     /* 00000001 VTX_ATTR_MASK_UNK1 nonempty */
    dd_emit(ctx, 1, 0x200); /* 0003ffff GP_VERTEX_OUTPUT_COUNT*GP_REG_ALLOC_RESULT */
    if (IS_NVA3F(dev_priv->chipset))
        dd_emit(ctx, 1, 0x200);
    dd_emit(ctx, 1, 0);     /* 00000001 */
    if (dev_priv->chipset < 0xa0) {
        dd_emit(ctx, 1, 1);     /* 00000001 */
        dd_emit(ctx, 1, 0x70);  /* 000000ff */
        dd_emit(ctx, 1, 0x80);  /* 000000ff */
        dd_emit(ctx, 1, 0);     /* 000000ff */
        dd_emit(ctx, 1, 0);     /* 00000001 */
        dd_emit(ctx, 1, 1);     /* 00000001 */
        dd_emit(ctx, 1, 0x70);  /* 000000ff */
        dd_emit(ctx, 1, 0x80);  /* 000000ff */
        dd_emit(ctx, 1, 0);     /* 000000ff */
    } else {
        dd_emit(ctx, 1, 1);     /* 00000001 */
        dd_emit(ctx, 1, 0xf0);  /* 000000ff */
        dd_emit(ctx, 1, 0xff);  /* 000000ff */
        dd_emit(ctx, 1, 0);     /* 000000ff */
        dd_emit(ctx, 1, 0);     /* 00000001 */
        dd_emit(ctx, 1, 1);     /* 00000001 */
        dd_emit(ctx, 1, 0xf0);  /* 000000ff */
        dd_emit(ctx, 1, 0xff);  /* 000000ff */
        dd_emit(ctx, 1, 0);     /* 000000ff */
        dd_emit(ctx, 1, 9);     /* 0000003f UNK114C.COMP,SIZE */
    }

    /* eng2d state */
    dd_emit(ctx, 1, 0);     /* 00000001 eng2d COLOR_KEY_ENABLE */
    dd_emit(ctx, 1, 0);     /* 00000007 eng2d COLOR_KEY_FORMAT */
    dd_emit(ctx, 1, 1);     /* ffffffff eng2d DST_DEPTH */
    dd_emit(ctx, 1, 0xcf);  /* 000000ff eng2d DST_FORMAT */
    dd_emit(ctx, 1, 0);     /* ffffffff eng2d DST_LAYER */
    dd_emit(ctx, 1, 1);     /* 00000001 eng2d DST_LINEAR */
    dd_emit(ctx, 1, 0);     /* 00000007 eng2d PATTERN_COLOR_FORMAT */
    dd_emit(ctx, 1, 0);     /* 00000007 eng2d OPERATION */
    dd_emit(ctx, 1, 0);     /* 00000003 eng2d PATTERN_SELECT */
    dd_emit(ctx, 1, 0xcf);  /* 000000ff eng2d SIFC_FORMAT */
    dd_emit(ctx, 1, 0);     /* 00000001 eng2d SIFC_BITMAP_ENABLE */
    dd_emit(ctx, 1, 2);     /* 00000003 eng2d SIFC_BITMAP_UNK808 */
    dd_emit(ctx, 1, 0);     /* ffffffff eng2d BLIT_DU_DX_FRACT */
    dd_emit(ctx, 1, 1);     /* ffffffff eng2d BLIT_DU_DX_INT */
    dd_emit(ctx, 1, 0);     /* ffffffff eng2d BLIT_DV_DY_FRACT */
    dd_emit(ctx, 1, 1);     /* ffffffff eng2d BLIT_DV_DY_INT */
    dd_emit(ctx, 1, 0);     /* 00000001 eng2d BLIT_CONTROL_FILTER */
    dd_emit(ctx, 1, 0xcf);  /* 000000ff eng2d DRAW_COLOR_FORMAT */
    dd_emit(ctx, 1, 0xcf);  /* 000000ff eng2d SRC_FORMAT */
    dd_emit(ctx, 1, 1);     /* 00000001 eng2d SRC_LINEAR #2 */

    /* map the whole area we just counted; its MMIO base differs on NVA3+ */
    num = ctx->ctxvals_pos - base;
    ctx->ctxvals_pos = base;
    if (IS_NVA3F(dev_priv->chipset))
        cp_ctx(ctx, 0x404800, num);
    else
        cp_ctx(ctx, 0x405400, num);
}

/*
 * xfer areas. These are a pain.
 *
 * There are 2 xfer areas: the first one is big and contains all sorts of
 * stuff, the second is small and contains some per-TP context.
 *
 * Each area is split into 8 "strands". The areas, when saved to grctx,
 * are made of 8-word blocks. Each block contains a single word from
 * each strand. The strands are independent of each other, their
 * addresses are unrelated to each other, and data in them is closely
 * packed together. The strand layout varies a bit between cards: here
 * and there, a single word is thrown out in the middle and the whole
 * strand is offset by a bit from corresponding one on another chipset.
 * For this reason, addresses of stuff in strands are almost useless.
 * Knowing sequence of stuff and size of gaps between them is much more
 * useful, and that's how we build the strands in our generator.
 *
 * NVA0 takes this mess to a whole new level by cutting the old strands
 * into a few dozen pieces [known as genes], rearranging them randomly,
 * and putting them back together to make new strands. Hopefully these
 * genes correspond more or less directly to the same PGRAPH subunits
 * as in 400040 register.
 *
 * The most common value in default context is 0, and when the genes
 * are separated by 0's, gene boundaries are quite speculative...
 * some of them can be clearly deduced, others can be guessed, and yet
 * others won't be resolved without figuring out the real meaning of
 * given ctxval. For the same reason, ending point of each strand
 * is unknown. Except for strand 0, which is the longest strand and
 * its end corresponds to end of the whole xfer.
 *
 * An unsolved mystery is the seek instruction: it takes an argument
 * in bits 8-18, and that argument is clearly the place in strands to
 * seek to... but the offsets don't seem to correspond to offsets as
 * seen in grctx. Perhaps there's another, real, not randomly-changing
 * addressing in strands, and the xfer insn just happens to skip over
 * the unused bits? NV10-NV30 PIPE comes to mind...
 *
 * As far as I know, there's no way to access the xfer areas directly
 * without the help of ctxprog.
 */

/*
 * Like dd_emit(), but for the interleaved xfer image: consecutive words of
 * one strand sit 8 words apart in grctx (one word per strand per block),
 * hence the i << 3 stride and the num << 3 position advance.
 */
static void
xf_emit(struct nouveau_grctx *ctx, int num, uint32_t val)
{
    int i;
    if (val && ctx->mode == NOUVEAU_GRCTX_VALS)
        for (i = 0; i < num; i++)
            nv_wo32(ctx->data, 4 * (ctx->ctxvals_pos + (i << 3)), val);
    ctx->ctxvals_pos += num << 3;
}

/* Gene declarations... */

static void nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx);
static void nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx);
static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx);

/*
 * Builds the first (big) xfer area: distributes the genes over the 8
 * strands (layout differs pre/post NVA0), tracks the longest strand in
 * 'size' (in 8-word blocks), then emits the ctxprog commands to transfer
 * the whole area.
 */
static void
nv50_graph_construct_xfer1(struct nouveau_grctx *ctx)
{
    struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
    int i;
    int offset;
    int size = 0;
    uint32_t units = nv_rd32 (ctx->dev, 0x1540);

    /* area start is aligned to a 0x40-word boundary */
    offset = (ctx->ctxvals_pos+0x3f)&~0x3f;
    ctx->ctxvals_base = offset;

    if (dev_priv->chipset < 0xa0) {
        /* Strand 0 */
        ctx->ctxvals_pos = offset;
        nv50_graph_construct_gene_dispatch(ctx);
        nv50_graph_construct_gene_m2mf(ctx);
        nv50_graph_construct_gene_unk24xx(ctx);
        nv50_graph_construct_gene_clipid(ctx);
        nv50_graph_construct_gene_zcull(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 1 */
        ctx->ctxvals_pos = offset + 0x1;
/* continuation of nv50_graph_construct_xfer1(): rest of strand 1 (< NVA0) */
        nv50_graph_construct_gene_vfetch(ctx);
        nv50_graph_construct_gene_eng2d(ctx);
        nv50_graph_construct_gene_csched(ctx);
        nv50_graph_construct_gene_ropm1(ctx);
        nv50_graph_construct_gene_ropm2(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 2 */
        ctx->ctxvals_pos = offset + 0x2;
        nv50_graph_construct_gene_ccache(ctx);
        nv50_graph_construct_gene_unk1cxx(ctx);
        nv50_graph_construct_gene_strmout(ctx);
        nv50_graph_construct_gene_unk14xx(ctx);
        nv50_graph_construct_gene_unk10xx(ctx);
        nv50_graph_construct_gene_unk34xx(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 3: per-ROP group state */
        ctx->ctxvals_pos = offset + 3;
        for (i = 0; i < 6; i++)
            if (units & (1 << (i + 16)))
                nv50_graph_construct_gene_ropc(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strands 4-7: per-TP state, two TPs per strand */
        for (i = 0; i < 4; i++) {
            ctx->ctxvals_pos = offset + 4 + i;
            if (units & (1 << (2 * i)))
                nv50_graph_construct_xfer_tp(ctx);
            if (units & (1 << (2 * i + 1)))
                nv50_graph_construct_xfer_tp(ctx);
            if ((ctx->ctxvals_pos-offset)/8 > size)
                size = (ctx->ctxvals_pos-offset)/8;
        }
    } else {
        /* Strand 0 */
        ctx->ctxvals_pos = offset;
        nv50_graph_construct_gene_dispatch(ctx);
        nv50_graph_construct_gene_m2mf(ctx);
        nv50_graph_construct_gene_unk34xx(ctx);
        nv50_graph_construct_gene_csched(ctx);
        nv50_graph_construct_gene_unk1cxx(ctx);
        nv50_graph_construct_gene_strmout(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 1 */
        ctx->ctxvals_pos = offset + 1;
        nv50_graph_construct_gene_unk10xx(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 2 */
        ctx->ctxvals_pos = offset + 2;
        if (dev_priv->chipset == 0xa0)
            nv50_graph_construct_gene_unk14xx(ctx);
        nv50_graph_construct_gene_unk24xx(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 3 */
        ctx->ctxvals_pos = offset + 3;
        nv50_graph_construct_gene_vfetch(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 4 */
        ctx->ctxvals_pos = offset + 4;
        nv50_graph_construct_gene_ccache(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 5 */
        ctx->ctxvals_pos = offset + 5;
        nv50_graph_construct_gene_ropm2(ctx);
        nv50_graph_construct_gene_ropm1(ctx);
        /* per-ROP context */
        for (i = 0; i < 8; i++)
            if (units & (1<<(i+16)))
                nv50_graph_construct_gene_ropc(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 6 */
        ctx->ctxvals_pos = offset + 6;
        nv50_graph_construct_gene_zcull(ctx);
        nv50_graph_construct_gene_clipid(ctx);
        nv50_graph_construct_gene_eng2d(ctx);
        if (units & (1 << 0))
            nv50_graph_construct_xfer_tp(ctx);
        if (units & (1 << 1))
            nv50_graph_construct_xfer_tp(ctx);
        if (units & (1 << 2))
            nv50_graph_construct_xfer_tp(ctx);
        if (units & (1 << 3))
            nv50_graph_construct_xfer_tp(ctx);
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;

        /* Strand 7 */
        ctx->ctxvals_pos = offset + 7;
        if (dev_priv->chipset == 0xa0) {
            if (units & (1 << 4))
                nv50_graph_construct_xfer_tp(ctx);
            if (units & (1 << 5))
                nv50_graph_construct_xfer_tp(ctx);
            if (units & (1 << 6))
                nv50_graph_construct_xfer_tp(ctx);
            if (units & (1 << 7))
                nv50_graph_construct_xfer_tp(ctx);
            if (units & (1 << 8))
                nv50_graph_construct_xfer_tp(ctx);
            if (units & (1 << 9))
                nv50_graph_construct_xfer_tp(ctx);
        } else {
            nv50_graph_construct_gene_unk14xx(ctx);
        }
        if ((ctx->ctxvals_pos-offset)/8 > size)
            size = (ctx->ctxvals_pos-offset)/8;
    }

    /* advance past the longest strand, re-align, and emit the transfer */
    ctx->ctxvals_pos = offset + size * 8;
    ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f;
    cp_lsr (ctx, offset);
    cp_out (ctx, CP_SET_XFER_POINTER);
    cp_lsr (ctx, size);
    cp_out (ctx, CP_SEEK_1);
    cp_out (ctx, CP_XFER_1);
    cp_wait(ctx, XFER, BUSY);
}

/*
 * non-trivial demagiced parts of ctx init go here
 */

static void
nv50_graph_construct_gene_dispatch(struct nouveau_grctx *ctx)
{
    /* start of strand 0 */
struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; /* SEEK */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 5, 0); else if (!IS_NVA3F(dev_priv->chipset)) xf_emit(ctx, 6, 0); else xf_emit(ctx, 4, 0); /* SEEK */ /* the PGRAPH's internal FIFO */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 8*3, 0); else xf_emit(ctx, 0x100*3, 0); /* and another bonus slot?!? */ xf_emit(ctx, 3, 0); /* and YET ANOTHER bonus slot? */ if (IS_NVA3F(dev_priv->chipset)) xf_emit(ctx, 3, 0); /* SEEK */ /* CTX_SWITCH: caches of gr objects bound to subchannels. 8 values, last used index */ xf_emit(ctx, 9, 0); /* SEEK */ xf_emit(ctx, 9, 0); /* SEEK */ xf_emit(ctx, 9, 0); /* SEEK */ xf_emit(ctx, 9, 0); /* SEEK */ if (dev_priv->chipset < 0x90) xf_emit(ctx, 4, 0); /* SEEK */ xf_emit(ctx, 2, 0); /* SEEK */ xf_emit(ctx, 6*2, 0); xf_emit(ctx, 2, 0); /* SEEK */ xf_emit(ctx, 2, 0); /* SEEK */ xf_emit(ctx, 6*2, 0); xf_emit(ctx, 2, 0); /* SEEK */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 0x1c, 0); else if (dev_priv->chipset < 0xa0) xf_emit(ctx, 0x1e, 0); else xf_emit(ctx, 0x22, 0); /* SEEK */ xf_emit(ctx, 0x15, 0); } static void nv50_graph_construct_gene_m2mf(struct nouveau_grctx *ctx) { /* Strand 0, right after dispatch */ struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; int smallm2mf = 0; if (dev_priv->chipset < 0x92 || dev_priv->chipset == 0x98) smallm2mf = 1; /* SEEK */ xf_emit (ctx, 1, 0); /* DMA_NOTIFY instance >> 4 */ xf_emit (ctx, 1, 0); /* DMA_BUFFER_IN instance >> 4 */ xf_emit (ctx, 1, 0); /* DMA_BUFFER_OUT instance >> 4 */ xf_emit (ctx, 1, 0); /* OFFSET_IN */ xf_emit (ctx, 1, 0); /* OFFSET_OUT */ xf_emit (ctx, 1, 0); /* PITCH_IN */ xf_emit (ctx, 1, 0); /* PITCH_OUT */ xf_emit (ctx, 1, 0); /* LINE_LENGTH */ xf_emit (ctx, 1, 0); /* LINE_COUNT */ xf_emit (ctx, 1, 0x21); /* FORMAT: bits 0-4 INPUT_INC, bits 5-9 OUTPUT_INC */ xf_emit (ctx, 1, 1); /* LINEAR_IN */ xf_emit (ctx, 1, 0x2); /* TILING_MODE_IN: bits 0-2 y tiling, bits 3-5 z tiling */ xf_emit (ctx, 1, 0x100); /* 
	TILING_PITCH_IN */
	xf_emit (ctx, 1, 0x100);	/* TILING_HEIGHT_IN */
	xf_emit (ctx, 1, 1);		/* TILING_DEPTH_IN */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_IN_Z */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_IN */
	xf_emit (ctx, 1, 1);		/* LINEAR_OUT */
	xf_emit (ctx, 1, 0x2);		/* TILING_MODE_OUT: bits 0-2 y tiling, bits 3-5 z tiling */
	xf_emit (ctx, 1, 0x100);	/* TILING_PITCH_OUT */
	xf_emit (ctx, 1, 0x100);	/* TILING_HEIGHT_OUT */
	xf_emit (ctx, 1, 1);		/* TILING_DEPTH_OUT */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_OUT_Z */
	xf_emit (ctx, 1, 0);		/* TILING_POSITION_OUT */
	xf_emit (ctx, 1, 0);		/* OFFSET_IN_HIGH */
	xf_emit (ctx, 1, 0);		/* OFFSET_OUT_HIGH */
	/* SEEK */
	if (smallm2mf)
		xf_emit(ctx, 0x40, 0);	/* 20 * ffffffff, 3ffff */
	else
		xf_emit(ctx, 0x100, 0);	/* 80 * ffffffff, 3ffff */
	xf_emit(ctx, 4, 0);		/* 1f/7f, 0, 1f/7f, 0 [1f for smallm2mf, 7f otherwise] */
	/* SEEK */
	if (smallm2mf)
		xf_emit(ctx, 0x400, 0);	/* ffffffff */
	else
		xf_emit(ctx, 0x800, 0);	/* ffffffff */
	xf_emit(ctx, 4, 0);		/* ff/1ff, 0, 0, 0 [ff for smallm2mf, 1ff otherwise] */
	/* SEEK */
	xf_emit(ctx, 0x40, 0);		/* 20 * bits ffffffff, 3ffff */
	xf_emit(ctx, 0x6, 0);		/* 1f, 0, 1f, 0, 1f, 0 */
}

/*
 * Emit code/constant-cache (CCACHE) state: constant-buffer bindings,
 * DMA object slots and shader program base addresses.  The leading pad
 * size varies per chipset.
 */
static void
nv50_graph_construct_gene_ccache(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 2, 0);		/* RO */
	xf_emit(ctx, 0x800, 0);		/* ffffffff */
	switch (dev_priv->chipset) {
	case 0x50:
	case 0x92:
	case 0xa0:
		xf_emit(ctx, 0x2b, 0);
		break;
	case 0x84:
		xf_emit(ctx, 0x29, 0);
		break;
	case 0x94:
	case 0x96:
	case 0xa3:
		xf_emit(ctx, 0x27, 0);
		break;
	case 0x86:
	case 0x98:
	case 0xa5:
	case 0xa8:
	case 0xaa:
	case 0xac:
	case 0xaf:
		xf_emit(ctx, 0x25, 0);
		break;
	}
	/* CB bindings, 0x80 of them.
	 * first word is address >> 8, second is
	 * size >> 4 | valid << 24 */
	xf_emit(ctx, 0x100, 0);		/* ffffffff CB_DEF */
	xf_emit(ctx, 1, 0);		/* 0000007f CB_ADDR_BUFFER */
	xf_emit(ctx, 1, 0);		/* 0 */
	xf_emit(ctx, 0x30, 0);		/* ff SET_PROGRAM_CB */
	xf_emit(ctx, 1, 0);		/* 3f last SET_PROGRAM_CB */
	xf_emit(ctx, 4, 0);		/* RO */
	xf_emit(ctx, 0x100, 0);		/* ffffffff */
	xf_emit(ctx, 8, 0);		/* 1f, 0, 0, ... */
	xf_emit(ctx, 8, 0);		/* ffffffff */
	xf_emit(ctx, 4, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 3 */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_CODE_CB */
	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_TIC */
	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_TSC */
	xf_emit(ctx, 1, 0);		/* 00000001 LINKED_TSC */
	xf_emit(ctx, 1, 0);		/* 000000ff TIC_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff TIC_ADDRESS_LOW */
	xf_emit(ctx, 1, 0x3fffff);	/* 003fffff TIC_LIMIT */
	xf_emit(ctx, 1, 0);		/* 000000ff TSC_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff TSC_ADDRESS_LOW */
	xf_emit(ctx, 1, 0x1fff);	/* 000fffff TSC_LIMIT */
	xf_emit(ctx, 1, 0);		/* 000000ff VP_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff VP_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);		/* 00ffffff VP_START_ID */
	xf_emit(ctx, 1, 0);		/* 000000ff CB_DEF_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff CB_DEF_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0);		/* 000000ff GP_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff GP_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);		/* 00ffffff GP_START_ID */
	xf_emit(ctx, 1, 0);		/* 000000ff FP_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff FP_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);		/* 00ffffff FP_START_ID */
}

/*
 * Emit shader-output mapping state plus eight per-chipset-sized scratch
 * areas (exact register meaning unknown upstream, hence "unk10xx").
 */
static void
nv50_graph_construct_gene_unk10xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i;
	/* end of area 2 on pre-NVA0, area 1 on NVAx */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x80);		/* 0000ffff
	GP_VERTEX_OUTPUT_COUNT */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff);
	else
		xf_emit(ctx, 1, 0x7ff);	/* 000007ff */
	xf_emit(ctx, 1, 0);		/* 111/113 */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	for (i = 0; i < 8; i++) {
		switch (dev_priv->chipset) {
		case 0x50:
		case 0x86:
		case 0x98:
		case 0xaa:
		case 0xac:
			xf_emit(ctx, 0xa0, 0);	/* ffffffff */
			break;
		case 0x84:
		case 0x92:
		case 0x94:
		case 0x96:
			xf_emit(ctx, 0x120, 0);
			break;
		case 0xa5:
		case 0xa8:
			xf_emit(ctx, 0x100, 0);	/* ffffffff */
			break;
		case 0xa0:
		case 0xa3:
		case 0xaf:
			xf_emit(ctx, 0x400, 0);	/* ffffffff */
			break;
		}
		xf_emit(ctx, 4, 0);	/* 3f, 0, 0, 0 */
		xf_emit(ctx, 4, 0);	/* ffffffff */
	}
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x80);		/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_REG_ALLOC_TEMP */
	xf_emit(ctx, 1, 1);		/* 00000001 RASTERIZE_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0x27);		/* 000000ff UNK0FD4 */
	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0x26);		/* 000000ff SEMANTIC_LAYER */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
}

/*
 * Emit viewport clip rectangles, polygon stipple and window offset
 * defaults ("unk34xx" register block).
 */
static void
nv50_graph_construct_gene_unk34xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* end of area 2 on pre-NVA0, area 1 on NVAx */
	xf_emit(ctx, 1, 0);		/* 00000001 VIEWPORT_CLIP_RECTS_EN */
	xf_emit(ctx, 1, 0);		/* 00000003 VIEWPORT_CLIP_MODE */
	xf_emit(ctx, 0x10, 0x04000000);	/* 07ffffff VIEWPORT_CLIP_HORIZ*8, VIEWPORT_CLIP_VERT*8 */
	xf_emit(ctx, 1, 0);		/* 00000001 POLYGON_STIPPLE_ENABLE */
	xf_emit(ctx, 0x20, 0);		/* ffffffff POLYGON_STIPPLE */
	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY */
	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
	xf_emit(ctx, 1, 0x04e3bfdf);	/* ffffffff UNK0D64 */
	xf_emit(ctx, 1, 0x04e3bfdf);	/* ffffffff UNK0DF4 */
	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 1, 0);		/* 00000007 */
	xf_emit(ctx, 1, 0x1fe21);	/* 0001ffff tesla UNK0FAC */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 0x0fac6881);
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 1);
		xf_emit(ctx, 3, 0);
	}
}

/*
 * Emit viewport transform, scissor and line/polygon rasterizer defaults
 * ("unk14xx" block).  NV50 (chipset 0x50) lacks several of these slots.
 */
static void
nv50_graph_construct_gene_unk14xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of area 2 on pre-NVA0, beginning of area 2 on NVA0, area 7 on >NVA0 */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 5, 0);		/* ffffffff */
		xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
		xf_emit(ctx, 1, 0);		/* 00000001 */
		xf_emit(ctx, 1, 0);		/* 000003ff */
		xf_emit(ctx, 1, 0x804);		/* 00000fff SEMANTIC_CLIP */
		xf_emit(ctx, 1, 0);		/* 00000001 */
		xf_emit(ctx, 2, 4);		/* 7f, ff */
		xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	}
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x10);		/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);		/* 000000ff VP_CLIP_DISTANCE_ENABLE */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0);	/* 3ff */
	xf_emit(ctx, 1, 0);		/* 000000ff tesla UNK1940 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK0D7C */
	xf_emit(ctx, 1, 0x804);		/* 00000fff SEMANTIC_CLIP */
	xf_emit(ctx, 1, 1);		/* 00000001 VIEWPORT_TRANSFORM_EN */
	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0x7f);	/* 000000ff tesla UNK0FFC */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 1);		/* 00000001 SHADE_MODEL */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 4);		/* 0000007f
	VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x10);		/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK0D7C */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK0F8C */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 1);		/* 00000001 VIEWPORT_TRANSFORM_EN */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 4, 0);		/* ffffffff NOPERSPECTIVE_BITMAP */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0);		/* 0000000f */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff);	/* 000003ff tesla UNK0D68 */
	else
		xf_emit(ctx, 1, 0x7ff);	/* 000007ff tesla UNK0D68 */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
	xf_emit(ctx, 0x30, 0);		/* ffffffff VIEWPORT_SCALE: X0, Y0, Z0, X1, Y1, ... */
	xf_emit(ctx, 3, 0);		/* f, 0, 0 */
	xf_emit(ctx, 3, 0);		/* ffffffff last VIEWPORT_SCALE?
	 */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 1);		/* 00000001 VIEWPORT_TRANSFORM_EN */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1924 */
	xf_emit(ctx, 1, 0x10);		/* 000000ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);		/* 00000001 */
	xf_emit(ctx, 0x30, 0);		/* ffffffff VIEWPORT_TRANSLATE */
	xf_emit(ctx, 3, 0);		/* f, 0, 0 */
	xf_emit(ctx, 3, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 2, 0x88);		/* 000001ff tesla UNK19D8 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1924 */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 4);		/* 0000000f CULL_MODE */
	xf_emit(ctx, 2, 0);		/* 07ffffff SCREEN_SCISSOR */
	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY */
	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 0x10, 0);		/* 00000001 SCISSOR_ENABLE */
	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0x26);		/* 000000ff SEMANTIC_LAYER */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0);		/* 0000000f */
	xf_emit(ctx, 1, 0x3f800000);	/* ffffffff LINE_WIDTH */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0);	/* 00000001 */
	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
	xf_emit(ctx, 1, 0x10);		/* 000000ff VIEW_VOLUME_CLIP_CTRL */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 1, 0);	/* ffffffff */
		xf_emit(ctx, 1, 0);	/* 00000001 */
		xf_emit(ctx, 1, 0);	/* 000003ff */
	}
	xf_emit(ctx, 0x20, 0);		/* 10xbits ffffffff, 3fffff. SCISSOR_* */
	xf_emit(ctx, 1, 0);		/* f */
	xf_emit(ctx, 1, 0);		/* 0?
	 */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 003fffff */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0x52);		/* 000001ff SEMANTIC_PTSZ */
	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0x26);		/* 000000ff SEMANTIC_LAYER */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
	xf_emit(ctx, 1, 0);		/* 0000000f */
}

/*
 * Emit depth/stencil test and zcull-related state defaults.
 */
static void
nv50_graph_construct_gene_zcull(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* end of strand 0 on pre-NVA0, beginning of strand 6 on NVAx */
	/* SEEK */
	xf_emit(ctx, 1, 0x3f);		/* 0000003f UNK1590 */
	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_BACK_FUNC_FUNC */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_MASK */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_FUNC_REF */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_MASK */
	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 1, 2);		/* 00000003 tesla UNK143C */
	xf_emit(ctx, 2, 0x04000000);	/* 07ffffff tesla UNK0D6C */
	xf_emit(ctx, 1, 0);		/* ffff0ff3 */
	xf_emit(ctx, 1, 0);		/* 00000001 CLIPID_ENABLE */
	xf_emit(ctx, 2, 0);		/* ffffffff DEPTH_BOUNDS */
	xf_emit(ctx, 1, 0);		/* 00000001 */
	xf_emit(ctx, 1, 0);		/* 00000007 DEPTH_TEST_FUNC */
	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 4);		/* 0000000f CULL_MODE */
	xf_emit(ctx, 1, 0);		/* 0000ffff */
	xf_emit(ctx, 1, 0);		/*
	00000001 UNK0FB0 */
	xf_emit(ctx, 1, 0);		/* 00000001 POLYGON_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0);		/* 000000ff CLEAR_STENCIL */
	xf_emit(ctx, 1, 0);		/* 00000007 STENCIL_FRONT_FUNC_FUNC */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_MASK */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_FUNC_REF */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 3, 0);		/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 1, 0);		/* ffffffff CLEAR_DEPTH */
	xf_emit(ctx, 1, 0);		/* 00000007 */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1108 */
	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0x1001);	/* 00001fff ZETA_ARRAY_MODE */
	/* SEEK */
	xf_emit(ctx, 4, 0xffff);	/* 0000ffff MSAA_MASK */
	xf_emit(ctx, 0x10, 0);		/* 00000001 SCISSOR_ENABLE */
	xf_emit(ctx, 0x10, 0);		/* ffffffff DEPTH_RANGE_NEAR */
	xf_emit(ctx, 0x10, 0x3f800000);	/* ffffffff DEPTH_RANGE_FAR */
	xf_emit(ctx, 1, 0x10);		/* 7f/ff/3ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);		/* 00000001 VIEWPORT_CLIP_RECTS_EN */
	xf_emit(ctx, 1, 3);		/* 00000003 FP_CTRL_UNK196C */
	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK1968 */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0);	/* 0fffffff tesla UNK1104 */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK151C */
}

/*
 * Emit CLIPID (clip-plane ID buffer) state defaults.
 */
static void
nv50_graph_construct_gene_clipid(struct nouveau_grctx *ctx)
{
	/* middle of strand 0 on pre-NVA0 [after 24xx], middle of area 6 on NVAx */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000007 UNK0FB4 */
	/* SEEK */
	xf_emit(ctx, 4, 0);		/* 07ffffff CLIPID_REGION_HORIZ */
	xf_emit(ctx, 4, 0);		/* 07ffffff CLIPID_REGION_VERT */
	xf_emit(ctx, 2, 0);		/* 07ffffff SCREEN_SCISSOR */
	xf_emit(ctx, 2, 0x04000000);	/* 07ffffff UNK1508 */
	xf_emit(ctx, 1, 0);		/* 00000001 CLIPID_ENABLE */
	xf_emit(ctx, 1, 0x80);		/* 00003fff CLIPID_WIDTH */
	xf_emit(ctx, 1, 0);		/* 000000ff CLIPID_ID */
	xf_emit(ctx, 1, 0);		/* 000000ff CLIPID_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff CLIPID_ADDRESS_LOW */
	xf_emit(ctx, 1, 0x80);		/* 00003fff CLIPID_HEIGHT */
	xf_emit(ctx, 1, 0);		/* 0000ffff DMA_CLIPID */
}

/*
 * Emit point-sprite / interpolant state plus two large RAM-like areas
 * ("unk24xx" block); area layout differs between NVA3F and the rest.
 */
static void
nv50_graph_construct_gene_unk24xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int i;
	/* middle of strand 0 on pre-NVA0 [after m2mf], end of strand 2 on NVAx */
	/* SEEK */
	xf_emit(ctx, 0x33, 0);
	/* SEEK */
	xf_emit(ctx, 2, 0);
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	/* SEEK */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 4, 0);	/* RO */
		xf_emit(ctx, 0xe10, 0);	/* 190 * 9: 8*ffffffff, 7ff */
		xf_emit(ctx, 1, 0);	/* 1ff */
		xf_emit(ctx, 8, 0);	/* 0? */
		xf_emit(ctx, 9, 0);	/* ffffffff, 7ff */
		xf_emit(ctx, 4, 0);	/* RO */
		xf_emit(ctx, 0xe10, 0);	/* 190 * 9: 8*ffffffff, 7ff */
		xf_emit(ctx, 1, 0);	/* 1ff */
		xf_emit(ctx, 8, 0);	/* 0? */
		xf_emit(ctx, 9, 0);	/* ffffffff, 7ff */
	} else {
		xf_emit(ctx, 0xc, 0);	/* RO */
		/* SEEK */
		xf_emit(ctx, 0xe10, 0);	/* 190 * 9: 8*ffffffff, 7ff */
		xf_emit(ctx, 1, 0);	/* 1ff */
		xf_emit(ctx, 8, 0);	/* 0? */
		/* SEEK */
		xf_emit(ctx, 0xc, 0);	/* RO */
		/* SEEK */
		xf_emit(ctx, 0xe10, 0);	/* 190 * 9: 8*ffffffff, 7ff */
		xf_emit(ctx, 1, 0);	/* 1ff */
		xf_emit(ctx, 8, 0);	/* 0?
		 */
	}
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4);		/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 4);		/* 0000007f VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 1);		/* 00000001 */
	/* SEEK */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 2, 4);	/* 000000ff */
	xf_emit(ctx, 1, 0x80c14);	/* 01ffffff SEMANTIC_COLOR */
	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 POINT_SPRITE_ENABLE */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 0x27);		/* 000000ff SEMANTIC_PRIM_ID */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0);		/* 0000000f */
	xf_emit(ctx, 1, 1);		/* 00000001 */
	for (i = 0; i < 10; i++) {
		/* SEEK */
		xf_emit(ctx, 0x40, 0);	/* ffffffff */
		xf_emit(ctx, 0x10, 0);	/* 3, 0, 0....
		 */
		xf_emit(ctx, 0x10, 0);	/* ffffffff */
	}
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000001 POINT_SPRITE_CTRL */
	xf_emit(ctx, 1, 1);		/* 00000001 */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 4, 0);		/* ffffffff NOPERSPECTIVE_BITMAP */
	xf_emit(ctx, 0x10, 0);		/* 00ffffff POINT_COORD_REPLACE_MAP */
	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0);	/* 000003ff */
}

/*
 * Emit vertex-fetch (VFETCH) state: vertex attribute enables, vertex
 * array addressing, and per-attribute scratch.  'acnt' is the attribute
 * count: 0x10, or 0x20 on NVA3F-class chips.
 */
static void
nv50_graph_construct_gene_vfetch(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int acnt = 0x10, rep, i;
	/* beginning of strand 1 on pre-NVA0, strand 3 on NVAx */
	if (IS_NVA3F(dev_priv->chipset))
		acnt = 0x20;
	/* SEEK */
	if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK13A4 */
		xf_emit(ctx, 1, 1);	/* 00000fff tesla UNK1318 */
	}
	xf_emit(ctx, 1, 0);		/* ffffffff VERTEX_BUFFER_FIRST */
	xf_emit(ctx, 1, 0);		/* 00000001 PRIMITIVE_RESTART_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 UNK0DE8 */
	xf_emit(ctx, 1, 0);		/* ffffffff PRIMITIVE_RESTART_INDEX */
	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
	xf_emit(ctx, acnt/8, 0);	/* ffffffff VTX_ATR_MASK_UNK0DD0 */
	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
	xf_emit(ctx, 1, 0x20);		/* 0000ffff tesla UNK129C */
	xf_emit(ctx, 1, 0);		/* 000000ff turing UNK370???
	 */
	xf_emit(ctx, 1, 0);		/* 0000ffff turing USER_PARAM_COUNT */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	/* SEEK */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0xb, 0);	/* RO */
	else if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 0x9, 0);	/* RO */
	else
		xf_emit(ctx, 0x8, 0);	/* RO */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 00000001 EDGE_FLAG */
	xf_emit(ctx, 1, 0);		/* 00000001 PROVOKING_VERTEX_LAST */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
	/* SEEK */
	xf_emit(ctx, 0xc, 0);		/* RO */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 7f/ff */
	xf_emit(ctx, 1, 4);		/* 7f/ff VP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 4);		/* 7f/ff VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
	xf_emit(ctx, 1, 4);		/* 000001ff UNK1A28 */
	xf_emit(ctx, 1, 8);		/* 000001ff UNK0DF0 */
	xf_emit(ctx, 1, 0);		/* 00000001 GP_ENABLE */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0x3ff);	/* 3ff tesla UNK0D68 */
	else
		xf_emit(ctx, 1, 0x7ff);	/* 7ff tesla UNK0D68 */
	if (dev_priv->chipset == 0xa8)
		xf_emit(ctx, 1, 0x1e00); /* 7fff */
	/* SEEK */
	xf_emit(ctx, 0xc, 0);		/* RO or close */
	/* SEEK */
	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
	if (dev_priv->chipset > 0x50 && dev_priv->chipset < 0xa0)
		xf_emit(ctx, 2, 0);	/* ffffffff */
	else
		xf_emit(ctx, 1, 0);	/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK0FD8 */
	/* SEEK */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 0x10, 0);	/* 0? */
		xf_emit(ctx, 2, 0);	/* weird... */
		xf_emit(ctx, 2, 0);	/* RO */
	} else {
		xf_emit(ctx, 8, 0);	/* 0? */
		xf_emit(ctx, 1, 0);	/* weird... */
		xf_emit(ctx, 2, 0);	/* RO */
	}
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* ffffffff VB_ELEMENT_BASE */
	xf_emit(ctx, 1, 0);		/* ffffffff UNK1438 */
	xf_emit(ctx, acnt, 0);		/* 1 tesla UNK1000 */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1118?
	 */
	/* SEEK */
	xf_emit(ctx, acnt, 0);		/* ffffffff VERTEX_ARRAY_UNK90C */
	xf_emit(ctx, 1, 0);		/* f/1f */
	/* SEEK */
	xf_emit(ctx, acnt, 0);		/* ffffffff VERTEX_ARRAY_UNK90C */
	xf_emit(ctx, 1, 0);		/* f/1f */
	/* SEEK */
	xf_emit(ctx, acnt, 0);		/* RO */
	xf_emit(ctx, 2, 0);		/* RO */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK111C? */
	xf_emit(ctx, 1, 0);		/* RO */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 000000ff UNK15F4_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff UNK15F4_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);		/* 000000ff UNK0F84_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);		/* ffffffff UNK0F84_ADDRESS_LOW */
	/* SEEK */
	xf_emit(ctx, acnt, 0);		/* 00003fff VERTEX_ARRAY_ATTRIB_OFFSET */
	xf_emit(ctx, 3, 0);		/* f/1f */
	/* SEEK */
	xf_emit(ctx, acnt, 0);		/* 00000fff VERTEX_ARRAY_STRIDE */
	xf_emit(ctx, 3, 0);		/* f/1f */
	/* SEEK */
	xf_emit(ctx, acnt, 0);		/* ffffffff VERTEX_ARRAY_LOW */
	xf_emit(ctx, 3, 0);		/* f/1f */
	/* SEEK */
	xf_emit(ctx, acnt, 0);		/* 000000ff VERTEX_ARRAY_HIGH */
	xf_emit(ctx, 3, 0);		/* f/1f */
	/* SEEK */
	xf_emit(ctx, acnt, 0);		/* ffffffff VERTEX_LIMIT_LOW */
	xf_emit(ctx, 3, 0);		/* f/1f */
	/* SEEK */
	xf_emit(ctx, acnt, 0);		/* 000000ff VERTEX_LIMIT_HIGH */
	xf_emit(ctx, 3, 0);		/* f/1f */
	/* SEEK */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, acnt, 0);	/* f */
		xf_emit(ctx, 3, 0);	/* f/1f */
	}
	/* SEEK */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 2, 0);	/* RO */
	else
		xf_emit(ctx, 5, 0);	/* RO */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* ffff DMA_VTXBUF */
	/* SEEK */
	if (dev_priv->chipset < 0xa0) {
		xf_emit(ctx, 0x41, 0);	/* RO */
		/* SEEK */
		xf_emit(ctx, 0x11, 0);	/* RO */
	} else if (!IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0x50, 0);	/* RO */
	else
		xf_emit(ctx, 0x58, 0);	/* RO */
	/* SEEK */
	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
	xf_emit(ctx, 1, 1);		/* 1 UNK0DEC */
	/* SEEK */
	xf_emit(ctx, acnt*4, 0);	/* ffffffff VTX_ATTR */
	xf_emit(ctx, 4, 0);		/* f/1f, 0, 0, 0 */
	/* SEEK */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0x1d, 0);	/* RO */
	else
		xf_emit(ctx, 0x16, 0);	/* RO */
	/* SEEK */
	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
	/* SEEK */
	if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 8, 0);	/* RO */
	else if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0xc, 0);	/* RO */
	else
		xf_emit(ctx, 7, 0);	/* RO */
	/* SEEK */
	xf_emit(ctx, 0xa, 0);		/* RO */
	if (dev_priv->chipset == 0xa0)
		rep = 0xc;
	else
		rep = 4;
	for (i = 0; i < rep; i++) {
		/* SEEK */
		if (IS_NVA3F(dev_priv->chipset))
			xf_emit(ctx, 0x20, 0);	/* ffffffff */
		xf_emit(ctx, 0x200, 0);	/* ffffffff */
		xf_emit(ctx, 4, 0);	/* 7f/ff, 0, 0, 0 */
		xf_emit(ctx, 4, 0);	/* ffffffff */
	}
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* 113/111 */
	xf_emit(ctx, 1, 0xf);		/* ffffffff VP_ATTR_EN */
	xf_emit(ctx, (acnt/8)-1, 0);	/* ffffffff VP_ATTR_EN */
	xf_emit(ctx, acnt/8, 0);	/* ffffffff VTX_ATTR_MASK_UNK0DD0 */
	xf_emit(ctx, 1, 0);		/* 0000000f VP_GP_BUILTIN_ATTR_EN */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK1A30 */
	/* SEEK */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 7, 0);	/* weird... */
	else
		xf_emit(ctx, 5, 0);	/* weird... */
}

/*
 * Emit 2D engine (ENG2D) state defaults: clip, SIFC, BLIT and DRAW
 * parameters.
 */
static void
nv50_graph_construct_gene_eng2d(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of strand 1 on pre-NVA0 [after vfetch], middle of strand 6 on NVAx */
	/* SEEK */
	xf_emit(ctx, 2, 0);		/* 0001ffff CLIP_X, CLIP_Y */
	xf_emit(ctx, 2, 0);		/* 0000ffff CLIP_W, CLIP_H */
	xf_emit(ctx, 1, 0);		/* 00000001 CLIP_ENABLE */
	if (dev_priv->chipset < 0xa0) {
		/* this is useless on everything but the original NV50,
		 * guess they forgot to nuke it. Or just didn't bother.
		 */
		xf_emit(ctx, 2, 0);	/* 0000ffff IFC_CLIP_X, Y */
		xf_emit(ctx, 2, 1);	/* 0000ffff IFC_CLIP_W, H */
		xf_emit(ctx, 1, 0);	/* 00000001 IFC_CLIP_ENABLE */
	}
	xf_emit(ctx, 1, 1);		/* 00000001 DST_LINEAR */
	xf_emit(ctx, 1, 0x100);		/* 0001ffff DST_WIDTH */
	xf_emit(ctx, 1, 0x100);		/* 0001ffff DST_HEIGHT */
	xf_emit(ctx, 1, 0x11);		/* 3f[NV50]/7f[NV84+] DST_FORMAT */
	xf_emit(ctx, 1, 0);		/* 0001ffff DRAW_POINT_X */
	xf_emit(ctx, 1, 8);		/* 0000000f DRAW_UNK58C */
	xf_emit(ctx, 1, 0);		/* 000fffff SIFC_DST_X_FRACT */
	xf_emit(ctx, 1, 0);		/* 0001ffff SIFC_DST_X_INT */
	xf_emit(ctx, 1, 0);		/* 000fffff SIFC_DST_Y_FRACT */
	xf_emit(ctx, 1, 0);		/* 0001ffff SIFC_DST_Y_INT */
	xf_emit(ctx, 1, 0);		/* 000fffff SIFC_DX_DU_FRACT */
	xf_emit(ctx, 1, 1);		/* 0001ffff SIFC_DX_DU_INT */
	xf_emit(ctx, 1, 0);		/* 000fffff SIFC_DY_DV_FRACT */
	xf_emit(ctx, 1, 1);		/* 0001ffff SIFC_DY_DV_INT */
	xf_emit(ctx, 1, 1);		/* 0000ffff SIFC_WIDTH */
	xf_emit(ctx, 1, 1);		/* 0000ffff SIFC_HEIGHT */
	xf_emit(ctx, 1, 0xcf);		/* 000000ff SIFC_FORMAT */
	xf_emit(ctx, 1, 2);		/* 00000003 SIFC_BITMAP_UNK808 */
	xf_emit(ctx, 1, 0);		/* 00000003 SIFC_BITMAP_LINE_PACK_MODE */
	xf_emit(ctx, 1, 0);		/* 00000001 SIFC_BITMAP_LSB_FIRST */
	xf_emit(ctx, 1, 0);		/* 00000001 SIFC_BITMAP_ENABLE */
	xf_emit(ctx, 1, 0);		/* 0000ffff BLIT_DST_X */
	xf_emit(ctx, 1, 0);		/* 0000ffff BLIT_DST_Y */
	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_DU_DX_FRACT */
	xf_emit(ctx, 1, 1);		/* 0001ffff BLIT_DU_DX_INT */
	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_DV_DY_FRACT */
	xf_emit(ctx, 1, 1);		/* 0001ffff BLIT_DV_DY_INT */
	xf_emit(ctx, 1, 1);		/* 0000ffff BLIT_DST_W */
	xf_emit(ctx, 1, 1);		/* 0000ffff BLIT_DST_H */
	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_SRC_X_FRACT */
	xf_emit(ctx, 1, 0);		/* 0001ffff BLIT_SRC_X_INT */
	xf_emit(ctx, 1, 0);		/* 000fffff BLIT_SRC_Y_FRACT */
	xf_emit(ctx, 1, 0);		/* 00000001 UNK888 */
	xf_emit(ctx, 1, 4);		/* 0000003f UNK884 */
	xf_emit(ctx, 1, 0);		/* 00000007 UNK880 */
	xf_emit(ctx, 1, 1);		/* 0000001f tesla UNK0FB8 */
	xf_emit(ctx, 1, 0x15);		/* 000000ff tesla UNK128C */
	xf_emit(ctx, 2, 0);		/* 00000007, ffff0ff3 */
	xf_emit(ctx, 1, 0);		/* 00000001 UNK260 */
	xf_emit(ctx, 1, 0x4444480);	/* 1fffffff UNK870 */
	/* SEEK */
	xf_emit(ctx, 0x10, 0);
	/* SEEK */
	xf_emit(ctx, 0x27, 0);
}

/*
 * Emit compute scheduler (CSCHED) state: grid/block dimensions, user
 * params, and per-chipset-sized TP scheduling tables.
 */
static void
nv50_graph_construct_gene_csched(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* middle of strand 1 on pre-NVA0 [after eng2d], middle of strand 0 on NVAx */
	/* SEEK */
	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY... what is it doing here??? */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1924 */
	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 0);		/* 000003ff */
	/* SEEK */
	xf_emit(ctx, 1, 0);		/* ffffffff turing UNK364 */
	xf_emit(ctx, 1, 0);		/* 0000000f turing UNK36C */
	xf_emit(ctx, 1, 0);		/* 0000ffff USER_PARAM_COUNT */
	xf_emit(ctx, 1, 0x100);		/* 00ffffff turing UNK384 */
	xf_emit(ctx, 1, 0);		/* 0000000f turing UNK2A0 */
	xf_emit(ctx, 1, 0);		/* 0000ffff GRIDID */
	xf_emit(ctx, 1, 0x10001);	/* ffffffff GRIDDIM_XY */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0x10001);	/* ffffffff BLOCKDIM_XY */
	xf_emit(ctx, 1, 1);		/* 0000ffff BLOCKDIM_Z */
	xf_emit(ctx, 1, 0x10001);	/* 00ffffff BLOCK_ALLOC */
	xf_emit(ctx, 1, 1);		/* 00000001 LANES32 */
	xf_emit(ctx, 1, 4);		/* 000000ff FP_REG_ALLOC_TEMP */
	xf_emit(ctx, 1, 2);		/* 00000003 REG_MODE */
	/* SEEK */
	xf_emit(ctx, 0x40, 0);		/* ffffffff USER_PARAM */
	switch (dev_priv->chipset) {
	case 0x50:
	case 0x92:
		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0x80, 0);	/* fff */
		xf_emit(ctx, 2, 0);	/* ff, fff */
		xf_emit(ctx, 0x10*2, 0);	/* ffffffff, 1f */
		break;
	case 0x84:
		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0x60, 0);	/* fff */
		xf_emit(ctx, 2, 0);	/* ff, fff */
		xf_emit(ctx, 0xc*2, 0);	/* ffffffff, 1f */
		break;
	case 0x94:
	case 0x96:
		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ...
		 */
		xf_emit(ctx, 0x40, 0);	/* fff */
		xf_emit(ctx, 2, 0);	/* ff, fff */
		xf_emit(ctx, 8*2, 0);	/* ffffffff, 1f */
		break;
	case 0x86:
	case 0x98:
		xf_emit(ctx, 4, 0);	/* f, 0, 0, 0 */
		xf_emit(ctx, 0x10, 0);	/* fff */
		xf_emit(ctx, 2, 0);	/* ff, fff */
		xf_emit(ctx, 2*2, 0);	/* ffffffff, 1f */
		break;
	case 0xa0:
		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0xf0, 0);	/* fff */
		xf_emit(ctx, 2, 0);	/* ff, fff */
		xf_emit(ctx, 0x1e*2, 0);	/* ffffffff, 1f */
		break;
	case 0xa3:
		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0x60, 0);	/* fff */
		xf_emit(ctx, 2, 0);	/* ff, fff */
		xf_emit(ctx, 0xc*2, 0);	/* ffffffff, 1f */
		break;
	case 0xa5:
	case 0xaf:
		xf_emit(ctx, 8, 0);	/* 7, 0, 0, 0, ... */
		xf_emit(ctx, 0x30, 0);	/* fff */
		xf_emit(ctx, 2, 0);	/* ff, fff */
		xf_emit(ctx, 6*2, 0);	/* ffffffff, 1f */
		break;
	case 0xaa:
		xf_emit(ctx, 0x12, 0);
		break;
	case 0xa8:
	case 0xac:
		xf_emit(ctx, 4, 0);	/* f, 0, 0, 0 */
		xf_emit(ctx, 0x10, 0);	/* fff */
		xf_emit(ctx, 2, 0);	/* ff, fff */
		xf_emit(ctx, 2*2, 0);	/* ffffffff, 1f */
		break;
	}
	xf_emit(ctx, 1, 0);		/* 0000000f */
	xf_emit(ctx, 1, 0);		/* 00000000 */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 0000001f */
	xf_emit(ctx, 4, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 00000003 turing UNK35C */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 4, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 00000003 turing UNK35C */
	xf_emit(ctx, 1, 0);		/* ffffffff */
	xf_emit(ctx, 1, 0);		/* 000000ff */
}

/*
 * Emit rasterizer/RT output state defaults ("unk1cxx" block):
 * line/polygon modes, color masks, RT formats and viewport arrays.
 */
static void
nv50_graph_construct_gene_unk1cxx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 2, 0);		/* 00007fff WINDOW_OFFSET_XY */
	xf_emit(ctx, 1, 0x3f800000);	/* ffffffff LINE_WIDTH */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1658 */
	xf_emit(ctx, 1, 0);		/* 00000001 POLYGON_SMOOTH_ENABLE */
	xf_emit(ctx, 3, 0);		/* 00000001 POLYGON_OFFSET_*_ENABLE */
	xf_emit(ctx, 1, 4);		/* 0000000f CULL_MODE */
	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE
	 */
	xf_emit(ctx, 1, 0);		/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 0);		/* 00000001 POINT_SPRITE_ENABLE */
	xf_emit(ctx, 1, 1);		/* 00000001 tesla UNK165C */
	xf_emit(ctx, 0x10, 0);		/* 00000001 SCISSOR_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
	xf_emit(ctx, 1, 0);		/* ffffffff POLYGON_OFFSET_UNITS */
	xf_emit(ctx, 1, 0);		/* ffffffff POLYGON_OFFSET_FACTOR */
	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK1668 */
	xf_emit(ctx, 2, 0);		/* 07ffffff SCREEN_SCISSOR */
	xf_emit(ctx, 1, 0);		/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0xf);		/* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0);		/* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0x11);		/* 0000007f RT_FORMAT */
	xf_emit(ctx, 7, 0);		/* 0000007f RT_FORMAT */
	xf_emit(ctx, 8, 0);		/* 00000001 RT_HORIZ_LINEAR */
	xf_emit(ctx, 1, 4);		/* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0);		/* 00000001 ALPHA_TEST_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000007 ALPHA_TEST_FUNC */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 3);	/* 00000003 UNK16B4 */
	else if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 1);	/* 00000001 UNK16B4 */
	xf_emit(ctx, 1, 0);		/* 00000003 MULTISAMPLE_CTRL */
	xf_emit(ctx, 1, 0);		/* 00000003 tesla UNK0F90 */
	xf_emit(ctx, 1, 2);		/* 00000003 tesla UNK143C */
	xf_emit(ctx, 2, 0x04000000);	/* 07ffffff tesla UNK0D6C */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 5);		/* 0000000f UNK1408 */
	xf_emit(ctx, 1, 0x52);		/* 000001ff SEMANTIC_PTSZ */
	xf_emit(ctx, 1, 0);		/* ffffffff POINT_SIZE */
	xf_emit(ctx, 1, 0);		/* 00000001 */
	xf_emit(ctx, 1, 0);		/* 00000007 tesla UNK0FB4 */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 1, 0);	/* 3ff */
		xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK1110 */
	}
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0);	/*
	00000003 tesla UNK1928 */
	xf_emit(ctx, 0x10, 0);		/* ffffffff DEPTH_RANGE_NEAR */
	xf_emit(ctx, 0x10, 0x3f800000);	/* ffffffff DEPTH_RANGE_FAR */
	xf_emit(ctx, 1, 0x10);		/* 000000ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 0x20, 0);		/* 07ffffff VIEWPORT_HORIZ, then VIEWPORT_VERT. (W&0x3fff)<<13 | (X&0x1fff). */
	xf_emit(ctx, 1, 0);		/* ffffffff tesla UNK187C */
	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 1, 0);		/* 000000ff STENCIL_BACK_MASK */
	xf_emit(ctx, 1, 0x8100c12);	/* 1fffffff FP_INTERPOLANT_CTRL */
	xf_emit(ctx, 1, 5);		/* 0000000f tesla UNK1220 */
	xf_emit(ctx, 1, 0);		/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);		/* 000000ff tesla UNK1A20 */
	xf_emit(ctx, 1, 1);		/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 VERTEX_TWO_SIDE_ENABLE */
	xf_emit(ctx, 4, 0xffff);	/* 0000ffff MSAA_MASK */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
	if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0x1c, 0);	/* RO */
	else if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0x9, 0);
	xf_emit(ctx, 1, 0);		/* 00000001 UNK1534 */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_SMOOTH_ENABLE */
	xf_emit(ctx, 1, 0);		/* 00000001 LINE_STIPPLE_ENABLE */
	xf_emit(ctx, 1, 0x00ffff00);	/* 00ffffff LINE_STIPPLE_PATTERN */
	xf_emit(ctx, 1, 0x1a);		/* 0000001f POLYGON_MODE */
	xf_emit(ctx, 1, 0);		/* 00000003 WINDOW_ORIGIN */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK1100 */
		xf_emit(ctx, 1, 0);	/* 3ff */
	}
	/* XXX: the following block could belong either to unk1cxx, or
	 * to STRMOUT. Rather hard to tell.
*/ if (dev_priv->chipset < 0xa0) xf_emit(ctx, 0x25, 0); else xf_emit(ctx, 0x3b, 0); } static void nv50_graph_construct_gene_strmout(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ if (dev_priv->chipset >= 0xa0) { xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ } xf_emit(ctx, 1, 4); /* 000000ff GP_RESULT_MAP_SIZE */ xf_emit(ctx, 1, 4); /* 0000007f VP_RESULT_MAP_SIZE */ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 1, 0x3ff); /* 000003ff tesla UNK0D68 */ else xf_emit(ctx, 1, 0x7ff); /* 000007ff tesla UNK0D68 */ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ /* SEEK */ xf_emit(ctx, 1, 0x102); /* 0000ffff STRMOUT_BUFFER_CTRL */ xf_emit(ctx, 1, 0); /* ffffffff STRMOUT_PRIMITIVE_COUNT */ xf_emit(ctx, 4, 0); /* 000000ff STRMOUT_ADDRESS_HIGH */ xf_emit(ctx, 4, 0); /* ffffffff STRMOUT_ADDRESS_LOW */ xf_emit(ctx, 4, 4); /* 000000ff STRMOUT_NUM_ATTRIBS */ if (dev_priv->chipset >= 0xa0) { xf_emit(ctx, 4, 0); /* ffffffff UNK1A8C */ xf_emit(ctx, 4, 0); /* ffffffff UNK1780 */ } xf_emit(ctx, 1, 0); /* 0000ffff DMA_STRMOUT */ xf_emit(ctx, 1, 0); /* 0000ffff DMA_QUERY */ xf_emit(ctx, 1, 0); /* 000000ff QUERY_ADDRESS_HIGH */ xf_emit(ctx, 2, 0); /* ffffffff QUERY_ADDRESS_LOW QUERY_COUNTER */ xf_emit(ctx, 2, 0); /* ffffffff */ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ /* SEEK */ xf_emit(ctx, 0x20, 0); /* ffffffff STRMOUT_MAP */ xf_emit(ctx, 1, 0); /* 0000000f */ xf_emit(ctx, 1, 0); /* 00000000? 
*/
	xf_emit(ctx, 2, 0);	/* ffffffff */
}

/*
 * Emit the first ROP-miscellaneous gene: two UNK0D64/UNK0DF4 constants
 * plus a few small slots (NVA3F additionally carries tesla UNK1968).
 * NOTE(review): call order/counts define the context image layout.
 */
static void
nv50_graph_construct_gene_ropm1(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0D64 */
	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0DF4 */
	xf_emit(ctx, 1, 0);	/* 00000007 */
	xf_emit(ctx, 1, 0);	/* 000003ff */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0x11);	/* 000000ff tesla UNK1968 */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
}

/*
 * Emit the second ROP-miscellaneous gene: query DMA/address slots in two
 * SEEK groups, the UNK0D64/UNK0DF4 constants again, and trailing flags.
 * NOTE(review): call order/counts define the context image layout.
 */
static void
nv50_graph_construct_gene_ropm2(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	/* SEEK */
	xf_emit(ctx, 1, 0);	/* 0000ffff DMA_QUERY */
	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
	xf_emit(ctx, 2, 0);	/* ffffffff */
	xf_emit(ctx, 1, 0);	/* 000000ff QUERY_ADDRESS_HIGH */
	xf_emit(ctx, 2, 0);	/* ffffffff QUERY_ADDRESS_LOW, COUNTER */
	xf_emit(ctx, 1, 0);	/* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 7 */
	/* SEEK */
	xf_emit(ctx, 1, 0);	/* 0000ffff DMA_QUERY */
	xf_emit(ctx, 1, 0);	/* 000000ff QUERY_ADDRESS_HIGH */
	xf_emit(ctx, 2, 0);	/* ffffffff QUERY_ADDRESS_LOW, COUNTER */
	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0D64 */
	xf_emit(ctx, 1, 0x4e3bfdf);	/* ffffffff UNK0DF4 */
	xf_emit(ctx, 1, 0);	/* 00000001 eng2d UNK260 */
	xf_emit(ctx, 1, 0);	/* ff/3ff */
	xf_emit(ctx, 1, 0);	/* 00000007 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0x11);	/* 000000ff tesla UNK1968 */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
}

/*
 * Emit the ROP-core gene: repeated passes over stencil/depth/blend/RT
 * default state, 2D pattern slots and per-chipset extras.
 * NOTE(review): call order/counts define the context image layout.
 */
static void
nv50_graph_construct_gene_ropc(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int magic2;
	/* magic2: chipset-dependent constant, emitted below into the
	 * "tesla UNK0F78" slots. */
	if (dev_priv->chipset == 0x50) {
		magic2 = 0x00003e60;
	} else if (!IS_NVA3F(dev_priv->chipset)) {
		magic2 = 0x001ffe67;
	} else {
		magic2 = 0x00087e67;
	}
	xf_emit(ctx, 1, 0);	/* f/7 MUTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0);	/* 00000007 STENCIL_BACK_FUNC_FUNC */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_BACK_FUNC_MASK */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_BACK_MASK */
	xf_emit(ctx, 3, 0);	/* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 1, 2);	/* 00000003 tesla UNK143C */
	xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0);	/* 00000007 DEPTH_TEST_FUNC */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_WRITE_ENABLE */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
	xf_emit(ctx, 1, 0);	/* 00000007 STENCIL_FRONT_FUNC_FUNC */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_FUNC_MASK */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 3, 0);	/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
	if (dev_priv->chipset >= 0xa0 && !IS_NVAAF(dev_priv->chipset))
		xf_emit(ctx, 1, 0x15);	/* 000000ff */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK15B4 */
	xf_emit(ctx, 1, 0x10);	/* 3ff/ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);	/* ffffffff CLEAR_DEPTH */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
	if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x92 || dev_priv->chipset == 0x98 || dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 3, 0);	/* ff, ffffffff, ffffffff */
		xf_emit(ctx, 1, 4);	/* 7 */
		xf_emit(ctx, 1, 0x400);	/* fffffff */
		xf_emit(ctx, 1, 0x300);	/* ffff */
		xf_emit(ctx, 1, 0x1001);	/* 1fff */
		if (dev_priv->chipset != 0xa0) {
			if (IS_NVA3F(dev_priv->chipset))
				xf_emit(ctx, 1, 0);	/* 0000000f UNK15C8 */
			else
				xf_emit(ctx, 1, 0x15);	/* ff */
		}
	}
	xf_emit(ctx, 1, 0);	/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0);	/* 00000007 STENCIL_BACK_FUNC_FUNC */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_BACK_FUNC_MASK */
	xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	xf_emit(ctx, 1, 2);	/* 00000003 tesla UNK143C */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0);	/* 00000007 DEPTH_TEST_FUNC */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000007 STENCIL_FRONT_FUNC_FUNC */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_FUNC_MASK */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK15B4 */
	xf_emit(ctx, 1, 0x10);	/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1900 */
	xf_emit(ctx, 1, 0);	/* 00000007 STENCIL_BACK_FUNC_FUNC */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_BACK_FUNC_MASK */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_BACK_FUNC_REF */
	xf_emit(ctx, 2, 0);	/* ffffffff DEPTH_BOUNDS */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0);	/* 00000007 DEPTH_TEST_FUNC */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0);	/* 0000000f */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK0FB0 */
	xf_emit(ctx, 1, 0);	/* 00000007 STENCIL_FRONT_FUNC_FUNC */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_FUNC_MASK */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_FUNC_REF */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 1, 0x10);	/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 0x10, 0);	/* ffffffff DEPTH_RANGE_NEAR */
	xf_emit(ctx, 0x10, 0x3f800000);	/* ffffffff DEPTH_RANGE_FAR */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 0);	/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);	/* 00000007 STENCIL_BACK_FUNC_FUNC */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_BACK_FUNC_MASK */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_BACK_FUNC_REF */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_BACK_MASK */
	xf_emit(ctx, 3, 0);	/* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 2, 0);	/* ffffffff DEPTH_BOUNDS */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0);	/* 00000007 DEPTH_TEST_FUNC */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0);	/* 000000ff CLEAR_STENCIL */
	xf_emit(ctx, 1, 0);	/* 00000007 STENCIL_FRONT_FUNC_FUNC */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_FUNC_MASK */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_FUNC_REF */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 3, 0);	/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 1, 0x10);	/* 7f/ff VIEW_VOLUME_CLIP_CTRL */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 0x3f);	/* 0000003f UNK1590 */
	xf_emit(ctx, 1, 0);	/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 2, 0);	/* ffff0ff3, ffff */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK0FB0 */
	xf_emit(ctx, 1, 0);	/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK15B4 */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0);	/* ffffffff CLEAR_DEPTH */
	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK19CC */
	if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 2, 0);
		xf_emit(ctx, 1, 0x1001);
		xf_emit(ctx, 0xb, 0);
	} else {
		xf_emit(ctx, 1, 0);	/* 00000007 */
		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
		xf_emit(ctx, 1, 0);	/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
		xf_emit(ctx, 8, 0);	/* 00000001 BLEND_ENABLE */
		xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	}
	xf_emit(ctx, 1, 0x11);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0xf);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f */
	xf_emit(ctx, 1, 0);	/* 00000001 LOGIC_OP_ENABLE */
	if (dev_priv->chipset != 0x50) {
		xf_emit(ctx, 1, 0);	/* 0000000f LOGIC_OP */
		xf_emit(ctx, 1, 0);	/* 000000ff */
	}
	xf_emit(ctx, 1, 0);	/* 00000007 OPERATION */
	xf_emit(ctx, 1, 0);	/* ff/3ff */
	xf_emit(ctx, 1, 0);	/* 00000003 UNK0F90 */
	xf_emit(ctx, 2, 1);	/* 00000007 BLEND_EQUATION_RGB, ALPHA */
	xf_emit(ctx, 1, 1);	/* 00000001 UNK133C */
	xf_emit(ctx, 1, 2);	/* 0000001f BLEND_FUNC_SRC_RGB */
	xf_emit(ctx, 1, 1);	/* 0000001f BLEND_FUNC_DST_RGB */
	xf_emit(ctx, 1, 2);	/* 0000001f BLEND_FUNC_SRC_ALPHA */
	xf_emit(ctx, 1, 1);	/* 0000001f BLEND_FUNC_DST_ALPHA */
	xf_emit(ctx, 1, 0);	/* 00000001 */
	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK12E4 */
		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_RGB */
		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_ALPHA */
		xf_emit(ctx, 8, 1);	/* 00000001 IBLEND_UNK00 */
		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_FUNC_SRC_RGB */
		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_FUNC_DST_RGB */
		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_FUNC_SRC_ALPHA */
		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_FUNC_DST_ALPHA */
		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1140 */
		xf_emit(ctx, 2, 0);	/* 00000001 */
		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
		xf_emit(ctx, 1, 0);	/* 0000000f */
		xf_emit(ctx, 1, 0);	/* 00000003 */
		xf_emit(ctx, 1, 0);	/* ffffffff */
		xf_emit(ctx, 2, 0);	/* 00000001 */
		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
		xf_emit(ctx, 1, 0);	/* 00000001 */
		xf_emit(ctx, 1, 0);	/* 000003ff */
	} else if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 2, 0);	/* 00000001 */
		xf_emit(ctx, 1, 0);	/* 00000007 */
		xf_emit(ctx, 1, 0);	/* 00000003 */
		xf_emit(ctx, 1, 0);	/* ffffffff */
		xf_emit(ctx, 2, 0);	/* 00000001 */
	} else {
		xf_emit(ctx, 1, 0);	/* 00000007 MULTISAMPLE_SAMPLES_LOG2 */
		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1430 */
		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
	}
	xf_emit(ctx, 4, 0);	/* ffffffff CLEAR_COLOR */
	xf_emit(ctx, 4, 0);	/* ffffffff BLEND_COLOR A R G B */
	xf_emit(ctx, 1, 0);	/* 00000fff eng2d UNK2B0 */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 2, 0);	/* 00000001 */
	xf_emit(ctx, 1, 0);	/* 000003ff */
	xf_emit(ctx, 8, 0);	/* 00000001 BLEND_ENABLE */
	xf_emit(ctx, 1, 1);	/* 00000001 UNK133C */
	xf_emit(ctx, 1, 2);	/* 0000001f BLEND_FUNC_SRC_RGB */
	xf_emit(ctx, 1, 1);	/* 0000001f BLEND_FUNC_DST_RGB */
	xf_emit(ctx, 1, 1);	/* 00000007 BLEND_EQUATION_RGB */
	xf_emit(ctx, 1, 2);	/* 0000001f BLEND_FUNC_SRC_ALPHA */
	xf_emit(ctx, 1, 1);	/* 0000001f BLEND_FUNC_DST_ALPHA */
	xf_emit(ctx, 1, 1);	/* 00000007 BLEND_EQUATION_ALPHA */
	xf_emit(ctx, 1, 0);	/* 00000001 UNK19C0 */
	xf_emit(ctx, 1, 0);	/* 00000001 LOGIC_OP_ENABLE */
	xf_emit(ctx, 1, 0);	/* 0000000f LOGIC_OP */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 0);	/* 00000001 UNK12E4?  NVA3+ only? */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 8, 1);	/* 00000001 IBLEND_UNK00 */
		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_RGB */
		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_FUNC_SRC_RGB */
		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_FUNC_DST_RGB */
		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_ALPHA */
		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_FUNC_SRC_ALPHA */
		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_FUNC_DST_ALPHA */
		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK15C4 */
		xf_emit(ctx, 1, 0);	/* 00000001 */
		xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1140 */
	}
	xf_emit(ctx, 1, 0x11);	/* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 DST_LINEAR */
	xf_emit(ctx, 1, 0);	/* 00000007 PATTERN_COLOR_FORMAT */
	xf_emit(ctx, 2, 0);	/* ffffffff PATTERN_MONO_COLOR */
	xf_emit(ctx, 1, 0);	/* 00000001 PATTERN_MONO_FORMAT */
	xf_emit(ctx, 2, 0);	/* ffffffff PATTERN_MONO_BITMAP */
	xf_emit(ctx, 1, 0);	/* 00000003 PATTERN_SELECT */
	xf_emit(ctx, 1, 0);	/* 000000ff ROP */
	xf_emit(ctx, 1, 0);	/* ffffffff BETA1 */
	xf_emit(ctx, 1, 0);	/* ffffffff BETA4 */
	xf_emit(ctx, 1, 0);	/* 00000007 OPERATION */
	xf_emit(ctx, 0x50, 0);	/* 10x ffffff, ffffff, ffffff, ffffff, 3 PATTERN */
}

/*
 * Emit the "unk84xx" xfer area: VP/GP result-map and register-allocation
 * slots, with chipset-dependent sizes and the magic3 UNK141C constant.
 * NOTE(review): call order/counts define the context image layout.
 */
static void
nv50_graph_construct_xfer_unk84xx(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int magic3;
	/* magic3: per-chipset value for the tesla UNK141C slots below;
	 * zero means the slot is skipped entirely. */
	switch (dev_priv->chipset) {
	case 0x50:
		magic3 = 0x1000;
		break;
	case 0x86:
	case 0x98:
	case 0xa8:
	case 0xaa:
	case 0xac:
	case 0xaf:
		magic3 = 0x1e00;
		break;
	default:
		magic3 = 0;
	}
	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4);	/* 7f/ff[NVA0+] VP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0);	/* 111/113[NVA0+] */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0x1f, 0);	/* ffffffff */
	else if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 0x0f, 0);	/* ffffffff */
	else
		xf_emit(ctx, 0x10, 0);	/* fffffff VP_RESULT_MAP_1 up */
	xf_emit(ctx, 2, 0);	/* f/1f[NVA3], fffffff/ffffffff[NVA0+] */
	xf_emit(ctx, 1, 4);	/* 7f/ff VP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 4);	/* 7f/ff VP_RESULT_MAP_SIZE */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 0x03020100);	/* ffffffff */
	else
		xf_emit(ctx, 1, 0x00608080);	/* fffffff VP_RESULT_MAP_0 */
	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 2, 0);	/* 111/113, 7f/ff */
	xf_emit(ctx, 1, 4);	/* 7f/ff VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4);	/* 000000ff GP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 4);	/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0x80);	/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
	if (magic3)
		xf_emit(ctx, 1, magic3);	/* 00007fff tesla UNK141C */
	xf_emit(ctx, 1, 4);	/* 7f/ff VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0);	/* 111/113 */
	xf_emit(ctx, 0x1f, 0);	/* ffffffff GP_RESULT_MAP_1 up */
	xf_emit(ctx, 1, 0);	/* 0000001f */
	xf_emit(ctx, 1, 0);	/* ffffffff */
	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4);	/* 000000ff GP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 0x80);	/* 0000ffff GP_VERTEX_OUTPUT_COUNT */
	xf_emit(ctx, 1, 4);	/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0x03020100);	/* ffffffff GP_RESULT_MAP_0 */
	xf_emit(ctx, 1, 3);	/* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
	if (magic3)
		xf_emit(ctx, 1, magic3);	/* 7fff tesla UNK141C */
	xf_emit(ctx, 1, 4);	/* 7f/ff VP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 0);	/* 00000001 PROVOKING_VERTEX_LAST */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0);	/* 111/113 */
	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4);	/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 3);	/* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
	xf_emit(ctx, 1, 0);	/* 00000001 PROVOKING_VERTEX_LAST */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK13A0 */
	xf_emit(ctx, 1, 4);	/* 7f/ff VP_REG_ALLOC_RESULT */
	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
	xf_emit(ctx, 1, 0);	/* 111/113 */
	/* Large chipset-dependent scratch area; sizes taken from the
	 * observed context image for each family. */
	if (dev_priv->chipset == 0x94 || dev_priv->chipset == 0x96)
		xf_emit(ctx, 0x1020, 0);	/* 4 x (0x400 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
	else if (dev_priv->chipset < 0xa0)
		xf_emit(ctx, 0xa20, 0);	/* 4 x (0x280 x 0xffffffff, ff, 0, 0, 0, 4 x ffffffff) */
	else if (!IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 0x210, 0);	/* ffffffff */
	else
		xf_emit(ctx, 0x410, 0);	/* ffffffff */
	xf_emit(ctx, 1, 0);	/* 00000001 GP_ENABLE */
	xf_emit(ctx, 1, 4);	/* 000000ff GP_RESULT_MAP_SIZE */
	xf_emit(ctx, 1, 3);	/* 00000003 GP_OUTPUT_PRIMITIVE_TYPE */
	xf_emit(ctx, 1, 0);	/* 00000001 PROVOKING_VERTEX_LAST */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A30 */
}

/*
 * Emit the TPROP xfer area: per-TP raster output state (alpha/stencil/
 * blend defaults, RT/ZETA surface slots, DMA object slots) with the
 * chipset-dependent magic1/magic2 constants.
 * NOTE(review): call order/counts define the context image layout.
 */
static void
nv50_graph_construct_xfer_tprop(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	int magic1, magic2;
	/* magic1 feeds the tesla UNK0D68 slot, magic2 the UNK0F78 slots. */
	if (dev_priv->chipset == 0x50) {
		magic1 = 0x3ff;
		magic2 = 0x00003e60;
	} else if (!IS_NVA3F(dev_priv->chipset)) {
		magic1 = 0x7ff;
		magic2 = 0x001ffe67;
	} else {
		magic1 = 0x7ff;
		magic2 = 0x00087e67;
	}
	xf_emit(ctx, 1, 0);	/* 00000007 ALPHA_TEST_FUNC */
	xf_emit(ctx, 1, 0);	/* ffffffff ALPHA_TEST_REF */
	xf_emit(ctx, 1, 0);	/* 00000001 ALPHA_TEST_ENABLE */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1);	/* 0000000f UNK16A0 */
	xf_emit(ctx, 1, 0);	/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_BACK_MASK */
	xf_emit(ctx, 3, 0);	/* 00000007 STENCIL_BACK_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 4, 0);	/* ffffffff BLEND_COLOR */
	xf_emit(ctx, 1, 0);	/* 00000001 UNK19C0 */
	xf_emit(ctx, 1, 0);	/* 00000001 UNK0FDC */
	xf_emit(ctx, 1, 0xf);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 LOGIC_OP_ENABLE */
	xf_emit(ctx, 1, 0);	/* ff[NV50]/3ff[NV84+] */
	xf_emit(ctx, 1, 4);	/* 00000007 FP_CONTROL */
	xf_emit(ctx, 4, 0xffff);	/* 0000ffff MSAA_MASK */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 3, 0);	/* 00000007 STENCIL_FRONT_OP_FAIL, ZFAIL, ZPASS */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_BACK_ENABLE */
	xf_emit(ctx, 2, 0);	/* 00007fff WINDOW_OFFSET_XY */
	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK19CC */
	xf_emit(ctx, 1, 0);	/* 7 */
	xf_emit(ctx, 1, 0);	/* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0);	/* ffffffff COLOR_KEY */
	xf_emit(ctx, 1, 0);	/* 00000001 COLOR_KEY_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000007 COLOR_KEY_FORMAT */
	xf_emit(ctx, 2, 0);	/* ffffffff SIFC_BITMAP_COLOR */
	xf_emit(ctx, 1, 1);	/* 00000001 SIFC_BITMAP_WRITE_BIT0_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000007 ALPHA_TEST_FUNC */
	xf_emit(ctx, 1, 0);	/* 00000001 ALPHA_TEST_ENABLE */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK16B4 */
		xf_emit(ctx, 1, 0);	/* 00000003 */
		xf_emit(ctx, 1, 0);	/* 00000003 tesla UNK1298 */
	} else if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK16B4 */
		xf_emit(ctx, 1, 0);	/* 00000003 */
	} else {
		xf_emit(ctx, 1, 0);	/* 00000003 MULTISAMPLE_CTRL */
	}
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 8, 0);	/* 00000001 BLEND_ENABLE */
	xf_emit(ctx, 1, 1);	/* 0000001f BLEND_FUNC_DST_ALPHA */
	xf_emit(ctx, 1, 1);	/* 00000007 BLEND_EQUATION_ALPHA */
	xf_emit(ctx, 1, 2);	/* 0000001f BLEND_FUNC_SRC_ALPHA */
	xf_emit(ctx, 1, 1);	/* 0000001f BLEND_FUNC_DST_RGB */
	xf_emit(ctx, 1, 1);	/* 00000007 BLEND_EQUATION_RGB */
	xf_emit(ctx, 1, 2);	/* 0000001f BLEND_FUNC_SRC_RGB */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 0);	/* 00000001 UNK12E4 */
		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_RGB */
		xf_emit(ctx, 8, 1);	/* 00000007 IBLEND_EQUATION_ALPHA */
		xf_emit(ctx, 8, 1);	/* 00000001 IBLEND_UNK00 */
		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_SRC_RGB */
		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_DST_RGB */
		xf_emit(ctx, 8, 2);	/* 0000001f IBLEND_SRC_ALPHA */
		xf_emit(ctx, 8, 1);	/* 0000001f IBLEND_DST_ALPHA */
		xf_emit(ctx, 1, 0);	/* 00000001 UNK1140 */
	}
	xf_emit(ctx, 1, 1);	/* 00000001 UNK133C */
	xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0);	/* 00000001 LOGIC_OP_ENABLE */
	xf_emit(ctx, 1, 0);	/* ff/3ff */
	xf_emit(ctx, 1, 4);	/* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0);	/* 00000003 UNK0F90 */
	xf_emit(ctx, 1, 0);	/* 00000001 FRAMEBUFFER_SRGB */
	xf_emit(ctx, 1, 0);	/* 7 */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 DST_LINEAR */
	xf_emit(ctx, 1, 0);	/* 00000007 OPERATION */
	xf_emit(ctx, 1, 0xcf);	/* 000000ff SIFC_FORMAT */
	xf_emit(ctx, 1, 0xcf);	/* 000000ff DRAW_COLOR_FORMAT */
	xf_emit(ctx, 1, 0xcf);	/* 000000ff SRC_FORMAT */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
	xf_emit(ctx, 1, 0);	/* 7/f[NVA3] MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 8, 0);	/* 00000001 BLEND_ENABLE */
	xf_emit(ctx, 1, 1);	/* 0000001f BLEND_FUNC_DST_ALPHA */
	xf_emit(ctx, 1, 1);	/* 00000007 BLEND_EQUATION_ALPHA */
	xf_emit(ctx, 1, 2);	/* 0000001f BLEND_FUNC_SRC_ALPHA */
	xf_emit(ctx, 1, 1);	/* 0000001f BLEND_FUNC_DST_RGB */
	xf_emit(ctx, 1, 1);	/* 00000007 BLEND_EQUATION_RGB */
	xf_emit(ctx, 1, 2);	/* 0000001f BLEND_FUNC_SRC_RGB */
	xf_emit(ctx, 1, 1);	/* 00000001 UNK133C */
	xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	xf_emit(ctx, 8, 1);	/* 00000001 UNK19E0 */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0xf);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 DST_LINEAR */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0);	/* ff */
	else
		xf_emit(ctx, 3, 0);	/* 1, 7, 3ff */
	xf_emit(ctx, 1, 4);	/* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0);	/* 00000003 UNK0F90 */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000007 */
	xf_emit(ctx, 1, 0);	/* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
	xf_emit(ctx, 1, 0);	/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 DST_LINEAR */
	xf_emit(ctx, 1, 0);	/* 000fffff BLIT_DU_DX_FRACT */
	xf_emit(ctx, 1, 1);	/* 0001ffff BLIT_DU_DX_INT */
	xf_emit(ctx, 1, 0);	/* 000fffff BLIT_DV_DY_FRACT */
	xf_emit(ctx, 1, 1);	/* 0001ffff BLIT_DV_DY_INT */
	xf_emit(ctx, 1, 0);	/* ff/3ff */
	xf_emit(ctx, 1, magic1);	/* 3ff/7ff tesla UNK0D68 */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK15B4 */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000007 */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
	xf_emit(ctx, 8, 0);	/* 0000ffff DMA_COLOR */
	xf_emit(ctx, 1, 0);	/* 0000ffff DMA_GLOBAL */
	xf_emit(ctx, 1, 0);	/* 0000ffff DMA_LOCAL */
	xf_emit(ctx, 1, 0);	/* 0000ffff DMA_STACK */
	xf_emit(ctx, 1, 0);	/* ff/3ff */
	xf_emit(ctx, 1, 0);	/* 0000ffff DMA_DST */
	xf_emit(ctx, 1, 0);	/* 7 */
	xf_emit(ctx, 1, 0);	/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	xf_emit(ctx, 8, 0);	/* 000000ff RT_ADDRESS_HIGH */
	xf_emit(ctx, 8, 0);	/* ffffffff RT_LAYER_STRIDE */
	xf_emit(ctx, 8, 0);	/* ffffffff RT_ADDRESS_LOW */
	xf_emit(ctx, 8, 8);	/* 0000007f RT_TILE_MODE */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
	xf_emit(ctx, 8, 0x400);	/* 0fffffff RT_HORIZ */
	xf_emit(ctx, 8, 0x300);	/* 0000ffff RT_VERT */
	xf_emit(ctx, 1, 1);	/* 00001fff RT_ARRAY_MODE */
	xf_emit(ctx, 1, 0xf);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, 0x20);	/* 00000fff DST_TILE_MODE */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 0x100);	/* 0001ffff DST_HEIGHT */
	xf_emit(ctx, 1, 0);	/* 000007ff DST_LAYER */
	xf_emit(ctx, 1, 1);	/* 00000001 DST_LINEAR */
	xf_emit(ctx, 1, 0);	/* ffffffff DST_ADDRESS_LOW */
	xf_emit(ctx, 1, 0);	/* 000000ff DST_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0x40);	/* 0007ffff DST_PITCH */
	xf_emit(ctx, 1, 0x100);	/* 0001ffff DST_WIDTH */
	xf_emit(ctx, 1, 0);	/* 0000ffff */
	xf_emit(ctx, 1, 3);	/* 00000003 tesla UNK15AC */
	xf_emit(ctx, 1, 0);	/* ff/3ff */
	xf_emit(ctx, 1, 0);	/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0);	/* 00000003 UNK0F90 */
	xf_emit(ctx, 1, 0);	/* 00000007 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
	xf_emit(ctx, 1, 0);	/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK1534 */
	xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	xf_emit(ctx, 1, 2);	/* 00000003 tesla UNK143C */
	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0);	/* 0000ffff DMA_ZETA */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 2, 0);	/* ffff, ff/3ff */
	xf_emit(ctx, 1, 0);	/* 0001ffff GP_BUILTIN_RESULT_EN */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 000000ff STENCIL_FRONT_MASK */
	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK15B4 */
	xf_emit(ctx, 1, 0);	/* 00000007 */
	xf_emit(ctx, 1, 0);	/* ffffffff ZETA_LAYER_STRIDE */
	xf_emit(ctx, 1, 0);	/* 000000ff ZETA_ADDRESS_HIGH */
	xf_emit(ctx, 1, 0);	/* ffffffff ZETA_ADDRESS_LOW */
	xf_emit(ctx, 1, 4);	/* 00000007 ZETA_TILE_MODE */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
	xf_emit(ctx, 1, 0x400);	/* 0fffffff ZETA_HORIZ */
	xf_emit(ctx, 1, 0x300);	/* 0000ffff ZETA_VERT */
	xf_emit(ctx, 1, 0x1001);	/* 00001fff ZETA_ARRAY_MODE */
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
	xf_emit(ctx, 1, 0);	/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	if (IS_NVA3F(dev_priv->chipset))
		xf_emit(ctx, 1, 0);	/* 00000001 */
	xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 7, 0);	/* 3f/7f RT_FORMAT */
	xf_emit(ctx, 1, 0x0fac6881);	/* 0fffffff RT_CONTROL */
	xf_emit(ctx, 1, 0xf);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 7, 0);	/* 0000000f COLOR_MASK */
	xf_emit(ctx, 1, 0);	/* ff/3ff */
	xf_emit(ctx, 8, 0);	/* 00000001 BLEND_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000003 UNK0F90 */
	xf_emit(ctx, 1, 0);	/* 00000001 FRAMEBUFFER_SRGB */
	xf_emit(ctx, 1, 0);	/* 7 */
	xf_emit(ctx, 1, 0);	/* 00000001 LOGIC_OP_ENABLE */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 0);	/* 00000001 UNK1140 */
		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
	}
	xf_emit(ctx, 1, 0);	/* 7/f MULTISAMPLE_SAMPLES_LOG2 */
	xf_emit(ctx, 1, 0);	/* 00000001 UNK1534 */
	xf_emit(ctx, 1, 0);	/* ffff0ff3 */
	if (dev_priv->chipset >= 0xa0)
		xf_emit(ctx, 1, 0x0fac6881);	/* fffffff */
	xf_emit(ctx, 1, magic2);	/* 001fffff tesla UNK0F78 */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_BOUNDS_EN */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_TEST_ENABLE */
	xf_emit(ctx, 1, 0);	/* 00000001 DEPTH_WRITE_ENABLE */
	xf_emit(ctx, 1, 0x11);	/* 3f/7f DST_FORMAT */
	xf_emit(ctx, 1, 0);	/* 00000001 tesla UNK0FB0 */
	xf_emit(ctx, 1, 0);	/* ff/3ff */
	xf_emit(ctx, 1, 4);	/* 00000007 FP_CONTROL */
	xf_emit(ctx, 1, 0);	/* 00000001 STENCIL_FRONT_ENABLE */
	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK15B4 */
	xf_emit(ctx, 1, 1);	/* 00000001 tesla UNK19CC */
	xf_emit(ctx, 1, 0);	/* 00000007 */
	xf_emit(ctx, 1, 0);	/* 00000001 SAMPLECNT_ENABLE */
	xf_emit(ctx, 1, 0);	/* 0000000f ZETA_FORMAT */
	xf_emit(ctx, 1, 1);	/* 00000001 ZETA_ENABLE */
	if (IS_NVA3F(dev_priv->chipset)) {
		xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
		xf_emit(ctx, 1, 0);	/* 0000000f tesla UNK15C8 */
	}
	xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A3C */
	if (dev_priv->chipset >= 0xa0) {
		xf_emit(ctx, 3, 0);	/* 7/f, 1, ffff0ff3 */
		xf_emit(ctx, 1, 0xfac6881);	/* fffffff */
		xf_emit(ctx, 4, 0);	/* 1, 1, 1, 3ff */
		xf_emit(ctx, 1, 4);	/* 7 */
		xf_emit(ctx, 1, 0);	/* 1 */
		xf_emit(ctx, 2, 1);	/* 1 */
		xf_emit(ctx, 2, 0);	/* 7, f */
		xf_emit(ctx, 1, 1);	/* 1 */
		xf_emit(ctx, 1, 0);	/* 7/f */
		if (IS_NVA3F(dev_priv->chipset))
			xf_emit(ctx, 0x9, 0);	/* 1 */
		else
			xf_emit(ctx, 0x8, 0);	/* 1 */
		xf_emit(ctx, 1, 0);	/* ffff0ff3 */
		xf_emit(ctx, 8, 1);	/* 1 */
		xf_emit(ctx, 1, 0x11);	/* 7f */
		xf_emit(ctx, 7, 0);	/* 7f */
		xf_emit(ctx, 1, 0xfac6881);	/* fffffff */
		xf_emit(ctx, 1, 0xf);	/* f */
		xf_emit(ctx, 7, 0);	/* f */
		xf_emit(ctx, 1, 0x11);	/* 7f */
		xf_emit(ctx, 1, 1);	/* 1 */
		xf_emit(ctx, 5, 0);	/* 1, 7, 3ff, 3, 7 */
		if (IS_NVA3F(dev_priv->chipset)) {
			xf_emit(ctx, 1, 0);	/* 00000001 UNK1140 */
			xf_emit(ctx, 1, 1);	/* 0000001f tesla UNK169C */
		}
	}
}

/*
 * Emit the TEX xfer area: BLIT deltas, SRC_TIC defaults and per-chipset
 * texture-unit slots.  (Function continues past this chunk.)
 * NOTE(review): call order/counts define the context image layout.
 */
static void
nv50_graph_construct_xfer_tex(struct nouveau_grctx *ctx)
{
	struct drm_nouveau_private *dev_priv = ctx->dev->dev_private;
	xf_emit(ctx, 2, 0);	/* 1 LINKED_TSC. yes, 2. */
	if (dev_priv->chipset != 0x50)
		xf_emit(ctx, 1, 0);	/* 3 */
	xf_emit(ctx, 1, 1);	/* 1ffff BLIT_DU_DX_INT */
	xf_emit(ctx, 1, 0);	/* fffff BLIT_DU_DX_FRACT */
	xf_emit(ctx, 1, 1);	/* 1ffff BLIT_DV_DY_INT */
	xf_emit(ctx, 1, 0);	/* fffff BLIT_DV_DY_FRACT */
	if (dev_priv->chipset == 0x50)
		xf_emit(ctx, 1, 0);	/* 3 BLIT_CONTROL */
	else
		xf_emit(ctx, 2, 0);	/* 3ff, 1 */
	xf_emit(ctx, 1, 0x2a712488);	/* ffffffff SRC_TIC_0 */
	xf_emit(ctx, 1, 0);	/* ffffffff SRC_TIC_1 */
	xf_emit(ctx, 1, 0x4085c000);	/* ffffffff SRC_TIC_2 */
	xf_emit(ctx, 1, 0x40);	/* ffffffff SRC_TIC_3 */
	xf_emit(ctx, 1, 0x100);	/* ffffffff SRC_TIC_4 */
	xf_emit(ctx, 1, 0x10100);	/* ffffffff SRC_TIC_5 */
	xf_emit(ctx, 1, 0x02800000);	/* ffffffff SRC_TIC_6 */
	xf_emit(ctx, 1, 0);	/* ffffffff SRC_TIC_7 */
	if (dev_priv->chipset == 0x50) {
		xf_emit(ctx, 1, 0);	/* 00000001 turing UNK358 */
		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A34? */
		xf_emit(ctx, 1, 0);	/* 00000003 turing UNK37C tesla UNK1690 */
		xf_emit(ctx, 1, 0);	/* 00000003 BLIT_CONTROL */
		xf_emit(ctx, 1, 0);	/* 00000001 turing UNK32C tesla UNK0F94 */
	} else if (!IS_NVAAF(dev_priv->chipset)) {
		xf_emit(ctx, 1, 0);	/* ffffffff tesla UNK1A34?
*/ xf_emit(ctx, 1, 0); /* 00000003 */ xf_emit(ctx, 1, 0); /* 000003ff */ xf_emit(ctx, 1, 0); /* 00000003 */ xf_emit(ctx, 1, 0); /* 000003ff */ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1664 / turing UNK03E8 */ xf_emit(ctx, 1, 0); /* 00000003 */ xf_emit(ctx, 1, 0); /* 000003ff */ } else { xf_emit(ctx, 0x6, 0); } xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ xf_emit(ctx, 1, 0); /* 0000ffff DMA_TEXTURE */ xf_emit(ctx, 1, 0); /* 0000ffff DMA_SRC */ } static void nv50_graph_construct_xfer_unk8cxx(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ xf_emit(ctx, 2, 0); /* 7, ffff0ff3 */ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0D64 */ xf_emit(ctx, 1, 0x04e3bfdf); /* ffffffff UNK0DF4 */ xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK0F98 */ if (IS_NVA3F(dev_priv->chipset)) xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1668 */ xf_emit(ctx, 1, 0); /* 00000001 LINE_STIPPLE_ENABLE */ xf_emit(ctx, 1, 0x00ffff00); /* 00ffffff LINE_STIPPLE_PATTERN */ xf_emit(ctx, 1, 0); /* 00000001 POLYGON_SMOOTH_ENABLE */ xf_emit(ctx, 1, 0); /* 00000001 UNK1534 */ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1658 */ xf_emit(ctx, 1, 0); /* 00000001 LINE_SMOOTH_ENABLE */ xf_emit(ctx, 1, 0); /* ffff0ff3 */ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_TEST_ENABLE */ xf_emit(ctx, 1, 0); /* 00000001 DEPTH_WRITE */ xf_emit(ctx, 1, 1); /* 00000001 UNK15B4 */ xf_emit(ctx, 1, 0); /* 00000001 POINT_SPRITE_ENABLE */ xf_emit(ctx, 1, 1); /* 00000001 tesla UNK165C */ xf_emit(ctx, 1, 0x30201000); /* ffffffff tesla UNK1670 */ 
xf_emit(ctx, 1, 0x70605040); /* ffffffff tesla UNK1670 */ xf_emit(ctx, 1, 0xb8a89888); /* ffffffff tesla UNK1670 */ xf_emit(ctx, 1, 0xf8e8d8c8); /* ffffffff tesla UNK1670 */ xf_emit(ctx, 1, 0); /* 00000001 VERTEX_TWO_SIDE_ENABLE */ xf_emit(ctx, 1, 0x1a); /* 0000001f POLYGON_MODE */ } static void nv50_graph_construct_xfer_tp(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; if (dev_priv->chipset < 0xa0) { nv50_graph_construct_xfer_unk84xx(ctx); nv50_graph_construct_xfer_tprop(ctx); nv50_graph_construct_xfer_tex(ctx); nv50_graph_construct_xfer_unk8cxx(ctx); } else { nv50_graph_construct_xfer_tex(ctx); nv50_graph_construct_xfer_tprop(ctx); nv50_graph_construct_xfer_unk8cxx(ctx); nv50_graph_construct_xfer_unk84xx(ctx); } } static void nv50_graph_construct_xfer_mpc(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; int i, mpcnt = 2; switch (dev_priv->chipset) { case 0x98: case 0xaa: mpcnt = 1; break; case 0x50: case 0x84: case 0x86: case 0x92: case 0x94: case 0x96: case 0xa8: case 0xac: mpcnt = 2; break; case 0xa0: case 0xa3: case 0xa5: case 0xaf: mpcnt = 3; break; } for (i = 0; i < mpcnt; i++) { xf_emit(ctx, 1, 0); /* ff */ xf_emit(ctx, 1, 0x80); /* ffffffff tesla UNK1404 */ xf_emit(ctx, 1, 0x80007004); /* ffffffff tesla UNK12B0 */ xf_emit(ctx, 1, 0x04000400); /* ffffffff */ if (dev_priv->chipset >= 0xa0) xf_emit(ctx, 1, 0xc0); /* 00007fff tesla UNK152C */ xf_emit(ctx, 1, 0x1000); /* 0000ffff tesla UNK0D60 */ xf_emit(ctx, 1, 0); /* ff/3ff */ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A30 */ if (dev_priv->chipset == 0x86 || dev_priv->chipset == 0x98 || dev_priv->chipset == 0xa8 || IS_NVAAF(dev_priv->chipset)) { xf_emit(ctx, 1, 0xe00); /* 7fff */ xf_emit(ctx, 1, 0x1e00); /* 7fff */ } xf_emit(ctx, 1, 1); /* 000000ff VP_REG_ALLOC_TEMP */ xf_emit(ctx, 1, 0); /* 00000001 LINKED_TSC */ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 2, 0x1000); /* 7fff 
tesla UNK141C */ xf_emit(ctx, 1, 1); /* 000000ff GP_REG_ALLOC_TEMP */ xf_emit(ctx, 1, 0); /* 00000001 GP_ENABLE */ xf_emit(ctx, 1, 4); /* 000000ff FP_REG_ALLOC_TEMP */ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ if (IS_NVAAF(dev_priv->chipset)) xf_emit(ctx, 0xb, 0); /* RO */ else if (dev_priv->chipset >= 0xa0) xf_emit(ctx, 0xc, 0); /* RO */ else xf_emit(ctx, 0xa, 0); /* RO */ } xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ xf_emit(ctx, 1, 0); /* ff/3ff */ if (dev_priv->chipset >= 0xa0) { xf_emit(ctx, 1, 0x1fe21); /* 0003ffff tesla UNK0FAC */ } xf_emit(ctx, 3, 0); /* 7fff, 0, 0 */ xf_emit(ctx, 1, 0); /* 00000001 tesla UNK1534 */ xf_emit(ctx, 1, 0); /* 7/f MULTISAMPLE_SAMPLES_LOG2 */ xf_emit(ctx, 4, 0xffff); /* 0000ffff MSAA_MASK */ xf_emit(ctx, 1, 1); /* 00000001 LANES32 */ xf_emit(ctx, 1, 0x10001); /* 00ffffff BLOCK_ALLOC */ xf_emit(ctx, 1, 0x10001); /* ffffffff BLOCKDIM_XY */ xf_emit(ctx, 1, 1); /* 0000ffff BLOCKDIM_Z */ xf_emit(ctx, 1, 0); /* ffffffff SHARED_SIZE */ xf_emit(ctx, 1, 0x1fe21); /* 1ffff/3ffff[NVA0+] tesla UNk0FAC */ xf_emit(ctx, 1, 0); /* ffffffff tesla UNK1A34 */ if (IS_NVA3F(dev_priv->chipset)) xf_emit(ctx, 1, 1); /* 0000001f tesla UNK169C */ xf_emit(ctx, 1, 0); /* ff/3ff */ xf_emit(ctx, 1, 0); /* 1 LINKED_TSC */ xf_emit(ctx, 1, 0); /* ff FP_ADDRESS_HIGH */ xf_emit(ctx, 1, 0); /* ffffffff FP_ADDRESS_LOW */ xf_emit(ctx, 1, 0x08100c12); /* 1fffffff FP_INTERPOLANT_CTRL */ xf_emit(ctx, 1, 4); /* 00000007 FP_CONTROL */ xf_emit(ctx, 1, 0); /* 000000ff FRAG_COLOR_CLAMP_EN */ xf_emit(ctx, 1, 2); /* 00000003 REG_MODE */ xf_emit(ctx, 1, 0x11); /* 0000007f RT_FORMAT */ xf_emit(ctx, 7, 0); /* 0000007f RT_FORMAT */ xf_emit(ctx, 1, 0); /* 00000007 */ xf_emit(ctx, 1, 0xfac6881); /* 0fffffff RT_CONTROL */ xf_emit(ctx, 1, 0); /* 00000003 MULTISAMPLE_CTRL */ if (IS_NVA3F(dev_priv->chipset)) xf_emit(ctx, 1, 3); /* 00000003 tesla UNK16B4 */ xf_emit(ctx, 1, 0); /* 00000001 ALPHA_TEST_ENABLE */ xf_emit(ctx, 1, 0); /* 00000007 ALPHA_TEST_FUNC */ 
xf_emit(ctx, 1, 0); /* 00000001 FRAMEBUFFER_SRGB */ xf_emit(ctx, 1, 4); /* ffffffff tesla UNK1400 */ xf_emit(ctx, 8, 0); /* 00000001 BLEND_ENABLE */ xf_emit(ctx, 1, 0); /* 00000001 LOGIC_OP_ENABLE */ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_RGB */ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_RGB */ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_RGB */ xf_emit(ctx, 1, 2); /* 0000001f BLEND_FUNC_SRC_ALPHA */ xf_emit(ctx, 1, 1); /* 0000001f BLEND_FUNC_DST_ALPHA */ xf_emit(ctx, 1, 1); /* 00000007 BLEND_EQUATION_ALPHA */ xf_emit(ctx, 1, 1); /* 00000001 UNK133C */ if (IS_NVA3F(dev_priv->chipset)) { xf_emit(ctx, 1, 0); /* 00000001 UNK12E4 */ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_RGB */ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_RGB */ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_RGB */ xf_emit(ctx, 8, 2); /* 0000001f IBLEND_FUNC_SRC_ALPHA */ xf_emit(ctx, 8, 1); /* 0000001f IBLEND_FUNC_DST_ALPHA */ xf_emit(ctx, 8, 1); /* 00000007 IBLEND_EQUATION_ALPHA */ xf_emit(ctx, 8, 1); /* 00000001 IBLEND_UNK00 */ xf_emit(ctx, 1, 0); /* 00000003 tesla UNK1928 */ xf_emit(ctx, 1, 0); /* 00000001 UNK1140 */ } xf_emit(ctx, 1, 0); /* 00000003 tesla UNK0F90 */ xf_emit(ctx, 1, 4); /* 000000ff FP_RESULT_COUNT */ /* XXX: demagic this part some day */ if (dev_priv->chipset == 0x50) xf_emit(ctx, 0x3a0, 0); else if (dev_priv->chipset < 0x94) xf_emit(ctx, 0x3a2, 0); else if (dev_priv->chipset == 0x98 || dev_priv->chipset == 0xaa) xf_emit(ctx, 0x39f, 0); else xf_emit(ctx, 0x3a3, 0); xf_emit(ctx, 1, 0x11); /* 3f/7f DST_FORMAT */ xf_emit(ctx, 1, 0); /* 7 OPERATION */ xf_emit(ctx, 1, 1); /* 1 DST_LINEAR */ xf_emit(ctx, 0x2d, 0); } static void nv50_graph_construct_xfer2(struct nouveau_grctx *ctx) { struct drm_nouveau_private *dev_priv = ctx->dev->dev_private; int i; uint32_t offset; uint32_t units = nv_rd32 (ctx->dev, 0x1540); int size = 0; offset = (ctx->ctxvals_pos+0x3f)&~0x3f; if (dev_priv->chipset < 0xa0) { for (i = 0; i < 8; i++) { ctx->ctxvals_pos = offset + i; /* that 
little bugger belongs to csched. No idea * what it's doing here. */ if (i == 0) xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ if (units & (1 << i)) nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; } } else { /* Strand 0: TPs 0, 1 */ ctx->ctxvals_pos = offset; /* that little bugger belongs to csched. No idea * what it's doing here. */ xf_emit(ctx, 1, 0x08100c12); /* FP_INTERPOLANT_CTRL */ if (units & (1 << 0)) nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 1)) nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 1: TPs 2, 3 */ ctx->ctxvals_pos = offset + 1; if (units & (1 << 2)) nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 3)) nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 2: TPs 4, 5, 6 */ ctx->ctxvals_pos = offset + 2; if (units & (1 << 4)) nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 5)) nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 6)) nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; /* Strand 3: TPs 7, 8, 9 */ ctx->ctxvals_pos = offset + 3; if (units & (1 << 7)) nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 8)) nv50_graph_construct_xfer_mpc(ctx); if (units & (1 << 9)) nv50_graph_construct_xfer_mpc(ctx); if ((ctx->ctxvals_pos-offset)/8 > size) size = (ctx->ctxvals_pos-offset)/8; } ctx->ctxvals_pos = offset + size * 8; ctx->ctxvals_pos = (ctx->ctxvals_pos+0x3f)&~0x3f; cp_lsr (ctx, offset); cp_out (ctx, CP_SET_XFER_POINTER); cp_lsr (ctx, size); cp_out (ctx, CP_SEEK_2); cp_out (ctx, CP_XFER_2); cp_wait(ctx, XFER, BUSY); }
gpl-2.0
versusx/android_kernel_samsung_logan2g
arch/powerpc/platforms/powermac/setup.c
2332
16107
/* * Powermac setup and early boot code plus other random bits. * * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Adapted for Power Macintosh by Paul Mackerras * Copyright (C) 1996 Paul Mackerras (paulus@samba.org) * * Derived from "arch/alpha/kernel/setup.c" * Copyright (C) 1995 Linus Torvalds * * Maintained by Benjamin Herrenschmidt (benh@kernel.crashing.org) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ /* * bootup setup stuff.. */ #include <linux/init.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/stddef.h> #include <linux/unistd.h> #include <linux/ptrace.h> #include <linux/user.h> #include <linux/tty.h> #include <linux/string.h> #include <linux/delay.h> #include <linux/ioport.h> #include <linux/major.h> #include <linux/initrd.h> #include <linux/vt_kern.h> #include <linux/console.h> #include <linux/pci.h> #include <linux/adb.h> #include <linux/cuda.h> #include <linux/pmu.h> #include <linux/irq.h> #include <linux/seq_file.h> #include <linux/root_dev.h> #include <linux/bitops.h> #include <linux/suspend.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <linux/memblock.h> #include <asm/reg.h> #include <asm/sections.h> #include <asm/prom.h> #include <asm/system.h> #include <asm/pgtable.h> #include <asm/io.h> #include <asm/pci-bridge.h> #include <asm/ohare.h> #include <asm/mediabay.h> #include <asm/machdep.h> #include <asm/dma.h> #include <asm/cputable.h> #include <asm/btext.h> #include <asm/pmac_feature.h> #include <asm/time.h> #include <asm/mmu_context.h> #include <asm/iommu.h> #include <asm/smu.h> #include <asm/pmc.h> #include <asm/udbg.h> #include "pmac.h" #undef SHOW_GATWICK_IRQS int ppc_override_l2cr = 0; int ppc_override_l2cr_value; 
int has_l2cache = 0; int pmac_newworld; static int current_root_goodness = -1; extern struct machdep_calls pmac_md; #define DEFAULT_ROOT_DEVICE Root_SDA1 /* sda1 - slightly silly choice */ #ifdef CONFIG_PPC64 int sccdbg; #endif sys_ctrler_t sys_ctrler = SYS_CTRLER_UNKNOWN; EXPORT_SYMBOL(sys_ctrler); #ifdef CONFIG_PMAC_SMU unsigned long smu_cmdbuf_abs; EXPORT_SYMBOL(smu_cmdbuf_abs); #endif static void pmac_show_cpuinfo(struct seq_file *m) { struct device_node *np; const char *pp; int plen; int mbmodel; unsigned int mbflags; char* mbname; mbmodel = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_MODEL, 0); mbflags = pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_FLAGS, 0); if (pmac_call_feature(PMAC_FTR_GET_MB_INFO, NULL, PMAC_MB_INFO_NAME, (long) &mbname) != 0) mbname = "Unknown"; /* find motherboard type */ seq_printf(m, "machine\t\t: "); np = of_find_node_by_path("/"); if (np != NULL) { pp = of_get_property(np, "model", NULL); if (pp != NULL) seq_printf(m, "%s\n", pp); else seq_printf(m, "PowerMac\n"); pp = of_get_property(np, "compatible", &plen); if (pp != NULL) { seq_printf(m, "motherboard\t:"); while (plen > 0) { int l = strlen(pp) + 1; seq_printf(m, " %s", pp); plen -= l; pp += l; } seq_printf(m, "\n"); } of_node_put(np); } else seq_printf(m, "PowerMac\n"); /* print parsed model */ seq_printf(m, "detected as\t: %d (%s)\n", mbmodel, mbname); seq_printf(m, "pmac flags\t: %08x\n", mbflags); /* find l2 cache info */ np = of_find_node_by_name(NULL, "l2-cache"); if (np == NULL) np = of_find_node_by_type(NULL, "cache"); if (np != NULL) { const unsigned int *ic = of_get_property(np, "i-cache-size", NULL); const unsigned int *dc = of_get_property(np, "d-cache-size", NULL); seq_printf(m, "L2 cache\t:"); has_l2cache = 1; if (of_get_property(np, "cache-unified", NULL) != 0 && dc) { seq_printf(m, " %dK unified", *dc / 1024); } else { if (ic) seq_printf(m, " %dK instruction", *ic / 1024); if (dc) seq_printf(m, "%s %dK data", (ic? 
" +": ""), *dc / 1024); } pp = of_get_property(np, "ram-type", NULL); if (pp) seq_printf(m, " %s", pp); seq_printf(m, "\n"); of_node_put(np); } /* Indicate newworld/oldworld */ seq_printf(m, "pmac-generation\t: %s\n", pmac_newworld ? "NewWorld" : "OldWorld"); } #ifndef CONFIG_ADB_CUDA int find_via_cuda(void) { struct device_node *dn = of_find_node_by_name(NULL, "via-cuda"); if (!dn) return 0; of_node_put(dn); printk("WARNING ! Your machine is CUDA-based but your kernel\n"); printk(" wasn't compiled with CONFIG_ADB_CUDA option !\n"); return 0; } #endif #ifndef CONFIG_ADB_PMU int find_via_pmu(void) { struct device_node *dn = of_find_node_by_name(NULL, "via-pmu"); if (!dn) return 0; of_node_put(dn); printk("WARNING ! Your machine is PMU-based but your kernel\n"); printk(" wasn't compiled with CONFIG_ADB_PMU option !\n"); return 0; } #endif #ifndef CONFIG_PMAC_SMU int smu_init(void) { /* should check and warn if SMU is present */ return 0; } #endif #ifdef CONFIG_PPC32 static volatile u32 *sysctrl_regs; static void __init ohare_init(void) { struct device_node *dn; /* this area has the CPU identification register and some registers used by smp boards */ sysctrl_regs = (volatile u32 *) ioremap(0xf8000000, 0x1000); /* * Turn on the L2 cache. * We assume that we have a PSX memory controller iff * we have an ohare I/O controller. 
*/ dn = of_find_node_by_name(NULL, "ohare"); if (dn) { of_node_put(dn); if (((sysctrl_regs[2] >> 24) & 0xf) >= 3) { if (sysctrl_regs[4] & 0x10) sysctrl_regs[4] |= 0x04000020; else sysctrl_regs[4] |= 0x04000000; if(has_l2cache) printk(KERN_INFO "Level 2 cache enabled\n"); } } } static void __init l2cr_init(void) { /* Checks "l2cr-value" property in the registry */ if (cpu_has_feature(CPU_FTR_L2CR)) { struct device_node *np = of_find_node_by_name(NULL, "cpus"); if (np == 0) np = of_find_node_by_type(NULL, "cpu"); if (np != 0) { const unsigned int *l2cr = of_get_property(np, "l2cr-value", NULL); if (l2cr != 0) { ppc_override_l2cr = 1; ppc_override_l2cr_value = *l2cr; _set_L2CR(0); _set_L2CR(ppc_override_l2cr_value); } of_node_put(np); } } if (ppc_override_l2cr) printk(KERN_INFO "L2CR overridden (0x%x), " "backside cache is %s\n", ppc_override_l2cr_value, (ppc_override_l2cr_value & 0x80000000) ? "enabled" : "disabled"); } #endif static void __init pmac_setup_arch(void) { struct device_node *cpu, *ic; const int *fp; unsigned long pvr; pvr = PVR_VER(mfspr(SPRN_PVR)); /* Set loops_per_jiffy to a half-way reasonable value, for use until calibrate_delay gets called. */ loops_per_jiffy = 50000000 / HZ; cpu = of_find_node_by_type(NULL, "cpu"); if (cpu != NULL) { fp = of_get_property(cpu, "clock-frequency", NULL); if (fp != NULL) { if (pvr >= 0x30 && pvr < 0x80) /* PPC970 etc. */ loops_per_jiffy = *fp / (3 * HZ); else if (pvr == 4 || pvr >= 8) /* 604, G3, G4 etc. */ loops_per_jiffy = *fp / HZ; else /* 601, 603, etc. 
*/ loops_per_jiffy = *fp / (2 * HZ); } of_node_put(cpu); } /* See if newworld or oldworld */ ic = of_find_node_with_property(NULL, "interrupt-controller"); if (ic) { pmac_newworld = 1; of_node_put(ic); } /* Lookup PCI hosts */ pmac_pci_init(); #ifdef CONFIG_PPC32 ohare_init(); l2cr_init(); #endif /* CONFIG_PPC32 */ find_via_cuda(); find_via_pmu(); smu_init(); #if defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) || \ defined(CONFIG_PPC64) pmac_nvram_init(); #endif #ifdef CONFIG_PPC32 #ifdef CONFIG_BLK_DEV_INITRD if (initrd_start) ROOT_DEV = Root_RAM0; else #endif ROOT_DEV = DEFAULT_ROOT_DEVICE; #endif #ifdef CONFIG_ADB if (strstr(cmd_line, "adb_sync")) { extern int __adb_probe_sync; __adb_probe_sync = 1; } #endif /* CONFIG_ADB */ } #ifdef CONFIG_SCSI void note_scsi_host(struct device_node *node, void *host) { } EXPORT_SYMBOL(note_scsi_host); #endif static int initializing = 1; static int pmac_late_init(void) { initializing = 0; /* this is udbg (which is __init) and we can later use it during * cpu hotplug (in smp_core99_kick_cpu) */ ppc_md.progress = NULL; return 0; } machine_late_initcall(powermac, pmac_late_init); /* * This is __init_refok because we check for "initializing" before * touching any of the __init sensitive things and "initializing" * will be false after __init time. This can't be __init because it * can be called whenever a disk is first accessed. 
*/ void __init_refok note_bootable_part(dev_t dev, int part, int goodness) { char *p; if (!initializing) return; if ((goodness <= current_root_goodness) && ROOT_DEV != DEFAULT_ROOT_DEVICE) return; p = strstr(boot_command_line, "root="); if (p != NULL && (p == boot_command_line || p[-1] == ' ')) return; ROOT_DEV = dev + part; current_root_goodness = goodness; } #ifdef CONFIG_ADB_CUDA static void cuda_restart(void) { struct adb_request req; cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_RESET_SYSTEM); for (;;) cuda_poll(); } static void cuda_shutdown(void) { struct adb_request req; cuda_request(&req, NULL, 2, CUDA_PACKET, CUDA_POWERDOWN); for (;;) cuda_poll(); } #else #define cuda_restart() #define cuda_shutdown() #endif #ifndef CONFIG_ADB_PMU #define pmu_restart() #define pmu_shutdown() #endif #ifndef CONFIG_PMAC_SMU #define smu_restart() #define smu_shutdown() #endif static void pmac_restart(char *cmd) { switch (sys_ctrler) { case SYS_CTRLER_CUDA: cuda_restart(); break; case SYS_CTRLER_PMU: pmu_restart(); break; case SYS_CTRLER_SMU: smu_restart(); break; default: ; } } static void pmac_power_off(void) { switch (sys_ctrler) { case SYS_CTRLER_CUDA: cuda_shutdown(); break; case SYS_CTRLER_PMU: pmu_shutdown(); break; case SYS_CTRLER_SMU: smu_shutdown(); break; default: ; } } static void pmac_halt(void) { pmac_power_off(); } /* * Early initialization. */ static void __init pmac_init_early(void) { /* Enable early btext debug if requested */ if (strstr(cmd_line, "btextdbg")) { udbg_adb_init_early(); register_early_udbg_console(); } /* Probe motherboard chipset */ pmac_feature_init(); /* Initialize debug stuff */ udbg_scc_init(!!strstr(cmd_line, "sccdbg")); udbg_adb_init(!!strstr(cmd_line, "btextdbg")); #ifdef CONFIG_PPC64 iommu_init_early_dart(); #endif /* SMP Init has to be done early as we need to patch up * cpu_possible_mask before interrupt stacks are allocated * or kaboom... 
*/ #ifdef CONFIG_SMP pmac_setup_smp(); #endif } static int __init pmac_declare_of_platform_devices(void) { struct device_node *np; if (machine_is(chrp)) return -1; np = of_find_node_by_name(NULL, "valkyrie"); if (np) of_platform_device_create(np, "valkyrie", NULL); np = of_find_node_by_name(NULL, "platinum"); if (np) of_platform_device_create(np, "platinum", NULL); np = of_find_node_by_type(NULL, "smu"); if (np) { of_platform_device_create(np, "smu", NULL); of_node_put(np); } np = of_find_node_by_type(NULL, "fcu"); if (np == NULL) { /* Some machines have strangely broken device-tree */ np = of_find_node_by_path("/u3@0,f8000000/i2c@f8001000/fan@15e"); } if (np) { of_platform_device_create(np, "temperature", NULL); of_node_put(np); } return 0; } machine_device_initcall(powermac, pmac_declare_of_platform_devices); #ifdef CONFIG_SERIAL_PMACZILOG_CONSOLE /* * This is called very early, as part of console_init() (typically just after * time_init()). This function is respondible for trying to find a good * default console on serial ports. It tries to match the open firmware * default output with one of the available serial console drivers. */ static int __init check_pmac_serial_console(void) { struct device_node *prom_stdout = NULL; int offset = 0; const char *name; #ifdef CONFIG_SERIAL_PMACZILOG_TTYS char *devname = "ttyS"; #else char *devname = "ttyPZ"; #endif pr_debug(" -> check_pmac_serial_console()\n"); /* The user has requested a console so this is already set up. */ if (strstr(boot_command_line, "console=")) { pr_debug(" console was specified !\n"); return -EBUSY; } if (!of_chosen) { pr_debug(" of_chosen is NULL !\n"); return -ENODEV; } /* We are getting a weird phandle from OF ... */ /* ... 
So use the full path instead */ name = of_get_property(of_chosen, "linux,stdout-path", NULL); if (name == NULL) { pr_debug(" no linux,stdout-path !\n"); return -ENODEV; } prom_stdout = of_find_node_by_path(name); if (!prom_stdout) { pr_debug(" can't find stdout package %s !\n", name); return -ENODEV; } pr_debug("stdout is %s\n", prom_stdout->full_name); name = of_get_property(prom_stdout, "name", NULL); if (!name) { pr_debug(" stdout package has no name !\n"); goto not_found; } if (strcmp(name, "ch-a") == 0) offset = 0; else if (strcmp(name, "ch-b") == 0) offset = 1; else goto not_found; of_node_put(prom_stdout); pr_debug("Found serial console at %s%d\n", devname, offset); return add_preferred_console(devname, offset, NULL); not_found: pr_debug("No preferred console found !\n"); of_node_put(prom_stdout); return -ENODEV; } console_initcall(check_pmac_serial_console); #endif /* CONFIG_SERIAL_PMACZILOG_CONSOLE */ /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init pmac_probe(void) { unsigned long root = of_get_flat_dt_root(); if (!of_flat_dt_is_compatible(root, "Power Macintosh") && !of_flat_dt_is_compatible(root, "MacRISC")) return 0; #ifdef CONFIG_PPC64 /* * On U3, the DART (iommu) must be allocated now since it * has an impact on htab_initialize (due to the large page it * occupies having to be broken up so the DART itself is not * part of the cacheable linar mapping */ alloc_dart_table(); hpte_init_native(); #endif #ifdef CONFIG_PPC32 /* isa_io_base gets set in pmac_pci_init */ ISA_DMA_THRESHOLD = ~0L; DMA_MODE_READ = 1; DMA_MODE_WRITE = 2; #endif /* CONFIG_PPC32 */ #ifdef CONFIG_PMAC_SMU /* * SMU based G5s need some memory below 2Gb, at least the current * driver needs that. We have to allocate it now. We allocate 4k * (1 small page) for now. 
*/ smu_cmdbuf_abs = memblock_alloc_base(4096, 4096, 0x80000000UL); #endif /* CONFIG_PMAC_SMU */ return 1; } #ifdef CONFIG_PPC64 /* Move that to pci.c */ static int pmac_pci_probe_mode(struct pci_bus *bus) { struct device_node *node = pci_bus_to_OF_node(bus); /* We need to use normal PCI probing for the AGP bus, * since the device for the AGP bridge isn't in the tree. * Same for the PCIe host on U4 and the HT host bridge. */ if (bus->self == NULL && (of_device_is_compatible(node, "u3-agp") || of_device_is_compatible(node, "u4-pcie") || of_device_is_compatible(node, "u3-ht"))) return PCI_PROBE_NORMAL; return PCI_PROBE_DEVTREE; } #endif /* CONFIG_PPC64 */ define_machine(powermac) { .name = "PowerMac", .probe = pmac_probe, .setup_arch = pmac_setup_arch, .init_early = pmac_init_early, .show_cpuinfo = pmac_show_cpuinfo, .init_IRQ = pmac_pic_init, .get_irq = NULL, /* changed later */ .pci_irq_fixup = pmac_pci_irq_fixup, .restart = pmac_restart, .power_off = pmac_power_off, .halt = pmac_halt, .time_init = pmac_time_init, .get_boot_time = pmac_get_boot_time, .set_rtc_time = pmac_set_rtc_time, .get_rtc_time = pmac_get_rtc_time, .calibrate_decr = pmac_calibrate_decr, .feature_call = pmac_do_feature_call, .progress = udbg_progress, #ifdef CONFIG_PPC64 .pci_probe_mode = pmac_pci_probe_mode, .power_save = power4_idle, .enable_pmcs = power4_enable_pmcs, #endif /* CONFIG_PPC64 */ #ifdef CONFIG_PPC32 .pcibios_enable_device_hook = pmac_pci_enable_device_hook, .pcibios_after_init = pmac_pcibios_after_init, .phys_mem_access_prot = pci_phys_mem_access_prot, #endif };
gpl-2.0
pio-masaki/kernel_at300se
arch/mips/cavium-octeon/flash_setup.c
2588
2024
/* * Octeon Bootbus flash setup * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2007, 2008 Cavium Networks */ #include <linux/kernel.h> #include <linux/mtd/mtd.h> #include <linux/mtd/map.h> #include <linux/mtd/partitions.h> #include <asm/octeon/octeon.h> static struct map_info flash_map; static struct mtd_info *mymtd; static int nr_parts; static struct mtd_partition *parts; static const char *part_probe_types[] = { "cmdlinepart", #ifdef CONFIG_MTD_REDBOOT_PARTS "RedBoot", #endif NULL }; /** * Module/ driver initialization. * * Returns Zero on success */ static int __init flash_init(void) { /* * Read the bootbus region 0 setup to determine the base * address of the flash. */ union cvmx_mio_boot_reg_cfgx region_cfg; region_cfg.u64 = cvmx_read_csr(CVMX_MIO_BOOT_REG_CFGX(0)); if (region_cfg.s.en) { /* * The bootloader always takes the flash and sets its * address so the entire flash fits below * 0x1fc00000. This way the flash aliases to * 0x1fc00000 for booting. Software can access the * full flash at the true address, while core boot can * access 4MB. */ /* Use this name so old part lines work */ flash_map.name = "phys_mapped_flash"; flash_map.phys = region_cfg.s.base << 16; flash_map.size = 0x1fc00000 - flash_map.phys; flash_map.bankwidth = 1; flash_map.virt = ioremap(flash_map.phys, flash_map.size); pr_notice("Bootbus flash: Setting flash for %luMB flash at " "0x%08llx\n", flash_map.size >> 20, flash_map.phys); simple_map_init(&flash_map); mymtd = do_map_probe("cfi_probe", &flash_map); if (mymtd) { mymtd->owner = THIS_MODULE; nr_parts = parse_mtd_partitions(mymtd, part_probe_types, &parts, 0); mtd_device_register(mymtd, parts, nr_parts); } else { pr_err("Failed to register MTD device for flash\n"); } } return 0; } late_initcall(flash_init);
gpl-2.0
Elite-Kernels/Elite_angler
arch/sparc/kernel/pci_fire.c
3100
14146
/* pci_fire.c: Sun4u platform PCI-E controller support.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/export.h>
#include <linux/irq.h>
#include <linux/of_device.h>
#include <asm/prom.h>
#include <asm/irq.h>
#include <asm/upa.h>

#include "pci_impl.h"

#define DRIVER_NAME	"fire"
#define PFX		DRIVER_NAME ": "

/* IOMMU register offsets, relative to pbm->pbm_regs. */
#define FIRE_IOMMU_CONTROL	0x40000UL
#define FIRE_IOMMU_TSBBASE	0x40008UL
#define FIRE_IOMMU_FLUSH	0x40100UL
#define FIRE_IOMMU_FLUSHINV	0x40108UL

/* Set up and enable the Fire IOMMU: program the register addresses into
 * the generic iommu struct, invalidate the TLB, build the TSB via
 * iommu_table_init(), then enable translation.  Returns 0 or the error
 * from iommu_table_init().
 */
static int pci_fire_pbm_iommu_init(struct pci_pbm_info *pbm)
{
	struct iommu *iommu = pbm->iommu;
	u32 vdma[2], dma_mask;
	u64 control;
	int tsbsize, err;

	/* No virtual-dma property on these guys, use largest size.  */
	vdma[0] = 0xc0000000; /* base */
	vdma[1] = 0x40000000; /* size */
	dma_mask = 0xffffffff;
	tsbsize = 128;

	/* Register addresses.  */
	iommu->iommu_control  = pbm->pbm_regs + FIRE_IOMMU_CONTROL;
	iommu->iommu_tsbbase  = pbm->pbm_regs + FIRE_IOMMU_TSBBASE;
	iommu->iommu_flush    = pbm->pbm_regs + FIRE_IOMMU_FLUSH;
	iommu->iommu_flushinv = pbm->pbm_regs + FIRE_IOMMU_FLUSHINV;

	/* We use the main control/status register of FIRE as the write
	 * completion register.
	 */
	iommu->write_complete_reg = pbm->controller_regs + 0x410000UL;

	/*
	 * Invalidate TLB Entries.
	 */
	upa_writeq(~(u64)0, iommu->iommu_flushinv);

	err = iommu_table_init(iommu, tsbsize * 8 * 1024, vdma[0], dma_mask,
			       pbm->numa_node);
	if (err)
		return err;

	/* Low 3 bits of the TSB base register encode mode/size bits. */
	upa_writeq(__pa(iommu->page_table) | 0x7UL, iommu->iommu_tsbbase);

	control = upa_readq(iommu->iommu_control);
	control |= (0x00000400 /* TSB cache snoop enable */	|
		    0x00000300 /* Cache mode */			|
		    0x00000002 /* Bypass enable */		|
		    0x00000001 /* Translation enable */);
	upa_writeq(control, iommu->iommu_control);

	return 0;
}

#ifdef CONFIG_PCI_MSI
/* One entry of the in-memory MSI event queue ring.  Layout and field
 * masks are defined by the Fire hardware.
 */
struct pci_msiq_entry {
	u64		word0;
#define MSIQ_WORD0_RESV			0x8000000000000000UL
#define MSIQ_WORD0_FMT_TYPE		0x7f00000000000000UL
#define MSIQ_WORD0_FMT_TYPE_SHIFT	56
#define MSIQ_WORD0_LEN			0x00ffc00000000000UL
#define MSIQ_WORD0_LEN_SHIFT		46
#define MSIQ_WORD0_ADDR0		0x00003fff00000000UL
#define MSIQ_WORD0_ADDR0_SHIFT		32
#define MSIQ_WORD0_RID			0x00000000ffff0000UL
#define MSIQ_WORD0_RID_SHIFT		16
#define MSIQ_WORD0_DATA0		0x000000000000ffffUL
#define MSIQ_WORD0_DATA0_SHIFT		0

#define MSIQ_TYPE_MSG			0x6
#define MSIQ_TYPE_MSI32			0xb
#define MSIQ_TYPE_MSI64			0xf

	u64		word1;
#define MSIQ_WORD1_ADDR1		0xffffffffffff0000UL
#define MSIQ_WORD1_ADDR1_SHIFT		16
#define MSIQ_WORD1_DATA1		0x000000000000ffffUL
#define MSIQ_WORD1_DATA1_SHIFT		0

	u64		resv[6];
};

/* All MSI registers are offset from pbm->pbm_regs */
#define EVENT_QUEUE_BASE_ADDR_REG	0x010000UL
#define EVENT_QUEUE_BASE_ADDR_ALL_ONES	0xfffc000000000000UL

#define EVENT_QUEUE_CONTROL_SET(EQ)	(0x011000UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_SET_OFLOW	0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_SET_EN	0x0000100000000000UL

#define EVENT_QUEUE_CONTROL_CLEAR(EQ)	(0x011200UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_CONTROL_CLEAR_OF	0x0200000000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_E2I	0x0000800000000000UL
#define EVENT_QUEUE_CONTROL_CLEAR_DIS	0x0000100000000000UL

#define EVENT_QUEUE_STATE(EQ)		(0x011400UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_STATE_MASK		0x0000000000000007UL
#define EVENT_QUEUE_STATE_IDLE		0x0000000000000001UL
#define EVENT_QUEUE_STATE_ACTIVE	0x0000000000000002UL
#define EVENT_QUEUE_STATE_ERROR		0x0000000000000004UL

#define EVENT_QUEUE_TAIL(EQ)		(0x011600UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_TAIL_OFLOW		0x0200000000000000UL
#define EVENT_QUEUE_TAIL_VAL		0x000000000000007fUL

#define EVENT_QUEUE_HEAD(EQ)		(0x011800UL + (EQ) * 0x8UL)
#define EVENT_QUEUE_HEAD_VAL		0x000000000000007fUL

#define MSI_MAP(MSI)			(0x020000UL + (MSI) * 0x8UL)
#define MSI_MAP_VALID			0x8000000000000000UL
#define MSI_MAP_EQWR_N			0x4000000000000000UL
#define MSI_MAP_EQNUM			0x000000000000003fUL

#define MSI_CLEAR(MSI)			(0x028000UL + (MSI) * 0x8UL)
#define MSI_CLEAR_EQWR_N		0x4000000000000000UL

#define IMONDO_DATA0			0x02C000UL
#define IMONDO_DATA0_DATA		0xffffffffffffffc0UL

#define IMONDO_DATA1			0x02C008UL
#define IMONDO_DATA1_DATA		0xffffffffffffffffUL

#define MSI_32BIT_ADDR			0x034000UL
#define MSI_32BIT_ADDR_VAL		0x00000000ffff0000UL

#define MSI_64BIT_ADDR			0x034008UL
#define MSI_64BIT_ADDR_VAL		0xffffffffffff0000UL

/* Read the hardware head pointer of event queue 'msiqid'. */
static int pci_fire_get_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long *head)
{
	*head = upa_readq(pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

/* Pull one MSI out of the in-memory event queue at *head.
 * Returns 0 when the entry at *head is empty, 1 when an MSI was
 * dequeued (with *msi and *head updated), -EINVAL on a non-MSI
 * entry type.
 */
static int pci_fire_dequeue_msi(struct pci_pbm_info *pbm, unsigned long msiqid,
				unsigned long *head, unsigned long *msi)
{
	unsigned long type_fmt, type, msi_num;
	struct pci_msiq_entry *base, *ep;

	/* Each queue occupies 8192 bytes within the msi_queues block. */
	base = (pbm->msi_queues + ((msiqid - pbm->msiq_first) * 8192));
	ep = &base[*head];

	if ((ep->word0 & MSIQ_WORD0_FMT_TYPE) == 0)
		return 0;

	type_fmt = ((ep->word0 & MSIQ_WORD0_FMT_TYPE) >>
		    MSIQ_WORD0_FMT_TYPE_SHIFT);
	type = (type_fmt >> 3);
	if (unlikely(type != MSIQ_TYPE_MSI32 &&
		     type != MSIQ_TYPE_MSI64))
		return -EINVAL;

	*msi = msi_num = ((ep->word0 & MSIQ_WORD0_DATA0) >>
			  MSIQ_WORD0_DATA0_SHIFT);

	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi_num));

	/* Clear the entry.  */
	ep->word0 &= ~MSIQ_WORD0_FMT_TYPE;

	/* Go to next entry in ring.  */
	(*head)++;
	if (*head >= pbm->msiq_ent_count)
		*head = 0;

	return 1;
}

/* Write the head pointer of event queue 'msiqid' back to hardware. */
static int pci_fire_set_head(struct pci_pbm_info *pbm, unsigned long msiqid,
			     unsigned long head)
{
	upa_writeq(head, pbm->pbm_regs + EVENT_QUEUE_HEAD(msiqid));
	return 0;
}

/* Bind MSI 'msi' to event queue 'msiqid' and mark the mapping valid.
 * The valid bit is set in a second write after the EQ number is
 * programmed and the clear register is reset.
 */
static int pci_fire_msi_setup(struct pci_pbm_info *pbm, unsigned long msiqid,
			      unsigned long msi, int is_msi64)
{
	u64 val;

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val &= ~(MSI_MAP_EQNUM);
	val |= msiqid;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	upa_writeq(MSI_CLEAR_EQWR_N, pbm->pbm_regs + MSI_CLEAR(msi));

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));
	val |= MSI_MAP_VALID;
	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}

/* Invalidate the mapping for MSI 'msi'. */
static int pci_fire_msi_teardown(struct pci_pbm_info *pbm, unsigned long msi)
{
	u64 val;

	val = upa_readq(pbm->pbm_regs + MSI_MAP(msi));

	val &= ~MSI_MAP_VALID;

	upa_writeq(val, pbm->pbm_regs + MSI_MAP(msi));

	return 0;
}

/* Allocate the 512KB MSI event queue area and program its base address,
 * the interrupt mondo data, the MSI address windows, and reset every
 * queue's head/tail pointers.
 */
static int pci_fire_msiq_alloc(struct pci_pbm_info *pbm)
{
	unsigned long pages, order, i;

	order = get_order(512 * 1024);
	pages = __get_free_pages(GFP_KERNEL | __GFP_COMP, order);
	if (pages == 0UL) {
		printk(KERN_ERR "MSI: Cannot allocate MSI queues (o=%lu).\n",
		       order);
		return -ENOMEM;
	}
	memset((char *)pages, 0, PAGE_SIZE << order);
	pbm->msi_queues = (void *) pages;

	upa_writeq((EVENT_QUEUE_BASE_ADDR_ALL_ONES |
		    __pa(pbm->msi_queues)),
		   pbm->pbm_regs + EVENT_QUEUE_BASE_ADDR_REG);

	/* Mondo data encodes the portid in bits above the devino field. */
	upa_writeq(pbm->portid << 6, pbm->pbm_regs + IMONDO_DATA0);
	upa_writeq(0, pbm->pbm_regs + IMONDO_DATA1);

	upa_writeq(pbm->msi32_start, pbm->pbm_regs + MSI_32BIT_ADDR);
	upa_writeq(pbm->msi64_start, pbm->pbm_regs + MSI_64BIT_ADDR);

	for (i = 0; i < pbm->msiq_num; i++) {
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_HEAD(i));
		upa_writeq(0, pbm->pbm_regs + EVENT_QUEUE_TAIL(i));
	}

	return 0;
}

/* Free the MSI event queue area allocated by pci_fire_msiq_alloc(). */
static void pci_fire_msiq_free(struct pci_pbm_info *pbm)
{
	unsigned long pages, order;

	order = get_order(512 * 1024);
	pages = (unsigned long) pbm->msi_queues;

	free_pages(pages, order);

	pbm->msi_queues = NULL;
}

/* Build a Linux irq for the given event queue's device interrupt and
 * enable the queue.  Returns the irq number (positive) or -ENOMEM.
 */
static int pci_fire_msiq_build_irq(struct pci_pbm_info *pbm,
				   unsigned long msiqid, unsigned long devino)
{
	unsigned long cregs = (unsigned long) pbm->pbm_regs;
	unsigned long imap_reg, iclr_reg, int_ctrlr;
	unsigned int irq;
	int fixup;
	u64 val;

	imap_reg = cregs + (0x001000UL + (devino * 0x08UL));
	iclr_reg = cregs + (0x001400UL + (devino * 0x08UL));

	/* XXX iterate amongst the 4 IRQ controllers XXX */
	int_ctrlr = (1UL << 6);

	val = upa_readq(imap_reg);
	val |= (1UL << 63) | int_ctrlr;
	upa_writeq(val, imap_reg);

	fixup = ((pbm->portid << 6) | devino) - int_ctrlr;

	irq = build_irq(fixup, iclr_reg, imap_reg);
	if (!irq)
		return -ENOMEM;

	upa_writeq(EVENT_QUEUE_CONTROL_SET_EN,
		   pbm->pbm_regs + EVENT_QUEUE_CONTROL_SET(msiqid));

	return irq;
}

/* Hooks consumed by the generic sparc64 MSI queue layer. */
static const struct sparc64_msiq_ops pci_fire_msiq_ops = {
	.get_head	=	pci_fire_get_head,
	.dequeue_msi	=	pci_fire_dequeue_msi,
	.set_head	=	pci_fire_set_head,
	.msi_setup	=	pci_fire_msi_setup,
	.msi_teardown	=	pci_fire_msi_teardown,
	.msiq_alloc	=	pci_fire_msiq_alloc,
	.msiq_free	=	pci_fire_msiq_free,
	.msiq_build_irq	=	pci_fire_msiq_build_irq,
};

static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
	sparc64_pbm_msi_init(pbm, &pci_fire_msiq_ops);
}
#else /* CONFIG_PCI_MSI */
/* MSI support compiled out: no-op stub. */
static void pci_fire_msi_init(struct pci_pbm_info *pbm)
{
}
#endif /* !(CONFIG_PCI_MSI) */

/* Based at pbm->controller_regs */
#define FIRE_PARITY_CONTROL	0x470010UL
#define  FIRE_PARITY_ENAB	0x8000000000000000UL
#define FIRE_FATAL_RESET_CTL	0x471028UL
#define  FIRE_FATAL_RESET_SPARE	0x0000000004000000UL
#define  FIRE_FATAL_RESET_MB	0x0000000002000000UL
#define  FIRE_FATAL_RESET_CPE	0x0000000000008000UL
#define  FIRE_FATAL_RESET_APE	0x0000000000004000UL
#define  FIRE_FATAL_RESET_PIO	0x0000000000000040UL
#define  FIRE_FATAL_RESET_JW	0x0000000000000004UL
#define  FIRE_FATAL_RESET_JI	0x0000000000000002UL
#define  FIRE_FATAL_RESET_JR	0x0000000000000001UL
#define FIRE_CORE_INTR_ENABLE	0x471800UL

/* Based at pbm->pbm_regs */
#define FIRE_TLU_CTRL		0x80000UL
#define  FIRE_TLU_CTRL_TIM	0x00000000da000000UL
#define  FIRE_TLU_CTRL_QDET	0x0000000000000100UL
#define  FIRE_TLU_CTRL_CFG	0x0000000000000001UL
#define FIRE_TLU_DEV_CTRL	0x90008UL
#define FIRE_TLU_LINK_CTRL	0x90020UL
#define FIRE_TLU_LINK_CTRL_CLK	0x0000000000000040UL
#define FIRE_LPU_RESET		0xe2008UL
#define FIRE_LPU_LLCFG		0xe2200UL
#define  FIRE_LPU_LLCFG_VC0	0x0000000000000100UL
#define FIRE_LPU_FCTRL_UCTRL	0xe2240UL
#define  FIRE_LPU_FCTRL_UCTRL_N	0x0000000000000002UL
#define  FIRE_LPU_FCTRL_UCTRL_P	0x0000000000000001UL
#define FIRE_LPU_TXL_FIFOP	0xe2430UL
#define FIRE_LPU_LTSSM_CFG2	0xe2788UL
#define FIRE_LPU_LTSSM_CFG3	0xe2790UL
#define FIRE_LPU_LTSSM_CFG4	0xe2798UL
#define FIRE_LPU_LTSSM_CFG5	0xe27a0UL
#define FIRE_DMC_IENAB		0x31800UL
#define FIRE_DMC_DBG_SEL_A	0x53000UL
#define FIRE_DMC_DBG_SEL_B	0x53008UL
#define FIRE_PEC_IENAB		0x51800UL

/* One-time hardware bring-up of a Fire PBM: parity, fatal-reset mask,
 * core interrupts, TLU/LPU link configuration, and DMC/PEC interrupt
 * enables.  The write order follows the original programming sequence
 * and should not be rearranged.
 */
static void pci_fire_hw_init(struct pci_pbm_info *pbm)
{
	u64 val;

	upa_writeq(FIRE_PARITY_ENAB,
		   pbm->controller_regs + FIRE_PARITY_CONTROL);

	upa_writeq((FIRE_FATAL_RESET_SPARE |
		    FIRE_FATAL_RESET_MB |
		    FIRE_FATAL_RESET_CPE |
		    FIRE_FATAL_RESET_APE |
		    FIRE_FATAL_RESET_PIO |
		    FIRE_FATAL_RESET_JW |
		    FIRE_FATAL_RESET_JI |
		    FIRE_FATAL_RESET_JR),
		   pbm->controller_regs + FIRE_FATAL_RESET_CTL);

	upa_writeq(~(u64)0, pbm->controller_regs + FIRE_CORE_INTR_ENABLE);

	val = upa_readq(pbm->pbm_regs + FIRE_TLU_CTRL);
	val |= (FIRE_TLU_CTRL_TIM |
		FIRE_TLU_CTRL_QDET |
		FIRE_TLU_CTRL_CFG);
	upa_writeq(val, pbm->pbm_regs + FIRE_TLU_CTRL);
	upa_writeq(0, pbm->pbm_regs + FIRE_TLU_DEV_CTRL);
	upa_writeq(FIRE_TLU_LINK_CTRL_CLK,
		   pbm->pbm_regs + FIRE_TLU_LINK_CTRL);

	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_RESET);
	upa_writeq(FIRE_LPU_LLCFG_VC0, pbm->pbm_regs + FIRE_LPU_LLCFG);
	upa_writeq((FIRE_LPU_FCTRL_UCTRL_N |
		    FIRE_LPU_FCTRL_UCTRL_P),
		   pbm->pbm_regs + FIRE_LPU_FCTRL_UCTRL);
	upa_writeq(((0xffff << 16) | (0x0000 << 0)),
		   pbm->pbm_regs + FIRE_LPU_TXL_FIFOP);
	upa_writeq(3000000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG2);
	upa_writeq(500000, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG3);
	upa_writeq((2 << 16) | (140 << 8),
		   pbm->pbm_regs + FIRE_LPU_LTSSM_CFG4);
	upa_writeq(0, pbm->pbm_regs + FIRE_LPU_LTSSM_CFG5);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_DMC_IENAB);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_A);
	upa_writeq(0, pbm->pbm_regs + FIRE_DMC_DBG_SEL_B);

	upa_writeq(~(u64)0, pbm->pbm_regs + FIRE_PEC_IENAB);
}

/* Initialize one PBM: fill in software state from the device tree,
 * run hardware init, IOMMU init, MSI init, then scan the PCI bus and
 * link the PBM into the global pci_pbm_root list.
 */
static int pci_fire_pbm_init(struct pci_pbm_info *pbm,
			     struct platform_device *op, u32 portid)
{
	const struct linux_prom64_registers *regs;
	struct device_node *dp = op->dev.of_node;
	int err;

	pbm->numa_node = -1;

	pbm->pci_ops = &sun4u_pci_ops;
	pbm->config_space_reg_bits = 12;

	pbm->index = pci_num_pbms++;

	pbm->portid = portid;
	pbm->op = op;
	pbm->name = dp->full_name;

	regs = of_get_property(dp, "reg", NULL);
	pbm->pbm_regs = regs[0].phys_addr;
	/* Main control/status space sits 0x410000 below reg[1]. */
	pbm->controller_regs = regs[1].phys_addr - 0x410000UL;

	printk("%s: SUN4U PCIE Bus Module\n", pbm->name);

	pci_determine_mem_io_space(pbm);

	pci_get_pbm_props(pbm);

	pci_fire_hw_init(pbm);

	err = pci_fire_pbm_iommu_init(pbm);
	if (err)
		return err;

	pci_fire_msi_init(pbm);

	pbm->pci_bus = pci_scan_one_pbm(pbm, &op->dev);

	/* XXX register error interrupt handlers XXX */

	pbm->next = pci_pbm_root;
	pci_pbm_root = pbm;

	return 0;
}

/* platform_driver probe: allocate the PBM and its iommu, then hand
 * off to pci_fire_pbm_init().  Frees everything on failure.
 */
static int fire_probe(struct platform_device *op)
{
	struct device_node *dp = op->dev.of_node;
	struct pci_pbm_info *pbm;
	struct iommu *iommu;
	u32 portid;
	int err;

	portid = of_getintprop_default(dp, "portid", 0xff);

	err = -ENOMEM;
	pbm = kzalloc(sizeof(*pbm), GFP_KERNEL);
	if (!pbm) {
		printk(KERN_ERR PFX "Cannot allocate pci_pbminfo.\n");
		goto out_err;
	}

	iommu = kzalloc(sizeof(struct iommu), GFP_KERNEL);
	if (!iommu) {
		printk(KERN_ERR PFX "Cannot allocate PBM iommu.\n");
		goto out_free_controller;
	}

	pbm->iommu = iommu;

	err = pci_fire_pbm_init(pbm, op, portid);
	if (err)
		goto out_free_iommu;

	dev_set_drvdata(&op->dev, pbm);

	return 0;

out_free_iommu:
	kfree(pbm->iommu);

out_free_controller:
	kfree(pbm);

out_err:
	return err;
}

static const struct of_device_id fire_match[] = {
	{
		.name = "pci",
		.compatible = "pciex108e,80f0",
	},
	{},
};

static struct platform_driver fire_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = fire_match,
	},
	.probe		= fire_probe,
};

static int __init fire_init(void)
{
	return platform_driver_register(&fire_driver);
}

subsys_initcall(fire_init);
gpl-2.0
thanhphat11/android_kernel_xiaomi_msm8996
arch/sparc/kernel/chmc.c
3100
20522
/* chmc.c: Driver for UltraSPARC-III memory controller. * * Copyright (C) 2001, 2007, 2008 David S. Miller (davem@davemloft.net) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/list.h> #include <linux/string.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/of.h> #include <linux/of_device.h> #include <asm/spitfire.h> #include <asm/chmctrl.h> #include <asm/cpudata.h> #include <asm/oplib.h> #include <asm/prom.h> #include <asm/head.h> #include <asm/io.h> #include <asm/memctrl.h> #define DRV_MODULE_NAME "chmc" #define PFX DRV_MODULE_NAME ": " #define DRV_MODULE_VERSION "0.2" MODULE_AUTHOR("David S. Miller (davem@davemloft.net)"); MODULE_DESCRIPTION("UltraSPARC-III memory controller driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_MODULE_VERSION); static int mc_type; #define MC_TYPE_SAFARI 1 #define MC_TYPE_JBUS 2 static dimm_printer_t us3mc_dimm_printer; #define CHMCTRL_NDGRPS 2 #define CHMCTRL_NDIMMS 4 #define CHMC_DIMMS_PER_MC (CHMCTRL_NDGRPS * CHMCTRL_NDIMMS) /* OBP memory-layout property format. */ struct chmc_obp_map { unsigned char dimm_map[144]; unsigned char pin_map[576]; }; #define DIMM_LABEL_SZ 8 struct chmc_obp_mem_layout { /* One max 8-byte string label per DIMM. Usually * this matches the label on the motherboard where * that DIMM resides. */ char dimm_labels[CHMC_DIMMS_PER_MC][DIMM_LABEL_SZ]; /* If symmetric use map[0], else it is * asymmetric and map[1] should be used. 
*/ char symmetric; struct chmc_obp_map map[2]; }; #define CHMCTRL_NBANKS 4 struct chmc_bank_info { struct chmc *p; int bank_id; u64 raw_reg; int valid; int uk; int um; int lk; int lm; int interleave; unsigned long base; unsigned long size; }; struct chmc { struct list_head list; int portid; struct chmc_obp_mem_layout layout_prop; int layout_size; void __iomem *regs; u64 timing_control1; u64 timing_control2; u64 timing_control3; u64 timing_control4; u64 memaddr_control; struct chmc_bank_info logical_banks[CHMCTRL_NBANKS]; }; #define JBUSMC_REGS_SIZE 8 #define JB_MC_REG1_DIMM2_BANK3 0x8000000000000000UL #define JB_MC_REG1_DIMM1_BANK1 0x4000000000000000UL #define JB_MC_REG1_DIMM2_BANK2 0x2000000000000000UL #define JB_MC_REG1_DIMM1_BANK0 0x1000000000000000UL #define JB_MC_REG1_XOR 0x0000010000000000UL #define JB_MC_REG1_ADDR_GEN_2 0x000000e000000000UL #define JB_MC_REG1_ADDR_GEN_2_SHIFT 37 #define JB_MC_REG1_ADDR_GEN_1 0x0000001c00000000UL #define JB_MC_REG1_ADDR_GEN_1_SHIFT 34 #define JB_MC_REG1_INTERLEAVE 0x0000000001800000UL #define JB_MC_REG1_INTERLEAVE_SHIFT 23 #define JB_MC_REG1_DIMM2_PTYPE 0x0000000000200000UL #define JB_MC_REG1_DIMM2_PTYPE_SHIFT 21 #define JB_MC_REG1_DIMM1_PTYPE 0x0000000000100000UL #define JB_MC_REG1_DIMM1_PTYPE_SHIFT 20 #define PART_TYPE_X8 0 #define PART_TYPE_X4 1 #define INTERLEAVE_NONE 0 #define INTERLEAVE_SAME 1 #define INTERLEAVE_INTERNAL 2 #define INTERLEAVE_BOTH 3 #define ADDR_GEN_128MB 0 #define ADDR_GEN_256MB 1 #define ADDR_GEN_512MB 2 #define ADDR_GEN_1GB 3 #define JB_NUM_DIMM_GROUPS 2 #define JB_NUM_DIMMS_PER_GROUP 2 #define JB_NUM_DIMMS (JB_NUM_DIMM_GROUPS * JB_NUM_DIMMS_PER_GROUP) struct jbusmc_obp_map { unsigned char dimm_map[18]; unsigned char pin_map[144]; }; struct jbusmc_obp_mem_layout { /* One max 8-byte string label per DIMM. Usually * this matches the label on the motherboard where * that DIMM resides. 
*/ char dimm_labels[JB_NUM_DIMMS][DIMM_LABEL_SZ]; /* If symmetric use map[0], else it is * asymmetric and map[1] should be used. */ char symmetric; struct jbusmc_obp_map map; char _pad; }; struct jbusmc_dimm_group { struct jbusmc *controller; int index; u64 base_addr; u64 size; }; struct jbusmc { void __iomem *regs; u64 mc_reg_1; u32 portid; struct jbusmc_obp_mem_layout layout; int layout_len; int num_dimm_groups; struct jbusmc_dimm_group dimm_groups[JB_NUM_DIMM_GROUPS]; struct list_head list; }; static DEFINE_SPINLOCK(mctrl_list_lock); static LIST_HEAD(mctrl_list); static void mc_list_add(struct list_head *list) { spin_lock(&mctrl_list_lock); list_add(list, &mctrl_list); spin_unlock(&mctrl_list_lock); } static void mc_list_del(struct list_head *list) { spin_lock(&mctrl_list_lock); list_del_init(list); spin_unlock(&mctrl_list_lock); } #define SYNDROME_MIN -1 #define SYNDROME_MAX 144 /* Covert syndrome code into the way the bits are positioned * on the bus. */ static int syndrome_to_qword_code(int syndrome_code) { if (syndrome_code < 128) syndrome_code += 16; else if (syndrome_code < 128 + 9) syndrome_code -= (128 - 7); else if (syndrome_code < (128 + 9 + 3)) syndrome_code -= (128 + 9 - 4); else syndrome_code -= (128 + 9 + 3); return syndrome_code; } /* All this magic has to do with how a cache line comes over the wire * on Safari and JBUS. A 64-bit line comes over in 1 or more quadword * cycles, each of which transmit ECC/MTAG info as well as the actual * data. 
*/ #define L2_LINE_SIZE 64 #define L2_LINE_ADDR_MSK (L2_LINE_SIZE - 1) #define QW_PER_LINE 4 #define QW_BYTES (L2_LINE_SIZE / QW_PER_LINE) #define QW_BITS 144 #define SAFARI_LAST_BIT (576 - 1) #define JBUS_LAST_BIT (144 - 1) static void get_pin_and_dimm_str(int syndrome_code, unsigned long paddr, int *pin_p, char **dimm_str_p, void *_prop, int base_dimm_offset) { int qword_code = syndrome_to_qword_code(syndrome_code); int cache_line_offset; int offset_inverse; int dimm_map_index; int map_val; if (mc_type == MC_TYPE_JBUS) { struct jbusmc_obp_mem_layout *p = _prop; /* JBUS */ cache_line_offset = qword_code; offset_inverse = (JBUS_LAST_BIT - cache_line_offset); dimm_map_index = offset_inverse / 8; map_val = p->map.dimm_map[dimm_map_index]; map_val = ((map_val >> ((7 - (offset_inverse & 7)))) & 1); *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; *pin_p = p->map.pin_map[cache_line_offset]; } else { struct chmc_obp_mem_layout *p = _prop; struct chmc_obp_map *mp; int qword; /* Safari */ if (p->symmetric) mp = &p->map[0]; else mp = &p->map[1]; qword = (paddr & L2_LINE_ADDR_MSK) / QW_BYTES; cache_line_offset = ((3 - qword) * QW_BITS) + qword_code; offset_inverse = (SAFARI_LAST_BIT - cache_line_offset); dimm_map_index = offset_inverse >> 2; map_val = mp->dimm_map[dimm_map_index]; map_val = ((map_val >> ((3 - (offset_inverse & 3)) << 1)) & 0x3); *dimm_str_p = p->dimm_labels[base_dimm_offset + map_val]; *pin_p = mp->pin_map[cache_line_offset]; } } static struct jbusmc_dimm_group *jbusmc_find_dimm_group(unsigned long phys_addr) { struct jbusmc *p; list_for_each_entry(p, &mctrl_list, list) { int i; for (i = 0; i < p->num_dimm_groups; i++) { struct jbusmc_dimm_group *dp = &p->dimm_groups[i]; if (phys_addr < dp->base_addr || (dp->base_addr + dp->size) <= phys_addr) continue; return dp; } } return NULL; } static int jbusmc_print_dimm(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { struct jbusmc_obp_mem_layout *prop; struct jbusmc_dimm_group *dp; 
struct jbusmc *p; int first_dimm; dp = jbusmc_find_dimm_group(phys_addr); if (dp == NULL || syndrome_code < SYNDROME_MIN || syndrome_code > SYNDROME_MAX) { buf[0] = '?'; buf[1] = '?'; buf[2] = '?'; buf[3] = '\0'; return 0; } p = dp->controller; prop = &p->layout; first_dimm = dp->index * JB_NUM_DIMMS_PER_GROUP; if (syndrome_code != SYNDROME_MIN) { char *dimm_str; int pin; get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, &dimm_str, prop, first_dimm); sprintf(buf, "%s, pin %3d", dimm_str, pin); } else { int dimm; /* Multi-bit error, we just dump out all the * dimm labels associated with this dimm group. */ for (dimm = 0; dimm < JB_NUM_DIMMS_PER_GROUP; dimm++) { sprintf(buf, "%s ", prop->dimm_labels[first_dimm + dimm]); buf += strlen(buf); } } return 0; } static u64 jbusmc_dimm_group_size(u64 base, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { u64 max = base + (8UL * 1024 * 1024 * 1024); u64 max_seen = base; int i; for (i = 0; i < num_mem_regs; i++) { const struct linux_prom64_registers *ent; u64 this_base; u64 this_end; ent = &mem_regs[i]; this_base = ent->phys_addr; this_end = this_base + ent->reg_size; if (base < this_base || base >= this_end) continue; if (this_end > max) this_end = max; if (this_end > max_seen) max_seen = this_end; } return max_seen - base; } static void jbusmc_construct_one_dimm_group(struct jbusmc *p, unsigned long index, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { struct jbusmc_dimm_group *dp = &p->dimm_groups[index]; dp->controller = p; dp->index = index; dp->base_addr = (p->portid * (64UL * 1024 * 1024 * 1024)); dp->base_addr += (index * (8UL * 1024 * 1024 * 1024)); dp->size = jbusmc_dimm_group_size(dp->base_addr, mem_regs, num_mem_regs); } static void jbusmc_construct_dimm_groups(struct jbusmc *p, const struct linux_prom64_registers *mem_regs, int num_mem_regs) { if (p->mc_reg_1 & JB_MC_REG1_DIMM1_BANK0) { jbusmc_construct_one_dimm_group(p, 0, mem_regs, num_mem_regs); p->num_dimm_groups++; } 
if (p->mc_reg_1 & JB_MC_REG1_DIMM2_BANK2) { jbusmc_construct_one_dimm_group(p, 1, mem_regs, num_mem_regs); p->num_dimm_groups++; } } static int jbusmc_probe(struct platform_device *op) { const struct linux_prom64_registers *mem_regs; struct device_node *mem_node; int err, len, num_mem_regs; struct jbusmc *p; const u32 *prop; const void *ml; err = -ENODEV; mem_node = of_find_node_by_path("/memory"); if (!mem_node) { printk(KERN_ERR PFX "Cannot find /memory node.\n"); goto out; } mem_regs = of_get_property(mem_node, "reg", &len); if (!mem_regs) { printk(KERN_ERR PFX "Cannot get reg property of /memory node.\n"); goto out; } num_mem_regs = len / sizeof(*mem_regs); err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR PFX "Cannot allocate struct jbusmc.\n"); goto out; } INIT_LIST_HEAD(&p->list); err = -ENODEV; prop = of_get_property(op->dev.of_node, "portid", &len); if (!prop || len != 4) { printk(KERN_ERR PFX "Cannot find portid.\n"); goto out_free; } p->portid = *prop; prop = of_get_property(op->dev.of_node, "memory-control-register-1", &len); if (!prop || len != 8) { printk(KERN_ERR PFX "Cannot get memory control register 1.\n"); goto out_free; } p->mc_reg_1 = ((u64)prop[0] << 32) | (u64) prop[1]; err = -ENOMEM; p->regs = of_ioremap(&op->resource[0], 0, JBUSMC_REGS_SIZE, "jbusmc"); if (!p->regs) { printk(KERN_ERR PFX "Cannot map jbusmc regs.\n"); goto out_free; } err = -ENODEV; ml = of_get_property(op->dev.of_node, "memory-layout", &p->layout_len); if (!ml) { printk(KERN_ERR PFX "Cannot get memory layout property.\n"); goto out_iounmap; } if (p->layout_len > sizeof(p->layout)) { printk(KERN_ERR PFX "Unexpected memory-layout size %d\n", p->layout_len); goto out_iounmap; } memcpy(&p->layout, ml, p->layout_len); jbusmc_construct_dimm_groups(p, mem_regs, num_mem_regs); mc_list_add(&p->list); printk(KERN_INFO PFX "UltraSPARC-IIIi memory controller at %s\n", op->dev.of_node->full_name); dev_set_drvdata(&op->dev, p); err = 0; out: return err; 
out_iounmap: of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); out_free: kfree(p); goto out; } /* Does BANK decode PHYS_ADDR? */ static int chmc_bank_match(struct chmc_bank_info *bp, unsigned long phys_addr) { unsigned long upper_bits = (phys_addr & PA_UPPER_BITS) >> PA_UPPER_BITS_SHIFT; unsigned long lower_bits = (phys_addr & PA_LOWER_BITS) >> PA_LOWER_BITS_SHIFT; /* Bank must be enabled to match. */ if (bp->valid == 0) return 0; /* Would BANK match upper bits? */ upper_bits ^= bp->um; /* What bits are different? */ upper_bits = ~upper_bits; /* Invert. */ upper_bits |= bp->uk; /* What bits don't matter for matching? */ upper_bits = ~upper_bits; /* Invert. */ if (upper_bits) return 0; /* Would BANK match lower bits? */ lower_bits ^= bp->lm; /* What bits are different? */ lower_bits = ~lower_bits; /* Invert. */ lower_bits |= bp->lk; /* What bits don't matter for matching? */ lower_bits = ~lower_bits; /* Invert. */ if (lower_bits) return 0; /* I always knew you'd be the one. */ return 1; } /* Given PHYS_ADDR, search memory controller banks for a match. */ static struct chmc_bank_info *chmc_find_bank(unsigned long phys_addr) { struct chmc *p; list_for_each_entry(p, &mctrl_list, list) { int bank_no; for (bank_no = 0; bank_no < CHMCTRL_NBANKS; bank_no++) { struct chmc_bank_info *bp; bp = &p->logical_banks[bank_no]; if (chmc_bank_match(bp, phys_addr)) return bp; } } return NULL; } /* This is the main purpose of this driver. 
*/ static int chmc_print_dimm(int syndrome_code, unsigned long phys_addr, char *buf, int buflen) { struct chmc_bank_info *bp; struct chmc_obp_mem_layout *prop; int bank_in_controller, first_dimm; bp = chmc_find_bank(phys_addr); if (bp == NULL || syndrome_code < SYNDROME_MIN || syndrome_code > SYNDROME_MAX) { buf[0] = '?'; buf[1] = '?'; buf[2] = '?'; buf[3] = '\0'; return 0; } prop = &bp->p->layout_prop; bank_in_controller = bp->bank_id & (CHMCTRL_NBANKS - 1); first_dimm = (bank_in_controller & (CHMCTRL_NDGRPS - 1)); first_dimm *= CHMCTRL_NDIMMS; if (syndrome_code != SYNDROME_MIN) { char *dimm_str; int pin; get_pin_and_dimm_str(syndrome_code, phys_addr, &pin, &dimm_str, prop, first_dimm); sprintf(buf, "%s, pin %3d", dimm_str, pin); } else { int dimm; /* Multi-bit error, we just dump out all the * dimm labels associated with this bank. */ for (dimm = 0; dimm < CHMCTRL_NDIMMS; dimm++) { sprintf(buf, "%s ", prop->dimm_labels[first_dimm + dimm]); buf += strlen(buf); } } return 0; } /* Accessing the registers is slightly complicated. If you want * to get at the memory controller which is on the same processor * the code is executing, you must use special ASI load/store else * you go through the global mapping. 
*/ static u64 chmc_read_mcreg(struct chmc *p, unsigned long offset) { unsigned long ret, this_cpu; preempt_disable(); this_cpu = real_hard_smp_processor_id(); if (p->portid == this_cpu) { __asm__ __volatile__("ldxa [%1] %2, %0" : "=r" (ret) : "r" (offset), "i" (ASI_MCU_CTRL_REG)); } else { __asm__ __volatile__("ldxa [%1] %2, %0" : "=r" (ret) : "r" (p->regs + offset), "i" (ASI_PHYS_BYPASS_EC_E)); } preempt_enable(); return ret; } #if 0 /* currently unused */ static void chmc_write_mcreg(struct chmc *p, unsigned long offset, u64 val) { if (p->portid == smp_processor_id()) { __asm__ __volatile__("stxa %0, [%1] %2" : : "r" (val), "r" (offset), "i" (ASI_MCU_CTRL_REG)); } else { __asm__ __volatile__("ldxa %0, [%1] %2" : : "r" (val), "r" (p->regs + offset), "i" (ASI_PHYS_BYPASS_EC_E)); } } #endif static void chmc_interpret_one_decode_reg(struct chmc *p, int which_bank, u64 val) { struct chmc_bank_info *bp = &p->logical_banks[which_bank]; bp->p = p; bp->bank_id = (CHMCTRL_NBANKS * p->portid) + which_bank; bp->raw_reg = val; bp->valid = (val & MEM_DECODE_VALID) >> MEM_DECODE_VALID_SHIFT; bp->uk = (val & MEM_DECODE_UK) >> MEM_DECODE_UK_SHIFT; bp->um = (val & MEM_DECODE_UM) >> MEM_DECODE_UM_SHIFT; bp->lk = (val & MEM_DECODE_LK) >> MEM_DECODE_LK_SHIFT; bp->lm = (val & MEM_DECODE_LM) >> MEM_DECODE_LM_SHIFT; bp->base = (bp->um); bp->base &= ~(bp->uk); bp->base <<= PA_UPPER_BITS_SHIFT; switch(bp->lk) { case 0xf: default: bp->interleave = 1; break; case 0xe: bp->interleave = 2; break; case 0xc: bp->interleave = 4; break; case 0x8: bp->interleave = 8; break; case 0x0: bp->interleave = 16; break; } /* UK[10] is reserved, and UK[11] is not set for the SDRAM * bank size definition. 
*/ bp->size = (((unsigned long)bp->uk & ((1UL << 10UL) - 1UL)) + 1UL) << PA_UPPER_BITS_SHIFT; bp->size /= bp->interleave; } static void chmc_fetch_decode_regs(struct chmc *p) { if (p->layout_size == 0) return; chmc_interpret_one_decode_reg(p, 0, chmc_read_mcreg(p, CHMCTRL_DECODE1)); chmc_interpret_one_decode_reg(p, 1, chmc_read_mcreg(p, CHMCTRL_DECODE2)); chmc_interpret_one_decode_reg(p, 2, chmc_read_mcreg(p, CHMCTRL_DECODE3)); chmc_interpret_one_decode_reg(p, 3, chmc_read_mcreg(p, CHMCTRL_DECODE4)); } static int chmc_probe(struct platform_device *op) { struct device_node *dp = op->dev.of_node; unsigned long ver; const void *pval; int len, portid; struct chmc *p; int err; err = -ENODEV; __asm__ ("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) goto out; portid = of_getintprop_default(dp, "portid", -1); if (portid == -1) goto out; pval = of_get_property(dp, "memory-layout", &len); if (pval && len > sizeof(p->layout_prop)) { printk(KERN_ERR PFX "Unexpected memory-layout property " "size %d.\n", len); goto out; } err = -ENOMEM; p = kzalloc(sizeof(*p), GFP_KERNEL); if (!p) { printk(KERN_ERR PFX "Could not allocate struct chmc.\n"); goto out; } p->portid = portid; p->layout_size = len; if (!pval) p->layout_size = 0; else memcpy(&p->layout_prop, pval, len); p->regs = of_ioremap(&op->resource[0], 0, 0x48, "chmc"); if (!p->regs) { printk(KERN_ERR PFX "Could not map registers.\n"); goto out_free; } if (p->layout_size != 0UL) { p->timing_control1 = chmc_read_mcreg(p, CHMCTRL_TCTRL1); p->timing_control2 = chmc_read_mcreg(p, CHMCTRL_TCTRL2); p->timing_control3 = chmc_read_mcreg(p, CHMCTRL_TCTRL3); p->timing_control4 = chmc_read_mcreg(p, CHMCTRL_TCTRL4); p->memaddr_control = chmc_read_mcreg(p, CHMCTRL_MACTRL); } chmc_fetch_decode_regs(p); mc_list_add(&p->list); printk(KERN_INFO PFX "UltraSPARC-III memory controller at %s [%s]\n", dp->full_name, (p->layout_size ? 
"ACTIVE" : "INACTIVE")); dev_set_drvdata(&op->dev, p); err = 0; out: return err; out_free: kfree(p); goto out; } static int us3mc_probe(struct platform_device *op) { if (mc_type == MC_TYPE_SAFARI) return chmc_probe(op); else if (mc_type == MC_TYPE_JBUS) return jbusmc_probe(op); return -ENODEV; } static void chmc_destroy(struct platform_device *op, struct chmc *p) { list_del(&p->list); of_iounmap(&op->resource[0], p->regs, 0x48); kfree(p); } static void jbusmc_destroy(struct platform_device *op, struct jbusmc *p) { mc_list_del(&p->list); of_iounmap(&op->resource[0], p->regs, JBUSMC_REGS_SIZE); kfree(p); } static int us3mc_remove(struct platform_device *op) { void *p = dev_get_drvdata(&op->dev); if (p) { if (mc_type == MC_TYPE_SAFARI) chmc_destroy(op, p); else if (mc_type == MC_TYPE_JBUS) jbusmc_destroy(op, p); } return 0; } static const struct of_device_id us3mc_match[] = { { .name = "memory-controller", }, {}, }; MODULE_DEVICE_TABLE(of, us3mc_match); static struct platform_driver us3mc_driver = { .driver = { .name = "us3mc", .owner = THIS_MODULE, .of_match_table = us3mc_match, }, .probe = us3mc_probe, .remove = us3mc_remove, }; static inline bool us3mc_platform(void) { if (tlb_type == cheetah || tlb_type == cheetah_plus) return true; return false; } static int __init us3mc_init(void) { unsigned long ver; int ret; if (!us3mc_platform()) return -ENODEV; __asm__ __volatile__("rdpr %%ver, %0" : "=r" (ver)); if ((ver >> 32UL) == __JALAPENO_ID || (ver >> 32UL) == __SERRANO_ID) { mc_type = MC_TYPE_JBUS; us3mc_dimm_printer = jbusmc_print_dimm; } else { mc_type = MC_TYPE_SAFARI; us3mc_dimm_printer = chmc_print_dimm; } ret = register_dimm_printer(us3mc_dimm_printer); if (!ret) { ret = platform_driver_register(&us3mc_driver); if (ret) unregister_dimm_printer(us3mc_dimm_printer); } return ret; } static void __exit us3mc_cleanup(void) { if (us3mc_platform()) { unregister_dimm_printer(us3mc_dimm_printer); platform_driver_unregister(&us3mc_driver); } } module_init(us3mc_init); 
module_exit(us3mc_cleanup);
gpl-2.0
alteredlikeness/android_kernel_msm
arch/arm/mach-msm/rpc_hsusb.c
3356
15325
/* linux/arch/arm/mach-msm/rpc_hsusb.c
 *
 * Copyright (c) 2008-2012, The Linux Foundation. All rights reserved.
 *
 * All source code in this file is licensed under the following license except
 * where indicated.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 *
 * See the GNU General Public License for more details.
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can find it at http://www.fsf.org
 */

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <mach/rpc_hsusb.h>
#include <asm/mach-types.h>

/* RPC endpoints to the modem: one for HSUSB control, one for charging. */
static struct msm_rpc_endpoint *usb_ep;
static struct msm_rpc_endpoint *chg_ep;

#define MSM_RPC_CHG_PROG 0x3000001a

/* Per-version RPC procedure numbers for the charger service. */
struct msm_chg_rpc_ids {
	unsigned long	vers_comp;
	unsigned	chg_usb_charger_connected_proc;
	unsigned	chg_usb_charger_disconnected_proc;
	unsigned	chg_usb_i_is_available_proc;
	unsigned	chg_usb_i_is_not_available_proc;
};

/* Per-version RPC program/procedure numbers for the HSUSB service. */
struct msm_hsusb_rpc_ids {
	unsigned long	prog;
	unsigned long	vers_comp;
	unsigned long	init_phy;
	unsigned long	vbus_pwr_up;
	unsigned long	vbus_pwr_down;
	unsigned long	update_product_id;
	unsigned long	update_serial_num;
	unsigned long	update_is_serial_num_null;
	unsigned long	reset_rework_installed;
	unsigned long	enable_pmic_ulpi_data0;
	unsigned long	disable_pmic_ulpi_data0;
};

static struct msm_hsusb_rpc_ids usb_rpc_ids;
static struct msm_chg_rpc_ids chg_rpc_ids;

/*
 * Fill in the HSUSB RPC procedure ids for the given interface version.
 * The procedure numbers are identical for versions 0x00010001 and
 * 0x00010002 -- only the compatible version number differs -- so the
 * two previously-duplicated tables are merged here.
 * Returns 0 on success, -ENODATA for an unknown version.
 */
static int msm_hsusb_init_rpc_ids(unsigned long vers)
{
	if (vers == 0x00010001 || vers == 0x00010002) {
		usb_rpc_ids.prog			= 0x30000064;
		usb_rpc_ids.vers_comp			= vers;
		usb_rpc_ids.init_phy			= 2;
		usb_rpc_ids.vbus_pwr_up			= 6;
		usb_rpc_ids.vbus_pwr_down		= 7;
		usb_rpc_ids.update_product_id		= 8;
		usb_rpc_ids.update_serial_num		= 9;
		usb_rpc_ids.update_is_serial_num_null	= 10;
		usb_rpc_ids.reset_rework_installed	= 17;
		usb_rpc_ids.enable_pmic_ulpi_data0	= 18;
		usb_rpc_ids.disable_pmic_ulpi_data0	= 19;
		return 0;
	} else {
		pr_err("%s: no matches found for version\n", __func__);
		return -ENODATA;
	}
}

/*
 * Connect to the charger RPC service if the major version is supported
 * (1.x through 4.x) and fill in its procedure ids.
 */
static int msm_chg_init_rpc(unsigned long vers)
{
	if (((vers & RPC_VERSION_MAJOR_MASK) == 0x00010000) ||
	    ((vers & RPC_VERSION_MAJOR_MASK) == 0x00020000) ||
	    ((vers & RPC_VERSION_MAJOR_MASK) == 0x00030000) ||
	    ((vers & RPC_VERSION_MAJOR_MASK) == 0x00040000)) {
		chg_ep = msm_rpc_connect_compatible(MSM_RPC_CHG_PROG, vers,
						    MSM_RPC_UNINTERRUPTIBLE);
		if (IS_ERR(chg_ep))
			return -ENODATA;
		chg_rpc_ids.vers_comp				= vers;
		chg_rpc_ids.chg_usb_charger_connected_proc	= 7;
		chg_rpc_ids.chg_usb_charger_disconnected_proc	= 8;
		chg_rpc_ids.chg_usb_i_is_available_proc		= 9;
		chg_rpc_ids.chg_usb_i_is_not_available_proc	= 10;
		return 0;
	} else
		return -ENODATA;
}

/* rpc connect for hsusb; tries interface version 1.1, then 1.2 */
int msm_hsusb_rpc_connect(void)
{
	if (usb_ep && !IS_ERR(usb_ep)) {
		pr_debug("%s: usb_ep already connected\n", __func__);
		return 0;
	}

	/* Initialize rpc ids */
	if (msm_hsusb_init_rpc_ids(0x00010001)) {
		pr_err("%s: rpc ids initialization failed\n", __func__);
		return -ENODATA;
	}

	usb_ep = msm_rpc_connect_compatible(usb_rpc_ids.prog,
					    usb_rpc_ids.vers_comp,
					    MSM_RPC_UNINTERRUPTIBLE);

	if (IS_ERR(usb_ep)) {
		pr_err("%s: connect compatible failed vers = %lx\n",
		       __func__, usb_rpc_ids.vers_comp);

		/* Fall back to the next interface version */
		if (msm_hsusb_init_rpc_ids(0x00010002)) {
			pr_err("%s: rpc ids initialization failed\n",
			       __func__);
			return -ENODATA;
		}
		usb_ep = msm_rpc_connect_compatible(usb_rpc_ids.prog,
						    usb_rpc_ids.vers_comp,
						    MSM_RPC_UNINTERRUPTIBLE);
	}

	if (IS_ERR(usb_ep)) {
		pr_err("%s: connect compatible failed vers = %lx\n",
		       __func__, usb_rpc_ids.vers_comp);
		return -EAGAIN;
	} else
		pr_debug("%s: rpc connect success vers = %lx\n",
			 __func__, usb_rpc_ids.vers_comp);

	return 0;
}
EXPORT_SYMBOL(msm_hsusb_rpc_connect);

/* rpc connect for charging; probes versions from newest to oldest */
int msm_chg_rpc_connect(void)
{
	uint32_t chg_vers;

	if (machine_is_msm7x27_surf() || machine_is_qsd8x50_surf())
		return -ENOTSUPP;

	if (chg_ep && !IS_ERR(chg_ep)) {
		pr_debug("%s: chg_ep already connected\n", __func__);
		return 0;
	}

	chg_vers = 0x00040001;
	if (!msm_chg_init_rpc(chg_vers))
		goto chg_found;

	chg_vers = 0x00030001;
	if (!msm_chg_init_rpc(chg_vers))
		goto chg_found;

	chg_vers = 0x00020001;
	if (!msm_chg_init_rpc(chg_vers))
		goto chg_found;

	chg_vers = 0x00010001;
	if (!msm_chg_init_rpc(chg_vers))
		goto chg_found;

	pr_err("%s: connect compatible failed \n", __func__);
	return -EAGAIN;

chg_found:
	pr_debug("%s: connected to rpc vers = %x\n", __func__, chg_vers);
	return 0;
}
EXPORT_SYMBOL(msm_chg_rpc_connect);

/* rpc call for phy_reset */
int msm_hsusb_phy_reset(void)
{
	int rc = 0;
	struct hsusb_phy_start_req {
		struct rpc_request_hdr hdr;
	} req;

	if (!usb_ep || IS_ERR(usb_ep)) {
		pr_err("%s: phy_reset rpc failed before call,"
		       "rc = %ld\n", __func__, PTR_ERR(usb_ep));
		return -EAGAIN;
	}

	rc = msm_rpc_call(usb_ep, usb_rpc_ids.init_phy,
			  &req, sizeof(req), 5 * HZ);

	if (rc < 0) {
		pr_err("%s: phy_reset rpc failed! rc = %d\n",
		       __func__, rc);
	} else
		pr_debug("msm_hsusb_phy_reset\n");

	return rc;
}
EXPORT_SYMBOL(msm_hsusb_phy_reset);

/* rpc call for vbus powerup */
int msm_hsusb_vbus_powerup(void)
{
	int rc = 0;
	struct hsusb_phy_start_req {
		struct rpc_request_hdr hdr;
	} req;

	if (!usb_ep || IS_ERR(usb_ep)) {
		pr_err("%s: vbus_powerup rpc failed before call,"
		       "rc = %ld\n", __func__, PTR_ERR(usb_ep));
		return -EAGAIN;
	}

	rc = msm_rpc_call(usb_ep, usb_rpc_ids.vbus_pwr_up,
			  &req, sizeof(req), 5 * HZ);

	if (rc < 0) {
		pr_err("%s: vbus_powerup failed! rc = %d\n",
		       __func__, rc);
	} else
		pr_debug("msm_hsusb_vbus_powerup\n");

	return rc;
}
EXPORT_SYMBOL(msm_hsusb_vbus_powerup);

/* rpc call for vbus shutdown */
int msm_hsusb_vbus_shutdown(void)
{
	int rc = 0;
	struct hsusb_phy_start_req {
		struct rpc_request_hdr hdr;
	} req;

	if (!usb_ep || IS_ERR(usb_ep)) {
		pr_err("%s: vbus_shutdown rpc failed before call,"
		       "rc = %ld\n", __func__, PTR_ERR(usb_ep));
		return -EAGAIN;
	}

	rc = msm_rpc_call(usb_ep, usb_rpc_ids.vbus_pwr_down,
			  &req, sizeof(req), 5 * HZ);

	if (rc < 0) {
		pr_err("%s: vbus_shutdown failed! rc = %d\n",
		       __func__, rc);
	} else
		pr_debug("msm_hsusb_vbus_shutdown\n");

	return rc;
}
EXPORT_SYMBOL(msm_hsusb_vbus_shutdown);

/* Push the USB product id to the modem (big-endian on the wire). */
int msm_hsusb_send_productID(uint32_t product_id)
{
	int rc = 0;
	struct hsusb_phy_start_req {
		struct rpc_request_hdr hdr;
		uint32_t product_id;
	} req;

	if (!usb_ep || IS_ERR(usb_ep)) {
		pr_err("%s: rpc connect failed: rc = %ld\n",
		       __func__, PTR_ERR(usb_ep));
		return -EAGAIN;
	}

	req.product_id = cpu_to_be32(product_id);
	rc = msm_rpc_call(usb_ep, usb_rpc_ids.update_product_id,
			  &req, sizeof(req), 5 * HZ);
	if (rc < 0)
		pr_err("%s: rpc call failed! error: %d\n",
		       __func__, rc);
	else
		pr_debug("%s: rpc call success\n", __func__);

	return rc;
}
EXPORT_SYMBOL(msm_hsusb_send_productID);

/*
 * Push the device serial number (NUL-terminated string) to the modem.
 * Caller must pass a valid string; the NULL case is signalled separately
 * via msm_hsusb_is_serial_num_null().
 */
int msm_hsusb_send_serial_number(const char *serial_number)
{
	int rc = 0, serial_len, rlen;
	struct hsusb_send_sn_req {
		struct rpc_request_hdr hdr;
		uint32_t length;
		char sn[0];	/* flexible trailing string */
	} *req;

	if (!usb_ep || IS_ERR(usb_ep)) {
		pr_err("%s: rpc connect failed: rc = %ld\n",
		       __func__, PTR_ERR(usb_ep));
		return -EAGAIN;
	}

	/*
	 * USB driver passes null terminated string to us. Modem processor
	 * expects serial number to be 32 bit aligned.
	 */
	serial_len  = strlen(serial_number) + 1;
	rlen = sizeof(struct rpc_request_hdr) + sizeof(uint32_t) +
			((serial_len + 3) & ~3);
	/*
	 * kzalloc (was kmalloc): the bytes padding the string up to the
	 * 32-bit boundary are transmitted to the modem, so they must not
	 * be uninitialized heap data.
	 */
	req = kzalloc(rlen, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->length = cpu_to_be32(serial_len);
	strncpy(req->sn, serial_number, serial_len);
	rc = msm_rpc_call(usb_ep, usb_rpc_ids.update_serial_num,
			  req, rlen, 5 * HZ);
	if (rc < 0)
		pr_err("%s: rpc call failed! error: %d\n",
		       __func__, rc);
	else
		pr_debug("%s: rpc call success\n", __func__);

	kfree(req);
	return rc;
}
EXPORT_SYMBOL(msm_hsusb_send_serial_number);

/* Tell the modem whether the serial number is absent (val != 0). */
int msm_hsusb_is_serial_num_null(uint32_t val)
{
	int rc = 0;
	struct hsusb_phy_start_req {
		struct rpc_request_hdr hdr;
		uint32_t value;
	} req;

	if (!usb_ep || IS_ERR(usb_ep)) {
		pr_err("%s: rpc connect failed: rc = %ld\n",
		       __func__, PTR_ERR(usb_ep));
		return -EAGAIN;
	}
	if (!usb_rpc_ids.update_is_serial_num_null) {
		pr_err("%s: proc id not supported \n", __func__);
		return -ENODATA;
	}

	req.value = cpu_to_be32(val);
	rc = msm_rpc_call(usb_ep, usb_rpc_ids.update_is_serial_num_null,
			  &req, sizeof(req), 5 * HZ);
	if (rc < 0)
		pr_err("%s: rpc call failed! error: %d\n", __func__, rc);
	else
		pr_debug("%s: rpc call success\n", __func__);

	return rc;
}
EXPORT_SYMBOL(msm_hsusb_is_serial_num_null);

int msm_chg_usb_charger_connected(uint32_t device)
{
	int rc = 0;
	struct hsusb_start_req {
		struct rpc_request_hdr hdr;
		uint32_t otg_dev;
	} req;

	if (!chg_ep || IS_ERR(chg_ep))
		return -EAGAIN;
	req.otg_dev = cpu_to_be32(device);
	rc = msm_rpc_call(chg_ep, chg_rpc_ids.chg_usb_charger_connected_proc,
			  &req, sizeof(req), 5 * HZ);

	if (rc < 0) {
		pr_err("%s: charger_connected failed! rc = %d\n",
		       __func__, rc);
	} else
		pr_debug("msm_chg_usb_charger_connected\n");

	return rc;
}
EXPORT_SYMBOL(msm_chg_usb_charger_connected);

/* Report the current (in mA) that the charger may draw. */
int msm_chg_usb_i_is_available(uint32_t sample)
{
	int rc = 0;
	struct hsusb_start_req {
		struct rpc_request_hdr hdr;
		uint32_t i_ma;
	} req;

	if (!chg_ep || IS_ERR(chg_ep))
		return -EAGAIN;
	req.i_ma = cpu_to_be32(sample);
	rc = msm_rpc_call(chg_ep, chg_rpc_ids.chg_usb_i_is_available_proc,
			  &req, sizeof(req), 5 * HZ);

	if (rc < 0) {
		pr_err("%s: charger_i_available failed! rc = %d\n",
		       __func__, rc);
	} else
		pr_debug("msm_chg_usb_i_is_available(%u)\n", sample);

	return rc;
}
EXPORT_SYMBOL(msm_chg_usb_i_is_available);

int msm_chg_usb_i_is_not_available(void)
{
	int rc = 0;
	struct hsusb_start_req {
		struct rpc_request_hdr hdr;
	} req;

	if (!chg_ep || IS_ERR(chg_ep))
		return -EAGAIN;
	rc = msm_rpc_call(chg_ep, chg_rpc_ids.chg_usb_i_is_not_available_proc,
			  &req, sizeof(req), 5 * HZ);

	if (rc < 0) {
		pr_err("%s: charger_i_not_available failed! rc ="
		       "%d \n", __func__, rc);
	} else
		pr_debug("msm_chg_usb_i_is_not_available\n");

	return rc;
}
EXPORT_SYMBOL(msm_chg_usb_i_is_not_available);

int msm_chg_usb_charger_disconnected(void)
{
	int rc = 0;
	struct hsusb_start_req {
		struct rpc_request_hdr hdr;
	} req;

	if (!chg_ep || IS_ERR(chg_ep))
		return -EAGAIN;
	rc = msm_rpc_call(chg_ep, chg_rpc_ids.chg_usb_charger_disconnected_proc,
			  &req, sizeof(req), 5 * HZ);

	if (rc < 0) {
		pr_err("%s: charger_disconnected failed! rc = %d\n",
		       __func__, rc);
	} else
		pr_debug("msm_chg_usb_charger_disconnected\n");

	return rc;
}
EXPORT_SYMBOL(msm_chg_usb_charger_disconnected);

/* rpc call to close connection */
int msm_hsusb_rpc_close(void)
{
	int rc = 0;

	if (IS_ERR(usb_ep)) {
		pr_err("%s: rpc_close failed before call, rc = %ld\n",
		       __func__, PTR_ERR(usb_ep));
		return -EAGAIN;
	}

	rc = msm_rpc_close(usb_ep);
	usb_ep = NULL;

	if (rc < 0) {
		pr_err("%s: close rpc failed! rc = %d\n",
		       __func__, rc);
		return -EAGAIN;
	} else
		pr_debug("rpc close success\n");

	return rc;
}
EXPORT_SYMBOL(msm_hsusb_rpc_close);

/* rpc call to close charging connection */
int msm_chg_rpc_close(void)
{
	int rc = 0;

	if (IS_ERR(chg_ep)) {
		pr_err("%s: rpc_close failed before call, rc = %ld\n",
		       __func__, PTR_ERR(chg_ep));
		return -EAGAIN;
	}

	rc = msm_rpc_close(chg_ep);
	chg_ep = NULL;

	if (rc < 0) {
		pr_err("%s: close rpc failed! rc = %d\n",
		       __func__, rc);
		return -EAGAIN;
	} else
		pr_debug("rpc close success\n");

	return rc;
}
EXPORT_SYMBOL(msm_chg_rpc_close);

/*
 * Query the modem whether the reset rework is installed.
 * Returns the (host-endian) rework flag, or a negative errno.
 */
int msm_hsusb_reset_rework_installed(void)
{
	int rc = 0;
	struct hsusb_start_req {
		struct rpc_request_hdr hdr;
	} req;
	struct hsusb_rpc_rep {
		struct rpc_reply_hdr hdr;
		uint32_t rework;
	} rep;

	memset(&rep, 0, sizeof(rep));

	if (!usb_ep || IS_ERR(usb_ep)) {
		pr_err("%s: hsusb rpc connection not initialized, rc = %ld\n",
		       __func__, PTR_ERR(usb_ep));
		return -EAGAIN;
	}

	rc = msm_rpc_call_reply(usb_ep, usb_rpc_ids.reset_rework_installed,
				&req, sizeof(req), &rep, sizeof(rep), 5 * HZ);

	if (rc < 0) {
		pr_err("%s: rpc call failed! error: (%d)"
		       "proc id: (%lx)\n",
		       __func__, rc,
		       usb_rpc_ids.reset_rework_installed);
		return rc;
	}

	/* byte-swap before logging; rep.rework is big-endian on the wire */
	pr_info("%s: rework: (%d)\n", __func__, be32_to_cpu(rep.rework));
	return be32_to_cpu(rep.rework);
}
EXPORT_SYMBOL(msm_hsusb_reset_rework_installed);

/* Enable (1) or disable (0) PMIC control of the ULPI DATA0 line. */
static int msm_hsusb_pmic_ulpidata0_config(int enable)
{
	int rc = 0;
	struct hsusb_start_req {
		struct rpc_request_hdr hdr;
	} req;

	if (!usb_ep || IS_ERR(usb_ep)) {
		pr_err("%s: hsusb rpc connection not initialized, rc = %ld\n",
		       __func__, PTR_ERR(usb_ep));
		return -EAGAIN;
	}

	if (enable)
		rc = msm_rpc_call(usb_ep, usb_rpc_ids.enable_pmic_ulpi_data0,
				  &req, sizeof(req), 5 * HZ);
	else
		rc = msm_rpc_call(usb_ep, usb_rpc_ids.disable_pmic_ulpi_data0,
				  &req, sizeof(req), 5 * HZ);

	if (rc < 0)
		pr_err("%s: rpc call failed! error: %d\n", __func__, rc);
	return rc;
}

int msm_hsusb_enable_pmic_ulpidata0(void)
{
	return msm_hsusb_pmic_ulpidata0_config(1);
}
EXPORT_SYMBOL(msm_hsusb_enable_pmic_ulpidata0);

int msm_hsusb_disable_pmic_ulpidata0(void)
{
	return msm_hsusb_pmic_ulpidata0_config(0);
}
EXPORT_SYMBOL(msm_hsusb_disable_pmic_ulpidata0);

/* wrapper for sending pid and serial# info to bootloader */
int usb_diag_update_pid_and_serial_num(uint32_t pid, const char *snum)
{
	int ret;

	ret = msm_hsusb_send_productID(pid);
	if (ret)
		return ret;

	if (!snum) {
		ret = msm_hsusb_is_serial_num_null(1);
		if (ret)
			return ret;
		/*
		 * BUGFIX: must stop here.  The original fell through and
		 * called msm_hsusb_send_serial_number(NULL), which does
		 * strlen(NULL) -> NULL pointer dereference.
		 */
		return 0;
	}

	ret = msm_hsusb_is_serial_num_null(0);
	if (ret)
		return ret;

	ret = msm_hsusb_send_serial_number(snum);
	if (ret)
		return ret;

	return 0;
}

#ifdef CONFIG_USB_MSM_72K
/* charger api wrappers */
int hsusb_chg_init(int connect)
{
	if (connect)
		return msm_chg_rpc_connect();
	else
		return msm_chg_rpc_close();
}
EXPORT_SYMBOL(hsusb_chg_init);

void hsusb_chg_vbus_draw(unsigned mA)
{
	msm_chg_usb_i_is_available(mA);
}
EXPORT_SYMBOL(hsusb_chg_vbus_draw);

void hsusb_chg_connected(enum chg_type chgtype)
{
	char *chg_types[] = {"STD DOWNSTREAM PORT",
			     "CARKIT",
			     "DEDICATED CHARGER",
			     "INVALID"};

	if (chgtype == USB_CHG_TYPE__INVALID) {
		msm_chg_usb_i_is_not_available();
		msm_chg_usb_charger_disconnected();
		return;
	}

	pr_info("\nCharger Type: %s\n", chg_types[chgtype]);
	msm_chg_usb_charger_connected(chgtype);
}
EXPORT_SYMBOL(hsusb_chg_connected);
#endif
gpl-2.0
drod2169/KangBang-AOSP-BFS
sound/pci/echoaudio/gina20.c
3612
3018
/*
 *  ALSA driver for Echoaudio soundcards.
 *  Copyright (C) 2003-2004 Giuliano Pochini <pochini@shiny.it>
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; version 2 of the License.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program; if not, write to the Free Software
 *  Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
 * Card-specific configuration stub for the Gina20.  This file only sets
 * feature macros and tables, then #includes the shared echoaudio
 * implementation at the bottom.
 */

#define ECHOGALS_FAMILY
#define ECHOCARD_GINA20
#define ECHOCARD_NAME "Gina20"
#define ECHOCARD_HAS_MONITOR
#define ECHOCARD_HAS_INPUT_GAIN
#define ECHOCARD_HAS_DIGITAL_IO
#define ECHOCARD_HAS_EXTERNAL_CLOCK
#define ECHOCARD_HAS_ADAT	FALSE

/* Pipe indexes */
#define PX_ANALOG_OUT	0	/* 8 */
#define PX_DIGITAL_OUT	8	/* 2 */
#define PX_ANALOG_IN	10	/* 2 */
#define PX_DIGITAL_IN	12	/* 2 */
#define PX_NUM		14

/* Bus indexes */
#define BX_ANALOG_OUT	0	/* 8 */
#define BX_DIGITAL_OUT	8	/* 2 */
#define BX_ANALOG_IN	10	/* 2 */
#define BX_DIGITAL_IN	12	/* 2 */
#define BX_NUM		14


#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/moduleparam.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <sound/core.h>
#include <sound/info.h>
#include <sound/control.h>
#include <sound/tlv.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/asoundef.h>
#include <sound/initval.h>
#include <asm/io.h>
#include <asm/atomic.h>
#include "echoaudio.h"

MODULE_FIRMWARE("ea/gina20_dsp.fw");

/* Index into card_fw[] below */
#define FW_GINA20_DSP	0

static const struct firmware card_fw[] = {
	{0, "gina20_dsp.fw"}
};

static DEFINE_PCI_DEVICE_TABLE(snd_echo_ids) = {
	{0x1057, 0x1801, 0xECC0, 0x0020, 0, 0, 0},	/* DSP 56301 Gina20 rev.0 */
	{0,}
};

static struct snd_pcm_hardware pcm_hardware_skel = {
	.info = SNDRV_PCM_INFO_MMAP |
		SNDRV_PCM_INFO_INTERLEAVED |
		SNDRV_PCM_INFO_BLOCK_TRANSFER |
		SNDRV_PCM_INFO_MMAP_VALID |
		SNDRV_PCM_INFO_PAUSE |
		SNDRV_PCM_INFO_SYNC_START,
	.formats =	SNDRV_PCM_FMTBIT_U8 |
			SNDRV_PCM_FMTBIT_S16_LE |
			SNDRV_PCM_FMTBIT_S24_3LE |
			SNDRV_PCM_FMTBIT_S32_LE |
			SNDRV_PCM_FMTBIT_S32_BE,
	.rates = SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000,
	.rate_min = 44100,
	.rate_max = 48000,
	.channels_min = 1,
	.channels_max = 2,
	.buffer_bytes_max = 262144,
	.period_bytes_min = 32,
	.period_bytes_max = 131072,
	.periods_min = 2,
	.periods_max = 220,
	/* One page (4k) contains 512 instructions. I don't know if the hw
	supports lists longer than this. In this case periods_max=220 is a
	safe limit to make sure the list never exceeds 512 instructions. */
};

/* Generic driver implementation is pulled in here, parameterized by the
 * macros and tables defined above. */
#include "gina20_dsp.c"
#include "echoaudio_dsp.c"
#include "echoaudio.c"
gpl-2.0
xXminiWHOOPERxX/xXminiWHOOPERxX-Kernel-for-M4-MLG-
drivers/mtd/nand/pasemi_nand.c
4892
5494
/* * Copyright (C) 2006-2007 PA Semi, Inc * * Author: Egor Martovetsky <egor@pasemi.com> * Maintained by: Olof Johansson <olof@lixom.net> * * Driver for the PWRficient onchip NAND flash interface * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #undef DEBUG #include <linux/slab.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/pci.h> #include <asm/io.h> #define LBICTRL_LPCCTL_NR 0x00004000 #define CLE_PIN_CTL 15 #define ALE_PIN_CTL 14 static unsigned int lpcctl; static struct mtd_info *pasemi_nand_mtd; static const char driver_name[] = "pasemi-nand"; static void pasemi_read_buf(struct mtd_info *mtd, u_char *buf, int len) { struct nand_chip *chip = mtd->priv; while (len > 0x800) { memcpy_fromio(buf, chip->IO_ADDR_R, 0x800); buf += 0x800; len -= 0x800; } memcpy_fromio(buf, chip->IO_ADDR_R, len); } static void pasemi_write_buf(struct mtd_info *mtd, const u_char *buf, int len) { struct nand_chip *chip = mtd->priv; while (len > 0x800) { memcpy_toio(chip->IO_ADDR_R, buf, 0x800); buf += 0x800; len -= 0x800; } memcpy_toio(chip->IO_ADDR_R, buf, len); } static void pasemi_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) { struct nand_chip *chip = mtd->priv; if (cmd == NAND_CMD_NONE) return; if (ctrl & NAND_CLE) 
out_8(chip->IO_ADDR_W + (1 << CLE_PIN_CTL), cmd); else out_8(chip->IO_ADDR_W + (1 << ALE_PIN_CTL), cmd); /* Push out posted writes */ eieio(); inl(lpcctl); } int pasemi_device_ready(struct mtd_info *mtd) { return !!(inl(lpcctl) & LBICTRL_LPCCTL_NR); } static int __devinit pasemi_nand_probe(struct platform_device *ofdev) { struct pci_dev *pdev; struct device_node *np = ofdev->dev.of_node; struct resource res; struct nand_chip *chip; int err = 0; err = of_address_to_resource(np, 0, &res); if (err) return -EINVAL; /* We only support one device at the moment */ if (pasemi_nand_mtd) return -ENODEV; pr_debug("pasemi_nand at %pR\n", &res); /* Allocate memory for MTD device structure and private data */ pasemi_nand_mtd = kzalloc(sizeof(struct mtd_info) + sizeof(struct nand_chip), GFP_KERNEL); if (!pasemi_nand_mtd) { printk(KERN_WARNING "Unable to allocate PASEMI NAND MTD device structure\n"); err = -ENOMEM; goto out; } /* Get pointer to private data */ chip = (struct nand_chip *)&pasemi_nand_mtd[1]; /* Link the private data with the MTD structure */ pasemi_nand_mtd->priv = chip; pasemi_nand_mtd->owner = THIS_MODULE; chip->IO_ADDR_R = of_iomap(np, 0); chip->IO_ADDR_W = chip->IO_ADDR_R; if (!chip->IO_ADDR_R) { err = -EIO; goto out_mtd; } pdev = pci_get_device(PCI_VENDOR_ID_PASEMI, 0xa008, NULL); if (!pdev) { err = -ENODEV; goto out_ior; } lpcctl = pci_resource_start(pdev, 0); pci_dev_put(pdev); if (!request_region(lpcctl, 4, driver_name)) { err = -EBUSY; goto out_ior; } chip->cmd_ctrl = pasemi_hwcontrol; chip->dev_ready = pasemi_device_ready; chip->read_buf = pasemi_read_buf; chip->write_buf = pasemi_write_buf; chip->chip_delay = 0; chip->ecc.mode = NAND_ECC_SOFT; /* Enable the following for a flash based bad block table */ chip->options = NAND_NO_AUTOINCR; chip->bbt_options = NAND_BBT_USE_FLASH; /* Scan to find existence of the device */ if (nand_scan(pasemi_nand_mtd, 1)) { err = -ENXIO; goto out_lpc; } if (mtd_device_register(pasemi_nand_mtd, NULL, 0)) { printk(KERN_ERR 
"pasemi_nand: Unable to register MTD device\n"); err = -ENODEV; goto out_lpc; } printk(KERN_INFO "PA Semi NAND flash at %08llx, control at I/O %x\n", res.start, lpcctl); return 0; out_lpc: release_region(lpcctl, 4); out_ior: iounmap(chip->IO_ADDR_R); out_mtd: kfree(pasemi_nand_mtd); out: return err; } static int __devexit pasemi_nand_remove(struct platform_device *ofdev) { struct nand_chip *chip; if (!pasemi_nand_mtd) return 0; chip = pasemi_nand_mtd->priv; /* Release resources, unregister device */ nand_release(pasemi_nand_mtd); release_region(lpcctl, 4); iounmap(chip->IO_ADDR_R); /* Free the MTD device structure */ kfree(pasemi_nand_mtd); pasemi_nand_mtd = NULL; return 0; } static const struct of_device_id pasemi_nand_match[] = { { .compatible = "pasemi,localbus-nand", }, {}, }; MODULE_DEVICE_TABLE(of, pasemi_nand_match); static struct platform_driver pasemi_nand_driver = { .driver = { .name = (char*)driver_name, .owner = THIS_MODULE, .of_match_table = pasemi_nand_match, }, .probe = pasemi_nand_probe, .remove = pasemi_nand_remove, }; module_platform_driver(pasemi_nand_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Egor Martovetsky <egor@pasemi.com>"); MODULE_DESCRIPTION("NAND flash interface driver for PA Semi PWRficient");
gpl-2.0
profglavcho/mt6735-kernel-3.10.61
sound/soc/fsl/wm1133-ev1.c
7196
9122
/*
 *  wm1133-ev1.c  -  Audio for WM1133-EV1 on i.MX31ADS
 *
 *  Copyright (c) 2010 Wolfson Microelectronics plc
 *  Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 *  Based on an earlier driver for the same hardware by Liam Girdwood.
 *
 *  This program is free software; you can redistribute it and/or modify it
 *  under the terms of the GNU General Public License as published by the
 *  Free Software Foundation; either version 2 of the  License, or (at your
 *  option) any later version.
 */

#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/jack.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include "imx-ssi.h"
#include "../codecs/wm8350.h"
#include "imx-audmux.h"

/* There is a silicon mic on the board optionally connected via a solder pad
 * SP1.  Define this to enable it.
 */
#undef USE_SIMIC

/* One supported clocking configuration: channel count, sample format and
 * rate plus the WM8350 sysclk and divider settings that realize it. */
struct _wm8350_audio {
	unsigned int channels;
	snd_pcm_format_t format;
	unsigned int rate;
	unsigned int sysclk;
	unsigned int bclkdiv;
	unsigned int clkdiv;
	unsigned int lr_rate;
};

/* in order of power consumption per rate (lowest first) */
static const struct _wm8350_audio wm8350_audio[] = {
	/* 16bit mono modes */
	{1, SNDRV_PCM_FORMAT_S16_LE, 8000, 12288000 >> 1,
	 WM8350_BCLK_DIV_48, WM8350_DACDIV_3, 16,},

	/* 16 bit stereo modes */
	{2, SNDRV_PCM_FORMAT_S16_LE, 8000, 12288000,
	 WM8350_BCLK_DIV_48, WM8350_DACDIV_6, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 16000, 12288000,
	 WM8350_BCLK_DIV_24, WM8350_DACDIV_3, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 32000, 12288000,
	 WM8350_BCLK_DIV_12, WM8350_DACDIV_1_5, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 48000, 12288000,
	 WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 96000, 24576000,
	 WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 11025, 11289600,
	 WM8350_BCLK_DIV_32, WM8350_DACDIV_4, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 22050, 11289600,
	 WM8350_BCLK_DIV_16, WM8350_DACDIV_2, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 44100, 11289600,
	 WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},
	{2, SNDRV_PCM_FORMAT_S16_LE, 88200, 22579200,
	 WM8350_BCLK_DIV_8, WM8350_DACDIV_1, 32,},

	/* 24bit stereo modes */
	{2, SNDRV_PCM_FORMAT_S24_LE, 48000, 12288000,
	 WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
	{2, SNDRV_PCM_FORMAT_S24_LE, 96000, 24576000,
	 WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
	{2, SNDRV_PCM_FORMAT_S24_LE, 44100, 11289600,
	 WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
	{2, SNDRV_PCM_FORMAT_S24_LE, 88200, 22579200,
	 WM8350_BCLK_DIV_4, WM8350_DACDIV_1, 64,},
};

/* hw_params callback: look up the requested rate/format/channels in
 * wm8350_audio[] and program the codec PLL, DAI formats and all clock
 * dividers accordingly.  Returns -EINVAL if the combination is not in
 * the table. */
static int wm1133_ev1_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	int i, found = 0;
	snd_pcm_format_t format = params_format(params);
	unsigned int rate = params_rate(params);
	unsigned int channels = params_channels(params);
	u32 dai_format;

	/* find the correct audio parameters */
	for (i = 0; i < ARRAY_SIZE(wm8350_audio); i++) {
		if (rate == wm8350_audio[i].rate &&
		    format == wm8350_audio[i].format &&
		    channels == wm8350_audio[i].channels) {
			found = 1;
			break;
		}
	}
	if (!found)
		return -EINVAL;

	/* codec FLL input is 14.75 MHz from MCLK */
	snd_soc_dai_set_pll(codec_dai, 0, 0, 14750000, wm8350_audio[i].sysclk);

	dai_format = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
		SND_SOC_DAIFMT_CBM_CFM;

	/* set codec DAI configuration */
	snd_soc_dai_set_fmt(codec_dai, dai_format);

	/* set cpu DAI configuration */
	snd_soc_dai_set_fmt(cpu_dai, dai_format);

	/* TODO: The SSI driver should figure this out for us */
	switch (channels) {
	case 2:
		snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffc, 0xffffffc, 2, 0);
		break;
	case 1:
		snd_soc_dai_set_tdm_slot(cpu_dai, 0xffffffe, 0xffffffe, 1, 0);
		break;
	default:
		return -EINVAL;
	}

	/* set MCLK as the codec system clock for DAC and ADC */
	snd_soc_dai_set_sysclk(codec_dai, WM8350_MCLK_SEL_PLL_MCLK,
			       wm8350_audio[i].sysclk, SND_SOC_CLOCK_IN);

	/* set codec BCLK division for sample rate */
	snd_soc_dai_set_clkdiv(codec_dai, WM8350_BCLK_CLKDIV,
			       wm8350_audio[i].bclkdiv);

	/* DAI is synchronous and clocked with DAC LRCLK & ADC LRC */
	snd_soc_dai_set_clkdiv(codec_dai,
			       WM8350_DACLR_CLKDIV, wm8350_audio[i].lr_rate);
	snd_soc_dai_set_clkdiv(codec_dai,
			       WM8350_ADCLR_CLKDIV, wm8350_audio[i].lr_rate);

	/* now configure DAC and ADC clocks */
	snd_soc_dai_set_clkdiv(codec_dai,
			       WM8350_DAC_CLKDIV, wm8350_audio[i].clkdiv);

	snd_soc_dai_set_clkdiv(codec_dai,
			       WM8350_ADC_CLKDIV, wm8350_audio[i].clkdiv);

	return 0;
}

static struct snd_soc_ops wm1133_ev1_ops = {
	.hw_params = wm1133_ev1_hw_params,
};

static const struct snd_soc_dapm_widget wm1133_ev1_widgets[] = {
#ifdef USE_SIMIC
	SND_SOC_DAPM_MIC("SiMIC", NULL),
#endif
	SND_SOC_DAPM_MIC("Mic1 Jack", NULL),
	SND_SOC_DAPM_MIC("Mic2 Jack", NULL),
	SND_SOC_DAPM_LINE("Line In Jack", NULL),
	SND_SOC_DAPM_LINE("Line Out Jack", NULL),
	SND_SOC_DAPM_HP("Headphone Jack", NULL),
};

/* imx32ads soc_card audio map */
static const struct snd_soc_dapm_route wm1133_ev1_map[] = {
#ifdef USE_SIMIC
	/* SiMIC --> IN1LN (with automatic bias) via SP1 */
	{ "IN1LN", NULL, "Mic Bias" },
	{ "Mic Bias", NULL, "SiMIC" },
#endif

	/* Mic 1 Jack --> IN1LN and IN1LP (with automatic bias) */
	{ "IN1LN", NULL, "Mic Bias" },
	{ "IN1LP", NULL, "Mic1 Jack" },
	{ "Mic Bias", NULL, "Mic1 Jack" },

	/* Mic 2 Jack --> IN1RN and IN1RP (with automatic bias) */
	{ "IN1RN", NULL, "Mic Bias" },
	{ "IN1RP", NULL, "Mic2 Jack" },
	{ "Mic Bias", NULL, "Mic2 Jack" },

	/* Line in Jack --> AUX (L+R) */
	{ "IN3R", NULL, "Line In Jack" },
	{ "IN3L", NULL, "Line In Jack" },

	/* Out1 --> Headphone Jack */
	{ "Headphone Jack", NULL, "OUT1R" },
	{ "Headphone Jack", NULL, "OUT1L" },

	/* Out1 --> Line Out Jack */
	{ "Line Out Jack", NULL, "OUT2R" },
	{ "Line Out Jack", NULL, "OUT2L" },
};

static struct snd_soc_jack hp_jack;

static struct snd_soc_jack_pin hp_jack_pins[] = {
	{ .pin = "Headphone Jack", .mask = SND_JACK_HEADPHONE },
};

static struct snd_soc_jack mic_jack;

static struct snd_soc_jack_pin mic_jack_pins[] = {
	{ .pin = "Mic1 Jack", .mask = SND_JACK_MICROPHONE },
	{ .pin = "Mic2 Jack", .mask = SND_JACK_MICROPHONE },
};

/* Machine init callback: register the DAPM widgets/routes and wire up
 * headphone and microphone jack detection on the WM8350. */
static int wm1133_ev1_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;

	snd_soc_dapm_new_controls(dapm, wm1133_ev1_widgets,
				  ARRAY_SIZE(wm1133_ev1_widgets));

	snd_soc_dapm_add_routes(dapm, wm1133_ev1_map,
				ARRAY_SIZE(wm1133_ev1_map));

	/* Headphone jack detection */
	snd_soc_jack_new(codec, "Headphone", SND_JACK_HEADPHONE, &hp_jack);
	snd_soc_jack_add_pins(&hp_jack, ARRAY_SIZE(hp_jack_pins),
			      hp_jack_pins);
	wm8350_hp_jack_detect(codec, WM8350_JDR, &hp_jack, SND_JACK_HEADPHONE);

	/* Microphone jack detection */
	snd_soc_jack_new(codec, "Microphone",
			 SND_JACK_MICROPHONE | SND_JACK_BTN_0, &mic_jack);
	snd_soc_jack_add_pins(&mic_jack, ARRAY_SIZE(mic_jack_pins),
			      mic_jack_pins);
	wm8350_mic_jack_detect(codec, &mic_jack, SND_JACK_MICROPHONE,
			       SND_JACK_BTN_0);

	/* Mic bias stays on so jack detection keeps working */
	snd_soc_dapm_force_enable_pin(dapm, "Mic Bias");

	return 0;
}


static struct snd_soc_dai_link wm1133_ev1_dai = {
	.name = "WM1133-EV1",
	.stream_name = "Audio",
	.cpu_dai_name = "imx-ssi.0",
	.codec_dai_name = "wm8350-hifi",
	.platform_name = "imx-fiq-pcm-audio.0",
	.codec_name = "wm8350-codec.0-0x1a",
	.init = wm1133_ev1_init,
	.ops = &wm1133_ev1_ops,
	.symmetric_rates = 1,
};

static struct snd_soc_card wm1133_ev1 = {
	.name = "WM1133-EV1",
	.owner = THIS_MODULE,
	.dai_link = &wm1133_ev1_dai,
	.num_links = 1,
};

static struct platform_device *wm1133_ev1_snd_device;

/* Module init: route SSI0 through AUDMUX port 5 (codec is clock master)
 * and register the "soc-audio" platform device carrying the card. */
static int __init wm1133_ev1_audio_init(void)
{
	int ret;
	unsigned int ptcr, pdcr;

	/* SSI0 mastered by port 5 */
	ptcr = IMX_AUDMUX_V2_PTCR_SYN |
		IMX_AUDMUX_V2_PTCR_TFSDIR |
		IMX_AUDMUX_V2_PTCR_TFSEL(MX31_AUDMUX_PORT5_SSI_PINS_5) |
		IMX_AUDMUX_V2_PTCR_TCLKDIR |
		IMX_AUDMUX_V2_PTCR_TCSEL(MX31_AUDMUX_PORT5_SSI_PINS_5);
	pdcr = IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT5_SSI_PINS_5);
	imx_audmux_v2_configure_port(MX31_AUDMUX_PORT1_SSI0, ptcr, pdcr);

	ptcr = IMX_AUDMUX_V2_PTCR_SYN;
	pdcr = IMX_AUDMUX_V2_PDCR_RXDSEL(MX31_AUDMUX_PORT1_SSI0);
	imx_audmux_v2_configure_port(MX31_AUDMUX_PORT5_SSI_PINS_5, ptcr, pdcr);

	wm1133_ev1_snd_device = platform_device_alloc("soc-audio", -1);
	if (!wm1133_ev1_snd_device)
		return -ENOMEM;

	platform_set_drvdata(wm1133_ev1_snd_device, &wm1133_ev1);
	ret = platform_device_add(wm1133_ev1_snd_device);

	if (ret)
		platform_device_put(wm1133_ev1_snd_device);

	return ret;
}
module_init(wm1133_ev1_audio_init);

static void __exit wm1133_ev1_audio_exit(void)
{
	platform_device_unregister(wm1133_ev1_snd_device);
}
module_exit(wm1133_ev1_audio_exit);

MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("Audio for WM1133-EV1 on i.MX31ADS");
MODULE_LICENSE("GPL");
gpl-2.0
rachitrawat/Vengeance-Kernel-MSM7x27-JLO
drivers/isdn/divert/divert_procfs.c
7708
9035
/* $Id: divert_procfs.c,v 1.11.6.2 2001/09/23 22:24:36 kai Exp $ * * Filesystem handling for the diversion supplementary services. * * Copyright 1998 by Werner Cornelius (werner@isdn4linux.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/module.h> #include <linux/poll.h> #include <linux/slab.h> #ifdef CONFIG_PROC_FS #include <linux/proc_fs.h> #else #include <linux/fs.h> #endif #include <linux/sched.h> #include <linux/isdnif.h> #include <net/net_namespace.h> #include <linux/mutex.h> #include "isdn_divert.h" /*********************************/ /* Variables for interface queue */ /*********************************/ ulong if_used = 0; /* number of interface users */ static DEFINE_MUTEX(isdn_divert_mutex); static struct divert_info *divert_info_head = NULL; /* head of queue */ static struct divert_info *divert_info_tail = NULL; /* pointer to last entry */ static DEFINE_SPINLOCK(divert_info_lock);/* lock for queue */ static wait_queue_head_t rd_queue; /*********************************/ /* put an info buffer into queue */ /*********************************/ void put_info_buffer(char *cp) { struct divert_info *ib; unsigned long flags; if (if_used <= 0) return; if (!cp) return; if (!*cp) return; if (!(ib = kmalloc(sizeof(struct divert_info) + strlen(cp), GFP_ATOMIC))) return; /* no memory */ strcpy(ib->info_start, cp); /* set output string */ ib->next = NULL; spin_lock_irqsave(&divert_info_lock, flags); ib->usage_cnt = if_used; if (!divert_info_head) divert_info_head = ib; /* new head */ else divert_info_tail->next = ib; /* follows existing messages */ divert_info_tail = ib; /* new tail */ /* delete old entrys */ while (divert_info_head->next) { if ((divert_info_head->usage_cnt <= 0) && (divert_info_head->next->usage_cnt <= 0)) { ib = divert_info_head; divert_info_head = divert_info_head->next; kfree(ib); } else break; } /* divert_info_head->next */ 
spin_unlock_irqrestore(&divert_info_lock, flags); wake_up_interruptible(&(rd_queue)); } /* put_info_buffer */ #ifdef CONFIG_PROC_FS /**********************************/ /* deflection device read routine */ /**********************************/ static ssize_t isdn_divert_read(struct file *file, char __user *buf, size_t count, loff_t *off) { struct divert_info *inf; int len; if (!*((struct divert_info **) file->private_data)) { if (file->f_flags & O_NONBLOCK) return -EAGAIN; interruptible_sleep_on(&(rd_queue)); } if (!(inf = *((struct divert_info **) file->private_data))) return (0); inf->usage_cnt--; /* new usage count */ file->private_data = &inf->next; /* next structure */ if ((len = strlen(inf->info_start)) <= count) { if (copy_to_user(buf, inf->info_start, len)) return -EFAULT; *off += len; return (len); } return (0); } /* isdn_divert_read */ /**********************************/ /* deflection device write routine */ /**********************************/ static ssize_t isdn_divert_write(struct file *file, const char __user *buf, size_t count, loff_t *off) { return (-ENODEV); } /* isdn_divert_write */ /***************************************/ /* select routines for various kernels */ /***************************************/ static unsigned int isdn_divert_poll(struct file *file, poll_table *wait) { unsigned int mask = 0; poll_wait(file, &(rd_queue), wait); /* mask = POLLOUT | POLLWRNORM; */ if (*((struct divert_info **) file->private_data)) { mask |= POLLIN | POLLRDNORM; } return mask; } /* isdn_divert_poll */ /****************/ /* Open routine */ /****************/ static int isdn_divert_open(struct inode *ino, struct file *filep) { unsigned long flags; spin_lock_irqsave(&divert_info_lock, flags); if_used++; if (divert_info_head) filep->private_data = &(divert_info_tail->next); else filep->private_data = &divert_info_head; spin_unlock_irqrestore(&divert_info_lock, flags); /* start_divert(); */ return nonseekable_open(ino, filep); } /* isdn_divert_open */ 
/*******************/ /* close routine */ /*******************/ static int isdn_divert_close(struct inode *ino, struct file *filep) { struct divert_info *inf; unsigned long flags; spin_lock_irqsave(&divert_info_lock, flags); if_used--; inf = *((struct divert_info **) filep->private_data); while (inf) { inf->usage_cnt--; inf = inf->next; } if (if_used <= 0) while (divert_info_head) { inf = divert_info_head; divert_info_head = divert_info_head->next; kfree(inf); } spin_unlock_irqrestore(&divert_info_lock, flags); return (0); } /* isdn_divert_close */ /*********/ /* IOCTL */ /*********/ static int isdn_divert_ioctl_unlocked(struct file *file, uint cmd, ulong arg) { divert_ioctl dioctl; int i; unsigned long flags; divert_rule *rulep; char *cp; if (copy_from_user(&dioctl, (void __user *) arg, sizeof(dioctl))) return -EFAULT; switch (cmd) { case IIOCGETVER: dioctl.drv_version = DIVERT_IIOC_VERSION; /* set version */ break; case IIOCGETDRV: if ((dioctl.getid.drvid = divert_if.name_to_drv(dioctl.getid.drvnam)) < 0) return (-EINVAL); break; case IIOCGETNAM: cp = divert_if.drv_to_name(dioctl.getid.drvid); if (!cp) return (-EINVAL); if (!*cp) return (-EINVAL); strcpy(dioctl.getid.drvnam, cp); break; case IIOCGETRULE: if (!(rulep = getruleptr(dioctl.getsetrule.ruleidx))) return (-EINVAL); dioctl.getsetrule.rule = *rulep; /* copy data */ break; case IIOCMODRULE: if (!(rulep = getruleptr(dioctl.getsetrule.ruleidx))) return (-EINVAL); spin_lock_irqsave(&divert_lock, flags); *rulep = dioctl.getsetrule.rule; /* copy data */ spin_unlock_irqrestore(&divert_lock, flags); return (0); /* no copy required */ break; case IIOCINSRULE: return (insertrule(dioctl.getsetrule.ruleidx, &dioctl.getsetrule.rule)); break; case IIOCDELRULE: return (deleterule(dioctl.getsetrule.ruleidx)); break; case IIOCDODFACT: return (deflect_extern_action(dioctl.fwd_ctrl.subcmd, dioctl.fwd_ctrl.callid, dioctl.fwd_ctrl.to_nr)); case IIOCDOCFACT: case IIOCDOCFDIS: case IIOCDOCFINT: if 
(!divert_if.drv_to_name(dioctl.cf_ctrl.drvid)) return (-EINVAL); /* invalid driver */ if (strnlen(dioctl.cf_ctrl.msn, sizeof(dioctl.cf_ctrl.msn)) == sizeof(dioctl.cf_ctrl.msn)) return -EINVAL; if (strnlen(dioctl.cf_ctrl.fwd_nr, sizeof(dioctl.cf_ctrl.fwd_nr)) == sizeof(dioctl.cf_ctrl.fwd_nr)) return -EINVAL; if ((i = cf_command(dioctl.cf_ctrl.drvid, (cmd == IIOCDOCFACT) ? 1 : (cmd == IIOCDOCFDIS) ? 0 : 2, dioctl.cf_ctrl.cfproc, dioctl.cf_ctrl.msn, dioctl.cf_ctrl.service, dioctl.cf_ctrl.fwd_nr, &dioctl.cf_ctrl.procid))) return (i); break; default: return (-EINVAL); } /* switch cmd */ return copy_to_user((void __user *)arg, &dioctl, sizeof(dioctl)) ? -EFAULT : 0; } /* isdn_divert_ioctl */ static long isdn_divert_ioctl(struct file *file, uint cmd, ulong arg) { long ret; mutex_lock(&isdn_divert_mutex); ret = isdn_divert_ioctl_unlocked(file, cmd, arg); mutex_unlock(&isdn_divert_mutex); return ret; } static const struct file_operations isdn_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .read = isdn_divert_read, .write = isdn_divert_write, .poll = isdn_divert_poll, .unlocked_ioctl = isdn_divert_ioctl, .open = isdn_divert_open, .release = isdn_divert_close, }; /****************************/ /* isdn subdir in /proc/net */ /****************************/ static struct proc_dir_entry *isdn_proc_entry = NULL; static struct proc_dir_entry *isdn_divert_entry = NULL; #endif /* CONFIG_PROC_FS */ /***************************************************************************/ /* divert_dev_init must be called before the proc filesystem may be used */ /***************************************************************************/ int divert_dev_init(void) { init_waitqueue_head(&rd_queue); #ifdef CONFIG_PROC_FS isdn_proc_entry = proc_mkdir("isdn", init_net.proc_net); if (!isdn_proc_entry) return (-1); isdn_divert_entry = proc_create("divert", S_IFREG | S_IRUGO, isdn_proc_entry, &isdn_fops); if (!isdn_divert_entry) { remove_proc_entry("isdn", init_net.proc_net); return (-1); } #endif 
/* CONFIG_PROC_FS */ return (0); } /* divert_dev_init */ /***************************************************************************/ /* divert_dev_deinit must be called before leaving isdn when included as */ /* a module. */ /***************************************************************************/ int divert_dev_deinit(void) { #ifdef CONFIG_PROC_FS remove_proc_entry("divert", isdn_proc_entry); remove_proc_entry("isdn", init_net.proc_net); #endif /* CONFIG_PROC_FS */ return (0); } /* divert_dev_deinit */
gpl-2.0
onejay09/kernel_HTC_msm7x30_KK
fs/nfsd/nfs3proc.c
7964
25084
/* * Process version 3 NFS requests. * * Copyright (C) 1996, 1997, 1998 Olaf Kirch <okir@monad.swb.de> */ #include <linux/fs.h> #include <linux/ext2_fs.h> #include <linux/magic.h> #include "cache.h" #include "xdr3.h" #include "vfs.h" #define NFSDDBG_FACILITY NFSDDBG_PROC #define RETURN_STATUS(st) { resp->status = (st); return (st); } static int nfs3_ftypes[] = { 0, /* NF3NON */ S_IFREG, /* NF3REG */ S_IFDIR, /* NF3DIR */ S_IFBLK, /* NF3BLK */ S_IFCHR, /* NF3CHR */ S_IFLNK, /* NF3LNK */ S_IFSOCK, /* NF3SOCK */ S_IFIFO, /* NF3FIFO */ }; /* * NULL call. */ static __be32 nfsd3_proc_null(struct svc_rqst *rqstp, void *argp, void *resp) { return nfs_ok; } /* * Get a file's attributes */ static __be32 nfsd3_proc_getattr(struct svc_rqst *rqstp, struct nfsd_fhandle *argp, struct nfsd3_attrstat *resp) { int err; __be32 nfserr; dprintk("nfsd: GETATTR(3) %s\n", SVCFH_fmt(&argp->fh)); fh_copy(&resp->fh, &argp->fh); nfserr = fh_verify(rqstp, &resp->fh, 0, NFSD_MAY_NOP | NFSD_MAY_BYPASS_GSS_ON_ROOT); if (nfserr) RETURN_STATUS(nfserr); err = vfs_getattr(resp->fh.fh_export->ex_path.mnt, resp->fh.fh_dentry, &resp->stat); nfserr = nfserrno(err); RETURN_STATUS(nfserr); } /* * Set a file's attributes */ static __be32 nfsd3_proc_setattr(struct svc_rqst *rqstp, struct nfsd3_sattrargs *argp, struct nfsd3_attrstat *resp) { __be32 nfserr; dprintk("nfsd: SETATTR(3) %s\n", SVCFH_fmt(&argp->fh)); fh_copy(&resp->fh, &argp->fh); nfserr = nfsd_setattr(rqstp, &resp->fh, &argp->attrs, argp->check_guard, argp->guardtime); RETURN_STATUS(nfserr); } /* * Look up a path name component */ static __be32 nfsd3_proc_lookup(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp, struct nfsd3_diropres *resp) { __be32 nfserr; dprintk("nfsd: LOOKUP(3) %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name); fh_copy(&resp->dirfh, &argp->fh); fh_init(&resp->fh, NFS3_FHSIZE); nfserr = nfsd_lookup(rqstp, &resp->dirfh, argp->name, argp->len, &resp->fh); RETURN_STATUS(nfserr); } /* * Check file access */ static __be32 
nfsd3_proc_access(struct svc_rqst *rqstp, struct nfsd3_accessargs *argp, struct nfsd3_accessres *resp) { __be32 nfserr; dprintk("nfsd: ACCESS(3) %s 0x%x\n", SVCFH_fmt(&argp->fh), argp->access); fh_copy(&resp->fh, &argp->fh); resp->access = argp->access; nfserr = nfsd_access(rqstp, &resp->fh, &resp->access, NULL); RETURN_STATUS(nfserr); } /* * Read a symlink. */ static __be32 nfsd3_proc_readlink(struct svc_rqst *rqstp, struct nfsd3_readlinkargs *argp, struct nfsd3_readlinkres *resp) { __be32 nfserr; dprintk("nfsd: READLINK(3) %s\n", SVCFH_fmt(&argp->fh)); /* Read the symlink. */ fh_copy(&resp->fh, &argp->fh); resp->len = NFS3_MAXPATHLEN; nfserr = nfsd_readlink(rqstp, &resp->fh, argp->buffer, &resp->len); RETURN_STATUS(nfserr); } /* * Read a portion of a file. */ static __be32 nfsd3_proc_read(struct svc_rqst *rqstp, struct nfsd3_readargs *argp, struct nfsd3_readres *resp) { __be32 nfserr; u32 max_blocksize = svc_max_payload(rqstp); dprintk("nfsd: READ(3) %s %lu bytes at %Lu\n", SVCFH_fmt(&argp->fh), (unsigned long) argp->count, (unsigned long long) argp->offset); /* Obtain buffer pointer for payload. * 1 (status) + 22 (post_op_attr) + 1 (count) + 1 (eof) * + 1 (xdr opaque byte count) = 26 */ resp->count = argp->count; if (max_blocksize < resp->count) resp->count = max_blocksize; svc_reserve_auth(rqstp, ((1 + NFS3_POST_OP_ATTR_WORDS + 3)<<2) + resp->count +4); fh_copy(&resp->fh, &argp->fh); nfserr = nfsd_read(rqstp, &resp->fh, argp->offset, rqstp->rq_vec, argp->vlen, &resp->count); if (nfserr == 0) { struct inode *inode = resp->fh.fh_dentry->d_inode; resp->eof = (argp->offset + resp->count) >= inode->i_size; } RETURN_STATUS(nfserr); } /* * Write data to a file */ static __be32 nfsd3_proc_write(struct svc_rqst *rqstp, struct nfsd3_writeargs *argp, struct nfsd3_writeres *resp) { __be32 nfserr; unsigned long cnt = argp->len; dprintk("nfsd: WRITE(3) %s %d bytes at %Lu%s\n", SVCFH_fmt(&argp->fh), argp->len, (unsigned long long) argp->offset, argp->stable? 
" stable" : ""); fh_copy(&resp->fh, &argp->fh); resp->committed = argp->stable; nfserr = nfsd_write(rqstp, &resp->fh, NULL, argp->offset, rqstp->rq_vec, argp->vlen, &cnt, &resp->committed); resp->count = cnt; RETURN_STATUS(nfserr); } /* * With NFSv3, CREATE processing is a lot easier than with NFSv2. * At least in theory; we'll see how it fares in practice when the * first reports about SunOS compatibility problems start to pour in... */ static __be32 nfsd3_proc_create(struct svc_rqst *rqstp, struct nfsd3_createargs *argp, struct nfsd3_diropres *resp) { svc_fh *dirfhp, *newfhp = NULL; struct iattr *attr; __be32 nfserr; dprintk("nfsd: CREATE(3) %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name); dirfhp = fh_copy(&resp->dirfh, &argp->fh); newfhp = fh_init(&resp->fh, NFS3_FHSIZE); attr = &argp->attrs; /* Get the directory inode */ nfserr = fh_verify(rqstp, dirfhp, S_IFDIR, NFSD_MAY_CREATE); if (nfserr) RETURN_STATUS(nfserr); /* Unfudge the mode bits */ attr->ia_mode &= ~S_IFMT; if (!(attr->ia_valid & ATTR_MODE)) { attr->ia_valid |= ATTR_MODE; attr->ia_mode = S_IFREG; } else { attr->ia_mode = (attr->ia_mode & ~S_IFMT) | S_IFREG; } /* Now create the file and set attributes */ nfserr = do_nfsd_create(rqstp, dirfhp, argp->name, argp->len, attr, newfhp, argp->createmode, argp->verf, NULL, NULL); RETURN_STATUS(nfserr); } /* * Make directory. This operation is not idempotent. 
*/ static __be32 nfsd3_proc_mkdir(struct svc_rqst *rqstp, struct nfsd3_createargs *argp, struct nfsd3_diropres *resp) { __be32 nfserr; dprintk("nfsd: MKDIR(3) %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name); argp->attrs.ia_valid &= ~ATTR_SIZE; fh_copy(&resp->dirfh, &argp->fh); fh_init(&resp->fh, NFS3_FHSIZE); nfserr = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len, &argp->attrs, S_IFDIR, 0, &resp->fh); fh_unlock(&resp->dirfh); RETURN_STATUS(nfserr); } static __be32 nfsd3_proc_symlink(struct svc_rqst *rqstp, struct nfsd3_symlinkargs *argp, struct nfsd3_diropres *resp) { __be32 nfserr; dprintk("nfsd: SYMLINK(3) %s %.*s -> %.*s\n", SVCFH_fmt(&argp->ffh), argp->flen, argp->fname, argp->tlen, argp->tname); fh_copy(&resp->dirfh, &argp->ffh); fh_init(&resp->fh, NFS3_FHSIZE); nfserr = nfsd_symlink(rqstp, &resp->dirfh, argp->fname, argp->flen, argp->tname, argp->tlen, &resp->fh, &argp->attrs); RETURN_STATUS(nfserr); } /* * Make socket/fifo/device. */ static __be32 nfsd3_proc_mknod(struct svc_rqst *rqstp, struct nfsd3_mknodargs *argp, struct nfsd3_diropres *resp) { __be32 nfserr; int type; dev_t rdev = 0; dprintk("nfsd: MKNOD(3) %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name); fh_copy(&resp->dirfh, &argp->fh); fh_init(&resp->fh, NFS3_FHSIZE); if (argp->ftype == 0 || argp->ftype >= NF3BAD) RETURN_STATUS(nfserr_inval); if (argp->ftype == NF3CHR || argp->ftype == NF3BLK) { rdev = MKDEV(argp->major, argp->minor); if (MAJOR(rdev) != argp->major || MINOR(rdev) != argp->minor) RETURN_STATUS(nfserr_inval); } else if (argp->ftype != NF3SOCK && argp->ftype != NF3FIFO) RETURN_STATUS(nfserr_inval); type = nfs3_ftypes[argp->ftype]; nfserr = nfsd_create(rqstp, &resp->dirfh, argp->name, argp->len, &argp->attrs, type, rdev, &resp->fh); fh_unlock(&resp->dirfh); RETURN_STATUS(nfserr); } /* * Remove file/fifo/socket etc. 
*/ static __be32 nfsd3_proc_remove(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp, struct nfsd3_attrstat *resp) { __be32 nfserr; dprintk("nfsd: REMOVE(3) %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name); /* Unlink. -S_IFDIR means file must not be a directory */ fh_copy(&resp->fh, &argp->fh); nfserr = nfsd_unlink(rqstp, &resp->fh, -S_IFDIR, argp->name, argp->len); fh_unlock(&resp->fh); RETURN_STATUS(nfserr); } /* * Remove a directory */ static __be32 nfsd3_proc_rmdir(struct svc_rqst *rqstp, struct nfsd3_diropargs *argp, struct nfsd3_attrstat *resp) { __be32 nfserr; dprintk("nfsd: RMDIR(3) %s %.*s\n", SVCFH_fmt(&argp->fh), argp->len, argp->name); fh_copy(&resp->fh, &argp->fh); nfserr = nfsd_unlink(rqstp, &resp->fh, S_IFDIR, argp->name, argp->len); fh_unlock(&resp->fh); RETURN_STATUS(nfserr); } static __be32 nfsd3_proc_rename(struct svc_rqst *rqstp, struct nfsd3_renameargs *argp, struct nfsd3_renameres *resp) { __be32 nfserr; dprintk("nfsd: RENAME(3) %s %.*s ->\n", SVCFH_fmt(&argp->ffh), argp->flen, argp->fname); dprintk("nfsd: -> %s %.*s\n", SVCFH_fmt(&argp->tfh), argp->tlen, argp->tname); fh_copy(&resp->ffh, &argp->ffh); fh_copy(&resp->tfh, &argp->tfh); nfserr = nfsd_rename(rqstp, &resp->ffh, argp->fname, argp->flen, &resp->tfh, argp->tname, argp->tlen); RETURN_STATUS(nfserr); } static __be32 nfsd3_proc_link(struct svc_rqst *rqstp, struct nfsd3_linkargs *argp, struct nfsd3_linkres *resp) { __be32 nfserr; dprintk("nfsd: LINK(3) %s ->\n", SVCFH_fmt(&argp->ffh)); dprintk("nfsd: -> %s %.*s\n", SVCFH_fmt(&argp->tfh), argp->tlen, argp->tname); fh_copy(&resp->fh, &argp->ffh); fh_copy(&resp->tfh, &argp->tfh); nfserr = nfsd_link(rqstp, &resp->tfh, argp->tname, argp->tlen, &resp->fh); RETURN_STATUS(nfserr); } /* * Read a portion of a directory. 
*/ static __be32 nfsd3_proc_readdir(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp, struct nfsd3_readdirres *resp) { __be32 nfserr; int count; dprintk("nfsd: READDIR(3) %s %d bytes at %d\n", SVCFH_fmt(&argp->fh), argp->count, (u32) argp->cookie); /* Make sure we've room for the NULL ptr & eof flag, and shrink to * client read size */ count = (argp->count >> 2) - 2; /* Read directory and encode entries on the fly */ fh_copy(&resp->fh, &argp->fh); resp->buflen = count; resp->common.err = nfs_ok; resp->buffer = argp->buffer; resp->rqstp = rqstp; nfserr = nfsd_readdir(rqstp, &resp->fh, (loff_t*) &argp->cookie, &resp->common, nfs3svc_encode_entry); memcpy(resp->verf, argp->verf, 8); resp->count = resp->buffer - argp->buffer; if (resp->offset) xdr_encode_hyper(resp->offset, argp->cookie); RETURN_STATUS(nfserr); } /* * Read a portion of a directory, including file handles and attrs. * For now, we choose to ignore the dircount parameter. */ static __be32 nfsd3_proc_readdirplus(struct svc_rqst *rqstp, struct nfsd3_readdirargs *argp, struct nfsd3_readdirres *resp) { __be32 nfserr; int count = 0; loff_t offset; int i; caddr_t page_addr = NULL; dprintk("nfsd: READDIR+(3) %s %d bytes at %d\n", SVCFH_fmt(&argp->fh), argp->count, (u32) argp->cookie); /* Convert byte count to number of words (i.e. 
>> 2), * and reserve room for the NULL ptr & eof flag (-2 words) */ resp->count = (argp->count >> 2) - 2; /* Read directory and encode entries on the fly */ fh_copy(&resp->fh, &argp->fh); resp->common.err = nfs_ok; resp->buffer = argp->buffer; resp->buflen = resp->count; resp->rqstp = rqstp; offset = argp->cookie; nfserr = nfsd_readdir(rqstp, &resp->fh, &offset, &resp->common, nfs3svc_encode_entry_plus); memcpy(resp->verf, argp->verf, 8); for (i=1; i<rqstp->rq_resused ; i++) { page_addr = page_address(rqstp->rq_respages[i]); if (((caddr_t)resp->buffer >= page_addr) && ((caddr_t)resp->buffer < page_addr + PAGE_SIZE)) { count += (caddr_t)resp->buffer - page_addr; break; } count += PAGE_SIZE; } resp->count = count >> 2; if (resp->offset) { if (unlikely(resp->offset1)) { /* we ended up with offset on a page boundary */ *resp->offset = htonl(offset >> 32); *resp->offset1 = htonl(offset & 0xffffffff); resp->offset1 = NULL; } else { xdr_encode_hyper(resp->offset, offset); } } RETURN_STATUS(nfserr); } /* * Get file system stats */ static __be32 nfsd3_proc_fsstat(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, struct nfsd3_fsstatres *resp) { __be32 nfserr; dprintk("nfsd: FSSTAT(3) %s\n", SVCFH_fmt(&argp->fh)); nfserr = nfsd_statfs(rqstp, &argp->fh, &resp->stats, 0); fh_put(&argp->fh); RETURN_STATUS(nfserr); } /* * Get file system info */ static __be32 nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, struct nfsd3_fsinfores *resp) { __be32 nfserr; u32 max_blocksize = svc_max_payload(rqstp); dprintk("nfsd: FSINFO(3) %s\n", SVCFH_fmt(&argp->fh)); resp->f_rtmax = max_blocksize; resp->f_rtpref = max_blocksize; resp->f_rtmult = PAGE_SIZE; resp->f_wtmax = max_blocksize; resp->f_wtpref = max_blocksize; resp->f_wtmult = PAGE_SIZE; resp->f_dtpref = PAGE_SIZE; resp->f_maxfilesize = ~(u32) 0; resp->f_properties = NFS3_FSF_DEFAULT; nfserr = fh_verify(rqstp, &argp->fh, 0, NFSD_MAY_NOP | NFSD_MAY_BYPASS_GSS_ON_ROOT); /* Check special features of the file system. 
May request * different read/write sizes for file systems known to have * problems with large blocks */ if (nfserr == 0) { struct super_block *sb = argp->fh.fh_dentry->d_inode->i_sb; /* Note that we don't care for remote fs's here */ if (sb->s_magic == MSDOS_SUPER_MAGIC) { resp->f_properties = NFS3_FSF_BILLYBOY; } resp->f_maxfilesize = sb->s_maxbytes; } fh_put(&argp->fh); RETURN_STATUS(nfserr); } /* * Get pathconf info for the specified file */ static __be32 nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle *argp, struct nfsd3_pathconfres *resp) { __be32 nfserr; dprintk("nfsd: PATHCONF(3) %s\n", SVCFH_fmt(&argp->fh)); /* Set default pathconf */ resp->p_link_max = 255; /* at least */ resp->p_name_max = 255; /* at least */ resp->p_no_trunc = 0; resp->p_chown_restricted = 1; resp->p_case_insensitive = 0; resp->p_case_preserving = 1; nfserr = fh_verify(rqstp, &argp->fh, 0, NFSD_MAY_NOP); if (nfserr == 0) { struct super_block *sb = argp->fh.fh_dentry->d_inode->i_sb; /* Note that we don't care for remote fs's here */ switch (sb->s_magic) { case EXT2_SUPER_MAGIC: resp->p_link_max = EXT2_LINK_MAX; resp->p_name_max = EXT2_NAME_LEN; break; case MSDOS_SUPER_MAGIC: resp->p_case_insensitive = 1; resp->p_case_preserving = 0; break; } } fh_put(&argp->fh); RETURN_STATUS(nfserr); } /* * Commit a file (range) to stable storage. */ static __be32 nfsd3_proc_commit(struct svc_rqst * rqstp, struct nfsd3_commitargs *argp, struct nfsd3_commitres *resp) { __be32 nfserr; dprintk("nfsd: COMMIT(3) %s %u@%Lu\n", SVCFH_fmt(&argp->fh), argp->count, (unsigned long long) argp->offset); if (argp->offset > NFS_OFFSET_MAX) RETURN_STATUS(nfserr_inval); fh_copy(&resp->fh, &argp->fh); nfserr = nfsd_commit(rqstp, &resp->fh, argp->offset, argp->count); RETURN_STATUS(nfserr); } /* * NFSv3 Server procedures. * Only the results of non-idempotent operations are cached. 
*/ #define nfs3svc_decode_fhandleargs nfs3svc_decode_fhandle #define nfs3svc_encode_attrstatres nfs3svc_encode_attrstat #define nfs3svc_encode_wccstatres nfs3svc_encode_wccstat #define nfsd3_mkdirargs nfsd3_createargs #define nfsd3_readdirplusargs nfsd3_readdirargs #define nfsd3_fhandleargs nfsd_fhandle #define nfsd3_fhandleres nfsd3_attrstat #define nfsd3_attrstatres nfsd3_attrstat #define nfsd3_wccstatres nfsd3_attrstat #define nfsd3_createres nfsd3_diropres #define nfsd3_voidres nfsd3_voidargs struct nfsd3_voidargs { int dummy; }; #define PROC(name, argt, rest, relt, cache, respsize) \ { (svc_procfunc) nfsd3_proc_##name, \ (kxdrproc_t) nfs3svc_decode_##argt##args, \ (kxdrproc_t) nfs3svc_encode_##rest##res, \ (kxdrproc_t) nfs3svc_release_##relt, \ sizeof(struct nfsd3_##argt##args), \ sizeof(struct nfsd3_##rest##res), \ 0, \ cache, \ respsize, \ } #define ST 1 /* status*/ #define FH 17 /* filehandle with length */ #define AT 21 /* attributes */ #define pAT (1+AT) /* post attributes - conditional */ #define WC (7+pAT) /* WCC attributes */ static struct svc_procedure nfsd_procedures3[22] = { [NFS3PROC_NULL] = { .pc_func = (svc_procfunc) nfsd3_proc_null, .pc_encode = (kxdrproc_t) nfs3svc_encode_voidres, .pc_argsize = sizeof(struct nfsd3_voidargs), .pc_ressize = sizeof(struct nfsd3_voidres), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST, }, [NFS3PROC_GETATTR] = { .pc_func = (svc_procfunc) nfsd3_proc_getattr, .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_attrstatres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_fhandleargs), .pc_ressize = sizeof(struct nfsd3_attrstatres), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST+AT, }, [NFS3PROC_SETATTR] = { .pc_func = (svc_procfunc) nfsd3_proc_setattr, .pc_decode = (kxdrproc_t) nfs3svc_decode_sattrargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct 
nfsd3_sattrargs), .pc_ressize = sizeof(struct nfsd3_wccstatres), .pc_cachetype = RC_REPLBUFF, .pc_xdrressize = ST+WC, }, [NFS3PROC_LOOKUP] = { .pc_func = (svc_procfunc) nfsd3_proc_lookup, .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_diropres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2, .pc_argsize = sizeof(struct nfsd3_diropargs), .pc_ressize = sizeof(struct nfsd3_diropres), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST+FH+pAT+pAT, }, [NFS3PROC_ACCESS] = { .pc_func = (svc_procfunc) nfsd3_proc_access, .pc_decode = (kxdrproc_t) nfs3svc_decode_accessargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_accessres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_accessargs), .pc_ressize = sizeof(struct nfsd3_accessres), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST+pAT+1, }, [NFS3PROC_READLINK] = { .pc_func = (svc_procfunc) nfsd3_proc_readlink, .pc_decode = (kxdrproc_t) nfs3svc_decode_readlinkargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_readlinkres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_readlinkargs), .pc_ressize = sizeof(struct nfsd3_readlinkres), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST+pAT+1+NFS3_MAXPATHLEN/4, }, [NFS3PROC_READ] = { .pc_func = (svc_procfunc) nfsd3_proc_read, .pc_decode = (kxdrproc_t) nfs3svc_decode_readargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_readres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_readargs), .pc_ressize = sizeof(struct nfsd3_readres), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST+pAT+4+NFSSVC_MAXBLKSIZE/4, }, [NFS3PROC_WRITE] = { .pc_func = (svc_procfunc) nfsd3_proc_write, .pc_decode = (kxdrproc_t) nfs3svc_decode_writeargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_writeres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_writeargs), .pc_ressize = sizeof(struct nfsd3_writeres), .pc_cachetype = 
RC_REPLBUFF, .pc_xdrressize = ST+WC+4, }, [NFS3PROC_CREATE] = { .pc_func = (svc_procfunc) nfsd3_proc_create, .pc_decode = (kxdrproc_t) nfs3svc_decode_createargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_createres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2, .pc_argsize = sizeof(struct nfsd3_createargs), .pc_ressize = sizeof(struct nfsd3_createres), .pc_cachetype = RC_REPLBUFF, .pc_xdrressize = ST+(1+FH+pAT)+WC, }, [NFS3PROC_MKDIR] = { .pc_func = (svc_procfunc) nfsd3_proc_mkdir, .pc_decode = (kxdrproc_t) nfs3svc_decode_mkdirargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_createres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2, .pc_argsize = sizeof(struct nfsd3_mkdirargs), .pc_ressize = sizeof(struct nfsd3_createres), .pc_cachetype = RC_REPLBUFF, .pc_xdrressize = ST+(1+FH+pAT)+WC, }, [NFS3PROC_SYMLINK] = { .pc_func = (svc_procfunc) nfsd3_proc_symlink, .pc_decode = (kxdrproc_t) nfs3svc_decode_symlinkargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_createres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2, .pc_argsize = sizeof(struct nfsd3_symlinkargs), .pc_ressize = sizeof(struct nfsd3_createres), .pc_cachetype = RC_REPLBUFF, .pc_xdrressize = ST+(1+FH+pAT)+WC, }, [NFS3PROC_MKNOD] = { .pc_func = (svc_procfunc) nfsd3_proc_mknod, .pc_decode = (kxdrproc_t) nfs3svc_decode_mknodargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_createres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2, .pc_argsize = sizeof(struct nfsd3_mknodargs), .pc_ressize = sizeof(struct nfsd3_createres), .pc_cachetype = RC_REPLBUFF, .pc_xdrressize = ST+(1+FH+pAT)+WC, }, [NFS3PROC_REMOVE] = { .pc_func = (svc_procfunc) nfsd3_proc_remove, .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_diropargs), .pc_ressize = sizeof(struct nfsd3_wccstatres), .pc_cachetype = RC_REPLBUFF, .pc_xdrressize = ST+WC, }, [NFS3PROC_RMDIR] = { .pc_func = (svc_procfunc) 
nfsd3_proc_rmdir, .pc_decode = (kxdrproc_t) nfs3svc_decode_diropargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_wccstatres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_diropargs), .pc_ressize = sizeof(struct nfsd3_wccstatres), .pc_cachetype = RC_REPLBUFF, .pc_xdrressize = ST+WC, }, [NFS3PROC_RENAME] = { .pc_func = (svc_procfunc) nfsd3_proc_rename, .pc_decode = (kxdrproc_t) nfs3svc_decode_renameargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_renameres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2, .pc_argsize = sizeof(struct nfsd3_renameargs), .pc_ressize = sizeof(struct nfsd3_renameres), .pc_cachetype = RC_REPLBUFF, .pc_xdrressize = ST+WC+WC, }, [NFS3PROC_LINK] = { .pc_func = (svc_procfunc) nfsd3_proc_link, .pc_decode = (kxdrproc_t) nfs3svc_decode_linkargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_linkres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle2, .pc_argsize = sizeof(struct nfsd3_linkargs), .pc_ressize = sizeof(struct nfsd3_linkres), .pc_cachetype = RC_REPLBUFF, .pc_xdrressize = ST+pAT+WC, }, [NFS3PROC_READDIR] = { .pc_func = (svc_procfunc) nfsd3_proc_readdir, .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_readdirargs), .pc_ressize = sizeof(struct nfsd3_readdirres), .pc_cachetype = RC_NOCACHE, }, [NFS3PROC_READDIRPLUS] = { .pc_func = (svc_procfunc) nfsd3_proc_readdirplus, .pc_decode = (kxdrproc_t) nfs3svc_decode_readdirplusargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_readdirres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_readdirplusargs), .pc_ressize = sizeof(struct nfsd3_readdirres), .pc_cachetype = RC_NOCACHE, }, [NFS3PROC_FSSTAT] = { .pc_func = (svc_procfunc) nfsd3_proc_fsstat, .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_fsstatres, .pc_argsize = sizeof(struct 
nfsd3_fhandleargs), .pc_ressize = sizeof(struct nfsd3_fsstatres), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST+pAT+2*6+1, }, [NFS3PROC_FSINFO] = { .pc_func = (svc_procfunc) nfsd3_proc_fsinfo, .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_fsinfores, .pc_argsize = sizeof(struct nfsd3_fhandleargs), .pc_ressize = sizeof(struct nfsd3_fsinfores), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST+pAT+12, }, [NFS3PROC_PATHCONF] = { .pc_func = (svc_procfunc) nfsd3_proc_pathconf, .pc_decode = (kxdrproc_t) nfs3svc_decode_fhandleargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_pathconfres, .pc_argsize = sizeof(struct nfsd3_fhandleargs), .pc_ressize = sizeof(struct nfsd3_pathconfres), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST+pAT+6, }, [NFS3PROC_COMMIT] = { .pc_func = (svc_procfunc) nfsd3_proc_commit, .pc_decode = (kxdrproc_t) nfs3svc_decode_commitargs, .pc_encode = (kxdrproc_t) nfs3svc_encode_commitres, .pc_release = (kxdrproc_t) nfs3svc_release_fhandle, .pc_argsize = sizeof(struct nfsd3_commitargs), .pc_ressize = sizeof(struct nfsd3_commitres), .pc_cachetype = RC_NOCACHE, .pc_xdrressize = ST+WC+2, }, }; struct svc_version nfsd_version3 = { .vs_vers = 3, .vs_nproc = 22, .vs_proc = nfsd_procedures3, .vs_dispatch = nfsd_dispatch, .vs_xdrsize = NFS3_SVC_XDRSIZE, };
gpl-2.0
robacklin/omap-android
net/tipc/netlink.c
9756
3475
/* * net/tipc/netlink.c: TIPC configuration handling * * Copyright (c) 2005-2006, Ericsson AB * Copyright (c) 2005-2007, Wind River Systems * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the names of the copyright holders nor the names of its * contributors may be used to endorse or promote products derived from * this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGE. 
*/ #include "core.h" #include "config.h" #include <net/genetlink.h> static int handle_cmd(struct sk_buff *skb, struct genl_info *info) { struct sk_buff *rep_buf; struct nlmsghdr *rep_nlh; struct nlmsghdr *req_nlh = info->nlhdr; struct tipc_genlmsghdr *req_userhdr = info->userhdr; int hdr_space = NLMSG_SPACE(GENL_HDRLEN + TIPC_GENL_HDRLEN); u16 cmd; if ((req_userhdr->cmd & 0xC000) && (!capable(CAP_NET_ADMIN))) cmd = TIPC_CMD_NOT_NET_ADMIN; else cmd = req_userhdr->cmd; rep_buf = tipc_cfg_do_cmd(req_userhdr->dest, cmd, NLMSG_DATA(req_nlh) + GENL_HDRLEN + TIPC_GENL_HDRLEN, NLMSG_PAYLOAD(req_nlh, GENL_HDRLEN + TIPC_GENL_HDRLEN), hdr_space); if (rep_buf) { skb_push(rep_buf, hdr_space); rep_nlh = nlmsg_hdr(rep_buf); memcpy(rep_nlh, req_nlh, hdr_space); rep_nlh->nlmsg_len = rep_buf->len; genlmsg_unicast(&init_net, rep_buf, NETLINK_CB(skb).pid); } return 0; } static struct genl_family tipc_genl_family = { .id = GENL_ID_GENERATE, .name = TIPC_GENL_NAME, .version = TIPC_GENL_VERSION, .hdrsize = TIPC_GENL_HDRLEN, .maxattr = 0, }; static struct genl_ops tipc_genl_ops = { .cmd = TIPC_GENL_CMD, .doit = handle_cmd, }; static int tipc_genl_family_registered; int tipc_netlink_start(void) { int res; res = genl_register_family_with_ops(&tipc_genl_family, &tipc_genl_ops, 1); if (res) { err("Failed to register netlink interface\n"); return res; } tipc_genl_family_registered = 1; return 0; } void tipc_netlink_stop(void) { if (!tipc_genl_family_registered) return; genl_unregister_family(&tipc_genl_family); tipc_genl_family_registered = 0; }
gpl-2.0
Isopod/linux
drivers/cpufreq/cpufreq-dt-platdev.c
29
3616
/* * Copyright (C) 2016 Linaro. * Viresh Kumar <viresh.kumar@linaro.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/err.h> #include <linux/of.h> #include <linux/platform_device.h> #include "cpufreq-dt.h" static const struct of_device_id machines[] __initconst = { { .compatible = "allwinner,sun4i-a10", }, { .compatible = "allwinner,sun5i-a10s", }, { .compatible = "allwinner,sun5i-a13", }, { .compatible = "allwinner,sun5i-r8", }, { .compatible = "allwinner,sun6i-a31", }, { .compatible = "allwinner,sun6i-a31s", }, { .compatible = "allwinner,sun7i-a20", }, { .compatible = "allwinner,sun8i-a23", }, { .compatible = "allwinner,sun8i-a33", }, { .compatible = "allwinner,sun8i-a83t", }, { .compatible = "allwinner,sun8i-h3", }, { .compatible = "apm,xgene-shadowcat", }, { .compatible = "arm,integrator-ap", }, { .compatible = "arm,integrator-cp", }, { .compatible = "hisilicon,hi3660", }, { .compatible = "hisilicon,hi6220", }, { .compatible = "fsl,imx27", }, { .compatible = "fsl,imx51", }, { .compatible = "fsl,imx53", }, { .compatible = "fsl,imx7d", }, { .compatible = "marvell,berlin", }, { .compatible = "marvell,pxa250", }, { .compatible = "marvell,pxa270", }, { .compatible = "samsung,exynos3250", }, { .compatible = "samsung,exynos4210", }, { .compatible = "samsung,exynos4212", }, { .compatible = "samsung,exynos4412", }, { .compatible = "samsung,exynos5250", }, #ifndef CONFIG_BL_SWITCHER { .compatible = "samsung,exynos5420", }, { .compatible = "samsung,exynos5433", }, { .compatible = "samsung,exynos5800", }, #endif { .compatible = "renesas,emev2", }, { .compatible = "renesas,r7s72100", }, { .compatible = "renesas,r8a73a4", }, { .compatible = "renesas,r8a7740", }, { .compatible = "renesas,r8a7743", }, { .compatible = "renesas,r8a7745", }, { .compatible = "renesas,r8a7778", }, { .compatible = "renesas,r8a7779", }, { 
.compatible = "renesas,r8a7790", }, { .compatible = "renesas,r8a7791", }, { .compatible = "renesas,r8a7792", }, { .compatible = "renesas,r8a7793", }, { .compatible = "renesas,r8a7794", }, { .compatible = "renesas,sh73a0", }, { .compatible = "rockchip,rk2928", }, { .compatible = "rockchip,rk3036", }, { .compatible = "rockchip,rk3066a", }, { .compatible = "rockchip,rk3066b", }, { .compatible = "rockchip,rk3188", }, { .compatible = "rockchip,rk3228", }, { .compatible = "rockchip,rk3288", }, { .compatible = "rockchip,rk3366", }, { .compatible = "rockchip,rk3368", }, { .compatible = "rockchip,rk3399", }, { .compatible = "sigma,tango4" }, { .compatible = "socionext,uniphier-pro5", }, { .compatible = "socionext,uniphier-pxs2", }, { .compatible = "socionext,uniphier-ld6b", }, { .compatible = "socionext,uniphier-ld11", }, { .compatible = "socionext,uniphier-ld20", }, { .compatible = "ti,omap2", }, { .compatible = "ti,omap3", }, { .compatible = "ti,omap4", }, { .compatible = "ti,omap5", }, { .compatible = "xlnx,zynq-7000", }, { .compatible = "zte,zx296718", }, { } }; static int __init cpufreq_dt_platdev_init(void) { struct device_node *np = of_find_node_by_path("/"); const struct of_device_id *match; if (!np) return -ENODEV; match = of_match_node(machines, np); of_node_put(np); if (!match) return -ENODEV; return PTR_ERR_OR_ZERO(platform_device_register_data(NULL, "cpufreq-dt", -1, match->data, sizeof(struct cpufreq_dt_platform_data))); } device_initcall(cpufreq_dt_platdev_init);
gpl-2.0
sleshepic/epic_touch_kernel
block/vr-iosched.c
29
10039
/* * V(R) I/O Scheduler * * Copyright (C) 2007 Aaron Carroll <aaronc@gelato.unsw.edu.au> * * * The algorithm: * * The next request is decided based on its distance from the last * request, with a multiplicative penalty of `rev_penalty' applied * for reversing the head direction. A rev_penalty of 1 means SSTF * behaviour. As this variable is increased, the algorithm approaches * pure SCAN. Setting rev_penalty to 0 forces SCAN. * * Async and synch requests are not treated seperately. Instead we * rely on deadlines to ensure fairness. * */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/blkdev.h> #include <linux/elevator.h> #include <linux/bio.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/compiler.h> #include <linux/rbtree.h> #include <linux/version.h> #include <asm/div64.h> enum vr_data_dir { ASYNC, SYNC, }; enum vr_head_dir { FORWARD, BACKWARD, }; static const int sync_expire = HZ / 2; /* max time before a sync is submitted. */ static const int async_expire = 5 * HZ; /* ditto for async, these limits are SOFT! 
*/ static const int fifo_batch = 1; static const int rev_penalty = 1; /* penalty for reversing head direction */ struct vr_data { struct rb_root sort_list; struct list_head fifo_list[2]; struct request *next_rq; struct request *prev_rq; unsigned int nbatched; sector_t last_sector; /* head position */ int head_dir; /* tunables */ int fifo_expire[2]; int fifo_batch; int rev_penalty; }; static void vr_move_request(struct vr_data *, struct request *); static inline struct vr_data * vr_get_data(struct request_queue *q) { return q->elevator->elevator_data; } static void vr_add_rq_rb(struct vr_data *vd, struct request *rq) { struct request *alias = elv_rb_add(&vd->sort_list, rq); if (unlikely(alias)) { vr_move_request(vd, alias); alias = elv_rb_add(&vd->sort_list, rq); BUG_ON(alias); } if (blk_rq_pos(rq) >= vd->last_sector) { if (!vd->next_rq || blk_rq_pos(vd->next_rq) > blk_rq_pos(rq)) vd->next_rq = rq; } else { if (!vd->prev_rq || blk_rq_pos(vd->prev_rq) < blk_rq_pos(rq)) vd->prev_rq = rq; } BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq); BUG_ON(vd->next_rq && vd->prev_rq && blk_rq_pos(vd->next_rq) < blk_rq_pos(vd->prev_rq)); } static void vr_del_rq_rb(struct vr_data *vd, struct request *rq) { /* * We might be deleting our cached next request. * If so, find its sucessor. */ if (vd->next_rq == rq) vd->next_rq = elv_rb_latter_request(NULL, rq); else if (vd->prev_rq == rq) vd->prev_rq = elv_rb_former_request(NULL, rq); BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq); BUG_ON(vd->next_rq && vd->prev_rq && blk_rq_pos(vd->next_rq) < blk_rq_pos(vd->prev_rq)); elv_rb_del(&vd->sort_list, rq); } /* * add rq to rbtree and fifo */ static void vr_add_request(struct request_queue *q, struct request *rq) { struct vr_data *vd = vr_get_data(q); const int dir = rq_is_sync(rq); vr_add_rq_rb(vd, rq); if (vd->fifo_expire[dir]) { rq_set_fifo_time(rq, jiffies + vd->fifo_expire[dir]); list_add_tail(&rq->queuelist, &vd->fifo_list[dir]); } } /* * remove rq from rbtree and fifo. 
*/ static void vr_remove_request(struct request_queue *q, struct request *rq) { struct vr_data *vd = vr_get_data(q); rq_fifo_clear(rq); vr_del_rq_rb(vd, rq); } static int vr_merge(struct request_queue *q, struct request **rqp, struct bio *bio) { sector_t sector = bio->bi_sector + bio_sectors(bio); struct vr_data *vd = vr_get_data(q); struct request *rq = elv_rb_find(&vd->sort_list, sector); if (rq && elv_rq_merge_ok(rq, bio)) { *rqp = rq; return ELEVATOR_FRONT_MERGE; } return ELEVATOR_NO_MERGE; } static void vr_merged_request(struct request_queue *q, struct request *req, int type) { struct vr_data *vd = vr_get_data(q); /* * if the merge was a front merge, we need to reposition request */ if (type == ELEVATOR_FRONT_MERGE) { vr_del_rq_rb(vd, req); vr_add_rq_rb(vd, req); } } static void vr_merged_requests(struct request_queue *q, struct request *rq, struct request *next) { /* * if next expires before rq, assign its expire time to rq * and move into next position (next will be deleted) in fifo */ if (!list_empty(&rq->queuelist) && !list_empty(&next->queuelist)) { if (time_before(rq_fifo_time(next), rq_fifo_time(rq))) { list_move(&rq->queuelist, &next->queuelist); rq_set_fifo_time(rq, rq_fifo_time(next)); } } vr_remove_request(q, next); } /* * move an entry to dispatch queue */ static void vr_move_request(struct vr_data *vd, struct request *rq) { struct request_queue *q = rq->q; if (blk_rq_pos(rq) > vd->last_sector) vd->head_dir = FORWARD; else vd->head_dir = BACKWARD; vd->last_sector = blk_rq_pos(rq); vd->next_rq = elv_rb_latter_request(NULL, rq); vd->prev_rq = elv_rb_former_request(NULL, rq); BUG_ON(vd->next_rq && vd->next_rq == vd->prev_rq); vr_remove_request(q, rq); elv_dispatch_add_tail(q, rq); vd->nbatched++; } /* * get the first expired request in direction ddir */ static struct request * vr_expired_request(struct vr_data *vd, int ddir) { struct request *rq; if (list_empty(&vd->fifo_list[ddir])) return NULL; rq = rq_entry_fifo(vd->fifo_list[ddir].next); if 
(time_after(jiffies, rq_fifo_time(rq))) return rq; return NULL; } /* * Returns the oldest expired request */ static struct request * vr_check_fifo(struct vr_data *vd) { struct request *rq_sync = vr_expired_request(vd, SYNC); struct request *rq_async = vr_expired_request(vd, ASYNC); if (rq_async && rq_sync) { if (time_after(rq_fifo_time(rq_async), rq_fifo_time(rq_sync))) return rq_sync; } else if (rq_sync) return rq_sync; return rq_async; } /* * Return the request with the lowest penalty */ static struct request * vr_choose_request(struct vr_data *vd) { int penalty = (vd->rev_penalty) ? : INT_MAX; struct request *next = vd->next_rq; struct request *prev = vd->prev_rq; sector_t next_pen, prev_pen; BUG_ON(prev && prev == next); if (!prev) return next; else if (!next) return prev; /* At this point both prev and next are defined and distinct */ next_pen = blk_rq_pos(next) - vd->last_sector; prev_pen = vd->last_sector - blk_rq_pos(prev); if (vd->head_dir == FORWARD) next_pen = do_div(next_pen, penalty); else prev_pen = do_div(prev_pen, penalty); if (next_pen <= prev_pen) return next; return prev; } static int vr_dispatch_requests(struct request_queue *q, int force) { struct vr_data *vd = vr_get_data(q); struct request *rq = NULL; /* Check for and issue expired requests */ if (vd->nbatched > vd->fifo_batch) { vd->nbatched = 0; rq = vr_check_fifo(vd); } if (!rq) { rq = vr_choose_request(vd); if (!rq) return 0; } vr_move_request(vd, rq); return 1; } #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38) static int vr_queue_empty(struct request_queue *q) { struct vr_data *vd = vr_get_data(q); return RB_EMPTY_ROOT(&vd->sort_list); } #endif static void vr_exit_queue(struct elevator_queue *e) { struct vr_data *vd = e->elevator_data; BUG_ON(!RB_EMPTY_ROOT(&vd->sort_list)); kfree(vd); } /* * initialize elevator private data (vr_data). 
*/ static void *vr_init_queue(struct request_queue *q) { struct vr_data *vd; vd = kmalloc_node(sizeof(*vd), GFP_KERNEL | __GFP_ZERO, q->node); if (!vd) return NULL; INIT_LIST_HEAD(&vd->fifo_list[SYNC]); INIT_LIST_HEAD(&vd->fifo_list[ASYNC]); vd->sort_list = RB_ROOT; vd->fifo_expire[SYNC] = sync_expire; vd->fifo_expire[ASYNC] = async_expire; vd->fifo_batch = fifo_batch; vd->rev_penalty = rev_penalty; return vd; } /* * sysfs parts below */ static ssize_t vr_var_show(int var, char *page) { return sprintf(page, "%d\n", var); } static ssize_t vr_var_store(int *var, const char *page, size_t count) { *var = simple_strtol(page, NULL, 10); return count; } #define SHOW_FUNCTION(__FUNC, __VAR, __CONV) \ static ssize_t __FUNC(struct elevator_queue *e, char *page) \ { \ struct vr_data *vd = e->elevator_data; \ int __data = __VAR; \ if (__CONV) \ __data = jiffies_to_msecs(__data); \ return vr_var_show(__data, (page)); \ } SHOW_FUNCTION(vr_sync_expire_show, vd->fifo_expire[SYNC], 1); SHOW_FUNCTION(vr_async_expire_show, vd->fifo_expire[ASYNC], 1); SHOW_FUNCTION(vr_fifo_batch_show, vd->fifo_batch, 0); SHOW_FUNCTION(vr_rev_penalty_show, vd->rev_penalty, 0); #undef SHOW_FUNCTION #define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \ static ssize_t __FUNC(struct elevator_queue *e, const char *page, size_t count) \ { \ struct vr_data *vd = e->elevator_data; \ int __data; \ int ret = vr_var_store(&__data, (page), count); \ if (__data < (MIN)) \ __data = (MIN); \ else if (__data > (MAX)) \ __data = (MAX); \ if (__CONV) \ *(__PTR) = msecs_to_jiffies(__data); \ else \ *(__PTR) = __data; \ return ret; \ } STORE_FUNCTION(vr_sync_expire_store, &vd->fifo_expire[SYNC], 0, INT_MAX, 1); STORE_FUNCTION(vr_async_expire_store, &vd->fifo_expire[ASYNC], 0, INT_MAX, 1); STORE_FUNCTION(vr_fifo_batch_store, &vd->fifo_batch, 0, INT_MAX, 0); STORE_FUNCTION(vr_rev_penalty_store, &vd->rev_penalty, 0, INT_MAX, 0); #undef STORE_FUNCTION #define DD_ATTR(name) \ __ATTR(name, S_IRUGO|S_IWUSR, vr_##name##_show, 
\ vr_##name##_store) static struct elv_fs_entry vr_attrs[] = { DD_ATTR(sync_expire), DD_ATTR(async_expire), DD_ATTR(fifo_batch), DD_ATTR(rev_penalty), __ATTR_NULL }; static struct elevator_type iosched_vr = { .ops = { .elevator_merge_fn = vr_merge, .elevator_merged_fn = vr_merged_request, .elevator_merge_req_fn = vr_merged_requests, .elevator_dispatch_fn = vr_dispatch_requests, .elevator_add_req_fn = vr_add_request, #if LINUX_VERSION_CODE <= KERNEL_VERSION(2,6,38) .elevator_queue_empty_fn = vr_queue_empty, #endif .elevator_former_req_fn = elv_rb_former_request, .elevator_latter_req_fn = elv_rb_latter_request, .elevator_init_fn = vr_init_queue, .elevator_exit_fn = vr_exit_queue, }, .elevator_attrs = vr_attrs, .elevator_name = "vr", .elevator_owner = THIS_MODULE, }; static int __init vr_init(void) { elv_register(&iosched_vr); return 0; } static void __exit vr_exit(void) { elv_unregister(&iosched_vr); } module_init(vr_init); module_exit(vr_exit); MODULE_AUTHOR("Aaron Carroll"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("V(R) IO scheduler");
gpl-2.0
jedis/jedioutcast
utils/tgajpg/jdatadst.c
29
5261
/*
 * jdatadst.c
 *
 * Copyright (C) 1994, Thomas G. Lane.
 * This file is part of the Independent JPEG Group's software.
 * For conditions of distribution and use, see the accompanying README file.
 *
 * Compression data destination manager for emitting JPEG data to a stdio
 * stream.  Sufficient for most applications; replace with a custom
 * destination manager for anything fancier.
 * IMPORTANT: we assume that fwrite() will correctly transcribe an array of
 * JOCTETs into 8-bit-wide elements on external storage.  If char is wider
 * than 8 bits on your machine, you may need to do some tweaking.
 */

/* this is not a core library module, so it doesn't define JPEG_INTERNALS */
#include "jinclude.h"
#include "jpeglib.h"
#include "jerror.h"


/* Expanded data destination object for stdio output */

typedef struct {
  struct jpeg_destination_mgr pub;	/* public fields */

  FILE * outfile;		/* target stream */
  JOCTET * buffer;		/* start of buffer */
} my_destination_mgr;

typedef my_destination_mgr * my_dest_ptr;

#define OUTPUT_BUF_SIZE  4096	/* choose an efficiently fwrite'able size */


/*
 * Initialize destination --- called by jpeg_start_compress
 * before any data is actually written.
 *
 * Allocates the output buffer from the IMAGE pool, so it is released
 * automatically when the library is done with the current image.
 */

METHODDEF void
init_destination (j_compress_ptr cinfo)
{
  my_dest_ptr self = (my_dest_ptr) cinfo->dest;

  self->buffer = (JOCTET *)
      (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_IMAGE,
				  OUTPUT_BUF_SIZE * SIZEOF(JOCTET));

  self->pub.next_output_byte = self->buffer;
  self->pub.free_in_buffer = OUTPUT_BUF_SIZE;
}


/*
 * Empty the output buffer --- called whenever buffer fills up.
 *
 * In typical applications, this should write the entire output buffer
 * (ignoring the current state of next_output_byte & free_in_buffer),
 * reset the pointer & count to the start of the buffer, and return TRUE
 * indicating that the buffer has been dumped.
 *
 * In applications that need to be able to suspend compression due to output
 * overrun, a FALSE return indicates that the buffer cannot be emptied now.
 * In this situation, the compressor will return to its caller (possibly with
 * an indication that it has not accepted all the supplied scanlines).  The
 * application should resume compression after it has made more room in the
 * output buffer.  Note that there are substantial restrictions on the use of
 * suspension --- see the documentation.
 *
 * When suspending, the compressor will back up to a convenient restart point
 * (typically the start of the current MCU). next_output_byte & free_in_buffer
 * indicate where the restart point will be if the current call returns FALSE.
 * Data beyond this point will be regenerated after resumption, so do not
 * write it out when emptying the buffer externally.
 *
 * This stdio implementation always dumps the whole buffer and never
 * suspends; a short write is treated as a fatal error via ERREXIT.
 */

METHODDEF boolean
empty_output_buffer (j_compress_ptr cinfo)
{
  my_dest_ptr self = (my_dest_ptr) cinfo->dest;
  size_t written;

  written = JFWRITE(self->outfile, self->buffer, OUTPUT_BUF_SIZE);
  if (written != (size_t) OUTPUT_BUF_SIZE)
    ERREXIT(cinfo, JERR_FILE_WRITE);

  self->pub.next_output_byte = self->buffer;
  self->pub.free_in_buffer = OUTPUT_BUF_SIZE;

  return TRUE;
}


/*
 * Terminate destination --- called by jpeg_finish_compress
 * after all data has been written.  Usually needs to flush buffer.
 *
 * NB: *not* called by jpeg_abort or jpeg_destroy; surrounding
 * application must deal with any cleanup that should happen even
 * for error exit.
 */

METHODDEF void
term_destination (j_compress_ptr cinfo)
{
  my_dest_ptr self = (my_dest_ptr) cinfo->dest;
  size_t remaining = OUTPUT_BUF_SIZE - self->pub.free_in_buffer;

  /* Write any data remaining in the buffer */
  if (remaining > 0) {
    if (JFWRITE(self->outfile, self->buffer, remaining) != remaining)
      ERREXIT(cinfo, JERR_FILE_WRITE);
  }
  fflush(self->outfile);
  /* Make sure we wrote the output file OK */
  if (ferror(self->outfile))
    ERREXIT(cinfo, JERR_FILE_WRITE);
}


/*
 * Prepare for output to a stdio stream.
 * The caller must have already opened the stream, and is responsible
 * for closing it after finishing compression.
 */

GLOBAL void
jpeg_stdio_dest (j_compress_ptr cinfo, FILE * outfile)
{
  my_dest_ptr self;

  /* The destination object is made permanent so that multiple JPEG images
   * can be written to the same file without re-executing jpeg_stdio_dest.
   * This makes it dangerous to use this manager and a different destination
   * manager serially with the same JPEG object, because their private object
   * sizes may be different.  Caveat programmer.
   */
  if (cinfo->dest == NULL) {	/* first time for this JPEG object? */
    cinfo->dest = (struct jpeg_destination_mgr *)
	(*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT,
				    SIZEOF(my_destination_mgr));
  }

  self = (my_dest_ptr) cinfo->dest;
  self->pub.init_destination = init_destination;
  self->pub.empty_output_buffer = empty_output_buffer;
  self->pub.term_destination = term_destination;
  self->outfile = outfile;
}
gpl-2.0
iwishiwasgay/starkissed-clone
arch/arm/mach-msm/clock-fsm9900.c
285
88263
/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/err.h> #include <linux/ctype.h> #include <linux/io.h> #include <linux/spinlock.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/iopoll.h> #include <linux/regulator/consumer.h> #include <linux/regulator/machine.h> #include <linux/regulator/rpm-smd-regulator.h> #include <soc/qcom/clock-local2.h> #include <soc/qcom/clock-pll.h> #include <soc/qcom/clock-rpm.h> #include <soc/qcom/clock-voter.h> #include <soc/qcom/clock-krait.h> #include <soc/qcom/socinfo.h> #include <soc/qcom/rpm-smd.h> #include "clock.h" enum { GCC_BASE, APCS_BASE, N_BASES, }; static void __iomem *virt_bases[N_BASES]; #define GCC_REG_BASE(x) (void __iomem *)(virt_bases[GCC_BASE] + (x)) #define APCS_REG_BASE(x) (void __iomem *)(virt_bases[APCS_BASE] + (x)) #define GPLL0_MODE 0x0000 #define GPLL0_L 0x0004 #define GPLL0_M 0x0008 #define GPLL0_N 0x000C #define GPLL0_USER_CTL 0x0010 #define GPLL0_CONFIG_CTL 0x0014 #define GPLL0_TEST_CTL 0x0018 #define GPLL0_STATUS 0x001C #define GPLL1_MODE 0x0040 #define GPLL1_L 0x0044 #define GPLL1_M 0x0048 #define GPLL1_N 0x004C #define GPLL1_USER_CTL 0x0050 #define GPLL1_CONFIG_CTL 0x0054 #define GPLL1_TEST_CTL 0x0058 #define GPLL1_STATUS 0x005C #define GPLL2_MODE 0x0080 #define GPLL2_L 0x0084 #define GPLL2_M 0x0088 #define GPLL2_N 0x008C #define GPLL2_USER_CTL 0x0090 #define GPLL2_CONFIG_CTL 0x0094 #define GPLL2_TEST_CTL 0x0098 #define GPLL2_STATUS 0x009C #define 
OCMEM_NOC_CFG_AHB_CBCR 0x0248 #define MMSS_NOC_CFG_AHB_CBCR 0x024C #define MMSS_VPU_MAPLE_SYS_NOC_AXI_CBCR 0x026C #define USB_HS_BCR 0x0480 #define USB_HS_SYSTEM_CBCR 0x0484 #define USB_HS_AHB_CBCR 0x0488 #define USB_HS_INACTIVITY_TIMERS_CBCR 0x048C #define USB_HS_SYSTEM_CMD_RCGR 0x0490 #define USB2A_PHY_BCR 0x04A8 #define USB2A_PHY_SLEEP_CBCR 0x04AC #define SDCC1_BCR 0x04C0 #define SDCC1_APPS_CMD_RCGR 0x04D0 #define SDCC1_APPS_CBCR 0x04C4 #define SDCC1_AHB_CBCR 0x04C8 #define SDCC2_BCR 0x0500 #define SDCC2_APPS_CMD_RCGR 0x0510 #define SDCC2_APPS_CBCR 0x0504 #define SDCC2_AHB_CBCR 0x0508 #define SDCC2_INACTIVITY_TIMERS_CBCR 0x050C #define BLSP1_BCR 0x05C0 #define BLSP1_AHB_CBCR 0x05C4 #define BLSP1_QUP1_BCR 0x0640 #define BLSP1_QUP1_SPI_APPS_CBCR 0x0644 #define BLSP1_QUP1_I2C_APPS_CBCR 0x0648 #define BLSP1_QUP1_I2C_APPS_CMD_RCGR 0x0660 #define BLSP1_QUP2_I2C_APPS_CMD_RCGR 0x06E0 #define BLSP1_QUP3_I2C_APPS_CMD_RCGR 0x0760 #define BLSP1_QUP4_I2C_APPS_CMD_RCGR 0x07E0 #define BLSP1_QUP5_I2C_APPS_CMD_RCGR 0x0860 #define BLSP1_QUP6_I2C_APPS_CMD_RCGR 0x08E0 #define BLSP2_QUP1_I2C_APPS_CMD_RCGR 0x09A0 #define BLSP2_QUP2_I2C_APPS_CMD_RCGR 0x0A20 #define BLSP2_QUP3_I2C_APPS_CMD_RCGR 0x0AA0 #define BLSP2_QUP4_I2C_APPS_CMD_RCGR 0x0B20 #define BLSP2_QUP5_I2C_APPS_CMD_RCGR 0x0BA0 #define BLSP2_QUP6_I2C_APPS_CMD_RCGR 0x0C20 #define BLSP1_QUP1_SPI_APPS_CMD_RCGR 0x064C #define BLSP1_UART1_BCR 0x0680 #define BLSP1_UART1_APPS_CBCR 0x0684 #define BLSP1_UART1_APPS_CMD_RCGR 0x068C #define BLSP1_QUP2_BCR 0x06C0 #define BLSP1_QUP2_SPI_APPS_CBCR 0x06C4 #define BLSP1_QUP2_I2C_APPS_CBCR 0x06C8 #define BLSP1_QUP2_SPI_APPS_CMD_RCGR 0x06CC #define BLSP1_UART2_BCR 0x0700 #define BLSP1_UART2_APPS_CBCR 0x0704 #define BLSP1_UART2_APPS_CMD_RCGR 0x070C #define BLSP1_QUP3_BCR 0x0740 #define BLSP1_QUP3_SPI_APPS_CBCR 0x0744 #define BLSP1_QUP3_I2C_APPS_CBCR 0x0748 #define BLSP1_QUP3_SPI_APPS_CMD_RCGR 0x074C #define BLSP1_UART3_BCR 0x0780 #define BLSP1_UART3_APPS_CBCR 0x0784 #define 
BLSP1_UART3_APPS_CMD_RCGR 0x078C #define BLSP1_QUP4_BCR 0x07C0 #define BLSP1_QUP4_SPI_APPS_CBCR 0x07C4 #define BLSP1_QUP4_I2C_APPS_CBCR 0x07C8 #define BLSP1_QUP4_SPI_APPS_CMD_RCGR 0x07CC #define BLSP1_UART4_BCR 0x0800 #define BLSP1_UART4_APPS_CBCR 0x0804 #define BLSP1_UART4_APPS_CMD_RCGR 0x080C #define BLSP1_QUP5_BCR 0x0840 #define BLSP1_QUP5_SPI_APPS_CBCR 0x0844 #define BLSP1_QUP5_I2C_APPS_CBCR 0x0848 #define BLSP1_QUP5_SPI_APPS_CMD_RCGR 0x084C #define BLSP1_UART5_BCR 0x0880 #define BLSP1_UART5_APPS_CBCR 0x0884 #define BLSP1_UART5_APPS_CMD_RCGR 0x088C #define BLSP1_UART5_APPS_CFG_RCGR 0x0890 #define BLSP1_QUP6_BCR 0x08C0 #define BLSP1_QUP6_SPI_APPS_CBCR 0x08C4 #define BLSP1_QUP6_I2C_APPS_CBCR 0x08C8 #define BLSP1_QUP6_SPI_APPS_CMD_RCGR 0x08CC #define BLSP1_UART6_BCR 0x0900 #define BLSP1_UART6_APPS_CBCR 0x0904 #define BLSP1_UART6_APPS_CMD_RCGR 0x090C #define BLSP2_BCR 0x0940 #define BLSP2_AHB_CBCR 0x0944 #define BLSP2_QUP1_BCR 0x0980 #define BLSP2_QUP1_SPI_APPS_CBCR 0x0984 #define BLSP2_QUP1_I2C_APPS_CBCR 0x0988 #define BLSP2_QUP1_SPI_APPS_CMD_RCGR 0x098C #define BLSP2_UART1_BCR 0x09C0 #define BLSP2_UART1_APPS_CBCR 0x09C4 #define BLSP2_UART1_APPS_CMD_RCGR 0x09CC #define BLSP2_QUP2_BCR 0x0A00 #define BLSP2_QUP2_SPI_APPS_CBCR 0x0A04 #define BLSP2_QUP2_I2C_APPS_CBCR 0x0A08 #define BLSP2_QUP2_SPI_APPS_CMD_RCGR 0x0A0C #define BLSP2_UART2_BCR 0x0A40 #define BLSP2_UART2_APPS_CBCR 0x0A44 #define BLSP2_UART2_APPS_CMD_RCGR 0x0A4C #define BLSP2_QUP3_BCR 0x0A80 #define BLSP2_QUP3_SPI_APPS_CBCR 0x0A84 #define BLSP2_QUP3_I2C_APPS_CBCR 0x0A88 #define BLSP2_QUP3_SPI_APPS_CMD_RCGR 0x0A8C #define BLSP2_UART3_BCR 0x0AC0 #define BLSP2_UART3_APPS_CBCR 0x0AC4 #define BLSP2_UART3_APPS_CMD_RCGR 0x0ACC #define BLSP2_QUP4_BCR 0x0B00 #define BLSP2_QUP4_SPI_APPS_CBCR 0x0B04 #define BLSP2_QUP4_I2C_APPS_CBCR 0x0B08 #define BLSP2_QUP4_SPI_APPS_CMD_RCGR 0x0B0C #define BLSP2_UART4_BCR 0x0B40 #define BLSP2_UART4_APPS_CBCR 0x0B44 #define BLSP2_UART4_APPS_CMD_RCGR 0x0B4C #define BLSP2_QUP5_BCR 0x0B80 
#define BLSP2_QUP5_SPI_APPS_CBCR 0x0B84 #define BLSP2_QUP5_I2C_APPS_CBCR 0x0B88 #define BLSP2_QUP5_SPI_APPS_CMD_RCGR 0x0B8C #define BLSP2_UART5_BCR 0x0BC0 #define BLSP2_UART5_APPS_CBCR 0x0BC4 #define BLSP2_UART5_APPS_CMD_RCGR 0x0BCC #define BLSP2_QUP6_BCR 0x0C00 #define BLSP2_QUP6_SPI_APPS_CBCR 0x0C04 #define BLSP2_QUP6_I2C_APPS_CBCR 0x0C08 #define BLSP2_QUP6_SPI_APPS_CMD_RCGR 0x0C0C #define BLSP2_UART6_BCR 0x0C40 #define BLSP2_UART6_APPS_CBCR 0x0C44 #define BLSP2_UART6_APPS_CMD_RCGR 0x0C4C #define PDM_BCR 0x0CC0 #define PDM_AHB_CBCR 0x0CC4 #define PDM2_CBCR 0x0CCC #define PDM2_CMD_RCGR 0x0CD0 #define PRNG_BCR 0x0D00 #define PRNG_AHB_CBCR 0x0D04 #define BAM_DMA_BCR 0x0D40 #define BAM_DMA_AHB_CBCR 0x0D44 #define BAM_DMA_INACTIVITY_TIMERS_CBCR 0x0D48 #define BOOT_ROM_AHB_CBCR 0x0E04 #define CE1_BCR 0x1040 #define CE1_CMD_RCGR 0x1050 #define CE1_CBCR 0x1044 #define CE1_AXI_CBCR 0x1048 #define CE1_AHB_CBCR 0x104C #define CE2_BCR 0x1080 #define CE2_CMD_RCGR 0x1090 #define CE2_CBCR 0x1084 #define CE2_AXI_CBCR 0x1088 #define CE2_AHB_CBCR 0x108C #define GCC_XO_DIV4_CBCR 0x10C8 #define APCS_GPLL_ENA_VOTE 0x1480 #define APCS_CLOCK_BRANCH_ENA_VOTE 0x1484 #define APCS_CLOCK_SLEEP_ENA_VOTE 0x1488 #define GCC_DEBUG_CLK_CTL 0x1880 #define CLOCK_FRQ_MEASURE_CTL 0x1884 #define CLOCK_FRQ_MEASURE_STATUS 0x1888 #define GCC_PLLTEST_PAD_CFG 0x188C #define GCC_GP1_CBCR 0x1900 #define GCC_GP1_CMD_RCGR 0x1904 #define GCC_GP2_CBCR 0x1940 #define GCC_GP2_CMD_RCGR 0x1944 #define GCC_GP3_CBCR 0x1980 #define GCC_GP3_CMD_RCGR 0x1984 #define GPLL4_MODE 0x1DC0 #define GPLL4_L 0x1DC4 #define GPLL4_M 0x1DC8 #define GPLL4_N 0x1DCC #define GPLL4_USER_CTL 0x1DD0 #define GPLL4_CONFIG_CTL 0x1DD4 #define GPLL4_TEST_CTL 0x1DD8 #define GPLL4_STATUS 0x1DDC #define PCIE_0_BCR 0x1AC0 #define PCIE_0_PHY_BCR 0x1B00 #define PCIE_0_CFG_AHB_CBCR 0x1B0C #define PCIE_0_PIPE_CBCR 0x1B14 #define PCIE_0_SLV_AXI_CBCR 0x1B04 #define PCIE_0_AUX_CBCR 0x1B10 #define PCIE_0_MSTR_AXI_CBCR 0x1B08 #define PCIE_0_PIPE_CMD_RCGR 
0x1B18 #define PCIE_0_AUX_CMD_RCGR 0x1B2C #define PCIE_1_BCR 0x1B40 #define PCIE_1_PHY_BCR 0x1B80 #define PCIE_1_CFG_AHB_CBCR 0x1B8C #define PCIE_1_PIPE_CBCR 0x1B94 #define PCIE_1_SLV_AXI_CBCR 0x1B84 #define PCIE_1_AUX_CBCR 0x1B90 #define PCIE_1_MSTR_AXI_CBCR 0x1B88 #define PCIE_1_PIPE_CMD_RCGR 0x1B98 #define PCIE_1_AUX_CMD_RCGR 0x1BAC #define CE3_BCR 0x1D00 #define CE3_CMD_RCGR 0x1D10 #define CE3_CBCR 0x1D04 #define CE3_AXI_CBCR 0x1D08 #define CE3_AHB_CBCR 0x1D0C #define PCIE_0_PHY_LDO_EN 0x1E00 #define PCIE_1_PHY_LDO_EN 0x1E04 #define EMAC_0_PHY_LDO_EN 0x1E08 #define EMAC_1_PHY_LDO_EN 0x1E0C #define CE4_BCR 0x2180 #define CE4_CMD_RCGR 0x2190 #define CE4_CBCR 0x2184 #define CE4_AXI_CBCR 0x2188 #define CE4_AHB_CBCR 0x218C #define CE5_BCR 0x21C0 #define CE5_CMD_RCGR 0x21D0 #define CE5_CBCR 0x21C4 #define CE5_AXI_CBCR 0x21C8 #define CE5_AHB_CBCR 0x21CC #define CE6_BCR 0x2200 #define CE6_CMD_RCGR 0x2210 #define CE6_CBCR 0x2204 #define CE6_AXI_CBCR 0x2208 #define CE6_AHB_CBCR 0x220C #define CE7_BCR 0x2240 #define CE7_CMD_RCGR 0x2250 #define CE7_CBCR 0x2244 #define CE7_AXI_CBCR 0x2248 #define CE7_AHB_CBCR 0x224C #define CE8_BCR 0x2280 #define CE8_CMD_RCGR 0x2290 #define CE8_CBCR 0x2284 #define CE8_AXI_CBCR 0x2288 #define CE8_AHB_CBCR 0x228C #define SYS_NOC_EMAC_AHB_CBCR 0x2580 #define SYS_NOC_EMAC_AHB_CMD_RCGR 0x2584 #define SYS_NOC_EMAC_AHB_CFG_RCGR 0x2588 #define EMAC_0_BCR 0x25C0 #define EMAC_0_AXI_CBCR 0x25C4 #define EMAC_0_AHB_CBCR 0x25C8 #define EMAC_0_SYS_25M_CBCR 0x25CC #define EMAC_0_TX_CBCR 0x25D0 #define EMAC_0_125M_CBCR 0x25D4 #define EMAC_0_RX_CBCR 0x25D8 #define EMAC_0_SYS_CBCR 0x25DC #define EMAC_0_SYS_25M_CMD_RCGR 0x25E0 #define EMAC_0_SYS_25M_CFG_RCGR 0x25E4 #define EMAC_0_TX_CMD_RCGR 0x2600 #define EMAC_0_TX_CFG_RCGR 0x2604 #define EMAC_0_TX_M 0x2608 #define EMAC_0_TX_N 0x260C #define EMAC_0_TX_D 0x2610 #define EMAC_0_125M_CMD_RCGR 0x2614 #define EMAC_0_125M_CFG_RCGR 0x2618 #define EMAC_1_BCR 0x2640 #define EMAC_1_AXI_CBCR 0x2644 #define 
EMAC_1_AHB_CBCR 0x2648 #define EMAC_1_SYS_25M_CBCR 0x264C #define EMAC_1_TX_CBCR 0x2650 #define EMAC_1_125M_CBCR 0x2654 #define EMAC_1_RX_CBCR 0x2658 #define EMAC_1_SYS_CBCR 0x265C #define EMAC_1_SYS_25M_CMD_RCGR 0x2660 #define EMAC_1_SYS_25M_CFG_RCGR 0x2664 #define EMAC_1_TX_CMD_RCGR 0x2680 #define EMAC_1_TX_CFG_RCGR 0x2684 #define EMAC_1_TX_M 0x2688 #define EMAC_1_TX_N 0x268C #define EMAC_1_TX_D 0x2690 #define EMAC_1_125M_CMD_RCGR 0x2694 #define EMAC_1_125M_CFG_RCGR 0x2698 #define GLB_CLK_DIAG 0x001C #define L2_CBCR 0x004C #define AHB_CMD_RCGR 0x5000 #define MMSS_MMSSNOC_AHB_CBCR 0x5024 #define MMSS_MMSSNOC_BTO_AHB_CBCR 0x5028 #define MMSS_MISC_AHB_CBCR 0x502C #define AXI_CMD_RCGR 0x5040 #define MMSS_S0_AXI_CBCR 0x5064 #define MMSS_MMSSNOC_AXI_CBCR 0x506C #define OCMEMNOC_CMD_RCGR 0x5090 #define MMSS_DEBUG_CLK_CTL 0x0900 /* Mux source select values */ #define xo_source_val 0 #define gpll0_source_val 1 #define gpll1_source_val 2 #define gpll4_source_val 5 #define gnd_source_val 5 #define sdcc1_gnd_source_val 6 #define pcie_pipe_source_val 2 #define emac0_125m_source_val 1 #define emac0_tx_source_val 2 #define F(f, s, div, m, n) \ { \ .freq_hz = (f), \ .src_clk = &s##_clk_src.c, \ .m_val = (m), \ .n_val = ~((n)-(m)) * !!(n), \ .d_val = ~(n),\ .div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \ | BVAL(10, 8, s##_source_val), \ } #define F_EXT(f, s, div, m, n) \ { \ .freq_hz = (f), \ .m_val = (m), \ .n_val = ~((n)-(m)) * !!(n), \ .d_val = ~(n),\ .div_src_val = BVAL(4, 0, (int)(2*(div) - 1)) \ | BVAL(10, 8, s##_source_val), \ } #define VDD_DIG_FMAX_MAP1(l1, f1) \ .vdd_class = &vdd_dig, \ .fmax = (unsigned long[VDD_DIG_NUM]) { \ [VDD_DIG_##l1] = (f1), \ }, \ .num_fmax = VDD_DIG_NUM #define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \ .vdd_class = &vdd_dig, \ .fmax = (unsigned long[VDD_DIG_NUM]) { \ [VDD_DIG_##l1] = (f1), \ [VDD_DIG_##l2] = (f2), \ }, \ .num_fmax = VDD_DIG_NUM #define VDD_DIG_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \ .vdd_class = &vdd_dig, \ .fmax = (unsigned 
long[VDD_DIG_NUM]) { \ [VDD_DIG_##l1] = (f1), \ [VDD_DIG_##l2] = (f2), \ [VDD_DIG_##l3] = (f3), \ }, \ .num_fmax = VDD_DIG_NUM enum vdd_dig_levels { VDD_DIG_NONE, VDD_DIG_LOW, VDD_DIG_NOMINAL, VDD_DIG_HIGH, VDD_DIG_NUM }; static int vdd_corner[] = { RPM_REGULATOR_CORNER_NONE, /* VDD_DIG_NONE */ RPM_REGULATOR_CORNER_SVS_SOC, /* VDD_DIG_LOW */ RPM_REGULATOR_CORNER_NORMAL, /* VDD_DIG_NOMINAL */ RPM_REGULATOR_CORNER_SUPER_TURBO, /* VDD_DIG_HIGH */ }; static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL); /* TODO RPM clocks are never modified in this chip */ #define RPM_MISC_CLK_TYPE 0x306b6c63 #define CXO_ID 0x0 DEFINE_CLK_RPM_SMD_BRANCH(xo_clk_src, xo_a_clk_src, RPM_MISC_CLK_TYPE, CXO_ID, 19200000); static unsigned int soft_vote_gpll0; static struct pll_vote_clk gpll0_ao_clk_src = { .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE, .en_mask = BIT(0), .status_reg = (void __iomem *)GPLL0_STATUS, .status_mask = BIT(17), .soft_vote = &soft_vote_gpll0, .soft_vote_mask = PLL_SOFT_VOTE_ACPU, .base = &virt_bases[GCC_BASE], .c = { .parent = &xo_a_clk_src.c, .rate = 600000000, .dbg_name = "gpll0_ao_clk_src", .ops = &clk_ops_pll_acpu_vote, CLK_INIT(gpll0_ao_clk_src.c), }, }; static struct pll_vote_clk gpll0_clk_src = { .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE, .en_mask = BIT(0), .status_reg = (void __iomem *)GPLL0_STATUS, .status_mask = BIT(17), .base = &virt_bases[GCC_BASE], .c = { .parent = &xo_clk_src.c, .rate = 600000000, .dbg_name = "gpll0_clk_src", .ops = &clk_ops_pll_vote, CLK_INIT(gpll0_clk_src.c), }, }; static struct pll_vote_clk gpll1_clk_src = { .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE, .en_mask = BIT(1), .status_reg = (void __iomem *)GPLL1_STATUS, .status_mask = BIT(17), .base = &virt_bases[GCC_BASE], .c = { .parent = &xo_clk_src.c, .rate = 480000000, .dbg_name = "gpll1_clk_src", .ops = &clk_ops_pll_vote, CLK_INIT(gpll1_clk_src.c), }, }; static struct pll_vote_clk gpll4_clk_src = { .en_reg = (void __iomem *)APCS_GPLL_ENA_VOTE, .en_mask = BIT(4), 
/*
 * Tail of the gpll4 PLL-vote clock initializer (the struct opens on an
 * earlier line outside this chunk): 288 MHz, parented to XO, lock status
 * polled at GPLL4_STATUS bit 17.
 */
.status_reg = (void __iomem *)GPLL4_STATUS,
	.status_mask = BIT(17),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &xo_clk_src.c,
		.rate = 288000000,
		.dbg_name = "gpll4_clk_src",
		.ops = &clk_ops_pll_vote,
		CLK_INIT(gpll4_clk_src.c),
	},
};

/*
 * Frequency tables: each F() row is (rate_hz, source, divider, M, N).
 * Rows with M/N == 0/0 presumably bypass the MND counter — TODO confirm
 * against the F() macro definition earlier in this file.
 */
static struct clk_freq_tbl ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk[] = {
	F( 19200000, xo, 1, 0, 0),
	F( 50000000, gpll0, 12, 0, 0),
	F_END
};

/*
 * BLSP1 QUP1-6 I2C/SPI root clock generators.  I2C sources use
 * half-integer-divider (HID) rate setting with plain RCG ops; SPI
 * sources additionally program the M/N/D counter (set_rate_mnd,
 * clk_ops_rcg_mnd).  All share the two frequency tables above/below.
 */
static struct rcg_clk blsp1_qup1_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP1_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup1_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp1_qup1_i2c_apps_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk[] = {
	F(   960000, xo, 10, 1, 2),
	F(  4800000, xo, 4, 0, 0),
	F(  9600000, xo, 2, 0, 0),
	F( 15000000, gpll0, 10, 1, 4),
	F( 19200000, xo, 1, 0, 0),
	F( 25000000, gpll0, 12, 1, 2),
	F( 50000000, gpll0, 12, 0, 0),
	F_END
};

static struct rcg_clk blsp1_qup1_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP1_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup1_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_qup1_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup2_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP2_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup2_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp1_qup2_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup2_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP2_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name =
		"blsp1_qup2_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_qup2_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup3_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP3_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup3_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp1_qup3_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup3_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP3_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup3_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_qup3_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup4_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP4_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup4_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp1_qup4_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup4_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP4_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup4_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_qup4_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup5_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP5_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup5_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp1_qup5_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup5_spi_apps_clk_src = {
	.cmd_rcgr_reg =
	BLSP1_QUP5_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup5_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_qup5_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup6_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP6_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup6_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp1_qup6_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_qup6_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_QUP6_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_qup6_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_qup6_spi_apps_clk_src.c),
	},
};

/*
 * Shared BLSP UART baud-rate table.  The 12.5 and 9.5 dividers are
 * half-integer values, presumably encoded by the F() macro (2*div-1
 * style) — TODO confirm against the macro definition.
 */
static struct clk_freq_tbl ftbl_gcc_blsp1_2_uart1_6_apps_clk[] = {
	F(  3686400, gpll0, 1, 96, 15625),
	F(  7372800, gpll0, 1, 192, 15625),
	F( 14745600, gpll0, 1, 384, 15625),
	F( 16000000, gpll0, 5, 2, 15),
	F( 19200000, xo, 1, 0, 0),
	F( 24000000, gpll0, 5, 1, 5),
	F( 32000000, gpll0, 1, 4, 75),
	F( 40000000, gpll0, 15, 0, 0),
	F( 46400000, gpll0, 1, 29, 375),
	F( 48000000, gpll0, 12.5, 0, 0),
	F( 51200000, gpll0, 1, 32, 375),
	F( 56000000, gpll0, 1, 7, 75),
	F( 58982400, gpll0, 1, 1536, 15625),
	F( 60000000, gpll0, 10, 0, 0),
	F( 63160000, gpll0, 9.5, 0, 0),
	F_END
};

/* BLSP1 UART root clocks; all use the M/N/D counter for baud rates. */
static struct rcg_clk blsp1_uart1_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART1_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_uart1_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_uart1_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart2_apps_clk_src = {
	.cmd_rcgr_reg =
BLSP1_UART2_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_uart2_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_uart2_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart3_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART3_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_uart3_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_uart3_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart4_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART4_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_uart4_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_uart4_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart5_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART5_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_uart5_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_uart5_apps_clk_src.c),
	},
};

static struct rcg_clk blsp1_uart6_apps_clk_src = {
	.cmd_rcgr_reg = BLSP1_UART6_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp1_uart6_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp1_uart6_apps_clk_src.c),
	},
};

/*
 * BLSP2 QUP1-6 I2C/SPI root clocks — mirrors the BLSP1 set above and
 * reuses the same frequency tables.
 */
static struct rcg_clk blsp2_qup1_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP1_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup1_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp2_qup1_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup1_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP1_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup1_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_qup1_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup2_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP2_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup2_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp2_qup2_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup2_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP2_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup2_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_qup2_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup3_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP3_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup3_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp2_qup3_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup3_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP3_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup3_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_qup3_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup4_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP4_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl =
	ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup4_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp2_qup4_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup4_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP4_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup4_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_qup4_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup5_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP5_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup5_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp2_qup5_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup5_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP5_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup5_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_qup5_spi_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup6_i2c_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP6_I2C_APPS_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_i2c_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup6_i2c_apps_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(blsp2_qup6_i2c_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_qup6_spi_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_QUP6_SPI_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_qup1_6_spi_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_qup6_spi_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
CLK_INIT(blsp2_qup6_spi_apps_clk_src.c),
	},
};

/* BLSP2 UART root clocks — same baud table and M/N/D ops as BLSP1. */
static struct rcg_clk blsp2_uart1_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART1_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_uart1_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_uart1_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart2_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART2_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_uart2_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_uart2_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart3_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART3_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_uart3_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_uart3_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart4_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART4_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_uart4_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_uart4_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart5_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART5_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_uart5_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_uart5_apps_clk_src.c),
	},
};

static struct rcg_clk blsp2_uart6_apps_clk_src = {
	.cmd_rcgr_reg = BLSP2_UART6_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_blsp1_2_uart1_6_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base =
	&virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "blsp2_uart6_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(blsp2_uart6_apps_clk_src.c),
	},
};

/*
 * Crypto engine (CE1/CE2/CE3) root clocks.  NOTE(review): the three
 * frequency tables are identical copies; presumably kept separate per
 * engine by convention rather than merged — no functional difference.
 */
static struct clk_freq_tbl ftbl_gcc_ce1_clk[] = {
	F( 50000000, gpll0, 12, 0, 0),
	F( 85710000, gpll0, 7, 0, 0),
	F(100000000, gpll0, 6, 0, 0),
	F(171430000, gpll0, 3.5, 0, 0),
	F_END
};

static struct rcg_clk ce1_clk_src = {
	.cmd_rcgr_reg = CE1_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_ce1_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "ce1_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(ce1_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_gcc_ce2_clk[] = {
	F( 50000000, gpll0, 12, 0, 0),
	F( 85710000, gpll0, 7, 0, 0),
	F(100000000, gpll0, 6, 0, 0),
	F(171430000, gpll0, 3.5, 0, 0),
	F_END
};

static struct rcg_clk ce2_clk_src = {
	.cmd_rcgr_reg = CE2_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_ce2_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "ce2_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(ce2_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_gcc_ce3_clk[] = {
	F( 50000000, gpll0, 12, 0, 0),
	F( 85710000, gpll0, 7, 0, 0),
	F(100000000, gpll0, 6, 0, 0),
	F(171430000, gpll0, 3.5, 0, 0),
	F_END
};

static struct rcg_clk ce3_clk_src = {
	.cmd_rcgr_reg = CE3_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_ce3_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "ce3_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(ce3_clk_src.c),
	},
};

/* General-purpose clock sources GP1-3 (shared table). */
static struct clk_freq_tbl ftbl_gcc_gp1_3_clk[] = {
	F( 19200000, xo, 1, 0, 0),
	F(100000000, gpll0, 6, 0, 0),
	F(200000000, gpll0, 3, 0, 0),
	F_END
};

static struct rcg_clk gcc_gp1_clk_src = {
	.cmd_rcgr_reg = GCC_GP1_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_gp1_3_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_gp1_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(gcc_gp1_clk_src.c),
	},
};

static struct rcg_clk gcc_gp2_clk_src = {
	.cmd_rcgr_reg = GCC_GP2_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_gp1_3_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_gp2_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(gcc_gp2_clk_src.c),
	},
};

static struct rcg_clk gcc_gp3_clk_src = {
	.cmd_rcgr_reg = GCC_GP3_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_gp1_3_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_gp3_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(gcc_gp3_clk_src.c),
	},
};

/* PCIe 0/1 auxiliary (~1.01 MHz from XO) and pipe clock sources.
 * The pipe rates come from an external PHY source (F_EXT). */
static struct clk_freq_tbl ftbl_gcc_pcie_0_1_aux_clk[] = {
	F( 1010000, xo, 1, 1, 19),
	F_END
};

static struct rcg_clk pcie_0_aux_clk_src = {
	.cmd_rcgr_reg = PCIE_0_AUX_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_pcie_0_1_aux_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "pcie_0_aux_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(pcie_0_aux_clk_src.c),
	},
};

static struct clk_freq_tbl ftbl_gcc_pcie_0_1_pipe_clk[] = {
	F_EXT(125000000, pcie_pipe, 1, 0, 0),
	F_EXT(250000000, pcie_pipe, 1, 0, 0),
	F_END
};

static struct rcg_clk pcie_0_pipe_clk_src = {
	.cmd_rcgr_reg = PCIE_0_PIPE_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_pcie_0_1_pipe_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "pcie_0_pipe_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(pcie_0_pipe_clk_src.c),
	},
};

static struct rcg_clk pcie_1_aux_clk_src = {
	.cmd_rcgr_reg = PCIE_1_AUX_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_pcie_0_1_aux_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "pcie_1_aux_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(pcie_1_aux_clk_src.c),
	},
};

static struct rcg_clk pcie_1_pipe_clk_src = {
	.cmd_rcgr_reg = PCIE_1_PIPE_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_pcie_0_1_pipe_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "pcie_1_pipe_clk_src",
		.ops =
&clk_ops_rcg,
		CLK_INIT(pcie_1_pipe_clk_src.c),
	},
};

/* PDM2 (pulse-density modulation) root clock, fixed at 60 MHz. */
static struct clk_freq_tbl ftbl_gcc_pdm2_clk[] = {
	F( 60000000, gpll0, 10, 0, 0),
	F_END
};

static struct rcg_clk pdm2_clk_src = {
	.cmd_rcgr_reg = PDM2_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_pdm2_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "pdm2_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(pdm2_clk_src.c),
	},
};

/* SDCC1/2 card clock table: 144 kHz identify mode up to 200 MHz. */
static struct clk_freq_tbl ftbl_gcc_sdcc1_4_apps_clk[] = {
	F(   144000, xo, 16, 3, 25),
	F(   400000, xo, 12, 1, 4),
	F( 20000000, gpll0, 15, 1, 2),
	F( 25000000, gpll0, 12, 1, 2),
	F( 50000000, gpll0, 12, 0, 0),
	F(100000000, gpll0, 6, 0, 0),
	F(200000000, gpll0, 3, 0, 0),
	F_END
};

static struct rcg_clk sdcc1_apps_clk_src = {
	.cmd_rcgr_reg = SDCC1_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "sdcc1_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(sdcc1_apps_clk_src.c),
	},
};

static struct rcg_clk sdcc2_apps_clk_src = {
	.cmd_rcgr_reg = SDCC2_APPS_CMD_RCGR,
	.set_rate = set_rate_mnd,
	.freq_tbl = ftbl_gcc_sdcc1_4_apps_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "sdcc2_apps_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(sdcc2_apps_clk_src.c),
	},
};

/* Voteable XO branch for the USB HS host controller. */
static DEFINE_CLK_BRANCH_VOTER(xo_usb_hs_host_clk, &xo_clk_src.c);

static struct clk_freq_tbl ftbl_gcc_usb_hs_system_clk[] = {
	F( 75000000, gpll0, 8, 0, 0),
	F_END
};

static struct rcg_clk usb_hs_system_clk_src = {
	.cmd_rcgr_reg = USB_HS_SYSTEM_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_usb_hs_system_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "usb_hs_system_clk_src",
		.ops = &clk_ops_rcg,
		CLK_INIT(usb_hs_system_clk_src.c),
	},
};

/*
 * Voted AHB clocks: enabled through a shared vote register
 * (APCS_CLOCK_BRANCH_ENA_VOTE) rather than a dedicated enable bit.
 */
static struct local_vote_clk gcc_bam_dma_ahb_clk = {
	.cbcr_reg = BAM_DMA_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(12),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_bam_dma_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_bam_dma_ahb_clk.c),
	},
};

static struct local_vote_clk gcc_blsp1_ahb_clk = {
	.cbcr_reg = BLSP1_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(17),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_blsp1_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_blsp1_ahb_clk.c),
	},
};

/*
 * BLSP1 QUP branch clocks gating the per-port I2C/SPI root clocks
 * (has_sibling = 0: sole consumer of its RCG).
 */
static struct branch_clk gcc_blsp1_qup1_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP1_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup1_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup1_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup1_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup1_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP1_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup1_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup1_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup1_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup2_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP2_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup2_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup2_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup2_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup2_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP2_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup2_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup2_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup2_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup3_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP3_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup3_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup3_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup3_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup3_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP3_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup3_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup3_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup3_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup4_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP4_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup4_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup4_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup4_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup4_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP4_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup4_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup4_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup4_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup5_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP5_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup5_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup5_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup5_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup5_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP5_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup5_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup5_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup5_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup6_i2c_apps_clk = {
	.cbcr_reg = BLSP1_QUP6_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup6_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup6_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup6_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_qup6_spi_apps_clk = {
	.cbcr_reg = BLSP1_QUP6_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_qup6_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_qup6_spi_apps_clk",
		.ops =
&clk_ops_branch,
		CLK_INIT(gcc_blsp1_qup6_spi_apps_clk.c),
	},
};

/* BLSP1 UART branch clocks, each gating its dedicated UART RCG. */
static struct branch_clk gcc_blsp1_uart1_apps_clk = {
	.cbcr_reg = BLSP1_UART1_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_uart1_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_uart1_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart1_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart2_apps_clk = {
	.cbcr_reg = BLSP1_UART2_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_uart2_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_uart2_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart2_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart3_apps_clk = {
	.cbcr_reg = BLSP1_UART3_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_uart3_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_uart3_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart3_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart4_apps_clk = {
	.cbcr_reg = BLSP1_UART4_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_uart4_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_uart4_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart4_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart5_apps_clk = {
	.cbcr_reg = BLSP1_UART5_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_uart5_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_uart5_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart5_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp1_uart6_apps_clk = {
	.cbcr_reg = BLSP1_UART6_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp1_uart6_apps_clk_src.c,
		.dbg_name = "gcc_blsp1_uart6_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp1_uart6_apps_clk.c),
	},
};

/* Voted AHB clock for the BLSP2 block. */
static struct local_vote_clk gcc_blsp2_ahb_clk = {
	.cbcr_reg = BLSP2_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(15),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_blsp2_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_blsp2_ahb_clk.c),
	},
};

/* BLSP2 QUP branch clocks — mirrors the BLSP1 set. */
static struct branch_clk gcc_blsp2_qup1_i2c_apps_clk = {
	.cbcr_reg = BLSP2_QUP1_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup1_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup1_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup1_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup1_spi_apps_clk = {
	.cbcr_reg = BLSP2_QUP1_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup1_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup1_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup1_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup2_i2c_apps_clk = {
	.cbcr_reg = BLSP2_QUP2_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup2_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup2_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup2_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup2_spi_apps_clk = {
	.cbcr_reg = BLSP2_QUP2_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup2_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup2_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup2_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup3_i2c_apps_clk = {
	.cbcr_reg = BLSP2_QUP3_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup3_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup3_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup3_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup3_spi_apps_clk = {
	.cbcr_reg = BLSP2_QUP3_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup3_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup3_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup3_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup4_i2c_apps_clk = {
	.cbcr_reg = BLSP2_QUP4_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup4_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup4_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup4_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup4_spi_apps_clk = {
	.cbcr_reg = BLSP2_QUP4_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup4_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup4_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup4_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup5_i2c_apps_clk = {
	.cbcr_reg = BLSP2_QUP5_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup5_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup5_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup5_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup5_spi_apps_clk = {
	.cbcr_reg = BLSP2_QUP5_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup5_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup5_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup5_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup6_i2c_apps_clk = {
	.cbcr_reg = BLSP2_QUP6_I2C_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup6_i2c_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup6_i2c_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup6_i2c_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_qup6_spi_apps_clk = {
	.cbcr_reg = BLSP2_QUP6_SPI_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_qup6_spi_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_qup6_spi_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_qup6_spi_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_uart1_apps_clk = {
	.cbcr_reg = BLSP2_UART1_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_uart1_apps_clk_src.c,
		.dbg_name =
"gcc_blsp2_uart1_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_uart1_apps_clk.c),
	},
};

/* BLSP2 UART branch clocks. */
static struct branch_clk gcc_blsp2_uart2_apps_clk = {
	.cbcr_reg = BLSP2_UART2_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_uart2_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_uart2_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_uart2_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_uart3_apps_clk = {
	.cbcr_reg = BLSP2_UART3_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_uart3_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_uart3_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_uart3_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_uart4_apps_clk = {
	.cbcr_reg = BLSP2_UART4_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_uart4_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_uart4_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_uart4_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_uart5_apps_clk = {
	.cbcr_reg = BLSP2_UART5_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_uart5_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_uart5_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_uart5_apps_clk.c),
	},
};

static struct branch_clk gcc_blsp2_uart6_apps_clk = {
	.cbcr_reg = BLSP2_UART6_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &blsp2_uart6_apps_clk_src.c,
		.dbg_name = "gcc_blsp2_uart6_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_blsp2_uart6_apps_clk.c),
	},
};

/* Boot-ROM AHB access, voted. */
static struct local_vote_clk gcc_boot_rom_ahb_clk = {
	.cbcr_reg = BOOT_ROM_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(10),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_boot_rom_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_boot_rom_ahb_clk.c),
	},
};

/* Crypto engine 1/2 bus clocks (voted); CE3 uses plain branches below. */
static struct local_vote_clk gcc_ce1_ahb_clk = {
	.cbcr_reg = CE1_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(3),
	.base =
	&virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_ce1_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_ce1_ahb_clk.c),
	},
};

static struct local_vote_clk gcc_ce1_axi_clk = {
	.cbcr_reg = CE1_AXI_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(4),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_ce1_axi_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_ce1_axi_clk.c),
	},
};

static struct local_vote_clk gcc_ce1_clk = {
	.cbcr_reg = CE1_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(5),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &ce1_clk_src.c,
		.dbg_name = "gcc_ce1_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_ce1_clk.c),
	},
};

static struct local_vote_clk gcc_ce2_ahb_clk = {
	.cbcr_reg = CE2_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(0),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_ce2_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_ce2_ahb_clk.c),
	},
};

static struct local_vote_clk gcc_ce2_axi_clk = {
	.cbcr_reg = CE2_AXI_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(1),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_ce2_axi_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_ce2_axi_clk.c),
	},
};

static struct local_vote_clk gcc_ce2_clk = {
	.cbcr_reg = CE2_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(2),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &ce2_clk_src.c,
		.dbg_name = "gcc_ce2_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_ce2_clk.c),
	},
};

static struct branch_clk gcc_ce3_ahb_clk = {
	.cbcr_reg = CE3_AHB_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_ce3_ahb_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_ce3_ahb_clk.c),
	},
};

static struct branch_clk gcc_ce3_axi_clk = {
	.cbcr_reg = CE3_AXI_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_ce3_axi_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_ce3_axi_clk.c),
	},
};

static struct branch_clk gcc_ce3_clk = {
	.cbcr_reg = CE3_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent
		= &ce3_clk_src.c,
		.dbg_name = "gcc_ce3_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_ce3_clk.c),
	},
};

/* General-purpose output clock branches GP1-3. */
static struct branch_clk gcc_gp1_clk = {
	.cbcr_reg = GCC_GP1_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &gcc_gp1_clk_src.c,
		.dbg_name = "gcc_gp1_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_gp1_clk.c),
	},
};

static struct branch_clk gcc_gp2_clk = {
	.cbcr_reg = GCC_GP2_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &gcc_gp2_clk_src.c,
		.dbg_name = "gcc_gp2_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_gp2_clk.c),
	},
};

static struct branch_clk gcc_gp3_clk = {
	.cbcr_reg = GCC_GP3_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &gcc_gp3_clk_src.c,
		.dbg_name = "gcc_gp3_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_gp3_clk.c),
	},
};

/* PCIe port 0 branch clocks (aux, config AHB, master/slave AXI, pipe). */
static struct branch_clk gcc_pcie_0_aux_clk = {
	.cbcr_reg = PCIE_0_AUX_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &pcie_0_aux_clk_src.c,
		.dbg_name = "gcc_pcie_0_aux_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_0_aux_clk.c),
	},
};

static struct branch_clk gcc_pcie_0_cfg_ahb_clk = {
	.cbcr_reg = PCIE_0_CFG_AHB_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_pcie_0_cfg_ahb_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_0_cfg_ahb_clk.c),
	},
};

static struct branch_clk gcc_pcie_0_mstr_axi_clk = {
	.cbcr_reg = PCIE_0_MSTR_AXI_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_pcie_0_mstr_axi_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_0_mstr_axi_clk.c),
	},
};

static struct branch_clk gcc_pcie_0_pipe_clk = {
	.cbcr_reg = PCIE_0_PIPE_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &pcie_0_pipe_clk_src.c,
		.dbg_name = "gcc_pcie_0_pipe_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_0_pipe_clk.c),
	},
};

static struct branch_clk gcc_pcie_0_slv_axi_clk = {
	.cbcr_reg = PCIE_0_SLV_AXI_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name =
"gcc_pcie_0_slv_axi_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_0_slv_axi_clk.c),
	},
};

/* PCIe port 1 branch clocks — mirrors the port 0 set. */
static struct branch_clk gcc_pcie_1_aux_clk = {
	.cbcr_reg = PCIE_1_AUX_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &pcie_1_aux_clk_src.c,
		.dbg_name = "gcc_pcie_1_aux_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_1_aux_clk.c),
	},
};

static struct branch_clk gcc_pcie_1_cfg_ahb_clk = {
	.cbcr_reg = PCIE_1_CFG_AHB_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_pcie_1_cfg_ahb_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_1_cfg_ahb_clk.c),
	},
};

static struct branch_clk gcc_pcie_1_mstr_axi_clk = {
	.cbcr_reg = PCIE_1_MSTR_AXI_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_pcie_1_mstr_axi_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_1_mstr_axi_clk.c),
	},
};

static struct branch_clk gcc_pcie_1_pipe_clk = {
	.cbcr_reg = PCIE_1_PIPE_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &pcie_1_pipe_clk_src.c,
		.dbg_name = "gcc_pcie_1_pipe_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_1_pipe_clk.c),
	},
};

static struct branch_clk gcc_pcie_1_slv_axi_clk = {
	.cbcr_reg = PCIE_1_SLV_AXI_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_pcie_1_slv_axi_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pcie_1_slv_axi_clk.c),
	},
};

/* PDM branch clocks. */
static struct branch_clk gcc_pdm2_clk = {
	.cbcr_reg = PDM2_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &pdm2_clk_src.c,
		.dbg_name = "gcc_pdm2_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pdm2_clk.c),
	},
};

static struct branch_clk gcc_pdm_ahb_clk = {
	.cbcr_reg = PDM_AHB_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_pdm_ahb_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_pdm_ahb_clk.c),
	},
};

/* PRNG AHB clock, voted. */
static struct local_vote_clk gcc_prng_ahb_clk = {
	.cbcr_reg = PRNG_AHB_CBCR,
	.vote_reg = APCS_CLOCK_BRANCH_ENA_VOTE,
	.en_mask = BIT(13),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name =
		"gcc_prng_ahb_clk",
		.ops = &clk_ops_vote,
		CLK_INIT(gcc_prng_ahb_clk.c),
	},
};

/* SDCC1/2 bus (AHB) and card (apps) branch clocks. */
static struct branch_clk gcc_sdcc1_ahb_clk = {
	.cbcr_reg = SDCC1_AHB_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_sdcc1_ahb_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_sdcc1_ahb_clk.c),
	},
};

static struct branch_clk gcc_sdcc1_apps_clk = {
	.cbcr_reg = SDCC1_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &sdcc1_apps_clk_src.c,
		.dbg_name = "gcc_sdcc1_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_sdcc1_apps_clk.c),
	},
};

static struct branch_clk gcc_sdcc2_ahb_clk = {
	.cbcr_reg = SDCC2_AHB_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_sdcc2_ahb_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_sdcc2_ahb_clk.c),
	},
};

static struct branch_clk gcc_sdcc2_apps_clk = {
	.cbcr_reg = SDCC2_APPS_CBCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &sdcc2_apps_clk_src.c,
		.dbg_name = "gcc_sdcc2_apps_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_sdcc2_apps_clk.c),
	},
};

/* USB high-speed controller clocks; the system clock also carries the
 * block reset register (bcr_reg = USB_HS_BCR). */
static struct branch_clk gcc_usb2a_phy_sleep_clk = {
	.cbcr_reg = USB2A_PHY_SLEEP_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_usb2a_phy_sleep_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_usb2a_phy_sleep_clk.c),
	},
};

static struct branch_clk gcc_usb_hs_ahb_clk = {
	.cbcr_reg = USB_HS_AHB_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_usb_hs_ahb_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_usb_hs_ahb_clk.c),
	},
};

static struct branch_clk gcc_usb_hs_system_clk = {
	.cbcr_reg = USB_HS_SYSTEM_CBCR,
	.bcr_reg = USB_HS_BCR,
	.has_sibling = 0,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.parent = &usb_hs_system_clk_src.c,
		.dbg_name = "gcc_usb_hs_system_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_usb_hs_system_clk.c),
	},
};

/* PCIe PHY LDO enables, modelled as simple gate clocks. */
static struct gate_clk pcie_0_phy_ldo = {
	.en_reg = PCIE_0_PHY_LDO_EN,
	.en_mask = BIT(0),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "pcie_0_phy_ldo",
		.ops =
		&clk_ops_gate,
		CLK_INIT(pcie_0_phy_ldo.c),
	},
};

static struct gate_clk pcie_1_phy_ldo = {
	.en_reg = PCIE_1_PHY_LDO_EN,
	.en_mask = BIT(0),
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "pcie_1_phy_ldo",
		.ops = &clk_ops_gate,
		CLK_INIT(pcie_1_phy_ldo.c),
	},
};

/* Ethernet MAC (EMAC0/1) bus clocks. */
static struct branch_clk gcc_emac0_axi_clk = {
	.cbcr_reg = EMAC_0_AXI_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_emac0_axi_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_emac0_axi_clk.c),
	},
};

static struct branch_clk gcc_emac1_axi_clk = {
	.cbcr_reg = EMAC_1_AXI_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_emac1_axi_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_emac1_axi_clk.c),
	},
};

static struct branch_clk gcc_emac0_ahb_clk = {
	.cbcr_reg = EMAC_0_AHB_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_emac0_ahb_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_emac0_ahb_clk.c),
	},
};

static struct branch_clk gcc_emac1_ahb_clk = {
	.cbcr_reg = EMAC_1_AHB_CBCR,
	.has_sibling = 1,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "gcc_emac1_ahb_clk",
		.ops = &clk_ops_branch,
		CLK_INIT(gcc_emac1_ahb_clk.c),
	},
};

/* EMAC 125 MHz table: XO bypass plus the external 125 MHz PHY source. */
static struct clk_freq_tbl ftbl_gcc_emac0_1_125m_clk[] = {
	F( 19200000, xo, 1, 0, 0),
	F_EXT( 125000000, emac0_125m, 1, 0, 0),
	F_END
};

/*
 * NOTE(review): both EMAC 125M sources pair .set_rate = set_rate_hid
 * with .ops = &clk_ops_rcg_mnd, unlike every other clock here (HID goes
 * with clk_ops_rcg, MND with set_rate_mnd).  All table rows have
 * M/N == 0/0, so this may be benign, but confirm against the intended
 * register programming before relying on non-bypass rates.
 */
static struct rcg_clk emac0_125m_clk_src = {
	.cmd_rcgr_reg = EMAC_0_125M_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_emac0_1_125m_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "emac0_125m_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(emac0_125m_clk_src.c),
	},
};

static struct rcg_clk emac1_125m_clk_src = {
	.cmd_rcgr_reg = EMAC_1_125M_CMD_RCGR,
	.set_rate = set_rate_hid,
	.freq_tbl = ftbl_gcc_emac0_1_125m_clk,
	.current_freq = &rcg_dummy_freq,
	.base = &virt_bases[GCC_BASE],
	.c = {
		.dbg_name = "emac1_125m_clk_src",
		.ops = &clk_ops_rcg_mnd,
		CLK_INIT(emac1_125m_clk_src.c),
	},
};

/* Continues past this chunk (initializer completed on a later line). */
static struct branch_clk gcc_emac0_125m_clk = {
	.cbcr_reg =
EMAC_0_125M_CBCR, .has_sibling = 0, .base = &virt_bases[GCC_BASE], .c = { .parent = &emac0_125m_clk_src.c, .dbg_name = "gcc_emac0_125m_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac0_125m_clk.c), }, }; static struct branch_clk gcc_emac1_125m_clk = { .cbcr_reg = EMAC_1_125M_CBCR, .has_sibling = 0, .base = &virt_bases[GCC_BASE], .c = { .parent = &emac1_125m_clk_src.c, .dbg_name = "gcc_emac1_125m_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac1_125m_clk.c), }, }; static struct clk_freq_tbl ftbl_gcc_emac0_1_sys_25m_clk[] = { F( 19200000, xo, 1, 0, 0), F_EXT( 25000000, emac0_125m, 5, 0, 0), F_END }; static struct rcg_clk emac0_sys_25m_clk_src = { .cmd_rcgr_reg = EMAC_0_SYS_25M_CMD_RCGR, .set_rate = set_rate_hid, .freq_tbl = ftbl_gcc_emac0_1_sys_25m_clk, .current_freq = &rcg_dummy_freq, .base = &virt_bases[GCC_BASE], .c = { .dbg_name = "emac0_sys_25m_clk_src", .ops = &clk_ops_rcg_mnd, CLK_INIT(emac0_sys_25m_clk_src.c), }, }; static struct rcg_clk emac1_sys_25m_clk_src = { .cmd_rcgr_reg = EMAC_1_SYS_25M_CMD_RCGR, .set_rate = set_rate_hid, .freq_tbl = ftbl_gcc_emac0_1_sys_25m_clk, .current_freq = &rcg_dummy_freq, .base = &virt_bases[GCC_BASE], .c = { .dbg_name = "emac1_sys_25m_clk_src", .ops = &clk_ops_rcg_mnd, CLK_INIT(emac1_sys_25m_clk_src.c), }, }; static struct branch_clk gcc_emac0_sys_25m_clk = { .cbcr_reg = EMAC_0_SYS_25M_CBCR, .has_sibling = 0, .base = &virt_bases[GCC_BASE], .c = { .parent = &emac0_sys_25m_clk_src.c, .dbg_name = "gcc_emac0_sys_25m_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac0_sys_25m_clk.c), }, }; static struct branch_clk gcc_emac1_sys_25m_clk = { .cbcr_reg = EMAC_1_SYS_25M_CBCR, .has_sibling = 0, .base = &virt_bases[GCC_BASE], .c = { .parent = &emac1_sys_25m_clk_src.c, .dbg_name = "gcc_emac1_sys_25m_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac1_sys_25m_clk.c), }, }; static struct clk_freq_tbl ftbl_gcc_emac0_1_tx_clk[] = { F( 19200000, xo, 1, 0, 0), F_EXT( 125000000, emac0_tx, 1, 0, 0), F_END }; static struct rcg_clk emac0_tx_clk_src = { 
.cmd_rcgr_reg = EMAC_0_TX_CMD_RCGR, .set_rate = set_rate_mnd, .freq_tbl = ftbl_gcc_emac0_1_tx_clk, .current_freq = &rcg_dummy_freq, .base = &virt_bases[GCC_BASE], .c = { .dbg_name = "emac0_tx_clk_src", .ops = &clk_ops_rcg_mnd, CLK_INIT(emac0_tx_clk_src.c), }, }; static struct rcg_clk emac1_tx_clk_src = { .cmd_rcgr_reg = EMAC_1_TX_CMD_RCGR, .set_rate = set_rate_mnd, .freq_tbl = ftbl_gcc_emac0_1_tx_clk, .current_freq = &rcg_dummy_freq, .base = &virt_bases[GCC_BASE], .c = { .dbg_name = "emac1_tx_clk_src", .ops = &clk_ops_rcg_mnd, CLK_INIT(emac1_tx_clk_src.c), }, }; static struct branch_clk gcc_emac0_tx_clk = { .cbcr_reg = EMAC_0_TX_CBCR, .has_sibling = 0, .base = &virt_bases[GCC_BASE], .c = { .parent = &emac0_tx_clk_src.c, .dbg_name = "gcc_emac0_tx_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac0_tx_clk.c), }, }; static struct branch_clk gcc_emac1_tx_clk = { .cbcr_reg = EMAC_1_TX_CBCR, .has_sibling = 0, .base = &virt_bases[GCC_BASE], .c = { .parent = &emac1_tx_clk_src.c, .dbg_name = "gcc_emac1_tx_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac1_tx_clk.c), }, }; static struct branch_clk gcc_emac0_rx_clk = { .cbcr_reg = EMAC_0_RX_CBCR, .has_sibling = 1, .base = &virt_bases[GCC_BASE], .c = { .dbg_name = "gcc_emac0_rx_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac0_rx_clk.c), }, }; static struct branch_clk gcc_emac1_rx_clk = { .cbcr_reg = EMAC_1_RX_CBCR, .has_sibling = 1, .base = &virt_bases[GCC_BASE], .c = { .dbg_name = "gcc_emac1_rx_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac1_rx_clk.c), }, }; static struct branch_clk gcc_emac0_sys_clk = { .cbcr_reg = EMAC_0_SYS_CBCR, .has_sibling = 1, .base = &virt_bases[GCC_BASE], .c = { .dbg_name = "gcc_emac0_sys_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac0_sys_clk.c), }, }; static struct branch_clk gcc_emac1_sys_clk = { .cbcr_reg = EMAC_1_SYS_CBCR, .has_sibling = 1, .base = &virt_bases[GCC_BASE], .c = { .dbg_name = "gcc_emac1_sys_clk", .ops = &clk_ops_branch, CLK_INIT(gcc_emac1_sys_clk.c), }, }; static 
DEFINE_CLK_MEASURE(l2_m_clk); static DEFINE_CLK_MEASURE(krait0_m_clk); static DEFINE_CLK_MEASURE(krait1_m_clk); static DEFINE_CLK_MEASURE(krait2_m_clk); static DEFINE_CLK_MEASURE(krait3_m_clk); #ifdef CONFIG_DEBUG_FS struct measure_mux_entry { struct clk *c; int base; u32 debug_mux; }; enum { M_ACPU0 = 0, M_ACPU1, M_ACPU2, M_ACPU3, M_L2, }; struct measure_mux_entry measure_mux[] = { {&gcc_usb_hs_system_clk.c, GCC_BASE, 0x0060}, {&gcc_usb_hs_ahb_clk.c, GCC_BASE, 0x0061}, {&gcc_usb2a_phy_sleep_clk.c, GCC_BASE, 0x0063}, {&gcc_sdcc1_apps_clk.c, GCC_BASE, 0x0068}, {&gcc_sdcc1_ahb_clk.c, GCC_BASE, 0x0069}, {&gcc_sdcc2_apps_clk.c, GCC_BASE, 0x0070}, {&gcc_sdcc2_ahb_clk.c, GCC_BASE, 0x0071}, {&gcc_blsp1_ahb_clk.c, GCC_BASE, 0x0088}, {&gcc_blsp1_qup1_spi_apps_clk.c, GCC_BASE, 0x008a}, {&gcc_blsp1_qup1_i2c_apps_clk.c, GCC_BASE, 0x008b}, {&gcc_blsp1_uart1_apps_clk.c, GCC_BASE, 0x008c}, {&gcc_blsp1_qup2_spi_apps_clk.c, GCC_BASE, 0x008e}, {&gcc_blsp1_qup2_i2c_apps_clk.c, GCC_BASE, 0x0090}, {&gcc_blsp1_uart2_apps_clk.c, GCC_BASE, 0x0091}, {&gcc_blsp1_qup3_spi_apps_clk.c, GCC_BASE, 0x0093}, {&gcc_blsp1_qup3_i2c_apps_clk.c, GCC_BASE, 0x0094}, {&gcc_blsp1_uart3_apps_clk.c, GCC_BASE, 0x0095}, {&gcc_blsp1_qup4_spi_apps_clk.c, GCC_BASE, 0x0098}, {&gcc_blsp1_qup4_i2c_apps_clk.c, GCC_BASE, 0x0099}, {&gcc_blsp1_uart4_apps_clk.c, GCC_BASE, 0x009a}, {&gcc_blsp1_qup5_spi_apps_clk.c, GCC_BASE, 0x009c}, {&gcc_blsp1_qup5_i2c_apps_clk.c, GCC_BASE, 0x009d}, {&gcc_blsp1_uart5_apps_clk.c, GCC_BASE, 0x009e}, {&gcc_blsp1_qup6_spi_apps_clk.c, GCC_BASE, 0x00a1}, {&gcc_blsp1_qup6_i2c_apps_clk.c, GCC_BASE, 0x00a2}, {&gcc_blsp1_uart6_apps_clk.c, GCC_BASE, 0x00a3}, {&gcc_blsp2_ahb_clk.c, GCC_BASE, 0x00a8}, {&gcc_blsp2_qup1_spi_apps_clk.c, GCC_BASE, 0x00aa}, {&gcc_blsp2_qup1_i2c_apps_clk.c, GCC_BASE, 0x00ab}, {&gcc_blsp2_uart1_apps_clk.c, GCC_BASE, 0x00ac}, {&gcc_blsp2_qup2_spi_apps_clk.c, GCC_BASE, 0x00ae}, {&gcc_blsp2_qup2_i2c_apps_clk.c, GCC_BASE, 0x00b0}, {&gcc_blsp2_uart2_apps_clk.c, GCC_BASE, 
0x00b1}, {&gcc_blsp2_qup3_spi_apps_clk.c, GCC_BASE, 0x00b3}, {&gcc_blsp2_qup3_i2c_apps_clk.c, GCC_BASE, 0x00b4}, {&gcc_blsp2_uart3_apps_clk.c, GCC_BASE, 0x00b5}, {&gcc_blsp2_qup4_spi_apps_clk.c, GCC_BASE, 0x00b8}, {&gcc_blsp2_qup4_i2c_apps_clk.c, GCC_BASE, 0x00b9}, {&gcc_blsp2_uart4_apps_clk.c, GCC_BASE, 0x00ba}, {&gcc_blsp2_qup5_spi_apps_clk.c, GCC_BASE, 0x00bc}, {&gcc_blsp2_qup5_i2c_apps_clk.c, GCC_BASE, 0x00bd}, {&gcc_blsp2_uart5_apps_clk.c, GCC_BASE, 0x00be}, {&gcc_blsp2_qup6_spi_apps_clk.c, GCC_BASE, 0x00c1}, {&gcc_blsp2_qup6_i2c_apps_clk.c, GCC_BASE, 0x00c2}, {&gcc_blsp2_uart6_apps_clk.c, GCC_BASE, 0x00c3}, {&gcc_pdm_ahb_clk.c, GCC_BASE, 0x00d0}, {&gcc_pdm2_clk.c, GCC_BASE, 0x00d2}, {&gcc_prng_ahb_clk.c, GCC_BASE, 0x00d8}, {&gcc_bam_dma_ahb_clk.c, GCC_BASE, 0x00e0}, {&gcc_boot_rom_ahb_clk.c, GCC_BASE, 0x00f8}, {&gcc_ce1_clk.c, GCC_BASE, 0x0138}, {&gcc_ce1_axi_clk.c, GCC_BASE, 0x0139}, {&gcc_ce1_ahb_clk.c, GCC_BASE, 0x013a}, {&gcc_ce2_clk.c, GCC_BASE, 0x0140}, {&gcc_ce2_axi_clk.c, GCC_BASE, 0x0141}, {&gcc_ce2_ahb_clk.c, GCC_BASE, 0x0142}, {&gcc_pcie_0_slv_axi_clk.c, GCC_BASE, 0x01f8}, {&gcc_pcie_0_mstr_axi_clk.c, GCC_BASE, 0x01f9}, {&gcc_pcie_0_cfg_ahb_clk.c, GCC_BASE, 0x01fa}, {&gcc_pcie_0_aux_clk.c, GCC_BASE, 0x01fb}, {&gcc_pcie_0_pipe_clk.c, GCC_BASE, 0x01fc}, {&gcc_pcie_1_slv_axi_clk.c, GCC_BASE, 0x0200}, {&gcc_pcie_1_mstr_axi_clk.c, GCC_BASE, 0x0201}, {&gcc_pcie_1_cfg_ahb_clk.c, GCC_BASE, 0x0202}, {&gcc_pcie_1_aux_clk.c, GCC_BASE, 0x0203}, {&gcc_pcie_1_pipe_clk.c, GCC_BASE, 0x0204}, {&gcc_ce3_clk.c, GCC_BASE, 0x0228}, {&gcc_ce3_axi_clk.c, GCC_BASE, 0x0229}, {&gcc_ce3_ahb_clk.c, GCC_BASE, 0x022a}, {&gcc_emac0_axi_clk.c, GCC_BASE, 0x01a8}, {&gcc_emac0_ahb_clk.c, GCC_BASE, 0x01a9}, {&gcc_emac0_sys_25m_clk.c, GCC_BASE, 0x01aa}, {&gcc_emac0_tx_clk.c, GCC_BASE, 0x01ab}, {&gcc_emac0_125m_clk.c, GCC_BASE, 0x01ac}, {&gcc_emac0_rx_clk.c, GCC_BASE, 0x01ad}, {&gcc_emac0_sys_clk.c, GCC_BASE, 0x01ae}, {&gcc_emac1_axi_clk.c, GCC_BASE, 0x01b0}, {&gcc_emac1_ahb_clk.c, 
GCC_BASE, 0x01b1}, {&gcc_emac1_sys_25m_clk.c, GCC_BASE, 0x01b2}, {&gcc_emac1_tx_clk.c, GCC_BASE, 0x01b3}, {&gcc_emac1_125m_clk.c, GCC_BASE, 0x01b4}, {&gcc_emac1_rx_clk.c, GCC_BASE, 0x01b5}, {&gcc_emac1_sys_clk.c, GCC_BASE, 0x01b6}, {&krait0_clk.c, APCS_BASE, M_ACPU0}, {&krait1_clk.c, APCS_BASE, M_ACPU1}, {&krait2_clk.c, APCS_BASE, M_ACPU2}, {&krait3_clk.c, APCS_BASE, M_ACPU3}, {&l2_clk.c, APCS_BASE, M_L2}, {&dummy_clk, N_BASES, 0x0000}, }; /* TODO: Need to consider the new mux selection for pll test */ static int measure_clk_set_parent(struct clk *c, struct clk *parent) { struct measure_clk *clk = to_measure_clk(c); unsigned long flags; u32 regval, clk_sel, i; if (!parent) return -EINVAL; for (i = 0; i < (ARRAY_SIZE(measure_mux) - 1); i++) if (measure_mux[i].c == parent) break; if (measure_mux[i].c == &dummy_clk) return -EINVAL; spin_lock_irqsave(&local_clock_reg_lock, flags); /* * Program the test vector, measurement period (sample_ticks) * and scaling multiplier. */ clk->sample_ticks = 0x10000; clk->multiplier = 1; switch (measure_mux[i].base) { case GCC_BASE: writel_relaxed(0, GCC_REG_BASE(GCC_DEBUG_CLK_CTL)); clk_sel = measure_mux[i].debug_mux; break; case APCS_BASE: clk->multiplier = 4; clk_sel = 0x16A; if (measure_mux[i].debug_mux == M_L2) regval = BIT(12); else regval = measure_mux[i].debug_mux << 8; writel_relaxed(BIT(0), APCS_REG_BASE(L2_CBCR)); writel_relaxed(regval, APCS_REG_BASE(GLB_CLK_DIAG)); break; default: return -EINVAL; } /* Set debug mux clock index */ regval = BVAL(9, 0, clk_sel); writel_relaxed(regval, GCC_REG_BASE(GCC_DEBUG_CLK_CTL)); /* Activate debug clock output */ regval |= BIT(16); writel_relaxed(regval, GCC_REG_BASE(GCC_DEBUG_CLK_CTL)); /* Make sure test vector is set before starting measurements. */ mb(); spin_unlock_irqrestore(&local_clock_reg_lock, flags); return 0; } /* Sample clock for 'ticks' reference clock ticks. */ static u32 run_measurement(unsigned ticks) { /* Stop counters and set the XO4 counter start value. 
*/ writel_relaxed(ticks, GCC_REG_BASE(CLOCK_FRQ_MEASURE_CTL)); /* Wait for timer to become ready. */ while ((readl_relaxed(GCC_REG_BASE(CLOCK_FRQ_MEASURE_STATUS)) & BIT(25)) != 0) cpu_relax(); /* Run measurement and wait for completion. */ writel_relaxed(BIT(20)|ticks, GCC_REG_BASE(CLOCK_FRQ_MEASURE_CTL)); while ((readl_relaxed(GCC_REG_BASE(CLOCK_FRQ_MEASURE_STATUS)) & BIT(25)) == 0) cpu_relax(); /* Return measured ticks. */ return readl_relaxed(GCC_REG_BASE(CLOCK_FRQ_MEASURE_STATUS)) & BM(24, 0); } /* * Perform a hardware rate measurement for a given clock. * FOR DEBUG USE ONLY: Measurements take ~15 ms! */ static unsigned long measure_clk_get_rate(struct clk *c) { unsigned long flags; u32 gcc_xo4_reg_backup; u64 raw_count_short, raw_count_full; struct measure_clk *clk = to_measure_clk(c); unsigned ret; ret = clk_prepare_enable(&xo_clk_src.c); if (ret) { pr_warn("CXO clock failed to enable. Can't measure\n"); return 0; } spin_lock_irqsave(&local_clock_reg_lock, flags); /* Enable CXO/4 and RINGOSC branch. */ gcc_xo4_reg_backup = readl_relaxed(GCC_REG_BASE(GCC_XO_DIV4_CBCR)); writel_relaxed(0x1, GCC_REG_BASE(GCC_XO_DIV4_CBCR)); /* * The ring oscillator counter will not reset if the measured clock * is not running. To detect this, run a short measurement before * the full measurement. If the raw results of the two are the same * then the clock must be off. */ /* Run a short measurement. (~1 ms) */ raw_count_short = run_measurement(0x1000); /* Run a full measurement. (~14 ms) */ raw_count_full = run_measurement(clk->sample_ticks); writel_relaxed(gcc_xo4_reg_backup, GCC_REG_BASE(GCC_XO_DIV4_CBCR)); /* Return 0 if the clock is off. */ if (raw_count_full == raw_count_short) { ret = 0; } else { /* Compute rate in Hz. */ raw_count_full = ((raw_count_full * 10) + 15) * 4800000; do_div(raw_count_full, ((clk->sample_ticks * 10) + 35)); ret = (raw_count_full * clk->multiplier); } /*TODO: confirm if this value is correct. 
*/ writel_relaxed(0x51A00, GCC_REG_BASE(GCC_PLLTEST_PAD_CFG)); spin_unlock_irqrestore(&local_clock_reg_lock, flags); clk_disable_unprepare(&xo_clk_src.c); return ret; } #else /* !CONFIG_DEBUG_FS */ static int measure_clk_set_parent(struct clk *clk, struct clk *parent) { return -EINVAL; } static unsigned long measure_clk_get_rate(struct clk *clk) { return 0; } #endif /* CONFIG_DEBUG_FS */ static struct clk_ops clk_ops_measure = { .set_parent = measure_clk_set_parent, .get_rate = measure_clk_get_rate, }; static struct measure_clk measure_clk = { .c = { .dbg_name = "measure_clk", .ops = &clk_ops_measure, CLK_INIT(measure_clk.c), }, .multiplier = 1, }; static struct clk_lookup fsm_clocks_9900[] = { /* Dummy CE clocks are defined to satisfy the CE driver */ CLK_DUMMY("core_clk", NULL, "fd440000.qcom,qcrypto", OFF), CLK_DUMMY("iface_clk", NULL, "fd440000.qcom,qcrypto", OFF), CLK_DUMMY("bus_clk", NULL, "fd440000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk_src", NULL, "fd440000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk", NULL, "fe040000.qcom,qcrypto", OFF), CLK_DUMMY("iface_clk", NULL, "fe040000.qcom,qcrypto", OFF), CLK_DUMMY("bus_clk", NULL, "fe040000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk_src", NULL, "fe040000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk", NULL, "fe000000.qcom,qcrypto", OFF), CLK_DUMMY("iface_clk", NULL, "fe000000.qcom,qcrypto", OFF), CLK_DUMMY("bus_clk", NULL, "fe000000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk_src", NULL, "fe000000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk", NULL, "fe140000.qcom,qcota", OFF), CLK_DUMMY("iface_clk", NULL, "fe140000.qcom,qcota", OFF), CLK_DUMMY("bus_clk", NULL, "fe140000.qcom,qcota", OFF), CLK_DUMMY("core_clk_src", NULL, "fe140000.qcom,qcota", OFF), CLK_DUMMY("core_clk", NULL, "fe0c0000.qcom,qcota", OFF), CLK_DUMMY("iface_clk", NULL, "fe0c0000.qcom,qcota", OFF), CLK_DUMMY("bus_clk", NULL, "fe0c0000.qcom,qcota", OFF), CLK_DUMMY("core_clk_src", NULL, "fe0c0000.qcom,qcota", OFF), CLK_DUMMY("dma_bam_pclk", NULL, "msm_sps", OFF), 
CLK_DUMMY("dfab_clk", NULL, "msm_sps", OFF), CLK_LOOKUP("measure", measure_clk.c, "debug"), CLK_LOOKUP("gpll0", gpll0_clk_src.c, ""), /* RCG source clocks */ CLK_LOOKUP("", sdcc1_apps_clk_src.c, ""), CLK_LOOKUP("", sdcc2_apps_clk_src.c, ""), CLK_LOOKUP("", usb_hs_system_clk_src.c, ""), CLK_LOOKUP("", pcie_0_aux_clk_src.c, ""), CLK_LOOKUP("", pcie_0_pipe_clk_src.c, ""), CLK_LOOKUP("", pcie_1_aux_clk_src.c, ""), CLK_LOOKUP("", pcie_1_pipe_clk_src.c, ""), /* BLSP1 clocks. Only the valid configs are present in the table */ CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f991f000.serial"), CLK_LOOKUP("iface_clk", gcc_blsp1_ahb_clk.c, "f9924000.i2c"), CLK_LOOKUP("core_clk", gcc_blsp1_uart3_apps_clk.c, "f991f000.serial"), CLK_LOOKUP("core_clk", gcc_blsp1_qup2_i2c_apps_clk.c, "f9924000.i2c"), /* BLSP2 clocks. Only the valid configs are present in the table */ CLK_LOOKUP("iface_clk", gcc_blsp2_ahb_clk.c, "f9960000.serial"), CLK_LOOKUP("iface_clk", gcc_blsp2_ahb_clk.c, "f9966000.i2c"), CLK_LOOKUP("core_clk", gcc_blsp2_qup4_i2c_apps_clk.c, "f9966000.i2c"), CLK_LOOKUP("core_clk", gcc_blsp2_uart4_apps_clk.c, "f9960000.serial"), CLK_LOOKUP("iface_clk", gcc_prng_ahb_clk.c, "f9bff000.qcom,msm-rng"), CLK_LOOKUP("", gcc_boot_rom_ahb_clk.c, ""), CLK_LOOKUP("pdm2_clk", gcc_pdm2_clk.c, "f9b10000.qcom,pdm"), CLK_LOOKUP("ahb_clk", gcc_pdm_ahb_clk.c, "f9b10000.qcom,pdm"), /* SDCC clocks */ CLK_LOOKUP("iface_clk", gcc_sdcc1_ahb_clk.c, "msm_sdcc.1"), CLK_LOOKUP("core_clk", gcc_sdcc1_apps_clk.c, "msm_sdcc.1"), CLK_LOOKUP("iface_clk", gcc_sdcc2_ahb_clk.c, "msm_sdcc.2"), CLK_LOOKUP("core_clk", gcc_sdcc2_apps_clk.c, "msm_sdcc.2"), /* USB clocks */ CLK_LOOKUP("iface_clk", gcc_usb_hs_ahb_clk.c, "f9a55000.usb"), CLK_LOOKUP("core_clk", gcc_usb_hs_system_clk.c, "f9a55000.usb"), CLK_LOOKUP("xo", xo_usb_hs_host_clk.c, "f9a55000.usb"), CLK_LOOKUP("iface_clk", gcc_usb_hs_ahb_clk.c, "msm_ehci_host"), CLK_LOOKUP("core_clk", gcc_usb_hs_system_clk.c, "msm_ehci_host"), CLK_LOOKUP("sleep_clk", 
gcc_usb2a_phy_sleep_clk.c, "msm_ehci_host"), CLK_LOOKUP("xo", xo_usb_hs_host_clk.c, "msm_ehci_host"), /* EMAC clocks */ CLK_LOOKUP("axi_clk", gcc_emac0_axi_clk.c, "feb20000.qcom,emac"), CLK_LOOKUP("cfg_ahb_clk", gcc_emac0_ahb_clk.c, "feb20000.qcom,emac"), CLK_LOOKUP("25m_clk", emac0_sys_25m_clk_src.c, "feb20000.qcom,emac"), CLK_LOOKUP("125m_clk", emac0_125m_clk_src.c, "feb20000.qcom,emac"), CLK_LOOKUP("tx_clk", emac0_tx_clk_src.c, "feb20000.qcom,emac"), CLK_LOOKUP("rx_clk", gcc_emac0_rx_clk.c, "feb20000.qcom,emac"), CLK_LOOKUP("sys_clk", gcc_emac0_sys_clk.c, "feb20000.qcom,emac"), CLK_LOOKUP("axi_clk", gcc_emac1_axi_clk.c, "feb00000.qcom,emac"), CLK_LOOKUP("cfg_ahb_clk", gcc_emac1_ahb_clk.c, "feb00000.qcom,emac"), CLK_LOOKUP("25m_clk", emac1_sys_25m_clk_src.c, "feb00000.qcom,emac"), CLK_LOOKUP("125m_clk", emac1_125m_clk_src.c, "feb00000.qcom,emac"), CLK_LOOKUP("tx_clk", emac1_tx_clk_src.c, "feb00000.qcom,emac"), CLK_LOOKUP("rx_clk", gcc_emac1_rx_clk.c, "feb00000.qcom,emac"), CLK_LOOKUP("sys_clk", gcc_emac1_sys_clk.c, "feb00000.qcom,emac"), /* PCIE clocks */ CLK_LOOKUP("pcie_0_aux_clk", gcc_pcie_0_aux_clk.c, "fc520000.qcom,pcie"), CLK_LOOKUP("pcie_0_cfg_ahb_clk", gcc_pcie_0_cfg_ahb_clk.c, "fc520000.qcom,pcie"), CLK_LOOKUP("pcie_0_mstr_axi_clk", gcc_pcie_0_mstr_axi_clk.c, "fc520000.qcom,pcie"), CLK_LOOKUP("pcie_0_pipe_clk", gcc_pcie_0_pipe_clk.c, "fc520000.qcom,pcie"), CLK_LOOKUP("pcie_0_slv_axi_clk", gcc_pcie_0_slv_axi_clk.c, "fc520000.qcom,pcie"), CLK_DUMMY("pcie_0_ref_clk_src", NULL, "fc520000.qcom,pcie", OFF), CLK_LOOKUP("pcie_1_aux_clk", gcc_pcie_1_aux_clk.c, "fc528000.qcom,pcie"), CLK_LOOKUP("pcie_1_cfg_ahb_clk", gcc_pcie_1_cfg_ahb_clk.c, "fc528000.qcom,pcie"), CLK_LOOKUP("pcie_1_mstr_axi_clk", gcc_pcie_1_mstr_axi_clk.c, "fc528000.qcom,pcie"), CLK_LOOKUP("pcie_1_pipe_clk", gcc_pcie_1_pipe_clk.c, "fc528000.qcom,pcie"), CLK_LOOKUP("pcie_1_slv_axi_clk", gcc_pcie_1_slv_axi_clk.c, "fc528000.qcom,pcie"), CLK_DUMMY("pcie_1_ref_clk_src", NULL, "fc528000.qcom,pcie", 
OFF), CLK_LOOKUP("hfpll_src", xo_a_clk_src.c, "f9016000.qcom,clock-krait"), CLK_LOOKUP("aux_clk", gpll0_ao_clk_src.c, "f9016000.qcom,clock-krait"), CLK_LOOKUP("xo_clk", xo_clk_src.c, ""), /* MPM */ CLK_LOOKUP("xo", xo_clk_src.c, "fc4281d0.qcom,mpm"), /* LDO */ CLK_LOOKUP("pcie_0_ldo", pcie_0_phy_ldo.c, "fc520000.qcom,pcie"), CLK_LOOKUP("pcie_1_ldo", pcie_1_phy_ldo.c, "fc528000.qcom,pcie"), /* QSEECOM clocks */ CLK_LOOKUP("core_clk", gcc_ce1_clk.c, "qseecom"), CLK_LOOKUP("iface_clk", gcc_ce1_ahb_clk.c, "qseecom"), CLK_LOOKUP("bus_clk", gcc_ce1_axi_clk.c, "qseecom"), CLK_LOOKUP("core_clk_src", ce1_clk_src.c, "qseecom"), CLK_LOOKUP("ce_drv_core_clk", gcc_ce2_clk.c, "qseecom"), CLK_LOOKUP("ce_drv_iface_clk", gcc_ce2_ahb_clk.c, "qseecom"), CLK_LOOKUP("ce_drv_bus_clk", gcc_ce2_axi_clk.c, "qseecom"), CLK_LOOKUP("ce_drv_core_clk_src", ce2_clk_src.c, "qseecom"), }; static struct pll_config_regs gpll4_regs __initdata = { .l_reg = (void __iomem *)GPLL4_L, .m_reg = (void __iomem *)GPLL4_M, .n_reg = (void __iomem *)GPLL4_N, .config_reg = (void __iomem *)GPLL4_USER_CTL, .mode_reg = (void __iomem *)GPLL4_MODE, .base = &virt_bases[GCC_BASE], }; /* PLL4 at 288 MHz, main output enabled. LJ mode. */ static struct pll_config gpll4_config __initdata = { .l = 0x1e, .m = 0x0, .n = 0x1, .vco_val = 0x1, .vco_mask = BM(21, 20), .pre_div_val = 0x0, .pre_div_mask = BM(14, 12), .post_div_val = BIT(8), .post_div_mask = BM(9, 8), .mn_ena_val = BIT(24), .mn_ena_mask = BIT(24), .main_output_val = BIT(0), .main_output_mask = BIT(0), }; static void __init reg_init(void) { u32 regval; configure_sr_hpm_lp_pll(&gpll4_config, &gpll4_regs, 1); /* Vote for GPLL0 to turn on. Needed by acpuclock. 
*/ regval = readl_relaxed(GCC_REG_BASE(APCS_GPLL_ENA_VOTE)); regval |= BIT(0); writel_relaxed(regval, GCC_REG_BASE(APCS_GPLL_ENA_VOTE)); regval = readl_relaxed( GCC_REG_BASE(APCS_CLOCK_BRANCH_ENA_VOTE)); writel_relaxed(regval | BIT(26) | BIT(25), GCC_REG_BASE(APCS_CLOCK_BRANCH_ENA_VOTE)); } static void __init fsm9900_clock_post_init(void) { /* Set rates for single-rate clocks. */ clk_set_rate(&usb_hs_system_clk_src.c, usb_hs_system_clk_src.freq_tbl[0].freq_hz); } #define GCC_CC_PHYS 0xFC400000 #define GCC_CC_SIZE SZ_16K #define APCS_GCC_CC_PHYS 0xF9011000 #define APCS_GCC_CC_SIZE SZ_4K static void __init fsm9900_clock_pre_init(void) { virt_bases[GCC_BASE] = ioremap(GCC_CC_PHYS, GCC_CC_SIZE); if (!virt_bases[GCC_BASE]) panic("clock-fsm9900: Unable to ioremap GCC memory!"); virt_bases[APCS_BASE] = ioremap(APCS_GCC_CC_PHYS, APCS_GCC_CC_SIZE); if (!virt_bases[APCS_BASE]) panic("clock-fsm9900: Unable to ioremap APCS_GCC_CC memory!"); clk_ops_local_pll.enable = sr_hpm_lp_pll_clk_enable; /* This chip does not allow vdd_dig to be modified after bootup */ regulator_use_dummy_regulator(); vdd_dig.regulator[0] = regulator_get(NULL, "vdd_dig"); reg_init(); } struct clock_init_data fsm9900_clock_init_data __initdata = { .table = fsm_clocks_9900, .size = ARRAY_SIZE(fsm_clocks_9900), .pre_init = fsm9900_clock_pre_init, .post_init = fsm9900_clock_post_init, }; /* These tables are for use in sim and rumi targets */ static struct clk_lookup fsm_clocks_dummy[] = { CLK_DUMMY("core_clk", BLSP2_UART_CLK, "f9960000.serial", OFF), CLK_DUMMY("iface_clk", BLSP2_UART_CLK, "f9960000.serial", OFF), CLK_DUMMY("core_clk", BLSP1_UART_CLK, "f991f000.serial", OFF), CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF), CLK_DUMMY("core_clk", BLSP2_I2C_CLK, "f9966000.i2c", OFF), CLK_DUMMY("iface_clk", BLSP2_I2C_CLK, "f9966000.i2c", OFF), CLK_DUMMY("core_clk", BLSP1_I2C_CLK, "f9924000.i2c", OFF), CLK_DUMMY("iface_clk", BLSP1_I2C_CLK, "f9924000.i2c", OFF), CLK_DUMMY("core_clk", NULL, 
"f9a55000.usb", OFF), CLK_DUMMY("iface_clk", NULL, "f9a55000.usb", OFF), CLK_DUMMY("phy_clk", NULL, "f9a55000.usb", OFF), CLK_DUMMY("xo", NULL, "f9a55000.usb", OFF), CLK_DUMMY("core_clk", NULL, "msm_ehci_host", OFF), CLK_DUMMY("iface_clk", NULL, "msm_ehci_host", OFF), CLK_DUMMY("sleep_clk", NULL, "msm_ehci_host", OFF), CLK_DUMMY("xo", NULL, "msm_ehci_host", OFF), CLK_DUMMY("core_clk", NULL, "f9824900.sdhci_msm", OFF), CLK_DUMMY("iface_clk", NULL, "f9824900.sdhci_msm", OFF), CLK_DUMMY("core_clk", NULL, "f98a4900.sdhci_msm", OFF), CLK_DUMMY("iface_clk", NULL, "f98a4900.sdhci_msm", OFF), CLK_DUMMY("core_clk", SDC1_CLK, "msm_sdcc.1", OFF), CLK_DUMMY("iface_clk", SDC1_P_CLK, "msm_sdcc.1", OFF), CLK_DUMMY("core_clk", SDC2_CLK, "msm_sdcc.2", OFF), CLK_DUMMY("iface_clk", SDC2_P_CLK, "msm_sdcc.2", OFF), CLK_DUMMY("core_clk", BLSP1_UART_CLK, "f991f000.serial", OFF), CLK_DUMMY("iface_clk", BLSP1_UART_CLK, "f991f000.serial", OFF), CLK_DUMMY("core_clk", NULL, "fd440000.qcom,qcrypto", OFF), CLK_DUMMY("iface_clk", NULL, "fd440000.qcom,qcrypto", OFF), CLK_DUMMY("bus_clk", NULL, "fd440000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk_src", NULL, "fd440000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk", NULL, "fe040000.qcom,qcrypto", OFF), CLK_DUMMY("iface_clk", NULL, "fe040000.qcom,qcrypto", OFF), CLK_DUMMY("bus_clk", NULL, "fe040000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk_src", NULL, "fe040000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk", NULL, "fe000000.qcom,qcrypto", OFF), CLK_DUMMY("iface_clk", NULL, "fe000000.qcom,qcrypto", OFF), CLK_DUMMY("bus_clk", NULL, "fe000000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk_src", NULL, "fe000000.qcom,qcrypto", OFF), CLK_DUMMY("core_clk", NULL, "fe140000.qcom,qcota", OFF), CLK_DUMMY("iface_clk", NULL, "fe140000.qcom,qcota", OFF), CLK_DUMMY("bus_clk", NULL, "fe140000.qcom,qcota", OFF), CLK_DUMMY("core_clk_src", NULL, "fe140000.qcom,qcota", OFF), CLK_DUMMY("core_clk", NULL, "fe0c0000.qcom,qcota", OFF), CLK_DUMMY("iface_clk", NULL, "fe0c0000.qcom,qcota", OFF), 
CLK_DUMMY("bus_clk", NULL, "fe0c0000.qcom,qcota", OFF), CLK_DUMMY("core_clk_src", NULL, "fe0c0000.qcom,qcota", OFF), CLK_DUMMY("dma_bam_pclk", NULL, "msm_sps", OFF), CLK_DUMMY("dfab_clk", NULL, "msm_sps", OFF), CLK_DUMMY("iface_clk", NULL, "f9bff000.qcom,msm-rng", OFF), }; struct clock_init_data fsm9900_dummy_clock_init_data __initdata = { .table = fsm_clocks_dummy, .size = ARRAY_SIZE(fsm_clocks_dummy), };
gpl-2.0
ShinySide/G530P_Permissive
drivers/video/sa1100fb.c
2077
38025
/* * linux/drivers/video/sa1100fb.c * * Copyright (C) 1999 Eric A. Thomas * Based on acornfb.c Copyright (C) Russell King. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * StrongARM 1100 LCD Controller Frame Buffer Driver * * Please direct your questions and comments on this driver to the following * email address: * * linux-arm-kernel@lists.arm.linux.org.uk * * Clean patches should be sent to the ARM Linux Patch System. Please see the * following web page for more information: * * http://www.arm.linux.org.uk/developer/patches/info.shtml * * Thank you. * * Known problems: * - With the Neponset plugged into an Assabet, LCD powerdown * doesn't work (LCD stays powered up). Therefore we shouldn't * blank the screen. * - We don't limit the CPU clock rate nor the mode selection * according to the available SDRAM bandwidth. * * Other notes: * - Linear grayscale palettes and the kernel. * Such code does not belong in the kernel. The kernel frame buffer * drivers do not expect a linear colourmap, but a colourmap based on * the VT100 standard mapping. * * If your _userspace_ requires a linear colourmap, then the setup of * such a colourmap belongs _in userspace_, not in the kernel. Code * to set the colourmap correctly from user space has been sent to * David Neuer. It's around 8 lines of C code, plus another 4 to * detect if we are using grayscale. * * - The following must never be specified in a panel definition: * LCCR0_LtlEnd, LCCR3_PixClkDiv, LCCR3_VrtSnchL, LCCR3_HorSnchL * * - The following should be specified: * either LCCR0_Color or LCCR0_Mono * either LCCR0_Sngl or LCCR0_Dual * either LCCR0_Act or LCCR0_Pas * either LCCR3_OutEnH or LCCD3_OutEnL * either LCCR3_PixRsEdg or LCCR3_PixFlEdg * either LCCR3_ACBsDiv or LCCR3_ACBsCntOff * * Code Status: * 1999/04/01: * - Driver appears to be working for Brutus 320x200x8bpp mode. 
Other * resolutions are working, but only the 8bpp mode is supported. * Changes need to be made to the palette encode and decode routines * to support 4 and 16 bpp modes. * Driver is not designed to be a module. The FrameBuffer is statically * allocated since dynamic allocation of a 300k buffer cannot be * guaranteed. * * 1999/06/17: * - FrameBuffer memory is now allocated at run-time when the * driver is initialized. * * 2000/04/10: Nicolas Pitre <nico@fluxnic.net> * - Big cleanup for dynamic selection of machine type at run time. * * 2000/07/19: Jamey Hicks <jamey@crl.dec.com> * - Support for Bitsy aka Compaq iPAQ H3600 added. * * 2000/08/07: Tak-Shing Chan <tchan.rd@idthk.com> * Jeff Sutherland <jsutherland@accelent.com> * - Resolved an issue caused by a change made to the Assabet's PLD * earlier this year which broke the framebuffer driver for newer * Phase 4 Assabets. Some other parameters were changed to optimize * for the Sharp display. * * 2000/08/09: Kunihiko IMAI <imai@vasara.co.jp> * - XP860 support added * * 2000/08/19: Mark Huang <mhuang@livetoy.com> * - Allows standard options to be passed on the kernel command line * for most common passive displays. * * 2000/08/29: * - s/save_flags_cli/local_irq_save/ * - remove unneeded extra save_flags_cli in sa1100fb_enable_lcd_controller * * 2000/10/10: Erik Mouw <J.A.K.Mouw@its.tudelft.nl> * - Updated LART stuff. Fixed some minor bugs. * * 2000/10/30: Murphy Chen <murphy@mail.dialogue.com.tw> * - Pangolin support added * * 2000/10/31: Roman Jordan <jor@hoeft-wessel.de> * - Huw Webpanel support added * * 2000/11/23: Eric Peng <ericpeng@coventive.com> * - Freebird add * * 2001/02/07: Jamey Hicks <jamey.hicks@compaq.com> * Cliff Brake <cbrake@accelent.com> * - Added PM callback * * 2001/05/26: <rmk@arm.linux.org.uk> * - Fix 16bpp so that (a) we use the right colours rather than some * totally random colour depending on what was in page 0, and (b) * we don't de-reference a NULL pointer. 
* - remove duplicated implementation of consistent_alloc() * - convert dma address types to dma_addr_t * - remove unused 'montype' stuff * - remove redundant zero inits of init_var after the initial * memset. * - remove allow_modeset (acornfb idea does not belong here) * * 2001/05/28: <rmk@arm.linux.org.uk> * - massive cleanup - move machine dependent data into structures * - I've left various #warnings in - if you see one, and know * the hardware concerned, please get in contact with me. * * 2001/05/31: <rmk@arm.linux.org.uk> * - Fix LCCR1 HSW value, fix all machine type specifications to * keep values in line. (Please check your machine type specs) * * 2001/06/10: <rmk@arm.linux.org.uk> * - Fiddle with the LCD controller from task context only; mainly * so that we can run with interrupts on, and sleep. * - Convert #warnings into #errors. No pain, no gain. ;) * * 2001/06/14: <rmk@arm.linux.org.uk> * - Make the palette BPS value for 12bpp come out correctly. * - Take notice of "greyscale" on any colour depth. * - Make truecolor visuals use the RGB channel encoding information. * * 2001/07/02: <rmk@arm.linux.org.uk> * - Fix colourmap problems. * * 2001/07/13: <abraham@2d3d.co.za> * - Added support for the ICP LCD-Kit01 on LART. This LCD is * manufactured by Prime View, model no V16C6448AB * * 2001/07/23: <rmk@arm.linux.org.uk> * - Hand merge version from handhelds.org CVS tree. See patch * notes for 595/1 for more information. * - Drop 12bpp (it's 16bpp with different colour register mappings). * - This hardware can not do direct colour. Therefore we don't * support it. * * 2001/07/27: <rmk@arm.linux.org.uk> * - Halve YRES on dual scan LCDs. * * 2001/08/22: <rmk@arm.linux.org.uk> * - Add b/w iPAQ pixclock value. * * 2001/10/12: <rmk@arm.linux.org.uk> * - Add patch 681/1 and clean up stork definitions. 
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/fb.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/cpufreq.h>
#include <linux/gpio.h>
#include <linux/platform_device.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/io.h>

#include <video/sa1100fb.h>

#include <mach/hardware.h>
#include <asm/mach-types.h>
#include <mach/shannon.h>

/*
 * Complain if VAR is out of range.
 */
#define DEBUG_VAR 1

#include "sa1100fb.h"

/*
 * Default RGB bitfield layouts.  4bpp and 8bpp are palette modes (the
 * channel offsets/lengths all overlap); 16bpp is RGB565 truecolour.
 * A machine-specific override may replace these via inf->rgb[] in
 * sa1100fb_init_fbinfo().
 */
static const struct sa1100fb_rgb rgb_4 = {
	.red	= { .offset = 0,  .length = 4, },
	.green	= { .offset = 0,  .length = 4, },
	.blue	= { .offset = 0,  .length = 4, },
	.transp	= { .offset = 0,  .length = 0, },
};

static const struct sa1100fb_rgb rgb_8 = {
	.red	= { .offset = 0,  .length = 8, },
	.green	= { .offset = 0,  .length = 8, },
	.blue	= { .offset = 0,  .length = 8, },
	.transp	= { .offset = 0,  .length = 0, },
};

static const struct sa1100fb_rgb def_rgb_16 = {
	.red	= { .offset = 11, .length = 5, },
	.green	= { .offset = 5,  .length = 6, },
	.blue	= { .offset = 0,  .length = 5, },
	.transp	= { .offset = 0,  .length = 0, },
};

static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_info *);
static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state);

/*
 * Queue a controller state change (C_ENABLE/C_DISABLE/C_REENABLE) to be
 * performed by sa1100fb_task() in process context.  May be called from
 * atomic context, hence the deferral: the actual enable/disable sleeps.
 */
static inline void sa1100fb_schedule_work(struct sa1100fb_info *fbi, u_int state)
{
	unsigned long flags;

	local_irq_save(flags);
	/*
	 * We need to handle two requests being made at the same time.
	 * There are two important cases:
	 *  1. When we are changing VT (C_REENABLE) while unblanking
	 *     (C_ENABLE).  We must perform the unblanking, which will
	 *     do our REENABLE for us.
	 *  2. When we are blanking, but immediately unblank before we
	 *     have blanked.  We do the "REENABLE" thing here as well,
	 *     just to be sure.
	 */
	if (fbi->task_state == C_ENABLE && state == C_REENABLE)
		state = (u_int) -1;		/* drop: pending ENABLE covers it */
	if (fbi->task_state == C_DISABLE && state == C_ENABLE)
		state = C_REENABLE;

	if (state != (u_int)-1) {
		fbi->task_state = state;
		schedule_work(&fbi->task);
	}
	local_irq_restore(flags);
}

/*
 * Scale a 16-bit colour channel value into the bitfield position
 * described by @bf (used to build truecolour pseudo-palette entries).
 */
static inline u_int chan_to_field(u_int chan, struct fb_bitfield *bf)
{
	chan &= 0xffff;
	chan >>= 16 - bf->length;
	return chan << bf->offset;
}

/*
 * Convert bits-per-pixel to a hardware palette PBS value.
 */
static inline u_int palette_pbs(struct fb_var_screeninfo *var)
{
	int ret = 0;
	switch (var->bits_per_pixel) {
	case 4:
		ret = 0 << 12;
		break;
	case 8:
		ret = 1 << 12;
		break;
	case 16:
		ret = 2 << 12;
		break;
	}
	return ret;
}

/*
 * Write one hardware palette entry.  The SA-1100 palette holds 4:4:4
 * colour in the low 12 bits; entry 0 additionally carries the PBS mode
 * bits (bits 13:12), which is why it is OR'd in for regno == 0.
 * Returns 0 on success, 1 if regno is out of range.
 */
static int
sa1100fb_setpalettereg(u_int regno, u_int red, u_int green, u_int blue,
		       u_int trans, struct fb_info *info)
{
	struct sa1100fb_info *fbi = (struct sa1100fb_info *)info;
	u_int val, ret = 1;

	if (regno < fbi->palette_size) {
		/* reduce each 16-bit channel to 4 bits and pack R:G:B */
		val = ((red >> 4) & 0xf00);
		val |= ((green >> 8) & 0x0f0);
		val |= ((blue >> 12) & 0x00f);

		if (regno == 0)
			val |= palette_pbs(&fbi->fb.var);

		fbi->palette_cpu[regno] = val;
		ret = 0;
	}
	return ret;
}

/*
 * fb_ops.fb_setcolreg implementation: route a colour register write to
 * either the truecolour pseudo-palette or the hardware palette,
 * honouring the machine's inverse-video and greyscale options.
 */
static int
sa1100fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue,
		   u_int trans, struct fb_info *info)
{
	struct sa1100fb_info *fbi = (struct sa1100fb_info *)info;
	unsigned int val;
	int ret = 1;

	/*
	 * If inverse mode was selected, invert all the colours
	 * rather than the register number.  The register number
	 * is what you poke into the framebuffer to produce the
	 * colour you requested.
	 */
	if (fbi->inf->cmap_inverse) {
		red   = 0xffff - red;
		green = 0xffff - green;
		blue  = 0xffff - blue;
	}

	/*
	 * If greyscale is true, then we convert the RGB value
	 * to greyscale no matter what visual we are using.
	 * (Coefficients are the usual ITU-R 601 luma weights.)
	 */
	if (fbi->fb.var.grayscale)
		red = green = blue = (19595 * red + 38470 * green +
					7471 * blue) >> 16;

	switch (fbi->fb.fix.visual) {
	case FB_VISUAL_TRUECOLOR:
		/*
		 * 12 or 16-bit True Colour.  We encode the RGB value
		 * according to the RGB bitfield information.
		 */
		if (regno < 16) {
			u32 *pal = fbi->fb.pseudo_palette;

			val  = chan_to_field(red, &fbi->fb.var.red);
			val |= chan_to_field(green, &fbi->fb.var.green);
			val |= chan_to_field(blue, &fbi->fb.var.blue);

			pal[regno] = val;
			ret = 0;
		}
		break;

	case FB_VISUAL_STATIC_PSEUDOCOLOR:
	case FB_VISUAL_PSEUDOCOLOR:
		ret = sa1100fb_setpalettereg(regno, red, green, blue,
					     trans, info);
		break;
	}

	return ret;
}

#ifdef CONFIG_CPU_FREQ
/*
 *	sa1100fb_display_dma_period()
 *	Calculate the minimum period (in picoseconds) between two DMA
 *	requests for the LCD controller.  If we hit this, it means we're
 *	doing nothing but LCD DMA.
 */
static inline unsigned int sa1100fb_display_dma_period(struct fb_var_screeninfo *var)
{
	/*
	 * Period = pixclock * bits_per_byte * bytes_per_transfer
	 *		/ memory_bits_per_pixel;
	 */
	return var->pixclock * 8 * 16 / var->bits_per_pixel;
}
#endif

/*
 *	sa1100fb_check_var():
 *	  Round up in the following order: bits_per_pixel, xres,
 *	  yres, xres_virtual, yres_virtual, xoffset, yoffset, grayscale,
 *	  bitfields, horizontal timing, vertical timing.
 *	  Clamps the resolution to the panel limits and fills in the RGB
 *	  bitfields for the requested depth.  Returns -EINVAL for depths
 *	  other than 4, 8 or 16 bpp.
 */
static int
sa1100fb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
{
	struct sa1100fb_info *fbi = (struct sa1100fb_info *)info;
	int rgbidx;

	if (var->xres < MIN_XRES)
		var->xres = MIN_XRES;
	if (var->yres < MIN_YRES)
		var->yres = MIN_YRES;
	if (var->xres > fbi->inf->xres)
		var->xres = fbi->inf->xres;
	if (var->yres > fbi->inf->yres)
		var->yres = fbi->inf->yres;
	var->xres_virtual = max(var->xres_virtual, var->xres);
	var->yres_virtual = max(var->yres_virtual, var->yres);

	dev_dbg(fbi->dev, "var->bits_per_pixel=%d\n", var->bits_per_pixel);
	switch (var->bits_per_pixel) {
	case 4:
		rgbidx = RGB_4;
		break;
	case 8:
		rgbidx = RGB_8;
		break;
	case 16:
		rgbidx = RGB_16;
		break;
	default:
		return -EINVAL;
	}

	/*
	 * Copy the RGB parameters for this display
	 * from the machine specific parameters.
	 */
	var->red    = fbi->rgb[rgbidx]->red;
	var->green  = fbi->rgb[rgbidx]->green;
	var->blue   = fbi->rgb[rgbidx]->blue;
	var->transp = fbi->rgb[rgbidx]->transp;

	dev_dbg(fbi->dev, "RGBT length = %d:%d:%d:%d\n",
		var->red.length, var->green.length, var->blue.length,
		var->transp.length);

	dev_dbg(fbi->dev, "RGBT offset = %d:%d:%d:%d\n",
		var->red.offset, var->green.offset, var->blue.offset,
		var->transp.offset);

#ifdef CONFIG_CPU_FREQ
	dev_dbg(fbi->dev, "dma period = %d ps, clock = %d kHz\n",
		sa1100fb_display_dma_period(var),
		cpufreq_get(smp_processor_id()));
#endif

	return 0;
}

/* Let the board hook (if any) react to a visual change. */
static void sa1100fb_set_visual(struct sa1100fb_info *fbi, u32 visual)
{
	if (fbi->inf->set_visual)
		fbi->inf->set_visual(visual);
}

/*
 * sa1100fb_set_par():
 *	Set the user defined part of the display for the specified console.
 *	Derives the visual, line length and palette placement from the
 *	chosen depth, then reprograms the controller via
 *	sa1100fb_activate_var().
 */
static int sa1100fb_set_par(struct fb_info *info)
{
	struct sa1100fb_info *fbi = (struct sa1100fb_info *)info;
	struct fb_var_screeninfo *var = &info->var;
	unsigned long palette_mem_size;

	dev_dbg(fbi->dev, "set_par\n");

	if (var->bits_per_pixel == 16)
		fbi->fb.fix.visual = FB_VISUAL_TRUECOLOR;
	else if (!fbi->inf->cmap_static)
		fbi->fb.fix.visual = FB_VISUAL_PSEUDOCOLOR;
	else {
		/*
		 * Some people have weird ideas about wanting static
		 * pseudocolor maps.  I suspect their user space
		 * applications are broken.
		 */
		fbi->fb.fix.visual = FB_VISUAL_STATIC_PSEUDOCOLOR;
	}

	fbi->fb.fix.line_length = var->xres_virtual *
				  var->bits_per_pixel / 8;
	fbi->palette_size = var->bits_per_pixel == 8 ? 256 : 16;

	palette_mem_size = fbi->palette_size * sizeof(u16);

	dev_dbg(fbi->dev, "palette_mem_size = 0x%08lx\n", palette_mem_size);

	/* palette lives at the tail of the page preceding the framebuffer */
	fbi->palette_cpu = (u16 *)(fbi->map_cpu + PAGE_SIZE - palette_mem_size);
	fbi->palette_dma = fbi->map_dma + PAGE_SIZE - palette_mem_size;

	/*
	 * Set (any) board control register to handle new color depth
	 */
	sa1100fb_set_visual(fbi, fbi->fb.fix.visual);
	sa1100fb_activate_var(var, fbi);

	return 0;
}

#if 0
static int
sa1100fb_set_cmap(struct fb_cmap *cmap, int kspc, int con,
		  struct fb_info *info)
{
	struct sa1100fb_info *fbi = (struct sa1100fb_info *)info;

	/*
	 * Make sure the user isn't doing something stupid.
	 */
	if (!kspc && (fbi->fb.var.bits_per_pixel == 16 || fbi->inf->cmap_static))
		return -EINVAL;

	return gen_set_cmap(cmap, kspc, con, info);
}
#endif

/*
 * Formal definition of the VESA spec:
 *  On
 *	This refers to the state of the display when it is in full operation
 *  Stand-By
 *	This defines an optional operating state of minimal power reduction with
 *	the shortest recovery time
 *  Suspend
 *	This refers to a level of power management in which substantial power
 *	reduction is achieved by the display.  The display can have a longer
 *	recovery time from this state than from the Stand-by state
 *  Off
 *	This indicates that the display is consuming the lowest level of power
 *	and is non-operational.  Recovery from this state may optionally require
 *	the user to manually power on the monitor
 *
 *  Now, the fbdev driver adds an additional state, (blank), where they
 *  turn off the video (maybe by colormap tricks), but don't mess with the
 *  video itself: think of it semantically between on and Stand-By.
 *
 *  So here's what we should do in our fbdev blank routine:
 *
 *	VESA_NO_BLANKING (mode 0)	Video on,  front/back light on
 *	VESA_VSYNC_SUSPEND (mode 1)	Video on,  front/back light off
 *	VESA_HSYNC_SUSPEND (mode 2)	Video on,  front/back light off
 *	VESA_POWERDOWN (mode 3)		Video off, front/back light off
 *
 *  This will match the matrox implementation.
*/ /* * sa1100fb_blank(): * Blank the display by setting all palette values to zero. Note, the * 12 and 16 bpp modes don't really use the palette, so this will not * blank the display in all modes. */ static int sa1100fb_blank(int blank, struct fb_info *info) { struct sa1100fb_info *fbi = (struct sa1100fb_info *)info; int i; dev_dbg(fbi->dev, "sa1100fb_blank: blank=%d\n", blank); switch (blank) { case FB_BLANK_POWERDOWN: case FB_BLANK_VSYNC_SUSPEND: case FB_BLANK_HSYNC_SUSPEND: case FB_BLANK_NORMAL: if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR || fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) for (i = 0; i < fbi->palette_size; i++) sa1100fb_setpalettereg(i, 0, 0, 0, 0, info); sa1100fb_schedule_work(fbi, C_DISABLE); break; case FB_BLANK_UNBLANK: if (fbi->fb.fix.visual == FB_VISUAL_PSEUDOCOLOR || fbi->fb.fix.visual == FB_VISUAL_STATIC_PSEUDOCOLOR) fb_set_cmap(&fbi->fb.cmap, info); sa1100fb_schedule_work(fbi, C_ENABLE); } return 0; } static int sa1100fb_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct sa1100fb_info *fbi = (struct sa1100fb_info *)info; unsigned long off = vma->vm_pgoff << PAGE_SHIFT; if (off < info->fix.smem_len) { vma->vm_pgoff += 1; /* skip over the palette */ return dma_mmap_writecombine(fbi->dev, vma, fbi->map_cpu, fbi->map_dma, fbi->map_size); } vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); return vm_iomap_memory(vma, info->fix.mmio_start, info->fix.mmio_len); } static struct fb_ops sa1100fb_ops = { .owner = THIS_MODULE, .fb_check_var = sa1100fb_check_var, .fb_set_par = sa1100fb_set_par, // .fb_set_cmap = sa1100fb_set_cmap, .fb_setcolreg = sa1100fb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_blank = sa1100fb_blank, .fb_mmap = sa1100fb_mmap, }; /* * Calculate the PCD value from the clock rate (in picoseconds). * We take account of the PPCR clock setting. 
 */
static inline unsigned int get_pcd(unsigned int pixclock, unsigned int cpuclock)
{
	/* pixclock is in picoseconds, cpuclock in kHz */
	unsigned int pcd = cpuclock / 100;

	pcd *= pixclock;
	pcd /= 10000000;

	return pcd + 1;	/* make up for integer math truncations */
}

/*
 * sa1100fb_activate_var():
 *	Configures LCD Controller based on entries in var parameter.
 *	Settings are only written to the controller if changes were made.
 */
static int sa1100fb_activate_var(struct fb_var_screeninfo *var, struct sa1100fb_info *fbi)
{
	struct sa1100fb_lcd_reg new_regs;
	u_int half_screen_size, yres, pcd;
	u_long flags;

	dev_dbg(fbi->dev, "Configuring SA1100 LCD\n");

	dev_dbg(fbi->dev, "var: xres=%d hslen=%d lm=%d rm=%d\n",
		var->xres, var->hsync_len,
		var->left_margin, var->right_margin);
	dev_dbg(fbi->dev, "var: yres=%d vslen=%d um=%d bm=%d\n",
		var->yres, var->vsync_len,
		var->upper_margin, var->lower_margin);

#if DEBUG_VAR
	/* Sanity-check the timing fields against the register field widths;
	 * out-of-range values are reported but not rejected here. */
	if (var->xres < 16 || var->xres > 1024)
		dev_err(fbi->dev, "%s: invalid xres %d\n",
			fbi->fb.fix.id, var->xres);
	if (var->hsync_len < 1 || var->hsync_len > 64)
		dev_err(fbi->dev, "%s: invalid hsync_len %d\n",
			fbi->fb.fix.id, var->hsync_len);
	if (var->left_margin < 1 || var->left_margin > 255)
		dev_err(fbi->dev, "%s: invalid left_margin %d\n",
			fbi->fb.fix.id, var->left_margin);
	if (var->right_margin < 1 || var->right_margin > 255)
		dev_err(fbi->dev, "%s: invalid right_margin %d\n",
			fbi->fb.fix.id, var->right_margin);
	if (var->yres < 1 || var->yres > 1024)
		dev_err(fbi->dev, "%s: invalid yres %d\n",
			fbi->fb.fix.id, var->yres);
	if (var->vsync_len < 1 || var->vsync_len > 64)
		dev_err(fbi->dev, "%s: invalid vsync_len %d\n",
			fbi->fb.fix.id, var->vsync_len);
	if (var->upper_margin < 0 || var->upper_margin > 255)
		dev_err(fbi->dev, "%s: invalid upper_margin %d\n",
			fbi->fb.fix.id, var->upper_margin);
	if (var->lower_margin < 0 || var->lower_margin > 255)
		dev_err(fbi->dev, "%s: invalid lower_margin %d\n",
			fbi->fb.fix.id, var->lower_margin);
#endif

	new_regs.lccr0 = fbi->inf->lccr0 |
		LCCR0_LEN | LCCR0_LDM | LCCR0_BAM |
		LCCR0_ERM | LCCR0_LtlEnd | LCCR0_DMADel(0);

	new_regs.lccr1 =
		LCCR1_DisWdth(var->xres) +
		LCCR1_HorSnchWdth(var->hsync_len) +
		LCCR1_BegLnDel(var->left_margin) +
		LCCR1_EndLnDel(var->right_margin);

	/*
	 * If we have a dual scan LCD, then we need to halve
	 * the YRES parameter.
	 */
	yres = var->yres;
	if (fbi->inf->lccr0 & LCCR0_Dual)
		yres /= 2;

	new_regs.lccr2 =
		LCCR2_DisHght(yres) +
		LCCR2_VrtSnchWdth(var->vsync_len) +
		LCCR2_BegFrmDel(var->upper_margin) +
		LCCR2_EndFrmDel(var->lower_margin);

	pcd = get_pcd(var->pixclock, cpufreq_get(0));
	new_regs.lccr3 = LCCR3_PixClkDiv(pcd) | fbi->inf->lccr3 |
		(var->sync & FB_SYNC_HOR_HIGH_ACT ? LCCR3_HorSnchH : LCCR3_HorSnchL) |
		(var->sync & FB_SYNC_VERT_HIGH_ACT ? LCCR3_VrtSnchH : LCCR3_VrtSnchL);

	dev_dbg(fbi->dev, "nlccr0 = 0x%08lx\n", new_regs.lccr0);
	dev_dbg(fbi->dev, "nlccr1 = 0x%08lx\n", new_regs.lccr1);
	dev_dbg(fbi->dev, "nlccr2 = 0x%08lx\n", new_regs.lccr2);
	dev_dbg(fbi->dev, "nlccr3 = 0x%08lx\n", new_regs.lccr3);

	/* DMA base 2 points at the second half of the frame (dual scan) */
	half_screen_size = var->bits_per_pixel;
	half_screen_size = half_screen_size * var->xres * var->yres / 16;

	/* Update shadow copy atomically */
	local_irq_save(flags);
	fbi->dbar1 = fbi->palette_dma;
	fbi->dbar2 = fbi->screen_dma + half_screen_size;
	fbi->reg_lccr0 = new_regs.lccr0;
	fbi->reg_lccr1 = new_regs.lccr1;
	fbi->reg_lccr2 = new_regs.lccr2;
	fbi->reg_lccr3 = new_regs.lccr3;
	local_irq_restore(flags);

	/*
	 * Only update the registers if the controller is enabled
	 * and something has changed.
	 */
	if (readl_relaxed(fbi->base + LCCR0) != fbi->reg_lccr0 ||
	    readl_relaxed(fbi->base + LCCR1) != fbi->reg_lccr1 ||
	    readl_relaxed(fbi->base + LCCR2) != fbi->reg_lccr2 ||
	    readl_relaxed(fbi->base + LCCR3) != fbi->reg_lccr3 ||
	    readl_relaxed(fbi->base + DBAR1) != fbi->dbar1 ||
	    readl_relaxed(fbi->base + DBAR2) != fbi->dbar2)
		sa1100fb_schedule_work(fbi, C_REENABLE);

	return 0;
}

/*
 * NOTE! The following functions are purely helpers for set_ctrlr_state.
 * Do not call them directly; set_ctrlr_state does the correct serialisation
 * to ensure that things happen in the right way 100% of the time.
 *				-- rmk
 */
static inline void __sa1100fb_backlight_power(struct sa1100fb_info *fbi, int on)
{
	dev_dbg(fbi->dev, "backlight o%s\n", on ? "n" : "ff");

	if (fbi->inf->backlight_power)
		fbi->inf->backlight_power(on);
}

static inline void __sa1100fb_lcd_power(struct sa1100fb_info *fbi, int on)
{
	dev_dbg(fbi->dev, "LCD power o%s\n", on ? "n" : "ff");

	if (fbi->inf->lcd_power)
		fbi->inf->lcd_power(on);
}

/*
 * Route the LCD data GPIO pins to their alternate function where the
 * current mode needs the upper data lines (colour, active/dual panels).
 */
static void sa1100fb_setup_gpio(struct sa1100fb_info *fbi)
{
	u_int mask = 0;

	/*
	 * Enable GPIO<9:2> for LCD use if:
	 *  1. Active display, or
	 *  2. Color Dual Passive display
	 *
	 * see table 11.8 on page 11-27 in the SA1100 manual
	 *   -- Erik.
	 *
	 * SA1110 spec update nr. 25 says we can and should
	 * clear LDD15 to 12 for 4 or 8bpp modes with active
	 * panels.
	 */
	if ((fbi->reg_lccr0 & LCCR0_CMS) == LCCR0_Color &&
	    (fbi->reg_lccr0 & (LCCR0_Dual|LCCR0_Act)) != 0) {
		mask = GPIO_LDD11 | GPIO_LDD10 | GPIO_LDD9 | GPIO_LDD8;

		if (fbi->fb.var.bits_per_pixel > 8 ||
		    (fbi->reg_lccr0 & (LCCR0_Dual|LCCR0_Act)) == LCCR0_Dual)
			mask |= GPIO_LDD15 | GPIO_LDD14 |
				GPIO_LDD13 | GPIO_LDD12;

	}

	if (mask) {
		unsigned long flags;

		/*
		 * SA-1100 requires the GPIO direction register set
		 * appropriately for the alternate function.  Hence
		 * we set it here via bitmask rather than excessive
		 * fiddling via the GPIO subsystem - and even then
		 * we'll still have to deal with GAFR.
		 */
		local_irq_save(flags);
		GPDR |= mask;
		GAFR |= mask;
		local_irq_restore(flags);
	}
}

/*
 * Program the shadowed register set into the controller and switch it
 * on.  The write order below is mandated by the hardware; do not
 * reorder these MMIO accesses.
 */
static void sa1100fb_enable_controller(struct sa1100fb_info *fbi)
{
	dev_dbg(fbi->dev, "Enabling LCD controller\n");

	/*
	 * Make sure the mode bits are present in the first palette entry
	 */
	fbi->palette_cpu[0] &= 0xcfff;
	fbi->palette_cpu[0] |= palette_pbs(&fbi->fb.var);

	/* Sequence from 11.7.10 */
	writel_relaxed(fbi->reg_lccr3, fbi->base + LCCR3);
	writel_relaxed(fbi->reg_lccr2, fbi->base + LCCR2);
	writel_relaxed(fbi->reg_lccr1, fbi->base + LCCR1);
	writel_relaxed(fbi->reg_lccr0 & ~LCCR0_LEN, fbi->base + LCCR0);
	writel_relaxed(fbi->dbar1, fbi->base + DBAR1);
	writel_relaxed(fbi->dbar2, fbi->base + DBAR2);
	writel_relaxed(fbi->reg_lccr0 | LCCR0_LEN, fbi->base + LCCR0);

	if (machine_is_shannon())
		gpio_set_value(SHANNON_GPIO_DISP_EN, 1);

	dev_dbg(fbi->dev, "DBAR1: 0x%08x\n", readl_relaxed(fbi->base + DBAR1));
	dev_dbg(fbi->dev, "DBAR2: 0x%08x\n", readl_relaxed(fbi->base + DBAR2));
	dev_dbg(fbi->dev, "LCCR0: 0x%08x\n", readl_relaxed(fbi->base + LCCR0));
	dev_dbg(fbi->dev, "LCCR1: 0x%08x\n", readl_relaxed(fbi->base + LCCR1));
	dev_dbg(fbi->dev, "LCCR2: 0x%08x\n", readl_relaxed(fbi->base + LCCR2));
	dev_dbg(fbi->dev, "LCCR3: 0x%08x\n", readl_relaxed(fbi->base + LCCR3));
}

/*
 * Disable the controller and wait (up to ~20ms) for the "LCD disable
 * done" interrupt to wake us via ctrlr_wait.  Must be called from
 * sleepable task context.
 */
static void sa1100fb_disable_controller(struct sa1100fb_info *fbi)
{
	DECLARE_WAITQUEUE(wait, current);
	u32 lccr0;

	dev_dbg(fbi->dev, "Disabling LCD controller\n");

	if (machine_is_shannon())
		gpio_set_value(SHANNON_GPIO_DISP_EN, 0);

	/* queue ourselves *before* poking the hardware so the IRQ
	 * handler's wake_up() cannot be missed */
	set_current_state(TASK_UNINTERRUPTIBLE);
	add_wait_queue(&fbi->ctrlr_wait, &wait);

	/* Clear LCD Status Register */
	writel_relaxed(~0, fbi->base + LCSR);

	lccr0 = readl_relaxed(fbi->base + LCCR0);
	lccr0 &= ~LCCR0_LDM;	/* Enable LCD Disable Done Interrupt */
	writel_relaxed(lccr0, fbi->base + LCCR0);
	lccr0 &= ~LCCR0_LEN;	/* Disable LCD Controller */
	writel_relaxed(lccr0, fbi->base + LCCR0);

	schedule_timeout(20 * HZ / 1000);
	remove_wait_queue(&fbi->ctrlr_wait, &wait);
}

/*
 *  sa1100fb_handle_irq: Handle 'LCD DONE'
 *  interrupts.  On "LCD disable done" (LDD) it masks the interrupt
 *  again and wakes whoever is sleeping in sa1100fb_disable_controller().
 */
static irqreturn_t sa1100fb_handle_irq(int irq, void *dev_id)
{
	struct sa1100fb_info *fbi = dev_id;
	unsigned int lcsr = readl_relaxed(fbi->base + LCSR);

	if (lcsr & LCSR_LDD) {
		u32 lccr0 = readl_relaxed(fbi->base + LCCR0) | LCCR0_LDM;
		writel_relaxed(lccr0, fbi->base + LCCR0);
		wake_up(&fbi->ctrlr_wait);
	}

	/* acknowledge all pending status bits */
	writel_relaxed(lcsr, fbi->base + LCSR);

	return IRQ_HANDLED;
}

/*
 * This function must be called from task context only, since it will
 * sleep when disabling the LCD controller, or if we get two contending
 * processes trying to alter state.
 */
static void set_ctrlr_state(struct sa1100fb_info *fbi, u_int state)
{
	u_int old_state;

	mutex_lock(&fbi->ctrlr_lock);

	old_state = fbi->state;

	/*
	 * Hack around fbcon initialisation.
	 */
	if (old_state == C_STARTUP && state == C_REENABLE)
		state = C_ENABLE;

	switch (state) {
	case C_DISABLE_CLKCHANGE:
		/*
		 * Disable controller for clock change.  If the
		 * controller is already disabled, then do nothing.
		 */
		if (old_state != C_DISABLE && old_state != C_DISABLE_PM) {
			fbi->state = state;
			sa1100fb_disable_controller(fbi);
		}
		break;

	case C_DISABLE_PM:
	case C_DISABLE:
		/*
		 * Disable controller
		 */
		if (old_state != C_DISABLE) {
			fbi->state = state;

			__sa1100fb_backlight_power(fbi, 0);
			if (old_state != C_DISABLE_CLKCHANGE)
				sa1100fb_disable_controller(fbi);
			__sa1100fb_lcd_power(fbi, 0);
		}
		break;

	case C_ENABLE_CLKCHANGE:
		/*
		 * Enable the controller after clock change.  Only
		 * do this if we were disabled for the clock change.
		 */
		if (old_state == C_DISABLE_CLKCHANGE) {
			fbi->state = C_ENABLE;
			sa1100fb_enable_controller(fbi);
		}
		break;

	case C_REENABLE:
		/*
		 * Re-enable the controller only if it was already
		 * enabled.  This is so we reprogram the control
		 * registers.
		 */
		if (old_state == C_ENABLE) {
			sa1100fb_disable_controller(fbi);
			sa1100fb_setup_gpio(fbi);
			sa1100fb_enable_controller(fbi);
		}
		break;

	case C_ENABLE_PM:
		/*
		 * Re-enable the controller after PM.  This is not
		 * perfect - think about the case where we were doing
		 * a clock change, and we suspended half-way through.
		 */
		if (old_state != C_DISABLE_PM)
			break;
		/* fall through */

	case C_ENABLE:
		/*
		 * Power up the LCD screen, enable controller, and
		 * turn on the backlight.
		 */
		if (old_state != C_ENABLE) {
			fbi->state = C_ENABLE;
			sa1100fb_setup_gpio(fbi);
			__sa1100fb_lcd_power(fbi, 1);
			sa1100fb_enable_controller(fbi);
			__sa1100fb_backlight_power(fbi, 1);
		}
		break;
	}
	mutex_unlock(&fbi->ctrlr_lock);
}

/*
 * Our LCD controller task (which is called when we blank or unblank)
 * via keventd.
 */
static void sa1100fb_task(struct work_struct *w)
{
	struct sa1100fb_info *fbi = container_of(w, struct sa1100fb_info, task);
	/* consume the pending request atomically (see sa1100fb_schedule_work) */
	u_int state = xchg(&fbi->task_state, -1);

	set_ctrlr_state(fbi, state);
}

#ifdef CONFIG_CPU_FREQ
/*
 * Calculate the minimum DMA period over all displays that we own.
 * This, together with the SDRAM bandwidth defines the slowest CPU
 * frequency that can be selected.
 */
static unsigned int sa1100fb_min_dma_period(struct sa1100fb_info *fbi)
{
#if 0
	unsigned int min_period = (unsigned int)-1;
	int i;

	for (i = 0; i < MAX_NR_CONSOLES; i++) {
		struct display *disp = &fb_display[i];
		unsigned int period;

		/*
		 * Do we own this display?
		 */
		if (disp->fb_info != &fbi->fb)
			continue;

		/*
		 * Ok, calculate its DMA period
		 */
		period = sa1100fb_display_dma_period(&disp->var);
		if (period < min_period)
			min_period = period;
	}

	return min_period;
#else
	/*
	 * FIXME: we need to verify _all_ consoles.
	 */
	return sa1100fb_display_dma_period(&fbi->fb.var);
#endif
}

/*
 * CPU clock speed change handler.  We need to adjust the LCD timing
 * parameters when the CPU clock is adjusted by the power management
 * subsystem.
*/ static int sa1100fb_freq_transition(struct notifier_block *nb, unsigned long val, void *data) { struct sa1100fb_info *fbi = TO_INF(nb, freq_transition); struct cpufreq_freqs *f = data; u_int pcd; switch (val) { case CPUFREQ_PRECHANGE: set_ctrlr_state(fbi, C_DISABLE_CLKCHANGE); break; case CPUFREQ_POSTCHANGE: pcd = get_pcd(fbi->fb.var.pixclock, f->new); fbi->reg_lccr3 = (fbi->reg_lccr3 & ~0xff) | LCCR3_PixClkDiv(pcd); set_ctrlr_state(fbi, C_ENABLE_CLKCHANGE); break; } return 0; } static int sa1100fb_freq_policy(struct notifier_block *nb, unsigned long val, void *data) { struct sa1100fb_info *fbi = TO_INF(nb, freq_policy); struct cpufreq_policy *policy = data; switch (val) { case CPUFREQ_ADJUST: case CPUFREQ_INCOMPATIBLE: dev_dbg(fbi->dev, "min dma period: %d ps, " "new clock %d kHz\n", sa1100fb_min_dma_period(fbi), policy->max); /* todo: fill in min/max values */ break; case CPUFREQ_NOTIFY: do {} while(0); /* todo: panic if min/max values aren't fulfilled * [can't really happen unless there's a bug in the * CPU policy verififcation process * */ break; } return 0; } #endif #ifdef CONFIG_PM /* * Power management hooks. Note that we won't be called from IRQ context, * unlike the blank functions above, so we may sleep. */ static int sa1100fb_suspend(struct platform_device *dev, pm_message_t state) { struct sa1100fb_info *fbi = platform_get_drvdata(dev); set_ctrlr_state(fbi, C_DISABLE_PM); return 0; } static int sa1100fb_resume(struct platform_device *dev) { struct sa1100fb_info *fbi = platform_get_drvdata(dev); set_ctrlr_state(fbi, C_ENABLE_PM); return 0; } #else #define sa1100fb_suspend NULL #define sa1100fb_resume NULL #endif /* * sa1100fb_map_video_memory(): * Allocates the DRAM memory for the frame buffer. This buffer is * remapped into a non-cached, non-buffered, memory region to * allow palette and pixel writes to occur without flushing the * cache. Once this area is remapped, all virtual memory * access to the video memory should occur at the new region. 
 */
static int sa1100fb_map_video_memory(struct sa1100fb_info *fbi)
{
	/*
	 * We reserve one page for the palette, plus the size
	 * of the framebuffer.
	 */
	fbi->map_size = PAGE_ALIGN(fbi->fb.fix.smem_len + PAGE_SIZE);
	fbi->map_cpu = dma_alloc_writecombine(fbi->dev, fbi->map_size,
					      &fbi->map_dma, GFP_KERNEL);

	if (fbi->map_cpu) {
		/* pixel data starts after the palette page */
		fbi->fb.screen_base = fbi->map_cpu + PAGE_SIZE;
		fbi->screen_dma = fbi->map_dma + PAGE_SIZE;
		/*
		 * FIXME: this is actually the wrong thing to place in
		 * smem_start.  But fbdev suffers from the problem that
		 * it needs an API which doesn't exist (in this case,
		 * dma_writecombine_mmap)
		 */
		fbi->fb.fix.smem_start = fbi->screen_dma;
	}

	return fbi->map_cpu ? 0 : -ENOMEM;
}

/* Fake monspecs to fill in fbinfo structure */
static struct fb_monspecs monspecs = {
	.hfmin	= 30000,
	.hfmax	= 70000,
	.vfmin	= 50,
	.vfmax	= 65,
};

/*
 * Allocate and initialise the driver state (struct sa1100fb_info plus a
 * trailing 16-entry pseudo-palette) from the board's platform data.
 * Returns NULL on allocation failure; panics on clearly bogus LCCR3 /
 * pixclock platform data.
 */
static struct sa1100fb_info *sa1100fb_init_fbinfo(struct device *dev)
{
	struct sa1100fb_mach_info *inf = dev->platform_data;
	struct sa1100fb_info *fbi;
	unsigned i;

	/* extra sizeof(u32) * 16 is the truecolour pseudo-palette */
	fbi = kmalloc(sizeof(struct sa1100fb_info) + sizeof(u32) * 16,
		      GFP_KERNEL);
	if (!fbi)
		return NULL;

	memset(fbi, 0, sizeof(struct sa1100fb_info));

	fbi->dev = dev;

	strcpy(fbi->fb.fix.id, SA1100_NAME);

	fbi->fb.fix.type	= FB_TYPE_PACKED_PIXELS;
	fbi->fb.fix.type_aux	= 0;
	fbi->fb.fix.xpanstep	= 0;
	fbi->fb.fix.ypanstep	= 0;
	fbi->fb.fix.ywrapstep	= 0;
	fbi->fb.fix.accel	= FB_ACCEL_NONE;

	fbi->fb.var.nonstd	= 0;
	fbi->fb.var.activate	= FB_ACTIVATE_NOW;
	fbi->fb.var.height	= -1;
	fbi->fb.var.width	= -1;
	fbi->fb.var.accel_flags	= 0;
	fbi->fb.var.vmode	= FB_VMODE_NONINTERLACED;

	fbi->fb.fbops		= &sa1100fb_ops;
	fbi->fb.flags		= FBINFO_DEFAULT;
	fbi->fb.monspecs	= monspecs;
	fbi->fb.pseudo_palette	= (fbi + 1);

	fbi->rgb[RGB_4]		= &rgb_4;
	fbi->rgb[RGB_8]		= &rgb_8;
	fbi->rgb[RGB_16]	= &def_rgb_16;

	/*
	 * People just don't seem to get this.  We don't support
	 * anything but correct entries now, so panic if someone
	 * does something stupid.
	 */
	if (inf->lccr3 & (LCCR3_VrtSnchL|LCCR3_HorSnchL|0xff) ||
	    inf->pixclock == 0)
		panic("sa1100fb error: invalid LCCR3 fields set or zero "
			"pixclock.");

	fbi->fb.var.xres		= inf->xres;
	fbi->fb.var.xres_virtual	= inf->xres;
	fbi->fb.var.yres		= inf->yres;
	fbi->fb.var.yres_virtual	= inf->yres;
	fbi->fb.var.bits_per_pixel	= inf->bpp;
	fbi->fb.var.pixclock		= inf->pixclock;
	fbi->fb.var.hsync_len		= inf->hsync_len;
	fbi->fb.var.left_margin		= inf->left_margin;
	fbi->fb.var.right_margin	= inf->right_margin;
	fbi->fb.var.vsync_len		= inf->vsync_len;
	fbi->fb.var.upper_margin	= inf->upper_margin;
	fbi->fb.var.lower_margin	= inf->lower_margin;
	fbi->fb.var.sync		= inf->sync;
	fbi->fb.var.grayscale		= inf->cmap_greyscale;
	fbi->state			= C_STARTUP;
	fbi->task_state			= (u_char)-1;
	fbi->fb.fix.smem_len		= inf->xres * inf->yres *
					  inf->bpp / 8;
	fbi->inf			= inf;

	/* Copy the RGB bitfield overrides */
	for (i = 0; i < NR_RGB; i++)
		if (inf->rgb[i])
			fbi->rgb[i] = inf->rgb[i];

	init_waitqueue_head(&fbi->ctrlr_wait);
	INIT_WORK(&fbi->task, sa1100fb_task);
	mutex_init(&fbi->ctrlr_lock);

	return fbi;
}

/*
 * Platform probe: claim the register window and IRQ, allocate the DMA
 * framebuffer, and register with the fbdev core.
 */
static int sa1100fb_probe(struct platform_device *pdev)
{
	struct sa1100fb_info *fbi;
	struct resource *res;
	int ret, irq;

	if (!pdev->dev.platform_data) {
		dev_err(&pdev->dev, "no platform LCD data\n");
		return -EINVAL;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || !res)
		return -EINVAL;

	if (!request_mem_region(res->start, resource_size(res), "LCD"))
		return -EBUSY;

	fbi = sa1100fb_init_fbinfo(&pdev->dev);
	ret = -ENOMEM;
	if (!fbi)
		goto failed;

	fbi->base = ioremap(res->start, resource_size(res));
	if (!fbi->base)
		goto failed;

	/* Initialize video memory */
	ret = sa1100fb_map_video_memory(fbi);
	if (ret)
		goto failed;

	/*
	 * NOTE(review): from this point on a failure path does not free
	 * the DMA framebuffer allocated above (no dma_free_writecombine
	 * on the way out) - confirm whether this leak on probe failure
	 * is acceptable given the driver cannot be unloaded.
	 */
	ret = request_irq(irq, sa1100fb_handle_irq, 0, "LCD", fbi);
	if (ret) {
		dev_err(&pdev->dev, "request_irq failed: %d\n", ret);
		goto failed;
	}

	if (machine_is_shannon()) {
		ret = gpio_request_one(SHANNON_GPIO_DISP_EN,
			GPIOF_OUT_INIT_LOW, "display enable");
		if (ret)
			goto err_free_irq;
	}

	/*
	 * This makes sure that our colour bitfield
	 * descriptors are correctly initialised.
	 */
	sa1100fb_check_var(&fbi->fb.var, &fbi->fb);

	platform_set_drvdata(pdev, fbi);

	ret = register_framebuffer(&fbi->fb);
	if (ret < 0)
		goto err_reg_fb;

#ifdef CONFIG_CPU_FREQ
	fbi->freq_transition.notifier_call = sa1100fb_freq_transition;
	fbi->freq_policy.notifier_call = sa1100fb_freq_policy;
	cpufreq_register_notifier(&fbi->freq_transition, CPUFREQ_TRANSITION_NOTIFIER);
	cpufreq_register_notifier(&fbi->freq_policy, CPUFREQ_POLICY_NOTIFIER);
#endif

	/* This driver cannot be unloaded at the moment */
	return 0;

 err_reg_fb:
	if (machine_is_shannon())
		gpio_free(SHANNON_GPIO_DISP_EN);
 err_free_irq:
	free_irq(irq, fbi);
 failed:
	if (fbi)
		iounmap(fbi->base);
	platform_set_drvdata(pdev, NULL);
	kfree(fbi);
	release_mem_region(res->start, resource_size(res));
	return ret;
}

static struct platform_driver sa1100fb_driver = {
	.probe		= sa1100fb_probe,
	.suspend	= sa1100fb_suspend,
	.resume		= sa1100fb_resume,
	.driver		= {
		.name	= "sa11x0-fb",
		.owner	= THIS_MODULE,
	},
};

int __init sa1100fb_init(void)
{
	/* bail out if the user disabled us on the command line */
	if (fb_get_options("sa1100fb", NULL))
		return -ENODEV;

	return platform_driver_register(&sa1100fb_driver);
}

/*
 * Historical command-line option parsing; the body has been disabled
 * (#if 0) since the machine-info rework and is kept for reference only.
 */
int __init sa1100fb_setup(char *options)
{
#if 0
	char *this_opt;

	if (!options || !*options)
		return 0;

	while ((this_opt = strsep(&options, ",")) != NULL) {

		if (!strncmp(this_opt, "bpp:", 4))
			current_par.max_bpp =
			    simple_strtoul(this_opt + 4, NULL, 0);

		if (!strncmp(this_opt, "lccr0:", 6))
			lcd_shadow.lccr0 =
			    simple_strtoul(this_opt + 6, NULL, 0);
		if (!strncmp(this_opt, "lccr1:", 6)) {
			lcd_shadow.lccr1 =
			    simple_strtoul(this_opt + 6, NULL, 0);
			current_par.max_xres = (lcd_shadow.lccr1 & 0x3ff) + 16;
		}
		if (!strncmp(this_opt, "lccr2:", 6)) {
			lcd_shadow.lccr2 =
			    simple_strtoul(this_opt + 6, NULL, 0);
			current_par.max_yres = (lcd_shadow.lccr0 & LCCR0_SDS) ?
			    ((lcd_shadow.lccr2 & 0x3ff) + 1) * 2 :
			    ((lcd_shadow.lccr2 & 0x3ff) + 1);
		}
		if (!strncmp(this_opt, "lccr3:", 6))
			lcd_shadow.lccr3 =
			    simple_strtoul(this_opt + 6, NULL, 0);
	}
#endif
	return 0;
}

module_init(sa1100fb_init);
MODULE_DESCRIPTION("StrongARM-1100/1110 framebuffer driver");
MODULE_LICENSE("GPL");
gpl-2.0
wkpark/lge-kernel-cx2
arch/powerpc/kernel/pci_64.c
2333
7665
/* * Port for PPC64 David Engebretsen, IBM Corp. * Contains common pci routines for ppc64 platform, pSeries and iSeries brands. * * Copyright (C) 2003 Anton Blanchard <anton@au.ibm.com>, IBM * Rework, based on alpha PCI code. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #undef DEBUG #include <linux/kernel.h> #include <linux/pci.h> #include <linux/string.h> #include <linux/init.h> #include <linux/bootmem.h> #include <linux/mm.h> #include <linux/list.h> #include <linux/syscalls.h> #include <linux/irq.h> #include <linux/vmalloc.h> #include <asm/processor.h> #include <asm/io.h> #include <asm/prom.h> #include <asm/pci-bridge.h> #include <asm/byteorder.h> #include <asm/machdep.h> #include <asm/ppc-pci.h> unsigned long pci_probe_only = 1; /* pci_io_base -- the base address from which io bars are offsets. * This is the lowest I/O base address (so bar values are always positive), * and it *must* be the start of ISA space if an ISA bus exists because * ISA drivers use hard coded offsets. If no ISA bus exists nothing * is mapped on the first 64K of IO space */ unsigned long pci_io_base = ISA_IO_BASE; EXPORT_SYMBOL(pci_io_base); static int __init pcibios_init(void) { struct pci_controller *hose, *tmp; printk(KERN_INFO "PCI: Probing PCI hardware\n"); /* For now, override phys_mem_access_prot. If we need it,g * later, we may move that initialization to each ppc_md */ ppc_md.phys_mem_access_prot = pci_phys_mem_access_prot; if (pci_probe_only) ppc_pci_flags |= PPC_PCI_PROBE_ONLY; /* On ppc64, we always enable PCI domains and we keep domain 0 * backward compatible in /proc for video cards */ ppc_pci_flags |= PPC_PCI_ENABLE_PROC_DOMAINS | PPC_PCI_COMPAT_DOMAIN_0; /* Scan all of the recorded PCI controllers. 
*/ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) { pcibios_scan_phb(hose); pci_bus_add_devices(hose->bus); } /* Call common code to handle resource allocation */ pcibios_resource_survey(); printk(KERN_DEBUG "PCI: Probing PCI hardware done\n"); return 0; } subsys_initcall(pcibios_init); #ifdef CONFIG_HOTPLUG int pcibios_unmap_io_space(struct pci_bus *bus) { struct pci_controller *hose; WARN_ON(bus == NULL); /* If this is not a PHB, we only flush the hash table over * the area mapped by this bridge. We don't play with the PTE * mappings since we might have to deal with sub-page alignemnts * so flushing the hash table is the only sane way to make sure * that no hash entries are covering that removed bridge area * while still allowing other busses overlapping those pages * * Note: If we ever support P2P hotplug on Book3E, we'll have * to do an appropriate TLB flush here too */ if (bus->self) { #ifdef CONFIG_PPC_STD_MMU_64 struct resource *res = bus->resource[0]; #endif pr_debug("IO unmapping for PCI-PCI bridge %s\n", pci_name(bus->self)); #ifdef CONFIG_PPC_STD_MMU_64 __flush_hash_table_range(&init_mm, res->start + _IO_BASE, res->end + _IO_BASE + 1); #endif return 0; } /* Get the host bridge */ hose = pci_bus_to_host(bus); /* Check if we have IOs allocated */ if (hose->io_base_alloc == 0) return 0; pr_debug("IO unmapping for PHB %s\n", hose->dn->full_name); pr_debug(" alloc=0x%p\n", hose->io_base_alloc); /* This is a PHB, we fully unmap the IO area */ vunmap(hose->io_base_alloc); return 0; } EXPORT_SYMBOL_GPL(pcibios_unmap_io_space); #endif /* CONFIG_HOTPLUG */ int __devinit pcibios_map_io_space(struct pci_bus *bus) { struct vm_struct *area; unsigned long phys_page; unsigned long size_page; unsigned long io_virt_offset; struct pci_controller *hose; WARN_ON(bus == NULL); /* If this not a PHB, nothing to do, page tables still exist and * thus HPTEs will be faulted in when needed */ if (bus->self) { pr_debug("IO mapping for PCI-PCI bridge %s\n", 
pci_name(bus->self)); pr_debug(" virt=0x%016llx...0x%016llx\n", bus->resource[0]->start + _IO_BASE, bus->resource[0]->end + _IO_BASE); return 0; } /* Get the host bridge */ hose = pci_bus_to_host(bus); phys_page = _ALIGN_DOWN(hose->io_base_phys, PAGE_SIZE); size_page = _ALIGN_UP(hose->pci_io_size, PAGE_SIZE); /* Make sure IO area address is clear */ hose->io_base_alloc = NULL; /* If there's no IO to map on that bus, get away too */ if (hose->pci_io_size == 0 || hose->io_base_phys == 0) return 0; /* Let's allocate some IO space for that guy. We don't pass * VM_IOREMAP because we don't care about alignment tricks that * the core does in that case. Maybe we should due to stupid card * with incomplete address decoding but I'd rather not deal with * those outside of the reserved 64K legacy region. */ area = __get_vm_area(size_page, 0, PHB_IO_BASE, PHB_IO_END); if (area == NULL) return -ENOMEM; hose->io_base_alloc = area->addr; hose->io_base_virt = (void __iomem *)(area->addr + hose->io_base_phys - phys_page); pr_debug("IO mapping for PHB %s\n", hose->dn->full_name); pr_debug(" phys=0x%016llx, virt=0x%p (alloc=0x%p)\n", hose->io_base_phys, hose->io_base_virt, hose->io_base_alloc); pr_debug(" size=0x%016llx (alloc=0x%016lx)\n", hose->pci_io_size, size_page); /* Establish the mapping */ if (__ioremap_at(phys_page, area->addr, size_page, _PAGE_NO_CACHE | _PAGE_GUARDED) == NULL) return -ENOMEM; /* Fixup hose IO resource */ io_virt_offset = (unsigned long)hose->io_base_virt - _IO_BASE; hose->io_resource.start += io_virt_offset; hose->io_resource.end += io_virt_offset; pr_debug(" hose->io_resource=%pR\n", &hose->io_resource); return 0; } EXPORT_SYMBOL_GPL(pcibios_map_io_space); void __devinit pcibios_setup_phb_io_space(struct pci_controller *hose) { pcibios_map_io_space(hose->bus); } #define IOBASE_BRIDGE_NUMBER 0 #define IOBASE_MEMORY 1 #define IOBASE_IO 2 #define IOBASE_ISA_IO 3 #define IOBASE_ISA_MEM 4 long sys_pciconfig_iobase(long which, unsigned long in_bus, unsigned 
long in_devfn) { struct pci_controller* hose; struct list_head *ln; struct pci_bus *bus = NULL; struct device_node *hose_node; /* Argh ! Please forgive me for that hack, but that's the * simplest way to get existing XFree to not lockup on some * G5 machines... So when something asks for bus 0 io base * (bus 0 is HT root), we return the AGP one instead. */ if (in_bus == 0 && of_machine_is_compatible("MacRISC4")) { struct device_node *agp; agp = of_find_compatible_node(NULL, NULL, "u3-agp"); if (agp) in_bus = 0xf0; of_node_put(agp); } /* That syscall isn't quite compatible with PCI domains, but it's * used on pre-domains setup. We return the first match */ for (ln = pci_root_buses.next; ln != &pci_root_buses; ln = ln->next) { bus = pci_bus_b(ln); if (in_bus >= bus->number && in_bus <= bus->subordinate) break; bus = NULL; } if (bus == NULL || bus->dev.of_node == NULL) return -ENODEV; hose_node = bus->dev.of_node; hose = PCI_DN(hose_node)->phb; switch (which) { case IOBASE_BRIDGE_NUMBER: return (long)hose->first_busno; case IOBASE_MEMORY: return (long)hose->pci_mem_offset; case IOBASE_IO: return (long)hose->io_base_phys; case IOBASE_ISA_IO: return (long)isa_io_base; case IOBASE_ISA_MEM: return -EINVAL; } return -EOPNOTSUPP; } #ifdef CONFIG_NUMA int pcibus_to_node(struct pci_bus *bus) { struct pci_controller *phb = pci_bus_to_host(bus); return phb->node; } EXPORT_SYMBOL(pcibus_to_node); #endif
gpl-2.0
agat63/L900_MA7_kernel
arch/microblaze/mm/init.c
2589
9522
/* * Copyright (C) 2007-2008 Michal Simek <monstr@monstr.eu> * Copyright (C) 2006 Atmark Techno, Inc. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/bootmem.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/memblock.h> #include <linux/mm.h> /* mem_init */ #include <linux/initrd.h> #include <linux/pagemap.h> #include <linux/pfn.h> #include <linux/slab.h> #include <linux/swap.h> #include <asm/page.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #include <asm/sections.h> #include <asm/tlb.h> /* Use for MMU and noMMU because of PCI generic code */ int mem_init_done; #ifndef CONFIG_MMU unsigned int __page_offset; EXPORT_SYMBOL(__page_offset); #else static int init_bootmem_done; #endif /* CONFIG_MMU */ char *klimit = _end; /* * Initialize the bootmem system and give it all the memory we * have available. */ unsigned long memory_start; EXPORT_SYMBOL(memory_start); unsigned long memory_end; /* due to mm/nommu.c */ unsigned long memory_size; EXPORT_SYMBOL(memory_size); /* * paging_init() sets up the page tables - in fact we've already done this. 
*/ static void __init paging_init(void) { unsigned long zones_size[MAX_NR_ZONES]; /* Clean every zones */ memset(zones_size, 0, sizeof(zones_size)); /* * old: we can DMA to/from any address.put all page into ZONE_DMA * We use only ZONE_NORMAL */ zones_size[ZONE_NORMAL] = max_mapnr; free_area_init(zones_size); } void __init setup_memory(void) { unsigned long map_size; struct memblock_region *reg; #ifndef CONFIG_MMU u32 kernel_align_start, kernel_align_size; /* Find main memory where is the kernel */ for_each_memblock(memory, reg) { memory_start = (u32)reg->base; memory_end = (u32) reg->base + reg->size; if ((memory_start <= (u32)_text) && ((u32)_text <= memory_end)) { memory_size = memory_end - memory_start; PAGE_OFFSET = memory_start; printk(KERN_INFO "%s: Main mem: 0x%x-0x%x, " "size 0x%08x\n", __func__, (u32) memory_start, (u32) memory_end, (u32) memory_size); break; } } if (!memory_start || !memory_end) { panic("%s: Missing memory setting 0x%08x-0x%08x\n", __func__, (u32) memory_start, (u32) memory_end); } /* reservation of region where is the kernel */ kernel_align_start = PAGE_DOWN((u32)_text); /* ALIGN can be remove because _end in vmlinux.lds.S is align */ kernel_align_size = PAGE_UP((u32)klimit) - kernel_align_start; memblock_reserve(kernel_align_start, kernel_align_size); printk(KERN_INFO "%s: kernel addr=0x%08x-0x%08x size=0x%08x\n", __func__, kernel_align_start, kernel_align_start + kernel_align_size, kernel_align_size); #endif /* * Kernel: * start: base phys address of kernel - page align * end: base phys address of kernel - page align * * min_low_pfn - the first page (mm/bootmem.c - node_boot_start) * max_low_pfn * max_mapnr - the first unused page (mm/bootmem.c - node_low_pfn) * num_physpages - number of all pages */ /* memory start is from the kernel end (aligned) to higher addr */ min_low_pfn = memory_start >> PAGE_SHIFT; /* minimum for allocation */ /* RAM is assumed contiguous */ num_physpages = max_mapnr = memory_size >> PAGE_SHIFT; max_pfn = 
max_low_pfn = memory_end >> PAGE_SHIFT; printk(KERN_INFO "%s: max_mapnr: %#lx\n", __func__, max_mapnr); printk(KERN_INFO "%s: min_low_pfn: %#lx\n", __func__, min_low_pfn); printk(KERN_INFO "%s: max_low_pfn: %#lx\n", __func__, max_low_pfn); /* * Find an area to use for the bootmem bitmap. * We look for the first area which is at least * 128kB in length (128kB is enough for a bitmap * for 4GB of memory, using 4kB pages), plus 1 page * (in case the address isn't page-aligned). */ map_size = init_bootmem_node(NODE_DATA(0), PFN_UP(TOPHYS((u32)klimit)), min_low_pfn, max_low_pfn); memblock_reserve(PFN_UP(TOPHYS((u32)klimit)) << PAGE_SHIFT, map_size); /* free bootmem is whole main memory */ free_bootmem(memory_start, memory_size); /* reserve allocate blocks */ for_each_memblock(reserved, reg) { pr_debug("reserved - 0x%08x-0x%08x\n", (u32) reg->base, (u32) reg->size); reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT); } #ifdef CONFIG_MMU init_bootmem_done = 1; #endif paging_init(); } void free_init_pages(char *what, unsigned long begin, unsigned long end) { unsigned long addr; for (addr = begin; addr < end; addr += PAGE_SIZE) { ClearPageReserved(virt_to_page(addr)); init_page_count(virt_to_page(addr)); free_page(addr); totalram_pages++; } printk(KERN_INFO "Freeing %s: %ldk freed\n", what, (end - begin) >> 10); } #ifdef CONFIG_BLK_DEV_INITRD void free_initrd_mem(unsigned long start, unsigned long end) { int pages = 0; for (; start < end; start += PAGE_SIZE) { ClearPageReserved(virt_to_page(start)); init_page_count(virt_to_page(start)); free_page(start); totalram_pages++; pages++; } printk(KERN_NOTICE "Freeing initrd memory: %dk freed\n", (int)(pages * (PAGE_SIZE / 1024))); } #endif void free_initmem(void) { free_init_pages("unused kernel memory", (unsigned long)(&__init_begin), (unsigned long)(&__init_end)); } void __init mem_init(void) { high_memory = (void *)__va(memory_end); /* this will put all memory onto the freelists */ totalram_pages += free_all_bootmem(); 
printk(KERN_INFO "Memory: %luk/%luk available\n", nr_free_pages() << (PAGE_SHIFT-10), num_physpages << (PAGE_SHIFT-10)); mem_init_done = 1; } #ifndef CONFIG_MMU int page_is_ram(unsigned long pfn) { return __range_ok(pfn, 0); } #else int page_is_ram(unsigned long pfn) { return pfn < max_low_pfn; } /* * Check for command-line options that affect what MMU_init will do. */ static void mm_cmdline_setup(void) { unsigned long maxmem = 0; char *p = cmd_line; /* Look for mem= option on command line */ p = strstr(cmd_line, "mem="); if (p) { p += 4; maxmem = memparse(p, &p); if (maxmem && memory_size > maxmem) { memory_size = maxmem; memory_end = memory_start + memory_size; memblock.memory.regions[0].size = memory_size; } } } /* * MMU_init_hw does the chip-specific initialization of the MMU hardware. */ static void __init mmu_init_hw(void) { /* * The Zone Protection Register (ZPR) defines how protection will * be applied to every page which is a member of a given zone. At * present, we utilize only two of the zones. * The zone index bits (of ZSEL) in the PTE are used for software * indicators, except the LSB. For user access, zone 1 is used, * for kernel access, zone 0 is used. We set all but zone 1 * to zero, allowing only kernel access as indicated in the PTE. * For zone 1, we set a 01 binary (a value of 10 will not work) * to allow user access as indicated in the PTE. This also allows * kernel access as indicated in the PTE. */ __asm__ __volatile__ ("ori r11, r0, 0x10000000;" \ "mts rzpr, r11;" : : : "r11"); } /* * MMU_init sets up the basic memory mappings for the kernel, * including both RAM and possibly some I/O regions, * and sets up the page tables and the MMU hardware ready to go. 
*/ /* called from head.S */ asmlinkage void __init mmu_init(void) { unsigned int kstart, ksize; if (!memblock.reserved.cnt) { printk(KERN_EMERG "Error memory count\n"); machine_restart(NULL); } if ((u32) memblock.memory.regions[0].size < 0x1000000) { printk(KERN_EMERG "Memory must be greater than 16MB\n"); machine_restart(NULL); } /* Find main memory where the kernel is */ memory_start = (u32) memblock.memory.regions[0].base; memory_end = (u32) memblock.memory.regions[0].base + (u32) memblock.memory.regions[0].size; memory_size = memory_end - memory_start; mm_cmdline_setup(); /* FIXME parse args from command line - not used */ /* * Map out the kernel text/data/bss from the available physical * memory. */ kstart = __pa(CONFIG_KERNEL_START); /* kernel start */ /* kernel size */ ksize = PAGE_ALIGN(((u32)_end - (u32)CONFIG_KERNEL_START)); memblock_reserve(kstart, ksize); #if defined(CONFIG_BLK_DEV_INITRD) /* Remove the init RAM disk from the available memory. */ /* if (initrd_start) { mem_pieces_remove(&phys_avail, __pa(initrd_start), initrd_end - initrd_start, 1); }*/ #endif /* CONFIG_BLK_DEV_INITRD */ /* Initialize the MMU hardware */ mmu_init_hw(); /* Map in all of RAM starting at CONFIG_KERNEL_START */ mapin_ram(); #ifdef HIGHMEM_START_BOOL ioremap_base = HIGHMEM_START; #else ioremap_base = 0xfe000000UL; /* for now, could be 0xfffff000 */ #endif /* CONFIG_HIGHMEM */ ioremap_bot = ioremap_base; /* Initialize the context management stuff */ mmu_context_init(); } /* This is only called until mem_init is done. 
*/ void __init *early_get_page(void) { void *p; if (init_bootmem_done) { p = alloc_bootmem_pages(PAGE_SIZE); } else { /* * Mem start + 32MB -> here is limit * because of mem mapping from head.S */ p = __va(memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, memory_start + 0x2000000)); } return p; } #endif /* CONFIG_MMU */ void * __init_refok alloc_maybe_bootmem(size_t size, gfp_t mask) { if (mem_init_done) return kmalloc(size, mask); else return alloc_bootmem(size); } void * __init_refok zalloc_maybe_bootmem(size_t size, gfp_t mask) { void *p; if (mem_init_done) p = kzalloc(size, mask); else { p = alloc_bootmem(size); if (p) memset(p, 0, size); } return p; }
gpl-2.0
CyanogenMod/android_kernel_zte_msm8994
drivers/gpu/drm/nouveau/core/subdev/bios/conn.c
2845
1879
/* * Copyright 2012 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include <core/device.h> #include <subdev/bios.h> #include <subdev/bios/dcb.h> #include <subdev/bios/conn.h> u16 dcb_conntab(struct nouveau_bios *bios, u8 *ver, u8 *hdr, u8 *cnt, u8 *len) { u16 dcb = dcb_table(bios, ver, hdr, cnt, len); if (dcb && *ver >= 0x30 && *hdr >= 0x16) { u16 data = nv_ro16(bios, dcb + 0x14); if (data) { *ver = nv_ro08(bios, data + 0); *hdr = nv_ro08(bios, data + 1); *cnt = nv_ro08(bios, data + 2); *len = nv_ro08(bios, data + 3); return data; } } return 0x0000; } u16 dcb_conn(struct nouveau_bios *bios, u8 idx, u8 *ver, u8 *len) { u8 hdr, cnt; u16 data = dcb_conntab(bios, ver, &hdr, &cnt, len); if (data && idx < cnt) return data + hdr + (idx * *len); return 0x0000; }
gpl-2.0
ShadySquirrel/e980-zeKrnl
net/sunrpc/cache.c
3101
44409
/* * net/sunrpc/cache.c * * Generic code for various authentication-related caches * used by sunrpc clients and servers. * * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au> * * Released under terms in GPL version 2. See COPYING. * */ #include <linux/types.h> #include <linux/fs.h> #include <linux/file.h> #include <linux/slab.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kmod.h> #include <linux/list.h> #include <linux/module.h> #include <linux/ctype.h> #include <asm/uaccess.h> #include <linux/poll.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <linux/net.h> #include <linux/workqueue.h> #include <linux/mutex.h> #include <linux/pagemap.h> #include <asm/ioctls.h> #include <linux/sunrpc/types.h> #include <linux/sunrpc/cache.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include "netns.h" #define RPCDBG_FACILITY RPCDBG_CACHE static bool cache_defer_req(struct cache_req *req, struct cache_head *item); static void cache_revisit_request(struct cache_head *item); static void cache_init(struct cache_head *h) { time_t now = seconds_since_boot(); h->next = NULL; h->flags = 0; kref_init(&h->ref); h->expiry_time = now + CACHE_NEW_EXPIRY; h->last_refresh = now; } static inline int cache_is_expired(struct cache_detail *detail, struct cache_head *h) { return (h->expiry_time < seconds_since_boot()) || (detail->flush_time > h->last_refresh); } struct cache_head *sunrpc_cache_lookup(struct cache_detail *detail, struct cache_head *key, int hash) { struct cache_head **head, **hp; struct cache_head *new = NULL, *freeme = NULL; head = &detail->hash_table[hash]; read_lock(&detail->hash_lock); for (hp=head; *hp != NULL ; hp = &(*hp)->next) { struct cache_head *tmp = *hp; if (detail->match(tmp, key)) { if (cache_is_expired(detail, tmp)) /* This entry is expired, we will discard it. 
*/ break; cache_get(tmp); read_unlock(&detail->hash_lock); return tmp; } } read_unlock(&detail->hash_lock); /* Didn't find anything, insert an empty entry */ new = detail->alloc(); if (!new) return NULL; /* must fully initialise 'new', else * we might get lose if we need to * cache_put it soon. */ cache_init(new); detail->init(new, key); write_lock(&detail->hash_lock); /* check if entry appeared while we slept */ for (hp=head; *hp != NULL ; hp = &(*hp)->next) { struct cache_head *tmp = *hp; if (detail->match(tmp, key)) { if (cache_is_expired(detail, tmp)) { *hp = tmp->next; tmp->next = NULL; detail->entries --; freeme = tmp; break; } cache_get(tmp); write_unlock(&detail->hash_lock); cache_put(new, detail); return tmp; } } new->next = *head; *head = new; detail->entries++; cache_get(new); write_unlock(&detail->hash_lock); if (freeme) cache_put(freeme, detail); return new; } EXPORT_SYMBOL_GPL(sunrpc_cache_lookup); static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch); static void cache_fresh_locked(struct cache_head *head, time_t expiry) { head->expiry_time = expiry; head->last_refresh = seconds_since_boot(); smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */ set_bit(CACHE_VALID, &head->flags); } static void cache_fresh_unlocked(struct cache_head *head, struct cache_detail *detail) { if (test_and_clear_bit(CACHE_PENDING, &head->flags)) { cache_revisit_request(head); cache_dequeue(detail, head); } } struct cache_head *sunrpc_cache_update(struct cache_detail *detail, struct cache_head *new, struct cache_head *old, int hash) { /* The 'old' entry is to be replaced by 'new'. 
* If 'old' is not VALID, we update it directly, * otherwise we need to replace it */ struct cache_head **head; struct cache_head *tmp; if (!test_bit(CACHE_VALID, &old->flags)) { write_lock(&detail->hash_lock); if (!test_bit(CACHE_VALID, &old->flags)) { if (test_bit(CACHE_NEGATIVE, &new->flags)) set_bit(CACHE_NEGATIVE, &old->flags); else detail->update(old, new); cache_fresh_locked(old, new->expiry_time); write_unlock(&detail->hash_lock); cache_fresh_unlocked(old, detail); return old; } write_unlock(&detail->hash_lock); } /* We need to insert a new entry */ tmp = detail->alloc(); if (!tmp) { cache_put(old, detail); return NULL; } cache_init(tmp); detail->init(tmp, old); head = &detail->hash_table[hash]; write_lock(&detail->hash_lock); if (test_bit(CACHE_NEGATIVE, &new->flags)) set_bit(CACHE_NEGATIVE, &tmp->flags); else detail->update(tmp, new); tmp->next = *head; *head = tmp; detail->entries++; cache_get(tmp); cache_fresh_locked(tmp, new->expiry_time); cache_fresh_locked(old, 0); write_unlock(&detail->hash_lock); cache_fresh_unlocked(tmp, detail); cache_fresh_unlocked(old, detail); cache_put(old, detail); return tmp; } EXPORT_SYMBOL_GPL(sunrpc_cache_update); static int cache_make_upcall(struct cache_detail *cd, struct cache_head *h) { if (!cd->cache_upcall) return -EINVAL; return cd->cache_upcall(cd, h); } static inline int cache_is_valid(struct cache_detail *detail, struct cache_head *h) { if (!test_bit(CACHE_VALID, &h->flags)) return -EAGAIN; else { /* entry is valid */ if (test_bit(CACHE_NEGATIVE, &h->flags)) return -ENOENT; else { /* * In combination with write barrier in * sunrpc_cache_update, ensures that anyone * using the cache entry after this sees the * updated contents: */ smp_rmb(); return 0; } } } static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h) { int rv; write_lock(&detail->hash_lock); rv = cache_is_valid(detail, h); if (rv != -EAGAIN) { write_unlock(&detail->hash_lock); return rv; } set_bit(CACHE_NEGATIVE, &h->flags); 
cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY); write_unlock(&detail->hash_lock); cache_fresh_unlocked(h, detail); return -ENOENT; } /* * This is the generic cache management routine for all * the authentication caches. * It checks the currency of a cache item and will (later) * initiate an upcall to fill it if needed. * * * Returns 0 if the cache_head can be used, or cache_puts it and returns * -EAGAIN if upcall is pending and request has been queued * -ETIMEDOUT if upcall failed or request could not be queue or * upcall completed but item is still invalid (implying that * the cache item has been replaced with a newer one). * -ENOENT if cache entry was negative */ int cache_check(struct cache_detail *detail, struct cache_head *h, struct cache_req *rqstp) { int rv; long refresh_age, age; /* First decide return status as best we can */ rv = cache_is_valid(detail, h); /* now see if we want to start an upcall */ refresh_age = (h->expiry_time - h->last_refresh); age = seconds_since_boot() - h->last_refresh; if (rqstp == NULL) { if (rv == -EAGAIN) rv = -ENOENT; } else if (rv == -EAGAIN || age > refresh_age/2) { dprintk("RPC: Want update, refage=%ld, age=%ld\n", refresh_age, age); if (!test_and_set_bit(CACHE_PENDING, &h->flags)) { switch (cache_make_upcall(detail, h)) { case -EINVAL: clear_bit(CACHE_PENDING, &h->flags); cache_revisit_request(h); rv = try_to_negate_entry(detail, h); break; case -EAGAIN: clear_bit(CACHE_PENDING, &h->flags); cache_revisit_request(h); break; } } } if (rv == -EAGAIN) { if (!cache_defer_req(rqstp, h)) { /* * Request was not deferred; handle it as best * we can ourselves: */ rv = cache_is_valid(detail, h); if (rv == -EAGAIN) rv = -ETIMEDOUT; } } if (rv) cache_put(h, detail); return rv; } EXPORT_SYMBOL_GPL(cache_check); /* * caches need to be periodically cleaned. * For this we maintain a list of cache_detail and * a current pointer into that list and into the table * for that entry. 
* * Each time clean_cache is called it finds the next non-empty entry * in the current table and walks the list in that entry * looking for entries that can be removed. * * An entry gets removed if: * - The expiry is before current time * - The last_refresh time is before the flush_time for that cache * * later we might drop old entries with non-NEVER expiry if that table * is getting 'full' for some definition of 'full' * * The question of "how often to scan a table" is an interesting one * and is answered in part by the use of the "nextcheck" field in the * cache_detail. * When a scan of a table begins, the nextcheck field is set to a time * that is well into the future. * While scanning, if an expiry time is found that is earlier than the * current nextcheck time, nextcheck is set to that expiry time. * If the flush_time is ever set to a time earlier than the nextcheck * time, the nextcheck time is then set to that flush_time. * * A table is then only scanned if the current time is at least * the nextcheck time. 
* */ static LIST_HEAD(cache_list); static DEFINE_SPINLOCK(cache_list_lock); static struct cache_detail *current_detail; static int current_index; static void do_cache_clean(struct work_struct *work); static struct delayed_work cache_cleaner; void sunrpc_init_cache_detail(struct cache_detail *cd) { rwlock_init(&cd->hash_lock); INIT_LIST_HEAD(&cd->queue); spin_lock(&cache_list_lock); cd->nextcheck = 0; cd->entries = 0; atomic_set(&cd->readers, 0); cd->last_close = 0; cd->last_warn = -1; list_add(&cd->others, &cache_list); spin_unlock(&cache_list_lock); /* start the cleaning process */ schedule_delayed_work(&cache_cleaner, 0); } EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail); void sunrpc_destroy_cache_detail(struct cache_detail *cd) { cache_purge(cd); spin_lock(&cache_list_lock); write_lock(&cd->hash_lock); if (cd->entries || atomic_read(&cd->inuse)) { write_unlock(&cd->hash_lock); spin_unlock(&cache_list_lock); goto out; } if (current_detail == cd) current_detail = NULL; list_del_init(&cd->others); write_unlock(&cd->hash_lock); spin_unlock(&cache_list_lock); if (list_empty(&cache_list)) { /* module must be being unloaded so its safe to kill the worker */ cancel_delayed_work_sync(&cache_cleaner); } return; out: printk(KERN_ERR "nfsd: failed to unregister %s cache\n", cd->name); } EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail); /* clean cache tries to find something to clean * and cleans it. * It returns 1 if it cleaned something, * 0 if it didn't find anything this time * -1 if it fell off the end of the list. 
*/ static int cache_clean(void) { int rv = 0; struct list_head *next; spin_lock(&cache_list_lock); /* find a suitable table if we don't already have one */ while (current_detail == NULL || current_index >= current_detail->hash_size) { if (current_detail) next = current_detail->others.next; else next = cache_list.next; if (next == &cache_list) { current_detail = NULL; spin_unlock(&cache_list_lock); return -1; } current_detail = list_entry(next, struct cache_detail, others); if (current_detail->nextcheck > seconds_since_boot()) current_index = current_detail->hash_size; else { current_index = 0; current_detail->nextcheck = seconds_since_boot()+30*60; } } /* find a non-empty bucket in the table */ while (current_detail && current_index < current_detail->hash_size && current_detail->hash_table[current_index] == NULL) current_index++; /* find a cleanable entry in the bucket and clean it, or set to next bucket */ if (current_detail && current_index < current_detail->hash_size) { struct cache_head *ch, **cp; struct cache_detail *d; write_lock(&current_detail->hash_lock); /* Ok, now to clean this strand */ cp = & current_detail->hash_table[current_index]; for (ch = *cp ; ch ; cp = & ch->next, ch = *cp) { if (current_detail->nextcheck > ch->expiry_time) current_detail->nextcheck = ch->expiry_time+1; if (!cache_is_expired(current_detail, ch)) continue; *cp = ch->next; ch->next = NULL; current_detail->entries--; rv = 1; break; } write_unlock(&current_detail->hash_lock); d = current_detail; if (!ch) current_index ++; spin_unlock(&cache_list_lock); if (ch) { if (test_and_clear_bit(CACHE_PENDING, &ch->flags)) cache_dequeue(current_detail, ch); cache_revisit_request(ch); cache_put(ch, d); } } else spin_unlock(&cache_list_lock); return rv; } /* * We want to regularly clean the cache, so we need to schedule some work ... 
*/ static void do_cache_clean(struct work_struct *work) { int delay = 5; if (cache_clean() == -1) delay = round_jiffies_relative(30*HZ); if (list_empty(&cache_list)) delay = 0; if (delay) schedule_delayed_work(&cache_cleaner, delay); } /* * Clean all caches promptly. This just calls cache_clean * repeatedly until we are sure that every cache has had a chance to * be fully cleaned */ void cache_flush(void) { while (cache_clean() != -1) cond_resched(); while (cache_clean() != -1) cond_resched(); } EXPORT_SYMBOL_GPL(cache_flush); void cache_purge(struct cache_detail *detail) { detail->flush_time = LONG_MAX; detail->nextcheck = seconds_since_boot(); cache_flush(); detail->flush_time = 1; } EXPORT_SYMBOL_GPL(cache_purge); /* * Deferral and Revisiting of Requests. * * If a cache lookup finds a pending entry, we * need to defer the request and revisit it later. * All deferred requests are stored in a hash table, * indexed by "struct cache_head *". * As it may be wasteful to store a whole request * structure, we allow the request to provide a * deferred form, which must contain a * 'struct cache_deferred_req' * This cache_deferred_req contains a method to allow * it to be revisited when cache info is available */ #define DFR_HASHSIZE (PAGE_SIZE/sizeof(struct list_head)) #define DFR_HASH(item) ((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE) #define DFR_MAX 300 /* ??? 
*/
/* Deferred-request bookkeeping: requests that arrived while the cache item
 * they need is still being filled in are parked here until the item is
 * updated (or the daemon goes away).  All four objects below are protected
 * by cache_defer_lock.
 */
static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);		/* LRU of counted deferred reqs */
static struct hlist_head cache_defer_hash[DFR_HASHSIZE]; /* by cache_head */
static int cache_defer_cnt;			/* number on cache_defer_list */

/* Remove @dreq from the hash, and from the LRU list if it was counted.
 * Caller holds cache_defer_lock.
 */
static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

/* Hash @dreq under @item so cache_revisit_request() can find it.
 * Caller holds cache_defer_lock.
 */
static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

/* Attach @dreq to @item.  If @count_me, the request also joins the global
 * LRU list and counts toward the DFR_MAX limit (thread waiters pass 0 so
 * they are never discarded by cache_limit_defers()).
 */
static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{

	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);

}

/* On-stack deferral used by cache_wait_req(): revisit() just completes
 * the completion that the waiting thread sleeps on.
 */
struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

/* ->revisit callback for thread_deferred_req; @too_many is ignored. */
static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

/* Park the current thread (up to req->thread_wait jiffies) until @item is
 * revisited.  Uses a stack-allocated deferred request, so on timeout or
 * signal we must carefully unhash it — and if cache_revisit_request() has
 * already unhashed it but not yet called ->revisit, we must wait for the
 * completion anyway before the stack frame goes away.
 */
static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

/* If more than DFR_MAX requests are deferred, pick either the newest or
 * the oldest (coin flip via net_random()) and discard it by calling its
 * ->revisit with too_many == 1.
 */
static void cache_limit_defers(void)
{
	/* Make sure we haven't exceed the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (net_random() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		/* Try sleeping in place first; if the item became ready
		 * while we slept there is nothing left to defer.
		 */
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

/* Wake up every deferred request parked under @item: unhash them under the
 * lock, then call ->revisit(dreq, 0) for each outside the lock.
 */
static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *lp, *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, lp, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

/* Discard (revisit with too_many == 1) every counted deferred request that
 * belongs to @owner; used when a cache user is shutting down.
 */
void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/sunrpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 *
 */

static DEFINE_SPINLOCK(queue_lock);	/* protects cd->queue and refcounts */
static DEFINE_MUTEX(queue_io_mutex);	/* serialises the static write_buf */

/* Common list node: both readers and requests live on cd->queue. */
struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
/* One queued upcall; freed when fully read and CACHE_PENDING clears. */
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;	/* holds a cache_get() reference */
	char			*buf;	/* formatted upcall text */
	int			len;
	int			readers; /* readers mid-way through this req */
};
/* Per-open-file cursor into cd->queue. */
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

/* Copy the next pending request after this reader's position to user space.
 * May return a short read; -EAGAIN internally restarts the scan when the
 * request went away under us.  inode->i_mutex serialises readers of one
 * file; queue_lock protects list manipulation and rq->readers.
 */
static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = filp->f_path.dentry->d_inode;
	int err;

	if (count == 0)
		return 0;

	mutex_lock(&inode->i_mutex); /* protect against multiple concurrent
			      * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		mutex_unlock(&inode->i_mutex);
		BUG_ON(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	BUG_ON(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rp->offset == 0 &&
	    !test_bit(CACHE_PENDING, &rq->item->flags)) {
		/* item resolved before we started reading: skip past it */
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			/* finished this request; advance past it */
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			/* last reader and no longer pending: free it */
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	mutex_unlock(&inode->i_mutex);
	return err ? err : count;
}

/* Copy @count bytes from user space into @kaddr, NUL-terminate, and hand
 * the text to the cache's ->cache_parse.  Returns @count on success,
 * negative errno otherwise.  @kaddr must have room for count+1 bytes.
 */
static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

/* Fallback downcall path through a shared static 8KiB buffer, serialised
 * by queue_io_mutex; rejects writes that don't fit.
 */
static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

/* Fast downcall path: use a locked page-cache page of the channel file as
 * the scratch buffer (no global mutex needed); fall back to the slow path
 * if the write is a full page or more, or no page can be obtained.
 */
static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_CACHE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	page_cache_release(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

/* write() on the channel file: feed an update to ->cache_parse, with
 * i_mutex held to serialise against other writers of this file.
 */
static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = filp->f_path.dentry->d_inode;
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	mutex_lock(&inode->i_mutex);
	ret = cache_downcall(mapping, buf, count, cd);
	mutex_unlock(&inode->i_mutex);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

/* poll() on the channel: always writable; readable when a request sits
 * after this reader's position on the queue.
 */
static unsigned int cache_poll(struct file *filp, poll_table *wait,
			       struct cache_detail *cd)
{
	unsigned int mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* alway allow write */
	/* NOTE(review): POLL_OUT is the SIGPOLL si_code constant, not the
	 * poll(2) POLLOUT event flag — this looks like it sets the wrong
	 * bit; verify against upstream, which uses POLLOUT here.
	 */
	mask = POLL_OUT | POLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq= &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= POLLIN | POLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

/* FIONREAD: report bytes remaining in the request this reader is part-way
 * through (or the full length of the next request).  Other ioctls: -EINVAL.
 */
static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq= &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

/* open() on the channel: take a module reference, and for readers allocate
 * a cache_reader cursor at the head of cd->queue.
 */
static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp)
			/* NOTE(review): returns without module_put(cd->owner)
			 * after a successful try_module_get above — looks like
			 * a module refcount leak on this failure path (fixed
			 * upstream by commit a5990ea1254c); confirm.
			 */
			return -ENOMEM;
		rp->offset = 0;
		rp->q.reader = 1;
		atomic_inc(&cd->readers);
		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	filp->private_data = rp;
	return 0;
}

/* release() on the channel: drop any part-read request's reader count,
 * unlink the cursor, record last_close for warn_no_listener, and drop the
 * module reference.
 */
static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq= &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);

		cd->last_close = seconds_since_boot();
		atomic_dec(&cd->readers);
	}
	module_put(cd->owner);
	return 0;
}

/* Remove the queued (not yet claimed) upcall for @ch, if any, dropping its
 * item reference and freeing its buffer.  Requests some reader is mid-way
 * through (readers != 0) are left for cache_read() to clean up.
 */
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq;
	spin_lock(&queue_lock);
	list_for_each_entry(cq, &detail->queue, list)
		if (!cq->reader) {
			struct cache_request *cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (cr->readers != 0)
				continue;
			list_del(&cr->q.list);
			spin_unlock(&queue_lock);
			cache_put(cr->item, detail);
			kfree(cr->buf);
			kfree(cr);
			return;
		}
	spin_unlock(&queue_lock);
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or a hexified with a leading \x
 * Record is terminated with newline.
 *
 */

/* Append @str to the buffer at *bpp (remaining space *lp), octal-escaping
 * space/tab/newline/backslash, then append a field separator.  On overflow
 * *lp is set to -1; otherwise both cursors are advanced.
 */
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	char c;

	if (len < 0) return;

	while ((c=*str++) && len)
		switch(c) {
		case ' ':
		case '\t':
		case '\n':
		case '\\':
			if (len >= 4) {
				*bp++ = '\\';
				*bp++ = '0' + ((c & 0300)>>6);
				*bp++ = '0' + ((c & 0070)>>3);
				*bp++ = '0' + ((c & 0007)>>0);
			}
			len -= 4;
			break;
		default:
			*bp++ = c;
			len--;
		}
	if (c || len <1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

/* Append @blen raw bytes from @buf as a "\x" hex field, then a separator.
 * Same cursor/overflow convention as qword_add (*lp == -1 on overflow).
 */
void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			unsigned char c = *buf++;
			/* manual nibble-to-hex: adjust past '9' for a-f */
			*bp++ = '0' + ((c&0xf0)>>4) + (c>=0xa0)*('a'-'9'-1);
			*bp++ = '0' + (c&0x0f) + ((c&0x0f)>=0x0a)*('a'-'9'-1);
			len -= 2;
			blen--;
		}
	}
	if (blen || len<1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

/* Invoke the cache's ->warn_no_listener hook, at most once per close of
 * the channel file (last_warn tracks last_close).
 */
static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

/* Is a userspace daemon (probably) servicing this cache?  True if the
 * channel is open now, or was closed less than 30 seconds ago.
 */
static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->readers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h,
		void (*cache_request)(struct cache_detail *,
				      struct cache_head *,
				      char **,
				      int *))
{

	char *buf;
	struct cache_request *crq;
	char *bp;
	int len;

	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		return -EINVAL;
	}

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof (*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	bp = buf; len = PAGE_SIZE;
	cache_request(detail, h, &bp, &len);

	if (len < 0) {
		/* formatter overflowed the page */
		kfree(buf);
		kfree(crq);
		return -EAGAIN;
	}
	crq->q.reader = 0;
	crq->item = cache_get(h);	/* request pins the cache item */
	crq->buf = buf;
	crq->len = PAGE_SIZE - len;
	crq->readers = 0;
	spin_lock(&queue_lock);
	list_add_tail(&crq->q.list, &detail->queue);
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequotes as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

/* NOTE(review): macro evaluates c twice and leaves it unparenthesised in
 * the comparison; fine for the simple arguments used below, but fragile.
 */
#define isodigit(c) (isdigit(c) && c <= '7')
/* Parse one field from *bpp into @dest (at most bufsize-1 text bytes, or
 * bufsize hex-decoded bytes), undoing \xHEX or \nnn octal quoting.
 * Advances *bpp past trailing spaces; returns bytes copied or -1 on a
 * malformed field.  @dest is always NUL-terminated.
 */
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp -'0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);


/*
 * support /proc/sunrpc/cache/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

/* seq_file private data: just the cache being dumped. */
struct handle {
	struct cache_detail *cd;
};

/* seq_file ->start.  The loff_t position encodes (hash bucket << 32) |
 * (entry index within bucket + 1); position 0 is the header token.
 * Takes cd->hash_lock, released in c_stop.
 */
static void *c_start(struct seq_file *m, loff_t *pos)
	__acquires(cd->hash_lock)
{
	loff_t n = *pos;
	unsigned hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = ((struct handle*)m->private)->cd;


	read_lock(&cd->hash_lock);
	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	for (ch=cd->hash_table[hash]; ch; ch=ch->next)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while(hash < cd->hash_size &&
		cd->hash_table[hash]==NULL);
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return cd->hash_table[hash];
}

/* seq_file ->next: advance within the chain, or to the next non-empty
 * bucket, keeping the packed *pos encoding in sync.
 */
static void *c_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return ch->next;
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       cd->hash_table[hash] == NULL) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return cd->hash_table[hash];
}

/* seq_file ->stop: drop the hash lock taken in c_start. */
static void c_stop(struct seq_file *m, void *p)
	__releases(cd->hash_lock)
{
	struct cache_detail *cd = ((struct handle*)m->private)->cd;
	read_unlock(&cd->hash_lock);
}

/* seq_file ->show: header for SEQ_START_TOKEN, otherwise one cache entry
 * (prefixed "# " if the entry is not currently valid).
 */
static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = ((struct handle*)m->private)->cd;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%ld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   atomic_read(&cp->ref.refcount), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else
		cache_put(cp, cd);

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= c_start,
	.next	= c_next,
	.stop	= c_stop,
	.show	= c_show,
};

/* open() for the "content" file: seq_file with a struct handle private. */
static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct handle *han;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	han = __seq_open_private(file, &cache_content_op, sizeof(*han));
	if (han == NULL) {
		module_put(cd->owner);
		return -ENOMEM;
	}

	han->cd = cd;
	return 0;
}

/* release() for the "content" file. */
static int content_release(struct inode *inode, struct file *file,
		struct cache_detail *cd)
{
	int ret = seq_release_private(inode, file);
	module_put(cd->owner);
	return ret;
}

/* open() for the "flush" file: only pins the owning module. */
static int open_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

/* release() for the "flush" file. */
static int release_flush(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

/* read() on "flush": report the current flush time as a decimal wallclock
 * value, honouring the caller's offset for partial reads.
 */
static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[20];
	unsigned long p = *ppos;
	size_t len;

	sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time));
	len = strlen(tbuf);
	if (p >= len)
		return 0;
	len -= p;
	if (len > count)
		len = count;
	if (copy_to_user(buf, (void*)(tbuf+p), len))
		return -EFAULT;
	*ppos += len;
	return len;
}

/* write() on "flush": validate the input is a plain number, then set the
 * flush time via get_expiry() and kick an immediate cache clean.
 */
static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *bp, *ep;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	/* value discarded: this call only validates the syntax via *ep;
	 * the actual number is re-parsed by get_expiry() below.
	 */
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;

	bp = tbuf;
	cd->flush_time = get_expiry(&bp);
	cd->nextcheck = seconds_since_boot();
	cache_flush();

	*ppos += count;
	return count;
}

/* ---- procfs adapters: recover the cache_detail from the proc entry and
 * forward to the shared implementations above. ----
 */

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = filp->f_path.dentry->d_inode;
	struct cache_detail *cd = PDE(inode)->data;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return cache_release(inode, filp, cd);
}

static const struct file_operations cache_file_operations_procfs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_procfs,
	.write		= cache_write_procfs,
	.poll		= cache_poll_procfs,
	.unlocked_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.open		= cache_open_procfs,
	.release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return content_release(inode, filp, cd);
}

static const struct file_operations content_file_operations_procfs = {
	.open		= content_open_procfs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE(inode)->data;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE(filp->f_path.dentry->d_inode)->data;

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct file_operations cache_flush_operations_procfs = {
	.open		= open_flush_procfs,
	.read		= read_flush_procfs,
	.write		= write_flush_procfs,
	.release	= release_flush_procfs,
	.llseek		= no_llseek,
};

/* Tear down /proc/net/rpc/<name>/{flush,channel,content} and the
 * directory itself; safe to call when creation partially failed.
 */
static void remove_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct sunrpc_net *sn;

	if (cd->u.procfs.proc_ent == NULL)
		return;
	if (cd->u.procfs.flush_ent)
		remove_proc_entry("flush", cd->u.procfs.proc_ent);
	if (cd->u.procfs.channel_ent)
		remove_proc_entry("channel", cd->u.procfs.proc_ent);
	if (cd->u.procfs.content_ent)
		remove_proc_entry("content", cd->u.procfs.proc_ent);
	cd->u.procfs.proc_ent = NULL;
	sn = net_generic(net, sunrpc_net_id);
	remove_proc_entry(cd->name, sn->proc_net_rpc);
}

#ifdef CONFIG_PROC_FS
/* Create the per-cache proc directory with "flush" always, "channel" when
 * the cache supports upcalls/downcalls, and "content" when it can dump
 * itself.  On any failure everything created so far is removed.
 */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->u.procfs.proc_ent = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->u.procfs.proc_ent == NULL)
		goto out_nomem;
	cd->u.procfs.channel_ent = NULL;
	cd->u.procfs.content_ent = NULL;

	p = proc_create_data("flush", S_IFREG|S_IRUSR|S_IWUSR,
			     cd->u.procfs.proc_ent,
			     &cache_flush_operations_procfs, cd);
	cd->u.procfs.flush_ent = p;
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_upcall || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &cache_file_operations_procfs, cd);
		cd->u.procfs.channel_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG|S_IRUSR|S_IWUSR,
				     cd->u.procfs.proc_ent,
				     &content_file_operations_procfs, cd);
		cd->u.procfs.content_ent = p;
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd, net);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DELAYED_WORK_DEFERRABLE(&cache_cleaner, do_cache_clean);
}

/* Register @cd for @net: hook into the cleaner and create proc files,
 * unwinding on failure.
 */
int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd, net);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

/* Allocate a per-net copy of the template cache_detail, including its
 * hash table.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
struct cache_detail *cache_create_net(struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kzalloc(cd->hash_size * sizeof(struct cache_head *),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);

/* ---- rpc_pipefs adapters: same pattern as the procfs wrappers, but the
 * cache_detail hangs off the rpc_inode. ----
 */

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static unsigned int cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			      unsigned int cmd, unsigned long arg)
{
	/* NOTE(review): uses filp->f_dentry here while every sibling wrapper
	 * uses filp->f_path.dentry; equivalent (f_dentry is an alias), but
	 * inconsistent — worth unifying.
	 */
	struct inode *inode = filp->f_dentry->d_inode;
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(filp->f_path.dentry->d_inode)->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

/* Create the rpc_pipefs directory for this cache under @parent. */
int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct qstr q;
	struct dentry *dir;
	int ret = 0;

	q.name = name;
	q.len = strlen(name);
	q.hash = full_name_hash(q.name, q.len);
	dir = rpc_create_cache_dir(parent, &q, umode, cd);
	if (!IS_ERR(dir))
		cd->u.pipefs.dir = dir;
	else
		ret = PTR_ERR(dir);
	return ret;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	rpc_remove_cache_dir(cd->u.pipefs.dir);
	cd->u.pipefs.dir = NULL;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);
gpl-2.0
AndroidDeveloperAlliance/ZenKernel_Crespo
drivers/ide/ide-dma-sff.c
4637
9056
#include <linux/types.h> #include <linux/kernel.h> #include <linux/ide.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/io.h> /** * config_drive_for_dma - attempt to activate IDE DMA * @drive: the drive to place in DMA mode * * If the drive supports at least mode 2 DMA or UDMA of any kind * then attempt to place it into DMA mode. Drives that are known to * support DMA but predate the DMA properties or that are known * to have DMA handling bugs are also set up appropriately based * on the good/bad drive lists. */ int config_drive_for_dma(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u16 *id = drive->id; if (drive->media != ide_disk) { if (hwif->host_flags & IDE_HFLAG_NO_ATAPI_DMA) return 0; } /* * Enable DMA on any drive that has * UltraDMA (mode 0/1/2/3/4/5/6) enabled */ if ((id[ATA_ID_FIELD_VALID] & 4) && ((id[ATA_ID_UDMA_MODES] >> 8) & 0x7f)) return 1; /* * Enable DMA on any drive that has mode2 DMA * (multi or single) enabled */ if ((id[ATA_ID_MWDMA_MODES] & 0x404) == 0x404 || (id[ATA_ID_SWDMA_MODES] & 0x404) == 0x404) return 1; /* Consult the list of known "good" drives */ if (ide_dma_good_drive(drive)) return 1; return 0; } u8 ide_dma_sff_read_status(ide_hwif_t *hwif) { unsigned long addr = hwif->dma_base + ATA_DMA_STATUS; if (hwif->host_flags & IDE_HFLAG_MMIO) return readb((void __iomem *)addr); else return inb(addr); } EXPORT_SYMBOL_GPL(ide_dma_sff_read_status); static void ide_dma_sff_write_status(ide_hwif_t *hwif, u8 val) { unsigned long addr = hwif->dma_base + ATA_DMA_STATUS; if (hwif->host_flags & IDE_HFLAG_MMIO) writeb(val, (void __iomem *)addr); else outb(val, addr); } /** * ide_dma_host_set - Enable/disable DMA on a host * @drive: drive to control * * Enable/disable DMA on an IDE controller following generic * bus-mastering IDE controller behaviour. 
*/ void ide_dma_host_set(ide_drive_t *drive, int on) { ide_hwif_t *hwif = drive->hwif; u8 unit = drive->dn & 1; u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif); if (on) dma_stat |= (1 << (5 + unit)); else dma_stat &= ~(1 << (5 + unit)); ide_dma_sff_write_status(hwif, dma_stat); } EXPORT_SYMBOL_GPL(ide_dma_host_set); /** * ide_build_dmatable - build IDE DMA table * * ide_build_dmatable() prepares a dma request. We map the command * to get the pci bus addresses of the buffers and then build up * the PRD table that the IDE layer wants to be fed. * * Most chipsets correctly interpret a length of 0x0000 as 64KB, * but at least one (e.g. CS5530) misinterprets it as zero (!). * So we break the 64KB entry into two 32KB entries instead. * * Returns the number of built PRD entries if all went okay, * returns 0 otherwise. * * May also be invoked from trm290.c */ int ide_build_dmatable(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; __le32 *table = (__le32 *)hwif->dmatable_cpu; unsigned int count = 0; int i; struct scatterlist *sg; u8 is_trm290 = !!(hwif->host_flags & IDE_HFLAG_TRM290); for_each_sg(hwif->sg_table, sg, cmd->sg_nents, i) { u32 cur_addr, cur_len, xcount, bcount; cur_addr = sg_dma_address(sg); cur_len = sg_dma_len(sg); /* * Fill in the dma table, without crossing any 64kB boundaries. * Most hardware requires 16-bit alignment of all blocks, * but the trm290 requires 32-bit alignment. 
*/ while (cur_len) { if (count++ >= PRD_ENTRIES) goto use_pio_instead; bcount = 0x10000 - (cur_addr & 0xffff); if (bcount > cur_len) bcount = cur_len; *table++ = cpu_to_le32(cur_addr); xcount = bcount & 0xffff; if (is_trm290) xcount = ((xcount >> 2) - 1) << 16; else if (xcount == 0x0000) { if (count++ >= PRD_ENTRIES) goto use_pio_instead; *table++ = cpu_to_le32(0x8000); *table++ = cpu_to_le32(cur_addr + 0x8000); xcount = 0x8000; } *table++ = cpu_to_le32(xcount); cur_addr += bcount; cur_len -= bcount; } } if (count) { if (!is_trm290) *--table |= cpu_to_le32(0x80000000); return count; } use_pio_instead: printk(KERN_ERR "%s: %s\n", drive->name, count ? "DMA table too small" : "empty DMA table?"); return 0; /* revert to PIO for this request */ } EXPORT_SYMBOL_GPL(ide_build_dmatable); /** * ide_dma_setup - begin a DMA phase * @drive: target device * @cmd: command * * Build an IDE DMA PRD (IDE speak for scatter gather table) * and then set up the DMA transfer registers for a device * that follows generic IDE PCI DMA behaviour. Controllers can * override this function if they need to * * Returns 0 on success. If a PIO fallback is required then 1 * is returned. */ int ide_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd) { ide_hwif_t *hwif = drive->hwif; u8 mmio = (hwif->host_flags & IDE_HFLAG_MMIO) ? 1 : 0; u8 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR; u8 dma_stat; /* fall back to pio! 
*/ if (ide_build_dmatable(drive, cmd) == 0) { ide_map_sg(drive, cmd); return 1; } /* PRD table */ if (mmio) writel(hwif->dmatable_dma, (void __iomem *)(hwif->dma_base + ATA_DMA_TABLE_OFS)); else outl(hwif->dmatable_dma, hwif->dma_base + ATA_DMA_TABLE_OFS); /* specify r/w */ if (mmio) writeb(rw, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); else outb(rw, hwif->dma_base + ATA_DMA_CMD); /* read DMA status for INTR & ERROR flags */ dma_stat = hwif->dma_ops->dma_sff_read_status(hwif); /* clear INTR & ERROR flags */ ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR); return 0; } EXPORT_SYMBOL_GPL(ide_dma_setup); /** * ide_dma_sff_timer_expiry - handle a DMA timeout * @drive: Drive that timed out * * An IDE DMA transfer timed out. In the event of an error we ask * the driver to resolve the problem, if a DMA transfer is still * in progress we continue to wait (arguably we need to add a * secondary 'I don't care what the drive thinks' timeout here) * Finally if we have an interrupt we let it complete the I/O. * But only one time - we clear expiry and if it's still not * completed after WAIT_CMD, we error and retry in PIO. * This can occur if an interrupt is lost or due to hang or bugs. */ int ide_dma_sff_timer_expiry(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif); printk(KERN_WARNING "%s: %s: DMA status (0x%02x)\n", drive->name, __func__, dma_stat); if ((dma_stat & 0x18) == 0x18) /* BUSY Stupid Early Timer !! 
*/ return WAIT_CMD; hwif->expiry = NULL; /* one free ride for now */ if (dma_stat & ATA_DMA_ERR) /* ERROR */ return -1; if (dma_stat & ATA_DMA_ACTIVE) /* DMAing */ return WAIT_CMD; if (dma_stat & ATA_DMA_INTR) /* Got an Interrupt */ return WAIT_CMD; return 0; /* Status is unknown -- reset the bus */ } EXPORT_SYMBOL_GPL(ide_dma_sff_timer_expiry); void ide_dma_start(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_cmd; /* Note that this is done *after* the cmd has * been issued to the drive, as per the BM-IDE spec. * The Promise Ultra33 doesn't work correctly when * we do this part before issuing the drive cmd. */ if (hwif->host_flags & IDE_HFLAG_MMIO) { dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); writeb(dma_cmd | ATA_DMA_START, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); } else { dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); outb(dma_cmd | ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); } } EXPORT_SYMBOL_GPL(ide_dma_start); /* returns 1 on error, 0 otherwise */ int ide_dma_end(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_stat = 0, dma_cmd = 0; /* stop DMA */ if (hwif->host_flags & IDE_HFLAG_MMIO) { dma_cmd = readb((void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); writeb(dma_cmd & ~ATA_DMA_START, (void __iomem *)(hwif->dma_base + ATA_DMA_CMD)); } else { dma_cmd = inb(hwif->dma_base + ATA_DMA_CMD); outb(dma_cmd & ~ATA_DMA_START, hwif->dma_base + ATA_DMA_CMD); } /* get DMA status */ dma_stat = hwif->dma_ops->dma_sff_read_status(hwif); /* clear INTR & ERROR bits */ ide_dma_sff_write_status(hwif, dma_stat | ATA_DMA_ERR | ATA_DMA_INTR); #define CHECK_DMA_MASK (ATA_DMA_ACTIVE | ATA_DMA_ERR | ATA_DMA_INTR) /* verify good DMA status */ if ((dma_stat & CHECK_DMA_MASK) != ATA_DMA_INTR) return 0x10 | dma_stat; return 0; } EXPORT_SYMBOL_GPL(ide_dma_end); /* returns 1 if dma irq issued, 0 otherwise */ int ide_dma_test_irq(ide_drive_t *drive) { ide_hwif_t *hwif = drive->hwif; u8 dma_stat = hwif->dma_ops->dma_sff_read_status(hwif); 
return (dma_stat & ATA_DMA_INTR) ? 1 : 0; } EXPORT_SYMBOL_GPL(ide_dma_test_irq); const struct ide_dma_ops sff_dma_ops = { .dma_host_set = ide_dma_host_set, .dma_setup = ide_dma_setup, .dma_start = ide_dma_start, .dma_end = ide_dma_end, .dma_test_irq = ide_dma_test_irq, .dma_lost_irq = ide_dma_lost_irq, .dma_timer_expiry = ide_dma_sff_timer_expiry, .dma_sff_read_status = ide_dma_sff_read_status, }; EXPORT_SYMBOL_GPL(sff_dma_ops);
gpl-2.0
LEPT-Development/android_kernel_lge_f6mt
sound/soc/codecs/ad193x.c
4893
13885
/* * AD193X Audio Codec driver supporting AD1936/7/8/9 * * Copyright 2010 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/device.h> #include <linux/i2c.h> #include <linux/spi/spi.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include <sound/tlv.h> #include "ad193x.h" /* codec private data */ struct ad193x_priv { struct regmap *regmap; int sysclk; }; /* * AD193X volume/mute/de-emphasis etc. controls */ static const char * const ad193x_deemp[] = {"None", "48kHz", "44.1kHz", "32kHz"}; static const struct soc_enum ad193x_deemp_enum = SOC_ENUM_SINGLE(AD193X_DAC_CTRL2, 1, 4, ad193x_deemp); static const DECLARE_TLV_DB_MINMAX(adau193x_tlv, -9563, 0); static const struct snd_kcontrol_new ad193x_snd_controls[] = { /* DAC volume control */ SOC_DOUBLE_R_TLV("DAC1 Volume", AD193X_DAC_L1_VOL, AD193X_DAC_R1_VOL, 0, 0xFF, 1, adau193x_tlv), SOC_DOUBLE_R_TLV("DAC2 Volume", AD193X_DAC_L2_VOL, AD193X_DAC_R2_VOL, 0, 0xFF, 1, adau193x_tlv), SOC_DOUBLE_R_TLV("DAC3 Volume", AD193X_DAC_L3_VOL, AD193X_DAC_R3_VOL, 0, 0xFF, 1, adau193x_tlv), SOC_DOUBLE_R_TLV("DAC4 Volume", AD193X_DAC_L4_VOL, AD193X_DAC_R4_VOL, 0, 0xFF, 1, adau193x_tlv), /* ADC switch control */ SOC_DOUBLE("ADC1 Switch", AD193X_ADC_CTRL0, AD193X_ADCL1_MUTE, AD193X_ADCR1_MUTE, 1, 1), SOC_DOUBLE("ADC2 Switch", AD193X_ADC_CTRL0, AD193X_ADCL2_MUTE, AD193X_ADCR2_MUTE, 1, 1), /* DAC switch control */ SOC_DOUBLE("DAC1 Switch", AD193X_DAC_CHNL_MUTE, AD193X_DACL1_MUTE, AD193X_DACR1_MUTE, 1, 1), SOC_DOUBLE("DAC2 Switch", AD193X_DAC_CHNL_MUTE, AD193X_DACL2_MUTE, AD193X_DACR2_MUTE, 1, 1), SOC_DOUBLE("DAC3 Switch", AD193X_DAC_CHNL_MUTE, AD193X_DACL3_MUTE, AD193X_DACR3_MUTE, 1, 1), SOC_DOUBLE("DAC4 Switch", AD193X_DAC_CHNL_MUTE, AD193X_DACL4_MUTE, AD193X_DACR4_MUTE, 1, 1), /* ADC high-pass filter */ SOC_SINGLE("ADC High Pass 
Filter Switch", AD193X_ADC_CTRL0, AD193X_ADC_HIGHPASS_FILTER, 1, 0), /* DAC de-emphasis */ SOC_ENUM("Playback Deemphasis", ad193x_deemp_enum), }; static const struct snd_soc_dapm_widget ad193x_dapm_widgets[] = { SND_SOC_DAPM_DAC("DAC", "Playback", AD193X_DAC_CTRL0, 0, 1), SND_SOC_DAPM_ADC("ADC", "Capture", SND_SOC_NOPM, 0, 0), SND_SOC_DAPM_SUPPLY("PLL_PWR", AD193X_PLL_CLK_CTRL0, 0, 1, NULL, 0), SND_SOC_DAPM_SUPPLY("ADC_PWR", AD193X_ADC_CTRL0, 0, 1, NULL, 0), SND_SOC_DAPM_SUPPLY("SYSCLK", AD193X_PLL_CLK_CTRL0, 7, 0, NULL, 0), SND_SOC_DAPM_OUTPUT("DAC1OUT"), SND_SOC_DAPM_OUTPUT("DAC2OUT"), SND_SOC_DAPM_OUTPUT("DAC3OUT"), SND_SOC_DAPM_OUTPUT("DAC4OUT"), SND_SOC_DAPM_INPUT("ADC1IN"), SND_SOC_DAPM_INPUT("ADC2IN"), }; static const struct snd_soc_dapm_route audio_paths[] = { { "DAC", NULL, "SYSCLK" }, { "ADC", NULL, "SYSCLK" }, { "DAC", NULL, "ADC_PWR" }, { "ADC", NULL, "ADC_PWR" }, { "DAC1OUT", NULL, "DAC" }, { "DAC2OUT", NULL, "DAC" }, { "DAC3OUT", NULL, "DAC" }, { "DAC4OUT", NULL, "DAC" }, { "ADC", NULL, "ADC1IN" }, { "ADC", NULL, "ADC2IN" }, { "SYSCLK", NULL, "PLL_PWR" }, }; /* * DAI ops entries */ static int ad193x_mute(struct snd_soc_dai *dai, int mute) { struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(dai->codec); if (mute) regmap_update_bits(ad193x->regmap, AD193X_DAC_CTRL2, AD193X_DAC_MASTER_MUTE, AD193X_DAC_MASTER_MUTE); else regmap_update_bits(ad193x->regmap, AD193X_DAC_CTRL2, AD193X_DAC_MASTER_MUTE, 0); return 0; } static int ad193x_set_tdm_slot(struct snd_soc_dai *dai, unsigned int tx_mask, unsigned int rx_mask, int slots, int width) { struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(dai->codec); unsigned int channels; switch (slots) { case 2: channels = AD193X_2_CHANNELS; break; case 4: channels = AD193X_4_CHANNELS; break; case 8: channels = AD193X_8_CHANNELS; break; case 16: channels = AD193X_16_CHANNELS; break; default: return -EINVAL; } regmap_update_bits(ad193x->regmap, AD193X_DAC_CTRL1, AD193X_DAC_CHAN_MASK, channels << 
AD193X_DAC_CHAN_SHFT); regmap_update_bits(ad193x->regmap, AD193X_ADC_CTRL2, AD193X_ADC_CHAN_MASK, channels << AD193X_ADC_CHAN_SHFT); return 0; } static int ad193x_set_dai_fmt(struct snd_soc_dai *codec_dai, unsigned int fmt) { struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(codec_dai->codec); unsigned int adc_serfmt = 0; unsigned int adc_fmt = 0; unsigned int dac_fmt = 0; /* At present, the driver only support AUX ADC mode(SND_SOC_DAIFMT_I2S * with TDM) and ADC&DAC TDM mode(SND_SOC_DAIFMT_DSP_A) */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: adc_serfmt |= AD193X_ADC_SERFMT_TDM; break; case SND_SOC_DAIFMT_DSP_A: adc_serfmt |= AD193X_ADC_SERFMT_AUX; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: /* normal bit clock + frame */ break; case SND_SOC_DAIFMT_NB_IF: /* normal bclk + invert frm */ adc_fmt |= AD193X_ADC_LEFT_HIGH; dac_fmt |= AD193X_DAC_LEFT_HIGH; break; case SND_SOC_DAIFMT_IB_NF: /* invert bclk + normal frm */ adc_fmt |= AD193X_ADC_BCLK_INV; dac_fmt |= AD193X_DAC_BCLK_INV; break; case SND_SOC_DAIFMT_IB_IF: /* invert bclk + frm */ adc_fmt |= AD193X_ADC_LEFT_HIGH; adc_fmt |= AD193X_ADC_BCLK_INV; dac_fmt |= AD193X_DAC_LEFT_HIGH; dac_fmt |= AD193X_DAC_BCLK_INV; break; default: return -EINVAL; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: /* codec clk & frm master */ adc_fmt |= AD193X_ADC_LCR_MASTER; adc_fmt |= AD193X_ADC_BCLK_MASTER; dac_fmt |= AD193X_DAC_LCR_MASTER; dac_fmt |= AD193X_DAC_BCLK_MASTER; break; case SND_SOC_DAIFMT_CBS_CFM: /* codec clk slave & frm master */ adc_fmt |= AD193X_ADC_LCR_MASTER; dac_fmt |= AD193X_DAC_LCR_MASTER; break; case SND_SOC_DAIFMT_CBM_CFS: /* codec clk master & frame slave */ adc_fmt |= AD193X_ADC_BCLK_MASTER; dac_fmt |= AD193X_DAC_BCLK_MASTER; break; case SND_SOC_DAIFMT_CBS_CFS: /* codec clk & frm slave */ break; default: return -EINVAL; } regmap_update_bits(ad193x->regmap, AD193X_ADC_CTRL1, AD193X_ADC_SERFMT_MASK, 
adc_serfmt); regmap_update_bits(ad193x->regmap, AD193X_ADC_CTRL2, AD193X_ADC_FMT_MASK, adc_fmt); regmap_update_bits(ad193x->regmap, AD193X_DAC_CTRL1, AD193X_DAC_FMT_MASK, dac_fmt); return 0; } static int ad193x_set_dai_sysclk(struct snd_soc_dai *codec_dai, int clk_id, unsigned int freq, int dir) { struct snd_soc_codec *codec = codec_dai->codec; struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(codec); switch (freq) { case 12288000: case 18432000: case 24576000: case 36864000: ad193x->sysclk = freq; return 0; } return -EINVAL; } static int ad193x_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { int word_len = 0, master_rate = 0; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_codec *codec = rtd->codec; struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(codec); /* bit size */ switch (params_format(params)) { case SNDRV_PCM_FORMAT_S16_LE: word_len = 3; break; case SNDRV_PCM_FORMAT_S20_3LE: word_len = 1; break; case SNDRV_PCM_FORMAT_S24_LE: case SNDRV_PCM_FORMAT_S32_LE: word_len = 0; break; } switch (ad193x->sysclk) { case 12288000: master_rate = AD193X_PLL_INPUT_256; break; case 18432000: master_rate = AD193X_PLL_INPUT_384; break; case 24576000: master_rate = AD193X_PLL_INPUT_512; break; case 36864000: master_rate = AD193X_PLL_INPUT_768; break; } regmap_update_bits(ad193x->regmap, AD193X_PLL_CLK_CTRL0, AD193X_PLL_INPUT_MASK, master_rate); regmap_update_bits(ad193x->regmap, AD193X_DAC_CTRL2, AD193X_DAC_WORD_LEN_MASK, word_len << AD193X_DAC_WORD_LEN_SHFT); regmap_update_bits(ad193x->regmap, AD193X_ADC_CTRL1, AD193X_ADC_WORD_LEN_MASK, word_len); return 0; } static const struct snd_soc_dai_ops ad193x_dai_ops = { .hw_params = ad193x_hw_params, .digital_mute = ad193x_mute, .set_tdm_slot = ad193x_set_tdm_slot, .set_sysclk = ad193x_set_dai_sysclk, .set_fmt = ad193x_set_dai_fmt, }; /* codec DAI instance */ static struct snd_soc_dai_driver ad193x_dai = { .name = "ad193x-hifi", 
.playback = { .stream_name = "Playback", .channels_min = 2, .channels_max = 8, .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE, }, .capture = { .stream_name = "Capture", .channels_min = 2, .channels_max = 4, .rates = SNDRV_PCM_RATE_48000, .formats = SNDRV_PCM_FMTBIT_S32_LE | SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S20_3LE | SNDRV_PCM_FMTBIT_S24_LE, }, .ops = &ad193x_dai_ops, }; static int ad193x_probe(struct snd_soc_codec *codec) { struct ad193x_priv *ad193x = snd_soc_codec_get_drvdata(codec); int ret; codec->control_data = ad193x->regmap; ret = snd_soc_codec_set_cache_io(codec, 0, 0, SND_SOC_REGMAP); if (ret < 0) { dev_err(codec->dev, "failed to set cache I/O: %d\n", ret); return ret; } /* default setting for ad193x */ /* unmute dac channels */ regmap_write(ad193x->regmap, AD193X_DAC_CHNL_MUTE, 0x0); /* de-emphasis: 48kHz, powedown dac */ regmap_write(ad193x->regmap, AD193X_DAC_CTRL2, 0x1A); /* powerdown dac, dac in tdm mode */ regmap_write(ad193x->regmap, AD193X_DAC_CTRL0, 0x41); /* high-pass filter enable */ regmap_write(ad193x->regmap, AD193X_ADC_CTRL0, 0x3); /* sata delay=1, adc aux mode */ regmap_write(ad193x->regmap, AD193X_ADC_CTRL1, 0x43); /* pll input: mclki/xi */ regmap_write(ad193x->regmap, AD193X_PLL_CLK_CTRL0, 0x99); /* mclk=24.576Mhz: 0x9D; mclk=12.288Mhz: 0x99 */ regmap_write(ad193x->regmap, AD193X_PLL_CLK_CTRL1, 0x04); return ret; } static struct snd_soc_codec_driver soc_codec_dev_ad193x = { .probe = ad193x_probe, .controls = ad193x_snd_controls, .num_controls = ARRAY_SIZE(ad193x_snd_controls), .dapm_widgets = ad193x_dapm_widgets, .num_dapm_widgets = ARRAY_SIZE(ad193x_dapm_widgets), .dapm_routes = audio_paths, .num_dapm_routes = ARRAY_SIZE(audio_paths), }; static bool adau193x_reg_volatile(struct device *dev, unsigned int reg) { return false; } #if defined(CONFIG_SPI_MASTER) static const struct regmap_config ad193x_spi_regmap_config = { .val_bits = 
8, .reg_bits = 16, .read_flag_mask = 0x09, .write_flag_mask = 0x08, .max_register = AD193X_NUM_REGS - 1, .volatile_reg = adau193x_reg_volatile, }; static int __devinit ad193x_spi_probe(struct spi_device *spi) { struct ad193x_priv *ad193x; int ret; ad193x = devm_kzalloc(&spi->dev, sizeof(struct ad193x_priv), GFP_KERNEL); if (ad193x == NULL) return -ENOMEM; ad193x->regmap = regmap_init_spi(spi, &ad193x_spi_regmap_config); if (IS_ERR(ad193x->regmap)) { ret = PTR_ERR(ad193x->regmap); goto err_out; } spi_set_drvdata(spi, ad193x); ret = snd_soc_register_codec(&spi->dev, &soc_codec_dev_ad193x, &ad193x_dai, 1); if (ret < 0) goto err_regmap_exit; return 0; err_regmap_exit: regmap_exit(ad193x->regmap); err_out: return ret; } static int __devexit ad193x_spi_remove(struct spi_device *spi) { struct ad193x_priv *ad193x = spi_get_drvdata(spi); snd_soc_unregister_codec(&spi->dev); regmap_exit(ad193x->regmap); return 0; } static struct spi_driver ad193x_spi_driver = { .driver = { .name = "ad193x", .owner = THIS_MODULE, }, .probe = ad193x_spi_probe, .remove = __devexit_p(ad193x_spi_remove), }; #endif #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) static const struct regmap_config ad193x_i2c_regmap_config = { .val_bits = 8, .reg_bits = 8, .max_register = AD193X_NUM_REGS - 1, .volatile_reg = adau193x_reg_volatile, }; static const struct i2c_device_id ad193x_id[] = { { "ad1936", 0 }, { "ad1937", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ad193x_id); static int __devinit ad193x_i2c_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ad193x_priv *ad193x; int ret; ad193x = devm_kzalloc(&client->dev, sizeof(struct ad193x_priv), GFP_KERNEL); if (ad193x == NULL) return -ENOMEM; ad193x->regmap = regmap_init_i2c(client, &ad193x_i2c_regmap_config); if (IS_ERR(ad193x->regmap)) { ret = PTR_ERR(ad193x->regmap); goto err_out; } i2c_set_clientdata(client, ad193x); ret = snd_soc_register_codec(&client->dev, &soc_codec_dev_ad193x, &ad193x_dai, 1); if (ret < 0) goto 
err_regmap_exit; return 0; err_regmap_exit: regmap_exit(ad193x->regmap); err_out: return ret; } static int __devexit ad193x_i2c_remove(struct i2c_client *client) { struct ad193x_priv *ad193x = i2c_get_clientdata(client); snd_soc_unregister_codec(&client->dev); regmap_exit(ad193x->regmap); return 0; } static struct i2c_driver ad193x_i2c_driver = { .driver = { .name = "ad193x", }, .probe = ad193x_i2c_probe, .remove = __devexit_p(ad193x_i2c_remove), .id_table = ad193x_id, }; #endif static int __init ad193x_modinit(void) { int ret; #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) ret = i2c_add_driver(&ad193x_i2c_driver); if (ret != 0) { printk(KERN_ERR "Failed to register AD193X I2C driver: %d\n", ret); } #endif #if defined(CONFIG_SPI_MASTER) ret = spi_register_driver(&ad193x_spi_driver); if (ret != 0) { printk(KERN_ERR "Failed to register AD193X SPI driver: %d\n", ret); } #endif return ret; } module_init(ad193x_modinit); static void __exit ad193x_modexit(void) { #if defined(CONFIG_SPI_MASTER) spi_unregister_driver(&ad193x_spi_driver); #endif #if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) i2c_del_driver(&ad193x_i2c_driver); #endif } module_exit(ad193x_modexit); MODULE_DESCRIPTION("ASoC ad193x driver"); MODULE_AUTHOR("Barry Song <21cnbao@gmail.com>"); MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_oppo_find5
sound/pci/via82xx.c
4893
79045
/* * ALSA driver for VIA VT82xx (South Bridge) * * VT82C686A/B/C, VT8233A/C, VT8235 * * Copyright (c) 2000 Jaroslav Kysela <perex@perex.cz> * Tjeerd.Mulder <Tjeerd.Mulder@fujitsu-siemens.com> * 2002 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /* * Changes: * * Dec. 19, 2002 Takashi Iwai <tiwai@suse.de> * - use the DSX channels for the first pcm playback. * (on VIA8233, 8233C and 8235 only) * this will allow you play simultaneously up to 4 streams. * multi-channel playback is assigned to the second device * on these chips. * - support the secondary capture (on VIA8233/C,8235) * - SPDIF support * the DSX3 channel can be used for SPDIF output. * on VIA8233A, this channel is assigned to the second pcm * playback. * the card config of alsa-lib will assign the correct * device for applications. * - clean up the code, separate low-level initialization * routines for each chipset. * * Sep. 26, 2005 Karsten Wiese <annabellesgarden@yahoo.de> * - Optimize position calculation for the 823x chips. 
*/ #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/slab.h> #include <linux/gameport.h> #include <linux/module.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/info.h> #include <sound/tlv.h> #include <sound/ac97_codec.h> #include <sound/mpu401.h> #include <sound/initval.h> #if 0 #define POINTER_DEBUG #endif MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("VIA VT82xx audio"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{VIA,VT82C686A/B/C,pci},{VIA,VT8233A/C,8235}}"); #if defined(CONFIG_GAMEPORT) || (defined(MODULE) && defined(CONFIG_GAMEPORT_MODULE)) #define SUPPORT_JOYSTICK 1 #endif static int index = SNDRV_DEFAULT_IDX1; /* Index 0-MAX */ static char *id = SNDRV_DEFAULT_STR1; /* ID for this card */ static long mpu_port; #ifdef SUPPORT_JOYSTICK static bool joystick; #endif static int ac97_clock = 48000; static char *ac97_quirk; static int dxs_support; static int dxs_init_volume = 31; static int nodelay; module_param(index, int, 0444); MODULE_PARM_DESC(index, "Index value for VIA 82xx bridge."); module_param(id, charp, 0444); MODULE_PARM_DESC(id, "ID string for VIA 82xx bridge."); module_param(mpu_port, long, 0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port. (VT82C686x only)"); #ifdef SUPPORT_JOYSTICK module_param(joystick, bool, 0444); MODULE_PARM_DESC(joystick, "Enable joystick. 
(VT82C686x only)"); #endif module_param(ac97_clock, int, 0444); MODULE_PARM_DESC(ac97_clock, "AC'97 codec clock (default 48000Hz)."); module_param(ac97_quirk, charp, 0444); MODULE_PARM_DESC(ac97_quirk, "AC'97 workaround for strange hardware."); module_param(dxs_support, int, 0444); MODULE_PARM_DESC(dxs_support, "Support for DXS channels (0 = auto, 1 = enable, 2 = disable, 3 = 48k only, 4 = no VRA, 5 = enable any sample rate)"); module_param(dxs_init_volume, int, 0644); MODULE_PARM_DESC(dxs_init_volume, "initial DXS volume (0-31)"); module_param(nodelay, int, 0444); MODULE_PARM_DESC(nodelay, "Disable 500ms init delay"); /* just for backward compatibility */ static bool enable; module_param(enable, bool, 0444); /* revision numbers for via686 */ #define VIA_REV_686_A 0x10 #define VIA_REV_686_B 0x11 #define VIA_REV_686_C 0x12 #define VIA_REV_686_D 0x13 #define VIA_REV_686_E 0x14 #define VIA_REV_686_H 0x20 /* revision numbers for via8233 */ #define VIA_REV_PRE_8233 0x10 /* not in market */ #define VIA_REV_8233C 0x20 /* 2 rec, 4 pb, 1 multi-pb */ #define VIA_REV_8233 0x30 /* 2 rec, 4 pb, 1 multi-pb, spdif */ #define VIA_REV_8233A 0x40 /* 1 rec, 1 multi-pb, spdf */ #define VIA_REV_8235 0x50 /* 2 rec, 4 pb, 1 multi-pb, spdif */ #define VIA_REV_8237 0x60 #define VIA_REV_8251 0x70 /* * Direct registers */ #define VIAREG(via, x) ((via)->port + VIA_REG_##x) #define VIADEV_REG(viadev, x) ((viadev)->port + VIA_REG_##x) /* common offsets */ #define VIA_REG_OFFSET_STATUS 0x00 /* byte - channel status */ #define VIA_REG_STAT_ACTIVE 0x80 /* RO */ #define VIA8233_SHADOW_STAT_ACTIVE 0x08 /* RO */ #define VIA_REG_STAT_PAUSED 0x40 /* RO */ #define VIA_REG_STAT_TRIGGER_QUEUED 0x08 /* RO */ #define VIA_REG_STAT_STOPPED 0x04 /* RWC */ #define VIA_REG_STAT_EOL 0x02 /* RWC */ #define VIA_REG_STAT_FLAG 0x01 /* RWC */ #define VIA_REG_OFFSET_CONTROL 0x01 /* byte - channel control */ #define VIA_REG_CTRL_START 0x80 /* WO */ #define VIA_REG_CTRL_TERMINATE 0x40 /* WO */ #define 
VIA_REG_CTRL_AUTOSTART 0x20 #define VIA_REG_CTRL_PAUSE 0x08 /* RW */ #define VIA_REG_CTRL_INT_STOP 0x04 #define VIA_REG_CTRL_INT_EOL 0x02 #define VIA_REG_CTRL_INT_FLAG 0x01 #define VIA_REG_CTRL_RESET 0x01 /* RW - probably reset? undocumented */ #define VIA_REG_CTRL_INT (VIA_REG_CTRL_INT_FLAG | VIA_REG_CTRL_INT_EOL | VIA_REG_CTRL_AUTOSTART) #define VIA_REG_OFFSET_TYPE 0x02 /* byte - channel type (686 only) */ #define VIA_REG_TYPE_AUTOSTART 0x80 /* RW - autostart at EOL */ #define VIA_REG_TYPE_16BIT 0x20 /* RW */ #define VIA_REG_TYPE_STEREO 0x10 /* RW */ #define VIA_REG_TYPE_INT_LLINE 0x00 #define VIA_REG_TYPE_INT_LSAMPLE 0x04 #define VIA_REG_TYPE_INT_LESSONE 0x08 #define VIA_REG_TYPE_INT_MASK 0x0c #define VIA_REG_TYPE_INT_EOL 0x02 #define VIA_REG_TYPE_INT_FLAG 0x01 #define VIA_REG_OFFSET_TABLE_PTR 0x04 /* dword - channel table pointer */ #define VIA_REG_OFFSET_CURR_PTR 0x04 /* dword - channel current pointer */ #define VIA_REG_OFFSET_STOP_IDX 0x08 /* dword - stop index, channel type, sample rate */ #define VIA8233_REG_TYPE_16BIT 0x00200000 /* RW */ #define VIA8233_REG_TYPE_STEREO 0x00100000 /* RW */ #define VIA_REG_OFFSET_CURR_COUNT 0x0c /* dword - channel current count (24 bit) */ #define VIA_REG_OFFSET_CURR_INDEX 0x0f /* byte - channel current index (for via8233 only) */ #define DEFINE_VIA_REGSET(name,val) \ enum {\ VIA_REG_##name##_STATUS = (val),\ VIA_REG_##name##_CONTROL = (val) + 0x01,\ VIA_REG_##name##_TYPE = (val) + 0x02,\ VIA_REG_##name##_TABLE_PTR = (val) + 0x04,\ VIA_REG_##name##_CURR_PTR = (val) + 0x04,\ VIA_REG_##name##_STOP_IDX = (val) + 0x08,\ VIA_REG_##name##_CURR_COUNT = (val) + 0x0c,\ } /* playback block */ DEFINE_VIA_REGSET(PLAYBACK, 0x00); DEFINE_VIA_REGSET(CAPTURE, 0x10); DEFINE_VIA_REGSET(FM, 0x20); /* AC'97 */ #define VIA_REG_AC97 0x80 /* dword */ #define VIA_REG_AC97_CODEC_ID_MASK (3<<30) #define VIA_REG_AC97_CODEC_ID_SHIFT 30 #define VIA_REG_AC97_CODEC_ID_PRIMARY 0x00 #define VIA_REG_AC97_CODEC_ID_SECONDARY 0x01 #define 
VIA_REG_AC97_SECONDARY_VALID (1<<27) #define VIA_REG_AC97_PRIMARY_VALID (1<<25) #define VIA_REG_AC97_BUSY (1<<24) #define VIA_REG_AC97_READ (1<<23) #define VIA_REG_AC97_CMD_SHIFT 16 #define VIA_REG_AC97_CMD_MASK 0x7e #define VIA_REG_AC97_DATA_SHIFT 0 #define VIA_REG_AC97_DATA_MASK 0xffff #define VIA_REG_SGD_SHADOW 0x84 /* dword */ /* via686 */ #define VIA_REG_SGD_STAT_PB_FLAG (1<<0) #define VIA_REG_SGD_STAT_CP_FLAG (1<<1) #define VIA_REG_SGD_STAT_FM_FLAG (1<<2) #define VIA_REG_SGD_STAT_PB_EOL (1<<4) #define VIA_REG_SGD_STAT_CP_EOL (1<<5) #define VIA_REG_SGD_STAT_FM_EOL (1<<6) #define VIA_REG_SGD_STAT_PB_STOP (1<<8) #define VIA_REG_SGD_STAT_CP_STOP (1<<9) #define VIA_REG_SGD_STAT_FM_STOP (1<<10) #define VIA_REG_SGD_STAT_PB_ACTIVE (1<<12) #define VIA_REG_SGD_STAT_CP_ACTIVE (1<<13) #define VIA_REG_SGD_STAT_FM_ACTIVE (1<<14) /* via8233 */ #define VIA8233_REG_SGD_STAT_FLAG (1<<0) #define VIA8233_REG_SGD_STAT_EOL (1<<1) #define VIA8233_REG_SGD_STAT_STOP (1<<2) #define VIA8233_REG_SGD_STAT_ACTIVE (1<<3) #define VIA8233_INTR_MASK(chan) ((VIA8233_REG_SGD_STAT_FLAG|VIA8233_REG_SGD_STAT_EOL) << ((chan) * 4)) #define VIA8233_REG_SGD_CHAN_SDX 0 #define VIA8233_REG_SGD_CHAN_MULTI 4 #define VIA8233_REG_SGD_CHAN_REC 6 #define VIA8233_REG_SGD_CHAN_REC1 7 #define VIA_REG_GPI_STATUS 0x88 #define VIA_REG_GPI_INTR 0x8c /* multi-channel and capture registers for via8233 */ DEFINE_VIA_REGSET(MULTPLAY, 0x40); DEFINE_VIA_REGSET(CAPTURE_8233, 0x60); /* via8233-specific registers */ #define VIA_REG_OFS_PLAYBACK_VOLUME_L 0x02 /* byte */ #define VIA_REG_OFS_PLAYBACK_VOLUME_R 0x03 /* byte */ #define VIA_REG_OFS_MULTPLAY_FORMAT 0x02 /* byte - format and channels */ #define VIA_REG_MULTPLAY_FMT_8BIT 0x00 #define VIA_REG_MULTPLAY_FMT_16BIT 0x80 #define VIA_REG_MULTPLAY_FMT_CH_MASK 0x70 /* # channels << 4 (valid = 1,2,4,6) */ #define VIA_REG_OFS_CAPTURE_FIFO 0x02 /* byte - bit 6 = fifo enable */ #define VIA_REG_CAPTURE_FIFO_ENABLE 0x40 #define VIA_DXS_MAX_VOLUME 31 /* max. 
volume (attenuation) of reg 0x32/33 */ #define VIA_REG_CAPTURE_CHANNEL 0x63 /* byte - input select */ #define VIA_REG_CAPTURE_CHANNEL_MIC 0x4 #define VIA_REG_CAPTURE_CHANNEL_LINE 0 #define VIA_REG_CAPTURE_SELECT_CODEC 0x03 /* recording source codec (0 = primary) */ #define VIA_TBL_BIT_FLAG 0x40000000 #define VIA_TBL_BIT_EOL 0x80000000 /* pci space */ #define VIA_ACLINK_STAT 0x40 #define VIA_ACLINK_C11_READY 0x20 #define VIA_ACLINK_C10_READY 0x10 #define VIA_ACLINK_C01_READY 0x04 /* secondary codec ready */ #define VIA_ACLINK_LOWPOWER 0x02 /* low-power state */ #define VIA_ACLINK_C00_READY 0x01 /* primary codec ready */ #define VIA_ACLINK_CTRL 0x41 #define VIA_ACLINK_CTRL_ENABLE 0x80 /* 0: disable, 1: enable */ #define VIA_ACLINK_CTRL_RESET 0x40 /* 0: assert, 1: de-assert */ #define VIA_ACLINK_CTRL_SYNC 0x20 /* 0: release SYNC, 1: force SYNC hi */ #define VIA_ACLINK_CTRL_SDO 0x10 /* 0: release SDO, 1: force SDO hi */ #define VIA_ACLINK_CTRL_VRA 0x08 /* 0: disable VRA, 1: enable VRA */ #define VIA_ACLINK_CTRL_PCM 0x04 /* 0: disable PCM, 1: enable PCM */ #define VIA_ACLINK_CTRL_FM 0x02 /* via686 only */ #define VIA_ACLINK_CTRL_SB 0x01 /* via686 only */ #define VIA_ACLINK_CTRL_INIT (VIA_ACLINK_CTRL_ENABLE|\ VIA_ACLINK_CTRL_RESET|\ VIA_ACLINK_CTRL_PCM|\ VIA_ACLINK_CTRL_VRA) #define VIA_FUNC_ENABLE 0x42 #define VIA_FUNC_MIDI_PNP 0x80 /* FIXME: it's 0x40 in the datasheet! */ #define VIA_FUNC_MIDI_IRQMASK 0x40 /* FIXME: not documented! 
*/ #define VIA_FUNC_RX2C_WRITE 0x20 #define VIA_FUNC_SB_FIFO_EMPTY 0x10 #define VIA_FUNC_ENABLE_GAME 0x08 #define VIA_FUNC_ENABLE_FM 0x04 #define VIA_FUNC_ENABLE_MIDI 0x02 #define VIA_FUNC_ENABLE_SB 0x01 #define VIA_PNP_CONTROL 0x43 #define VIA_FM_NMI_CTRL 0x48 #define VIA8233_VOLCHG_CTRL 0x48 #define VIA8233_SPDIF_CTRL 0x49 #define VIA8233_SPDIF_DX3 0x08 #define VIA8233_SPDIF_SLOT_MASK 0x03 #define VIA8233_SPDIF_SLOT_1011 0x00 #define VIA8233_SPDIF_SLOT_34 0x01 #define VIA8233_SPDIF_SLOT_78 0x02 #define VIA8233_SPDIF_SLOT_69 0x03 /* */ #define VIA_DXS_AUTO 0 #define VIA_DXS_ENABLE 1 #define VIA_DXS_DISABLE 2 #define VIA_DXS_48K 3 #define VIA_DXS_NO_VRA 4 #define VIA_DXS_SRC 5 /* * pcm stream */ struct snd_via_sg_table { unsigned int offset; unsigned int size; } ; #define VIA_TABLE_SIZE 255 #define VIA_MAX_BUFSIZE (1<<24) struct viadev { unsigned int reg_offset; unsigned long port; int direction; /* playback = 0, capture = 1 */ struct snd_pcm_substream *substream; int running; unsigned int tbl_entries; /* # descriptors */ struct snd_dma_buffer table; struct snd_via_sg_table *idx_table; /* for recovery from the unexpected pointer */ unsigned int lastpos; unsigned int fragsize; unsigned int bufsize; unsigned int bufsize2; int hwptr_done; /* processed frame position in the buffer */ int in_interrupt; int shadow_shift; }; enum { TYPE_CARD_VIA686 = 1, TYPE_CARD_VIA8233 }; enum { TYPE_VIA686, TYPE_VIA8233, TYPE_VIA8233A }; #define VIA_MAX_DEVS 7 /* 4 playback, 1 multi, 2 capture */ struct via_rate_lock { spinlock_t lock; int rate; int used; }; struct via82xx { int irq; unsigned long port; struct resource *mpu_res; int chip_type; unsigned char revision; unsigned char old_legacy; unsigned char old_legacy_cfg; #ifdef CONFIG_PM unsigned char legacy_saved; unsigned char legacy_cfg_saved; unsigned char spdif_ctrl_saved; unsigned char capture_src_saved[2]; unsigned int mpu_port_saved; #endif unsigned char playback_volume[4][2]; /* for VIA8233/C/8235; default = 0 */ unsigned 
char playback_volume_c[2]; /* for VIA8233/C/8235; default = 0 */ unsigned int intr_mask; /* SGD_SHADOW mask to check interrupts */ struct pci_dev *pci; struct snd_card *card; unsigned int num_devs; unsigned int playback_devno, multi_devno, capture_devno; struct viadev devs[VIA_MAX_DEVS]; struct via_rate_lock rates[2]; /* playback and capture */ unsigned int dxs_fixed: 1; /* DXS channel accepts only 48kHz */ unsigned int no_vra: 1; /* no need to set VRA on DXS channels */ unsigned int dxs_src: 1; /* use full SRC capabilities of DXS */ unsigned int spdif_on: 1; /* only spdif rates work to external DACs */ struct snd_pcm *pcms[2]; struct snd_rawmidi *rmidi; struct snd_kcontrol *dxs_controls[4]; struct snd_ac97_bus *ac97_bus; struct snd_ac97 *ac97; unsigned int ac97_clock; unsigned int ac97_secondary; /* secondary AC'97 codec is present */ spinlock_t reg_lock; struct snd_info_entry *proc_entry; #ifdef SUPPORT_JOYSTICK struct gameport *gameport; #endif }; static DEFINE_PCI_DEVICE_TABLE(snd_via82xx_ids) = { /* 0x1106, 0x3058 */ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_82C686_5), TYPE_CARD_VIA686, }, /* 686A */ /* 0x1106, 0x3059 */ { PCI_VDEVICE(VIA, PCI_DEVICE_ID_VIA_8233_5), TYPE_CARD_VIA8233, }, /* VT8233 */ { 0, } }; MODULE_DEVICE_TABLE(pci, snd_via82xx_ids); /* */ /* * allocate and initialize the descriptor buffers * periods = number of periods * fragsize = period size in bytes */ static int build_via_table(struct viadev *dev, struct snd_pcm_substream *substream, struct pci_dev *pci, unsigned int periods, unsigned int fragsize) { unsigned int i, idx, ofs, rest; struct via82xx *chip = snd_pcm_substream_chip(substream); if (dev->table.area == NULL) { /* the start of each lists must be aligned to 8 bytes, * but the kernel pages are much bigger, so we don't care */ if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, snd_dma_pci_data(chip->pci), PAGE_ALIGN(VIA_TABLE_SIZE * 2 * 8), &dev->table) < 0) return -ENOMEM; } if (! 
dev->idx_table) {
	/* bookkeeping table mapping a descriptor index back to a linear
	 * buffer position; consumed by calc_linear_pos() */
	dev->idx_table = kmalloc(sizeof(*dev->idx_table) * VIA_TABLE_SIZE, GFP_KERNEL);
	if (! dev->idx_table)
		return -ENOMEM;
}

/* fill the entries */
idx = 0;
ofs = 0;
for (i = 0; i < periods; i++) {
	rest = fragsize;
	/* fill descriptors for a period.
	 * a period can be split to several descriptors if it's
	 * over page boundary.
	 */
	do {
		unsigned int r;
		unsigned int flag;
		unsigned int addr;

		if (idx >= VIA_TABLE_SIZE) {
			snd_printk(KERN_ERR "via82xx: too much table size!\n");
			return -EINVAL;
		}
		addr = snd_pcm_sgbuf_get_addr(substream, ofs);
		/* even u32 of each 8-byte entry holds the DMA address */
		((u32 *)dev->table.area)[idx << 1] = cpu_to_le32(addr);
		r = snd_pcm_sgbuf_get_chunk_size(substream, ofs, rest);
		rest -= r;
		if (! rest) {
			if (i == periods - 1)
				flag = VIA_TBL_BIT_EOL; /* buffer boundary */
			else
				flag = VIA_TBL_BIT_FLAG; /* period boundary */
		} else
			flag = 0; /* period continues to the next */
		/*
		printk(KERN_DEBUG "via: tbl %d: at %d size %d "
		       "(rest %d)\n", idx, ofs, r, rest);
		*/
		/* odd u32 of the entry: chunk length plus EOL/FLAG bits */
		((u32 *)dev->table.area)[(idx<<1) + 1] = cpu_to_le32(r | flag);
		dev->idx_table[idx].offset = ofs;
		dev->idx_table[idx].size = r;
		ofs += r;
		idx++;
	} while (rest > 0);
}
dev->tbl_entries = idx;
dev->bufsize = periods * fragsize;
dev->bufsize2 = dev->bufsize / 2;
dev->fragsize = fragsize;
return 0;
}

/*
 * free the descriptor table allocated by build_via_table() and the
 * index bookkeeping table
 */
static int clean_via_table(struct viadev *dev, struct snd_pcm_substream *substream,
			   struct pci_dev *pci)
{
	if (dev->table.area) {
		snd_dma_free_pages(&dev->table);
		dev->table.area = NULL;
	}
	kfree(dev->idx_table);
	dev->idx_table = NULL;
	return 0;
}

/*
 *  Basic I/O
 */

/* read the AC97 host controller register */
static inline unsigned int snd_via82xx_codec_xread(struct via82xx *chip)
{
	return inl(VIAREG(chip, AC97));
}

/* write the AC97 host controller register */
static inline void snd_via82xx_codec_xwrite(struct via82xx *chip, unsigned int val)
{
	outl(val, VIAREG(chip, AC97));
}

/*
 * busy-wait (up to ~1ms) until the AC97 controller clears its BUSY bit;
 * returns the low 16 bits of the register on success, -EIO on timeout
 */
static int snd_via82xx_codec_ready(struct via82xx *chip, int secondary)
{
	unsigned int timeout = 1000;	/* 1ms */
	unsigned int val;

	while (timeout-- > 0) {
		udelay(1);
		if (!((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY))
			return val & 0xffff;
	}
	snd_printk(KERN_ERR "codec_ready: codec %i is not ready [0x%x]\n",
		   secondary, snd_via82xx_codec_xread(chip));
	return -EIO;
}

/*
 * busy-wait (up to ~1ms) until the VALID bit of the selected codec is
 * raised with BUSY cleared; returns the data bits on success, -EIO on timeout
 */
static int snd_via82xx_codec_valid(struct via82xx *chip, int secondary)
{
	unsigned int timeout = 1000;	/* 1ms */
	unsigned int val, val1;
	unsigned int stat = !secondary ? VIA_REG_AC97_PRIMARY_VALID :
					 VIA_REG_AC97_SECONDARY_VALID;

	while (timeout-- > 0) {
		val = snd_via82xx_codec_xread(chip);
		val1 = val & (VIA_REG_AC97_BUSY | stat);
		if (val1 == stat)
			return val & 0xffff;
		udelay(1);
	}
	return -EIO;
}

/* ac97 bus_ops wait callback: settle the codec after a cold/warm reset */
static void snd_via82xx_codec_wait(struct snd_ac97 *ac97)
{
	struct via82xx *chip = ac97->private_data;
	int err;
	/* result intentionally ignored; the extra sleep below does the work */
	err = snd_via82xx_codec_ready(chip, ac97->num);
	/* here we need to wait fairly for long time.. */
	if (!nodelay)
		msleep(500);
}

/* ac97 bus_ops write callback: issue a register write command */
static void snd_via82xx_codec_write(struct snd_ac97 *ac97,
				    unsigned short reg,
				    unsigned short val)
{
	struct via82xx *chip = ac97->private_data;
	unsigned int xval;

	xval = !ac97->num ? VIA_REG_AC97_CODEC_ID_PRIMARY : VIA_REG_AC97_CODEC_ID_SECONDARY;
	xval <<= VIA_REG_AC97_CODEC_ID_SHIFT;
	xval |= reg << VIA_REG_AC97_CMD_SHIFT;
	xval |= val << VIA_REG_AC97_DATA_SHIFT;
	snd_via82xx_codec_xwrite(chip, xval);
	snd_via82xx_codec_ready(chip, ac97->num);
}

/* ac97 bus_ops read callback: retries up to 4 times before giving up */
static unsigned short snd_via82xx_codec_read(struct snd_ac97 *ac97, unsigned short reg)
{
	struct via82xx *chip = ac97->private_data;
	unsigned int xval, val = 0xffff;
	int again = 0;

	xval = ac97->num << VIA_REG_AC97_CODEC_ID_SHIFT;
	xval |= ac97->num ?
VIA_REG_AC97_SECONDARY_VALID :
		VIA_REG_AC97_PRIMARY_VALID;
	xval |= VIA_REG_AC97_READ;
	xval |= (reg & 0x7f) << VIA_REG_AC97_CMD_SHIFT;
	while (1) {
		if (again++ > 3) {
			snd_printk(KERN_ERR "codec_read: codec %i is not valid [0x%x]\n",
				   ac97->num, snd_via82xx_codec_xread(chip));
			return 0xffff;
		}
		snd_via82xx_codec_xwrite(chip, xval);
		udelay (20);
		if (snd_via82xx_codec_valid(chip, ac97->num) >= 0) {
			udelay(25);
			val = snd_via82xx_codec_xread(chip);
			break;
		}
	}
	return val & 0xffff;
}

/*
 * stop the SGD engine of the given channel and clear its pending
 * interrupts; also resets the software position bookkeeping
 */
static void snd_via82xx_channel_reset(struct via82xx *chip, struct viadev *viadev)
{
	outb(VIA_REG_CTRL_PAUSE | VIA_REG_CTRL_TERMINATE | VIA_REG_CTRL_RESET,
	     VIADEV_REG(viadev, OFFSET_CONTROL));
	inb(VIADEV_REG(viadev, OFFSET_CONTROL));
	udelay(50);
	/* disable interrupts */
	outb(0x00, VIADEV_REG(viadev, OFFSET_CONTROL));
	/* clear interrupts */
	outb(0x03, VIADEV_REG(viadev, OFFSET_STATUS));
	outb(0x00, VIADEV_REG(viadev, OFFSET_TYPE)); /* for via686 */
	// outl(0, VIADEV_REG(viadev, OFFSET_CURR_PTR));
	viadev->lastpos = 0;
	viadev->hwptr_done = 0;
}

/*
 *  Interrupt handler
 *  Used for 686 and 8233A
 */
static irqreturn_t snd_via686_interrupt(int irq, void *dev_id)
{
	struct via82xx *chip = dev_id;
	unsigned int status;
	unsigned int i;

	status = inl(VIAREG(chip, SGD_SHADOW));
	if (! (status & chip->intr_mask)) {
		if (chip->rmidi)
			/* check mpu401 interrupt */
			return snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data);
		return IRQ_NONE;
	}

	/* check status for each stream */
	spin_lock(&chip->reg_lock);
	for (i = 0; i < chip->num_devs; i++) {
		struct viadev *viadev = &chip->devs[i];
		unsigned char c_status = inb(VIADEV_REG(viadev, OFFSET_STATUS));
		if (! (c_status & (VIA_REG_STAT_EOL|VIA_REG_STAT_FLAG|VIA_REG_STAT_STOPPED)))
			continue;
		if (viadev->substream && viadev->running) {
			/*
			 * Update hwptr_done based on 'period elapsed'
			 * interrupts. We'll use it, when the chip returns 0
			 * for OFFSET_CURR_COUNT.
			 */
			if (c_status & VIA_REG_STAT_EOL)
				viadev->hwptr_done = 0;
			else
				viadev->hwptr_done += viadev->fragsize;
			viadev->in_interrupt = c_status;
			/* drop the lock around period_elapsed: it may call
			 * back into the pointer callback, which takes it */
			spin_unlock(&chip->reg_lock);
			snd_pcm_period_elapsed(viadev->substream);
			spin_lock(&chip->reg_lock);
			viadev->in_interrupt = 0;
		}
		outb(c_status, VIADEV_REG(viadev, OFFSET_STATUS)); /* ack */
	}
	spin_unlock(&chip->reg_lock);
	return IRQ_HANDLED;
}

/*
 *  Interrupt handler (VIA8233/C/8235: per-channel state is read from
 *  the SGD shadow register instead of each channel's status port)
 */
static irqreturn_t snd_via8233_interrupt(int irq, void *dev_id)
{
	struct via82xx *chip = dev_id;
	unsigned int status;
	unsigned int i;
	int irqreturn = 0;

	/* check status for each stream */
	spin_lock(&chip->reg_lock);
	status = inl(VIAREG(chip, SGD_SHADOW));

	for (i = 0; i < chip->num_devs; i++) {
		struct viadev *viadev = &chip->devs[i];
		struct snd_pcm_substream *substream;
		unsigned char c_status, shadow_status;

		shadow_status = (status >> viadev->shadow_shift) &
			(VIA8233_SHADOW_STAT_ACTIVE|VIA_REG_STAT_EOL|
			 VIA_REG_STAT_FLAG);
		c_status = shadow_status & (VIA_REG_STAT_EOL|VIA_REG_STAT_FLAG);
		if (!c_status)
			continue;

		substream = viadev->substream;
		if (substream && viadev->running) {
			/*
			 * Update hwptr_done based on 'period elapsed'
			 * interrupts. We'll use it, when the chip returns 0
			 * for OFFSET_CURR_COUNT.
			 */
			if (c_status & VIA_REG_STAT_EOL)
				viadev->hwptr_done = 0;
			else
				viadev->hwptr_done += viadev->fragsize;
			viadev->in_interrupt = c_status;
			if (shadow_status & VIA8233_SHADOW_STAT_ACTIVE)
				viadev->in_interrupt |= VIA_REG_STAT_ACTIVE;
			spin_unlock(&chip->reg_lock);

			snd_pcm_period_elapsed(substream);

			spin_lock(&chip->reg_lock);
			viadev->in_interrupt = 0;
		}
		outb(c_status, VIADEV_REG(viadev, OFFSET_STATUS)); /* ack */
		irqreturn = 1;
	}
	spin_unlock(&chip->reg_lock);
	return IRQ_RETVAL(irqreturn);
}

/*
 *  PCM callbacks
 */

/*
 * trigger callback
 */
static int snd_via82xx_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	unsigned char val;

	/* on 8233 family the interrupt-enable bit lives in the control reg */
	if (chip->chip_type != TYPE_VIA686)
		val = VIA_REG_CTRL_INT;
	else
		val = 0;
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		val |= VIA_REG_CTRL_START;
		viadev->running = 1;
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		val = VIA_REG_CTRL_TERMINATE;
		viadev->running = 0;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		val |= VIA_REG_CTRL_PAUSE;
		viadev->running = 0;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		viadev->running = 1;
		break;
	default:
		return -EINVAL;
	}
	outb(val, VIADEV_REG(viadev, OFFSET_CONTROL));
	if (cmd == SNDRV_PCM_TRIGGER_STOP)
		snd_via82xx_channel_reset(chip, viadev);
	return 0;
}

/*
 *  pointer callbacks
 */

/*
 * calculate the linear position at the given sg-buffer index and the rest count
 */

#define check_invalid_pos(viadev,pos) \
	((pos) < viadev->lastpos && ((pos) >= viadev->bufsize2 ||\
				     viadev->lastpos < viadev->bufsize2))

static inline unsigned int calc_linear_pos(struct viadev *viadev, unsigned int idx,
					   unsigned int count)
{
	unsigned int size, base, res;

	size = viadev->idx_table[idx].size;
	base = viadev->idx_table[idx].offset;
	res = base + size - count;
	if (res >= viadev->bufsize)
		res -= viadev->bufsize;

	/* check the validity of the calculated position */
	if (size <
count) {
		snd_printd(KERN_ERR "invalid via82xx_cur_ptr (size = %d, count = %d)\n",
			   (int)size, (int)count);
		res = viadev->lastpos;
	} else {
		if (! count) {
			/* Some mobos report count = 0 on the DMA boundary,
			 * i.e. count = size indeed.
			 * Let's check whether this step is above the expected size.
			 */
			int delta = res - viadev->lastpos;
			if (delta < 0)
				delta += viadev->bufsize;
			if ((unsigned int)delta > viadev->fragsize)
				res = base;
		}
		if (check_invalid_pos(viadev, res)) {
#ifdef POINTER_DEBUG
			printk(KERN_DEBUG "fail: idx = %i/%i, lastpos = 0x%x, "
			       "bufsize2 = 0x%x, offsize = 0x%x, size = 0x%x, "
			       "count = 0x%x\n", idx, viadev->tbl_entries,
			       viadev->lastpos, viadev->bufsize2,
			       viadev->idx_table[idx].offset,
			       viadev->idx_table[idx].size, count);
#endif
			/* count register returns full size when end of buffer is reached */
			res = base + size;
			if (check_invalid_pos(viadev, res)) {
				snd_printd(KERN_ERR "invalid via82xx_cur_ptr (2), "
					   "using last valid pointer\n");
				res = viadev->lastpos;
			}
		}
	}
	return res;
}

/*
 * get the current pointer on via686
 */
static snd_pcm_uframes_t snd_via686_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	unsigned int idx, ptr, count, res;

	if (snd_BUG_ON(!viadev->tbl_entries))
		return 0;
	if (!(inb(VIADEV_REG(viadev, OFFSET_STATUS)) & VIA_REG_STAT_ACTIVE))
		return 0;

	spin_lock(&chip->reg_lock);
	count = inl(VIADEV_REG(viadev, OFFSET_CURR_COUNT)) & 0xffffff;
	/* The via686a does not have the current index register,
	 * so we need to calculate the index from CURR_PTR.
	 */
	/* NOTE(review): the (unsigned int) casts of table.addr assume a
	 * 32-bit DMA address space — TODO confirm for 64-bit configs */
	ptr = inl(VIADEV_REG(viadev, OFFSET_CURR_PTR));
	if (ptr <= (unsigned int)viadev->table.addr)
		idx = 0;
	else /* CURR_PTR holds the address + 8 */
		idx = ((ptr - (unsigned int)viadev->table.addr) / 8 - 1) %
			viadev->tbl_entries;
	res = calc_linear_pos(viadev, idx, count);
	viadev->lastpos = res; /* remember the last position */
	spin_unlock(&chip->reg_lock);

	return bytes_to_frames(substream->runtime, res);
}

/*
 * get the current pointer on via823x
 */
static snd_pcm_uframes_t snd_via8233_pcm_pointer(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	unsigned int idx, count, res;
	int status;

	if (snd_BUG_ON(!viadev->tbl_entries))
		return 0;

	spin_lock(&chip->reg_lock);
	count = inl(VIADEV_REG(viadev, OFFSET_CURR_COUNT));
	status = viadev->in_interrupt;
	if (!status)
		status = inb(VIADEV_REG(viadev, OFFSET_STATUS));

	/* An apparent bug in the 8251 is worked around by sending a
	 * REG_CTRL_START. */
	if (chip->revision == VIA_REV_8251 && (status & VIA_REG_STAT_EOL))
		snd_via82xx_pcm_trigger(substream, SNDRV_PCM_TRIGGER_START);

	if (!(status & VIA_REG_STAT_ACTIVE)) {
		res = 0;
		goto unlock;
	}
	if (count & 0xffffff) {
		/* on 823x the current descriptor index sits in bits 31:24
		 * of CURR_COUNT, the remaining bytes in bits 23:0 */
		idx = count >> 24;
		if (idx >= viadev->tbl_entries) {
#ifdef POINTER_DEBUG
			printk(KERN_DEBUG "fail: invalid idx = %i/%i\n", idx,
			       viadev->tbl_entries);
#endif
			res = viadev->lastpos;
		} else {
			count &= 0xffffff;
			res = calc_linear_pos(viadev, idx, count);
		}
	} else {
		/* count == 0: fall back to the interrupt-maintained position */
		res = viadev->hwptr_done;
		if (!viadev->in_interrupt) {
			if (status & VIA_REG_STAT_EOL) {
				res = 0;
			} else if (status & VIA_REG_STAT_FLAG) {
				res += viadev->fragsize;
			}
		}
	}
unlock:
	viadev->lastpos = res;
	spin_unlock(&chip->reg_lock);
	return bytes_to_frames(substream->runtime, res);
}

/*
 * hw_params callback:
 * allocate the buffer and build up the buffer description table
 */
static int snd_via82xx_hw_params(struct snd_pcm_substream *substream,
				 struct snd_pcm_hw_params *hw_params)
{
	struct via82xx *chip =
snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	int err;

	err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params));
	if (err < 0)
		return err;
	err = build_via_table(viadev, substream, chip->pci,
			      params_periods(hw_params),
			      params_period_bytes(hw_params));
	if (err < 0)
		return err;
	return 0;
}

/*
 * hw_free callback:
 * clean up the buffer description table and release the buffer
 */
static int snd_via82xx_hw_free(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;

	clean_via_table(viadev, substream, chip->pci);
	snd_pcm_lib_free_pages(substream);
	return 0;
}

/*
 * set up the table pointer
 */
static void snd_via82xx_set_table_ptr(struct via82xx *chip, struct viadev *viadev)
{
	snd_via82xx_codec_ready(chip, 0);
	outl((u32)viadev->table.addr, VIADEV_REG(viadev, OFFSET_TABLE_PTR));
	udelay(20);
	snd_via82xx_codec_ready(chip, 0);
}

/*
 * prepare callback for playback and capture on via686:
 * program the channel type register from the runtime format/channels
 */
static void via686_setup_format(struct via82xx *chip, struct viadev *viadev,
				struct snd_pcm_runtime *runtime)
{
	snd_via82xx_channel_reset(chip, viadev);
	/* this must be set after channel_reset */
	snd_via82xx_set_table_ptr(chip, viadev);
	outb(VIA_REG_TYPE_AUTOSTART |
	     (runtime->format == SNDRV_PCM_FORMAT_S16_LE ? VIA_REG_TYPE_16BIT : 0) |
	     (runtime->channels > 1 ? VIA_REG_TYPE_STEREO : 0) |
	     ((viadev->reg_offset & 0x10) == 0 ? VIA_REG_TYPE_INT_LSAMPLE : 0) |
	     VIA_REG_TYPE_INT_EOL |
	     VIA_REG_TYPE_INT_FLAG, VIADEV_REG(viadev, OFFSET_TYPE));
}

static int snd_via686_playback_prepare(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_ac97_set_rate(chip->ac97, AC97_PCM_FRONT_DAC_RATE, runtime->rate);
	snd_ac97_set_rate(chip->ac97, AC97_SPDIF, runtime->rate);
	via686_setup_format(chip, viadev, runtime);
	return 0;
}

static int snd_via686_capture_prepare(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;

	snd_ac97_set_rate(chip->ac97, AC97_PCM_LR_ADC_RATE, runtime->rate);
	via686_setup_format(chip, viadev, runtime);
	return 0;
}

/*
 * lock the current rate:
 * returns 1 if the rate was newly taken, 0 if it already matches,
 * -EINVAL if another active stream holds a different rate
 */
static int via_lock_rate(struct via_rate_lock *rec, int rate)
{
	int changed = 0;

	spin_lock_irq(&rec->lock);
	if (rec->rate != rate) {
		if (rec->rate && rec->used > 1) /* already set */
			changed = -EINVAL;
		else {
			rec->rate = rate;
			changed = 1;
		}
	}
	spin_unlock_irq(&rec->lock);
	return changed;
}

/*
 * prepare callback for DSX playback on via823x
 */
static int snd_via8233_playback_prepare(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	/* with full DXS SRC the codec always runs at 48kHz */
	int ac97_rate = chip->dxs_src ? 48000 : runtime->rate;
	int rate_changed;
	u32 rbits;

	if ((rate_changed = via_lock_rate(&chip->rates[0], ac97_rate)) < 0)
		return rate_changed;
	if (rate_changed)
		snd_ac97_set_rate(chip->ac97, AC97_PCM_FRONT_DAC_RATE,
				  chip->no_vra ?
48000 : runtime->rate);
	if (chip->spdif_on && viadev->reg_offset == 0x30)
		snd_ac97_set_rate(chip->ac97, AC97_SPDIF, runtime->rate);

	/* 20-bit fixed-point rate conversion factor: rate/48000 * 0x100000,
	 * computed in two terms to avoid 32-bit overflow */
	if (runtime->rate == 48000)
		rbits = 0xfffff;
	else
		rbits = (0x100000 / 48000) * runtime->rate +
			((0x100000 % 48000) * runtime->rate) / 48000;
	snd_BUG_ON(rbits & ~0xfffff);
	snd_via82xx_channel_reset(chip, viadev);
	snd_via82xx_set_table_ptr(chip, viadev);
	outb(chip->playback_volume[viadev->reg_offset / 0x10][0],
	     VIADEV_REG(viadev, OFS_PLAYBACK_VOLUME_L));
	outb(chip->playback_volume[viadev->reg_offset / 0x10][1],
	     VIADEV_REG(viadev, OFS_PLAYBACK_VOLUME_R));
	outl((runtime->format == SNDRV_PCM_FORMAT_S16_LE ? VIA8233_REG_TYPE_16BIT : 0) | /* format */
	     (runtime->channels > 1 ? VIA8233_REG_TYPE_STEREO : 0) | /* stereo */
	     rbits | /* rate */
	     0xff000000,    /* STOP index is never reached */
	     VIADEV_REG(viadev, OFFSET_STOP_IDX));
	udelay(20);
	snd_via82xx_codec_ready(chip, 0);
	return 0;
}

/*
 * prepare callback for multi-channel playback on via823x
 */
static int snd_via8233_multi_prepare(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;
	unsigned int slots;
	int fmt;

	if (via_lock_rate(&chip->rates[0], runtime->rate) < 0)
		return -EINVAL;
	snd_ac97_set_rate(chip->ac97, AC97_PCM_FRONT_DAC_RATE, runtime->rate);
	snd_ac97_set_rate(chip->ac97, AC97_PCM_SURR_DAC_RATE, runtime->rate);
	snd_ac97_set_rate(chip->ac97, AC97_PCM_LFE_DAC_RATE, runtime->rate);
	snd_ac97_set_rate(chip->ac97, AC97_SPDIF, runtime->rate);
	snd_via82xx_channel_reset(chip, viadev);
	snd_via82xx_set_table_ptr(chip, viadev);
	fmt = (runtime->format == SNDRV_PCM_FORMAT_S16_LE) ?
		VIA_REG_MULTPLAY_FMT_16BIT : VIA_REG_MULTPLAY_FMT_8BIT;
	fmt |= runtime->channels << 4;
	outb(fmt, VIADEV_REG(viadev, OFS_MULTPLAY_FORMAT));
#if 0
	if (chip->revision == VIA_REV_8233A)
		slots = 0;
	else
#endif
	{
		/* set sample number to slot 3, 4, 7, 8, 6, 9 (for VIA8233/C,8235) */
		/* corresponding to FL, FR, RL, RR, C, LFE ?? */
		switch (runtime->channels) {
		case 1: slots = (1<<0) | (1<<4); break;
		case 2: slots = (1<<0) | (2<<4); break;
		case 3: slots = (1<<0) | (2<<4) | (5<<8); break;
		case 4: slots = (1<<0) | (2<<4) | (3<<8) | (4<<12); break;
		case 5: slots = (1<<0) | (2<<4) | (3<<8) | (4<<12) | (5<<16); break;
		case 6: slots = (1<<0) | (2<<4) | (3<<8) | (4<<12) | (5<<16) | (6<<20); break;
		default: slots = 0; break;
		}
	}
	/* STOP index is never reached */
	outl(0xff000000 | slots, VIADEV_REG(viadev, OFFSET_STOP_IDX));
	udelay(20);
	snd_via82xx_codec_ready(chip, 0);
	return 0;
}

/*
 * prepare callback for capture on via823x
 */
static int snd_via8233_capture_prepare(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	struct snd_pcm_runtime *runtime = substream->runtime;

	if (via_lock_rate(&chip->rates[1], runtime->rate) < 0)
		return -EINVAL;
	snd_ac97_set_rate(chip->ac97, AC97_PCM_LR_ADC_RATE, runtime->rate);
	snd_via82xx_channel_reset(chip, viadev);
	snd_via82xx_set_table_ptr(chip, viadev);
	outb(VIA_REG_CAPTURE_FIFO_ENABLE, VIADEV_REG(viadev, OFS_CAPTURE_FIFO));
	outl((runtime->format == SNDRV_PCM_FORMAT_S16_LE ? VIA8233_REG_TYPE_16BIT : 0) |
	     (runtime->channels > 1 ?
VIA8233_REG_TYPE_STEREO : 0) |
	     0xff000000,    /* STOP index is never reached */
	     VIADEV_REG(viadev, OFFSET_STOP_IDX));
	udelay(20);
	snd_via82xx_codec_ready(chip, 0);
	return 0;
}

/*
 * pcm hardware definition, identical for both playback and capture
 */
static struct snd_pcm_hardware snd_via82xx_hw =
{
	.info =			(SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 /* SNDRV_PCM_INFO_RESUME | */
				 SNDRV_PCM_INFO_PAUSE),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_48000,
	.rate_min =		48000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	VIA_MAX_BUFSIZE,
	.period_bytes_min =	32,
	.period_bytes_max =	VIA_MAX_BUFSIZE / 2,
	.periods_min =		2,
	.periods_max =		VIA_TABLE_SIZE / 2,
	.fifo_size =		0,
};

/*
 * open callback skeleton:
 * shared by all open callbacks; applies the per-direction rate lock
 * and narrows the hw rate range according to the chip configuration
 */
static int snd_via82xx_pcm_open(struct via82xx *chip, struct viadev *viadev,
				struct snd_pcm_substream *substream)
{
	struct snd_pcm_runtime *runtime = substream->runtime;
	int err;
	struct via_rate_lock *ratep;
	bool use_src = false;

	runtime->hw = snd_via82xx_hw;

	/* set the hw rate condition */
	ratep = &chip->rates[viadev->direction];
	spin_lock_irq(&ratep->lock);
	ratep->used++;
	if (chip->spdif_on && viadev->reg_offset == 0x30) {
		/* DXS#3 and spdif is on */
		runtime->hw.rates = chip->ac97->rates[AC97_RATES_SPDIF];
		snd_pcm_limit_hw_rates(runtime);
	} else if (chip->dxs_fixed && viadev->reg_offset < 0x40) {
		/* fixed DXS playback rate */
		runtime->hw.rates = SNDRV_PCM_RATE_48000;
		runtime->hw.rate_min = runtime->hw.rate_max = 48000;
	} else if (chip->dxs_src && viadev->reg_offset < 0x40) {
		/* use full SRC capabilities of DXS */
		runtime->hw.rates = (SNDRV_PCM_RATE_CONTINUOUS |
				     SNDRV_PCM_RATE_8000_48000);
		runtime->hw.rate_min = 8000;
		runtime->hw.rate_max = 48000;
		use_src = true;
	} else if (! ratep->rate) {
		int idx = viadev->direction ?
			AC97_RATES_ADC : AC97_RATES_FRONT_DAC;
		runtime->hw.rates = chip->ac97->rates[idx];
		snd_pcm_limit_hw_rates(runtime);
	} else {
		/* a fixed rate */
		runtime->hw.rates = SNDRV_PCM_RATE_KNOT;
		runtime->hw.rate_max = runtime->hw.rate_min = ratep->rate;
	}
	spin_unlock_irq(&ratep->lock);

	/* we may remove following constraint when we modify table entries
	   in interrupt */
	if ((err = snd_pcm_hw_constraint_integer(runtime,
						 SNDRV_PCM_HW_PARAM_PERIODS)) < 0)
		return err;

	if (use_src) {
		err = snd_pcm_hw_rule_noresample(runtime, 48000);
		if (err < 0)
			return err;
	}

	runtime->private_data = viadev;
	viadev->substream = substream;

	return 0;
}

/*
 * open callback for playback on via686
 */
static int snd_via686_playback_open(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = &chip->devs[chip->playback_devno + substream->number];
	int err;

	if ((err = snd_via82xx_pcm_open(chip, viadev, substream)) < 0)
		return err;
	return 0;
}

/*
 * open callback for playback on via823x DXS:
 * additionally initializes and activates the per-stream volume control
 */
static int snd_via8233_playback_open(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev;
	unsigned int stream;
	int err;

	viadev = &chip->devs[chip->playback_devno + substream->number];
	if ((err = snd_via82xx_pcm_open(chip, viadev, substream)) < 0)
		return err;
	stream = viadev->reg_offset / 0x10;
	if (chip->dxs_controls[stream]) {
		chip->playback_volume[stream][0] =
			VIA_DXS_MAX_VOLUME - (dxs_init_volume & 31);
		chip->playback_volume[stream][1] =
			VIA_DXS_MAX_VOLUME - (dxs_init_volume & 31);
		chip->dxs_controls[stream]->vd[0].access &=
			~SNDRV_CTL_ELEM_ACCESS_INACTIVE;
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE |
			       SNDRV_CTL_EVENT_MASK_INFO,
			       &chip->dxs_controls[stream]->id);
	}
	return 0;
}

/*
 * open callback for playback on via823x multi-channel
 */
static int snd_via8233_multi_open(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev
*viadev = &chip->devs[chip->multi_devno];
	int err;
	/* channels constraint for VIA8233A
	 * 3 and 5 channels are not supported
	 */
	static unsigned int channels[] = {
		1, 2, 4, 6
	};
	static struct snd_pcm_hw_constraint_list hw_constraints_channels = {
		.count = ARRAY_SIZE(channels),
		.list = channels,
		.mask = 0,
	};

	if ((err = snd_via82xx_pcm_open(chip, viadev, substream)) < 0)
		return err;
	substream->runtime->hw.channels_max = 6;
	if (chip->revision == VIA_REV_8233A)
		snd_pcm_hw_constraint_list(substream->runtime, 0,
					   SNDRV_PCM_HW_PARAM_CHANNELS,
					   &hw_constraints_channels);
	return 0;
}

/*
 * open callback for capture on via686 and via823x
 */
static int snd_via82xx_capture_open(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = &chip->devs[chip->capture_devno + substream->pcm->device];

	return snd_via82xx_pcm_open(chip, viadev, substream);
}

/*
 * close callback:
 * releases the per-direction rate lock and powers down the unused
 * codec DAC/ADC sections when no stream holds the rate any longer
 */
static int snd_via82xx_pcm_close(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	struct via_rate_lock *ratep;

	/* release the rate lock */
	ratep = &chip->rates[viadev->direction];
	spin_lock_irq(&ratep->lock);
	ratep->used--;
	if (! ratep->used)
		ratep->rate = 0;
	spin_unlock_irq(&ratep->lock);
	if (! ratep->rate) {
		if (! viadev->direction) {
			snd_ac97_update_power(chip->ac97,
					      AC97_PCM_FRONT_DAC_RATE, 0);
			snd_ac97_update_power(chip->ac97,
					      AC97_PCM_SURR_DAC_RATE, 0);
			snd_ac97_update_power(chip->ac97,
					      AC97_PCM_LFE_DAC_RATE, 0);
		} else
			snd_ac97_update_power(chip->ac97,
					      AC97_PCM_LR_ADC_RATE, 0);
	}
	viadev->substream = NULL;
	return 0;
}

/*
 * close callback for via823x DXS playback:
 * deactivates the per-stream volume control before the common close
 */
static int snd_via8233_playback_close(struct snd_pcm_substream *substream)
{
	struct via82xx *chip = snd_pcm_substream_chip(substream);
	struct viadev *viadev = substream->runtime->private_data;
	unsigned int stream;

	stream = viadev->reg_offset / 0x10;
	if (chip->dxs_controls[stream]) {
		chip->dxs_controls[stream]->vd[0].access |=
			SNDRV_CTL_ELEM_ACCESS_INACTIVE;
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_INFO,
			       &chip->dxs_controls[stream]->id);
	}
	return snd_via82xx_pcm_close(substream);
}

/* via686 playback callbacks */
static struct snd_pcm_ops snd_via686_playback_ops = {
	.open =		snd_via686_playback_open,
	.close =	snd_via82xx_pcm_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_via82xx_hw_params,
	.hw_free =	snd_via82xx_hw_free,
	.prepare =	snd_via686_playback_prepare,
	.trigger =	snd_via82xx_pcm_trigger,
	.pointer =	snd_via686_pcm_pointer,
	.page =		snd_pcm_sgbuf_ops_page,
};

/* via686 capture callbacks */
static struct snd_pcm_ops snd_via686_capture_ops = {
	.open =		snd_via82xx_capture_open,
	.close =	snd_via82xx_pcm_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_via82xx_hw_params,
	.hw_free =	snd_via82xx_hw_free,
	.prepare =	snd_via686_capture_prepare,
	.trigger =	snd_via82xx_pcm_trigger,
	.pointer =	snd_via686_pcm_pointer,
	.page =		snd_pcm_sgbuf_ops_page,
};

/* via823x DSX playback callbacks */
static struct snd_pcm_ops snd_via8233_playback_ops = {
	.open =		snd_via8233_playback_open,
	.close =	snd_via8233_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_via82xx_hw_params,
	.hw_free =	snd_via82xx_hw_free,
	.prepare =	snd_via8233_playback_prepare,
	.trigger =	snd_via82xx_pcm_trigger,
	.pointer =	snd_via8233_pcm_pointer,
	.page =		snd_pcm_sgbuf_ops_page,
};
/* via823x multi-channel playback callbacks */
static struct snd_pcm_ops snd_via8233_multi_ops = {
	.open =		snd_via8233_multi_open,
	.close =	snd_via82xx_pcm_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_via82xx_hw_params,
	.hw_free =	snd_via82xx_hw_free,
	.prepare =	snd_via8233_multi_prepare,
	.trigger =	snd_via82xx_pcm_trigger,
	.pointer =	snd_via8233_pcm_pointer,
	.page =		snd_pcm_sgbuf_ops_page,
};

/* via823x capture callbacks */
static struct snd_pcm_ops snd_via8233_capture_ops = {
	.open =		snd_via82xx_capture_open,
	.close =	snd_via82xx_pcm_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_via82xx_hw_params,
	.hw_free =	snd_via82xx_hw_free,
	.prepare =	snd_via8233_capture_prepare,
	.trigger =	snd_via82xx_pcm_trigger,
	.pointer =	snd_via8233_pcm_pointer,
	.page =		snd_pcm_sgbuf_ops_page,
};

/* initialize one SGD channel slot in chip->devs[] */
static void init_viadev(struct via82xx *chip, int idx, unsigned int reg_offset,
			int shadow_pos, int direction)
{
	chip->devs[idx].reg_offset = reg_offset;
	/* each channel occupies 4 bits in the SGD shadow register */
	chip->devs[idx].shadow_shift = shadow_pos * 4;
	chip->devs[idx].direction = direction;
	chip->devs[idx].port = chip->port + reg_offset;
}

/*
 * create pcm instances for VIA8233, 8233C and 8235 (not 8233A)
 */
static int __devinit snd_via8233_pcm_new(struct via82xx *chip)
{
	struct snd_pcm *pcm;
	int i, err;

	chip->playback_devno = 0;	/* x 4 */
	chip->multi_devno = 4;		/* x 1 */
	chip->capture_devno = 5;	/* x 2 */
	chip->num_devs = 7;
	chip->intr_mask = 0x33033333; /* FLAG|EOL for rec0-1, mc, sdx0-3 */

	/* PCM #0:  4 DSX playbacks and 1 capture */
	err = snd_pcm_new(chip->card, chip->card->shortname, 0, 4, 1, &pcm);
	if (err < 0)
		return err;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_via8233_playback_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_via8233_capture_ops);
	pcm->private_data = chip;
	strcpy(pcm->name, chip->card->shortname);
	chip->pcms[0] = pcm;
	/* set up playbacks */
	for (i = 0; i < 4; i++)
		init_viadev(chip, i, 0x10 * i, i, 0);
	/* capture */
	init_viadev(chip, chip->capture_devno, VIA_REG_CAPTURE_8233_STATUS, 6, 1);

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      snd_dma_pci_data(chip->pci),
					      64*1024, VIA_MAX_BUFSIZE);

	/* PCM #1:  multi-channel playback and 2nd capture */
	err = snd_pcm_new(chip->card, chip->card->shortname, 1, 1, 1, &pcm);
	if (err < 0)
		return err;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_via8233_multi_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_via8233_capture_ops);
	pcm->private_data = chip;
	strcpy(pcm->name, chip->card->shortname);
	chip->pcms[1] = pcm;
	/* set up playback */
	init_viadev(chip, chip->multi_devno, VIA_REG_MULTPLAY_STATUS, 4, 0);
	/* set up capture */
	init_viadev(chip, chip->capture_devno + 1, VIA_REG_CAPTURE_8233_STATUS + 0x10, 7, 1);

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      snd_dma_pci_data(chip->pci),
					      64*1024, VIA_MAX_BUFSIZE);
	return 0;
}

/*
 * create pcm instances for VIA8233A
 */
static int __devinit snd_via8233a_pcm_new(struct via82xx *chip)
{
	struct snd_pcm *pcm;
	int err;

	chip->multi_devno = 0;
	chip->playback_devno = 1;
	chip->capture_devno = 2;
	chip->num_devs = 3;
	chip->intr_mask = 0x03033000; /* FLAG|EOL for rec0, mc, sdx3 */

	/* PCM #0:  multi-channel playback and capture */
	err = snd_pcm_new(chip->card, chip->card->shortname, 0, 1, 1, &pcm);
	if (err < 0)
		return err;
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_via8233_multi_ops);
	snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_via8233_capture_ops);
	pcm->private_data = chip;
	strcpy(pcm->name, chip->card->shortname);
	chip->pcms[0] = pcm;
	/* set up playback */
	init_viadev(chip, chip->multi_devno, VIA_REG_MULTPLAY_STATUS, 4, 0);
	/* capture */
	init_viadev(chip, chip->capture_devno, VIA_REG_CAPTURE_8233_STATUS, 6, 1);

	snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG,
					      snd_dma_pci_data(chip->pci),
					      64*1024, VIA_MAX_BUFSIZE);

	/* SPDIF supported? */
	if (!
ac97_can_spdif(chip->ac97)) return 0; /* PCM #1: DXS3 playback (for spdif) */ err = snd_pcm_new(chip->card, chip->card->shortname, 1, 1, 0, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_via8233_playback_ops); pcm->private_data = chip; strcpy(pcm->name, chip->card->shortname); chip->pcms[1] = pcm; /* set up playback */ init_viadev(chip, chip->playback_devno, 0x30, 3, 0); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(chip->pci), 64*1024, VIA_MAX_BUFSIZE); return 0; } /* * create a pcm instance for via686a/b */ static int __devinit snd_via686_pcm_new(struct via82xx *chip) { struct snd_pcm *pcm; int err; chip->playback_devno = 0; chip->capture_devno = 1; chip->num_devs = 2; chip->intr_mask = 0x77; /* FLAG | EOL for PB, CP, FM */ err = snd_pcm_new(chip->card, chip->card->shortname, 0, 1, 1, &pcm); if (err < 0) return err; snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_PLAYBACK, &snd_via686_playback_ops); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_via686_capture_ops); pcm->private_data = chip; strcpy(pcm->name, chip->card->shortname); chip->pcms[0] = pcm; init_viadev(chip, 0, VIA_REG_PLAYBACK_STATUS, 0, 0); init_viadev(chip, 1, VIA_REG_CAPTURE_STATUS, 0, 1); snd_pcm_lib_preallocate_pages_for_all(pcm, SNDRV_DMA_TYPE_DEV_SG, snd_dma_pci_data(chip->pci), 64*1024, VIA_MAX_BUFSIZE); return 0; } /* * Mixer part */ static int snd_via8233_capture_source_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { /* formerly they were "Line" and "Mic", but it looks like that they * have nothing to do with the actual physical connections... 
*/
	/* enum labels exposed to user space; mapping to physical jacks is
	 * unknown (see comment above), hence the generic names */
	static char *texts[2] = { "Input1", "Input2" };

	uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED;
	uinfo->count = 1;
	uinfo->value.enumerated.items = 2;
	/* clamp the requested item index before using it as an array index */
	if (uinfo->value.enumerated.item >= 2)
		uinfo->value.enumerated.item = 1;
	strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]);
	return 0;
}

/*
 * Read back the capture source selection.
 * kcontrol->id.index selects the capture channel: index 0 uses the base
 * capture-channel register, index 1 the one at +0x10.  The MIC bit in that
 * register maps to enum item 1 ("Input2"), clear maps to item 0 ("Input1").
 */
static int snd_via8233_capture_source_get(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct via82xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned long port = chip->port + (kcontrol->id.index ?
					   (VIA_REG_CAPTURE_CHANNEL + 0x10) :
					   VIA_REG_CAPTURE_CHANNEL);
	ucontrol->value.enumerated.item[0] =
		inb(port) & VIA_REG_CAPTURE_CHANNEL_MIC ? 1 : 0;
	return 0;
}

/*
 * Change the capture source selection via read-modify-write of the MIC bit.
 * Holds reg_lock around the read-modify-write; the register is written only
 * when the value actually changes.  Returns 1 if the register was changed,
 * 0 otherwise (ALSA "changed" convention).
 */
static int snd_via8233_capture_source_put(struct snd_kcontrol *kcontrol,
					  struct snd_ctl_elem_value *ucontrol)
{
	struct via82xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned long port = chip->port + (kcontrol->id.index ?
					   (VIA_REG_CAPTURE_CHANNEL + 0x10) :
					   VIA_REG_CAPTURE_CHANNEL);
	u8 val, oval;

	spin_lock_irq(&chip->reg_lock);
	oval = inb(port);
	val = oval & ~VIA_REG_CAPTURE_CHANNEL_MIC;
	if (ucontrol->value.enumerated.item[0])
		val |= VIA_REG_CAPTURE_CHANNEL_MIC;
	if (val != oval)
		outb(val, port);
	spin_unlock_irq(&chip->reg_lock);
	return val != oval;
}

/* control template; one instance per capture channel is added with a
 * different .index by snd_via8233_init_misc() */
static struct snd_kcontrol_new snd_via8233_capture_source __devinitdata = {
	.name = "Input Source Select",
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = snd_via8233_capture_source_info,
	.get = snd_via8233_capture_source_get,
	.put = snd_via8233_capture_source_put,
};

/* boolean mono switch — reuse the generic info callback */
#define snd_via8233_dxs3_spdif_info	snd_ctl_boolean_mono_info

/*
 * Read the SPDIF-on-DXS3 switch state from the VIA8233 SPDIF control byte
 * in PCI config space (DX3 routing bit).
 */
static int snd_via8233_dxs3_spdif_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	struct via82xx *chip = snd_kcontrol_chip(kcontrol);
	u8 val;

	pci_read_config_byte(chip->pci, VIA8233_SPDIF_CTRL, &val);
	ucontrol->value.integer.value[0] = (val & VIA8233_SPDIF_DX3) ?
1 : 0;
	return 0;
}

/*
 * Toggle SPDIF output on the DXS3 channel.
 * Read-modify-write of the DX3 bit in the VIA8233 SPDIF control byte in
 * PCI config space.  Also mirrors the state into chip->spdif_on so the
 * PCM code can filter rates accordingly.  Returns 1 when the register
 * was actually changed, 0 otherwise.
 */
static int snd_via8233_dxs3_spdif_put(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	struct via82xx *chip = snd_kcontrol_chip(kcontrol);
	u8 val, oval;

	pci_read_config_byte(chip->pci, VIA8233_SPDIF_CTRL, &oval);
	val = oval & ~VIA8233_SPDIF_DX3;
	if (ucontrol->value.integer.value[0])
		val |= VIA8233_SPDIF_DX3;
	/* save the spdif flag for rate filtering */
	chip->spdif_on = ucontrol->value.integer.value[0] ? 1 : 0;
	if (val != oval) {
		pci_write_config_byte(chip->pci, VIA8233_SPDIF_CTRL, val);
		return 1;
	}
	return 0;
}

static struct snd_kcontrol_new snd_via8233_dxs3_spdif_control __devinitdata = {
	.name = SNDRV_CTL_NAME_IEC958("Output ",NONE,SWITCH),
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.info = snd_via8233_dxs3_spdif_info,
	.get = snd_via8233_dxs3_spdif_get,
	.put = snd_via8233_dxs3_spdif_put,
};

/* stereo volume control, 0..VIA_DXS_MAX_VOLUME (user-visible scale;
 * internally the hardware register stores the inverted attenuation) */
static int snd_via8233_dxs_volume_info(struct snd_kcontrol *kcontrol,
				       struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 2;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = VIA_DXS_MAX_VOLUME;
	return 0;
}

/*
 * Per-stream DXS volume read-back.  kcontrol->id.subdevice selects which of
 * the four DXS channels.  playback_volume[][] holds attenuation values, so
 * the user-visible volume is VIA_DXS_MAX_VOLUME minus the stored value.
 */
static int snd_via8233_dxs_volume_get(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	struct via82xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned int idx = kcontrol->id.subdevice;

	ucontrol->value.integer.value[0] =
		VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][0];
	ucontrol->value.integer.value[1] =
		VIA_DXS_MAX_VOLUME - chip->playback_volume[idx][1];
	return 0;
}

/* global "PCM" volume read-back — the common value applied to all DXS
 * channels (playback_volume_c[]), same attenuation inversion as above */
static int snd_via8233_pcmdxs_volume_get(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct via82xx *chip = snd_kcontrol_chip(kcontrol);
	ucontrol->value.integer.value[0] =
		VIA_DXS_MAX_VOLUME - chip->playback_volume_c[0];
	ucontrol->value.integer.value[1] =
		VIA_DXS_MAX_VOLUME - chip->playback_volume_c[1];
	return 0;
}

/*
 * Set the volume of one DXS channel (selected by id.subdevice): clamp the
 * requested value, convert it to the hardware attenuation, and write the
 * left/right volume registers of that channel's 0x10-sized register bank.
 */
static int snd_via8233_dxs_volume_put(struct snd_kcontrol *kcontrol,
				      struct snd_ctl_elem_value *ucontrol)
{
	struct via82xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned int
idx = kcontrol->id.subdevice;
	unsigned long port = chip->port + 0x10 * idx;	/* this channel's bank */
	unsigned char val;
	int i, change = 0;

	for (i = 0; i < 2; i++) {	/* left, then right */
		val = ucontrol->value.integer.value[i];
		if (val > VIA_DXS_MAX_VOLUME)
			val = VIA_DXS_MAX_VOLUME;
		/* convert user volume to hardware attenuation */
		val = VIA_DXS_MAX_VOLUME - val;
		change |= val != chip->playback_volume[idx][i];
		if (change) {
			chip->playback_volume[idx][i] = val;
			outb(val, port + VIA_REG_OFS_PLAYBACK_VOLUME_L + i);
		}
	}
	return change;
}

/*
 * Set the global "PCM" volume: the new attenuation is stored in
 * playback_volume_c[] and propagated to all four DXS channel register
 * banks.  Returns 1 if any channel value changed.
 */
static int snd_via8233_pcmdxs_volume_put(struct snd_kcontrol *kcontrol,
					 struct snd_ctl_elem_value *ucontrol)
{
	struct via82xx *chip = snd_kcontrol_chip(kcontrol);
	unsigned int idx;
	unsigned char val;
	int i, change = 0;

	for (i = 0; i < 2; i++) {	/* left, then right */
		val = ucontrol->value.integer.value[i];
		if (val > VIA_DXS_MAX_VOLUME)
			val = VIA_DXS_MAX_VOLUME;
		/* convert user volume to hardware attenuation */
		val = VIA_DXS_MAX_VOLUME - val;
		if (val != chip->playback_volume_c[i]) {
			change = 1;
			chip->playback_volume_c[i] = val;
			/* apply the common value to every DXS channel */
			for (idx = 0; idx < 4; idx++) {
				unsigned long port = chip->port + 0x10 * idx;
				chip->playback_volume[idx][i] = val;
				outb(val, port + VIA_REG_OFS_PLAYBACK_VOLUME_L + i);
			}
		}
	}
	return change;
}

/* dB scale: -46.50 dB .. 0 dB in 1.50 dB steps, with mute at minimum */
static const DECLARE_TLV_DB_SCALE(db_scale_dxs, -4650, 150, 1);

/* global PCM volume control, used when no h/w AC'97 PCM volume exists */
static struct snd_kcontrol_new snd_via8233_pcmdxs_volume_control __devinitdata = {
	.name = "PCM Playback Volume",
	.iface = SNDRV_CTL_ELEM_IFACE_MIXER,
	.access = (SNDRV_CTL_ELEM_ACCESS_READWRITE |
		   SNDRV_CTL_ELEM_ACCESS_TLV_READ),
	.info = snd_via8233_dxs_volume_info,
	.get = snd_via8233_pcmdxs_volume_get,
	.put = snd_via8233_pcmdxs_volume_put,
	.tlv = { .p = db_scale_dxs }
};

/* per-stream DXS volume control template; starts INACTIVE and gets its
 * .subdevice assigned when instantiated */
static struct snd_kcontrol_new snd_via8233_dxs_volume_control __devinitdata = {
	.iface = SNDRV_CTL_ELEM_IFACE_PCM,
	.device = 0,
	/* .subdevice set later */
	.name = "PCM Playback Volume",
	.access = SNDRV_CTL_ELEM_ACCESS_READWRITE |
		  SNDRV_CTL_ELEM_ACCESS_TLV_READ |
		  SNDRV_CTL_ELEM_ACCESS_INACTIVE,
	.info = snd_via8233_dxs_volume_info,
	.get = snd_via8233_dxs_volume_get,
	.put = snd_via8233_dxs_volume_put,
	.tlv = { .p = db_scale_dxs }
};

/*
 */
static void
snd_via82xx_mixer_free_ac97_bus(struct snd_ac97_bus *bus) { struct via82xx *chip = bus->private_data; chip->ac97_bus = NULL; } static void snd_via82xx_mixer_free_ac97(struct snd_ac97 *ac97) { struct via82xx *chip = ac97->private_data; chip->ac97 = NULL; } static struct ac97_quirk ac97_quirks[] = { { .subvendor = 0x1106, .subdevice = 0x4161, .codec_id = 0x56494161, /* VT1612A */ .name = "Soltek SL-75DRV5", .type = AC97_TUNE_NONE }, { /* FIXME: which codec? */ .subvendor = 0x1106, .subdevice = 0x4161, .name = "ASRock K7VT2", .type = AC97_TUNE_HP_ONLY }, { .subvendor = 0x110a, .subdevice = 0x0079, .name = "Fujitsu Siemens D1289", .type = AC97_TUNE_HP_ONLY }, { .subvendor = 0x1019, .subdevice = 0x0a81, .name = "ECS K7VTA3", .type = AC97_TUNE_HP_ONLY }, { .subvendor = 0x1019, .subdevice = 0x0a85, .name = "ECS L7VMM2", .type = AC97_TUNE_HP_ONLY }, { .subvendor = 0x1019, .subdevice = 0x1841, .name = "ECS K7VTA3", .type = AC97_TUNE_HP_ONLY }, { .subvendor = 0x1849, .subdevice = 0x3059, .name = "ASRock K7VM2", .type = AC97_TUNE_HP_ONLY /* VT1616 */ }, { .subvendor = 0x14cd, .subdevice = 0x7002, .name = "Unknown", .type = AC97_TUNE_ALC_JACK }, { .subvendor = 0x1071, .subdevice = 0x8590, .name = "Mitac Mobo", .type = AC97_TUNE_ALC_JACK }, { .subvendor = 0x161f, .subdevice = 0x202b, .name = "Arima Notebook", .type = AC97_TUNE_HP_ONLY, }, { .subvendor = 0x161f, .subdevice = 0x2032, .name = "Targa Traveller 811", .type = AC97_TUNE_HP_ONLY, }, { .subvendor = 0x161f, .subdevice = 0x2032, .name = "m680x", .type = AC97_TUNE_HP_ONLY, /* http://launchpad.net/bugs/38546 */ }, { .subvendor = 0x1297, .subdevice = 0xa232, .name = "Shuttle AK32VN", .type = AC97_TUNE_HP_ONLY }, { } /* terminator */ }; static int __devinit snd_via82xx_mixer_new(struct via82xx *chip, const char *quirk_override) { struct snd_ac97_template ac97; int err; static struct snd_ac97_bus_ops ops = { .write = snd_via82xx_codec_write, .read = snd_via82xx_codec_read, .wait = snd_via82xx_codec_wait, }; if ((err = 
snd_ac97_bus(chip->card, 0, &ops, chip, &chip->ac97_bus)) < 0) return err; chip->ac97_bus->private_free = snd_via82xx_mixer_free_ac97_bus; chip->ac97_bus->clock = chip->ac97_clock; memset(&ac97, 0, sizeof(ac97)); ac97.private_data = chip; ac97.private_free = snd_via82xx_mixer_free_ac97; ac97.pci = chip->pci; ac97.scaps = AC97_SCAP_SKIP_MODEM | AC97_SCAP_POWER_SAVE; if ((err = snd_ac97_mixer(chip->ac97_bus, &ac97, &chip->ac97)) < 0) return err; snd_ac97_tune_hardware(chip->ac97, ac97_quirks, quirk_override); if (chip->chip_type != TYPE_VIA686) { /* use slot 10/11 */ snd_ac97_update_bits(chip->ac97, AC97_EXTENDED_STATUS, 0x03 << 4, 0x03 << 4); } return 0; } #ifdef SUPPORT_JOYSTICK #define JOYSTICK_ADDR 0x200 static int __devinit snd_via686_create_gameport(struct via82xx *chip, unsigned char *legacy) { struct gameport *gp; struct resource *r; if (!joystick) return -ENODEV; r = request_region(JOYSTICK_ADDR, 8, "VIA686 gameport"); if (!r) { printk(KERN_WARNING "via82xx: cannot reserve joystick port 0x%#x\n", JOYSTICK_ADDR); return -EBUSY; } chip->gameport = gp = gameport_allocate_port(); if (!gp) { printk(KERN_ERR "via82xx: cannot allocate memory for gameport\n"); release_and_free_resource(r); return -ENOMEM; } gameport_set_name(gp, "VIA686 Gameport"); gameport_set_phys(gp, "pci%s/gameport0", pci_name(chip->pci)); gameport_set_dev_parent(gp, &chip->pci->dev); gp->io = JOYSTICK_ADDR; gameport_set_port_data(gp, r); /* Enable legacy joystick port */ *legacy |= VIA_FUNC_ENABLE_GAME; pci_write_config_byte(chip->pci, VIA_FUNC_ENABLE, *legacy); gameport_register_port(chip->gameport); return 0; } static void snd_via686_free_gameport(struct via82xx *chip) { if (chip->gameport) { struct resource *r = gameport_get_port_data(chip->gameport); gameport_unregister_port(chip->gameport); chip->gameport = NULL; release_and_free_resource(r); } } #else static inline int snd_via686_create_gameport(struct via82xx *chip, unsigned char *legacy) { return -ENOSYS; } static inline void 
snd_via686_free_gameport(struct via82xx *chip) { } #endif /* * */ static int __devinit snd_via8233_init_misc(struct via82xx *chip) { int i, err, caps; unsigned char val; caps = chip->chip_type == TYPE_VIA8233A ? 1 : 2; for (i = 0; i < caps; i++) { snd_via8233_capture_source.index = i; err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_via8233_capture_source, chip)); if (err < 0) return err; } if (ac97_can_spdif(chip->ac97)) { err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_via8233_dxs3_spdif_control, chip)); if (err < 0) return err; } if (chip->chip_type != TYPE_VIA8233A) { /* when no h/w PCM volume control is found, use DXS volume control * as the PCM vol control */ struct snd_ctl_elem_id sid; memset(&sid, 0, sizeof(sid)); strcpy(sid.name, "PCM Playback Volume"); sid.iface = SNDRV_CTL_ELEM_IFACE_MIXER; if (! snd_ctl_find_id(chip->card, &sid)) { snd_printd(KERN_INFO "Using DXS as PCM Playback\n"); err = snd_ctl_add(chip->card, snd_ctl_new1(&snd_via8233_pcmdxs_volume_control, chip)); if (err < 0) return err; } else /* Using DXS when PCM emulation is enabled is really weird */ { for (i = 0; i < 4; ++i) { struct snd_kcontrol *kctl; kctl = snd_ctl_new1( &snd_via8233_dxs_volume_control, chip); if (!kctl) return -ENOMEM; kctl->id.subdevice = i; err = snd_ctl_add(chip->card, kctl); if (err < 0) return err; chip->dxs_controls[i] = kctl; } } } /* select spdif data slot 10/11 */ pci_read_config_byte(chip->pci, VIA8233_SPDIF_CTRL, &val); val = (val & ~VIA8233_SPDIF_SLOT_MASK) | VIA8233_SPDIF_SLOT_1011; val &= ~VIA8233_SPDIF_DX3; /* SPDIF off as default */ pci_write_config_byte(chip->pci, VIA8233_SPDIF_CTRL, val); return 0; } static int __devinit snd_via686_init_misc(struct via82xx *chip) { unsigned char legacy, legacy_cfg; int rev_h = 0; legacy = chip->old_legacy; legacy_cfg = chip->old_legacy_cfg; legacy |= VIA_FUNC_MIDI_IRQMASK; /* FIXME: correct? 
(disable MIDI) */ legacy &= ~VIA_FUNC_ENABLE_GAME; /* disable joystick */ if (chip->revision >= VIA_REV_686_H) { rev_h = 1; if (mpu_port >= 0x200) { /* force MIDI */ mpu_port &= 0xfffc; pci_write_config_dword(chip->pci, 0x18, mpu_port | 0x01); #ifdef CONFIG_PM chip->mpu_port_saved = mpu_port; #endif } else { mpu_port = pci_resource_start(chip->pci, 2); } } else { switch (mpu_port) { /* force MIDI */ case 0x300: case 0x310: case 0x320: case 0x330: legacy_cfg &= ~(3 << 2); legacy_cfg |= (mpu_port & 0x0030) >> 2; break; default: /* no, use BIOS settings */ if (legacy & VIA_FUNC_ENABLE_MIDI) mpu_port = 0x300 + ((legacy_cfg & 0x000c) << 2); break; } } if (mpu_port >= 0x200 && (chip->mpu_res = request_region(mpu_port, 2, "VIA82xx MPU401")) != NULL) { if (rev_h) legacy |= VIA_FUNC_MIDI_PNP; /* enable PCI I/O 2 */ legacy |= VIA_FUNC_ENABLE_MIDI; } else { if (rev_h) legacy &= ~VIA_FUNC_MIDI_PNP; /* disable PCI I/O 2 */ legacy &= ~VIA_FUNC_ENABLE_MIDI; mpu_port = 0; } pci_write_config_byte(chip->pci, VIA_FUNC_ENABLE, legacy); pci_write_config_byte(chip->pci, VIA_PNP_CONTROL, legacy_cfg); if (chip->mpu_res) { if (snd_mpu401_uart_new(chip->card, 0, MPU401_HW_VIA686A, mpu_port, MPU401_INFO_INTEGRATED | MPU401_INFO_IRQ_HOOK, -1, &chip->rmidi) < 0) { printk(KERN_WARNING "unable to initialize MPU-401" " at 0x%lx, skipping\n", mpu_port); legacy &= ~VIA_FUNC_ENABLE_MIDI; } else { legacy &= ~VIA_FUNC_MIDI_IRQMASK; /* enable MIDI interrupt */ } pci_write_config_byte(chip->pci, VIA_FUNC_ENABLE, legacy); } snd_via686_create_gameport(chip, &legacy); #ifdef CONFIG_PM chip->legacy_saved = legacy; chip->legacy_cfg_saved = legacy_cfg; #endif return 0; } /* * proc interface */ static void snd_via82xx_proc_read(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct via82xx *chip = entry->private_data; int i; snd_iprintf(buffer, "%s\n\n", chip->card->longname); for (i = 0; i < 0xa0; i += 4) { snd_iprintf(buffer, "%02x: %08x\n", i, inl(chip->port + i)); } } static void __devinit 
snd_via82xx_proc_init(struct via82xx *chip) { struct snd_info_entry *entry; if (! snd_card_proc_new(chip->card, "via82xx", &entry)) snd_info_set_text_ops(entry, chip, snd_via82xx_proc_read); } /* * */ static int snd_via82xx_chip_init(struct via82xx *chip) { unsigned int val; unsigned long end_time; unsigned char pval; #if 0 /* broken on K7M? */ if (chip->chip_type == TYPE_VIA686) /* disable all legacy ports */ pci_write_config_byte(chip->pci, VIA_FUNC_ENABLE, 0); #endif pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval); if (! (pval & VIA_ACLINK_C00_READY)) { /* codec not ready? */ /* deassert ACLink reset, force SYNC */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_ENABLE | VIA_ACLINK_CTRL_RESET | VIA_ACLINK_CTRL_SYNC); udelay(100); #if 1 /* FIXME: should we do full reset here for all chip models? */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, 0x00); udelay(100); #else /* deassert ACLink reset, force SYNC (warm AC'97 reset) */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_RESET|VIA_ACLINK_CTRL_SYNC); udelay(2); #endif /* ACLink on, deassert ACLink reset, VSR, SGD data out */ /* note - FM data out has trouble with non VRA codecs !! */ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT); udelay(100); } /* Make sure VRA is enabled, in case we didn't do a * complete codec reset, above */ pci_read_config_byte(chip->pci, VIA_ACLINK_CTRL, &pval); if ((pval & VIA_ACLINK_CTRL_INIT) != VIA_ACLINK_CTRL_INIT) { /* ACLink on, deassert ACLink reset, VSR, SGD data out */ /* note - FM data out has trouble with non VRA codecs !! 
*/ pci_write_config_byte(chip->pci, VIA_ACLINK_CTRL, VIA_ACLINK_CTRL_INIT); udelay(100); } /* wait until codec ready */ end_time = jiffies + msecs_to_jiffies(750); do { pci_read_config_byte(chip->pci, VIA_ACLINK_STAT, &pval); if (pval & VIA_ACLINK_C00_READY) /* primary codec ready */ break; schedule_timeout_uninterruptible(1); } while (time_before(jiffies, end_time)); if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_BUSY) snd_printk(KERN_ERR "AC'97 codec is not ready [0x%x]\n", val); #if 0 /* FIXME: we don't support the second codec yet so skip the detection now.. */ snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ | VIA_REG_AC97_SECONDARY_VALID | (VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT)); end_time = jiffies + msecs_to_jiffies(750); snd_via82xx_codec_xwrite(chip, VIA_REG_AC97_READ | VIA_REG_AC97_SECONDARY_VALID | (VIA_REG_AC97_CODEC_ID_SECONDARY << VIA_REG_AC97_CODEC_ID_SHIFT)); do { if ((val = snd_via82xx_codec_xread(chip)) & VIA_REG_AC97_SECONDARY_VALID) { chip->ac97_secondary = 1; goto __ac97_ok2; } schedule_timeout_uninterruptible(1); } while (time_before(jiffies, end_time)); /* This is ok, the most of motherboards have only one codec */ __ac97_ok2: #endif if (chip->chip_type == TYPE_VIA686) { /* route FM trap to IRQ, disable FM trap */ pci_write_config_byte(chip->pci, VIA_FM_NMI_CTRL, 0); /* disable all GPI interrupts */ outl(0, VIAREG(chip, GPI_INTR)); } if (chip->chip_type != TYPE_VIA686) { /* Workaround for Award BIOS bug: * DXS channels don't work properly with VRA if MC97 is disabled. 
*/ struct pci_dev *pci; pci = pci_get_device(0x1106, 0x3068, NULL); /* MC97 */ if (pci) { unsigned char data; pci_read_config_byte(pci, 0x44, &data); pci_write_config_byte(pci, 0x44, data | 0x40); pci_dev_put(pci); } } if (chip->chip_type != TYPE_VIA8233A) { int i, idx; for (idx = 0; idx < 4; idx++) { unsigned long port = chip->port + 0x10 * idx; for (i = 0; i < 2; i++) { chip->playback_volume[idx][i]=chip->playback_volume_c[i]; outb(chip->playback_volume_c[i], port + VIA_REG_OFS_PLAYBACK_VOLUME_L + i); } } } return 0; } #ifdef CONFIG_PM /* * power management */ static int snd_via82xx_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct via82xx *chip = card->private_data; int i; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); for (i = 0; i < 2; i++) snd_pcm_suspend_all(chip->pcms[i]); for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); synchronize_irq(chip->irq); snd_ac97_suspend(chip->ac97); /* save misc values */ if (chip->chip_type != TYPE_VIA686) { pci_read_config_byte(chip->pci, VIA8233_SPDIF_CTRL, &chip->spdif_ctrl_saved); chip->capture_src_saved[0] = inb(chip->port + VIA_REG_CAPTURE_CHANNEL); chip->capture_src_saved[1] = inb(chip->port + VIA_REG_CAPTURE_CHANNEL + 0x10); } pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int snd_via82xx_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct via82xx *chip = card->private_data; int i; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "via82xx: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); snd_via82xx_chip_init(chip); if (chip->chip_type == TYPE_VIA686) { if (chip->mpu_port_saved) pci_write_config_dword(chip->pci, 0x18, chip->mpu_port_saved | 0x01); pci_write_config_byte(chip->pci, VIA_FUNC_ENABLE, 
chip->legacy_saved); pci_write_config_byte(chip->pci, VIA_PNP_CONTROL, chip->legacy_cfg_saved); } else { pci_write_config_byte(chip->pci, VIA8233_SPDIF_CTRL, chip->spdif_ctrl_saved); outb(chip->capture_src_saved[0], chip->port + VIA_REG_CAPTURE_CHANNEL); outb(chip->capture_src_saved[1], chip->port + VIA_REG_CAPTURE_CHANNEL + 0x10); } snd_ac97_resume(chip->ac97); for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif /* CONFIG_PM */ static int snd_via82xx_free(struct via82xx *chip) { unsigned int i; if (chip->irq < 0) goto __end_hw; /* disable interrupts */ for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); if (chip->irq >= 0) free_irq(chip->irq, chip); __end_hw: release_and_free_resource(chip->mpu_res); pci_release_regions(chip->pci); if (chip->chip_type == TYPE_VIA686) { snd_via686_free_gameport(chip); pci_write_config_byte(chip->pci, VIA_FUNC_ENABLE, chip->old_legacy); pci_write_config_byte(chip->pci, VIA_PNP_CONTROL, chip->old_legacy_cfg); } pci_disable_device(chip->pci); kfree(chip); return 0; } static int snd_via82xx_dev_free(struct snd_device *device) { struct via82xx *chip = device->device_data; return snd_via82xx_free(chip); } static int __devinit snd_via82xx_create(struct snd_card *card, struct pci_dev *pci, int chip_type, int revision, unsigned int ac97_clock, struct via82xx ** r_via) { struct via82xx *chip; int err; static struct snd_device_ops ops = { .dev_free = snd_via82xx_dev_free, }; if ((err = pci_enable_device(pci)) < 0) return err; if ((chip = kzalloc(sizeof(*chip), GFP_KERNEL)) == NULL) { pci_disable_device(pci); return -ENOMEM; } chip->chip_type = chip_type; chip->revision = revision; spin_lock_init(&chip->reg_lock); spin_lock_init(&chip->rates[0].lock); spin_lock_init(&chip->rates[1].lock); chip->card = card; chip->pci = pci; chip->irq = -1; pci_read_config_byte(pci, VIA_FUNC_ENABLE, &chip->old_legacy); 
pci_read_config_byte(pci, VIA_PNP_CONTROL, &chip->old_legacy_cfg); pci_write_config_byte(chip->pci, VIA_FUNC_ENABLE, chip->old_legacy & ~(VIA_FUNC_ENABLE_SB|VIA_FUNC_ENABLE_FM)); if ((err = pci_request_regions(pci, card->driver)) < 0) { kfree(chip); pci_disable_device(pci); return err; } chip->port = pci_resource_start(pci, 0); if (request_irq(pci->irq, chip_type == TYPE_VIA8233 ? snd_via8233_interrupt : snd_via686_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_via82xx_free(chip); return -EBUSY; } chip->irq = pci->irq; if (ac97_clock >= 8000 && ac97_clock <= 48000) chip->ac97_clock = ac97_clock; synchronize_irq(chip->irq); if ((err = snd_via82xx_chip_init(chip)) < 0) { snd_via82xx_free(chip); return err; } if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_via82xx_free(chip); return err; } /* The 8233 ac97 controller does not implement the master bit * in the pci command register. IMHO this is a violation of the PCI spec. * We call pci_set_master here because it does not hurt. */ pci_set_master(pci); snd_card_set_dev(card, &pci->dev); *r_via = chip; return 0; } struct via823x_info { int revision; char *name; int type; }; static struct via823x_info via823x_cards[] __devinitdata = { { VIA_REV_PRE_8233, "VIA 8233-Pre", TYPE_VIA8233 }, { VIA_REV_8233C, "VIA 8233C", TYPE_VIA8233 }, { VIA_REV_8233, "VIA 8233", TYPE_VIA8233 }, { VIA_REV_8233A, "VIA 8233A", TYPE_VIA8233A }, { VIA_REV_8235, "VIA 8235", TYPE_VIA8233 }, { VIA_REV_8237, "VIA 8237", TYPE_VIA8233 }, { VIA_REV_8251, "VIA 8251", TYPE_VIA8233 }, }; /* * auto detection of DXS channel supports. 
*/ static struct snd_pci_quirk dxs_whitelist[] __devinitdata = { SND_PCI_QUIRK(0x1005, 0x4710, "Avance Logic Mobo", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x1019, 0x0996, "ESC Mobo", VIA_DXS_48K), SND_PCI_QUIRK(0x1019, 0x0a81, "ECS K7VTA3 v8.0", VIA_DXS_NO_VRA), SND_PCI_QUIRK(0x1019, 0x0a85, "ECS L7VMM2", VIA_DXS_NO_VRA), SND_PCI_QUIRK_VENDOR(0x1019, "ESC K8", VIA_DXS_SRC), SND_PCI_QUIRK(0x1019, 0xaa01, "ESC K8T890-A", VIA_DXS_SRC), SND_PCI_QUIRK(0x1025, 0x0033, "Acer Inspire 1353LM", VIA_DXS_NO_VRA), SND_PCI_QUIRK(0x1025, 0x0046, "Acer Aspire 1524 WLMi", VIA_DXS_SRC), SND_PCI_QUIRK_VENDOR(0x1043, "ASUS A7/A8", VIA_DXS_NO_VRA), SND_PCI_QUIRK_VENDOR(0x1071, "Diverse Notebook", VIA_DXS_NO_VRA), SND_PCI_QUIRK(0x10cf, 0x118e, "FSC Laptop", VIA_DXS_ENABLE), SND_PCI_QUIRK_VENDOR(0x1106, "ASRock", VIA_DXS_SRC), SND_PCI_QUIRK(0x1297, 0xa231, "Shuttle AK31v2", VIA_DXS_SRC), SND_PCI_QUIRK(0x1297, 0xa232, "Shuttle", VIA_DXS_SRC), SND_PCI_QUIRK(0x1297, 0xc160, "Shuttle Sk41G", VIA_DXS_SRC), SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte GA-7VAXP", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x1462, 0x3800, "MSI KT266", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x1462, 0x7120, "MSI KT4V", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x1462, 0x7142, "MSI K8MM-V", VIA_DXS_ENABLE), SND_PCI_QUIRK_VENDOR(0x1462, "MSI Mobo", VIA_DXS_SRC), SND_PCI_QUIRK(0x147b, 0x1401, "ABIT KD7(-RAID)", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x147b, 0x1411, "ABIT VA-20", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x147b, 0x1413, "ABIT KV8 Pro", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x147b, 0x1415, "ABIT AV8", VIA_DXS_NO_VRA), SND_PCI_QUIRK(0x14ff, 0x0403, "Twinhead mobo", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x14ff, 0x0408, "Twinhead laptop", VIA_DXS_SRC), SND_PCI_QUIRK(0x1558, 0x4701, "Clevo D470", VIA_DXS_SRC), SND_PCI_QUIRK(0x1584, 0x8120, "Diverse Laptop", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x1584, 0x8123, "Targa/Uniwill", VIA_DXS_NO_VRA), SND_PCI_QUIRK(0x161f, 0x202b, "Amira Notebook", VIA_DXS_NO_VRA), SND_PCI_QUIRK(0x161f, 0x2032, "m680x machines", VIA_DXS_48K), 
SND_PCI_QUIRK(0x1631, 0xe004, "PB EasyNote 3174", VIA_DXS_ENABLE), SND_PCI_QUIRK(0x1695, 0x3005, "EPoX EP-8K9A", VIA_DXS_ENABLE), SND_PCI_QUIRK_VENDOR(0x1695, "EPoX mobo", VIA_DXS_SRC), SND_PCI_QUIRK_VENDOR(0x16f3, "Jetway K8", VIA_DXS_SRC), SND_PCI_QUIRK_VENDOR(0x1734, "FSC Laptop", VIA_DXS_SRC), SND_PCI_QUIRK(0x1849, 0x3059, "ASRock K7VM2", VIA_DXS_NO_VRA), SND_PCI_QUIRK_VENDOR(0x1849, "ASRock mobo", VIA_DXS_SRC), SND_PCI_QUIRK(0x1919, 0x200a, "Soltek SL-K8", VIA_DXS_NO_VRA), SND_PCI_QUIRK(0x4005, 0x4710, "MSI K7T266", VIA_DXS_SRC), { } /* terminator */ }; static int __devinit check_dxs_list(struct pci_dev *pci, int revision) { const struct snd_pci_quirk *w; w = snd_pci_quirk_lookup(pci, dxs_whitelist); if (w) { snd_printdd(KERN_INFO "via82xx: DXS white list for %s found\n", w->name); return w->value; } /* for newer revision, default to DXS_SRC */ if (revision >= VIA_REV_8235) return VIA_DXS_SRC; /* * not detected, try 48k rate only to be sure. */ printk(KERN_INFO "via82xx: Assuming DXS channels with 48k fixed sample rate.\n"); printk(KERN_INFO " Please try dxs_support=5 option\n"); printk(KERN_INFO " and report if it works on your machine.\n"); printk(KERN_INFO " For more details, read ALSA-Configuration.txt.\n"); return VIA_DXS_48K; }; static int __devinit snd_via82xx_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { struct snd_card *card; struct via82xx *chip; int chip_type = 0, card_type; unsigned int i; int err; err = snd_card_create(index, id, THIS_MODULE, 0, &card); if (err < 0) return err; card_type = pci_id->driver_data; switch (card_type) { case TYPE_CARD_VIA686: strcpy(card->driver, "VIA686A"); sprintf(card->shortname, "VIA 82C686A/B rev%x", pci->revision); chip_type = TYPE_VIA686; break; case TYPE_CARD_VIA8233: chip_type = TYPE_VIA8233; sprintf(card->shortname, "VIA 823x rev%x", pci->revision); for (i = 0; i < ARRAY_SIZE(via823x_cards); i++) { if (pci->revision == via823x_cards[i].revision) { chip_type = via823x_cards[i].type; 
strcpy(card->shortname, via823x_cards[i].name); break; } } if (chip_type != TYPE_VIA8233A) { if (dxs_support == VIA_DXS_AUTO) dxs_support = check_dxs_list(pci, pci->revision); /* force to use VIA8233 or 8233A model according to * dxs_support module option */ if (dxs_support == VIA_DXS_DISABLE) chip_type = TYPE_VIA8233A; else chip_type = TYPE_VIA8233; } if (chip_type == TYPE_VIA8233A) strcpy(card->driver, "VIA8233A"); else if (pci->revision >= VIA_REV_8237) strcpy(card->driver, "VIA8237"); /* no slog assignment */ else strcpy(card->driver, "VIA8233"); break; default: snd_printk(KERN_ERR "invalid card type %d\n", card_type); err = -EINVAL; goto __error; } if ((err = snd_via82xx_create(card, pci, chip_type, pci->revision, ac97_clock, &chip)) < 0) goto __error; card->private_data = chip; if ((err = snd_via82xx_mixer_new(chip, ac97_quirk)) < 0) goto __error; if (chip_type == TYPE_VIA686) { if ((err = snd_via686_pcm_new(chip)) < 0 || (err = snd_via686_init_misc(chip)) < 0) goto __error; } else { if (chip_type == TYPE_VIA8233A) { if ((err = snd_via8233a_pcm_new(chip)) < 0) goto __error; // chip->dxs_fixed = 1; /* FIXME: use 48k for DXS #3? 
*/ } else { if ((err = snd_via8233_pcm_new(chip)) < 0) goto __error; if (dxs_support == VIA_DXS_48K) chip->dxs_fixed = 1; else if (dxs_support == VIA_DXS_NO_VRA) chip->no_vra = 1; else if (dxs_support == VIA_DXS_SRC) { chip->no_vra = 1; chip->dxs_src = 1; } } if ((err = snd_via8233_init_misc(chip)) < 0) goto __error; } /* disable interrupts */ for (i = 0; i < chip->num_devs; i++) snd_via82xx_channel_reset(chip, &chip->devs[i]); snprintf(card->longname, sizeof(card->longname), "%s with %s at %#lx, irq %d", card->shortname, snd_ac97_get_short_name(chip->ac97), chip->port, chip->irq); snd_via82xx_proc_init(chip); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } pci_set_drvdata(pci, card); return 0; __error: snd_card_free(card); return err; } static void __devexit snd_via82xx_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = snd_via82xx_ids, .probe = snd_via82xx_probe, .remove = __devexit_p(snd_via82xx_remove), #ifdef CONFIG_PM .suspend = snd_via82xx_suspend, .resume = snd_via82xx_resume, #endif }; static int __init alsa_card_via82xx_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_via82xx_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_via82xx_init) module_exit(alsa_card_via82xx_exit)
gpl-2.0
Nico60/android_kernel_samsung_ks01lte
drivers/acpi/acpica/rsmemory.c
5149
7395
/******************************************************************************* * * Module Name: rsmem24 - Memory resource descriptors * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME("rsmemory") /******************************************************************************* * * acpi_rs_convert_memory24 * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_memory24[4] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY24, ACPI_RS_SIZE(struct acpi_resource_memory24), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory24)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY24, sizeof(struct aml_resource_memory24), 0}, /* Read/Write bit */ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory24.write_protect), AML_OFFSET(memory24.flags), 0}, /* * These fields are contiguous in both the source and destination: * Minimum Base Address * Maximum Base Address * Address Base Alignment * Range Length */ {ACPI_RSC_MOVE16, ACPI_RS_OFFSET(data.memory24.minimum), AML_OFFSET(memory24.minimum), 4} }; /******************************************************************************* * * acpi_rs_convert_memory32 * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_memory32[4] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_MEMORY32, ACPI_RS_SIZE(struct acpi_resource_memory32), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_memory32)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_MEMORY32, sizeof(struct aml_resource_memory32), 0}, /* Read/Write bit */ 
{ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.memory32.write_protect), AML_OFFSET(memory32.flags), 0}, /* * These fields are contiguous in both the source and destination: * Minimum Base Address * Maximum Base Address * Address Base Alignment * Range Length */ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.memory32.minimum), AML_OFFSET(memory32.minimum), 4} }; /******************************************************************************* * * acpi_rs_convert_fixed_memory32 * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_convert_fixed_memory32[4] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_FIXED_MEMORY32, ACPI_RS_SIZE(struct acpi_resource_fixed_memory32), ACPI_RSC_TABLE_SIZE(acpi_rs_convert_fixed_memory32)}, {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_FIXED_MEMORY32, sizeof(struct aml_resource_fixed_memory32), 0}, /* Read/Write bit */ {ACPI_RSC_1BITFLAG, ACPI_RS_OFFSET(data.fixed_memory32.write_protect), AML_OFFSET(fixed_memory32.flags), 0}, /* * These fields are contiguous in both the source and destination: * Base Address * Range Length */ {ACPI_RSC_MOVE32, ACPI_RS_OFFSET(data.fixed_memory32.address), AML_OFFSET(fixed_memory32.address), 2} }; /******************************************************************************* * * acpi_rs_get_vendor_small * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_get_vendor_small[3] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR, ACPI_RS_SIZE(struct acpi_resource_vendor), ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_small)}, /* Length of the vendor data (byte count) */ {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length), 0, sizeof(u8)} , /* Vendor data */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]), sizeof(struct aml_resource_small_header), 0} }; /******************************************************************************* * * acpi_rs_get_vendor_large * 
******************************************************************************/ struct acpi_rsconvert_info acpi_rs_get_vendor_large[3] = { {ACPI_RSC_INITGET, ACPI_RESOURCE_TYPE_VENDOR, ACPI_RS_SIZE(struct acpi_resource_vendor), ACPI_RSC_TABLE_SIZE(acpi_rs_get_vendor_large)}, /* Length of the vendor data (byte count) */ {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length), 0, sizeof(u8)} , /* Vendor data */ {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]), sizeof(struct aml_resource_large_header), 0} }; /******************************************************************************* * * acpi_rs_set_vendor * ******************************************************************************/ struct acpi_rsconvert_info acpi_rs_set_vendor[7] = { /* Default is a small vendor descriptor */ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_SMALL, sizeof(struct aml_resource_small_header), ACPI_RSC_TABLE_SIZE(acpi_rs_set_vendor)}, /* Get the length and copy the data */ {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length), 0, 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]), sizeof(struct aml_resource_small_header), 0}, /* * All done if the Vendor byte length is 7 or less, meaning that it will * fit within a small descriptor */ {ACPI_RSC_EXIT_LE, 0, 0, 7}, /* Must create a large vendor descriptor */ {ACPI_RSC_INITSET, ACPI_RESOURCE_NAME_VENDOR_LARGE, sizeof(struct aml_resource_large_header), 0}, {ACPI_RSC_COUNT16, ACPI_RS_OFFSET(data.vendor.byte_length), 0, 0}, {ACPI_RSC_MOVE8, ACPI_RS_OFFSET(data.vendor.byte_data[0]), sizeof(struct aml_resource_large_header), 0} };
gpl-2.0
Ander-Alvarez/ultracm13
arch/mips/loongson/common/gpio.c
8733
2863
/* * STLS2F GPIO Support * * Copyright (c) 2008 Richard Liu, STMicroelectronics <richard.liu@st.com> * Copyright (c) 2008-2010 Arnaud Patard <apatard@mandriva.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/err.h> #include <asm/types.h> #include <loongson.h> #include <linux/gpio.h> #define STLS2F_N_GPIO 4 #define STLS2F_GPIO_IN_OFFSET 16 static DEFINE_SPINLOCK(gpio_lock); int gpio_get_value(unsigned gpio) { u32 val; u32 mask; if (gpio >= STLS2F_N_GPIO) return __gpio_get_value(gpio); mask = 1 << (gpio + STLS2F_GPIO_IN_OFFSET); spin_lock(&gpio_lock); val = LOONGSON_GPIODATA; spin_unlock(&gpio_lock); return ((val & mask) != 0); } EXPORT_SYMBOL(gpio_get_value); void gpio_set_value(unsigned gpio, int state) { u32 val; u32 mask; if (gpio >= STLS2F_N_GPIO) { __gpio_set_value(gpio, state); return ; } mask = 1 << gpio; spin_lock(&gpio_lock); val = LOONGSON_GPIODATA; if (state) val |= mask; else val &= (~mask); LOONGSON_GPIODATA = val; spin_unlock(&gpio_lock); } EXPORT_SYMBOL(gpio_set_value); int gpio_cansleep(unsigned gpio) { if (gpio < STLS2F_N_GPIO) return 0; else return __gpio_cansleep(gpio); } EXPORT_SYMBOL(gpio_cansleep); static int ls2f_gpio_direction_input(struct gpio_chip *chip, unsigned gpio) { u32 temp; u32 mask; if (gpio >= STLS2F_N_GPIO) return -EINVAL; spin_lock(&gpio_lock); mask = 1 << gpio; temp = LOONGSON_GPIOIE; temp |= mask; LOONGSON_GPIOIE = temp; spin_unlock(&gpio_lock); return 0; } static int ls2f_gpio_direction_output(struct gpio_chip *chip, unsigned gpio, int level) { u32 temp; u32 mask; if (gpio >= STLS2F_N_GPIO) return -EINVAL; gpio_set_value(gpio, level); spin_lock(&gpio_lock); mask = 1 << gpio; temp = LOONGSON_GPIOIE; temp 
&= (~mask); LOONGSON_GPIOIE = temp; spin_unlock(&gpio_lock); return 0; } static int ls2f_gpio_get_value(struct gpio_chip *chip, unsigned gpio) { return gpio_get_value(gpio); } static void ls2f_gpio_set_value(struct gpio_chip *chip, unsigned gpio, int value) { gpio_set_value(gpio, value); } static struct gpio_chip ls2f_chip = { .label = "ls2f", .direction_input = ls2f_gpio_direction_input, .get = ls2f_gpio_get_value, .direction_output = ls2f_gpio_direction_output, .set = ls2f_gpio_set_value, .base = 0, .ngpio = STLS2F_N_GPIO, }; static int __init ls2f_gpio_setup(void) { return gpiochip_add(&ls2f_chip); } arch_initcall(ls2f_gpio_setup);
gpl-2.0
javilonas/Lonas_KL-GT-I9300-Sammy
arch/mips/alchemy/common/clocks.c
10269
3440
/* * BRIEF MODULE DESCRIPTION * Simple Au1xx0 clocks routines. * * Copyright 2001, 2008 MontaVista Software Inc. * Author: MontaVista Software, Inc. <source@mvista.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/spinlock.h> #include <asm/time.h> #include <asm/mach-au1x00/au1000.h> /* * I haven't found anyone that doesn't use a 12 MHz source clock, * but just in case..... */ #define AU1000_SRC_CLK 12000000 static unsigned int au1x00_clock; /* Hz */ static unsigned long uart_baud_base; /* * Set the au1000_clock */ void set_au1x00_speed(unsigned int new_freq) { au1x00_clock = new_freq; } unsigned int get_au1x00_speed(void) { return au1x00_clock; } EXPORT_SYMBOL(get_au1x00_speed); /* * The UART baud base is not known at compile time ... if * we want to be able to use the same code on different * speed CPUs. 
*/ unsigned long get_au1x00_uart_baud_base(void) { return uart_baud_base; } void set_au1x00_uart_baud_base(unsigned long new_baud_base) { uart_baud_base = new_baud_base; } /* * We read the real processor speed from the PLL. This is important * because it is more accurate than computing it from the 32 KHz * counter, if it exists. If we don't have an accurate processor * speed, all of the peripherals that derive their clocks based on * this advertised speed will introduce error and sometimes not work * properly. This function is further convoluted to still allow configurations * to do that in case they have really, really old silicon with a * write-only PLL register. -- Dan */ unsigned long au1xxx_calc_clock(void) { unsigned long cpu_speed; /* * On early Au1000, sys_cpupll was write-only. Since these * silicon versions of Au1000 are not sold by AMD, we don't bend * over backwards trying to determine the frequency. */ if (au1xxx_cpu_has_pll_wo()) cpu_speed = 396000000; else cpu_speed = (au_readl(SYS_CPUPLL) & 0x0000003f) * AU1000_SRC_CLK; /* On Alchemy CPU:counter ratio is 1:1 */ mips_hpt_frequency = cpu_speed; /* Equation: Baudrate = CPU / (SD * 2 * CLKDIV * 16) */ set_au1x00_uart_baud_base(cpu_speed / (2 * ((int)(au_readl(SYS_POWERCTRL) & 0x03) + 2) * 16)); set_au1x00_speed(cpu_speed); return cpu_speed; }
gpl-2.0
olegfusion/IM-A830S_kernel
drivers/video/output.c
11549
3547
/* * output.c - Display Output Switch driver * * Copyright (C) 2006 Luming Yu <luming.yu@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #include <linux/module.h> #include <linux/video_output.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/ctype.h> MODULE_DESCRIPTION("Display Output Switcher Lowlevel Control Abstraction"); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Luming Yu <luming.yu@intel.com>"); static ssize_t video_output_show_state(struct device *dev, struct device_attribute *attr, char *buf) { ssize_t ret_size = 0; struct output_device *od = to_output_device(dev); if (od->props) ret_size = sprintf(buf,"%.8x\n",od->props->get_status(od)); return ret_size; } static ssize_t video_output_store_state(struct device *dev, struct device_attribute *attr, const char *buf,size_t count) { char *endp; struct output_device *od = to_output_device(dev); int request_state = simple_strtoul(buf,&endp,0); size_t size = endp - buf; if (isspace(*endp)) size++; if (size != count) return -EINVAL; if (od->props) { od->request_state = request_state; od->props->set_state(od); } return count; } static void video_output_release(struct device *dev) { struct 
output_device *od = to_output_device(dev); kfree(od); } static struct device_attribute video_output_attributes[] = { __ATTR(state, 0644, video_output_show_state, video_output_store_state), __ATTR_NULL, }; static struct class video_output_class = { .name = "video_output", .dev_release = video_output_release, .dev_attrs = video_output_attributes, }; struct output_device *video_output_register(const char *name, struct device *dev, void *devdata, struct output_properties *op) { struct output_device *new_dev; int ret_code = 0; new_dev = kzalloc(sizeof(struct output_device),GFP_KERNEL); if (!new_dev) { ret_code = -ENOMEM; goto error_return; } new_dev->props = op; new_dev->dev.class = &video_output_class; new_dev->dev.parent = dev; dev_set_name(&new_dev->dev, name); dev_set_drvdata(&new_dev->dev, devdata); ret_code = device_register(&new_dev->dev); if (ret_code) { kfree(new_dev); goto error_return; } return new_dev; error_return: return ERR_PTR(ret_code); } EXPORT_SYMBOL(video_output_register); void video_output_unregister(struct output_device *dev) { if (!dev) return; device_unregister(&dev->dev); } EXPORT_SYMBOL(video_output_unregister); static void __exit video_output_class_exit(void) { class_unregister(&video_output_class); } static int __init video_output_class_init(void) { return class_register(&video_output_class); } postcore_initcall(video_output_class_init); module_exit(video_output_class_exit);
gpl-2.0
jfvelte-dev/android_kernel_samsung_jf
drivers/char/agp/alpha-agp.c
12061
5489
#include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/agp_backend.h> #include <linux/mm.h> #include <linux/slab.h> #include <asm/machvec.h> #include <asm/agp_backend.h> #include "../../../arch/alpha/kernel/pci_impl.h" #include "agp.h" static int alpha_core_agp_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { alpha_agp_info *agp = agp_bridge->dev_private_data; dma_addr_t dma_addr; unsigned long pa; struct page *page; dma_addr = (unsigned long)vmf->virtual_address - vma->vm_start + agp->aperture.bus_base; pa = agp->ops->translate(agp, dma_addr); if (pa == (unsigned long)-EINVAL) return VM_FAULT_SIGBUS; /* no translation */ /* * Get the page, inc the use count, and return it */ page = virt_to_page(__va(pa)); get_page(page); vmf->page = page; return 0; } static struct aper_size_info_fixed alpha_core_agp_sizes[] = { { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */ }; static const struct vm_operations_struct alpha_core_agp_vm_ops = { .fault = alpha_core_agp_vm_fault, }; static int alpha_core_agp_fetch_size(void) { return alpha_core_agp_sizes[0].size; } static int alpha_core_agp_configure(void) { alpha_agp_info *agp = agp_bridge->dev_private_data; agp_bridge->gart_bus_addr = agp->aperture.bus_base; return 0; } static void alpha_core_agp_cleanup(void) { alpha_agp_info *agp = agp_bridge->dev_private_data; agp->ops->cleanup(agp); } static void alpha_core_agp_tlbflush(struct agp_memory *mem) { alpha_agp_info *agp = agp_bridge->dev_private_data; alpha_mv.mv_pci_tbi(agp->hose, 0, -1); } static void alpha_core_agp_enable(struct agp_bridge_data *bridge, u32 mode) { alpha_agp_info *agp = bridge->dev_private_data; agp->mode.lw = agp_collect_device_status(bridge, mode, agp->capability.lw); agp->mode.bits.enable = 1; agp->ops->configure(agp); agp_device_command(agp->mode.lw, false); } static int alpha_core_agp_insert_memory(struct agp_memory *mem, off_t pg_start, int type) { alpha_agp_info *agp = agp_bridge->dev_private_data; int 
num_entries, status; void *temp; if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES) return -EINVAL; temp = agp_bridge->current_size; num_entries = A_SIZE_FIX(temp)->num_entries; if ((pg_start + mem->page_count) > num_entries) return -EINVAL; status = agp->ops->bind(agp, pg_start, mem); mb(); alpha_core_agp_tlbflush(mem); return status; } static int alpha_core_agp_remove_memory(struct agp_memory *mem, off_t pg_start, int type) { alpha_agp_info *agp = agp_bridge->dev_private_data; int status; status = agp->ops->unbind(agp, pg_start, mem); alpha_core_agp_tlbflush(mem); return status; } static int alpha_core_agp_create_free_gatt_table(struct agp_bridge_data *a) { return 0; } struct agp_bridge_driver alpha_core_agp_driver = { .owner = THIS_MODULE, .aperture_sizes = alpha_core_agp_sizes, .num_aperture_sizes = 1, .size_type = FIXED_APER_SIZE, .cant_use_aperture = true, .masks = NULL, .fetch_size = alpha_core_agp_fetch_size, .configure = alpha_core_agp_configure, .agp_enable = alpha_core_agp_enable, .cleanup = alpha_core_agp_cleanup, .tlb_flush = alpha_core_agp_tlbflush, .mask_memory = agp_generic_mask_memory, .cache_flush = global_cache_flush, .create_gatt_table = alpha_core_agp_create_free_gatt_table, .free_gatt_table = alpha_core_agp_create_free_gatt_table, .insert_memory = alpha_core_agp_insert_memory, .remove_memory = alpha_core_agp_remove_memory, .alloc_by_type = agp_generic_alloc_by_type, .free_by_type = agp_generic_free_by_type, .agp_alloc_page = agp_generic_alloc_page, .agp_alloc_pages = agp_generic_alloc_pages, .agp_destroy_page = agp_generic_destroy_page, .agp_destroy_pages = agp_generic_destroy_pages, .agp_type_to_mask_type = agp_generic_type_to_mask_type, }; struct agp_bridge_data *alpha_bridge; int __init alpha_core_agp_setup(void) { alpha_agp_info *agp = alpha_mv.agp_info(); struct pci_dev *pdev; /* faked */ struct aper_size_info_fixed *aper_size; if (!agp) return -ENODEV; if (agp->ops->setup(agp)) return -ENODEV; /* * Build the aperture size 
descriptor */ aper_size = alpha_core_agp_sizes; aper_size->size = agp->aperture.size / (1024 * 1024); aper_size->num_entries = agp->aperture.size / PAGE_SIZE; aper_size->page_order = __ffs(aper_size->num_entries / 1024); /* * Build a fake pci_dev struct */ pdev = alloc_pci_dev(); if (!pdev) return -ENOMEM; pdev->vendor = 0xffff; pdev->device = 0xffff; pdev->sysdata = agp->hose; alpha_bridge = agp_alloc_bridge(); if (!alpha_bridge) goto fail; alpha_bridge->driver = &alpha_core_agp_driver; alpha_bridge->vm_ops = &alpha_core_agp_vm_ops; alpha_bridge->current_size = aper_size; /* only 1 size */ alpha_bridge->dev_private_data = agp; alpha_bridge->dev = pdev; alpha_bridge->mode = agp->capability.lw; printk(KERN_INFO PFX "Detected AGP on hose %d\n", agp->hose->index); return agp_add_bridge(alpha_bridge); fail: kfree(pdev); return -ENOMEM; } static int __init agp_alpha_core_init(void) { if (agp_off) return -EINVAL; if (alpha_mv.agp_info) return alpha_core_agp_setup(); return -ENODEV; } static void __exit agp_alpha_core_cleanup(void) { agp_remove_bridge(alpha_bridge); agp_put_bridge(alpha_bridge); } module_init(agp_alpha_core_init); module_exit(agp_alpha_core_cleanup); MODULE_AUTHOR("Jeff Wiedemeier <Jeff.Wiedemeier@hp.com>"); MODULE_LICENSE("GPL and additional rights");
gpl-2.0
craigacgomez/kernel_samsung_manta
arch/blackfin/kernel/pseudodbg.c
13085
5423
/* The fake debug assert instructions * * Copyright 2010 Analog Devices Inc. * * Licensed under the GPL-2 or later */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/ptrace.h> const char * const greg_names[] = { "R0", "R1", "R2", "R3", "R4", "R5", "R6", "R7", "P0", "P1", "P2", "P3", "P4", "P5", "SP", "FP", "I0", "I1", "I2", "I3", "M0", "M1", "M2", "M3", "B0", "B1", "B2", "B3", "L0", "L1", "L2", "L3", "A0.X", "A0.W", "A1.X", "A1.W", "<res>", "<res>", "ASTAT", "RETS", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "<res>", "LC0", "LT0", "LB0", "LC1", "LT1", "LB1", "CYCLES", "CYCLES2", "USP", "SEQSTAT", "SYSCFG", "RETI", "RETX", "RETN", "RETE", "EMUDAT", }; static const char *get_allreg_name(int grp, int reg) { return greg_names[(grp << 3) | reg]; } /* * Unfortunately, the pt_regs structure is not laid out the same way as the * hardware register file, so we need to do some fix ups. * * CYCLES is not stored in the pt_regs structure - so, we just read it from * the hardware. 
* * Don't support: * - All reserved registers * - All in group 7 are (supervisors only) */ static bool fix_up_reg(struct pt_regs *fp, long *value, int grp, int reg) { long *val = &fp->r0; unsigned long tmp; /* Only do Dregs and Pregs for now */ if (grp == 5 || (grp == 4 && (reg == 4 || reg == 5)) || (grp == 7)) return false; if (grp == 0 || (grp == 1 && reg < 6)) val -= (reg + 8 * grp); else if (grp == 1 && reg == 6) val = &fp->usp; else if (grp == 1 && reg == 7) val = &fp->fp; else if (grp == 2) { val = &fp->i0; val -= reg; } else if (grp == 3 && reg >= 4) { val = &fp->l0; val -= (reg - 4); } else if (grp == 3 && reg < 4) { val = &fp->b0; val -= reg; } else if (grp == 4 && reg < 4) { val = &fp->a0x; val -= reg; } else if (grp == 4 && reg == 6) val = &fp->astat; else if (grp == 4 && reg == 7) val = &fp->rets; else if (grp == 6 && reg < 6) { val = &fp->lc0; val -= reg; } else if (grp == 6 && reg == 6) { __asm__ __volatile__("%0 = cycles;\n" : "=d"(tmp)); val = &tmp; } else if (grp == 6 && reg == 7) { __asm__ __volatile__("%0 = cycles2;\n" : "=d"(tmp)); val = &tmp; } *value = *val; return true; } #define PseudoDbg_Assert_opcode 0xf0000000 #define PseudoDbg_Assert_expected_bits 0 #define PseudoDbg_Assert_expected_mask 0xffff #define PseudoDbg_Assert_regtest_bits 16 #define PseudoDbg_Assert_regtest_mask 0x7 #define PseudoDbg_Assert_grp_bits 19 #define PseudoDbg_Assert_grp_mask 0x7 #define PseudoDbg_Assert_dbgop_bits 22 #define PseudoDbg_Assert_dbgop_mask 0x3 #define PseudoDbg_Assert_dontcare_bits 24 #define PseudoDbg_Assert_dontcare_mask 0x7 #define PseudoDbg_Assert_code_bits 27 #define PseudoDbg_Assert_code_mask 0x1f /* * DBGA - debug assert */ bool execute_pseudodbg_assert(struct pt_regs *fp, unsigned int opcode) { int expected = ((opcode >> PseudoDbg_Assert_expected_bits) & PseudoDbg_Assert_expected_mask); int dbgop = ((opcode >> (PseudoDbg_Assert_dbgop_bits)) & PseudoDbg_Assert_dbgop_mask); int grp = ((opcode >> (PseudoDbg_Assert_grp_bits)) & 
PseudoDbg_Assert_grp_mask); int regtest = ((opcode >> (PseudoDbg_Assert_regtest_bits)) & PseudoDbg_Assert_regtest_mask); long value; if ((opcode & 0xFF000000) != PseudoDbg_Assert_opcode) return false; if (!fix_up_reg(fp, &value, grp, regtest)) return false; if (dbgop == 0 || dbgop == 2) { /* DBGA ( regs_lo , uimm16 ) */ /* DBGAL ( regs , uimm16 ) */ if (expected != (value & 0xFFFF)) { pr_notice("DBGA (%s.L,0x%x) failure, got 0x%x\n", get_allreg_name(grp, regtest), expected, (unsigned int)(value & 0xFFFF)); return false; } } else if (dbgop == 1 || dbgop == 3) { /* DBGA ( regs_hi , uimm16 ) */ /* DBGAH ( regs , uimm16 ) */ if (expected != ((value >> 16) & 0xFFFF)) { pr_notice("DBGA (%s.H,0x%x) failure, got 0x%x\n", get_allreg_name(grp, regtest), expected, (unsigned int)((value >> 16) & 0xFFFF)); return false; } } fp->pc += 4; return true; } #define PseudoDbg_opcode 0xf8000000 #define PseudoDbg_reg_bits 0 #define PseudoDbg_reg_mask 0x7 #define PseudoDbg_grp_bits 3 #define PseudoDbg_grp_mask 0x7 #define PseudoDbg_fn_bits 6 #define PseudoDbg_fn_mask 0x3 #define PseudoDbg_code_bits 8 #define PseudoDbg_code_mask 0xff /* * DBG - debug (dump a register value out) */ bool execute_pseudodbg(struct pt_regs *fp, unsigned int opcode) { int grp, fn, reg; long value, value1; if ((opcode & 0xFF000000) != PseudoDbg_opcode) return false; opcode >>= 16; grp = ((opcode >> PseudoDbg_grp_bits) & PseudoDbg_reg_mask); fn = ((opcode >> PseudoDbg_fn_bits) & PseudoDbg_fn_mask); reg = ((opcode >> PseudoDbg_reg_bits) & PseudoDbg_reg_mask); if (fn == 3 && (reg == 0 || reg == 1)) { if (!fix_up_reg(fp, &value, 4, 2 * reg)) return false; if (!fix_up_reg(fp, &value1, 4, 2 * reg + 1)) return false; pr_notice("DBG A%i = %02lx%08lx\n", reg, value & 0xFF, value1); fp->pc += 2; return true; } else if (fn == 0) { if (!fix_up_reg(fp, &value, grp, reg)) return false; pr_notice("DBG %s = %08lx\n", get_allreg_name(grp, reg), value); fp->pc += 2; return true; } return false; }
gpl-2.0
Trinityhaxxor/platform_kernel_msm8x60_stock
arch/sh/kernel/nmi_debug.c
13597
1649
/* * Copyright (C) 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/delay.h> #include <linux/kdebug.h> #include <linux/notifier.h> #include <linux/sched.h> #include <linux/hardirq.h> enum nmi_action { NMI_SHOW_STATE = 1 << 0, NMI_SHOW_REGS = 1 << 1, NMI_DIE = 1 << 2, NMI_DEBOUNCE = 1 << 3, }; static unsigned long nmi_actions; static int nmi_debug_notify(struct notifier_block *self, unsigned long val, void *data) { struct die_args *args = data; if (likely(val != DIE_NMI)) return NOTIFY_DONE; if (nmi_actions & NMI_SHOW_STATE) show_state(); if (nmi_actions & NMI_SHOW_REGS) show_regs(args->regs); if (nmi_actions & NMI_DEBOUNCE) mdelay(10); if (nmi_actions & NMI_DIE) return NOTIFY_BAD; return NOTIFY_OK; } static struct notifier_block nmi_debug_nb = { .notifier_call = nmi_debug_notify, }; static int __init nmi_debug_setup(char *str) { char *p, *sep; register_die_notifier(&nmi_debug_nb); if (*str != '=') return 0; for (p = str + 1; *p; p = sep + 1) { sep = strchr(p, ','); if (sep) *sep = 0; if (strcmp(p, "state") == 0) nmi_actions |= NMI_SHOW_STATE; else if (strcmp(p, "regs") == 0) nmi_actions |= NMI_SHOW_REGS; else if (strcmp(p, "debounce") == 0) nmi_actions |= NMI_DEBOUNCE; else if (strcmp(p, "die") == 0) nmi_actions |= NMI_DIE; else printk(KERN_WARNING "NMI: Unrecognized action `%s'\n", p); if (!sep) break; } return 0; } __setup("nmi_debug", nmi_debug_setup);
gpl-2.0
VanirAOSP/kernel_samsung_jf
arch/avr32/boards/atstk1000/atstk1004.c
13597
3756
/* * ATSTK1003 daughterboard-specific init code * * Copyright (C) 2007 Atmel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/clk.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/string.h> #include <linux/types.h> #include <linux/spi/at73c213.h> #include <linux/spi/spi.h> #include <linux/atmel-mci.h> #include <video/atmel_lcdc.h> #include <asm/setup.h> #include <mach/at32ap700x.h> #include <mach/board.h> #include <mach/init.h> #include <mach/portmux.h> #include "atstk1000.h" /* Oscillator frequencies. These are board specific */ unsigned long at32_board_osc_rates[3] = { [0] = 32768, /* 32.768 kHz on RTC osc */ [1] = 20000000, /* 20 MHz on osc0 */ [2] = 12000000, /* 12 MHz on osc1 */ }; #ifdef CONFIG_BOARD_ATSTK1000_EXTDAC static struct at73c213_board_info at73c213_data = { .ssc_id = 0, .shortname = "AVR32 STK1000 external DAC", }; #endif #ifndef CONFIG_BOARD_ATSTK100X_SW1_CUSTOM static struct spi_board_info spi0_board_info[] __initdata = { #ifdef CONFIG_BOARD_ATSTK1000_EXTDAC { /* AT73C213 */ .modalias = "at73c213", .max_speed_hz = 200000, .chip_select = 0, .mode = SPI_MODE_1, .platform_data = &at73c213_data, }, #endif { /* QVGA display */ .modalias = "ltv350qv", .max_speed_hz = 16000000, .chip_select = 1, .mode = SPI_MODE_3, }, }; #endif #ifdef CONFIG_BOARD_ATSTK100X_SPI1 static struct spi_board_info spi1_board_info[] __initdata = { { /* patch in custom entries here */ } }; #endif #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM static struct mci_platform_data __initdata mci0_data = { .slot[0] = { .bus_width = 4, .detect_pin = -ENODEV, .wp_pin = -ENODEV, }, }; #endif #ifdef CONFIG_BOARD_ATSTK1000_EXTDAC static void __init atstk1004_setup_extdac(void) { struct clk *gclk; struct clk *pll; gclk = clk_get(NULL, "gclk0"); if 
(IS_ERR(gclk)) goto err_gclk; pll = clk_get(NULL, "pll0"); if (IS_ERR(pll)) goto err_pll; if (clk_set_parent(gclk, pll)) { pr_debug("STK1000: failed to set pll0 as parent for DAC clock\n"); goto err_set_clk; } at32_select_periph(GPIO_PIOA_BASE, (1 << 30), GPIO_PERIPH_A, 0); at73c213_data.dac_clk = gclk; err_set_clk: clk_put(pll); err_pll: clk_put(gclk); err_gclk: return; } #else static void __init atstk1004_setup_extdac(void) { } #endif /* CONFIG_BOARD_ATSTK1000_EXTDAC */ void __init setup_board(void) { #ifdef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM at32_map_usart(0, 1, 0); /* USART 0/B: /dev/ttyS1, IRDA */ #else at32_map_usart(1, 0, 0); /* USART 1/A: /dev/ttyS0, DB9 */ #endif /* USART 2/unused: expansion connector */ at32_map_usart(3, 2, 0); /* USART 3/C: /dev/ttyS2, DB9 */ at32_setup_serial_console(0); } static int __init atstk1004_init(void) { #ifdef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM at32_add_device_usart(1); #else at32_add_device_usart(0); #endif at32_add_device_usart(2); #ifndef CONFIG_BOARD_ATSTK100X_SW1_CUSTOM at32_add_device_spi(0, spi0_board_info, ARRAY_SIZE(spi0_board_info)); #endif #ifdef CONFIG_BOARD_ATSTK100X_SPI1 at32_add_device_spi(1, spi1_board_info, ARRAY_SIZE(spi1_board_info)); #endif #ifndef CONFIG_BOARD_ATSTK100X_SW2_CUSTOM at32_add_device_mci(0, &mci0_data); #endif at32_add_device_lcdc(0, &atstk1000_lcdc_data, fbmem_start, fbmem_size, ATMEL_LCDC_PRI_24BIT | ATMEL_LCDC_PRI_CONTROL); at32_add_device_usba(0, NULL); #ifndef CONFIG_BOARD_ATSTK100X_SW3_CUSTOM at32_add_device_ssc(0, ATMEL_SSC_TX); #endif atstk1000_setup_j2_leds(); atstk1004_setup_extdac(); return 0; } postcore_initcall(atstk1004_init);
gpl-2.0
djdeeles/android_kernel_lge_g3
sound/drivers/opl3/opl3_drums.c
14877
7192
/*
 *  Copyright (c) by Uros Bizjak <uros@kss-loka.si>
 *
 *  OPL2/OPL3/OPL4 FM routines for internal percussion channels
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include "opl3_voice.h"

extern char snd_opl3_regmap[MAX_OPL2_VOICES][4];

/*
 * Maps GM percussion key numbers 35..81 onto the OPL3 rhythm-mode drum
 * enable bits (written to the percussion register).  Index is (note - 35).
 * Read-only lookup data, hence const.
 */
static const char snd_opl3_drum_table[47] = {
	OPL3_BASSDRUM_ON,  OPL3_BASSDRUM_ON,  OPL3_HIHAT_ON,		/* 35 - 37 */
	OPL3_SNAREDRUM_ON, OPL3_HIHAT_ON,     OPL3_SNAREDRUM_ON,	/* 38 - 40 */
	OPL3_BASSDRUM_ON,  OPL3_HIHAT_ON,     OPL3_BASSDRUM_ON,		/* 41 - 43 */
	OPL3_HIHAT_ON,     OPL3_TOMTOM_ON,    OPL3_HIHAT_ON,		/* 44 - 46 */
	OPL3_TOMTOM_ON,    OPL3_TOMTOM_ON,    OPL3_CYMBAL_ON,		/* 47 - 49 */
	OPL3_TOMTOM_ON,    OPL3_CYMBAL_ON,    OPL3_CYMBAL_ON,		/* 50 - 52 */
	OPL3_CYMBAL_ON,    OPL3_CYMBAL_ON,    OPL3_CYMBAL_ON,		/* 53 - 55 */
	OPL3_HIHAT_ON,     OPL3_CYMBAL_ON,    OPL3_TOMTOM_ON,		/* 56 - 58 */
	OPL3_CYMBAL_ON,    OPL3_TOMTOM_ON,    OPL3_TOMTOM_ON,		/* 59 - 61 */
	OPL3_HIHAT_ON,     OPL3_TOMTOM_ON,    OPL3_TOMTOM_ON,		/* 62 - 64 */
	OPL3_TOMTOM_ON,    OPL3_TOMTOM_ON,    OPL3_TOMTOM_ON,		/* 65 - 67 */
	OPL3_TOMTOM_ON,    OPL3_HIHAT_ON,     OPL3_HIHAT_ON,		/* 68 - 70 */
	OPL3_HIHAT_ON,     OPL3_HIHAT_ON,     OPL3_TOMTOM_ON,		/* 71 - 73 */
	OPL3_TOMTOM_ON,    OPL3_TOMTOM_ON,    OPL3_TOMTOM_ON,		/* 74 - 76 */
	OPL3_TOMTOM_ON,    OPL3_TOMTOM_ON,    OPL3_TOMTOM_ON,		/* 77 - 79 */
	OPL3_CYMBAL_ON,    OPL3_CYMBAL_ON				/* 80 - 81 */
};

/* Per-operator register settings for one rhythm voice. */
struct snd_opl3_drum_voice {
	int voice;			/* OPL3 voice number (6..8) */
	int op;				/* operator within the voice (0 or 1) */
	unsigned char am_vib;
	unsigned char ksl_level;
	unsigned char attack_decay;
	unsigned char sustain_release;
	unsigned char feedback_connection;
	unsigned char wave_select;
};

/* Fixed pitch setting for one rhythm voice. */
struct snd_opl3_drum_note {
	int voice;			/* OPL3 voice number (6..8) */
	unsigned char fnum;		/* F-number low byte */
	unsigned char octave_f;		/* block/octave + F-number high bits */
	unsigned char feedback_connection;	/* unused here, zero-initialized */
};

/*
 * Fixed drum patch data; only ever read, so declared const.
 */
static const struct snd_opl3_drum_voice bass_op0 = {6, 0, 0x00, 0x32, 0xf8, 0x66, 0x30, 0x00};
static const struct snd_opl3_drum_voice bass_op1 = {6, 1, 0x00, 0x03, 0xf6, 0x57, 0x30, 0x00};
static const struct snd_opl3_drum_note bass_note = {6, 0x90, 0x09};

static const struct snd_opl3_drum_voice hihat = {7, 0, 0x00, 0x03, 0xf0, 0x06, 0x20, 0x00};

static const struct snd_opl3_drum_voice snare = {7, 1, 0x00, 0x03, 0xf0, 0x07, 0x20, 0x02};
static const struct snd_opl3_drum_note snare_note = {7, 0xf4, 0x0d};

static const struct snd_opl3_drum_voice tomtom = {8, 0, 0x02, 0x03, 0xf0, 0x06, 0x10, 0x00};
static const struct snd_opl3_drum_note tomtom_note = {8, 0xf4, 0x09};

static const struct snd_opl3_drum_voice cymbal = {8, 1, 0x04, 0x03, 0xf0, 0x06, 0x10, 0x00};

/*
 * Program one operator of a drum voice: writes the AM_VIB, KSL_LEVEL,
 * ATTACK_DECAY, SUSTAIN_RELEASE, FEEDBACK_CONNECTION and WAVE_SELECT
 * registers from the given patch data.
 */
static void snd_opl3_drum_voice_set(struct snd_opl3 *opl3,
				    const struct snd_opl3_drum_voice *data)
{
	unsigned char op_offset = snd_opl3_regmap[data->voice][data->op];
	unsigned char voice_offset = data->voice;
	unsigned short opl3_reg;

	/* Set OPL3 AM_VIB register */
	opl3_reg = OPL3_LEFT | (OPL3_REG_AM_VIB + op_offset);
	opl3->command(opl3, opl3_reg, data->am_vib);

	/* Set OPL3 KSL_LEVEL register */
	opl3_reg = OPL3_LEFT | (OPL3_REG_KSL_LEVEL + op_offset);
	opl3->command(opl3, opl3_reg, data->ksl_level);

	/* Set OPL3 ATTACK_DECAY register */
	opl3_reg = OPL3_LEFT | (OPL3_REG_ATTACK_DECAY + op_offset);
	opl3->command(opl3, opl3_reg, data->attack_decay);

	/* Set OPL3 SUSTAIN_RELEASE register */
	opl3_reg = OPL3_LEFT | (OPL3_REG_SUSTAIN_RELEASE + op_offset);
	opl3->command(opl3, opl3_reg, data->sustain_release);

	/* Set OPL3 FEEDBACK_CONNECTION register */
	opl3_reg = OPL3_LEFT | (OPL3_REG_FEEDBACK_CONNECTION + voice_offset);
	opl3->command(opl3, opl3_reg, data->feedback_connection);

	/* Select waveform */
	opl3_reg = OPL3_LEFT | (OPL3_REG_WAVE_SELECT + op_offset);
	opl3->command(opl3, opl3_reg, data->wave_select);
}

/*
 * Set a drum voice's fixed pitch (F-number and block/key-on register).
 */
static void snd_opl3_drum_note_set(struct snd_opl3 *opl3,
				   const struct snd_opl3_drum_note *data)
{
	unsigned char voice_offset = data->voice;
	unsigned short opl3_reg;

	/* Set OPL3 FNUM_LOW register */
	opl3_reg = OPL3_LEFT | (OPL3_REG_FNUM_LOW + voice_offset);
	opl3->command(opl3, opl3_reg, data->fnum);

	/* Set OPL3 KEYON_BLOCK register */
	opl3_reg = OPL3_LEFT | (OPL3_REG_KEYON_BLOCK + voice_offset);
	opl3->command(opl3, opl3_reg, data->octave_f);
}

/*
 * Set a drum voice's volume (from patch level + MIDI velocity) and its
 * stereo position (from the channel's GM pan value).
 */
static void snd_opl3_drum_vol_set(struct snd_opl3 *opl3,
				  const struct snd_opl3_drum_voice *data,
				  int vel, struct snd_midi_channel *chan)
{
	unsigned char op_offset = snd_opl3_regmap[data->voice][data->op];
	unsigned char voice_offset = data->voice;
	unsigned char reg_val;
	unsigned short opl3_reg;

	/* Set OPL3 KSL_LEVEL register: patch level scaled by velocity */
	reg_val = data->ksl_level;
	snd_opl3_calc_volume(&reg_val, vel, chan);
	opl3_reg = OPL3_LEFT | (OPL3_REG_KSL_LEVEL + op_offset);
	opl3->command(opl3, opl3_reg, reg_val);

	/* Set OPL3 FEEDBACK_CONNECTION register */
	/* Set output voice connection: start with both outputs enabled,
	 * then drop one side for hard-panned channels */
	reg_val = data->feedback_connection | OPL3_STEREO_BITS;
	if (chan->gm_pan < 43)
		reg_val &= ~OPL3_VOICE_TO_RIGHT;
	if (chan->gm_pan > 85)
		reg_val &= ~OPL3_VOICE_TO_LEFT;
	opl3_reg = OPL3_LEFT | (OPL3_REG_FEEDBACK_CONNECTION + voice_offset);
	opl3->command(opl3, opl3_reg, reg_val);
}

/*
 * Loads drum voices at init time: programs both bass-drum operators and
 * the hi-hat/snare/tom-tom/cymbal operators, plus the fixed pitches.
 */
void snd_opl3_load_drums(struct snd_opl3 *opl3)
{
	snd_opl3_drum_voice_set(opl3, &bass_op0);
	snd_opl3_drum_voice_set(opl3, &bass_op1);
	snd_opl3_drum_note_set(opl3, &bass_note);

	snd_opl3_drum_voice_set(opl3, &hihat);

	snd_opl3_drum_voice_set(opl3, &snare);
	snd_opl3_drum_note_set(opl3, &snare_note);

	snd_opl3_drum_voice_set(opl3, &tomtom);
	snd_opl3_drum_note_set(opl3, &tomtom_note);

	snd_opl3_drum_voice_set(opl3, &cymbal);
}

/*
 * Switch a drum voice on or off for a MIDI percussion note.
 *
 * On note-on, the matching drum's volume/pan is refreshed from the
 * velocity and channel pan, and its enable bit is set in the rhythm
 * register; on note-off the bit is cleared.  Notes outside 35..81 and
 * calls made while percussion mode is disabled are ignored.
 */
void snd_opl3_drum_switch(struct snd_opl3 *opl3, int note, int vel, int on_off,
			  struct snd_midi_channel *chan)
{
	unsigned char drum_mask;
	const struct snd_opl3_drum_voice *drum_voice;

	if (!(opl3->drum_reg & OPL3_PERCUSSION_ENABLE))
		return;

	if ((note < 35) || (note > 81))
		return;
	drum_mask = snd_opl3_drum_table[note - 35];

	if (on_off) {
		switch (drum_mask) {
		case OPL3_BASSDRUM_ON:
			/* bass drum volume lives on the carrier operator */
			drum_voice = &bass_op1;
			break;
		case OPL3_HIHAT_ON:
			drum_voice = &hihat;
			break;
		case OPL3_SNAREDRUM_ON:
			drum_voice = &snare;
			break;
		case OPL3_TOMTOM_ON:
			drum_voice = &tomtom;
			break;
		case OPL3_CYMBAL_ON:
			drum_voice = &cymbal;
			break;
		default:
			drum_voice = &tomtom;
		}

		snd_opl3_drum_vol_set(opl3, drum_voice, vel, chan);
		opl3->drum_reg |= drum_mask;
	} else {
		opl3->drum_reg &= ~drum_mask;
	}
	opl3->command(opl3, OPL3_LEFT | OPL3_REG_PERCUSSION, opl3->drum_reg);
}
gpl-2.0
Split-Screen/android_kernel_samsung_galaxys2plus-common
arch/arm/kernel/irq.c
30
4977
/*
 *  linux/arch/arm/kernel/irq.c
 *
 *  Copyright (C) 1992 Linus Torvalds
 *  Modifications for ARM processor Copyright (C) 1995-2000 Russell King.
 *
 *  Support for Dynamic Tick Timer Copyright (C) 2004-2005 Nokia Corporation.
 *  Dynamic Tick Timer written by Tony Lindgren <tony@atomide.com> and
 *  Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>.
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License version 2 as
 *  published by the Free Software Foundation.
 *
 *  This file contains the code used by various IRQ handling routines:
 *  asking for different IRQ's should be done through these routines
 *  instead of just grabbing them. Thus setups with different IRQ numbers
 *  shouldn't result in any weird surprises, and installing new handlers
 *  should be easier.
 *
 *  IRQ's are in fact implemented a bit like signal handlers for the kernel.
 *  Naturally it's not a 1:1 relation, but there are similarities.
 */
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/random.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/seq_file.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/ftrace.h>

#include <asm/system.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>

/* Broadcom vendor hook: kernel-log tracing of IRQ entry/exit. */
#ifdef CONFIG_BCM_KNLLOG_IRQ
#include <linux/broadcom/knllog.h>
#endif

/*
 * No architecture-specific irq_finish function defined in arm/arch/irqs.h.
 */
#ifndef irq_finish
#define irq_finish(irq) do { } while (0)
#endif

/* Count of spurious/bad interrupts, reported via arch_show_interrupts(). */
unsigned long irq_err_count;

/*
 * Append the ARM-specific rows (FIQ, IPI, local timers, error count) to
 * /proc/interrupts output.
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
#ifdef CONFIG_FIQ
	show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
	show_ipi_list(p, prec);
#endif
#ifdef CONFIG_LOCAL_TIMERS
	show_local_irqs(p, prec);
#endif
	seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
	return 0;
}

/*
 * do_IRQ handles all hardware IRQ's.  Decoded IRQs should not
 * come via this function.  Instead, they should provide their
 * own 'handler'
 */
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
#ifdef CONFIG_BCM_KNLLOG_IRQ
	/* NOTE(review): desc is computed before the irq >= nr_irqs check
	 * below; for a bad irq this points past irq_desc[].  It is only
	 * formatted into the trace message, never dereferenced, but the
	 * logged address is then meaningless — worth confirming intent. */
	struct irq_desc *desc = irq_desc + irq;
#endif
	/* Vendor power-management logging hook; prototype declared here
	 * in the function body rather than in a shared header. */
	void dpm_log_irq(u32 irq);
	dpm_log_irq(irq);
	irq_enter();

	/*
	 * Some hardware gives randomly wrong interrupts.  Rather
	 * than crashing, do something sensible.
	 */
	if (unlikely(irq >= nr_irqs)) {
		if (printk_ratelimit())
			printk(KERN_WARNING "Bad IRQ%u\n", irq);
		ack_bad_irq(irq);
	} else {
#ifdef CONFIG_BCM_KNLLOG_IRQ
		/* NOTE(review): the (int)desc cast logs a truncated pointer
		 * on any 64-bit build; harmless on 32-bit ARM. */
		if (gKnllogIrqSchedEnable & KNLLOG_IRQ)
			KNLLOG("in  [%d] (0x%x)\n", irq, (int)desc);
#endif
		generic_handle_irq(irq);
	}
#ifdef CONFIG_BCM_KNLLOG_IRQ
	if (gKnllogIrqSchedEnable & KNLLOG_IRQ)
		KNLLOG("out [%d] (0x%x)\n", irq, (int)desc);
#endif

	/* AT91 specific workaround */
	irq_finish(irq);

	irq_exit();
	set_irq_regs(old_regs);
}

/*
 * Translate IRQF_VALID/IRQF_PROBE/IRQF_NOAUTOEN flags into genirq
 * status bits for the given interrupt.
 */
void set_irq_flags(unsigned int irq, unsigned int iflags)
{
	unsigned long clr = 0, set = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN;

	if (irq >= nr_irqs) {
		printk(KERN_ERR "Trying to set irq flags for IRQ%d\n", irq);
		return;
	}

	if (iflags & IRQF_VALID)
		clr |= IRQ_NOREQUEST;
	if (iflags & IRQF_PROBE)
		clr |= IRQ_NOPROBE;
	if (!(iflags & IRQF_NOAUTOEN))
		clr |= IRQ_NOAUTOEN;
	/* Order is clear bits in "clr" then set bits in "set" */
	irq_modify_status(irq, clr, set & ~clr);
}

/* Delegate interrupt controller setup to the machine descriptor. */
void __init init_IRQ(void)
{
	machine_desc->init_irq();
}

#ifdef CONFIG_SPARSE_IRQ
/* Let the machine descriptor override the number of IRQs, if it cares. */
int __init arch_probe_nr_irqs(void)
{
	nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
	return nr_irqs;
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/*
 * Re-target one interrupt to a still-online CPU.  Returns true when the
 * original affinity mask had to be broken (no allowed CPU was online).
 */
static bool migrate_one_irq(struct irq_data *d)
{
	unsigned int cpu = cpumask_any_and(d->affinity, cpu_online_mask);
	bool ret = false;

	if (cpu >= nr_cpu_ids) {
		/* no CPU in the affinity mask is online: pick any one */
		cpu = cpumask_any(cpu_online_mask);
		ret = true;
	}

	pr_debug("IRQ%u: moving from cpu%u to cpu%u\n", d->irq, d->node, cpu);

	d->chip->irq_set_affinity(d, cpumask_of(cpu), true);

	return ret;
}

/*
 * The CPU has been marked offline.  Migrate IRQs off this CPU.  If
 * the affinity settings do not allow other CPUs, force them onto any
 * available CPU.
 */
void migrate_irqs(void)
{
	unsigned int i, cpu = smp_processor_id();
	struct irq_desc *desc;
	unsigned long flags;

	local_irq_save(flags);

	for_each_irq_desc(i, desc) {
		struct irq_data *d = &desc->irq_data;
		bool affinity_broken = false;

		raw_spin_lock(&desc->lock);
		/* do/while(0) gives a single break-out point under the lock */
		do {
			if (desc->action == NULL)
				break;

			/* only interrupts currently bound to this CPU */
			if (d->node != cpu)
				break;

			affinity_broken = migrate_one_irq(d);
		} while (0);
		raw_spin_unlock(&desc->lock);

		if (affinity_broken && printk_ratelimit())
			pr_warning("IRQ%u no longer affine to CPU%u\n",
			       i, cpu);
	}

	local_irq_restore(flags);
}
#endif /* CONFIG_HOTPLUG_CPU */
gpl-2.0
RadioFreeAsia/RDacity
lib-src/libnyquist/nyquist/cmt/midifns.c
30
51014
/***************************************************************************** * midifns.c * Copyright 1989 Carnegie Mellon University * Date | Change *-----------+----------------------------------------------------------------- * 29-Mar-88 | Created from IBM PC version of mpu.c * | Added settime() * 02-May-88 | AMIGA 2000 version. portable version. * 12-Oct-88 | JCD : Exclusive AMIGA Version. * 13-Apr-89 | JCD : New portable version. * 19-Apr-89 | JCD : Amiga CAMD Version added. * 5-Apr-91 | JDW : Further modification * 17-Feb-92 | GWL : incorporate JMN's new mpu.c * 8-Jun-92 | JDZ : add support for ITC midi interface * 16-Dec-92 | RBD : replace JMN's mpu.c with LMS's mpu.c * 11-Mar-94 | PLu : port to IRIX * 25-Apr-97 | RBD : it looks like SGI changed their interface. I * | made it compile again, but MIDI does not work, so * | took out calls to actually send/recv MIDI data * 28-Apr-03 | DM : Renamed random -> cmtrand, true->TRUE, false->FALSE * | Use features rather than system names in #ifdef's *****************************************************************************/ #include "switches.h" #ifdef UNIX #include <sys/resource.h> #include <sys/param.h> #ifndef OPEN_MAX /* this is here for compiling the UNIX version under AIX. This is a BSDism */ #define OPEN_MAX 2000 #endif /* OPEN_MAX */ #endif /* UNIX */ #ifdef UNIX_MACH #include "machmidi.h" #endif #ifdef AMIGA #ifdef AZTEC #include "functions.h" #endif /* AZTEC */ #include "midi/camd.h" #include "clib/camd_protos.h" /* note: azt_camd_pragmas.h was produced by running MAPFD on the * lib/camd_lib.fd file included in the CAMD disk from Commodore. 
* The "CamdClient" calls are manually removed from */ #ifdef AZTEC #include "pragmas/azt_camd_pragmas.h" #else /* !AZTEC */ #include "pragmas/lat_camd_pragmas.h" #endif /* AZTEC */ #include "camdmidi.h" #include "ctype.h" #endif /* AMIGA */ #ifdef UNIX_IRIX /* #define UNIX_IRIX_MIDIFNS -- this would enable actual midi I/O * if the actual midi I/O code worked */ /* IRIX changed the MIDI interface, * retain this for older systems: */ #ifdef UNIX_IRIX_MIDIFNS #include <dmedia/midi.h> #endif #endif #include "stdio.h" #include "cext.h" #include "midicode.h" #include "cmdline.h" #include "pitch.h" #include "midifns.h" #include "userio.h" #include "string.h" #ifdef MACINTOSH_OR_DOS #ifndef WINDOWS #include "midibuff.h" #endif #endif #ifdef UNIX_ITC /* was ITC */ #include "sys/param.h" /* since boolean is defined, block its definition in midistruct.h. * CMT defines boolean as ushort, but midistruct.h uses int. * This is not a problem on RS/6000s, but beware! */ /* the following would be included if we had the BSD switch set. 
I think we should try to avoid BSDisms; when fixed, the following should be removed */ #define NBBY 8 #include "sys/select.h" /* defines fd_set */ #define MIDI_HAS_BOOLEAN #include "midistruct.h" #include "cmtio.h" #endif /* UNIX_ITC */ #ifdef DOS #ifndef WINDOWS #include "timer.h" #include "mpu.h" #endif /* ifndef WINDOWS */ #endif /* ifdef DOS */ #ifndef BREAKTEST #define BREAKTEST #endif #ifdef __APPLE__ #include <sys/types.h> #include <sys/time.h> #include <errno.h> #else #ifdef UNIX #ifndef UNIX_IRIX #include "sys/time.h" #include "sys/timeb.h" #include "cmtio.h" #else #include <sys/types.h> #include <sys/time.h> #include <errno.h> #ifdef UNIX_IRIX_MIDIFNS #include <midi.h> #include <midiio.h> #endif /* UNIX_IRIX_MIDIFNS */ #endif /* UNIX_IRIX */ #endif /* UNIX */ #endif /* __APPLE__ */ #ifdef ITC static int ignore_realtime = 0; #endif /* ITC */ #ifdef MACINTOSH /* added for ThinkC 7: */ #include <OSUtils.h> /* port numbers are in the range 0..MAX_PORTS-1 */ #define CHANNELS_PER_PORT 16 #define MAX_PORTS ((MAX_CHANNELS + CHANNELS_PER_PORT - 1) / CHANNELS_PER_PORT) /* here are some MIDIMGR specific definitions */ #ifdef MIDIMGR #include "MIDI.h" #include "midimgr.h" #define TICKS_TO_MS(t) t #define MS_TO_TICKS(t) t #else /* here are some non-MIDIMGR definitions for the Mac */ /**************************************************************************** * * DMH: constants from macmidi.c * ****************************************************************************/ /* the modem port, also called port A */ #define portA 0 /* the printer port, also called port B */ #define portB 1 /* a tick is 1/60 of a second * * the following tables and routines are used to convert * between ticks and milliseconds */ #define TICKS_TO_MS(t) (((t) * 50) / 3) #define MS_TO_TICKS(t) (((t) * 3) / 50) #endif /* def MIDIMGR */ #endif /* def MACINTOSH */ #ifdef WINDOWS #define huge #endif /**************************************************************************** * * exported flags * 
****************************************************************************/ boolean miditrace = FALSE; /* enables printed trace of MIDI output */ boolean musictrace = FALSE; /* enables printed trace of commands */ #ifdef MACINTOSH_OR_DOS boolean ctrlFilter = TRUE; /* suppress continuous controller data */ boolean exclFilter = TRUE; /* suppress exclusive messages */ boolean realFilter = TRUE; /* suppress realtime messages */ #endif /**************************************************************************** * * exported variables * ****************************************************************************/ public int keyloud; /* set to velocity of last getkey event */ /* public long error; */ public short midi_error_flags = 0; /* The following midifns_syntax lists command line switches and options. Since these are machine dependent, use conditional compilation. Conditional compilation within a string is a bit tricky: you want to write "\" for line continuation within the string, but "\" gets eaten by the macro preprocessor. That's why we define macros like AMIGAINPORT. Regretably it doesn't work for all compilers. 
*/ /* Lattice and RT/Unix aren't happy expanding the embedded macros below, so I made a separate declaration of midifns_syntax for Unix */ #ifdef UNIX public char *midifns_syntax = "block<s>Turn off midi THRU;\ miditrace<s>Trace low-level midi functions;\ noalloff<s>Do not send alloff message when done;\ trace<s>Trace music operations;\ tune<o>Load a tuning file"; #else #ifdef MACINTOSH #ifdef MIDIMGR public char *midifns_syntax = "miditrace<s>Trace low-level midi functions;\ noalloff<s>Do not send alloff message when done;\ patch<s>Remember/reuse Midi Mgr patches;\ trace<s>Trace music operations;\ keep<s>Keep other processes running;\ tune<o>Load a tuning file"; #else /* no MIDIMGR */ public char *midifns_syntax = "miditrace<s>Trace low-level midi functions;\ noalloff<s>Do not send alloff message when done;\ patch<s>Remember/reuse Midi Mgr patches;\ trace<s>Trace music operations;\ tune<o>Load a tuning file"; #endif /* MIDIMGR */ #else #ifdef AMIGA public char *midifns_syntax = "block<s>Turn off midi THRU;\ inport<o>Inpur port number;\ miditrace<s>Trace low-level midi functions;\ noalloff<s>Do not send alloff message when done;\ outport<o>Output port number;\ trace<s>Trace music operations;\ tune<o>Load a tuning file"; #else /* not UNIX or MACINTOSH or MIDIMGR or AMIGA */ #ifdef DOS public char *midifns_syntax = "miditrace<s>Trace low-level midi functions;\ noalloff<s>Do not send alloff message when done;\ trace<s>Trace music operations;\ tune<o>Load a tuning file"; #endif /* DOS */ #endif /* AMIGA */ #endif /* MACINTOSH */ #endif /* UNIX */ #ifdef MACINTOSH boolean do_midi_thru = FALSE; /* exported: copy midi in to midi out */ #endif /**************************************************************************** * * local module variables * ****************************************************************************/ private int initialized = FALSE; /* set by musicinit, cleared by musicterm */ private boolean tune_flag = FALSE; /* set by musicinit, never cleared */ 
#ifdef DOS private boolean metroflag = FALSE; /* flag to turn on metronome */ #endif private int user_scale = FALSE; /* TRUE if user-defined scale */ private int bend[MAX_CHANNELS]; /* current pitch bend on channel */ short cur_midi_prgm[MAX_CHANNELS]; private pitch_table pit_tab[128]; /* scale definition */ #ifdef DOS private ulong timeoffset = 0; public boolean exclerr = FALSE; public byte xcodemask; /* mask (00 or FF) */ public byte xcode; /* mfr code */ #endif #ifdef MACINTOSH_OR_DOS boolean sysex_pending = FALSE; #endif #ifdef AMIGA #define CONTCONT ((CMF_Ctrl & ~CMF_CtrlSwitch) | CMF_PitchBend | \ CMF_ChanPress) #endif /* def AMIGA */ #ifdef UNIX private ulong timeoffset = 0; #endif #ifdef UNIX_IRIX_MIDIFNS static MIport *miport; static int ignore_realtime = 0; private byte *sysex_p; private int sysex_n; #endif #ifdef ITC mi_id midiconn; #endif #ifdef MACINTOSH private ulong ticksAtStart = 0L; /* clock ticks at time of last musicinit or timereset * ASSUME: tick clock never wraps. this is a good assumption, since * the tick clock is set to zero when the power is turned on and the * tick counter is 32 bits. the Macintosh would need to be on for * 828.5 days for the tick counter to wrap around! */ #endif /* def MACINTOSH */ /**************************************************************************** * * functions declared in this module * ****************************************************************************/ private void fixup(); private void midi_init(); extern boolean check_ascii(); /*userio.c*/ private void musicterm(); /**************************************************************************** * alloff * Inputs: * none * Effect: * Sends MIDI all notes off command on every channel. 
****************************************************************************/ #define ALL_NOTES_OFF 0x7B /*DMH: from macmidi.c*/ void alloff() { int c; if (!initialized) fixup(); if (musictrace) gprintf(TRANS,"alloff()\n"); for (c = 1; c <= MAX_CHANNELS; c++) { midi_write(3, MIDI_PORT(c), (byte) (0xb0 | MIDI_CHANNEL(c)), ALL_NOTES_OFF, 0); } } /*************************************************************** * eventwait * * Input : wakeup time, -1 means forever * Output : none * Return: none * Effect: waits until ascii or midi input or timeout ***************************************************************/ #ifdef UNIX_ITC void eventwait(timeout) long timeout; { struct timeval unix_timeout; struct timeval *waitspec = NULL; fd_set readfds; struct rlimit file_limit; FD_ZERO(&readfds); FD_SET(MI_CONNECTION(midiconn), &readfds); FD_SET(fileno(stdin), &readfds); if (timeout >= 0) { timeout -= gettime(); /* convert to millisecond delay */ unix_timeout.tv_sec = timeout / 1000; /* remainder become microsecs: */ unix_timeout.tv_usec = (timeout - (unix_timeout.tv_sec * 1000)) * 1000; waitspec = &unix_timeout; } getrlimit(RLIMIT_NOFILE, &file_limit); select(file_limit.rlim_max+1, &readfds, 0, 0, waitspec); return; } #else /* !UNIX_ITC */ #ifdef UNIX /* see machmidi.c for UNIX_MACH implementation */ #ifndef UNIX_MACH #ifdef UNIX_IRIX_MIDIFNS void eventwait(timeout) long timeout; { struct timeval unix_timeout; struct timeval *waitspec = NULL; fd_set readfds; FD_ZERO(&readfds); FD_SET(mdGetFd(miport), &readfds); FD_SET(fileno(stdin), &readfds); if (timeout >= 0) { timeout -= gettime(); /* convert to millisecond delay */ unix_timeout.tv_sec = timeout / 1000; /* remainder become microsecs: */ unix_timeout.tv_usec = (timeout - (unix_timeout.tv_sec * 1000)) * 1000; waitspec = &unix_timeout; } select(FD_SETSIZE, &readfds, 0, 0, waitspec); return; } #else #ifdef BUFFERED_SYNCHRONOUS_INPUT void eventwait(timeout) long timeout; { struct timeval unix_timeout; struct timeval *waitspec = 
NULL; struct rlimit file_limit; if (timeout >= 0) { timeout -= gettime(); /* convert to millisecond delay */ unix_timeout.tv_sec = timeout / 1000; /* remainder become microsecs: */ unix_timeout.tv_usec = (timeout - (unix_timeout.tv_sec * 1000)) * 1000; waitspec = &unix_timeout; getrlimit(RLIMIT_NOFILE, &file_limit); select(file_limit.rlim_max+1, 0, 0, 0, waitspec); } else { int c = getc(stdin); ungetc(c, stdin); } return; } #else void eventwait(timeout) long timeout; { struct timeval unix_timeout; struct timeval *waitspec = NULL; int readfds = 1 << IOinputfd; struct rlimit file_limit; if (timeout >= 0) { timeout -= gettime(); /* convert to millisecond delay */ unix_timeout.tv_sec = timeout / 1000; /* remainder become microsecs: */ unix_timeout.tv_usec = (timeout - (unix_timeout.tv_sec * 1000)) * 1000; waitspec = &unix_timeout; } getrlimit(RLIMIT_NOFILE, &file_limit); select(file_limit.rlim_max+1, &readfds, 0, 0, waitspec); return; } #endif /* BUFFERED_SYNCHRONOUS_INPUT */ #endif /* UNIX_IRIX */ #endif /* UNIX_MACH */ #endif /* UNIX */ /* I wanted to put an else here, but this confused a Unix C compiler */ #endif /* UNIX_ITC */ #ifdef AMIGA /* see camdmidi.c for Amiga implementation */ #else #ifndef UNIX /* since I couldn't use an else above, have to check UNIX here */ #ifdef WINDOWS void eventwait(timeout) long timeout; { if (timeout >= 0) { gprintf(TRANS, "eventwait: not implemented\n"); return; } else { int c = getc(stdin); ungetc(c, stdin); } return; } #else void eventwait(timeout) long timeout; { while (timeout > gettime() || timeout == -1) { if (check_ascii() || check_midi()) return; } } #endif /* WINDOWS */ #endif /* UNIX */ #endif /* AMIGA */ /**************************************************************************** * exclusive * Inputs: * boolean onflag -- set to TRUE to receive midi exclusive data * Effect: * Tells module to read exclusive messages into buffer ****************************************************************************/ void 
exclusive(boolean onflag) { if (!initialized) fixup(); if (musictrace) gprintf(TRANS, "exclusive: %d\n", onflag); #ifdef AMIGA if (onflag) SetMidiFilters(cmt_mi, cmt_mi->PortFilter, cmt_mi->TypeFilter | CMF_SysEx, cmt_mi->ChanFilter); else SetMidiFilters(cmt_mi, cmt_mi->PortFilter, cmt_mi->TypeFilter & ~CMF_SysEx, cmt_mi->ChanFilter); #endif #ifdef MACINTOSH_OR_DOS exclFilter = !onflag; #endif } /**************************************************************************** * fixup * Effect: * Print error message and call musicinit ****************************************************************************/ private void fixup() { gprintf(ERROR, "You forgot to call musicinit. I'll do it for you.\n"); musicinit(); } #ifdef UNIX_IRIX_MIDIFNS private void flush_sysex(void); #endif long get_excl(byte *buffer, long len) { long ret = 0; #ifdef UNIX_IRIX_MIDIFNS byte *sxp = sysex_p; long l = len; #endif #ifdef UNIX_ITC /* was ITC */ ret = mi_getx(midiconn, FALSE, len, (char *) buffer); #endif #ifdef UNIX_MACH ret = mi_getx(midiconn, FALSE, len, (unsigned char *)buffer); #endif #ifdef UNIX_IRIX_MIDIFNS if (!sysex_p) return 0; if (len > sysex_n) len = sysex_n; while (l--) { *buffer = *(sxp++); if (*(buffer++) == CMT_MIDI_EOX) { flush_sysex(); break; } } ret = len - l - 1; #endif #ifdef AMIGA ret = GetSysEx(cmt_mi, (UBYTE *) buffer, len); AMIGA_ERROR_CHECK; #endif #ifdef MACINTOSH_OR_DOS #ifndef WINDOWS /* I'm not sure the following line is a good thing: it forces the * caller to wait until a full sysex message is received and the * 1st 4 bytes are fetched via getbuf() before a sysex message can * be read via get_excl(). Without this, both mm.c and exget.c * were fetching ahead and getting out of sync with getbuf(). I * fixed mm.c and exget.c to work (by checking for EOX), but I added * this line (which should never have any effect) just to make the * DOS interface behave more like the Amiga and Mac interfaces. 
The * drawback is that you can't fetch bytes until the EOX is seen, * because nothing goes into the getbuf() buffer until then. */ if (!sysex_pending) return 0; while (len-- && (xbufhead != xbuftail)) { *buffer = xbuff[xbufhead++]; ret++; if (*buffer == MIDI_EOX) { sysex_pending = FALSE; break; } buffer++; xbufhead &= xbufmask; } #endif #endif return ret; } /**************************************************************************** * getbuf * Inputs: * boolean waitflag: TRUE if routine should wait for data * byte * p: Pointer to data destination * Result: boolean * TRUE if data was written to *p * FALSE if data not written to *p * Effect: * copies data from buffer to *p * will wait for buffer to become nonempty if waitflag is TRUE * * Modified 24 May 1988 for AMIGA (JCD) ****************************************************************************/ #ifdef UNIX_IRIX_MIDIFNS private void setup_sysex(MDevent *event, u_char *buffer); #endif /* UNIX_IRIX */ boolean getbuf(boolean waitflag, unsigned char * p) { #ifdef UNIX_IRIX_MIDIFNS MDevent event; int ret; #endif /* UNIX_IRIX */ if (!initialized) fixup(); #ifdef UNIX #ifdef UNIX_IRIX_MIDIFNS /* current IRIX version ignores the waitflag (it never waits) */ if (sysex_p) flush_sysex(); if (ignore_realtime == 0) { ret = mdReceive(miport, &event, 1); if (ret) { if (event.msg[0] != 0xF0) { *((u_long*) p) = *((u_long*) event.msg); } else { setup_sysex(&event, p); } } return ret; } else { do /* skip realtime messages */ { ret = mdReceive(miport, &event, 1); if (ret == -1) return ret; } while (event.msg[0] == 0xf8); if (event.msg[0] != 0xF0) { *((u_long*) p) = *((u_long*) event.msg); } else { setup_sysex(&event, p); } return ret; } #endif /* UNIX_IRIX */ #ifdef UNIX_ITC if (ignore_realtime == 0) { return(mi_get(midiconn, waitflag, (char *) p)); } else { boolean ret=false; /* filter out realtime msgs */ do { ret = mi_get(midiconn, waitflag, (char *) p); if (ret == FALSE) return(ret); } while(p[0] == 0xf8); return(ret); } #else 
/* UNIX_ITC */ #ifndef UNIX_IRIX if (waitflag) { gprintf(ERROR, "getbuf called with waitflag!"); EXIT(1); } return FALSE; #endif /* UNIX_IRIX */ #endif /* UNIX_ITC */ #endif /* UNIX */ #ifdef MACINTOSH_OR_DOS #ifndef WINDOWS if (sysex_pending) { /* flush sysex to keep buffers in sync */ while (xbuff[xbufhead++] != MIDI_EOX) { xbufhead &= xbufmask; if (xbufhead == xbuftail) break; } sysex_pending = FALSE; } if (waitflag) while (buffhead == bufftail) /* wait */ ; else if (buffhead == bufftail) return(false); *(long *)p = *(long *)(((char *)buff)+buffhead); buffhead = (buffhead + 4) & BUFF_MASK; if (*p == MIDI_SYSEX) { /* if sys-ex, remember to fetch from xbuff */ sysex_pending = TRUE; } return(true); #else return FALSE; #endif /* WINDOWS */ #endif /* MACINTOSH_OR_DOS */ #ifdef AMIGA if (waitflag) { do { WaitMidi(cmt_mi, &cmt_msg); AMIGA_ERROR_CHECK; } while (amigaerrflags); } else { AMIGA_ERROR_CHECK; if (!GetMidi(cmt_mi, &cmt_msg)) return(false); } *(long *)p = *(long *)&cmt_msg; clearmsg(cmt_msg); return(true); #endif /* AMIGA */ } #ifdef UNIX_IRIX_MIDIFNS private void setup_sysex(MDevent *event, u_char *buffer) /* N.B. 
do not leak memory remember to call free(sysex_p) */ { u_char *sxp = (u_char *) event->sysexmsg; int i; for (i=0;i<4;i++) *(buffer++) = *(sxp++); sysex_p = event->sysexmsg; sysex_n = event->msglen; } private void flush_sysex() { mdFree(sysex_p); sysex_p = 0; sysex_n = 0; } #endif #ifdef MACINTOSH_OR_DOS #ifndef WINDOWS public boolean check_midi() { if (buffhead == bufftail) return FALSE; else return TRUE; } #endif #endif /**************************************************************************** * getkey * Inputs: * boolean waitflag: TRUE if wait until key depression, FALSE if * return immediately * Result: int * key number of key which has been depressed * It returns -1 if waitflag is FALSE and no key has been pressed * If waitflag is TRUE this routine will block until a key is pressed * Effect: * reads a key ****************************************************************************/ /*DMH: in previous version, macmidi.c subtracted 12 from msg to get key at each occurence...*/ short getkey(boolean waitflag) { byte msg[4]; short k; if (!initialized) fixup(); while (TRUE) { /* process data until you find a note */ /* look for data and exit if none found */ /* NOTE: waitflag will force waiting until data arrives */ if (!getbuf(waitflag, msg)) { /* nothing there */ k = -1; break; } else if ((msg[0] & MIDI_CODE_MASK) == MIDI_ON_NOTE) { if (msg[2] == 0) { /* velocity 0 -> note off */ keyloud = 0; k = msg[1] + 128; } else { keyloud = msg[2]; k = msg[1]; } break; } else if ((msg[0] & MIDI_CODE_MASK) == MIDI_OFF_NOTE) { keyloud = 0; k = msg[1] + 128; break; } } if (musictrace) { if (k != -1) gprintf(TRANS,"getkey got %d\n", k); } return k; } /**************************************************************************** * gettime * Result: ulong * current timestamp since the last call to * musicinit or timereset * Effect: * fakes it ****************************************************************************/ ulong gettime() /*DMH: ulong is from mpu->midifns conversion, 
for Mac*/ { #if HAS_GETTIMEOFDAY struct timeval timeval; #endif #if HAS_FTIME struct timeb ftime_res; #endif register ulong ticks = 0L; BREAKTEST /* abort if user typed Ctrl Break */ if (!initialized) fixup(); #ifdef MACINTOSH #ifdef MIDIMGR ticks = MIDIGetCurTime(OutputRefNum) - ticksAtStart; #else ticks = TickCount() - ticksAtStart; #endif if (initialized) abort_check(); /* give user a chance to abort */ ticks = TICKS_TO_MS(ticks); #endif #ifdef AMIGA ticks = (*camdtime - timeoffset) << 1; /* return milliseconds */ #endif #ifdef DOS #ifndef WINDOWS ticks = elapsedtime(timeoffset, readtimer()); /* return milliseconds */ /* gprintf(TRANS, "currtime = %ld, timeoffset = %ld\n", currtime, timeoffset); */ #endif #endif /* ifdef DOS */ #if HAS_GETTIMEOFDAY gettimeofday(&timeval, 0); ticks = timeval.tv_sec * 1000 + timeval.tv_usec / 1000 - timeoffset; #endif #if HAS_FTIME ftime(&ftime_res); ticks = ((ftime_res.time - timeoffset) * 1000) + ftime_res.millitm; #endif /* if (miditrace) gprintf(TRANS, "."); */ return(ticks); } /**************************************************************************** * l_rest * Inputs: * long time: Amount of time to rest * Effect: * Waits until the amount of time specified has lapsed ****************************************************************************/ void l_rest(time) long time; { if (!initialized) fixup(); l_restuntil(time + gettime()); } /**************************************************************************** * l_restuntil * Inputs: * long time: Event time to rest until * Effect: * Waits until the specified time has been reached (absolute time) ****************************************************************************/ void l_restuntil(time) long time; { #ifdef MACINTOSH ulong now = gettime(); ulong junk; /* changed from ulong for ThinkC 7, back to ulong for CW5 */ #endif #ifdef AMIGA while (time > gettime()) eventwait(time); #else for(; (time_type) time > gettime();); #endif #ifdef MACINTOSH now = gettime(); if (time > 
now) Delay(MS_TO_TICKS(time - now), &junk); /* else time <= now, so return immediately */ #endif } /**************************************************************************** * metronome * Inputs: * boolean onflag: TRUE or FALSE * Effect: * enables (true) or disables (false) MPU-401 metronome function. * must be called before musicinit ****************************************************************************/ void metronome(boolean onflag) { #ifdef DOS metroflag = onflag; #endif } /**************************************************************************** * midi_bend * Inputs: * int channel: midi channel on which to send data * int value: pitch bend value * Effect: * Sends a midi pitch bend message ****************************************************************************/ void midi_bend(int channel, int value) { if (!initialized) fixup(); if (musictrace) gprintf(TRANS,"midi_bend: ch %d, val %d\n", channel, value - (1 << 13)); bend[MIDI_CHANNEL(channel)] = value; midi_write(3, MIDI_PORT(channel), (byte) (MIDI_BEND | MIDI_CHANNEL(channel)), (byte) MIDI_DATA(value), (byte) MIDI_DATA(value >> 7)); } /**************************************************************************** * midi_buffer * Inputs: * byte * buffer: the buffer address * int size: number of bytes in buffer * Returns: * FALSE if size is less than 16 or buffer is NULL, otherwise TRUE * Effect: DOS, MAC: * tells interrupt routine to store system exclusive messages in * buffer. The largest power of 2 bytes less than size will be * used. xbufhead and xbuftail will be initialized to zero, * and xbuftail will be one greater than the index of the last * system exclusive byte read. Since there may already be a buffer * and therefore the normal midi message buffer may have the first * 4 bytes of some sysex messages, clear the normal midi buffer too. 
* AMIGA: * adds buffer to midi interface * ****************************************************************************/ boolean midi_buffer(byte huge *buffer, ulong size) { if (!buffer) return FALSE; #ifdef AMIGA if (!SetSysExQueue(cmt_mi, (UBYTE *) buffer, (ULONG) size)) return(false); cu_register(remove_sysex_buffer, buffer); #endif #ifdef MACINTOSH_OR_DOS #ifndef WINDOWS { int mask = 0x000F; if (size < 16) return(false); while (mask < size && mask > 0) mask = ((mask << 1) | 1); midi_flush(); xbuff = NULL; /* turn off buffering */ xbufmask = mask >> 1; xbufhead = xbuftail = 0; xbuff = buffer; /* set buffer, turn on buffering */ } #endif #endif #ifdef UNIX return FALSE; #else exclusive(TRUE); return TRUE; #endif } /* midi_clock -- send a midi time clock message */ /**/ void midi_clock() { if (!initialized) fixup(); if (musictrace) gprintf(TRANS, "+"); midi_write(1, 0, MIDI_TIME_CLOCK, 0, 0); } /**************************************************************************** * midi_cont * Inputs: * boolean onflag: TRUE or FALSE * Effect: * enables (true) or disables (false) continuous control ****************************************************************************/ void midi_cont(boolean onflag) { if (!initialized) fixup(); if (onflag) { #ifdef AMIGA SetMidiFilters(cmt_mi, cmt_mi->PortFilter, cmt_mi->TypeFilter | CONTCONT, cmt_mi->ChanFilter); #endif #ifdef DOS #ifndef WINDOWS mPutCmd(BENDERON); #endif #endif } else { #ifdef AMIGA SetMidiFilters(cmt_mi, cmt_mi->PortFilter, cmt_mi->TypeFilter & ~CONTCONT, cmt_mi->ChanFilter); #endif } #ifdef MACINTOSH_OR_DOS ctrlFilter = !onflag; #endif if (musictrace) gprintf(TRANS,"midi_cont: %d\n", onflag); } /**************************************************************************** * midi_ctrl * Inputs: * int channel: midi channel on which to send data * int control: control number * int value: control value * Effect: * Sends a midi control change message 
****************************************************************************/ void midi_ctrl(int channel, int control, int value) { if (!initialized) fixup(); if (musictrace) gprintf(TRANS,"midi_ctrl: ch %d, ctrl %d, val %d\n", channel, control, value); midi_write(3, MIDI_PORT(channel), (byte) (MIDI_CTRL | MIDI_CHANNEL(channel)), (byte) MIDI_DATA(control), (byte) MIDI_DATA(value)); } /**************************************************************************** * midi_exclusive * Inputs: * byte *msg: pointer to a midi exclusive message, terminated by 0xF7 * Effect: * Sends a midi exclusive message * Bugs: * 18-mar-94 PLu : This function does not know which port to send to in * case of multiple midi-ports (MAC, IRIX) ****************************************************************************/ #ifdef MACINTOSH #define INTERBYTE_DELAY 10 #endif void midi_exclusive(msg) unsigned char *msg; /* the data to be sent */ { #ifdef ITC int count, done, tosend, willsend; unsigned char *m; mi_status ret; #endif #ifdef UNIX_IRIX_MIDIFNS unsigned char *m; MDevent mdevent; #endif #ifdef MACINTOSH #ifndef NYQUIST int i; /* for DX7 delay loop */ int count = 0; /* counter for formatting midi byte trace */ MIDIPacket TheMIDIPacket; unsigned char prev = 0; boolean first_packet = TRUE; #endif #endif /* * if user mistakenly called midi_exclusive instead of exclusive, * the argument will be TRUE or FALSE, both of which are highly * unlikely valid arguments for midi_exclusive: */ if (msg == (byte *) FALSE || msg == (byte *) TRUE) { gprintf(ERROR,"midi_exclusive: invalid argument %u.\n", msg); EXIT(1); } if (!initialized) fixup(); if (musictrace) gprintf(TRANS,"midi_exclusive\n"); #ifdef AMIGA PutSysEx(cmt_mi, msg); #endif #ifdef MACINTOSH #ifndef NYQUIST /* if NYQUIST, do nothing */ #ifdef MIDIMGR while (prev != MIDI_EOX) { int len = 0; while (prev != MIDI_EOX && len < 249) { TheMIDIPacket.data[len++] = prev = *msg++; } TheMIDIPacket.len = 6 + len; TheMIDIPacket.tStamp = 0; if (first_packet 
&& (prev != MIDI_EOX)) { TheMIDIPacket.flags = midiTimeStampCurrent + midiStartCont; first_packet = FALSE; } else if (first_packet) { TheMIDIPacket.flags = midiTimeStampCurrent + midiNoCont; } else if (prev == MIDI_EOX) { TheMIDIPacket.flags = midiTimeStampCurrent + midiEndCont; } else { TheMIDIPacket.flags = midiTimeStampCurrent + midiMidCont; } MIDIWritePacket(OutputRefNum, &TheMIDIPacket); } #else while (*msg != MIDI_EOX) { Xmit(0, *msg); msg++; count++; /* this is a delay loop, without which your DX7 will crash */ for (i = INTERBYTE_DELAY; i > 0; i--) abort_check(); } Xmit(0, MIDI_EOX); #endif /* MIDIMGR */ #endif /* NYQUIST */ #endif /* MACINTOSH */ #ifdef DOS #ifndef WINDOWS do { mPutData(*msg); } while (*msg++ != MIDI_EOX); #endif #endif #ifdef ITC for (m = msg, tosend = 1; (*m) != MIDI_EOX; m++, tosend++); for (count = 0; count < tosend; count += done) { willsend = min(16384, tosend); ret = mi_exclusive(midiconn, 1, msg, (short) willsend); if (ret != MI_SUCCESS) { gprintf(GWARN, "Got %d from mi_exclusive\n", ret); } done = willsend; } #endif #ifdef UNIX_IRIX_MIDIFNS /* we don't know which device to sent SYSEX messages to so port zero is assumed. 
*/ for (m = msg, mdevent.msglen = 1; (*m) != CMT_MIDI_EOX; m++, mdevent.msglen++); mdevent.sysexmsg = msg; if (mdSend(miport, &mdevent, 1) == -1) { gprintf(GWARN, "could not send SYSEX message\n"); } #endif if (miditrace) { do { gprintf(TRANS, "~%2x", *msg); #ifdef UNIX_IRIX_MIDIFNS } while (*msg++ != CMT_MIDI_EOX); #else } while (*msg++ != MIDI_EOX); #endif } } /**************************************************************************** * midi_note * Inputs: * int channel: midi channel on which to send data * int pitch: midi pitch code * int velocity: velocity with which to sound it (0=> release) * Effect: * Sends a midi note-play request out ****************************************************************************/ void midi_note(int channel, int pitch, int velocity) { if (!initialized) fixup(); if (musictrace) gprintf(TRANS,"midi_note: ch %d, key %d, vel %d\n", channel, pitch, velocity); if (user_scale) { /* check for correct pitch bend */ if ((pit_tab[pitch].pbend != bend[MIDI_CHANNEL(channel)]) && (velocity != 0)) { midi_bend(channel, pit_tab[pitch].pbend); bend[channel] = pit_tab[pitch].pbend; } pitch = pit_tab[pitch].ppitch; } midi_write(3, MIDI_PORT(channel), (byte) (MIDI_ON_NOTE | MIDI_CHANNEL(channel)), (byte) MIDI_DATA(pitch), (byte) MIDI_DATA(velocity)); } /**************************************************************************** * midi_program * Inputs: * int channel: Channel on which to send midi program change request * int program: Program number to send (decremented by 1 before * being sent as midi data) * Effect: * Sends a program change request out the channel ****************************************************************************/ void midi_program(int channel, int program) { #ifdef MACINTOSH int port, midi_chan; #endif if (!initialized) fixup(); if (musictrace) gprintf(TRANS,"midi_program: ch %d, prog %d\n", channel, program); channel = MIDI_CHANNEL(channel); if (cur_midi_prgm[channel] != program) { midi_write(2, MIDI_PORT(channel), 
(byte) (MIDI_CH_PROGRAM | channel), (byte) (MIDI_PROGRAM(program)), 0); cur_midi_prgm[channel] = program; } } /**************************************************************************** * midi_real * Inputs: * boolean onflag: TRUE or FALSE * Effect: * enables (true) or disables (false) midi realtime messages F8-FF ****************************************************************************/ void midi_real(boolean onflag) { if (!initialized) fixup(); #ifdef UNIX_ITC { mi_status ret; ret = mi_realtime(midiconn, onflag); if (ret != MI_SUCCESS) { gprintf(ERROR, "Warning: bad ret = %d in midi_real\n", ret); } } #endif /* UNIX_ITC */ #ifdef ITC ignore_realtime = !onflag; #endif /* ITC */ #ifdef AMIGA if (onflag) { SetMidiFilters(cmt_mi, cmt_mi->PortFilter, cmt_mi->TypeFilter | CMF_RealTime, cmt_mi->ChanFilter); } else { SetMidiFilters(cmt_mi, cmt_mi->PortFilter, cmt_mi->TypeFilter & ~CMF_RealTime, cmt_mi->ChanFilter); } #endif #ifdef MACINTOSH_OR_DOS realFilter = !onflag; #endif if (musictrace) gprintf(TRANS,"midi_real: %d\n", onflag); } /* midi_start -- send a midi start message */ /**/ void midi_start() { if (!initialized) fixup(); if (musictrace) gprintf(TRANS, "`"); midi_write(1, 0, MIDI_START, 0, 0); } /* midi_stop -- send a midi stop message */ /**/ void midi_stop() { if (!initialized) fixup(); if (musictrace) gprintf(TRANS, "'"); midi_write(1, 0 /* ignored */, MIDI_STOP, 0, 0); } /**************************************************************************** * midi_thru * Inputs: * boolean onflag: TRUE or FALSE * Effect: * DOS: enables (true) or disables (false) midi thru info from * MPU-401 to host. (Default is set; reset with cmdline -block.) * AMIGA: enables (true) or disables (false) midi route from AMIGA * midi input to AMIGA midi output. 
****************************************************************************/ void midi_thru(boolean onflag) /* DMH: midi thru is not supported on the MAC or DOS */ { if (!initialized) fixup(); #ifndef MIDI_THRU gprintf(ERROR, "midi_thru called but not implemented\n"); #else #ifdef AMIGA MidiThru(0L, (long) onflag); #endif #ifdef MACINTOSH /* this currently does not do anything - Mac driver doesn't * support THRU */ do_midi_thru = onflag; #endif #endif if (musictrace) gprintf(TRANS,"midi_thru: %d\n", onflag); } /**************************************************************************** * midi_touch * Inputs: * int channel: midi channel on which to send data * int value: control value * Effect: * Sends a midi after touch message ****************************************************************************/ void midi_touch(int channel, int value) { if (!initialized) fixup(); if (musictrace) gprintf(TRANS,"midi_touch: ch %d, val %d\n",channel,value); midi_write(2, MIDI_PORT(channel), (byte) (MIDI_TOUCH | MIDI_CHANNEL(channel)), (byte) MIDI_DATA(value), 0); } /**************************************************************************** * midi_write * Inputs: * UBYTE n: number of characters to send (1, 2 or 3); int port: the port number (usually 0), on MAC, this may be 1 * char c1,c2,c3: Character(s) to write to MIDI data port * Effect: * Writes the data to the serial interface designated by port **************************************************************************** * Change log * Date | Change *-----------+---------------------------------------------------------------- * 15-Mar-94 | PLu : Added IRIX version ****************************************************************************/ #ifdef UNIX #ifdef UNIX_IRIX_MIDIFNS void midi_write(int n, int port, unsigned char c1, unsigned char c2, unsigned char c3) { MDevent event; if (port < 0) return; * ((u_long *) event.msg) = 0xe0000000 | ((port & 0x1f) << 24) | (c1 << 16) | (c2 << 8) | c3; if (mdSend(miport, &event, 
1) == -1) gprintf(ERROR, "Can not send midi message in midi_write"); midi_write_trace(n, port, c1, c2, c3); } #else #ifdef ITC void midi_write(int n, int port, unsigned char c1, unsigned char c2, unsigned char c3) { unsigned char outb[3]; mi_channel mch; mi_status ret; if (port < 0) return; outb[0] = c1; outb[1] = c2; outb[2] = c3; mch = (16*port)+((int)MI_CHANNEL(c1)); ret = mi_put(midiconn, mch, outb); if (ret != MI_SUCCESS) gprintf(ERROR, "Warning: bad ret = %d in midi_write\n", (int)ret); midi_write_trace(n, port, c1, c2, c3); } #else void midi_write(int n, int port, unsigned char c1, unsigned char c2, unsigned char c3) { /* no output */ midi_write_trace(n, port, c1, c2, c3); } #endif /* ITC */ #endif /* UNIX_IRIX */ #endif /* UNIX */ #ifdef DOS #ifndef WINDOWS void midi_write(int n, int port, unsigned char c1, unsigned char c2, unsigned char c3) { if (n >= 1) mPutData(c1); if (n >= 2) mPutData(c2); if (n >= 3) mPutData(c3); midi_write_trace(n, port, c1, c2, c3); } #else void midi_write(int n, int port, unsigned char c1, unsigned char c2, unsigned char c3) { midi_write_trace(n, port, c1, c2, c3); } #endif #endif #ifdef MACINTOSH #ifdef MIDIMGR void midi_write(int n, int port, unsigned char c1, unsigned char c2, unsigned char c3) { MIDIPacket TheMIDIPacket; TheMIDIPacket.flags = midiTimeStampCurrent; TheMIDIPacket.len = 6 + n; TheMIDIPacket.tStamp = 0; TheMIDIPacket.data[0] = c1; TheMIDIPacket.data[1] = c2; TheMIDIPacket.data[2] = c3; MIDIWritePacket(OutputRefNum, &TheMIDIPacket); midi_write_trace(n, port, c1, c2, c3); } #else void midi_write(int n, int port, unsigned char c1, unsigned char c2, unsigned char c3) { #ifndef NYQUIST Xmit(port, c1); if (n >= 2) Xmit(port, c2); if (n >= 3) Xmit(port, c3); #endif midi_write_trace(n, port, c1, c2, c3); } #endif #endif void midi_write_trace(int n, int port, unsigned char c1, unsigned char c2, unsigned char c3) { if (miditrace) { /* to indicate bytes going out on port 1, put message in brackets * with the port number, 
e.g. [1:~90~3c~64] */ if (port > 0) gprintf(TRANS, "[%d:", port); if (n >= 1) gprintf(TRANS, "~%2x", c1); if (n >= 2) gprintf(TRANS, "~%2x", c2); if (n >= 3) gprintf(TRANS, "~%2x", c3); if (port > 0) gprintf(TRANS, "]", port); } } /***************************************************************** * set_pitch_default *****************************************************************/ private void set_pitch_default() { int i; for (i = 0; i < 128; i++) { pit_tab[i].pbend = 8192; pit_tab[i].ppitch = i; } } /***************************************************************** * read_tuning *****************************************************************/ void read_tuning(filename) char *filename; { int index, pit, lineno = 0; float bend; FILE *fpp; user_scale = TRUE; set_pitch_default(); fpp = fileopen(filename, "tun", "r", "Tuning definition file"); while ((fscanf(fpp, "%d %d %f\n", &index, &pit, &bend) > 2) && (lineno < 128)) { lineno++; if (index >= 0 && index <= 127) { pit_tab[index].pbend = (int)(8192 * bend/100 + 8192); pit_tab[index].ppitch = pit; } } } /**************************************************************************** * musicinit * Effect: ****************************************************************************/ void musicinit() { int i; char *filename; if (!tune_flag) { /* do this code only once */ miditrace = cl_switch("miditrace"); musictrace = cl_switch("trace"); } if (!initialized) { cu_register((cu_fn_type) musicterm, NULL); midi_init(); } initialized = TRUE; /* this does some random cleanup activity */ #ifndef APPLICATION if (!tune_flag) { /* do this code only once */ #ifdef DOS #ifndef WINDOWS #if 0 version = mPutGetCmd(GETMPUVER); revision = mPutGetCmd(GETMPUREV); gprintf(TRANS, "MPU version %d.%d%c\n", version >> 4, version & 0x0f, revision + 'A' - 1); #endif mPutCmd(UARTMODE); mPutCmd(NOREALTIME); /* initially prevent Real Time MIDI info */ mPutCmd(EXCLUSIVOFF); /* initially prevent Sys-Ex data */ #endif #endif tune_flag = TRUE; filename = 
cl_option("tune"); if (filename != NULL) read_tuning(filename); } /* now that flags are set, print the trace message */ if (musictrace) gprintf(TRANS, "musicinit()\n"); if (user_scale) { for (i = 0; i < MAX_CHANNELS; i++) { midi_bend(i, 8192); bend[i] = 8192; } } #endif /* ifndef APPLICATION */ for (i = 0; i < MAX_CHANNELS; i++) { /* initialize to impossible values so that the * next call to midi_bend or midi_program will * not match and therefore send an output: */ bend[i] = -1; cur_midi_prgm[i] = -1; } #ifdef MIDI_THRU midi_thru(!(cl_switch("block"))); /* set MIDI thru */ #endif timereset(); /* Reset clock */ #ifdef AMIGA event_mask |= (1L << ascii_signal()) | (1L << cmt_mi->AlarmSigBit) | (1L << cmt_mi->RecvSigBit); #endif } /**************************************************************************** * musicterm * Effect: * Miscellaneous cleanup of things done by musicinit. ****************************************************************************/ private void musicterm() { if (musictrace) gprintf(TRANS, "musicterm()\n"); initialized = FALSE; } /**************************************************************************** * cmtrand * Inputs: * int lo: Lower limit of value * int hi: Upper limit of value * Result: int * random number (lo <= result <= hi) ****************************************************************************/ long randseed = 1534781L; short cmtrand(short lo, short hi) { randseed *= 13L; randseed += 1874351L; return((short)(lo + (((hi + 1 - lo) * ((0x00ffff00 & randseed) >> 8)) >> 16))); } #ifdef AMIGA /* remove_sysex_buffer -- a cleanup procedure for the Amiga */ /**/ void remove_sysex_buffer(void *obj) { ClearSysExQueue(cmt_mi); } #endif /* AMIGA */ /**************************************************************************** * settime * Inputs: new time * Effect: * Sets the current time to the new time. 
* DMH: for MAC, sets the clock to absTime * implemented by adjusting ticksATStart ****************************************************************************/ void settime(newtime) time_type newtime; { if (musictrace) gprintf(TRANS, "settime(%lu)\n", newtime); #ifdef AMIGA timeoffset = *camdtime - (newtime >> 1); #endif #ifdef MACINTOSH #ifdef MIDIMGR ticksAtStart = MIDIGetCurTime(OutputRefNum); #else ticksAtStart = TickCount() - MS_TO_TICKS(newtime); #endif #endif } /**************************************************************************** * timereset * Effect: * Resets the time. * DMH: for MAC, implemented by setting ticksAtStart to * current value of system tick counter * JMN: for DOS, resets the time on the MPU-401. Ticks is reset to 0 ****************************************************************************/ void timereset() { #if HAS_GETTIMEOFDAY struct timeval timeval; #endif #if HAS_FTIME struct timeb ftime_res; #endif if (!initialized) fixup(); if (musictrace) gprintf(TRANS,"timereset()\n"); #ifdef AMIGA timeoffset = *camdtime; #endif #ifdef DOS #ifndef WINDOWS timeoffset = (ulong) readtimer(); #endif #endif #ifdef MACINTOSH #ifdef MIDIMGR ticksAtStart = MIDIGetCurTime(OutputRefNum); #else ticksAtStart = TickCount(); #endif #endif #if HAS_GETTIMEOFDAY gettimeofday(&timeval, 0); timeoffset = timeval.tv_sec * 1000 + timeval.tv_usec / 1000 - timeoffset; #endif #if HAS_FTIME ftime(&ftime_res); timeoffset = ftime_res.time; #endif } /**************************************************************************** * trace * Inputs: * boolean flag: TRUE for trace on * Effect: * turns tracing on (flag == TRUE) or off (flag == FALSE) ****************************************************************************/ void trace(boolean flag) { musictrace = flag; } /**************************************************************************** * tracemidi * Inputs: * boolean flag: TRUE for trace on * Effect: * turns midi tracing on (flag == TRUE) or off (flag == FALSE) 
****************************************************************************/ void tracemidi(boolean flag) { miditrace = flag; } /*********************************************************************** * * midi and timer initialization * ***********************************************************************/ #ifdef DOS /* binary value of hex char */ private int xval(int c) { int i; static char t[]="0123456789abcdef"; for (i=0; i<16; i++) if(tolower(c)==t[i]) return(i); return (-1); } /* binary value of hex string */ private int atox(char *t) { int i=0; int x; while(*t) { if ((x=xval(*t++))<0)return (0); i=(i<<4)+x; } return (i); } #endif /* def DOS */ private void midi_init() { #ifdef UNIX_IRIX_MIDIFNS #define PBUFLEN 4 MIconfig *config; static u_int pbuf[] = { MI_STAMPING, MINOSTAMP, MI_BLOCKING, MINONBLOCKING}; #endif #ifdef UNIX_MACH mach_midi_init(); #else #ifdef ITC midiconn = mi_open(NULL); if (midiconn == NULL) { gprintf(FATAL, "could not open a MIDI device\n"); EXIT(1); } cu_register((cu_fn_type) mi_close, (void *) midiconn); #endif #endif #ifdef AMIGA amiga_midi_init(); #endif /* def AMIGA */ #ifdef DOS #ifndef WINDOWS int err; int irq=SEARCHIRQ; int base=MPUBASEADDR; char *t; if (t=getenv("MPUIRQ")) { if (musictrace) gprintf(TRANS,"MPUIRQ %s\n",t); irq=atoi(t); } if (t=getenv("MPUBASE")) { if (musictrace) gprintf(TRANS,"MPUBASE %s\n",t); base=atox(t); } if(err = mOpen(base, irq)) { mClose(err); EXIT(1); } cu_register((cu_fn_type) mClose, 0); cu_register((cu_fn_type) mPutCmd, (cu_parm_type) MPURESET); initializetimer(); cu_register((cu_fn_type) restoretimer, NULL); #endif #endif #ifdef MACINTOSH #ifndef NYQUIST /* if NYQUIST, do nothing */ #ifdef MIDIMGR setup_midimgr(); /* this registers itself for cleanup */ #else init_abort_handler(); cu_register(cleanup_abort_handler, NULL); setupMIDI(portA, 0x80); cu_register(restoreMIDI, (long) portA); /* only initialize portB if necessary */ if (MAX_CHANNELS > CHANNELS_PER_PORT) { setupMIDI(portB, 0x80); 
cu_register(restoreMIDI, (long) portB); } #endif #endif /* NYQUIST */ #ifdef MIDIMGR ticksAtStart = MIDIGetCurTime(OutputRefNum); #else ticksAtStart = TickCount(); /* reset the clock */ #endif #endif /* def MACINTOSH */ if (!(cl_switch("noalloff"))) cu_register((cu_fn_type) alloff, NULL); } #ifdef DOS /**************************************************************************** * set_x_mfr * Inputs: * unsigned char mfr: Manufacturer ID for MIDI * Result: void * * Effect: * Sets the xcode and xcodemask to allow only these sysex messages ****************************************************************************/ void set_x_mfr(mfr) unsigned char mfr; { xcode = mfr; xcodemask = 0xFF; } /**************************************************************************** * clear_x_mfr * Result: void * * Effect: * Clears sysex manufacturer code filter; accepts all sysex messages ****************************************************************************/ void clear_x_mfr() { xcode = 0; xcodemask = 0; } #endif /* DOS */
gpl-2.0
bigzz/ltp
testcases/open_posix_testsuite/conformance/interfaces/pthread_cancel/4-1.c
30
1427
/* * Copyright (c) 2002, Intel Corporation. All rights reserved. * Created by: rolla.n.selbak REMOVE-THIS AT intel DOT com * This file is licensed under the GPL license. For the full content * of this license, see the COPYING file at the top level of this * source tree. * Test pthread_cancel * Upon successful completion will return a 0. * * STEPS: * 1. Create a thread * 2. Cancel that thread * 3. If pthread_cancel does not return [ESRCH] then it should return 0 */ #include <pthread.h> #include <stdio.h> #include <errno.h> #include <unistd.h> #include "posixtest.h" int sem; /* Manual semaphore */ void *a_thread_func() { pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL); /* Indicate to main() that the thread has been created. */ sem = 1; while (1) sleep(1); pthread_exit(0); return NULL; } int main(void) { pthread_t new_th; int ret; sem = 0; /* Create a new thread. */ if (pthread_create(&new_th, NULL, a_thread_func, NULL) != 0) { perror("Error creating thread\n"); return PTS_UNRESOLVED; } /* Make sure thread is created before we cancel it. */ while (sem == 0) sleep(1); /* Send cancel request to thread */ ret = pthread_cancel(new_th); if (ret != 0) { if (ret == ESRCH) { perror("Could not cancel thread\n"); return PTS_UNRESOLVED; } else { printf("Test FAILED\n"); return PTS_FAIL; } } printf("Test PASSED\n"); return PTS_PASS; }
gpl-2.0
ArtisteHsu/jetson-tk1-r21.4-kernel
drivers/mtd/nand/nand_ids.c
30
5977
/* * drivers/mtd/nandids.c * * Copyright (C) 2002 Thomas Gleixner (tglx@linutronix.de) * * $Id: nand_ids.c,v 1.10 2004/05/26 13:40:12 gleixner Exp $ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/mtd/nand.h> /* * Chip ID list * * Name. ID code, pagesize, chipsize in MegaByte, eraseblock size, * options * * Pagesize; 0, 256, 512 * 0 get this information from the extended chip ID + 256 256 Byte page size * 512 512 Byte page size */ struct nand_flash_dev nand_flash_ids[] = { {"NAND 1MiB 5V 8-bit", 0x6e, 256, 1, 0x1000, 0}, {"NAND 2MiB 5V 8-bit", 0x64, 256, 2, 0x1000, 0}, {"NAND 4MiB 5V 8-bit", 0x6b, 512, 4, 0x2000, 0}, {"NAND 1MiB 3,3V 8-bit", 0xe8, 256, 1, 0x1000, 0}, {"NAND 1MiB 3,3V 8-bit", 0xec, 256, 1, 0x1000, 0}, {"NAND 2MiB 3,3V 8-bit", 0xea, 256, 2, 0x1000, 0}, {"NAND 4MiB 3,3V 8-bit", 0xd5, 512, 4, 0x2000, 0}, {"NAND 4MiB 3,3V 8-bit", 0xe3, 512, 4, 0x2000, 0}, {"NAND 4MiB 3,3V 8-bit", 0xe5, 512, 4, 0x2000, 0}, {"NAND 8MiB 3,3V 8-bit", 0xd6, 512, 8, 0x2000, 0}, {"NAND 8MiB 1,8V 8-bit", 0x39, 512, 8, 0x2000, 0}, {"NAND 8MiB 3,3V 8-bit", 0xe6, 512, 8, 0x2000, 0}, {"NAND 8MiB 1,8V 16-bit", 0x49, 512, 8, 0x2000, NAND_BUSWIDTH_16}, {"NAND 8MiB 3,3V 16-bit", 0x59, 512, 8, 0x2000, NAND_BUSWIDTH_16}, {"NAND 16MiB 1,8V 8-bit", 0x33, 512, 16, 0x4000, 0}, {"NAND 16MiB 3,3V 8-bit", 0x73, 512, 16, 0x4000, 0}, {"NAND 16MiB 1,8V 16-bit", 0x43, 512, 16, 0x4000, NAND_BUSWIDTH_16}, {"NAND 16MiB 3,3V 16-bit", 0x53, 512, 16, 0x4000, NAND_BUSWIDTH_16}, {"NAND 32MiB 1,8V 8-bit", 0x35, 512, 32, 0x4000, 0}, {"NAND 32MiB 3,3V 8-bit", 0x75, 512, 32, 0x4000, 0}, {"NAND 32MiB 1,8V 16-bit", 0x45, 512, 32, 0x4000, NAND_BUSWIDTH_16}, {"NAND 32MiB 3,3V 16-bit", 0x55, 512, 32, 0x4000, NAND_BUSWIDTH_16}, {"NAND 64MiB 1,8V 8-bit", 0x36, 512, 64, 0x4000, 0}, {"NAND 64MiB 3,3V 8-bit", 0x76, 512, 64, 0x4000, 0}, 
{"NAND 64MiB 1,8V 16-bit", 0x46, 512, 64, 0x4000, NAND_BUSWIDTH_16}, {"NAND 64MiB 3,3V 16-bit", 0x56, 512, 64, 0x4000, NAND_BUSWIDTH_16}, {"NAND 128MiB 1,8V 8-bit", 0x78, 512, 128, 0x4000, 0}, {"NAND 128MiB 3,3V 8-bit", 0x79, 512, 128, 0x4000, 0}, {"NAND 128MiB 1,8V 16-bit", 0x72, 512, 128, 0x4000, NAND_BUSWIDTH_16}, {"NAND 128MiB 3,3V 16-bit", 0x74, 512, 128, 0x4000, NAND_BUSWIDTH_16}, {"NAND 256MiB 3,3V 8-bit", 0x71, 512, 256, 0x4000, 0}, {"NAND 512MiB 3,3V 8-bit", 0xDC, 512, 512, 0x4000, 0}, /* These are the new chips with large page size. The pagesize * and the erasesize is determined from the extended id bytes */ /* 1 Gigabit */ {"NAND 128MiB 1,8V 8-bit", 0xA1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 128MiB 3,3V 8-bit", 0xF1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 128MiB 1,8V 16-bit", 0xB1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, {"NAND 128MiB 3,3V 16-bit", 0xC1, 0, 128, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, /* 2 Gigabit */ {"NAND 256MiB 1,8V 8-bit", 0xAA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 256MiB 3,3V 8-bit", 0xDA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 256MiB 1,8V 16-bit", 0xBA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, {"NAND 256MiB 3,3V 16-bit", 0xCA, 0, 256, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, /* 4 Gigabit */ {"NAND 512MiB 1,8V 8-bit", 0xAC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 512MiB 3,3V 8-bit", 0xDC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 512MiB 1,8V 16-bit", 0xBC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, {"NAND 512MiB 3,3V 16-bit", 0xCC, 0, 512, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, /* 8 Gigabit */ {"NAND 1GiB 1,8V 8-bit", 0xA3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 1GiB 3,3V 8-bit", 0xD3, 0, 
1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 1GiB 1,8V 16-bit", 0xB3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, {"NAND 1GiB 3,3V 16-bit", 0xC3, 0, 1024, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, /* 16 Gigabit */ {"NAND 2GiB 1,8V 8-bit", 0xA5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 2GiB 3,3V 8-bit", 0xD5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_NO_AUTOINCR}, {"NAND 2GiB 1,8V 16-bit", 0xB5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, {"NAND 2GiB 3,3V 16-bit", 0xC5, 0, 2048, 0, NAND_SAMSUNG_LP_OPTIONS | NAND_BUSWIDTH_16 | NAND_NO_AUTOINCR}, /* Renesas AND 1 Gigabit. Those chips do not support extended id and have a strange page/block layout ! * The chosen minimum erasesize is 4 * 2 * 2048 = 16384 Byte, as those chips have an array of 4 page planes * 1 block = 2 pages, but due to plane arrangement the blocks 0-3 consists of page 0 + 4,1 + 5, 2 + 6, 3 + 7 * Anyway JFFS2 would increase the eraseblock size so we chose a combined one which can be erased in one go * There are more speed improvements for reads and writes possible, but not implemented now */ {"AND 128MiB 3,3V 8-bit", 0x01, 2048, 128, 0x4000, NAND_IS_AND | NAND_NO_AUTOINCR | NAND_4PAGE_ARRAY}, {NULL,} }; /* * Manufacturer ID list */ struct nand_manufacturers nand_manuf_ids[] = { {NAND_MFR_TOSHIBA, "Toshiba"}, {NAND_MFR_SAMSUNG, "Samsung"}, {NAND_MFR_FUJITSU, "Fujitsu"}, {NAND_MFR_NATIONAL, "National"}, {NAND_MFR_RENESAS, "Renesas"}, {NAND_MFR_STMICRO, "ST Micro"}, {0x0, "Unknown"} }; EXPORT_SYMBOL (nand_manuf_ids); EXPORT_SYMBOL (nand_flash_ids); MODULE_LICENSE ("GPL"); MODULE_AUTHOR ("Thomas Gleixner <tglx@linutronix.de>"); MODULE_DESCRIPTION ("Nand device & manufacturer ID's");
gpl-2.0
m4734/mysql_pio
storage/ndb/nodejs/Adapter/impl/ndb/src/node_module.cpp
30
3237
/* Copyright (c) 2012, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <node.h> #include "adapter_global.h" #include "js_wrapper_macros.h" #include "JsConverter.h" using namespace v8; typedef void LOADER_FUNCTION(Handle<Object>); extern LOADER_FUNCTION Ndb_init_initOnLoad; extern LOADER_FUNCTION Ndb_util_initOnLoad; extern LOADER_FUNCTION Ndb_cluster_connection_initOnLoad; extern LOADER_FUNCTION NdbTransaction_initOnLoad; extern LOADER_FUNCTION DBDictionaryImpl_initOnLoad; extern LOADER_FUNCTION DBOperationHelper_initOnLoad; extern LOADER_FUNCTION udebug_initOnLoad; extern LOADER_FUNCTION AsyncNdbContext_initOnLoad; extern LOADER_FUNCTION NdbWrapper_initOnLoad; extern LOADER_FUNCTION NdbTypeEncoders_initOnLoad; extern LOADER_FUNCTION ValueObject_initOnLoad; extern LOADER_FUNCTION IndexBound_initOnLoad; extern LOADER_FUNCTION NdbInterpretedCode_initOnLoad; extern LOADER_FUNCTION NdbScanFilter_initOnLoad; extern LOADER_FUNCTION ScanHelper_initOnLoad; extern LOADER_FUNCTION DBSessionImpl_initOnLoad; void init_ndbapi(Handle<Object> target) { Ndb_cluster_connection_initOnLoad(target); Ndb_init_initOnLoad(target); NdbTransaction_initOnLoad(target); NdbInterpretedCode_initOnLoad(target); NdbScanFilter_initOnLoad(target); } void init_impl(Handle<Object> target) { DBDictionaryImpl_initOnLoad(target); DBOperationHelper_initOnLoad(target); 
AsyncNdbContext_initOnLoad(target); NdbWrapper_initOnLoad(target); ValueObject_initOnLoad(target); IndexBound_initOnLoad(target); ScanHelper_initOnLoad(target); DBSessionImpl_initOnLoad(target); } void initModule(Handle<Object> target) { HandleScope scope; Persistent<Object> ndb_obj = Persistent<Object>(Object::New()); Persistent<Object> ndbapi_obj = Persistent<Object>(Object::New()); Persistent<Object> impl_obj = Persistent<Object>(Object::New()); Persistent<Object> util_obj = Persistent<Object>(Object::New()); Persistent<Object> debug_obj = Persistent<Object>(Object::New()); init_ndbapi(ndbapi_obj); init_impl(impl_obj); Ndb_util_initOnLoad(util_obj); NdbTypeEncoders_initOnLoad(impl_obj); udebug_initOnLoad(debug_obj); target->Set(Persistent<String>(String::NewSymbol("debug")), debug_obj); target->Set(Persistent<String>(String::NewSymbol("ndb")), ndb_obj); ndb_obj->Set(Persistent<String>(String::NewSymbol("ndbapi")), ndbapi_obj); ndb_obj->Set(Persistent<String>(String::NewSymbol("impl")), impl_obj); ndb_obj->Set(Persistent<String>(String::NewSymbol("util")), util_obj); } V8BINDER_LOADABLE_MODULE(ndb_adapter, initModule)
gpl-2.0
Asus-T100/kernel
drivers/regulator/max8973-regulator.c
286
23384
/* * max8973-regulator.c -- Maxim max8973 * * Regulator driver for MAXIM 8973 DC-DC step-down switching regulator. * * Copyright (c) 2012, NVIDIA Corporation. * * Author: Laxman Dewangan <ldewangan@nvidia.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation version 2. * * This program is distributed "as is" WITHOUT ANY WARRANTY of any kind, * whether express or implied; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307, USA */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/max8973-regulator.h> #include <linux/regulator/of_regulator.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/i2c.h> #include <linux/slab.h> #include <linux/regmap.h> #include <linux/thermal.h> #include <linux/irq.h> #include <linux/interrupt.h> /* Register definitions */ #define MAX8973_VOUT 0x0 #define MAX8973_VOUT_DVS 0x1 #define MAX8973_CONTROL1 0x2 #define MAX8973_CONTROL2 0x3 #define MAX8973_CHIPID1 0x4 #define MAX8973_CHIPID2 0x5 #define MAX8973_MAX_VOUT_REG 2 /* MAX8973_VOUT */ #define MAX8973_VOUT_ENABLE BIT(7) #define MAX8973_VOUT_MASK 0x7F /* MAX8973_VOUT_DVS */ #define MAX8973_DVS_VOUT_MASK 0x7F /* MAX8973_CONTROL1 */ #define MAX8973_SNS_ENABLE BIT(7) #define MAX8973_FPWM_EN_M BIT(6) #define MAX8973_NFSR_ENABLE BIT(5) #define MAX8973_AD_ENABLE BIT(4) #define MAX8973_BIAS_ENABLE BIT(3) #define 
MAX8973_FREQSHIFT_9PER BIT(2) #define MAX8973_RAMP_12mV_PER_US 0x0 #define MAX8973_RAMP_25mV_PER_US 0x1 #define MAX8973_RAMP_50mV_PER_US 0x2 #define MAX8973_RAMP_200mV_PER_US 0x3 #define MAX8973_RAMP_MASK 0x3 /* MAX8973_CONTROL2 */ #define MAX8973_WDTMR_ENABLE BIT(6) #define MAX8973_DISCH_ENBABLE BIT(5) #define MAX8973_FT_ENABLE BIT(4) #define MAX77621_T_JUNCTION_120 BIT(7) #define MAX8973_CKKADV_TRIP_MASK 0xC #define MAX8973_CKKADV_TRIP_DISABLE 0xC #define MAX8973_CKKADV_TRIP_75mV_PER_US 0x0 #define MAX8973_CKKADV_TRIP_150mV_PER_US 0x4 #define MAX8973_CKKADV_TRIP_75mV_PER_US_HIST_DIS 0x8 #define MAX8973_CONTROL_CLKADV_TRIP_MASK 0x00030000 #define MAX8973_INDUCTOR_MIN_30_PER 0x0 #define MAX8973_INDUCTOR_NOMINAL 0x1 #define MAX8973_INDUCTOR_PLUS_30_PER 0x2 #define MAX8973_INDUCTOR_PLUS_60_PER 0x3 #define MAX8973_CONTROL_INDUCTOR_VALUE_MASK 0x00300000 #define MAX8973_MIN_VOLATGE 606250 #define MAX8973_MAX_VOLATGE 1400000 #define MAX8973_VOLATGE_STEP 6250 #define MAX8973_BUCK_N_VOLTAGE 0x80 #define MAX77621_CHIPID_TJINT_S BIT(0) #define MAX77621_NORMAL_OPERATING_TEMP 100000 #define MAX77621_TJINT_WARNING_TEMP_120 120000 #define MAX77621_TJINT_WARNING_TEMP_140 140000 enum device_id { MAX8973, MAX77621 }; /* Maxim 8973 chip information */ struct max8973_chip { struct device *dev; struct regulator_desc desc; struct regmap *regmap; bool enable_external_control; int enable_gpio; int dvs_gpio; int lru_index[MAX8973_MAX_VOUT_REG]; int curr_vout_val[MAX8973_MAX_VOUT_REG]; int curr_vout_reg; int curr_gpio_val; struct regulator_ops ops; enum device_id id; int junction_temp_warning; int irq; struct thermal_zone_device *tz_device; }; /* * find_voltage_set_register: Find new voltage configuration register (VOUT). * The finding of the new VOUT register will be based on the LRU mechanism. * Each VOUT register will have different voltage configured . This * Function will look if any of the VOUT register have requested voltage set * or not. 
* - If it is already there then it will make that register as most * recently used and return as found so that caller need not to set * the VOUT register but need to set the proper gpios to select this * VOUT register. * - If requested voltage is not found then it will use the least * recently mechanism to get new VOUT register for new configuration * and will return not_found so that caller need to set new VOUT * register and then gpios (both). */ static bool find_voltage_set_register(struct max8973_chip *tps, int req_vsel, int *vout_reg, int *gpio_val) { int i; bool found = false; int new_vout_reg = tps->lru_index[MAX8973_MAX_VOUT_REG - 1]; int found_index = MAX8973_MAX_VOUT_REG - 1; for (i = 0; i < MAX8973_MAX_VOUT_REG; ++i) { if (tps->curr_vout_val[tps->lru_index[i]] == req_vsel) { new_vout_reg = tps->lru_index[i]; found_index = i; found = true; goto update_lru_index; } } update_lru_index: for (i = found_index; i > 0; i--) tps->lru_index[i] = tps->lru_index[i - 1]; tps->lru_index[0] = new_vout_reg; *gpio_val = new_vout_reg; *vout_reg = MAX8973_VOUT + new_vout_reg; return found; } static int max8973_dcdc_get_voltage_sel(struct regulator_dev *rdev) { struct max8973_chip *max = rdev_get_drvdata(rdev); unsigned int data; int ret; ret = regmap_read(max->regmap, max->curr_vout_reg, &data); if (ret < 0) { dev_err(max->dev, "register %d read failed, err = %d\n", max->curr_vout_reg, ret); return ret; } return data & MAX8973_VOUT_MASK; } static int max8973_dcdc_set_voltage_sel(struct regulator_dev *rdev, unsigned vsel) { struct max8973_chip *max = rdev_get_drvdata(rdev); int ret; bool found = false; int vout_reg = max->curr_vout_reg; int gpio_val = max->curr_gpio_val; /* * If gpios are available to select the VOUT register then least * recently used register for new configuration. 
*/ if (gpio_is_valid(max->dvs_gpio)) found = find_voltage_set_register(max, vsel, &vout_reg, &gpio_val); if (!found) { ret = regmap_update_bits(max->regmap, vout_reg, MAX8973_VOUT_MASK, vsel); if (ret < 0) { dev_err(max->dev, "register %d update failed, err %d\n", vout_reg, ret); return ret; } max->curr_vout_reg = vout_reg; max->curr_vout_val[gpio_val] = vsel; } /* Select proper VOUT register vio gpios */ if (gpio_is_valid(max->dvs_gpio)) { gpio_set_value_cansleep(max->dvs_gpio, gpio_val & 0x1); max->curr_gpio_val = gpio_val; } return 0; } static int max8973_dcdc_set_mode(struct regulator_dev *rdev, unsigned int mode) { struct max8973_chip *max = rdev_get_drvdata(rdev); int ret; int pwm; /* Enable force PWM mode in FAST mode only. */ switch (mode) { case REGULATOR_MODE_FAST: pwm = MAX8973_FPWM_EN_M; break; case REGULATOR_MODE_NORMAL: pwm = 0; break; default: return -EINVAL; } ret = regmap_update_bits(max->regmap, MAX8973_CONTROL1, MAX8973_FPWM_EN_M, pwm); if (ret < 0) dev_err(max->dev, "register %d update failed, err %d\n", MAX8973_CONTROL1, ret); return ret; } static unsigned int max8973_dcdc_get_mode(struct regulator_dev *rdev) { struct max8973_chip *max = rdev_get_drvdata(rdev); unsigned int data; int ret; ret = regmap_read(max->regmap, MAX8973_CONTROL1, &data); if (ret < 0) { dev_err(max->dev, "register %d read failed, err %d\n", MAX8973_CONTROL1, ret); return ret; } return (data & MAX8973_FPWM_EN_M) ? 
REGULATOR_MODE_FAST : REGULATOR_MODE_NORMAL; } static int max8973_set_ramp_delay(struct regulator_dev *rdev, int ramp_delay) { struct max8973_chip *max = rdev_get_drvdata(rdev); unsigned int control; int ret; /* Set ramp delay */ if (ramp_delay <= 12000) control = MAX8973_RAMP_12mV_PER_US; else if (ramp_delay <= 25000) control = MAX8973_RAMP_25mV_PER_US; else if (ramp_delay <= 50000) control = MAX8973_RAMP_50mV_PER_US; else if (ramp_delay <= 200000) control = MAX8973_RAMP_200mV_PER_US; else return -EINVAL; ret = regmap_update_bits(max->regmap, MAX8973_CONTROL1, MAX8973_RAMP_MASK, control); if (ret < 0) dev_err(max->dev, "register %d update failed, %d", MAX8973_CONTROL1, ret); return ret; } static int max8973_set_current_limit(struct regulator_dev *rdev, int min_ua, int max_ua) { struct max8973_chip *max = rdev_get_drvdata(rdev); unsigned int val; int ret; if (max_ua <= 9000000) val = MAX8973_CKKADV_TRIP_75mV_PER_US; else if (max_ua <= 12000000) val = MAX8973_CKKADV_TRIP_150mV_PER_US; else val = MAX8973_CKKADV_TRIP_DISABLE; ret = regmap_update_bits(max->regmap, MAX8973_CONTROL2, MAX8973_CKKADV_TRIP_MASK, val); if (ret < 0) { dev_err(max->dev, "register %d update failed: %d\n", MAX8973_CONTROL2, ret); return ret; } return 0; } static int max8973_get_current_limit(struct regulator_dev *rdev) { struct max8973_chip *max = rdev_get_drvdata(rdev); unsigned int control2; int ret; ret = regmap_read(max->regmap, MAX8973_CONTROL2, &control2); if (ret < 0) { dev_err(max->dev, "register %d read failed: %d\n", MAX8973_CONTROL2, ret); return ret; } switch (control2 & MAX8973_CKKADV_TRIP_MASK) { case MAX8973_CKKADV_TRIP_DISABLE: return 15000000; case MAX8973_CKKADV_TRIP_150mV_PER_US: return 12000000; case MAX8973_CKKADV_TRIP_75mV_PER_US: return 9000000; default: break; } return 9000000; } static const struct regulator_ops max8973_dcdc_ops = { .get_voltage_sel = max8973_dcdc_get_voltage_sel, .set_voltage_sel = max8973_dcdc_set_voltage_sel, .list_voltage = 
regulator_list_voltage_linear, .set_mode = max8973_dcdc_set_mode, .get_mode = max8973_dcdc_get_mode, .set_voltage_time_sel = regulator_set_voltage_time_sel, .set_ramp_delay = max8973_set_ramp_delay, }; static int max8973_init_dcdc(struct max8973_chip *max, struct max8973_regulator_platform_data *pdata) { int ret; uint8_t control1 = 0; uint8_t control2 = 0; unsigned int data; ret = regmap_read(max->regmap, MAX8973_CONTROL1, &data); if (ret < 0) { dev_err(max->dev, "register %d read failed, err = %d", MAX8973_CONTROL1, ret); return ret; } control1 = data & MAX8973_RAMP_MASK; switch (control1) { case MAX8973_RAMP_12mV_PER_US: max->desc.ramp_delay = 12000; break; case MAX8973_RAMP_25mV_PER_US: max->desc.ramp_delay = 25000; break; case MAX8973_RAMP_50mV_PER_US: max->desc.ramp_delay = 50000; break; case MAX8973_RAMP_200mV_PER_US: max->desc.ramp_delay = 200000; break; } if (pdata->control_flags & MAX8973_CONTROL_REMOTE_SENSE_ENABLE) control1 |= MAX8973_SNS_ENABLE; if (!(pdata->control_flags & MAX8973_CONTROL_FALLING_SLEW_RATE_ENABLE)) control1 |= MAX8973_NFSR_ENABLE; if (pdata->control_flags & MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE) control1 |= MAX8973_AD_ENABLE; if (pdata->control_flags & MAX8973_CONTROL_BIAS_ENABLE) { control1 |= MAX8973_BIAS_ENABLE; max->desc.enable_time = 20; } else { max->desc.enable_time = 240; } if (pdata->control_flags & MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE) control1 |= MAX8973_FREQSHIFT_9PER; if ((pdata->junction_temp_warning == MAX77621_TJINT_WARNING_TEMP_120) && (max->id == MAX77621)) control2 |= MAX77621_T_JUNCTION_120; if (!(pdata->control_flags & MAX8973_CONTROL_PULL_DOWN_ENABLE)) control2 |= MAX8973_DISCH_ENBABLE; /* Clock advance trip configuration */ switch (pdata->control_flags & MAX8973_CONTROL_CLKADV_TRIP_MASK) { case MAX8973_CONTROL_CLKADV_TRIP_DISABLED: control2 |= MAX8973_CKKADV_TRIP_DISABLE; break; case MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US: control2 |= MAX8973_CKKADV_TRIP_75mV_PER_US; break; case 
MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US: control2 |= MAX8973_CKKADV_TRIP_150mV_PER_US; break; case MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US_HIST_DIS: control2 |= MAX8973_CKKADV_TRIP_75mV_PER_US_HIST_DIS; break; } /* Configure inductor value */ switch (pdata->control_flags & MAX8973_CONTROL_INDUCTOR_VALUE_MASK) { case MAX8973_CONTROL_INDUCTOR_VALUE_NOMINAL: control2 |= MAX8973_INDUCTOR_NOMINAL; break; case MAX8973_CONTROL_INDUCTOR_VALUE_MINUS_30_PER: control2 |= MAX8973_INDUCTOR_MIN_30_PER; break; case MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_30_PER: control2 |= MAX8973_INDUCTOR_PLUS_30_PER; break; case MAX8973_CONTROL_INDUCTOR_VALUE_PLUS_60_PER: control2 |= MAX8973_INDUCTOR_PLUS_60_PER; break; } ret = regmap_write(max->regmap, MAX8973_CONTROL1, control1); if (ret < 0) { dev_err(max->dev, "register %d write failed, err = %d", MAX8973_CONTROL1, ret); return ret; } ret = regmap_write(max->regmap, MAX8973_CONTROL2, control2); if (ret < 0) { dev_err(max->dev, "register %d write failed, err = %d", MAX8973_CONTROL2, ret); return ret; } /* If external control is enabled then disable EN bit */ if (max->enable_external_control && (max->id == MAX8973)) { ret = regmap_update_bits(max->regmap, MAX8973_VOUT, MAX8973_VOUT_ENABLE, 0); if (ret < 0) dev_err(max->dev, "register %d update failed, err = %d", MAX8973_VOUT, ret); } return ret; } static int max8973_thermal_read_temp(void *data, int *temp) { struct max8973_chip *mchip = data; unsigned int val; int ret; ret = regmap_read(mchip->regmap, MAX8973_CHIPID1, &val); if (ret < 0) { dev_err(mchip->dev, "Failed to read register CHIPID1, %d", ret); return ret; } /* +1 degC to trigger cool devive */ if (val & MAX77621_CHIPID_TJINT_S) *temp = mchip->junction_temp_warning + 1000; else *temp = MAX77621_NORMAL_OPERATING_TEMP; return 0; } static irqreturn_t max8973_thermal_irq(int irq, void *data) { struct max8973_chip *mchip = data; thermal_zone_device_update(mchip->tz_device, THERMAL_EVENT_UNSPECIFIED); return IRQ_HANDLED; } static const 
struct thermal_zone_of_device_ops max77621_tz_ops = { .get_temp = max8973_thermal_read_temp, }; static int max8973_thermal_init(struct max8973_chip *mchip) { struct thermal_zone_device *tzd; struct irq_data *irq_data; unsigned long irq_flags = 0; int ret; if (mchip->id != MAX77621) return 0; tzd = devm_thermal_zone_of_sensor_register(mchip->dev, 0, mchip, &max77621_tz_ops); if (IS_ERR(tzd)) { ret = PTR_ERR(tzd); dev_err(mchip->dev, "Failed to register thermal sensor: %d\n", ret); return ret; } if (mchip->irq <= 0) return 0; irq_data = irq_get_irq_data(mchip->irq); if (irq_data) irq_flags = irqd_get_trigger_type(irq_data); ret = devm_request_threaded_irq(mchip->dev, mchip->irq, NULL, max8973_thermal_irq, IRQF_ONESHOT | IRQF_SHARED | irq_flags, dev_name(mchip->dev), mchip); if (ret < 0) { dev_err(mchip->dev, "Failed to request irq %d, %d\n", mchip->irq, ret); return ret; } return 0; } static const struct regmap_config max8973_regmap_config = { .reg_bits = 8, .val_bits = 8, .max_register = MAX8973_CHIPID2, .cache_type = REGCACHE_RBTREE, }; static struct max8973_regulator_platform_data *max8973_parse_dt( struct device *dev) { struct max8973_regulator_platform_data *pdata; struct device_node *np = dev->of_node; int ret; u32 pval; bool etr_enable; bool etr_sensitivity_high; pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) return NULL; pdata->enable_ext_control = of_property_read_bool(np, "maxim,externally-enable"); pdata->enable_gpio = of_get_named_gpio(np, "maxim,enable-gpio", 0); pdata->dvs_gpio = of_get_named_gpio(np, "maxim,dvs-gpio", 0); ret = of_property_read_u32(np, "maxim,dvs-default-state", &pval); if (!ret) pdata->dvs_def_state = pval; if (of_property_read_bool(np, "maxim,enable-remote-sense")) pdata->control_flags |= MAX8973_CONTROL_REMOTE_SENSE_ENABLE; if (of_property_read_bool(np, "maxim,enable-falling-slew-rate")) pdata->control_flags |= MAX8973_CONTROL_FALLING_SLEW_RATE_ENABLE; if (of_property_read_bool(np, 
"maxim,enable-active-discharge")) pdata->control_flags |= MAX8973_CONTROL_OUTPUT_ACTIVE_DISCH_ENABLE; if (of_property_read_bool(np, "maxim,enable-frequency-shift")) pdata->control_flags |= MAX8973_CONTROL_FREQ_SHIFT_9PER_ENABLE; if (of_property_read_bool(np, "maxim,enable-bias-control")) pdata->control_flags |= MAX8973_CONTROL_BIAS_ENABLE; etr_enable = of_property_read_bool(np, "maxim,enable-etr"); etr_sensitivity_high = of_property_read_bool(np, "maxim,enable-high-etr-sensitivity"); if (etr_sensitivity_high) etr_enable = true; if (etr_enable) { if (etr_sensitivity_high) pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_75mV_PER_US; else pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_150mV_PER_US; } else { pdata->control_flags |= MAX8973_CONTROL_CLKADV_TRIP_DISABLED; } pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_140; ret = of_property_read_u32(np, "junction-warn-millicelsius", &pval); if (!ret && (pval <= MAX77621_TJINT_WARNING_TEMP_120)) pdata->junction_temp_warning = MAX77621_TJINT_WARNING_TEMP_120; return pdata; } static const struct of_device_id of_max8973_match_tbl[] = { { .compatible = "maxim,max8973", .data = (void *)MAX8973, }, { .compatible = "maxim,max77621", .data = (void *)MAX77621, }, { }, }; MODULE_DEVICE_TABLE(of, of_max8973_match_tbl); static int max8973_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct max8973_regulator_platform_data *pdata; struct regulator_init_data *ridata; struct regulator_config config = { }; struct regulator_dev *rdev; struct max8973_chip *max; bool pdata_from_dt = false; unsigned int chip_id; int ret; pdata = dev_get_platdata(&client->dev); if (!pdata && client->dev.of_node) { pdata = max8973_parse_dt(&client->dev); pdata_from_dt = true; } if (!pdata) { dev_err(&client->dev, "No Platform data"); return -EIO; } if ((pdata->dvs_gpio == -EPROBE_DEFER) || (pdata->enable_gpio == -EPROBE_DEFER)) return -EPROBE_DEFER; max = devm_kzalloc(&client->dev, sizeof(*max), GFP_KERNEL); if (!max) 
return -ENOMEM; max->regmap = devm_regmap_init_i2c(client, &max8973_regmap_config); if (IS_ERR(max->regmap)) { ret = PTR_ERR(max->regmap); dev_err(&client->dev, "regmap init failed, err %d\n", ret); return ret; } if (client->dev.of_node) { const struct of_device_id *match; match = of_match_device(of_match_ptr(of_max8973_match_tbl), &client->dev); if (!match) return -ENODATA; max->id = (u32)((uintptr_t)match->data); } else { max->id = id->driver_data; } ret = regmap_read(max->regmap, MAX8973_CHIPID1, &chip_id); if (ret < 0) { dev_err(&client->dev, "register CHIPID1 read failed, %d", ret); return ret; } dev_info(&client->dev, "CHIP-ID OTP: 0x%02x ID_M: 0x%02x\n", (chip_id >> 4) & 0xF, (chip_id >> 1) & 0x7); i2c_set_clientdata(client, max); max->ops = max8973_dcdc_ops; max->dev = &client->dev; max->desc.name = id->name; max->desc.id = 0; max->desc.ops = &max->ops; max->desc.type = REGULATOR_VOLTAGE; max->desc.owner = THIS_MODULE; max->desc.min_uV = MAX8973_MIN_VOLATGE; max->desc.uV_step = MAX8973_VOLATGE_STEP; max->desc.n_voltages = MAX8973_BUCK_N_VOLTAGE; max->dvs_gpio = (pdata->dvs_gpio) ? pdata->dvs_gpio : -EINVAL; max->enable_gpio = (pdata->enable_gpio) ? pdata->enable_gpio : -EINVAL; max->enable_external_control = pdata->enable_ext_control; max->curr_gpio_val = pdata->dvs_def_state; max->curr_vout_reg = MAX8973_VOUT + pdata->dvs_def_state; max->junction_temp_warning = pdata->junction_temp_warning; if (gpio_is_valid(max->enable_gpio)) max->enable_external_control = true; max->lru_index[0] = max->curr_vout_reg; if (gpio_is_valid(max->dvs_gpio)) { int gpio_flags; int i; gpio_flags = (pdata->dvs_def_state) ? 
GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW; ret = devm_gpio_request_one(&client->dev, max->dvs_gpio, gpio_flags, "max8973-dvs"); if (ret) { dev_err(&client->dev, "gpio_request for gpio %d failed, err = %d\n", max->dvs_gpio, ret); return ret; } /* * Initialize the lru index with vout_reg id * The index 0 will be most recently used and * set with the max->curr_vout_reg */ for (i = 0; i < MAX8973_MAX_VOUT_REG; ++i) max->lru_index[i] = i; max->lru_index[0] = max->curr_vout_reg; max->lru_index[max->curr_vout_reg] = 0; } else { /* * If there is no DVS GPIO, the VOUT register * address is fixed. */ max->ops.set_voltage_sel = regulator_set_voltage_sel_regmap; max->ops.get_voltage_sel = regulator_get_voltage_sel_regmap; max->desc.vsel_reg = max->curr_vout_reg; max->desc.vsel_mask = MAX8973_VOUT_MASK; } if (pdata_from_dt) pdata->reg_init_data = of_get_regulator_init_data(&client->dev, client->dev.of_node, &max->desc); ridata = pdata->reg_init_data; switch (max->id) { case MAX8973: if (!pdata->enable_ext_control) { max->desc.enable_reg = MAX8973_VOUT; max->desc.enable_mask = MAX8973_VOUT_ENABLE; max->ops.enable = regulator_enable_regmap; max->ops.disable = regulator_disable_regmap; max->ops.is_enabled = regulator_is_enabled_regmap; break; } if (gpio_is_valid(max->enable_gpio)) { config.ena_gpio_flags = GPIOF_OUT_INIT_LOW; if (ridata && (ridata->constraints.always_on || ridata->constraints.boot_on)) config.ena_gpio_flags = GPIOF_OUT_INIT_HIGH; config.ena_gpio = max->enable_gpio; } break; case MAX77621: if (gpio_is_valid(max->enable_gpio)) { ret = devm_gpio_request_one(&client->dev, max->enable_gpio, GPIOF_OUT_INIT_HIGH, "max8973-en-gpio"); if (ret) { dev_err(&client->dev, "gpio_request for gpio %d failed: %d\n", max->enable_gpio, ret); return ret; } } max->desc.enable_reg = MAX8973_VOUT; max->desc.enable_mask = MAX8973_VOUT_ENABLE; max->ops.enable = regulator_enable_regmap; max->ops.disable = regulator_disable_regmap; max->ops.is_enabled = regulator_is_enabled_regmap; 
max->ops.set_current_limit = max8973_set_current_limit; max->ops.get_current_limit = max8973_get_current_limit; break; default: break; } ret = max8973_init_dcdc(max, pdata); if (ret < 0) { dev_err(max->dev, "Max8973 Init failed, err = %d\n", ret); return ret; } config.dev = &client->dev; config.init_data = pdata->reg_init_data; config.driver_data = max; config.of_node = client->dev.of_node; config.regmap = max->regmap; /* Register the regulators */ rdev = devm_regulator_register(&client->dev, &max->desc, &config); if (IS_ERR(rdev)) { ret = PTR_ERR(rdev); dev_err(max->dev, "regulator register failed, err %d\n", ret); return ret; } max8973_thermal_init(max); return 0; } static const struct i2c_device_id max8973_id[] = { {.name = "max8973", .driver_data = MAX8973}, {.name = "max77621", .driver_data = MAX77621}, {}, }; MODULE_DEVICE_TABLE(i2c, max8973_id); static struct i2c_driver max8973_i2c_driver = { .driver = { .name = "max8973", .of_match_table = of_max8973_match_tbl, }, .probe = max8973_probe, .id_table = max8973_id, }; static int __init max8973_init(void) { return i2c_add_driver(&max8973_i2c_driver); } subsys_initcall(max8973_init); static void __exit max8973_cleanup(void) { i2c_del_driver(&max8973_i2c_driver); } module_exit(max8973_cleanup); MODULE_AUTHOR("Laxman Dewangan <ldewangan@nvidia.com>"); MODULE_DESCRIPTION("MAX8973 voltage regulator driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
go2ev-devteam/Gplus_2159_0801
openplatform/sdk/os/kernel-2.6.32/sound/pci/ctxfi/xfi.c
542
4281
/* * xfi linux driver. * * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved. * * This source file is released under GPL v2 license (no other versions). * See the COPYING file included in the main directory of this source * distribution for the license terms and conditions. */ #include <linux/init.h> #include <linux/pci.h> #include <linux/moduleparam.h> #include <linux/pci_ids.h> #include <sound/core.h> #include <sound/initval.h> #include "ctatc.h" #include "cthardware.h" MODULE_AUTHOR("Creative Technology Ltd"); MODULE_DESCRIPTION("X-Fi driver version 1.03"); MODULE_LICENSE("GPL v2"); MODULE_SUPPORTED_DEVICE("{{Creative Labs, Sound Blaster X-Fi}"); static unsigned int reference_rate = 48000; static unsigned int multiple = 2; MODULE_PARM_DESC(reference_rate, "Reference rate (default=48000)"); module_param(reference_rate, uint, S_IRUGO); MODULE_PARM_DESC(multiple, "Rate multiplier (default=2)"); module_param(multiple, uint, S_IRUGO); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Creative X-Fi driver"); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Creative X-Fi driver"); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Creative X-Fi driver"); static struct pci_device_id ct_pci_dev_ids[] = { /* only X-Fi is supported, so... 
*/ { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K1), .driver_data = ATC20K1, }, { PCI_DEVICE(PCI_VENDOR_ID_CREATIVE, PCI_DEVICE_ID_CREATIVE_20K2), .driver_data = ATC20K2, }, { 0, } }; MODULE_DEVICE_TABLE(pci, ct_pci_dev_ids); static int __devinit ct_card_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct ct_atc *atc; int err; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err) return err; if ((reference_rate != 48000) && (reference_rate != 44100)) { printk(KERN_ERR "ctxfi: Invalid reference_rate value %u!!!\n", reference_rate); printk(KERN_ERR "ctxfi: The valid values for reference_rate " "are 48000 and 44100, Value 48000 is assumed.\n"); reference_rate = 48000; } if ((multiple != 1) && (multiple != 2)) { printk(KERN_ERR "ctxfi: Invalid multiple value %u!!!\n", multiple); printk(KERN_ERR "ctxfi: The valid values for multiple are " "1 and 2, Value 2 is assumed.\n"); multiple = 2; } err = ct_atc_create(card, pci, reference_rate, multiple, pci_id->driver_data, &atc); if (err < 0) goto error; card->private_data = atc; /* Create alsa devices supported by this card */ err = ct_atc_create_alsa_devs(atc); if (err < 0) goto error; strcpy(card->driver, "SB-XFi"); strcpy(card->shortname, "Creative X-Fi"); snprintf(card->longname, sizeof(card->longname), "%s %s %s", card->shortname, atc->chip_name, atc->model_name); err = snd_card_register(card); if (err < 0) goto error; pci_set_drvdata(pci, card); dev++; return 0; error: snd_card_free(card); return err; } static void __devexit ct_card_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } #ifdef CONFIG_PM static int ct_card_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct ct_atc *atc = card->private_data; return atc->suspend(atc, state); } static 
int ct_card_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct ct_atc *atc = card->private_data; return atc->resume(atc); } #endif static struct pci_driver ct_driver = { .name = "SB-XFi", .id_table = ct_pci_dev_ids, .probe = ct_card_probe, .remove = __devexit_p(ct_card_remove), #ifdef CONFIG_PM .suspend = ct_card_suspend, .resume = ct_card_resume, #endif }; static int __init ct_card_init(void) { return pci_register_driver(&ct_driver); } static void __exit ct_card_exit(void) { pci_unregister_driver(&ct_driver); } module_init(ct_card_init) module_exit(ct_card_exit)
gpl-2.0
Diaob/z_bac_150827_android_kernel_oneplus_msm8994
fs/cifs/smb1ops.c
798
29526
/* * SMB1 (CIFS) version specific operations * * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com> * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License v2 as published * by the Free Software Foundation. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/pagemap.h> #include <linux/vfs.h> #include "cifsglob.h" #include "cifsproto.h" #include "cifs_debug.h" #include "cifspdu.h" /* * An NT cancel request header looks just like the original request except: * * The Command is SMB_COM_NT_CANCEL * The WordCount is zeroed out * The ByteCount is zeroed out * * This function mangles an existing request buffer into a * SMB_COM_NT_CANCEL request and then sends it. */ static int send_nt_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid) { int rc = 0; struct smb_hdr *in_buf = (struct smb_hdr *)buf; /* -4 for RFC1001 length and +2 for BCC field */ in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2); in_buf->Command = SMB_COM_NT_CANCEL; in_buf->WordCount = 0; put_bcc(0, in_buf); mutex_lock(&server->srv_mutex); rc = cifs_sign_smb(in_buf, server, &mid->sequence_number); if (rc) { mutex_unlock(&server->srv_mutex); return rc; } /* * The response to this call was already factored into the sequence * number when the call went out, so we must adjust it back downward * after signing here. 
*/ --server->sequence_number; rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length)); if (rc < 0) server->sequence_number--; mutex_unlock(&server->srv_mutex); cifs_dbg(FYI, "issued NT_CANCEL for mid %u, rc = %d\n", in_buf->Mid, rc); return rc; } static bool cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2) { return ob1->fid.netfid == ob2->fid.netfid; } static unsigned int cifs_read_data_offset(char *buf) { READ_RSP *rsp = (READ_RSP *)buf; return le16_to_cpu(rsp->DataOffset); } static unsigned int cifs_read_data_length(char *buf) { READ_RSP *rsp = (READ_RSP *)buf; return (le16_to_cpu(rsp->DataLengthHigh) << 16) + le16_to_cpu(rsp->DataLength); } static struct mid_q_entry * cifs_find_mid(struct TCP_Server_Info *server, char *buffer) { struct smb_hdr *buf = (struct smb_hdr *)buffer; struct mid_q_entry *mid; spin_lock(&GlobalMid_Lock); list_for_each_entry(mid, &server->pending_mid_q, qhead) { if (mid->mid == buf->Mid && mid->mid_state == MID_REQUEST_SUBMITTED && le16_to_cpu(mid->command) == buf->Command) { spin_unlock(&GlobalMid_Lock); return mid; } } spin_unlock(&GlobalMid_Lock); return NULL; } static void cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add, const int optype) { spin_lock(&server->req_lock); server->credits += add; server->in_flight--; spin_unlock(&server->req_lock); wake_up(&server->request_q); } static void cifs_set_credits(struct TCP_Server_Info *server, const int val) { spin_lock(&server->req_lock); server->credits = val; server->oplocks = val > 1 ? enable_oplocks : false; spin_unlock(&server->req_lock); } static int * cifs_get_credits_field(struct TCP_Server_Info *server, const int optype) { return &server->credits; } static unsigned int cifs_get_credits(struct mid_q_entry *mid) { return 1; } /* * Find a free multiplex id (SMB mid). Otherwise there could be * mid collisions which might cause problems, demultiplexing the * wrong response to this request. 
Multiplex ids could collide if * one of a series requests takes much longer than the others, or * if a very large number of long lived requests (byte range * locks or FindNotify requests) are pending. No more than * 64K-1 requests can be outstanding at one time. If no * mids are available, return zero. A future optimization * could make the combination of mids and uid the key we use * to demultiplex on (rather than mid alone). * In addition to the above check, the cifs demultiplex * code already used the command code as a secondary * check of the frame and if signing is negotiated the * response would be discarded if the mid were the same * but the signature was wrong. Since the mid is not put in the * pending queue until later (when it is about to be dispatched) * we do have to limit the number of outstanding requests * to somewhat less than 64K-1 although it is hard to imagine * so many threads being in the vfs at one time. */ static __u64 cifs_get_next_mid(struct TCP_Server_Info *server) { __u64 mid = 0; __u16 last_mid, cur_mid; bool collision; spin_lock(&GlobalMid_Lock); /* mid is 16 bit only for CIFS/SMB */ cur_mid = (__u16)((server->CurrentMid) & 0xffff); /* we do not want to loop forever */ last_mid = cur_mid; cur_mid++; /* * This nested loop looks more expensive than it is. * In practice the list of pending requests is short, * fewer than 50, and the mids are likely to be unique * on the first pass through the loop unless some request * takes longer than the 64 thousand requests before it * (and it would also have to have been a request that * did not time out). 
*/ while (cur_mid != last_mid) { struct mid_q_entry *mid_entry; unsigned int num_mids; collision = false; if (cur_mid == 0) cur_mid++; num_mids = 0; list_for_each_entry(mid_entry, &server->pending_mid_q, qhead) { ++num_mids; if (mid_entry->mid == cur_mid && mid_entry->mid_state == MID_REQUEST_SUBMITTED) { /* This mid is in use, try a different one */ collision = true; break; } } /* * if we have more than 32k mids in the list, then something * is very wrong. Possibly a local user is trying to DoS the * box by issuing long-running calls and SIGKILL'ing them. If * we get to 2^16 mids then we're in big trouble as this * function could loop forever. * * Go ahead and assign out the mid in this situation, but force * an eventual reconnect to clean out the pending_mid_q. */ if (num_mids > 32768) server->tcpStatus = CifsNeedReconnect; if (!collision) { mid = (__u64)cur_mid; server->CurrentMid = mid; break; } cur_mid++; } spin_unlock(&GlobalMid_Lock); return mid; } /* return codes: 0 not a transact2, or all data present >0 transact2 with that much data missing -EINVAL invalid transact2 */ static int check2ndT2(char *buf) { struct smb_hdr *pSMB = (struct smb_hdr *)buf; struct smb_t2_rsp *pSMBt; int remaining; __u16 total_data_size, data_in_this_rsp; if (pSMB->Command != SMB_COM_TRANSACTION2) return 0; /* check for plausible wct, bcc and t2 data and parm sizes */ /* check for parm and data offset going beyond end of smb */ if (pSMB->WordCount != 10) { /* coalesce_t2 depends on this */ cifs_dbg(FYI, "invalid transact2 word count\n"); return -EINVAL; } pSMBt = (struct smb_t2_rsp *)pSMB; total_data_size = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); data_in_this_rsp = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); if (total_data_size == data_in_this_rsp) return 0; else if (total_data_size < data_in_this_rsp) { cifs_dbg(FYI, "total data %d smaller than data in frame %d\n", total_data_size, data_in_this_rsp); return -EINVAL; } remaining = total_data_size - data_in_this_rsp; 
cifs_dbg(FYI, "missing %d bytes from transact2, check next response\n", remaining); if (total_data_size > CIFSMaxBufSize) { cifs_dbg(VFS, "TotalDataSize %d is over maximum buffer %d\n", total_data_size, CIFSMaxBufSize); return -EINVAL; } return remaining; } static int coalesce_t2(char *second_buf, struct smb_hdr *target_hdr) { struct smb_t2_rsp *pSMBs = (struct smb_t2_rsp *)second_buf; struct smb_t2_rsp *pSMBt = (struct smb_t2_rsp *)target_hdr; char *data_area_of_tgt; char *data_area_of_src; int remaining; unsigned int byte_count, total_in_tgt; __u16 tgt_total_cnt, src_total_cnt, total_in_src; src_total_cnt = get_unaligned_le16(&pSMBs->t2_rsp.TotalDataCount); tgt_total_cnt = get_unaligned_le16(&pSMBt->t2_rsp.TotalDataCount); if (tgt_total_cnt != src_total_cnt) cifs_dbg(FYI, "total data count of primary and secondary t2 differ source=%hu target=%hu\n", src_total_cnt, tgt_total_cnt); total_in_tgt = get_unaligned_le16(&pSMBt->t2_rsp.DataCount); remaining = tgt_total_cnt - total_in_tgt; if (remaining < 0) { cifs_dbg(FYI, "Server sent too much data. tgt_total_cnt=%hu total_in_tgt=%hu\n", tgt_total_cnt, total_in_tgt); return -EPROTO; } if (remaining == 0) { /* nothing to do, ignore */ cifs_dbg(FYI, "no more data remains\n"); return 0; } total_in_src = get_unaligned_le16(&pSMBs->t2_rsp.DataCount); if (remaining < total_in_src) cifs_dbg(FYI, "transact2 2nd response contains too much data\n"); /* find end of first SMB data area */ data_area_of_tgt = (char *)&pSMBt->hdr.Protocol + get_unaligned_le16(&pSMBt->t2_rsp.DataOffset); /* validate target area */ data_area_of_src = (char *)&pSMBs->hdr.Protocol + get_unaligned_le16(&pSMBs->t2_rsp.DataOffset); data_area_of_tgt += total_in_tgt; total_in_tgt += total_in_src; /* is the result too big for the field? 
*/ if (total_in_tgt > USHRT_MAX) { cifs_dbg(FYI, "coalesced DataCount too large (%u)\n", total_in_tgt); return -EPROTO; } put_unaligned_le16(total_in_tgt, &pSMBt->t2_rsp.DataCount); /* fix up the BCC */ byte_count = get_bcc(target_hdr); byte_count += total_in_src; /* is the result too big for the field? */ if (byte_count > USHRT_MAX) { cifs_dbg(FYI, "coalesced BCC too large (%u)\n", byte_count); return -EPROTO; } put_bcc(byte_count, target_hdr); byte_count = be32_to_cpu(target_hdr->smb_buf_length); byte_count += total_in_src; /* don't allow buffer to overflow */ if (byte_count > CIFSMaxBufSize + MAX_CIFS_HDR_SIZE - 4) { cifs_dbg(FYI, "coalesced BCC exceeds buffer size (%u)\n", byte_count); return -ENOBUFS; } target_hdr->smb_buf_length = cpu_to_be32(byte_count); /* copy second buffer into end of first buffer */ memcpy(data_area_of_tgt, data_area_of_src, total_in_src); if (remaining != total_in_src) { /* more responses to go */ cifs_dbg(FYI, "waiting for more secondary responses\n"); return 1; } /* we are done */ cifs_dbg(FYI, "found the last secondary response\n"); return 0; } static bool cifs_check_trans2(struct mid_q_entry *mid, struct TCP_Server_Info *server, char *buf, int malformed) { if (malformed) return false; if (check2ndT2(buf) <= 0) return false; mid->multiRsp = true; if (mid->resp_buf) { /* merge response - fix up 1st*/ malformed = coalesce_t2(buf, mid->resp_buf); if (malformed > 0) return true; /* All parts received or packet is malformed. 
*/ mid->multiEnd = true; dequeue_mid(mid, malformed); return true; } if (!server->large_buf) { /*FIXME: switch to already allocated largebuf?*/ cifs_dbg(VFS, "1st trans2 resp needs bigbuf\n"); } else { /* Have first buffer */ mid->resp_buf = buf; mid->large_buf = true; server->bigbuf = NULL; } return true; } static bool cifs_need_neg(struct TCP_Server_Info *server) { return server->maxBuf == 0; } static int cifs_negotiate(const unsigned int xid, struct cifs_ses *ses) { int rc; rc = CIFSSMBNegotiate(xid, ses); if (rc == -EAGAIN) { /* retry only once on 1st time connection */ set_credits(ses->server, 1); rc = CIFSSMBNegotiate(xid, ses); if (rc == -EAGAIN) rc = -EHOSTDOWN; } return rc; } static unsigned int cifs_negotiate_wsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) { __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); struct TCP_Server_Info *server = tcon->ses->server; unsigned int wsize; /* start with specified wsize, or default */ if (volume_info->wsize) wsize = volume_info->wsize; else if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) wsize = CIFS_DEFAULT_IOSIZE; else wsize = CIFS_DEFAULT_NON_POSIX_WSIZE; /* can server support 24-bit write sizes? (via UNIX extensions) */ if (!tcon->unix_ext || !(unix_cap & CIFS_UNIX_LARGE_WRITE_CAP)) wsize = min_t(unsigned int, wsize, CIFS_MAX_RFC1002_WSIZE); /* * no CAP_LARGE_WRITE_X or is signing enabled without CAP_UNIX set? * Limit it to max buffer offered by the server, minus the size of the * WRITEX header, not including the 4 byte RFC1001 length. 
*/ if (!(server->capabilities & CAP_LARGE_WRITE_X) || (!(server->capabilities & CAP_UNIX) && (server->sec_mode & (SECMODE_SIGN_ENABLED|SECMODE_SIGN_REQUIRED)))) wsize = min_t(unsigned int, wsize, server->maxBuf - sizeof(WRITE_REQ) + 4); /* hard limit of CIFS_MAX_WSIZE */ wsize = min_t(unsigned int, wsize, CIFS_MAX_WSIZE); return wsize; } static unsigned int cifs_negotiate_rsize(struct cifs_tcon *tcon, struct smb_vol *volume_info) { __u64 unix_cap = le64_to_cpu(tcon->fsUnixInfo.Capability); struct TCP_Server_Info *server = tcon->ses->server; unsigned int rsize, defsize; /* * Set default value... * * HACK alert! Ancient servers have very small buffers. Even though * MS-CIFS indicates that servers are only limited by the client's * bufsize for reads, testing against win98se shows that it throws * INVALID_PARAMETER errors if you try to request too large a read. * OS/2 just sends back short reads. * * If the server doesn't advertise CAP_LARGE_READ_X, then assume that * it can't handle a read request larger than its MaxBufferSize either. */ if (tcon->unix_ext && (unix_cap & CIFS_UNIX_LARGE_READ_CAP)) defsize = CIFS_DEFAULT_IOSIZE; else if (server->capabilities & CAP_LARGE_READ_X) defsize = CIFS_DEFAULT_NON_POSIX_RSIZE; else defsize = server->maxBuf - sizeof(READ_RSP); rsize = volume_info->rsize ? volume_info->rsize : defsize; /* * no CAP_LARGE_READ_X? Then MS-CIFS states that we must limit this to * the client's MaxBufferSize. 
*/ if (!(server->capabilities & CAP_LARGE_READ_X)) rsize = min_t(unsigned int, CIFSMaxBufSize, rsize); /* hard limit of CIFS_MAX_RSIZE */ rsize = min_t(unsigned int, rsize, CIFS_MAX_RSIZE); return rsize; } static void cifs_qfs_tcon(const unsigned int xid, struct cifs_tcon *tcon) { CIFSSMBQFSDeviceInfo(xid, tcon); CIFSSMBQFSAttributeInfo(xid, tcon); } static int cifs_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path) { int rc; FILE_ALL_INFO *file_info; file_info = kmalloc(sizeof(FILE_ALL_INFO), GFP_KERNEL); if (file_info == NULL) return -ENOMEM; rc = CIFSSMBQPathInfo(xid, tcon, full_path, file_info, 0 /* not legacy */, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc == -EOPNOTSUPP || rc == -EINVAL) rc = SMBQueryInformation(xid, tcon, full_path, file_info, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); kfree(file_info); return rc; } static int cifs_query_path_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, FILE_ALL_INFO *data, bool *adjustTZ) { int rc; /* could do find first instead but this returns more info */ rc = CIFSSMBQPathInfo(xid, tcon, full_path, data, 0 /* not legacy */, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); /* * BB optimize code so we do not make the above call when server claims * no NT SMB support and the above call failed at least once - set flag * in tcon or mount. 
*/ if ((rc == -EOPNOTSUPP) || (rc == -EINVAL)) { rc = SMBQueryInformation(xid, tcon, full_path, data, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); *adjustTZ = true; } return rc; } static int cifs_get_srv_inum(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_sb_info *cifs_sb, const char *full_path, u64 *uniqueid, FILE_ALL_INFO *data) { /* * We can not use the IndexNumber field by default from Windows or * Samba (in ALL_INFO buf) but we can request it explicitly. The SNIA * CIFS spec claims that this value is unique within the scope of a * share, and the windows docs hint that it's actually unique * per-machine. * * There may be higher info levels that work but are there Windows * server or network appliances for which IndexNumber field is not * guaranteed unique? */ return CIFSGetSrvInodeNumber(xid, tcon, full_path, uniqueid, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } static int cifs_query_file_info(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid, FILE_ALL_INFO *data) { return CIFSSMBQFileInfo(xid, tcon, fid->netfid, data); } static void cifs_clear_stats(struct cifs_tcon *tcon) { #ifdef CONFIG_CIFS_STATS atomic_set(&tcon->stats.cifs_stats.num_writes, 0); atomic_set(&tcon->stats.cifs_stats.num_reads, 0); atomic_set(&tcon->stats.cifs_stats.num_flushes, 0); atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0); atomic_set(&tcon->stats.cifs_stats.num_opens, 0); atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0); atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0); atomic_set(&tcon->stats.cifs_stats.num_closes, 0); atomic_set(&tcon->stats.cifs_stats.num_deletes, 0); atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0); atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0); atomic_set(&tcon->stats.cifs_stats.num_renames, 0); atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0); atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0); atomic_set(&tcon->stats.cifs_stats.num_fnext, 0); 
atomic_set(&tcon->stats.cifs_stats.num_fclose, 0); atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0); atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0); atomic_set(&tcon->stats.cifs_stats.num_locks, 0); atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0); atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0); #endif } static void cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon) { #ifdef CONFIG_CIFS_STATS seq_printf(m, " Oplocks breaks: %d", atomic_read(&tcon->stats.cifs_stats.num_oplock_brks)); seq_printf(m, "\nReads: %d Bytes: %llu", atomic_read(&tcon->stats.cifs_stats.num_reads), (long long)(tcon->bytes_read)); seq_printf(m, "\nWrites: %d Bytes: %llu", atomic_read(&tcon->stats.cifs_stats.num_writes), (long long)(tcon->bytes_written)); seq_printf(m, "\nFlushes: %d", atomic_read(&tcon->stats.cifs_stats.num_flushes)); seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d", atomic_read(&tcon->stats.cifs_stats.num_locks), atomic_read(&tcon->stats.cifs_stats.num_hardlinks), atomic_read(&tcon->stats.cifs_stats.num_symlinks)); seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d", atomic_read(&tcon->stats.cifs_stats.num_opens), atomic_read(&tcon->stats.cifs_stats.num_closes), atomic_read(&tcon->stats.cifs_stats.num_deletes)); seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d", atomic_read(&tcon->stats.cifs_stats.num_posixopens), atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs)); seq_printf(m, "\nMkdirs: %d Rmdirs: %d", atomic_read(&tcon->stats.cifs_stats.num_mkdirs), atomic_read(&tcon->stats.cifs_stats.num_rmdirs)); seq_printf(m, "\nRenames: %d T2 Renames %d", atomic_read(&tcon->stats.cifs_stats.num_renames), atomic_read(&tcon->stats.cifs_stats.num_t2renames)); seq_printf(m, "\nFindFirst: %d FNext %d FClose %d", atomic_read(&tcon->stats.cifs_stats.num_ffirst), atomic_read(&tcon->stats.cifs_stats.num_fnext), atomic_read(&tcon->stats.cifs_stats.num_fclose)); #endif } static void cifs_mkdir_setinfo(struct inode *inode, const char *full_path, struct 
cifs_sb_info *cifs_sb, struct cifs_tcon *tcon, const unsigned int xid) { FILE_BASIC_INFO info; struct cifsInodeInfo *cifsInode; u32 dosattrs; int rc; memset(&info, 0, sizeof(info)); cifsInode = CIFS_I(inode); dosattrs = cifsInode->cifsAttrs|ATTR_READONLY; info.Attributes = cpu_to_le32(dosattrs); rc = CIFSSMBSetPathInfo(xid, tcon, full_path, &info, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc == 0) cifsInode->cifsAttrs = dosattrs; } static int cifs_open_file(const unsigned int xid, struct cifs_tcon *tcon, const char *path, int disposition, int desired_access, int create_options, struct cifs_fid *fid, __u32 *oplock, FILE_ALL_INFO *buf, struct cifs_sb_info *cifs_sb) { if (!(tcon->ses->capabilities & CAP_NT_SMBS)) return SMBLegacyOpen(xid, tcon, path, disposition, desired_access, create_options, &fid->netfid, oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); return CIFSSMBOpen(xid, tcon, path, disposition, desired_access, create_options, &fid->netfid, oplock, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); } static void cifs_set_fid(struct cifsFileInfo *cfile, struct cifs_fid *fid, __u32 oplock) { struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); cfile->fid.netfid = fid->netfid; cifs_set_oplock_level(cinode, oplock); cinode->can_cache_brlcks = cinode->clientCanCacheAll; } static void cifs_close_file(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid) { CIFSSMBClose(xid, tcon, fid->netfid); } static int cifs_flush_file(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid) { return CIFSSMBFlush(xid, tcon, fid->netfid); } static int cifs_sync_read(const unsigned int xid, struct cifsFileInfo *cfile, struct cifs_io_parms *parms, unsigned int *bytes_read, char **buf, int *buf_type) { parms->netfid = cfile->fid.netfid; return CIFSSMBRead(xid, parms, bytes_read, buf, buf_type); } static int cifs_sync_write(const unsigned int 
xid, struct cifsFileInfo *cfile, struct cifs_io_parms *parms, unsigned int *written, struct kvec *iov, unsigned long nr_segs) { parms->netfid = cfile->fid.netfid; return CIFSSMBWrite2(xid, parms, written, iov, nr_segs); } static int smb_set_file_info(struct inode *inode, const char *full_path, FILE_BASIC_INFO *buf, const unsigned int xid) { int oplock = 0; int rc; __u16 netfid; __u32 netpid; struct cifsFileInfo *open_file; struct cifsInodeInfo *cinode = CIFS_I(inode); struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb); struct tcon_link *tlink = NULL; struct cifs_tcon *tcon; /* if the file is already open for write, just use that fileid */ open_file = find_writable_file(cinode, true); if (open_file) { netfid = open_file->fid.netfid; netpid = open_file->pid; tcon = tlink_tcon(open_file->tlink); goto set_via_filehandle; } tlink = cifs_sb_tlink(cifs_sb); if (IS_ERR(tlink)) { rc = PTR_ERR(tlink); tlink = NULL; goto out; } tcon = tlink_tcon(tlink); /* * NT4 apparently returns success on this call, but it doesn't really * work. 
*/ if (!(tcon->ses->flags & CIFS_SES_NT4)) { rc = CIFSSMBSetPathInfo(xid, tcon, full_path, buf, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc == 0) { cinode->cifsAttrs = le32_to_cpu(buf->Attributes); goto out; } else if (rc != -EOPNOTSUPP && rc != -EINVAL) goto out; } cifs_dbg(FYI, "calling SetFileInfo since SetPathInfo for times not supported by this server\n"); rc = CIFSSMBOpen(xid, tcon, full_path, FILE_OPEN, SYNCHRONIZE | FILE_WRITE_ATTRIBUTES, CREATE_NOT_DIR, &netfid, &oplock, NULL, cifs_sb->local_nls, cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR); if (rc != 0) { if (rc == -EIO) rc = -EINVAL; goto out; } netpid = current->tgid; set_via_filehandle: rc = CIFSSMBSetFileInfo(xid, tcon, buf, netfid, netpid); if (!rc) cinode->cifsAttrs = le32_to_cpu(buf->Attributes); if (open_file == NULL) CIFSSMBClose(xid, tcon, netfid); else cifsFileInfo_put(open_file); out: if (tlink != NULL) cifs_put_tlink(tlink); return rc; } static int cifs_query_dir_first(const unsigned int xid, struct cifs_tcon *tcon, const char *path, struct cifs_sb_info *cifs_sb, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { return CIFSFindFirst(xid, tcon, path, cifs_sb, &fid->netfid, search_flags, srch_inf, true); } static int cifs_query_dir_next(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid, __u16 search_flags, struct cifs_search_info *srch_inf) { return CIFSFindNext(xid, tcon, fid->netfid, search_flags, srch_inf); } static int cifs_close_dir(const unsigned int xid, struct cifs_tcon *tcon, struct cifs_fid *fid) { return CIFSFindClose(xid, tcon, fid->netfid); } static int cifs_oplock_response(struct cifs_tcon *tcon, struct cifs_fid *fid, struct cifsInodeInfo *cinode) { return CIFSSMBLock(0, tcon, fid->netfid, current->tgid, 0, 0, 0, 0, LOCKING_ANDX_OPLOCK_RELEASE, false, cinode->clientCanCacheRead ? 
1 : 0); } static int cifs_queryfs(const unsigned int xid, struct cifs_tcon *tcon, struct kstatfs *buf) { int rc = -EOPNOTSUPP; buf->f_type = CIFS_MAGIC_NUMBER; /* * We could add a second check for a QFS Unix capability bit */ if ((tcon->ses->capabilities & CAP_UNIX) && (CIFS_POSIX_EXTENSIONS & le64_to_cpu(tcon->fsUnixInfo.Capability))) rc = CIFSSMBQFSPosixInfo(xid, tcon, buf); /* * Only need to call the old QFSInfo if failed on newer one, * e.g. by OS/2. **/ if (rc && (tcon->ses->capabilities & CAP_NT_SMBS)) rc = CIFSSMBQFSInfo(xid, tcon, buf); /* * Some old Windows servers also do not support level 103, retry with * older level one if old server failed the previous call or we * bypassed it because we detected that this was an older LANMAN sess */ if (rc) rc = SMBOldQFSInfo(xid, tcon, buf); return rc; } static int cifs_mand_lock(const unsigned int xid, struct cifsFileInfo *cfile, __u64 offset, __u64 length, __u32 type, int lock, int unlock, bool wait) { return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->fid.netfid, current->tgid, length, offset, unlock, lock, (__u8)type, wait, 0); } struct smb_version_operations smb1_operations = { .send_cancel = send_nt_cancel, .compare_fids = cifs_compare_fids, .setup_request = cifs_setup_request, .setup_async_request = cifs_setup_async_request, .check_receive = cifs_check_receive, .add_credits = cifs_add_credits, .set_credits = cifs_set_credits, .get_credits_field = cifs_get_credits_field, .get_credits = cifs_get_credits, .get_next_mid = cifs_get_next_mid, .read_data_offset = cifs_read_data_offset, .read_data_length = cifs_read_data_length, .map_error = map_smb_to_linux_error, .find_mid = cifs_find_mid, .check_message = checkSMB, .dump_detail = cifs_dump_detail, .clear_stats = cifs_clear_stats, .print_stats = cifs_print_stats, .is_oplock_break = is_valid_oplock_break, .check_trans2 = cifs_check_trans2, .need_neg = cifs_need_neg, .negotiate = cifs_negotiate, .negotiate_wsize = cifs_negotiate_wsize, .negotiate_rsize = 
cifs_negotiate_rsize, .sess_setup = CIFS_SessSetup, .logoff = CIFSSMBLogoff, .tree_connect = CIFSTCon, .tree_disconnect = CIFSSMBTDis, .get_dfs_refer = CIFSGetDFSRefer, .qfs_tcon = cifs_qfs_tcon, .is_path_accessible = cifs_is_path_accessible, .query_path_info = cifs_query_path_info, .query_file_info = cifs_query_file_info, .get_srv_inum = cifs_get_srv_inum, .set_path_size = CIFSSMBSetEOF, .set_file_size = CIFSSMBSetFileSize, .set_file_info = smb_set_file_info, .echo = CIFSSMBEcho, .mkdir = CIFSSMBMkDir, .mkdir_setinfo = cifs_mkdir_setinfo, .rmdir = CIFSSMBRmDir, .unlink = CIFSSMBDelFile, .rename_pending_delete = cifs_rename_pending_delete, .rename = CIFSSMBRename, .create_hardlink = CIFSCreateHardLink, .open = cifs_open_file, .set_fid = cifs_set_fid, .close = cifs_close_file, .flush = cifs_flush_file, .async_readv = cifs_async_readv, .async_writev = cifs_async_writev, .sync_read = cifs_sync_read, .sync_write = cifs_sync_write, .query_dir_first = cifs_query_dir_first, .query_dir_next = cifs_query_dir_next, .close_dir = cifs_close_dir, .calc_smb_size = smbCalcSize, .oplock_response = cifs_oplock_response, .queryfs = cifs_queryfs, .mand_lock = cifs_mand_lock, .mand_unlock_range = cifs_unlock_range, .push_mand_locks = cifs_push_mandatory_locks, #ifdef CONFIG_CIFS_XATTR .query_all_EAs = CIFSSMBQAllEAs, .set_EA = CIFSSMBSetEA, #endif /* CIFS_XATTR */ #ifdef CONFIG_CIFS_ACL .get_acl = get_cifs_acl, .set_acl = set_cifs_acl, #endif /* CIFS_ACL */ }; struct smb_version_values smb1_values = { .version_string = SMB1_VERSION_STRING, .large_lock_type = LOCKING_ANDX_LARGE_FILES, .exclusive_lock_type = 0, .shared_lock_type = LOCKING_ANDX_SHARED_LOCK, .unlock_lock_type = 0, .header_size = sizeof(struct smb_hdr), .max_header_size = MAX_CIFS_HDR_SIZE, .read_rsp_size = sizeof(READ_RSP), .lock_cmd = cpu_to_le16(SMB_COM_LOCKING_ANDX), .cap_unix = CAP_UNIX, .cap_nt_find = CAP_NT_SMBS | CAP_NT_FIND, .cap_large_files = CAP_LARGE_FILES, .oplock_read = OPLOCK_READ, };
gpl-2.0
AOKP/lge-kernel-star
net/802/garp.c
798
18007
/* * IEEE 802.1D Generic Attribute Registration Protocol (GARP) * * Copyright (c) 2008 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/timer.h> #include <linux/skbuff.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/rtnetlink.h> #include <linux/llc.h> #include <net/llc.h> #include <net/llc_pdu.h> #include <net/garp.h> #include <asm/unaligned.h> static unsigned int garp_join_time __read_mostly = 200; module_param(garp_join_time, uint, 0644); MODULE_PARM_DESC(garp_join_time, "Join time in ms (default 200ms)"); MODULE_LICENSE("GPL"); static const struct garp_state_trans { u8 state; u8 action; } garp_applicant_state_table[GARP_APPLICANT_MAX + 1][GARP_EVENT_MAX + 1] = { [GARP_APPLICANT_VA] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA, .action = GARP_ACTION_S_JOIN_IN }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AA }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA }, }, [GARP_APPLICANT_AA] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA, .action = GARP_ACTION_S_JOIN_IN }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA }, }, [GARP_APPLICANT_QA] 
= { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QA }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_LA }, }, [GARP_APPLICANT_LA] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_VO, .action = GARP_ACTION_S_LEAVE_EMPTY }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_LA }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_LA }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_LA }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VA }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, }, [GARP_APPLICANT_VP] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_AA, .action = GARP_ACTION_S_JOIN_IN }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AP }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_VO }, }, [GARP_APPLICANT_AP] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_QA, .action = GARP_ACTION_S_JOIN_IN }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = 
GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_AO }, }, [GARP_APPLICANT_QP] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QP }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_QO }, }, [GARP_APPLICANT_VO] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_AO }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_VP }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, }, [GARP_APPLICANT_AO] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_REQ_JOIN] = { .state = GARP_APPLICANT_AP }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, }, [GARP_APPLICANT_QO] = { [GARP_EVENT_TRANSMIT_PDU] = { .state = GARP_APPLICANT_INVALID }, [GARP_EVENT_R_JOIN_IN] = { .state = GARP_APPLICANT_QO }, [GARP_EVENT_R_JOIN_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_IN] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_R_LEAVE_EMPTY] = { .state = GARP_APPLICANT_VO }, [GARP_EVENT_REQ_JOIN] = { 
.state = GARP_APPLICANT_QP }, [GARP_EVENT_REQ_LEAVE] = { .state = GARP_APPLICANT_INVALID }, }, }; static int garp_attr_cmp(const struct garp_attr *attr, const void *data, u8 len, u8 type) { if (attr->type != type) return attr->type - type; if (attr->dlen != len) return attr->dlen - len; return memcmp(attr->data, data, len); } static struct garp_attr *garp_attr_lookup(const struct garp_applicant *app, const void *data, u8 len, u8 type) { struct rb_node *parent = app->gid.rb_node; struct garp_attr *attr; int d; while (parent) { attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, data, len, type); if (d < 0) parent = parent->rb_left; else if (d > 0) parent = parent->rb_right; else return attr; } return NULL; } static void garp_attr_insert(struct garp_applicant *app, struct garp_attr *new) { struct rb_node *parent = NULL, **p = &app->gid.rb_node; struct garp_attr *attr; int d; while (*p) { parent = *p; attr = rb_entry(parent, struct garp_attr, node); d = garp_attr_cmp(attr, new->data, new->dlen, new->type); if (d < 0) p = &parent->rb_left; else if (d > 0) p = &parent->rb_right; } rb_link_node(&new->node, parent, p); rb_insert_color(&new->node, &app->gid); } static struct garp_attr *garp_attr_create(struct garp_applicant *app, const void *data, u8 len, u8 type) { struct garp_attr *attr; attr = kmalloc(sizeof(*attr) + len, GFP_ATOMIC); if (!attr) return attr; attr->state = GARP_APPLICANT_VO; attr->type = type; attr->dlen = len; memcpy(attr->data, data, len); garp_attr_insert(app, attr); return attr; } static void garp_attr_destroy(struct garp_applicant *app, struct garp_attr *attr) { rb_erase(&attr->node, &app->gid); kfree(attr); } static int garp_pdu_init(struct garp_applicant *app) { struct sk_buff *skb; struct garp_pdu_hdr *gp; #define LLC_RESERVE sizeof(struct llc_pdu_un) skb = alloc_skb(app->dev->mtu + LL_RESERVED_SPACE(app->dev), GFP_ATOMIC); if (!skb) return -ENOMEM; skb->dev = app->dev; skb->protocol = htons(ETH_P_802_2); skb_reserve(skb, 
LL_RESERVED_SPACE(app->dev) + LLC_RESERVE); gp = (struct garp_pdu_hdr *)__skb_put(skb, sizeof(*gp)); put_unaligned(htons(GARP_PROTOCOL_ID), &gp->protocol); app->pdu = skb; return 0; } static int garp_pdu_append_end_mark(struct garp_applicant *app) { if (skb_tailroom(app->pdu) < sizeof(u8)) return -1; *(u8 *)__skb_put(app->pdu, sizeof(u8)) = GARP_END_MARK; return 0; } static void garp_pdu_queue(struct garp_applicant *app) { if (!app->pdu) return; garp_pdu_append_end_mark(app); garp_pdu_append_end_mark(app); llc_pdu_header_init(app->pdu, LLC_PDU_TYPE_U, LLC_SAP_BSPAN, LLC_SAP_BSPAN, LLC_PDU_CMD); llc_pdu_init_as_ui_cmd(app->pdu); llc_mac_hdr_init(app->pdu, app->dev->dev_addr, app->app->proto.group_address); skb_queue_tail(&app->queue, app->pdu); app->pdu = NULL; } static void garp_queue_xmit(struct garp_applicant *app) { struct sk_buff *skb; while ((skb = skb_dequeue(&app->queue))) dev_queue_xmit(skb); } static int garp_pdu_append_msg(struct garp_applicant *app, u8 attrtype) { struct garp_msg_hdr *gm; if (skb_tailroom(app->pdu) < sizeof(*gm)) return -1; gm = (struct garp_msg_hdr *)__skb_put(app->pdu, sizeof(*gm)); gm->attrtype = attrtype; garp_cb(app->pdu)->cur_type = attrtype; return 0; } static int garp_pdu_append_attr(struct garp_applicant *app, const struct garp_attr *attr, enum garp_attr_event event) { struct garp_attr_hdr *ga; unsigned int len; int err; again: if (!app->pdu) { err = garp_pdu_init(app); if (err < 0) return err; } if (garp_cb(app->pdu)->cur_type != attr->type) { if (garp_cb(app->pdu)->cur_type && garp_pdu_append_end_mark(app) < 0) goto queue; if (garp_pdu_append_msg(app, attr->type) < 0) goto queue; } len = sizeof(*ga) + attr->dlen; if (skb_tailroom(app->pdu) < len) goto queue; ga = (struct garp_attr_hdr *)__skb_put(app->pdu, len); ga->len = len; ga->event = event; memcpy(ga->data, attr->data, attr->dlen); return 0; queue: garp_pdu_queue(app); goto again; } static void garp_attr_event(struct garp_applicant *app, struct garp_attr *attr, enum 
garp_event event) { enum garp_applicant_state state; state = garp_applicant_state_table[attr->state][event].state; if (state == GARP_APPLICANT_INVALID) return; switch (garp_applicant_state_table[attr->state][event].action) { case GARP_ACTION_NONE: break; case GARP_ACTION_S_JOIN_IN: /* When appending the attribute fails, don't update state in * order to retry on next TRANSMIT_PDU event. */ if (garp_pdu_append_attr(app, attr, GARP_JOIN_IN) < 0) return; break; case GARP_ACTION_S_LEAVE_EMPTY: garp_pdu_append_attr(app, attr, GARP_LEAVE_EMPTY); /* As a pure applicant, sending a leave message implies that * the attribute was unregistered and can be destroyed. */ garp_attr_destroy(app, attr); return; default: WARN_ON(1); } attr->state = state; } int garp_request_join(const struct net_device *dev, const struct garp_application *appl, const void *data, u8 len, u8 type) { struct garp_port *port = dev->garp_port; struct garp_applicant *app = port->applicants[appl->type]; struct garp_attr *attr; spin_lock_bh(&app->lock); attr = garp_attr_create(app, data, len, type); if (!attr) { spin_unlock_bh(&app->lock); return -ENOMEM; } garp_attr_event(app, attr, GARP_EVENT_REQ_JOIN); spin_unlock_bh(&app->lock); return 0; } EXPORT_SYMBOL_GPL(garp_request_join); void garp_request_leave(const struct net_device *dev, const struct garp_application *appl, const void *data, u8 len, u8 type) { struct garp_port *port = dev->garp_port; struct garp_applicant *app = port->applicants[appl->type]; struct garp_attr *attr; spin_lock_bh(&app->lock); attr = garp_attr_lookup(app, data, len, type); if (!attr) { spin_unlock_bh(&app->lock); return; } garp_attr_event(app, attr, GARP_EVENT_REQ_LEAVE); spin_unlock_bh(&app->lock); } EXPORT_SYMBOL_GPL(garp_request_leave); static void garp_gid_event(struct garp_applicant *app, enum garp_event event) { struct rb_node *node, *next; struct garp_attr *attr; for (node = rb_first(&app->gid); next = node ? 
rb_next(node) : NULL, node != NULL; node = next) { attr = rb_entry(node, struct garp_attr, node); garp_attr_event(app, attr, event); } } static void garp_join_timer_arm(struct garp_applicant *app) { unsigned long delay; delay = (u64)msecs_to_jiffies(garp_join_time) * net_random() >> 32; mod_timer(&app->join_timer, jiffies + delay); } static void garp_join_timer(unsigned long data) { struct garp_applicant *app = (struct garp_applicant *)data; spin_lock(&app->lock); garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU); garp_pdu_queue(app); spin_unlock(&app->lock); garp_queue_xmit(app); garp_join_timer_arm(app); } static int garp_pdu_parse_end_mark(struct sk_buff *skb) { if (!pskb_may_pull(skb, sizeof(u8))) return -1; if (*skb->data == GARP_END_MARK) { skb_pull(skb, sizeof(u8)); return -1; } return 0; } static int garp_pdu_parse_attr(struct garp_applicant *app, struct sk_buff *skb, u8 attrtype) { const struct garp_attr_hdr *ga; struct garp_attr *attr; enum garp_event event; unsigned int dlen; if (!pskb_may_pull(skb, sizeof(*ga))) return -1; ga = (struct garp_attr_hdr *)skb->data; if (ga->len < sizeof(*ga)) return -1; if (!pskb_may_pull(skb, ga->len)) return -1; skb_pull(skb, ga->len); dlen = sizeof(*ga) - ga->len; if (attrtype > app->app->maxattr) return 0; switch (ga->event) { case GARP_LEAVE_ALL: if (dlen != 0) return -1; garp_gid_event(app, GARP_EVENT_R_LEAVE_EMPTY); return 0; case GARP_JOIN_EMPTY: event = GARP_EVENT_R_JOIN_EMPTY; break; case GARP_JOIN_IN: event = GARP_EVENT_R_JOIN_IN; break; case GARP_LEAVE_EMPTY: event = GARP_EVENT_R_LEAVE_EMPTY; break; case GARP_EMPTY: event = GARP_EVENT_R_EMPTY; break; default: return 0; } if (dlen == 0) return -1; attr = garp_attr_lookup(app, ga->data, dlen, attrtype); if (attr == NULL) return 0; garp_attr_event(app, attr, event); return 0; } static int garp_pdu_parse_msg(struct garp_applicant *app, struct sk_buff *skb) { const struct garp_msg_hdr *gm; if (!pskb_may_pull(skb, sizeof(*gm))) return -1; gm = (struct garp_msg_hdr 
*)skb->data; if (gm->attrtype == 0) return -1; skb_pull(skb, sizeof(*gm)); while (skb->len > 0) { if (garp_pdu_parse_attr(app, skb, gm->attrtype) < 0) return -1; if (garp_pdu_parse_end_mark(skb) < 0) break; } return 0; } static void garp_pdu_rcv(const struct stp_proto *proto, struct sk_buff *skb, struct net_device *dev) { struct garp_application *appl = proto->data; struct garp_port *port; struct garp_applicant *app; const struct garp_pdu_hdr *gp; port = rcu_dereference(dev->garp_port); if (!port) goto err; app = rcu_dereference(port->applicants[appl->type]); if (!app) goto err; if (!pskb_may_pull(skb, sizeof(*gp))) goto err; gp = (struct garp_pdu_hdr *)skb->data; if (get_unaligned(&gp->protocol) != htons(GARP_PROTOCOL_ID)) goto err; skb_pull(skb, sizeof(*gp)); spin_lock(&app->lock); while (skb->len > 0) { if (garp_pdu_parse_msg(app, skb) < 0) break; if (garp_pdu_parse_end_mark(skb) < 0) break; } spin_unlock(&app->lock); err: kfree_skb(skb); } static int garp_init_port(struct net_device *dev) { struct garp_port *port; port = kzalloc(sizeof(*port), GFP_KERNEL); if (!port) return -ENOMEM; rcu_assign_pointer(dev->garp_port, port); return 0; } static void garp_release_port(struct net_device *dev) { struct garp_port *port = dev->garp_port; unsigned int i; for (i = 0; i <= GARP_APPLICATION_MAX; i++) { if (port->applicants[i]) return; } rcu_assign_pointer(dev->garp_port, NULL); synchronize_rcu(); kfree(port); } int garp_init_applicant(struct net_device *dev, struct garp_application *appl) { struct garp_applicant *app; int err; ASSERT_RTNL(); if (!dev->garp_port) { err = garp_init_port(dev); if (err < 0) goto err1; } err = -ENOMEM; app = kzalloc(sizeof(*app), GFP_KERNEL); if (!app) goto err2; err = dev_mc_add(dev, appl->proto.group_address, ETH_ALEN, 0); if (err < 0) goto err3; app->dev = dev; app->app = appl; app->gid = RB_ROOT; spin_lock_init(&app->lock); skb_queue_head_init(&app->queue); rcu_assign_pointer(dev->garp_port->applicants[appl->type], app); 
	setup_timer(&app->join_timer, garp_join_timer, (unsigned long)app);
	garp_join_timer_arm(app);
	return 0;

err3:
	kfree(app);
err2:
	/* drops the port again when no other applicant is registered on it */
	garp_release_port(dev);
err1:
	return err;
}
EXPORT_SYMBOL_GPL(garp_init_applicant);

/* Unregister the applicant for @appl from @dev: unpublish it under RCU,
 * flush all pending messages, then free it and possibly the port. */
void garp_uninit_applicant(struct net_device *dev, struct garp_application *appl)
{
	struct garp_port *port = dev->garp_port;
	struct garp_applicant *app = port->applicants[appl->type];

	ASSERT_RTNL();

	rcu_assign_pointer(port->applicants[appl->type], NULL);
	synchronize_rcu();

	/* Delete timer and generate a final TRANSMIT_PDU event to flush out
	 * all pending messages before the applicant is gone. */
	del_timer_sync(&app->join_timer);
	garp_gid_event(app, GARP_EVENT_TRANSMIT_PDU);
	garp_pdu_queue(app);
	garp_queue_xmit(app);

	dev_mc_delete(dev, appl->proto.group_address, ETH_ALEN, 0);

	kfree(app);
	garp_release_port(dev);
}
EXPORT_SYMBOL_GPL(garp_uninit_applicant);

/* Hook the application's PDU receive handler into the STP/LLC demux. */
int garp_register_application(struct garp_application *appl)
{
	appl->proto.rcv = garp_pdu_rcv;
	appl->proto.data = appl;
	return stp_proto_register(&appl->proto);
}
EXPORT_SYMBOL_GPL(garp_register_application);

void garp_unregister_application(struct garp_application *appl)
{
	stp_proto_unregister(&appl->proto);
}
EXPORT_SYMBOL_GPL(garp_unregister_application);
gpl-2.0
jstotero/lge_e510_kernel
drivers/mfd/twl-core.c
798
28993
/* * twl_core.c - driver for TWL4030/TWL5030/TWL60X0/TPS659x0 PM * and audio CODEC devices * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * Modifications to defer interrupt handling to a kernel thread: * Copyright (C) 2006 MontaVista Software, Inc. * * Based on tlv320aic23.c: * Copyright (c) by Kai Svahn <kai.svahn@nokia.com> * * Code cleanup and modifications to IRQ handler. * by syed khasim <x0khasim@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/mutex.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/regulator/machine.h> #include <linux/i2c.h> #include <linux/i2c/twl.h> #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) #include <plat/cpu.h> #endif /* * The TWL4030 "Triton 2" is one of a family of a multi-function "Power * Management and System Companion Device" chips originally designed for * use in OMAP2 and OMAP 3 based systems. Its control interfaces use I2C, * often at around 3 Mbit/sec, including for interrupt handling. * * This driver core provides genirq support for the interrupts emitted, * by the various modules, and exports register access primitives. * * FIXME this driver currently requires use of the first interrupt line * (and associated registers). 
*/ #define DRIVER_NAME "twl" #if defined(CONFIG_KEYBOARD_TWL4030) || defined(CONFIG_KEYBOARD_TWL4030_MODULE) #define twl_has_keypad() true #else #define twl_has_keypad() false #endif #if defined(CONFIG_GPIO_TWL4030) || defined(CONFIG_GPIO_TWL4030_MODULE) #define twl_has_gpio() true #else #define twl_has_gpio() false #endif #if defined(CONFIG_REGULATOR_TWL4030) \ || defined(CONFIG_REGULATOR_TWL4030_MODULE) #define twl_has_regulator() true #else #define twl_has_regulator() false #endif #if defined(CONFIG_TWL4030_MADC) || defined(CONFIG_TWL4030_MADC_MODULE) #define twl_has_madc() true #else #define twl_has_madc() false #endif #ifdef CONFIG_TWL4030_POWER #define twl_has_power() true #else #define twl_has_power() false #endif #if defined(CONFIG_RTC_DRV_TWL4030) || defined(CONFIG_RTC_DRV_TWL4030_MODULE) #define twl_has_rtc() true #else #define twl_has_rtc() false #endif #if defined(CONFIG_TWL4030_USB) || defined(CONFIG_TWL4030_USB_MODULE) #define twl_has_usb() true #else #define twl_has_usb() false #endif #if defined(CONFIG_TWL4030_WATCHDOG) || \ defined(CONFIG_TWL4030_WATCHDOG_MODULE) #define twl_has_watchdog() true #else #define twl_has_watchdog() false #endif #if defined(CONFIG_TWL4030_CODEC) || defined(CONFIG_TWL4030_CODEC_MODULE) ||\ defined(CONFIG_SND_SOC_TWL6040) || defined(CONFIG_SND_SOC_TWL6040_MODULE) #define twl_has_codec() true #else #define twl_has_codec() false #endif /* Triton Core internal information (BEGIN) */ /* Last - for index max*/ #define TWL4030_MODULE_LAST TWL4030_MODULE_SECURED_REG #define TWL_NUM_SLAVES 4 #if defined(CONFIG_INPUT_TWL4030_PWRBUTTON) \ || defined(CONFIG_INPUT_TWL4030_PWRBUTTON_MODULE) #define twl_has_pwrbutton() true #else #define twl_has_pwrbutton() false #endif #define SUB_CHIP_ID0 0 #define SUB_CHIP_ID1 1 #define SUB_CHIP_ID2 2 #define SUB_CHIP_ID3 3 #define TWL_MODULE_LAST TWL4030_MODULE_LAST /* Base Address defns for twl4030_map[] */ /* subchip/slave 0 - USB ID */ #define TWL4030_BASEADD_USB 0x0000 /* subchip/slave 1 - AUD 
ID */ #define TWL4030_BASEADD_AUDIO_VOICE 0x0000 #define TWL4030_BASEADD_GPIO 0x0098 #define TWL4030_BASEADD_INTBR 0x0085 #define TWL4030_BASEADD_PIH 0x0080 #define TWL4030_BASEADD_TEST 0x004C /* subchip/slave 2 - AUX ID */ #define TWL4030_BASEADD_INTERRUPTS 0x00B9 #define TWL4030_BASEADD_LED 0x00EE #define TWL4030_BASEADD_MADC 0x0000 #define TWL4030_BASEADD_MAIN_CHARGE 0x0074 #define TWL4030_BASEADD_PRECHARGE 0x00AA #define TWL4030_BASEADD_PWM0 0x00F8 #define TWL4030_BASEADD_PWM1 0x00FB #define TWL4030_BASEADD_PWMA 0x00EF #define TWL4030_BASEADD_PWMB 0x00F1 #define TWL4030_BASEADD_KEYPAD 0x00D2 #define TWL5031_BASEADD_ACCESSORY 0x0074 /* Replaces Main Charge */ #define TWL5031_BASEADD_INTERRUPTS 0x00B9 /* Different than TWL4030's one */ /* subchip/slave 3 - POWER ID */ #define TWL4030_BASEADD_BACKUP 0x0014 #define TWL4030_BASEADD_INT 0x002E #define TWL4030_BASEADD_PM_MASTER 0x0036 #define TWL4030_BASEADD_PM_RECEIVER 0x005B #define TWL4030_BASEADD_RTC 0x001C #define TWL4030_BASEADD_SECURED_REG 0x0000 /* Triton Core internal information (END) */ /* subchip/slave 0 0x48 - POWER */ #define TWL6030_BASEADD_RTC 0x0000 #define TWL6030_BASEADD_MEM 0x0017 #define TWL6030_BASEADD_PM_MASTER 0x001F #define TWL6030_BASEADD_PM_SLAVE_MISC 0x0030 /* PM_RECEIVER */ #define TWL6030_BASEADD_PM_MISC 0x00E2 #define TWL6030_BASEADD_PM_PUPD 0x00F0 /* subchip/slave 1 0x49 - FEATURE */ #define TWL6030_BASEADD_USB 0x0000 #define TWL6030_BASEADD_GPADC_CTRL 0x002E #define TWL6030_BASEADD_AUX 0x0090 #define TWL6030_BASEADD_PWM 0x00BA #define TWL6030_BASEADD_GASGAUGE 0x00C0 #define TWL6030_BASEADD_PIH 0x00D0 #define TWL6030_BASEADD_CHARGER 0x00E0 /* subchip/slave 2 0x4A - DFT */ #define TWL6030_BASEADD_DIEID 0x00C0 /* subchip/slave 3 0x4B - AUDIO */ #define TWL6030_BASEADD_AUDIO 0x0000 #define TWL6030_BASEADD_RSV 0x0000 #define TWL6030_BASEADD_ZERO 0x0000 /* Few power values */ #define R_CFG_BOOT 0x05 #define R_PROTECT_KEY 0x0E /* access control values for R_PROTECT_KEY */ #define KEY_UNLOCK1 
0xce #define KEY_UNLOCK2 0xec #define KEY_LOCK 0x00 /* some fields in R_CFG_BOOT */ #define HFCLK_FREQ_19p2_MHZ (1 << 0) #define HFCLK_FREQ_26_MHZ (2 << 0) #define HFCLK_FREQ_38p4_MHZ (3 << 0) #define HIGH_PERF_SQ (1 << 3) #define CK32K_LOWPWR_EN (1 << 7) /* chip-specific feature flags, for i2c_device_id.driver_data */ #define TWL4030_VAUX2 BIT(0) /* pre-5030 voltage ranges */ #define TPS_SUBSET BIT(1) /* tps659[23]0 have fewer LDOs */ #define TWL5031 BIT(2) /* twl5031 has different registers */ #define TWL6030_CLASS BIT(3) /* TWL6030 class */ /*----------------------------------------------------------------------*/ /* is driver active, bound to a chip? */ static bool inuse; static unsigned int twl_id; unsigned int twl_rev(void) { return twl_id; } EXPORT_SYMBOL(twl_rev); /* Structure for each TWL4030/TWL6030 Slave */ struct twl_client { struct i2c_client *client; u8 address; /* max numb of i2c_msg required is for read =2 */ struct i2c_msg xfer_msg[2]; /* To lock access to xfer_msg */ struct mutex xfer_lock; }; static struct twl_client twl_modules[TWL_NUM_SLAVES]; /* mapping the module id to slave id and base address */ struct twl_mapping { unsigned char sid; /* Slave ID */ unsigned char base; /* base address */ }; struct twl_mapping *twl_map; static struct twl_mapping twl4030_map[TWL4030_MODULE_LAST + 1] = { /* * NOTE: don't change this table without updating the * <linux/i2c/twl.h> defines for TWL4030_MODULE_* * so they continue to match the order in this table. 
*/ { 0, TWL4030_BASEADD_USB }, { 1, TWL4030_BASEADD_AUDIO_VOICE }, { 1, TWL4030_BASEADD_GPIO }, { 1, TWL4030_BASEADD_INTBR }, { 1, TWL4030_BASEADD_PIH }, { 1, TWL4030_BASEADD_TEST }, { 2, TWL4030_BASEADD_KEYPAD }, { 2, TWL4030_BASEADD_MADC }, { 2, TWL4030_BASEADD_INTERRUPTS }, { 2, TWL4030_BASEADD_LED }, { 2, TWL4030_BASEADD_MAIN_CHARGE }, { 2, TWL4030_BASEADD_PRECHARGE }, { 2, TWL4030_BASEADD_PWM0 }, { 2, TWL4030_BASEADD_PWM1 }, { 2, TWL4030_BASEADD_PWMA }, { 2, TWL4030_BASEADD_PWMB }, { 2, TWL5031_BASEADD_ACCESSORY }, { 2, TWL5031_BASEADD_INTERRUPTS }, { 3, TWL4030_BASEADD_BACKUP }, { 3, TWL4030_BASEADD_INT }, { 3, TWL4030_BASEADD_PM_MASTER }, { 3, TWL4030_BASEADD_PM_RECEIVER }, { 3, TWL4030_BASEADD_RTC }, { 3, TWL4030_BASEADD_SECURED_REG }, }; static struct twl_mapping twl6030_map[] = { /* * NOTE: don't change this table without updating the * <linux/i2c/twl.h> defines for TWL4030_MODULE_* * so they continue to match the order in this table. */ { SUB_CHIP_ID1, TWL6030_BASEADD_USB }, { SUB_CHIP_ID3, TWL6030_BASEADD_AUDIO }, { SUB_CHIP_ID2, TWL6030_BASEADD_DIEID }, { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, { SUB_CHIP_ID1, TWL6030_BASEADD_PIH }, { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, { SUB_CHIP_ID1, TWL6030_BASEADD_GPADC_CTRL }, { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, { SUB_CHIP_ID1, TWL6030_BASEADD_CHARGER }, { SUB_CHIP_ID1, TWL6030_BASEADD_GASGAUGE }, { SUB_CHIP_ID1, TWL6030_BASEADD_PWM }, { SUB_CHIP_ID0, TWL6030_BASEADD_ZERO }, { SUB_CHIP_ID1, TWL6030_BASEADD_ZERO }, { SUB_CHIP_ID2, TWL6030_BASEADD_ZERO }, { SUB_CHIP_ID2, TWL6030_BASEADD_ZERO }, { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, { SUB_CHIP_ID2, TWL6030_BASEADD_RSV }, { SUB_CHIP_ID0, TWL6030_BASEADD_PM_MASTER }, { SUB_CHIP_ID0, TWL6030_BASEADD_PM_SLAVE_MISC }, { SUB_CHIP_ID0, TWL6030_BASEADD_RTC }, { SUB_CHIP_ID0, TWL6030_BASEADD_MEM }, }; 
/*----------------------------------------------------------------------*/ /* Exported Functions */ /** * twl_i2c_write - Writes a n bit register in TWL4030/TWL5030/TWL60X0 * @mod_no: module number * @value: an array of num_bytes+1 containing data to write * @reg: register address (just offset will do) * @num_bytes: number of bytes to transfer * * IMPORTANT: for 'value' parameter: Allocate value num_bytes+1 and * valid data starts at Offset 1. * * Returns the result of operation - 0 is success */ int twl_i2c_write(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes) { int ret; int sid; struct twl_client *twl; struct i2c_msg *msg; if (unlikely(mod_no > TWL_MODULE_LAST)) { pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no); return -EPERM; } sid = twl_map[mod_no].sid; twl = &twl_modules[sid]; if (unlikely(!inuse)) { pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid); return -EPERM; } mutex_lock(&twl->xfer_lock); /* * [MSG1]: fill the register address data * fill the data Tx buffer */ msg = &twl->xfer_msg[0]; msg->addr = twl->address; msg->len = num_bytes + 1; msg->flags = 0; msg->buf = value; /* over write the first byte of buffer with the register address */ *value = twl_map[mod_no].base + reg; ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 1); mutex_unlock(&twl->xfer_lock); /* i2c_transfer returns number of messages transferred */ if (ret != 1) { pr_err("%s: i2c_write failed to transfer all messages\n", DRIVER_NAME); if (ret < 0) return ret; else return -EIO; } else { return 0; } } EXPORT_SYMBOL(twl_i2c_write); /** * twl_i2c_read - Reads a n bit register in TWL4030/TWL5030/TWL60X0 * @mod_no: module number * @value: an array of num_bytes containing data to be read * @reg: register address (just offset will do) * @num_bytes: number of bytes to transfer * * Returns result of operation - num_bytes is success else failure. 
*/
int twl_i2c_read(u8 mod_no, u8 *value, u8 reg, unsigned num_bytes)
{
	int ret;
	u8 val;
	int sid;
	struct twl_client *twl;
	struct i2c_msg *msg;

	/* reject module ids beyond the slave/base mapping table */
	if (unlikely(mod_no > TWL_MODULE_LAST)) {
		pr_err("%s: invalid module number %d\n", DRIVER_NAME, mod_no);
		return -EPERM;
	}
	sid = twl_map[mod_no].sid;
	twl = &twl_modules[sid];

	/* the slave clients are only valid once probe has completed */
	if (unlikely(!inuse)) {
		pr_err("%s: client %d is not initialized\n", DRIVER_NAME, sid);
		return -EPERM;
	}
	mutex_lock(&twl->xfer_lock);
	/* [MSG1] fill the register address data */
	msg = &twl->xfer_msg[0];
	msg->addr = twl->address;
	msg->len = 1;
	msg->flags = 0;	/* Read the register value */
	val = twl_map[mod_no].base + reg;
	msg->buf = &val;
	/* [MSG2] fill the data rx buffer */
	msg = &twl->xfer_msg[1];
	msg->addr = twl->address;
	msg->flags = I2C_M_RD;	/* Read the register value */
	msg->len = num_bytes;	/* only n bytes */
	msg->buf = value;
	ret = i2c_transfer(twl->client->adapter, twl->xfer_msg, 2);
	mutex_unlock(&twl->xfer_lock);

	/* i2c_transfer returns number of messages transferred */
	if (ret != 2) {
		pr_err("%s: i2c_read failed to transfer all messages\n",
			DRIVER_NAME);
		if (ret < 0)
			return ret;
		else
			return -EIO;
	} else {
		return 0;
	}
}
EXPORT_SYMBOL(twl_i2c_read);

/**
 * twl_i2c_write_u8 - Writes a 8 bit register in TWL4030/TWL5030/TWL60X0
 * @mod_no: module number
 * @value: the value to be written 8 bit
 * @reg: register address (just offset will do)
 *
 * Returns result of operation - 0 is success
 */
int twl_i2c_write_u8(u8 mod_no, u8 value, u8 reg)
{
	/* 2 bytes offset 1 contains the data offset 0 is used by i2c_write */
	u8 temp_buffer[2] = { 0 };
	/* offset 1 contains the data */
	temp_buffer[1] = value;
	return twl_i2c_write(mod_no, temp_buffer, reg, 1);
}
EXPORT_SYMBOL(twl_i2c_write_u8);

/**
 * twl_i2c_read_u8 - Reads a 8 bit register from TWL4030/TWL5030/TWL60X0
 * @mod_no: module number
 * @value: the value read 8 bit
 * @reg: register address (just offset will do)
 *
 * Returns result of operation - 0 is success
 */
int twl_i2c_read_u8(u8 mod_no, u8 *value, u8 reg)
{
return twl_i2c_read(mod_no, value, reg, 1); } EXPORT_SYMBOL(twl_i2c_read_u8); /*----------------------------------------------------------------------*/ static struct device * add_numbered_child(unsigned chip, const char *name, int num, void *pdata, unsigned pdata_len, bool can_wakeup, int irq0, int irq1) { struct platform_device *pdev; struct twl_client *twl = &twl_modules[chip]; int status; pdev = platform_device_alloc(name, num); if (!pdev) { dev_dbg(&twl->client->dev, "can't alloc dev\n"); status = -ENOMEM; goto err; } device_init_wakeup(&pdev->dev, can_wakeup); pdev->dev.parent = &twl->client->dev; if (pdata) { status = platform_device_add_data(pdev, pdata, pdata_len); if (status < 0) { dev_dbg(&pdev->dev, "can't add platform_data\n"); goto err; } } if (irq0) { struct resource r[2] = { { .start = irq0, .flags = IORESOURCE_IRQ, }, { .start = irq1, .flags = IORESOURCE_IRQ, }, }; status = platform_device_add_resources(pdev, r, irq1 ? 2 : 1); if (status < 0) { dev_dbg(&pdev->dev, "can't add irqs\n"); goto err; } } status = platform_device_add(pdev); err: if (status < 0) { platform_device_put(pdev); dev_err(&twl->client->dev, "can't add %s dev\n", name); return ERR_PTR(status); } return &pdev->dev; } static inline struct device *add_child(unsigned chip, const char *name, void *pdata, unsigned pdata_len, bool can_wakeup, int irq0, int irq1) { return add_numbered_child(chip, name, -1, pdata, pdata_len, can_wakeup, irq0, irq1); } static struct device * add_regulator_linked(int num, struct regulator_init_data *pdata, struct regulator_consumer_supply *consumers, unsigned num_consumers) { unsigned sub_chip_id; /* regulator framework demands init_data ... */ if (!pdata) return NULL; if (consumers) { pdata->consumer_supplies = consumers; pdata->num_consumer_supplies = num_consumers; } /* NOTE: we currently ignore regulator IRQs, e.g. 
for short circuits */ sub_chip_id = twl_map[TWL_MODULE_PM_MASTER].sid; return add_numbered_child(sub_chip_id, "twl_reg", num, pdata, sizeof(*pdata), false, 0, 0); } static struct device * add_regulator(int num, struct regulator_init_data *pdata) { return add_regulator_linked(num, pdata, NULL, 0); } /* * NOTE: We know the first 8 IRQs after pdata->base_irq are * for the PIH, and the next are for the PWR_INT SIH, since * that's how twl_init_irq() sets things up. */ static int add_children(struct twl4030_platform_data *pdata, unsigned long features) { struct device *child; unsigned sub_chip_id; if (twl_has_gpio() && pdata->gpio) { child = add_child(SUB_CHIP_ID1, "twl4030_gpio", pdata->gpio, sizeof(*pdata->gpio), false, pdata->irq_base + GPIO_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (twl_has_keypad() && pdata->keypad) { child = add_child(SUB_CHIP_ID2, "twl4030_keypad", pdata->keypad, sizeof(*pdata->keypad), true, pdata->irq_base + KEYPAD_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (twl_has_madc() && pdata->madc) { child = add_child(2, "twl4030_madc", pdata->madc, sizeof(*pdata->madc), true, pdata->irq_base + MADC_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (twl_has_rtc()) { /* * REVISIT platform_data here currently might expose the * "msecure" line ... but for now we just expect board * setup to tell the chip "it's always ok to SET_TIME". * Eventually, Linux might become more aware of such * HW security concerns, and "least privilege". 
*/ sub_chip_id = twl_map[TWL_MODULE_RTC].sid; child = add_child(sub_chip_id, "twl_rtc", NULL, 0, true, pdata->irq_base + RTC_INTR_OFFSET, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (twl_has_usb() && pdata->usb && twl_class_is_4030()) { static struct regulator_consumer_supply usb1v5 = { .supply = "usb1v5", }; static struct regulator_consumer_supply usb1v8 = { .supply = "usb1v8", }; static struct regulator_consumer_supply usb3v1 = { .supply = "usb3v1", }; /* First add the regulators so that they can be used by transceiver */ if (twl_has_regulator()) { /* this is a template that gets copied */ struct regulator_init_data usb_fixed = { .constraints.valid_modes_mask = REGULATOR_MODE_NORMAL | REGULATOR_MODE_STANDBY, .constraints.valid_ops_mask = REGULATOR_CHANGE_MODE | REGULATOR_CHANGE_STATUS, }; child = add_regulator_linked(TWL4030_REG_VUSB1V5, &usb_fixed, &usb1v5, 1); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator_linked(TWL4030_REG_VUSB1V8, &usb_fixed, &usb1v8, 1); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator_linked(TWL4030_REG_VUSB3V1, &usb_fixed, &usb3v1, 1); if (IS_ERR(child)) return PTR_ERR(child); } child = add_child(0, "twl4030_usb", pdata->usb, sizeof(*pdata->usb), true, /* irq0 = USB_PRES, irq1 = USB */ pdata->irq_base + USB_PRES_INTR_OFFSET, pdata->irq_base + USB_INTR_OFFSET); if (IS_ERR(child)) return PTR_ERR(child); /* we need to connect regulators to this transceiver */ if (twl_has_regulator() && child) { usb1v5.dev = child; usb1v8.dev = child; usb3v1.dev = child; } } if (twl_has_watchdog()) { child = add_child(0, "twl4030_wdt", NULL, 0, false, 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (twl_has_pwrbutton()) { child = add_child(1, "twl4030_pwrbutton", NULL, 0, true, pdata->irq_base + 8 + 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } if (twl_has_codec() && pdata->codec && twl_class_is_4030()) { sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid; child = add_child(sub_chip_id, "twl4030_codec", 
pdata->codec, sizeof(*pdata->codec), false, 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } /* Phoenix*/ if (twl_has_codec() && pdata->codec && twl_class_is_6030()) { sub_chip_id = twl_map[TWL_MODULE_AUDIO_VOICE].sid; child = add_child(sub_chip_id, "twl6040_codec", pdata->codec, sizeof(*pdata->codec), false, 0, 0); if (IS_ERR(child)) return PTR_ERR(child); } /* twl4030 regulators */ if (twl_has_regulator() && twl_class_is_4030()) { child = add_regulator(TWL4030_REG_VPLL1, pdata->vpll1); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VIO, pdata->vio); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VDD1, pdata->vdd1); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VDD2, pdata->vdd2); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VMMC1, pdata->vmmc1); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VDAC, pdata->vdac); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator((features & TWL4030_VAUX2) ? 
TWL4030_REG_VAUX2_4030 : TWL4030_REG_VAUX2, pdata->vaux2); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VINTANA1, pdata->vintana1); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VINTANA2, pdata->vintana2); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VINTDIG, pdata->vintdig); if (IS_ERR(child)) return PTR_ERR(child); } /* maybe add LDOs that are omitted on cost-reduced parts */ if (twl_has_regulator() && !(features & TPS_SUBSET) && twl_class_is_4030()) { child = add_regulator(TWL4030_REG_VPLL2, pdata->vpll2); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VMMC2, pdata->vmmc2); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VSIM, pdata->vsim); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VAUX1, pdata->vaux1); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VAUX3, pdata->vaux3); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL4030_REG_VAUX4, pdata->vaux4); if (IS_ERR(child)) return PTR_ERR(child); } /* twl6030 regulators */ if (twl_has_regulator() && twl_class_is_6030()) { child = add_regulator(TWL6030_REG_VMMC, pdata->vmmc); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL6030_REG_VPP, pdata->vpp); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL6030_REG_VUSIM, pdata->vusim); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL6030_REG_VANA, pdata->vana); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL6030_REG_VCXIO, pdata->vcxio); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL6030_REG_VDAC, pdata->vdac); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL6030_REG_VUSB, pdata->vusb); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL6030_REG_VAUX1_6030, pdata->vaux1); if (IS_ERR(child)) return PTR_ERR(child); child = 
add_regulator(TWL6030_REG_VAUX2_6030, pdata->vaux2); if (IS_ERR(child)) return PTR_ERR(child); child = add_regulator(TWL6030_REG_VAUX3_6030, pdata->vaux3); if (IS_ERR(child)) return PTR_ERR(child); } return 0; } /*----------------------------------------------------------------------*/ /* * These three functions initialize the on-chip clock framework, * letting it generate the right frequencies for USB, MADC, and * other purposes. */ static inline int __init protect_pm_master(void) { int e = 0; e = twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_LOCK, R_PROTECT_KEY); return e; } static inline int __init unprotect_pm_master(void) { int e = 0; e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK1, R_PROTECT_KEY); e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, KEY_UNLOCK2, R_PROTECT_KEY); return e; } static void clocks_init(struct device *dev, struct twl4030_clock_init_data *clock) { int e = 0; struct clk *osc; u32 rate; u8 ctrl = HFCLK_FREQ_26_MHZ; #if defined(CONFIG_ARCH_OMAP2) || defined(CONFIG_ARCH_OMAP3) if (cpu_is_omap2430()) osc = clk_get(dev, "osc_ck"); else osc = clk_get(dev, "osc_sys_ck"); if (IS_ERR(osc)) { printk(KERN_WARNING "Skipping twl internal clock init and " "using bootloader value (unknown osc rate)\n"); return; } rate = clk_get_rate(osc); clk_put(osc); #else /* REVISIT for non-OMAP systems, pass the clock rate from * board init code, using platform_data. 
*/ osc = ERR_PTR(-EIO); printk(KERN_WARNING "Skipping twl internal clock init and " "using bootloader value (unknown osc rate)\n"); return; #endif switch (rate) { case 19200000: ctrl = HFCLK_FREQ_19p2_MHZ; break; case 26000000: ctrl = HFCLK_FREQ_26_MHZ; break; case 38400000: ctrl = HFCLK_FREQ_38p4_MHZ; break; } ctrl |= HIGH_PERF_SQ; if (clock && clock->ck32k_lowpwr_enable) ctrl |= CK32K_LOWPWR_EN; e |= unprotect_pm_master(); /* effect->MADC+USB ck en */ e |= twl_i2c_write_u8(TWL_MODULE_PM_MASTER, ctrl, R_CFG_BOOT); e |= protect_pm_master(); if (e < 0) pr_err("%s: clock init err [%d]\n", DRIVER_NAME, e); } /*----------------------------------------------------------------------*/ int twl4030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); int twl4030_exit_irq(void); int twl4030_init_chip_irq(const char *chip); int twl6030_init_irq(int irq_num, unsigned irq_base, unsigned irq_end); int twl6030_exit_irq(void); static int twl_remove(struct i2c_client *client) { unsigned i; int status; if (twl_class_is_4030()) status = twl4030_exit_irq(); else status = twl6030_exit_irq(); if (status < 0) return status; for (i = 0; i < TWL_NUM_SLAVES; i++) { struct twl_client *twl = &twl_modules[i]; if (twl->client && twl->client != client) i2c_unregister_device(twl->client); twl_modules[i].client = NULL; } inuse = false; return 0; } /* NOTE: this driver only handles a single twl4030/tps659x0 chip */ static int __init twl_probe(struct i2c_client *client, const struct i2c_device_id *id) { int status; unsigned i; struct twl4030_platform_data *pdata = client->dev.platform_data; u8 temp; if (!pdata) { dev_dbg(&client->dev, "no platform data?\n"); return -EINVAL; } if (i2c_check_functionality(client->adapter, I2C_FUNC_I2C) == 0) { dev_dbg(&client->dev, "can't talk I2C?\n"); return -EIO; } if (inuse) { dev_dbg(&client->dev, "driver is already in use\n"); return -EBUSY; } for (i = 0; i < TWL_NUM_SLAVES; i++) { struct twl_client *twl = &twl_modules[i]; twl->address = client->addr + 
i; if (i == 0) twl->client = client; else { twl->client = i2c_new_dummy(client->adapter, twl->address); if (!twl->client) { dev_err(&client->dev, "can't attach client %d\n", i); status = -ENOMEM; goto fail; } } mutex_init(&twl->xfer_lock); } inuse = true; if ((id->driver_data) & TWL6030_CLASS) { twl_id = TWL6030_CLASS_ID; twl_map = &twl6030_map[0]; } else { twl_id = TWL4030_CLASS_ID; twl_map = &twl4030_map[0]; } /* setup clock framework */ clocks_init(&client->dev, pdata->clock); /* load power event scripts */ if (twl_has_power() && pdata->power) twl4030_power_init(pdata->power); /* Maybe init the T2 Interrupt subsystem */ if (client->irq && pdata->irq_base && pdata->irq_end > pdata->irq_base) { if (twl_class_is_4030()) { twl4030_init_chip_irq(id->name); status = twl4030_init_irq(client->irq, pdata->irq_base, pdata->irq_end); } else { status = twl6030_init_irq(client->irq, pdata->irq_base, pdata->irq_end); } if (status < 0) goto fail; } /* Disable TWL4030/TWL5030 I2C Pull-up on I2C1 and I2C4(SR) interface. * Program I2C_SCL_CTRL_PU(bit 0)=0, I2C_SDA_CTRL_PU (bit 2)=0, * SR_I2C_SCL_CTRL_PU(bit 4)=0 and SR_I2C_SDA_CTRL_PU(bit 6)=0. 
*/ if (twl_class_is_4030()) { twl_i2c_read_u8(TWL4030_MODULE_INTBR, &temp, REG_GPPUPDCTR1); temp &= ~(SR_I2C_SDA_CTRL_PU | SR_I2C_SCL_CTRL_PU | \ I2C_SDA_CTRL_PU | I2C_SCL_CTRL_PU); twl_i2c_write_u8(TWL4030_MODULE_INTBR, temp, REG_GPPUPDCTR1); } status = add_children(pdata, id->driver_data); fail: if (status < 0) twl_remove(client); return status; } static const struct i2c_device_id twl_ids[] = { { "twl4030", TWL4030_VAUX2 }, /* "Triton 2" */ { "twl5030", 0 }, /* T2 updated */ { "twl5031", TWL5031 }, /* TWL5030 updated */ { "tps65950", 0 }, /* catalog version of twl5030 */ { "tps65930", TPS_SUBSET }, /* fewer LDOs and DACs; no charger */ { "tps65920", TPS_SUBSET }, /* fewer LDOs; no codec or charger */ { "twl6030", TWL6030_CLASS }, /* "Phoenix power chip" */ { /* end of list */ }, }; MODULE_DEVICE_TABLE(i2c, twl_ids); /* One Client Driver , 4 Clients */ static struct i2c_driver twl_driver = { .driver.name = DRIVER_NAME, .id_table = twl_ids, .probe = twl_probe, .remove = twl_remove, }; static int __init twl_init(void) { return i2c_add_driver(&twl_driver); } subsys_initcall(twl_init); static void __exit twl_exit(void) { i2c_del_driver(&twl_driver); } module_exit(twl_exit); MODULE_AUTHOR("Texas Instruments, Inc."); MODULE_DESCRIPTION("I2C Core interface for TWL"); MODULE_LICENSE("GPL");
gpl-2.0
RaspberryPi-CM/android_kernel_raspberry_pi2
arch/mips/lantiq/xway/dma.c
1054
6868
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA. * * Copyright (C) 2011 John Crispin <blogic@openwrt.org> */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/err.h> #include <lantiq_soc.h> #include <xway_dma.h> #define LTQ_DMA_ID 0x08 #define LTQ_DMA_CTRL 0x10 #define LTQ_DMA_CPOLL 0x14 #define LTQ_DMA_CS 0x18 #define LTQ_DMA_CCTRL 0x1C #define LTQ_DMA_CDBA 0x20 #define LTQ_DMA_CDLEN 0x24 #define LTQ_DMA_CIS 0x28 #define LTQ_DMA_CIE 0x2C #define LTQ_DMA_PS 0x40 #define LTQ_DMA_PCTRL 0x44 #define LTQ_DMA_IRNEN 0xf4 #define DMA_DESCPT BIT(3) /* descriptor complete irq */ #define DMA_TX BIT(8) /* TX channel direction */ #define DMA_CHAN_ON BIT(0) /* channel on / off bit */ #define DMA_PDEN BIT(6) /* enable packet drop */ #define DMA_CHAN_RST BIT(1) /* channel on / off bit */ #define DMA_RESET BIT(0) /* channel on / off bit */ #define DMA_IRQ_ACK 0x7e /* IRQ status register */ #define DMA_POLL BIT(31) /* turn on channel polling */ #define DMA_CLK_DIV4 BIT(6) /* polling clock divider */ #define DMA_2W_BURST BIT(1) /* 2 word burst length */ #define DMA_MAX_CHANNEL 20 /* the soc has 20 channels */ #define DMA_ETOP_ENDIANNESS (0xf << 8) /* endianness swap etop channels */ #define DMA_WEIGHT (BIT(17) | BIT(16)) /* default channel wheight */ #define ltq_dma_r32(x) 
ltq_r32(ltq_dma_membase + (x)) #define ltq_dma_w32(x, y) ltq_w32(x, ltq_dma_membase + (y)) #define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \ ltq_dma_membase + (z)) static void __iomem *ltq_dma_membase; void ltq_dma_enable_irq(struct ltq_dma_channel *ch) { unsigned long flags; local_irq_save(flags); ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ltq_dma_enable_irq); void ltq_dma_disable_irq(struct ltq_dma_channel *ch) { unsigned long flags; local_irq_save(flags); ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_w32_mask(1 << ch->nr, 0, LTQ_DMA_IRNEN); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ltq_dma_disable_irq); void ltq_dma_ack_irq(struct ltq_dma_channel *ch) { unsigned long flags; local_irq_save(flags); ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_w32(DMA_IRQ_ACK, LTQ_DMA_CIS); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ltq_dma_ack_irq); void ltq_dma_open(struct ltq_dma_channel *ch) { unsigned long flag; local_irq_save(flag); ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_w32_mask(0, DMA_CHAN_ON, LTQ_DMA_CCTRL); ltq_dma_enable_irq(ch); local_irq_restore(flag); } EXPORT_SYMBOL_GPL(ltq_dma_open); void ltq_dma_close(struct ltq_dma_channel *ch) { unsigned long flag; local_irq_save(flag); ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); ltq_dma_disable_irq(ch); local_irq_restore(flag); } EXPORT_SYMBOL_GPL(ltq_dma_close); static void ltq_dma_alloc(struct ltq_dma_channel *ch) { unsigned long flags; ch->desc = 0; ch->desc_base = dma_alloc_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE, &ch->phys, GFP_ATOMIC); memset(ch->desc_base, 0, LTQ_DESC_NUM * LTQ_DESC_SIZE); local_irq_save(flags); ltq_dma_w32(ch->nr, LTQ_DMA_CS); ltq_dma_w32(ch->phys, LTQ_DMA_CDBA); ltq_dma_w32(LTQ_DESC_NUM, LTQ_DMA_CDLEN); ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); wmb(); ltq_dma_w32_mask(0, DMA_CHAN_RST, LTQ_DMA_CCTRL); while (ltq_dma_r32(LTQ_DMA_CCTRL) & DMA_CHAN_RST) ; 
local_irq_restore(flags); } void ltq_dma_alloc_tx(struct ltq_dma_channel *ch) { unsigned long flags; ltq_dma_alloc(ch); local_irq_save(flags); ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); ltq_dma_w32(DMA_WEIGHT | DMA_TX, LTQ_DMA_CCTRL); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ltq_dma_alloc_tx); void ltq_dma_alloc_rx(struct ltq_dma_channel *ch) { unsigned long flags; ltq_dma_alloc(ch); local_irq_save(flags); ltq_dma_w32(DMA_DESCPT, LTQ_DMA_CIE); ltq_dma_w32_mask(0, 1 << ch->nr, LTQ_DMA_IRNEN); ltq_dma_w32(DMA_WEIGHT, LTQ_DMA_CCTRL); local_irq_restore(flags); } EXPORT_SYMBOL_GPL(ltq_dma_alloc_rx); void ltq_dma_free(struct ltq_dma_channel *ch) { if (!ch->desc_base) return; ltq_dma_close(ch); dma_free_coherent(NULL, LTQ_DESC_NUM * LTQ_DESC_SIZE, ch->desc_base, ch->phys); } EXPORT_SYMBOL_GPL(ltq_dma_free); void ltq_dma_init_port(int p) { ltq_dma_w32(p, LTQ_DMA_PS); switch (p) { case DMA_PORT_ETOP: /* * Tell the DMA engine to swap the endianness of data frames and * drop packets if the channel arbitration fails. 
*/ ltq_dma_w32_mask(0, DMA_ETOP_ENDIANNESS | DMA_PDEN, LTQ_DMA_PCTRL); break; case DMA_PORT_DEU: ltq_dma_w32((DMA_2W_BURST << 4) | (DMA_2W_BURST << 2), LTQ_DMA_PCTRL); break; default: break; } } EXPORT_SYMBOL_GPL(ltq_dma_init_port); static int ltq_dma_init(struct platform_device *pdev) { struct clk *clk; struct resource *res; unsigned id; int i; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ltq_dma_membase = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(ltq_dma_membase)) panic("Failed to remap dma resource"); /* power up and reset the dma engine */ clk = clk_get(&pdev->dev, NULL); if (IS_ERR(clk)) panic("Failed to get dma clock"); clk_enable(clk); ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); /* disable all interrupts */ ltq_dma_w32(0, LTQ_DMA_IRNEN); /* reset/configure each channel */ for (i = 0; i < DMA_MAX_CHANNEL; i++) { ltq_dma_w32(i, LTQ_DMA_CS); ltq_dma_w32(DMA_CHAN_RST, LTQ_DMA_CCTRL); ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); } id = ltq_dma_r32(LTQ_DMA_ID); dev_info(&pdev->dev, "Init done - hw rev: %X, ports: %d, channels: %d\n", id & 0x1f, (id >> 16) & 0xf, id >> 20); return 0; } static const struct of_device_id dma_match[] = { { .compatible = "lantiq,dma-xway" }, {}, }; MODULE_DEVICE_TABLE(of, dma_match); static struct platform_driver dma_driver = { .probe = ltq_dma_init, .driver = { .name = "dma-xway", .of_match_table = dma_match, }, }; int __init dma_init(void) { return platform_driver_register(&dma_driver); } postcore_initcall(dma_init);
gpl-2.0
vocoderism/Tegra-Note-7
arch/s390/mm/fault.c
1310
17177
/* * arch/s390/mm/fault.c * * S390 version * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation * Author(s): Hartmut Penner (hp@de.ibm.com) * Ulrich Weigand (uweigand@de.ibm.com) * * Derived from "arch/i386/mm/fault.c" * Copyright (C) 1995 Linus Torvalds */ #include <linux/kernel_stat.h> #include <linux/perf_event.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/ptrace.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/compat.h> #include <linux/smp.h> #include <linux/kdebug.h> #include <linux/init.h> #include <linux/console.h> #include <linux/module.h> #include <linux/hardirq.h> #include <linux/kprobes.h> #include <linux/uaccess.h> #include <linux/hugetlb.h> #include <asm/asm-offsets.h> #include <asm/pgtable.h> #include <asm/irq.h> #include <asm/mmu_context.h> #include <asm/facility.h> #include "../kernel/entry.h" #ifndef CONFIG_64BIT #define __FAIL_ADDR_MASK 0x7ffff000 #define __SUBCODE_MASK 0x0200 #define __PF_RES_FIELD 0ULL #else /* CONFIG_64BIT */ #define __FAIL_ADDR_MASK -4096L #define __SUBCODE_MASK 0x0600 #define __PF_RES_FIELD 0x8000000000000000ULL #endif /* CONFIG_64BIT */ #define VM_FAULT_BADCONTEXT 0x010000 #define VM_FAULT_BADMAP 0x020000 #define VM_FAULT_BADACCESS 0x040000 static unsigned long store_indication; void fault_init(void) { if (test_facility(2) && test_facility(75)) store_indication = 0xc00; } static inline int notify_page_fault(struct pt_regs *regs) { int ret = 0; /* kprobe_running() needs smp_processor_id() */ if (kprobes_built_in() && !user_mode(regs)) { preempt_disable(); if (kprobe_running() && kprobe_fault_handler(regs, 14)) ret = 1; preempt_enable(); } return ret; } /* * Unlock any spinlocks which will prevent us from getting the * message out. 
*/ void bust_spinlocks(int yes) { if (yes) { oops_in_progress = 1; } else { int loglevel_save = console_loglevel; console_unblank(); oops_in_progress = 0; /* * OK, the message is on the console. Now we call printk() * without oops_in_progress set so that printk will give klogd * a poke. Hold onto your hats... */ console_loglevel = 15; printk(" "); console_loglevel = loglevel_save; } } /* * Returns the address space associated with the fault. * Returns 0 for kernel space and 1 for user space. */ static inline int user_space_fault(unsigned long trans_exc_code) { /* * The lowest two bits of the translation exception * identification indicate which paging table was used. */ trans_exc_code &= 3; if (trans_exc_code == 2) /* Access via secondary space, set_fs setting decides */ return current->thread.mm_segment.ar4; if (user_mode == HOME_SPACE_MODE) /* User space if the access has been done via home space. */ return trans_exc_code == 3; /* * If the user space is not the home space the kernel runs in home * space. Access via secondary space has already been covered, * access via primary space or access register is from user space * and access via home space is from the kernel. */ return trans_exc_code != 3; } static inline void report_user_fault(struct pt_regs *regs, long signr) { if ((task_pid_nr(current) > 1) && !show_unhandled_signals) return; if (!unhandled_signal(current, signr)) return; if (!printk_ratelimit()) return; printk(KERN_ALERT "User process fault: interruption code 0x%X ", regs->int_code); print_vma_addr(KERN_CONT "in ", regs->psw.addr & PSW_ADDR_INSN); printk(KERN_CONT "\n"); printk(KERN_ALERT "failing address: %lX\n", regs->int_parm_long & __FAIL_ADDR_MASK); show_regs(regs); } /* * Send SIGSEGV to task. This is an external routine * to keep the stack usage of do_page_fault small. 
*/ static noinline void do_sigsegv(struct pt_regs *regs, int si_code) { struct siginfo si; report_user_fault(regs, SIGSEGV); si.si_signo = SIGSEGV; si.si_code = si_code; si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); force_sig_info(SIGSEGV, &si, current); } static noinline void do_no_context(struct pt_regs *regs) { const struct exception_table_entry *fixup; unsigned long address; /* Are we prepared to handle this kernel fault? */ fixup = search_exception_tables(regs->psw.addr & PSW_ADDR_INSN); if (fixup) { regs->psw.addr = fixup->fixup | PSW_ADDR_AMODE; return; } /* * Oops. The kernel tried to access some bad page. We'll have to * terminate things with extreme prejudice. */ address = regs->int_parm_long & __FAIL_ADDR_MASK; if (!user_space_fault(regs->int_parm_long)) printk(KERN_ALERT "Unable to handle kernel pointer dereference" " at virtual kernel address %p\n", (void *)address); else printk(KERN_ALERT "Unable to handle kernel paging request" " at virtual user address %p\n", (void *)address); die(regs, "Oops"); do_exit(SIGKILL); } static noinline void do_low_address(struct pt_regs *regs) { /* Low-address protection hit in kernel mode means NULL pointer write access in kernel mode. */ if (regs->psw.mask & PSW_MASK_PSTATE) { /* Low-address protection hit in user mode 'cannot happen'. */ die (regs, "Low-address protection"); do_exit(SIGKILL); } do_no_context(regs); } static noinline void do_sigbus(struct pt_regs *regs) { struct task_struct *tsk = current; struct siginfo si; /* * Send a sigbus, regardless of whether we were in kernel * or user mode. */ si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_ADRERR; si.si_addr = (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK); force_sig_info(SIGBUS, &si, tsk); } static noinline void do_fault_error(struct pt_regs *regs, int fault) { int si_code; switch (fault) { case VM_FAULT_BADACCESS: case VM_FAULT_BADMAP: /* Bad memory access. Check if it is kernel or user space. 
*/ if (regs->psw.mask & PSW_MASK_PSTATE) { /* User mode accesses just cause a SIGSEGV */ si_code = (fault == VM_FAULT_BADMAP) ? SEGV_MAPERR : SEGV_ACCERR; do_sigsegv(regs, si_code); return; } case VM_FAULT_BADCONTEXT: do_no_context(regs); break; default: /* fault & VM_FAULT_ERROR */ if (fault & VM_FAULT_OOM) { if (!(regs->psw.mask & PSW_MASK_PSTATE)) do_no_context(regs); else pagefault_out_of_memory(); } else if (fault & VM_FAULT_SIGBUS) { /* Kernel mode? Handle exceptions or die */ if (!(regs->psw.mask & PSW_MASK_PSTATE)) do_no_context(regs); else do_sigbus(regs); } else BUG(); break; } } /* * This routine handles page faults. It determines the address, * and the problem, and then passes it off to one of the appropriate * routines. * * interruption code (int_code): * 04 Protection -> Write-Protection (suprression) * 10 Segment translation -> Not present (nullification) * 11 Page translation -> Not present (nullification) * 3b Region third trans. -> Not present (nullification) */ static inline int do_exception(struct pt_regs *regs, int access) { struct task_struct *tsk; struct mm_struct *mm; struct vm_area_struct *vma; unsigned long trans_exc_code; unsigned long address; unsigned int flags; int fault; if (notify_page_fault(regs)) return 0; tsk = current; mm = tsk->mm; trans_exc_code = regs->int_parm_long; /* * Verify that the fault happened in user space, that * we are not in an interrupt and that there is a * user context. 
*/ fault = VM_FAULT_BADCONTEXT; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto out; address = trans_exc_code & __FAIL_ADDR_MASK; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); flags = FAULT_FLAG_ALLOW_RETRY; if (access == VM_WRITE || (trans_exc_code & store_indication) == 0x400) flags |= FAULT_FLAG_WRITE; down_read(&mm->mmap_sem); #ifdef CONFIG_PGSTE if (test_tsk_thread_flag(current, TIF_SIE) && S390_lowcore.gmap) { address = __gmap_fault(address, (struct gmap *) S390_lowcore.gmap); if (address == -EFAULT) { fault = VM_FAULT_BADMAP; goto out_up; } if (address == -ENOMEM) { fault = VM_FAULT_OOM; goto out_up; } } #endif retry: fault = VM_FAULT_BADMAP; vma = find_vma(mm, address); if (!vma) goto out_up; if (unlikely(vma->vm_start > address)) { if (!(vma->vm_flags & VM_GROWSDOWN)) goto out_up; if (expand_stack(vma, address)) goto out_up; } /* * Ok, we have a good vm_area for this memory access, so * we can handle it.. */ fault = VM_FAULT_BADACCESS; if (unlikely(!(vma->vm_flags & access))) goto out_up; if (is_vm_hugetlb_page(vma)) address &= HPAGE_MASK; /* * If for any reason at all we couldn't handle the fault, * make sure we exit gracefully rather than endlessly redo * the fault. */ fault = handle_mm_fault(mm, vma, address, flags); if (unlikely(fault & VM_FAULT_ERROR)) goto out_up; /* * Major/minor page fault accounting is only done on the * initial attempt. If we go through a retry, it is extremely * likely that the page will be found in page cache at that point. */ if (flags & FAULT_FLAG_ALLOW_RETRY) { if (fault & VM_FAULT_MAJOR) { tsk->maj_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, address); } else { tsk->min_flt++; perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, address); } if (fault & VM_FAULT_RETRY) { /* Clear FAULT_FLAG_ALLOW_RETRY to avoid any risk * of starvation. 
*/ flags &= ~FAULT_FLAG_ALLOW_RETRY; down_read(&mm->mmap_sem); goto retry; } } /* * The instruction that caused the program check will * be repeated. Don't signal single step via SIGTRAP. */ clear_tsk_thread_flag(tsk, TIF_PER_TRAP); fault = 0; out_up: up_read(&mm->mmap_sem); out: return fault; } void __kprobes do_protection_exception(struct pt_regs *regs) { unsigned long trans_exc_code; int fault; trans_exc_code = regs->int_parm_long; /* Protection exception is suppressing, decrement psw address. */ regs->psw.addr = __rewind_psw(regs->psw, regs->int_code >> 16); /* * Check for low-address protection. This needs to be treated * as a special case because the translation exception code * field is not guaranteed to contain valid data in this case. */ if (unlikely(!(trans_exc_code & 4))) { do_low_address(regs); return; } fault = do_exception(regs, VM_WRITE); if (unlikely(fault)) do_fault_error(regs, fault); } void __kprobes do_dat_exception(struct pt_regs *regs) { int access, fault; access = VM_READ | VM_EXEC | VM_WRITE; fault = do_exception(regs, access); if (unlikely(fault)) do_fault_error(regs, fault); } #ifdef CONFIG_64BIT void __kprobes do_asce_exception(struct pt_regs *regs) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; unsigned long trans_exc_code; trans_exc_code = regs->int_parm_long; if (unlikely(!user_space_fault(trans_exc_code) || in_atomic() || !mm)) goto no_context; down_read(&mm->mmap_sem); vma = find_vma(mm, trans_exc_code & __FAIL_ADDR_MASK); up_read(&mm->mmap_sem); if (vma) { update_mm(mm, current); return; } /* User mode accesses just cause a SIGSEGV */ if (regs->psw.mask & PSW_MASK_PSTATE) { do_sigsegv(regs, SEGV_MAPERR); return; } no_context: do_no_context(regs); } #endif int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write) { struct pt_regs regs; int access, fault; /* Emulate a uaccess fault from kernel mode. 
*/ regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK; if (!irqs_disabled()) regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT; regs.psw.addr = (unsigned long) __builtin_return_address(0); regs.psw.addr |= PSW_ADDR_AMODE; regs.int_code = pgm_int_code; regs.int_parm_long = (uaddr & PAGE_MASK) | 2; access = write ? VM_WRITE : VM_READ; fault = do_exception(&regs, access); /* * Since the fault happened in kernel mode while performing a uaccess * all we need to do now is emulating a fixup in case "fault" is not * zero. * For the calling uaccess functions this results always in -EFAULT. */ return fault ? -EFAULT : 0; } #ifdef CONFIG_PFAULT /* * 'pfault' pseudo page faults routines. */ static int pfault_disable; static int __init nopfault(char *str) { pfault_disable = 1; return 1; } __setup("nopfault", nopfault); struct pfault_refbk { u16 refdiagc; u16 reffcode; u16 refdwlen; u16 refversn; u64 refgaddr; u64 refselmk; u64 refcmpmk; u64 reserved; } __attribute__ ((packed, aligned(8))); int pfault_init(void) { struct pfault_refbk refbk = { .refdiagc = 0x258, .reffcode = 0, .refdwlen = 5, .refversn = 2, .refgaddr = __LC_CURRENT_PID, .refselmk = 1ULL << 48, .refcmpmk = 1ULL << 48, .reserved = __PF_RES_FIELD }; int rc; if (pfault_disable) return -1; asm volatile( " diag %1,%0,0x258\n" "0: j 2f\n" "1: la %0,8\n" "2:\n" EX_TABLE(0b,1b) : "=d" (rc) : "a" (&refbk), "m" (refbk) : "cc"); return rc; } void pfault_fini(void) { struct pfault_refbk refbk = { .refdiagc = 0x258, .reffcode = 1, .refdwlen = 5, .refversn = 2, }; if (pfault_disable) return; asm volatile( " diag %0,0,0x258\n" "0:\n" EX_TABLE(0b,0b) : : "a" (&refbk), "m" (refbk) : "cc"); } static DEFINE_SPINLOCK(pfault_lock); static LIST_HEAD(pfault_list); static void pfault_interrupt(struct ext_code ext_code, unsigned int param32, unsigned long param64) { struct task_struct *tsk; __u16 subcode; pid_t pid; /* * Get the external interruption subcode & pfault * initial/completion signal bit. 
VM stores this * in the 'cpu address' field associated with the * external interrupt. */ subcode = ext_code.subcode; if ((subcode & 0xff00) != __SUBCODE_MASK) return; kstat_cpu(smp_processor_id()).irqs[EXTINT_PFL]++; if (subcode & 0x0080) { /* Get the token (= pid of the affected task). */ pid = sizeof(void *) == 4 ? param32 : param64; rcu_read_lock(); tsk = find_task_by_pid_ns(pid, &init_pid_ns); if (tsk) get_task_struct(tsk); rcu_read_unlock(); if (!tsk) return; } else { tsk = current; } spin_lock(&pfault_lock); if (subcode & 0x0080) { /* signal bit is set -> a page has been swapped in by VM */ if (tsk->thread.pfault_wait == 1) { /* Initial interrupt was faster than the completion * interrupt. pfault_wait is valid. Set pfault_wait * back to zero and wake up the process. This can * safely be done because the task is still sleeping * and can't produce new pfaults. */ tsk->thread.pfault_wait = 0; list_del(&tsk->thread.list); wake_up_process(tsk); put_task_struct(tsk); } else { /* Completion interrupt was faster than initial * interrupt. Set pfault_wait to -1 so the initial * interrupt doesn't put the task to sleep. * If the task is not running, ignore the completion * interrupt since it must be a leftover of a PFAULT * CANCEL operation which didn't remove all pending * completion interrupts. */ if (tsk->state == TASK_RUNNING) tsk->thread.pfault_wait = -1; } put_task_struct(tsk); } else { /* signal bit not set -> a real page is missing. */ if (tsk->thread.pfault_wait == 1) { /* Already on the list with a reference: put to sleep */ set_task_state(tsk, TASK_UNINTERRUPTIBLE); set_tsk_need_resched(tsk); } else if (tsk->thread.pfault_wait == -1) { /* Completion interrupt was faster than the initial * interrupt (pfault_wait == -1). Set pfault_wait * back to zero and exit. */ tsk->thread.pfault_wait = 0; } else { /* Initial interrupt arrived before completion * interrupt. Let the task sleep. 
* An extra task reference is needed since a different * cpu may set the task state to TASK_RUNNING again * before the scheduler is reached. */ get_task_struct(tsk); tsk->thread.pfault_wait = 1; list_add(&tsk->thread.list, &pfault_list); set_task_state(tsk, TASK_UNINTERRUPTIBLE); set_tsk_need_resched(tsk); } } spin_unlock(&pfault_lock); } static int __cpuinit pfault_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { struct thread_struct *thread, *next; struct task_struct *tsk; switch (action) { case CPU_DEAD: case CPU_DEAD_FROZEN: spin_lock_irq(&pfault_lock); list_for_each_entry_safe(thread, next, &pfault_list, list) { thread->pfault_wait = 0; list_del(&thread->list); tsk = container_of(thread, struct task_struct, thread); wake_up_process(tsk); put_task_struct(tsk); } spin_unlock_irq(&pfault_lock); break; default: break; } return NOTIFY_OK; } static int __init pfault_irq_init(void) { int rc; rc = register_external_interrupt(0x2603, pfault_interrupt); if (rc) goto out_extint; rc = pfault_init() == 0 ? 0 : -EOPNOTSUPP; if (rc) goto out_pfault; service_subclass_irq_register(); hotcpu_notifier(pfault_cpu_notify, 0); return 0; out_pfault: unregister_external_interrupt(0x2603, pfault_interrupt); out_extint: pfault_disable = 1; return rc; } early_initcall(pfault_irq_init); #endif /* CONFIG_PFAULT */
gpl-2.0
gpkulkarni/linux-arm64
arch/arm/mach-s3c24xx/mach-at2440evb.c
1566
5383
/* linux/arch/arm/mach-s3c2440/mach-at2440evb.c * * Copyright (c) 2008 Ramax Lo <ramaxlo@gmail.com> * Based on mach-anubis.c by Ben Dooks <ben@simtec.co.uk> * and modifications by SBZ <sbz@spgui.org> and * Weibing <http://weibing.blogbus.com> * * For product information, visit http://www.arm.com/ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/gpio.h> #include <linux/interrupt.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/init.h> #include <linux/io.h> #include <linux/serial_core.h> #include <linux/serial_s3c.h> #include <linux/dm9000.h> #include <linux/platform_device.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/fb.h> #include <asm/irq.h> #include <asm/mach-types.h> #include <mach/regs-gpio.h> #include <mach/regs-lcd.h> #include <mach/gpio-samsung.h> #include <linux/platform_data/mtd-nand-s3c2410.h> #include <linux/platform_data/i2c-s3c2410.h> #include <linux/mtd/mtd.h> #include <linux/mtd/nand.h> #include <linux/mtd/nand_ecc.h> #include <linux/mtd/partitions.h> #include <plat/devs.h> #include <plat/cpu.h> #include <linux/platform_data/mmc-s3cmci.h> #include <plat/samsung-time.h> #include "common.h" static struct map_desc at2440evb_iodesc[] __initdata = { /* Nothing here */ }; #define UCON S3C2410_UCON_DEFAULT #define ULCON (S3C2410_LCON_CS8 | S3C2410_LCON_PNONE) #define UFCON (S3C2410_UFCON_RXTRIG8 | S3C2410_UFCON_FIFOMODE) static struct s3c2410_uartcfg at2440evb_uartcfgs[] __initdata = { [0] = { .hwport = 0, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, .clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2, }, [1] = { .hwport = 1, .flags = 0, .ucon = UCON, .ulcon = ULCON, .ufcon = UFCON, .clk_sel = S3C2410_UCON_CLKSEL1 | S3C2410_UCON_CLKSEL2, }, 
}; /* NAND Flash on AT2440EVB board */ static struct mtd_partition __initdata at2440evb_default_nand_part[] = { [0] = { .name = "Boot Agent", .size = SZ_256K, .offset = 0, }, [1] = { .name = "Kernel", .size = SZ_2M, .offset = SZ_256K, }, [2] = { .name = "Root", .offset = SZ_256K + SZ_2M, .size = MTDPART_SIZ_FULL, }, }; static struct s3c2410_nand_set __initdata at2440evb_nand_sets[] = { [0] = { .name = "nand", .nr_chips = 1, .nr_partitions = ARRAY_SIZE(at2440evb_default_nand_part), .partitions = at2440evb_default_nand_part, }, }; static struct s3c2410_platform_nand __initdata at2440evb_nand_info = { .tacls = 25, .twrph0 = 55, .twrph1 = 40, .nr_sets = ARRAY_SIZE(at2440evb_nand_sets), .sets = at2440evb_nand_sets, }; /* DM9000AEP 10/100 ethernet controller */ static struct resource at2440evb_dm9k_resource[] = { [0] = DEFINE_RES_MEM(S3C2410_CS3, 4), [1] = DEFINE_RES_MEM(S3C2410_CS3 + 4, 4), [2] = DEFINE_RES_NAMED(IRQ_EINT7, 1, NULL, IORESOURCE_IRQ \ | IORESOURCE_IRQ_HIGHEDGE), }; static struct dm9000_plat_data at2440evb_dm9k_pdata = { .flags = (DM9000_PLATF_16BITONLY | DM9000_PLATF_NO_EEPROM), }; static struct platform_device at2440evb_device_eth = { .name = "dm9000", .id = -1, .num_resources = ARRAY_SIZE(at2440evb_dm9k_resource), .resource = at2440evb_dm9k_resource, .dev = { .platform_data = &at2440evb_dm9k_pdata, }, }; static struct s3c24xx_mci_pdata at2440evb_mci_pdata __initdata = { .gpio_detect = S3C2410_GPG(10), }; /* 7" LCD panel */ static struct s3c2410fb_display at2440evb_lcd_cfg __initdata = { .lcdcon5 = S3C2410_LCDCON5_FRM565 | S3C2410_LCDCON5_INVVLINE | S3C2410_LCDCON5_INVVFRAME | S3C2410_LCDCON5_PWREN | S3C2410_LCDCON5_HWSWP, .type = S3C2410_LCDCON1_TFT, .width = 800, .height = 480, .pixclock = 33333, /* HCLK 60 MHz, divisor 2 */ .xres = 800, .yres = 480, .bpp = 16, .left_margin = 88, .right_margin = 40, .hsync_len = 128, .upper_margin = 32, .lower_margin = 11, .vsync_len = 2, }; static struct s3c2410fb_mach_info at2440evb_fb_info __initdata = { .displays = 
&at2440evb_lcd_cfg, .num_displays = 1, .default_display = 0, }; static struct platform_device *at2440evb_devices[] __initdata = { &s3c_device_ohci, &s3c_device_wdt, &s3c_device_adc, &s3c_device_i2c0, &s3c_device_rtc, &s3c_device_nand, &s3c_device_sdi, &s3c_device_lcd, &at2440evb_device_eth, }; static void __init at2440evb_map_io(void) { s3c24xx_init_io(at2440evb_iodesc, ARRAY_SIZE(at2440evb_iodesc)); s3c24xx_init_uarts(at2440evb_uartcfgs, ARRAY_SIZE(at2440evb_uartcfgs)); samsung_set_timer_source(SAMSUNG_PWM3, SAMSUNG_PWM4); } static void __init at2440evb_init_time(void) { s3c2440_init_clocks(16934400); samsung_timer_init(); } static void __init at2440evb_init(void) { s3c24xx_fb_set_platdata(&at2440evb_fb_info); s3c24xx_mci_set_platdata(&at2440evb_mci_pdata); s3c_nand_set_platdata(&at2440evb_nand_info); s3c_i2c0_set_platdata(NULL); platform_add_devices(at2440evb_devices, ARRAY_SIZE(at2440evb_devices)); } MACHINE_START(AT2440EVB, "AT2440EVB") .atag_offset = 0x100, .map_io = at2440evb_map_io, .init_machine = at2440evb_init, .init_irq = s3c2440_init_irq, .init_time = at2440evb_init_time, MACHINE_END
gpl-2.0
ehigh2014/linux
drivers/pnp/interface.c
1566
11206
/* * interface.c - contains everything related to the user interface * * Some code, especially possible resource dumping is based on isapnp_proc.c (c) Jaroslav Kysela <perex@perex.cz> * Copyright 2002 Adam Belay <ambx1@neo.rr.com> * Copyright (C) 2008 Hewlett-Packard Development Company, L.P. * Bjorn Helgaas <bjorn.helgaas@hp.com> */ #include <linux/pnp.h> #include <linux/string.h> #include <linux/errno.h> #include <linux/list.h> #include <linux/types.h> #include <linux/stat.h> #include <linux/ctype.h> #include <linux/slab.h> #include <linux/mutex.h> #include <asm/uaccess.h> #include "base.h" struct pnp_info_buffer { char *buffer; /* pointer to begin of buffer */ char *curr; /* current position in buffer */ unsigned long size; /* current size */ unsigned long len; /* total length of buffer */ int stop; /* stop flag */ int error; /* error code */ }; typedef struct pnp_info_buffer pnp_info_buffer_t; static int pnp_printf(pnp_info_buffer_t * buffer, char *fmt, ...) { va_list args; int res; if (buffer->stop || buffer->error) return 0; va_start(args, fmt); res = vsnprintf(buffer->curr, buffer->len - buffer->size, fmt, args); va_end(args); if (buffer->size + res >= buffer->len) { buffer->stop = 1; return 0; } buffer->curr += res; buffer->size += res; return res; } static void pnp_print_port(pnp_info_buffer_t * buffer, char *space, struct pnp_port *port) { pnp_printf(buffer, "%sport %#llx-%#llx, align %#llx, size %#llx, " "%i-bit address decoding\n", space, (unsigned long long) port->min, (unsigned long long) port->max, port->align ? ((unsigned long long) port->align - 1) : 0, (unsigned long long) port->size, port->flags & IORESOURCE_IO_16BIT_ADDR ? 
16 : 10); } static void pnp_print_irq(pnp_info_buffer_t * buffer, char *space, struct pnp_irq *irq) { int first = 1, i; pnp_printf(buffer, "%sirq ", space); for (i = 0; i < PNP_IRQ_NR; i++) if (test_bit(i, irq->map.bits)) { if (!first) { pnp_printf(buffer, ","); } else { first = 0; } if (i == 2 || i == 9) pnp_printf(buffer, "2/9"); else pnp_printf(buffer, "%i", i); } if (bitmap_empty(irq->map.bits, PNP_IRQ_NR)) pnp_printf(buffer, "<none>"); if (irq->flags & IORESOURCE_IRQ_HIGHEDGE) pnp_printf(buffer, " High-Edge"); if (irq->flags & IORESOURCE_IRQ_LOWEDGE) pnp_printf(buffer, " Low-Edge"); if (irq->flags & IORESOURCE_IRQ_HIGHLEVEL) pnp_printf(buffer, " High-Level"); if (irq->flags & IORESOURCE_IRQ_LOWLEVEL) pnp_printf(buffer, " Low-Level"); if (irq->flags & IORESOURCE_IRQ_OPTIONAL) pnp_printf(buffer, " (optional)"); pnp_printf(buffer, "\n"); } static void pnp_print_dma(pnp_info_buffer_t * buffer, char *space, struct pnp_dma *dma) { int first = 1, i; char *s; pnp_printf(buffer, "%sdma ", space); for (i = 0; i < 8; i++) if (dma->map & (1 << i)) { if (!first) { pnp_printf(buffer, ","); } else { first = 0; } pnp_printf(buffer, "%i", i); } if (!dma->map) pnp_printf(buffer, "<none>"); switch (dma->flags & IORESOURCE_DMA_TYPE_MASK) { case IORESOURCE_DMA_8BIT: s = "8-bit"; break; case IORESOURCE_DMA_8AND16BIT: s = "8-bit&16-bit"; break; default: s = "16-bit"; } pnp_printf(buffer, " %s", s); if (dma->flags & IORESOURCE_DMA_MASTER) pnp_printf(buffer, " master"); if (dma->flags & IORESOURCE_DMA_BYTE) pnp_printf(buffer, " byte-count"); if (dma->flags & IORESOURCE_DMA_WORD) pnp_printf(buffer, " word-count"); switch (dma->flags & IORESOURCE_DMA_SPEED_MASK) { case IORESOURCE_DMA_TYPEA: s = "type-A"; break; case IORESOURCE_DMA_TYPEB: s = "type-B"; break; case IORESOURCE_DMA_TYPEF: s = "type-F"; break; default: s = "compatible"; break; } pnp_printf(buffer, " %s\n", s); } static void pnp_print_mem(pnp_info_buffer_t * buffer, char *space, struct pnp_mem *mem) { char *s; 
pnp_printf(buffer, "%sMemory %#llx-%#llx, align %#llx, size %#llx", space, (unsigned long long) mem->min, (unsigned long long) mem->max, (unsigned long long) mem->align, (unsigned long long) mem->size); if (mem->flags & IORESOURCE_MEM_WRITEABLE) pnp_printf(buffer, ", writeable"); if (mem->flags & IORESOURCE_MEM_CACHEABLE) pnp_printf(buffer, ", cacheable"); if (mem->flags & IORESOURCE_MEM_RANGELENGTH) pnp_printf(buffer, ", range-length"); if (mem->flags & IORESOURCE_MEM_SHADOWABLE) pnp_printf(buffer, ", shadowable"); if (mem->flags & IORESOURCE_MEM_EXPANSIONROM) pnp_printf(buffer, ", expansion ROM"); switch (mem->flags & IORESOURCE_MEM_TYPE_MASK) { case IORESOURCE_MEM_8BIT: s = "8-bit"; break; case IORESOURCE_MEM_8AND16BIT: s = "8-bit&16-bit"; break; case IORESOURCE_MEM_32BIT: s = "32-bit"; break; default: s = "16-bit"; } pnp_printf(buffer, ", %s\n", s); } static void pnp_print_option(pnp_info_buffer_t * buffer, char *space, struct pnp_option *option) { switch (option->type) { case IORESOURCE_IO: pnp_print_port(buffer, space, &option->u.port); break; case IORESOURCE_MEM: pnp_print_mem(buffer, space, &option->u.mem); break; case IORESOURCE_IRQ: pnp_print_irq(buffer, space, &option->u.irq); break; case IORESOURCE_DMA: pnp_print_dma(buffer, space, &option->u.dma); break; } } static ssize_t options_show(struct device *dmdev, struct device_attribute *attr, char *buf) { struct pnp_dev *dev = to_pnp_dev(dmdev); pnp_info_buffer_t *buffer; struct pnp_option *option; int ret, dep = 0, set = 0; char *indent; buffer = pnp_alloc(sizeof(pnp_info_buffer_t)); if (!buffer) return -ENOMEM; buffer->len = PAGE_SIZE; buffer->buffer = buf; buffer->curr = buffer->buffer; list_for_each_entry(option, &dev->options, list) { if (pnp_option_is_dependent(option)) { indent = " "; if (!dep || pnp_option_set(option) != set) { set = pnp_option_set(option); dep = 1; pnp_printf(buffer, "Dependent: %02i - " "Priority %s\n", set, pnp_option_priority_name(option)); } } else { dep = 0; indent = ""; } 
pnp_print_option(buffer, indent, option); } ret = (buffer->curr - buf); kfree(buffer); return ret; } static DEVICE_ATTR_RO(options); static ssize_t resources_show(struct device *dmdev, struct device_attribute *attr, char *buf) { struct pnp_dev *dev = to_pnp_dev(dmdev); pnp_info_buffer_t *buffer; struct pnp_resource *pnp_res; struct resource *res; int ret; if (!dev) return -EINVAL; buffer = pnp_alloc(sizeof(pnp_info_buffer_t)); if (!buffer) return -ENOMEM; buffer->len = PAGE_SIZE; buffer->buffer = buf; buffer->curr = buffer->buffer; pnp_printf(buffer, "state = %s\n", dev->active ? "active" : "disabled"); list_for_each_entry(pnp_res, &dev->resources, list) { res = &pnp_res->res; pnp_printf(buffer, pnp_resource_type_name(res)); if (res->flags & IORESOURCE_DISABLED) { pnp_printf(buffer, " disabled\n"); continue; } switch (pnp_resource_type(res)) { case IORESOURCE_IO: case IORESOURCE_MEM: case IORESOURCE_BUS: pnp_printf(buffer, " %#llx-%#llx%s\n", (unsigned long long) res->start, (unsigned long long) res->end, res->flags & IORESOURCE_WINDOW ? 
" window" : ""); break; case IORESOURCE_IRQ: case IORESOURCE_DMA: pnp_printf(buffer, " %lld\n", (unsigned long long) res->start); break; } } ret = (buffer->curr - buf); kfree(buffer); return ret; } static char *pnp_get_resource_value(char *buf, unsigned long type, resource_size_t *start, resource_size_t *end, unsigned long *flags) { if (start) *start = 0; if (end) *end = 0; if (flags) *flags = 0; /* TBD: allow for disabled resources */ buf = skip_spaces(buf); if (start) { *start = simple_strtoull(buf, &buf, 0); if (end) { buf = skip_spaces(buf); if (*buf == '-') { buf = skip_spaces(buf + 1); *end = simple_strtoull(buf, &buf, 0); } else *end = *start; } } /* TBD: allow for additional flags, e.g., IORESOURCE_WINDOW */ return buf; } static ssize_t resources_store(struct device *dmdev, struct device_attribute *attr, const char *ubuf, size_t count) { struct pnp_dev *dev = to_pnp_dev(dmdev); char *buf = (void *)ubuf; int retval = 0; if (dev->status & PNP_ATTACHED) { retval = -EBUSY; dev_info(&dev->dev, "in use; can't configure\n"); goto done; } buf = skip_spaces(buf); if (!strncasecmp(buf, "disable", 7)) { retval = pnp_disable_dev(dev); goto done; } if (!strncasecmp(buf, "activate", 8)) { retval = pnp_activate_dev(dev); goto done; } if (!strncasecmp(buf, "fill", 4)) { if (dev->active) goto done; retval = pnp_auto_config_dev(dev); goto done; } if (!strncasecmp(buf, "auto", 4)) { if (dev->active) goto done; pnp_init_resources(dev); retval = pnp_auto_config_dev(dev); goto done; } if (!strncasecmp(buf, "clear", 5)) { if (dev->active) goto done; pnp_init_resources(dev); goto done; } if (!strncasecmp(buf, "get", 3)) { mutex_lock(&pnp_res_mutex); if (pnp_can_read(dev)) dev->protocol->get(dev); mutex_unlock(&pnp_res_mutex); goto done; } if (!strncasecmp(buf, "set", 3)) { resource_size_t start; resource_size_t end; unsigned long flags; if (dev->active) goto done; buf += 3; pnp_init_resources(dev); mutex_lock(&pnp_res_mutex); while (1) { buf = skip_spaces(buf); if 
(!strncasecmp(buf, "io", 2)) { buf = pnp_get_resource_value(buf + 2, IORESOURCE_IO, &start, &end, &flags); pnp_add_io_resource(dev, start, end, flags); } else if (!strncasecmp(buf, "mem", 3)) { buf = pnp_get_resource_value(buf + 3, IORESOURCE_MEM, &start, &end, &flags); pnp_add_mem_resource(dev, start, end, flags); } else if (!strncasecmp(buf, "irq", 3)) { buf = pnp_get_resource_value(buf + 3, IORESOURCE_IRQ, &start, NULL, &flags); pnp_add_irq_resource(dev, start, flags); } else if (!strncasecmp(buf, "dma", 3)) { buf = pnp_get_resource_value(buf + 3, IORESOURCE_DMA, &start, NULL, &flags); pnp_add_dma_resource(dev, start, flags); } else if (!strncasecmp(buf, "bus", 3)) { buf = pnp_get_resource_value(buf + 3, IORESOURCE_BUS, &start, &end, NULL); pnp_add_bus_resource(dev, start, end); } else break; } mutex_unlock(&pnp_res_mutex); goto done; } done: if (retval < 0) return retval; return count; } static DEVICE_ATTR_RW(resources); static ssize_t id_show(struct device *dmdev, struct device_attribute *attr, char *buf) { char *str = buf; struct pnp_dev *dev = to_pnp_dev(dmdev); struct pnp_id *pos = dev->id; while (pos) { str += sprintf(str, "%s\n", pos->id); pos = pos->next; } return (str - buf); } static DEVICE_ATTR_RO(id); static struct attribute *pnp_dev_attrs[] = { &dev_attr_resources.attr, &dev_attr_options.attr, &dev_attr_id.attr, NULL, }; static const struct attribute_group pnp_dev_group = { .attrs = pnp_dev_attrs, }; const struct attribute_group *pnp_dev_groups[] = { &pnp_dev_group, NULL, };
gpl-2.0
zjh3123629/linux-3.10.28
drivers/scsi/sg.c
1822
71435
/* * History: * Started: Aug 9 by Lawrence Foard (entropy@world.std.com), * to allow user process control of SCSI devices. * Development Sponsored by Killy Corp. NY NY * * Original driver (sg.c): * Copyright (C) 1992 Lawrence Foard * Version 2 and 3 extensions to driver: * Copyright (C) 1998 - 2005 Douglas Gilbert * * Modified 19-JAN-1998 Richard Gooch <rgooch@atnf.csiro.au> Devfs support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * */ static int sg_version_num = 30534; /* 2 digits for each component */ #define SG_VERSION_STR "3.5.34" /* * D. P. Gilbert (dgilbert@interlog.com, dougg@triode.net.au), notes: * - scsi logging is available via SCSI_LOG_TIMEOUT macros. First * the kernel/module needs to be built with CONFIG_SCSI_LOGGING * (otherwise the macros compile to empty statements). * */ #include <linux/module.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/aio.h> #include <linux/errno.h> #include <linux/mtio.h> #include <linux/ioctl.h> #include <linux/slab.h> #include <linux/fcntl.h> #include <linux/init.h> #include <linux/poll.h> #include <linux/moduleparam.h> #include <linux/cdev.h> #include <linux/idr.h> #include <linux/seq_file.h> #include <linux/blkdev.h> #include <linux/delay.h> #include <linux/blktrace_api.h> #include <linux/mutex.h> #include <linux/ratelimit.h> #include "scsi.h" #include <scsi/scsi_dbg.h> #include <scsi/scsi_host.h> #include <scsi/scsi_driver.h> #include <scsi/scsi_ioctl.h> #include <scsi/sg.h> #include "scsi_logging.h" #ifdef CONFIG_SCSI_PROC_FS #include <linux/proc_fs.h> static char *sg_version_date = "20061027"; static int sg_proc_init(void); static void sg_proc_cleanup(void); #endif #define SG_ALLOW_DIO_DEF 0 #define SG_MAX_DEVS 32768 /* * Suppose you want 
to calculate the formula muldiv(x,m,d)=int(x * m / d) * Then when using 32 bit integers x * m may overflow during the calculation. * Replacing muldiv(x) by muldiv(x)=((x % d) * m) / d + int(x / d) * m * calculates the same, but prevents the overflow when both m and d * are "small" numbers (like HZ and USER_HZ). * Of course an overflow is inavoidable if the result of muldiv doesn't fit * in 32 bits. */ #define MULDIV(X,MUL,DIV) ((((X % DIV) * MUL) / DIV) + ((X / DIV) * MUL)) #define SG_DEFAULT_TIMEOUT MULDIV(SG_DEFAULT_TIMEOUT_USER, HZ, USER_HZ) int sg_big_buff = SG_DEF_RESERVED_SIZE; /* N.B. This variable is readable and writeable via /proc/scsi/sg/def_reserved_size . Each time sg_open() is called a buffer of this size (or less if there is not enough memory) will be reserved for use by this file descriptor. [Deprecated usage: this variable is also readable via /proc/sys/kernel/sg-big-buff if the sg driver is built into the kernel (i.e. it is not a module).] */ static int def_reserved_size = -1; /* picks up init parameter */ static int sg_allow_dio = SG_ALLOW_DIO_DEF; static int scatter_elem_sz = SG_SCATTER_SZ; static int scatter_elem_sz_prev = SG_SCATTER_SZ; #define SG_SECTOR_SZ 512 static int sg_add(struct device *, struct class_interface *); static void sg_remove(struct device *, struct class_interface *); static DEFINE_SPINLOCK(sg_open_exclusive_lock); static DEFINE_IDR(sg_index_idr); static DEFINE_RWLOCK(sg_index_lock); /* Also used to lock file descriptor list for device */ static struct class_interface sg_interface = { .add_dev = sg_add, .remove_dev = sg_remove, }; typedef struct sg_scatter_hold { /* holding area for scsi scatter gather info */ unsigned short k_use_sg; /* Count of kernel scatter-gather pieces */ unsigned sglist_len; /* size of malloc'd scatter-gather list ++ */ unsigned bufflen; /* Size of (aggregate) data buffer */ struct page **pages; int page_order; char dio_in_use; /* 0->indirect IO (or mmap), 1->dio */ unsigned char cmd_opcode; /* first 
byte of command */ } Sg_scatter_hold; struct sg_device; /* forward declarations */ struct sg_fd; typedef struct sg_request { /* SG_MAX_QUEUE requests outstanding per file */ struct sg_request *nextrp; /* NULL -> tail request (slist) */ struct sg_fd *parentfp; /* NULL -> not in use */ Sg_scatter_hold data; /* hold buffer, perhaps scatter list */ sg_io_hdr_t header; /* scsi command+info, see <scsi/sg.h> */ unsigned char sense_b[SCSI_SENSE_BUFFERSIZE]; char res_used; /* 1 -> using reserve buffer, 0 -> not ... */ char orphan; /* 1 -> drop on sight, 0 -> normal */ char sg_io_owned; /* 1 -> packet belongs to SG_IO */ /* done protected by rq_list_lock */ char done; /* 0->before bh, 1->before read, 2->read */ struct request *rq; struct bio *bio; struct execute_work ew; } Sg_request; typedef struct sg_fd { /* holds the state of a file descriptor */ /* sfd_siblings is protected by sg_index_lock */ struct list_head sfd_siblings; struct sg_device *parentdp; /* owning device */ wait_queue_head_t read_wait; /* queue read until command done */ rwlock_t rq_list_lock; /* protect access to list in req_arr */ int timeout; /* defaults to SG_DEFAULT_TIMEOUT */ int timeout_user; /* defaults to SG_DEFAULT_TIMEOUT_USER */ Sg_scatter_hold reserve; /* buffer held for this file descriptor */ unsigned save_scat_len; /* original length of trunc. scat. 
element */ Sg_request *headrp; /* head of request slist, NULL->empty */ struct fasync_struct *async_qp; /* used by asynchronous notification */ Sg_request req_arr[SG_MAX_QUEUE]; /* used as singly-linked list */ char low_dma; /* as in parent but possibly overridden to 1 */ char force_packid; /* 1 -> pack_id input to read(), 0 -> ignored */ char cmd_q; /* 1 -> allow command queuing, 0 -> don't */ char next_cmd_len; /* 0 -> automatic (def), >0 -> use on next write() */ char keep_orphan; /* 0 -> drop orphan (def), 1 -> keep for read() */ char mmap_called; /* 0 -> mmap() never called on this fd */ struct kref f_ref; struct execute_work ew; } Sg_fd; typedef struct sg_device { /* holds the state of each scsi generic device */ struct scsi_device *device; wait_queue_head_t o_excl_wait; /* queue open() when O_EXCL in use */ int sg_tablesize; /* adapter's max scatter-gather table size */ u32 index; /* device index number */ /* sfds is protected by sg_index_lock */ struct list_head sfds; volatile char detached; /* 0->attached, 1->detached pending removal */ /* exclude protected by sg_open_exclusive_lock */ char exclude; /* opened for exclusive access */ char sgdebug; /* 0->off, 1->sense, 9->dump dev, 10-> all devs */ struct gendisk *disk; struct cdev * cdev; /* char_dev [sysfs: /sys/cdev/major/sg<n>] */ struct kref d_ref; } Sg_device; /* tasklet or soft irq callback */ static void sg_rq_end_io(struct request *rq, int uptodate); static int sg_start_req(Sg_request *srp, unsigned char *cmd); static int sg_finish_rem_req(Sg_request * srp); static int sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size); static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp); static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp); static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking); 
static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer); static void sg_remove_scat(Sg_scatter_hold * schp); static void sg_build_reserve(Sg_fd * sfp, int req_size); static void sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size); static void sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp); static Sg_fd *sg_add_sfp(Sg_device * sdp, int dev); static void sg_remove_sfp(struct kref *); static Sg_request *sg_get_rq_mark(Sg_fd * sfp, int pack_id); static Sg_request *sg_add_request(Sg_fd * sfp); static int sg_remove_request(Sg_fd * sfp, Sg_request * srp); static int sg_res_in_use(Sg_fd * sfp); static Sg_device *sg_get_dev(int dev); static void sg_put_dev(Sg_device *sdp); #define SZ_SG_HEADER sizeof(struct sg_header) #define SZ_SG_IO_HDR sizeof(sg_io_hdr_t) #define SZ_SG_IOVEC sizeof(sg_iovec_t) #define SZ_SG_REQ_INFO sizeof(sg_req_info_t) static int sg_allow_access(struct file *filp, unsigned char *cmd) { struct sg_fd *sfp = filp->private_data; if (sfp->parentdp->device->type == TYPE_SCANNER) return 0; return blk_verify_command(cmd, filp->f_mode & FMODE_WRITE); } static int get_exclude(Sg_device *sdp) { unsigned long flags; int ret; spin_lock_irqsave(&sg_open_exclusive_lock, flags); ret = sdp->exclude; spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); return ret; } static int set_exclude(Sg_device *sdp, char val) { unsigned long flags; spin_lock_irqsave(&sg_open_exclusive_lock, flags); sdp->exclude = val; spin_unlock_irqrestore(&sg_open_exclusive_lock, flags); return val; } static int sfds_list_empty(Sg_device *sdp) { unsigned long flags; int ret; read_lock_irqsave(&sg_index_lock, flags); ret = list_empty(&sdp->sfds); read_unlock_irqrestore(&sg_index_lock, flags); return ret; } static int sg_open(struct inode *inode, struct file *filp) { int dev = iminor(inode); int flags = filp->f_flags; struct request_queue *q; Sg_device *sdp; Sg_fd *sfp; int res; int retval; nonseekable_open(inode, filp); SCSI_LOG_TIMEOUT(3, printk("sg_open: 
dev=%d, flags=0x%x\n", dev, flags)); sdp = sg_get_dev(dev); if (IS_ERR(sdp)) { retval = PTR_ERR(sdp); sdp = NULL; goto sg_put; } /* This driver's module count bumped by fops_get in <linux/fs.h> */ /* Prevent the device driver from vanishing while we sleep */ retval = scsi_device_get(sdp->device); if (retval) goto sg_put; retval = scsi_autopm_get_device(sdp->device); if (retval) goto sdp_put; if (!((flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) { retval = -ENXIO; /* we are in error recovery for this device */ goto error_out; } if (flags & O_EXCL) { if (O_RDONLY == (flags & O_ACCMODE)) { retval = -EPERM; /* Can't lock it with read only access */ goto error_out; } if (!sfds_list_empty(sdp) && (flags & O_NONBLOCK)) { retval = -EBUSY; goto error_out; } res = wait_event_interruptible(sdp->o_excl_wait, ((!sfds_list_empty(sdp) || get_exclude(sdp)) ? 0 : set_exclude(sdp, 1))); if (res) { retval = res; /* -ERESTARTSYS because signal hit process */ goto error_out; } } else if (get_exclude(sdp)) { /* some other fd has an exclusive lock on dev */ if (flags & O_NONBLOCK) { retval = -EBUSY; goto error_out; } res = wait_event_interruptible(sdp->o_excl_wait, !get_exclude(sdp)); if (res) { retval = res; /* -ERESTARTSYS because signal hit process */ goto error_out; } } if (sdp->detached) { retval = -ENODEV; goto error_out; } if (sfds_list_empty(sdp)) { /* no existing opens on this device */ sdp->sgdebug = 0; q = sdp->device->request_queue; sdp->sg_tablesize = queue_max_segments(q); } if ((sfp = sg_add_sfp(sdp, dev))) filp->private_data = sfp; else { if (flags & O_EXCL) { set_exclude(sdp, 0); /* undo if error */ wake_up_interruptible(&sdp->o_excl_wait); } retval = -ENOMEM; goto error_out; } retval = 0; error_out: if (retval) { scsi_autopm_put_device(sdp->device); sdp_put: scsi_device_put(sdp->device); } sg_put: if (sdp) sg_put_dev(sdp); return retval; } /* Following function was formerly called 'sg_close' */ static int sg_release(struct inode *inode, struct 
file *filp) { Sg_device *sdp; Sg_fd *sfp; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, printk("sg_release: %s\n", sdp->disk->disk_name)); set_exclude(sdp, 0); wake_up_interruptible(&sdp->o_excl_wait); scsi_autopm_put_device(sdp->device); kref_put(&sfp->f_ref, sg_remove_sfp); return 0; } static ssize_t sg_read(struct file *filp, char __user *buf, size_t count, loff_t * ppos) { Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int req_pack_id = -1; sg_io_hdr_t *hp; struct sg_header *old_hdr = NULL; int retval = 0; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, printk("sg_read: %s, count=%d\n", sdp->disk->disk_name, (int) count)); if (!access_ok(VERIFY_WRITE, buf, count)) return -EFAULT; if (sfp->force_packid && (count >= SZ_SG_HEADER)) { old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); if (!old_hdr) return -ENOMEM; if (__copy_from_user(old_hdr, buf, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } if (old_hdr->reply_len < 0) { if (count >= SZ_SG_IO_HDR) { sg_io_hdr_t *new_hdr; new_hdr = kmalloc(SZ_SG_IO_HDR, GFP_KERNEL); if (!new_hdr) { retval = -ENOMEM; goto free_old_hdr; } retval =__copy_from_user (new_hdr, buf, SZ_SG_IO_HDR); req_pack_id = new_hdr->pack_id; kfree(new_hdr); if (retval) { retval = -EFAULT; goto free_old_hdr; } } } else req_pack_id = old_hdr->pack_id; } srp = sg_get_rq_mark(sfp, req_pack_id); if (!srp) { /* now wait on packet to arrive */ if (sdp->detached) { retval = -ENODEV; goto free_old_hdr; } if (filp->f_flags & O_NONBLOCK) { retval = -EAGAIN; goto free_old_hdr; } retval = wait_event_interruptible(sfp->read_wait, (sdp->detached || (srp = sg_get_rq_mark(sfp, req_pack_id)))); if (sdp->detached) { retval = -ENODEV; goto free_old_hdr; } if (retval) { /* -ERESTARTSYS as signal hit process */ goto free_old_hdr; } } if (srp->header.interface_id != '\0') { retval = sg_new_read(sfp, buf, count, srp); goto free_old_hdr; } hp = 
&srp->header; if (old_hdr == NULL) { old_hdr = kmalloc(SZ_SG_HEADER, GFP_KERNEL); if (! old_hdr) { retval = -ENOMEM; goto free_old_hdr; } } memset(old_hdr, 0, SZ_SG_HEADER); old_hdr->reply_len = (int) hp->timeout; old_hdr->pack_len = old_hdr->reply_len; /* old, strange behaviour */ old_hdr->pack_id = hp->pack_id; old_hdr->twelve_byte = ((srp->data.cmd_opcode >= 0xc0) && (12 == hp->cmd_len)) ? 1 : 0; old_hdr->target_status = hp->masked_status; old_hdr->host_status = hp->host_status; old_hdr->driver_status = hp->driver_status; if ((CHECK_CONDITION & hp->masked_status) || (DRIVER_SENSE & hp->driver_status)) memcpy(old_hdr->sense_buffer, srp->sense_b, sizeof (old_hdr->sense_buffer)); switch (hp->host_status) { /* This setup of 'result' is for backward compatibility and is best ignored by the user who should use target, host + driver status */ case DID_OK: case DID_PASSTHROUGH: case DID_SOFT_ERROR: old_hdr->result = 0; break; case DID_NO_CONNECT: case DID_BUS_BUSY: case DID_TIME_OUT: old_hdr->result = EBUSY; break; case DID_BAD_TARGET: case DID_ABORT: case DID_PARITY: case DID_RESET: case DID_BAD_INTR: old_hdr->result = EIO; break; case DID_ERROR: old_hdr->result = (srp->sense_b[0] == 0 && hp->masked_status == GOOD) ? 0 : EIO; break; default: old_hdr->result = EIO; break; } /* Now copy the result back to the user buffer. */ if (count >= SZ_SG_HEADER) { if (__copy_to_user(buf, old_hdr, SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } buf += SZ_SG_HEADER; if (count > old_hdr->reply_len) count = old_hdr->reply_len; if (count > SZ_SG_HEADER) { if (sg_read_oxfer(srp, buf, count - SZ_SG_HEADER)) { retval = -EFAULT; goto free_old_hdr; } } } else count = (old_hdr->result == 0) ? 
0 : -EIO; sg_finish_rem_req(srp); retval = count; free_old_hdr: kfree(old_hdr); return retval; } static ssize_t sg_new_read(Sg_fd * sfp, char __user *buf, size_t count, Sg_request * srp) { sg_io_hdr_t *hp = &srp->header; int err = 0; int len; if (count < SZ_SG_IO_HDR) { err = -EINVAL; goto err_out; } hp->sb_len_wr = 0; if ((hp->mx_sb_len > 0) && hp->sbp) { if ((CHECK_CONDITION & hp->masked_status) || (DRIVER_SENSE & hp->driver_status)) { int sb_len = SCSI_SENSE_BUFFERSIZE; sb_len = (hp->mx_sb_len > sb_len) ? sb_len : hp->mx_sb_len; len = 8 + (int) srp->sense_b[7]; /* Additional sense length field */ len = (len > sb_len) ? sb_len : len; if (copy_to_user(hp->sbp, srp->sense_b, len)) { err = -EFAULT; goto err_out; } hp->sb_len_wr = len; } } if (hp->masked_status || hp->host_status || hp->driver_status) hp->info |= SG_INFO_CHECK; if (copy_to_user(buf, hp, SZ_SG_IO_HDR)) { err = -EFAULT; goto err_out; } err_out: err = sg_finish_rem_req(srp); return (0 == err) ? count : err; } static ssize_t sg_write(struct file *filp, const char __user *buf, size_t count, loff_t * ppos) { int mxsize, cmd_size, k; int input_size, blocking; unsigned char opcode; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; struct sg_header old_hdr; sg_io_hdr_t *hp; unsigned char cmnd[MAX_COMMAND_SIZE]; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, printk("sg_write: %s, count=%d\n", sdp->disk->disk_name, (int) count)); if (sdp->detached) return -ENODEV; if (!((filp->f_flags & O_NONBLOCK) || scsi_block_when_processing_errors(sdp->device))) return -ENXIO; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ if (count < SZ_SG_HEADER) return -EIO; if (__copy_from_user(&old_hdr, buf, SZ_SG_HEADER)) return -EFAULT; blocking = !(filp->f_flags & O_NONBLOCK); if (old_hdr.reply_len < 0) return sg_new_write(sfp, filp, buf, count, blocking, 0, 0, NULL); if (count < (SZ_SG_HEADER + 6)) return -EIO; 
/* The minimum scsi command length is 6 bytes. */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, printk("sg_write: queue full\n")); return -EDOM; } buf += SZ_SG_HEADER; __get_user(opcode, buf); if (sfp->next_cmd_len > 0) { if (sfp->next_cmd_len > MAX_COMMAND_SIZE) { SCSI_LOG_TIMEOUT(1, printk("sg_write: command length too long\n")); sfp->next_cmd_len = 0; sg_remove_request(sfp, srp); return -EIO; } cmd_size = sfp->next_cmd_len; sfp->next_cmd_len = 0; /* reset so only this write() effected */ } else { cmd_size = COMMAND_SIZE(opcode); /* based on SCSI command group */ if ((opcode >= 0xc0) && old_hdr.twelve_byte) cmd_size = 12; } SCSI_LOG_TIMEOUT(4, printk( "sg_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) opcode, cmd_size)); /* Determine buffer size. */ input_size = count - cmd_size; mxsize = (input_size > old_hdr.reply_len) ? input_size : old_hdr.reply_len; mxsize -= SZ_SG_HEADER; input_size -= SZ_SG_HEADER; if (input_size < 0) { sg_remove_request(sfp, srp); return -EIO; /* User did not pass enough bytes for this command. */ } hp = &srp->header; hp->interface_id = '\0'; /* indicator of old interface tunnelled */ hp->cmd_len = (unsigned char) cmd_size; hp->iovec_count = 0; hp->mx_sb_len = 0; if (input_size > 0) hp->dxfer_direction = (old_hdr.reply_len > SZ_SG_HEADER) ? SG_DXFER_TO_FROM_DEV : SG_DXFER_TO_DEV; else hp->dxfer_direction = (mxsize > 0) ? SG_DXFER_FROM_DEV : SG_DXFER_NONE; hp->dxfer_len = mxsize; if (hp->dxfer_direction == SG_DXFER_TO_DEV) hp->dxferp = (char __user *)buf + cmd_size; else hp->dxferp = NULL; hp->sbp = NULL; hp->timeout = old_hdr.reply_len; /* structure abuse ... */ hp->flags = input_size; /* structure abuse ... */ hp->pack_id = old_hdr.pack_id; hp->usr_ptr = NULL; if (__copy_from_user(cmnd, buf, cmd_size)) return -EFAULT; /* * SG_DXFER_TO_FROM_DEV is functionally equivalent to SG_DXFER_FROM_DEV, * but is is possible that the app intended SG_DXFER_TO_DEV, because there * is a non-zero input_size, so emit a warning. 
*/ if (hp->dxfer_direction == SG_DXFER_TO_FROM_DEV) { static char cmd[TASK_COMM_LEN]; if (strcmp(current->comm, cmd)) { printk_ratelimited(KERN_WARNING "sg_write: data in/out %d/%d bytes " "for SCSI command 0x%x-- guessing " "data in;\n program %s not setting " "count and/or reply_len properly\n", old_hdr.reply_len - (int)SZ_SG_HEADER, input_size, (unsigned int) cmnd[0], current->comm); strcpy(cmd, current->comm); } } k = sg_common_write(sfp, srp, cmnd, sfp->timeout, blocking); return (k < 0) ? k : count; } static ssize_t sg_new_write(Sg_fd *sfp, struct file *file, const char __user *buf, size_t count, int blocking, int read_only, int sg_io_owned, Sg_request **o_srp) { int k; Sg_request *srp; sg_io_hdr_t *hp; unsigned char cmnd[MAX_COMMAND_SIZE]; int timeout; unsigned long ul_timeout; if (count < SZ_SG_IO_HDR) return -EINVAL; if (!access_ok(VERIFY_READ, buf, count)) return -EFAULT; /* protects following copy_from_user()s + get_user()s */ sfp->cmd_q = 1; /* when sg_io_hdr seen, set command queuing on */ if (!(srp = sg_add_request(sfp))) { SCSI_LOG_TIMEOUT(1, printk("sg_new_write: queue full\n")); return -EDOM; } srp->sg_io_owned = sg_io_owned; hp = &srp->header; if (__copy_from_user(hp, buf, SZ_SG_IO_HDR)) { sg_remove_request(sfp, srp); return -EFAULT; } if (hp->interface_id != 'S') { sg_remove_request(sfp, srp); return -ENOSYS; } if (hp->flags & SG_FLAG_MMAP_IO) { if (hp->dxfer_len > sfp->reserve.bufflen) { sg_remove_request(sfp, srp); return -ENOMEM; /* MMAP_IO size must fit in reserve buffer */ } if (hp->flags & SG_FLAG_DIRECT_IO) { sg_remove_request(sfp, srp); return -EINVAL; /* either MMAP_IO or DIRECT_IO (not both) */ } if (sg_res_in_use(sfp)) { sg_remove_request(sfp, srp); return -EBUSY; /* reserve buffer already being used */ } } ul_timeout = msecs_to_jiffies(srp->header.timeout); timeout = (ul_timeout < INT_MAX) ? 
ul_timeout : INT_MAX; if ((!hp->cmdp) || (hp->cmd_len < 6) || (hp->cmd_len > sizeof (cmnd))) { sg_remove_request(sfp, srp); return -EMSGSIZE; } if (!access_ok(VERIFY_READ, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; /* protects following copy_from_user()s + get_user()s */ } if (__copy_from_user(cmnd, hp->cmdp, hp->cmd_len)) { sg_remove_request(sfp, srp); return -EFAULT; } if (read_only && sg_allow_access(file, cmnd)) { sg_remove_request(sfp, srp); return -EPERM; } k = sg_common_write(sfp, srp, cmnd, timeout, blocking); if (k < 0) return k; if (o_srp) *o_srp = srp; return count; } static int sg_common_write(Sg_fd * sfp, Sg_request * srp, unsigned char *cmnd, int timeout, int blocking) { int k, data_dir; Sg_device *sdp = sfp->parentdp; sg_io_hdr_t *hp = &srp->header; srp->data.cmd_opcode = cmnd[0]; /* hold opcode of command */ hp->status = 0; hp->masked_status = 0; hp->msg_status = 0; hp->info = 0; hp->host_status = 0; hp->driver_status = 0; hp->resid = 0; SCSI_LOG_TIMEOUT(4, printk("sg_common_write: scsi opcode=0x%02x, cmd_size=%d\n", (int) cmnd[0], (int) hp->cmd_len)); k = sg_start_req(srp, cmnd); if (k) { SCSI_LOG_TIMEOUT(1, printk("sg_common_write: start_req err=%d\n", k)); sg_finish_rem_req(srp); return k; /* probably out of space --> ENOMEM */ } if (sdp->detached) { if (srp->bio) blk_end_request_all(srp->rq, -EIO); sg_finish_rem_req(srp); return -ENODEV; } switch (hp->dxfer_direction) { case SG_DXFER_TO_FROM_DEV: case SG_DXFER_FROM_DEV: data_dir = DMA_FROM_DEVICE; break; case SG_DXFER_TO_DEV: data_dir = DMA_TO_DEVICE; break; case SG_DXFER_UNKNOWN: data_dir = DMA_BIDIRECTIONAL; break; default: data_dir = DMA_NONE; break; } hp->duration = jiffies_to_msecs(jiffies); srp->rq->timeout = timeout; kref_get(&sfp->f_ref); /* sg_rq_end_io() does kref_put(). 
*/ blk_execute_rq_nowait(sdp->device->request_queue, sdp->disk, srp->rq, 1, sg_rq_end_io); return 0; } static int srp_done(Sg_fd *sfp, Sg_request *srp) { unsigned long flags; int ret; read_lock_irqsave(&sfp->rq_list_lock, flags); ret = srp->done; read_unlock_irqrestore(&sfp->rq_list_lock, flags); return ret; } static long sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { void __user *p = (void __user *)arg; int __user *ip = p; int result, val, read_only; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; unsigned long iflags; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; SCSI_LOG_TIMEOUT(3, printk("sg_ioctl: %s, cmd=0x%x\n", sdp->disk->disk_name, (int) cmd_in)); read_only = (O_RDWR != (filp->f_flags & O_ACCMODE)); switch (cmd_in) { case SG_IO: if (sdp->detached) return -ENODEV; if (!scsi_block_when_processing_errors(sdp->device)) return -ENXIO; if (!access_ok(VERIFY_WRITE, p, SZ_SG_IO_HDR)) return -EFAULT; result = sg_new_write(sfp, filp, p, SZ_SG_IO_HDR, 1, read_only, 1, &srp); if (result < 0) return result; result = wait_event_interruptible(sfp->read_wait, (srp_done(sfp, srp) || sdp->detached)); if (sdp->detached) return -ENODEV; write_lock_irq(&sfp->rq_list_lock); if (srp->done) { srp->done = 2; write_unlock_irq(&sfp->rq_list_lock); result = sg_new_read(sfp, p, SZ_SG_IO_HDR, srp); return (result < 0) ? result : 0; } srp->orphan = 1; write_unlock_irq(&sfp->rq_list_lock); return result; /* -ERESTARTSYS because signal hit process */ case SG_SET_TIMEOUT: result = get_user(val, ip); if (result) return result; if (val < 0) return -EIO; if (val >= MULDIV (INT_MAX, USER_HZ, HZ)) val = MULDIV (INT_MAX, USER_HZ, HZ); sfp->timeout_user = val; sfp->timeout = MULDIV (val, HZ, USER_HZ); return 0; case SG_GET_TIMEOUT: /* N.B. 
User receives timeout as return value */ /* strange ..., for backward compatibility */ return sfp->timeout_user; case SG_SET_FORCE_LOW_DMA: result = get_user(val, ip); if (result) return result; if (val) { sfp->low_dma = 1; if ((0 == sfp->low_dma) && (0 == sg_res_in_use(sfp))) { val = (int) sfp->reserve.bufflen; sg_remove_scat(&sfp->reserve); sg_build_reserve(sfp, val); } } else { if (sdp->detached) return -ENODEV; sfp->low_dma = sdp->device->host->unchecked_isa_dma; } return 0; case SG_GET_LOW_DMA: return put_user((int) sfp->low_dma, ip); case SG_GET_SCSI_ID: if (!access_ok(VERIFY_WRITE, p, sizeof (sg_scsi_id_t))) return -EFAULT; else { sg_scsi_id_t __user *sg_idp = p; if (sdp->detached) return -ENODEV; __put_user((int) sdp->device->host->host_no, &sg_idp->host_no); __put_user((int) sdp->device->channel, &sg_idp->channel); __put_user((int) sdp->device->id, &sg_idp->scsi_id); __put_user((int) sdp->device->lun, &sg_idp->lun); __put_user((int) sdp->device->type, &sg_idp->scsi_type); __put_user((short) sdp->device->host->cmd_per_lun, &sg_idp->h_cmd_per_lun); __put_user((short) sdp->device->queue_depth, &sg_idp->d_queue_depth); __put_user(0, &sg_idp->unused[0]); __put_user(0, &sg_idp->unused[1]); return 0; } case SG_SET_FORCE_PACK_ID: result = get_user(val, ip); if (result) return result; sfp->force_packid = val ? 
1 : 0; return 0; case SG_GET_PACK_ID: if (!access_ok(VERIFY_WRITE, ip, sizeof (int))) return -EFAULT; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) { if ((1 == srp->done) && (!srp->sg_io_owned)) { read_unlock_irqrestore(&sfp->rq_list_lock, iflags); __put_user(srp->header.pack_id, ip); return 0; } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); __put_user(-1, ip); return 0; case SG_GET_NUM_WAITING: read_lock_irqsave(&sfp->rq_list_lock, iflags); for (val = 0, srp = sfp->headrp; srp; srp = srp->nextrp) { if ((1 == srp->done) && (!srp->sg_io_owned)) ++val; } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); return put_user(val, ip); case SG_GET_SG_TABLESIZE: return put_user(sdp->sg_tablesize, ip); case SG_SET_RESERVED_SIZE: result = get_user(val, ip); if (result) return result; if (val < 0) return -EINVAL; val = min_t(int, val, queue_max_sectors(sdp->device->request_queue) * 512); if (val != sfp->reserve.bufflen) { if (sg_res_in_use(sfp) || sfp->mmap_called) return -EBUSY; sg_remove_scat(&sfp->reserve); sg_build_reserve(sfp, val); } return 0; case SG_GET_RESERVED_SIZE: val = min_t(int, sfp->reserve.bufflen, queue_max_sectors(sdp->device->request_queue) * 512); return put_user(val, ip); case SG_SET_COMMAND_Q: result = get_user(val, ip); if (result) return result; sfp->cmd_q = val ? 1 : 0; return 0; case SG_GET_COMMAND_Q: return put_user((int) sfp->cmd_q, ip); case SG_SET_KEEP_ORPHAN: result = get_user(val, ip); if (result) return result; sfp->keep_orphan = val; return 0; case SG_GET_KEEP_ORPHAN: return put_user((int) sfp->keep_orphan, ip); case SG_NEXT_CMD_LEN: result = get_user(val, ip); if (result) return result; sfp->next_cmd_len = (val > 0) ? val : 0; return 0; case SG_GET_VERSION_NUM: return put_user(sg_version_num, ip); case SG_GET_ACCESS_COUNT: /* faked - we don't have a real access count anymore */ val = (sdp->device ? 
1 : 0); return put_user(val, ip); case SG_GET_REQUEST_TABLE: if (!access_ok(VERIFY_WRITE, p, SZ_SG_REQ_INFO * SG_MAX_QUEUE)) return -EFAULT; else { sg_req_info_t *rinfo; unsigned int ms; rinfo = kmalloc(SZ_SG_REQ_INFO * SG_MAX_QUEUE, GFP_KERNEL); if (!rinfo) return -ENOMEM; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp, val = 0; val < SG_MAX_QUEUE; ++val, srp = srp ? srp->nextrp : srp) { memset(&rinfo[val], 0, SZ_SG_REQ_INFO); if (srp) { rinfo[val].req_state = srp->done + 1; rinfo[val].problem = srp->header.masked_status & srp->header.host_status & srp->header.driver_status; if (srp->done) rinfo[val].duration = srp->header.duration; else { ms = jiffies_to_msecs(jiffies); rinfo[val].duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; } rinfo[val].orphan = srp->orphan; rinfo[val].sg_io_owned = srp->sg_io_owned; rinfo[val].pack_id = srp->header.pack_id; rinfo[val].usr_ptr = srp->header.usr_ptr; } } read_unlock_irqrestore(&sfp->rq_list_lock, iflags); result = __copy_to_user(p, rinfo, SZ_SG_REQ_INFO * SG_MAX_QUEUE); result = result ? -EFAULT : 0; kfree(rinfo); return result; } case SG_EMULATED_HOST: if (sdp->detached) return -ENODEV; return put_user(sdp->device->host->hostt->emulated, ip); case SG_SCSI_RESET: if (sdp->detached) return -ENODEV; if (filp->f_flags & O_NONBLOCK) { if (scsi_host_in_recovery(sdp->device->host)) return -EBUSY; } else if (!scsi_block_when_processing_errors(sdp->device)) return -EBUSY; result = get_user(val, ip); if (result) return result; if (SG_SCSI_RESET_NOTHING == val) return 0; switch (val) { case SG_SCSI_RESET_DEVICE: val = SCSI_TRY_RESET_DEVICE; break; case SG_SCSI_RESET_TARGET: val = SCSI_TRY_RESET_TARGET; break; case SG_SCSI_RESET_BUS: val = SCSI_TRY_RESET_BUS; break; case SG_SCSI_RESET_HOST: val = SCSI_TRY_RESET_HOST; break; default: return -EINVAL; } if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; return (scsi_reset_provider(sdp->device, val) == SUCCESS) ? 
0 : -EIO; case SCSI_IOCTL_SEND_COMMAND: if (sdp->detached) return -ENODEV; if (read_only) { unsigned char opcode = WRITE_6; Scsi_Ioctl_Command __user *siocp = p; if (copy_from_user(&opcode, siocp->data, 1)) return -EFAULT; if (sg_allow_access(filp, &opcode)) return -EPERM; } return sg_scsi_ioctl(sdp->device->request_queue, NULL, filp->f_mode, p); case SG_SET_DEBUG: result = get_user(val, ip); if (result) return result; sdp->sgdebug = (char) val; return 0; case SCSI_IOCTL_GET_IDLUN: case SCSI_IOCTL_GET_BUS_NUMBER: case SCSI_IOCTL_PROBE_HOST: case SG_GET_TRANSFORM: if (sdp->detached) return -ENODEV; return scsi_ioctl(sdp->device, cmd_in, p); case BLKSECTGET: return put_user(queue_max_sectors(sdp->device->request_queue) * 512, ip); case BLKTRACESETUP: return blk_trace_setup(sdp->device->request_queue, sdp->disk->disk_name, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), NULL, (char *)arg); case BLKTRACESTART: return blk_trace_startstop(sdp->device->request_queue, 1); case BLKTRACESTOP: return blk_trace_startstop(sdp->device->request_queue, 0); case BLKTRACETEARDOWN: return blk_trace_remove(sdp->device->request_queue); default: if (read_only) return -EPERM; /* don't know so take safe approach */ return scsi_ioctl(sdp->device, cmd_in, p); } } #ifdef CONFIG_COMPAT static long sg_compat_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg) { Sg_device *sdp; Sg_fd *sfp; struct scsi_device *sdev; if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp))) return -ENXIO; sdev = sdp->device; if (sdev->host->hostt->compat_ioctl) { int ret; ret = sdev->host->hostt->compat_ioctl(sdev, cmd_in, (void __user *)arg); return ret; } return -ENOIOCTLCMD; } #endif static unsigned int sg_poll(struct file *filp, poll_table * wait) { unsigned int res = 0; Sg_device *sdp; Sg_fd *sfp; Sg_request *srp; int count = 0; unsigned long iflags; sfp = filp->private_data; if (!sfp) return POLLERR; sdp = sfp->parentdp; if (!sdp) return POLLERR; poll_wait(filp, &sfp->read_wait, wait); 
	/* Scan this fd's request list; any completed request not owned by a
	 * blocking SG_IO means read(2) would not block. */
	read_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (srp = sfp->headrp; srp; srp = srp->nextrp) {
		/* if any read waiting, flag it */
		if ((0 == res) && (1 == srp->done) && (!srp->sg_io_owned))
			res = POLLIN | POLLRDNORM;
		++count;
	}
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);

	if (sdp->detached)
		res |= POLLHUP;
	else if (!sfp->cmd_q) {
		/* command queuing off: writable only while nothing is queued */
		if (0 == count)
			res |= POLLOUT | POLLWRNORM;
	} else if (count < SG_MAX_QUEUE)
		/* command queuing on: writable until the fixed-size queue fills */
		res |= POLLOUT | POLLWRNORM;
	SCSI_LOG_TIMEOUT(3, printk("sg_poll: %s, res=0x%x\n",
				   sdp->disk->disk_name, (int) res));
	return res;
}

/*
 * fasync(2) support: (de)register this fd for SIGPOLL delivery on request
 * completion (the kill_fasync() call is made from the completion path).
 */
static int
sg_fasync(int fd, struct file *filp, int mode)
{
	Sg_device *sdp;
	Sg_fd *sfp;

	if ((!(sfp = (Sg_fd *) filp->private_data)) || (!(sdp = sfp->parentdp)))
		return -ENXIO;
	SCSI_LOG_TIMEOUT(3, printk("sg_fasync: %s, mode=%d\n",
				   sdp->disk->disk_name, mode));

	return fasync_helper(fd, filp, mode, &sfp->async_qp);
}

/*
 * Page-fault handler for mmap()ed reserve buffers.  Maps the faulting
 * offset onto the page of the per-fd reserve scatter list that backs it.
 * Returns 0 with vmf->page set (reference taken) on success, or
 * VM_FAULT_SIGBUS if the offset falls outside the reserved buffer.
 */
static int
sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	Sg_fd *sfp;
	unsigned long offset, len, sa;
	Sg_scatter_hold *rsv_schp;
	int k, length;

	if ((NULL == vma) || (!(sfp = (Sg_fd *) vma->vm_private_data)))
		return VM_FAULT_SIGBUS;
	rsv_schp = &sfp->reserve;
	offset = vmf->pgoff << PAGE_SHIFT;
	if (offset >= rsv_schp->bufflen)
		return VM_FAULT_SIGBUS;
	SCSI_LOG_TIMEOUT(3, printk("sg_vma_fault: offset=%lu, scatg=%d\n",
				   offset, rsv_schp->k_use_sg));
	sa = vma->vm_start;
	/* each scatter element is a higher-order page block of this size */
	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	/* walk the scatter elements until the one covering 'offset' */
	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
		len = vma->vm_end - sa;
		len = (len < length) ?
	    len : length;
		if (offset < len) {
			/* found the element; pick the exact page within it */
			struct page *page = nth_page(rsv_schp->pages[k],
						     offset >> PAGE_SHIFT);
			get_page(page);	/* increment page count */
			vmf->page = page;
			return 0; /* success */
		}
		sa += len;
		offset -= len;
	}

	return VM_FAULT_SIGBUS;
}

static const struct vm_operations_struct sg_mmap_vm_ops = {
	.fault = sg_vma_fault,
};

/*
 * mmap(2) handler: maps the per-fd reserve buffer into user space.
 * Only offset 0 is accepted and the mapping may not exceed the reserve
 * buffer length.  Faults are then serviced lazily by sg_vma_fault().
 */
static int
sg_mmap(struct file *filp, struct vm_area_struct *vma)
{
	Sg_fd *sfp;
	unsigned long req_sz, len, sa;
	Sg_scatter_hold *rsv_schp;
	int k, length;

	if ((!filp) || (!vma) || (!(sfp = (Sg_fd *) filp->private_data)))
		return -ENXIO;
	req_sz = vma->vm_end - vma->vm_start;
	SCSI_LOG_TIMEOUT(3, printk("sg_mmap starting, vm_start=%p, len=%d\n",
				   (void *) vma->vm_start, (int) req_sz));
	if (vma->vm_pgoff)
		return -EINVAL;	/* want no offset */
	rsv_schp = &sfp->reserve;
	if (req_sz > rsv_schp->bufflen)
		return -ENOMEM;	/* cannot map more than reserved buffer */

	/* NOTE(review): this walk computes nothing that is kept; it appears
	 * to only sanity-step across the scatter elements — confirm against
	 * upstream history before changing. */
	sa = vma->vm_start;
	length = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg && sa < vma->vm_end; k++) {
		len = vma->vm_end - sa;
		len = (len < length) ? len : length;
		sa += len;
	}

	/* mmap_called blocks later SG_SET_RESERVED_SIZE resizes (see ioctl) */
	sfp->mmap_called = 1;
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_private_data = sfp;
	vma->vm_ops = &sg_mmap_vm_ops;
	return 0;
}

/*
 * Deferred (process-context) half of request teardown, scheduled from the
 * completion handler when the request must be finished outside irq context.
 * Drops the fd reference taken for the in-flight request.
 */
static void sg_rq_end_io_usercontext(struct work_struct *work)
{
	struct sg_request *srp = container_of(work, struct sg_request, ew.work);
	struct sg_fd *sfp = srp->parentfp;

	sg_finish_rem_req(srp);
	kref_put(&sfp->f_ref, sg_remove_sfp);
}

/*
 * This function is a "bottom half" handler that is called by the mid
 * level when a command is completed (or has failed).
*/ static void sg_rq_end_io(struct request *rq, int uptodate) { struct sg_request *srp = rq->end_io_data; Sg_device *sdp; Sg_fd *sfp; unsigned long iflags; unsigned int ms; char *sense; int result, resid, done = 1; if (WARN_ON(srp->done != 0)) return; sfp = srp->parentfp; if (WARN_ON(sfp == NULL)) return; sdp = sfp->parentdp; if (unlikely(sdp->detached)) printk(KERN_INFO "sg_rq_end_io: device detached\n"); sense = rq->sense; result = rq->errors; resid = rq->resid_len; SCSI_LOG_TIMEOUT(4, printk("sg_cmd_done: %s, pack_id=%d, res=0x%x\n", sdp->disk->disk_name, srp->header.pack_id, result)); srp->header.resid = resid; ms = jiffies_to_msecs(jiffies); srp->header.duration = (ms > srp->header.duration) ? (ms - srp->header.duration) : 0; if (0 != result) { struct scsi_sense_hdr sshdr; srp->header.status = 0xff & result; srp->header.masked_status = status_byte(result); srp->header.msg_status = msg_byte(result); srp->header.host_status = host_byte(result); srp->header.driver_status = driver_byte(result); if ((sdp->sgdebug > 0) && ((CHECK_CONDITION == srp->header.masked_status) || (COMMAND_TERMINATED == srp->header.masked_status))) __scsi_print_sense("sg_cmd_done", sense, SCSI_SENSE_BUFFERSIZE); /* Following if statement is a patch supplied by Eric Youngdale */ if (driver_byte(result) != 0 && scsi_normalize_sense(sense, SCSI_SENSE_BUFFERSIZE, &sshdr) && !scsi_sense_is_deferred(&sshdr) && sshdr.sense_key == UNIT_ATTENTION && sdp->device->removable) { /* Detected possible disc change. Set the bit - this */ /* may be used if there are filesystems using this device */ sdp->device->changed = 1; } } /* Rely on write phase to clean out srp status values, so no "else" */ write_lock_irqsave(&sfp->rq_list_lock, iflags); if (unlikely(srp->orphan)) { if (sfp->keep_orphan) srp->sg_io_owned = 0; else done = 0; } srp->done = done; write_unlock_irqrestore(&sfp->rq_list_lock, iflags); if (likely(done)) { /* Now wake up any sg_read() that is waiting for this * packet. 
*/ wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_IN); kref_put(&sfp->f_ref, sg_remove_sfp); } else { INIT_WORK(&srp->ew.work, sg_rq_end_io_usercontext); schedule_work(&srp->ew.work); } } static const struct file_operations sg_fops = { .owner = THIS_MODULE, .read = sg_read, .write = sg_write, .poll = sg_poll, .unlocked_ioctl = sg_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = sg_compat_ioctl, #endif .open = sg_open, .mmap = sg_mmap, .release = sg_release, .fasync = sg_fasync, .llseek = no_llseek, }; static struct class *sg_sysfs_class; static int sg_sysfs_valid = 0; static Sg_device *sg_alloc(struct gendisk *disk, struct scsi_device *scsidp) { struct request_queue *q = scsidp->request_queue; Sg_device *sdp; unsigned long iflags; int error; u32 k; sdp = kzalloc(sizeof(Sg_device), GFP_KERNEL); if (!sdp) { printk(KERN_WARNING "kmalloc Sg_device failure\n"); return ERR_PTR(-ENOMEM); } idr_preload(GFP_KERNEL); write_lock_irqsave(&sg_index_lock, iflags); error = idr_alloc(&sg_index_idr, sdp, 0, SG_MAX_DEVS, GFP_NOWAIT); if (error < 0) { if (error == -ENOSPC) { sdev_printk(KERN_WARNING, scsidp, "Unable to attach sg device type=%d, minor number exceeds %d\n", scsidp->type, SG_MAX_DEVS - 1); error = -ENODEV; } else { printk(KERN_WARNING "idr allocation Sg_device failure: %d\n", error); } goto out_unlock; } k = error; SCSI_LOG_TIMEOUT(3, printk("sg_alloc: dev=%d \n", k)); sprintf(disk->disk_name, "sg%d", k); disk->first_minor = k; sdp->disk = disk; sdp->device = scsidp; INIT_LIST_HEAD(&sdp->sfds); init_waitqueue_head(&sdp->o_excl_wait); sdp->sg_tablesize = queue_max_segments(q); sdp->index = k; kref_init(&sdp->d_ref); error = 0; out_unlock: write_unlock_irqrestore(&sg_index_lock, iflags); idr_preload_end(); if (error) { kfree(sdp); return ERR_PTR(error); } return sdp; } static int sg_add(struct device *cl_dev, struct class_interface *cl_intf) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); struct gendisk *disk; Sg_device *sdp = 
NULL; struct cdev * cdev = NULL; int error; unsigned long iflags; disk = alloc_disk(1); if (!disk) { printk(KERN_WARNING "alloc_disk failed\n"); return -ENOMEM; } disk->major = SCSI_GENERIC_MAJOR; error = -ENOMEM; cdev = cdev_alloc(); if (!cdev) { printk(KERN_WARNING "cdev_alloc failed\n"); goto out; } cdev->owner = THIS_MODULE; cdev->ops = &sg_fops; sdp = sg_alloc(disk, scsidp); if (IS_ERR(sdp)) { printk(KERN_WARNING "sg_alloc failed\n"); error = PTR_ERR(sdp); goto out; } error = cdev_add(cdev, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), 1); if (error) goto cdev_add_err; sdp->cdev = cdev; if (sg_sysfs_valid) { struct device *sg_class_member; sg_class_member = device_create(sg_sysfs_class, cl_dev->parent, MKDEV(SCSI_GENERIC_MAJOR, sdp->index), sdp, "%s", disk->disk_name); if (IS_ERR(sg_class_member)) { printk(KERN_ERR "sg_add: " "device_create failed\n"); error = PTR_ERR(sg_class_member); goto cdev_add_err; } error = sysfs_create_link(&scsidp->sdev_gendev.kobj, &sg_class_member->kobj, "generic"); if (error) printk(KERN_ERR "sg_add: unable to make symlink " "'generic' back to sg%d\n", sdp->index); } else printk(KERN_WARNING "sg_add: sg_sys Invalid\n"); sdev_printk(KERN_NOTICE, scsidp, "Attached scsi generic sg%d type %d\n", sdp->index, scsidp->type); dev_set_drvdata(cl_dev, sdp); return 0; cdev_add_err: write_lock_irqsave(&sg_index_lock, iflags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, iflags); kfree(sdp); out: put_disk(disk); if (cdev) cdev_del(cdev); return error; } static void sg_device_destroy(struct kref *kref) { struct sg_device *sdp = container_of(kref, struct sg_device, d_ref); unsigned long flags; /* CAUTION! Note that the device can still be found via idr_find() * even though the refcount is 0. Therefore, do idr_remove() BEFORE * any other cleanup. 
*/ write_lock_irqsave(&sg_index_lock, flags); idr_remove(&sg_index_idr, sdp->index); write_unlock_irqrestore(&sg_index_lock, flags); SCSI_LOG_TIMEOUT(3, printk("sg_device_destroy: %s\n", sdp->disk->disk_name)); put_disk(sdp->disk); kfree(sdp); } static void sg_remove(struct device *cl_dev, struct class_interface *cl_intf) { struct scsi_device *scsidp = to_scsi_device(cl_dev->parent); Sg_device *sdp = dev_get_drvdata(cl_dev); unsigned long iflags; Sg_fd *sfp; if (!sdp || sdp->detached) return; SCSI_LOG_TIMEOUT(3, printk("sg_remove: %s\n", sdp->disk->disk_name)); /* Need a write lock to set sdp->detached. */ write_lock_irqsave(&sg_index_lock, iflags); sdp->detached = 1; list_for_each_entry(sfp, &sdp->sfds, sfd_siblings) { wake_up_interruptible(&sfp->read_wait); kill_fasync(&sfp->async_qp, SIGPOLL, POLL_HUP); } write_unlock_irqrestore(&sg_index_lock, iflags); sysfs_remove_link(&scsidp->sdev_gendev.kobj, "generic"); device_destroy(sg_sysfs_class, MKDEV(SCSI_GENERIC_MAJOR, sdp->index)); cdev_del(sdp->cdev); sdp->cdev = NULL; sg_put_dev(sdp); } module_param_named(scatter_elem_sz, scatter_elem_sz, int, S_IRUGO | S_IWUSR); module_param_named(def_reserved_size, def_reserved_size, int, S_IRUGO | S_IWUSR); module_param_named(allow_dio, sg_allow_dio, int, S_IRUGO | S_IWUSR); MODULE_AUTHOR("Douglas Gilbert"); MODULE_DESCRIPTION("SCSI generic (sg) driver"); MODULE_LICENSE("GPL"); MODULE_VERSION(SG_VERSION_STR); MODULE_ALIAS_CHARDEV_MAJOR(SCSI_GENERIC_MAJOR); MODULE_PARM_DESC(scatter_elem_sz, "scatter gather element " "size (default: max(SG_SCATTER_SZ, PAGE_SIZE))"); MODULE_PARM_DESC(def_reserved_size, "size of buffer reserved for each fd"); MODULE_PARM_DESC(allow_dio, "allow direct I/O (default: 0 (disallow))"); static int __init init_sg(void) { int rc; if (scatter_elem_sz < PAGE_SIZE) { scatter_elem_sz = PAGE_SIZE; scatter_elem_sz_prev = scatter_elem_sz; } if (def_reserved_size >= 0) sg_big_buff = def_reserved_size; else def_reserved_size = sg_big_buff; rc = 
register_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS, "sg"); if (rc) return rc; sg_sysfs_class = class_create(THIS_MODULE, "scsi_generic"); if ( IS_ERR(sg_sysfs_class) ) { rc = PTR_ERR(sg_sysfs_class); goto err_out; } sg_sysfs_valid = 1; rc = scsi_register_interface(&sg_interface); if (0 == rc) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_init(); #endif /* CONFIG_SCSI_PROC_FS */ return 0; } class_destroy(sg_sysfs_class); err_out: unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); return rc; } static void __exit exit_sg(void) { #ifdef CONFIG_SCSI_PROC_FS sg_proc_cleanup(); #endif /* CONFIG_SCSI_PROC_FS */ scsi_unregister_interface(&sg_interface); class_destroy(sg_sysfs_class); sg_sysfs_valid = 0; unregister_chrdev_region(MKDEV(SCSI_GENERIC_MAJOR, 0), SG_MAX_DEVS); idr_destroy(&sg_index_idr); } static int sg_start_req(Sg_request *srp, unsigned char *cmd) { int res; struct request *rq; Sg_fd *sfp = srp->parentfp; sg_io_hdr_t *hp = &srp->header; int dxfer_len = (int) hp->dxfer_len; int dxfer_dir = hp->dxfer_direction; unsigned int iov_count = hp->iovec_count; Sg_scatter_hold *req_schp = &srp->data; Sg_scatter_hold *rsv_schp = &sfp->reserve; struct request_queue *q = sfp->parentdp->device->request_queue; struct rq_map_data *md, map_data; int rw = hp->dxfer_direction == SG_DXFER_TO_DEV ? 
WRITE : READ; SCSI_LOG_TIMEOUT(4, printk(KERN_INFO "sg_start_req: dxfer_len=%d\n", dxfer_len)); rq = blk_get_request(q, rw, GFP_ATOMIC); if (!rq) return -ENOMEM; memcpy(rq->cmd, cmd, hp->cmd_len); rq->cmd_len = hp->cmd_len; rq->cmd_type = REQ_TYPE_BLOCK_PC; srp->rq = rq; rq->end_io_data = srp; rq->sense = srp->sense_b; rq->retries = SG_DEFAULT_RETRIES; if ((dxfer_len <= 0) || (dxfer_dir == SG_DXFER_NONE)) return 0; if (sg_allow_dio && hp->flags & SG_FLAG_DIRECT_IO && dxfer_dir != SG_DXFER_UNKNOWN && !iov_count && !sfp->parentdp->device->host->unchecked_isa_dma && blk_rq_aligned(q, (unsigned long)hp->dxferp, dxfer_len)) md = NULL; else md = &map_data; if (md) { if (!sg_res_in_use(sfp) && dxfer_len <= rsv_schp->bufflen) sg_link_reserve(sfp, srp, dxfer_len); else { res = sg_build_indirect(req_schp, sfp, dxfer_len); if (res) return res; } md->pages = req_schp->pages; md->page_order = req_schp->page_order; md->nr_entries = req_schp->k_use_sg; md->offset = 0; md->null_mapped = hp->dxferp ? 0 : 1; if (dxfer_dir == SG_DXFER_TO_FROM_DEV) md->from_user = 1; else md->from_user = 0; } if (iov_count) { int len, size = sizeof(struct sg_iovec) * iov_count; struct iovec *iov; iov = memdup_user(hp->dxferp, size); if (IS_ERR(iov)) return PTR_ERR(iov); len = iov_length(iov, iov_count); if (hp->dxfer_len < len) { iov_count = iov_shorten(iov, iov_count, hp->dxfer_len); len = hp->dxfer_len; } res = blk_rq_map_user_iov(q, rq, md, (struct sg_iovec *)iov, iov_count, len, GFP_ATOMIC); kfree(iov); } else res = blk_rq_map_user(q, rq, md, hp->dxferp, hp->dxfer_len, GFP_ATOMIC); if (!res) { srp->bio = rq->bio; if (!md) { req_schp->dio_in_use = 1; hp->info |= SG_INFO_DIRECT_IO; } } return res; } static int sg_finish_rem_req(Sg_request * srp) { int ret = 0; Sg_fd *sfp = srp->parentfp; Sg_scatter_hold *req_schp = &srp->data; SCSI_LOG_TIMEOUT(4, printk("sg_finish_rem_req: res_used=%d\n", (int) srp->res_used)); if (srp->rq) { if (srp->bio) ret = blk_rq_unmap_user(srp->bio); blk_put_request(srp->rq); 
	}
	/* Release the data buffer: either hand the shared reserve buffer
	 * back to the fd, or free pages allocated just for this request. */
	if (srp->res_used)
		sg_unlink_reserve(sfp, srp);
	else
		sg_remove_scat(req_schp);

	sg_remove_request(sfp, srp);

	return ret;
}

/*
 * Allocate the page-pointer array for a scatter list of up to 'tablesize'
 * elements.  Returns the element capacity on success, -ENOMEM on failure.
 */
static int
sg_build_sgat(Sg_scatter_hold * schp, const Sg_fd * sfp, int tablesize)
{
	int sg_bufflen = tablesize * sizeof(struct page *);
	/* GFP_ATOMIC: may be reached from non-sleeping contexts */
	gfp_t gfp_flags = GFP_ATOMIC | __GFP_NOWARN;

	schp->pages = kzalloc(sg_bufflen, gfp_flags);
	if (!schp->pages)
		return -ENOMEM;
	schp->sglist_len = sg_bufflen;
	return tablesize;	/* number of scat_gath elements allocated */
}

/*
 * Build an indirect (kernel-allocated) scatter list of at least buff_size
 * bytes, using higher-order page allocations and falling back to smaller
 * orders on failure.  Returns 0 on success or a negative errno.
 */
static int
sg_build_indirect(Sg_scatter_hold * schp, Sg_fd * sfp, int buff_size)
{
	int ret_sz = 0, i, k, rem_sz, num, mx_sc_elems;
	int sg_tablesize = sfp->parentdp->sg_tablesize;
	int blk_size = buff_size, order;
	gfp_t gfp_mask = GFP_ATOMIC | __GFP_COMP | __GFP_NOWARN;

	if (blk_size < 0)
		return -EFAULT;
	if (0 == blk_size)
		++blk_size;	/* don't know why */
	/* round request up to next highest SG_SECTOR_SZ byte boundary */
	blk_size = ALIGN(blk_size, SG_SECTOR_SZ);
	SCSI_LOG_TIMEOUT(4, printk("sg_build_indirect: buff_size=%d, blk_size=%d\n",
				   buff_size, blk_size));

	/* N.B. ret_sz carried into this block ... */
	mx_sc_elems = sg_build_sgat(schp, sfp, sg_tablesize);
	if (mx_sc_elems < 0)
		return mx_sc_elems;	/* most likely -ENOMEM */

	/* track module-parameter changes to scatter_elem_sz across calls */
	num = scatter_elem_sz;
	if (unlikely(num != scatter_elem_sz_prev)) {
		if (num < PAGE_SIZE) {
			scatter_elem_sz = PAGE_SIZE;
			scatter_elem_sz_prev = PAGE_SIZE;
		} else
			scatter_elem_sz_prev = num;
	}

	if (sfp->low_dma)
		gfp_mask |= GFP_DMA;

	/* unprivileged users get zeroed pages (avoid leaking stale data) */
	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		gfp_mask |= __GFP_ZERO;

	order = get_order(num);
retry:
	ret_sz = 1 << (PAGE_SHIFT + order);

	for (k = 0, rem_sz = blk_size; rem_sz > 0 && k < mx_sc_elems;
	     k++, rem_sz -= ret_sz) {

		num = (rem_sz > scatter_elem_sz_prev) ?
scatter_elem_sz_prev : rem_sz; schp->pages[k] = alloc_pages(gfp_mask, order); if (!schp->pages[k]) goto out; if (num == scatter_elem_sz_prev) { if (unlikely(ret_sz > scatter_elem_sz_prev)) { scatter_elem_sz = ret_sz; scatter_elem_sz_prev = ret_sz; } } SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k=%d, num=%d, " "ret_sz=%d\n", k, num, ret_sz)); } /* end of for loop */ schp->page_order = order; schp->k_use_sg = k; SCSI_LOG_TIMEOUT(5, printk("sg_build_indirect: k_use_sg=%d, " "rem_sz=%d\n", k, rem_sz)); schp->bufflen = blk_size; if (rem_sz > 0) /* must have failed */ return -ENOMEM; return 0; out: for (i = 0; i < k; i++) __free_pages(schp->pages[i], order); if (--order >= 0) goto retry; return -ENOMEM; } static void sg_remove_scat(Sg_scatter_hold * schp) { SCSI_LOG_TIMEOUT(4, printk("sg_remove_scat: k_use_sg=%d\n", schp->k_use_sg)); if (schp->pages && schp->sglist_len > 0) { if (!schp->dio_in_use) { int k; for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { SCSI_LOG_TIMEOUT(5, printk( "sg_remove_scat: k=%d, pg=0x%p\n", k, schp->pages[k])); __free_pages(schp->pages[k], schp->page_order); } kfree(schp->pages); } } memset(schp, 0, sizeof (*schp)); } static int sg_read_oxfer(Sg_request * srp, char __user *outp, int num_read_xfer) { Sg_scatter_hold *schp = &srp->data; int k, num; SCSI_LOG_TIMEOUT(4, printk("sg_read_oxfer: num_read_xfer=%d\n", num_read_xfer)); if ((!outp) || (num_read_xfer <= 0)) return 0; num = 1 << (PAGE_SHIFT + schp->page_order); for (k = 0; k < schp->k_use_sg && schp->pages[k]; k++) { if (num > num_read_xfer) { if (__copy_to_user(outp, page_address(schp->pages[k]), num_read_xfer)) return -EFAULT; break; } else { if (__copy_to_user(outp, page_address(schp->pages[k]), num)) return -EFAULT; num_read_xfer -= num; if (num_read_xfer <= 0) break; outp += num; } } return 0; } static void sg_build_reserve(Sg_fd * sfp, int req_size) { Sg_scatter_hold *schp = &sfp->reserve; SCSI_LOG_TIMEOUT(4, printk("sg_build_reserve: req_size=%d\n", req_size)); do { if 
	    (req_size < PAGE_SIZE)
			req_size = PAGE_SIZE;
		if (0 == sg_build_indirect(schp, sfp, req_size))
			return;
		else
			sg_remove_scat(schp);
		req_size >>= 1;	/* divide by 2 */
	} while (req_size > (PAGE_SIZE / 2));
}

/*
 * Attach the fd's reserve buffer to request 'srp' for a transfer of
 * 'size' bytes: the request's scatter hold aliases the reserve pages
 * (no copy, no allocation).  Caller must have checked sg_res_in_use().
 */
static void
sg_link_reserve(Sg_fd * sfp, Sg_request * srp, int size)
{
	Sg_scatter_hold *req_schp = &srp->data;
	Sg_scatter_hold *rsv_schp = &sfp->reserve;
	int k, num, rem;

	srp->res_used = 1;
	SCSI_LOG_TIMEOUT(4, printk("sg_link_reserve: size=%d\n", size));
	rem = size;

	/* find how many reserve elements are needed to cover 'size' */
	num = 1 << (PAGE_SHIFT + rsv_schp->page_order);
	for (k = 0; k < rsv_schp->k_use_sg; k++) {
		if (rem <= num) {
			req_schp->k_use_sg = k + 1;
			req_schp->sglist_len = rsv_schp->sglist_len;
			req_schp->pages = rsv_schp->pages;	/* shared, not owned */

			req_schp->bufflen = size;
			req_schp->page_order = rsv_schp->page_order;
			break;
		} else
			rem -= num;
	}

	/* should be unreachable: caller verified size fits the reserve */
	if (k >= rsv_schp->k_use_sg)
		SCSI_LOG_TIMEOUT(1, printk("sg_link_reserve: BAD size\n"));
}

/*
 * Detach the reserve buffer from 'srp' (inverse of sg_link_reserve).
 * Only clears the aliasing fields; the reserve pages stay allocated.
 */
static void
sg_unlink_reserve(Sg_fd * sfp, Sg_request * srp)
{
	Sg_scatter_hold *req_schp = &srp->data;

	SCSI_LOG_TIMEOUT(4, printk("sg_unlink_reserve: req->k_use_sg=%d\n",
				   (int) req_schp->k_use_sg));
	req_schp->k_use_sg = 0;
	req_schp->bufflen = 0;
	req_schp->pages = NULL;
	req_schp->page_order = 0;
	req_schp->sglist_len = 0;
	sfp->save_scat_len = 0;
	srp->res_used = 0;
}

/*
 * Find a completed request matching 'pack_id' (-1 matches any) that is
 * not owned by a blocking SG_IO, and mark it done==2 so concurrent
 * readers cannot claim it too.  Returns NULL if none is ready.
 */
static Sg_request *
sg_get_rq_mark(Sg_fd * sfp, int pack_id)
{
	Sg_request *resp;
	unsigned long iflags;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	for (resp = sfp->headrp; resp; resp = resp->nextrp) {
		/* look for requests that are ready + not SG_IO owned */
		if ((1 == resp->done) && (!resp->sg_io_owned) &&
		    ((-1 == pack_id) || (resp->header.pack_id == pack_id))) {
			resp->done = 2;	/* guard against other readers */
			break;
		}
	}
	write_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	return resp;
}

/* always adds to end of list */
static Sg_request *
sg_add_request(Sg_fd * sfp)
{
	int k;
	unsigned long iflags;
	Sg_request *resp;
	Sg_request *rp = sfp->req_arr;

	write_lock_irqsave(&sfp->rq_list_lock, iflags);
	resp = sfp->headrp;
	if (!resp) {
		memset(rp, 0,
sizeof (Sg_request)); rp->parentfp = sfp; resp = rp; sfp->headrp = resp; } else { if (0 == sfp->cmd_q) resp = NULL; /* command queuing disallowed */ else { for (k = 0; k < SG_MAX_QUEUE; ++k, ++rp) { if (!rp->parentfp) break; } if (k < SG_MAX_QUEUE) { memset(rp, 0, sizeof (Sg_request)); rp->parentfp = sfp; while (resp->nextrp) resp = resp->nextrp; resp->nextrp = rp; resp = rp; } else resp = NULL; } } if (resp) { resp->nextrp = NULL; resp->header.duration = jiffies_to_msecs(jiffies); } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return resp; } /* Return of 1 for found; 0 for not found */ static int sg_remove_request(Sg_fd * sfp, Sg_request * srp) { Sg_request *prev_rp; Sg_request *rp; unsigned long iflags; int res = 0; if ((!sfp) || (!srp) || (!sfp->headrp)) return res; write_lock_irqsave(&sfp->rq_list_lock, iflags); prev_rp = sfp->headrp; if (srp == prev_rp) { sfp->headrp = prev_rp->nextrp; prev_rp->parentfp = NULL; res = 1; } else { while ((rp = prev_rp->nextrp)) { if (srp == rp) { prev_rp->nextrp = rp->nextrp; rp->parentfp = NULL; res = 1; break; } prev_rp = rp; } } write_unlock_irqrestore(&sfp->rq_list_lock, iflags); return res; } static Sg_fd * sg_add_sfp(Sg_device * sdp, int dev) { Sg_fd *sfp; unsigned long iflags; int bufflen; sfp = kzalloc(sizeof(*sfp), GFP_ATOMIC | __GFP_NOWARN); if (!sfp) return NULL; init_waitqueue_head(&sfp->read_wait); rwlock_init(&sfp->rq_list_lock); kref_init(&sfp->f_ref); sfp->timeout = SG_DEFAULT_TIMEOUT; sfp->timeout_user = SG_DEFAULT_TIMEOUT_USER; sfp->force_packid = SG_DEF_FORCE_PACK_ID; sfp->low_dma = (SG_DEF_FORCE_LOW_DMA == 0) ? 
sdp->device->host->unchecked_isa_dma : 1; sfp->cmd_q = SG_DEF_COMMAND_Q; sfp->keep_orphan = SG_DEF_KEEP_ORPHAN; sfp->parentdp = sdp; write_lock_irqsave(&sg_index_lock, iflags); list_add_tail(&sfp->sfd_siblings, &sdp->sfds); write_unlock_irqrestore(&sg_index_lock, iflags); SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: sfp=0x%p\n", sfp)); if (unlikely(sg_big_buff != def_reserved_size)) sg_big_buff = def_reserved_size; bufflen = min_t(int, sg_big_buff, queue_max_sectors(sdp->device->request_queue) * 512); sg_build_reserve(sfp, bufflen); SCSI_LOG_TIMEOUT(3, printk("sg_add_sfp: bufflen=%d, k_use_sg=%d\n", sfp->reserve.bufflen, sfp->reserve.k_use_sg)); kref_get(&sdp->d_ref); __module_get(THIS_MODULE); return sfp; } static void sg_remove_sfp_usercontext(struct work_struct *work) { struct sg_fd *sfp = container_of(work, struct sg_fd, ew.work); struct sg_device *sdp = sfp->parentdp; /* Cleanup any responses which were never read(). */ while (sfp->headrp) sg_finish_rem_req(sfp->headrp); if (sfp->reserve.bufflen > 0) { SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: bufflen=%d, k_use_sg=%d\n", (int) sfp->reserve.bufflen, (int) sfp->reserve.k_use_sg)); sg_remove_scat(&sfp->reserve); } SCSI_LOG_TIMEOUT(6, printk("sg_remove_sfp: %s, sfp=0x%p\n", sdp->disk->disk_name, sfp)); kfree(sfp); scsi_device_put(sdp->device); sg_put_dev(sdp); module_put(THIS_MODULE); } static void sg_remove_sfp(struct kref *kref) { struct sg_fd *sfp = container_of(kref, struct sg_fd, f_ref); struct sg_device *sdp = sfp->parentdp; unsigned long iflags; write_lock_irqsave(&sg_index_lock, iflags); list_del(&sfp->sfd_siblings); write_unlock_irqrestore(&sg_index_lock, iflags); wake_up_interruptible(&sdp->o_excl_wait); INIT_WORK(&sfp->ew.work, sg_remove_sfp_usercontext); schedule_work(&sfp->ew.work); } static int sg_res_in_use(Sg_fd * sfp) { const Sg_request *srp; unsigned long iflags; read_lock_irqsave(&sfp->rq_list_lock, iflags); for (srp = sfp->headrp; srp; srp = srp->nextrp) if (srp->res_used) break; 
	read_unlock_irqrestore(&sfp->rq_list_lock, iflags);
	/* non-NULL srp here means the loop broke on a request using the
	 * reserve buffer */
	return srp ? 1 : 0;
}

#ifdef CONFIG_SCSI_PROC_FS
/* idr_for_each() callback: track the largest device id seen. */
static int
sg_idr_max_id(int id, void *p, void *data)
{
	int *k = data;

	if (*k < id)
		*k = id;

	return 0;
}

/* Highest sg device index in use, plus one (for /proc iteration bounds). */
static int
sg_last_dev(void)
{
	int k = -1;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	idr_for_each(&sg_index_idr, sg_idr_max_id, &k);
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return k + 1;		/* origin 1 */
}
#endif

/* must be called with sg_index_lock held */
static Sg_device *sg_lookup_dev(int dev)
{
	return idr_find(&sg_index_idr, dev);
}

/*
 * Look up sg device 'dev' and take a reference on it.  Returns
 * ERR_PTR(-ENXIO) if no such device, ERR_PTR(-ENODEV) if it is detached.
 */
static Sg_device *sg_get_dev(int dev)
{
	struct sg_device *sdp;
	unsigned long flags;

	read_lock_irqsave(&sg_index_lock, flags);
	sdp = sg_lookup_dev(dev);
	if (!sdp)
		sdp = ERR_PTR(-ENXIO);
	else if (sdp->detached) {
		/* If sdp->detached, then the refcount may already be 0, in
		 * which case it would be a bug to do kref_get().
		 */
		sdp = ERR_PTR(-ENODEV);
	} else
		kref_get(&sdp->d_ref);
	read_unlock_irqrestore(&sg_index_lock, flags);

	return sdp;
}

/* Drop a device reference; frees via sg_device_destroy at zero. */
static void sg_put_dev(struct sg_device *sdp)
{
	kref_put(&sdp->d_ref, sg_device_destroy);
}

#ifdef CONFIG_SCSI_PROC_FS

static struct proc_dir_entry *sg_proc_sgp = NULL;

static char sg_proc_sg_dirname[] = "scsi/sg";

/* /proc/scsi/sg/allow_dio: read/write the sg_allow_dio flag */
static int sg_proc_seq_show_int(struct seq_file *s, void *v);

static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
			          size_t count, loff_t *off);
static const struct file_operations adio_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_adio,
	.read = seq_read,
	.llseek = seq_lseek,
	.write = sg_proc_write_adio,
	.release = single_release,
};

/* /proc/scsi/sg/def_reserved_size: read/write the default reserve size */
static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
static ssize_t sg_proc_write_dressz(struct file *filp,
		const char __user *buffer, size_t count, loff_t *off);
static const struct file_operations dressz_fops = {
	.owner = THIS_MODULE,
	.open = sg_proc_single_open_dressz,
	.read = seq_read,
.llseek = seq_lseek, .write = sg_proc_write_dressz, .release = single_release, }; static int sg_proc_seq_show_version(struct seq_file *s, void *v); static int sg_proc_single_open_version(struct inode *inode, struct file *file); static const struct file_operations version_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_version, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); static const struct file_operations devhdr_fops = { .owner = THIS_MODULE, .open = sg_proc_single_open_devhdr, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int sg_proc_seq_show_dev(struct seq_file *s, void *v); static int sg_proc_open_dev(struct inode *inode, struct file *file); static void * dev_seq_start(struct seq_file *s, loff_t *pos); static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); static void dev_seq_stop(struct seq_file *s, void *v); static const struct file_operations dev_fops = { .owner = THIS_MODULE, .open = sg_proc_open_dev, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations dev_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_dev, }; static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); static int sg_proc_open_devstrs(struct inode *inode, struct file *file); static const struct file_operations devstrs_fops = { .owner = THIS_MODULE, .open = sg_proc_open_devstrs, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations devstrs_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_devstrs, }; static int sg_proc_seq_show_debug(struct seq_file *s, void *v); static int sg_proc_open_debug(struct inode *inode, struct file *file); static const struct 
file_operations debug_fops = { .owner = THIS_MODULE, .open = sg_proc_open_debug, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static const struct seq_operations debug_seq_ops = { .start = dev_seq_start, .next = dev_seq_next, .stop = dev_seq_stop, .show = sg_proc_seq_show_debug, }; struct sg_proc_leaf { const char * name; const struct file_operations * fops; }; static const struct sg_proc_leaf sg_proc_leaf_arr[] = { {"allow_dio", &adio_fops}, {"debug", &debug_fops}, {"def_reserved_size", &dressz_fops}, {"device_hdr", &devhdr_fops}, {"devices", &dev_fops}, {"device_strs", &devstrs_fops}, {"version", &version_fops} }; static int sg_proc_init(void) { int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); int k; sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); if (!sg_proc_sgp) return 1; for (k = 0; k < num_leaves; ++k) { const struct sg_proc_leaf *leaf = &sg_proc_leaf_arr[k]; umode_t mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); } return 0; } static void sg_proc_cleanup(void) { int k; int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); if (!sg_proc_sgp) return; for (k = 0; k < num_leaves; ++k) remove_proc_entry(sg_proc_leaf_arr[k].name, sg_proc_sgp); remove_proc_entry(sg_proc_sg_dirname, NULL); } static int sg_proc_seq_show_int(struct seq_file *s, void *v) { seq_printf(s, "%d\n", *((int *)s->private)); return 0; } static int sg_proc_single_open_adio(struct inode *inode, struct file *file) { return single_open(file, sg_proc_seq_show_int, &sg_allow_dio); } static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, size_t count, loff_t *off) { int err; unsigned long num; if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO)) return -EACCES; err = kstrtoul_from_user(buffer, count, 0, &num); if (err) return err; sg_allow_dio = num ? 
1 : 0; return count; }	/* tail of sg_proc_write_adio */

static int sg_proc_single_open_dressz(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_int, &sg_big_buff);
}

/*
 * Write handler for /proc/scsi/sg/def_reserved_size: sets the default
 * reserved buffer size (sg_big_buff), capped at 1 MB.  Requires
 * CAP_SYS_ADMIN and CAP_SYS_RAWIO.
 */
static ssize_t
sg_proc_write_dressz(struct file *filp, const char __user *buffer,
		     size_t count, loff_t *off)
{
	int err;
	unsigned long k = ULONG_MAX;

	if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SYS_RAWIO))
		return -EACCES;

	err = kstrtoul_from_user(buffer, count, 0, &k);
	if (err)
		return err;
	if (k <= 1048576) {	/* limit "big buff" to 1 MB */
		sg_big_buff = k;
		return count;
	}
	return -ERANGE;
}

static int sg_proc_seq_show_version(struct seq_file *s, void *v)
{
	seq_printf(s, "%d\t%s [%s]\n", sg_version_num, SG_VERSION_STR,
		   sg_version_date);
	return 0;
}

static int sg_proc_single_open_version(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_version, NULL);
}

/* header line matching the columns emitted by sg_proc_seq_show_dev() */
static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v)
{
	seq_printf(s, "host\tchan\tid\tlun\ttype\topens\tqdepth\tbusy\t"
		   "online\n");
	return 0;
}

static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file)
{
	return single_open(file, sg_proc_seq_show_devhdr, NULL);
}

/* iterator cookie shared by the devices/device_strs/debug seq files */
struct sg_proc_deviter {
	loff_t	index;		/* current device index */
	size_t	max;		/* number of devices at iteration start */
};

static void * dev_seq_start(struct seq_file *s, loff_t *pos)
{
	struct sg_proc_deviter * it = kmalloc(sizeof(*it), GFP_KERNEL);

	/* stash even a NULL result so dev_seq_stop()'s kfree is always safe */
	s->private = it;
	if (! it)
		return NULL;

	it->index = *pos;
	it->max = sg_last_dev();
	if (it->index >= it->max)
		return NULL;
	return it;
}

static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct sg_proc_deviter * it = s->private;

	*pos = ++it->index;
	/* statement continues on the next source line */
	return (it->index < it->max) ?
it : NULL; }	/* tail of dev_seq_next */

static void dev_seq_stop(struct seq_file *s, void *v)
{
	/* kfree(NULL) is a no-op, so the failed-alloc path is covered too */
	kfree(s->private);
}

static int sg_proc_open_dev(struct inode *inode, struct file *file)
{
	return seq_open(file, &dev_seq_ops);
}

/*
 * Emit one "devices" line: host/chan/id/lun/type/opens/qdepth/busy/online,
 * or a row of -1s if the device slot is empty or detached.
 */
static int sg_proc_seq_show_dev(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	sdp = it ? sg_lookup_dev(it->index) : NULL;
	if (sdp && (scsidp = sdp->device) && (!sdp->detached))
		seq_printf(s, "%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\n",
			      scsidp->host->host_no, scsidp->channel,
			      scsidp->id, scsidp->lun, (int) scsidp->type,
			      1,
			      (int) scsidp->queue_depth,
			      (int) scsidp->device_busy,
			      (int) scsi_device_online(scsidp));
	else
		seq_printf(s, "-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\t-1\n");
	read_unlock_irqrestore(&sg_index_lock, iflags);
	return 0;
}

static int sg_proc_open_devstrs(struct inode *inode, struct file *file)
{
	return seq_open(file, &devstrs_seq_ops);
}

/* Emit one "device_strs" line: vendor / model / revision strings. */
static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v)
{
	struct sg_proc_deviter * it = (struct sg_proc_deviter *) v;
	Sg_device *sdp;
	struct scsi_device *scsidp;
	unsigned long iflags;

	read_lock_irqsave(&sg_index_lock, iflags);
	/* statement continues on the next source line */
	sdp = it ?
sg_lookup_dev(it->index) : NULL; if (sdp && (scsidp = sdp->device) && (!sdp->detached)) seq_printf(s, "%8.8s\t%16.16s\t%4.4s\n", scsidp->vendor, scsidp->model, scsidp->rev); else seq_printf(s, "<no active device>\n"); read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } /* must be called while holding sg_index_lock */ static void sg_proc_debug_helper(struct seq_file *s, Sg_device * sdp) { int k, m, new_interface, blen, usg; Sg_request *srp; Sg_fd *fp; const sg_io_hdr_t *hp; const char * cp; unsigned int ms; k = 0; list_for_each_entry(fp, &sdp->sfds, sfd_siblings) { k++; read_lock(&fp->rq_list_lock); /* irqs already disabled */ seq_printf(s, " FD(%d): timeout=%dms bufflen=%d " "(res)sgat=%d low_dma=%d\n", k, jiffies_to_msecs(fp->timeout), fp->reserve.bufflen, (int) fp->reserve.k_use_sg, (int) fp->low_dma); seq_printf(s, " cmd_q=%d f_packid=%d k_orphan=%d closed=0\n", (int) fp->cmd_q, (int) fp->force_packid, (int) fp->keep_orphan); for (m = 0, srp = fp->headrp; srp != NULL; ++m, srp = srp->nextrp) { hp = &srp->header; new_interface = (hp->interface_id == '\0') ? 0 : 1; if (srp->res_used) { if (new_interface && (SG_FLAG_MMAP_IO & hp->flags)) cp = " mmap>> "; else cp = " rb>> "; } else { if (SG_INFO_DIRECT_IO_MASK & hp->info) cp = " dio>> "; else cp = " "; } seq_printf(s, cp); blen = srp->data.bufflen; usg = srp->data.k_use_sg; seq_printf(s, srp->done ? ((1 == srp->done) ? "rcv:" : "fin:") : "act:"); seq_printf(s, " id=%d blen=%d", srp->header.pack_id, blen); if (srp->done) seq_printf(s, " dur=%d", hp->duration); else { ms = jiffies_to_msecs(jiffies); seq_printf(s, " t_o/elap=%d/%d", (new_interface ? hp->timeout : jiffies_to_msecs(fp->timeout)), (ms > hp->duration ? 
ms - hp->duration : 0)); } seq_printf(s, "ms sgat=%d op=0x%02x\n", usg, (int) srp->data.cmd_opcode); } if (0 == m) seq_printf(s, " No requests active\n"); read_unlock(&fp->rq_list_lock); } } static int sg_proc_open_debug(struct inode *inode, struct file *file) { return seq_open(file, &debug_seq_ops); } static int sg_proc_seq_show_debug(struct seq_file *s, void *v) { struct sg_proc_deviter * it = (struct sg_proc_deviter *) v; Sg_device *sdp; unsigned long iflags; if (it && (0 == it->index)) { seq_printf(s, "max_active_device=%d(origin 1)\n", (int)it->max); seq_printf(s, " def_reserved_size=%d\n", sg_big_buff); } read_lock_irqsave(&sg_index_lock, iflags); sdp = it ? sg_lookup_dev(it->index) : NULL; if (sdp && !list_empty(&sdp->sfds)) { struct scsi_device *scsidp = sdp->device; seq_printf(s, " >>> device=%s ", sdp->disk->disk_name); if (sdp->detached) seq_printf(s, "detached pending close "); else seq_printf (s, "scsi%d chan=%d id=%d lun=%d em=%d", scsidp->host->host_no, scsidp->channel, scsidp->id, scsidp->lun, scsidp->host->hostt->emulated); seq_printf(s, " sg_tablesize=%d excl=%d\n", sdp->sg_tablesize, get_exclude(sdp)); sg_proc_debug_helper(s, sdp); } read_unlock_irqrestore(&sg_index_lock, iflags); return 0; } #endif /* CONFIG_SCSI_PROC_FS */ module_init(init_sg); module_exit(exit_sg);
gpl-2.0
myjang0507/Polaris-slte-
drivers/acpi/acpica/evgpe.c
2078
22970
/****************************************************************************** * * Module Name: evgpe - General Purpose Event handling and dispatch * *****************************************************************************/ /* * Copyright (C) 2000 - 2013, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGES.
 */

#include <acpi/acpi.h>
#include "accommon.h"
#include "acevents.h"
#include "acnamesp.h"

#define _COMPONENT          ACPI_EVENTS
ACPI_MODULE_NAME("evgpe")
#if (!ACPI_REDUCED_HARDWARE)	/* Entire module */

/* Local prototypes */
static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context);

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context);

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_update_gpe_enable_mask
 *
 * PARAMETERS:  gpe_event_info          - GPE to update
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Updates GPE register enable mask based upon whether there are
 *              runtime references to this GPE
 *
 ******************************************************************************/

acpi_status
acpi_ev_update_gpe_enable_mask(struct acpi_gpe_event_info *gpe_event_info)
{
	struct acpi_gpe_register_info *gpe_register_info;
	u32 register_bit;

	ACPI_FUNCTION_TRACE(ev_update_gpe_enable_mask);

	gpe_register_info = gpe_event_info->register_info;
	if (!gpe_register_info) {
		return_ACPI_STATUS(AE_NOT_EXIST);
	}

	register_bit = acpi_hw_get_gpe_register_bit(gpe_event_info);

	/* Clear the run bit up front */

	ACPI_CLEAR_BIT(gpe_register_info->enable_for_run, register_bit);

	/* Set the mask bit only if there are references to this GPE */

	if (gpe_event_info->runtime_count) {
		ACPI_SET_BIT(gpe_register_info->enable_for_run,
			     (u8)register_bit);
	}

	return_ACPI_STATUS(AE_OK);
}
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_enable_gpe
 *
 * PARAMETERS:  gpe_event_info          - GPE to enable
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear a GPE of stale events and enable it.
 *
 ******************************************************************************/
acpi_status acpi_ev_enable_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	ACPI_FUNCTION_TRACE(ev_enable_gpe);

	/*
	 * We will only allow a GPE to be enabled if it has either an associated
	 * method (_Lxx/_Exx) or a handler, or is using the implicit notify
	 * feature. Otherwise, the GPE will be immediately disabled by
	 * acpi_ev_gpe_dispatch the first time it fires.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) ==
	    ACPI_GPE_DISPATCH_NONE) {
		/* nothing could service this GPE, so refuse to enable it */
		return_ACPI_STATUS(AE_NO_HANDLER);
	}

	/* Clear the GPE (of stale events) */

	status = acpi_hw_clear_gpe(gpe_event_info);
	if (ACPI_FAILURE(status)) {
		return_ACPI_STATUS(status);
	}

	/* Enable the requested GPE */

	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_ENABLE);
	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_add_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Add a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Add a reference to a GPE. On the first reference, the GPE is
 *              hardware-enabled.
*******************************************************************************/

acpi_status
acpi_ev_add_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_add_gpe_reference);

	/* runtime_count is a u8 refcount; refuse to let it wrap */
	if (gpe_event_info->runtime_count == ACPI_UINT8_MAX) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count++;
	if (gpe_event_info->runtime_count == 1) {

		/* Enable on first reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_ev_enable_gpe(gpe_event_info);
		}

		/* roll the count back on failure so a later add retries */
		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count--;
		}
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_remove_gpe_reference
 *
 * PARAMETERS:  gpe_event_info          - Remove a reference to this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Remove a reference to a GPE. When the last reference is
 *              removed, the GPE is hardware-disabled.
 *
 ******************************************************************************/

acpi_status
acpi_ev_remove_gpe_reference(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status = AE_OK;

	ACPI_FUNCTION_TRACE(ev_remove_gpe_reference);

	/* refuse to drop below zero references */
	if (!gpe_event_info->runtime_count) {
		return_ACPI_STATUS(AE_LIMIT);
	}

	gpe_event_info->runtime_count--;
	if (!gpe_event_info->runtime_count) {

		/* Disable on last reference */

		status = acpi_ev_update_gpe_enable_mask(gpe_event_info);
		if (ACPI_SUCCESS(status)) {
			status = acpi_hw_low_set_gpe(gpe_event_info,
						     ACPI_GPE_DISABLE);
		}

		/* roll the count back on failure so state stays consistent */
		if (ACPI_FAILURE(status)) {
			gpe_event_info->runtime_count++;
		}
	}

	return_ACPI_STATUS(status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_low_get_gpe_info
 *
 * PARAMETERS:  gpe_number          - Raw GPE number
 *              gpe_block           - A GPE info block
 *
 * RETURN:      A GPE event_info struct.
NULL if not a valid GPE (The gpe_number
 *              is not within the specified GPE block)
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE. This is
 *              the low-level implementation of ev_get_gpe_event_info.
 *
 ******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_low_get_gpe_info(u32 gpe_number,
						     struct acpi_gpe_block_info
						     *gpe_block)
{
	u32 gpe_index;

	/*
	 * Validate that the gpe_number is within the specified gpe_block.
	 * (Two steps)
	 */
	if (!gpe_block || (gpe_number < gpe_block->block_base_number)) {
		return (NULL);
	}

	/* gpe_index is relative to the block's base GPE number */
	gpe_index = gpe_number - gpe_block->block_base_number;
	if (gpe_index >= gpe_block->gpe_count) {
		return (NULL);
	}

	return (&gpe_block->event_info[gpe_index]);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_event_info
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_number          - Raw GPE number
 *
 * RETURN:      A GPE event_info struct. NULL if not a valid GPE
 *
 * DESCRIPTION: Returns the event_info struct associated with this GPE.
 *              Validates the gpe_block and the gpe_number
 *
 *              Should be called only when the GPE lists are semaphore locked
 *              and not subject to change.
*******************************************************************************/

struct acpi_gpe_event_info *acpi_ev_get_gpe_event_info(acpi_handle gpe_device,
						       u32 gpe_number)
{
	union acpi_operand_object *obj_desc;
	struct acpi_gpe_event_info *gpe_info;
	u32 i;

	ACPI_FUNCTION_ENTRY();

	/* A NULL gpe_device means use the FADT-defined GPE block(s) */

	if (!gpe_device) {

		/* Examine GPE Block 0 and 1 (These blocks are permanent) */

		for (i = 0; i < ACPI_MAX_GPE_BLOCKS; i++) {
			gpe_info = acpi_ev_low_get_gpe_info(gpe_number,
							    acpi_gbl_gpe_fadt_blocks
							    [i]);
			if (gpe_info) {
				return (gpe_info);
			}
		}

		/* The gpe_number was not in the range of either FADT GPE block */

		return (NULL);
	}

	/* A Non-NULL gpe_device means this is a GPE Block Device */

	obj_desc =
	    acpi_ns_get_attached_object((struct acpi_namespace_node *)
					gpe_device);
	if (!obj_desc || !obj_desc->device.gpe_block) {
		return (NULL);
	}

	return (acpi_ev_low_get_gpe_info
		(gpe_number, obj_desc->device.gpe_block));
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_detect
 *
 * PARAMETERS:  gpe_xrupt_list      - Interrupt block for this interrupt.
 *                                    Can have multiple GPE blocks attached.
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Detect if any GP events have occurred. This function is
 *              executed at interrupt level.
*******************************************************************************/

u32 acpi_ev_gpe_detect(struct acpi_gpe_xrupt_info * gpe_xrupt_list)
{
	acpi_status status;
	struct acpi_gpe_block_info *gpe_block;
	struct acpi_gpe_register_info *gpe_register_info;
	u32 int_status = ACPI_INTERRUPT_NOT_HANDLED;
	u8 enabled_status_byte;
	u32 status_reg;
	u32 enable_reg;
	acpi_cpu_flags flags;
	u32 i;
	u32 j;

	ACPI_FUNCTION_NAME(ev_gpe_detect);

	/* Check for the case where there are no GPEs */

	if (!gpe_xrupt_list) {
		return (int_status);
	}

	/*
	 * We need to obtain the GPE lock for both the data structs and registers
	 * Note: Not necessary to obtain the hardware lock, since the GPE
	 * registers are owned by the gpe_lock.
	 */
	flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);

	/* Examine all GPE blocks attached to this interrupt level */

	gpe_block = gpe_xrupt_list->gpe_block_list_head;
	while (gpe_block) {
		/*
		 * Read all of the 8-bit GPE status and enable registers in this GPE
		 * block, saving all of them. Find all currently active GP events.
		 */
		for (i = 0; i < gpe_block->register_count; i++) {

			/* Get the next status/enable pair */

			gpe_register_info = &gpe_block->register_info[i];

			/*
			 * Optimization: If there are no GPEs enabled within this
			 * register, we can safely ignore the entire register.
			 */
			if (!(gpe_register_info->enable_for_run |
			      gpe_register_info->enable_for_wake)) {
				ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
						  "Ignore disabled registers for GPE%02X-GPE%02X: "
						  "RunEnable=%02X, WakeEnable=%02X\n",
						  gpe_register_info->
						  base_gpe_number,
						  gpe_register_info->
						  base_gpe_number +
						  (ACPI_GPE_REGISTER_WIDTH - 1),
						  gpe_register_info->
						  enable_for_run,
						  gpe_register_info->
						  enable_for_wake));
				continue;
			}

			/* Read the Status Register */

			status =
			    acpi_hw_read(&status_reg,
					 &gpe_register_info->status_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			/* Read the Enable Register */

			status =
			    acpi_hw_read(&enable_reg,
					 &gpe_register_info->enable_address);
			if (ACPI_FAILURE(status)) {
				goto unlock_and_exit;
			}

			ACPI_DEBUG_PRINT((ACPI_DB_INTERRUPTS,
					  "Read registers for GPE%02X-GPE%02X: Status=%02X, Enable=%02X, "
					  "RunEnable=%02X, WakeEnable=%02X\n",
					  gpe_register_info->base_gpe_number,
					  gpe_register_info->base_gpe_number +
					  (ACPI_GPE_REGISTER_WIDTH - 1),
					  status_reg, enable_reg,
					  gpe_register_info->enable_for_run,
					  gpe_register_info->enable_for_wake));

			/* Check if there is anything active at all in this register */

			enabled_status_byte = (u8) (status_reg & enable_reg);
			if (!enabled_status_byte) {

				/* No active GPEs in this register, move on */

				continue;
			}

			/* Now look at the individual GPEs in this byte register */

			for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {

				/* Examine one GPE bit */

				if (enabled_status_byte & (1 << j)) {
					/*
					 * Found an active GPE. Dispatch the event to a handler
					 * or method.
					 */
					int_status |=
					    acpi_ev_gpe_dispatch(gpe_block->
								 node,
								 &gpe_block->
								 event_info[((acpi_size) i * ACPI_GPE_REGISTER_WIDTH) + j], j + gpe_register_info->base_gpe_number);
				}
			}
		}

		gpe_block = gpe_block->next;
	}

      unlock_and_exit:

	acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
	return (int_status);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_execute_gpe_method
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *
 * RETURN:      None
 *
 * DESCRIPTION: Perform the actual execution of a GPE control method. This
 *              function is called from an invocation of acpi_os_execute and
 *              therefore does NOT execute at interrupt level - so that
 *              the control method itself is not executed in the context of
 *              an interrupt handler.
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_execute_gpe_method(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;
	acpi_status status;
	struct acpi_gpe_event_info *local_gpe_event_info;
	struct acpi_evaluate_info *info;
	struct acpi_gpe_notify_info *notify;

	ACPI_FUNCTION_TRACE(ev_asynch_execute_gpe_method);

	/* Allocate a local GPE block */

	local_gpe_event_info =
	    ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_event_info));
	if (!local_gpe_event_info) {
		ACPI_EXCEPTION((AE_INFO, AE_NO_MEMORY,
				"while handling a GPE"));
		return_VOID;
	}

	status = acpi_ut_acquire_mutex(ACPI_MTX_EVENTS);
	if (ACPI_FAILURE(status)) {
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/* Must revalidate the gpe_number/gpe_block */

	if (!acpi_ev_valid_gpe_event(gpe_event_info)) {
		status = acpi_ut_release_mutex(ACPI_MTX_EVENTS);
		ACPI_FREE(local_gpe_event_info);
		return_VOID;
	}

	/*
	 * Take a snapshot of the GPE info for this level - we copy the info to
	 * prevent a race condition with remove_handler/remove_block.
*/ ACPI_MEMCPY(local_gpe_event_info, gpe_event_info, sizeof(struct acpi_gpe_event_info)); status = acpi_ut_release_mutex(ACPI_MTX_EVENTS); if (ACPI_FAILURE(status)) { return_VOID; } /* Do the correct dispatch - normal method or implicit notify */ switch (local_gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) { case ACPI_GPE_DISPATCH_NOTIFY: /* * Implicit notify. * Dispatch a DEVICE_WAKE notify to the appropriate handler. * NOTE: the request is queued for execution after this method * completes. The notify handlers are NOT invoked synchronously * from this thread -- because handlers may in turn run other * control methods. * * June 2012: Expand implicit notify mechanism to support * notifies on multiple device objects. */ notify = local_gpe_event_info->dispatch.notify_list; while (ACPI_SUCCESS(status) && notify) { status = acpi_ev_queue_notify_request(notify->device_node, ACPI_NOTIFY_DEVICE_WAKE); notify = notify->next; } break; case ACPI_GPE_DISPATCH_METHOD: /* Allocate the evaluation information block */ info = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_evaluate_info)); if (!info) { status = AE_NO_MEMORY; } else { /* * Invoke the GPE Method (_Lxx, _Exx) i.e., evaluate the * _Lxx/_Exx control method that corresponds to this GPE */ info->prefix_node = local_gpe_event_info->dispatch.method_node; info->flags = ACPI_IGNORE_RETURN_VALUE; status = acpi_ns_evaluate(info); ACPI_FREE(info); } if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "while evaluating GPE method [%4.4s]", acpi_ut_get_node_name (local_gpe_event_info->dispatch. 
method_node)));	/* tail of the METHOD-dispatch error report */
		}
		break;

	default:

		return_VOID;	/* Should never happen */
	}

	/* Defer enabling of GPE until all notify handlers are done */

	status = acpi_os_execute(OSL_NOTIFY_HANDLER,
				 acpi_ev_asynch_enable_gpe,
				 local_gpe_event_info);
	if (ACPI_FAILURE(status)) {
		/* the enable callback will not run, so free the snapshot here */
		ACPI_FREE(local_gpe_event_info);
	}
	return_VOID;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_asynch_enable_gpe
 *
 * PARAMETERS:  Context (gpe_event_info) - Info for this GPE
 *              Callback from acpi_os_execute
 *
 * RETURN:      None
 *
 * DESCRIPTION: Asynchronous clear/enable for GPE. This allows the GPE to
 *              complete (i.e., finish execution of Notify)
 *
 ******************************************************************************/

static void ACPI_SYSTEM_XFACE acpi_ev_asynch_enable_gpe(void *context)
{
	struct acpi_gpe_event_info *gpe_event_info = context;

	(void)acpi_ev_finish_gpe(gpe_event_info);

	/* frees the snapshot allocated by acpi_ev_asynch_execute_gpe_method */
	ACPI_FREE(gpe_event_info);
	return;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_finish_gpe
 *
 * PARAMETERS:  gpe_event_info      - Info for this GPE
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Clear/Enable a GPE. Common code that is used after execution
 *              of a GPE method or a synchronous or asynchronous GPE handler.
 *
 ******************************************************************************/

acpi_status acpi_ev_finish_gpe(struct acpi_gpe_event_info *gpe_event_info)
{
	acpi_status status;

	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_LEVEL_TRIGGERED) {
		/*
		 * GPE is level-triggered, we clear the GPE status bit after
		 * handling the event.
		 */
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			return (status);
		}
	}

	/*
	 * Enable this GPE, conditionally. This means that the GPE will
	 * only be physically enabled if the enable_for_run bit is set
	 * in the event_info.
*/
	(void)acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_CONDITIONAL_ENABLE);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_gpe_dispatch
 *
 * PARAMETERS:  gpe_device          - Device node. NULL for GPE0/GPE1
 *              gpe_event_info      - Info for this GPE
 *              gpe_number          - Number relative to the parent GPE block
 *
 * RETURN:      INTERRUPT_HANDLED or INTERRUPT_NOT_HANDLED
 *
 * DESCRIPTION: Dispatch a General Purpose Event to either a function (e.g. EC)
 *              or method (e.g. _Lxx/_Exx) handler.
 *
 *              This function executes at interrupt level.
 *
 ******************************************************************************/

u32
acpi_ev_gpe_dispatch(struct acpi_namespace_node *gpe_device,
		     struct acpi_gpe_event_info *gpe_event_info, u32 gpe_number)
{
	acpi_status status;
	u32 return_value;

	ACPI_FUNCTION_TRACE(ev_gpe_dispatch);

	/* Invoke global event handler if present */

	acpi_gpe_count++;
	if (acpi_gbl_global_event_handler) {
		acpi_gbl_global_event_handler(ACPI_EVENT_TYPE_GPE, gpe_device,
					      gpe_number,
					      acpi_gbl_global_event_handler_context);
	}

	/*
	 * If edge-triggered, clear the GPE status bit now. Note that
	 * level-triggered events are cleared after the GPE is serviced.
	 */
	if ((gpe_event_info->flags & ACPI_GPE_XRUPT_TYPE_MASK) ==
	    ACPI_GPE_EDGE_TRIGGERED) {
		status = acpi_hw_clear_gpe(gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to clear GPE%02X",
					gpe_number));
			return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
		}
	}

	/*
	 * Always disable the GPE so that it does not keep firing before
	 * any asynchronous activity completes (either from the execution
	 * of a GPE method or an asynchronous GPE handler.)
	 *
	 * If there is no handler or method to run, just disable the
	 * GPE and leave it disabled permanently to prevent further such
	 * pointless events from firing.
*/
	status = acpi_hw_low_set_gpe(gpe_event_info, ACPI_GPE_DISABLE);
	if (ACPI_FAILURE(status)) {
		ACPI_EXCEPTION((AE_INFO, status,
				"Unable to disable GPE%02X", gpe_number));
		return_UINT32(ACPI_INTERRUPT_NOT_HANDLED);
	}

	/*
	 * Dispatch the GPE to either an installed handler or the control
	 * method associated with this GPE (_Lxx or _Exx). If a handler
	 * exists, we invoke it and do not attempt to run the method.
	 * If there is neither a handler nor a method, leave the GPE
	 * disabled.
	 */
	switch (gpe_event_info->flags & ACPI_GPE_DISPATCH_MASK) {
	case ACPI_GPE_DISPATCH_HANDLER:

		/* Invoke the installed handler (at interrupt level) */

		return_value =
		    gpe_event_info->dispatch.handler->address(gpe_device,
							      gpe_number,
							      gpe_event_info->
							      dispatch.handler->
							      context);

		/* If requested, clear (if level-triggered) and reenable the GPE */

		if (return_value & ACPI_REENABLE_GPE) {
			(void)acpi_ev_finish_gpe(gpe_event_info);
		}
		break;

	case ACPI_GPE_DISPATCH_METHOD:
	case ACPI_GPE_DISPATCH_NOTIFY:
		/*
		 * Execute the method associated with the GPE
		 * NOTE: Level-triggered GPEs are cleared after the method completes.
		 */
		status = acpi_os_execute(OSL_GPE_HANDLER,
					 acpi_ev_asynch_execute_gpe_method,
					 gpe_event_info);
		if (ACPI_FAILURE(status)) {
			ACPI_EXCEPTION((AE_INFO, status,
					"Unable to queue handler for GPE%02X - event disabled",
					gpe_number));
		}
		break;

	default:
		/*
		 * No handler or method to run!
		 * 03/2010: This case should no longer be possible. We will not allow
		 * a GPE to be enabled if it has no handler or method.
		 */
		ACPI_ERROR((AE_INFO,
			    "No handler or method for GPE%02X, disabling event",
			    gpe_number));
		break;
	}

	return_UINT32(ACPI_INTERRUPT_HANDLED);
}

#endif				/* !ACPI_REDUCED_HARDWARE */
gpl-2.0
PerthCharles/tcpcomment
linux-3.10/drivers/net/ethernet/emulex/benet/be_ethtool.c
2078
30198
/*
 * Copyright (C) 2005 - 2013 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation. The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include "be.h"
#include "be_cmds.h"
#include <linux/ethtool.h>

/* Describes one ethtool statistic: display name plus the stats-struct
 * family (type) and the member's size/offset inside that struct. */
struct be_ethtool_stat {
	char desc[ETH_GSTRING_LEN];
	int type;
	int size;
	int offset;
};

/* Which stats structure a be_ethtool_stat entry indexes into */
enum {DRVSTAT_TX, DRVSTAT_RX, DRVSTAT};

#define FIELDINFO(_struct, field) FIELD_SIZEOF(_struct, field), \
					offsetof(_struct, field)
#define DRVSTAT_TX_INFO(field)	#field, DRVSTAT_TX, \
					FIELDINFO(struct be_tx_stats, field)
#define DRVSTAT_RX_INFO(field)	#field, DRVSTAT_RX, \
					FIELDINFO(struct be_rx_stats, field)
#define DRVSTAT_INFO(field)	#field, DRVSTAT, \
					FIELDINFO(struct be_drv_stats, field)

static const struct be_ethtool_stat et_stats[] = {
	{DRVSTAT_INFO(rx_crc_errors)},
	{DRVSTAT_INFO(rx_alignment_symbol_errors)},
	{DRVSTAT_INFO(rx_pause_frames)},
	{DRVSTAT_INFO(rx_control_frames)},
	/* Received packets dropped when the Ethernet length field
	 * is not equal to the actual Ethernet data length.
	 */
	{DRVSTAT_INFO(rx_in_range_errors)},
	/* Received packets dropped when their length field is >= 1501 bytes
	 * and <= 1535 bytes.
	 */
	{DRVSTAT_INFO(rx_out_range_errors)},
	/* Received packets dropped when they are longer than 9216 bytes */
	{DRVSTAT_INFO(rx_frame_too_long)},
	/* Received packets dropped when they don't pass the unicast or
	 * multicast address filtering.
	 */
	{DRVSTAT_INFO(rx_address_filtered)},
	/* Received packets dropped when IP packet length field is less than
	 * the IP header length field.
	 */
	{DRVSTAT_INFO(rx_dropped_too_small)},
	/* Received packets dropped when IP length field is greater than
	 * the actual packet length.
	 */
	{DRVSTAT_INFO(rx_dropped_too_short)},
	/* Received packets dropped when the IP header length field is less
	 * than 5.
	 */
	{DRVSTAT_INFO(rx_dropped_header_too_small)},
	/* Received packets dropped when the TCP header length field is less
	 * than 5 or the TCP header length + IP header length is more
	 * than IP packet length.
	 */
	{DRVSTAT_INFO(rx_dropped_tcp_length)},
	{DRVSTAT_INFO(rx_dropped_runt)},
	/* Number of received packets dropped when a fifo for descriptors going
	 * into the packet demux block overflows. In normal operation, this
	 * fifo must never overflow.
	 */
	{DRVSTAT_INFO(rxpp_fifo_overflow_drop)},
	{DRVSTAT_INFO(rx_input_fifo_overflow_drop)},
	{DRVSTAT_INFO(rx_ip_checksum_errs)},
	{DRVSTAT_INFO(rx_tcp_checksum_errs)},
	{DRVSTAT_INFO(rx_udp_checksum_errs)},
	{DRVSTAT_INFO(tx_pauseframes)},
	{DRVSTAT_INFO(tx_controlframes)},
	{DRVSTAT_INFO(rx_priority_pause_frames)},
	{DRVSTAT_INFO(tx_priority_pauseframes)},
	/* Received packets dropped when an internal fifo going into
	 * main packet buffer tank (PMEM) overflows.
	 */
	{DRVSTAT_INFO(pmem_fifo_overflow_drop)},
	{DRVSTAT_INFO(jabber_events)},
	/* Received packets dropped due to lack of available HW packet buffers
	 * used to temporarily hold the received packets.
	 */
	{DRVSTAT_INFO(rx_drops_no_pbuf)},
	/* Received packets dropped due to input receive buffer
	 * descriptor fifo overflowing.
	 */
	{DRVSTAT_INFO(rx_drops_no_erx_descr)},
	/* Packets dropped because the internal FIFO to the offloaded TCP
	 * receive processing block is full. This could happen only for
	 * offloaded iSCSI or FCoE traffic.
	 */
	{DRVSTAT_INFO(rx_drops_no_tpre_descr)},
	/* Received packets dropped when they need more than 8
	 * receive buffers. This cannot happen as the driver configures
	 * 2048 byte receive buffers.
	 */
	{DRVSTAT_INFO(rx_drops_too_many_frags)},
	{DRVSTAT_INFO(forwarded_packets)},
	/* Received packets dropped when the frame length
	 * is more than 9018 bytes
	 */
	{DRVSTAT_INFO(rx_drops_mtu)},
	/* Number of packets dropped due to random early drop function */
	{DRVSTAT_INFO(eth_red_drops)},
	{DRVSTAT_INFO(be_on_die_temperature)}
};
#define ETHTOOL_STATS_NUM ARRAY_SIZE(et_stats)

/* Stats related to multi RX queues: get_stats routine assumes bytes, pkts
 * are first and second members respectively.
 */
static const struct be_ethtool_stat et_rx_stats[] = {
	{DRVSTAT_RX_INFO(rx_bytes)},/* If moving this member see above note */
	{DRVSTAT_RX_INFO(rx_pkts)}, /* If moving this member see above note */
	{DRVSTAT_RX_INFO(rx_compl)},
	{DRVSTAT_RX_INFO(rx_mcast_pkts)},
	/* Number of page allocation failures while posting receive buffers
	 * to HW.
	 */
	{DRVSTAT_RX_INFO(rx_post_fail)},
	/* Received packets dropped due to skb allocation failure */
	{DRVSTAT_RX_INFO(rx_drops_no_skbs)},
	/* Received packets dropped due to lack of available fetched buffers
	 * posted by the driver.
	 */
	{DRVSTAT_RX_INFO(rx_drops_no_frags)}
};
#define ETHTOOL_RXSTATS_NUM (ARRAY_SIZE(et_rx_stats))

/* Stats related to multi TX queues: get_stats routine assumes compl is the
 * first member
 */
static const struct be_ethtool_stat et_tx_stats[] = {
	{DRVSTAT_TX_INFO(tx_compl)}, /* If moving this member see above note */
	{DRVSTAT_TX_INFO(tx_bytes)},
	{DRVSTAT_TX_INFO(tx_pkts)},
	/* Number of skbs queued for transmission by the driver */
	{DRVSTAT_TX_INFO(tx_reqs)},
	/* Number of TX work request blocks DMAed to HW */
	{DRVSTAT_TX_INFO(tx_wrbs)},
	/* Number of times the TX queue was stopped due to lack
	 * of spaces in the TXQ.
*/ {DRVSTAT_TX_INFO(tx_stops)} }; #define ETHTOOL_TXSTATS_NUM (ARRAY_SIZE(et_tx_stats)) static const char et_self_tests[][ETH_GSTRING_LEN] = { "MAC Loopback test", "PHY Loopback test", "External Loopback test", "DDR DMA test", "Link test" }; #define ETHTOOL_TESTS_NUM ARRAY_SIZE(et_self_tests) #define BE_MAC_LOOPBACK 0x0 #define BE_PHY_LOOPBACK 0x1 #define BE_ONE_PORT_EXT_LOOPBACK 0x2 #define BE_NO_LOOPBACK 0xff static void be_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo) { struct be_adapter *adapter = netdev_priv(netdev); char fw_on_flash[FW_VER_LEN]; memset(fw_on_flash, 0 , sizeof(fw_on_flash)); be_cmd_get_fw_ver(adapter, adapter->fw_ver, fw_on_flash); strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver)); strlcpy(drvinfo->version, DRV_VER, sizeof(drvinfo->version)); if (!memcmp(adapter->fw_ver, fw_on_flash, FW_VER_LEN)) strlcpy(drvinfo->fw_version, adapter->fw_ver, sizeof(drvinfo->fw_version)); else snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "%s [%s]", adapter->fw_ver, fw_on_flash); strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), sizeof(drvinfo->bus_info)); drvinfo->testinfo_len = 0; drvinfo->regdump_len = 0; drvinfo->eedump_len = 0; } static u32 lancer_cmd_get_file_len(struct be_adapter *adapter, u8 *file_name) { u32 data_read = 0, eof; u8 addn_status; struct be_dma_mem data_len_cmd; int status; memset(&data_len_cmd, 0, sizeof(data_len_cmd)); /* data_offset and data_size should be 0 to get reg len */ status = lancer_cmd_read_object(adapter, &data_len_cmd, 0, 0, file_name, &data_read, &eof, &addn_status); return data_read; } static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name, u32 buf_len, void *buf) { struct be_dma_mem read_cmd; u32 read_len = 0, total_read_len = 0, chunk_size; u32 eof = 0; u8 addn_status; int status = 0; read_cmd.size = LANCER_READ_FILE_CHUNK; read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size, &read_cmd.dma); if (!read_cmd.va) { dev_err(&adapter->pdev->dev, 
"Memory allocation failure while reading dump\n"); return -ENOMEM; } while ((total_read_len < buf_len) && !eof) { chunk_size = min_t(u32, (buf_len - total_read_len), LANCER_READ_FILE_CHUNK); chunk_size = ALIGN(chunk_size, 4); status = lancer_cmd_read_object(adapter, &read_cmd, chunk_size, total_read_len, file_name, &read_len, &eof, &addn_status); if (!status) { memcpy(buf + total_read_len, read_cmd.va, read_len); total_read_len += read_len; eof &= LANCER_READ_FILE_EOF_MASK; } else { status = -EIO; break; } } pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va, read_cmd.dma); return status; } static int be_get_reg_len(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); u32 log_size = 0; if (!check_privilege(adapter, MAX_PRIVILEGES)) return 0; if (be_physfn(adapter)) { if (lancer_chip(adapter)) log_size = lancer_cmd_get_file_len(adapter, LANCER_FW_DUMP_FILE); else be_cmd_get_reg_len(adapter, &log_size); } return log_size; } static void be_get_regs(struct net_device *netdev, struct ethtool_regs *regs, void *buf) { struct be_adapter *adapter = netdev_priv(netdev); if (be_physfn(adapter)) { memset(buf, 0, regs->len); if (lancer_chip(adapter)) lancer_cmd_read_file(adapter, LANCER_FW_DUMP_FILE, regs->len, buf); else be_cmd_get_regs(adapter, regs->len, buf); } } static int be_get_coalesce(struct net_device *netdev, struct ethtool_coalesce *et) { struct be_adapter *adapter = netdev_priv(netdev); struct be_eq_obj *eqo = &adapter->eq_obj[0]; et->rx_coalesce_usecs = eqo->cur_eqd; et->rx_coalesce_usecs_high = eqo->max_eqd; et->rx_coalesce_usecs_low = eqo->min_eqd; et->tx_coalesce_usecs = eqo->cur_eqd; et->tx_coalesce_usecs_high = eqo->max_eqd; et->tx_coalesce_usecs_low = eqo->min_eqd; et->use_adaptive_rx_coalesce = eqo->enable_aic; et->use_adaptive_tx_coalesce = eqo->enable_aic; return 0; } /* TX attributes are ignored. Only RX attributes are considered * eqd cmd is issued in the worker thread. 
*/ static int be_set_coalesce(struct net_device *netdev, struct ethtool_coalesce *et) { struct be_adapter *adapter = netdev_priv(netdev); struct be_eq_obj *eqo; int i; for_all_evt_queues(adapter, eqo, i) { eqo->enable_aic = et->use_adaptive_rx_coalesce; eqo->max_eqd = min(et->rx_coalesce_usecs_high, BE_MAX_EQD); eqo->min_eqd = min(et->rx_coalesce_usecs_low, eqo->max_eqd); eqo->eqd = et->rx_coalesce_usecs; } return 0; } static void be_get_ethtool_stats(struct net_device *netdev, struct ethtool_stats *stats, uint64_t *data) { struct be_adapter *adapter = netdev_priv(netdev); struct be_rx_obj *rxo; struct be_tx_obj *txo; void *p; unsigned int i, j, base = 0, start; for (i = 0; i < ETHTOOL_STATS_NUM; i++) { p = (u8 *)&adapter->drv_stats + et_stats[i].offset; data[i] = *(u32 *)p; } base += ETHTOOL_STATS_NUM; for_all_rx_queues(adapter, rxo, j) { struct be_rx_stats *stats = rx_stats(rxo); do { start = u64_stats_fetch_begin_bh(&stats->sync); data[base] = stats->rx_bytes; data[base + 1] = stats->rx_pkts; } while (u64_stats_fetch_retry_bh(&stats->sync, start)); for (i = 2; i < ETHTOOL_RXSTATS_NUM; i++) { p = (u8 *)stats + et_rx_stats[i].offset; data[base + i] = *(u32 *)p; } base += ETHTOOL_RXSTATS_NUM; } for_all_tx_queues(adapter, txo, j) { struct be_tx_stats *stats = tx_stats(txo); do { start = u64_stats_fetch_begin_bh(&stats->sync_compl); data[base] = stats->tx_compl; } while (u64_stats_fetch_retry_bh(&stats->sync_compl, start)); do { start = u64_stats_fetch_begin_bh(&stats->sync); for (i = 1; i < ETHTOOL_TXSTATS_NUM; i++) { p = (u8 *)stats + et_tx_stats[i].offset; data[base + i] = (et_tx_stats[i].size == sizeof(u64)) ? 
*(u64 *)p : *(u32 *)p; } } while (u64_stats_fetch_retry_bh(&stats->sync, start)); base += ETHTOOL_TXSTATS_NUM; } } static void be_get_stat_strings(struct net_device *netdev, uint32_t stringset, uint8_t *data) { struct be_adapter *adapter = netdev_priv(netdev); int i, j; switch (stringset) { case ETH_SS_STATS: for (i = 0; i < ETHTOOL_STATS_NUM; i++) { memcpy(data, et_stats[i].desc, ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } for (i = 0; i < adapter->num_rx_qs; i++) { for (j = 0; j < ETHTOOL_RXSTATS_NUM; j++) { sprintf(data, "rxq%d: %s", i, et_rx_stats[j].desc); data += ETH_GSTRING_LEN; } } for (i = 0; i < adapter->num_tx_qs; i++) { for (j = 0; j < ETHTOOL_TXSTATS_NUM; j++) { sprintf(data, "txq%d: %s", i, et_tx_stats[j].desc); data += ETH_GSTRING_LEN; } } break; case ETH_SS_TEST: for (i = 0; i < ETHTOOL_TESTS_NUM; i++) { memcpy(data, et_self_tests[i], ETH_GSTRING_LEN); data += ETH_GSTRING_LEN; } break; } } static int be_get_sset_count(struct net_device *netdev, int stringset) { struct be_adapter *adapter = netdev_priv(netdev); switch (stringset) { case ETH_SS_TEST: return ETHTOOL_TESTS_NUM; case ETH_SS_STATS: return ETHTOOL_STATS_NUM + adapter->num_rx_qs * ETHTOOL_RXSTATS_NUM + adapter->num_tx_qs * ETHTOOL_TXSTATS_NUM; default: return -EINVAL; } } static u32 be_get_port_type(u32 phy_type, u32 dac_cable_len) { u32 port; switch (phy_type) { case PHY_TYPE_BASET_1GB: case PHY_TYPE_BASEX_1GB: case PHY_TYPE_SGMII: port = PORT_TP; break; case PHY_TYPE_SFP_PLUS_10GB: port = dac_cable_len ? 
PORT_DA : PORT_FIBRE; break; case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: port = PORT_FIBRE; break; case PHY_TYPE_BASET_10GB: port = PORT_TP; break; default: port = PORT_OTHER; } return port; } static u32 convert_to_et_setting(u32 if_type, u32 if_speeds) { u32 val = 0; switch (if_type) { case PHY_TYPE_BASET_1GB: case PHY_TYPE_BASEX_1GB: case PHY_TYPE_SGMII: val |= SUPPORTED_TP; if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) val |= SUPPORTED_1000baseT_Full; if (if_speeds & BE_SUPPORTED_SPEED_100MBPS) val |= SUPPORTED_100baseT_Full; if (if_speeds & BE_SUPPORTED_SPEED_10MBPS) val |= SUPPORTED_10baseT_Full; break; case PHY_TYPE_KX4_10GB: val |= SUPPORTED_Backplane; if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) val |= SUPPORTED_1000baseKX_Full; if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) val |= SUPPORTED_10000baseKX4_Full; break; case PHY_TYPE_KR_10GB: val |= SUPPORTED_Backplane | SUPPORTED_10000baseKR_Full; break; case PHY_TYPE_SFP_PLUS_10GB: case PHY_TYPE_XFP_10GB: case PHY_TYPE_SFP_1GB: val |= SUPPORTED_FIBRE; if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) val |= SUPPORTED_10000baseT_Full; if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) val |= SUPPORTED_1000baseT_Full; break; case PHY_TYPE_BASET_10GB: val |= SUPPORTED_TP; if (if_speeds & BE_SUPPORTED_SPEED_10GBPS) val |= SUPPORTED_10000baseT_Full; if (if_speeds & BE_SUPPORTED_SPEED_1GBPS) val |= SUPPORTED_1000baseT_Full; if (if_speeds & BE_SUPPORTED_SPEED_100MBPS) val |= SUPPORTED_100baseT_Full; break; default: val |= SUPPORTED_TP; } return val; } bool be_pause_supported(struct be_adapter *adapter) { return (adapter->phy.interface_type == PHY_TYPE_SFP_PLUS_10GB || adapter->phy.interface_type == PHY_TYPE_XFP_10GB) ? 
false : true; } static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) { struct be_adapter *adapter = netdev_priv(netdev); u8 link_status; u16 link_speed = 0; int status; u32 auto_speeds; u32 fixed_speeds; u32 dac_cable_len; u16 interface_type; if (adapter->phy.link_speed < 0) { status = be_cmd_link_status_query(adapter, &link_speed, &link_status, 0); if (!status) be_link_status_update(adapter, link_status); ethtool_cmd_speed_set(ecmd, link_speed); status = be_cmd_get_phy_info(adapter); if (!status) { interface_type = adapter->phy.interface_type; auto_speeds = adapter->phy.auto_speeds_supported; fixed_speeds = adapter->phy.fixed_speeds_supported; dac_cable_len = adapter->phy.dac_cable_len; ecmd->supported = convert_to_et_setting(interface_type, auto_speeds | fixed_speeds); ecmd->advertising = convert_to_et_setting(interface_type, auto_speeds); ecmd->port = be_get_port_type(interface_type, dac_cable_len); if (adapter->phy.auto_speeds_supported) { ecmd->supported |= SUPPORTED_Autoneg; ecmd->autoneg = AUTONEG_ENABLE; ecmd->advertising |= ADVERTISED_Autoneg; } ecmd->supported |= SUPPORTED_Pause; if (be_pause_supported(adapter)) ecmd->advertising |= ADVERTISED_Pause; switch (adapter->phy.interface_type) { case PHY_TYPE_KR_10GB: case PHY_TYPE_KX4_10GB: ecmd->transceiver = XCVR_INTERNAL; break; default: ecmd->transceiver = XCVR_EXTERNAL; break; } } else { ecmd->port = PORT_OTHER; ecmd->autoneg = AUTONEG_DISABLE; ecmd->transceiver = XCVR_DUMMY1; } /* Save for future use */ adapter->phy.link_speed = ethtool_cmd_speed(ecmd); adapter->phy.port_type = ecmd->port; adapter->phy.transceiver = ecmd->transceiver; adapter->phy.autoneg = ecmd->autoneg; adapter->phy.advertising = ecmd->advertising; adapter->phy.supported = ecmd->supported; } else { ethtool_cmd_speed_set(ecmd, adapter->phy.link_speed); ecmd->port = adapter->phy.port_type; ecmd->transceiver = adapter->phy.transceiver; ecmd->autoneg = adapter->phy.autoneg; ecmd->advertising = 
adapter->phy.advertising; ecmd->supported = adapter->phy.supported; } ecmd->duplex = netif_carrier_ok(netdev) ? DUPLEX_FULL : DUPLEX_UNKNOWN; ecmd->phy_address = adapter->port_num; return 0; } static void be_get_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring) { struct be_adapter *adapter = netdev_priv(netdev); ring->rx_max_pending = ring->rx_pending = adapter->rx_obj[0].q.len; ring->tx_max_pending = ring->tx_pending = adapter->tx_obj[0].q.len; } static void be_get_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) { struct be_adapter *adapter = netdev_priv(netdev); be_cmd_get_flow_control(adapter, &ecmd->tx_pause, &ecmd->rx_pause); ecmd->autoneg = adapter->phy.fc_autoneg; } static int be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd) { struct be_adapter *adapter = netdev_priv(netdev); int status; if (ecmd->autoneg != adapter->phy.fc_autoneg) return -EINVAL; adapter->tx_fc = ecmd->tx_pause; adapter->rx_fc = ecmd->rx_pause; status = be_cmd_set_flow_control(adapter, adapter->tx_fc, adapter->rx_fc); if (status) dev_warn(&adapter->pdev->dev, "Pause param set failed.\n"); return status; } static int be_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) { struct be_adapter *adapter = netdev_priv(netdev); switch (state) { case ETHTOOL_ID_ACTIVE: be_cmd_get_beacon_state(adapter, adapter->hba_port_num, &adapter->beacon_state); return 1; /* cycle on/off once per second */ case ETHTOOL_ID_ON: be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, BEACON_STATE_ENABLED); break; case ETHTOOL_ID_OFF: be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, BEACON_STATE_DISABLED); break; case ETHTOOL_ID_INACTIVE: be_cmd_set_beacon_state(adapter, adapter->hba_port_num, 0, 0, adapter->beacon_state); } return 0; } static void be_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct be_adapter *adapter = netdev_priv(netdev); if (be_is_wol_supported(adapter)) { 
wol->supported |= WAKE_MAGIC; if (adapter->wol) wol->wolopts |= WAKE_MAGIC; } else wol->wolopts = 0; memset(&wol->sopass, 0, sizeof(wol->sopass)); } static int be_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) { struct be_adapter *adapter = netdev_priv(netdev); if (wol->wolopts & ~WAKE_MAGIC) return -EOPNOTSUPP; if (!be_is_wol_supported(adapter)) { dev_warn(&adapter->pdev->dev, "WOL not supported\n"); return -EOPNOTSUPP; } if (wol->wolopts & WAKE_MAGIC) adapter->wol = true; else adapter->wol = false; return 0; } static int be_test_ddr_dma(struct be_adapter *adapter) { int ret, i; struct be_dma_mem ddrdma_cmd; static const u64 pattern[2] = { 0x5a5a5a5a5a5a5a5aULL, 0xa5a5a5a5a5a5a5a5ULL }; ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test); ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size, &ddrdma_cmd.dma, GFP_KERNEL); if (!ddrdma_cmd.va) return -ENOMEM; for (i = 0; i < 2; i++) { ret = be_cmd_ddr_dma_test(adapter, pattern[i], 4096, &ddrdma_cmd); if (ret != 0) goto err; } err: dma_free_coherent(&adapter->pdev->dev, ddrdma_cmd.size, ddrdma_cmd.va, ddrdma_cmd.dma); return ret; } static u64 be_loopback_test(struct be_adapter *adapter, u8 loopback_type, u64 *status) { be_cmd_set_loopback(adapter, adapter->hba_port_num, loopback_type, 1); *status = be_cmd_loopback_test(adapter, adapter->hba_port_num, loopback_type, 1500, 2, 0xabc); be_cmd_set_loopback(adapter, adapter->hba_port_num, BE_NO_LOOPBACK, 1); return *status; } static void be_self_test(struct net_device *netdev, struct ethtool_test *test, u64 *data) { struct be_adapter *adapter = netdev_priv(netdev); int status; u8 link_status = 0; if (adapter->function_caps & BE_FUNCTION_CAPS_SUPER_NIC) { dev_err(&adapter->pdev->dev, "Self test not supported\n"); test->flags |= ETH_TEST_FL_FAILED; return; } memset(data, 0, sizeof(u64) * ETHTOOL_TESTS_NUM); if (test->flags & ETH_TEST_FL_OFFLINE) { if (be_loopback_test(adapter, BE_MAC_LOOPBACK, &data[0]) != 0) { test->flags |= 
ETH_TEST_FL_FAILED; } if (be_loopback_test(adapter, BE_PHY_LOOPBACK, &data[1]) != 0) { test->flags |= ETH_TEST_FL_FAILED; } if (be_loopback_test(adapter, BE_ONE_PORT_EXT_LOOPBACK, &data[2]) != 0) { test->flags |= ETH_TEST_FL_FAILED; } } if (!lancer_chip(adapter) && be_test_ddr_dma(adapter) != 0) { data[3] = 1; test->flags |= ETH_TEST_FL_FAILED; } status = be_cmd_link_status_query(adapter, NULL, &link_status, 0); if (status) { test->flags |= ETH_TEST_FL_FAILED; data[4] = -1; } else if (!link_status) { test->flags |= ETH_TEST_FL_FAILED; data[4] = 1; } } static int be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) { struct be_adapter *adapter = netdev_priv(netdev); return be_load_fw(adapter, efl->data); } static int be_get_eeprom_len(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); if (!check_privilege(adapter, MAX_PRIVILEGES)) return 0; if (lancer_chip(adapter)) { if (be_physfn(adapter)) return lancer_cmd_get_file_len(adapter, LANCER_VPD_PF_FILE); else return lancer_cmd_get_file_len(adapter, LANCER_VPD_VF_FILE); } else { return BE_READ_SEEPROM_LEN; } } static int be_read_eeprom(struct net_device *netdev, struct ethtool_eeprom *eeprom, uint8_t *data) { struct be_adapter *adapter = netdev_priv(netdev); struct be_dma_mem eeprom_cmd; struct be_cmd_resp_seeprom_read *resp; int status; if (!eeprom->len) return -EINVAL; if (lancer_chip(adapter)) { if (be_physfn(adapter)) return lancer_cmd_read_file(adapter, LANCER_VPD_PF_FILE, eeprom->len, data); else return lancer_cmd_read_file(adapter, LANCER_VPD_VF_FILE, eeprom->len, data); } eeprom->magic = BE_VENDOR_ID | (adapter->pdev->device<<16); memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem)); eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read); eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size, &eeprom_cmd.dma, GFP_KERNEL); if (!eeprom_cmd.va) return -ENOMEM; status = be_cmd_get_seeprom_data(adapter, &eeprom_cmd); if (!status) { resp = eeprom_cmd.va; 
memcpy(data, resp->seeprom_data + eeprom->offset, eeprom->len); } dma_free_coherent(&adapter->pdev->dev, eeprom_cmd.size, eeprom_cmd.va, eeprom_cmd.dma); return status; } static u32 be_get_msg_level(struct net_device *netdev) { struct be_adapter *adapter = netdev_priv(netdev); if (lancer_chip(adapter)) { dev_err(&adapter->pdev->dev, "Operation not supported\n"); return -EOPNOTSUPP; } return adapter->msg_enable; } static void be_set_fw_log_level(struct be_adapter *adapter, u32 level) { struct be_dma_mem extfat_cmd; struct be_fat_conf_params *cfgs; int status; int i, j; memset(&extfat_cmd, 0, sizeof(struct be_dma_mem)); extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps); extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size, &extfat_cmd.dma); if (!extfat_cmd.va) { dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n", __func__); goto err; } status = be_cmd_get_ext_fat_capabilites(adapter, &extfat_cmd); if (!status) { cfgs = (struct be_fat_conf_params *)(extfat_cmd.va + sizeof(struct be_cmd_resp_hdr)); for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) { u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes); for (j = 0; j < num_modes; j++) { if (cfgs->module[i].trace_lvl[j].mode == MODE_UART) cfgs->module[i].trace_lvl[j].dbg_lvl = cpu_to_le32(level); } } status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs); if (status) dev_err(&adapter->pdev->dev, "Message level set failed\n"); } else { dev_err(&adapter->pdev->dev, "Message level get failed\n"); } pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va, extfat_cmd.dma); err: return; } static void be_set_msg_level(struct net_device *netdev, u32 level) { struct be_adapter *adapter = netdev_priv(netdev); if (lancer_chip(adapter)) { dev_err(&adapter->pdev->dev, "Operation not supported\n"); return; } if (adapter->msg_enable == level) return; if ((level & NETIF_MSG_HW) != (adapter->msg_enable & NETIF_MSG_HW)) be_set_fw_log_level(adapter, level & NETIF_MSG_HW ? 
FW_LOG_LEVEL_DEFAULT : FW_LOG_LEVEL_FATAL); adapter->msg_enable = level; return; } static u64 be_get_rss_hash_opts(struct be_adapter *adapter, u64 flow_type) { u64 data = 0; switch (flow_type) { case TCP_V4_FLOW: if (adapter->rss_flags & RSS_ENABLE_IPV4) data |= RXH_IP_DST | RXH_IP_SRC; if (adapter->rss_flags & RSS_ENABLE_TCP_IPV4) data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V4_FLOW: if (adapter->rss_flags & RSS_ENABLE_IPV4) data |= RXH_IP_DST | RXH_IP_SRC; if (adapter->rss_flags & RSS_ENABLE_UDP_IPV4) data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case TCP_V6_FLOW: if (adapter->rss_flags & RSS_ENABLE_IPV6) data |= RXH_IP_DST | RXH_IP_SRC; if (adapter->rss_flags & RSS_ENABLE_TCP_IPV6) data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; case UDP_V6_FLOW: if (adapter->rss_flags & RSS_ENABLE_IPV6) data |= RXH_IP_DST | RXH_IP_SRC; if (adapter->rss_flags & RSS_ENABLE_UDP_IPV6) data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; break; } return data; } static int be_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, u32 *rule_locs) { struct be_adapter *adapter = netdev_priv(netdev); if (!be_multi_rxq(adapter)) { dev_info(&adapter->pdev->dev, "ethtool::get_rxnfc: RX flow hashing is disabled\n"); return -EINVAL; } switch (cmd->cmd) { case ETHTOOL_GRXFH: cmd->data = be_get_rss_hash_opts(adapter, cmd->flow_type); break; case ETHTOOL_GRXRINGS: cmd->data = adapter->num_rx_qs - 1; break; default: return -EINVAL; } return 0; } static int be_set_rss_hash_opts(struct be_adapter *adapter, struct ethtool_rxnfc *cmd) { struct be_rx_obj *rxo; int status = 0, i, j; u8 rsstable[128]; u32 rss_flags = adapter->rss_flags; if (cmd->data != L3_RSS_FLAGS && cmd->data != (L3_RSS_FLAGS | L4_RSS_FLAGS)) return -EINVAL; switch (cmd->flow_type) { case TCP_V4_FLOW: if (cmd->data == L3_RSS_FLAGS) rss_flags &= ~RSS_ENABLE_TCP_IPV4; else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) rss_flags |= RSS_ENABLE_IPV4 | RSS_ENABLE_TCP_IPV4; break; case TCP_V6_FLOW: if (cmd->data == L3_RSS_FLAGS) rss_flags &= 
~RSS_ENABLE_TCP_IPV6; else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) rss_flags |= RSS_ENABLE_IPV6 | RSS_ENABLE_TCP_IPV6; break; case UDP_V4_FLOW: if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) && BEx_chip(adapter)) return -EINVAL; if (cmd->data == L3_RSS_FLAGS) rss_flags &= ~RSS_ENABLE_UDP_IPV4; else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) rss_flags |= RSS_ENABLE_IPV4 | RSS_ENABLE_UDP_IPV4; break; case UDP_V6_FLOW: if ((cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) && BEx_chip(adapter)) return -EINVAL; if (cmd->data == L3_RSS_FLAGS) rss_flags &= ~RSS_ENABLE_UDP_IPV6; else if (cmd->data == (L3_RSS_FLAGS | L4_RSS_FLAGS)) rss_flags |= RSS_ENABLE_IPV6 | RSS_ENABLE_UDP_IPV6; break; default: return -EINVAL; } if (rss_flags == adapter->rss_flags) return status; if (be_multi_rxq(adapter)) { for (j = 0; j < 128; j += adapter->num_rx_qs - 1) { for_all_rss_queues(adapter, rxo, i) { if ((j + i) >= 128) break; rsstable[j + i] = rxo->rss_id; } } } status = be_cmd_rss_config(adapter, rsstable, rss_flags, 128); if (!status) adapter->rss_flags = rss_flags; return status; } static int be_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd) { struct be_adapter *adapter = netdev_priv(netdev); int status = 0; if (!be_multi_rxq(adapter)) { dev_err(&adapter->pdev->dev, "ethtool::set_rxnfc: RX flow hashing is disabled\n"); return -EINVAL; } switch (cmd->cmd) { case ETHTOOL_SRXFH: status = be_set_rss_hash_opts(adapter, cmd); break; default: return -EINVAL; } return status; } const struct ethtool_ops be_ethtool_ops = { .get_settings = be_get_settings, .get_drvinfo = be_get_drvinfo, .get_wol = be_get_wol, .set_wol = be_set_wol, .get_link = ethtool_op_get_link, .get_eeprom_len = be_get_eeprom_len, .get_eeprom = be_read_eeprom, .get_coalesce = be_get_coalesce, .set_coalesce = be_set_coalesce, .get_ringparam = be_get_ringparam, .get_pauseparam = be_get_pauseparam, .set_pauseparam = be_set_pauseparam, .get_strings = be_get_stat_strings, .set_phys_id = be_set_phys_id, 
.get_msglevel = be_get_msg_level, .set_msglevel = be_set_msg_level, .get_sset_count = be_get_sset_count, .get_ethtool_stats = be_get_ethtool_stats, .get_regs_len = be_get_reg_len, .get_regs = be_get_regs, .flash_device = be_do_flash, .self_test = be_self_test, .get_rxnfc = be_get_rxnfc, .set_rxnfc = be_set_rxnfc, };
gpl-2.0
davidmueller13/android_kernel_samsung_zeroflteskt
drivers/usb/musb/omap2430.c
2078
17618
/* * Copyright (C) 2005-2007 by Texas Instruments * Some code has been taken from tusb6010.c * Copyrights for that are attributable to: * Copyright (C) 2006 Nokia Corporation * Tony Lindgren <tony@atomide.com> * * This file is part of the Inventra Controller Driver for Linux. * * The Inventra Controller Driver for Linux is free software; you * can redistribute it and/or modify it under the terms of the GNU * General Public License version 2 as published by the Free Software * Foundation. * * The Inventra Controller Driver for Linux is distributed in * the hope that it will be useful, but WITHOUT ANY WARRANTY; * without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public * License for more details. * * You should have received a copy of the GNU General Public License * along with The Inventra Controller Driver for Linux ; if not, * write to the Free Software Foundation, Inc., 59 Temple Place, * Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/list.h> #include <linux/io.h> #include <linux/of.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/pm_runtime.h> #include <linux/err.h> #include <linux/delay.h> #include <linux/usb/musb-omap.h> #include <linux/usb/omap_control_usb.h> #include "musb_core.h" #include "omap2430.h" struct omap2430_glue { struct device *dev; struct platform_device *musb; enum omap_musb_vbus_id_status status; struct work_struct omap_musb_mailbox_work; struct device *control_otghs; }; #define glue_to_musb(g) platform_get_drvdata(g->musb) static struct omap2430_glue *_glue; static struct timer_list musb_idle_timer; static void musb_do_idle(unsigned long _musb) { struct musb *musb = (void *)_musb; unsigned long flags; u8 power; u8 devctl; spin_lock_irqsave(&musb->lock, flags); switch (musb->xceiv->state) { case OTG_STATE_A_WAIT_BCON: devctl = 
musb_readb(musb->mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) { musb->xceiv->state = OTG_STATE_B_IDLE; MUSB_DEV_MODE(musb); } else { musb->xceiv->state = OTG_STATE_A_IDLE; MUSB_HST_MODE(musb); } break; case OTG_STATE_A_SUSPEND: /* finish RESUME signaling? */ if (musb->port1_status & MUSB_PORT_STAT_RESUME) { power = musb_readb(musb->mregs, MUSB_POWER); power &= ~MUSB_POWER_RESUME; dev_dbg(musb->controller, "root port resume stopped, power %02x\n", power); musb_writeb(musb->mregs, MUSB_POWER, power); musb->is_active = 1; musb->port1_status &= ~(USB_PORT_STAT_SUSPEND | MUSB_PORT_STAT_RESUME); musb->port1_status |= USB_PORT_STAT_C_SUSPEND << 16; usb_hcd_poll_rh_status(musb_to_hcd(musb)); /* NOTE: it might really be A_WAIT_BCON ... */ musb->xceiv->state = OTG_STATE_A_HOST; } break; case OTG_STATE_A_HOST: devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if (devctl & MUSB_DEVCTL_BDEVICE) musb->xceiv->state = OTG_STATE_B_IDLE; else musb->xceiv->state = OTG_STATE_A_WAIT_BCON; default: break; } spin_unlock_irqrestore(&musb->lock, flags); } static void omap2430_musb_try_idle(struct musb *musb, unsigned long timeout) { unsigned long default_timeout = jiffies + msecs_to_jiffies(3); static unsigned long last_timer; if (timeout == 0) timeout = default_timeout; /* Never idle if active, or when VBUS timeout is not set as host */ if (musb->is_active || ((musb->a_wait_bcon == 0) && (musb->xceiv->state == OTG_STATE_A_WAIT_BCON))) { dev_dbg(musb->controller, "%s active, deleting timer\n", usb_otg_state_string(musb->xceiv->state)); del_timer(&musb_idle_timer); last_timer = jiffies; return; } if (time_after(last_timer, timeout)) { if (!timer_pending(&musb_idle_timer)) last_timer = timeout; else { dev_dbg(musb->controller, "Longer idle timer already pending, ignoring\n"); return; } } last_timer = timeout; dev_dbg(musb->controller, "%s inactive, for idle timer for %lu ms\n", usb_otg_state_string(musb->xceiv->state), (unsigned long)jiffies_to_msecs(timeout - jiffies)); 
mod_timer(&musb_idle_timer, timeout); } static void omap2430_musb_set_vbus(struct musb *musb, int is_on) { struct usb_otg *otg = musb->xceiv->otg; u8 devctl; unsigned long timeout = jiffies + msecs_to_jiffies(1000); /* HDRC controls CPEN, but beware current surges during device * connect. They can trigger transient overcurrent conditions * that must be ignored. */ devctl = musb_readb(musb->mregs, MUSB_DEVCTL); if (is_on) { if (musb->xceiv->state == OTG_STATE_A_IDLE) { int loops = 100; /* start the session */ devctl |= MUSB_DEVCTL_SESSION; musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); /* * Wait for the musb to set as A device to enable the * VBUS */ while (musb_readb(musb->mregs, MUSB_DEVCTL) & 0x80) { mdelay(5); cpu_relax(); if (time_after(jiffies, timeout) || loops-- <= 0) { dev_err(musb->controller, "configured as A device timeout"); break; } } otg_set_vbus(otg, 1); } else { musb->is_active = 1; otg->default_a = 1; musb->xceiv->state = OTG_STATE_A_WAIT_VRISE; devctl |= MUSB_DEVCTL_SESSION; MUSB_HST_MODE(musb); } } else { musb->is_active = 0; /* NOTE: we're skipping A_WAIT_VFALL -> A_IDLE and * jumping right to B_IDLE... 
*/ otg->default_a = 0; musb->xceiv->state = OTG_STATE_B_IDLE; devctl &= ~MUSB_DEVCTL_SESSION; MUSB_DEV_MODE(musb); } musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); dev_dbg(musb->controller, "VBUS %s, devctl %02x " /* otg %3x conf %08x prcm %08x */ "\n", usb_otg_state_string(musb->xceiv->state), musb_readb(musb->mregs, MUSB_DEVCTL)); } static int omap2430_musb_set_mode(struct musb *musb, u8 musb_mode) { u8 devctl = musb_readb(musb->mregs, MUSB_DEVCTL); devctl |= MUSB_DEVCTL_SESSION; musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); return 0; } static inline void omap2430_low_level_exit(struct musb *musb) { u32 l; /* in any role */ l = musb_readl(musb->mregs, OTG_FORCESTDBY); l |= ENABLEFORCE; /* enable MSTANDBY */ musb_writel(musb->mregs, OTG_FORCESTDBY, l); } static inline void omap2430_low_level_init(struct musb *musb) { u32 l; l = musb_readl(musb->mregs, OTG_FORCESTDBY); l &= ~ENABLEFORCE; /* disable MSTANDBY */ musb_writel(musb->mregs, OTG_FORCESTDBY, l); } void omap_musb_mailbox(enum omap_musb_vbus_id_status status) { struct omap2430_glue *glue = _glue; if (!glue) { pr_err("%s: musb core is not yet initialized\n", __func__); return; } glue->status = status; if (!glue_to_musb(glue)) { pr_err("%s: musb core is not yet ready\n", __func__); return; } schedule_work(&glue->omap_musb_mailbox_work); } EXPORT_SYMBOL_GPL(omap_musb_mailbox); static void omap_musb_set_mailbox(struct omap2430_glue *glue) { struct musb *musb = glue_to_musb(glue); struct device *dev = musb->controller; struct musb_hdrc_platform_data *pdata = dev->platform_data; struct omap_musb_board_data *data = pdata->board_data; struct usb_otg *otg = musb->xceiv->otg; switch (glue->status) { case OMAP_MUSB_ID_GROUND: dev_dbg(dev, "ID GND\n"); otg->default_a = true; musb->xceiv->state = OTG_STATE_A_IDLE; musb->xceiv->last_event = USB_EVENT_ID; if (musb->gadget_driver) { pm_runtime_get_sync(dev); omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST); omap2430_musb_set_vbus(musb, 1); } break; case 
OMAP_MUSB_VBUS_VALID: dev_dbg(dev, "VBUS Connect\n"); otg->default_a = false; musb->xceiv->state = OTG_STATE_B_IDLE; musb->xceiv->last_event = USB_EVENT_VBUS; if (musb->gadget_driver) pm_runtime_get_sync(dev); omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); break; case OMAP_MUSB_ID_FLOAT: case OMAP_MUSB_VBUS_OFF: dev_dbg(dev, "VBUS Disconnect\n"); musb->xceiv->last_event = USB_EVENT_NONE; if (musb->gadget_driver) { omap2430_musb_set_vbus(musb, 0); pm_runtime_mark_last_busy(dev); pm_runtime_put_autosuspend(dev); } if (data->interface_type == MUSB_INTERFACE_UTMI) otg_set_vbus(musb->xceiv->otg, 0); omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DISCONNECT); break; default: dev_dbg(dev, "ID float\n"); } } static void omap_musb_mailbox_work(struct work_struct *mailbox_work) { struct omap2430_glue *glue = container_of(mailbox_work, struct omap2430_glue, omap_musb_mailbox_work); omap_musb_set_mailbox(glue); } static irqreturn_t omap2430_musb_interrupt(int irq, void *__hci) { unsigned long flags; irqreturn_t retval = IRQ_NONE; struct musb *musb = __hci; spin_lock_irqsave(&musb->lock, flags); musb->int_usb = musb_readb(musb->mregs, MUSB_INTRUSB); musb->int_tx = musb_readw(musb->mregs, MUSB_INTRTX); musb->int_rx = musb_readw(musb->mregs, MUSB_INTRRX); if (musb->int_usb || musb->int_tx || musb->int_rx) retval = musb_interrupt(musb); spin_unlock_irqrestore(&musb->lock, flags); return retval; } static int omap2430_musb_init(struct musb *musb) { u32 l; int status = 0; struct device *dev = musb->controller; struct omap2430_glue *glue = dev_get_drvdata(dev->parent); struct musb_hdrc_platform_data *plat = dev->platform_data; struct omap_musb_board_data *data = plat->board_data; /* We require some kind of external transceiver, hooked * up through ULPI. TWL4030-family PMICs include one, * which needs a driver, drivers aren't always needed. 
*/ if (dev->parent->of_node) musb->xceiv = devm_usb_get_phy_by_phandle(dev->parent, "usb-phy", 0); else musb->xceiv = devm_usb_get_phy_dev(dev, 0); if (IS_ERR(musb->xceiv)) { status = PTR_ERR(musb->xceiv); if (status == -ENXIO) return status; pr_err("HS USB OTG: no transceiver configured\n"); return -EPROBE_DEFER; } musb->isr = omap2430_musb_interrupt; status = pm_runtime_get_sync(dev); if (status < 0) { dev_err(dev, "pm_runtime_get_sync FAILED %d\n", status); goto err1; } l = musb_readl(musb->mregs, OTG_INTERFSEL); if (data->interface_type == MUSB_INTERFACE_UTMI) { /* OMAP4 uses Internal PHY GS70 which uses UTMI interface */ l &= ~ULPI_12PIN; /* Disable ULPI */ l |= UTMI_8BIT; /* Enable UTMI */ } else { l |= ULPI_12PIN; } musb_writel(musb->mregs, OTG_INTERFSEL, l); pr_debug("HS USB OTG: revision 0x%x, sysconfig 0x%02x, " "sysstatus 0x%x, intrfsel 0x%x, simenable 0x%x\n", musb_readl(musb->mregs, OTG_REVISION), musb_readl(musb->mregs, OTG_SYSCONFIG), musb_readl(musb->mregs, OTG_SYSSTATUS), musb_readl(musb->mregs, OTG_INTERFSEL), musb_readl(musb->mregs, OTG_SIMENABLE)); setup_timer(&musb_idle_timer, musb_do_idle, (unsigned long) musb); if (glue->status != OMAP_MUSB_UNKNOWN) omap_musb_set_mailbox(glue); usb_phy_init(musb->xceiv); pm_runtime_put_noidle(musb->controller); return 0; err1: return status; } static void omap2430_musb_enable(struct musb *musb) { u8 devctl; unsigned long timeout = jiffies + msecs_to_jiffies(1000); struct device *dev = musb->controller; struct omap2430_glue *glue = dev_get_drvdata(dev->parent); struct musb_hdrc_platform_data *pdata = dev->platform_data; struct omap_musb_board_data *data = pdata->board_data; switch (glue->status) { case OMAP_MUSB_ID_GROUND: omap_control_usb_set_mode(glue->control_otghs, USB_MODE_HOST); if (data->interface_type != MUSB_INTERFACE_UTMI) break; devctl = musb_readb(musb->mregs, MUSB_DEVCTL); /* start the session */ devctl |= MUSB_DEVCTL_SESSION; musb_writeb(musb->mregs, MUSB_DEVCTL, devctl); while 
(musb_readb(musb->mregs, MUSB_DEVCTL) & MUSB_DEVCTL_BDEVICE) { cpu_relax(); if (time_after(jiffies, timeout)) { dev_err(dev, "configured as A device timeout"); break; } } break; case OMAP_MUSB_VBUS_VALID: omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DEVICE); break; default: break; } } static void omap2430_musb_disable(struct musb *musb) { struct device *dev = musb->controller; struct omap2430_glue *glue = dev_get_drvdata(dev->parent); if (glue->status != OMAP_MUSB_UNKNOWN) omap_control_usb_set_mode(glue->control_otghs, USB_MODE_DISCONNECT); } static int omap2430_musb_exit(struct musb *musb) { del_timer_sync(&musb_idle_timer); omap2430_low_level_exit(musb); return 0; } static const struct musb_platform_ops omap2430_ops = { .init = omap2430_musb_init, .exit = omap2430_musb_exit, .set_mode = omap2430_musb_set_mode, .try_idle = omap2430_musb_try_idle, .set_vbus = omap2430_musb_set_vbus, .enable = omap2430_musb_enable, .disable = omap2430_musb_disable, }; static u64 omap2430_dmamask = DMA_BIT_MASK(32); static int omap2430_probe(struct platform_device *pdev) { struct musb_hdrc_platform_data *pdata = pdev->dev.platform_data; struct omap_musb_board_data *data; struct platform_device *musb; struct omap2430_glue *glue; struct device_node *np = pdev->dev.of_node; struct musb_hdrc_config *config; int ret = -ENOMEM; glue = devm_kzalloc(&pdev->dev, sizeof(*glue), GFP_KERNEL); if (!glue) { dev_err(&pdev->dev, "failed to allocate glue context\n"); goto err0; } musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO); if (!musb) { dev_err(&pdev->dev, "failed to allocate musb device\n"); goto err0; } musb->dev.parent = &pdev->dev; musb->dev.dma_mask = &omap2430_dmamask; musb->dev.coherent_dma_mask = omap2430_dmamask; glue->dev = &pdev->dev; glue->musb = musb; glue->status = OMAP_MUSB_UNKNOWN; if (np) { pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(&pdev->dev, "failed to allocate musb platfrom data\n"); goto err2; } data = 
devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); if (!data) { dev_err(&pdev->dev, "failed to allocate musb board data\n"); goto err2; } config = devm_kzalloc(&pdev->dev, sizeof(*config), GFP_KERNEL); if (!config) { dev_err(&pdev->dev, "failed to allocate musb hdrc config\n"); goto err2; } of_property_read_u32(np, "mode", (u32 *)&pdata->mode); of_property_read_u32(np, "interface-type", (u32 *)&data->interface_type); of_property_read_u32(np, "num-eps", (u32 *)&config->num_eps); of_property_read_u32(np, "ram-bits", (u32 *)&config->ram_bits); of_property_read_u32(np, "power", (u32 *)&pdata->power); config->multipoint = of_property_read_bool(np, "multipoint"); pdata->has_mailbox = of_property_read_bool(np, "ti,has-mailbox"); pdata->board_data = data; pdata->config = config; } if (pdata->has_mailbox) { glue->control_otghs = omap_get_control_dev(); if (IS_ERR(glue->control_otghs)) { dev_vdbg(&pdev->dev, "Failed to get control device\n"); ret = PTR_ERR(glue->control_otghs); goto err2; } } else { glue->control_otghs = ERR_PTR(-ENODEV); } pdata->platform_ops = &omap2430_ops; platform_set_drvdata(pdev, glue); /* * REVISIT if we ever have two instances of the wrapper, we will be * in big trouble */ _glue = glue; INIT_WORK(&glue->omap_musb_mailbox_work, omap_musb_mailbox_work); ret = platform_device_add_resources(musb, pdev->resource, pdev->num_resources); if (ret) { dev_err(&pdev->dev, "failed to add resources\n"); goto err2; } ret = platform_device_add_data(musb, pdata, sizeof(*pdata)); if (ret) { dev_err(&pdev->dev, "failed to add platform_data\n"); goto err2; } pm_runtime_enable(&pdev->dev); ret = platform_device_add(musb); if (ret) { dev_err(&pdev->dev, "failed to register musb device\n"); goto err2; } return 0; err2: platform_device_put(musb); err0: return ret; } static int omap2430_remove(struct platform_device *pdev) { struct omap2430_glue *glue = platform_get_drvdata(pdev); cancel_work_sync(&glue->omap_musb_mailbox_work); platform_device_unregister(glue->musb); 
	return 0;
}

#ifdef CONFIG_PM

/*
 * Runtime suspend: save the OTG_INTERFSEL register (it is lost while the
 * module is idled), drop the low-level interface into standby and put the
 * PHY to sleep.  The saved value is restored by omap2430_runtime_resume().
 */
static int omap2430_runtime_suspend(struct device *dev)
{
	struct omap2430_glue		*glue = dev_get_drvdata(dev);
	struct musb			*musb = glue_to_musb(glue);

	/* musb may not be allocated yet if we suspend before core probe */
	if (musb) {
		musb->context.otg_interfsel = musb_readl(musb->mregs,
				OTG_INTERFSEL);

		omap2430_low_level_exit(musb);
		usb_phy_set_suspend(musb->xceiv, 1);
	}

	return 0;
}

/*
 * Runtime resume: bring the low-level interface out of standby, restore
 * OTG_INTERFSEL saved at suspend time, and wake the PHY.
 */
static int omap2430_runtime_resume(struct device *dev)
{
	struct omap2430_glue		*glue = dev_get_drvdata(dev);
	struct musb			*musb = glue_to_musb(glue);

	if (musb) {
		omap2430_low_level_init(musb);
		musb_writel(musb->mregs, OTG_INTERFSEL,
				musb->context.otg_interfsel);

		usb_phy_set_suspend(musb->xceiv, 0);
	}

	return 0;
}

static struct dev_pm_ops omap2430_pm_ops = {
	.runtime_suspend = omap2430_runtime_suspend,
	.runtime_resume = omap2430_runtime_resume,
};

#define DEV_PM_OPS	(&omap2430_pm_ops)
#else
#define DEV_PM_OPS	NULL
#endif

#ifdef CONFIG_OF
static const struct of_device_id omap2430_id_table[] = {
	{
		.compatible = "ti,omap4-musb"
	},
	{
		.compatible = "ti,omap3-musb"
	},
	{},
};
MODULE_DEVICE_TABLE(of, omap2430_id_table);
#endif

static struct platform_driver omap2430_driver = {
	.probe		= omap2430_probe,
	.remove		= omap2430_remove,
	.driver		= {
		.name	= "musb-omap2430",
		.pm	= DEV_PM_OPS,
		.of_match_table = of_match_ptr(omap2430_id_table),
	},
};

MODULE_DESCRIPTION("OMAP2PLUS MUSB Glue Layer");
MODULE_AUTHOR("Felipe Balbi <balbi@ti.com>");
MODULE_LICENSE("GPL v2");

static int __init omap2430_init(void)
{
	return platform_driver_register(&omap2430_driver);
}
/* subsys_initcall: must register before the musb core and gadget drivers */
subsys_initcall(omap2430_init);

static void __exit omap2430_exit(void)
{
	platform_driver_unregister(&omap2430_driver);
}
module_exit(omap2430_exit);
gpl-2.0
fards/Ainol_fire_kernel
arch/s390/kvm/priv.c
2334
8193
/* * priv.c - handling privileged instructions * * Copyright IBM Corp. 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) * as published by the Free Software Foundation. * * Author(s): Carsten Otte <cotte@de.ibm.com> * Christian Borntraeger <borntraeger@de.ibm.com> */ #include <linux/kvm.h> #include <linux/gfp.h> #include <linux/errno.h> #include <asm/current.h> #include <asm/debug.h> #include <asm/ebcdic.h> #include <asm/sysinfo.h> #include "gaccess.h" #include "kvm-s390.h" static int handle_set_prefix(struct kvm_vcpu *vcpu) { int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; u32 address = 0; u8 tmp; vcpu->stat.instruction_spx++; operand2 = disp2; if (base2) operand2 += vcpu->arch.guest_gprs[base2]; /* must be word boundary */ if (operand2 & 3) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } /* get the value */ if (get_guest_u32(vcpu, operand2, &address)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } address = address & 0x7fffe000u; /* make sure that the new value is valid memory */ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } vcpu->arch.sie_block->prefix = address; vcpu->arch.sie_block->ihcpu = 0xffff; VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); out: return 0; } static int handle_store_prefix(struct kvm_vcpu *vcpu) { int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; u32 address; vcpu->stat.instruction_stpx++; operand2 = disp2; if (base2) operand2 += vcpu->arch.guest_gprs[base2]; /* must be word boundary */ if (operand2 & 3) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } address = vcpu->arch.sie_block->prefix; address = 
address & 0x7fffe000u; /* get the value */ if (put_guest_u32(vcpu, operand2, address)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); out: return 0; } static int handle_store_cpu_address(struct kvm_vcpu *vcpu) { int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 useraddr; int rc; vcpu->stat.instruction_stap++; useraddr = disp2; if (base2) useraddr += vcpu->arch.guest_gprs[base2]; if (useraddr & 1) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id); if (rc == -EFAULT) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); out: return 0; } static int handle_skey(struct kvm_vcpu *vcpu) { vcpu->stat.instruction_storage_key++; vcpu->arch.sie_block->gpsw.addr -= 4; VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation"); return 0; } static int handle_stsch(struct kvm_vcpu *vcpu) { vcpu->stat.instruction_stsch++; VCPU_EVENT(vcpu, 4, "%s", "store subchannel - CC3"); /* condition code 3 */ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; return 0; } static int handle_chsc(struct kvm_vcpu *vcpu) { vcpu->stat.instruction_chsc++; VCPU_EVENT(vcpu, 4, "%s", "channel subsystem call - CC3"); /* condition code 3 */ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; return 0; } static int handle_stfl(struct kvm_vcpu *vcpu) { unsigned int facility_list; int rc; vcpu->stat.instruction_stfl++; /* only pass the facility bits, which we can handle */ facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3; rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), &facility_list, sizeof(facility_list)); if (rc == -EFAULT) kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); else VCPU_EVENT(vcpu, 5, "store 
facility list value %x", facility_list); return 0; } static int handle_stidp(struct kvm_vcpu *vcpu) { int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; int rc; vcpu->stat.instruction_stidp++; operand2 = disp2; if (base2) operand2 += vcpu->arch.guest_gprs[base2]; if (operand2 & 7) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data); if (rc == -EFAULT) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); out: return 0; } static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem) { struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int; int cpus = 0; int n; spin_lock(&fi->lock); for (n = 0; n < KVM_MAX_VCPUS; n++) if (fi->local_int[n]) cpus++; spin_unlock(&fi->lock); /* deal with other level 3 hypervisors */ if (stsi(mem, 3, 2, 2) == -ENOSYS) mem->count = 0; if (mem->count < 8) mem->count++; for (n = mem->count - 1; n > 0 ; n--) memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0])); mem->vm[0].cpus_total = cpus; mem->vm[0].cpus_configured = cpus; mem->vm[0].cpus_standby = 0; mem->vm[0].cpus_reserved = 0; mem->vm[0].caf = 1000; memcpy(mem->vm[0].name, "KVMguest", 8); ASCEBC(mem->vm[0].name, 8); memcpy(mem->vm[0].cpi, "KVM/Linux ", 16); ASCEBC(mem->vm[0].cpi, 16); } static int handle_stsi(struct kvm_vcpu *vcpu) { int fc = (vcpu->arch.guest_gprs[0] & 0xf0000000) >> 28; int sel1 = vcpu->arch.guest_gprs[0] & 0xff; int sel2 = vcpu->arch.guest_gprs[1] & 0xffff; int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; unsigned long mem; vcpu->stat.instruction_stsi++; VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2); operand2 = disp2; if (base2) operand2 += vcpu->arch.guest_gprs[base2]; if (operand2 & 0xfff && fc > 0) return kvm_s390_inject_program_int(vcpu, 
PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		vcpu->arch.guest_gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
			goto out_mem;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_fail;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_fail;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_mem;
	}
	free_page(mem);
	/* condition code 0 */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.guest_gprs[0] = 0;
	return 0;
out_mem:
	free_page(mem);
out_fail:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
	return 0;
}

/* Dispatch table indexed by the low byte of the B2xx opcode. */
static intercept_handler_t priv_handlers[256] = {
	[0x02] = handle_stidp,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x34] = handle_stsch,
	[0x5f] = handle_chsc,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * a lot of B2 instructions are privileged. We first check for
	 * the privileged ones, that we can handle in the kernel. If the
	 * kernel can handle this instruction, we check for the problem
	 * state bit and (a) handle the instruction or (b) send a code 2
	 * program check.
	 * Anything else goes to userspace.
	 */
	handler = priv_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler) {
		if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
			return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);
		else
			return handler(vcpu);
	}
	return -EOPNOTSUPP;
}
gpl-2.0
jm199011/ef40s_kernel_4.2
drivers/staging/octeon/cvmx-pko.c
3102
14987
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * Support library for the hardware Packet Output unit. */ #include <asm/octeon/octeon.h> #include "cvmx-config.h" #include "cvmx-pko.h" #include "cvmx-helper.h" /** * Internal state of packet output */ /** * Call before any other calls to initialize the packet * output system. This does chip global config, and should only be * done by one core. */ void cvmx_pko_initialize_global(void) { int i; uint64_t priority = 8; union cvmx_pko_reg_cmd_buf config; /* * Set the size of the PKO command buffers to an odd number of * 64bit words. This allows the normal two word send to stay * aligned and never span a command word buffer. 
*/ config.u64 = 0; config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL; config.s.size = CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE / 8 - 1; cvmx_write_csr(CVMX_PKO_REG_CMD_BUF, config.u64); for (i = 0; i < CVMX_PKO_MAX_OUTPUT_QUEUES; i++) cvmx_pko_config_port(CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID, i, 1, &priority); /* * If we aren't using all of the queues optimize PKO's * internal memory. */ if (OCTEON_IS_MODEL(OCTEON_CN38XX) || OCTEON_IS_MODEL(OCTEON_CN58XX) || OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX)) { int num_interfaces = cvmx_helper_get_number_of_interfaces(); int last_port = cvmx_helper_get_last_ipd_port(num_interfaces - 1); int max_queues = cvmx_pko_get_base_queue(last_port) + cvmx_pko_get_num_queues(last_port); if (OCTEON_IS_MODEL(OCTEON_CN38XX)) { if (max_queues <= 32) cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2); else if (max_queues <= 64) cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1); } else { if (max_queues <= 64) cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 2); else if (max_queues <= 128) cvmx_write_csr(CVMX_PKO_REG_QUEUE_MODE, 1); } } } /** * This function does per-core initialization required by the PKO routines. * This must be called on all cores that will do packet output, and must * be called after the FPA has been initialized and filled with pages. * * Returns 0 on success * !0 on failure */ int cvmx_pko_initialize_local(void) { /* Nothing to do */ return 0; } /** * Enables the packet output hardware. It must already be * configured. */ void cvmx_pko_enable(void) { union cvmx_pko_reg_flags flags; flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS); if (flags.s.ena_pko) cvmx_dprintf ("Warning: Enabling PKO when PKO already enabled.\n"); flags.s.ena_dwb = 1; flags.s.ena_pko = 1; /* * always enable big endian for 3-word command. Does nothing * for 2-word. */ flags.s.store_be = 1; cvmx_write_csr(CVMX_PKO_REG_FLAGS, flags.u64); } /** * Disables the packet output. Does not affect any configuration. 
*/ void cvmx_pko_disable(void) { union cvmx_pko_reg_flags pko_reg_flags; pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS); pko_reg_flags.s.ena_pko = 0; cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64); } /** * Reset the packet output. */ static void __cvmx_pko_reset(void) { union cvmx_pko_reg_flags pko_reg_flags; pko_reg_flags.u64 = cvmx_read_csr(CVMX_PKO_REG_FLAGS); pko_reg_flags.s.reset = 1; cvmx_write_csr(CVMX_PKO_REG_FLAGS, pko_reg_flags.u64); } /** * Shutdown and free resources required by packet output. */ void cvmx_pko_shutdown(void) { union cvmx_pko_mem_queue_ptrs config; int queue; cvmx_pko_disable(); for (queue = 0; queue < CVMX_PKO_MAX_OUTPUT_QUEUES; queue++) { config.u64 = 0; config.s.tail = 1; config.s.index = 0; config.s.port = CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID; config.s.queue = queue & 0x7f; config.s.qos_mask = 0; config.s.buf_ptr = 0; if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) { union cvmx_pko_reg_queue_ptrs1 config1; config1.u64 = 0; config1.s.qid7 = queue >> 7; cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64); } cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64); cvmx_cmd_queue_shutdown(CVMX_CMD_QUEUE_PKO(queue)); } __cvmx_pko_reset(); } /** * Configure a output port and the associated queues for use. * * @port: Port to configure. * @base_queue: First queue number to associate with this port. * @num_queues: Number of queues to associate with this port * @priority: Array of priority levels for each queue. Values are * allowed to be 0-8. A value of 8 get 8 times the traffic * of a value of 1. A value of 0 indicates that no rounds * will be participated in. These priorities can be changed * on the fly while the pko is enabled. A priority of 9 * indicates that static priority should be used. If static * priority is used all queues with static priority must be * contiguous starting at the base_queue, and lower numbered * queues have higher priority than higher numbered queues. * There must be num_queues elements in the array. 
*/ cvmx_pko_status_t cvmx_pko_config_port(uint64_t port, uint64_t base_queue, uint64_t num_queues, const uint64_t priority[]) { cvmx_pko_status_t result_code; uint64_t queue; union cvmx_pko_mem_queue_ptrs config; union cvmx_pko_reg_queue_ptrs1 config1; int static_priority_base = -1; int static_priority_end = -1; if ((port >= CVMX_PKO_NUM_OUTPUT_PORTS) && (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID)) { cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid port %llu\n", (unsigned long long)port); return CVMX_PKO_INVALID_PORT; } if (base_queue + num_queues > CVMX_PKO_MAX_OUTPUT_QUEUES) { cvmx_dprintf ("ERROR: cvmx_pko_config_port: Invalid queue range %llu\n", (unsigned long long)(base_queue + num_queues)); return CVMX_PKO_INVALID_QUEUE; } if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) { /* * Validate the static queue priority setup and set * static_priority_base and static_priority_end * accordingly. */ for (queue = 0; queue < num_queues; queue++) { /* Find first queue of static priority */ if (static_priority_base == -1 && priority[queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY) static_priority_base = queue; /* Find last queue of static priority */ if (static_priority_base != -1 && static_priority_end == -1 && priority[queue] != CVMX_PKO_QUEUE_STATIC_PRIORITY && queue) static_priority_end = queue - 1; else if (static_priority_base != -1 && static_priority_end == -1 && queue == num_queues - 1) /* all queues are static priority */ static_priority_end = queue; /* * Check to make sure all static priority * queues are contiguous. Also catches some * cases of static priorites not starting at * queue 0. */ if (static_priority_end != -1 && (int)queue > static_priority_end && priority[queue] == CVMX_PKO_QUEUE_STATIC_PRIORITY) { cvmx_dprintf("ERROR: cvmx_pko_config_port: " "Static priority queues aren't " "contiguous or don't start at " "base queue. 
q: %d, eq: %d\n", (int)queue, static_priority_end); return CVMX_PKO_INVALID_PRIORITY; } } if (static_priority_base > 0) { cvmx_dprintf("ERROR: cvmx_pko_config_port: Static " "priority queues don't start at base " "queue. sq: %d\n", static_priority_base); return CVMX_PKO_INVALID_PRIORITY; } #if 0 cvmx_dprintf("Port %d: Static priority queue base: %d, " "end: %d\n", port, static_priority_base, static_priority_end); #endif } /* * At this point, static_priority_base and static_priority_end * are either both -1, or are valid start/end queue * numbers. */ result_code = CVMX_PKO_SUCCESS; #ifdef PKO_DEBUG cvmx_dprintf("num queues: %d (%lld,%lld)\n", num_queues, CVMX_PKO_QUEUES_PER_PORT_INTERFACE0, CVMX_PKO_QUEUES_PER_PORT_INTERFACE1); #endif for (queue = 0; queue < num_queues; queue++) { uint64_t *buf_ptr = NULL; config1.u64 = 0; config1.s.idx3 = queue >> 3; config1.s.qid7 = (base_queue + queue) >> 7; config.u64 = 0; config.s.tail = queue == (num_queues - 1); config.s.index = queue; config.s.port = port; config.s.queue = base_queue + queue; if (!cvmx_octeon_is_pass1()) { config.s.static_p = static_priority_base >= 0; config.s.static_q = (int)queue <= static_priority_end; config.s.s_tail = (int)queue == static_priority_end; } /* * Convert the priority into an enable bit field. 
Try * to space the bits out evenly so the packet don't * get grouped up */ switch ((int)priority[queue]) { case 0: config.s.qos_mask = 0x00; break; case 1: config.s.qos_mask = 0x01; break; case 2: config.s.qos_mask = 0x11; break; case 3: config.s.qos_mask = 0x49; break; case 4: config.s.qos_mask = 0x55; break; case 5: config.s.qos_mask = 0x57; break; case 6: config.s.qos_mask = 0x77; break; case 7: config.s.qos_mask = 0x7f; break; case 8: config.s.qos_mask = 0xff; break; case CVMX_PKO_QUEUE_STATIC_PRIORITY: /* Pass 1 will fall through to the error case */ if (!cvmx_octeon_is_pass1()) { config.s.qos_mask = 0xff; break; } default: cvmx_dprintf("ERROR: cvmx_pko_config_port: Invalid " "priority %llu\n", (unsigned long long)priority[queue]); config.s.qos_mask = 0xff; result_code = CVMX_PKO_INVALID_PRIORITY; break; } if (port != CVMX_PKO_MEM_QUEUE_PTRS_ILLEGAL_PID) { cvmx_cmd_queue_result_t cmd_res = cvmx_cmd_queue_initialize(CVMX_CMD_QUEUE_PKO (base_queue + queue), CVMX_PKO_MAX_QUEUE_DEPTH, CVMX_FPA_OUTPUT_BUFFER_POOL, CVMX_FPA_OUTPUT_BUFFER_POOL_SIZE - CVMX_PKO_COMMAND_BUFFER_SIZE_ADJUST * 8); if (cmd_res != CVMX_CMD_QUEUE_SUCCESS) { switch (cmd_res) { case CVMX_CMD_QUEUE_NO_MEMORY: cvmx_dprintf("ERROR: " "cvmx_pko_config_port: " "Unable to allocate " "output buffer.\n"); return CVMX_PKO_NO_MEMORY; case CVMX_CMD_QUEUE_ALREADY_SETUP: cvmx_dprintf ("ERROR: cvmx_pko_config_port: Port already setup.\n"); return CVMX_PKO_PORT_ALREADY_SETUP; case CVMX_CMD_QUEUE_INVALID_PARAM: default: cvmx_dprintf ("ERROR: cvmx_pko_config_port: Command queue initialization failed.\n"); return CVMX_PKO_CMD_QUEUE_INIT_ERROR; } } buf_ptr = (uint64_t *) cvmx_cmd_queue_buffer(CVMX_CMD_QUEUE_PKO (base_queue + queue)); config.s.buf_ptr = cvmx_ptr_to_phys(buf_ptr); } else config.s.buf_ptr = 0; CVMX_SYNCWS; if (!OCTEON_IS_MODEL(OCTEON_CN3XXX)) cvmx_write_csr(CVMX_PKO_REG_QUEUE_PTRS1, config1.u64); cvmx_write_csr(CVMX_PKO_MEM_QUEUE_PTRS, config.u64); } return result_code; } #ifdef PKO_DEBUG /** * Show 
map of ports -> queues for different cores. */ void cvmx_pko_show_queue_map() { int core, port; int pko_output_ports = 36; cvmx_dprintf("port"); for (port = 0; port < pko_output_ports; port++) cvmx_dprintf("%3d ", port); cvmx_dprintf("\n"); for (core = 0; core < CVMX_MAX_CORES; core++) { cvmx_dprintf("\n%2d: ", core); for (port = 0; port < pko_output_ports; port++) { cvmx_dprintf("%3d ", cvmx_pko_get_base_queue_per_core(port, core)); } } cvmx_dprintf("\n"); } #endif /** * Rate limit a PKO port to a max packets/sec. This function is only * supported on CN51XX and higher, excluding CN58XX. * * @port: Port to rate limit * @packets_s: Maximum packet/sec * @burst: Maximum number of packets to burst in a row before rate * limiting cuts in. * * Returns Zero on success, negative on failure */ int cvmx_pko_rate_limit_packets(int port, int packets_s, int burst) { union cvmx_pko_mem_port_rate0 pko_mem_port_rate0; union cvmx_pko_mem_port_rate1 pko_mem_port_rate1; pko_mem_port_rate0.u64 = 0; pko_mem_port_rate0.s.pid = port; pko_mem_port_rate0.s.rate_pkt = cvmx_sysinfo_get()->cpu_clock_hz / packets_s / 16; /* No cost per word since we are limited by packets/sec, not bits/sec */ pko_mem_port_rate0.s.rate_word = 0; pko_mem_port_rate1.u64 = 0; pko_mem_port_rate1.s.pid = port; pko_mem_port_rate1.s.rate_lim = ((uint64_t) pko_mem_port_rate0.s.rate_pkt * burst) >> 8; cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64); cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64); return 0; } /** * Rate limit a PKO port to a max bits/sec. This function is only * supported on CN51XX and higher, excluding CN58XX. * * @port: Port to rate limit * @bits_s: PKO rate limit in bits/sec * @burst: Maximum number of bits to burst before rate * limiting cuts in. 
* * Returns Zero on success, negative on failure */ int cvmx_pko_rate_limit_bits(int port, uint64_t bits_s, int burst) { union cvmx_pko_mem_port_rate0 pko_mem_port_rate0; union cvmx_pko_mem_port_rate1 pko_mem_port_rate1; uint64_t clock_rate = cvmx_sysinfo_get()->cpu_clock_hz; uint64_t tokens_per_bit = clock_rate * 16 / bits_s; pko_mem_port_rate0.u64 = 0; pko_mem_port_rate0.s.pid = port; /* * Each packet has a 12 bytes of interframe gap, an 8 byte * preamble, and a 4 byte CRC. These are not included in the * per word count. Multiply by 8 to covert to bits and divide * by 256 for limit granularity. */ pko_mem_port_rate0.s.rate_pkt = (12 + 8 + 4) * 8 * tokens_per_bit / 256; /* Each 8 byte word has 64bits */ pko_mem_port_rate0.s.rate_word = 64 * tokens_per_bit; pko_mem_port_rate1.u64 = 0; pko_mem_port_rate1.s.pid = port; pko_mem_port_rate1.s.rate_lim = tokens_per_bit * burst / 256; cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE0, pko_mem_port_rate0.u64); cvmx_write_csr(CVMX_PKO_MEM_PORT_RATE1, pko_mem_port_rate1.u64); return 0; }
gpl-2.0
Redmi-dev/android_kernel_xiaomi_msm8226
drivers/power/charger-manager.c
3614
26806
/* * Copyright (C) 2011 Samsung Electronics Co., Ltd. * MyungJoo Ham <myungjoo.ham@samsung.com> * * This driver enables to monitor battery health and control charger * during suspend-to-mem. * Charger manager depends on other devices. register this later than * the depending devices. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. **/ #include <linux/io.h> #include <linux/module.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/rtc.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/platform_device.h> #include <linux/power/charger-manager.h> #include <linux/regulator/consumer.h> /* * Regard CM_JIFFIES_SMALL jiffies is small enough to ignore for * delayed works so that we can run delayed works with CM_JIFFIES_SMALL * without any delays. */ #define CM_JIFFIES_SMALL (2) /* If y is valid (> 0) and smaller than x, do x = y */ #define CM_MIN_VALID(x, y) x = (((y > 0) && ((x) > (y))) ? (y) : (x)) /* * Regard CM_RTC_SMALL (sec) is small enough to ignore error in invoking * rtc alarm. It should be 2 or larger */ #define CM_RTC_SMALL (2) #define UEVENT_BUF_SIZE 32 static LIST_HEAD(cm_list); static DEFINE_MUTEX(cm_list_mtx); /* About in-suspend (suspend-again) monitoring */ static struct rtc_device *rtc_dev; /* * Backup RTC alarm * Save the wakeup alarm before entering suspend-to-RAM */ static struct rtc_wkalrm rtc_wkalarm_save; /* Backup RTC alarm time in terms of seconds since 01-01-1970 00:00:00 */ static unsigned long rtc_wkalarm_save_time; static bool cm_suspended; static bool cm_rtc_set; static unsigned long cm_suspend_duration_ms; /* Global charger-manager description */ static struct charger_global_desc *g_desc; /* init with setup_charger_manager */ /** * is_batt_present - See if the battery presents in place. * @cm: the Charger Manager representing the battery. 
*/ static bool is_batt_present(struct charger_manager *cm) { union power_supply_propval val; bool present = false; int i, ret; switch (cm->desc->battery_present) { case CM_FUEL_GAUGE: ret = cm->fuel_gauge->get_property(cm->fuel_gauge, POWER_SUPPLY_PROP_PRESENT, &val); if (ret == 0 && val.intval) present = true; break; case CM_CHARGER_STAT: for (i = 0; cm->charger_stat[i]; i++) { ret = cm->charger_stat[i]->get_property( cm->charger_stat[i], POWER_SUPPLY_PROP_PRESENT, &val); if (ret == 0 && val.intval) { present = true; break; } } break; } return present; } /** * is_ext_pwr_online - See if an external power source is attached to charge * @cm: the Charger Manager representing the battery. * * Returns true if at least one of the chargers of the battery has an external * power source attached to charge the battery regardless of whether it is * actually charging or not. */ static bool is_ext_pwr_online(struct charger_manager *cm) { union power_supply_propval val; bool online = false; int i, ret; /* If at least one of them has one, it's yes. */ for (i = 0; cm->charger_stat[i]; i++) { ret = cm->charger_stat[i]->get_property( cm->charger_stat[i], POWER_SUPPLY_PROP_ONLINE, &val); if (ret == 0 && val.intval) { online = true; break; } } return online; } /** * get_batt_uV - Get the voltage level of the battery * @cm: the Charger Manager representing the battery. * @uV: the voltage level returned. * * Returns 0 if there is no error. * Returns a negative value on error. */ static int get_batt_uV(struct charger_manager *cm, int *uV) { union power_supply_propval val; int ret; if (!cm->fuel_gauge) return -ENODEV; ret = cm->fuel_gauge->get_property(cm->fuel_gauge, POWER_SUPPLY_PROP_VOLTAGE_NOW, &val); if (ret) return ret; *uV = val.intval; return 0; } /** * is_charging - Returns true if the battery is being charged. * @cm: the Charger Manager representing the battery. 
*/ static bool is_charging(struct charger_manager *cm) { int i, ret; bool charging = false; union power_supply_propval val; /* If there is no battery, it cannot be charged */ if (!is_batt_present(cm)) return false; /* If at least one of the charger is charging, return yes */ for (i = 0; cm->charger_stat[i]; i++) { /* 1. The charger sholuld not be DISABLED */ if (cm->emergency_stop) continue; if (!cm->charger_enabled) continue; /* 2. The charger should be online (ext-power) */ ret = cm->charger_stat[i]->get_property( cm->charger_stat[i], POWER_SUPPLY_PROP_ONLINE, &val); if (ret) { dev_warn(cm->dev, "Cannot read ONLINE value from %s.\n", cm->desc->psy_charger_stat[i]); continue; } if (val.intval == 0) continue; /* * 3. The charger should not be FULL, DISCHARGING, * or NOT_CHARGING. */ ret = cm->charger_stat[i]->get_property( cm->charger_stat[i], POWER_SUPPLY_PROP_STATUS, &val); if (ret) { dev_warn(cm->dev, "Cannot read STATUS value from %s.\n", cm->desc->psy_charger_stat[i]); continue; } if (val.intval == POWER_SUPPLY_STATUS_FULL || val.intval == POWER_SUPPLY_STATUS_DISCHARGING || val.intval == POWER_SUPPLY_STATUS_NOT_CHARGING) continue; /* Then, this is charging. */ charging = true; break; } return charging; } /** * is_polling_required - Return true if need to continue polling for this CM. * @cm: the Charger Manager representing the battery. */ static bool is_polling_required(struct charger_manager *cm) { switch (cm->desc->polling_mode) { case CM_POLL_DISABLE: return false; case CM_POLL_ALWAYS: return true; case CM_POLL_EXTERNAL_POWER_ONLY: return is_ext_pwr_online(cm); case CM_POLL_CHARGING_ONLY: return is_charging(cm); default: dev_warn(cm->dev, "Incorrect polling_mode (%d)\n", cm->desc->polling_mode); } return false; } /** * try_charger_enable - Enable/Disable chargers altogether * @cm: the Charger Manager representing the battery. 
* @enable: true: enable / false: disable * * Note that Charger Manager keeps the charger enabled regardless whether * the charger is charging or not (because battery is full or no external * power source exists) except when CM needs to disable chargers forcibly * bacause of emergency causes; when the battery is overheated or too cold. */ static int try_charger_enable(struct charger_manager *cm, bool enable) { int err = 0, i; struct charger_desc *desc = cm->desc; /* Ignore if it's redundent command */ if (enable == cm->charger_enabled) return 0; if (enable) { if (cm->emergency_stop) return -EAGAIN; err = regulator_bulk_enable(desc->num_charger_regulators, desc->charger_regulators); } else { /* * Abnormal battery state - Stop charging forcibly, * even if charger was enabled at the other places */ err = regulator_bulk_disable(desc->num_charger_regulators, desc->charger_regulators); for (i = 0; i < desc->num_charger_regulators; i++) { if (regulator_is_enabled( desc->charger_regulators[i].consumer)) { regulator_force_disable( desc->charger_regulators[i].consumer); dev_warn(cm->dev, "Disable regulator(%s) forcibly.\n", desc->charger_regulators[i].supply); } } } if (!err) cm->charger_enabled = enable; return err; } /** * uevent_notify - Let users know something has changed. * @cm: the Charger Manager representing the battery. * @event: the event string. * * If @event is null, it implies that uevent_notify is called * by resume function. When called in the resume function, cm_suspended * should be already reset to false in order to let uevent_notify * notify the recent event during the suspend to users. While * suspended, uevent_notify does not notify users, but tracks * events so that uevent_notify can notify users later after resumed. 
*/ static void uevent_notify(struct charger_manager *cm, const char *event) { static char env_str[UEVENT_BUF_SIZE + 1] = ""; static char env_str_save[UEVENT_BUF_SIZE + 1] = ""; if (cm_suspended) { /* Nothing in suspended-event buffer */ if (env_str_save[0] == 0) { if (!strncmp(env_str, event, UEVENT_BUF_SIZE)) return; /* status not changed */ strncpy(env_str_save, event, UEVENT_BUF_SIZE); return; } if (!strncmp(env_str_save, event, UEVENT_BUF_SIZE)) return; /* Duplicated. */ strncpy(env_str_save, event, UEVENT_BUF_SIZE); return; } if (event == NULL) { /* No messages pending */ if (!env_str_save[0]) return; strncpy(env_str, env_str_save, UEVENT_BUF_SIZE); kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE); env_str_save[0] = 0; return; } /* status not changed */ if (!strncmp(env_str, event, UEVENT_BUF_SIZE)) return; /* save the status and notify the update */ strncpy(env_str, event, UEVENT_BUF_SIZE); kobject_uevent(&cm->dev->kobj, KOBJ_CHANGE); dev_info(cm->dev, event); } /** * _cm_monitor - Monitor the temperature and return true for exceptions. * @cm: the Charger Manager representing the battery. * * Returns true if there is an event to notify for the battery. * (True if the status of "emergency_stop" changes) */ static bool _cm_monitor(struct charger_manager *cm) { struct charger_desc *desc = cm->desc; int temp = desc->temperature_out_of_range(&cm->last_temp_mC); dev_dbg(cm->dev, "monitoring (%2.2d.%3.3dC)\n", cm->last_temp_mC / 1000, cm->last_temp_mC % 1000); /* It has been stopped or charging already */ if (!!temp == !!cm->emergency_stop) return false; if (temp) { cm->emergency_stop = temp; if (!try_charger_enable(cm, false)) { if (temp > 0) uevent_notify(cm, "OVERHEAT"); else uevent_notify(cm, "COLD"); } } else { cm->emergency_stop = 0; if (!try_charger_enable(cm, true)) uevent_notify(cm, "CHARGING"); } return true; } /** * cm_monitor - Monitor every battery. * * Returns true if there is an event to notify from any of the batteries. 
* (True if the status of "emergency_stop" changes) */ static bool cm_monitor(void) { bool stop = false; struct charger_manager *cm; mutex_lock(&cm_list_mtx); list_for_each_entry(cm, &cm_list, entry) { if (_cm_monitor(cm)) stop = true; } mutex_unlock(&cm_list_mtx); return stop; } static int charger_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { struct charger_manager *cm = container_of(psy, struct charger_manager, charger_psy); struct charger_desc *desc = cm->desc; int ret = 0; int uV; switch (psp) { case POWER_SUPPLY_PROP_STATUS: if (is_charging(cm)) val->intval = POWER_SUPPLY_STATUS_CHARGING; else if (is_ext_pwr_online(cm)) val->intval = POWER_SUPPLY_STATUS_NOT_CHARGING; else val->intval = POWER_SUPPLY_STATUS_DISCHARGING; break; case POWER_SUPPLY_PROP_HEALTH: if (cm->emergency_stop > 0) val->intval = POWER_SUPPLY_HEALTH_OVERHEAT; else if (cm->emergency_stop < 0) val->intval = POWER_SUPPLY_HEALTH_COLD; else val->intval = POWER_SUPPLY_HEALTH_GOOD; break; case POWER_SUPPLY_PROP_PRESENT: if (is_batt_present(cm)) val->intval = 1; else val->intval = 0; break; case POWER_SUPPLY_PROP_VOLTAGE_NOW: ret = get_batt_uV(cm, &val->intval); break; case POWER_SUPPLY_PROP_CURRENT_NOW: ret = cm->fuel_gauge->get_property(cm->fuel_gauge, POWER_SUPPLY_PROP_CURRENT_NOW, val); break; case POWER_SUPPLY_PROP_TEMP: /* in thenth of centigrade */ if (cm->last_temp_mC == INT_MIN) desc->temperature_out_of_range(&cm->last_temp_mC); val->intval = cm->last_temp_mC / 100; if (!desc->measure_battery_temp) ret = -ENODEV; break; case POWER_SUPPLY_PROP_TEMP_AMBIENT: /* in thenth of centigrade */ if (cm->last_temp_mC == INT_MIN) desc->temperature_out_of_range(&cm->last_temp_mC); val->intval = cm->last_temp_mC / 100; if (desc->measure_battery_temp) ret = -ENODEV; break; case POWER_SUPPLY_PROP_CAPACITY: if (!cm->fuel_gauge) { ret = -ENODEV; break; } if (!is_batt_present(cm)) { /* There is no battery. 
Assume 100% */ val->intval = 100; break; } ret = cm->fuel_gauge->get_property(cm->fuel_gauge, POWER_SUPPLY_PROP_CAPACITY, val); if (ret) break; if (val->intval > 100) { val->intval = 100; break; } if (val->intval < 0) val->intval = 0; /* Do not adjust SOC when charging: voltage is overrated */ if (is_charging(cm)) break; /* * If the capacity value is inconsistent, calibrate it base on * the battery voltage values and the thresholds given as desc */ ret = get_batt_uV(cm, &uV); if (ret) { /* Voltage information not available. No calibration */ ret = 0; break; } if (desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV && !is_charging(cm)) { val->intval = 100; break; } break; case POWER_SUPPLY_PROP_ONLINE: if (is_ext_pwr_online(cm)) val->intval = 1; else val->intval = 0; break; case POWER_SUPPLY_PROP_CHARGE_FULL: if (cm->fuel_gauge) { if (cm->fuel_gauge->get_property(cm->fuel_gauge, POWER_SUPPLY_PROP_CHARGE_FULL, val) == 0) break; } if (is_ext_pwr_online(cm)) { /* Not full if it's charging. */ if (is_charging(cm)) { val->intval = 0; break; } /* * Full if it's powered but not charging andi * not forced stop by emergency */ if (!cm->emergency_stop) { val->intval = 1; break; } } /* Full if it's over the fullbatt voltage */ ret = get_batt_uV(cm, &uV); if (!ret && desc->fullbatt_uV > 0 && uV >= desc->fullbatt_uV && !is_charging(cm)) { val->intval = 1; break; } /* Full if the cap is 100 */ if (cm->fuel_gauge) { ret = cm->fuel_gauge->get_property(cm->fuel_gauge, POWER_SUPPLY_PROP_CAPACITY, val); if (!ret && val->intval >= 100 && !is_charging(cm)) { val->intval = 1; break; } } val->intval = 0; ret = 0; break; case POWER_SUPPLY_PROP_CHARGE_NOW: if (is_charging(cm)) { ret = cm->fuel_gauge->get_property(cm->fuel_gauge, POWER_SUPPLY_PROP_CHARGE_NOW, val); if (ret) { val->intval = 1; ret = 0; } else { /* If CHARGE_NOW is supplied, use it */ val->intval = (val->intval > 0) ? 
val->intval : 1; } } else { val->intval = 0; } break; default: return -EINVAL; } return ret; } #define NUM_CHARGER_PSY_OPTIONAL (4) static enum power_supply_property default_charger_props[] = { /* Guaranteed to provide */ POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_VOLTAGE_NOW, POWER_SUPPLY_PROP_CAPACITY, POWER_SUPPLY_PROP_ONLINE, POWER_SUPPLY_PROP_CHARGE_FULL, /* * Optional properties are: * POWER_SUPPLY_PROP_CHARGE_NOW, * POWER_SUPPLY_PROP_CURRENT_NOW, * POWER_SUPPLY_PROP_TEMP, and * POWER_SUPPLY_PROP_TEMP_AMBIENT, */ }; static struct power_supply psy_default = { .name = "battery", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = default_charger_props, .num_properties = ARRAY_SIZE(default_charger_props), .get_property = charger_get_property, }; /** * cm_setup_timer - For in-suspend monitoring setup wakeup alarm * for suspend_again. * * Returns true if the alarm is set for Charger Manager to use. * Returns false if * cm_setup_timer fails to set an alarm, * cm_setup_timer does not need to set an alarm for Charger Manager, * or an alarm previously configured is to be used. */ static bool cm_setup_timer(void) { struct charger_manager *cm; unsigned int wakeup_ms = UINT_MAX; bool ret = false; mutex_lock(&cm_list_mtx); list_for_each_entry(cm, &cm_list, entry) { /* Skip if polling is not required for this CM */ if (!is_polling_required(cm) && !cm->emergency_stop) continue; if (cm->desc->polling_interval_ms == 0) continue; CM_MIN_VALID(wakeup_ms, cm->desc->polling_interval_ms); } mutex_unlock(&cm_list_mtx); if (wakeup_ms < UINT_MAX && wakeup_ms > 0) { pr_info("Charger Manager wakeup timer: %u ms.\n", wakeup_ms); if (rtc_dev) { struct rtc_wkalrm tmp; unsigned long time, now; unsigned long add = DIV_ROUND_UP(wakeup_ms, 1000); /* * Set alarm with the polling interval (wakeup_ms) * except when rtc_wkalarm_save comes first. * However, the alarm time should be NOW + * CM_RTC_SMALL or later. 
*/ tmp.enabled = 1; rtc_read_time(rtc_dev, &tmp.time); rtc_tm_to_time(&tmp.time, &now); if (add < CM_RTC_SMALL) add = CM_RTC_SMALL; time = now + add; ret = true; if (rtc_wkalarm_save.enabled && rtc_wkalarm_save_time && rtc_wkalarm_save_time < time) { if (rtc_wkalarm_save_time < now + CM_RTC_SMALL) time = now + CM_RTC_SMALL; else time = rtc_wkalarm_save_time; /* The timer is not appointed by CM */ ret = false; } pr_info("Waking up after %lu secs.\n", time - now); rtc_time_to_tm(time, &tmp.time); rtc_set_alarm(rtc_dev, &tmp); cm_suspend_duration_ms += wakeup_ms; return ret; } } if (rtc_dev) rtc_set_alarm(rtc_dev, &rtc_wkalarm_save); return false; } /** * cm_suspend_again - Determine whether suspend again or not * * Returns true if the system should be suspended again * Returns false if the system should be woken up */ bool cm_suspend_again(void) { struct charger_manager *cm; bool ret = false; if (!g_desc || !g_desc->rtc_only_wakeup || !g_desc->rtc_only_wakeup() || !cm_rtc_set) return false; if (cm_monitor()) goto out; ret = true; mutex_lock(&cm_list_mtx); list_for_each_entry(cm, &cm_list, entry) { if (cm->status_save_ext_pwr_inserted != is_ext_pwr_online(cm) || cm->status_save_batt != is_batt_present(cm)) { ret = false; break; } } mutex_unlock(&cm_list_mtx); cm_rtc_set = cm_setup_timer(); out: /* It's about the time when the non-CM appointed timer goes off */ if (rtc_wkalarm_save.enabled) { unsigned long now; struct rtc_time tmp; rtc_read_time(rtc_dev, &tmp); rtc_tm_to_time(&tmp, &now); if (rtc_wkalarm_save_time && now + CM_RTC_SMALL >= rtc_wkalarm_save_time) return false; } return ret; } EXPORT_SYMBOL_GPL(cm_suspend_again); /** * setup_charger_manager - initialize charger_global_desc data * @gd: pointer to instance of charger_global_desc */ int setup_charger_manager(struct charger_global_desc *gd) { if (!gd) return -EINVAL; if (rtc_dev) rtc_class_close(rtc_dev); rtc_dev = NULL; g_desc = NULL; if (!gd->rtc_only_wakeup) { pr_err("The callback rtc_only_wakeup is not 
given.\n"); return -EINVAL; } if (gd->rtc_name) { rtc_dev = rtc_class_open(gd->rtc_name); if (IS_ERR_OR_NULL(rtc_dev)) { rtc_dev = NULL; /* Retry at probe. RTC may be not registered yet */ } } else { pr_warn("No wakeup timer is given for charger manager." "In-suspend monitoring won't work.\n"); } g_desc = gd; return 0; } EXPORT_SYMBOL_GPL(setup_charger_manager); static int charger_manager_probe(struct platform_device *pdev) { struct charger_desc *desc = dev_get_platdata(&pdev->dev); struct charger_manager *cm; int ret = 0, i = 0; union power_supply_propval val; if (g_desc && !rtc_dev && g_desc->rtc_name) { rtc_dev = rtc_class_open(g_desc->rtc_name); if (IS_ERR_OR_NULL(rtc_dev)) { rtc_dev = NULL; dev_err(&pdev->dev, "Cannot get RTC %s.\n", g_desc->rtc_name); ret = -ENODEV; goto err_alloc; } } if (!desc) { dev_err(&pdev->dev, "No platform data (desc) found.\n"); ret = -ENODEV; goto err_alloc; } cm = kzalloc(sizeof(struct charger_manager), GFP_KERNEL); if (!cm) { dev_err(&pdev->dev, "Cannot allocate memory.\n"); ret = -ENOMEM; goto err_alloc; } /* Basic Values. 
Unspecified are Null or 0 */ cm->dev = &pdev->dev; cm->desc = kzalloc(sizeof(struct charger_desc), GFP_KERNEL); if (!cm->desc) { dev_err(&pdev->dev, "Cannot allocate memory.\n"); ret = -ENOMEM; goto err_alloc_desc; } memcpy(cm->desc, desc, sizeof(struct charger_desc)); cm->last_temp_mC = INT_MIN; /* denotes "unmeasured, yet" */ if (!desc->charger_regulators || desc->num_charger_regulators < 1) { ret = -EINVAL; dev_err(&pdev->dev, "charger_regulators undefined.\n"); goto err_no_charger; } if (!desc->psy_charger_stat || !desc->psy_charger_stat[0]) { dev_err(&pdev->dev, "No power supply defined.\n"); ret = -EINVAL; goto err_no_charger_stat; } /* Counting index only */ while (desc->psy_charger_stat[i]) i++; cm->charger_stat = kzalloc(sizeof(struct power_supply *) * (i + 1), GFP_KERNEL); if (!cm->charger_stat) { ret = -ENOMEM; goto err_no_charger_stat; } for (i = 0; desc->psy_charger_stat[i]; i++) { cm->charger_stat[i] = power_supply_get_by_name( desc->psy_charger_stat[i]); if (!cm->charger_stat[i]) { dev_err(&pdev->dev, "Cannot find power supply " "\"%s\"\n", desc->psy_charger_stat[i]); ret = -ENODEV; goto err_chg_stat; } } cm->fuel_gauge = power_supply_get_by_name(desc->psy_fuel_gauge); if (!cm->fuel_gauge) { dev_err(&pdev->dev, "Cannot find power supply \"%s\"\n", desc->psy_fuel_gauge); ret = -ENODEV; goto err_chg_stat; } if (desc->polling_interval_ms == 0 || msecs_to_jiffies(desc->polling_interval_ms) <= CM_JIFFIES_SMALL) { dev_err(&pdev->dev, "polling_interval_ms is too small\n"); ret = -EINVAL; goto err_chg_stat; } if (!desc->temperature_out_of_range) { dev_err(&pdev->dev, "there is no temperature_out_of_range\n"); ret = -EINVAL; goto err_chg_stat; } platform_set_drvdata(pdev, cm); memcpy(&cm->charger_psy, &psy_default, sizeof(psy_default)); if (!desc->psy_name) { strncpy(cm->psy_name_buf, psy_default.name, PSY_NAME_MAX); } else { strncpy(cm->psy_name_buf, desc->psy_name, PSY_NAME_MAX); } cm->charger_psy.name = cm->psy_name_buf; /* Allocate for psy properties 
because they may vary */ cm->charger_psy.properties = kzalloc(sizeof(enum power_supply_property) * (ARRAY_SIZE(default_charger_props) + NUM_CHARGER_PSY_OPTIONAL), GFP_KERNEL); if (!cm->charger_psy.properties) { dev_err(&pdev->dev, "Cannot allocate for psy properties.\n"); ret = -ENOMEM; goto err_chg_stat; } memcpy(cm->charger_psy.properties, default_charger_props, sizeof(enum power_supply_property) * ARRAY_SIZE(default_charger_props)); cm->charger_psy.num_properties = psy_default.num_properties; /* Find which optional psy-properties are available */ if (!cm->fuel_gauge->get_property(cm->fuel_gauge, POWER_SUPPLY_PROP_CHARGE_NOW, &val)) { cm->charger_psy.properties[cm->charger_psy.num_properties] = POWER_SUPPLY_PROP_CHARGE_NOW; cm->charger_psy.num_properties++; } if (!cm->fuel_gauge->get_property(cm->fuel_gauge, POWER_SUPPLY_PROP_CURRENT_NOW, &val)) { cm->charger_psy.properties[cm->charger_psy.num_properties] = POWER_SUPPLY_PROP_CURRENT_NOW; cm->charger_psy.num_properties++; } if (desc->measure_battery_temp) { cm->charger_psy.properties[cm->charger_psy.num_properties] = POWER_SUPPLY_PROP_TEMP; cm->charger_psy.num_properties++; } else { cm->charger_psy.properties[cm->charger_psy.num_properties] = POWER_SUPPLY_PROP_TEMP_AMBIENT; cm->charger_psy.num_properties++; } ret = power_supply_register(NULL, &cm->charger_psy); if (ret) { dev_err(&pdev->dev, "Cannot register charger-manager with" " name \"%s\".\n", cm->charger_psy.name); goto err_register; } ret = regulator_bulk_get(&pdev->dev, desc->num_charger_regulators, desc->charger_regulators); if (ret) { dev_err(&pdev->dev, "Cannot get charger regulators.\n"); goto err_bulk_get; } ret = try_charger_enable(cm, true); if (ret) { dev_err(&pdev->dev, "Cannot enable charger regulators\n"); goto err_chg_enable; } /* Add to the list */ mutex_lock(&cm_list_mtx); list_add(&cm->entry, &cm_list); mutex_unlock(&cm_list_mtx); return 0; err_chg_enable: regulator_bulk_free(desc->num_charger_regulators, desc->charger_regulators); 
err_bulk_get: power_supply_unregister(&cm->charger_psy); err_register: kfree(cm->charger_psy.properties); err_chg_stat: kfree(cm->charger_stat); err_no_charger_stat: err_no_charger: kfree(cm->desc); err_alloc_desc: kfree(cm); err_alloc: return ret; } static int __devexit charger_manager_remove(struct platform_device *pdev) { struct charger_manager *cm = platform_get_drvdata(pdev); struct charger_desc *desc = cm->desc; /* Remove from the list */ mutex_lock(&cm_list_mtx); list_del(&cm->entry); mutex_unlock(&cm_list_mtx); regulator_bulk_free(desc->num_charger_regulators, desc->charger_regulators); power_supply_unregister(&cm->charger_psy); kfree(cm->charger_psy.properties); kfree(cm->charger_stat); kfree(cm->desc); kfree(cm); return 0; } static const struct platform_device_id charger_manager_id[] = { { "charger-manager", 0 }, { }, }; MODULE_DEVICE_TABLE(platform, charger_manager_id); static int cm_suspend_prepare(struct device *dev) { struct charger_manager *cm = dev_get_drvdata(dev); if (!cm_suspended) { if (rtc_dev) { struct rtc_time tmp; unsigned long now; rtc_read_alarm(rtc_dev, &rtc_wkalarm_save); rtc_read_time(rtc_dev, &tmp); if (rtc_wkalarm_save.enabled) { rtc_tm_to_time(&rtc_wkalarm_save.time, &rtc_wkalarm_save_time); rtc_tm_to_time(&tmp, &now); if (now > rtc_wkalarm_save_time) rtc_wkalarm_save_time = 0; } else { rtc_wkalarm_save_time = 0; } } cm_suspended = true; } cm->status_save_ext_pwr_inserted = is_ext_pwr_online(cm); cm->status_save_batt = is_batt_present(cm); if (!cm_rtc_set) { cm_suspend_duration_ms = 0; cm_rtc_set = cm_setup_timer(); } return 0; } static void cm_suspend_complete(struct device *dev) { struct charger_manager *cm = dev_get_drvdata(dev); if (cm_suspended) { if (rtc_dev) { struct rtc_wkalrm tmp; rtc_read_alarm(rtc_dev, &tmp); rtc_wkalarm_save.pending = tmp.pending; rtc_set_alarm(rtc_dev, &rtc_wkalarm_save); } cm_suspended = false; cm_rtc_set = false; } uevent_notify(cm, NULL); } static const struct dev_pm_ops charger_manager_pm = { 
.prepare = cm_suspend_prepare, .complete = cm_suspend_complete, }; static struct platform_driver charger_manager_driver = { .driver = { .name = "charger-manager", .owner = THIS_MODULE, .pm = &charger_manager_pm, }, .probe = charger_manager_probe, .remove = __devexit_p(charger_manager_remove), .id_table = charger_manager_id, }; static int __init charger_manager_init(void) { return platform_driver_register(&charger_manager_driver); } late_initcall(charger_manager_init); static void __exit charger_manager_cleanup(void) { platform_driver_unregister(&charger_manager_driver); } module_exit(charger_manager_cleanup); MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>"); MODULE_DESCRIPTION("Charger Manager"); MODULE_LICENSE("GPL");
gpl-2.0
mrg666/android_kernel_shooter
arch/um/kernel/skas/uaccess.c
3870
5095
/* * Copyright (C) 2002 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/err.h> #include <linux/highmem.h> #include <linux/mm.h> #include <linux/sched.h> #include <asm/current.h> #include <asm/page.h> #include <asm/pgtable.h> #include "kern_util.h" #include "os.h" pte_t *virt_to_pte(struct mm_struct *mm, unsigned long addr) { pgd_t *pgd; pud_t *pud; pmd_t *pmd; if (mm == NULL) return NULL; pgd = pgd_offset(mm, addr); if (!pgd_present(*pgd)) return NULL; pud = pud_offset(pgd, addr); if (!pud_present(*pud)) return NULL; pmd = pmd_offset(pud, addr); if (!pmd_present(*pmd)) return NULL; return pte_offset_kernel(pmd, addr); } static pte_t *maybe_map(unsigned long virt, int is_write) { pte_t *pte = virt_to_pte(current->mm, virt); int err, dummy_code; if ((pte == NULL) || !pte_present(*pte) || (is_write && !pte_write(*pte))) { err = handle_page_fault(virt, 0, is_write, 1, &dummy_code); if (err) return NULL; pte = virt_to_pte(current->mm, virt); } if (!pte_present(*pte)) pte = NULL; return pte; } static int do_op_one_page(unsigned long addr, int len, int is_write, int (*op)(unsigned long addr, int len, void *arg), void *arg) { jmp_buf buf; struct page *page; pte_t *pte; int n, faulted; pte = maybe_map(addr, is_write); if (pte == NULL) return -1; page = pte_page(*pte); addr = (unsigned long) kmap_atomic(page, KM_UML_USERCOPY) + (addr & ~PAGE_MASK); current->thread.fault_catcher = &buf; faulted = UML_SETJMP(&buf); if (faulted == 0) n = (*op)(addr, len, arg); else n = -1; current->thread.fault_catcher = NULL; kunmap_atomic((void *)addr, KM_UML_USERCOPY); return n; } static int buffer_op(unsigned long addr, int len, int is_write, int (*op)(unsigned long, int, void *), void *arg) { int size, remain, n; size = min(PAGE_ALIGN(addr) - addr, (unsigned long) len); remain = len; n = do_op_one_page(addr, size, is_write, op, arg); if (n != 0) { remain = (n < 0 ? 
remain : 0); goto out; } addr += size; remain -= size; if (remain == 0) goto out; while (addr < ((addr + remain) & PAGE_MASK)) { n = do_op_one_page(addr, PAGE_SIZE, is_write, op, arg); if (n != 0) { remain = (n < 0 ? remain : 0); goto out; } addr += PAGE_SIZE; remain -= PAGE_SIZE; } if (remain == 0) goto out; n = do_op_one_page(addr, remain, is_write, op, arg); if (n != 0) { remain = (n < 0 ? remain : 0); goto out; } return 0; out: return remain; } static int copy_chunk_from_user(unsigned long from, int len, void *arg) { unsigned long *to_ptr = arg, to = *to_ptr; memcpy((void *) to, (void *) from, len); *to_ptr += len; return 0; } int copy_from_user(void *to, const void __user *from, int n) { if (segment_eq(get_fs(), KERNEL_DS)) { memcpy(to, (__force void*)from, n); return 0; } return access_ok(VERIFY_READ, from, n) ? buffer_op((unsigned long) from, n, 0, copy_chunk_from_user, &to): n; } static int copy_chunk_to_user(unsigned long to, int len, void *arg) { unsigned long *from_ptr = arg, from = *from_ptr; memcpy((void *) to, (void *) from, len); *from_ptr += len; return 0; } int copy_to_user(void __user *to, const void *from, int n) { if (segment_eq(get_fs(), KERNEL_DS)) { memcpy((__force void *) to, from, n); return 0; } return access_ok(VERIFY_WRITE, to, n) ? 
buffer_op((unsigned long) to, n, 1, copy_chunk_to_user, &from) : n; } static int strncpy_chunk_from_user(unsigned long from, int len, void *arg) { char **to_ptr = arg, *to = *to_ptr; int n; strncpy(to, (void *) from, len); n = strnlen(to, len); *to_ptr += n; if (n < len) return 1; return 0; } int strncpy_from_user(char *dst, const char __user *src, int count) { int n; char *ptr = dst; if (segment_eq(get_fs(), KERNEL_DS)) { strncpy(dst, (__force void *) src, count); return strnlen(dst, count); } if (!access_ok(VERIFY_READ, src, 1)) return -EFAULT; n = buffer_op((unsigned long) src, count, 0, strncpy_chunk_from_user, &ptr); if (n != 0) return -EFAULT; return strnlen(dst, count); } static int clear_chunk(unsigned long addr, int len, void *unused) { memset((void *) addr, 0, len); return 0; } int __clear_user(void __user *mem, int len) { return buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL); } int clear_user(void __user *mem, int len) { if (segment_eq(get_fs(), KERNEL_DS)) { memset((__force void*)mem, 0, len); return 0; } return access_ok(VERIFY_WRITE, mem, len) ? buffer_op((unsigned long) mem, len, 1, clear_chunk, NULL) : len; } static int strnlen_chunk(unsigned long str, int len, void *arg) { int *len_ptr = arg, n; n = strnlen((void *) str, len); *len_ptr += n; if (n < len) return 1; return 0; } int strnlen_user(const void __user *str, int len) { int count = 0, n; if (segment_eq(get_fs(), KERNEL_DS)) return strnlen((__force char*)str, len) + 1; n = buffer_op((unsigned long) str, len, 0, strnlen_chunk, &count); if (n == 0) return count + 1; return -EFAULT; }
gpl-2.0
placiano/NBKernel_NK4
drivers/rtc/rtc-wm8350.c
4382
11989
/* * Real Time Clock driver for Wolfson Microelectronics WM8350 * * Copyright (C) 2007, 2008 Wolfson Microelectronics PLC. * * Author: Liam Girdwood * linux@wolfsonmicro.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/time.h> #include <linux/rtc.h> #include <linux/bcd.h> #include <linux/interrupt.h> #include <linux/ioctl.h> #include <linux/completion.h> #include <linux/mfd/wm8350/rtc.h> #include <linux/mfd/wm8350/core.h> #include <linux/delay.h> #include <linux/platform_device.h> #define WM8350_SET_ALM_RETRIES 5 #define WM8350_SET_TIME_RETRIES 5 #define WM8350_GET_TIME_RETRIES 5 #define to_wm8350_from_rtc_dev(d) container_of(d, struct wm8350, rtc.pdev.dev) /* * Read current time and date in RTC */ static int wm8350_rtc_readtime(struct device *dev, struct rtc_time *tm) { struct wm8350 *wm8350 = dev_get_drvdata(dev); u16 time1[4], time2[4]; int retries = WM8350_GET_TIME_RETRIES, ret; /* * Read the time twice and compare. * If time1 == time2, then time is valid else retry. 
*/ do { ret = wm8350_block_read(wm8350, WM8350_RTC_SECONDS_MINUTES, 4, time1); if (ret < 0) return ret; ret = wm8350_block_read(wm8350, WM8350_RTC_SECONDS_MINUTES, 4, time2); if (ret < 0) return ret; if (memcmp(time1, time2, sizeof(time1)) == 0) { tm->tm_sec = time1[0] & WM8350_RTC_SECS_MASK; tm->tm_min = (time1[0] & WM8350_RTC_MINS_MASK) >> WM8350_RTC_MINS_SHIFT; tm->tm_hour = time1[1] & WM8350_RTC_HRS_MASK; tm->tm_wday = ((time1[1] >> WM8350_RTC_DAY_SHIFT) & 0x7) - 1; tm->tm_mon = ((time1[2] & WM8350_RTC_MTH_MASK) >> WM8350_RTC_MTH_SHIFT) - 1; tm->tm_mday = (time1[2] & WM8350_RTC_DATE_MASK); tm->tm_year = ((time1[3] & WM8350_RTC_YHUNDREDS_MASK) >> WM8350_RTC_YHUNDREDS_SHIFT) * 100; tm->tm_year += time1[3] & WM8350_RTC_YUNITS_MASK; tm->tm_yday = rtc_year_days(tm->tm_mday, tm->tm_mon, tm->tm_year); tm->tm_year -= 1900; dev_dbg(dev, "Read (%d left): %04x %04x %04x %04x\n", retries, time1[0], time1[1], time1[2], time1[3]); return 0; } } while (retries--); dev_err(dev, "timed out reading RTC time\n"); return -EIO; } /* * Set current time and date in RTC */ static int wm8350_rtc_settime(struct device *dev, struct rtc_time *tm) { struct wm8350 *wm8350 = dev_get_drvdata(dev); u16 time[4]; u16 rtc_ctrl; int ret, retries = WM8350_SET_TIME_RETRIES; time[0] = tm->tm_sec; time[0] |= tm->tm_min << WM8350_RTC_MINS_SHIFT; time[1] = tm->tm_hour; time[1] |= (tm->tm_wday + 1) << WM8350_RTC_DAY_SHIFT; time[2] = tm->tm_mday; time[2] |= (tm->tm_mon + 1) << WM8350_RTC_MTH_SHIFT; time[3] = ((tm->tm_year + 1900) / 100) << WM8350_RTC_YHUNDREDS_SHIFT; time[3] |= (tm->tm_year + 1900) % 100; dev_dbg(dev, "Setting: %04x %04x %04x %04x\n", time[0], time[1], time[2], time[3]); /* Set RTC_SET to stop the clock */ ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL, WM8350_RTC_SET); if (ret < 0) return ret; /* Wait until confirmation of stopping */ do { rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); schedule_timeout_uninterruptible(msecs_to_jiffies(1)); } while (--retries && 
!(rtc_ctrl & WM8350_RTC_STS)); if (!retries) { dev_err(dev, "timed out on set confirmation\n"); return -EIO; } /* Write time to RTC */ ret = wm8350_block_write(wm8350, WM8350_RTC_SECONDS_MINUTES, 4, time); if (ret < 0) return ret; /* Clear RTC_SET to start the clock */ ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL, WM8350_RTC_SET); return ret; } /* * Read alarm time and date in RTC */ static int wm8350_rtc_readalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct wm8350 *wm8350 = dev_get_drvdata(dev); struct rtc_time *tm = &alrm->time; u16 time[4]; int ret; ret = wm8350_block_read(wm8350, WM8350_ALARM_SECONDS_MINUTES, 4, time); if (ret < 0) return ret; tm->tm_sec = time[0] & WM8350_RTC_ALMSECS_MASK; if (tm->tm_sec == WM8350_RTC_ALMSECS_MASK) tm->tm_sec = -1; tm->tm_min = time[0] & WM8350_RTC_ALMMINS_MASK; if (tm->tm_min == WM8350_RTC_ALMMINS_MASK) tm->tm_min = -1; else tm->tm_min >>= WM8350_RTC_ALMMINS_SHIFT; tm->tm_hour = time[1] & WM8350_RTC_ALMHRS_MASK; if (tm->tm_hour == WM8350_RTC_ALMHRS_MASK) tm->tm_hour = -1; tm->tm_wday = ((time[1] >> WM8350_RTC_ALMDAY_SHIFT) & 0x7) - 1; if (tm->tm_wday > 7) tm->tm_wday = -1; tm->tm_mon = time[2] & WM8350_RTC_ALMMTH_MASK; if (tm->tm_mon == WM8350_RTC_ALMMTH_MASK) tm->tm_mon = -1; else tm->tm_mon = (tm->tm_mon >> WM8350_RTC_ALMMTH_SHIFT) - 1; tm->tm_mday = (time[2] & WM8350_RTC_ALMDATE_MASK); if (tm->tm_mday == WM8350_RTC_ALMDATE_MASK) tm->tm_mday = -1; tm->tm_year = -1; alrm->enabled = !(time[3] & WM8350_RTC_ALMSTS); return 0; } static int wm8350_rtc_stop_alarm(struct wm8350 *wm8350) { int retries = WM8350_SET_ALM_RETRIES; u16 rtc_ctrl; int ret; /* Set RTC_SET to stop the clock */ ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL, WM8350_RTC_ALMSET); if (ret < 0) return ret; /* Wait until confirmation of stopping */ do { rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); schedule_timeout_uninterruptible(msecs_to_jiffies(1)); } while (retries-- && !(rtc_ctrl & WM8350_RTC_ALMSTS)); if (!(rtc_ctrl 
& WM8350_RTC_ALMSTS)) return -ETIMEDOUT; return 0; } static int wm8350_rtc_start_alarm(struct wm8350 *wm8350) { int ret; int retries = WM8350_SET_ALM_RETRIES; u16 rtc_ctrl; ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL, WM8350_RTC_ALMSET); if (ret < 0) return ret; /* Wait until confirmation */ do { rtc_ctrl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL); schedule_timeout_uninterruptible(msecs_to_jiffies(1)); } while (retries-- && rtc_ctrl & WM8350_RTC_ALMSTS); if (rtc_ctrl & WM8350_RTC_ALMSTS) return -ETIMEDOUT; return 0; } static int wm8350_rtc_alarm_irq_enable(struct device *dev, unsigned int enabled) { struct wm8350 *wm8350 = dev_get_drvdata(dev); if (enabled) return wm8350_rtc_start_alarm(wm8350); else return wm8350_rtc_stop_alarm(wm8350); } static int wm8350_rtc_setalarm(struct device *dev, struct rtc_wkalrm *alrm) { struct wm8350 *wm8350 = dev_get_drvdata(dev); struct rtc_time *tm = &alrm->time; u16 time[3]; int ret; memset(time, 0, sizeof(time)); if (tm->tm_sec != -1) time[0] |= tm->tm_sec; else time[0] |= WM8350_RTC_ALMSECS_MASK; if (tm->tm_min != -1) time[0] |= tm->tm_min << WM8350_RTC_ALMMINS_SHIFT; else time[0] |= WM8350_RTC_ALMMINS_MASK; if (tm->tm_hour != -1) time[1] |= tm->tm_hour; else time[1] |= WM8350_RTC_ALMHRS_MASK; if (tm->tm_wday != -1) time[1] |= (tm->tm_wday + 1) << WM8350_RTC_ALMDAY_SHIFT; else time[1] |= WM8350_RTC_ALMDAY_MASK; if (tm->tm_mday != -1) time[2] |= tm->tm_mday; else time[2] |= WM8350_RTC_ALMDATE_MASK; if (tm->tm_mon != -1) time[2] |= (tm->tm_mon + 1) << WM8350_RTC_ALMMTH_SHIFT; else time[2] |= WM8350_RTC_ALMMTH_MASK; ret = wm8350_rtc_stop_alarm(wm8350); if (ret < 0) return ret; /* Write time to RTC */ ret = wm8350_block_write(wm8350, WM8350_ALARM_SECONDS_MINUTES, 3, time); if (ret < 0) return ret; if (alrm->enabled) ret = wm8350_rtc_start_alarm(wm8350); return ret; } static irqreturn_t wm8350_rtc_alarm_handler(int irq, void *data) { struct wm8350 *wm8350 = data; struct rtc_device *rtc = wm8350->rtc.rtc; int ret; 
	/* notify the RTC core that the alarm fired */
	rtc_update_irq(rtc, 1, RTC_IRQF | RTC_AF);

	/* Make it one shot: setting ALMSET disables the alarm again */
	ret = wm8350_set_bits(wm8350, WM8350_RTC_TIME_CONTROL,
			      WM8350_RTC_ALMSET);
	if (ret != 0) {
		dev_err(&(wm8350->rtc.pdev->dev),
			"Failed to disable alarm: %d\n", ret);
	}

	return IRQ_HANDLED;
}

/* IRQ handler for the once-per-second tick interrupt */
static irqreturn_t wm8350_rtc_update_handler(int irq, void *data)
{
	struct wm8350 *wm8350 = data;
	struct rtc_device *rtc = wm8350->rtc.rtc;

	rtc_update_irq(rtc, 1, RTC_IRQF | RTC_UF);

	return IRQ_HANDLED;
}

/* Callbacks registered with the RTC class core */
static const struct rtc_class_ops wm8350_rtc_ops = {
	.read_time = wm8350_rtc_readtime,
	.set_time = wm8350_rtc_settime,
	.read_alarm = wm8350_rtc_readalarm,
	.set_alarm = wm8350_rtc_setalarm,
	.alarm_irq_enable = wm8350_rtc_alarm_irq_enable,
};

#ifdef CONFIG_PM_SLEEP
/*
 * Suspend: stop the alarm if the device may wake the system and ALMSTS is
 * set.  NOTE(review): ALMSTS set elsewhere in this driver indicates the
 * alarm is *disabled*; confirm the intended polarity against the WM8350
 * datasheet before relying on this check.
 */
static int wm8350_rtc_suspend(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
	int ret = 0;
	u16 reg;

	reg = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);

	if (device_may_wakeup(&wm8350->rtc.pdev->dev) &&
	    reg & WM8350_RTC_ALMSTS) {
		ret = wm8350_rtc_stop_alarm(wm8350);
		if (ret != 0)
			dev_err(&pdev->dev,
				"Failed to stop RTC alarm: %d\n", ret);
	}

	return ret;
}

/*
 * Resume: restart the alarm if it was enabled before suspend.
 * rtc.alarm_enabled is maintained outside this chunk (presumably by the
 * wm8350 core) — not visible here.  Always returns 0; a restart failure
 * is only logged.
 */
static int wm8350_rtc_resume(struct device *dev)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct wm8350 *wm8350 = dev_get_drvdata(&pdev->dev);
	int ret;

	if (wm8350->rtc.alarm_enabled) {
		ret = wm8350_rtc_start_alarm(wm8350);
		if (ret != 0)
			dev_err(&pdev->dev,
				"Failed to restart RTC alarm: %d\n", ret);
	}

	return 0;
}
#endif

/*
 * Probe: reject unsupported BCD/12-hour configurations, then enable and
 * start the RTC (body continues on the next line).
 */
static int wm8350_rtc_probe(struct platform_device *pdev)
{
	struct wm8350 *wm8350 = platform_get_drvdata(pdev);
	struct wm8350_rtc *wm_rtc = &wm8350->rtc;
	int ret = 0;
	u16 timectl, power5;

	timectl = wm8350_reg_read(wm8350, WM8350_RTC_TIME_CONTROL);
	if (timectl & WM8350_RTC_BCD) {
		dev_err(&pdev->dev, "RTC BCD mode not supported\n");
		return -EINVAL;
	}
	if (timectl & WM8350_RTC_12HR) {
		dev_err(&pdev->dev, "RTC 12 hour mode not supported\n");
		return -EINVAL;
	}

	/* enable the RTC if it's not already
	   enabled */
	power5 = wm8350_reg_read(wm8350, WM8350_POWER_MGMT_5);
	if (!(power5 & WM8350_RTC_TICK_ENA)) {
		dev_info(wm8350->dev, "Starting RTC\n");

		/* the power-management register is write-protected */
		wm8350_reg_unlock(wm8350);

		ret = wm8350_set_bits(wm8350, WM8350_POWER_MGMT_5,
				      WM8350_RTC_TICK_ENA);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to enable RTC: %d\n",
				ret);
			return ret;
		}

		wm8350_reg_lock(wm8350);
	}

	/* RTC_STS set means the clock is stopped; clear RTC_SET to start it
	 * and poll until the hardware confirms it is running */
	if (timectl & WM8350_RTC_STS) {
		int retries;

		ret = wm8350_clear_bits(wm8350, WM8350_RTC_TIME_CONTROL,
					WM8350_RTC_SET);
		if (ret < 0) {
			dev_err(&pdev->dev, "failed to start: %d\n", ret);
			return ret;
		}

		retries = WM8350_SET_TIME_RETRIES;
		do {
			timectl = wm8350_reg_read(wm8350,
						  WM8350_RTC_TIME_CONTROL);
		} while (timectl & WM8350_RTC_STS && --retries);

		if (retries == 0) {
			dev_err(&pdev->dev, "failed to start: timeout\n");
			return -ENODEV;
		}
	}

	device_init_wakeup(&pdev->dev, 1);

	/* devm-managed: no explicit unregister needed in remove */
	wm_rtc->rtc = devm_rtc_device_register(&pdev->dev, "wm8350",
					       &wm8350_rtc_ops, THIS_MODULE);
	if (IS_ERR(wm_rtc->rtc)) {
		ret = PTR_ERR(wm_rtc->rtc);
		dev_err(&pdev->dev, "failed to register RTC: %d\n", ret);
		return ret;
	}

	/* per-second tick IRQ stays masked until the core unmasks it */
	wm8350_register_irq(wm8350, WM8350_IRQ_RTC_SEC,
			    wm8350_rtc_update_handler, 0,
			    "RTC Seconds", wm8350);
	wm8350_mask_irq(wm8350, WM8350_IRQ_RTC_SEC);

	wm8350_register_irq(wm8350, WM8350_IRQ_RTC_ALM,
			    wm8350_rtc_alarm_handler, 0,
			    "RTC Alarm", wm8350);

	return 0;
}

/* Remove: release the IRQs registered in probe */
static int wm8350_rtc_remove(struct platform_device *pdev)
{
	struct wm8350 *wm8350 = platform_get_drvdata(pdev);

	wm8350_free_irq(wm8350, WM8350_IRQ_RTC_SEC, wm8350);
	wm8350_free_irq(wm8350, WM8350_IRQ_RTC_ALM, wm8350);

	return 0;
}

static SIMPLE_DEV_PM_OPS(wm8350_rtc_pm_ops, wm8350_rtc_suspend,
			 wm8350_rtc_resume);

static struct platform_driver wm8350_rtc_driver = {
	.probe = wm8350_rtc_probe,
	.remove = wm8350_rtc_remove,
	.driver = {
		.name = "wm8350-rtc",
		.pm = &wm8350_rtc_pm_ops,
	},
};

module_platform_driver(wm8350_rtc_driver);

MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("RTC driver for the WM8350");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm8350-rtc");
gpl-2.0
VanirAOSP/kernel_motorola_msm8226
sound/pci/maestro3.c
4894
85862
/* * Driver for ESS Maestro3/Allegro (ES1988) soundcards. * Copyright (c) 2000 by Zach Brown <zab@zabbo.net> * Takashi Iwai <tiwai@suse.de> * * Most of the hardware init stuffs are based on maestro3 driver for * OSS/Free by Zach Brown. Many thanks to Zach! * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * * ChangeLog: * Aug. 27, 2001 * - Fixed deadlock on capture * - Added Canyon3D-2 support by Rob Riggs <rob@pangalactic.org> * */ #define CARD_NAME "ESS Maestro3/Allegro/Canyon3D-2" #define DRIVER_NAME "Maestro3" #include <asm/io.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/input.h> #include <sound/core.h> #include <sound/info.h> #include <sound/control.h> #include <sound/pcm.h> #include <sound/mpu401.h> #include <sound/ac97_codec.h> #include <sound/initval.h> #include <asm/byteorder.h> MODULE_AUTHOR("Zach Brown <zab@zabbo.net>, Takashi Iwai <tiwai@suse.de>"); MODULE_DESCRIPTION("ESS Maestro3 PCI"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ESS,Maestro3 PCI}," "{ESS,ES1988}," "{ESS,Allegro PCI}," "{ESS,Allegro-1 PCI}," "{ESS,Canyon3D-2/LE PCI}}"); MODULE_FIRMWARE("ess/maestro3_assp_kernel.fw"); 
MODULE_FIRMWARE("ess/maestro3_assp_minisrc.fw"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; /* all enabled */ static bool external_amp[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = 1}; static int amp_gpio[SNDRV_CARDS] = {[0 ... (SNDRV_CARDS - 1)] = -1}; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable this soundcard."); module_param_array(external_amp, bool, NULL, 0444); MODULE_PARM_DESC(external_amp, "Enable external amp for " CARD_NAME " soundcard."); module_param_array(amp_gpio, int, NULL, 0444); MODULE_PARM_DESC(amp_gpio, "GPIO pin number for external amp. (default = -1)"); #define MAX_PLAYBACKS 2 #define MAX_CAPTURES 1 #define NR_DSPS (MAX_PLAYBACKS + MAX_CAPTURES) /* * maestro3 registers */ /* Allegro PCI configuration registers */ #define PCI_LEGACY_AUDIO_CTRL 0x40 #define SOUND_BLASTER_ENABLE 0x00000001 #define FM_SYNTHESIS_ENABLE 0x00000002 #define GAME_PORT_ENABLE 0x00000004 #define MPU401_IO_ENABLE 0x00000008 #define MPU401_IRQ_ENABLE 0x00000010 #define ALIAS_10BIT_IO 0x00000020 #define SB_DMA_MASK 0x000000C0 #define SB_DMA_0 0x00000040 #define SB_DMA_1 0x00000040 #define SB_DMA_R 0x00000080 #define SB_DMA_3 0x000000C0 #define SB_IRQ_MASK 0x00000700 #define SB_IRQ_5 0x00000000 #define SB_IRQ_7 0x00000100 #define SB_IRQ_9 0x00000200 #define SB_IRQ_10 0x00000300 #define MIDI_IRQ_MASK 0x00003800 #define SERIAL_IRQ_ENABLE 0x00004000 #define DISABLE_LEGACY 0x00008000 #define PCI_ALLEGRO_CONFIG 0x50 #define SB_ADDR_240 0x00000004 #define MPU_ADDR_MASK 0x00000018 #define MPU_ADDR_330 0x00000000 #define MPU_ADDR_300 0x00000008 #define MPU_ADDR_320 0x00000010 
#define MPU_ADDR_340 0x00000018 #define USE_PCI_TIMING 0x00000040 #define POSTED_WRITE_ENABLE 0x00000080 #define DMA_POLICY_MASK 0x00000700 #define DMA_DDMA 0x00000000 #define DMA_TDMA 0x00000100 #define DMA_PCPCI 0x00000200 #define DMA_WBDMA16 0x00000400 #define DMA_WBDMA4 0x00000500 #define DMA_WBDMA2 0x00000600 #define DMA_WBDMA1 0x00000700 #define DMA_SAFE_GUARD 0x00000800 #define HI_PERF_GP_ENABLE 0x00001000 #define PIC_SNOOP_MODE_0 0x00002000 #define PIC_SNOOP_MODE_1 0x00004000 #define SOUNDBLASTER_IRQ_MASK 0x00008000 #define RING_IN_ENABLE 0x00010000 #define SPDIF_TEST_MODE 0x00020000 #define CLK_MULT_MODE_SELECT_2 0x00040000 #define EEPROM_WRITE_ENABLE 0x00080000 #define CODEC_DIR_IN 0x00100000 #define HV_BUTTON_FROM_GD 0x00200000 #define REDUCED_DEBOUNCE 0x00400000 #define HV_CTRL_ENABLE 0x00800000 #define SPDIF_ENABLE 0x01000000 #define CLK_DIV_SELECT 0x06000000 #define CLK_DIV_BY_48 0x00000000 #define CLK_DIV_BY_49 0x02000000 #define CLK_DIV_BY_50 0x04000000 #define CLK_DIV_RESERVED 0x06000000 #define PM_CTRL_ENABLE 0x08000000 #define CLK_MULT_MODE_SELECT 0x30000000 #define CLK_MULT_MODE_SHIFT 28 #define CLK_MULT_MODE_0 0x00000000 #define CLK_MULT_MODE_1 0x10000000 #define CLK_MULT_MODE_2 0x20000000 #define CLK_MULT_MODE_3 0x30000000 #define INT_CLK_SELECT 0x40000000 #define INT_CLK_MULT_RESET 0x80000000 /* M3 */ #define INT_CLK_SRC_NOT_PCI 0x00100000 #define INT_CLK_MULT_ENABLE 0x80000000 #define PCI_ACPI_CONTROL 0x54 #define PCI_ACPI_D0 0x00000000 #define PCI_ACPI_D1 0xB4F70000 #define PCI_ACPI_D2 0xB4F7B4F7 #define PCI_USER_CONFIG 0x58 #define EXT_PCI_MASTER_ENABLE 0x00000001 #define SPDIF_OUT_SELECT 0x00000002 #define TEST_PIN_DIR_CTRL 0x00000004 #define AC97_CODEC_TEST 0x00000020 #define TRI_STATE_BUFFER 0x00000080 #define IN_CLK_12MHZ_SELECT 0x00000100 #define MULTI_FUNC_DISABLE 0x00000200 #define EXT_MASTER_PAIR_SEL 0x00000400 #define PCI_MASTER_SUPPORT 0x00000800 #define STOP_CLOCK_ENABLE 0x00001000 #define EAPD_DRIVE_ENABLE 0x00002000 #define 
REQ_TRI_STATE_ENABLE 0x00004000 #define REQ_LOW_ENABLE 0x00008000 #define MIDI_1_ENABLE 0x00010000 #define MIDI_2_ENABLE 0x00020000 #define SB_AUDIO_SYNC 0x00040000 #define HV_CTRL_TEST 0x00100000 #define SOUNDBLASTER_TEST 0x00400000 #define PCI_USER_CONFIG_C 0x5C #define PCI_DDMA_CTRL 0x60 #define DDMA_ENABLE 0x00000001 /* Allegro registers */ #define HOST_INT_CTRL 0x18 #define SB_INT_ENABLE 0x0001 #define MPU401_INT_ENABLE 0x0002 #define ASSP_INT_ENABLE 0x0010 #define RING_INT_ENABLE 0x0020 #define HV_INT_ENABLE 0x0040 #define CLKRUN_GEN_ENABLE 0x0100 #define HV_CTRL_TO_PME 0x0400 #define SOFTWARE_RESET_ENABLE 0x8000 /* * should be using the above defines, probably. */ #define REGB_ENABLE_RESET 0x01 #define REGB_STOP_CLOCK 0x10 #define HOST_INT_STATUS 0x1A #define SB_INT_PENDING 0x01 #define MPU401_INT_PENDING 0x02 #define ASSP_INT_PENDING 0x10 #define RING_INT_PENDING 0x20 #define HV_INT_PENDING 0x40 #define HARDWARE_VOL_CTRL 0x1B #define SHADOW_MIX_REG_VOICE 0x1C #define HW_VOL_COUNTER_VOICE 0x1D #define SHADOW_MIX_REG_MASTER 0x1E #define HW_VOL_COUNTER_MASTER 0x1F #define CODEC_COMMAND 0x30 #define CODEC_READ_B 0x80 #define CODEC_STATUS 0x30 #define CODEC_BUSY_B 0x01 #define CODEC_DATA 0x32 #define RING_BUS_CTRL_A 0x36 #define RAC_PME_ENABLE 0x0100 #define RAC_SDFS_ENABLE 0x0200 #define LAC_PME_ENABLE 0x0400 #define LAC_SDFS_ENABLE 0x0800 #define SERIAL_AC_LINK_ENABLE 0x1000 #define IO_SRAM_ENABLE 0x2000 #define IIS_INPUT_ENABLE 0x8000 #define RING_BUS_CTRL_B 0x38 #define SECOND_CODEC_ID_MASK 0x0003 #define SPDIF_FUNC_ENABLE 0x0010 #define SECOND_AC_ENABLE 0x0020 #define SB_MODULE_INTF_ENABLE 0x0040 #define SSPE_ENABLE 0x0040 #define M3I_DOCK_ENABLE 0x0080 #define SDO_OUT_DEST_CTRL 0x3A #define COMMAND_ADDR_OUT 0x0003 #define PCM_LR_OUT_LOCAL 0x0000 #define PCM_LR_OUT_REMOTE 0x0004 #define PCM_LR_OUT_MUTE 0x0008 #define PCM_LR_OUT_BOTH 0x000C #define LINE1_DAC_OUT_LOCAL 0x0000 #define LINE1_DAC_OUT_REMOTE 0x0010 #define LINE1_DAC_OUT_MUTE 0x0020 #define 
LINE1_DAC_OUT_BOTH 0x0030 #define PCM_CLS_OUT_LOCAL 0x0000 #define PCM_CLS_OUT_REMOTE 0x0040 #define PCM_CLS_OUT_MUTE 0x0080 #define PCM_CLS_OUT_BOTH 0x00C0 #define PCM_RLF_OUT_LOCAL 0x0000 #define PCM_RLF_OUT_REMOTE 0x0100 #define PCM_RLF_OUT_MUTE 0x0200 #define PCM_RLF_OUT_BOTH 0x0300 #define LINE2_DAC_OUT_LOCAL 0x0000 #define LINE2_DAC_OUT_REMOTE 0x0400 #define LINE2_DAC_OUT_MUTE 0x0800 #define LINE2_DAC_OUT_BOTH 0x0C00 #define HANDSET_OUT_LOCAL 0x0000 #define HANDSET_OUT_REMOTE 0x1000 #define HANDSET_OUT_MUTE 0x2000 #define HANDSET_OUT_BOTH 0x3000 #define IO_CTRL_OUT_LOCAL 0x0000 #define IO_CTRL_OUT_REMOTE 0x4000 #define IO_CTRL_OUT_MUTE 0x8000 #define IO_CTRL_OUT_BOTH 0xC000 #define SDO_IN_DEST_CTRL 0x3C #define STATUS_ADDR_IN 0x0003 #define PCM_LR_IN_LOCAL 0x0000 #define PCM_LR_IN_REMOTE 0x0004 #define PCM_LR_RESERVED 0x0008 #define PCM_LR_IN_BOTH 0x000C #define LINE1_ADC_IN_LOCAL 0x0000 #define LINE1_ADC_IN_REMOTE 0x0010 #define LINE1_ADC_IN_MUTE 0x0020 #define MIC_ADC_IN_LOCAL 0x0000 #define MIC_ADC_IN_REMOTE 0x0040 #define MIC_ADC_IN_MUTE 0x0080 #define LINE2_DAC_IN_LOCAL 0x0000 #define LINE2_DAC_IN_REMOTE 0x0400 #define LINE2_DAC_IN_MUTE 0x0800 #define HANDSET_IN_LOCAL 0x0000 #define HANDSET_IN_REMOTE 0x1000 #define HANDSET_IN_MUTE 0x2000 #define IO_STATUS_IN_LOCAL 0x0000 #define IO_STATUS_IN_REMOTE 0x4000 #define SPDIF_IN_CTRL 0x3E #define SPDIF_IN_ENABLE 0x0001 #define GPIO_DATA 0x60 #define GPIO_DATA_MASK 0x0FFF #define GPIO_HV_STATUS 0x3000 #define GPIO_PME_STATUS 0x4000 #define GPIO_MASK 0x64 #define GPIO_DIRECTION 0x68 #define GPO_PRIMARY_AC97 0x0001 #define GPI_LINEOUT_SENSE 0x0004 #define GPO_SECONDARY_AC97 0x0008 #define GPI_VOL_DOWN 0x0010 #define GPI_VOL_UP 0x0020 #define GPI_IIS_CLK 0x0040 #define GPI_IIS_LRCLK 0x0080 #define GPI_IIS_DATA 0x0100 #define GPI_DOCKING_STATUS 0x0100 #define GPI_HEADPHONE_SENSE 0x0200 #define GPO_EXT_AMP_SHUTDOWN 0x1000 #define GPO_EXT_AMP_M3 1 /* default m3 amp */ #define GPO_EXT_AMP_ALLEGRO 8 /* default allegro 
amp */ /* M3 */ #define GPO_M3_EXT_AMP_SHUTDN 0x0002 #define ASSP_INDEX_PORT 0x80 #define ASSP_MEMORY_PORT 0x82 #define ASSP_DATA_PORT 0x84 #define MPU401_DATA_PORT 0x98 #define MPU401_STATUS_PORT 0x99 #define CLK_MULT_DATA_PORT 0x9C #define ASSP_CONTROL_A 0xA2 #define ASSP_0_WS_ENABLE 0x01 #define ASSP_CTRL_A_RESERVED1 0x02 #define ASSP_CTRL_A_RESERVED2 0x04 #define ASSP_CLK_49MHZ_SELECT 0x08 #define FAST_PLU_ENABLE 0x10 #define ASSP_CTRL_A_RESERVED3 0x20 #define DSP_CLK_36MHZ_SELECT 0x40 #define ASSP_CONTROL_B 0xA4 #define RESET_ASSP 0x00 #define RUN_ASSP 0x01 #define ENABLE_ASSP_CLOCK 0x00 #define STOP_ASSP_CLOCK 0x10 #define RESET_TOGGLE 0x40 #define ASSP_CONTROL_C 0xA6 #define ASSP_HOST_INT_ENABLE 0x01 #define FM_ADDR_REMAP_DISABLE 0x02 #define HOST_WRITE_PORT_ENABLE 0x08 #define ASSP_HOST_INT_STATUS 0xAC #define DSP2HOST_REQ_PIORECORD 0x01 #define DSP2HOST_REQ_I2SRATE 0x02 #define DSP2HOST_REQ_TIMER 0x04 /* AC97 registers */ /* XXX fix this crap up */ /*#define AC97_RESET 0x00*/ #define AC97_VOL_MUTE_B 0x8000 #define AC97_VOL_M 0x1F #define AC97_LEFT_VOL_S 8 #define AC97_MASTER_VOL 0x02 #define AC97_LINE_LEVEL_VOL 0x04 #define AC97_MASTER_MONO_VOL 0x06 #define AC97_PC_BEEP_VOL 0x0A #define AC97_PC_BEEP_VOL_M 0x0F #define AC97_SROUND_MASTER_VOL 0x38 #define AC97_PC_BEEP_VOL_S 1 /*#define AC97_PHONE_VOL 0x0C #define AC97_MIC_VOL 0x0E*/ #define AC97_MIC_20DB_ENABLE 0x40 /*#define AC97_LINEIN_VOL 0x10 #define AC97_CD_VOL 0x12 #define AC97_VIDEO_VOL 0x14 #define AC97_AUX_VOL 0x16*/ #define AC97_PCM_OUT_VOL 0x18 /*#define AC97_RECORD_SELECT 0x1A*/ #define AC97_RECORD_MIC 0x00 #define AC97_RECORD_CD 0x01 #define AC97_RECORD_VIDEO 0x02 #define AC97_RECORD_AUX 0x03 #define AC97_RECORD_MONO_MUX 0x02 #define AC97_RECORD_DIGITAL 0x03 #define AC97_RECORD_LINE 0x04 #define AC97_RECORD_STEREO 0x05 #define AC97_RECORD_MONO 0x06 #define AC97_RECORD_PHONE 0x07 /*#define AC97_RECORD_GAIN 0x1C*/ #define AC97_RECORD_VOL_M 0x0F /*#define AC97_GENERAL_PURPOSE 0x20*/ #define 
AC97_POWER_DOWN_CTRL 0x26 #define AC97_ADC_READY 0x0001 #define AC97_DAC_READY 0x0002 #define AC97_ANALOG_READY 0x0004 #define AC97_VREF_ON 0x0008 #define AC97_PR0 0x0100 #define AC97_PR1 0x0200 #define AC97_PR2 0x0400 #define AC97_PR3 0x0800 #define AC97_PR4 0x1000 #define AC97_RESERVED1 0x28 #define AC97_VENDOR_TEST 0x5A #define AC97_CLOCK_DELAY 0x5C #define AC97_LINEOUT_MUX_SEL 0x0001 #define AC97_MONO_MUX_SEL 0x0002 #define AC97_CLOCK_DELAY_SEL 0x1F #define AC97_DAC_CDS_SHIFT 6 #define AC97_ADC_CDS_SHIFT 11 #define AC97_MULTI_CHANNEL_SEL 0x74 /*#define AC97_VENDOR_ID1 0x7C #define AC97_VENDOR_ID2 0x7E*/ /* * ASSP control regs */ #define DSP_PORT_TIMER_COUNT 0x06 #define DSP_PORT_MEMORY_INDEX 0x80 #define DSP_PORT_MEMORY_TYPE 0x82 #define MEMTYPE_INTERNAL_CODE 0x0002 #define MEMTYPE_INTERNAL_DATA 0x0003 #define MEMTYPE_MASK 0x0003 #define DSP_PORT_MEMORY_DATA 0x84 #define DSP_PORT_CONTROL_REG_A 0xA2 #define DSP_PORT_CONTROL_REG_B 0xA4 #define DSP_PORT_CONTROL_REG_C 0xA6 #define REV_A_CODE_MEMORY_BEGIN 0x0000 #define REV_A_CODE_MEMORY_END 0x0FFF #define REV_A_CODE_MEMORY_UNIT_LENGTH 0x0040 #define REV_A_CODE_MEMORY_LENGTH (REV_A_CODE_MEMORY_END - REV_A_CODE_MEMORY_BEGIN + 1) #define REV_B_CODE_MEMORY_BEGIN 0x0000 #define REV_B_CODE_MEMORY_END 0x0BFF #define REV_B_CODE_MEMORY_UNIT_LENGTH 0x0040 #define REV_B_CODE_MEMORY_LENGTH (REV_B_CODE_MEMORY_END - REV_B_CODE_MEMORY_BEGIN + 1) #define REV_A_DATA_MEMORY_BEGIN 0x1000 #define REV_A_DATA_MEMORY_END 0x2FFF #define REV_A_DATA_MEMORY_UNIT_LENGTH 0x0080 #define REV_A_DATA_MEMORY_LENGTH (REV_A_DATA_MEMORY_END - REV_A_DATA_MEMORY_BEGIN + 1) #define REV_B_DATA_MEMORY_BEGIN 0x1000 #define REV_B_DATA_MEMORY_END 0x2BFF #define REV_B_DATA_MEMORY_UNIT_LENGTH 0x0080 #define REV_B_DATA_MEMORY_LENGTH (REV_B_DATA_MEMORY_END - REV_B_DATA_MEMORY_BEGIN + 1) #define NUM_UNITS_KERNEL_CODE 16 #define NUM_UNITS_KERNEL_DATA 2 #define NUM_UNITS_KERNEL_CODE_WITH_HSP 16 #define NUM_UNITS_KERNEL_DATA_WITH_HSP 5 /* * Kernel data layout */ 
#define DP_SHIFT_COUNT 7 #define KDATA_BASE_ADDR 0x1000 #define KDATA_BASE_ADDR2 0x1080 #define KDATA_TASK0 (KDATA_BASE_ADDR + 0x0000) #define KDATA_TASK1 (KDATA_BASE_ADDR + 0x0001) #define KDATA_TASK2 (KDATA_BASE_ADDR + 0x0002) #define KDATA_TASK3 (KDATA_BASE_ADDR + 0x0003) #define KDATA_TASK4 (KDATA_BASE_ADDR + 0x0004) #define KDATA_TASK5 (KDATA_BASE_ADDR + 0x0005) #define KDATA_TASK6 (KDATA_BASE_ADDR + 0x0006) #define KDATA_TASK7 (KDATA_BASE_ADDR + 0x0007) #define KDATA_TASK_ENDMARK (KDATA_BASE_ADDR + 0x0008) #define KDATA_CURRENT_TASK (KDATA_BASE_ADDR + 0x0009) #define KDATA_TASK_SWITCH (KDATA_BASE_ADDR + 0x000A) #define KDATA_INSTANCE0_POS3D (KDATA_BASE_ADDR + 0x000B) #define KDATA_INSTANCE1_POS3D (KDATA_BASE_ADDR + 0x000C) #define KDATA_INSTANCE2_POS3D (KDATA_BASE_ADDR + 0x000D) #define KDATA_INSTANCE3_POS3D (KDATA_BASE_ADDR + 0x000E) #define KDATA_INSTANCE4_POS3D (KDATA_BASE_ADDR + 0x000F) #define KDATA_INSTANCE5_POS3D (KDATA_BASE_ADDR + 0x0010) #define KDATA_INSTANCE6_POS3D (KDATA_BASE_ADDR + 0x0011) #define KDATA_INSTANCE7_POS3D (KDATA_BASE_ADDR + 0x0012) #define KDATA_INSTANCE8_POS3D (KDATA_BASE_ADDR + 0x0013) #define KDATA_INSTANCE_POS3D_ENDMARK (KDATA_BASE_ADDR + 0x0014) #define KDATA_INSTANCE0_SPKVIRT (KDATA_BASE_ADDR + 0x0015) #define KDATA_INSTANCE_SPKVIRT_ENDMARK (KDATA_BASE_ADDR + 0x0016) #define KDATA_INSTANCE0_SPDIF (KDATA_BASE_ADDR + 0x0017) #define KDATA_INSTANCE_SPDIF_ENDMARK (KDATA_BASE_ADDR + 0x0018) #define KDATA_INSTANCE0_MODEM (KDATA_BASE_ADDR + 0x0019) #define KDATA_INSTANCE_MODEM_ENDMARK (KDATA_BASE_ADDR + 0x001A) #define KDATA_INSTANCE0_SRC (KDATA_BASE_ADDR + 0x001B) #define KDATA_INSTANCE1_SRC (KDATA_BASE_ADDR + 0x001C) #define KDATA_INSTANCE_SRC_ENDMARK (KDATA_BASE_ADDR + 0x001D) #define KDATA_INSTANCE0_MINISRC (KDATA_BASE_ADDR + 0x001E) #define KDATA_INSTANCE1_MINISRC (KDATA_BASE_ADDR + 0x001F) #define KDATA_INSTANCE2_MINISRC (KDATA_BASE_ADDR + 0x0020) #define KDATA_INSTANCE3_MINISRC (KDATA_BASE_ADDR + 0x0021) #define 
KDATA_INSTANCE_MINISRC_ENDMARK (KDATA_BASE_ADDR + 0x0022) #define KDATA_INSTANCE0_CPYTHRU (KDATA_BASE_ADDR + 0x0023) #define KDATA_INSTANCE1_CPYTHRU (KDATA_BASE_ADDR + 0x0024) #define KDATA_INSTANCE_CPYTHRU_ENDMARK (KDATA_BASE_ADDR + 0x0025) #define KDATA_CURRENT_DMA (KDATA_BASE_ADDR + 0x0026) #define KDATA_DMA_SWITCH (KDATA_BASE_ADDR + 0x0027) #define KDATA_DMA_ACTIVE (KDATA_BASE_ADDR + 0x0028) #define KDATA_DMA_XFER0 (KDATA_BASE_ADDR + 0x0029) #define KDATA_DMA_XFER1 (KDATA_BASE_ADDR + 0x002A) #define KDATA_DMA_XFER2 (KDATA_BASE_ADDR + 0x002B) #define KDATA_DMA_XFER3 (KDATA_BASE_ADDR + 0x002C) #define KDATA_DMA_XFER4 (KDATA_BASE_ADDR + 0x002D) #define KDATA_DMA_XFER5 (KDATA_BASE_ADDR + 0x002E) #define KDATA_DMA_XFER6 (KDATA_BASE_ADDR + 0x002F) #define KDATA_DMA_XFER7 (KDATA_BASE_ADDR + 0x0030) #define KDATA_DMA_XFER8 (KDATA_BASE_ADDR + 0x0031) #define KDATA_DMA_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0032) #define KDATA_I2S_SAMPLE_COUNT (KDATA_BASE_ADDR + 0x0033) #define KDATA_I2S_INT_METER (KDATA_BASE_ADDR + 0x0034) #define KDATA_I2S_ACTIVE (KDATA_BASE_ADDR + 0x0035) #define KDATA_TIMER_COUNT_RELOAD (KDATA_BASE_ADDR + 0x0036) #define KDATA_TIMER_COUNT_CURRENT (KDATA_BASE_ADDR + 0x0037) #define KDATA_HALT_SYNCH_CLIENT (KDATA_BASE_ADDR + 0x0038) #define KDATA_HALT_SYNCH_DMA (KDATA_BASE_ADDR + 0x0039) #define KDATA_HALT_ACKNOWLEDGE (KDATA_BASE_ADDR + 0x003A) #define KDATA_ADC1_XFER0 (KDATA_BASE_ADDR + 0x003B) #define KDATA_ADC1_XFER_ENDMARK (KDATA_BASE_ADDR + 0x003C) #define KDATA_ADC1_LEFT_VOLUME (KDATA_BASE_ADDR + 0x003D) #define KDATA_ADC1_RIGHT_VOLUME (KDATA_BASE_ADDR + 0x003E) #define KDATA_ADC1_LEFT_SUR_VOL (KDATA_BASE_ADDR + 0x003F) #define KDATA_ADC1_RIGHT_SUR_VOL (KDATA_BASE_ADDR + 0x0040) #define KDATA_ADC2_XFER0 (KDATA_BASE_ADDR + 0x0041) #define KDATA_ADC2_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0042) #define KDATA_ADC2_LEFT_VOLUME (KDATA_BASE_ADDR + 0x0043) #define KDATA_ADC2_RIGHT_VOLUME (KDATA_BASE_ADDR + 0x0044) #define KDATA_ADC2_LEFT_SUR_VOL (KDATA_BASE_ADDR 
+ 0x0045) #define KDATA_ADC2_RIGHT_SUR_VOL (KDATA_BASE_ADDR + 0x0046) #define KDATA_CD_XFER0 (KDATA_BASE_ADDR + 0x0047) #define KDATA_CD_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0048) #define KDATA_CD_LEFT_VOLUME (KDATA_BASE_ADDR + 0x0049) #define KDATA_CD_RIGHT_VOLUME (KDATA_BASE_ADDR + 0x004A) #define KDATA_CD_LEFT_SUR_VOL (KDATA_BASE_ADDR + 0x004B) #define KDATA_CD_RIGHT_SUR_VOL (KDATA_BASE_ADDR + 0x004C) #define KDATA_MIC_XFER0 (KDATA_BASE_ADDR + 0x004D) #define KDATA_MIC_XFER_ENDMARK (KDATA_BASE_ADDR + 0x004E) #define KDATA_MIC_VOLUME (KDATA_BASE_ADDR + 0x004F) #define KDATA_MIC_SUR_VOL (KDATA_BASE_ADDR + 0x0050) #define KDATA_I2S_XFER0 (KDATA_BASE_ADDR + 0x0051) #define KDATA_I2S_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0052) #define KDATA_CHI_XFER0 (KDATA_BASE_ADDR + 0x0053) #define KDATA_CHI_XFER_ENDMARK (KDATA_BASE_ADDR + 0x0054) #define KDATA_SPDIF_XFER (KDATA_BASE_ADDR + 0x0055) #define KDATA_SPDIF_CURRENT_FRAME (KDATA_BASE_ADDR + 0x0056) #define KDATA_SPDIF_FRAME0 (KDATA_BASE_ADDR + 0x0057) #define KDATA_SPDIF_FRAME1 (KDATA_BASE_ADDR + 0x0058) #define KDATA_SPDIF_FRAME2 (KDATA_BASE_ADDR + 0x0059) #define KDATA_SPDIF_REQUEST (KDATA_BASE_ADDR + 0x005A) #define KDATA_SPDIF_TEMP (KDATA_BASE_ADDR + 0x005B) #define KDATA_SPDIFIN_XFER0 (KDATA_BASE_ADDR + 0x005C) #define KDATA_SPDIFIN_XFER_ENDMARK (KDATA_BASE_ADDR + 0x005D) #define KDATA_SPDIFIN_INT_METER (KDATA_BASE_ADDR + 0x005E) #define KDATA_DSP_RESET_COUNT (KDATA_BASE_ADDR + 0x005F) #define KDATA_DEBUG_OUTPUT (KDATA_BASE_ADDR + 0x0060) #define KDATA_KERNEL_ISR_LIST (KDATA_BASE_ADDR + 0x0061) #define KDATA_KERNEL_ISR_CBSR1 (KDATA_BASE_ADDR + 0x0062) #define KDATA_KERNEL_ISR_CBER1 (KDATA_BASE_ADDR + 0x0063) #define KDATA_KERNEL_ISR_CBCR (KDATA_BASE_ADDR + 0x0064) #define KDATA_KERNEL_ISR_AR0 (KDATA_BASE_ADDR + 0x0065) #define KDATA_KERNEL_ISR_AR1 (KDATA_BASE_ADDR + 0x0066) #define KDATA_KERNEL_ISR_AR2 (KDATA_BASE_ADDR + 0x0067) #define KDATA_KERNEL_ISR_AR3 (KDATA_BASE_ADDR + 0x0068) #define KDATA_KERNEL_ISR_AR4 
(KDATA_BASE_ADDR + 0x0069) #define KDATA_KERNEL_ISR_AR5 (KDATA_BASE_ADDR + 0x006A) #define KDATA_KERNEL_ISR_BRCR (KDATA_BASE_ADDR + 0x006B) #define KDATA_KERNEL_ISR_PASR (KDATA_BASE_ADDR + 0x006C) #define KDATA_KERNEL_ISR_PAER (KDATA_BASE_ADDR + 0x006D) #define KDATA_CLIENT_SCRATCH0 (KDATA_BASE_ADDR + 0x006E) #define KDATA_CLIENT_SCRATCH1 (KDATA_BASE_ADDR + 0x006F) #define KDATA_KERNEL_SCRATCH (KDATA_BASE_ADDR + 0x0070) #define KDATA_KERNEL_ISR_SCRATCH (KDATA_BASE_ADDR + 0x0071) #define KDATA_OUEUE_LEFT (KDATA_BASE_ADDR + 0x0072) #define KDATA_QUEUE_RIGHT (KDATA_BASE_ADDR + 0x0073) #define KDATA_ADC1_REQUEST (KDATA_BASE_ADDR + 0x0074) #define KDATA_ADC2_REQUEST (KDATA_BASE_ADDR + 0x0075) #define KDATA_CD_REQUEST (KDATA_BASE_ADDR + 0x0076) #define KDATA_MIC_REQUEST (KDATA_BASE_ADDR + 0x0077) #define KDATA_ADC1_MIXER_REQUEST (KDATA_BASE_ADDR + 0x0078) #define KDATA_ADC2_MIXER_REQUEST (KDATA_BASE_ADDR + 0x0079) #define KDATA_CD_MIXER_REQUEST (KDATA_BASE_ADDR + 0x007A) #define KDATA_MIC_MIXER_REQUEST (KDATA_BASE_ADDR + 0x007B) #define KDATA_MIC_SYNC_COUNTER (KDATA_BASE_ADDR + 0x007C) /* * second 'segment' (?) reserved for mixer * buffers.. 
*/ #define KDATA_MIXER_WORD0 (KDATA_BASE_ADDR2 + 0x0000) #define KDATA_MIXER_WORD1 (KDATA_BASE_ADDR2 + 0x0001) #define KDATA_MIXER_WORD2 (KDATA_BASE_ADDR2 + 0x0002) #define KDATA_MIXER_WORD3 (KDATA_BASE_ADDR2 + 0x0003) #define KDATA_MIXER_WORD4 (KDATA_BASE_ADDR2 + 0x0004) #define KDATA_MIXER_WORD5 (KDATA_BASE_ADDR2 + 0x0005) #define KDATA_MIXER_WORD6 (KDATA_BASE_ADDR2 + 0x0006) #define KDATA_MIXER_WORD7 (KDATA_BASE_ADDR2 + 0x0007) #define KDATA_MIXER_WORD8 (KDATA_BASE_ADDR2 + 0x0008) #define KDATA_MIXER_WORD9 (KDATA_BASE_ADDR2 + 0x0009) #define KDATA_MIXER_WORDA (KDATA_BASE_ADDR2 + 0x000A) #define KDATA_MIXER_WORDB (KDATA_BASE_ADDR2 + 0x000B) #define KDATA_MIXER_WORDC (KDATA_BASE_ADDR2 + 0x000C) #define KDATA_MIXER_WORDD (KDATA_BASE_ADDR2 + 0x000D) #define KDATA_MIXER_WORDE (KDATA_BASE_ADDR2 + 0x000E) #define KDATA_MIXER_WORDF (KDATA_BASE_ADDR2 + 0x000F) #define KDATA_MIXER_XFER0 (KDATA_BASE_ADDR2 + 0x0010) #define KDATA_MIXER_XFER1 (KDATA_BASE_ADDR2 + 0x0011) #define KDATA_MIXER_XFER2 (KDATA_BASE_ADDR2 + 0x0012) #define KDATA_MIXER_XFER3 (KDATA_BASE_ADDR2 + 0x0013) #define KDATA_MIXER_XFER4 (KDATA_BASE_ADDR2 + 0x0014) #define KDATA_MIXER_XFER5 (KDATA_BASE_ADDR2 + 0x0015) #define KDATA_MIXER_XFER6 (KDATA_BASE_ADDR2 + 0x0016) #define KDATA_MIXER_XFER7 (KDATA_BASE_ADDR2 + 0x0017) #define KDATA_MIXER_XFER8 (KDATA_BASE_ADDR2 + 0x0018) #define KDATA_MIXER_XFER9 (KDATA_BASE_ADDR2 + 0x0019) #define KDATA_MIXER_XFER_ENDMARK (KDATA_BASE_ADDR2 + 0x001A) #define KDATA_MIXER_TASK_NUMBER (KDATA_BASE_ADDR2 + 0x001B) #define KDATA_CURRENT_MIXER (KDATA_BASE_ADDR2 + 0x001C) #define KDATA_MIXER_ACTIVE (KDATA_BASE_ADDR2 + 0x001D) #define KDATA_MIXER_BANK_STATUS (KDATA_BASE_ADDR2 + 0x001E) #define KDATA_DAC_LEFT_VOLUME (KDATA_BASE_ADDR2 + 0x001F) #define KDATA_DAC_RIGHT_VOLUME (KDATA_BASE_ADDR2 + 0x0020) #define MAX_INSTANCE_MINISRC (KDATA_INSTANCE_MINISRC_ENDMARK - KDATA_INSTANCE0_MINISRC) #define MAX_VIRTUAL_DMA_CHANNELS (KDATA_DMA_XFER_ENDMARK - KDATA_DMA_XFER0) #define 
MAX_VIRTUAL_MIXER_CHANNELS (KDATA_MIXER_XFER_ENDMARK - KDATA_MIXER_XFER0) #define MAX_VIRTUAL_ADC1_CHANNELS (KDATA_ADC1_XFER_ENDMARK - KDATA_ADC1_XFER0) /* * client data area offsets */ #define CDATA_INSTANCE_READY 0x00 #define CDATA_HOST_SRC_ADDRL 0x01 #define CDATA_HOST_SRC_ADDRH 0x02 #define CDATA_HOST_SRC_END_PLUS_1L 0x03 #define CDATA_HOST_SRC_END_PLUS_1H 0x04 #define CDATA_HOST_SRC_CURRENTL 0x05 #define CDATA_HOST_SRC_CURRENTH 0x06 #define CDATA_IN_BUF_CONNECT 0x07 #define CDATA_OUT_BUF_CONNECT 0x08 #define CDATA_IN_BUF_BEGIN 0x09 #define CDATA_IN_BUF_END_PLUS_1 0x0A #define CDATA_IN_BUF_HEAD 0x0B #define CDATA_IN_BUF_TAIL 0x0C #define CDATA_OUT_BUF_BEGIN 0x0D #define CDATA_OUT_BUF_END_PLUS_1 0x0E #define CDATA_OUT_BUF_HEAD 0x0F #define CDATA_OUT_BUF_TAIL 0x10 #define CDATA_DMA_CONTROL 0x11 #define CDATA_RESERVED 0x12 #define CDATA_FREQUENCY 0x13 #define CDATA_LEFT_VOLUME 0x14 #define CDATA_RIGHT_VOLUME 0x15 #define CDATA_LEFT_SUR_VOL 0x16 #define CDATA_RIGHT_SUR_VOL 0x17 #define CDATA_HEADER_LEN 0x18 #define SRC3_DIRECTION_OFFSET CDATA_HEADER_LEN #define SRC3_MODE_OFFSET (CDATA_HEADER_LEN + 1) #define SRC3_WORD_LENGTH_OFFSET (CDATA_HEADER_LEN + 2) #define SRC3_PARAMETER_OFFSET (CDATA_HEADER_LEN + 3) #define SRC3_COEFF_ADDR_OFFSET (CDATA_HEADER_LEN + 8) #define SRC3_FILTAP_ADDR_OFFSET (CDATA_HEADER_LEN + 10) #define SRC3_TEMP_INBUF_ADDR_OFFSET (CDATA_HEADER_LEN + 16) #define SRC3_TEMP_OUTBUF_ADDR_OFFSET (CDATA_HEADER_LEN + 17) #define MINISRC_IN_BUFFER_SIZE ( 0x50 * 2 ) #define MINISRC_OUT_BUFFER_SIZE ( 0x50 * 2 * 2) #define MINISRC_TMP_BUFFER_SIZE ( 112 + ( MINISRC_BIQUAD_STAGE * 3 + 4 ) * 2 * 2 ) #define MINISRC_BIQUAD_STAGE 2 #define MINISRC_COEF_LOC 0x175 #define DMACONTROL_BLOCK_MASK 0x000F #define DMAC_BLOCK0_SELECTOR 0x0000 #define DMAC_BLOCK1_SELECTOR 0x0001 #define DMAC_BLOCK2_SELECTOR 0x0002 #define DMAC_BLOCK3_SELECTOR 0x0003 #define DMAC_BLOCK4_SELECTOR 0x0004 #define DMAC_BLOCK5_SELECTOR 0x0005 #define DMAC_BLOCK6_SELECTOR 0x0006 #define 
DMAC_BLOCK7_SELECTOR 0x0007
#define DMAC_BLOCK8_SELECTOR 0x0008
#define DMAC_BLOCK9_SELECTOR 0x0009
#define DMAC_BLOCKA_SELECTOR 0x000A
#define DMAC_BLOCKB_SELECTOR 0x000B
#define DMAC_BLOCKC_SELECTOR 0x000C
#define DMAC_BLOCKD_SELECTOR 0x000D
#define DMAC_BLOCKE_SELECTOR 0x000E
#define DMAC_BLOCKF_SELECTOR 0x000F
#define DMACONTROL_PAGE_MASK 0x00F0
#define DMAC_PAGE0_SELECTOR 0x0030
#define DMAC_PAGE1_SELECTOR 0x0020
#define DMAC_PAGE2_SELECTOR 0x0010
#define DMAC_PAGE3_SELECTOR 0x0000
#define DMACONTROL_AUTOREPEAT 0x1000
#define DMACONTROL_STOPPED 0x2000
#define DMACONTROL_DIRECTION 0x0100

/*
 * an arbitrary volume we set the internal
 * volume settings to so that the ac97 volume
 * range is a little less insane.  0x7fff is
 * max.
 */
#define ARB_VOLUME ( 0x6800 )

/* a packed list kept in the DSP's internal data memory;
 * managed by snd_m3_add_list()/snd_m3_remove_list() below */
struct m3_list {
	int curlen;	/* number of entries currently used */
	int mem_addr;	/* base address of the list in DSP data memory */
	int max;	/* capacity (fixed by the binary firmware images) */
};

/* per-substream DMA / DSP-client state */
struct m3_dma {
	int number;
	struct snd_pcm_substream *substream;
	/* code/data word addresses of this client's minisrc instance
	 * inside the DSP's memory */
	struct assp_instance {
		unsigned short code, data;
	} inst;
	int running;
	int opened;
	unsigned long buffer_addr;	/* DMA (bus) address of the host buffer */
	int dma_size;			/* buffer size in bytes */
	int period_size;		/* period size in bytes */
	unsigned int hwptr;		/* last seen hardware position (bytes) */
	int count;			/* bytes since last period notification */
	int index[3];			/* this client's slots in index_list[] */
	struct m3_list *index_list[3];	/* DSP lists this client is linked on */
	int in_lists;
	struct list_head list;
};

/* chip instance */
struct snd_m3 {
	struct snd_card *card;
	unsigned long iobase;
	int irq;
	unsigned int allegro_flag : 1;	/* chip is an Allegro, not a Maestro3 */
	struct snd_ac97 *ac97;
	struct snd_pcm *pcm;
	struct pci_dev *pci;
	int dacs_active;	/* number of running playback streams */
	int timer_users;	/* refcount for the DSP timer interrupt */
	struct m3_list msrc_list;
	struct m3_list mixer_list;
	struct m3_list adc1_list;
	struct m3_list dma_list;
	/* for storing reset state..*/
	u8 reset_state;
	int external_amp;
	int amp_gpio;	/* gpio pin #  for external amp, -1 = default */
	unsigned int hv_config;		/* hardware-volume config bits */
	unsigned irda_workaround :1;	/* avoid to touch 0x10 on GPIO_DIRECTION
					   (e.g. for IrDA on Dell Inspirons) */
	unsigned is_omnibook :1;	/* Do HP OmniBook GPIO magic? */
	/* midi */
	struct snd_rawmidi *rmidi;
	/* pcm streams */
	int num_substreams;
	struct m3_dma *substreams;
	spinlock_t reg_lock;
#ifdef CONFIG_SND_MAESTRO3_INPUT
	struct input_dev *input_dev;
	char phys[64];			/* physical device path */
#else
	struct snd_kcontrol *master_switch;
	struct snd_kcontrol *master_volume;
#endif
	struct work_struct hwvol_work;	/* deferred hardware-volume handling */
	unsigned int in_suspend;	/* suppress HV events during PM */
#ifdef CONFIG_PM
	u16 *suspend_mem;
#endif
	const struct firmware *assp_kernel_image;
	const struct firmware *assp_minisrc_image;
};

/*
 * pci ids
 */
static DEFINE_PCI_DEVICE_TABLE(snd_m3_ids) = {
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ALLEGRO_1, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_ALLEGRO, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_CANYON3D_2LE, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_CANYON3D_2, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_1, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_HW, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{PCI_VENDOR_ID_ESS, PCI_DEVICE_ID_ESS_MAESTRO3_2, PCI_ANY_ID, PCI_ANY_ID,
	 PCI_CLASS_MULTIMEDIA_AUDIO << 8, 0xffff00, 0},
	{0,},
};

MODULE_DEVICE_TABLE(pci, snd_m3_ids);

/* boards that need a non-default GPIO pin for the external amplifier */
static struct snd_pci_quirk m3_amp_quirk_list[] __devinitdata = {
	SND_PCI_QUIRK(0x0E11, 0x0094, "Compaq Evo N600c", 0x0c),
	SND_PCI_QUIRK(0x10f7, 0x833e, "Panasonic CF-28", 0x0d),
	SND_PCI_QUIRK(0x10f7, 0x833d, "Panasonic CF-72", 0x0d),
	SND_PCI_QUIRK(0x1033, 0x80f1, "NEC LM800J/7", 0x03),
	SND_PCI_QUIRK(0x1509, 0x1740, "LEGEND ZhaoYang 3100CF", 0x03),
	{ } /* END */
};

/* boards needing the GPIO_DIRECTION bit-0x10 (IrDA) workaround */
static struct snd_pci_quirk m3_irda_quirk_list[] __devinitdata = {
	SND_PCI_QUIRK(0x1028, 0x00b0, "Dell Inspiron 4000", 1),
	SND_PCI_QUIRK(0x1028, 0x00a4, "Dell Inspiron 8000", 1),
	SND_PCI_QUIRK(0x1028, 0x00e6, "Dell Inspiron 8100", 1),
	{ } /* END */
};

/* hardware volume quirks */
static struct snd_pci_quirk m3_hv_quirk_list[] __devinitdata = {
	/* Allegro chips */
	SND_PCI_QUIRK(0x0E11, 0x002E, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x0E11, 0x0094, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x0E11, 0xB112, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x0E11, 0xB114, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x0012, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x0018, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x001C, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x001D, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x103C, 0x001E, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x107B, 0x3350, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x8338, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x833C, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x833D, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x833E, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x10F7, 0x833F, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x13BD, 0x1018, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x13BD, 0x1019, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x13BD, 0x101A, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x14FF, 0x0F03, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x14FF, 0x0F04, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x14FF, 0x0F05, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x156D, 0xB400, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x156D, 0xB795, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x156D, 0xB797, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x156D, 0xC700, NULL, HV_CTRL_ENABLE | HV_BUTTON_FROM_GD),
	SND_PCI_QUIRK(0x1033, 0x80F1, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x103C, 0x001A, NULL, /* HP OmniBook 6100 */
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x107B, 0x340A, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x107B, 0x3450, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x109F, 0x3134, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x109F, 0x3161, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0x3280, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0x3281, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0xC002, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0xC003, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x1509, 0x1740, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x1610, 0x0010, NULL,
		      HV_CTRL_ENABLE | HV_BUTTON_FROM_GD | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x1042, 0x1042, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x107B, 0x9500, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x14FF, 0x0F06, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x1558, 0x8586, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x161F, 0x2011, NULL, HV_CTRL_ENABLE),
	/* Maestro3 chips */
	SND_PCI_QUIRK(0x103C, 0x000E, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x103C, 0x0010, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x103C, 0x0011, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x103C, 0x001B, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x104D, 0x80A6, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x104D, 0x80AA, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x107B, 0x5300, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x110A, 0x1998, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x13BD, 0x1015, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x13BD, 0x101C, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x13BD, 0x1802, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x1599, 0x0715, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x5643, 0x5643, NULL, HV_CTRL_ENABLE),
	SND_PCI_QUIRK(0x144D, 0x3260, NULL, HV_CTRL_ENABLE | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0x3261, NULL, HV_CTRL_ENABLE | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0xC000, NULL, HV_CTRL_ENABLE | REDUCED_DEBOUNCE),
	SND_PCI_QUIRK(0x144D, 0xC001, NULL, HV_CTRL_ENABLE | REDUCED_DEBOUNCE),
	{ } /* END */
};

/* HP Omnibook quirks */
static struct snd_pci_quirk m3_omnibook_quirk_list[] __devinitdata = {
	SND_PCI_QUIRK_ID(0x103c, 0x0010), /* HP OmniBook 6000 */
	SND_PCI_QUIRK_ID(0x103c, 0x0011), /* HP OmniBook 500 */
	{ } /* END */
};

/*
 * lowlevel functions
 */

/* 16-bit write to a chip register (offset from iobase) */
static inline void snd_m3_outw(struct snd_m3 *chip, u16 value, unsigned long reg)
{
	outw(value, chip->iobase + reg);
}

/* 16-bit read from a chip register */
static inline u16 snd_m3_inw(struct snd_m3 *chip, unsigned long reg)
{
	return inw(chip->iobase + reg);
}

/* 8-bit write to a chip register */
static inline void snd_m3_outb(struct snd_m3 *chip, u8 value, unsigned long reg)
{
	outb(value, chip->iobase + reg);
}

/* 8-bit read from a chip register */
static inline u8 snd_m3_inb(struct snd_m3 *chip, unsigned long reg)
{
	return inb(chip->iobase + reg);
}

/*
 * access 16bit words to the code or data regions of the dsp's memory.
 * index addresses 16bit words.
*/
/* read one 16-bit word from DSP memory (region = code or data space) */
static u16 snd_m3_assp_read(struct snd_m3 *chip, u16 region, u16 index)
{
	/* select memory type, then word index, then latch the data port */
	snd_m3_outw(chip, region & MEMTYPE_MASK, DSP_PORT_MEMORY_TYPE);
	snd_m3_outw(chip, index, DSP_PORT_MEMORY_INDEX);
	return snd_m3_inw(chip, DSP_PORT_MEMORY_DATA);
}

/* write one 16-bit word into DSP memory; same type/index/data sequence */
static void snd_m3_assp_write(struct snd_m3 *chip, u16 region, u16 index, u16 data)
{
	snd_m3_outw(chip, region & MEMTYPE_MASK, DSP_PORT_MEMORY_TYPE);
	snd_m3_outw(chip, index, DSP_PORT_MEMORY_INDEX);
	snd_m3_outw(chip, data, DSP_PORT_MEMORY_DATA);
}

/* stop the ASSP, remembering the pre-halt control bits in
 * chip->reset_state so snd_m3_assp_continue() can restart it */
static void snd_m3_assp_halt(struct snd_m3 *chip)
{
	chip->reset_state = snd_m3_inb(chip, DSP_PORT_CONTROL_REG_B) & ~REGB_STOP_CLOCK;
	msleep(10);
	snd_m3_outb(chip, chip->reset_state & ~REGB_ENABLE_RESET, DSP_PORT_CONTROL_REG_B);
}

/* release the ASSP from reset, restoring the saved control bits */
static void snd_m3_assp_continue(struct snd_m3 *chip)
{
	snd_m3_outb(chip, chip->reset_state | REGB_ENABLE_RESET, DSP_PORT_CONTROL_REG_B);
}

/*
 * This makes me sad. the maestro3 has lists
 * internally that must be packed.. 0 terminates,
 * apparently, or maybe all unused entries have
 * to be 0, the lists have static lengths set
 * by the binary code images.
 */

/* append val to a DSP-side list; returns the slot index it was put in */
static int snd_m3_add_list(struct snd_m3 *chip, struct m3_list *list, u16 val)
{
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  list->mem_addr + list->curlen,
			  val);
	return list->curlen++;
}

/* remove the entry at 'index' from a DSP-side list, keeping the list
 * packed by moving the last entry into the vacated slot */
static void snd_m3_remove_list(struct snd_m3 *chip, struct m3_list *list, int index)
{
	u16  val;
	int lastindex = list->curlen - 1;

	if (index != lastindex) {
		val = snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA,
				       list->mem_addr + lastindex);
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  list->mem_addr + index,
				  val);
	}

	/* zero the freed tail slot (lists appear to be 0-terminated) */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  list->mem_addr + lastindex,
			  0);

	list->curlen--;
}

/* refcounted enable of the DSP timer; the first user programs the
 * 240-tick reload/current counters and enables CLKRUN generation */
static void snd_m3_inc_timer_users(struct snd_m3 *chip)
{
	chip->timer_users++;
	if (chip->timer_users != 1)
		return;

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_TIMER_COUNT_RELOAD, 240);
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_TIMER_COUNT_CURRENT, 240);
	snd_m3_outw(chip,
		    snd_m3_inw(chip, HOST_INT_CTRL) | CLKRUN_GEN_ENABLE,
		    HOST_INT_CTRL);
}

/* refcounted disable of the DSP timer; the last user zeroes the
 * counters and turns CLKRUN generation back off */
static void snd_m3_dec_timer_users(struct snd_m3 *chip)
{
	chip->timer_users--;
	if (chip->timer_users > 0)
		return;

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_TIMER_COUNT_RELOAD, 0);
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_TIMER_COUNT_CURRENT, 0);
	snd_m3_outw(chip,
		    snd_m3_inw(chip, HOST_INT_CTRL) & ~CLKRUN_GEN_ENABLE,
		    HOST_INT_CTRL);
}

/*
 * start/stop
 */

/* spinlock held! */
static int snd_m3_pcm_start(struct snd_m3 *chip, struct m3_dma *s,
			    struct snd_pcm_substream *subs)
{
	if (! s || !
subs)
		return -EINVAL;

	snd_m3_inc_timer_users(chip);
	switch (subs->stream) {
	case SNDRV_PCM_STREAM_PLAYBACK:
		/* every running playback stream is one more mixer task */
		chip->dacs_active++;
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  s->inst.data + CDATA_INSTANCE_READY, 1);
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_MIXER_TASK_NUMBER,
				  chip->dacs_active);
		break;
	case SNDRV_PCM_STREAM_CAPTURE:
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_ADC1_REQUEST, 1);
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  s->inst.data + CDATA_INSTANCE_READY, 1);
		break;
	}
	return 0;
}

/* spinlock held! */
static int snd_m3_pcm_stop(struct snd_m3 *chip, struct m3_dma *s,
			   struct snd_pcm_substream *subs)
{
	if (! s || ! subs)
		return -EINVAL;

	/* mark this client not-ready first, then drop the timer reference */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_INSTANCE_READY, 0);
	snd_m3_dec_timer_users(chip);
	switch (subs->stream) {
	case SNDRV_PCM_STREAM_PLAYBACK:
		chip->dacs_active--;
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_MIXER_TASK_NUMBER,
				  chip->dacs_active);
		break;
	case SNDRV_PCM_STREAM_CAPTURE:
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_ADC1_REQUEST, 0);
		break;
	}
	return 0;
}

/* PCM trigger callback: start/stop the DSP client for this substream */
static int snd_m3_pcm_trigger(struct snd_pcm_substream *subs, int cmd)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	struct m3_dma *s = subs->runtime->private_data;
	int err = -EINVAL;

	if (snd_BUG_ON(!s))
		return -ENXIO;

	spin_lock(&chip->reg_lock);
	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		if (s->running)
			err = -EBUSY;
		else {
			s->running = 1;
			err = snd_m3_pcm_start(chip, s, subs);
		}
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		if (! s->running)
			err = 0; /* should return error? */
		else {
			s->running = 0;
			err = snd_m3_pcm_stop(chip, s, subs);
		}
		break;
	}
	spin_unlock(&chip->reg_lock);
	return err;
}

/*
 * setup
 */

/* program the host DMA buffer pointers and the DSP-side in/out buffer
 * bounds for this client (first stage of prepare) */
static void snd_m3_pcm_setup1(struct snd_m3 *chip, struct m3_dma *s,
			      struct snd_pcm_substream *subs)
{
	int dsp_in_size, dsp_out_size, dsp_in_buffer, dsp_out_buffer;
	struct snd_pcm_runtime *runtime = subs->runtime;

	/* playback and capture reserve different amounts of headroom in
	 * the minisrc buffers (0x20 vs 0x10 words) */
	if (subs->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		dsp_in_size = MINISRC_IN_BUFFER_SIZE - (0x20 * 2);
		dsp_out_size = MINISRC_OUT_BUFFER_SIZE - (0x20 * 2);
	} else {
		dsp_in_size = MINISRC_IN_BUFFER_SIZE - (0x10 * 2);
		dsp_out_size = MINISRC_OUT_BUFFER_SIZE - (0x10 * 2);
	}

	dsp_in_buffer = s->inst.data + (MINISRC_TMP_BUFFER_SIZE / 2);
	dsp_out_buffer = dsp_in_buffer + (dsp_in_size / 2) + 1;

	s->dma_size = frames_to_bytes(runtime, runtime->buffer_size);
	s->period_size = frames_to_bytes(runtime, runtime->period_size);
	s->hwptr = 0;
	s->count = 0;

#define LO(x) ((x) & 0xffff)
#define HI(x) LO((x) >> 16)

	/* host dma buffer pointers */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_ADDRL,
			  LO(s->buffer_addr));
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_ADDRH,
			  HI(s->buffer_addr));
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_END_PLUS_1L,
			  LO(s->buffer_addr + s->dma_size));
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_END_PLUS_1H,
			  HI(s->buffer_addr + s->dma_size));
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_CURRENTL,
			  LO(s->buffer_addr));
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_HOST_SRC_CURRENTH,
			  HI(s->buffer_addr));
#undef LO
#undef HI

	/* dsp buffers */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_IN_BUF_BEGIN,
			  dsp_in_buffer);
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_IN_BUF_END_PLUS_1,
			  dsp_in_buffer + (dsp_in_size / 2));
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_IN_BUF_HEAD,
			  dsp_in_buffer);
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_IN_BUF_TAIL,
			  dsp_in_buffer);
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_OUT_BUF_BEGIN,
			  dsp_out_buffer);
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_OUT_BUF_END_PLUS_1,
			  dsp_out_buffer + (dsp_out_size / 2));
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_OUT_BUF_HEAD,
			  dsp_out_buffer);
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_OUT_BUF_TAIL,
			  dsp_out_buffer);
}

/* link the client into the DSP lists (once) and program channel count,
 * sample width and sample-rate conversion frequency */
static void snd_m3_pcm_setup2(struct snd_m3 *chip, struct m3_dma *s,
			      struct snd_pcm_runtime *runtime)
{
	u32 freq;

	/*
	 * put us in the lists if we're not already there
	 */
	if (! s->in_lists) {
		s->index[0] = snd_m3_add_list(chip, s->index_list[0],
					      s->inst.data >> DP_SHIFT_COUNT);
		s->index[1] = snd_m3_add_list(chip, s->index_list[1],
					      s->inst.data >> DP_SHIFT_COUNT);
		s->index[2] = snd_m3_add_list(chip, s->index_list[2],
					      s->inst.data >> DP_SHIFT_COUNT);
		s->in_lists = 1;
	}

	/* write to 'mono' word */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 1,
			  runtime->channels == 2 ? 0 : 1);
	/* write to '8bit' word */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 2,
			  snd_pcm_format_width(runtime->format) == 16 ? 0 : 1);

	/* set up dac/adc rate */
	/* fixed-point rate ratio relative to the 48 kHz base clock,
	 * rounded to nearest */
	freq = ((runtime->rate << 15) + 24000 ) / 48000;
	if (freq)
		freq--;

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_FREQUENCY,
			  freq);
}

/* static per-client initializer table for playback clients */
static const struct play_vals {
	u16 addr, val;
} pv[] = {
	{CDATA_LEFT_VOLUME, ARB_VOLUME},
	{CDATA_RIGHT_VOLUME, ARB_VOLUME},
	{SRC3_DIRECTION_OFFSET, 0} ,
	/* +1, +2 are stereo/16 bit */
	{SRC3_DIRECTION_OFFSET + 3, 0x0000}, /* fraction?
*/
	{SRC3_DIRECTION_OFFSET + 4, 0}, /* first l */
	{SRC3_DIRECTION_OFFSET + 5, 0}, /* first r */
	{SRC3_DIRECTION_OFFSET + 6, 0}, /* second l */
	{SRC3_DIRECTION_OFFSET + 7, 0}, /* second r */
	{SRC3_DIRECTION_OFFSET + 8, 0}, /* delta l */
	{SRC3_DIRECTION_OFFSET + 9, 0}, /* delta r */
	{SRC3_DIRECTION_OFFSET + 10, 0x8000}, /* round */
	{SRC3_DIRECTION_OFFSET + 11, 0xFF00}, /* higher byte mark */
	{SRC3_DIRECTION_OFFSET + 13, 0}, /* temp0 */
	{SRC3_DIRECTION_OFFSET + 14, 0}, /* c fraction */
	{SRC3_DIRECTION_OFFSET + 15, 0}, /* counter */
	{SRC3_DIRECTION_OFFSET + 16, 8}, /* numin */
	{SRC3_DIRECTION_OFFSET + 17, 50*2}, /* numout */
	{SRC3_DIRECTION_OFFSET + 18, MINISRC_BIQUAD_STAGE - 1}, /* numstage */
	{SRC3_DIRECTION_OFFSET + 20, 0}, /* filtertap */
	{SRC3_DIRECTION_OFFSET + 21, 0} /* booster */
};

/* the mode passed should be already shifted and masked */
/* initialize a playback client's DSP instance data */
static void snd_m3_playback_setup(struct snd_m3 *chip, struct m3_dma *s,
				  struct snd_pcm_substream *subs)
{
	unsigned int i;

	/*
	 * some per client initializers
	 */

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 12,
			  s->inst.data + 40 + 8);
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 19,
			  s->inst.code + MINISRC_COEF_LOC);
	/* enable or disable low pass filter? */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 22,
			  subs->runtime->rate > 45000 ? 0xff : 0);
	/* tell it which way dma is going? */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_DMA_CONTROL,
			  DMACONTROL_AUTOREPEAT + DMAC_PAGE3_SELECTOR + DMAC_BLOCKF_SELECTOR);

	/*
	 * set an armload of static initializers
	 */
	for (i = 0; i < ARRAY_SIZE(pv); i++)
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  s->inst.data + pv[i].addr, pv[i].val);
}

/*
 *    Native record driver
 */

/* static per-client initializer table for capture clients */
static const struct rec_vals {
	u16 addr, val;
} rv[] = {
	{CDATA_LEFT_VOLUME, ARB_VOLUME},
	{CDATA_RIGHT_VOLUME, ARB_VOLUME},
	{SRC3_DIRECTION_OFFSET, 1} ,
	/* +1, +2 are stereo/16 bit */
	{SRC3_DIRECTION_OFFSET + 3, 0x0000}, /* fraction? */
	{SRC3_DIRECTION_OFFSET + 4, 0}, /* first l */
	{SRC3_DIRECTION_OFFSET + 5, 0}, /* first r */
	{SRC3_DIRECTION_OFFSET + 6, 0}, /* second l */
	{SRC3_DIRECTION_OFFSET + 7, 0}, /* second r */
	{SRC3_DIRECTION_OFFSET + 8, 0}, /* delta l */
	{SRC3_DIRECTION_OFFSET + 9, 0}, /* delta r */
	{SRC3_DIRECTION_OFFSET + 10, 0x8000}, /* round */
	{SRC3_DIRECTION_OFFSET + 11, 0xFF00}, /* higher byte mark */
	{SRC3_DIRECTION_OFFSET + 13, 0}, /* temp0 */
	{SRC3_DIRECTION_OFFSET + 14, 0}, /* c fraction */
	{SRC3_DIRECTION_OFFSET + 15, 0}, /* counter */
	{SRC3_DIRECTION_OFFSET + 16, 50},/* numin */
	{SRC3_DIRECTION_OFFSET + 17, 8}, /* numout */
	{SRC3_DIRECTION_OFFSET + 18, 0}, /* numstage */
	{SRC3_DIRECTION_OFFSET + 19, 0}, /* coef */
	{SRC3_DIRECTION_OFFSET + 20, 0}, /* filtertap */
	{SRC3_DIRECTION_OFFSET + 21, 0}, /* booster */
	{SRC3_DIRECTION_OFFSET + 22, 0xff} /* skip lpf */
};

/* initialize a capture client's DSP instance data */
static void snd_m3_capture_setup(struct snd_m3 *chip, struct m3_dma *s,
				 struct snd_pcm_substream *subs)
{
	unsigned int i;

	/*
	 * some per client initializers
	 */

	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + SRC3_DIRECTION_OFFSET + 12,
			  s->inst.data + 40 + 8);

	/* tell it which way dma is going? */
	snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
			  s->inst.data + CDATA_DMA_CONTROL,
			  DMACONTROL_DIRECTION + DMACONTROL_AUTOREPEAT +
			  DMAC_PAGE3_SELECTOR + DMAC_BLOCKF_SELECTOR);

	/*
	 * set an armload of static initializers
	 */
	for (i = 0; i < ARRAY_SIZE(rv); i++)
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  s->inst.data + rv[i].addr, rv[i].val);
}

/* hw_params callback: allocate the DMA buffer and record its address */
static int snd_m3_pcm_hw_params(struct snd_pcm_substream *substream,
				struct snd_pcm_hw_params *hw_params)
{
	struct m3_dma *s = substream->runtime->private_data;
	int err;

	if ((err = snd_pcm_lib_malloc_pages(substream, params_buffer_bytes(hw_params))) < 0)
		return err;
	/* set buffer address */
	s->buffer_addr = substream->runtime->dma_addr;
	if (s->buffer_addr & 0x3) {
		/* hardware wants a 4-byte-aligned buffer; force-align it */
		snd_printk(KERN_ERR "oh my, not aligned\n");
		s->buffer_addr = s->buffer_addr & ~0x3;
	}
	return 0;
}

/* hw_free callback: release the DMA buffer */
static int snd_m3_pcm_hw_free(struct snd_pcm_substream *substream)
{
	struct m3_dma *s;

	if (substream->runtime->private_data == NULL)
		return 0;
	s = substream->runtime->private_data;
	snd_pcm_lib_free_pages(substream);
	s->buffer_addr = 0;
	return 0;
}

/* prepare callback: validate format/rate and program the DSP client */
static int
snd_m3_pcm_prepare(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	struct snd_pcm_runtime *runtime = subs->runtime;
	struct m3_dma *s = runtime->private_data;

	if (snd_BUG_ON(!s))
		return -ENXIO;

	if (runtime->format != SNDRV_PCM_FORMAT_U8 &&
	    runtime->format != SNDRV_PCM_FORMAT_S16_LE)
		return -EINVAL;
	if (runtime->rate > 48000 ||
	    runtime->rate < 8000)
		return -EINVAL;

	spin_lock_irq(&chip->reg_lock);

	snd_m3_pcm_setup1(chip, s, subs);

	if (subs->stream == SNDRV_PCM_STREAM_PLAYBACK)
		snd_m3_playback_setup(chip, s, subs);
	else
		snd_m3_capture_setup(chip, s, subs);

	snd_m3_pcm_setup2(chip, s, runtime);

	spin_unlock_irq(&chip->reg_lock);

	return 0;
}

/*
 * get current pointer
 */
static unsigned int
snd_m3_get_pointer(struct snd_m3 *chip, struct m3_dma *s,
		   struct snd_pcm_substream *subs)
{
	u16 hi = 0, lo = 0;
	int retry = 10;
	u32 addr;

	/*
	 * try and get a valid answer
	 */
	/* the high/low words can't be read atomically: re-read until the
	 * high word is stable across the pair of reads */
	while (retry--) {
		hi =
snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA,
				     s->inst.data + CDATA_HOST_SRC_CURRENTH);

		lo = snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA,
				      s->inst.data + CDATA_HOST_SRC_CURRENTL);

		if (hi == snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA,
					   s->inst.data + CDATA_HOST_SRC_CURRENTH))
			break;
	}
	addr = lo | ((u32)hi<<16);
	return (unsigned int)(addr - s->buffer_addr);
}

/* PCM pointer callback: current position in frames */
static snd_pcm_uframes_t
snd_m3_pcm_pointer(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	unsigned int ptr;
	struct m3_dma *s = subs->runtime->private_data;

	if (snd_BUG_ON(!s))
		return 0;

	spin_lock(&chip->reg_lock);
	ptr = snd_m3_get_pointer(chip, s, subs);
	spin_unlock(&chip->reg_lock);
	return bytes_to_frames(subs->runtime, ptr);
}

/* update pointer */
/* spinlock held! */
/* called from the timer interrupt path; notifies ALSA when at least a
 * full period has elapsed on this stream */
static void snd_m3_update_ptr(struct snd_m3 *chip, struct m3_dma *s)
{
	struct snd_pcm_substream *subs = s->substream;
	unsigned int hwptr;
	int diff;

	if (! s->running)
		return;

	hwptr = snd_m3_get_pointer(chip, s, subs);

	/* try to avoid expensive modulo divisions */
	if (hwptr >= s->dma_size)
		hwptr %= s->dma_size;

	diff = s->dma_size + hwptr - s->hwptr;
	if (diff >= s->dma_size)
		diff %= s->dma_size;

	s->hwptr = hwptr;
	s->count += diff;

	if (s->count >= (signed)s->period_size) {

		if (s->count < 2 * (signed)s->period_size)
			s->count -= (signed)s->period_size;
		else
			s->count %= s->period_size;

		/* must drop the lock around the callback to avoid deadlock
		 * with ALSA core re-entering the driver */
		spin_unlock(&chip->reg_lock);
		snd_pcm_period_elapsed(subs);
		spin_lock(&chip->reg_lock);
	}
}

/* The m3's hardware volume works by incrementing / decrementing 2 counters
   (without wrap around) in response to volume button presses and then
   generating an interrupt. The pair of counters is stored in bits 1-3 and 5-7
   of a byte wide register. The meaning of bits 0 and 4 is unknown. */
static void snd_m3_update_hw_volume(struct work_struct *work)
{
	struct snd_m3 *chip = container_of(work, struct snd_m3, hwvol_work);
	int x, val;

	/* Figure out which volume control button was pushed,
	   based on differences from the default register
	   values. */
	x = inb(chip->iobase + SHADOW_MIX_REG_VOICE) & 0xee;

	/* Reset the volume counters to 4. Tests on the allegro integrated
	   into a Compaq N600C laptop, have revealed that:
	   1) Writing any value will result in the 2 counters being reset to
	      4 so writing 0x88 is not strictly necessary
	   2) Writing to any of the 4 involved registers will reset all 4
	      of them (and reading them always returns the same value for
	      all of them)
	   It could be that a maestro deviates from this, so leave the code
	   as is. */
	outb(0x88, chip->iobase + SHADOW_MIX_REG_VOICE);
	outb(0x88, chip->iobase + HW_VOL_COUNTER_VOICE);
	outb(0x88, chip->iobase + SHADOW_MIX_REG_MASTER);
	outb(0x88, chip->iobase + HW_VOL_COUNTER_MASTER);

	/* Ignore spurious HV interrupts during suspend / resume, this avoids
	   mistaking them for a mute button press. */
	if (chip->in_suspend)
		return;

#ifndef CONFIG_SND_MAESTRO3_INPUT
	if (!chip->master_switch || !chip->master_volume)
		return;

	/* adjust the AC'97 master register directly and notify the mixer */
	val = snd_ac97_read(chip->ac97, AC97_MASTER);
	switch (x) {
	case 0x88:
		/* The counters have not changed, yet we've received a HV
		   interrupt. According to tests run by various people this
		   happens when pressing the mute button. */
		val ^= 0x8000;
		break;
	case 0xaa:
		/* counters increased by 1 -> volume up */
		if ((val & 0x7f) > 0)
			val--;
		if ((val & 0x7f00) > 0)
			val -= 0x0100;
		break;
	case 0x66:
		/* counters decreased by 1 -> volume down */
		if ((val & 0x7f) < 0x1f)
			val++;
		if ((val & 0x7f00) < 0x1f00)
			val += 0x0100;
		break;
	}
	if (snd_ac97_update(chip->ac97, AC97_MASTER, val))
		snd_ctl_notify(chip->card, SNDRV_CTL_EVENT_MASK_VALUE,
			       &chip->master_switch->id);
#else
	if (!chip->input_dev)
		return;

	/* report the button press as an input event; volume policy is
	   left to userspace */
	val = 0;
	switch (x) {
	case 0x88:
		/* The counters have not changed, yet we've received a HV
		   interrupt. According to tests run by various people this
		   happens when pressing the mute button. */
		val = KEY_MUTE;
		break;
	case 0xaa:
		/* counters increased by 1 -> volume up */
		val = KEY_VOLUMEUP;
		break;
	case 0x66:
		/* counters decreased by 1 -> volume down */
		val = KEY_VOLUMEDOWN;
		break;
	}

	if (val) {
		input_report_key(chip->input_dev, val, 1);
		input_sync(chip->input_dev);
		input_report_key(chip->input_dev, val, 0);
		input_sync(chip->input_dev);
	}
#endif
}

/* interrupt handler: defers HV button work, acks DSP timer interrupts
 * and updates the stream pointers */
static irqreturn_t snd_m3_interrupt(int irq, void *dev_id)
{
	struct snd_m3 *chip = dev_id;
	u8 status;
	int i;

	status = inb(chip->iobase + HOST_INT_STATUS);

	if (status == 0xff)
		return IRQ_NONE;

	if (status & HV_INT_PENDING)
		schedule_work(&chip->hwvol_work);

	/*
	 * ack an assp int if its running
	 * and has an int pending
	 */
	if (status & ASSP_INT_PENDING) {
		u8 ctl = inb(chip->iobase + ASSP_CONTROL_B);
		if (!(ctl & STOP_ASSP_CLOCK)) {
			ctl = inb(chip->iobase + ASSP_HOST_INT_STATUS);
			if (ctl & DSP2HOST_REQ_TIMER) {
				outb(DSP2HOST_REQ_TIMER,
				     chip->iobase + ASSP_HOST_INT_STATUS);
				/* update adc/dac info if it was a timer int */
				spin_lock(&chip->reg_lock);
				for (i = 0; i < chip->num_substreams; i++) {
					struct m3_dma *s = &chip->substreams[i];
					if (s->running)
						snd_m3_update_ptr(chip, s);
				}
				spin_unlock(&chip->reg_lock);
			}
		}
	}

#if 0 /* TODO: not supported yet */
	if ((status & MPU401_INT_PENDING) && chip->rmidi)
		snd_mpu401_uart_interrupt(irq, chip->rmidi->private_data, regs);
#endif

	/* ack ints */
	outb(status, chip->iobase + HOST_INT_STATUS);

	return IRQ_HANDLED;
}

/*
 */

/* playback stream hardware capabilities */
static struct snd_pcm_hardware snd_m3_playback =
{
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 /*SNDRV_PCM_INFO_PAUSE |*/
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		8000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(512*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(512*1024),
	.periods_min =		1,
	.periods_max =		1024,
};

/* capture stream hardware capabilities */
static struct
snd_pcm_hardware snd_m3_capture =
{
	.info =			(SNDRV_PCM_INFO_MMAP |
				 SNDRV_PCM_INFO_INTERLEAVED |
				 SNDRV_PCM_INFO_MMAP_VALID |
				 SNDRV_PCM_INFO_BLOCK_TRANSFER |
				 /*SNDRV_PCM_INFO_PAUSE |*/
				 SNDRV_PCM_INFO_RESUME),
	.formats =		SNDRV_PCM_FMTBIT_U8 | SNDRV_PCM_FMTBIT_S16_LE,
	.rates =		SNDRV_PCM_RATE_CONTINUOUS | SNDRV_PCM_RATE_8000_48000,
	.rate_min =		8000,
	.rate_max =		48000,
	.channels_min =		1,
	.channels_max =		2,
	.buffer_bytes_max =	(512*1024),
	.period_bytes_min =	64,
	.period_bytes_max =	(512*1024),
	.periods_min =		1,
	.periods_max =		1024,
};

/*
 */

/* claim a free m3_dma slot for this substream and assign the DSP lists
 * it will be linked onto (mixer list for playback, adc1 for capture) */
static int
snd_m3_substream_open(struct snd_m3 *chip, struct snd_pcm_substream *subs)
{
	int i;
	struct m3_dma *s;

	spin_lock_irq(&chip->reg_lock);
	for (i = 0; i < chip->num_substreams; i++) {
		s = &chip->substreams[i];
		if (! s->opened)
			goto __found;
	}
	spin_unlock_irq(&chip->reg_lock);
	return -ENOMEM;
__found:
	s->opened = 1;
	s->running = 0;
	spin_unlock_irq(&chip->reg_lock);

	subs->runtime->private_data = s;
	s->substream = subs;

	/* set list owners */
	if (subs->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		s->index_list[0] = &chip->mixer_list;
	} else
		s->index_list[0] = &chip->adc1_list;
	s->index_list[1] = &chip->msrc_list;
	s->index_list[2] = &chip->dma_list;

	return 0;
}

/* stop the stream if needed, unlink it from the DSP lists and release
 * its m3_dma slot */
static void
snd_m3_substream_close(struct snd_m3 *chip, struct snd_pcm_substream *subs)
{
	struct m3_dma *s = subs->runtime->private_data;

	if (s == NULL)
		return; /* not opened properly */

	spin_lock_irq(&chip->reg_lock);
	if (s->substream && s->running)
		snd_m3_pcm_stop(chip, s, s->substream); /* does this happen? */
	if (s->in_lists) {
		snd_m3_remove_list(chip, s->index_list[0], s->index[0]);
		snd_m3_remove_list(chip, s->index_list[1], s->index[1]);
		snd_m3_remove_list(chip, s->index_list[2], s->index[2]);
		s->in_lists = 0;
	}
	s->running = 0;
	s->opened = 0;
	spin_unlock_irq(&chip->reg_lock);
}

static int
snd_m3_playback_open(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	struct snd_pcm_runtime *runtime = subs->runtime;
	int err;

	if ((err = snd_m3_substream_open(chip, subs)) < 0)
		return err;

	runtime->hw = snd_m3_playback;

	return 0;
}

static int
snd_m3_playback_close(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);

	snd_m3_substream_close(chip, subs);
	return 0;
}

static int
snd_m3_capture_open(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);
	struct snd_pcm_runtime *runtime = subs->runtime;
	int err;

	if ((err = snd_m3_substream_open(chip, subs)) < 0)
		return err;

	runtime->hw = snd_m3_capture;

	return 0;
}

static int
snd_m3_capture_close(struct snd_pcm_substream *subs)
{
	struct snd_m3 *chip = snd_pcm_substream_chip(subs);

	snd_m3_substream_close(chip, subs);
	return 0;
}

/*
 * create pcm instance
 */

static struct snd_pcm_ops snd_m3_playback_ops = {
	.open =		snd_m3_playback_open,
	.close =	snd_m3_playback_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_m3_pcm_hw_params,
	.hw_free =	snd_m3_pcm_hw_free,
	.prepare =	snd_m3_pcm_prepare,
	.trigger =	snd_m3_pcm_trigger,
	.pointer =	snd_m3_pcm_pointer,
};

static struct snd_pcm_ops snd_m3_capture_ops = {
	.open =		snd_m3_capture_open,
	.close =	snd_m3_capture_close,
	.ioctl =	snd_pcm_lib_ioctl,
	.hw_params =	snd_m3_pcm_hw_params,
	.hw_free =	snd_m3_pcm_hw_free,
	.prepare =	snd_m3_pcm_prepare,
	.trigger =	snd_m3_pcm_trigger,
	.pointer =	snd_m3_pcm_pointer,
};

/* create the PCM device and preallocate its DMA buffers */
static int __devinit
snd_m3_pcm(struct snd_m3 * chip, int device)
{
	struct snd_pcm *pcm;
	int err;

	err = snd_pcm_new(chip->card, chip->card->driver, device,
			  MAX_PLAYBACKS, MAX_CAPTURES, &pcm);
	if
1 : 0;
	/* route command/status traffic to the primary (0) or secondary (1)
	 * codec address on the serial ring bus */
	outw((inw(io + RING_BUS_CTRL_B) & ~SECOND_CODEC_ID_MASK)
	     | isremote,
	     io + RING_BUS_CTRL_B);
	outw((inw(io + SDO_OUT_DEST_CTRL) & ~COMMAND_ADDR_OUT)
	     | isremote,
	     io + SDO_OUT_DEST_CTRL);
	outw((inw(io + SDO_IN_DEST_CTRL) & ~STATUS_ADDR_IN)
	     | isremote,
	     io + SDO_IN_DEST_CTRL);
}

/*
 * hack, returns non zero on err
 */
/* probe whether the codec answers: reads AC97_VENDOR_ID1 and treats
 * 0/0xffff as "nothing there" */
static int snd_m3_try_read_vendor(struct snd_m3 *chip)
{
	u16 ret;

	if (snd_m3_ac97_wait(chip))
		return 1;

	snd_m3_outb(chip, 0x80 | (AC97_VENDOR_ID1 & 0x7f), 0x30);

	if (snd_m3_ac97_wait(chip))
		return 1;

	ret = snd_m3_inw(chip, 0x32);

	return (ret == 0) || (ret == 0xffff);
}

/* cold-reset the AC'97 codec by toggling the GPO reset line, retrying
 * with growing delays until the codec responds */
static void snd_m3_ac97_reset(struct snd_m3 *chip)
{
	u16 dir;
	int delay1 = 0, delay2 = 0, i;
	/* NOTE(review): iobase is unsigned long; narrowing to int looks
	 * suspicious on 64-bit — confirm port I/O range fits */
	int io = chip->iobase;

	if (chip->allegro_flag) {
		/*
		 * the onboard codec on the allegro seems
		 * to want to wait a very long time before
		 * coming back to life
		 */
		delay1 = 50;
		delay2 = 800;
	} else {
		/* maestro3 */
		delay1 = 20;
		delay2 = 500;
	}

	for (i = 0; i < 5; i++) {
		dir = inw(io + GPIO_DIRECTION);
		if (!chip->irda_workaround)
			dir |= 0x10; /* assuming pci bus master? */

		/* address the primary codec and tear the ac-link down */
		snd_m3_remote_codec_config(io, 0);

		outw(IO_SRAM_ENABLE, io + RING_BUS_CTRL_A);
		udelay(20);

		/* pulse the codec reset GPO low, then high again */
		outw(dir & ~GPO_PRIMARY_AC97 , io + GPIO_DIRECTION);
		outw(~GPO_PRIMARY_AC97 , io + GPIO_MASK);
		outw(0, io + GPIO_DATA);
		outw(dir | GPO_PRIMARY_AC97, io + GPIO_DIRECTION);
		schedule_timeout_uninterruptible(msecs_to_jiffies(delay1));
		outw(GPO_PRIMARY_AC97, io + GPIO_DATA);
		udelay(5);
		/* ok, bring back the ac-link */
		outw(IO_SRAM_ENABLE | SERIAL_AC_LINK_ENABLE, io + RING_BUS_CTRL_A);
		outw(~0, io + GPIO_MASK);
		schedule_timeout_uninterruptible(msecs_to_jiffies(delay2));

		if (! snd_m3_try_read_vendor(chip))
			break;

		delay1 += 10;
		delay2 += 100;

		snd_printd("maestro3: retrying codec reset with delays of %d and %d ms\n",
			   delay1, delay2);
	}

#if 0
	/* more gung-ho reset that doesn't
	 * seem to work anywhere :)
	 */
	tmp = inw(io + RING_BUS_CTRL_A);
	outw(RAC_SDFS_ENABLE|LAC_SDFS_ENABLE, io + RING_BUS_CTRL_A);
	msleep(20);
	outw(tmp, io + RING_BUS_CTRL_A);
	msleep(50);
#endif
}

/* register the AC'97 bus/mixer and look up the master controls used
 * by the hardware-volume worker */
static int __devinit snd_m3_mixer(struct snd_m3 *chip)
{
	struct snd_ac97_bus *pbus;
	struct snd_ac97_template ac97;
#ifndef CONFIG_SND_MAESTRO3_INPUT
	struct snd_ctl_elem_id elem_id;
#endif
	int err;
	static struct snd_ac97_bus_ops ops = {
		.write = snd_m3_ac97_write,
		.read = snd_m3_ac97_read,
	};

	if ((err = snd_ac97_bus(chip->card, 0, &ops, NULL, &pbus)) < 0)
		return err;

	memset(&ac97, 0, sizeof(ac97));
	ac97.private_data = chip;
	if ((err = snd_ac97_mixer(pbus, &ac97, &chip->ac97)) < 0)
		return err;

	/* seems ac97 PCM needs initialization.. hack hack.. */
	snd_ac97_write(chip->ac97, AC97_PCM, 0x8000 | (15 << 8) | 15);
	schedule_timeout_uninterruptible(msecs_to_jiffies(100));
	snd_ac97_write(chip->ac97, AC97_PCM, 0);

#ifndef CONFIG_SND_MAESTRO3_INPUT
	memset(&elem_id, 0, sizeof(elem_id));
	elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	strcpy(elem_id.name, "Master Playback Switch");
	chip->master_switch = snd_ctl_find_id(chip->card, &elem_id);
	memset(&elem_id, 0, sizeof(elem_id));
	elem_id.iface = SNDRV_CTL_ELEM_IFACE_MIXER;
	strcpy(elem_id.name, "Master Playback Volume");
	chip->master_volume = snd_ctl_find_id(chip->card, &elem_id);
#endif

	return 0;
}

/*
 * initialize ASSP
 */

#define MINISRC_LPF_LEN 10
/* low-pass filter coefficients loaded into the minisrc code image */
static const u16 minisrc_lpf[MINISRC_LPF_LEN] = {
	0X0743, 0X1104, 0X0A4C, 0XF88D, 0X242C,
	0X1023, 0X1AA9, 0X0B60, 0XEFDD, 0X186F
};

/* load the DSP kernel and minisrc firmware and reset kernel data */
static void snd_m3_assp_init(struct snd_m3 *chip)
{
	unsigned int i;
	const u16 *data;

	/* zero kernel data */
	for (i = 0; i < (REV_B_DATA_MEMORY_UNIT_LENGTH * NUM_UNITS_KERNEL_DATA) / 2; i++)
		snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA,
				  KDATA_BASE_ADDR + i, 0);

	/* zero mixer
data? */ for (i = 0; i < (REV_B_DATA_MEMORY_UNIT_LENGTH * NUM_UNITS_KERNEL_DATA) / 2; i++) snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_BASE_ADDR2 + i, 0); /* init dma pointer */ snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_CURRENT_DMA, KDATA_DMA_XFER0); /* write kernel into code memory.. */ data = (const u16 *)chip->assp_kernel_image->data; for (i = 0 ; i * 2 < chip->assp_kernel_image->size; i++) { snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE, REV_B_CODE_MEMORY_BEGIN + i, le16_to_cpu(data[i])); } /* * We only have this one client and we know that 0x400 * is free in our kernel's mem map, so lets just * drop it there. It seems that the minisrc doesn't * need vectors, so we won't bother with them.. */ data = (const u16 *)chip->assp_minisrc_image->data; for (i = 0; i * 2 < chip->assp_minisrc_image->size; i++) { snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE, 0x400 + i, le16_to_cpu(data[i])); } /* * write the coefficients for the low pass filter? */ for (i = 0; i < MINISRC_LPF_LEN ; i++) { snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE, 0x400 + MINISRC_COEF_LOC + i, minisrc_lpf[i]); } snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE, 0x400 + MINISRC_COEF_LOC + MINISRC_LPF_LEN, 0x8000); /* * the minisrc is the only thing on * our task list.. */ snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_TASK0, 0x400); /* * init the mixer number.. 
*/ snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_MIXER_TASK_NUMBER,0); /* * EXTREME KERNEL MASTER VOLUME */ snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_DAC_LEFT_VOLUME, ARB_VOLUME); snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_DAC_RIGHT_VOLUME, ARB_VOLUME); chip->mixer_list.curlen = 0; chip->mixer_list.mem_addr = KDATA_MIXER_XFER0; chip->mixer_list.max = MAX_VIRTUAL_MIXER_CHANNELS; chip->adc1_list.curlen = 0; chip->adc1_list.mem_addr = KDATA_ADC1_XFER0; chip->adc1_list.max = MAX_VIRTUAL_ADC1_CHANNELS; chip->dma_list.curlen = 0; chip->dma_list.mem_addr = KDATA_DMA_XFER0; chip->dma_list.max = MAX_VIRTUAL_DMA_CHANNELS; chip->msrc_list.curlen = 0; chip->msrc_list.mem_addr = KDATA_INSTANCE0_MINISRC; chip->msrc_list.max = MAX_INSTANCE_MINISRC; } static int __devinit snd_m3_assp_client_init(struct snd_m3 *chip, struct m3_dma *s, int index) { int data_bytes = 2 * ( MINISRC_TMP_BUFFER_SIZE / 2 + MINISRC_IN_BUFFER_SIZE / 2 + 1 + MINISRC_OUT_BUFFER_SIZE / 2 + 1 ); int address, i; /* * the revb memory map has 0x1100 through 0x1c00 * free. */ /* * align instance address to 256 bytes so that its * shifted list address is aligned. * list address = (mem address >> 1) >> 7; */ data_bytes = ALIGN(data_bytes, 256); address = 0x1100 + ((data_bytes/2) * index); if ((address + (data_bytes/2)) >= 0x1c00) { snd_printk(KERN_ERR "no memory for %d bytes at ind %d (addr 0x%x)\n", data_bytes, index, address); return -ENOMEM; } s->number = index; s->inst.code = 0x400; s->inst.data = address; for (i = data_bytes / 2; i > 0; address++, i--) { snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, address, 0); } return 0; } /* * this works for the reference board, have to find * out about others * * this needs more magic for 4 speaker, but.. */ static void snd_m3_amp_enable(struct snd_m3 *chip, int enable) { int io = chip->iobase; u16 gpo, polarity; if (! chip->external_amp) return; polarity = enable ? 
0 : 1; polarity = polarity << chip->amp_gpio; gpo = 1 << chip->amp_gpio; outw(~gpo, io + GPIO_MASK); outw(inw(io + GPIO_DIRECTION) | gpo, io + GPIO_DIRECTION); outw((GPO_SECONDARY_AC97 | GPO_PRIMARY_AC97 | polarity), io + GPIO_DATA); outw(0xffff, io + GPIO_MASK); } static void snd_m3_hv_init(struct snd_m3 *chip) { unsigned long io = chip->iobase; u16 val = GPI_VOL_DOWN | GPI_VOL_UP; if (!chip->is_omnibook) return; /* * Volume buttons on some HP OmniBook laptops * require some GPIO magic to work correctly. */ outw(0xffff, io + GPIO_MASK); outw(0x0000, io + GPIO_DATA); outw(~val, io + GPIO_MASK); outw(inw(io + GPIO_DIRECTION) & ~val, io + GPIO_DIRECTION); outw(val, io + GPIO_MASK); outw(0xffff, io + GPIO_MASK); } static int snd_m3_chip_init(struct snd_m3 *chip) { struct pci_dev *pcidev = chip->pci; unsigned long io = chip->iobase; u32 n; u16 w; u8 t; /* makes as much sense as 'n', no? */ pci_read_config_word(pcidev, PCI_LEGACY_AUDIO_CTRL, &w); w &= ~(SOUND_BLASTER_ENABLE|FM_SYNTHESIS_ENABLE| MPU401_IO_ENABLE|MPU401_IRQ_ENABLE|ALIAS_10BIT_IO| DISABLE_LEGACY); pci_write_config_word(pcidev, PCI_LEGACY_AUDIO_CTRL, w); pci_read_config_dword(pcidev, PCI_ALLEGRO_CONFIG, &n); n &= ~(HV_CTRL_ENABLE | REDUCED_DEBOUNCE | HV_BUTTON_FROM_GD); n |= chip->hv_config; /* For some reason we must always use reduced debounce. 
*/ n |= REDUCED_DEBOUNCE; n |= PM_CTRL_ENABLE | CLK_DIV_BY_49 | USE_PCI_TIMING; pci_write_config_dword(pcidev, PCI_ALLEGRO_CONFIG, n); outb(RESET_ASSP, chip->iobase + ASSP_CONTROL_B); pci_read_config_dword(pcidev, PCI_ALLEGRO_CONFIG, &n); n &= ~INT_CLK_SELECT; if (!chip->allegro_flag) { n &= ~INT_CLK_MULT_ENABLE; n |= INT_CLK_SRC_NOT_PCI; } n &= ~( CLK_MULT_MODE_SELECT | CLK_MULT_MODE_SELECT_2 ); pci_write_config_dword(pcidev, PCI_ALLEGRO_CONFIG, n); if (chip->allegro_flag) { pci_read_config_dword(pcidev, PCI_USER_CONFIG, &n); n |= IN_CLK_12MHZ_SELECT; pci_write_config_dword(pcidev, PCI_USER_CONFIG, n); } t = inb(chip->iobase + ASSP_CONTROL_A); t &= ~( DSP_CLK_36MHZ_SELECT | ASSP_CLK_49MHZ_SELECT); t |= ASSP_CLK_49MHZ_SELECT; t |= ASSP_0_WS_ENABLE; outb(t, chip->iobase + ASSP_CONTROL_A); snd_m3_assp_init(chip); /* download DSP code before starting ASSP below */ outb(RUN_ASSP, chip->iobase + ASSP_CONTROL_B); outb(0x00, io + HARDWARE_VOL_CTRL); outb(0x88, io + SHADOW_MIX_REG_VOICE); outb(0x88, io + HW_VOL_COUNTER_VOICE); outb(0x88, io + SHADOW_MIX_REG_MASTER); outb(0x88, io + HW_VOL_COUNTER_MASTER); return 0; } static void snd_m3_enable_ints(struct snd_m3 *chip) { unsigned long io = chip->iobase; unsigned short val; /* TODO: MPU401 not supported yet */ val = ASSP_INT_ENABLE /*| MPU401_INT_ENABLE*/; if (chip->hv_config & HV_CTRL_ENABLE) val |= HV_INT_ENABLE; outb(val, chip->iobase + HOST_INT_STATUS); outw(val, io + HOST_INT_CTRL); outb(inb(io + ASSP_CONTROL_C) | ASSP_HOST_INT_ENABLE, io + ASSP_CONTROL_C); } /* */ static int snd_m3_free(struct snd_m3 *chip) { struct m3_dma *s; int i; cancel_work_sync(&chip->hwvol_work); #ifdef CONFIG_SND_MAESTRO3_INPUT if (chip->input_dev) input_unregister_device(chip->input_dev); #endif if (chip->substreams) { spin_lock_irq(&chip->reg_lock); for (i = 0; i < chip->num_substreams; i++) { s = &chip->substreams[i]; /* check surviving pcms; this should not happen though.. 
*/ if (s->substream && s->running) snd_m3_pcm_stop(chip, s, s->substream); } spin_unlock_irq(&chip->reg_lock); kfree(chip->substreams); } if (chip->iobase) { outw(0, chip->iobase + HOST_INT_CTRL); /* disable ints */ } #ifdef CONFIG_PM vfree(chip->suspend_mem); #endif if (chip->irq >= 0) free_irq(chip->irq, chip); if (chip->iobase) pci_release_regions(chip->pci); release_firmware(chip->assp_kernel_image); release_firmware(chip->assp_minisrc_image); pci_disable_device(chip->pci); kfree(chip); return 0; } /* * APM support */ #ifdef CONFIG_PM static int m3_suspend(struct pci_dev *pci, pm_message_t state) { struct snd_card *card = pci_get_drvdata(pci); struct snd_m3 *chip = card->private_data; int i, dsp_index; if (chip->suspend_mem == NULL) return 0; chip->in_suspend = 1; cancel_work_sync(&chip->hwvol_work); snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); snd_ac97_suspend(chip->ac97); msleep(10); /* give the assp a chance to idle.. */ snd_m3_assp_halt(chip); /* save dsp image */ dsp_index = 0; for (i = REV_B_CODE_MEMORY_BEGIN; i <= REV_B_CODE_MEMORY_END; i++) chip->suspend_mem[dsp_index++] = snd_m3_assp_read(chip, MEMTYPE_INTERNAL_CODE, i); for (i = REV_B_DATA_MEMORY_BEGIN ; i <= REV_B_DATA_MEMORY_END; i++) chip->suspend_mem[dsp_index++] = snd_m3_assp_read(chip, MEMTYPE_INTERNAL_DATA, i); pci_disable_device(pci); pci_save_state(pci); pci_set_power_state(pci, pci_choose_state(pci, state)); return 0; } static int m3_resume(struct pci_dev *pci) { struct snd_card *card = pci_get_drvdata(pci); struct snd_m3 *chip = card->private_data; int i, dsp_index; if (chip->suspend_mem == NULL) return 0; pci_set_power_state(pci, PCI_D0); pci_restore_state(pci); if (pci_enable_device(pci) < 0) { printk(KERN_ERR "maestor3: pci_enable_device failed, " "disabling device\n"); snd_card_disconnect(card); return -EIO; } pci_set_master(pci); /* first lets just bring everything back. 
.*/ snd_m3_outw(chip, 0, 0x54); snd_m3_outw(chip, 0, 0x56); snd_m3_chip_init(chip); snd_m3_assp_halt(chip); snd_m3_ac97_reset(chip); /* restore dsp image */ dsp_index = 0; for (i = REV_B_CODE_MEMORY_BEGIN; i <= REV_B_CODE_MEMORY_END; i++) snd_m3_assp_write(chip, MEMTYPE_INTERNAL_CODE, i, chip->suspend_mem[dsp_index++]); for (i = REV_B_DATA_MEMORY_BEGIN ; i <= REV_B_DATA_MEMORY_END; i++) snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, i, chip->suspend_mem[dsp_index++]); /* tell the dma engine to restart itself */ snd_m3_assp_write(chip, MEMTYPE_INTERNAL_DATA, KDATA_DMA_ACTIVE, 0); /* restore ac97 registers */ snd_ac97_resume(chip->ac97); snd_m3_assp_continue(chip); snd_m3_enable_ints(chip); snd_m3_amp_enable(chip, 1); snd_m3_hv_init(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); chip->in_suspend = 0; return 0; } #endif /* CONFIG_PM */ #ifdef CONFIG_SND_MAESTRO3_INPUT static int __devinit snd_m3_input_register(struct snd_m3 *chip) { struct input_dev *input_dev; int err; input_dev = input_allocate_device(); if (!input_dev) return -ENOMEM; snprintf(chip->phys, sizeof(chip->phys), "pci-%s/input0", pci_name(chip->pci)); input_dev->name = chip->card->driver; input_dev->phys = chip->phys; input_dev->id.bustype = BUS_PCI; input_dev->id.vendor = chip->pci->vendor; input_dev->id.product = chip->pci->device; input_dev->dev.parent = &chip->pci->dev; __set_bit(EV_KEY, input_dev->evbit); __set_bit(KEY_MUTE, input_dev->keybit); __set_bit(KEY_VOLUMEDOWN, input_dev->keybit); __set_bit(KEY_VOLUMEUP, input_dev->keybit); err = input_register_device(input_dev); if (err) { input_free_device(input_dev); return err; } chip->input_dev = input_dev; return 0; } #endif /* CONFIG_INPUT */ /* */ static int snd_m3_dev_free(struct snd_device *device) { struct snd_m3 *chip = device->device_data; return snd_m3_free(chip); } static int __devinit snd_m3_create(struct snd_card *card, struct pci_dev *pci, int enable_amp, int amp_gpio, struct snd_m3 **chip_ret) { struct snd_m3 *chip; int i, err; 
const struct snd_pci_quirk *quirk; static struct snd_device_ops ops = { .dev_free = snd_m3_dev_free, }; *chip_ret = NULL; if (pci_enable_device(pci)) return -EIO; /* check, if we can restrict PCI DMA transfers to 28 bits */ if (pci_set_dma_mask(pci, DMA_BIT_MASK(28)) < 0 || pci_set_consistent_dma_mask(pci, DMA_BIT_MASK(28)) < 0) { snd_printk(KERN_ERR "architecture does not support 28bit PCI busmaster DMA\n"); pci_disable_device(pci); return -ENXIO; } chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (chip == NULL) { pci_disable_device(pci); return -ENOMEM; } spin_lock_init(&chip->reg_lock); switch (pci->device) { case PCI_DEVICE_ID_ESS_ALLEGRO: case PCI_DEVICE_ID_ESS_ALLEGRO_1: case PCI_DEVICE_ID_ESS_CANYON3D_2LE: case PCI_DEVICE_ID_ESS_CANYON3D_2: chip->allegro_flag = 1; break; } chip->card = card; chip->pci = pci; chip->irq = -1; INIT_WORK(&chip->hwvol_work, snd_m3_update_hw_volume); chip->external_amp = enable_amp; if (amp_gpio >= 0 && amp_gpio <= 0x0f) chip->amp_gpio = amp_gpio; else { quirk = snd_pci_quirk_lookup(pci, m3_amp_quirk_list); if (quirk) { snd_printdd(KERN_INFO "maestro3: set amp-gpio " "for '%s'\n", quirk->name); chip->amp_gpio = quirk->value; } else if (chip->allegro_flag) chip->amp_gpio = GPO_EXT_AMP_ALLEGRO; else /* presumably this is for all 'maestro3's.. 
*/ chip->amp_gpio = GPO_EXT_AMP_M3; } quirk = snd_pci_quirk_lookup(pci, m3_irda_quirk_list); if (quirk) { snd_printdd(KERN_INFO "maestro3: enabled irda workaround " "for '%s'\n", quirk->name); chip->irda_workaround = 1; } quirk = snd_pci_quirk_lookup(pci, m3_hv_quirk_list); if (quirk) chip->hv_config = quirk->value; if (snd_pci_quirk_lookup(pci, m3_omnibook_quirk_list)) chip->is_omnibook = 1; chip->num_substreams = NR_DSPS; chip->substreams = kcalloc(chip->num_substreams, sizeof(struct m3_dma), GFP_KERNEL); if (chip->substreams == NULL) { kfree(chip); pci_disable_device(pci); return -ENOMEM; } err = request_firmware(&chip->assp_kernel_image, "ess/maestro3_assp_kernel.fw", &pci->dev); if (err < 0) { snd_m3_free(chip); return err; } err = request_firmware(&chip->assp_minisrc_image, "ess/maestro3_assp_minisrc.fw", &pci->dev); if (err < 0) { snd_m3_free(chip); return err; } if ((err = pci_request_regions(pci, card->driver)) < 0) { snd_m3_free(chip); return err; } chip->iobase = pci_resource_start(pci, 0); /* just to be sure */ pci_set_master(pci); snd_m3_chip_init(chip); snd_m3_assp_halt(chip); snd_m3_ac97_reset(chip); snd_m3_amp_enable(chip, 1); snd_m3_hv_init(chip); if (request_irq(pci->irq, snd_m3_interrupt, IRQF_SHARED, KBUILD_MODNAME, chip)) { snd_printk(KERN_ERR "unable to grab IRQ %d\n", pci->irq); snd_m3_free(chip); return -ENOMEM; } chip->irq = pci->irq; #ifdef CONFIG_PM chip->suspend_mem = vmalloc(sizeof(u16) * (REV_B_CODE_MEMORY_LENGTH + REV_B_DATA_MEMORY_LENGTH)); if (chip->suspend_mem == NULL) snd_printk(KERN_WARNING "can't allocate apm buffer\n"); #endif if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops)) < 0) { snd_m3_free(chip); return err; } if ((err = snd_m3_mixer(chip)) < 0) return err; for (i = 0; i < chip->num_substreams; i++) { struct m3_dma *s = &chip->substreams[i]; if ((err = snd_m3_assp_client_init(chip, s, i)) < 0) return err; } if ((err = snd_m3_pcm(chip, 0)) < 0) return err; #ifdef CONFIG_SND_MAESTRO3_INPUT if (chip->hv_config 
& HV_CTRL_ENABLE) { err = snd_m3_input_register(chip); if (err) snd_printk(KERN_WARNING "Input device registration " "failed with error %i", err); } #endif snd_m3_enable_ints(chip); snd_m3_assp_continue(chip); snd_card_set_dev(card, &pci->dev); *chip_ret = chip; return 0; } /* */ static int __devinit snd_m3_probe(struct pci_dev *pci, const struct pci_device_id *pci_id) { static int dev; struct snd_card *card; struct snd_m3 *chip; int err; /* don't pick up modems */ if (((pci->class >> 8) & 0xffff) != PCI_CLASS_MULTIMEDIA_AUDIO) return -ENODEV; if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) return err; switch (pci->device) { case PCI_DEVICE_ID_ESS_ALLEGRO: case PCI_DEVICE_ID_ESS_ALLEGRO_1: strcpy(card->driver, "Allegro"); break; case PCI_DEVICE_ID_ESS_CANYON3D_2LE: case PCI_DEVICE_ID_ESS_CANYON3D_2: strcpy(card->driver, "Canyon3D-2"); break; default: strcpy(card->driver, "Maestro3"); break; } if ((err = snd_m3_create(card, pci, external_amp[dev], amp_gpio[dev], &chip)) < 0) { snd_card_free(card); return err; } card->private_data = chip; sprintf(card->shortname, "ESS %s PCI", card->driver); sprintf(card->longname, "%s at 0x%lx, irq %d", card->shortname, chip->iobase, chip->irq); if ((err = snd_card_register(card)) < 0) { snd_card_free(card); return err; } #if 0 /* TODO: not supported yet */ /* TODO enable MIDI IRQ and I/O */ err = snd_mpu401_uart_new(chip->card, 0, MPU401_HW_MPU401, chip->iobase + MPU401_DATA_PORT, MPU401_INFO_INTEGRATED | MPU401_INFO_IRQ_HOOK, -1, &chip->rmidi); if (err < 0) printk(KERN_WARNING "maestro3: no MIDI support.\n"); #endif pci_set_drvdata(pci, card); dev++; return 0; } static void __devexit snd_m3_remove(struct pci_dev *pci) { snd_card_free(pci_get_drvdata(pci)); pci_set_drvdata(pci, NULL); } static struct pci_driver driver = { .name = KBUILD_MODNAME, .id_table = snd_m3_ids, .probe = snd_m3_probe, .remove = 
__devexit_p(snd_m3_remove), #ifdef CONFIG_PM .suspend = m3_suspend, .resume = m3_resume, #endif }; static int __init alsa_card_m3_init(void) { return pci_register_driver(&driver); } static void __exit alsa_card_m3_exit(void) { pci_unregister_driver(&driver); } module_init(alsa_card_m3_init) module_exit(alsa_card_m3_exit)
gpl-2.0
ModdedPA/android_kernel_google_msm
drivers/i2c/muxes/gpio-i2cmux.c
4894
3877
/*
 * I2C multiplexer using GPIO API
 *
 * Peter Korsgaard <peter.korsgaard@barco.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/i2c.h>
#include <linux/i2c-mux.h>
#include <linux/gpio-i2cmux.h>
#include <linux/platform_device.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/gpio.h>

struct gpiomux {
	struct i2c_adapter *parent;
	struct i2c_adapter **adap; /* child busses */
	struct gpio_i2cmux_platform_data data; /* copy of platform data */
};

/* Drive the mux GPIOs so their combined level encodes 'val' (bit i of
 * val goes to gpios[i]). */
static void gpiomux_set(const struct gpiomux *mux, unsigned val)
{
	int i;

	for (i = 0; i < mux->data.n_gpios; i++)
		gpio_set_value(mux->data.gpios[i], val & (1 << i));
}

/* i2c-mux select callback: route the bus to channel 'chan' */
static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan)
{
	struct gpiomux *mux = data;

	gpiomux_set(mux, mux->data.values[chan]);

	return 0;
}

/* i2c-mux deselect callback: park the mux on the configured idle value */
static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan)
{
	struct gpiomux *mux = data;

	gpiomux_set(mux, mux->data.idle);

	return 0;
}

/*
 * Probe: claim the mux GPIOs, drive them to the initial state, and
 * register one child adapter per mux channel on the parent bus.
 * On failure everything acquired so far is unwound via the goto chain.
 */
static int __devinit gpiomux_probe(struct platform_device *pdev)
{
	struct gpiomux *mux;
	struct gpio_i2cmux_platform_data *pdata;
	struct i2c_adapter *parent;
	int (*deselect) (struct i2c_adapter *, void *, u32);
	unsigned initial_state;
	int i, ret;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "Missing platform data\n");
		return -ENODEV;
	}

	parent = i2c_get_adapter(pdata->parent);
	if (!parent) {
		dev_err(&pdev->dev, "Parent adapter (%d) not found\n",
			pdata->parent);
		return -ENODEV;
	}

	mux = kzalloc(sizeof(*mux), GFP_KERNEL);
	if (!mux) {
		ret = -ENOMEM;
		goto alloc_failed;
	}

	mux->parent = parent;
	mux->data = *pdata;
	mux->adap = kzalloc(sizeof(struct i2c_adapter *) * pdata->n_values,
			    GFP_KERNEL);
	if (!mux->adap) {
		ret = -ENOMEM;
		goto alloc_failed2;
	}

	/* with no idle value, there is nothing to deselect to; park the
	 * mux on channel 0's value and skip the deselect callback */
	if (pdata->idle != GPIO_I2CMUX_NO_IDLE) {
		initial_state = pdata->idle;
		deselect = gpiomux_deselect;
	} else {
		initial_state = pdata->values[0];
		deselect = NULL;
	}

	for (i = 0; i < pdata->n_gpios; i++) {
		ret = gpio_request(pdata->gpios[i], "gpio-i2cmux");
		if (ret)
			goto err_request_gpio;
		gpio_direction_output(pdata->gpios[i],
				      initial_state & (1 << i));
	}

	for (i = 0; i < pdata->n_values; i++) {
		/* base_nr == 0 means dynamic child adapter numbering */
		u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;

		mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i,
						   gpiomux_select, deselect);
		if (!mux->adap[i]) {
			ret = -ENODEV;
			dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
			goto add_adapter_failed;
		}
	}

	dev_info(&pdev->dev, "%d port mux on %s adapter\n",
		 pdata->n_values, parent->name);

	platform_set_drvdata(pdev, mux);

	return 0;

add_adapter_failed:
	/* i is the index of the adapter that failed; delete the ones
	 * added before it, then fall through to free all GPIOs */
	for (; i > 0; i--)
		i2c_del_mux_adapter(mux->adap[i - 1]);
	i = pdata->n_gpios;
err_request_gpio:
	for (; i > 0; i--)
		gpio_free(pdata->gpios[i - 1]);
	kfree(mux->adap);
alloc_failed2:
	kfree(mux);
alloc_failed:
	i2c_put_adapter(parent);

	return ret;
}

/* Remove: mirror of probe in reverse order */
static int __devexit gpiomux_remove(struct platform_device *pdev)
{
	struct gpiomux *mux = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < mux->data.n_values; i++)
		i2c_del_mux_adapter(mux->adap[i]);

	for (i = 0; i < mux->data.n_gpios; i++)
		gpio_free(mux->data.gpios[i]);

	platform_set_drvdata(pdev, NULL);
	i2c_put_adapter(mux->parent);
	kfree(mux->adap);
	kfree(mux);

	return 0;
}

static struct platform_driver gpiomux_driver = {
	.probe = gpiomux_probe,
	.remove = __devexit_p(gpiomux_remove),
	.driver = {
		.owner = THIS_MODULE,
		.name = "gpio-i2cmux",
	},
};

module_platform_driver(gpiomux_driver);

MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:gpio-i2cmux");
gpl-2.0
joni2012/joni
drivers/media/video/ak881x.c
5150
8673
/*
 * Driver for AK8813 / AK8814 TV-ecoders from Asahi Kasei Microsystems Co., Ltd. (AKM)
 *
 * Copyright (C) 2010, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/videodev2.h>
#include <linux/module.h>

#include <media/ak881x.h>
#include <media/v4l2-chip-ident.h>
#include <media/v4l2-common.h>
#include <media/v4l2-device.h>

/* chip register addresses */
#define AK881X_INTERFACE_MODE		0
#define AK881X_VIDEO_PROCESS1		1
#define AK881X_VIDEO_PROCESS2		2
#define AK881X_VIDEO_PROCESS3		3
#define AK881X_DAC_MODE			5
#define AK881X_STATUS			0x24
#define AK881X_DEVICE_ID		0x25
#define AK881X_DEVICE_REVISION		0x26

struct ak881x {
	struct v4l2_subdev subdev;
	struct ak881x_pdata *pdata;
	/* active lines for the current TV standard: 480 (NTSC-like) or
	 * 576 (PAL) */
	unsigned int lines;
	int id;	/* DEVICE_ID code V4L2_IDENT_AK881X code from v4l2-chip-ident.h */
	char revision;	/* DEVICE_REVISION content */
};

/* read one chip register over SMBus; negative value on error */
static int reg_read(struct i2c_client *client, const u8 reg)
{
	return i2c_smbus_read_byte_data(client, reg);
}

/* write one chip register over SMBus */
static int reg_write(struct i2c_client *client, const u8 reg,
		     const u8 data)
{
	return i2c_smbus_write_byte_data(client, reg, data);
}

/* read-modify-write: replace only the bits covered by 'mask' */
static int reg_set(struct i2c_client *client, const u8 reg,
		   const u8 data, u8 mask)
{
	int ret;

	ret = reg_read(client, reg);
	if (ret < 0)
		return ret;
	return reg_write(client, reg, (ret & ~mask) | (data & mask));
}

static struct ak881x *to_ak881x(const struct i2c_client *client)
{
	return container_of(i2c_get_clientdata(client), struct ak881x, subdev);
}

/* report the detected chip id/revision cached at probe time */
static int ak881x_g_chip_ident(struct v4l2_subdev *sd,
			       struct v4l2_dbg_chip_ident *id)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct ak881x *ak881x = to_ak881x(client);

	if (id->match.type != V4L2_CHIP_MATCH_I2C_ADDR)
		return -EINVAL;

	if (id->match.addr != client->addr)
		return -ENODEV;

	id->ident	= ak881x->id;
	id->revision	= ak881x->revision;

	return 0;
}

#ifdef CONFIG_VIDEO_ADV_DEBUG
static int ak881x_g_register(struct v4l2_subdev *sd,
			     struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	/* 0x26 (DEVICE_REVISION) is the highest register */
	if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x26)
		return -EINVAL;

	if (reg->match.addr != client->addr)
		return -ENODEV;

	reg->val = reg_read(client, reg->reg);

	if (reg->val > 0xffff)
		return -EIO;

	return 0;
}

static int ak881x_s_register(struct v4l2_subdev *sd,
			     struct v4l2_dbg_register *reg)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);

	if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0x26)
		return -EINVAL;

	if (reg->match.addr != client->addr)
		return -ENODEV;

	if (reg_write(client, reg->reg, reg->val) < 0)
		return -EIO;

	return 0;
}
#endif

/* clamp the requested format to what the encoder supports; also used
 * as the g_mbus_fmt handler */
static int ak881x_try_g_mbus_fmt(struct v4l2_subdev *sd,
				 struct v4l2_mbus_framefmt *mf)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct ak881x *ak881x = to_ak881x(client);

	v4l_bound_align_image(&mf->width, 0, 720, 2,
			      &mf->height, 0, ak881x->lines, 1, 0);
	mf->field	= V4L2_FIELD_INTERLACED;
	mf->code	= V4L2_MBUS_FMT_YUYV8_2X8;
	mf->colorspace	= V4L2_COLORSPACE_SMPTE170M;

	return 0;
}

/* only interlaced YUYV8_2X8 is accepted */
static int ak881x_s_mbus_fmt(struct v4l2_subdev *sd,
			     struct v4l2_mbus_framefmt *mf)
{
	if (mf->field != V4L2_FIELD_INTERLACED ||
	    mf->code != V4L2_MBUS_FMT_YUYV8_2X8)
		return -EINVAL;

	return ak881x_try_g_mbus_fmt(sd, mf);
}

/* single supported media bus format */
static int ak881x_enum_mbus_fmt(struct v4l2_subdev *sd, unsigned int index,
				enum v4l2_mbus_pixelcode *code)
{
	if (index)
		return -EINVAL;

	*code = V4L2_MBUS_FMT_YUYV8_2X8;
	return 0;
}

static int ak881x_cropcap(struct v4l2_subdev *sd, struct v4l2_cropcap *a)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct ak881x *ak881x = to_ak881x(client);

	a->bounds.left			= 0;
	a->bounds.top			= 0;
	a->bounds.width			= 720;
	a->bounds.height		= ak881x->lines;
	a->defrect			= a->bounds;
	a->type				= V4L2_BUF_TYPE_VIDEO_OUTPUT;
	a->pixelaspect.numerator	= 1;
	a->pixelaspect.denominator	= 1;

	return 0;
}

/*
 * Program the output TV standard into VIDEO_PROCESS1 and track the
 * matching active line count.  SECAM and PAL-N/Nc are not supported.
 */
static int ak881x_s_std_output(struct v4l2_subdev *sd, v4l2_std_id std)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct ak881x *ak881x = to_ak881x(client);
	u8 vp1;

	if (std == V4L2_STD_NTSC_443) {
		vp1 = 3;
		ak881x->lines = 480;
	} else if (std == V4L2_STD_PAL_M) {
		vp1 = 5;
		ak881x->lines = 480;
	} else if (std == V4L2_STD_PAL_60) {
		vp1 = 7;
		ak881x->lines = 480;
	} else if (std && !(std & ~V4L2_STD_PAL)) {
		vp1 = 0xf;
		ak881x->lines = 576;
	} else if (std && !(std & ~V4L2_STD_NTSC)) {
		vp1 = 0;
		ak881x->lines = 480;
	} else {
		/* No SECAM or PAL_N/Nc supported */
		return -EINVAL;
	}

	reg_set(client, AK881X_VIDEO_PROCESS1, vp1, 0xf);

	return 0;
}

/* enable/disable the video DAC(s); component vs composite output is
 * chosen from platform data flags */
static int ak881x_s_stream(struct v4l2_subdev *sd, int enable)
{
	struct i2c_client *client = v4l2_get_subdevdata(sd);
	struct ak881x *ak881x = to_ak881x(client);

	if (enable) {
		u8 dac;
		/* For colour-bar testing set bit 6 of AK881X_VIDEO_PROCESS1 */
		/* Default: composite output */
		if (ak881x->pdata->flags & AK881X_COMPONENT)
			dac = 3;
		else
			dac = 4;
		/* Turn on the DAC(s) */
		reg_write(client, AK881X_DAC_MODE, dac);
		dev_dbg(&client->dev, "chip status 0x%x\n",
			reg_read(client, AK881X_STATUS));
	} else {
		/* ...and clear bit 6 of AK881X_VIDEO_PROCESS1 here */
		reg_write(client, AK881X_DAC_MODE, 0);
		dev_dbg(&client->dev, "chip status 0x%x\n",
			reg_read(client, AK881X_STATUS));
	}

	return 0;
}

static struct v4l2_subdev_core_ops ak881x_subdev_core_ops = {
	.g_chip_ident	= ak881x_g_chip_ident,
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register	= ak881x_g_register,
	.s_register	= ak881x_s_register,
#endif
};

static struct v4l2_subdev_video_ops ak881x_subdev_video_ops = {
	.s_mbus_fmt	= ak881x_s_mbus_fmt,
	.g_mbus_fmt	= ak881x_try_g_mbus_fmt,
	.try_mbus_fmt	= ak881x_try_g_mbus_fmt,
	.cropcap	= ak881x_cropcap,
	.enum_mbus_fmt	= ak881x_enum_mbus_fmt,
	.s_std_output	= ak881x_s_std_output,
	.s_stream	= ak881x_s_stream,
};

static struct v4l2_subdev_ops ak881x_subdev_ops = {
	.core	= &ak881x_subdev_core_ops,
	.video	= &ak881x_subdev_video_ops,
};

/*
 * Probe: identify the chip by its DEVICE_ID register (AK8813 or
 * AK8814), register the v4l2 subdev, and apply interface-mode
 * configuration from platform data if present.
 */
static int ak881x_probe(struct i2c_client *client,
			const struct i2c_device_id *did)
{
	struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
	struct ak881x *ak881x;
	u8 ifmode, data;

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA)) {
		dev_warn(&adapter->dev,
			 "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
		return -EIO;
	}

	ak881x = kzalloc(sizeof(struct ak881x), GFP_KERNEL);
	if (!ak881x)
		return -ENOMEM;

	v4l2_i2c_subdev_init(&ak881x->subdev, client, &ak881x_subdev_ops);

	data = reg_read(client, AK881X_DEVICE_ID);

	switch (data) {
	case 0x13:
		ak881x->id = V4L2_IDENT_AK8813;
		break;
	case 0x14:
		ak881x->id = V4L2_IDENT_AK8814;
		break;
	default:
		dev_err(&client->dev,
			"No ak881x chip detected, register read %x\n", data);
		kfree(ak881x);
		return -ENODEV;
	}

	ak881x->revision = reg_read(client, AK881X_DEVICE_REVISION);
	ak881x->pdata = client->dev.platform_data;

	if (ak881x->pdata) {
		if (ak881x->pdata->flags & AK881X_FIELD)
			ifmode = 4;
		else
			ifmode = 0;

		switch (ak881x->pdata->flags & AK881X_IF_MODE_MASK) {
		case AK881X_IF_MODE_BT656:
			ifmode |= 1;
			break;
		case AK881X_IF_MODE_MASTER:
			ifmode |= 2;
			break;
		case AK881X_IF_MODE_SLAVE:
		default:
			break;
		}

		dev_dbg(&client->dev, "IF mode %x\n", ifmode);

		/*
		 * "Line Blanking No." seems to be the same as the number of
		 * "black" lines on, e.g., SuperH VOU, whose default value of 20
		 * "incidentally" matches ak881x' default
		 */
		reg_write(client, AK881X_INTERFACE_MODE, ifmode | (20 << 3));
	}

	/* Hardware default: NTSC-M */
	ak881x->lines = 480;

	dev_info(&client->dev, "Detected an ak881x chip ID %x, revision %x\n",
		 data, ak881x->revision);

	return 0;
}

static int ak881x_remove(struct i2c_client *client)
{
	struct ak881x *ak881x = to_ak881x(client);

	v4l2_device_unregister_subdev(&ak881x->subdev);
	kfree(ak881x);

	return 0;
}

static const struct i2c_device_id ak881x_id[] = {
	{ "ak8813", 0 },
	{ "ak8814", 0 },
	{ }
};
MODULE_DEVICE_TABLE(i2c, ak881x_id);

static struct i2c_driver ak881x_i2c_driver = {
	.driver = {
		.name = "ak881x",
	},
	.probe		= ak881x_probe,
	.remove		= ak881x_remove,
	.id_table	= ak881x_id,
};

module_i2c_driver(ak881x_i2c_driver);

MODULE_DESCRIPTION("TV-output driver for ak8813/ak8814");
MODULE_AUTHOR("Guennadi Liakhovetski <g.liakhovetski@gmx.de>");
MODULE_LICENSE("GPL v2");
gpl-2.0
CyanogenMod/android_kernel_sony_apq8064
drivers/media/video/m52790.c
5150
5951
/* * m52790 i2c ivtv driver. * Copyright (C) 2007 Hans Verkuil * * A/V source switching Mitsubishi M52790SP/FP * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/ioctl.h> #include <asm/uaccess.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/m52790.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> MODULE_DESCRIPTION("i2c device driver for m52790 A/V switch"); MODULE_AUTHOR("Hans Verkuil"); MODULE_LICENSE("GPL"); struct m52790_state { struct v4l2_subdev sd; u16 input; u16 output; }; static inline struct m52790_state *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct m52790_state, sd); } /* ----------------------------------------------------------------------- */ static int m52790_write(struct v4l2_subdev *sd) { struct m52790_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); u8 sw1 = (state->input | state->output) & 0xff; u8 sw2 = (state->input | state->output) >> 8; return i2c_smbus_write_byte_data(client, sw1, sw2); } /* Note: audio and video are linked and cannot be switched separately. So audio and video routing commands are identical for this chip. 
In theory the video amplifier and audio modes could be handled separately for the output, but that seems to be overkill right now. The same holds for implementing an audio mute control, this is now part of the audio output routing. The normal case is that another chip takes care of the actual muting so making it part of the output routing seems to be the right thing to do for now. */ static int m52790_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct m52790_state *state = to_state(sd); state->input = input; state->output = output; m52790_write(sd); return 0; } #ifdef CONFIG_VIDEO_ADV_DEBUG static int m52790_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct m52790_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (reg->reg != 0) return -EINVAL; reg->size = 1; reg->val = state->input | state->output; return 0; } static int m52790_s_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct m52790_state *state = to_state(sd); struct i2c_client *client = v4l2_get_subdevdata(sd); if (!v4l2_chip_match_i2c_client(client, &reg->match)) return -EINVAL; if (!capable(CAP_SYS_ADMIN)) return -EPERM; if (reg->reg != 0) return -EINVAL; state->input = reg->val & 0x0303; state->output = reg->val & ~0x0303; m52790_write(sd); return 0; } #endif static int m52790_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_M52790, 0); } static int m52790_log_status(struct v4l2_subdev *sd) { struct m52790_state *state = to_state(sd); v4l2_info(sd, "Switch 1: %02x\n", (state->input | state->output) & 0xff); v4l2_info(sd, "Switch 2: %02x\n", (state->input | state->output) >> 8); return 0; } /* 
----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops m52790_core_ops = { .log_status = m52790_log_status, .g_chip_ident = m52790_g_chip_ident, #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = m52790_g_register, .s_register = m52790_s_register, #endif }; static const struct v4l2_subdev_audio_ops m52790_audio_ops = { .s_routing = m52790_s_routing, }; static const struct v4l2_subdev_video_ops m52790_video_ops = { .s_routing = m52790_s_routing, }; static const struct v4l2_subdev_ops m52790_ops = { .core = &m52790_core_ops, .audio = &m52790_audio_ops, .video = &m52790_video_ops, }; /* ----------------------------------------------------------------------- */ /* i2c implementation */ static int m52790_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct m52790_state *state; struct v4l2_subdev *sd; /* Check if the adapter supports the needed features */ if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) return -EIO; v4l_info(client, "chip found @ 0x%x (%s)\n", client->addr << 1, client->adapter->name); state = kzalloc(sizeof(struct m52790_state), GFP_KERNEL); if (state == NULL) return -ENOMEM; sd = &state->sd; v4l2_i2c_subdev_init(sd, client, &m52790_ops); state->input = M52790_IN_TUNER; state->output = M52790_OUT_STEREO; m52790_write(sd); return 0; } static int m52790_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); kfree(to_state(sd)); return 0; } /* ----------------------------------------------------------------------- */ static const struct i2c_device_id m52790_id[] = { { "m52790", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, m52790_id); static struct i2c_driver m52790_driver = { .driver = { .owner = THIS_MODULE, .name = "m52790", }, .probe = m52790_probe, .remove = m52790_remove, .id_table = m52790_id, }; module_i2c_driver(m52790_driver);
gpl-2.0
HTCKernels/One-SV-boost-k2cl
drivers/gpu/drm/nouveau/nv17_tv.c
5406
24157
/* * Copyright (C) 2009 Francisco Jerez. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "drmP.h" #include "drm_crtc_helper.h" #include "nouveau_drv.h" #include "nouveau_encoder.h" #include "nouveau_connector.h" #include "nouveau_crtc.h" #include "nouveau_gpio.h" #include "nouveau_hw.h" #include "nv17_tv.h" static uint32_t nv42_tv_sample_load(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; uint32_t testval, regoffset = nv04_dac_output_offset(encoder); uint32_t gpio0, gpio1, fp_htotal, fp_hsync_start, fp_hsync_end, fp_control, test_ctrl, dacclk, ctv_14, ctv_1c, ctv_6c; uint32_t sample = 0; int head; #define RGB_TEST_DATA(r, g, b) (r << 0 | g << 10 | b << 20) testval = RGB_TEST_DATA(0x82, 0xeb, 0x82); if (dev_priv->vbios.tvdactestval) testval = dev_priv->vbios.tvdactestval; dacclk = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset); head = (dacclk & 0x100) >> 8; /* Save the previous state. */ gpio1 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC1); gpio0 = nouveau_gpio_func_get(dev, DCB_GPIO_TVDAC0); fp_htotal = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL); fp_hsync_start = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START); fp_hsync_end = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END); fp_control = NVReadRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL); test_ctrl = NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset); ctv_1c = NVReadRAMDAC(dev, head, 0x680c1c); ctv_14 = NVReadRAMDAC(dev, head, 0x680c14); ctv_6c = NVReadRAMDAC(dev, head, 0x680c6c); /* Prepare the DAC for load detection. 
*/ nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, true); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, true); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, 1343); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, 1047); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, 1183); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12 | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG | NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS | NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, 0); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, (dacclk & ~0xff) | 0x22); msleep(1); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, (dacclk & ~0xff) | 0x21); NVWriteRAMDAC(dev, head, 0x680c1c, 1 << 20); NVWriteRAMDAC(dev, head, 0x680c14, 4 << 16); /* Sample pin 0x4 (usually S-video luma). */ NVWriteRAMDAC(dev, head, 0x680c6c, testval >> 10 & 0x3ff); msleep(20); sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) & 0x4 << 28; /* Sample the remaining pins. */ NVWriteRAMDAC(dev, head, 0x680c6c, testval & 0x3ff); msleep(20); sample |= NVReadRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset) & 0xa << 28; /* Restore the previous state. 
*/ NVWriteRAMDAC(dev, head, 0x680c1c, ctv_1c); NVWriteRAMDAC(dev, head, 0x680c14, ctv_14); NVWriteRAMDAC(dev, head, 0x680c6c, ctv_6c); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + regoffset, dacclk); NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + regoffset, test_ctrl); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_TG_CONTROL, fp_control); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_END, fp_hsync_end); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HSYNC_START, fp_hsync_start); NVWriteRAMDAC(dev, head, NV_PRAMDAC_FP_HTOTAL, fp_htotal); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, gpio1); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, gpio0); return sample; } static bool get_tv_detect_quirks(struct drm_device *dev, uint32_t *pin_mask) { /* Zotac FX5200 */ if (nv_match_device(dev, 0x0322, 0x19da, 0x1035) || nv_match_device(dev, 0x0322, 0x19da, 0x2035)) { *pin_mask = 0xc; return false; } /* MSI nForce2 IGP */ if (nv_match_device(dev, 0x01f0, 0x1462, 0x5710)) { *pin_mask = 0xc; return false; } return true; } static enum drm_connector_status nv17_tv_detect(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_mode_config *conf = &dev->mode_config; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct dcb_entry *dcb = tv_enc->base.dcb; bool reliable = get_tv_detect_quirks(dev, &tv_enc->pin_mask); if (nv04_dac_in_use(encoder)) return connector_status_disconnected; if (reliable) { if (dev_priv->chipset == 0x42 || dev_priv->chipset == 0x43) tv_enc->pin_mask = nv42_tv_sample_load(encoder) >> 28 & 0xe; else tv_enc->pin_mask = nv17_dac_sample_load(encoder) >> 28 & 0xe; } switch (tv_enc->pin_mask) { case 0x2: case 0x4: tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Composite; break; case 0xc: tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_SVIDEO; break; case 0xe: if (dcb->tvconf.has_component_output) tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Component; else tv_enc->subconnector 
= DRM_MODE_SUBCONNECTOR_SCART; break; default: tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; break; } drm_connector_property_set_value(connector, conf->tv_subconnector_property, tv_enc->subconnector); if (!reliable) { return connector_status_unknown; } else if (tv_enc->subconnector) { NV_INFO(dev, "Load detected on output %c\n", '@' + ffs(dcb->or)); return connector_status_connected; } else { return connector_status_disconnected; } } static int nv17_tv_get_ld_modes(struct drm_encoder *encoder, struct drm_connector *connector) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); const struct drm_display_mode *tv_mode; int n = 0; for (tv_mode = nv17_tv_modes; tv_mode->hdisplay; tv_mode++) { struct drm_display_mode *mode; mode = drm_mode_duplicate(encoder->dev, tv_mode); mode->clock = tv_norm->tv_enc_mode.vrefresh * mode->htotal / 1000 * mode->vtotal / 1000; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) mode->clock *= 2; if (mode->hdisplay == tv_norm->tv_enc_mode.hdisplay && mode->vdisplay == tv_norm->tv_enc_mode.vdisplay) mode->type |= DRM_MODE_TYPE_PREFERRED; drm_mode_probed_add(connector, mode); n++; } return n; } static int nv17_tv_get_hd_modes(struct drm_encoder *encoder, struct drm_connector *connector) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode; struct drm_display_mode *mode; const struct { int hdisplay; int vdisplay; } modes[] = { { 640, 400 }, { 640, 480 }, { 720, 480 }, { 720, 576 }, { 800, 600 }, { 1024, 768 }, { 1280, 720 }, { 1280, 1024 }, { 1920, 1080 } }; int i, n = 0; for (i = 0; i < ARRAY_SIZE(modes); i++) { if (modes[i].hdisplay > output_mode->hdisplay || modes[i].vdisplay > output_mode->vdisplay) continue; if (modes[i].hdisplay == output_mode->hdisplay && modes[i].vdisplay == output_mode->vdisplay) { mode = drm_mode_duplicate(encoder->dev, output_mode); mode->type |= DRM_MODE_TYPE_PREFERRED; } else { mode = drm_cvt_mode(encoder->dev, modes[i].hdisplay, 
modes[i].vdisplay, 60, false, (output_mode->flags & DRM_MODE_FLAG_INTERLACE), false); } /* CVT modes are sometimes unsuitable... */ if (output_mode->hdisplay <= 720 || output_mode->hdisplay >= 1920) { mode->htotal = output_mode->htotal; mode->hsync_start = (mode->hdisplay + (mode->htotal - mode->hdisplay) * 9 / 10) & ~7; mode->hsync_end = mode->hsync_start + 8; } if (output_mode->vdisplay >= 1024) { mode->vtotal = output_mode->vtotal; mode->vsync_start = output_mode->vsync_start; mode->vsync_end = output_mode->vsync_end; } mode->type |= DRM_MODE_TYPE_DRIVER; drm_mode_probed_add(connector, mode); n++; } return n; } static int nv17_tv_get_modes(struct drm_encoder *encoder, struct drm_connector *connector) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); if (tv_norm->kind == CTV_ENC_MODE) return nv17_tv_get_hd_modes(encoder, connector); else return nv17_tv_get_ld_modes(encoder, connector); } static int nv17_tv_mode_valid(struct drm_encoder *encoder, struct drm_display_mode *mode) { struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); if (tv_norm->kind == CTV_ENC_MODE) { struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode; if (mode->clock > 400000) return MODE_CLOCK_HIGH; if (mode->hdisplay > output_mode->hdisplay || mode->vdisplay > output_mode->vdisplay) return MODE_BAD; if ((mode->flags & DRM_MODE_FLAG_INTERLACE) != (output_mode->flags & DRM_MODE_FLAG_INTERLACE)) return MODE_NO_INTERLACE; if (mode->flags & DRM_MODE_FLAG_DBLSCAN) return MODE_NO_DBLESCAN; } else { const int vsync_tolerance = 600; if (mode->clock > 70000) return MODE_CLOCK_HIGH; if (abs(drm_mode_vrefresh(mode) * 1000 - tv_norm->tv_enc_mode.vrefresh) > vsync_tolerance) return MODE_VSYNC; /* The encoder takes care of the actual interlacing */ if (mode->flags & DRM_MODE_FLAG_INTERLACE) return MODE_NO_INTERLACE; } return MODE_OK; } static bool nv17_tv_mode_fixup(struct drm_encoder *encoder, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { struct 
nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); if (nv04_dac_in_use(encoder)) return false; if (tv_norm->kind == CTV_ENC_MODE) adjusted_mode->clock = tv_norm->ctv_enc_mode.mode.clock; else adjusted_mode->clock = 90000; return true; } static void nv17_tv_dpms(struct drm_encoder *encoder, int mode) { struct drm_device *dev = encoder->dev; struct nv17_tv_state *regs = &to_tv_enc(encoder)->state; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); if (nouveau_encoder(encoder)->last_dpms == mode) return; nouveau_encoder(encoder)->last_dpms = mode; NV_INFO(dev, "Setting dpms mode %d on TV encoder (output %d)\n", mode, nouveau_encoder(encoder)->dcb->index); regs->ptv_200 &= ~1; if (tv_norm->kind == CTV_ENC_MODE) { nv04_dfp_update_fp_control(encoder, mode); } else { nv04_dfp_update_fp_control(encoder, DRM_MODE_DPMS_OFF); if (mode == DRM_MODE_DPMS_ON) regs->ptv_200 |= 1; } nv_load_ptv(dev, regs, 200); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC1, mode == DRM_MODE_DPMS_ON); nouveau_gpio_func_set(dev, DCB_GPIO_TVDAC0, mode == DRM_MODE_DPMS_ON); nv04_dac_update_dacclk(encoder, mode == DRM_MODE_DPMS_ON); } static void nv17_tv_prepare(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_encoder_helper_funcs *helper = encoder->helper_private; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); int head = nouveau_crtc(encoder->crtc)->index; uint8_t *cr_lcd = &dev_priv->mode_reg.crtc_reg[head].CRTC[ NV_CIO_CRE_LCD__INDEX]; uint32_t dacclk_off = NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder); uint32_t dacclk; helper->dpms(encoder, DRM_MODE_DPMS_OFF); nv04_dfp_disable(dev, head); /* Unbind any FP encoders from this head if we need the FP * stuff enabled. 
*/ if (tv_norm->kind == CTV_ENC_MODE) { struct drm_encoder *enc; list_for_each_entry(enc, &dev->mode_config.encoder_list, head) { struct dcb_entry *dcb = nouveau_encoder(enc)->dcb; if ((dcb->type == OUTPUT_TMDS || dcb->type == OUTPUT_LVDS) && !enc->crtc && nv04_dfp_get_bound_head(dev, dcb) == head) { nv04_dfp_bind_head(dev, dcb, head ^ 1, dev_priv->vbios.fp.dual_link); } } } if (tv_norm->kind == CTV_ENC_MODE) *cr_lcd |= 0x1 | (head ? 0x0 : 0x8); /* Set the DACCLK register */ dacclk = (NVReadRAMDAC(dev, 0, dacclk_off) & ~0x30) | 0x1; if (dev_priv->card_type == NV_40) dacclk |= 0x1a << 16; if (tv_norm->kind == CTV_ENC_MODE) { dacclk |= 0x20; if (head) dacclk |= 0x100; else dacclk &= ~0x100; } else { dacclk |= 0x10; } NVWriteRAMDAC(dev, 0, dacclk_off, dacclk); } static void nv17_tv_mode_set(struct drm_encoder *encoder, struct drm_display_mode *drm_mode, struct drm_display_mode *adjusted_mode) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; int head = nouveau_crtc(encoder->crtc)->index; struct nv04_crtc_reg *regs = &dev_priv->mode_reg.crtc_reg[head]; struct nv17_tv_state *tv_regs = &to_tv_enc(encoder)->state; struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); int i; regs->CRTC[NV_CIO_CRE_53] = 0x40; /* FP_HTIMING */ regs->CRTC[NV_CIO_CRE_54] = 0; /* FP_VTIMING */ regs->ramdac_630 = 0x2; /* turn off green mode (tv test pattern?) 
*/ regs->tv_setup = 1; regs->ramdac_8c0 = 0x0; if (tv_norm->kind == TV_ENC_MODE) { tv_regs->ptv_200 = 0x13111100; if (head) tv_regs->ptv_200 |= 0x10; tv_regs->ptv_20c = 0x808010; tv_regs->ptv_304 = 0x2d00000; tv_regs->ptv_600 = 0x0; tv_regs->ptv_60c = 0x0; tv_regs->ptv_610 = 0x1e00000; if (tv_norm->tv_enc_mode.vdisplay == 576) { tv_regs->ptv_508 = 0x1200000; tv_regs->ptv_614 = 0x33; } else if (tv_norm->tv_enc_mode.vdisplay == 480) { tv_regs->ptv_508 = 0xf00000; tv_regs->ptv_614 = 0x13; } if (dev_priv->card_type >= NV_30) { tv_regs->ptv_500 = 0xe8e0; tv_regs->ptv_504 = 0x1710; tv_regs->ptv_604 = 0x0; tv_regs->ptv_608 = 0x0; } else { if (tv_norm->tv_enc_mode.vdisplay == 576) { tv_regs->ptv_604 = 0x20; tv_regs->ptv_608 = 0x10; tv_regs->ptv_500 = 0x19710; tv_regs->ptv_504 = 0x68f0; } else if (tv_norm->tv_enc_mode.vdisplay == 480) { tv_regs->ptv_604 = 0x10; tv_regs->ptv_608 = 0x20; tv_regs->ptv_500 = 0x4b90; tv_regs->ptv_504 = 0x1b480; } } for (i = 0; i < 0x40; i++) tv_regs->tv_enc[i] = tv_norm->tv_enc_mode.tv_enc[i]; } else { struct drm_display_mode *output_mode = &tv_norm->ctv_enc_mode.mode; /* The registers in PRAMDAC+0xc00 control some timings and CSC * parameters for the CTV encoder (It's only used for "HD" TV * modes, I don't think I have enough working to guess what * they exactly mean...), it's probably connected at the * output of the FP encoder, but it also needs the analog * encoder in its OR enabled and routed to the head it's * using. It's enabled with the DACCLK register, bits [5:4]. 
*/ for (i = 0; i < 38; i++) regs->ctv_regs[i] = tv_norm->ctv_enc_mode.ctv_regs[i]; regs->fp_horiz_regs[FP_DISPLAY_END] = output_mode->hdisplay - 1; regs->fp_horiz_regs[FP_TOTAL] = output_mode->htotal - 1; regs->fp_horiz_regs[FP_SYNC_START] = output_mode->hsync_start - 1; regs->fp_horiz_regs[FP_SYNC_END] = output_mode->hsync_end - 1; regs->fp_horiz_regs[FP_CRTC] = output_mode->hdisplay + max((output_mode->hdisplay-600)/40 - 1, 1); regs->fp_vert_regs[FP_DISPLAY_END] = output_mode->vdisplay - 1; regs->fp_vert_regs[FP_TOTAL] = output_mode->vtotal - 1; regs->fp_vert_regs[FP_SYNC_START] = output_mode->vsync_start - 1; regs->fp_vert_regs[FP_SYNC_END] = output_mode->vsync_end - 1; regs->fp_vert_regs[FP_CRTC] = output_mode->vdisplay - 1; regs->fp_control = NV_PRAMDAC_FP_TG_CONTROL_DISPEN_POS | NV_PRAMDAC_FP_TG_CONTROL_READ_PROG | NV_PRAMDAC_FP_TG_CONTROL_WIDTH_12; if (output_mode->flags & DRM_MODE_FLAG_PVSYNC) regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_VSYNC_POS; if (output_mode->flags & DRM_MODE_FLAG_PHSYNC) regs->fp_control |= NV_PRAMDAC_FP_TG_CONTROL_HSYNC_POS; regs->fp_debug_0 = NV_PRAMDAC_FP_DEBUG_0_YWEIGHT_ROUND | NV_PRAMDAC_FP_DEBUG_0_XWEIGHT_ROUND | NV_PRAMDAC_FP_DEBUG_0_YINTERP_BILINEAR | NV_PRAMDAC_FP_DEBUG_0_XINTERP_BILINEAR | NV_RAMDAC_FP_DEBUG_0_TMDS_ENABLED | NV_PRAMDAC_FP_DEBUG_0_YSCALE_ENABLE | NV_PRAMDAC_FP_DEBUG_0_XSCALE_ENABLE; regs->fp_debug_2 = 0; regs->fp_margin_color = 0x801080; } } static void nv17_tv_commit(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_crtc *nv_crtc = nouveau_crtc(encoder->crtc); struct nouveau_encoder *nv_encoder = nouveau_encoder(encoder); struct drm_encoder_helper_funcs *helper = encoder->helper_private; if (get_tv_norm(encoder)->kind == TV_ENC_MODE) { nv17_tv_update_rescaler(encoder); nv17_tv_update_properties(encoder); } else { nv17_ctv_update_rescaler(encoder); } nv17_tv_state_load(dev, &to_tv_enc(encoder)->state); /* This could 
use refinement for flatpanels, but it should work */ if (dev_priv->chipset < 0x44) NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0xf0000000); else NVWriteRAMDAC(dev, 0, NV_PRAMDAC_TEST_CONTROL + nv04_dac_output_offset(encoder), 0x00100000); helper->dpms(encoder, DRM_MODE_DPMS_ON); NV_INFO(dev, "Output %s is running on CRTC %d using output %c\n", drm_get_connector_name( &nouveau_encoder_connector_get(nv_encoder)->base), nv_crtc->index, '@' + ffs(nv_encoder->dcb->or)); } static void nv17_tv_save(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); nouveau_encoder(encoder)->restore.output = NVReadRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder)); nv17_tv_state_save(dev, &tv_enc->saved_state); tv_enc->state.ptv_200 = tv_enc->saved_state.ptv_200; } static void nv17_tv_restore(struct drm_encoder *encoder) { struct drm_device *dev = encoder->dev; NVWriteRAMDAC(dev, 0, NV_PRAMDAC_DACCLK + nv04_dac_output_offset(encoder), nouveau_encoder(encoder)->restore.output); nv17_tv_state_load(dev, &to_tv_enc(encoder)->saved_state); nouveau_encoder(encoder)->last_dpms = NV_DPMS_CLEARED; } static int nv17_tv_create_resources(struct drm_encoder *encoder, struct drm_connector *connector) { struct drm_device *dev = encoder->dev; struct drm_mode_config *conf = &dev->mode_config; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct dcb_entry *dcb = nouveau_encoder(encoder)->dcb; int num_tv_norms = dcb->tvconf.has_component_output ? 
NUM_TV_NORMS : NUM_LD_TV_NORMS; int i; if (nouveau_tv_norm) { for (i = 0; i < num_tv_norms; i++) { if (!strcmp(nv17_tv_norm_names[i], nouveau_tv_norm)) { tv_enc->tv_norm = i; break; } } if (i == num_tv_norms) NV_WARN(dev, "Invalid TV norm setting \"%s\"\n", nouveau_tv_norm); } drm_mode_create_tv_properties(dev, num_tv_norms, nv17_tv_norm_names); drm_connector_attach_property(connector, conf->tv_select_subconnector_property, tv_enc->select_subconnector); drm_connector_attach_property(connector, conf->tv_subconnector_property, tv_enc->subconnector); drm_connector_attach_property(connector, conf->tv_mode_property, tv_enc->tv_norm); drm_connector_attach_property(connector, conf->tv_flicker_reduction_property, tv_enc->flicker); drm_connector_attach_property(connector, conf->tv_saturation_property, tv_enc->saturation); drm_connector_attach_property(connector, conf->tv_hue_property, tv_enc->hue); drm_connector_attach_property(connector, conf->tv_overscan_property, tv_enc->overscan); return 0; } static int nv17_tv_set_property(struct drm_encoder *encoder, struct drm_connector *connector, struct drm_property *property, uint64_t val) { struct drm_mode_config *conf = &encoder->dev->mode_config; struct drm_crtc *crtc = encoder->crtc; struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); struct nv17_tv_norm_params *tv_norm = get_tv_norm(encoder); bool modes_changed = false; if (property == conf->tv_overscan_property) { tv_enc->overscan = val; if (encoder->crtc) { if (tv_norm->kind == CTV_ENC_MODE) nv17_ctv_update_rescaler(encoder); else nv17_tv_update_rescaler(encoder); } } else if (property == conf->tv_saturation_property) { if (tv_norm->kind != TV_ENC_MODE) return -EINVAL; tv_enc->saturation = val; nv17_tv_update_properties(encoder); } else if (property == conf->tv_hue_property) { if (tv_norm->kind != TV_ENC_MODE) return -EINVAL; tv_enc->hue = val; nv17_tv_update_properties(encoder); } else if (property == conf->tv_flicker_reduction_property) { if (tv_norm->kind != 
TV_ENC_MODE) return -EINVAL; tv_enc->flicker = val; if (encoder->crtc) nv17_tv_update_rescaler(encoder); } else if (property == conf->tv_mode_property) { if (connector->dpms != DRM_MODE_DPMS_OFF) return -EINVAL; tv_enc->tv_norm = val; modes_changed = true; } else if (property == conf->tv_select_subconnector_property) { if (tv_norm->kind != TV_ENC_MODE) return -EINVAL; tv_enc->select_subconnector = val; nv17_tv_update_properties(encoder); } else { return -EINVAL; } if (modes_changed) { drm_helper_probe_single_connector_modes(connector, 0, 0); /* Disable the crtc to ensure a full modeset is * performed whenever it's turned on again. */ if (crtc) { struct drm_mode_set modeset = { .crtc = crtc, }; crtc->funcs->set_config(&modeset); } } return 0; } static void nv17_tv_destroy(struct drm_encoder *encoder) { struct nv17_tv_encoder *tv_enc = to_tv_enc(encoder); NV_DEBUG_KMS(encoder->dev, "\n"); drm_encoder_cleanup(encoder); kfree(tv_enc); } static struct drm_encoder_helper_funcs nv17_tv_helper_funcs = { .dpms = nv17_tv_dpms, .save = nv17_tv_save, .restore = nv17_tv_restore, .mode_fixup = nv17_tv_mode_fixup, .prepare = nv17_tv_prepare, .commit = nv17_tv_commit, .mode_set = nv17_tv_mode_set, .detect = nv17_tv_detect, }; static struct drm_encoder_slave_funcs nv17_tv_slave_funcs = { .get_modes = nv17_tv_get_modes, .mode_valid = nv17_tv_mode_valid, .create_resources = nv17_tv_create_resources, .set_property = nv17_tv_set_property, }; static struct drm_encoder_funcs nv17_tv_funcs = { .destroy = nv17_tv_destroy, }; int nv17_tv_create(struct drm_connector *connector, struct dcb_entry *entry) { struct drm_device *dev = connector->dev; struct drm_encoder *encoder; struct nv17_tv_encoder *tv_enc = NULL; tv_enc = kzalloc(sizeof(*tv_enc), GFP_KERNEL); if (!tv_enc) return -ENOMEM; tv_enc->overscan = 50; tv_enc->flicker = 50; tv_enc->saturation = 50; tv_enc->hue = 0; tv_enc->tv_norm = TV_NORM_PAL; tv_enc->subconnector = DRM_MODE_SUBCONNECTOR_Unknown; tv_enc->select_subconnector = 
DRM_MODE_SUBCONNECTOR_Automatic; tv_enc->pin_mask = 0; encoder = to_drm_encoder(&tv_enc->base); tv_enc->base.dcb = entry; tv_enc->base.or = ffs(entry->or) - 1; drm_encoder_init(dev, encoder, &nv17_tv_funcs, DRM_MODE_ENCODER_TVDAC); drm_encoder_helper_add(encoder, &nv17_tv_helper_funcs); to_encoder_slave(encoder)->slave_funcs = &nv17_tv_slave_funcs; encoder->possible_crtcs = entry->heads; encoder->possible_clones = 0; nv17_tv_create_resources(encoder, connector); drm_mode_connector_attach_encoder(connector, encoder); return 0; }
gpl-2.0
jgcaaprom/android_kernel_oneplus_msm8974
drivers/gpu/drm/vmwgfx/vmwgfx_overlay.c
5662
14965
/************************************************************************** * * Copyright © 2009 VMware, Inc., Palo Alto, CA., USA * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE * USE OR OTHER DEALINGS IN THE SOFTWARE. * **************************************************************************/ #include "drmP.h" #include "vmwgfx_drv.h" #include "ttm/ttm_placement.h" #include "svga_overlay.h" #include "svga_escape.h" #define VMW_MAX_NUM_STREAMS 1 struct vmw_stream { struct vmw_dma_buffer *buf; bool claimed; bool paused; struct drm_vmw_control_stream_arg saved; }; /** * Overlay control */ struct vmw_overlay { /* * Each stream is a single overlay. In Xv these are called ports. */ struct mutex mutex; struct vmw_stream stream[VMW_MAX_NUM_STREAMS]; }; static inline struct vmw_overlay *vmw_overlay(struct drm_device *dev) { struct vmw_private *dev_priv = vmw_priv(dev); return dev_priv ? 
dev_priv->overlay_priv : NULL; } struct vmw_escape_header { uint32_t cmd; SVGAFifoCmdEscape body; }; struct vmw_escape_video_flush { struct vmw_escape_header escape; SVGAEscapeVideoFlush flush; }; static inline void fill_escape(struct vmw_escape_header *header, uint32_t size) { header->cmd = SVGA_CMD_ESCAPE; header->body.nsid = SVGA_ESCAPE_NSID_VMWARE; header->body.size = size; } static inline void fill_flush(struct vmw_escape_video_flush *cmd, uint32_t stream_id) { fill_escape(&cmd->escape, sizeof(cmd->flush)); cmd->flush.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_FLUSH; cmd->flush.streamId = stream_id; } /** * Send put command to hw. * * Returns * -ERESTARTSYS if interrupted by a signal. */ static int vmw_overlay_send_put(struct vmw_private *dev_priv, struct vmw_dma_buffer *buf, struct drm_vmw_control_stream_arg *arg, bool interruptible) { struct vmw_escape_video_flush *flush; size_t fifo_size; bool have_so = dev_priv->sou_priv ? true : false; int i, num_items; SVGAGuestPtr ptr; struct { struct vmw_escape_header escape; struct { uint32_t cmdType; uint32_t streamId; } header; } *cmds; struct { uint32_t registerId; uint32_t value; } *items; /* defines are a index needs + 1 */ if (have_so) num_items = SVGA_VIDEO_DST_SCREEN_ID + 1; else num_items = SVGA_VIDEO_PITCH_3 + 1; fifo_size = sizeof(*cmds) + sizeof(*flush) + sizeof(*items) * num_items; cmds = vmw_fifo_reserve(dev_priv, fifo_size); /* hardware has hung, can't do anything here */ if (!cmds) return -ENOMEM; items = (typeof(items))&cmds[1]; flush = (struct vmw_escape_video_flush *)&items[num_items]; /* the size is header + number of items */ fill_escape(&cmds->escape, sizeof(*items) * (num_items + 1)); cmds->header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; cmds->header.streamId = arg->stream_id; /* the IDs are neatly numbered */ for (i = 0; i < num_items; i++) items[i].registerId = i; vmw_bo_get_guest_ptr(&buf->base, &ptr); ptr.offset += arg->offset; items[SVGA_VIDEO_ENABLED].value = true; 
items[SVGA_VIDEO_FLAGS].value = arg->flags; items[SVGA_VIDEO_DATA_OFFSET].value = ptr.offset; items[SVGA_VIDEO_FORMAT].value = arg->format; items[SVGA_VIDEO_COLORKEY].value = arg->color_key; items[SVGA_VIDEO_SIZE].value = arg->size; items[SVGA_VIDEO_WIDTH].value = arg->width; items[SVGA_VIDEO_HEIGHT].value = arg->height; items[SVGA_VIDEO_SRC_X].value = arg->src.x; items[SVGA_VIDEO_SRC_Y].value = arg->src.y; items[SVGA_VIDEO_SRC_WIDTH].value = arg->src.w; items[SVGA_VIDEO_SRC_HEIGHT].value = arg->src.h; items[SVGA_VIDEO_DST_X].value = arg->dst.x; items[SVGA_VIDEO_DST_Y].value = arg->dst.y; items[SVGA_VIDEO_DST_WIDTH].value = arg->dst.w; items[SVGA_VIDEO_DST_HEIGHT].value = arg->dst.h; items[SVGA_VIDEO_PITCH_1].value = arg->pitch[0]; items[SVGA_VIDEO_PITCH_2].value = arg->pitch[1]; items[SVGA_VIDEO_PITCH_3].value = arg->pitch[2]; if (have_so) { items[SVGA_VIDEO_DATA_GMRID].value = ptr.gmrId; items[SVGA_VIDEO_DST_SCREEN_ID].value = SVGA_ID_INVALID; } fill_flush(flush, arg->stream_id); vmw_fifo_commit(dev_priv, fifo_size); return 0; } /** * Send stop command to hw. * * Returns * -ERESTARTSYS if interrupted by a signal. */ static int vmw_overlay_send_stop(struct vmw_private *dev_priv, uint32_t stream_id, bool interruptible) { struct { struct vmw_escape_header escape; SVGAEscapeVideoSetRegs body; struct vmw_escape_video_flush flush; } *cmds; int ret; for (;;) { cmds = vmw_fifo_reserve(dev_priv, sizeof(*cmds)); if (cmds) break; ret = vmw_fallback_wait(dev_priv, false, true, 0, interruptible, 3*HZ); if (interruptible && ret == -ERESTARTSYS) return ret; else BUG_ON(ret != 0); } fill_escape(&cmds->escape, sizeof(cmds->body)); cmds->body.header.cmdType = SVGA_ESCAPE_VMWARE_VIDEO_SET_REGS; cmds->body.header.streamId = stream_id; cmds->body.items[0].registerId = SVGA_VIDEO_ENABLED; cmds->body.items[0].value = false; fill_flush(&cmds->flush, stream_id); vmw_fifo_commit(dev_priv, sizeof(*cmds)); return 0; } /** * Move a buffer to vram or gmr if @pin is set, else unpin the buffer. 
* * With the introduction of screen objects buffers could now be * used with GMRs instead of being locked to vram. */ static int vmw_overlay_move_buffer(struct vmw_private *dev_priv, struct vmw_dma_buffer *buf, bool pin, bool inter) { if (!pin) return vmw_dmabuf_unpin(dev_priv, buf, inter); if (!dev_priv->sou_priv) return vmw_dmabuf_to_vram(dev_priv, buf, true, inter); return vmw_dmabuf_to_vram_or_gmr(dev_priv, buf, true, inter); } /** * Stop or pause a stream. * * If the stream is paused the no evict flag is removed from the buffer * but left in vram. This allows for instance mode_set to evict it * should it need to. * * The caller must hold the overlay lock. * * @stream_id which stream to stop/pause. * @pause true to pause, false to stop completely. */ static int vmw_overlay_stop(struct vmw_private *dev_priv, uint32_t stream_id, bool pause, bool interruptible) { struct vmw_overlay *overlay = dev_priv->overlay_priv; struct vmw_stream *stream = &overlay->stream[stream_id]; int ret; /* no buffer attached the stream is completely stopped */ if (!stream->buf) return 0; /* If the stream is paused this is already done */ if (!stream->paused) { ret = vmw_overlay_send_stop(dev_priv, stream_id, interruptible); if (ret) return ret; /* We just remove the NO_EVICT flag so no -ENOMEM */ ret = vmw_overlay_move_buffer(dev_priv, stream->buf, false, interruptible); if (interruptible && ret == -ERESTARTSYS) return ret; else BUG_ON(ret != 0); } if (!pause) { vmw_dmabuf_unreference(&stream->buf); stream->paused = false; } else { stream->paused = true; } return 0; } /** * Update a stream and send any put or stop fifo commands needed. * * The caller must hold the overlay lock. * * Returns * -ENOMEM if buffer doesn't fit in vram. * -ERESTARTSYS if interrupted. 
 */
static int vmw_overlay_update_stream(struct vmw_private *dev_priv,
				     struct vmw_dma_buffer *buf,
				     struct drm_vmw_control_stream_arg *arg,
				     bool interruptible)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct vmw_stream *stream = &overlay->stream[arg->stream_id];
	int ret = 0;

	if (!buf)
		return -EINVAL;

	DRM_DEBUG(" %s: old %p, new %p, %spaused\n", __func__,
		  stream->buf, buf, stream->paused ? "" : "not ");

	if (stream->buf != buf) {
		/* The stream is switching to a new buffer: stop (not pause)
		 * the old one first.
		 */
		ret = vmw_overlay_stop(dev_priv, arg->stream_id,
				       false, interruptible);
		if (ret)
			return ret;
	} else if (!stream->paused) {
		/* If the buffers match and not paused then just send
		 * the put command, no need to do anything else.
		 */
		ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
		if (ret == 0)
			stream->saved = *arg;
		else
			BUG_ON(!interruptible);

		return ret;
	}

	/* We don't start the old stream if we are interrupted.
	 * Might return -ENOMEM if it can't fit the buffer in vram.
	 */
	ret = vmw_overlay_move_buffer(dev_priv, buf, true, interruptible);
	if (ret)
		return ret;

	ret = vmw_overlay_send_put(dev_priv, buf, arg, interruptible);
	if (ret) {
		/* This one needs to happen no matter what. We only remove
		 * the NO_EVICT flag so this is safe from -ENOMEM.
		 */
		BUG_ON(vmw_overlay_move_buffer(dev_priv, buf, false, false)
		       != 0);
		return ret;
	}

	if (stream->buf != buf)
		stream->buf = vmw_dmabuf_reference(buf);
	stream->saved = *arg;
	/* stream is no longer stopped/paused */
	stream->paused = false;

	return 0;
}

/**
 * Stop all streams.
 *
 * Used by the fb code when starting.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_stop_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->buf)
			continue;

		/* Non-interruptible full stop; failure is unexpected. */
		ret = vmw_overlay_stop(dev_priv, i, false, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Try to resume all paused streams.
 *
 * Used by the kms code after moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_resume_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		struct vmw_stream *stream = &overlay->stream[i];
		if (!stream->paused)
			continue;

		/* Replay the arguments saved when the stream was paused. */
		ret = vmw_overlay_update_stream(dev_priv, stream->buf,
						&stream->saved, false);
		if (ret != 0)
			DRM_INFO("%s: *warning* failed to resume stream %i\n",
				 __func__, i);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/**
 * Pauses all active streams.
 *
 * Used by the kms code when moving a new scanout buffer to vram.
 *
 * Takes the overlay lock.
 */
int vmw_overlay_pause_all(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, ret;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].paused)
			DRM_INFO("%s: *warning* stream %i already paused\n",
				 __func__, i);
		/* pause == true: buffer stays in vram, only NO_EVICT drops. */
		ret = vmw_overlay_stop(dev_priv, i, true, false);
		WARN_ON(ret != 0);
	}

	mutex_unlock(&overlay->mutex);

	return 0;
}

/*
 * DRM_VMW_CONTROL_STREAM ioctl: enable/update or disable a user-owned
 * overlay stream.  Looks up the stream resource and (when enabling)
 * the DMA buffer handle, then delegates to update/stop under the
 * overlay mutex.
 */
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
	struct vmw_private *dev_priv = vmw_priv(dev);
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	struct drm_vmw_control_stream_arg *arg =
	    (struct drm_vmw_control_stream_arg *)data;
	struct vmw_dma_buffer *buf;
	struct vmw_resource *res;
	int ret;

	if (!overlay)
		return -ENOSYS;

	/* Validates ownership of arg->stream_id; ref dropped at out_unlock. */
	ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
	if (ret)
		return ret;

	mutex_lock(&overlay->mutex);

	if (!arg->enabled) {
		ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
		goto out_unlock;
	}

	ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf);
	if (ret)
		goto out_unlock;

	ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

	/* update_stream took its own reference if it kept the buffer. */
	vmw_dmabuf_unreference(&buf);
out_unlock:
	mutex_unlock(&overlay->mutex);
	vmw_resource_unreference(&res);

	return ret;
}

/* Number of overlay units exposed, or 0 when overlays are unsupported. */
int vmw_overlay_num_overlays(struct vmw_private *dev_priv)
{
	if (!dev_priv->overlay_priv)
		return 0;
	return VMW_MAX_NUM_STREAMS;
}

/* Count streams not yet claimed by a client. */
int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i, k;

	if (!overlay)
		return 0;

	mutex_lock(&overlay->mutex);

	for (i = 0, k = 0; i < VMW_MAX_NUM_STREAMS; i++)
		if (!overlay->stream[i].claimed)
			k++;

	mutex_unlock(&overlay->mutex);

	return k;
}

/*
 * Claim the first free stream slot and return its id in *out.
 * Returns -ESRCH when all streams are taken, -ENOSYS without overlays.
 */
int vmw_overlay_claim(struct vmw_private *dev_priv, uint32_t *out)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	int i;

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].claimed)
			continue;

		overlay->stream[i].claimed = true;
		*out = i;
		mutex_unlock(&overlay->mutex);
		return 0;
	}

	mutex_unlock(&overlay->mutex);
	return -ESRCH;
}

/* Release a claimed stream: stop it (dropping any buffer) and unclaim. */
int vmw_overlay_unref(struct vmw_private *dev_priv, uint32_t stream_id)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;

	BUG_ON(stream_id >= VMW_MAX_NUM_STREAMS);

	if (!overlay)
		return -ENOSYS;

	mutex_lock(&overlay->mutex);

	WARN_ON(!overlay->stream[stream_id].claimed);
	vmw_overlay_stop(dev_priv, stream_id, false, false);
	overlay->stream[stream_id].claimed = false;

	mutex_unlock(&overlay->mutex);
	return 0;
}

/* Allocate and initialize the per-device overlay state. */
int vmw_overlay_init(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay;
	int i;

	if (dev_priv->overlay_priv)
		return -EINVAL;

	/* NOTE(review): this condition rejects only "no VIDEO cap but
	 * ESCAPE cap present"; one would expect either cap missing to
	 * disable overlays.  It matches the upstream driver, so it is
	 * left as-is — confirm against the SVGA device spec before
	 * changing.
	 */
	if (!(dev_priv->fifo.capabilities & SVGA_FIFO_CAP_VIDEO) &&
	     (dev_priv->fifo.capabilities & SVGA_FIFO_CAP_ESCAPE)) {
		DRM_INFO("hardware doesn't support overlays\n");
		return -ENOSYS;
	}

	overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
	if (!overlay)
		return -ENOMEM;

	mutex_init(&overlay->mutex);
	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		overlay->stream[i].buf = NULL;
		overlay->stream[i].paused = false;
		overlay->stream[i].claimed = false;
	}

	dev_priv->overlay_priv = overlay;

	return 0;
}

/*
 * Tear down overlay state at driver unload.  Any stream still holding
 * a buffer indicates a missing unref by a client; warn and stop it.
 */
int vmw_overlay_close(struct vmw_private *dev_priv)
{
	struct vmw_overlay *overlay = dev_priv->overlay_priv;
	bool forgotten_buffer = false;
	int i;

	if (!overlay)
		return -ENOSYS;

	for (i = 0; i < VMW_MAX_NUM_STREAMS; i++) {
		if (overlay->stream[i].buf) {
			forgotten_buffer = true;
			vmw_overlay_stop(dev_priv, i, false, false);
		}
	}

	WARN_ON(forgotten_buffer);

	dev_priv->overlay_priv = NULL;
	kfree(overlay);

	return 0;
}
gpl-2.0
allanmatthew/linux-imx
arch/blackfin/kernel/ptrace.c
7198
10117
/*
 * linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
 * these modifications are Copyright 2004-2010 Analog Devices Inc.
 *
 * Licensed under the GPL-2
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/elf.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/regset.h>
#include <linux/signal.h>
#include <linux/tracehook.h>
#include <linux/uaccess.h>

#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/asm-offsets.h>
#include <asm/dma.h>
#include <asm/fixed_code.h>
#include <asm/cacheflush.h>
#include <asm/mem_map.h>
#include <asm/mmu_context.h>

/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Get contents of register REGNO in task TASK and copy it to the
 * tracer via put_user().  REGNO is a byte offset into pt_regs, or
 * one of the PT_* pseudo registers; it must be word-aligned.
 */
static inline long get_reg(struct task_struct *task, unsigned long regno,
			   unsigned long __user *datap)
{
	long tmp;
	struct pt_regs *regs = task_pt_regs(task);

	/* Reject misaligned or out-of-range register offsets. */
	if (regno & 3 || regno > PT_LAST_PSEUDO)
		return -EIO;

	switch (regno) {
	case PT_TEXT_ADDR:
		tmp = task->mm->start_code;
		break;
	case PT_TEXT_END_ADDR:
		tmp = task->mm->end_code;
		break;
	case PT_DATA_ADDR:
		tmp = task->mm->start_data;
		break;
	case PT_USP:
		tmp = task->thread.usp;
		break;
	default:
		if (regno < sizeof(*regs)) {
			/* Plain register: read straight out of the saved
			 * pt_regs by byte offset.
			 */
			void *reg_ptr = regs;
			tmp = *(long *)(reg_ptr + regno);
		} else
			return -EIO;
	}

	return put_user(tmp, datap);
}

/*
 * Write contents of register REGNO in task TASK.  Same offset scheme
 * as get_reg(); some registers are special-cased or write-protected.
 */
static inline int put_reg(struct task_struct *task, unsigned long regno,
			  unsigned long data)
{
	struct pt_regs *regs = task_pt_regs(task);

	if (regno & 3 || regno > PT_LAST_PSEUDO)
		return -EIO;

	switch (regno) {
	case PT_PC:
	/* At this point the kernel is most likely in exception.
	 * The RETX register will be used to populate the pc of the
	 * process, so both retx and pc are written.
	 */
		regs->retx = data;
		regs->pc = data;
		break;
	case PT_RETX:
		break;		/* regs->retx = data; break; */
	case PT_USP:
		regs->usp = data;
		task->thread.usp = data;
		break;
	case PT_SYSCFG:	/* don't let userspace screw with this */
		/* NOTE(review): only 0x6/0x7 are accepted silently —
		 * presumably the hardware-required SYSCFG base bits plus
		 * the single-step bit; confirm against the Blackfin HRM.
		 */
		if ((data & ~1) != 0x6)
			pr_warning("ptrace: ignore syscfg write of %#lx\n", data);
		break;		/* regs->syscfg = data; break; */
	default:
		if (regno < sizeof(*regs)) {
			void *reg_offset = regs;
			*(long *)(reg_offset + regno) = data;
		}
		/* Ignore writes to pseudo registers */
	}

	return 0;
}

/*
 * check that an address falls within the bounds of the target process's memory mappings:
 * a VMA, an on-chip SRAM allocation, the fixed-code region, or (when
 * configured) the task's L1 stack.  Returns 0 if valid, -EIO otherwise.
 */
int is_user_addr_valid(struct task_struct *child, unsigned long start, unsigned long len)
{
	struct vm_area_struct *vma;
	struct sram_list_struct *sraml;

	/* overflow */
	if (start + len < start)
		return -EIO;

	vma = find_vma(child->mm, start);
	if (vma && start >= vma->vm_start && start + len <= vma->vm_end)
		return 0;

	for (sraml = child->mm->context.sram_list; sraml; sraml = sraml->next)
		if (start >= (unsigned long)sraml->addr &&
		    start + len < (unsigned long)sraml->addr + sraml->length)
			return 0;

	if (start >= FIXED_CODE_START && start + len < FIXED_CODE_END)
		return 0;

#ifdef CONFIG_APP_STACK_L1
	if (child->mm->context.l1_stack_save)
		if (start >= (unsigned long)l1_stack_base &&
		    start + len < (unsigned long)l1_stack_base + l1_stack_len)
			return 0;
#endif

	return -EIO;
}

/*
 * retrieve the contents of Blackfin userspace general registers
 */
static int genregs_get(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* This sucks ...
 */
	/* usp lives in thread state, not pt_regs; sync it in first. */
	regs->usp = target->thread.usp;
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  regs, 0, sizeof(*regs));
	if (ret < 0)
		return ret;

	/* Zero-fill anything the caller asked for beyond pt_regs. */
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					sizeof(*regs), -1);
}

/*
 * update the contents of the Blackfin userspace general registers
 */
static int genregs_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	int ret;

	/* Don't let people set SYSCFG (it's at the end of pt_regs) */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 regs, 0, PT_SYSCFG);
	if (ret < 0)
		return ret;

	/* This sucks ... mirror the written usp back into thread state. */
	target->thread.usp = regs->usp;
	/* regs->retx = regs->pc; */

	/* Silently drop the SYSCFG tail instead of rejecting the write. */
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 PT_SYSCFG, -1);
}

/*
 * Define the register sets available on the Blackfin under Linux
 */
enum bfin_regset {
	REGSET_GENERAL,
};

static const struct user_regset bfin_regsets[] = {
	[REGSET_GENERAL] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct pt_regs) / sizeof(long),
		.size = sizeof(long),
		.align = sizeof(long),
		.get = genregs_get,
		.set = genregs_set,
	},
};

static const struct user_regset_view user_bfin_native_view = {
	.name = "Blackfin",
	.e_machine = EM_BLACKFIN,
	.regsets = bfin_regsets,
	.n = ARRAY_SIZE(bfin_regsets),
};

const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_bfin_native_view;
}

/* Arm hardware single-step for the traced child. */
void user_enable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	regs->syscfg |= SYSCFG_SSSTEP;

	set_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/* Disarm hardware single-step for the traced child. */
void user_disable_single_step(struct task_struct *child)
{
	struct pt_regs *regs = task_pt_regs(child);
	regs->syscfg &= ~SYSCFG_SSSTEP;

	clear_tsk_thread_flag(child, TIF_SINGLESTEP);
}

/*
 * Architecture-specific ptrace request handler; the generic requests
 * fall through to ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *)data;
void *paddr = (void *)addr; switch (request) { /* when I and D space are separate, these will need to be fixed. */ case PTRACE_PEEKDATA: pr_debug("ptrace: PEEKDATA\n"); /* fall through */ case PTRACE_PEEKTEXT: /* read word at location addr. */ { unsigned long tmp = 0; int copied = 0, to_copy = sizeof(tmp); ret = -EIO; pr_debug("ptrace: PEEKTEXT at addr 0x%08lx + %i\n", addr, to_copy); if (is_user_addr_valid(child, addr, to_copy) < 0) break; pr_debug("ptrace: user address is valid\n"); switch (bfin_mem_access_type(addr, to_copy)) { case BFIN_MEM_ACCESS_CORE: case BFIN_MEM_ACCESS_CORE_ONLY: copied = access_process_vm(child, addr, &tmp, to_copy, 0); if (copied) break; /* hrm, why didn't that work ... maybe no mapping */ if (addr >= FIXED_CODE_START && addr + to_copy <= FIXED_CODE_END) { copy_from_user_page(0, 0, 0, &tmp, paddr, to_copy); copied = to_copy; } else if (addr >= BOOT_ROM_START) { memcpy(&tmp, paddr, to_copy); copied = to_copy; } break; case BFIN_MEM_ACCESS_DMA: if (safe_dma_memcpy(&tmp, paddr, to_copy)) copied = to_copy; break; case BFIN_MEM_ACCESS_ITEST: if (isram_memcpy(&tmp, paddr, to_copy)) copied = to_copy; break; default: copied = 0; break; } pr_debug("ptrace: copied size %d [0x%08lx]\n", copied, tmp); if (copied == to_copy) ret = put_user(tmp, datap); break; } /* when I and D space are separate, this will have to be fixed. */ case PTRACE_POKEDATA: pr_debug("ptrace: PTRACE_PEEKDATA\n"); /* fall through */ case PTRACE_POKETEXT: /* write the word at location addr. 
	 */
	{
		int copied = 0, to_copy = sizeof(data);

		ret = -EIO;
		pr_debug("ptrace: POKETEXT at addr 0x%08lx + %i bytes %lx\n",
			 addr, to_copy, data);
		if (is_user_addr_valid(child, addr, to_copy) < 0)
			break;
		pr_debug("ptrace: user address is valid\n");

		/* Same region-based dispatch as the PEEK path, but writing. */
		switch (bfin_mem_access_type(addr, to_copy)) {
		case BFIN_MEM_ACCESS_CORE:
		case BFIN_MEM_ACCESS_CORE_ONLY:
			copied = access_process_vm(child, addr, &data,
						   to_copy, 1);
			break;
		case BFIN_MEM_ACCESS_DMA:
			if (safe_dma_memcpy(paddr, &data, to_copy))
				copied = to_copy;
			break;
		case BFIN_MEM_ACCESS_ITEST:
			if (isram_memcpy(paddr, &data, to_copy))
				copied = to_copy;
			break;
		default:
			copied = 0;
			break;
		}

		pr_debug("ptrace: copied size %d\n", copied);
		if (copied == to_copy)
			ret = 0;
		break;
	}

	case PTRACE_PEEKUSR:
		switch (addr) {
#ifdef CONFIG_BINFMT_ELF_FDPIC	/* backwards compat */
		case PT_FDPIC_EXEC:
			request = PTRACE_GETFDPIC;
			addr = PTRACE_GETFDPIC_EXEC;
			goto case_default;
		case PT_FDPIC_INTERP:
			request = PTRACE_GETFDPIC;
			addr = PTRACE_GETFDPIC_INTERP;
			goto case_default;
#endif
		default:
			ret = get_reg(child, addr, datap);
		}
		pr_debug("ptrace: PEEKUSR reg %li with %#lx = %i\n", addr, data, ret);
		break;

	case PTRACE_POKEUSR:
		ret = put_reg(child, addr, data);
		pr_debug("ptrace: POKEUSR reg %li with %li = %i\n", addr, data, ret);
		break;

	case PTRACE_GETREGS:
		pr_debug("ptrace: PTRACE_GETREGS\n");
		return copy_regset_to_user(child, &user_bfin_native_view,
					   REGSET_GENERAL,
					   0, sizeof(struct pt_regs),
					   datap);

	case PTRACE_SETREGS:
		pr_debug("ptrace: PTRACE_SETREGS\n");
		return copy_regset_from_user(child, &user_bfin_native_view,
					     REGSET_GENERAL,
					     0, sizeof(struct pt_regs),
					     datap);

	case_default:
	default:
		/* Everything else is handled by the generic ptrace core. */
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}

/* Called on syscall entry when TIF_SYSCALL_TRACE is set; a non-zero
 * return aborts the syscall.
 */
asmlinkage int syscall_trace_enter(struct pt_regs *regs)
{
	int ret = 0;

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ret = tracehook_report_syscall_entry(regs);

	return ret;
}

/* Called on syscall exit to report to the tracer, also covering the
 * single-step case.
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	int step;

	step = test_thread_flag(TIF_SINGLESTEP);
	if (step || test_thread_flag(TIF_SYSCALL_TRACE))
		tracehook_report_syscall_exit(regs, step);
}
gpl-2.0