repo_name string | path string | copies string | size string | content string | license string |
|---|---|---|---|---|---|
TheNotOnly/android_kernel_lge_jagnm_lp | sound/core/seq/seq_midi_emul.c | 7974 | 19934 | /*
* GM/GS/XG midi module.
*
* Copyright (C) 1999 Steve Ratcliffe
*
* Based on awe_wave.c by Takashi Iwai
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/*
* This module is used to keep track of the current midi state.
* It can be used for drivers that are required to emulate midi when
* the hardware doesn't.
*
* It was written for an AWE64 driver, but there should be no AWE specific
* code in here. If there is it should be reported as a bug.
*/
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/seq_kernel.h>
#include <sound/seq_midi_emul.h>
#include <sound/initval.h>
#include <sound/asoundef.h>
MODULE_AUTHOR("Takashi Iwai / Steve Ratcliffe");
MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer MIDI emulation.");
MODULE_LICENSE("GPL");
/* Prototypes for static functions */
static void note_off(struct snd_midi_op *ops, void *drv,
struct snd_midi_channel *chan,
int note, int vel);
static void do_control(struct snd_midi_op *ops, void *private,
struct snd_midi_channel_set *chset,
struct snd_midi_channel *chan,
int control, int value);
static void rpn(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
struct snd_midi_channel_set *chset);
static void nrpn(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
struct snd_midi_channel_set *chset);
static void sysex(struct snd_midi_op *ops, void *private, unsigned char *sysex,
int len, struct snd_midi_channel_set *chset);
static void all_sounds_off(struct snd_midi_op *ops, void *private,
struct snd_midi_channel *chan);
static void all_notes_off(struct snd_midi_op *ops, void *private,
struct snd_midi_channel *chan);
static void snd_midi_reset_controllers(struct snd_midi_channel *chan);
static void reset_all_channels(struct snd_midi_channel_set *chset);
/*
* Process an event in a driver independent way. This means dealing
* with RPN, NRPN, SysEx etc that are defined for common midi applications
* such as GM, GS and XG.
* The modes that this module will run in are:
* Generic MIDI - no interpretation at all, it will just save current values
* of controllers etc.
* GM - You can use all gm_ prefixed elements of chan. Controls, RPN, NRPN,
* SysEx will be interpreted as defined in General Midi.
* GS - You can use all gs_ prefixed elements of chan. Codes for GS will be
* interpreted.
* XG - You can use all xg_ prefixed elements of chan. Codes for XG will
* be interpreted.
*/
/**
 * snd_midi_process_event - interpret one sequencer event and update the
 * per-channel MIDI state, invoking the driver callbacks where needed.
 * @ops: driver callback table; every member may be NULL (then the state
 *       is still tracked, only the hardware action is skipped)
 * @ev: the sequencer event to process (may be modified: a NOTEON with
 *      velocity 0 is rewritten into a NOTEOFF)
 * @chanset: channel-state set; @ev's channel must be < max_channels
 */
void
snd_midi_process_event(struct snd_midi_op *ops,
		       struct snd_seq_event *ev,
		       struct snd_midi_channel_set *chanset)
{
	struct snd_midi_channel *chan;
	void *drv;
	int dest_channel = 0;

	if (ev == NULL || chanset == NULL) {
		snd_printd("ev or chanbase NULL (snd_midi_process_event)\n");
		return;
	}
	if (chanset->channels == NULL)
		return;

	/* Validate the destination channel before indexing channels[] */
	if (snd_seq_ev_is_channel_type(ev)) {
		dest_channel = ev->data.note.channel;
		if (dest_channel >= chanset->max_channels) {
			snd_printd("dest channel is %d, max is %d\n",
				   dest_channel, chanset->max_channels);
			return;
		}
	}

	chan = chanset->channels + dest_channel;
	drv = chanset->private_data;

	/* EVENT_NOTE should be processed before queued */
	if (ev->type == SNDRV_SEQ_EVENT_NOTE)
		return;

	/* Make sure that we don't have a note on that should really be
	 * a note off */
	if (ev->type == SNDRV_SEQ_EVENT_NOTEON && ev->data.note.velocity == 0)
		ev->type = SNDRV_SEQ_EVENT_NOTEOFF;

	/* Make sure the note is within array range (chan->note[] has 128
	 * entries, one per MIDI key) */
	if (ev->type == SNDRV_SEQ_EVENT_NOTEON ||
	    ev->type == SNDRV_SEQ_EVENT_NOTEOFF ||
	    ev->type == SNDRV_SEQ_EVENT_KEYPRESS) {
		if (ev->data.note.note >= 128)
			return;
	}

	switch (ev->type) {
	case SNDRV_SEQ_EVENT_NOTEON:
		/* retrigger: silence a still-sounding instance of the key
		 * before starting it again */
		if (chan->note[ev->data.note.note] & SNDRV_MIDI_NOTE_ON) {
			if (ops->note_off)
				ops->note_off(drv, ev->data.note.note, 0, chan);
		}
		chan->note[ev->data.note.note] = SNDRV_MIDI_NOTE_ON;
		if (ops->note_on)
			ops->note_on(drv, ev->data.note.note, ev->data.note.velocity, chan);
		break;
	case SNDRV_SEQ_EVENT_NOTEOFF:
		/* ignore note-off for a key that is not sounding */
		if (! (chan->note[ev->data.note.note] & SNDRV_MIDI_NOTE_ON))
			break;
		if (ops->note_off)
			note_off(ops, drv, chan, ev->data.note.note, ev->data.note.velocity);
		break;
	case SNDRV_SEQ_EVENT_KEYPRESS:
		if (ops->key_press)
			ops->key_press(drv, ev->data.note.note, ev->data.note.velocity, chan);
		break;
	case SNDRV_SEQ_EVENT_CONTROLLER:
		do_control(ops, drv, chanset, chan,
			   ev->data.control.param, ev->data.control.value);
		break;
	case SNDRV_SEQ_EVENT_PGMCHANGE:
		chan->midi_program = ev->data.control.value;
		break;
	case SNDRV_SEQ_EVENT_PITCHBEND:
		chan->midi_pitchbend = ev->data.control.value;
		if (ops->control)
			ops->control(drv, MIDI_CTL_PITCHBEND, chan);
		break;
	case SNDRV_SEQ_EVENT_CHANPRESS:
		chan->midi_pressure = ev->data.control.value;
		if (ops->control)
			ops->control(drv, MIDI_CTL_CHAN_PRESSURE, chan);
		break;
	case SNDRV_SEQ_EVENT_CONTROL14:
		/* Best guess is that this is any of the 14 bit controller values */
		if (ev->data.control.param < 32) {
			/* set low part first */
			chan->control[ev->data.control.param + 32] =
				ev->data.control.value & 0x7f;
			/* then dispatch the MSB half (controllers 0-31) */
			do_control(ops, drv, chanset, chan,
				   ev->data.control.param,
				   ((ev->data.control.value>>7) & 0x7f));
		} else
			do_control(ops, drv, chanset, chan,
				   ev->data.control.param,
				   ev->data.control.value);
		break;
	case SNDRV_SEQ_EVENT_NONREGPARAM:
		/* Break it back into its controller values */
		chan->param_type = SNDRV_MIDI_PARAM_TYPE_NONREGISTERED;
		chan->control[MIDI_CTL_MSB_DATA_ENTRY]
			= (ev->data.control.value >> 7) & 0x7f;
		chan->control[MIDI_CTL_LSB_DATA_ENTRY]
			= ev->data.control.value & 0x7f;
		chan->control[MIDI_CTL_NONREG_PARM_NUM_MSB]
			= (ev->data.control.param >> 7) & 0x7f;
		chan->control[MIDI_CTL_NONREG_PARM_NUM_LSB]
			= ev->data.control.param & 0x7f;
		nrpn(ops, drv, chan, chanset);
		break;
	case SNDRV_SEQ_EVENT_REGPARAM:
		/* Break it back into its controller values */
		chan->param_type = SNDRV_MIDI_PARAM_TYPE_REGISTERED;
		chan->control[MIDI_CTL_MSB_DATA_ENTRY]
			= (ev->data.control.value >> 7) & 0x7f;
		chan->control[MIDI_CTL_LSB_DATA_ENTRY]
			= ev->data.control.value & 0x7f;
		chan->control[MIDI_CTL_REGIST_PARM_NUM_MSB]
			= (ev->data.control.param >> 7) & 0x7f;
		chan->control[MIDI_CTL_REGIST_PARM_NUM_LSB]
			= ev->data.control.param & 0x7f;
		rpn(ops, drv, chan, chanset);
		break;
	case SNDRV_SEQ_EVENT_SYSEX:
		/* only variable-length payloads are handled; messages longer
		 * than the 64-byte stack buffer are truncated by
		 * snd_seq_expand_var_event() */
		if ((ev->flags & SNDRV_SEQ_EVENT_LENGTH_MASK) == SNDRV_SEQ_EVENT_LENGTH_VARIABLE) {
			unsigned char sysexbuf[64];
			int len;
			len = snd_seq_expand_var_event(ev, sizeof(sysexbuf), sysexbuf, 1, 0);
			if (len > 0)
				sysex(ops, drv, sysexbuf, len, chanset);
		}
		break;
	case SNDRV_SEQ_EVENT_SONGPOS:
	case SNDRV_SEQ_EVENT_SONGSEL:
	case SNDRV_SEQ_EVENT_CLOCK:
	case SNDRV_SEQ_EVENT_START:
	case SNDRV_SEQ_EVENT_CONTINUE:
	case SNDRV_SEQ_EVENT_STOP:
	case SNDRV_SEQ_EVENT_QFRAME:
	case SNDRV_SEQ_EVENT_TEMPO:
	case SNDRV_SEQ_EVENT_TIMESIGN:
	case SNDRV_SEQ_EVENT_KEYSIGN:
		goto not_yet;
	case SNDRV_SEQ_EVENT_SENSING:
		break;
	case SNDRV_SEQ_EVENT_CLIENT_START:
	case SNDRV_SEQ_EVENT_CLIENT_EXIT:
	case SNDRV_SEQ_EVENT_CLIENT_CHANGE:
	case SNDRV_SEQ_EVENT_PORT_START:
	case SNDRV_SEQ_EVENT_PORT_EXIT:
	case SNDRV_SEQ_EVENT_PORT_CHANGE:
	case SNDRV_SEQ_EVENT_ECHO:
	not_yet:
	default:
		/*snd_printd("Unimplemented event %d\n", ev->type);*/
		break;
	}
}
/*
* release note
*/
/*
 * Release a single note.  If the sustain pedal (gm_hold) or the
 * sostenuto pedal is holding the key, only flag it as released and let
 * the pedal-release handling in do_control() silence it later;
 * otherwise turn it off immediately via the driver.
 */
static void
note_off(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
	 int note, int vel)
{
	int pedal_held = chan->gm_hold ||
		(chan->note[note] & SNDRV_MIDI_NOTE_SOSTENUTO);

	if (pedal_held) {
		/* keep sounding; pedal release will finish the job */
		chan->note[note] |= SNDRV_MIDI_NOTE_RELEASED;
		return;
	}

	chan->note[note] = 0;
	if (ops->note_off)
		ops->note_off(drv, note, vel, chan);
}
/*
* Do all driver independent operations for this controller and pass
* events that need to take place immediately to the driver.
*/
/*
 * Do all driver independent operations for this controller and pass
 * events that need to take place immediately to the driver.
 *
 * @control comes straight from the (untrusted) sequencer event, so it
 * is range-checked against chan->control[] before any array access.
 */
static void
do_control(struct snd_midi_op *ops, void *drv, struct snd_midi_channel_set *chset,
	   struct snd_midi_channel *chan, int control, int value)
{
	int i;

	/* Fix: the original wrote chan->control[control] without any bounds
	 * check, allowing an out-of-bounds write for controller numbers
	 * >= ARRAY_SIZE(chan->control) delivered via a crafted event. */
	if (control < 0 || control >= (int)ARRAY_SIZE(chan->control))
		return;

	/* Switches */
	if ((control >= 64 && control <= 69) || (control >= 80 && control <= 83)) {
		/* These are all switches; either off or on so set to 0 or 127 */
		value = (value >= 64) ? 127 : 0;
	}
	chan->control[control] = value;

	switch (control) {
	case MIDI_CTL_SUSTAIN:
		if (value == 0) {
			/* Sustain has been released, turn off held notes */
			for (i = 0; i < 128; i++) {
				if (chan->note[i] & SNDRV_MIDI_NOTE_RELEASED) {
					chan->note[i] = SNDRV_MIDI_NOTE_OFF;
					if (ops->note_off)
						ops->note_off(drv, i, 0, chan);
				}
			}
		}
		break;
	case MIDI_CTL_PORTAMENTO:
		break;
	case MIDI_CTL_SOSTENUTO:
		if (value) {
			/* Mark each note that is currently held down */
			for (i = 0; i < 128; i++) {
				if (chan->note[i] & SNDRV_MIDI_NOTE_ON)
					chan->note[i] |= SNDRV_MIDI_NOTE_SOSTENUTO;
			}
		} else {
			/* release all notes that were held */
			for (i = 0; i < 128; i++) {
				if (chan->note[i] & SNDRV_MIDI_NOTE_SOSTENUTO) {
					chan->note[i] &= ~SNDRV_MIDI_NOTE_SOSTENUTO;
					if (chan->note[i] & SNDRV_MIDI_NOTE_RELEASED) {
						chan->note[i] = SNDRV_MIDI_NOTE_OFF;
						if (ops->note_off)
							ops->note_off(drv, i, 0, chan);
					}
				}
			}
		}
		break;
	case MIDI_CTL_MSB_DATA_ENTRY:
		chan->control[MIDI_CTL_LSB_DATA_ENTRY] = 0;
		/* fall through */
	case MIDI_CTL_LSB_DATA_ENTRY:
		/* data entry targets whichever parameter number (RPN/NRPN)
		 * was selected last */
		if (chan->param_type == SNDRV_MIDI_PARAM_TYPE_REGISTERED)
			rpn(ops, drv, chan, chset);
		else
			nrpn(ops, drv, chan, chset);
		break;
	case MIDI_CTL_REGIST_PARM_NUM_LSB:
	case MIDI_CTL_REGIST_PARM_NUM_MSB:
		chan->param_type = SNDRV_MIDI_PARAM_TYPE_REGISTERED;
		break;
	case MIDI_CTL_NONREG_PARM_NUM_LSB:
	case MIDI_CTL_NONREG_PARM_NUM_MSB:
		chan->param_type = SNDRV_MIDI_PARAM_TYPE_NONREGISTERED;
		break;
	case MIDI_CTL_ALL_SOUNDS_OFF:
		all_sounds_off(ops, drv, chan);
		break;
	case MIDI_CTL_ALL_NOTES_OFF:
		all_notes_off(ops, drv, chan);
		break;
	case MIDI_CTL_MSB_BANK:
		/* in XG mode, bank MSB 127 selects a drum part */
		if (chset->midi_mode == SNDRV_MIDI_MODE_XG) {
			if (value == 127)
				chan->drum_channel = 1;
			else
				chan->drum_channel = 0;
		}
		break;
	case MIDI_CTL_LSB_BANK:
		break;
	case MIDI_CTL_RESET_CONTROLLERS:
		snd_midi_reset_controllers(chan);
		break;
	case MIDI_CTL_SOFT_PEDAL:
	case MIDI_CTL_LEGATO_FOOTSWITCH:
	case MIDI_CTL_HOLD2:
	case MIDI_CTL_SC1_SOUND_VARIATION:
	case MIDI_CTL_SC2_TIMBRE:
	case MIDI_CTL_SC3_RELEASE_TIME:
	case MIDI_CTL_SC4_ATTACK_TIME:
	case MIDI_CTL_SC5_BRIGHTNESS:
	case MIDI_CTL_E1_REVERB_DEPTH:
	case MIDI_CTL_E2_TREMOLO_DEPTH:
	case MIDI_CTL_E3_CHORUS_DEPTH:
	case MIDI_CTL_E4_DETUNE_DEPTH:
	case MIDI_CTL_E5_PHASER_DEPTH:
		goto notyet;
	notyet:
	default:
		/* not interpreted here; hand the raw controller to the driver */
		if (ops->control)
			ops->control(drv, control, chan);
		break;
	}
}
/*
* initialize the MIDI status
*/
/*
 * Reset a whole channel set to its power-on MIDI state: GM mode, full
 * master volume, all notes cleared, controllers and RPN values at
 * their defaults, channel 10 flagged as the drum channel.
 */
void
snd_midi_channel_set_clear(struct snd_midi_channel_set *chset)
{
	int ch;

	chset->midi_mode = SNDRV_MIDI_MODE_GM;
	chset->gs_master_volume = 127;

	for (ch = 0; ch < chset->max_channels; ch++) {
		struct snd_midi_channel *chan = &chset->channels[ch];

		memset(chan->note, 0, sizeof(chan->note));
		chan->midi_aftertouch = 0;
		chan->midi_pressure = 0;
		chan->midi_program = 0;
		chan->midi_pitchbend = 0;
		snd_midi_reset_controllers(chan);
		chan->gm_rpn_pitch_bend_range = 256;	/* 2 semitones */
		chan->gm_rpn_fine_tuning = 0;
		chan->gm_rpn_coarse_tuning = 0;
		chan->drum_channel = (ch == 9) ? 1 : 0;	/* MIDI ch 10 = drums */
	}
}
/*
* Process a rpn message.
*/
/*
 * Apply a completed Registered Parameter Number message: combine the
 * parameter-number and data-entry controller bytes already stored in
 * chan->control[] and update the corresponding gm_rpn_* field.
 * Ignored entirely in "no interpretation" (NONE) mode.
 */
static void
rpn(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
    struct snd_midi_channel_set *chset)
{
	int type, val;

	if (chset->midi_mode == SNDRV_MIDI_MODE_NONE)
		return;

	type = (chan->control[MIDI_CTL_REGIST_PARM_NUM_MSB] << 8) |
		chan->control[MIDI_CTL_REGIST_PARM_NUM_LSB];
	val = (chan->control[MIDI_CTL_MSB_DATA_ENTRY] << 7) |
		chan->control[MIDI_CTL_LSB_DATA_ENTRY];

	switch (type) {
	case 0x0000:
		/* pitch bend sensitivity; MSB only / 1 semitone per 128 */
		chan->gm_rpn_pitch_bend_range = val;
		break;
	case 0x0001:
		/* fine tuning; MSB/LSB, 8192=center, 100/8192 cent step */
		chan->gm_rpn_fine_tuning = val - 8192;
		break;
	case 0x0002:
		/* coarse tuning; MSB only / 8192=center, 1 semitone per 128 */
		chan->gm_rpn_coarse_tuning = val - 8192;
		break;
	case 0x7F7F:
		/* RPN "lock-in"/null: deliberately ignored */
		break;
	}
	/* should call nrpn or rpn callback here.. */
}
/*
* Process an nrpn message.
*/
static void
nrpn(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan,
struct snd_midi_channel_set *chset)
{
/* parse XG NRPNs here if possible */
if (ops->nrpn)
ops->nrpn(drv, chan, chset);
}
/*
* convert channel parameter in GS sysex
*/
/*
 * Convert the part number encoded in the low nibble of a GS sysex
 * address byte into a zero-based MIDI channel: part 0 means channel 10
 * (index 9), parts 1-9 map to channels 1-9 (indices 0-8), and parts
 * 10-15 are used unchanged.
 */
static int
get_channel(unsigned char cmd)
{
	int part = cmd & 0x0f;

	if (part == 0)
		return 9;
	if (part < 10)
		return part - 1;
	return part;
}
/*
* Process a sysex message.
*/
/*
 * Process a sysex message.
 *
 * @buf must start with the 0xf0 status byte (checked); the leading byte
 * is stripped before matching against the GM-on, GS-prefix and XG-on
 * byte patterns.  Recognized messages update the chanset mode/state and
 * set `parsed' accordingly; the raw message (including the 0xf0) is
 * always forwarded to the driver's sysex callback afterwards.
 */
static void
sysex(struct snd_midi_op *ops, void *private, unsigned char *buf, int len,
      struct snd_midi_channel_set *chset)
{
	/* GM on */
	static unsigned char gm_on_macro[] = {
		0x7e,0x7f,0x09,0x01,
	};
	/* XG on */
	static unsigned char xg_on_macro[] = {
		0x43,0x10,0x4c,0x00,0x00,0x7e,0x00,
	};
	/* GS prefix
	 * drum channel: XX=0x1?(channel), YY=0x15, ZZ=on/off
	 * reverb mode: XX=0x01, YY=0x30, ZZ=0-7
	 * chorus mode: XX=0x01, YY=0x38, ZZ=0-7
	 * master vol: XX=0x00, YY=0x04, ZZ=0-127
	 */
	static unsigned char gs_pfx_macro[] = {
		0x41,0x10,0x42,0x12,0x40,/*XX,YY,ZZ*/
	};
	int parsed = SNDRV_MIDI_SYSEX_NOT_PARSED;

	if (len <= 0 || buf[0] != 0xf0)
		return;
	/* skip first byte */
	buf++;
	len--;

	/* GM on */
	if (len >= (int)sizeof(gm_on_macro) &&
	    memcmp(buf, gm_on_macro, sizeof(gm_on_macro)) == 0) {
		/* GM-on is ignored while in GS or XG mode */
		if (chset->midi_mode != SNDRV_MIDI_MODE_GS &&
		    chset->midi_mode != SNDRV_MIDI_MODE_XG) {
			chset->midi_mode = SNDRV_MIDI_MODE_GM;
			reset_all_channels(chset);
			parsed = SNDRV_MIDI_SYSEX_GM_ON;
		}
	}

	/* GS macros */
	else if (len >= 8 &&
		 memcmp(buf, gs_pfx_macro, sizeof(gs_pfx_macro)) == 0) {
		/* len >= 8 guarantees buf[5..7] (the XX,YY,ZZ bytes) exist */
		if (chset->midi_mode != SNDRV_MIDI_MODE_GS &&
		    chset->midi_mode != SNDRV_MIDI_MODE_XG)
			chset->midi_mode = SNDRV_MIDI_MODE_GS;

		if (buf[5] == 0x00 && buf[6] == 0x7f && buf[7] == 0x00) {
			/* GS reset */
			parsed = SNDRV_MIDI_SYSEX_GS_RESET;
			reset_all_channels(chset);
		}

		else if ((buf[5] & 0xf0) == 0x10 && buf[6] == 0x15) {
			/* drum pattern */
			int p = get_channel(buf[5]);
			if (p < chset->max_channels) {
				parsed = SNDRV_MIDI_SYSEX_GS_DRUM_CHANNEL;
				if (buf[7])
					chset->channels[p].drum_channel = 1;
				else
					chset->channels[p].drum_channel = 0;
			}
		} else if ((buf[5] & 0xf0) == 0x10 && buf[6] == 0x21) {
			/* program change (melodic parts only) */
			int p = get_channel(buf[5]);
			if (p < chset->max_channels &&
			    ! chset->channels[p].drum_channel) {
				/* NOTE(review): reports GS_DRUM_CHANNEL even
				 * though this sets midi_program — looks like a
				 * copy/paste leftover; confirm intent */
				parsed = SNDRV_MIDI_SYSEX_GS_DRUM_CHANNEL;
				chset->channels[p].midi_program = buf[7];
			}
		} else if (buf[5] == 0x01 && buf[6] == 0x30) {
			/* reverb mode */
			parsed = SNDRV_MIDI_SYSEX_GS_REVERB_MODE;
			chset->gs_reverb_mode = buf[7];
		} else if (buf[5] == 0x01 && buf[6] == 0x38) {
			/* chorus mode */
			parsed = SNDRV_MIDI_SYSEX_GS_CHORUS_MODE;
			chset->gs_chorus_mode = buf[7];
		} else if (buf[5] == 0x00 && buf[6] == 0x04) {
			/* master volume */
			parsed = SNDRV_MIDI_SYSEX_GS_MASTER_VOLUME;
			chset->gs_master_volume = buf[7];
		}
	}

	/* XG on */
	else if (len >= (int)sizeof(xg_on_macro) &&
		 memcmp(buf, xg_on_macro, sizeof(xg_on_macro)) == 0) {
		int i;
		chset->midi_mode = SNDRV_MIDI_MODE_XG;
		parsed = SNDRV_MIDI_SYSEX_XG_ON;
		/* reset CC#0 for drums */
		for (i = 0; i < chset->max_channels; i++) {
			if (chset->channels[i].drum_channel)
				chset->channels[i].control[MIDI_CTL_MSB_BANK] = 127;
			else
				chset->channels[i].control[MIDI_CTL_MSB_BANK] = 0;
		}
	}

	/* pass the original message (0xf0 byte restored) to the driver */
	if (ops->sysex)
		ops->sysex(private, buf - 1, len + 1, parsed, chset);
}
/*
* all sound off
*/
static void
all_sounds_off(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan)
{
int n;
if (! ops->note_terminate)
return;
for (n = 0; n < 128; n++) {
if (chan->note[n]) {
ops->note_terminate(drv, n, chan);
chan->note[n] = 0;
}
}
}
/*
* all notes off
*/
static void
all_notes_off(struct snd_midi_op *ops, void *drv, struct snd_midi_channel *chan)
{
int n;
if (! ops->note_off)
return;
for (n = 0; n < 128; n++) {
if (chan->note[n] == SNDRV_MIDI_NOTE_ON)
note_off(ops, drv, chan, n, 0);
}
}
/*
* Initialise a single midi channel control block.
*/
/*
 * Initialise a single midi channel control block: zero it, record its
 * channel number, and load the GM power-on defaults.
 */
static void snd_midi_channel_init(struct snd_midi_channel *p, int n)
{
	if (!p)
		return;

	memset(p, 0, sizeof(*p));
	p->private = NULL;
	p->number = n;

	snd_midi_reset_controllers(p);
	p->gm_rpn_pitch_bend_range = 256;	/* 2 semitones */
	p->gm_rpn_fine_tuning = 0;
	p->gm_rpn_coarse_tuning = 0;
	if (n == 9)
		p->drum_channel = 1;	/* Default ch 10 as drums */
}
/*
* Allocate and initialise a set of midi channel control blocks.
*/
/*
 * Allocate and initialise a set of @n midi channel control blocks.
 * Returns NULL on allocation failure.
 */
static struct snd_midi_channel *snd_midi_channel_init_set(int n)
{
	struct snd_midi_channel *chan;
	int i;

	/* kcalloc() checks the n * size multiplication for overflow,
	 * unlike the previous open-coded kmalloc(n * sizeof(...)). */
	chan = kcalloc(n, sizeof(struct snd_midi_channel), GFP_KERNEL);
	if (chan) {
		for (i = 0; i < n; i++)
			snd_midi_channel_init(chan + i, i);
	}
	return chan;
}
/*
* reset all midi channels
*/
/*
 * Reset every channel in the set to GM defaults (used after GM-on and
 * GS-reset sysex messages).  Unlike snd_midi_channel_set_clear() this
 * leaves the note[] state and program/pitchbend values untouched.
 */
static void
reset_all_channels(struct snd_midi_channel_set *chset)
{
	int ch;

	for (ch = 0; ch < chset->max_channels; ch++) {
		struct snd_midi_channel *chan = &chset->channels[ch];

		snd_midi_reset_controllers(chan);
		chan->gm_rpn_pitch_bend_range = 256;	/* 2 semitones */
		chan->gm_rpn_fine_tuning = 0;
		chan->gm_rpn_coarse_tuning = 0;
		chan->drum_channel = (ch == 9) ? 1 : 0;	/* MIDI ch 10 = drums */
	}
}
/*
* Allocate and initialise a midi channel set.
*/
/*
 * Allocate and initialise a midi channel set for @n channels.
 * Returns NULL if the set itself cannot be allocated; note that
 * chset->channels may still be NULL if the per-channel allocation
 * failed — callers must check it (snd_midi_process_event does).
 */
struct snd_midi_channel_set *snd_midi_channel_alloc_set(int n)
{
	struct snd_midi_channel_set *chset;

	chset = kmalloc(sizeof(*chset), GFP_KERNEL);
	if (chset == NULL)
		return NULL;

	chset->channels = snd_midi_channel_init_set(n);
	chset->private_data = NULL;
	chset->max_channels = n;
	return chset;
}
/*
* Reset the midi controllers on a particular channel to default values.
*/
/*
 * Reset the midi controllers on a particular channel to default values:
 * everything zeroed, then volume/expression at maximum and pan centered.
 */
static void snd_midi_reset_controllers(struct snd_midi_channel *chan)
{
	memset(chan->control, 0, sizeof(chan->control));
	chan->gm_volume = 127;
	chan->gm_expression = 127;
	chan->gm_pan = 64;	/* center */
}
/*
* Free a midi channel set.
*/
/*
 * Free a midi channel set allocated with snd_midi_channel_alloc_set().
 * NULL is accepted and ignored.
 */
void snd_midi_channel_free_set(struct snd_midi_channel_set *chset)
{
	if (!chset)
		return;
	kfree(chset->channels);
	kfree(chset);
}
/* Module stubs: this module only exports helper functions, so there is
 * nothing to set up or tear down at load/unload time. */
static int __init alsa_seq_midi_emul_init(void)
{
	return 0;
}

static void __exit alsa_seq_midi_emul_exit(void)
{
}

module_init(alsa_seq_midi_emul_init)
module_exit(alsa_seq_midi_emul_exit)

/* Public API used by MIDI-emulating synth drivers */
EXPORT_SYMBOL(snd_midi_process_event);
EXPORT_SYMBOL(snd_midi_channel_set_clear);
EXPORT_SYMBOL(snd_midi_channel_alloc_set);
EXPORT_SYMBOL(snd_midi_channel_free_set);
| gpl-2.0 |
miquelmartos/gp-kernel-peak | arch/xtensa/platforms/iss/setup.c | 9510 | 2342 | /*
*
* arch/xtensa/platform-iss/setup.c
*
* Platform specific initialization.
*
* Authors: Chris Zankel <chris@zankel.net>
* Joe Taylor <joe@tensilica.com>
*
* Copyright 2001 - 2005 Tensilica Inc.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/stddef.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/reboot.h>
#include <linux/kdev_t.h>
#include <linux/types.h>
#include <linux/major.h>
#include <linux/blkdev.h>
#include <linux/console.h>
#include <linux/delay.h>
#include <linux/stringify.h>
#include <linux/notifier.h>
#include <asm/platform.h>
#include <asm/bootparam.h>
/* Early platform init hook; nothing to do on the ISS simulator. */
void __init platform_init(bp_tag_t* bootparam)
{
}
/*
 * Halt hook: announce the halt on the console, then spin forever —
 * the simulator has no mechanism to actually power down.
 */
void platform_halt(void)
{
	printk(" ** Called platform_halt(), looping forever! **\n");
	for (;;)
		;
}
/*
 * Power-off hook: same as halt — print a notice and spin forever.
 */
void platform_power_off(void)
{
	printk(" ** Called platform_power_off(), looping forever! **\n");
	for (;;)
		;
}
/*
 * Restart hook: simulate a processor reset in inline assembly —
 * raise ICOUNTLEVEL, clear ICOUNT/IBREAKENABLE/LCOUNT, set PS to 0x1f,
 * sync, and jump to the reset vector.  Never returns.
 */
void platform_restart(void)
{
	/* Flush and reset the mmu, simulate a processor reset, and
	 * jump to the reset vector. */
	__asm__ __volatile__("movi a2, 15\n\t"
			     "wsr a2, " __stringify(ICOUNTLEVEL) "\n\t"
			     "movi a2, 0\n\t"
			     "wsr a2, " __stringify(ICOUNT) "\n\t"
			     "wsr a2, " __stringify(IBREAKENABLE) "\n\t"
			     "wsr a2, " __stringify(LCOUNT) "\n\t"
			     "movi a2, 0x1f\n\t"
			     "wsr a2, " __stringify(PS) "\n\t"
			     "isync\n\t"
			     "jx %0\n\t"
			     :
			     : "a" (XCHAL_RESET_VECTOR_VADDR)
			     : "a2");
	/* control never gets here */
}
extern void iss_net_poll(void);

/* Spinner characters for the (currently #if 0'd) heartbeat below.
 * NOTE(review): only referenced inside that disabled block; could be
 * made static — confirm no out-of-file users first. */
const char twirl[]="|/-\\|/-\\";
/* Heartbeat hook: a console spinner updated every 100 calls, currently
 * compiled out (#if 0), leaving this a no-op. */
void platform_heartbeat(void)
{
#if 0
	static int i = 0, j = 0;

	if (--i < 0) {
		i = 99;
		printk("\r%c\r", twirl[j++]);
		if (j == 8)
			j = 0;
	}
#endif
}
/*
 * Panic notifier callback: issue a simcall with a2 = -1 so the
 * instruction-set simulator exits when the kernel panics.
 */
static int
iss_panic_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	__asm__ __volatile__("movi a2, -1; simcall\n");
	return NOTIFY_DONE;
}
static struct notifier_block iss_panic_block = {
iss_panic_event,
NULL,
0
};
/* Late platform setup: register the panic notifier so a kernel panic
 * terminates the simulator instead of hanging it. */
void __init platform_setup(char **p_cmdline)
{
	atomic_notifier_chain_register(&panic_notifier_list, &iss_panic_block);
}
| gpl-2.0 |
ZdrowyGosciu/kernel_g900f | sound/core/seq/seq_compat.c | 13094 | 4715 | /*
* 32bit -> 64bit ioctl wrapper for sequencer API
* Copyright (c) by Takashi Iwai <tiwai@suse.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
*/
/* This file included from seq.c */
#include <linux/compat.h>
#include <linux/slab.h>
/* 32-bit userland layout of struct snd_seq_port_info.  The `kernel'
 * member is a 32-bit value here but a pointer in the native struct,
 * so the two layouts differ in size and need the explicit translation
 * done in snd_seq_call_port_info_ioctl(). */
struct snd_seq_port_info32 {
	struct snd_seq_addr addr;	/* client/port numbers */
	char name[64];			/* port name */
	u32 capability;			/* port capability bits */
	u32 type;			/* port type bits */
	s32 midi_channels;		/* channels per MIDI port */
	s32 midi_voices;		/* voices per MIDI port */
	s32 synth_voices;		/* voices per SYNTH port */
	s32 read_use;			/* R/O: subscribers for output (from this port) */
	s32 write_use;			/* R/O: subscribers for input (to this port) */
	u32 kernel;			/* reserved for kernel use (must be NULL) */
	u32 flags;			/* misc. conditioning */
	unsigned char time_queue;	/* queue # for timestamping */
	char reserved[59];		/* for future use */
};
/*
 * Translate a 32-bit port-info ioctl into the native layout, run the
 * native ioctl handler, and copy the result back in 32-bit form.
 */
static int snd_seq_call_port_info_ioctl(struct snd_seq_client *client, unsigned int cmd,
					struct snd_seq_port_info32 __user *data32)
{
	int err = -EFAULT;
	struct snd_seq_port_info *data;
	mm_segment_t fs;

	/* Fix: allocate the full native structure.  The old
	 * memdup_user(data32, sizeof(*data32)) only allocated the smaller
	 * compat layout, so snd_seq_do_ioctl() writing the native struct
	 * overflowed the heap buffer. */
	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	if (copy_from_user(data, data32, sizeof(*data32)) ||
	    get_user(data->flags, &data32->flags) ||
	    get_user(data->time_queue, &data32->time_queue))
		goto error;
	data->kernel = NULL;	/* userland must never pass a kernel pointer */

	fs = snd_enter_user();
	err = snd_seq_do_ioctl(client, cmd, data);
	snd_leave_user(fs);
	if (err < 0)
		goto error;

	if (copy_to_user(data32, data, sizeof(*data32)) ||
	    put_user(data->flags, &data32->flags) ||
	    put_user(data->time_queue, &data32->time_queue))
		err = -EFAULT;

 error:
	kfree(data);
	return err;
}
/*
*/
/* Compat ioctl numbers: same command codes as the native API, but the
 * size encoded in the ioctl number matches the 32-bit struct layout. */
enum {
	SNDRV_SEQ_IOCTL_CREATE_PORT32 = _IOWR('S', 0x20, struct snd_seq_port_info32),
	SNDRV_SEQ_IOCTL_DELETE_PORT32 = _IOW ('S', 0x21, struct snd_seq_port_info32),
	SNDRV_SEQ_IOCTL_GET_PORT_INFO32 = _IOWR('S', 0x22, struct snd_seq_port_info32),
	SNDRV_SEQ_IOCTL_SET_PORT_INFO32 = _IOW ('S', 0x23, struct snd_seq_port_info32),
	SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT32 = _IOWR('S', 0x52, struct snd_seq_port_info32),
};
/*
 * 32-bit compat ioctl entry point.  Commands whose payload layout is
 * identical on 32- and 64-bit are forwarded directly to
 * snd_seq_do_ioctl(); the port-info commands need layout translation
 * and go through snd_seq_call_port_info_ioctl().
 */
static long snd_seq_ioctl_compat(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct snd_seq_client *client = file->private_data;
	void __user *argp = compat_ptr(arg);

	if (snd_BUG_ON(!client))
		return -ENXIO;

	switch (cmd) {
	/* layout-compatible commands: pass straight through */
	case SNDRV_SEQ_IOCTL_PVERSION:
	case SNDRV_SEQ_IOCTL_CLIENT_ID:
	case SNDRV_SEQ_IOCTL_SYSTEM_INFO:
	case SNDRV_SEQ_IOCTL_GET_CLIENT_INFO:
	case SNDRV_SEQ_IOCTL_SET_CLIENT_INFO:
	case SNDRV_SEQ_IOCTL_SUBSCRIBE_PORT:
	case SNDRV_SEQ_IOCTL_UNSUBSCRIBE_PORT:
	case SNDRV_SEQ_IOCTL_CREATE_QUEUE:
	case SNDRV_SEQ_IOCTL_DELETE_QUEUE:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_INFO:
	case SNDRV_SEQ_IOCTL_SET_QUEUE_INFO:
	case SNDRV_SEQ_IOCTL_GET_NAMED_QUEUE:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_STATUS:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_TEMPO:
	case SNDRV_SEQ_IOCTL_SET_QUEUE_TEMPO:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_TIMER:
	case SNDRV_SEQ_IOCTL_SET_QUEUE_TIMER:
	case SNDRV_SEQ_IOCTL_GET_QUEUE_CLIENT:
	case SNDRV_SEQ_IOCTL_SET_QUEUE_CLIENT:
	case SNDRV_SEQ_IOCTL_GET_CLIENT_POOL:
	case SNDRV_SEQ_IOCTL_SET_CLIENT_POOL:
	case SNDRV_SEQ_IOCTL_REMOVE_EVENTS:
	case SNDRV_SEQ_IOCTL_QUERY_SUBS:
	case SNDRV_SEQ_IOCTL_GET_SUBSCRIPTION:
	case SNDRV_SEQ_IOCTL_QUERY_NEXT_CLIENT:
	case SNDRV_SEQ_IOCTL_RUNNING_MODE:
		return snd_seq_do_ioctl(client, cmd, argp);
	/* port-info commands: struct layout differs, translate */
	case SNDRV_SEQ_IOCTL_CREATE_PORT32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_CREATE_PORT, argp);
	case SNDRV_SEQ_IOCTL_DELETE_PORT32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_DELETE_PORT, argp);
	case SNDRV_SEQ_IOCTL_GET_PORT_INFO32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_GET_PORT_INFO, argp);
	case SNDRV_SEQ_IOCTL_SET_PORT_INFO32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_SET_PORT_INFO, argp);
	case SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT32:
		return snd_seq_call_port_info_ioctl(client, SNDRV_SEQ_IOCTL_QUERY_NEXT_PORT, argp);
	}
	return -ENOIOCTLCMD;
}
| gpl-2.0 |
yacc2000/limbo-android | jni/SDL/src/haptic/windows/SDL_syshaptic.c | 39 | 39945 | /*
Simple DirectMedia Layer
Copyright (C) 1997-2012 Sam Lantinga <slouken@libsdl.org>
This software is provided 'as-is', without any express or implied
warranty. In no event will the authors be held liable for any damages
arising from the use of this software.
Permission is granted to anyone to use this software for any purpose,
including commercial applications, and to alter it and redistribute it
freely, subject to the following restrictions:
1. The origin of this software must not be misrepresented; you must not
claim that you wrote the original software. If you use this software
in a product, an acknowledgment in the product documentation would be
appreciated but is not required.
2. Altered source versions must be plainly marked as such, and must not be
misrepresented as being the original software.
3. This notice may not be removed or altered from any source distribution.
*/
#include "SDL_config.h"
#ifdef SDL_HAPTIC_DINPUT
#include "SDL_haptic.h"
#include "../SDL_syshaptic.h"
#include "SDL_joystick.h"
#include "../../joystick/SDL_sysjoystick.h" /* For the real SDL_Joystick */
#include "../../joystick/windows/SDL_dxjoystick_c.h" /* For joystick hwdata */
#define MAX_HAPTICS 32
/*
* List of available haptic devices.
*/
/*
 * List of available haptic devices, filled by EnumHapticsCallback()
 * during SDL_SYS_HapticInit(); indexed by the public haptic index.
 */
static struct
{
    DIDEVICEINSTANCE instance;  /* DirectInput device instance (GUIDs etc.) */
    char *name;                 /* UTF-8 product name */
    SDL_Haptic *haptic;         /* open handle, if any */
    DIDEVCAPS capabilities;     /* cached DirectInput capabilities */
} SDL_hapticlist[MAX_HAPTICS];
/*
* Haptic system hardware data.
*/
struct haptic_hwdata
{
LPDIRECTINPUTDEVICE2 device;
DWORD axes[3]; /* Axes to use. */
int is_joystick; /* Device is loaded as joystick. */
};
/*
* Haptic system effect data.
*/
/*
 * Haptic system effect data: the SDL-side DIEFFECT description plus
 * the created DirectInput effect object.
 */
struct haptic_hweffect
{
    DIEFFECT effect;
    LPDIRECTINPUTEFFECT ref;
};
/*
* Internal stuff.
*/
static SDL_bool coinitialized = SDL_FALSE;
static LPDIRECTINPUT dinput = NULL;
/*
* External stuff.
*/
extern HWND SDL_HelperWindow;
/*
* Prototypes.
*/
static void DI_SetError(const char *str, HRESULT err);
static int DI_GUIDIsSame(const GUID * a, const GUID * b);
static int SDL_SYS_HapticOpenFromInstance(SDL_Haptic * haptic,
DIDEVICEINSTANCE instance);
static int SDL_SYS_HapticOpenFromDevice2(SDL_Haptic * haptic,
LPDIRECTINPUTDEVICE2 device2);
static DWORD DIGetTriggerButton(Uint16 button);
static int SDL_SYS_SetDirection(DIEFFECT * effect, SDL_HapticDirection * dir,
int naxes);
static int SDL_SYS_ToDIEFFECT(SDL_Haptic * haptic, DIEFFECT * dest,
SDL_HapticEffect * src);
static void SDL_SYS_HapticFreeDIEFFECT(DIEFFECT * effect, int type);
static REFGUID SDL_SYS_HapticEffectType(SDL_HapticEffect * effect);
/* Callbacks. */
static BOOL CALLBACK EnumHapticsCallback(const DIDEVICEINSTANCE *
pdidInstance, VOID * pContext);
static BOOL CALLBACK DI_EffectCallback(LPCDIEFFECTINFO pei, LPVOID pv);
/*
* Like SDL_SetError but for DX error codes.
*/
/*
 * Like SDL_SetError but for DX error codes.  The richer variant using
 * DXGetErrorString8A is kept commented out below; the HRESULT is
 * currently not included in the reported message.
 */
static void
DI_SetError(const char *str, HRESULT err)
{
    /*
    SDL_SetError("Haptic: %s - %s: %s", str,
                 DXGetErrorString8A(err), DXGetErrorDescription8A(err));
    */
    SDL_SetError("Haptic error %s", str);
}
/*
* Checks to see if two GUID are the same.
*/
/*
 * Test whether two GUIDs are bitwise identical.
 * Returns non-zero when they match.
 */
static int
DI_GUIDIsSame(const GUID * a, const GUID * b)
{
    if (SDL_memcmp(a, b, sizeof(GUID)) != 0) {
        return 0;
    }
    return 1;
}
/*
* Initializes the haptic subsystem.
*/
/*
 * Initializes the haptic subsystem: brings up COM, creates and
 * initializes the DirectInput interface, then enumerates attached
 * force-feedback devices into SDL_hapticlist.
 * Returns the number of devices found, or -1 on error (SDL error set).
 */
int
SDL_SYS_HapticInit(void)
{
    HRESULT ret;
    HINSTANCE instance;

    if (dinput != NULL) {       /* Already open. */
        SDL_SetError("Haptic: SubSystem already open.");
        return -1;
    }

    /* Clear all the memory. */
    SDL_memset(SDL_hapticlist, 0, sizeof(SDL_hapticlist));
    SDL_numhaptics = 0;

    ret = WIN_CoInitialize();
    if (FAILED(ret)) {
        DI_SetError("Coinitialize", ret);
        return -1;
    }

    coinitialized = SDL_TRUE;

    ret = CoCreateInstance(&CLSID_DirectInput, NULL, CLSCTX_INPROC_SERVER,
                           &IID_IDirectInput, (LPVOID) & dinput);
    if (FAILED(ret)) {
        SDL_SYS_HapticQuit();   /* undo partial init on every failure path */
        DI_SetError("CoCreateInstance", ret);
        return -1;
    }

    /* Because we used CoCreateInstance, we need to Initialize it, first. */
    instance = GetModuleHandle(NULL);
    if (instance == NULL) {
        SDL_SYS_HapticQuit();
        SDL_SetError("GetModuleHandle() failed with error code %d.",
                     GetLastError());
        return -1;
    }
    ret = IDirectInput_Initialize(dinput, instance, DIRECTINPUT_VERSION);
    if (FAILED(ret)) {
        SDL_SYS_HapticQuit();
        DI_SetError("Initializing DirectInput device", ret);
        return -1;
    }

    /* Look for haptic devices: only attached force-feedback capable ones. */
    ret = IDirectInput_EnumDevices(dinput,
                                   0,
                                   EnumHapticsCallback,
                                   NULL,
                                   DIEDFL_FORCEFEEDBACK |
                                   DIEDFL_ATTACHEDONLY);
    if (FAILED(ret)) {
        SDL_SYS_HapticQuit();
        DI_SetError("Enumerating DirectInput devices", ret);
        return -1;
    }

    return SDL_numhaptics;
}
/*
* Callback to find the haptic devices.
*/
/*
 * DirectInput enumeration callback: records each force-feedback device
 * (instance, capabilities, UTF-8 name) into SDL_hapticlist.  Devices
 * that fail to open or report capabilities are silently skipped.
 */
static BOOL CALLBACK
EnumHapticsCallback(const DIDEVICEINSTANCE * pdidInstance, VOID * pContext)
{
    HRESULT ret;
    LPDIRECTINPUTDEVICE device;

    /* Copy the instance over, useful for creating devices. */
    SDL_memcpy(&SDL_hapticlist[SDL_numhaptics].instance, pdidInstance,
               sizeof(DIDEVICEINSTANCE));

    /* Open the device (temporarily, just to query it). */
    ret = IDirectInput_CreateDevice(dinput, &pdidInstance->guidInstance,
                                    &device, NULL);
    if (FAILED(ret)) {
        /* DI_SetError("Creating DirectInput device",ret); */
        return DIENUM_CONTINUE;
    }

    /* Get capabilities. */
    SDL_hapticlist[SDL_numhaptics].capabilities.dwSize = sizeof(DIDEVCAPS);
    ret = IDirectInputDevice_GetCapabilities(device,
                                             &SDL_hapticlist[SDL_numhaptics].
                                             capabilities);
    if (FAILED(ret)) {
        /* DI_SetError("Getting device capabilities",ret); */
        IDirectInputDevice_Release(device);
        return DIENUM_CONTINUE;
    }

    /* Copy the name (converted to UTF-8; freed elsewhere). */
    SDL_hapticlist[SDL_numhaptics].name = WIN_StringToUTF8(SDL_hapticlist[SDL_numhaptics].instance.tszProductName);

    /* Close up device and count it. */
    IDirectInputDevice_Release(device);
    SDL_numhaptics++;

    /* Watch out for hard limit. */
    if (SDL_numhaptics >= MAX_HAPTICS)
        return DIENUM_STOP;

    return DIENUM_CONTINUE;
}
/*
* Return the name of a haptic device, does not need to be opened.
*/
/*
 * Return the name of a haptic device, does not need to be opened.
 * `index' must be a valid slot (0 .. SDL_numhaptics-1); no bounds
 * check is performed here.
 */
const char *
SDL_SYS_HapticName(int index)
{
    return SDL_hapticlist[index].name;
}
/*
* Callback to get all supported effects.
*/
/* Set the SDL capability bit `s' when the enumerated effect GUID
 * matches `e'. */
#define EFFECT_TEST(e,s)               \
if (DI_GUIDIsSame(&pei->guid, &(e)))   \
    haptic->supported |= (s)

/*
 * Effect-enumeration callback: translate each DirectInput effect GUID
 * into the corresponding SDL_HAPTIC_* capability bit on the device.
 */
static BOOL CALLBACK
DI_EffectCallback(LPCDIEFFECTINFO pei, LPVOID pv)
{
    /* Prepare the haptic device. */
    SDL_Haptic *haptic = (SDL_Haptic *) pv;

    /* Get supported. */
    EFFECT_TEST(GUID_Spring, SDL_HAPTIC_SPRING);
    EFFECT_TEST(GUID_Damper, SDL_HAPTIC_DAMPER);
    EFFECT_TEST(GUID_Inertia, SDL_HAPTIC_INERTIA);
    EFFECT_TEST(GUID_Friction, SDL_HAPTIC_FRICTION);
    EFFECT_TEST(GUID_ConstantForce, SDL_HAPTIC_CONSTANT);
    EFFECT_TEST(GUID_CustomForce, SDL_HAPTIC_CUSTOM);
    EFFECT_TEST(GUID_Sine, SDL_HAPTIC_SINE);
    EFFECT_TEST(GUID_Square, SDL_HAPTIC_SQUARE);
    EFFECT_TEST(GUID_Triangle, SDL_HAPTIC_TRIANGLE);
    EFFECT_TEST(GUID_SawtoothUp, SDL_HAPTIC_SAWTOOTHUP);
    EFFECT_TEST(GUID_SawtoothDown, SDL_HAPTIC_SAWTOOTHDOWN);
    EFFECT_TEST(GUID_RampForce, SDL_HAPTIC_RAMP);

    /* Check for more. */
    return DIENUM_CONTINUE;
}
/*
 * Callback to get supported axes.
 *
 * Records the DirectInput offset of every force-feedback actuator axis,
 * up to the artificial limit of three axes this backend uses.
 */
static BOOL CALLBACK
DI_DeviceObjectCallback(LPCDIDEVICEOBJECTINSTANCE dev, LPVOID pvRef)
{
    SDL_Haptic *h = (SDL_Haptic *) pvRef;
    const int is_ff_axis = (dev->dwType & DIDFT_AXIS)
        && (dev->dwFlags & DIDOI_FFACTUATOR);

    if (!is_ff_axis) {
        return DIENUM_CONTINUE;
    }

    h->hwdata->axes[h->naxes] = dev->dwOfs;
    h->naxes++;

    /* Stop once we have gathered the three axes we can use. */
    return (h->naxes >= 3) ? DIENUM_STOP : DIENUM_CONTINUE;
}
/*
 * Opens the haptic device from the file descriptor.
 *
 * Steps:
 *    - Open temporary DirectInputDevice interface.
 *    - Create DirectInputDevice2 interface.
 *    - Release DirectInputDevice interface.
 *    - Call SDL_SYS_HapticOpenFromDevice2
 *
 * Returns 0 on success, -1 on error.  On error, haptic->hwdata is freed
 * and reset to NULL.
 */
static int
SDL_SYS_HapticOpenFromInstance(SDL_Haptic * haptic, DIDEVICEINSTANCE instance)
{
    HRESULT ret;
    int ret2;
    LPDIRECTINPUTDEVICE device;

    /* Allocate the hwdata */
    haptic->hwdata = (struct haptic_hwdata *)
        SDL_malloc(sizeof(*haptic->hwdata));
    if (haptic->hwdata == NULL) {
        SDL_OutOfMemory();
        goto creat_err;
    }
    SDL_memset(haptic->hwdata, 0, sizeof(*haptic->hwdata));

    /* Open the device */
    ret = IDirectInput_CreateDevice(dinput, &instance.guidInstance,
                                    &device, NULL);
    if (FAILED(ret)) {
        DI_SetError("Creating DirectInput device", ret);
        goto creat_err;
    }

    /* Now get the IDirectInputDevice2 interface, instead. */
    ret = IDirectInputDevice_QueryInterface(device,
                                            &IID_IDirectInputDevice2,
                                            (LPVOID *) & haptic->hwdata->
                                            device);
    /* Done with the temporary one now. */
    IDirectInputDevice_Release(device);
    if (FAILED(ret)) {
        DI_SetError("Querying DirectInput interface", ret);
        goto creat_err;
    }

    ret2 = SDL_SYS_HapticOpenFromDevice2(haptic, haptic->hwdata->device);
    if (ret2 < 0) {
        goto query_err;
    }

    return 0;

    /* Cleanup ladder: query_err also releases the device2 interface
       obtained above; creat_err only frees the hwdata allocation. */
  query_err:
    IDirectInputDevice2_Release(haptic->hwdata->device);
  creat_err:
    if (haptic->hwdata != NULL) {
        SDL_free(haptic->hwdata);
        haptic->hwdata = NULL;
    }
    return -1;
}
/*
 * Opens the haptic device from the file descriptor.
 *
 * Steps:
 *    - Set cooperative level.
 *    - Set data format.
 *    - Acquire exclusiveness.
 *    - Reset actuators.
 *    - Get supported features.
 *
 * The call order above is mandated by DirectInput: cooperative level and
 * data format must be configured before Acquire(), and force-feedback
 * commands are only valid on an acquired device.
 *
 * Returns 0 on success, -1 on error (device is unacquired on error).
 * Does NOT free haptic->hwdata; that is the caller's responsibility.
 */
static int
SDL_SYS_HapticOpenFromDevice2(SDL_Haptic * haptic,
                              LPDIRECTINPUTDEVICE2 device2)
{
    HRESULT ret;
    DIPROPDWORD dipdw;

    /* We'll use the device2 from now on. */
    haptic->hwdata->device = device2;

    /* Grab it exclusively to use force feedback stuff. */
    ret = IDirectInputDevice2_SetCooperativeLevel(haptic->hwdata->device,
                                                  SDL_HelperWindow,
                                                  DISCL_EXCLUSIVE |
                                                  DISCL_BACKGROUND);
    if (FAILED(ret)) {
        DI_SetError("Setting cooperative level to exclusive", ret);
        goto acquire_err;
    }

    /* Set data format. */
    ret = IDirectInputDevice2_SetDataFormat(haptic->hwdata->device,
                                            &c_dfDIJoystick2);
    if (FAILED(ret)) {
        DI_SetError("Setting data format", ret);
        goto acquire_err;
    }

    /* Get number of axes (fills haptic->hwdata->axes / haptic->naxes). */
    ret = IDirectInputDevice2_EnumObjects(haptic->hwdata->device,
                                          DI_DeviceObjectCallback,
                                          haptic, DIDFT_AXIS);
    if (FAILED(ret)) {
        DI_SetError("Getting device axes", ret);
        goto acquire_err;
    }

    /* Acquire the device. */
    ret = IDirectInputDevice2_Acquire(haptic->hwdata->device);
    if (FAILED(ret)) {
        DI_SetError("Acquiring DirectInput device", ret);
        goto acquire_err;
    }

    /* Reset all actuators - just in case. */
    ret = IDirectInputDevice2_SendForceFeedbackCommand(haptic->hwdata->device,
                                                       DISFFC_RESET);
    if (FAILED(ret)) {
        DI_SetError("Resetting device", ret);
        goto acquire_err;
    }

    /* Enabling actuators. */
    ret = IDirectInputDevice2_SendForceFeedbackCommand(haptic->hwdata->device,
                                                       DISFFC_SETACTUATORSON);
    if (FAILED(ret)) {
        DI_SetError("Enabling actuators", ret);
        goto acquire_err;
    }

    /* Get supported effects (fills haptic->supported via DI_EffectCallback). */
    ret = IDirectInputDevice2_EnumEffects(haptic->hwdata->device,
                                          DI_EffectCallback, haptic,
                                          DIEFT_ALL);
    if (FAILED(ret)) {
        DI_SetError("Enumerating supported effects", ret);
        goto acquire_err;
    }
    if (haptic->supported == 0) {       /* Error since device supports nothing. */
        SDL_SetError("Haptic: Internal error on finding supported effects.");
        goto acquire_err;
    }

    /* Check autogain and autocenter by attempting to set the properties:
       success means the feature exists, failure is not an error. */
    dipdw.diph.dwSize = sizeof(DIPROPDWORD);
    dipdw.diph.dwHeaderSize = sizeof(DIPROPHEADER);
    dipdw.diph.dwObj = 0;
    dipdw.diph.dwHow = DIPH_DEVICE;
    dipdw.dwData = 10000;       /* Full gain, the DirectInput maximum. */
    ret = IDirectInputDevice2_SetProperty(haptic->hwdata->device,
                                          DIPROP_FFGAIN, &dipdw.diph);
    if (!FAILED(ret)) {         /* Gain is supported. */
        haptic->supported |= SDL_HAPTIC_GAIN;
    }
    dipdw.diph.dwObj = 0;
    dipdw.diph.dwHow = DIPH_DEVICE;
    dipdw.dwData = DIPROPAUTOCENTER_OFF;
    ret = IDirectInputDevice2_SetProperty(haptic->hwdata->device,
                                          DIPROP_AUTOCENTER, &dipdw.diph);
    if (!FAILED(ret)) {         /* Autocenter is supported. */
        haptic->supported |= SDL_HAPTIC_AUTOCENTER;
    }

    /* Status is always supported. */
    haptic->supported |= SDL_HAPTIC_STATUS | SDL_HAPTIC_PAUSE;

    /* Check maximum effects. */
    haptic->neffects = 128;     /* This is not actually supported as thus under windows,
                                   there is no way to tell the number of EFFECTS that a
                                   device can hold, so we'll just use a "random" number
                                   instead and put warnings in SDL_haptic.h */
    haptic->nplaying = 128;     /* Even more impossible to get this then neffects. */

    /* Prepare effects memory. */
    haptic->effects = (struct haptic_effect *)
        SDL_malloc(sizeof(struct haptic_effect) * haptic->neffects);
    if (haptic->effects == NULL) {
        SDL_OutOfMemory();
        goto acquire_err;
    }
    /* Clear the memory */
    SDL_memset(haptic->effects, 0,
               sizeof(struct haptic_effect) * haptic->neffects);

    return 0;

    /* Error handling */
  acquire_err:
    IDirectInputDevice2_Unacquire(haptic->hwdata->device);
    return -1;
}
/*
 * Opens a haptic device for usage.
 *
 * Thin wrapper: forwards the cached enumeration entry for this index.
 */
int
SDL_SYS_HapticOpen(SDL_Haptic * haptic)
{
    return SDL_SYS_HapticOpenFromInstance(haptic,
                                          SDL_hapticlist[haptic->index].instance);
}
/*
 * Opens a haptic device from first mouse it finds for usage.
 *
 * Returns the haptic list index of the first mouse-type device,
 * or -1 when none of the enumerated devices is a mouse.
 */
int
SDL_SYS_HapticMouse(void)
{
    int idx;

    for (idx = 0; idx < SDL_numhaptics; idx++) {
        if (SDL_hapticlist[idx].capabilities.dwDevType == DIDEVTYPE_MOUSE) {
            return idx;
        }
    }

    return -1;
}
/*
 * Checks to see if a joystick has haptic features.
 *
 * A joystick is haptic exactly when DirectInput reports the
 * force-feedback capability flag for it.
 */
int
SDL_SYS_JoystickIsHaptic(SDL_Joystick * joystick)
{
    return (joystick->hwdata->Capabilities.dwFlags & DIDC_FORCEFEEDBACK)
        ? SDL_TRUE : SDL_FALSE;
}
/*
 * Checks to see if the haptic device and joystick are in reality the same.
 *
 * Two devices are considered identical when their DirectInput instance
 * GUIDs match.  Returns 1 if same, 0 otherwise (including on query error).
 */
int
SDL_SYS_JoystickSameHaptic(SDL_Haptic * haptic, SDL_Joystick * joystick)
{
    DIDEVICEINSTANCE hap_instance, joy_instance;

    hap_instance.dwSize = sizeof(DIDEVICEINSTANCE);
    joy_instance.dwSize = sizeof(DIDEVICEINSTANCE);

    /* If either instance query fails we cannot prove identity. */
    if (FAILED(IDirectInputDevice2_GetDeviceInfo(haptic->hwdata->device,
                                                 &hap_instance))) {
        return 0;
    }
    if (FAILED(IDirectInputDevice2_GetDeviceInfo(joystick->hwdata->InputDevice,
                                                 &joy_instance))) {
        return 0;
    }

    return DI_GUIDIsSame(&hap_instance.guidInstance,
                         &joy_instance.guidInstance) ? 1 : 0;
}
/*
 * Opens a SDL_Haptic from a SDL_Joystick.
 *
 * Matches the joystick's instance GUID against the enumerated haptic
 * device list, then opens the joystick's own DirectInput device for
 * force feedback.  Returns 0 on success, -1 on error; on error no
 * allocation is left behind.
 */
int
SDL_SYS_HapticOpenFromJoystick(SDL_Haptic * haptic, SDL_Joystick * joystick)
{
    int i, ret;
    HRESULT idret;
    DIDEVICEINSTANCE joy_instance;

    joy_instance.dwSize = sizeof(DIDEVICEINSTANCE);

    /* The joystick's instance info does not change per iteration, so
       query it once instead of once per haptic-list entry (the original
       re-queried inside the loop). */
    idret = IDirectInputDevice2_GetDeviceInfo(joystick->hwdata->InputDevice,
                                              &joy_instance);
    if (FAILED(idret)) {
        return -1;
    }

    /* Since it comes from a joystick we have to try to match it with a haptic device on our haptic list. */
    for (i = 0; i < SDL_numhaptics; i++) {
        if (DI_GUIDIsSame(&SDL_hapticlist[i].instance.guidInstance,
                          &joy_instance.guidInstance)) {
            haptic->index = i;
            break;
        }
    }
    if (i >= SDL_numhaptics) {
        return -1;
    }

    /* Allocate the hwdata */
    haptic->hwdata = (struct haptic_hwdata *)
        SDL_malloc(sizeof(*haptic->hwdata));
    if (haptic->hwdata == NULL) {
        SDL_OutOfMemory();
        return -1;
    }
    SDL_memset(haptic->hwdata, 0, sizeof(*haptic->hwdata));

    /* Now open the device. */
    ret =
        SDL_SYS_HapticOpenFromDevice2(haptic, joystick->hwdata->InputDevice);
    if (ret < 0) {
        /* BUGFIX: the original leaked haptic->hwdata on this path.  The
           device itself belongs to the joystick, so we must not release
           it here - only our own allocation. */
        SDL_free(haptic->hwdata);
        haptic->hwdata = NULL;
        return -1;
    }

    /* It's using the joystick device. */
    haptic->hwdata->is_joystick = 1;

    return 0;
}
/*
 * Closes the haptic device.
 *
 * Frees the effect bookkeeping, unacquires the DirectInput device, and
 * releases it unless it is borrowed from a joystick.
 */
void
SDL_SYS_HapticClose(SDL_Haptic * haptic)
{
    if (!haptic->hwdata) {
        return;
    }

    /* Drop the effect slot array. */
    SDL_free(haptic->effects);
    haptic->effects = NULL;
    haptic->neffects = 0;

    /* Let go of the device. */
    IDirectInputDevice2_Unacquire(haptic->hwdata->device);

    /* Only release if isn't grabbed by a joystick. */
    if (haptic->hwdata->is_joystick == 0) {
        IDirectInputDevice2_Release(haptic->hwdata->device);
    }

    SDL_free(haptic->hwdata);
    haptic->hwdata = NULL;
}
/*
 * Clean up after system specific haptic stuff
 *
 * Frees every cached device name, drops the DirectInput interface and
 * balances the COM initialization.
 */
void
SDL_SYS_HapticQuit(void)
{
    int idx;

    /* Release the names cached by EnumHapticsCallback(). */
    for (idx = 0; idx < SDL_arraysize(SDL_hapticlist); ++idx) {
        if (SDL_hapticlist[idx].name) {
            SDL_free(SDL_hapticlist[idx].name);
            SDL_hapticlist[idx].name = NULL;
        }
    }

    if (dinput != NULL) {
        IDirectInput_Release(dinput);
        dinput = NULL;
    }

    if (coinitialized) {
        WIN_CoUninitialize();
        coinitialized = SDL_FALSE;
    }
}
/*
 * Converts an SDL trigger button to an DIEFFECT trigger button.
 *
 * SDL buttons are 1-based; 0 means "no trigger button".
 */
static DWORD
DIGetTriggerButton(Uint16 button)
{
    if (button == 0) {
        return DIEB_NOTRIGGER;
    }
    return DIJOFS_BUTTON(button - 1);
}
/*
 * Sets the direction.
 *
 * Translates an SDL_HapticDirection into the DIEFF_* coordinate flag
 * plus an rglDirection array of `naxes' coordinates, which the effect
 * takes ownership of (freed by SDL_SYS_HapticFreeDIEFFECT).
 * Returns 0 on success, -1 on error.
 */
static int
SDL_SYS_SetDirection(DIEFFECT * effect, SDL_HapticDirection * dir, int naxes)
{
    LONG *coords;
    int i, ncoords;

    /* Axis-less effects get a spherical default and no direction array. */
    if (naxes == 0) {
        effect->dwFlags |= DIEFF_SPHERICAL;
        effect->rglDirection = NULL;
        return 0;
    }

    coords = SDL_malloc(sizeof(LONG) * naxes);
    if (coords == NULL) {
        SDL_OutOfMemory();
        return -1;
    }
    SDL_memset(coords, 0, sizeof(LONG) * naxes);
    effect->rglDirection = coords;

    /* Pick the DirectInput coordinate flag and the number of coordinates
       the SDL encoding carries (polar carries only one angle). */
    switch (dir->type) {
    case SDL_HAPTIC_POLAR:
        effect->dwFlags |= DIEFF_POLAR;
        ncoords = 1;
        break;
    case SDL_HAPTIC_CARTESIAN:
        effect->dwFlags |= DIEFF_CARTESIAN;
        ncoords = 3;
        break;
    case SDL_HAPTIC_SPHERICAL:
        effect->dwFlags |= DIEFF_SPHERICAL;
        ncoords = 3;
        break;
    default:
        SDL_SetError("Haptic: Unknown direction type.");
        return -1;
    }

    for (i = 0; i < naxes && i < ncoords; i++) {
        coords[i] = dir->dir[i];
    }
    return 0;
}
#define CONVERT(x) (((x) > 0x7FFF) ? 10000 : ((x)*10000) / 0x7FFF)
/*
 * Creates the DIEFFECT from a SDL_HapticEffect.
 *
 * On return (success or failure) `dest' may own several heap buffers
 * (envelope, axis list, direction vector, type-specific params); both
 * callers release them with SDL_SYS_HapticFreeDIEFFECT(), which is why
 * partially-filled state on the error paths is acceptable here.
 *
 * Returns 0 on success, -1 on error (SDL error set).
 */
static int
SDL_SYS_ToDIEFFECT(SDL_Haptic * haptic, DIEFFECT * dest,
                   SDL_HapticEffect * src)
{
    int i;
    DICONSTANTFORCE *constant;
    DIPERIODIC *periodic;
    DICONDITION *condition;     /* Actually an array of conditions - one per axis. */
    DIRAMPFORCE *ramp;
    DICUSTOMFORCE *custom;
    DIENVELOPE *envelope;
    SDL_HapticConstant *hap_constant;
    SDL_HapticPeriodic *hap_periodic;
    SDL_HapticCondition *hap_condition;
    SDL_HapticRamp *hap_ramp;
    SDL_HapticCustom *hap_custom;
    DWORD *axes;

    /* Set global stuff. */
    SDL_memset(dest, 0, sizeof(DIEFFECT));
    dest->dwSize = sizeof(DIEFFECT);    /* Set the structure size. */
    dest->dwSamplePeriod = 0;   /* Not used by us. */
    dest->dwGain = 10000;       /* Gain is set globally, not locally. */
    dest->dwFlags = DIEFF_OBJECTOFFSETS;        /* Seems obligatory. */

    /* Envelope. */
    envelope = SDL_malloc(sizeof(DIENVELOPE));
    if (envelope == NULL) {
        SDL_OutOfMemory();
        return -1;
    }
    SDL_memset(envelope, 0, sizeof(DIENVELOPE));
    dest->lpEnvelope = envelope;
    envelope->dwSize = sizeof(DIENVELOPE);      /* Always should be this. */

    /* Axes. */
    dest->cAxes = haptic->naxes;
    if (dest->cAxes > 0) {
        axes = SDL_malloc(sizeof(DWORD) * dest->cAxes);
        if (axes == NULL) {
            SDL_OutOfMemory();
            return -1;
        }
        axes[0] = haptic->hwdata->axes[0];      /* Always at least one axis. */
        if (dest->cAxes > 1) {
            axes[1] = haptic->hwdata->axes[1];
        }
        if (dest->cAxes > 2) {
            axes[2] = haptic->hwdata->axes[2];
        }
        dest->rgdwAxes = axes;
    }

    /* The big type handling switch, even bigger then linux's version. */
    switch (src->type) {
    case SDL_HAPTIC_CONSTANT:
        hap_constant = &src->constant;
        constant = SDL_malloc(sizeof(DICONSTANTFORCE));
        if (constant == NULL) {
            SDL_OutOfMemory();
            return -1;
        }
        SDL_memset(constant, 0, sizeof(DICONSTANTFORCE));

        /* Specifics */
        constant->lMagnitude = CONVERT(hap_constant->level);
        dest->cbTypeSpecificParams = sizeof(DICONSTANTFORCE);
        dest->lpvTypeSpecificParams = constant;

        /* Generics */
        dest->dwDuration = hap_constant->length * 1000; /* In microseconds. */
        dest->dwTriggerButton = DIGetTriggerButton(hap_constant->button);
        dest->dwTriggerRepeatInterval = hap_constant->interval;
        dest->dwStartDelay = hap_constant->delay * 1000;        /* In microseconds. */

        /* Direction. */
        if (SDL_SYS_SetDirection(dest, &hap_constant->direction, dest->cAxes)
            < 0) {
            return -1;
        }

        /* Envelope: omit it entirely when there is no attack/fade ramp. */
        if ((hap_constant->attack_length == 0)
            && (hap_constant->fade_length == 0)) {
            SDL_free(dest->lpEnvelope);
            dest->lpEnvelope = NULL;
        } else {
            envelope->dwAttackLevel = CONVERT(hap_constant->attack_level);
            envelope->dwAttackTime = hap_constant->attack_length * 1000;
            envelope->dwFadeLevel = CONVERT(hap_constant->fade_level);
            envelope->dwFadeTime = hap_constant->fade_length * 1000;
        }

        break;

    case SDL_HAPTIC_SINE:
    case SDL_HAPTIC_SQUARE:
    case SDL_HAPTIC_TRIANGLE:
    case SDL_HAPTIC_SAWTOOTHUP:
    case SDL_HAPTIC_SAWTOOTHDOWN:
        hap_periodic = &src->periodic;
        periodic = SDL_malloc(sizeof(DIPERIODIC));
        if (periodic == NULL) {
            SDL_OutOfMemory();
            return -1;
        }
        SDL_memset(periodic, 0, sizeof(DIPERIODIC));

        /* Specifics */
        periodic->dwMagnitude = CONVERT(hap_periodic->magnitude);
        periodic->lOffset = CONVERT(hap_periodic->offset);
        periodic->dwPhase = hap_periodic->phase;
        periodic->dwPeriod = hap_periodic->period * 1000;
        dest->cbTypeSpecificParams = sizeof(DIPERIODIC);
        dest->lpvTypeSpecificParams = periodic;

        /* Generics */
        dest->dwDuration = hap_periodic->length * 1000; /* In microseconds. */
        dest->dwTriggerButton = DIGetTriggerButton(hap_periodic->button);
        dest->dwTriggerRepeatInterval = hap_periodic->interval;
        dest->dwStartDelay = hap_periodic->delay * 1000;        /* In microseconds. */

        /* Direction. */
        if (SDL_SYS_SetDirection(dest, &hap_periodic->direction, dest->cAxes)
            < 0) {
            return -1;
        }

        /* Envelope */
        if ((hap_periodic->attack_length == 0)
            && (hap_periodic->fade_length == 0)) {
            SDL_free(dest->lpEnvelope);
            dest->lpEnvelope = NULL;
        } else {
            envelope->dwAttackLevel = CONVERT(hap_periodic->attack_level);
            envelope->dwAttackTime = hap_periodic->attack_length * 1000;
            envelope->dwFadeLevel = CONVERT(hap_periodic->fade_level);
            envelope->dwFadeTime = hap_periodic->fade_length * 1000;
        }

        break;

    case SDL_HAPTIC_SPRING:
    case SDL_HAPTIC_DAMPER:
    case SDL_HAPTIC_INERTIA:
    case SDL_HAPTIC_FRICTION:
        hap_condition = &src->condition;
        condition = SDL_malloc(sizeof(DICONDITION) * dest->cAxes);
        if (condition == NULL) {
            SDL_OutOfMemory();
            return -1;
        }
        /* BUGFIX: clear the whole per-axis array; the original cleared
           only sizeof(DICONDITION) bytes, leaving axes 1..n garbage. */
        SDL_memset(condition, 0, sizeof(DICONDITION) * dest->cAxes);

        /* Specifics */
        for (i = 0; i < (int) dest->cAxes; i++) {
            condition[i].lOffset = CONVERT(hap_condition->center[i]);
            condition[i].lPositiveCoefficient =
                CONVERT(hap_condition->right_coeff[i]);
            condition[i].lNegativeCoefficient =
                CONVERT(hap_condition->left_coeff[i]);
            condition[i].dwPositiveSaturation =
                CONVERT(hap_condition->right_sat[i]);
            condition[i].dwNegativeSaturation =
                CONVERT(hap_condition->left_sat[i]);
            condition[i].lDeadBand = CONVERT(hap_condition->deadband[i]);
        }
        dest->cbTypeSpecificParams = sizeof(DICONDITION) * dest->cAxes;
        dest->lpvTypeSpecificParams = condition;

        /* Generics */
        dest->dwDuration = hap_condition->length * 1000;        /* In microseconds. */
        dest->dwTriggerButton = DIGetTriggerButton(hap_condition->button);
        dest->dwTriggerRepeatInterval = hap_condition->interval;
        dest->dwStartDelay = hap_condition->delay * 1000;       /* In microseconds. */

        /* Direction. */
        if (SDL_SYS_SetDirection(dest, &hap_condition->direction, dest->cAxes)
            < 0) {
            return -1;
        }

        /* Envelope - Not actually supported by most CONDITION implementations. */
        SDL_free(dest->lpEnvelope);
        dest->lpEnvelope = NULL;

        break;

    case SDL_HAPTIC_RAMP:
        hap_ramp = &src->ramp;
        ramp = SDL_malloc(sizeof(DIRAMPFORCE));
        if (ramp == NULL) {
            SDL_OutOfMemory();
            return -1;
        }
        SDL_memset(ramp, 0, sizeof(DIRAMPFORCE));

        /* Specifics */
        ramp->lStart = CONVERT(hap_ramp->start);
        ramp->lEnd = CONVERT(hap_ramp->end);
        dest->cbTypeSpecificParams = sizeof(DIRAMPFORCE);
        dest->lpvTypeSpecificParams = ramp;

        /* Generics */
        dest->dwDuration = hap_ramp->length * 1000;     /* In microseconds. */
        dest->dwTriggerButton = DIGetTriggerButton(hap_ramp->button);
        dest->dwTriggerRepeatInterval = hap_ramp->interval;
        dest->dwStartDelay = hap_ramp->delay * 1000;    /* In microseconds. */

        /* Direction. */
        if (SDL_SYS_SetDirection(dest, &hap_ramp->direction, dest->cAxes) < 0) {
            return -1;
        }

        /* Envelope */
        if ((hap_ramp->attack_length == 0) && (hap_ramp->fade_length == 0)) {
            SDL_free(dest->lpEnvelope);
            dest->lpEnvelope = NULL;
        } else {
            envelope->dwAttackLevel = CONVERT(hap_ramp->attack_level);
            envelope->dwAttackTime = hap_ramp->attack_length * 1000;
            envelope->dwFadeLevel = CONVERT(hap_ramp->fade_level);
            envelope->dwFadeTime = hap_ramp->fade_length * 1000;
        }

        break;

    case SDL_HAPTIC_CUSTOM:
        hap_custom = &src->custom;
        custom = SDL_malloc(sizeof(DICUSTOMFORCE));
        if (custom == NULL) {
            SDL_OutOfMemory();
            return -1;
        }
        SDL_memset(custom, 0, sizeof(DICUSTOMFORCE));

        /* Specifics */
        custom->cChannels = hap_custom->channels;
        custom->dwSamplePeriod = hap_custom->period * 1000;
        custom->cSamples = hap_custom->samples;
        custom->rglForceData =
            SDL_malloc(sizeof(LONG) * custom->cSamples * custom->cChannels);
        if (custom->rglForceData == NULL) {
            /* BUGFIX: this allocation used to be dereferenced unchecked.
               `custom' is not yet attached to dest, so free it here. */
            SDL_free(custom);
            SDL_OutOfMemory();
            return -1;
        }
        for (i = 0; i < hap_custom->samples * hap_custom->channels; i++) {      /* Copy data. */
            custom->rglForceData[i] = CONVERT(hap_custom->data[i]);
        }
        dest->cbTypeSpecificParams = sizeof(DICUSTOMFORCE);
        dest->lpvTypeSpecificParams = custom;

        /* Generics */
        dest->dwDuration = hap_custom->length * 1000;   /* In microseconds. */
        dest->dwTriggerButton = DIGetTriggerButton(hap_custom->button);
        dest->dwTriggerRepeatInterval = hap_custom->interval;
        dest->dwStartDelay = hap_custom->delay * 1000;  /* In microseconds. */

        /* Direction. */
        if (SDL_SYS_SetDirection(dest, &hap_custom->direction, dest->cAxes) <
            0) {
            return -1;
        }

        /* Envelope */
        if ((hap_custom->attack_length == 0)
            && (hap_custom->fade_length == 0)) {
            SDL_free(dest->lpEnvelope);
            dest->lpEnvelope = NULL;
        } else {
            envelope->dwAttackLevel = CONVERT(hap_custom->attack_level);
            envelope->dwAttackTime = hap_custom->attack_length * 1000;
            envelope->dwFadeLevel = CONVERT(hap_custom->fade_level);
            envelope->dwFadeTime = hap_custom->fade_length * 1000;
        }

        break;

    default:
        SDL_SetError("Haptic: Unknown effect type.");
        return -1;
    }

    return 0;
}
/*
 * Frees an DIEFFECT allocated by SDL_SYS_ToDIEFFECT.
 *
 * Safe to call on a partially-filled DIEFFECT: every owned pointer is
 * checked and reset to NULL after freeing.
 */
static void
SDL_SYS_HapticFreeDIEFFECT(DIEFFECT * effect, int type)
{
    if (effect->lpEnvelope != NULL) {
        SDL_free(effect->lpEnvelope);
        effect->lpEnvelope = NULL;
    }

    if (effect->rgdwAxes != NULL) {
        SDL_free(effect->rgdwAxes);
        effect->rgdwAxes = NULL;
    }

    /* Custom effects carry a nested sample buffer; release it first. */
    if (effect->lpvTypeSpecificParams != NULL) {
        if (type == SDL_HAPTIC_CUSTOM) {
            DICUSTOMFORCE *cf = (DICUSTOMFORCE *) effect->lpvTypeSpecificParams;
            SDL_free(cf->rglForceData);
            cf->rglForceData = NULL;
        }
        SDL_free(effect->lpvTypeSpecificParams);
        effect->lpvTypeSpecificParams = NULL;
    }

    if (effect->rglDirection != NULL) {
        SDL_free(effect->rglDirection);
        effect->rglDirection = NULL;
    }
}
/*
 * Gets the effect type from the generic SDL haptic effect wrapper.
 *
 * Maps each SDL effect type to the corresponding DirectInput effect
 * GUID; returns NULL (with the SDL error set) for unknown types.
 */
static REFGUID
SDL_SYS_HapticEffectType(SDL_HapticEffect * effect)
{
    const int t = effect->type;

    if (t == SDL_HAPTIC_CONSTANT)
        return &GUID_ConstantForce;
    if (t == SDL_HAPTIC_RAMP)
        return &GUID_RampForce;
    if (t == SDL_HAPTIC_SQUARE)
        return &GUID_Square;
    if (t == SDL_HAPTIC_SINE)
        return &GUID_Sine;
    if (t == SDL_HAPTIC_TRIANGLE)
        return &GUID_Triangle;
    if (t == SDL_HAPTIC_SAWTOOTHUP)
        return &GUID_SawtoothUp;
    if (t == SDL_HAPTIC_SAWTOOTHDOWN)
        return &GUID_SawtoothDown;
    if (t == SDL_HAPTIC_SPRING)
        return &GUID_Spring;
    if (t == SDL_HAPTIC_DAMPER)
        return &GUID_Damper;
    if (t == SDL_HAPTIC_INERTIA)
        return &GUID_Inertia;
    if (t == SDL_HAPTIC_FRICTION)
        return &GUID_Friction;
    if (t == SDL_HAPTIC_CUSTOM)
        return &GUID_CustomForce;

    SDL_SetError("Haptic: Unknown effect type.");
    return NULL;
}
/*
 * Creates a new haptic effect.
 *
 * Translates `base' into a DIEFFECT and asks DirectInput to create the
 * device-side effect, storing the handle in effect->hweffect->ref.
 * Returns 0 on success, -1 on error (SDL error set); on error all
 * allocations made here are released.
 */
int
SDL_SYS_HapticNewEffect(SDL_Haptic * haptic, struct haptic_effect *effect,
                        SDL_HapticEffect * base)
{
    HRESULT ret;

    /* Get the type. */
    REFGUID type = SDL_SYS_HapticEffectType(base);
    if (type == NULL) {
        /* NOTE(review): jumping here reads effect->hweffect before this
           function has assigned it - relies on the caller handing in a
           zeroed effect slot.  Confirm against the caller. */
        goto err_hweffect;
    }

    /* Alloc the effect. */
    effect->hweffect = (struct haptic_hweffect *)
        SDL_malloc(sizeof(struct haptic_hweffect));
    if (effect->hweffect == NULL) {
        SDL_OutOfMemory();
        goto err_hweffect;
    }

    /* Get the effect (fills effect->hweffect->effect with owned buffers). */
    if (SDL_SYS_ToDIEFFECT(haptic, &effect->hweffect->effect, base) < 0) {
        goto err_effectdone;
    }

    /* Create the actual effect. */
    ret = IDirectInputDevice2_CreateEffect(haptic->hwdata->device, type,
                                           &effect->hweffect->effect,
                                           &effect->hweffect->ref, NULL);
    if (FAILED(ret)) {
        DI_SetError("Unable to create effect", ret);
        goto err_effectdone;
    }

    return 0;

    /* Cleanup ladder: free the DIEFFECT buffers, then the wrapper. */
  err_effectdone:
    SDL_SYS_HapticFreeDIEFFECT(&effect->hweffect->effect, base->type);
  err_hweffect:
    if (effect->hweffect != NULL) {
        SDL_free(effect->hweffect);
        effect->hweffect = NULL;
    }
    return -1;
}
/*
 * Updates an effect.
 *
 * Builds a fresh DIEFFECT in `temp', pushes it to the device with
 * SetParameters, and only then swaps it in for the stored copy (freeing
 * the old one's buffers).  On failure the stored effect is untouched and
 * `temp's buffers are released.
 */
int
SDL_SYS_HapticUpdateEffect(SDL_Haptic * haptic,
                           struct haptic_effect *effect,
                           SDL_HapticEffect * data)
{
    HRESULT ret;
    DWORD flags;
    DIEFFECT temp;

    /* Get the effect. */
    SDL_memset(&temp, 0, sizeof(DIEFFECT));
    if (SDL_SYS_ToDIEFFECT(haptic, &temp, data) < 0) {
        goto err_update;
    }

    /* Set the flags.  Might be worthwhile to diff temp with loaded effect and
     * only change those parameters. */
    flags = DIEP_DIRECTION |
        DIEP_DURATION |
        DIEP_ENVELOPE |
        DIEP_STARTDELAY |
        DIEP_TRIGGERBUTTON |
        DIEP_TRIGGERREPEATINTERVAL | DIEP_TYPESPECIFICPARAMS;

    /* Create the actual effect. */
    ret =
        IDirectInputEffect_SetParameters(effect->hweffect->ref, &temp, flags);
    if (FAILED(ret)) {
        DI_SetError("Unable to update effect", ret);
        goto err_update;
    }

    /* Copy it over: the stored DIEFFECT now owns temp's buffers. */
    SDL_SYS_HapticFreeDIEFFECT(&effect->hweffect->effect, data->type);
    SDL_memcpy(&effect->hweffect->effect, &temp, sizeof(DIEFFECT));

    return 0;

  err_update:
    SDL_SYS_HapticFreeDIEFFECT(&temp, data->type);
    return -1;
}
/*
 * Runs an effect.
 *
 * SDL_HAPTIC_INFINITY maps onto DirectInput's INFINITE iteration count.
 */
int
SDL_SYS_HapticRunEffect(SDL_Haptic * haptic, struct haptic_effect *effect,
                        Uint32 iterations)
{
    HRESULT ret;
    DWORD iter =
        (iterations == SDL_HAPTIC_INFINITY) ? INFINITE : iterations;

    ret = IDirectInputEffect_Start(effect->hweffect->ref, iter, 0);
    if (FAILED(ret)) {
        DI_SetError("Running the effect", ret);
        return -1;
    }
    return 0;
}
/*
 * Stops an effect.
 */
int
SDL_SYS_HapticStopEffect(SDL_Haptic * haptic, struct haptic_effect *effect)
{
    HRESULT ret = IDirectInputEffect_Stop(effect->hweffect->ref);

    if (FAILED(ret)) {
        DI_SetError("Unable to stop effect", ret);
        return -1;
    }
    return 0;
}
/*
 * Frees the effect.
 *
 * Unloads the device-side effect (failure only sets the SDL error; the
 * local teardown still proceeds), then releases the DIEFFECT buffers and
 * the wrapper struct.
 */
void
SDL_SYS_HapticDestroyEffect(SDL_Haptic * haptic, struct haptic_effect *effect)
{
    HRESULT ret = IDirectInputEffect_Unload(effect->hweffect->ref);

    if (FAILED(ret)) {
        DI_SetError("Removing effect from the device", ret);
    }

    SDL_SYS_HapticFreeDIEFFECT(&effect->hweffect->effect,
                               effect->effect.type);
    SDL_free(effect->hweffect);
    effect->hweffect = NULL;
}
/*
 * Gets the status of a haptic effect.
 *
 * Returns SDL_TRUE when the device reports any status bit set (playing),
 * SDL_FALSE when idle, -1 on query error.
 */
int
SDL_SYS_HapticGetEffectStatus(SDL_Haptic * haptic,
                              struct haptic_effect *effect)
{
    DWORD status;
    HRESULT ret = IDirectInputEffect_GetEffectStatus(effect->hweffect->ref,
                                                     &status);

    if (FAILED(ret)) {
        DI_SetError("Getting effect status", ret);
        return -1;
    }

    return (status != 0) ? SDL_TRUE : SDL_FALSE;
}
/*
 * Sets the gain.
 *
 * SDL gain is 0..100; DirectInput's DIPROP_FFGAIN takes 0..10000, hence
 * the factor of 100.
 */
int
SDL_SYS_HapticSetGain(SDL_Haptic * haptic, int gain)
{
    HRESULT ret;
    DIPROPDWORD prop;

    prop.diph.dwSize = sizeof(DIPROPDWORD);
    prop.diph.dwHeaderSize = sizeof(DIPROPHEADER);
    prop.diph.dwObj = 0;
    prop.diph.dwHow = DIPH_DEVICE;
    prop.dwData = gain * 100;   /* 0 to 10,000 */

    ret = IDirectInputDevice2_SetProperty(haptic->hwdata->device,
                                          DIPROP_FFGAIN, &prop.diph);
    if (FAILED(ret)) {
        DI_SetError("Setting gain", ret);
        return -1;
    }
    return 0;
}
/*
 * Sets the autocentering.
 *
 * Any nonzero value enables autocenter; zero disables it.
 */
int
SDL_SYS_HapticSetAutocenter(SDL_Haptic * haptic, int autocenter)
{
    HRESULT ret;
    DIPROPDWORD prop;

    prop.diph.dwSize = sizeof(DIPROPDWORD);
    prop.diph.dwHeaderSize = sizeof(DIPROPHEADER);
    prop.diph.dwObj = 0;
    prop.diph.dwHow = DIPH_DEVICE;
    prop.dwData = (autocenter == 0) ? DIPROPAUTOCENTER_OFF
                                    : DIPROPAUTOCENTER_ON;

    ret = IDirectInputDevice2_SetProperty(haptic->hwdata->device,
                                          DIPROP_AUTOCENTER, &prop.diph);
    if (FAILED(ret)) {
        DI_SetError("Setting autocenter", ret);
        return -1;
    }
    return 0;
}
/*
 * Pauses the device.
 */
int
SDL_SYS_HapticPause(SDL_Haptic * haptic)
{
    HRESULT ret =
        IDirectInputDevice2_SendForceFeedbackCommand(haptic->hwdata->device,
                                                     DISFFC_PAUSE);

    if (FAILED(ret)) {
        DI_SetError("Pausing the device", ret);
        return -1;
    }
    return 0;
}
/*
 * Unpauses the device (resumes effects paused with DISFFC_PAUSE).
 */
int
SDL_SYS_HapticUnpause(SDL_Haptic * haptic)
{
    HRESULT ret;

    /* Unpause the device. */
    ret = IDirectInputDevice2_SendForceFeedbackCommand(haptic->hwdata->device,
                                                       DISFFC_CONTINUE);
    if (FAILED(ret)) {
        /* BUGFIX: error message previously said "Pausing the device". */
        DI_SetError("Unpausing the device", ret);
        return -1;
    }

    return 0;
}
/*
 * Stops all the playing effects on the device.
 */
int
SDL_SYS_HapticStopAll(SDL_Haptic * haptic)
{
    HRESULT ret =
        IDirectInputDevice2_SendForceFeedbackCommand(haptic->hwdata->device,
                                                     DISFFC_STOPALL);

    if (FAILED(ret)) {
        DI_SetError("Stopping the device", ret);
        return -1;
    }
    return 0;
}
#endif /* SDL_HAPTIC_DINPUT */
/*
* max8997.c - mfd core driver for the Maxim 8966 and 8997
*
* Copyright (C) 2011 Samsung Electronics
* MyungJoo Ham <myungjoo.ham@smasung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*
* This driver is based on max8998.c
*/
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/pm_runtime.h>
#include <linux/mutex.h>
#include <linux/mfd/core.h>
#include <linux/mfd/max8997.h>
#include <linux/mfd/max8997-private.h>
#define I2C_ADDR_PMIC (0xCC >> 1)
#define I2C_ADDR_MUIC (0x4A >> 1)
#define I2C_ADDR_BATTERY (0x6C >> 1)
#define I2C_ADDR_RTC (0x0C >> 1)
#define I2C_ADDR_HAPTIC (0x90 >> 1)
/* Child MFD cells instantiated under the MAX8997/MAX8966 core device
 * by mfd_add_devices() in max8997_i2c_probe(). */
static struct mfd_cell max8997_devs[] = {
	{ .name = "max8997-pmic", },
	{ .name = "max8997-rtc", },
	{ .name = "max8997-battery", },
	{ .name = "max8997-haptic", },
	{ .name = "max8997-muic", },
	{ .name = "max8997-flash", },
};
/*
 * Read a single register under the device I/O lock.
 * Returns 0 on success (value stored in *dest) or a negative errno.
 */
int max8997_read_reg(struct i2c_client *i2c, u8 reg, u8 *dest)
{
	struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
	int ret;

	mutex_lock(&max8997->iolock);
	ret = i2c_smbus_read_byte_data(i2c, reg);
	mutex_unlock(&max8997->iolock);

	if (ret < 0)
		return ret;

	*dest = ret & 0xff;
	return 0;
}
EXPORT_SYMBOL_GPL(max8997_read_reg);
/*
 * Read `count' consecutive registers into `buf' under the I/O lock.
 * Returns 0 on success or a negative errno.
 */
int max8997_bulk_read(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
{
	struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
	int ret;

	mutex_lock(&max8997->iolock);
	ret = i2c_smbus_read_i2c_block_data(i2c, reg, count, buf);
	mutex_unlock(&max8997->iolock);

	return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL_GPL(max8997_bulk_read);
/*
 * Write one register under the I/O lock.
 * Returns the i2c_smbus_write_byte_data() result (0 or negative errno).
 */
int max8997_write_reg(struct i2c_client *i2c, u8 reg, u8 value)
{
	struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
	int ret;

	mutex_lock(&max8997->iolock);
	ret = i2c_smbus_write_byte_data(i2c, reg, value);
	mutex_unlock(&max8997->iolock);

	return ret;
}
EXPORT_SYMBOL_GPL(max8997_write_reg);
/*
 * Write `count' consecutive registers from `buf' under the I/O lock.
 * Returns 0 on success or a negative errno.
 */
int max8997_bulk_write(struct i2c_client *i2c, u8 reg, int count, u8 *buf)
{
	struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
	int ret;

	mutex_lock(&max8997->iolock);
	ret = i2c_smbus_write_i2c_block_data(i2c, reg, count, buf);
	mutex_unlock(&max8997->iolock);

	return (ret < 0) ? ret : 0;
}
EXPORT_SYMBOL_GPL(max8997_bulk_write);
/*
 * Atomically read-modify-write the masked bits of a register: bits set
 * in `mask' are taken from `val', the rest keep their current value.
 * The lock is held across both accesses so the update is not torn.
 * Returns 0 on success or a negative errno.
 */
int max8997_update_reg(struct i2c_client *i2c, u8 reg, u8 val, u8 mask)
{
	struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
	int ret;

	mutex_lock(&max8997->iolock);
	ret = i2c_smbus_read_byte_data(i2c, reg);
	if (ret >= 0) {
		u8 cur = ret & 0xff;

		ret = i2c_smbus_write_byte_data(i2c, reg,
					(val & mask) | (cur & ~mask));
	}
	mutex_unlock(&max8997->iolock);

	return ret;
}
EXPORT_SYMBOL_GPL(max8997_update_reg);
static int max8997_i2c_probe(struct i2c_client *i2c,
const struct i2c_device_id *id)
{
struct max8997_dev *max8997;
struct max8997_platform_data *pdata = i2c->dev.platform_data;
int ret = 0;
max8997 = kzalloc(sizeof(struct max8997_dev), GFP_KERNEL);
if (max8997 == NULL)
return -ENOMEM;
i2c_set_clientdata(i2c, max8997);
max8997->dev = &i2c->dev;
max8997->i2c = i2c;
max8997->type = id->driver_data;
max8997->irq = i2c->irq;
if (!pdata)
goto err;
max8997->irq_base = pdata->irq_base;
max8997->ono = pdata->ono;
mutex_init(&max8997->iolock);
max8997->rtc = i2c_new_dummy(i2c->adapter, I2C_ADDR_RTC);
i2c_set_clientdata(max8997->rtc, max8997);
max8997->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
i2c_set_clientdata(max8997->haptic, max8997);
max8997->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
i2c_set_clientdata(max8997->muic, max8997);
pm_runtime_set_active(max8997->dev);
max8997_irq_init(max8997);
mfd_add_devices(max8997->dev, -1, max8997_devs,
ARRAY_SIZE(max8997_devs),
NULL, 0);
/*
* TODO: enable others (flash, muic, rtc, battery, ...) and
* check the return value
*/
if (ret < 0)
goto err_mfd;
/* MAX8997 has a power button input. */
device_init_wakeup(max8997->dev, pdata->wakeup);
return ret;
err_mfd:
mfd_remove_devices(max8997->dev);
i2c_unregister_device(max8997->muic);
i2c_unregister_device(max8997->haptic);
i2c_unregister_device(max8997->rtc);
err:
kfree(max8997);
return ret;
}
/*
 * Tear down the MFD cells and the secondary dummy I2C clients, then
 * free the core state (reverse order of probe).
 */
static int max8997_i2c_remove(struct i2c_client *i2c)
{
	struct max8997_dev *max8997 = i2c_get_clientdata(i2c);

	mfd_remove_devices(max8997->dev);

	i2c_unregister_device(max8997->muic);
	i2c_unregister_device(max8997->haptic);
	i2c_unregister_device(max8997->rtc);

	kfree(max8997);
	return 0;
}
static const struct i2c_device_id max8997_i2c_id[] = {
	{ "max8997", TYPE_MAX8997 },
	{ "max8966", TYPE_MAX8966 },
	{ }
};
/* BUGFIX: the table was registered as "max8998_i2c_id" (copy-paste from
 * the max8998 driver) — an undeclared identifier in this file; it must
 * reference max8997_i2c_id for module autoloading to work. */
MODULE_DEVICE_TABLE(i2c, max8997_i2c_id);
/* Register addresses snapshotted by max8997_freeze() and written back by
 * max8997_restore() across hibernation.  The three tables correspond to
 * the PMIC, MUIC and haptic register banks; reg_dump layout follows the
 * same order (PMIC block, then MUIC, then haptic). */
u8 max8997_dumpaddr_pmic[] = {
	MAX8997_REG_INT1MSK,
	MAX8997_REG_INT2MSK,
	MAX8997_REG_INT3MSK,
	MAX8997_REG_INT4MSK,
	MAX8997_REG_MAINCON1,
	MAX8997_REG_MAINCON2,
	MAX8997_REG_BUCKRAMP,
	MAX8997_REG_BUCK1CTRL,
	MAX8997_REG_BUCK1DVS1,
	MAX8997_REG_BUCK1DVS2,
	MAX8997_REG_BUCK1DVS3,
	MAX8997_REG_BUCK1DVS4,
	MAX8997_REG_BUCK1DVS5,
	MAX8997_REG_BUCK1DVS6,
	MAX8997_REG_BUCK1DVS7,
	MAX8997_REG_BUCK1DVS8,
	MAX8997_REG_BUCK2CTRL,
	MAX8997_REG_BUCK2DVS1,
	MAX8997_REG_BUCK2DVS2,
	MAX8997_REG_BUCK2DVS3,
	MAX8997_REG_BUCK2DVS4,
	MAX8997_REG_BUCK2DVS5,
	MAX8997_REG_BUCK2DVS6,
	MAX8997_REG_BUCK2DVS7,
	MAX8997_REG_BUCK2DVS8,
	MAX8997_REG_BUCK3CTRL,
	MAX8997_REG_BUCK3DVS,
	MAX8997_REG_BUCK4CTRL,
	MAX8997_REG_BUCK4DVS,
	MAX8997_REG_BUCK5CTRL,
	MAX8997_REG_BUCK5DVS1,
	MAX8997_REG_BUCK5DVS2,
	MAX8997_REG_BUCK5DVS3,
	MAX8997_REG_BUCK5DVS4,
	MAX8997_REG_BUCK5DVS5,
	MAX8997_REG_BUCK5DVS6,
	MAX8997_REG_BUCK5DVS7,
	MAX8997_REG_BUCK5DVS8,
	MAX8997_REG_BUCK6CTRL,
	MAX8997_REG_BUCK6BPSKIPCTRL,
	MAX8997_REG_BUCK7CTRL,
	MAX8997_REG_BUCK7DVS,
	MAX8997_REG_LDO1CTRL,
	MAX8997_REG_LDO2CTRL,
	MAX8997_REG_LDO3CTRL,
	MAX8997_REG_LDO4CTRL,
	MAX8997_REG_LDO5CTRL,
	MAX8997_REG_LDO6CTRL,
	MAX8997_REG_LDO7CTRL,
	MAX8997_REG_LDO8CTRL,
	MAX8997_REG_LDO9CTRL,
	MAX8997_REG_LDO10CTRL,
	MAX8997_REG_LDO11CTRL,
	MAX8997_REG_LDO12CTRL,
	MAX8997_REG_LDO13CTRL,
	MAX8997_REG_LDO14CTRL,
	MAX8997_REG_LDO15CTRL,
	MAX8997_REG_LDO16CTRL,
	MAX8997_REG_LDO17CTRL,
	MAX8997_REG_LDO18CTRL,
	MAX8997_REG_LDO21CTRL,
	MAX8997_REG_MBCCTRL1,
	MAX8997_REG_MBCCTRL2,
	MAX8997_REG_MBCCTRL3,
	MAX8997_REG_MBCCTRL4,
	MAX8997_REG_MBCCTRL5,
	MAX8997_REG_MBCCTRL6,
	MAX8997_REG_OTPCGHCVS,
	MAX8997_REG_SAFEOUTCTRL,
	MAX8997_REG_LBCNFG1,
	MAX8997_REG_LBCNFG2,
	MAX8997_REG_BBCCTRL,
	MAX8997_REG_FLASH1_CUR,
	MAX8997_REG_FLASH2_CUR,
	MAX8997_REG_MOVIE_CUR,
	MAX8997_REG_GSMB_CUR,
	MAX8997_REG_BOOST_CNTL,
	MAX8997_REG_LEN_CNTL,
	MAX8997_REG_FLASH_CNTL,
	MAX8997_REG_WDT_CNTL,
	MAX8997_REG_MAXFLASH1,
	MAX8997_REG_MAXFLASH2,
	MAX8997_REG_FLASHSTATUSMASK,
	MAX8997_REG_GPIOCNTL1,
	MAX8997_REG_GPIOCNTL2,
	MAX8997_REG_GPIOCNTL3,
	MAX8997_REG_GPIOCNTL4,
	MAX8997_REG_GPIOCNTL5,
	MAX8997_REG_GPIOCNTL6,
	MAX8997_REG_GPIOCNTL7,
	MAX8997_REG_GPIOCNTL8,
	MAX8997_REG_GPIOCNTL9,
	MAX8997_REG_GPIOCNTL10,
	MAX8997_REG_GPIOCNTL11,
	MAX8997_REG_GPIOCNTL12,
	MAX8997_REG_LDO1CONFIG,
	MAX8997_REG_LDO2CONFIG,
	MAX8997_REG_LDO3CONFIG,
	MAX8997_REG_LDO4CONFIG,
	MAX8997_REG_LDO5CONFIG,
	MAX8997_REG_LDO6CONFIG,
	MAX8997_REG_LDO7CONFIG,
	MAX8997_REG_LDO8CONFIG,
	MAX8997_REG_LDO9CONFIG,
	MAX8997_REG_LDO10CONFIG,
	MAX8997_REG_LDO11CONFIG,
	MAX8997_REG_LDO12CONFIG,
	MAX8997_REG_LDO13CONFIG,
	MAX8997_REG_LDO14CONFIG,
	MAX8997_REG_LDO15CONFIG,
	MAX8997_REG_LDO16CONFIG,
	MAX8997_REG_LDO17CONFIG,
	MAX8997_REG_LDO18CONFIG,
	MAX8997_REG_LDO21CONFIG,
	MAX8997_REG_DVSOKTIMER1,
	MAX8997_REG_DVSOKTIMER2,
	/* NOTE(review): DVSOKTIMER3 is absent — presumably intentional,
	 * but worth confirming against the datasheet. */
	MAX8997_REG_DVSOKTIMER4,
	MAX8997_REG_DVSOKTIMER5,
};

u8 max8997_dumpaddr_muic[] = {
	MAX8997_MUIC_REG_INTMASK1,
	MAX8997_MUIC_REG_INTMASK2,
	MAX8997_MUIC_REG_INTMASK3,
	MAX8997_MUIC_REG_CDETCTRL,
	MAX8997_MUIC_REG_CONTROL1,
	MAX8997_MUIC_REG_CONTROL2,
	MAX8997_MUIC_REG_CONTROL3,
};

u8 max8997_dumpaddr_haptic[] = {
	MAX8997_HAPTIC_REG_CONF1,
	MAX8997_HAPTIC_REG_CONF2,
	MAX8997_HAPTIC_REG_DRVCONF,
	MAX8997_HAPTIC_REG_CYCLECONF1,
	MAX8997_HAPTIC_REG_CYCLECONF2,
	MAX8997_HAPTIC_REG_SIGCONF1,
	MAX8997_HAPTIC_REG_SIGCONF2,
	MAX8997_HAPTIC_REG_SIGCONF3,
	MAX8997_HAPTIC_REG_SIGCONF4,
	MAX8997_HAPTIC_REG_SIGDC1,
	MAX8997_HAPTIC_REG_SIGDC2,
	MAX8997_HAPTIC_REG_SIGPWMDC1,
	MAX8997_HAPTIC_REG_SIGPWMDC2,
	MAX8997_HAPTIC_REG_SIGPWMDC3,
	MAX8997_HAPTIC_REG_SIGPWMDC4,
};
/*
 * Hibernation freeze: snapshot every register listed in the dump tables
 * into max8997->reg_dump.  Layout (PMIC block, then MUIC at offset
 * MAX8997_REG_PMIC_END, then haptic) must match max8997_restore().
 */
static int max8997_freeze(struct device *dev)
{
	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
	struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
	int i;

	for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_pmic); i++)
		max8997_read_reg(i2c, max8997_dumpaddr_pmic[i],
				&max8997->reg_dump[i]);

	/* NOTE(review): MUIC/haptic registers are read through the parent
	 * PMIC client rather than the dummy clients created in probe —
	 * looks suspicious; confirm against the register map. */
	for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_muic); i++)
		max8997_read_reg(i2c, max8997_dumpaddr_muic[i],
				&max8997->reg_dump[i + MAX8997_REG_PMIC_END]);

	for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_haptic); i++)
		max8997_read_reg(i2c, max8997_dumpaddr_haptic[i],
				&max8997->reg_dump[i + MAX8997_REG_PMIC_END +
				MAX8997_MUIC_REG_END]);

	return 0;
}
/*
 * Hibernation restore: write back the register snapshot taken by
 * max8997_freeze(), in the same table order and reg_dump layout.
 */
static int max8997_restore(struct device *dev)
{
	struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
	struct max8997_dev *max8997 = i2c_get_clientdata(i2c);
	int i;

	for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_pmic); i++)
		max8997_write_reg(i2c, max8997_dumpaddr_pmic[i],
				max8997->reg_dump[i]);

	/* NOTE(review): writes go through the parent PMIC client for the
	 * MUIC/haptic banks too — see the matching note in max8997_freeze. */
	for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_muic); i++)
		max8997_write_reg(i2c, max8997_dumpaddr_muic[i],
				max8997->reg_dump[i + MAX8997_REG_PMIC_END]);

	for (i = 0; i < ARRAY_SIZE(max8997_dumpaddr_haptic); i++)
		max8997_write_reg(i2c, max8997_dumpaddr_haptic[i],
				max8997->reg_dump[i + MAX8997_REG_PMIC_END +
				MAX8997_MUIC_REG_END]);

	return 0;
}
/* System suspend: arm the PMIC irq as a wakeup source when allowed. */
static int max8997_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max8997_dev *chip = i2c_get_clientdata(client);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(chip->irq, 1);

	return 0;
}
/* System resume: disarm irq wake and re-sync the irq masks. */
static int max8997_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct max8997_dev *chip = i2c_get_clientdata(client);

	if (device_may_wakeup(dev))
		irq_set_irq_wake(chip->irq, 0);

	return max8997_irq_resume(chip);
}
/* PM callbacks: suspend/resume toggle irq wake; freeze/restore snapshot
 * and reload the register dump across hibernation. */
const struct dev_pm_ops max8997_pm = {
	.suspend = max8997_suspend,
	.resume = max8997_resume,
	.freeze = max8997_freeze,
	.restore = max8997_restore,
};

static struct i2c_driver max8997_i2c_driver = {
	.driver = {
		.name = "max8997",
		.owner = THIS_MODULE,
		.pm = &max8997_pm,
	},
	.probe = max8997_i2c_probe,
	.remove = max8997_i2c_remove,
	.id_table = max8997_i2c_id,
};
/* Register the i2c driver. */
static int __init max8997_i2c_init(void)
{
	return i2c_add_driver(&max8997_i2c_driver);
}
/* init early so consumer devices can complete system boot */
subsys_initcall(max8997_i2c_init);
/* Unregister the i2c driver on module unload. */
static void __exit max8997_i2c_exit(void)
{
	i2c_del_driver(&max8997_i2c_driver);
}
module_exit(max8997_i2c_exit);
MODULE_DESCRIPTION("MAXIM 8997 multi-function core driver");
MODULE_AUTHOR("MyungJoo Ham <myungjoo.ham@samsung.com>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
jidongxiao/hyperpsonline | qemu-2.2.0/hw/display/qxl-render.c | 39 | 9610 | /*
* qxl local rendering (aka display on sdl/vnc)
*
* Copyright (C) 2010 Red Hat, Inc.
*
* maintained by Gerd Hoffmann <kraxel@redhat.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 or
* (at your option) version 3 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, see <http://www.gnu.org/licenses/>.
*/
#include "qxl.h"
#include "trace.h"
/*
 * Copy one dirty rectangle from the guest's primary surface into the
 * local display surface (the sdl/vnc view).  No-op when the display
 * surface aliases guest memory directly.
 */
static void qxl_blit(PCIQXLDevice *qxl, QXLRect *rect)
{
    DisplaySurface *surface = qemu_console_surface(qxl->vga.con);
    uint8_t *dst = surface_data(surface);
    uint8_t *src;
    int len, i;

    if (is_buffer_shared(surface)) {
        return;
    }
    trace_qxl_render_blit(qxl->guest_primary.qxl_stride,
                          rect->left, rect->right, rect->top, rect->bottom);
    src = qxl->guest_primary.data;
    if (qxl->guest_primary.qxl_stride < 0) {
        /* qxl surface is upside down, walk src scanlines
         * in reverse order to flip it */
        src += (qxl->guest_primary.surface.height - rect->top - 1) *
            qxl->guest_primary.abs_stride;
    } else {
        src += rect->top * qxl->guest_primary.abs_stride;
    }
    /* dst always advances top-down; the per-row step below adds the
     * *signed* qxl_stride to src, so a negative stride walks bottom-up. */
    dst += rect->top  * qxl->guest_primary.abs_stride;
    src += rect->left * qxl->guest_primary.bytes_pp;
    dst += rect->left * qxl->guest_primary.bytes_pp;
    len  = (rect->right - rect->left) * qxl->guest_primary.bytes_pp;

    for (i = rect->top; i < rect->bottom; i++) {
        memcpy(dst, src, len);
        dst += qxl->guest_primary.abs_stride;
        src += qxl->guest_primary.qxl_stride;
    }
}
/*
 * Cache geometry and pixel format info for the (re)created guest primary
 * surface, and flag that the local display surface must be rebuilt.
 */
void qxl_render_resize(PCIQXLDevice *qxl)
{
    QXLSurfaceCreate *sc = &qxl->guest_primary.surface;
    int bytes, bits;

    qxl->guest_primary.qxl_stride = sc->stride;
    qxl->guest_primary.abs_stride = abs(sc->stride);
    qxl->guest_primary.resized++;

    switch (sc->format) {
    case SPICE_SURFACE_FMT_16_555:
        bytes = 2;
        bits = 15;
        break;
    case SPICE_SURFACE_FMT_16_565:
        bytes = 2;
        bits = 16;
        break;
    case SPICE_SURFACE_FMT_32_xRGB:
    case SPICE_SURFACE_FMT_32_ARGB:
        bytes = 4;
        bits = 32;
        break;
    default:
        /* Unknown format: complain and fall back to 32bpp. */
        fprintf(stderr, "%s: unhandled format: %x\n", __FUNCTION__,
                qxl->guest_primary.surface.format);
        bytes = 4;
        bits = 32;
        break;
    }
    qxl->guest_primary.bytes_pp = bytes;
    qxl->guest_primary.bits_pp = bits;
}
/* Initialise *area to cover the whole guest primary surface. */
static void qxl_set_rect_to_surface(PCIQXLDevice *qxl, QXLRect *area)
{
    QXLSurfaceCreate *sc = &qxl->guest_primary.surface;

    area->top = 0;
    area->left = 0;
    area->bottom = sc->height;
    area->right = sc->width;
}
/*
 * Rebuild the local display surface if the guest primary changed, then
 * blit every pending dirty rectangle to it.  Caller must hold ssd.lock.
 */
static void qxl_render_update_area_unlocked(PCIQXLDevice *qxl)
{
    VGACommonState *vga = &qxl->vga;
    DisplaySurface *surface;
    int i;

    if (qxl->guest_primary.resized) {
        qxl->guest_primary.resized = 0;
        qxl->guest_primary.data = qxl_phys2virt(qxl,
                                                qxl->guest_primary.surface.mem,
                                                MEMSLOT_GROUP_GUEST);
        if (!qxl->guest_primary.data) {
            /* Guest gave us an unmappable surface address; bail out. */
            return;
        }
        /* Mark the whole new surface dirty. */
        qxl_set_rect_to_surface(qxl, &qxl->dirty[0]);
        qxl->num_dirty_rects = 1;
        trace_qxl_render_guest_primary_resized(
               qxl->guest_primary.surface.width,
               qxl->guest_primary.surface.height,
               qxl->guest_primary.qxl_stride,
               qxl->guest_primary.bytes_pp,
               qxl->guest_primary.bits_pp);
        if (qxl->guest_primary.qxl_stride > 0) {
            /* Top-down surface: render directly out of guest memory. */
            pixman_format_code_t format =
                qemu_default_pixman_format(qxl->guest_primary.bits_pp, true);
            surface = qemu_create_displaysurface_from
                (qxl->guest_primary.surface.width,
                 qxl->guest_primary.surface.height,
                 format,
                 qxl->guest_primary.abs_stride,
                 qxl->guest_primary.data);
        } else {
            /* Bottom-up surface: allocate a local copy and flip via blit. */
            surface = qemu_create_displaysurface
                (qxl->guest_primary.surface.width,
                 qxl->guest_primary.surface.height);
        }
        dpy_gfx_replace_surface(vga->con, surface);
    }
    if (!qxl->guest_primary.data) {
        return;
    }
    for (i = 0; i < qxl->num_dirty_rects; i++) {
        if (qemu_spice_rect_is_empty(qxl->dirty+i)) {
            /* Empty rect terminates the list early. */
            break;
        }
        /* Skip rects that don't fit the surface (guest-controlled data). */
        if (qxl->dirty[i].left < 0 ||
            qxl->dirty[i].top < 0 ||
            qxl->dirty[i].left > qxl->dirty[i].right ||
            qxl->dirty[i].top > qxl->dirty[i].bottom ||
            qxl->dirty[i].right > qxl->guest_primary.surface.width ||
            qxl->dirty[i].bottom > qxl->guest_primary.surface.height) {
            continue;
        }
        qxl_blit(qxl, qxl->dirty+i);
        dpy_gfx_update(vga->con,
                       qxl->dirty[i].left, qxl->dirty[i].top,
                       qxl->dirty[i].right - qxl->dirty[i].left,
                       qxl->dirty[i].bottom - qxl->dirty[i].top);
    }
    qxl->num_dirty_rects = 0;
}
/*
* use ssd.lock to protect render_update_cookie_num.
* qxl_render_update is called by io thread or vcpu thread, and the completion
* callbacks are called by spice_server thread, defering to bh called from the
* io thread.
*/
/*
 * Refresh the local display.  If there are no pending guest commands (or
 * the VM is stopped), render synchronously; otherwise kick an async
 * update-area on the spice server whose completion is handled in
 * qxl_render_update_area_done().  Called from the io or vcpu thread.
 */
void qxl_render_update(PCIQXLDevice *qxl)
{
    QXLCookie *cookie;

    qemu_mutex_lock(&qxl->ssd.lock);

    if (!runstate_is_running() || !qxl->guest_primary.commands) {
        qxl_render_update_area_unlocked(qxl);
        qemu_mutex_unlock(&qxl->ssd.lock);
        return;
    }

    qxl->guest_primary.commands = 0;
    qxl->render_update_cookie_num++;
    qemu_mutex_unlock(&qxl->ssd.lock);

    /* Cookie travels to the spice server thread and back; freed in the
     * completion path. */
    cookie = qxl_cookie_new(QXL_COOKIE_TYPE_RENDER_UPDATE_AREA,
                            0);
    qxl_set_rect_to_surface(qxl, &cookie->u.render.area);
    qxl_spice_update_area(qxl, 0, &cookie->u.render.area, NULL,
                          0, 1 /* clear_dirty_region */, QXL_ASYNC, cookie);
}
/* Bottom half: perform the deferred render under ssd.lock (io thread). */
void qxl_render_update_area_bh(void *opaque)
{
    PCIQXLDevice *dev = opaque;

    qemu_mutex_lock(&dev->ssd.lock);
    qxl_render_update_area_unlocked(dev);
    qemu_mutex_unlock(&dev->ssd.lock);
}
/*
 * Async update-area completion (runs in the spice server thread): defer
 * the actual rendering to the io thread via the update_area bh and
 * release the cookie created in qxl_render_update().
 */
void qxl_render_update_area_done(PCIQXLDevice *qxl, QXLCookie *cookie)
{
    qemu_mutex_lock(&qxl->ssd.lock);
    trace_qxl_render_update_area_done(cookie);
    qemu_bh_schedule(qxl->update_area_bh);
    qxl->render_update_cookie_num--;
    qemu_mutex_unlock(&qxl->ssd.lock);
    g_free(cookie);
}
/*
 * Convert a guest QXLCursor into a QEMUCursor.  Returns NULL for cursor
 * types this code does not implement.
 *
 * NOTE(review): width/height/type come from guest-controlled data and
 * the copy sizes below are derived from them without validating
 * chunk.data_size — confirm the caller guarantees the chunk is large
 * enough before trusting this path.
 */
static QEMUCursor *qxl_cursor(PCIQXLDevice *qxl, QXLCursor *cursor)
{
    QEMUCursor *c;
    uint8_t *image, *mask;
    size_t size;

    c = cursor_alloc(cursor->header.width, cursor->header.height);
    c->hot_x = cursor->header.hot_spot_x;
    c->hot_y = cursor->header.hot_spot_y;
    switch (cursor->header.type) {
    case SPICE_CURSOR_TYPE_ALPHA:
        /* 32bpp ARGB, copied straight into the cursor pixel buffer. */
        size = sizeof(uint32_t) * cursor->header.width * cursor->header.height;
        memcpy(c->data, cursor->chunk.data, size);
        if (qxl->debug > 2) {
            cursor_print_ascii_art(c, "qxl/alpha");
        }
        break;
    case SPICE_CURSOR_TYPE_MONO:
        /* Chunk layout: 1bpp mask first, then 1bpp image. */
        mask = cursor->chunk.data;
        image = mask + cursor_get_mono_bpl(c) * c->width;
        cursor_set_mono(c, 0xffffff, 0x000000, image, 1, mask);
        if (qxl->debug > 2) {
            cursor_print_ascii_art(c, "qxl/mono");
        }
        break;
    default:
        fprintf(stderr, "%s: not implemented: type %d\n",
                __FUNCTION__, cursor->header.type);
        goto fail;
    }
    return c;

fail:
    cursor_put(c);
    return NULL;
}
/* called from spice server thread context only */
/*
 * Handle a guest cursor command (set/move) for the local display.
 * Returns 0 on success; returns 1 when the command data could not be
 * mapped or was malformed (the caller treats that as a failed command).
 */
int qxl_render_cursor(PCIQXLDevice *qxl, QXLCommandExt *ext)
{
    QXLCursorCmd *cmd = qxl_phys2virt(qxl, ext->cmd.data, ext->group_id);
    QXLCursor *cursor;
    QEMUCursor *c;

    if (!cmd) {
        return 1;
    }
    if (!dpy_cursor_define_supported(qxl->vga.con)) {
        /* Display backend renders cursors itself; nothing to do here. */
        return 0;
    }

    if (qxl->debug > 1 && cmd->type != QXL_CURSOR_MOVE) {
        fprintf(stderr, "%s", __FUNCTION__);
        qxl_log_cmd_cursor(qxl, cmd, ext->group_id);
        fprintf(stderr, "\n");
    }
    switch (cmd->type) {
    case QXL_CURSOR_SET:
        cursor = qxl_phys2virt(qxl, cmd->u.set.shape, ext->group_id);
        if (!cursor) {
            return 1;
        }
        /* Multi-chunk cursors are not supported. */
        if (cursor->chunk.data_size != cursor->data_size) {
            fprintf(stderr, "%s: multiple chunks\n", __FUNCTION__);
            return 1;
        }
        c = qxl_cursor(qxl, cursor);
        if (c == NULL) {
            /* Unsupported cursor type: fall back to a builtin pointer. */
            c = cursor_builtin_left_ptr();
        }
        qemu_mutex_lock(&qxl->ssd.lock);
        if (qxl->ssd.cursor) {
            cursor_put(qxl->ssd.cursor);
        }
        qxl->ssd.cursor = c;
        qxl->ssd.mouse_x = cmd->u.set.position.x;
        qxl->ssd.mouse_y = cmd->u.set.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        break;
    case QXL_CURSOR_MOVE:
        qemu_mutex_lock(&qxl->ssd.lock);
        qxl->ssd.mouse_x = cmd->u.position.x;
        qxl->ssd.mouse_y = cmd->u.position.y;
        qemu_mutex_unlock(&qxl->ssd.lock);
        break;
    }
    return 0;
}
| gpl-2.0 |
loongson-community/preempt-rt-linux | drivers/gpu/drm/nouveau/nv50_fifo.c | 39 | 14789 | /*
* Copyright (C) 2007 Ben Skeggs.
* All Rights Reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice (including the
* next paragraph) shall be included in all copies or substantial
* portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
* IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
*/
#include "drmP.h"
#include "drm.h"
#include "nouveau_drv.h"
/* Double-buffered channel playlist ("thingo"): the inactive gpuobj is
 * rebuilt and then handed to the hardware on each schedule update. */
struct nv50_fifo_priv {
	struct nouveau_gpuobj_ref *thingo[2];
	int cur_thingo;	/* index of the buffer currently in use */
};
#define IS_G80 ((dev_priv->chipset & 0xf0) == 0x50)
/*
 * Rebuild the PFIFO channel playlist in the inactive buffer (listing
 * every channel with a valid RAMFC, except the dummy channels 0/127)
 * and point the hardware at it.
 */
static void
nv50_fifo_init_thingo(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;
	struct nouveau_gpuobj_ref *cur;
	int i, nr;

	NV_DEBUG(dev, "\n");

	cur = priv->thingo[priv->cur_thingo];
	priv->cur_thingo = !priv->cur_thingo;

	/* We never schedule channel 0 or 127 */
	dev_priv->engine.instmem.prepare_access(dev, true);
	for (i = 1, nr = 0; i < 127; i++) {
		if (dev_priv->fifos[i] && dev_priv->fifos[i]->ramfc)
			nv_wo32(dev, cur->gpuobj, nr++, i);
	}
	dev_priv->engine.instmem.finish_access(dev);

	/* Hand the new list (address + entry count) to PFIFO. */
	nv_wr32(dev, 0x32f4, cur->instance >> 12);
	nv_wr32(dev, 0x32ec, nr);
	nv_wr32(dev, 0x2500, 0x101);
}
/*
 * Point the PFIFO context table entry for @channel at its RAMFC and
 * mark it enabled.  @nt skips the playlist rebuild (used when batching
 * several table updates).  G80 and later chips encode the RAMFC
 * instance address with different shifts.
 */
static int
nv50_fifo_channel_enable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_channel *chan = dev_priv->fifos[channel];
	uint32_t inst;

	NV_DEBUG(dev, "ch%d\n", channel);

	if (!chan->ramfc)
		return -EINVAL;

	if (IS_G80)
		inst = chan->ramfc->instance >> 12;
	else
		inst = chan->ramfc->instance >> 8;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel),
		inst | NV50_PFIFO_CTX_TABLE_CHANNEL_ENABLED);

	if (!nt)
		nv50_fifo_init_thingo(dev);
	return 0;
}
/*
 * Clear the PFIFO context table entry for @channel (write the per-chip
 * "no instance" mask without the enabled bit).  @nt as in channel_enable.
 */
static void
nv50_fifo_channel_disable(struct drm_device *dev, int channel, bool nt)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	uint32_t inst;

	NV_DEBUG(dev, "ch%d, nt=%d\n", channel, nt);

	if (IS_G80)
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G80;
	else
		inst = NV50_PFIFO_CTX_TABLE_INSTANCE_MASK_G84;
	nv_wr32(dev, NV50_PFIFO_CTX_TABLE(channel), inst);

	if (!nt)
		nv50_fifo_init_thingo(dev);
}
/* Pulse the PFIFO enable bit in PMC to reset the engine. */
static void
nv50_fifo_init_reset(struct drm_device *dev)
{
	uint32_t pmc_e = NV_PMC_ENABLE_PFIFO;

	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) & ~pmc_e);
	nv_wr32(dev, NV03_PMC_ENABLE, nv_rd32(dev, NV03_PMC_ENABLE) |  pmc_e);
}
/* Ack any pending PFIFO interrupts and unmask them all. */
static void
nv50_fifo_init_intr(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, NV03_PFIFO_INTR_0, 0xFFFFFFFF);
	nv_wr32(dev, NV03_PFIFO_INTR_EN_0, 0xFFFFFFFF);
}
/*
 * (Re)populate the whole PFIFO context table from the software channel
 * list, then rebuild the playlist once at the end.
 */
static void
nv50_fifo_init_context_table(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	int chid;

	NV_DEBUG(dev, "\n");

	for (chid = 0; chid < NV50_PFIFO_CTX_TABLE__SIZE; chid++) {
		if (!dev_priv->fifos[chid])
			nv50_fifo_channel_disable(dev, chid, true);
		else
			nv50_fifo_channel_enable(dev, chid, true);
	}

	nv50_fifo_init_thingo(dev);
}
/* Write the NVIDIA-magic value to 0x250c; presumably mirrors what the
 * binary driver does here — exact meaning undocumented. */
static void
nv50_fifo_init_regs__nv(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x250c, 0x6f3cfc34);
}
/* Zero the PFIFO state registers and enable the dummy channels. */
static void
nv50_fifo_init_regs(struct drm_device *dev)
{
	NV_DEBUG(dev, "\n");

	nv_wr32(dev, 0x2500, 0);
	nv_wr32(dev, 0x3250, 0);
	nv_wr32(dev, 0x3220, 0);
	nv_wr32(dev, 0x3204, 0);
	nv_wr32(dev, 0x3210, 0);
	nv_wr32(dev, 0x3270, 0);

	/* Enable dummy channels setup by nv50_instmem.c */
	nv50_fifo_channel_enable(dev, 0, true);
	nv50_fifo_channel_enable(dev, 127, true);
}
/*
 * Bring up the PFIFO engine.  On first call, allocate the private state
 * and both playlist buffers; on re-init (e.g. resume) just flip the
 * playlist buffer and reprogram the hardware.
 */
int
nv50_fifo_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv;
	int ret;

	NV_DEBUG(dev, "\n");

	priv = dev_priv->engine.fifo.priv;
	if (priv) {
		/* Re-init path: keep existing buffers, flip active one. */
		priv->cur_thingo = !priv->cur_thingo;
		goto just_reset;
	}

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	dev_priv->engine.fifo.priv = priv;

	/* Partial allocations are cleaned up by nv50_fifo_takedown(). */
	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[0]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo0: %d\n", ret);
		return ret;
	}

	ret = nouveau_gpuobj_new_ref(dev, NULL, NULL, 0, 128*4, 0x1000,
				     NVOBJ_FLAG_ZERO_ALLOC, &priv->thingo[1]);
	if (ret) {
		NV_ERROR(dev, "error creating thingo1: %d\n", ret);
		return ret;
	}

just_reset:
	nv50_fifo_init_reset(dev);
	nv50_fifo_init_intr(dev);
	nv50_fifo_init_context_table(dev);
	nv50_fifo_init_regs__nv(dev);
	nv50_fifo_init_regs(dev);
	dev_priv->engine.fifo.enable(dev);
	dev_priv->engine.fifo.reassign(dev, true);

	return 0;
}
/* Tear down the PFIFO private state; safe to call if never initialised. */
void
nv50_fifo_takedown(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nv50_fifo_priv *priv = dev_priv->engine.fifo.priv;

	NV_DEBUG(dev, "\n");

	if (priv == NULL)
		return;

	/* Release both playlist buffers before freeing the state. */
	nouveau_gpuobj_ref_del(dev, &priv->thingo[0]);
	nouveau_gpuobj_ref_del(dev, &priv->thingo[1]);

	dev_priv->engine.fifo.priv = NULL;
	kfree(priv);
}
int
nv50_fifo_channel_id(struct drm_device *dev)
{
return nv_rd32(dev, NV03_PFIFO_CACHE1_PUSH1) &
NV50_PFIFO_CACHE1_PUSH1_CHID_MASK;
}
/*
 * Allocate and initialise the channel's RAMFC (fifo context) and CACHE1
 * spill buffer, then enable the channel in the context table.  On G80
 * the RAMFC lives at fixed offsets inside the channel's RAMIN; on later
 * chips it is a separately-allocated gpuobj.
 */
int
nv50_fifo_create_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = NULL;
	int ret;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	if (IS_G80) {
		/* RAMFC at +0x0, CACHE1 spill at +0x400 inside RAMIN. */
		uint32_t ramin_poffset = chan->ramin->gpuobj->im_pramin->start;
		uint32_t ramin_voffset = chan->ramin->gpuobj->im_backing_start;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset, ramin_voffset,
					      0x100, NVOBJ_FLAG_ZERO_ALLOC |
					      NVOBJ_FLAG_ZERO_FREE, &ramfc,
					      &chan->ramfc);
		if (ret)
			return ret;

		ret = nouveau_gpuobj_new_fake(dev, ramin_poffset + 0x0400,
					      ramin_voffset + 0x0400, 4096,
					      0, NULL, &chan->cache);
		if (ret)
			return ret;
	} else {
		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 0x100, 256,
					     NVOBJ_FLAG_ZERO_ALLOC |
					     NVOBJ_FLAG_ZERO_FREE,
					     &chan->ramfc);
		if (ret)
			return ret;
		ramfc = chan->ramfc->gpuobj;

		ret = nouveau_gpuobj_new_ref(dev, chan, NULL, 0, 4096, 1024,
					     0, &chan->cache);
		if (ret)
			return ret;
	}

	/* Seed the RAMFC: pushbuffer get/put, RAMHT, and the magic
	 * default values the hardware expects. */
	dev_priv->engine.instmem.prepare_access(dev, true);

	nv_wo32(dev, ramfc, 0x08/4, chan->pushbuf_base);
	nv_wo32(dev, ramfc, 0x10/4, chan->pushbuf_base);
	nv_wo32(dev, ramfc, 0x48/4, chan->pushbuf->instance >> 4);
	nv_wo32(dev, ramfc, 0x80/4, (0xc << 24) | (chan->ramht->instance >> 4));
	nv_wo32(dev, ramfc, 0x3c/4, 0x00086078);
	nv_wo32(dev, ramfc, 0x44/4, 0x2101ffff);
	nv_wo32(dev, ramfc, 0x60/4, 0x7fffffff);
	nv_wo32(dev, ramfc, 0x40/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x7c/4, 0x30000001);
	nv_wo32(dev, ramfc, 0x78/4, 0x00000000);
	nv_wo32(dev, ramfc, 0x4c/4, 0xffffffff);

	if (!IS_G80) {
		/* G84+: RAMIN header references, plus cache/ramin handles
		 * stored in the RAMFC itself. */
		nv_wo32(dev, chan->ramin->gpuobj, 0, chan->id);
		nv_wo32(dev, chan->ramin->gpuobj, 1,
						chan->ramfc->instance >> 8);

		nv_wo32(dev, ramfc, 0x88/4, chan->cache->instance >> 10);
		nv_wo32(dev, ramfc, 0x98/4, chan->ramin->instance >> 12);
	}

	dev_priv->engine.instmem.finish_access(dev);

	ret = nv50_fifo_channel_enable(dev, chan->id, false);
	if (ret) {
		NV_ERROR(dev, "error enabling ch%d: %d\n", chan->id, ret);
		nouveau_gpuobj_ref_del(dev, &chan->ramfc);
		return ret;
	}

	return 0;
}
/*
 * Disable the channel in the context table and release its RAMFC and
 * CACHE1 spill buffer.  chan->ramfc is cleared first so the playlist
 * rebuild sees the channel as gone.
 */
void
nv50_fifo_destroy_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct nouveau_gpuobj_ref *ramfc = chan->ramfc;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	/* This will ensure the channel is seen as disabled. */
	chan->ramfc = NULL;
	nv50_fifo_channel_disable(dev, chan->id, false);

	/* Dummy channel, also used on ch 127 */
	if (chan->id == 0)
		nv50_fifo_channel_disable(dev, 127, false);

	nouveau_gpuobj_ref_del(dev, &ramfc);
	nouveau_gpuobj_ref_del(dev, &chan->cache);
}
/*
 * Load a channel's saved fifo state from its RAMFC shadow into the
 * PFIFO registers, replay any spilled CACHE1 method/data pairs, and
 * make the channel current.  The register list mirrors (in reverse)
 * what nv50_fifo_unload_context() saves — keep both in sync.
 */
int
nv50_fifo_load_context(struct nouveau_channel *chan)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *ramfc = chan->ramfc->gpuobj;
	struct nouveau_gpuobj *cache = chan->cache->gpuobj;
	int ptr, cnt;

	NV_DEBUG(dev, "ch%d\n", chan->id);

	dev_priv->engine.instmem.prepare_access(dev, false);

	nv_wr32(dev, 0x3330, nv_ro32(dev, ramfc, 0x00/4));
	nv_wr32(dev, 0x3334, nv_ro32(dev, ramfc, 0x04/4));
	nv_wr32(dev, 0x3240, nv_ro32(dev, ramfc, 0x08/4));
	nv_wr32(dev, 0x3320, nv_ro32(dev, ramfc, 0x0c/4));
	nv_wr32(dev, 0x3244, nv_ro32(dev, ramfc, 0x10/4));
	nv_wr32(dev, 0x3328, nv_ro32(dev, ramfc, 0x14/4));
	nv_wr32(dev, 0x3368, nv_ro32(dev, ramfc, 0x18/4));
	nv_wr32(dev, 0x336c, nv_ro32(dev, ramfc, 0x1c/4));
	nv_wr32(dev, 0x3370, nv_ro32(dev, ramfc, 0x20/4));
	nv_wr32(dev, 0x3374, nv_ro32(dev, ramfc, 0x24/4));
	nv_wr32(dev, 0x3378, nv_ro32(dev, ramfc, 0x28/4));
	nv_wr32(dev, 0x337c, nv_ro32(dev, ramfc, 0x2c/4));
	nv_wr32(dev, 0x3228, nv_ro32(dev, ramfc, 0x30/4));
	nv_wr32(dev, 0x3364, nv_ro32(dev, ramfc, 0x34/4));
	nv_wr32(dev, 0x32a0, nv_ro32(dev, ramfc, 0x38/4));
	nv_wr32(dev, 0x3224, nv_ro32(dev, ramfc, 0x3c/4));
	nv_wr32(dev, 0x324c, nv_ro32(dev, ramfc, 0x40/4));
	nv_wr32(dev, 0x2044, nv_ro32(dev, ramfc, 0x44/4));
	nv_wr32(dev, 0x322c, nv_ro32(dev, ramfc, 0x48/4));
	nv_wr32(dev, 0x3234, nv_ro32(dev, ramfc, 0x4c/4));
	nv_wr32(dev, 0x3340, nv_ro32(dev, ramfc, 0x50/4));
	nv_wr32(dev, 0x3344, nv_ro32(dev, ramfc, 0x54/4));
	nv_wr32(dev, 0x3280, nv_ro32(dev, ramfc, 0x58/4));
	nv_wr32(dev, 0x3254, nv_ro32(dev, ramfc, 0x5c/4));
	nv_wr32(dev, 0x3260, nv_ro32(dev, ramfc, 0x60/4));
	nv_wr32(dev, 0x3264, nv_ro32(dev, ramfc, 0x64/4));
	nv_wr32(dev, 0x3268, nv_ro32(dev, ramfc, 0x68/4));
	nv_wr32(dev, 0x326c, nv_ro32(dev, ramfc, 0x6c/4));
	nv_wr32(dev, 0x32e4, nv_ro32(dev, ramfc, 0x70/4));
	nv_wr32(dev, 0x3248, nv_ro32(dev, ramfc, 0x74/4));
	nv_wr32(dev, 0x2088, nv_ro32(dev, ramfc, 0x78/4));
	nv_wr32(dev, 0x2058, nv_ro32(dev, ramfc, 0x7c/4));
	nv_wr32(dev, 0x2210, nv_ro32(dev, ramfc, 0x80/4));

	/* Replay the spilled CACHE1 entries (count saved at 0x84). */
	cnt = nv_ro32(dev, ramfc, 0x84/4);
	for (ptr = 0; ptr < cnt; ptr++) {
		nv_wr32(dev, NV40_PFIFO_CACHE1_METHOD(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 0));
		nv_wr32(dev, NV40_PFIFO_CACHE1_DATA(ptr),
			nv_ro32(dev, cache, (ptr * 2) + 1));
	}
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUT, cnt << 2);
	nv_wr32(dev, NV03_PFIFO_CACHE1_GET, 0);

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		nv_wr32(dev, 0x340c, nv_ro32(dev, ramfc, 0x88/4));
		nv_wr32(dev, 0x3400, nv_ro32(dev, ramfc, 0x8c/4));
		nv_wr32(dev, 0x3404, nv_ro32(dev, ramfc, 0x90/4));
		nv_wr32(dev, 0x3408, nv_ro32(dev, ramfc, 0x94/4));
		nv_wr32(dev, 0x3410, nv_ro32(dev, ramfc, 0x98/4));
	}

	dev_priv->engine.instmem.finish_access(dev);

	/* Make the channel current in CACHE1. */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, chan->id | (1<<16));
	return 0;
}
/*
 * Save the currently-active channel's fifo state from the PFIFO
 * registers back into its RAMFC shadow, spilling any unprocessed CACHE1
 * method/data pairs into the channel's cache buffer.  Mirror image of
 * nv50_fifo_load_context() — keep the register lists in sync.
 */
int
nv50_fifo_unload_context(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
	struct nouveau_gpuobj *ramfc, *cache;
	struct nouveau_channel *chan = NULL;
	int chid, get, put, ptr;

	NV_DEBUG(dev, "\n");

	/* Dummy channels (0 and 127) have no state worth saving. */
	chid = pfifo->channel_id(dev);
	if (chid < 1 || chid >= dev_priv->engine.fifo.channels - 1)
		return 0;

	chan = dev_priv->fifos[chid];
	if (!chan) {
		NV_ERROR(dev, "Inactive channel on PFIFO: %d\n", chid);
		return -EINVAL;
	}
	NV_DEBUG(dev, "ch%d\n", chan->id);
	ramfc = chan->ramfc->gpuobj;
	cache = chan->cache->gpuobj;

	dev_priv->engine.instmem.prepare_access(dev, true);

	nv_wo32(dev, ramfc, 0x00/4, nv_rd32(dev, 0x3330));
	nv_wo32(dev, ramfc, 0x04/4, nv_rd32(dev, 0x3334));
	nv_wo32(dev, ramfc, 0x08/4, nv_rd32(dev, 0x3240));
	nv_wo32(dev, ramfc, 0x0c/4, nv_rd32(dev, 0x3320));
	nv_wo32(dev, ramfc, 0x10/4, nv_rd32(dev, 0x3244));
	nv_wo32(dev, ramfc, 0x14/4, nv_rd32(dev, 0x3328));
	nv_wo32(dev, ramfc, 0x18/4, nv_rd32(dev, 0x3368));
	nv_wo32(dev, ramfc, 0x1c/4, nv_rd32(dev, 0x336c));
	nv_wo32(dev, ramfc, 0x20/4, nv_rd32(dev, 0x3370));
	nv_wo32(dev, ramfc, 0x24/4, nv_rd32(dev, 0x3374));
	nv_wo32(dev, ramfc, 0x28/4, nv_rd32(dev, 0x3378));
	nv_wo32(dev, ramfc, 0x2c/4, nv_rd32(dev, 0x337c));
	nv_wo32(dev, ramfc, 0x30/4, nv_rd32(dev, 0x3228));
	nv_wo32(dev, ramfc, 0x34/4, nv_rd32(dev, 0x3364));
	nv_wo32(dev, ramfc, 0x38/4, nv_rd32(dev, 0x32a0));
	nv_wo32(dev, ramfc, 0x3c/4, nv_rd32(dev, 0x3224));
	nv_wo32(dev, ramfc, 0x40/4, nv_rd32(dev, 0x324c));
	nv_wo32(dev, ramfc, 0x44/4, nv_rd32(dev, 0x2044));
	nv_wo32(dev, ramfc, 0x48/4, nv_rd32(dev, 0x322c));
	nv_wo32(dev, ramfc, 0x4c/4, nv_rd32(dev, 0x3234));
	nv_wo32(dev, ramfc, 0x50/4, nv_rd32(dev, 0x3340));
	nv_wo32(dev, ramfc, 0x54/4, nv_rd32(dev, 0x3344));
	nv_wo32(dev, ramfc, 0x58/4, nv_rd32(dev, 0x3280));
	nv_wo32(dev, ramfc, 0x5c/4, nv_rd32(dev, 0x3254));
	nv_wo32(dev, ramfc, 0x60/4, nv_rd32(dev, 0x3260));
	nv_wo32(dev, ramfc, 0x64/4, nv_rd32(dev, 0x3264));
	nv_wo32(dev, ramfc, 0x68/4, nv_rd32(dev, 0x3268));
	nv_wo32(dev, ramfc, 0x6c/4, nv_rd32(dev, 0x326c));
	nv_wo32(dev, ramfc, 0x70/4, nv_rd32(dev, 0x32e4));
	nv_wo32(dev, ramfc, 0x74/4, nv_rd32(dev, 0x3248));
	nv_wo32(dev, ramfc, 0x78/4, nv_rd32(dev, 0x2088));
	nv_wo32(dev, ramfc, 0x7c/4, nv_rd32(dev, 0x2058));
	nv_wo32(dev, ramfc, 0x80/4, nv_rd32(dev, 0x2210));

	/* Drain CACHE1 (get..put) into the spill buffer. */
	put = (nv_rd32(dev, NV03_PFIFO_CACHE1_PUT) & 0x7ff) >> 2;
	get = (nv_rd32(dev, NV03_PFIFO_CACHE1_GET) & 0x7ff) >> 2;
	ptr = 0;
	while (put != get) {
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_METHOD(get)));
		nv_wo32(dev, cache, ptr++,
			nv_rd32(dev, NV40_PFIFO_CACHE1_DATA(get)));
		get = (get + 1) & 0x1ff;
	}

	/* guessing that all the 0x34xx regs aren't on NV50 */
	if (!IS_G80) {
		/* Entry count (pairs) for load_context to replay. */
		nv_wo32(dev, ramfc, 0x84/4, ptr >> 1);
		nv_wo32(dev, ramfc, 0x88/4, nv_rd32(dev, 0x340c));
		nv_wo32(dev, ramfc, 0x8c/4, nv_rd32(dev, 0x3400));
		nv_wo32(dev, ramfc, 0x90/4, nv_rd32(dev, 0x3404));
		nv_wo32(dev, ramfc, 0x94/4, nv_rd32(dev, 0x3408));
		nv_wo32(dev, ramfc, 0x98/4, nv_rd32(dev, 0x3410));
	}

	dev_priv->engine.instmem.finish_access(dev);

	/*XXX: probably reload ch127 (NULL) state back too */
	nv_wr32(dev, NV03_PFIFO_CACHE1_PUSH1, 127);
	return 0;
}
| gpl-2.0 |
xInterlopeRx/android_kernel_samsung_lt02ltespr | drivers/char/csdio.c | 1319 | 27089 | /*
* Copyright (c) 2010, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/mutex.h>
#include <linux/serial_reg.h>
#include <linux/circ_buf.h>
#include <linux/gfp.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/platform_device.h>
/* Char device */
#include <linux/cdev.h>
#include <linux/fs.h>
/* Sdio device */
#include <linux/mmc/core.h>
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
#include <linux/mmc/sdio.h>
#include <linux/mmc/sdio_func.h>
#include <linux/mmc/sdio_ids.h>
#include <linux/csdio.h>
#define FALSE 0
#define TRUE 1
#define VERSION "0.5"
#define CSDIO_NUM_OF_SDIO_FUNCTIONS 7
#define CSDIO_DEV_NAME "csdio"
#define TP_DEV_NAME CSDIO_DEV_NAME"f"
#define CSDIO_DEV_PERMISSIONS 0666
#define CSDIO_SDIO_BUFFER_SIZE (64*512)
/* Char device numbers (allocated at init) and device count. */
int csdio_major;
int csdio_minor;
int csdio_transport_nr_devs = CSDIO_NUM_OF_SDIO_FUNCTIONS;
/* Module parameters restricting which card/host this driver binds to. */
static uint csdio_vendor_id;
static uint csdio_device_id;
static char *host_name;

/* Per-SDIO-function state: one "transport" char device per function. */
static struct csdio_func_t {
	struct sdio_func *m_func;
	int m_enabled;
	struct cdev m_cdev; /* char device structure */
	struct device *m_device;
	u32 m_block_size;
} *g_csdio_func_table[CSDIO_NUM_OF_SDIO_FUNCTIONS] = {0};

/* Driver-global state for the control char device. */
struct csdio_t {
	struct cdev m_cdev;
	struct device *m_device;
	struct class *m_driver_class;
	struct fasync_struct *m_async_queue;
	unsigned char m_current_irq_mask; /* currently enabled irqs */
	struct mmc_host *m_host;
	unsigned int m_num_of_func;
} g_csdio;

/* Per-open-file transfer settings (presumably configured via ioctl —
 * the ioctl handler is not visible in this chunk). */
struct csdio_file_descriptor {
	struct csdio_func_t *m_port;
	u32 m_block_mode;/* data tran. byte(0)/block(1) */
	u32 m_op_code; /* address auto increment flag */
	u32 m_address;
};

/* Shared staging buffer (CSDIO_SDIO_BUFFER_SIZE bytes) used by all
 * read/write paths. */
static void *g_sdio_buffer;
/*
* Open and release
*/
/*
 * Open a transport device: enable the corresponding SDIO function and
 * attach a fresh per-file descriptor (byte mode, settings zeroed).
 */
static int csdio_transport_open(struct inode *inode, struct file *filp)
{
	int ret = 0;
	struct csdio_func_t *port = NULL; /* device information */
	struct sdio_func *func = NULL;
	struct csdio_file_descriptor *descriptor = NULL;

	port = container_of(inode->i_cdev, struct csdio_func_t, m_cdev);
	func = port->m_func;
	descriptor = kzalloc(sizeof(struct csdio_file_descriptor), GFP_KERNEL);
	if (!descriptor) {
		ret = -ENOMEM;
		goto exit;
	}

	pr_info(TP_DEV_NAME"%d: open: func=%p, port=%p\n",
			func->num, func, port);
	sdio_claim_host(func);
	ret = sdio_enable_func(func);
	if (ret) {
		pr_err(TP_DEV_NAME"%d:Enable func failed (%d)\n",
				func->num, ret);
		ret = -EIO;
		goto free_descriptor;
	}
	descriptor->m_port = port;
	filp->private_data = descriptor;
	goto release_host;

free_descriptor:
	kfree(descriptor);
release_host:
	sdio_release_host(func);
exit:
	return ret;
}
/*
 * Close a transport device: disable the SDIO function and free the
 * per-file descriptor allocated in open.
 */
static int csdio_transport_release(struct inode *inode, struct file *filp)
{
	struct csdio_file_descriptor *fd = filp->private_data;
	struct sdio_func *func = fd->m_port->m_func;
	int ret;

	pr_info(TP_DEV_NAME"%d: release\n", func->num);

	sdio_claim_host(func);
	ret = sdio_disable_func(func);
	if (ret) {
		pr_err(TP_DEV_NAME"%d:Disable func failed(%d)\n",
				func->num, ret);
		ret = -EIO;
	}
	sdio_release_host(func);

	kfree(fd);
	return ret;
}
/*
* Data management: read and write
*/
/*
 * Read @count units from the SDIO function via CMD53 into user memory.
 *
 * In byte mode @count is a byte count; in block mode it is a number of
 * blocks and is converted to bytes using the current block size.  Data
 * is staged through the global bounce buffer g_sdio_buffer, so the
 * converted size is bounded by CSDIO_SDIO_BUFFER_SIZE (the old code had
 * no check and could overflow the 32 KiB kernel buffer).  On success
 * the caller-units count is returned.
 */
static ssize_t csdio_transport_read(struct file *filp,
		char __user *buf,
		size_t count,
		loff_t *f_pos)
{
	ssize_t ret = 0;
	struct csdio_file_descriptor *descriptor = filp->private_data;
	struct csdio_func_t *port = descriptor->m_port;
	struct sdio_func *func = port->m_func;
	size_t t_count = count;

	if (descriptor->m_block_mode) {
		pr_info(TP_DEV_NAME "%d: CMD53 read, Md:%d, Addr:0x%04X,"
			" Un:%zu (Bl:%zu, BlSz:%u)\n", func->num,
			descriptor->m_block_mode,
			descriptor->m_address,
			count*port->m_block_size,
			count, port->m_block_size);
		/* recalculate size: blocks -> bytes */
		count *= port->m_block_size;
	}
	/* bound the transfer by the bounce buffer size */
	if (count > CSDIO_SDIO_BUFFER_SIZE)
		return -EINVAL;
	sdio_claim_host(func);
	if (descriptor->m_op_code) {
		/* auto increment */
		ret = sdio_memcpy_fromio(func, g_sdio_buffer,
				descriptor->m_address, count);
	} else { /* FIFO */
		ret = sdio_readsb(func, g_sdio_buffer,
				descriptor->m_address, count);
	}
	sdio_release_host(func);
	if (!ret) {
		if (copy_to_user(buf, g_sdio_buffer, count))
			ret = -EFAULT;
		else
			ret = t_count;
	}
	if (ret < 0) {
		/* %zd/%zu: ret is ssize_t and count is size_t (the old
		 * format used %d for both, which is undefined) */
		pr_err(TP_DEV_NAME "%d: CMD53 read failed (%zd)"
			"(Md:%d, Addr:0x%04X, Sz:%zu)\n",
			func->num, ret,
			descriptor->m_block_mode,
			descriptor->m_address, count);
	}
	return ret;
}
/*
 * Write @count units from user memory to the SDIO function via CMD53.
 *
 * In block mode @count is converted from blocks to bytes.  The data is
 * staged through the global bounce buffer, so the converted size is
 * bounded by CSDIO_SDIO_BUFFER_SIZE (the old code copied an unchecked
 * amount into the fixed 32 KiB buffer — a kernel buffer overflow).
 * On success the caller-units count is returned.
 */
static ssize_t csdio_transport_write(struct file *filp,
		const char __user *buf,
		size_t count,
		loff_t *f_pos)
{
	ssize_t ret = 0;
	struct csdio_file_descriptor *descriptor = filp->private_data;
	struct csdio_func_t *port = descriptor->m_port;
	struct sdio_func *func = port->m_func;
	size_t t_count = count;

	if (descriptor->m_block_mode)
		count *= port->m_block_size;
	/* bound the transfer by the bounce buffer size */
	if (count > CSDIO_SDIO_BUFFER_SIZE)
		return -EINVAL;
	if (copy_from_user(g_sdio_buffer, buf, count)) {
		pr_err(TP_DEV_NAME"%d:copy_from_user failed\n", func->num);
		ret = -EFAULT;
	} else {
		sdio_claim_host(func);
		if (descriptor->m_op_code) {
			/* auto increment */
			ret = sdio_memcpy_toio(func, descriptor->m_address,
					g_sdio_buffer, count);
		} else {
			/* FIFO */
			ret = sdio_writesb(func, descriptor->m_address,
					g_sdio_buffer, count);
		}
		sdio_release_host(func);
		if (!ret) {
			ret = t_count;
		} else {
			/* %zd/%zu match ssize_t/size_t argument types */
			pr_err(TP_DEV_NAME "%d: CMD53 write failed (%zd)"
				"(Md:%d, Addr:0x%04X, Sz:%zu)\n",
				func->num, ret, descriptor->m_block_mode,
				descriptor->m_address, count);
		}
	}
	return ret;
}
/* disable interrupt for sdio client */
/*
 * Mask the client-side SDIO interrupt enable register (CCCR IENx) for
 * all functions.  The previously enabled set is remembered in
 * g_csdio.m_current_irq_mask so it can be restored later.
 */
static int disable_sdio_client_isr(struct sdio_func *func)
{
	int err;

	sdio_f0_writeb(func, 0, SDIO_CCCR_IENx, &err);
	if (err)
		pr_err(CSDIO_DEV_NAME" Can't sdio_f0_writeb (%d)\n", err);
	return err;
}
/*
* This handles the interrupt from SDIO.
*/
/*
 * SDIO interrupt handler: mask further client interrupts, then notify
 * user space via SIGIO so it can service the device and re-enable.
 */
static void csdio_sdio_irq(struct sdio_func *func)
{
	int err;

	pr_info(CSDIO_DEV_NAME" csdio_sdio_irq: func=%d\n", func->num);
	err = disable_sdio_client_isr(func);
	if (err) {
		pr_err(CSDIO_DEV_NAME" Can't disable client isr(%d)\n", err);
		return;
	}
	/* signal asynchronous readers */
	if (g_csdio.m_async_queue)
		kill_fasync(&g_csdio.m_async_queue, SIGIO, POLL_IN);
}
/*
* The ioctl() implementation
*/
/*
 * ioctl handler for a per-function transport device.
 *
 * Fixes over the previous version:
 *  - SET_BLOCK_SIZE no longer leaks the host claim: the old error path
 *    hit `break` between sdio_claim_host() and sdio_release_host(),
 *    leaving the MMC host locked forever.
 *  - SET_OP_CODE / SET_BLOCK_MODE log the value after it is read from
 *    user space (they used to print the stale previous value).
 *  - CONNECT_ISR logs the handler with %p instead of casting a
 *    function pointer to unsigned int (broken on 64-bit).
 */
static int csdio_transport_ioctl(struct inode *inode,
		struct file *filp,
		unsigned int cmd,
		unsigned long arg)
{
	int err = 0;
	int ret = 0;
	struct csdio_file_descriptor *descriptor = filp->private_data;
	struct csdio_func_t *port = descriptor->m_port;
	struct sdio_func *func = port->m_func;

	/* sanity check: return ENOTTY (inappropriate ioctl) before
	 * access_ok() */
	if ((_IOC_TYPE(cmd) != CSDIO_IOC_MAGIC) ||
			(_IOC_NR(cmd) > CSDIO_IOC_MAXNR)) {
		pr_err(TP_DEV_NAME "Wrong ioctl command parameters\n");
		ret = -ENOTTY;
		goto exit;
	}
	/* the direction is a bitmask, and VERIFY_WRITE catches R/W
	 * transfers. `Type' is user-oriented, while access_ok is
	 * kernel-oriented, so the concept of "read" and "write" is
	 * reversed */
	if (_IOC_DIR(cmd) & _IOC_READ) {
		err = !access_ok(VERIFY_WRITE, (void __user *)arg,
				_IOC_SIZE(cmd));
	} else {
		if (_IOC_DIR(cmd) & _IOC_WRITE) {
			err = !access_ok(VERIFY_READ, (void __user *)arg,
					_IOC_SIZE(cmd));
		}
	}
	if (err) {
		pr_err(TP_DEV_NAME "Wrong ioctl access direction\n");
		ret = -EFAULT;
		goto exit;
	}
	switch (cmd) {
	case CSDIO_IOC_SET_OP_CODE:
	{
		ret = get_user(descriptor->m_op_code,
				(unsigned char __user *)arg);
		if (ret) {
			pr_err(TP_DEV_NAME"%d:SET_OP_CODE get data"
					" from user space failed(%d)\n",
					func->num, ret);
			ret = -ENOTTY;
			break;
		}
		/* log the value actually set */
		pr_info(TP_DEV_NAME"%d:SET_OP_CODE=%d\n",
				func->num, descriptor->m_op_code);
	}
	break;
	case CSDIO_IOC_FUNCTION_SET_BLOCK_SIZE:
	{
		unsigned block_size;

		ret = get_user(block_size, (unsigned __user *)arg);
		if (ret) {
			pr_err(TP_DEV_NAME"%d:SET_BLOCK_SIZE get data"
					" from user space failed(%d)\n",
					func->num, ret);
			ret = -ENOTTY;
			break;
		}
		pr_info(TP_DEV_NAME"%d:SET_BLOCK_SIZE=%d\n",
				func->num, block_size);
		sdio_claim_host(func);
		ret = sdio_set_block_size(func, block_size);
		/* always release the host: the old code broke out of
		 * the switch on error while the host was still claimed */
		sdio_release_host(func);
		if (!ret) {
			port->m_block_size = block_size;
		} else {
			pr_err(TP_DEV_NAME"%d:SET_BLOCK_SIZE set block"
					" size to %d failed (%d)\n",
					func->num, block_size, ret);
			ret = -ENOTTY;
		}
	}
	break;
	case CSDIO_IOC_SET_BLOCK_MODE:
	{
		ret = get_user(descriptor->m_block_mode,
				(unsigned char __user *)arg);
		if (ret) {
			pr_err(TP_DEV_NAME"%d:SET_BLOCK_MODE get data"
					" from user space failed\n",
					func->num);
			ret = -ENOTTY;
			break;
		}
		/* log the value actually set */
		pr_info(TP_DEV_NAME"%d:SET_BLOCK_MODE=%d\n",
				func->num, descriptor->m_block_mode);
	}
	break;
	case CSDIO_IOC_CMD52:
	{
		struct csdio_cmd52_ctrl_t cmd52ctrl;
		int cmd52ret;

		if (copy_from_user(&cmd52ctrl,
				(const unsigned char __user *)arg,
				sizeof(cmd52ctrl))) {
			pr_err(TP_DEV_NAME"%d:IOC_CMD52 get data"
					" from user space failed\n",
					func->num);
			ret = -ENOTTY;
			break;
		}
		sdio_claim_host(func);
		if (cmd52ctrl.m_write)
			sdio_writeb(func, cmd52ctrl.m_data,
					cmd52ctrl.m_address, &cmd52ret);
		else
			cmd52ctrl.m_data = sdio_readb(func,
					cmd52ctrl.m_address, &cmd52ret);
		cmd52ctrl.m_ret = cmd52ret;
		sdio_release_host(func);
		if (cmd52ctrl.m_ret)
			pr_err(TP_DEV_NAME"%d:IOC_CMD52 failed (%d)\n",
					func->num, cmd52ctrl.m_ret);
		if (copy_to_user((unsigned char __user *)arg,
				&cmd52ctrl,
				sizeof(cmd52ctrl))) {
			pr_err(TP_DEV_NAME"%d:IOC_CMD52 put data"
					" to user space failed\n",
					func->num);
			ret = -ENOTTY;
			break;
		}
	}
	break;
	case CSDIO_IOC_CMD53:
	{
		/* latch the CMD53 transfer parameters into the per-open
		 * descriptor; the actual transfer happens in read/write */
		struct csdio_cmd53_ctrl_t csdio_cmd53_ctrl;

		if (copy_from_user(&csdio_cmd53_ctrl,
				(const char __user *)arg,
				sizeof(csdio_cmd53_ctrl))) {
			ret = -EPERM;
			pr_err(TP_DEV_NAME"%d:"
					"Get data from user space failed\n",
					func->num);
			break;
		}
		descriptor->m_block_mode =
			csdio_cmd53_ctrl.m_block_mode;
		descriptor->m_op_code = csdio_cmd53_ctrl.m_op_code;
		descriptor->m_address = csdio_cmd53_ctrl.m_address;
	}
	break;
	case CSDIO_IOC_CONNECT_ISR:
	{
		pr_info(CSDIO_DEV_NAME" SDIO_CONNECT_ISR"
				" func=%d, csdio_sdio_irq=%p\n",
				func->num, csdio_sdio_irq);
		sdio_claim_host(func);
		ret = sdio_claim_irq(func, csdio_sdio_irq);
		sdio_release_host(func);
		if (ret) {
			pr_err(CSDIO_DEV_NAME" SDIO_CONNECT_ISR"
					" claim irq failed(%d)\n", ret);
		} else {
			/* update current irq mask for disable/enable */
			g_csdio.m_current_irq_mask |= (1 << func->num);
		}
	}
	break;
	case CSDIO_IOC_DISCONNECT_ISR:
	{
		pr_info(CSDIO_DEV_NAME " SDIO_DISCONNECT_ISR func=%d\n",
				func->num);
		sdio_claim_host(func);
		sdio_release_irq(func);
		sdio_release_host(func);
		/* update current irq mask for disable/enable */
		g_csdio.m_current_irq_mask &= ~(1 << func->num);
	}
	break;
	default: /* redundant, as cmd was checked against MAXNR */
		pr_warning(TP_DEV_NAME"%d: Redundant IOCTL\n",
				func->num);
		ret = -ENOTTY;
	}
exit:
	return ret;
}
static const struct file_operations csdio_transport_fops = {
.owner = THIS_MODULE,
.read = csdio_transport_read,
.write = csdio_transport_write,
.ioctl = csdio_transport_ioctl,
.open = csdio_transport_open,
.release = csdio_transport_release,
};
/*
 * Tear down the per-function character device created by
 * csdio_cdev_init(): remove the /dev node, then unregister the cdev.
 */
static void csdio_transport_cleanup(struct csdio_func_t *port)
{
int devno = MKDEV(csdio_major, csdio_minor + port->m_func->num);
/* destroy the /dev entry before deleting the underlying cdev */
device_destroy(g_csdio.m_driver_class, devno);
port->m_device = NULL;
cdev_del(&port->m_cdev);
}
#if defined(CONFIG_DEVTMPFS)
/* With devtmpfs the node mode is applied via the class devnode hook
 * (csdio_devnode), so there is nothing to do here. */
static inline int csdio_cdev_update_permissions(
const char *devname, int dev_minor)
{
return 0;
}
#else
/*
 * Force CSDIO_DEV_PERMISSIONS (0666) onto /dev/<devname><dev_minor>.
 *
 * The node is opened from kernel context (hence the temporary
 * set_fs(get_ds()) to allow the kernel-space path string) and its mode
 * bits are replaced through notify_change() under the inode mutex.
 * Returns 0 on success or -EFAULT if the node cannot be opened.
 */
static int csdio_cdev_update_permissions(
const char *devname, int dev_minor)
{
int ret = 0;
mm_segment_t fs;
struct file *file;
struct inode *inode;
struct iattr newattrs;
int mode = CSDIO_DEV_PERMISSIONS;
char dev_file[64];
/* switch the address-limit so filp_open accepts a kernel pointer */
fs = get_fs();
set_fs(get_ds());
snprintf(dev_file, sizeof(dev_file), "/dev/%s%d",
devname, dev_minor);
file = filp_open(dev_file, O_RDWR, 0);
if (IS_ERR(file)) {
ret = -EFAULT;
goto exit;
}
inode = file->f_path.dentry->d_inode;
mutex_lock(&inode->i_mutex);
/* keep the non-permission mode bits (file type etc.) intact */
newattrs.ia_mode =
(mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
ret = notify_change(file->f_path.dentry, &newattrs);
mutex_unlock(&inode->i_mutex);
filp_close(file, NULL);
exit:
/* always restore the original address-limit */
set_fs(fs);
return ret;
}
#endif
/*
 * Register a character device and create its /dev node.
 *
 * Returns the created struct device on success, or NULL on any failure
 * (callers test the result against NULL).
 *
 * Fix: device_create() returns ERR_PTR() on failure, never NULL, so
 * the previous `if (!new_device)` test could hand an error pointer
 * back to the caller as a "valid" device.
 */
static struct device *csdio_cdev_init(struct cdev *char_dev,
	const struct file_operations *file_op, int dev_minor,
	const char *devname, struct device *parent)
{
	int ret = 0;
	struct device *new_device = NULL;
	dev_t devno = MKDEV(csdio_major, dev_minor);

	/* Initialize transport device */
	cdev_init(char_dev, file_op);
	char_dev->owner = THIS_MODULE;
	char_dev->ops = file_op;
	ret = cdev_add(char_dev, devno, 1);
	/* Fail gracefully if need be */
	if (ret) {
		pr_warning("Error %d adding CSDIO char device '%s%d'",
			ret, devname, dev_minor);
		goto exit;
	}
	pr_info("'%s%d' char driver registered\n", devname, dev_minor);
	/* create a /dev entry for transport drivers */
	new_device = device_create(g_csdio.m_driver_class, parent, devno, NULL,
			"%s%d", devname, dev_minor);
	if (IS_ERR_OR_NULL(new_device)) {
		pr_err("Can't create device node '/dev/%s%d'\n",
			devname, dev_minor);
		new_device = NULL;	/* callers test for NULL */
		goto cleanup;
	}
	/* no irq attached.  NOTE(review): this resets the global mask on
	 * every node creation, not just the first — confirm intended. */
	g_csdio.m_current_irq_mask = 0;
	if (csdio_cdev_update_permissions(devname, dev_minor)) {
		pr_warning("%s%d: Unable to update access permissions of the"
			" '/dev/%s%d'\n",
			devname, dev_minor, devname, dev_minor);
	}
	pr_info("%s%d: Device node '/dev/%s%d' created successfully\n",
		devname, dev_minor, devname, dev_minor);
	goto exit;
cleanup:
	cdev_del(char_dev);
exit:
	return new_device;
}
/* Looks for first non empty function, returns NULL otherwise */
/* Looks for first non empty function, returns NULL otherwise */
static struct sdio_func *get_active_func(void)
{
	int idx = 0;

	while (idx < CSDIO_NUM_OF_SDIO_FUNCTIONS) {
		if (g_csdio_func_table[idx] != NULL)
			return g_csdio_func_table[idx]->m_func;
		idx++;
	}
	return NULL;
}
static ssize_t
show_vdd(struct device *dev, struct device_attribute *attr, char *buf)
{
if (NULL == g_csdio.m_host)
return snprintf(buf, PAGE_SIZE, "N/A\n");
return snprintf(buf, PAGE_SIZE, "%d\n",
g_csdio.m_host->ios.vdd);
}
static int
set_vdd_helper(int value)
{
struct mmc_ios *ios = NULL;
if (NULL == g_csdio.m_host) {
pr_err("%s0: Set VDD, no MMC host assigned\n", CSDIO_DEV_NAME);
return -ENXIO;
}
mmc_claim_host(g_csdio.m_host);
ios = &g_csdio.m_host->ios;
ios->vdd = value;
g_csdio.m_host->ops->set_ios(g_csdio.m_host, ios);
mmc_release_host(g_csdio.m_host);
return 0;
}
/*
 * sysfs "vdd" store: parse a decimal value and apply it via
 * set_vdd_helper().
 *
 * Fix: the sscanf() result was previously ignored, so non-numeric
 * input silently applied VDD = 0; now it is rejected with -EINVAL.
 */
static ssize_t
set_vdd(struct device *dev, struct device_attribute *att,
	const char *buf, size_t count)
{
	int value = 0;

	if (sscanf(buf, "%d", &value) != 1)
		return -EINVAL;
	if (set_vdd_helper(value))
		return -ENXIO;
	return count;
}
static DEVICE_ATTR(vdd, S_IRUGO | S_IWUSR,
show_vdd, set_vdd);
static struct attribute *dev_attrs[] = {
&dev_attr_vdd.attr,
NULL,
};
static struct attribute_group dev_attr_grp = {
.attrs = dev_attrs,
};
/*
* The ioctl() implementation for control device
*/
/*
 * ioctl handler for the global CSDIO control device: host clock setup,
 * VDD get/set, and client interrupt enable/disable.
 *
 * Fix: the ENABLE_ISR and DISABLE_ISR cases declared a local `int ret`
 * that shadowed the function-level one, so every error they detected
 * (-EFAULT, sdio_f0_writeb failures) was silently dropped and user
 * space always saw success.  The inner declarations are removed so
 * errors propagate.
 */
static int csdio_ctrl_ioctl(struct inode *inode, struct file *filp,
		unsigned int cmd, unsigned long arg)
{
	int err = 0;
	int ret = 0;

	pr_info("CSDIO ctrl ioctl.\n");
	/* sanity check: return ENOTTY (inappropriate ioctl) before
	 * access_ok() */
	if ((_IOC_TYPE(cmd) != CSDIO_IOC_MAGIC) ||
			(_IOC_NR(cmd) > CSDIO_IOC_MAXNR)) {
		pr_err(CSDIO_DEV_NAME "Wrong ioctl command parameters\n");
		ret = -ENOTTY;
		goto exit;
	}
	/* the direction is a bitmask; access_ok() is kernel-oriented,
	 * so the concept of "read" and "write" is reversed */
	if (_IOC_DIR(cmd) & _IOC_READ) {
		err = !access_ok(VERIFY_WRITE, (void __user *)arg,
				_IOC_SIZE(cmd));
	} else {
		if (_IOC_DIR(cmd) & _IOC_WRITE)
			err = !access_ok(VERIFY_READ, (void __user *)arg,
					_IOC_SIZE(cmd));
	}
	if (err) {
		pr_err(CSDIO_DEV_NAME "Wrong ioctl access direction\n");
		ret = -EFAULT;
		goto exit;
	}
	switch (cmd) {
	case CSDIO_IOC_ENABLE_HIGHSPEED_MODE:
		pr_info(CSDIO_DEV_NAME" ENABLE_HIGHSPEED_MODE\n");
		break;
	case CSDIO_IOC_SET_DATA_TRANSFER_CLOCKS:
	{
		struct mmc_host *host = g_csdio.m_host;
		struct mmc_ios *ios = NULL;

		if (NULL == host) {
			pr_err("%s0: "
				"CSDIO_IOC_SET_DATA_TRANSFER_CLOCKS,"
				" no MMC host assigned\n",
				CSDIO_DEV_NAME);
			ret = -EFAULT;
			goto exit;
		}
		ios = &host->ios;
		mmc_claim_host(host);
		ret = get_user(host->ios.clock,
				(unsigned int __user *)arg);
		if (ret) {
			pr_err(CSDIO_DEV_NAME
				" get data from user space failed\n");
		} else {
			/* informational; was mistakenly logged at err level */
			pr_info(CSDIO_DEV_NAME
				"SET_DATA_TRANSFER_CLOCKS(%d-%d)(%d)\n",
				host->f_min, host->f_max,
				host->ios.clock);
			host->ops->set_ios(host, ios);
		}
		mmc_release_host(host);
	}
	break;
	case CSDIO_IOC_ENABLE_ISR:
	{
		unsigned char reg;
		struct sdio_func *func = get_active_func();

		if (!func) {
			pr_err(CSDIO_DEV_NAME " CSDIO_IOC_ENABLE_ISR"
				" no active sdio function\n");
			ret = -EFAULT;
			goto exit;
		}
		pr_info(CSDIO_DEV_NAME
			" CSDIO_IOC_ENABLE_ISR func=%d\n",
			func->num);
		/* master-enable bit plus the remembered per-function mask */
		reg = g_csdio.m_current_irq_mask | 1;
		sdio_claim_host(func);
		sdio_f0_writeb(func, reg, SDIO_CCCR_IENx, &ret);
		sdio_release_host(func);
		if (ret) {
			pr_err(CSDIO_DEV_NAME
				" Can't sdio_f0_writeb (%d)\n",
				ret);
			goto exit;
		}
	}
	break;
	case CSDIO_IOC_DISABLE_ISR:
	{
		struct sdio_func *func = get_active_func();

		if (!func) {
			pr_err(CSDIO_DEV_NAME " CSDIO_IOC_ENABLE_ISR"
				" no active sdio function\n");
			ret = -EFAULT;
			goto exit;
		}
		pr_info(CSDIO_DEV_NAME
			" CSDIO_IOC_DISABLE_ISR func=%p\n",
			func);
		sdio_claim_host(func);
		ret = disable_sdio_client_isr(func);
		sdio_release_host(func);
		if (ret) {
			pr_err("%s0: Can't disable client isr (%d)\n",
				CSDIO_DEV_NAME, ret);
			goto exit;
		}
	}
	break;
	case CSDIO_IOC_SET_VDD:
	{
		unsigned int vdd = 0;

		ret = get_user(vdd, (unsigned int __user *)arg);
		if (ret) {
			pr_err("%s0: CSDIO_IOC_SET_VDD,"
				" get data from user space failed\n",
				CSDIO_DEV_NAME);
			goto exit;
		}
		pr_info(CSDIO_DEV_NAME" CSDIO_IOC_SET_VDD - %d\n", vdd);
		ret = set_vdd_helper(vdd);
		if (ret)
			goto exit;
	}
	break;
	case CSDIO_IOC_GET_VDD:
	{
		if (NULL == g_csdio.m_host) {
			pr_err("%s0: CSDIO_IOC_GET_VDD,"
				" no MMC host assigned\n",
				CSDIO_DEV_NAME);
			ret = -EFAULT;
			goto exit;
		}
		ret = put_user(g_csdio.m_host->ios.vdd,
				(unsigned short __user *)arg);
		if (ret) {
			pr_err("%s0: CSDIO_IOC_GET_VDD, put data"
				" to user space failed\n",
				CSDIO_DEV_NAME);
			goto exit;
		}
	}
	break;
	default: /* redundant, as cmd was checked against MAXNR */
		pr_warning(CSDIO_DEV_NAME" Redundant IOCTL\n");
		ret = -ENOTTY;
	}
exit:
	return ret;
}
/*
 * fasync hook for the control device: (un)subscribe this open file
 * from the SIGIO notification list that csdio_sdio_irq() signals.
 */
static int csdio_ctrl_fasync(int fd, struct file *filp, int mode)
{
pr_info(CSDIO_DEV_NAME
" csdio_ctrl_fasync: fd=%d, filp=%p, mode=%d\n",
fd, filp, mode);
return fasync_helper(fd, filp, mode, &g_csdio.m_async_queue);
}
/*
* Open and close
*/
/* Control-device open: stash the driver context for later methods. */
static int csdio_ctrl_open(struct inode *inode, struct file *filp)
{
	struct csdio_t *drv;

	pr_info("CSDIO ctrl open.\n");
	drv = container_of(inode->i_cdev, struct csdio_t, m_cdev);
	filp->private_data = drv;	/* for other methods */
	return 0;
}
/*
 * Control-device release: drop this file from the SIGIO subscriber
 * list so no notification is delivered to a closed descriptor.
 */
static int csdio_ctrl_release(struct inode *inode, struct file *filp)
{
pr_info("CSDIO ctrl release.\n");
/* remove this filp from the asynchronously notified filp's */
csdio_ctrl_fasync(-1, filp, 0);
return 0;
}
static const struct file_operations csdio_ctrl_fops = {
.owner = THIS_MODULE,
.ioctl = csdio_ctrl_ioctl,
.open = csdio_ctrl_open,
.release = csdio_ctrl_release,
.fasync = csdio_ctrl_fasync,
};
/*
 * SDIO probe: bind a transport character device to the newly found
 * SDIO function.  Only one device instance per function number is
 * supported, and all functions must live on the same MMC host.
 *
 * Fix: when csdio_cdev_init() failed, the old code jumped to `free`
 * with ret still 0, so the core believed the probe succeeded while
 * `port` had already been freed.  It now returns -ENODEV and also
 * clears the drvdata pointer set earlier.
 */
static int csdio_probe(struct sdio_func *func,
		const struct sdio_device_id *id)
{
	struct csdio_func_t *port;
	int ret = 0;
	struct mmc_host *host = func->card->host;

	if (NULL != g_csdio.m_host && g_csdio.m_host != host) {
		pr_info("%s: Device is on unexpected host\n",
			CSDIO_DEV_NAME);
		ret = -ENODEV;
		goto exit;
	}
	/* enforce single instance policy */
	if (g_csdio_func_table[func->num-1]) {
		pr_err("%s - only single SDIO device supported",
			sdio_func_id(func));
		ret = -EEXIST;
		goto exit;
	}
	port = kzalloc(sizeof(struct csdio_func_t), GFP_KERNEL);
	if (!port) {
		pr_err("Can't allocate memory\n");
		ret = -ENOMEM;
		goto exit;
	}
	/* initialize SDIO side */
	port->m_func = func;
	sdio_set_drvdata(func, port);
	pr_info("%s - SDIO device found. Function %d\n",
		sdio_func_id(func), func->num);
	/* create appropriate char device */
	port->m_device = csdio_cdev_init(&port->m_cdev, &csdio_transport_fops,
			csdio_minor + port->m_func->num,
			TP_DEV_NAME, &port->m_func->dev);
	if (!port->m_device) {
		ret = -ENODEV;
		goto free;
	}
	/* first function on an unconstrained host defines the host */
	if (0 == g_csdio.m_num_of_func && NULL == host_name)
		g_csdio.m_host = host;
	g_csdio.m_num_of_func++;
	g_csdio_func_table[func->num-1] = port;
	port->m_enabled = TRUE;
	goto exit;
free:
	sdio_set_drvdata(func, NULL);
	kfree(port);
exit:
	return ret;
}
/*
 * SDIO remove: undo csdio_probe() — tear down the char device, release
 * any claimed irq, disable the function and free the port.  When the
 * last function goes away (and no host was pinned by module parameter)
 * the tracked MMC host is forgotten as well.
 */
static void csdio_remove(struct sdio_func *func)
{
struct csdio_func_t *port = sdio_get_drvdata(func);
csdio_transport_cleanup(port);
/* quiesce the hardware before freeing driver state */
sdio_claim_host(func);
sdio_release_irq(func);
sdio_disable_func(func);
sdio_release_host(func);
kfree(port);
g_csdio_func_table[func->num-1] = NULL;
g_csdio.m_num_of_func--;
if (0 == g_csdio.m_num_of_func && NULL == host_name)
g_csdio.m_host = NULL;
pr_info("%s%d: Device removed (%s). Function %d\n",
CSDIO_DEV_NAME, func->num, sdio_func_id(func), func->num);
}
/* CONFIG_CSDIO_VENDOR_ID and CONFIG_CSDIO_DEVICE_ID are defined in Kconfig.
* Use kernel configuration to change the values or overwrite them through
* module parameters */
static struct sdio_device_id csdio_ids[] = {
{ SDIO_DEVICE(CONFIG_CSDIO_VENDOR_ID, CONFIG_CSDIO_DEVICE_ID) },
{ /* end: all zeroes */},
};
MODULE_DEVICE_TABLE(sdio, csdio_ids);
static struct sdio_driver csdio_driver = {
.probe = csdio_probe,
.remove = csdio_remove,
.name = "csdio",
.id_table = csdio_ids,
};
/*
 * Module exit: unwind csdio_init() — unregister the SDIO driver first
 * (so no new transfers can start), then remove sysfs attributes, free
 * the bounce buffer, and tear down the control device, class and char
 * device region.
 */
static void __exit csdio_exit(void)
{
dev_t devno = MKDEV(csdio_major, csdio_minor);
sdio_unregister_driver(&csdio_driver);
sysfs_remove_group(&g_csdio.m_device->kobj, &dev_attr_grp);
/* safe: the SDIO driver is already unregistered, so no read/write
 * path can touch g_sdio_buffer any more */
kfree(g_sdio_buffer);
device_destroy(g_csdio.m_driver_class, devno);
cdev_del(&g_csdio.m_cdev);
class_destroy(g_csdio.m_driver_class);
unregister_chrdev_region(devno, csdio_transport_nr_devs);
pr_info("%s: Exit driver module\n", CSDIO_DEV_NAME);
}
/*
 * Class devnode hook: make devtmpfs create the nodes with 0666 so the
 * explicit chmod in csdio_cdev_update_permissions() is unnecessary.
 * Returning NULL keeps the default device name.
 */
static char *csdio_devnode(struct device *dev, mode_t *mode)
{
*mode = CSDIO_DEV_PERMISSIONS;
return NULL;
}
/*
 * Module init: reserve the char device region, create the driver class
 * and control device, allocate the CMD53 bounce buffer, resolve an
 * optional host constraint, and register as an SDIO driver.
 *
 * Fixes over the previous version:
 *  - the "Match with VendorId=/DeviceId=" log passed the two ids in
 *    swapped order;
 *  - a csdio_cdev_init() failure fell through with ret == 0, so the
 *    module loaded "successfully" with no control device;
 *  - class_create() errors are propagated via PTR_ERR() instead of a
 *    blanket -ENOMEM.
 */
static int __init csdio_init(void)
{
	int ret = 0;
	dev_t devno = 0;

	pr_info("Init CSDIO driver module.\n");
	/* Get a range of minor numbers to work with, asking for a dynamic */
	/* major unless directed otherwise at load time. */
	if (csdio_major) {
		devno = MKDEV(csdio_major, csdio_minor);
		ret = register_chrdev_region(devno, csdio_transport_nr_devs,
				CSDIO_DEV_NAME);
	} else {
		ret = alloc_chrdev_region(&devno, csdio_minor,
				csdio_transport_nr_devs, CSDIO_DEV_NAME);
		csdio_major = MAJOR(devno);
	}
	if (ret < 0) {
		pr_err("CSDIO: can't get major %d\n", csdio_major);
		goto exit;
	}
	pr_info("CSDIO char driver major number is %d\n", csdio_major);
	/* kernel module got parameters: overwrite vendor and device id's */
	if ((csdio_vendor_id != 0) && (csdio_device_id != 0)) {
		csdio_ids[0].vendor = (u16)csdio_vendor_id;
		csdio_ids[0].device = (u16)csdio_device_id;
	}
	/* prepare create /dev/... instance */
	g_csdio.m_driver_class = class_create(THIS_MODULE, CSDIO_DEV_NAME);
	if (IS_ERR(g_csdio.m_driver_class)) {
		ret = PTR_ERR(g_csdio.m_driver_class);
		pr_err(CSDIO_DEV_NAME " class_create failed\n");
		goto unregister_region;
	}
	g_csdio.m_driver_class->devnode = csdio_devnode;
	/* create CSDIO ctrl driver */
	g_csdio.m_device = csdio_cdev_init(&g_csdio.m_cdev,
			&csdio_ctrl_fops, csdio_minor, CSDIO_DEV_NAME, NULL);
	if (!g_csdio.m_device) {
		pr_err("%s: Unable to create ctrl driver\n",
			CSDIO_DEV_NAME);
		ret = -ENODEV;
		goto destroy_class;
	}
	g_sdio_buffer = kmalloc(CSDIO_SDIO_BUFFER_SIZE, GFP_KERNEL);
	if (!g_sdio_buffer) {
		pr_err("Unable to allocate %d bytes\n", CSDIO_SDIO_BUFFER_SIZE);
		ret = -ENOMEM;
		goto destroy_cdev;
	}
	ret = sysfs_create_group(&g_csdio.m_device->kobj, &dev_attr_grp);
	if (ret) {
		pr_err("%s: Unable to create device attribute\n",
			CSDIO_DEV_NAME);
		goto free_sdio_buff;
	}
	g_csdio.m_num_of_func = 0;
	g_csdio.m_host = NULL;
	/* optionally pin the driver to a specific platform host */
	if (NULL != host_name) {
		struct device *dev = bus_find_device_by_name(&platform_bus_type,
				NULL, host_name);
		if (NULL != dev) {
			g_csdio.m_host = dev_get_drvdata(dev);
		} else {
			pr_err("%s: Host '%s' doesn't exist!\n", CSDIO_DEV_NAME,
				host_name);
		}
	}
	pr_info("%s: Match with VendorId=0x%X, DeviceId=0x%X, Host = %s\n",
		CSDIO_DEV_NAME, csdio_vendor_id, csdio_device_id,
		(NULL == host_name) ? "Any" : host_name);
	/* register sdio driver */
	ret = sdio_register_driver(&csdio_driver);
	if (ret) {
		pr_err("%s: Unable to register as SDIO driver\n",
			CSDIO_DEV_NAME);
		goto remove_group;
	}
	goto exit;
remove_group:
	sysfs_remove_group(&g_csdio.m_device->kobj, &dev_attr_grp);
free_sdio_buff:
	kfree(g_sdio_buffer);
destroy_cdev:
	cdev_del(&g_csdio.m_cdev);
destroy_class:
	class_destroy(g_csdio.m_driver_class);
unregister_region:
	unregister_chrdev_region(devno, csdio_transport_nr_devs);
exit:
	return ret;
}
module_param(csdio_vendor_id, uint, S_IRUGO);
module_param(csdio_device_id, uint, S_IRUGO);
module_param(host_name, charp, S_IRUGO);
module_init(csdio_init);
module_exit(csdio_exit);
MODULE_AUTHOR("The Linux Foundation");
MODULE_DESCRIPTION("CSDIO device driver version " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
SM-G920P/TeamSPR_Kernel_OLD | drivers/mmc/host/sdhci-dove.c | 2087 | 5361 | /*
* sdhci-dove.c Support for SDHCI on Marvell's Dove SoC
*
* Author: Saeed Bishara <saeed@marvell.com>
* Mike Rapoport <mike@compulab.co.il>
* Based on sdhci-cns3xxx.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/io.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_gpio.h>
#include "sdhci-pltfm.h"
struct sdhci_dove_priv {
struct clk *clk;
int gpio_cd;
};
/*
 * Card-detect GPIO interrupt: defer the insert/remove handling to the
 * SDHCI core's card tasklet (runs in softirq context).
 */
static irqreturn_t sdhci_dove_carddetect_irq(int irq, void *data)
{
struct sdhci_host *host = data;
tasklet_schedule(&host->card_tasklet);
return IRQ_HANDLED;
}
/*
 * 16-bit register read with a quirk: the Dove controller does not
 * implement the HOST_VERSION and SLOT_INT_STATUS registers, so reads
 * of those report 0 instead of touching the hardware.
 */
static u16 sdhci_dove_readw(struct sdhci_host *host, int reg)
{
	if (reg == SDHCI_HOST_VERSION || reg == SDHCI_SLOT_INT_STATUS)
		return 0;	/* those registers don't exist */
	return readw(host->ioaddr + reg);
}
/*
 * 32-bit register read with two fixups: mask the (unsupported) 3.0V
 * capability bit, and synthesize the card-present bit from the
 * card-detect GPIO when one is configured (active-low).
 */
static u32 sdhci_dove_readl(struct sdhci_host *host, int reg)
{
	struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
	struct sdhci_dove_priv *priv = pltfm_host->priv;
	u32 val = readl(host->ioaddr + reg);

	if (reg == SDHCI_CAPABILITIES) {
		/* Mask the support for 3.0V */
		val &= ~SDHCI_CAN_VDD_300;
	} else if (reg == SDHCI_PRESENT_STATE &&
		   gpio_is_valid(priv->gpio_cd)) {
		if (gpio_get_value(priv->gpio_cd) == 0)
			val |= SDHCI_CARD_PRESENT;
		else
			val &= ~SDHCI_CARD_PRESENT;
	}
	return val;
}
static const struct sdhci_ops sdhci_dove_ops = {
.read_w = sdhci_dove_readw,
.read_l = sdhci_dove_readl,
};
static const struct sdhci_pltfm_data sdhci_dove_pdata = {
.ops = &sdhci_dove_ops,
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL |
SDHCI_QUIRK_FORCE_DMA |
SDHCI_QUIRK_NO_HISPD_BIT,
};
/*
 * Platform probe: allocate private data, pick up the optional clock
 * and DT card-detect GPIO, register the SDHCI host, and finally hook
 * the card-detect IRQ.  Each acquired resource has a matching undo in
 * the reverse-order error labels at the bottom.
 */
static int sdhci_dove_probe(struct platform_device *pdev)
{
struct sdhci_host *host;
struct sdhci_pltfm_host *pltfm_host;
struct sdhci_dove_priv *priv;
int ret;
priv = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_dove_priv),
GFP_KERNEL);
if (!priv) {
dev_err(&pdev->dev, "unable to allocate private data");
return -ENOMEM;
}
/* clock is optional: failure is tolerated and checked with IS_ERR
 * at every use site below */
priv->clk = devm_clk_get(&pdev->dev, NULL);
/* card-detect GPIO comes from DT only; -EINVAL marks "none" */
if (pdev->dev.of_node) {
priv->gpio_cd = of_get_named_gpio(pdev->dev.of_node,
"cd-gpios", 0);
} else {
priv->gpio_cd = -EINVAL;
}
if (gpio_is_valid(priv->gpio_cd)) {
ret = gpio_request(priv->gpio_cd, "sdhci-cd");
if (ret) {
dev_err(&pdev->dev, "card detect gpio request failed: %d\n",
ret);
return ret;
}
gpio_direction_input(priv->gpio_cd);
}
host = sdhci_pltfm_init(pdev, &sdhci_dove_pdata);
if (IS_ERR(host)) {
ret = PTR_ERR(host);
goto err_sdhci_pltfm_init;
}
pltfm_host = sdhci_priv(host);
pltfm_host->priv = priv;
if (!IS_ERR(priv->clk))
clk_prepare_enable(priv->clk);
sdhci_get_of_property(pdev);
ret = sdhci_add_host(host);
if (ret)
goto err_sdhci_add;
/*
 * We must request the IRQ after sdhci_add_host(), as the tasklet only
 * gets setup in sdhci_add_host() and we oops.
 */
if (gpio_is_valid(priv->gpio_cd)) {
ret = request_irq(gpio_to_irq(priv->gpio_cd),
sdhci_dove_carddetect_irq,
IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING,
mmc_hostname(host->mmc), host);
if (ret) {
dev_err(&pdev->dev, "card detect irq request failed: %d\n",
ret);
goto err_request_irq;
}
}
return 0;
/* unwind in reverse acquisition order */
err_request_irq:
sdhci_remove_host(host, 0);
err_sdhci_add:
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
sdhci_pltfm_free(pdev);
err_sdhci_pltfm_init:
if (gpio_is_valid(priv->gpio_cd))
gpio_free(priv->gpio_cd);
return ret;
}
/*
 * Platform remove: unregister the SDHCI host, then release the
 * card-detect IRQ/GPIO and gate the clock — the inverse of probe.
 */
static int sdhci_dove_remove(struct platform_device *pdev)
{
struct sdhci_host *host = platform_get_drvdata(pdev);
struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
struct sdhci_dove_priv *priv = pltfm_host->priv;
sdhci_pltfm_unregister(pdev);
if (gpio_is_valid(priv->gpio_cd)) {
free_irq(gpio_to_irq(priv->gpio_cd), host);
gpio_free(priv->gpio_cd);
}
if (!IS_ERR(priv->clk))
clk_disable_unprepare(priv->clk);
return 0;
}
static const struct of_device_id sdhci_dove_of_match_table[] = {
{ .compatible = "marvell,dove-sdhci", },
{}
};
MODULE_DEVICE_TABLE(of, sdhci_dove_of_match_table);
static struct platform_driver sdhci_dove_driver = {
.driver = {
.name = "sdhci-dove",
.owner = THIS_MODULE,
.pm = SDHCI_PLTFM_PMOPS,
.of_match_table = of_match_ptr(sdhci_dove_of_match_table),
},
.probe = sdhci_dove_probe,
.remove = sdhci_dove_remove,
};
module_platform_driver(sdhci_dove_driver);
MODULE_DESCRIPTION("SDHCI driver for Dove");
MODULE_AUTHOR("Saeed Bishara <saeed@marvell.com>, "
"Mike Rapoport <mike@compulab.co.il>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
OneEducation/kernel-rk310-kitkat-firefly | drivers/clk/tegra/clk-pll.c | 2087 | 38779 | /*
* Copyright (c) 2012, 2013, NVIDIA CORPORATION. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
* version 2, as published by the Free Software Foundation.
*
* This program is distributed in the hope it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/err.h>
#include <linux/clk-provider.h>
#include <linux/clk.h>
#include "clk.h"
#define PLL_BASE_BYPASS BIT(31)
#define PLL_BASE_ENABLE BIT(30)
#define PLL_BASE_REF_ENABLE BIT(29)
#define PLL_BASE_OVERRIDE BIT(28)
#define PLL_BASE_DIVP_SHIFT 20
#define PLL_BASE_DIVP_WIDTH 3
#define PLL_BASE_DIVN_SHIFT 8
#define PLL_BASE_DIVN_WIDTH 10
#define PLL_BASE_DIVM_SHIFT 0
#define PLL_BASE_DIVM_WIDTH 5
#define PLLU_POST_DIVP_MASK 0x1
#define PLL_MISC_DCCON_SHIFT 20
#define PLL_MISC_CPCON_SHIFT 8
#define PLL_MISC_CPCON_WIDTH 4
#define PLL_MISC_CPCON_MASK ((1 << PLL_MISC_CPCON_WIDTH) - 1)
#define PLL_MISC_LFCON_SHIFT 4
#define PLL_MISC_LFCON_WIDTH 4
#define PLL_MISC_LFCON_MASK ((1 << PLL_MISC_LFCON_WIDTH) - 1)
#define PLL_MISC_VCOCON_SHIFT 0
#define PLL_MISC_VCOCON_WIDTH 4
#define PLL_MISC_VCOCON_MASK ((1 << PLL_MISC_VCOCON_WIDTH) - 1)
#define OUT_OF_TABLE_CPCON 8
#define PMC_PLLP_WB0_OVERRIDE 0xf8
#define PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE BIT(12)
#define PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE BIT(11)
#define PLL_POST_LOCK_DELAY 50
#define PLLDU_LFCON_SET_DIVN 600
#define PLLE_BASE_DIVCML_SHIFT 24
#define PLLE_BASE_DIVCML_WIDTH 4
#define PLLE_BASE_DIVP_SHIFT 16
#define PLLE_BASE_DIVP_WIDTH 7
#define PLLE_BASE_DIVN_SHIFT 8
#define PLLE_BASE_DIVN_WIDTH 8
#define PLLE_BASE_DIVM_SHIFT 0
#define PLLE_BASE_DIVM_WIDTH 8
#define PLLE_MISC_SETUP_BASE_SHIFT 16
#define PLLE_MISC_SETUP_BASE_MASK (0xffff << PLLE_MISC_SETUP_BASE_SHIFT)
#define PLLE_MISC_LOCK_ENABLE BIT(9)
#define PLLE_MISC_READY BIT(15)
#define PLLE_MISC_SETUP_EX_SHIFT 2
#define PLLE_MISC_SETUP_EX_MASK (3 << PLLE_MISC_SETUP_EX_SHIFT)
#define PLLE_MISC_SETUP_MASK (PLLE_MISC_SETUP_BASE_MASK | \
PLLE_MISC_SETUP_EX_MASK)
#define PLLE_MISC_SETUP_VALUE (7 << PLLE_MISC_SETUP_BASE_SHIFT)
#define PLLE_SS_CTRL 0x68
#define PLLE_SS_DISABLE (7 << 10)
#define PLLE_AUX_PLLP_SEL BIT(2)
#define PLLE_AUX_ENABLE_SWCTL BIT(4)
#define PLLE_AUX_SEQ_ENABLE BIT(24)
#define PLLE_AUX_PLLRE_SEL BIT(28)
#define PLLE_MISC_PLLE_PTS BIT(8)
#define PLLE_MISC_IDDQ_SW_VALUE BIT(13)
#define PLLE_MISC_IDDQ_SW_CTRL BIT(14)
#define PLLE_MISC_VREG_BG_CTRL_SHIFT 4
#define PLLE_MISC_VREG_BG_CTRL_MASK (3 << PLLE_MISC_VREG_BG_CTRL_SHIFT)
#define PLLE_MISC_VREG_CTRL_SHIFT 2
#define PLLE_MISC_VREG_CTRL_MASK (2 << PLLE_MISC_VREG_CTRL_SHIFT)
#define PLLCX_MISC_STROBE BIT(31)
#define PLLCX_MISC_RESET BIT(30)
/* PLLCX (PLLC2/PLLC3) MISC register: sigma-delta modulator divider select */
#define PLLCX_MISC_SDM_DIV_SHIFT 28
#define PLLCX_MISC_SDM_DIV_MASK (0x3 << PLLCX_MISC_SDM_DIV_SHIFT)
/* PLLCX MISC register: filter divider select */
#define PLLCX_MISC_FILT_DIV_SHIFT 26
#define PLLCX_MISC_FILT_DIV_MASK (0x3 << PLLCX_MISC_FILT_DIV_SHIFT)
#define PLLCX_MISC_ALPHA_SHIFT 18
/* Divider settings used below/above the dynamic-coefficient n threshold */
#define PLLCX_MISC_DIV_LOW_RANGE \
	((0x1 << PLLCX_MISC_SDM_DIV_SHIFT) | \
	(0x1 << PLLCX_MISC_FILT_DIV_SHIFT))
#define PLLCX_MISC_DIV_HIGH_RANGE \
	((0x2 << PLLCX_MISC_SDM_DIV_SHIFT) | \
	(0x2 << PLLCX_MISC_FILT_DIV_SHIFT))
/*
 * NOTE: PLLCX_MISC_KA_SHIFT/KB_SHIFT are defined after their use below;
 * this is legal because object-like macros expand at the point of use.
 */
#define PLLCX_MISC_COEF_LOW_RANGE \
	((0x14 << PLLCX_MISC_KA_SHIFT) | (0x38 << PLLCX_MISC_KB_SHIFT))
#define PLLCX_MISC_KA_SHIFT 2
#define PLLCX_MISC_KB_SHIFT 9
/* Default MISC value: low-range coefficients/dividers, PLL held in reset */
#define PLLCX_MISC_DEFAULT (PLLCX_MISC_COEF_LOW_RANGE | \
			(0x19 << PLLCX_MISC_ALPHA_SHIFT) | \
			PLLCX_MISC_DIV_LOW_RANGE | \
			PLLCX_MISC_RESET)
#define PLLCX_MISC1_DEFAULT 0x000d2308
#define PLLCX_MISC2_DEFAULT 0x30211200
#define PLLCX_MISC3_DEFAULT 0x200
/* PMC registers: PLLM shadow/override and SATA power-gate (PLLE IDDQ) */
#define PMC_PLLM_WB0_OVERRIDE 0x1dc
#define PMC_PLLM_WB0_OVERRIDE_2 0x2b0
#define PMC_PLLM_WB0_OVERRIDE_2_DIVP_MASK BIT(27)
#define PMC_SATA_PWRGT 0x1ac
#define PMC_SATA_PWRGT_PLLE_IDDQ_VALUE BIT(5)
#define PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL BIT(4)
/* Accessors for a PLL's BASE and MISC registers */
#define pll_readl(offset, p) readl_relaxed(p->clk_base + offset)
#define pll_readl_base(p) pll_readl(p->params->base_reg, p)
#define pll_readl_misc(p) pll_readl(p->params->misc_reg, p)
#define pll_writel(val, offset, p) writel_relaxed(val, p->clk_base + offset)
#define pll_writel_base(val, p) pll_writel(val, p->params->base_reg, p)
#define pll_writel_misc(val, p) pll_writel(val, p->params->misc_reg, p)
/* Field masks/limits derived from the per-PLL divider field widths */
#define mask(w) ((1 << (w)) - 1)
#define divm_mask(p) mask(p->divm_width)
#define divn_mask(p) mask(p->divn_width)
#define divp_mask(p) (p->flags & TEGRA_PLLU ? PLLU_POST_DIVP_MASK : \
		mask(p->divp_width))
#define divm_max(p) (divm_mask(p))
#define divn_max(p) (divn_mask(p))
/* Max post divider as a value; p field holds a power-of-two exponent here */
#define divp_max(p) (1 << (divp_mask(p)))
#ifdef CONFIG_ARCH_TEGRA_114_SOC
/* PLLXC has 4-bit PDIV, but entry 15 is not allowed in h/w */
#define PLLXC_PDIV_MAX 14
/* non-monotonic mapping below is not a typo */
static u8 pllxc_p[PLLXC_PDIV_MAX + 1] = {
	/* PDIV: 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14 */
	/* p: */ 1, 2, 3, 4, 5, 6, 8, 10, 12, 16, 12, 16, 20, 24, 32
};
/* PLLCX hardware PDIV encoding -> actual post-divider value */
#define PLLCX_PDIV_MAX 7
static u8 pllcx_p[PLLCX_PDIV_MAX + 1] = {
	/* PDIV: 0, 1, 2, 3, 4, 5, 6, 7 */
	/* p: */ 1, 2, 3, 4, 6, 8, 12, 16
};
#endif
/* Turn on the PLL's lock-detection circuitry, when the PLL has one. */
static void clk_pll_enable_lock(struct tegra_clk_pll *pll)
{
	u32 misc;

	/* Needs both lock polling support and a lock-enable bit */
	if (!(pll->flags & TEGRA_PLL_USE_LOCK) ||
	    !(pll->flags & TEGRA_PLL_HAS_LOCK_ENABLE))
		return;

	misc = pll_readl_misc(pll);
	misc |= BIT(pll->params->lock_enable_bit_idx);
	pll_writel_misc(misc, pll);
}
/*
 * Wait for the PLL to report lock.
 *
 * Without TEGRA_PLL_USE_LOCK there is no lock bit to poll, so this just
 * delays for the specified worst-case lock time.  Otherwise the lock
 * bit(s) are polled in either the MISC or the BASE register, selected by
 * TEGRA_PLL_LOCK_MISC.
 *
 * Returns 0 when locked (or after the open-loop delay), -1 on timeout.
 */
static int clk_pll_wait_for_lock(struct tegra_clk_pll *pll)
{
	int i;
	u32 val, lock_mask;
	void __iomem *lock_addr;

	if (!(pll->flags & TEGRA_PLL_USE_LOCK)) {
		/* No lock bit: open-loop delay for the full lock time */
		udelay(pll->params->lock_delay);
		return 0;
	}

	lock_addr = pll->clk_base;
	if (pll->flags & TEGRA_PLL_LOCK_MISC)
		lock_addr += pll->params->misc_reg;
	else
		lock_addr += pll->params->base_reg;

	lock_mask = pll->params->lock_mask;

	for (i = 0; i < pll->params->lock_delay; i++) {
		val = readl_relaxed(lock_addr);
		if ((val & lock_mask) == lock_mask) {
			/* Let the PLL settle a bit after lock is seen */
			udelay(PLL_POST_LOCK_DELAY);
			return 0;
		}
		udelay(2); /* timeout = 2 * lock time */
	}

	pr_err("%s: Timed out waiting for pll %s lock\n", __func__,
	       __clk_get_name(pll->hw.clk));

	return -1;
}
/* Report whether the PLL is currently enabled (PMC override aware). */
static int clk_pll_is_enabled(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	u32 val;

	if (pll->flags & TEGRA_PLLM) {
		/* When the PMC override is active, it owns the enable state */
		val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
		if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE)
			return (val & PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE) ? 1 : 0;
	}

	return (pll_readl_base(pll) & PLL_BASE_ENABLE) ? 1 : 0;
}
/*
 * Set the enable bit in the PLL base register, clearing bypass first for
 * PLLs that support it.  For PLLM the enable state is also mirrored into
 * the PMC PLLM override register.  Callers hold pll->lock when present.
 */
static void _clk_pll_enable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	u32 val;

	/* Turn on lock detection before enabling the PLL */
	clk_pll_enable_lock(pll);

	val = pll_readl_base(pll);
	if (pll->flags & TEGRA_PLL_BYPASS)
		val &= ~PLL_BASE_BYPASS;
	val |= PLL_BASE_ENABLE;
	pll_writel_base(val, pll);

	if (pll->flags & TEGRA_PLLM) {
		/* Mirror the enable into the PMC override register */
		val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
		val |= PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
		writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
	}
}
/*
 * Clear the enable bit in the PLL base register (bypass is also cleared
 * for PLLs that support it).  For PLLM the disable is mirrored into the
 * PMC PLLM override register.  Callers hold pll->lock when present.
 */
static void _clk_pll_disable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	u32 val;

	val = pll_readl_base(pll);
	if (pll->flags & TEGRA_PLL_BYPASS)
		val &= ~PLL_BASE_BYPASS;
	val &= ~PLL_BASE_ENABLE;
	pll_writel_base(val, pll);

	if (pll->flags & TEGRA_PLLM) {
		/* Mirror the disable into the PMC override register */
		val = readl_relaxed(pll->pmc + PMC_PLLP_WB0_OVERRIDE);
		val &= ~PMC_PLLP_WB0_OVERRIDE_PLLM_ENABLE;
		writel_relaxed(val, pll->pmc + PMC_PLLP_WB0_OVERRIDE);
	}
}
/* Locked enable: turn the PLL on and wait until it reports lock. */
static int clk_pll_enable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;
	int err;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	_clk_pll_enable(hw);
	err = clk_pll_wait_for_lock(pll);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return err;
}
/* Locked disable: gate the PLL under the (optional) register spinlock. */
static void clk_pll_disable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	_clk_pll_disable(hw);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);
}
/*
 * Look up an exact (parent_rate, rate) pair in the PLL's frequency table
 * and copy the matching configuration into @cfg.  Returns -EINVAL when no
 * entry matches.
 */
static int _get_table_rate(struct clk_hw *hw,
			   struct tegra_clk_pll_freq_table *cfg,
			   unsigned long rate, unsigned long parent_rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	struct tegra_clk_pll_freq_table *entry = pll->freq_table;

	/* The table is terminated by an entry with input_rate == 0 */
	while (entry->input_rate != 0 &&
	       !(entry->input_rate == parent_rate &&
		 entry->output_rate == rate))
		entry++;

	if (entry->input_rate == 0)
		return -EINVAL;

	cfg->input_rate = entry->input_rate;
	cfg->output_rate = entry->output_rate;
	cfg->m = entry->m;
	cfg->n = entry->n;
	cfg->p = entry->p;
	cfg->cpcon = entry->cpcon;

	return 0;
}
/*
 * Compute an m/n/p configuration for @rate when the frequency table has no
 * entry.  A comparison frequency (cfreq) is chosen per reference rate, the
 * VCO target is doubled until it reaches 200 * cfreq, and the number of
 * doublings becomes the post divider.
 */
static int _calc_rate(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
		unsigned long rate, unsigned long parent_rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	struct pdiv_map *p_tohw = pll->params->pdiv_tohw;
	unsigned long cfreq;
	u32 p_div = 0;

	switch (parent_rate) {
	case 12000000:
	case 26000000:
		cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2000000;
		break;
	case 13000000:
		cfreq = (rate <= 1000000 * 1000) ? 1000000 : 2600000;
		break;
	case 16800000:
	case 19200000:
		cfreq = (rate <= 1200000 * 1000) ? 1200000 : 2400000;
		break;
	case 9600000:
	case 28800000:
		/*
		 * PLL_P_OUT1 rate is not listed in PLLA table
		 */
		cfreq = parent_rate/(parent_rate/1000000);
		break;
	default:
		pr_err("%s Unexpected reference rate %lu\n",
			__func__, parent_rate);
		BUG();
	}
	/* Raise VCO to guarantee 0.5% accuracy */
	for (cfg->output_rate = rate; cfg->output_rate < 200 * cfreq;
	     cfg->output_rate <<= 1)
		p_div++;
	cfg->m = parent_rate / cfreq;
	cfg->n = cfg->output_rate / cfreq;
	cfg->cpcon = OUT_OF_TABLE_CPCON;
	/* Reject configurations that exceed the divider fields or vco_max */
	if (cfg->m > divm_max(pll) || cfg->n > divn_max(pll) ||
	    (1 << p_div) > divp_max(pll)
	    || cfg->output_rate > pll->params->vco_max) {
		pr_err("%s: Failed to set %s rate %lu\n",
			__func__, __clk_get_name(hw->clk), rate);
		return -EINVAL;
	}
	if (p_tohw) {
		/* Translate the divider value into its hardware encoding */
		p_div = 1 << p_div;
		while (p_tohw->pdiv) {
			if (p_div <= p_tohw->pdiv) {
				cfg->p = p_tohw->hw_val;
				break;
			}
			p_tohw++;
		}
		if (!p_tohw->pdiv)
			return -EINVAL;
	} else
		cfg->p = p_div;	/* stored as a power-of-two exponent */
	return 0;
}
/* Write the m/n/p divider fields from @cfg into the PLL base register. */
static void _update_pll_mnp(struct tegra_clk_pll *pll,
			    struct tegra_clk_pll_freq_table *cfg)
{
	u32 base = pll_readl_base(pll);
	u32 clear = (divm_mask(pll) << pll->divm_shift) |
		    (divn_mask(pll) << pll->divn_shift) |
		    (divp_mask(pll) << pll->divp_shift);
	u32 fields = (cfg->m << pll->divm_shift) |
		     (cfg->n << pll->divn_shift) |
		     (cfg->p << pll->divp_shift);

	pll_writel_base((base & ~clear) | fields, pll);
}
/* Read the current m/n/p divider fields from the base register into @cfg. */
static void _get_pll_mnp(struct tegra_clk_pll *pll,
			 struct tegra_clk_pll_freq_table *cfg)
{
	u32 base = pll_readl_base(pll);

	cfg->m = (base >> pll->divm_shift) & divm_mask(pll);
	cfg->n = (base >> pll->divn_shift) & divn_mask(pll);
	cfg->p = (base >> pll->divp_shift) & divp_mask(pll);
}
/*
 * Program the CPCON field in MISC, plus the LFCON or DCCON bit for PLLs
 * that require them: LFCON is set when the feedback divider n is large,
 * DCCON when the requested rate is in the upper half of the VCO range.
 */
static void _update_pll_cpcon(struct tegra_clk_pll *pll,
			      struct tegra_clk_pll_freq_table *cfg,
			      unsigned long rate)
{
	u32 val;

	val = pll_readl_misc(pll);

	val &= ~(PLL_MISC_CPCON_MASK << PLL_MISC_CPCON_SHIFT);
	val |= cfg->cpcon << PLL_MISC_CPCON_SHIFT;

	if (pll->flags & TEGRA_PLL_SET_LFCON) {
		val &= ~(PLL_MISC_LFCON_MASK << PLL_MISC_LFCON_SHIFT);
		if (cfg->n >= PLLDU_LFCON_SET_DIVN)
			val |= 1 << PLL_MISC_LFCON_SHIFT;
	} else if (pll->flags & TEGRA_PLL_SET_DCCON) {
		val &= ~(1 << PLL_MISC_DCCON_SHIFT);
		if (rate >= (pll->params->vco_max >> 1))
			val |= 1 << PLL_MISC_DCCON_SHIFT;
	}

	pll_writel_misc(val, pll);
}
/*
 * Program a new m/n/p (and CPCON, when applicable) configuration.  A
 * running PLL is stopped first and re-enabled (with a lock wait) after.
 */
static int _program_pll(struct clk_hw *hw, struct tegra_clk_pll_freq_table *cfg,
		unsigned long rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	int was_enabled = clk_pll_is_enabled(hw);
	int err = 0;

	if (was_enabled)
		_clk_pll_disable(hw);

	_update_pll_mnp(pll, cfg);

	if (pll->flags & TEGRA_PLL_HAS_CPCON)
		_update_pll_cpcon(pll, cfg, rate);

	if (was_enabled) {
		_clk_pll_enable(hw);
		err = clk_pll_wait_for_lock(pll);
	}

	return err;
}
/*
 * Reprogram a generic Tegra PLL.  Fixed-rate PLLs only accept their one
 * configured rate.  The configuration comes from the frequency table when
 * possible, computed on the fly otherwise; hardware is only touched when
 * the requested dividers differ from the current ones.
 */
static int clk_pll_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	struct tegra_clk_pll_freq_table cfg, old_cfg;
	unsigned long flags = 0;
	int err = 0;

	if (pll->flags & TEGRA_PLL_FIXED) {
		if (rate == pll->fixed_rate)
			return 0;
		pr_err("%s: Can not change %s fixed rate %lu to %lu\n",
			__func__, __clk_get_name(hw->clk),
			pll->fixed_rate, rate);
		return -EINVAL;
	}

	/* Table lookup first, run-time computation as fallback */
	if (_get_table_rate(hw, &cfg, rate, parent_rate) &&
	    _calc_rate(hw, &cfg, rate, parent_rate))
		return -EINVAL;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	_get_pll_mnp(pll, &old_cfg);

	/* Skip the reprogram when the dividers already match */
	if (old_cfg.m != cfg.m || old_cfg.n != cfg.n || old_cfg.p != cfg.p)
		err = _program_pll(hw, &cfg, rate);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return err;
}
/*
 * Round a requested rate to what the PLL can actually produce:
 * parent * n / (m * 2^p), using the same table/computed configuration as
 * set_rate would.
 */
static long clk_pll_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *prate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	struct tegra_clk_pll_freq_table cfg;
	u64 out = *prate;

	/* A fixed PLL always rounds to its one supported rate */
	if (pll->flags & TEGRA_PLL_FIXED)
		return pll->fixed_rate;

	/* PLLM is used for memory; we do not change rate */
	if (pll->flags & TEGRA_PLLM)
		return __clk_get_rate(hw->clk);

	if (_get_table_rate(hw, &cfg, rate, *prate) &&
	    _calc_rate(hw, &cfg, rate, *prate))
		return -EINVAL;

	out *= cfg.n;
	do_div(out, cfg.m * (1 << cfg.p));

	return out;
}
/*
 * Recalculate the current output rate from hardware: parent * n / (m * p).
 * Bypassed PLLs pass the parent rate through; fixed PLLs without the
 * override bit report their fixed rate.  p is decoded via pdiv_tohw when
 * the PLL uses encoded PDIV values, otherwise treated as an exponent.
 */
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	struct tegra_clk_pll_freq_table cfg;
	struct pdiv_map *p_tohw = pll->params->pdiv_tohw;
	u32 val;
	u64 rate = parent_rate;
	int pdiv;

	val = pll_readl_base(pll);

	if ((pll->flags & TEGRA_PLL_BYPASS) && (val & PLL_BASE_BYPASS))
		return parent_rate;

	if ((pll->flags & TEGRA_PLL_FIXED) && !(val & PLL_BASE_OVERRIDE)) {
		struct tegra_clk_pll_freq_table sel;
		/* Sanity check: the fixed rate must exist in the table */
		if (_get_table_rate(hw, &sel, pll->fixed_rate, parent_rate)) {
			pr_err("Clock %s has unknown fixed frequency\n",
			       __clk_get_name(hw->clk));
			BUG();
		}
		return pll->fixed_rate;
	}

	_get_pll_mnp(pll, &cfg);

	if (p_tohw) {
		/* Translate the hardware PDIV encoding to a divider value */
		while (p_tohw->pdiv) {
			if (cfg.p == p_tohw->hw_val) {
				pdiv = p_tohw->pdiv;
				break;
			}
			p_tohw++;
		}
		if (!p_tohw->pdiv) {
			WARN_ON(1);
			pdiv = 1;	/* unknown encoding: fail safe */
		}
	} else
		pdiv = 1 << cfg.p;

	cfg.m *= pdiv;
	rate *= cfg.n;
	do_div(rate, cfg.m);

	return rate;
}
/*
 * PLLE "training" sequence: generate a falling edge on the PLLE IDDQ input
 * via the PMC SATA power-gate register, then poll MISC until the PLL
 * reports ready (up to ~100 ms).
 *
 * Returns 0 on success, -ENOSYS when no PMC mapping exists, -EBUSY on
 * timeout.
 */
static int clk_plle_training(struct tegra_clk_pll *pll)
{
	u32 val;
	unsigned long timeout;

	if (!pll->pmc)
		return -ENOSYS;

	/*
	 * PLLE is already disabled, and setup cleared;
	 * create falling edge on PLLE IDDQ input.
	 */
	val = readl(pll->pmc + PMC_SATA_PWRGT);
	val |= PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
	writel(val, pll->pmc + PMC_SATA_PWRGT);

	val = readl(pll->pmc + PMC_SATA_PWRGT);
	val |= PMC_SATA_PWRGT_PLLE_IDDQ_SWCTL;
	writel(val, pll->pmc + PMC_SATA_PWRGT);

	val = readl(pll->pmc + PMC_SATA_PWRGT);
	val &= ~PMC_SATA_PWRGT_PLLE_IDDQ_VALUE;
	writel(val, pll->pmc + PMC_SATA_PWRGT);

	val = pll_readl_misc(pll);

	/* Wait for the PLL to come out of IDDQ and report ready */
	timeout = jiffies + msecs_to_jiffies(100);
	while (1) {
		val = pll_readl_misc(pll);
		if (val & PLLE_MISC_READY)
			break;
		if (time_after(jiffies, timeout)) {
			pr_err("%s: timeout waiting for PLLE\n", __func__);
			return -EBUSY;
		}
		udelay(300);
	}

	return 0;
}
/*
 * Bring up PLLE: look up the fixed-rate configuration, disable the PLL,
 * clear setup/lock-enable, run the IDDQ training sequence if the PLL does
 * not report ready, optionally program the dividers, re-apply setup and
 * lock-enable, disable spread spectrum and finally enable the PLL.
 *
 * Fix: the PLL base register value is now loaded with a plain read.  The
 * previous code used "val |= pll_readl_base(pll)", OR-ing the base value
 * into the stale PLLE_SS_CTRL contents still held in @val, which wrote
 * spread-spectrum control bits into the PLL base register.
 */
static int clk_plle_enable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
	struct tegra_clk_pll_freq_table sel;
	u32 val;
	int err;

	if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
		return -EINVAL;

	clk_pll_disable(hw);

	val = pll_readl_misc(pll);
	val &= ~(PLLE_MISC_LOCK_ENABLE | PLLE_MISC_SETUP_MASK);
	pll_writel_misc(val, pll);

	/* Run the training sequence if the PLL is not already ready */
	val = pll_readl_misc(pll);
	if (!(val & PLLE_MISC_READY)) {
		err = clk_plle_training(pll);
		if (err)
			return err;
	}

	if (pll->flags & TEGRA_PLLE_CONFIGURE) {
		/* configure dividers */
		val = pll_readl_base(pll);
		val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
		val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
		val |= sel.m << pll->divm_shift;
		val |= sel.n << pll->divn_shift;
		val |= sel.p << pll->divp_shift;
		val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
		pll_writel_base(val, pll);
	}

	val = pll_readl_misc(pll);
	val |= PLLE_MISC_SETUP_VALUE;
	val |= PLLE_MISC_LOCK_ENABLE;
	pll_writel_misc(val, pll);

	/* Turn off spread spectrum before enabling */
	val = readl(pll->clk_base + PLLE_SS_CTRL);
	val |= PLLE_SS_DISABLE;
	writel(val, pll->clk_base + PLLE_SS_CTRL);

	val = pll_readl_base(pll);	/* was "val |=": corrupted the write */
	val |= (PLL_BASE_BYPASS | PLL_BASE_ENABLE);
	pll_writel_base(val, pll);

	clk_pll_wait_for_lock(pll);

	return 0;
}
/* PLLE output rate from the base register: parent * n / (m * p). */
static unsigned long clk_plle_recalc_rate(struct clk_hw *hw,
					  unsigned long parent_rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	u32 base = pll_readl_base(pll);
	u32 m, n, p;
	u64 rate = parent_rate;

	p = (base >> pll->divp_shift) & divp_mask(pll);
	n = (base >> pll->divn_shift) & divn_mask(pll);
	m = (base >> pll->divm_shift) & divm_mask(pll);

	rate *= n;
	do_div(rate, m * p);

	return rate;
}
/* Operations for the generic Tegra PLL clock type */
const struct clk_ops tegra_clk_pll_ops = {
	.is_enabled = clk_pll_is_enabled,
	.enable = clk_pll_enable,
	.disable = clk_pll_disable,
	.recalc_rate = clk_pll_recalc_rate,
	.round_rate = clk_pll_round_rate,
	.set_rate = clk_pll_set_rate,
};
/*
 * Operations for PLLE: fixed rate (no round_rate/set_rate) with its own
 * training-based enable sequence.
 */
const struct clk_ops tegra_clk_plle_ops = {
	.recalc_rate = clk_plle_recalc_rate,
	.is_enabled = clk_pll_is_enabled,
	.disable = clk_pll_disable,
	.enable = clk_plle_enable,
};
#ifdef CONFIG_ARCH_TEGRA_114_SOC
/*
 * Fixed reference divider: 2 when the parent exceeds the maximum
 * comparison frequency, 1 otherwise.
 */
static int _pll_fixed_mdiv(struct tegra_clk_pll_params *pll_params,
			   unsigned long parent_rate)
{
	return (parent_rate > pll_params->cf_max) ? 2 : 1;
}
/*
 * Enable a PLL with IDDQ power control: de-assert the IDDQ (power-down)
 * bit, let the analog circuitry power up, then enable the PLL and wait
 * for lock.
 *
 * Fix: the result of clk_pll_wait_for_lock() is now propagated; the
 * previous code computed it into @ret and then unconditionally returned 0,
 * hiding lock timeouts from the clock framework.
 */
static int clk_pll_iddq_enable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;
	u32 val;
	int ret;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	/* Bring the PLL out of IDDQ power-down */
	val = pll_readl(pll->params->iddq_reg, pll);
	val &= ~BIT(pll->params->iddq_bit_idx);
	pll_writel(val, pll->params->iddq_reg, pll);
	udelay(2);

	_clk_pll_enable(hw);

	ret = clk_pll_wait_for_lock(pll);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return ret;
}
/* Disable a PLL with IDDQ control: gate it, then assert IDDQ power-down. */
static void clk_pll_iddq_disable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;
	u32 iddq;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	_clk_pll_disable(hw);

	iddq = pll_readl(pll->params->iddq_reg, pll);
	iddq |= BIT(pll->params->iddq_bit_idx);
	pll_writel(iddq, pll->params->iddq_reg, pll);
	udelay(2);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);
}
/*
 * Compute an m/n/p configuration for ramp-capable PLLs when no table entry
 * matches: m is fixed by the reference rate, p is the smallest divider
 * that keeps the VCO (rate * p) at or above vco_min.
 *
 * Note: cfg->p holds the actual divider value here, not the hardware
 * encoding; callers adjust it before programming.
 */
static int _calc_dynamic_ramp_rate(struct clk_hw *hw,
		struct tegra_clk_pll_freq_table *cfg,
		unsigned long rate, unsigned long parent_rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned int p;

	if (!rate)
		return -EINVAL;

	/* Smallest p lifting the VCO to at least vco_min */
	p = DIV_ROUND_UP(pll->params->vco_min, rate);
	cfg->m = _pll_fixed_mdiv(pll->params, parent_rate);
	cfg->p = p;
	cfg->output_rate = rate * cfg->p;
	cfg->n = cfg->output_rate * cfg->m / parent_rate;

	if (cfg->n > divn_max(pll) || cfg->output_rate > pll->params->vco_max)
		return -EINVAL;

	return 0;
}
/*
 * Get an m/n/p configuration for a ramp-capable PLL: prefer the frequency
 * table, fall back to run-time computation.  Table entries must use the
 * fixed m for this reference rate, and p must be non-zero and within
 * max_p.
 */
static int _pll_ramp_calc_pll(struct clk_hw *hw,
			      struct tegra_clk_pll_freq_table *cfg,
			      unsigned long rate, unsigned long parent_rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	int err = 0;

	err = _get_table_rate(hw, cfg, rate, parent_rate);
	if (err < 0)
		err = _calc_dynamic_ramp_rate(hw, cfg, rate, parent_rate);
	else if (cfg->m != _pll_fixed_mdiv(pll->params, parent_rate)) {
		/* Table entries with a different m are invalid here */
		WARN_ON(1);
		err = -EINVAL;
		goto out;
	}

	if (!cfg->p || (cfg->p > pll->params->max_p))
		err = -EINVAL;

out:
	return err;
}
/*
 * Set rate for PLLX/PLLC-style PLLs using the pllxc_p PDIV encoding.  The
 * current hardware PDIV field is decoded through pllxc_p[] before being
 * compared with the requested divider value.
 *
 * NOTE(review): cfg.p is decremented by one to form the value written to
 * hardware; this matches pllxc_p[] only for p <= 6 — confirm larger
 * dividers cannot be produced by _pll_ramp_calc_pll() here.
 */
static int clk_pllxc_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	struct tegra_clk_pll_freq_table cfg, old_cfg;
	unsigned long flags = 0;
	int ret = 0;
	u8 old_p;

	ret = _pll_ramp_calc_pll(hw, &cfg, rate, parent_rate);
	if (ret < 0)
		return ret;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	_get_pll_mnp(pll, &old_cfg);
	/* Decode the raw PDIV field into the actual divider value */
	old_p = pllxc_p[old_cfg.p];

	if (old_cfg.m != cfg.m || old_cfg.n != cfg.n || old_p != cfg.p) {
		cfg.p -= 1;
		ret = _program_pll(hw, &cfg, rate);
	}

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return ret;
}
/*
 * Round a rate for ramp-capable PLLs: parent * n / (m * p), where p is the
 * actual divider value produced by _pll_ramp_calc_pll().
 */
static long clk_pll_ramp_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *prate)
{
	struct tegra_clk_pll_freq_table cfg;
	u64 out = *prate;
	int err;

	err = _pll_ramp_calc_pll(hw, &cfg, rate, *prate);
	if (err < 0)
		return err;

	out *= cfg.n;
	do_div(out, cfg.m * cfg.p);

	return out;
}
/*
 * Set rate for PLLM (memory PLL).  Changing an enabled PLLM is refused;
 * asking for the current rate is a no-op.  When the PMC override is
 * active, the configuration is written to the PMC shadow registers
 * instead of the PLL itself.
 */
static int clk_pllm_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct tegra_clk_pll_freq_table cfg;
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;
	int state, ret = 0;
	u32 val;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	state = clk_pll_is_enabled(hw);
	if (state) {
		if (rate != clk_get_rate(hw->clk)) {
			pr_err("%s: Cannot change active PLLM\n", __func__);
			ret = -EINVAL;
			goto out;
		}
		/* Already running at the requested rate: nothing to do */
		goto out;
	}

	ret = _pll_ramp_calc_pll(hw, &cfg, rate, parent_rate);
	if (ret < 0)
		goto out;

	/* NOTE(review): p reduced by one before programming, like PLLXC */
	cfg.p -= 1;

	val = readl_relaxed(pll->pmc + PMC_PLLM_WB0_OVERRIDE);
	if (val & PMC_PLLP_WB0_OVERRIDE_PLLM_OVERRIDE) {
		/* Override active: program the PMC shadow copies instead */
		val = readl_relaxed(pll->pmc + PMC_PLLM_WB0_OVERRIDE_2);
		val = cfg.p ? (val | PMC_PLLM_WB0_OVERRIDE_2_DIVP_MASK) :
			(val & ~PMC_PLLM_WB0_OVERRIDE_2_DIVP_MASK);
		writel_relaxed(val, pll->pmc + PMC_PLLM_WB0_OVERRIDE_2);

		val = readl_relaxed(pll->pmc + PMC_PLLM_WB0_OVERRIDE);
		val &= ~(divn_mask(pll) | divm_mask(pll));
		val |= (cfg.m << pll->divm_shift) | (cfg.n << pll->divn_shift);
		writel_relaxed(val, pll->pmc + PMC_PLLM_WB0_OVERRIDE);
	} else
		_update_pll_mnp(pll, &cfg);

out:
	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return ret;
}
/* Pulse the PLLCX strobe bit: set it, wait, then clear it again. */
static void _pllcx_strobe(struct tegra_clk_pll *pll)
{
	u32 misc = pll_readl_misc(pll);

	pll_writel_misc(misc | PLLCX_MISC_STROBE, pll);
	udelay(2);
	pll_writel_misc(misc & ~PLLCX_MISC_STROBE, pll);
}
/*
 * Enable PLLC: set the enable bit, release the PLLCX reset, pulse the
 * strobe (to apply the shadowed configuration), then wait for lock.
 */
static int clk_pllc_enable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	u32 val;
	int ret = 0;
	unsigned long flags = 0;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	_clk_pll_enable(hw);
	udelay(2);

	/* Take the PLL out of reset */
	val = pll_readl_misc(pll);
	val &= ~PLLCX_MISC_RESET;
	pll_writel_misc(val, pll);
	udelay(2);

	_pllcx_strobe(pll);
	ret = clk_pll_wait_for_lock(pll);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return ret;
}
/* Disable PLLC and put it back into reset (lock held by caller). */
static void _clk_pllc_disable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	u32 misc;

	_clk_pll_disable(hw);

	misc = pll_readl_misc(pll);
	pll_writel_misc(misc | PLLCX_MISC_RESET, pll);
	udelay(2);
}
/* Locked PLLC disable wrapper. */
static void clk_pllc_disable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	_clk_pllc_disable(hw);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);
}
/*
 * Select the PLLCX SDM/filter divider range based on the feedback divider
 * n: low range at or below a reference-rate-dependent threshold, high
 * range above it.
 */
static int _pllcx_update_dynamic_coef(struct tegra_clk_pll *pll,
				      unsigned long input_rate, u32 n)
{
	u32 misc, n_threshold;

	if (input_rate == 12000000) {
		n_threshold = 70;
	} else if (input_rate == 13000000 || input_rate == 26000000) {
		n_threshold = 71;
	} else if (input_rate == 16800000) {
		n_threshold = 55;
	} else if (input_rate == 19200000) {
		n_threshold = 48;
	} else {
		pr_err("%s: Unexpected reference rate %lu\n",
			__func__, input_rate);
		return -EINVAL;
	}

	misc = pll_readl_misc(pll);
	misc &= ~(PLLCX_MISC_SDM_DIV_MASK | PLLCX_MISC_FILT_DIV_MASK);
	misc |= (n <= n_threshold) ?
		PLLCX_MISC_DIV_LOW_RANGE : PLLCX_MISC_DIV_HIGH_RANGE;
	pll_writel_misc(misc, pll);

	return 0;
}
/*
 * Set rate for PLLC (Tegra114).  m is fixed and may not change; when the
 * (decoded) n or p differs, the PLL is stopped, the dynamic-range
 * coefficients are updated for the new n, and the PLL is reprogrammed and
 * restarted.
 *
 * NOTE(review): clk_pllc_enable() takes pll->lock itself, but is called
 * here with pll->lock already held — looks like a potential self-deadlock
 * whenever pll->lock is non-NULL; verify against the lock setup.
 */
static int clk_pllc_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct tegra_clk_pll_freq_table cfg;
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;
	int state, ret = 0;
	u32 val;
	u16 old_m, old_n;
	u8 old_p;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	ret = _pll_ramp_calc_pll(hw, &cfg, rate, parent_rate);
	if (ret < 0)
		goto out;

	val = pll_readl_base(pll);
	old_m = (val >> pll->divm_shift) & (divm_mask(pll));
	old_n = (val >> pll->divn_shift) & (divn_mask(pll));
	/* Decode the hardware PDIV field into the actual divider value */
	old_p = pllcx_p[(val >> pll->divp_shift) & (divp_mask(pll))];

	if (cfg.m != old_m) {
		/* m is fixed for PLLC; a differing m means a bad config */
		WARN_ON(1);
		goto out;
	}

	if (old_n == cfg.n && old_p == cfg.p)
		goto out;

	/*
	 * NOTE(review): p is reduced by one to form the h/w encoding; this
	 * matches pllcx_p[] only for p <= 4 — confirm larger dividers are
	 * never requested here.
	 */
	cfg.p -= 1;

	state = clk_pll_is_enabled(hw);
	if (state)
		_clk_pllc_disable(hw);

	ret = _pllcx_update_dynamic_coef(pll, parent_rate, cfg.n);
	if (ret < 0)
		goto out;

	_update_pll_mnp(pll, &cfg);

	if (state)
		ret = clk_pllc_enable(hw);

out:
	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return ret;
}
/*
 * Compute the PLLRE VCO rate (parent * n / m) for a requested rate; the
 * m/n pair is optionally stored into @cfg when it is non-NULL.
 */
static long _pllre_calc_rate(struct tegra_clk_pll *pll,
			     struct tegra_clk_pll_freq_table *cfg,
			     unsigned long rate, unsigned long parent_rate)
{
	u64 out = parent_rate;
	u16 m = _pll_fixed_mdiv(pll->params, parent_rate);
	u16 n = rate * m / parent_rate;

	out *= n;
	do_div(out, m);

	if (cfg) {
		cfg->m = m;
		cfg->n = n;
	}

	return out;
}
/*
 * Set rate for PLLRE (VCO output).  Only m and n are recomputed; the
 * currently programmed p is preserved.  A running PLL is stopped around
 * the reprogram and re-locked afterwards.
 */
static int clk_pllre_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct tegra_clk_pll_freq_table cfg, old_cfg;
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;
	int state, ret = 0;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	_pllre_calc_rate(pll, &cfg, rate, parent_rate);
	_get_pll_mnp(pll, &old_cfg);
	/* Keep the current post divider */
	cfg.p = old_cfg.p;

	if (cfg.m != old_cfg.m || cfg.n != old_cfg.n) {
		state = clk_pll_is_enabled(hw);
		if (state)
			_clk_pll_disable(hw);

		_update_pll_mnp(pll, &cfg);

		if (state) {
			_clk_pll_enable(hw);
			ret = clk_pll_wait_for_lock(pll);
		}
	}

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return ret;
}
/* PLLRE VCO rate: parent * n / m (post divider handled downstream). */
static unsigned long clk_pllre_recalc_rate(struct clk_hw *hw,
					   unsigned long parent_rate)
{
	struct tegra_clk_pll_freq_table cfg;
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	u64 out = parent_rate;

	_get_pll_mnp(pll, &cfg);

	out *= cfg.n;
	do_div(out, cfg.m);

	return out;
}
/* Rounding is the same computation as set_rate, without programming. */
static long clk_pllre_round_rate(struct clk_hw *hw, unsigned long rate,
				 unsigned long *prate)
{
	return _pllre_calc_rate(to_clk_pll(hw), NULL, rate, *prate);
}
/*
 * Tegra114 PLLE enable sequence: switch to software control of the enable
 * and IDDQ signals, power the PLL up, disable spread spectrum, program
 * the dividers from the fixed-rate table entry, then enable and wait for
 * lock.
 */
static int clk_plle_tegra114_enable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	struct tegra_clk_pll_freq_table sel;
	u32 val;
	int ret;
	unsigned long flags = 0;
	unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));

	if (_get_table_rate(hw, &sel, pll->fixed_rate, input_rate))
		return -EINVAL;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	val = pll_readl_base(pll);
	val &= ~BIT(29); /* Disable lock override */
	pll_writel_base(val, pll);

	/* Take software control of the enable signal */
	val = pll_readl(pll->params->aux_reg, pll);
	val |= PLLE_AUX_ENABLE_SWCTL;
	val &= ~PLLE_AUX_SEQ_ENABLE;
	pll_writel(val, pll->params->aux_reg, pll);
	udelay(1);

	/* Power up: software IDDQ control, IDDQ de-asserted */
	val = pll_readl_misc(pll);
	val |= PLLE_MISC_LOCK_ENABLE;
	val |= PLLE_MISC_IDDQ_SW_CTRL;
	val &= ~PLLE_MISC_IDDQ_SW_VALUE;
	val |= PLLE_MISC_PLLE_PTS;
	val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
	pll_writel_misc(val, pll);
	udelay(5);

	/* Turn off spread spectrum */
	val = pll_readl(PLLE_SS_CTRL, pll);
	val |= PLLE_SS_DISABLE;
	pll_writel(val, PLLE_SS_CTRL, pll);

	/* Program dividers from the fixed-rate table entry */
	val = pll_readl_base(pll);
	val &= ~(divm_mask(pll) | divn_mask(pll) | divp_mask(pll));
	val &= ~(PLLE_BASE_DIVCML_WIDTH << PLLE_BASE_DIVCML_SHIFT);
	val |= sel.m << pll->divm_shift;
	val |= sel.n << pll->divn_shift;
	val |= sel.cpcon << PLLE_BASE_DIVCML_SHIFT;
	pll_writel_base(val, pll);
	udelay(1);

	_clk_pll_enable(hw);
	ret = clk_pll_wait_for_lock(pll);

	if (ret < 0)
		goto out;

	/* TODO: enable hw control of xusb brick pll */

out:
	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);

	return ret;
}
/* Disable Tegra114 PLLE and assert IDDQ to power the analog block down. */
static void clk_plle_tegra114_disable(struct clk_hw *hw)
{
	struct tegra_clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;
	u32 misc;

	if (pll->lock)
		spin_lock_irqsave(pll->lock, flags);

	_clk_pll_disable(hw);

	misc = pll_readl_misc(pll);
	misc |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
	pll_writel_misc(misc, pll);
	udelay(1);

	if (pll->lock)
		spin_unlock_irqrestore(pll->lock, flags);
}
#endif
/*
 * Allocate and fill a tegra_clk_pll instance with the generic base-register
 * field layout.  Returns ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct tegra_clk_pll *_tegra_init_pll(void __iomem *clk_base,
		void __iomem *pmc, unsigned long fixed_rate,
		struct tegra_clk_pll_params *pll_params, u32 pll_flags,
		struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
{
	struct tegra_clk_pll *pll = kzalloc(sizeof(*pll), GFP_KERNEL);

	if (!pll)
		return ERR_PTR(-ENOMEM);

	/* Hardware handles */
	pll->clk_base = clk_base;
	pll->pmc = pmc;

	/* Per-PLL configuration */
	pll->params = pll_params;
	pll->freq_table = freq_table;
	pll->fixed_rate = fixed_rate;
	pll->flags = pll_flags;
	pll->lock = lock;

	/* All generic PLLs share the same base-register field layout */
	pll->divm_shift = PLL_BASE_DIVM_SHIFT;
	pll->divm_width = PLL_BASE_DIVM_WIDTH;
	pll->divn_shift = PLL_BASE_DIVN_SHIFT;
	pll->divn_width = PLL_BASE_DIVN_WIDTH;
	pll->divp_shift = PLL_BASE_DIVP_SHIFT;
	pll->divp_width = PLL_BASE_DIVP_WIDTH;

	return pll;
}
static struct clk *_tegra_clk_register_pll(struct tegra_clk_pll *pll,
const char *name, const char *parent_name, unsigned long flags,
const struct clk_ops *ops)
{
struct clk_init_data init;
init.name = name;
init.ops = ops;
init.flags = flags;
init.parent_names = (parent_name ? &parent_name : NULL);
init.num_parents = (parent_name ? 1 : 0);
/* Data in .init is copied by clk_register(), so stack variable OK */
pll->hw.init = &init;
return clk_register(NULL, &pll->hw);
}
/* Register a generic Tegra PLL clock. */
struct clk *tegra_clk_register_pll(const char *name, const char *parent_name,
		void __iomem *clk_base, void __iomem *pmc,
		unsigned long flags, unsigned long fixed_rate,
		struct tegra_clk_pll_params *pll_params, u32 pll_flags,
		struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
{
	struct tegra_clk_pll *pll;
	struct clk *clk;

	/* Generic PLLs support bypass and have a lock-enable bit */
	pll_flags |= TEGRA_PLL_BYPASS | TEGRA_PLL_HAS_LOCK_ENABLE;

	pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
			      freq_table, lock);
	if (IS_ERR(pll))
		return ERR_CAST(pll);

	clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
				      &tegra_clk_pll_ops);
	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}
/* Register PLLE; its lock status is reported in the MISC register. */
struct clk *tegra_clk_register_plle(const char *name, const char *parent_name,
		void __iomem *clk_base, void __iomem *pmc,
		unsigned long flags, unsigned long fixed_rate,
		struct tegra_clk_pll_params *pll_params, u32 pll_flags,
		struct tegra_clk_pll_freq_table *freq_table, spinlock_t *lock)
{
	struct tegra_clk_pll *pll;
	struct clk *clk;

	pll_flags |= TEGRA_PLL_LOCK_MISC | TEGRA_PLL_BYPASS |
		     TEGRA_PLL_HAS_LOCK_ENABLE;

	pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
			      freq_table, lock);
	if (IS_ERR(pll))
		return ERR_CAST(pll);

	clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
				      &tegra_clk_plle_ops);
	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}
#ifdef CONFIG_ARCH_TEGRA_114_SOC
/* PLLX/PLLC-style PLLs: IDDQ power control, encoded PDIV field */
const struct clk_ops tegra_clk_pllxc_ops = {
	.is_enabled = clk_pll_is_enabled,
	.enable = clk_pll_iddq_enable,
	.disable = clk_pll_iddq_disable,
	.recalc_rate = clk_pll_recalc_rate,
	.round_rate = clk_pll_ramp_round_rate,
	.set_rate = clk_pllxc_set_rate,
};
/* PLLM (memory PLL): rate changes only while disabled, PMC override aware */
const struct clk_ops tegra_clk_pllm_ops = {
	.is_enabled = clk_pll_is_enabled,
	.enable = clk_pll_iddq_enable,
	.disable = clk_pll_iddq_disable,
	.recalc_rate = clk_pll_recalc_rate,
	.round_rate = clk_pll_ramp_round_rate,
	.set_rate = clk_pllm_set_rate,
};
/* PLLC (Tegra114): shadowed registers, reset/strobe enable sequence */
const struct clk_ops tegra_clk_pllc_ops = {
	.is_enabled = clk_pll_is_enabled,
	.enable = clk_pllc_enable,
	.disable = clk_pllc_disable,
	.recalc_rate = clk_pll_recalc_rate,
	.round_rate = clk_pll_ramp_round_rate,
	.set_rate = clk_pllc_set_rate,
};
/* PLLRE: VCO output (m/n only); lock status lives in MISC */
const struct clk_ops tegra_clk_pllre_ops = {
	.is_enabled = clk_pll_is_enabled,
	.enable = clk_pll_iddq_enable,
	.disable = clk_pll_iddq_disable,
	.recalc_rate = clk_pllre_recalc_rate,
	.round_rate = clk_pllre_round_rate,
	.set_rate = clk_pllre_set_rate,
};
/* PLLE (Tegra114): fixed rate, no round_rate/set_rate */
const struct clk_ops tegra_clk_plle_tegra114_ops = {
	.is_enabled = clk_pll_is_enabled,
	.enable = clk_plle_tegra114_enable,
	.disable = clk_plle_tegra114_disable,
	.recalc_rate = clk_pll_recalc_rate,
};
/* Register a PLLX/PLLC-style PLL; requires a PDIV translation table. */
struct clk *tegra_clk_register_pllxc(const char *name, const char *parent_name,
			  void __iomem *clk_base, void __iomem *pmc,
			  unsigned long flags, unsigned long fixed_rate,
			  struct tegra_clk_pll_params *pll_params,
			  u32 pll_flags,
			  struct tegra_clk_pll_freq_table *freq_table,
			  spinlock_t *lock)
{
	struct tegra_clk_pll *pll;
	struct clk *clk;

	/* A PDIV mapping table is mandatory for this PLL type */
	if (!pll_params->pdiv_tohw)
		return ERR_PTR(-EINVAL);

	pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;

	pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
			      freq_table, lock);
	if (IS_ERR(pll))
		return ERR_CAST(pll);

	clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
				      &tegra_clk_pllxc_ops);
	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}
/*
 * Register PLLRE.  When the PLL is found disabled it is preset to the
 * minimum VCO rate; when found enabled, it must not be in IDDQ.
 *
 * Fixes over the previous version:
 *  - iddq_bit_idx is a bit *index* (used as BIT(...) everywhere else in
 *    this file), so the WARN_ON must test BIT(iddq_bit_idx), not the raw
 *    index value.
 *  - TEGRA_PLL_LOCK_MISC is now OR-ed into pll_flags before the pll is
 *    created; the old code set it after _tegra_init_pll(), which had no
 *    effect on the registered clock even though PLLRE's lock status is in
 *    its MISC register.
 */
struct clk *tegra_clk_register_pllre(const char *name, const char *parent_name,
			  void __iomem *clk_base, void __iomem *pmc,
			  unsigned long flags, unsigned long fixed_rate,
			  struct tegra_clk_pll_params *pll_params,
			  u32 pll_flags,
			  struct tegra_clk_pll_freq_table *freq_table,
			  spinlock_t *lock, unsigned long parent_rate)
{
	u32 val;
	struct tegra_clk_pll *pll;
	struct clk *clk;

	pll_flags |= TEGRA_PLL_HAS_LOCK_ENABLE;
	pll_flags |= TEGRA_PLL_LOCK_MISC;

	pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
			      freq_table, lock);
	if (IS_ERR(pll))
		return ERR_CAST(pll);

	/* program minimum rate by default */
	val = pll_readl_base(pll);
	if (val & PLL_BASE_ENABLE) {
		/* An enabled PLL must not be sitting in IDDQ power-down */
		WARN_ON(val & BIT(pll_params->iddq_bit_idx));
	} else {
		int m;

		m = _pll_fixed_mdiv(pll_params, parent_rate);
		val = m << PLL_BASE_DIVM_SHIFT;
		val |= (pll_params->vco_min / parent_rate)
				<< PLL_BASE_DIVN_SHIFT;
		pll_writel_base(val, pll);
	}

	/* disable lock override */
	val = pll_readl_misc(pll);
	val &= ~BIT(29);
	pll_writel_misc(val, pll);

	clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
				      &tegra_clk_pllre_ops);
	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}
/* Register PLLM (memory PLL); requires a PDIV translation table. */
struct clk *tegra_clk_register_pllm(const char *name, const char *parent_name,
			  void __iomem *clk_base, void __iomem *pmc,
			  unsigned long flags, unsigned long fixed_rate,
			  struct tegra_clk_pll_params *pll_params,
			  u32 pll_flags,
			  struct tegra_clk_pll_freq_table *freq_table,
			  spinlock_t *lock)
{
	struct tegra_clk_pll *pll;
	struct clk *clk;

	/* A PDIV mapping table is mandatory for PLLM */
	if (!pll_params->pdiv_tohw)
		return ERR_PTR(-EINVAL);

	pll_flags |= TEGRA_PLL_BYPASS | TEGRA_PLL_HAS_LOCK_ENABLE;

	pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
			      freq_table, lock);
	if (IS_ERR(pll))
		return ERR_CAST(pll);

	clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
				      &tegra_clk_pllm_ops);
	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}
/*
 * Register PLLC (Tegra114).  PLLCX shadow registers cannot be read back,
 * so the PLL is initialized here to a known default: disabled, in reset,
 * dividers preset for half of the minimum VCO rate.
 *
 * Fixes over the previous version:
 *  - __clk_lookup() returns NULL (not an ERR_PTR) when the parent is not
 *    found.  The old IS_ERR() check let a NULL parent through, leading to
 *    __clk_get_rate(NULL) == 0 and a divide-by-zero on parent_rate.
 *  - The WARN() arguments were swapped relative to the format string.
 *  - pll was leaked on the "no pdiv mapping for p = 2" error path.
 */
struct clk *tegra_clk_register_pllc(const char *name, const char *parent_name,
			  void __iomem *clk_base, void __iomem *pmc,
			  unsigned long flags, unsigned long fixed_rate,
			  struct tegra_clk_pll_params *pll_params,
			  u32 pll_flags,
			  struct tegra_clk_pll_freq_table *freq_table,
			  spinlock_t *lock)
{
	struct clk *parent, *clk;
	struct pdiv_map *p_tohw = pll_params->pdiv_tohw;
	struct tegra_clk_pll *pll;
	struct tegra_clk_pll_freq_table cfg;
	unsigned long parent_rate;

	if (!p_tohw)
		return ERR_PTR(-EINVAL);

	parent = __clk_lookup(parent_name);
	if (!parent) {
		WARN(1, "parent clk %s of %s must be registered first\n",
			parent_name, name);
		return ERR_PTR(-EINVAL);
	}

	pll_flags |= TEGRA_PLL_BYPASS;
	pll = _tegra_init_pll(clk_base, pmc, fixed_rate, pll_params, pll_flags,
			      freq_table, lock);
	if (IS_ERR(pll))
		return ERR_CAST(pll);

	parent_rate = __clk_get_rate(parent);

	/*
	 * Most of PLLC register fields are shadowed, and can not be read
	 * directly from PLL h/w. Hence, actual PLLC boot state is unknown.
	 * Initialize PLL to default state: disabled, reset; shadow registers
	 * loaded with default parameters; dividers are preset for half of
	 * minimum VCO rate (the latter assured that shadowed divider settings
	 * are within supported range).
	 */
	cfg.m = _pll_fixed_mdiv(pll_params, parent_rate);
	cfg.n = cfg.m * pll_params->vco_min / parent_rate;

	/* Find the hardware encoding for a post divider of 2 */
	while (p_tohw->pdiv) {
		if (p_tohw->pdiv == 2) {
			cfg.p = p_tohw->hw_val;
			break;
		}
		p_tohw++;
	}

	if (!p_tohw->pdiv) {
		WARN_ON(1);
		kfree(pll);	/* was leaked on this error path */
		return ERR_PTR(-EINVAL);
	}

	pll_writel_base(0, pll);
	_update_pll_mnp(pll, &cfg);

	pll_writel_misc(PLLCX_MISC_DEFAULT, pll);
	pll_writel(PLLCX_MISC1_DEFAULT, pll_params->ext_misc_reg[0], pll);
	pll_writel(PLLCX_MISC2_DEFAULT, pll_params->ext_misc_reg[1], pll);
	pll_writel(PLLCX_MISC3_DEFAULT, pll_params->ext_misc_reg[2], pll);

	_pllcx_update_dynamic_coef(pll, parent_rate, cfg.n);

	clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
				      &tegra_clk_pllc_ops);
	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}
/*
 * Register PLLE for Tegra114, ensuring the PLLE input mux selects
 * pll_re_vco before registration.
 *
 * Fixes over the previous version:
 *  - The updated aux value (val_aux) is now written to the aux register;
 *    the old code wrote the base-register value (val), so the PLLRE mux
 *    selection never actually took effect.
 *  - The WARN now tests PLLE_AUX_PLLP_SEL against the aux register value
 *    (val_aux); the old code tested it against the base register value.
 */
struct clk *tegra_clk_register_plle_tegra114(const char *name,
				const char *parent_name,
				void __iomem *clk_base, unsigned long flags,
				unsigned long fixed_rate,
				struct tegra_clk_pll_params *pll_params,
				struct tegra_clk_pll_freq_table *freq_table,
				spinlock_t *lock)
{
	struct tegra_clk_pll *pll;
	struct clk *clk;
	u32 val, val_aux;

	pll = _tegra_init_pll(clk_base, NULL, fixed_rate, pll_params,
			TEGRA_PLL_HAS_LOCK_ENABLE, freq_table, lock);
	if (IS_ERR(pll))
		return ERR_CAST(pll);

	/* ensure parent is set to pll_re_vco */
	val = pll_readl_base(pll);
	val_aux = pll_readl(pll_params->aux_reg, pll);

	if (val & PLL_BASE_ENABLE) {
		/* Don't reparent a running PLL; just warn if it's wrong */
		if (!(val_aux & PLLE_AUX_PLLRE_SEL))
			WARN(1, "pll_e enabled with unsupported parent %s\n",
			     (val_aux & PLLE_AUX_PLLP_SEL) ? "pllp_out0" :
			     "pll_ref");
	} else {
		val_aux |= PLLE_AUX_PLLRE_SEL;
		pll_writel(val_aux, pll_params->aux_reg, pll);
	}

	clk = _tegra_clk_register_pll(pll, name, parent_name, flags,
				      &tegra_clk_plle_tegra114_ops);
	if (IS_ERR(clk))
		kfree(pll);

	return clk;
}
#endif
| gpl-2.0 |
PixNDom/android_kernel_lenovo_Tab2A710F | drivers/staging/media/go7007/go7007-usb.c | 2087 | 36247 | /*
* Copyright (C) 2005-2006 Micronas USA Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License (Version 2) as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software Foundation,
* Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/wait.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/mm.h>
#include <linux/usb.h>
#include <linux/i2c.h>
#include <asm/byteorder.h>
#include <media/saa7115.h>
#include <media/tuner.h>
#include <media/uda1342.h>
#include "go7007-priv.h"
static unsigned int assume_endura;
module_param(assume_endura, int, 0644);
MODULE_PARM_DESC(assume_endura, "when probing fails, "
"hardware is a Pelco Endura");
/* #define GO7007_USB_DEBUG */
/* #define GO7007_I2C_DEBUG */ /* for debugging the EZ-USB I2C adapter */
#define HPI_STATUS_ADDR 0xFFF4
#define INT_PARAM_ADDR 0xFFF6
#define INT_INDEX_ADDR 0xFFF8
/*
* Pipes on EZ-USB interface:
* 0 snd - Control
* 0 rcv - Control
* 2 snd - Download firmware (control)
* 4 rcv - Read Interrupt (interrupt)
* 6 rcv - Read Video (bulk)
* 8 rcv - Read Audio (bulk)
*/
#define GO7007_USB_EZUSB (1<<0)
#define GO7007_USB_EZUSB_I2C (1<<1)
/* Static per-model configuration: USB interface flags plus the generic
 * go7007 board description consumed by the core driver. */
struct go7007_usb_board {
	unsigned int flags;		/* GO7007_USB_EZUSB* interface flags */
	struct go7007_board_info main_info;
};

/* Per-device runtime state, stored in go7007->hpi_context. */
struct go7007_usb {
	const struct go7007_usb_board *board;
	struct mutex i2c_lock;		/* serializes EZ-USB I2C transactions */
	struct usb_device *usbdev;
	struct urb *video_urbs[8];	/* bulk URBs for the video stream */
	struct urb *audio_urbs[8];	/* bulk URBs for the audio stream */
	struct urb *intr_urb;		/* URB for reading device interrupts */
};
/*********************** Product specification data ***********************/
static const struct go7007_usb_board board_matrix_ii = {
.flags = GO7007_USB_EZUSB,
.main_info = {
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_VALID_ENABLE |
GO7007_SENSOR_TV |
GO7007_SENSOR_SAA7115 |
GO7007_SENSOR_VBI |
GO7007_SENSOR_SCALING,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "saa7115",
.addr = 0x20,
.is_video = 1,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 0,
.name = "Composite",
},
{
.video_input = 9,
.name = "S-Video",
},
},
.video_config = SAA7115_IDQ_IS_DEFAULT,
},
};
static const struct go7007_usb_board board_matrix_reload = {
.flags = GO7007_USB_EZUSB,
.main_info = {
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "saa7113",
.addr = 0x25,
.is_video = 1,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 0,
.name = "Composite",
},
{
.video_input = 9,
.name = "S-Video",
},
},
.video_config = SAA7115_IDQ_IS_DEFAULT,
},
};
static const struct go7007_usb_board board_star_trek = {
.flags = GO7007_USB_EZUSB | GO7007_USB_EZUSB_I2C,
.main_info = {
.flags = GO7007_BOARD_HAS_AUDIO, /* |
GO7007_BOARD_HAS_TUNER, */
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_VALID_ENABLE |
GO7007_SENSOR_TV |
GO7007_SENSOR_SAA7115 |
GO7007_SENSOR_VBI |
GO7007_SENSOR_SCALING,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_WORD_16,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "saa7115",
.addr = 0x20,
.is_video = 1,
},
},
.num_inputs = 2,
.inputs = {
/* {
* .video_input = 3,
* .audio_index = AUDIO_TUNER,
* .name = "Tuner",
* },
*/
{
.video_input = 1,
/* .audio_index = AUDIO_EXTERN, */
.name = "Composite",
},
{
.video_input = 8,
/* .audio_index = AUDIO_EXTERN, */
.name = "S-Video",
},
},
.video_config = SAA7115_IDQ_IS_DEFAULT,
},
};
static const struct go7007_usb_board board_px_tv402u = {
.flags = GO7007_USB_EZUSB | GO7007_USB_EZUSB_I2C,
.main_info = {
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_HAS_TUNER,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_VALID_ENABLE |
GO7007_SENSOR_TV |
GO7007_SENSOR_SAA7115 |
GO7007_SENSOR_VBI |
GO7007_SENSOR_SCALING,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_WORD_16,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.num_i2c_devs = 5,
.i2c_devs = {
{
.type = "saa7115",
.addr = 0x20,
.is_video = 1,
},
{
.type = "uda1342",
.addr = 0x1a,
.is_audio = 1,
},
{
.type = "tuner",
.addr = 0x60,
},
{
.type = "tuner",
.addr = 0x43,
},
{
.type = "sony-btf-mpx",
.addr = 0x44,
},
},
.num_inputs = 3,
.inputs = {
{
.video_input = 3,
.audio_index = 0,
.name = "Tuner",
},
{
.video_input = 1,
.audio_index = 1,
.name = "Composite",
},
{
.video_input = 8,
.audio_index = 1,
.name = "S-Video",
},
},
.video_config = SAA7115_IDQ_IS_DEFAULT,
.num_aud_inputs = 2,
.aud_inputs = {
{
.audio_input = UDA1342_IN2,
.name = "Tuner",
},
{
.audio_input = UDA1342_IN1,
.name = "Line In",
},
},
},
};
static const struct go7007_usb_board board_xmen = {
.flags = 0,
.main_info = {
.flags = GO7007_BOARD_USE_ONBOARD_I2C,
.hpi_buffer_cap = 0,
.sensor_flags = GO7007_SENSOR_VREF_POLAR,
.sensor_width = 320,
.sensor_height = 240,
.sensor_framerate = 30030,
.audio_flags = GO7007_AUDIO_ONE_CHANNEL |
GO7007_AUDIO_I2S_MODE_3 |
GO7007_AUDIO_WORD_14 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_BCLK_POLAR |
GO7007_AUDIO_OKI_MODE,
.audio_rate = 8000,
.audio_bclk_div = 48,
.audio_main_div = 1,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "ov7640",
.addr = 0x21,
},
},
.num_inputs = 1,
.inputs = {
{
.name = "Camera",
},
},
},
};
static const struct go7007_usb_board board_matrix_revolution = {
.flags = GO7007_USB_EZUSB,
.main_info = {
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "tw9903",
.is_video = 1,
.addr = 0x44,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 2,
.name = "Composite",
},
{
.video_input = 8,
.name = "S-Video",
},
},
},
};
static const struct go7007_usb_board board_lifeview_lr192 = {
.flags = GO7007_USB_EZUSB,
.main_info = {
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_VALID_ENABLE |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI |
GO7007_SENSOR_SCALING,
.num_i2c_devs = 0,
.num_inputs = 1,
.inputs = {
{
.video_input = 0,
.name = "Composite",
},
},
},
};
static const struct go7007_usb_board board_endura = {
.flags = 0,
.main_info = {
.flags = 0,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.audio_rate = 8000,
.audio_bclk_div = 48,
.audio_main_div = 8,
.hpi_buffer_cap = 0,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV,
.sensor_h_offset = 8,
.num_i2c_devs = 0,
.num_inputs = 1,
.inputs = {
{
.name = "Camera",
},
},
},
};
static const struct go7007_usb_board board_adlink_mpg24 = {
.flags = 0,
.main_info = {
.flags = GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 0,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "tw2804",
.addr = 0x00, /* yes, really */
.flags = I2C_CLIENT_TEN,
.is_video = 1,
},
},
.num_inputs = 1,
.inputs = {
{
.name = "Composite",
},
},
},
};
static const struct go7007_usb_board board_sensoray_2250 = {
.flags = GO7007_USB_EZUSB | GO7007_USB_EZUSB_I2C,
.main_info = {
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.flags = GO7007_BOARD_HAS_AUDIO,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "s2250",
.addr = 0x43,
.is_video = 1,
.is_audio = 1,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 0,
.name = "Composite",
},
{
.video_input = 1,
.name = "S-Video",
},
},
.num_aud_inputs = 3,
.aud_inputs = {
{
.audio_input = 0,
.name = "Line In",
},
{
.audio_input = 1,
.name = "Mic",
},
{
.audio_input = 2,
.name = "Mic Boost",
},
},
},
};
static const struct go7007_usb_board board_ads_usbav_709 = {
.flags = GO7007_USB_EZUSB,
.main_info = {
.flags = GO7007_BOARD_HAS_AUDIO |
GO7007_BOARD_USE_ONBOARD_I2C,
.audio_flags = GO7007_AUDIO_I2S_MODE_1 |
GO7007_AUDIO_I2S_MASTER |
GO7007_AUDIO_WORD_16,
.audio_rate = 48000,
.audio_bclk_div = 8,
.audio_main_div = 2,
.hpi_buffer_cap = 7,
.sensor_flags = GO7007_SENSOR_656 |
GO7007_SENSOR_TV |
GO7007_SENSOR_VBI,
.num_i2c_devs = 1,
.i2c_devs = {
{
.type = "tw9906",
.is_video = 1,
.addr = 0x44,
},
},
.num_inputs = 2,
.inputs = {
{
.video_input = 0,
.name = "Composite",
},
{
.video_input = 10,
.name = "S-Video",
},
},
},
};
static const struct usb_device_id go7007_usb_id_table[] = {
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION |
USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x200, /* Revision number of XMen */
.bcdDevice_hi = 0x200,
.bInterfaceClass = 255,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 255,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_XMEN,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x202, /* Revision number of Matrix II */
.bcdDevice_hi = 0x202,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_MATRIX_II,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x204, /* Revision number of Matrix */
.bcdDevice_hi = 0x204, /* Reloaded */
.driver_info = (kernel_ulong_t)GO7007_BOARDID_MATRIX_RELOAD,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION |
USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x205, /* Revision number of XMen-II */
.bcdDevice_hi = 0x205,
.bInterfaceClass = 255,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 255,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_XMEN_II,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x208, /* Revision number of Star Trek */
.bcdDevice_hi = 0x208,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_STAR_TREK,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION |
USB_DEVICE_ID_MATCH_INT_INFO,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x209, /* Revision number of XMen-III */
.bcdDevice_hi = 0x209,
.bInterfaceClass = 255,
.bInterfaceSubClass = 0,
.bInterfaceProtocol = 255,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_XMEN_III,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x0eb1, /* Vendor ID of WIS Technologies */
.idProduct = 0x7007, /* Product ID of GO7007SB chip */
.bcdDevice_lo = 0x210, /* Revision number of Matrix */
.bcdDevice_hi = 0x210, /* Revolution */
.driver_info = (kernel_ulong_t)GO7007_BOARDID_MATRIX_REV,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x093b, /* Vendor ID of Plextor */
.idProduct = 0xa102, /* Product ID of M402U */
.bcdDevice_lo = 0x1, /* revision number of Blueberry */
.bcdDevice_hi = 0x1,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_PX_M402U,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x093b, /* Vendor ID of Plextor */
.idProduct = 0xa104, /* Product ID of TV402U */
.bcdDevice_lo = 0x1,
.bcdDevice_hi = 0x1,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_PX_TV402U,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x10fd, /* Vendor ID of Anubis Electronics */
.idProduct = 0xde00, /* Product ID of Lifeview LR192 */
.bcdDevice_lo = 0x1,
.bcdDevice_hi = 0x1,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_LIFEVIEW_LR192,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x1943, /* Vendor ID Sensoray */
.idProduct = 0x2250, /* Product ID of 2250/2251 */
.bcdDevice_lo = 0x1,
.bcdDevice_hi = 0x1,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_SENSORAY_2250,
},
{
.match_flags = USB_DEVICE_ID_MATCH_DEVICE_AND_VERSION,
.idVendor = 0x06e1, /* Vendor ID of ADS Technologies */
.idProduct = 0x0709, /* Product ID of DVD Xpress DX2 */
.bcdDevice_lo = 0x204,
.bcdDevice_hi = 0x204,
.driver_info = (kernel_ulong_t)GO7007_BOARDID_ADS_USBAV_709,
},
{ } /* Terminating entry */
};
MODULE_DEVICE_TABLE(usb, go7007_usb_id_table);
/********************* Driver for EZ-USB HPI interface *********************/
static int go7007_usb_vendor_request(struct go7007 *go, int request,
int value, int index, void *transfer_buffer, int length, int in)
{
struct go7007_usb *usb = go->hpi_context;
int timeout = 5000;
if (in) {
return usb_control_msg(usb->usbdev,
usb_rcvctrlpipe(usb->usbdev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
value, index, transfer_buffer, length, timeout);
} else {
return usb_control_msg(usb->usbdev,
usb_sndctrlpipe(usb->usbdev, 0), request,
USB_TYPE_VENDOR | USB_RECIP_DEVICE,
value, index, transfer_buffer, length, timeout);
}
}
/*
 * Reset the GO7007 encoder (and, on EZ-USB boards, the EZ-USB buffers),
 * then wait for the firmware's "reset complete" interrupt.  The sequence
 * of writes/sleeps below is order-sensitive; do not reorder.  Returns 0
 * on success, -1 on any failure.
 */
static int go7007_usb_interface_reset(struct go7007 *go)
{
	struct go7007_usb *usb = go->hpi_context;
	u16 intr_val, intr_data;

	if (go->status == STATUS_SHUTDOWN)
		return -1;
	/* Reset encoder */
	if (go7007_write_interrupt(go, 0x0001, 0x0001) < 0)
		return -1;
	msleep(100);
	if (usb->board->flags & GO7007_USB_EZUSB) {
		/* Reset buffer in EZ-USB */
#ifdef GO7007_USB_DEBUG
		printk(KERN_DEBUG "go7007-usb: resetting EZ-USB buffers\n");
#endif
		/* the reset vendor request (0x10) is issued twice on purpose
		 * in the original code -- presumably once per buffer bank;
		 * TODO(review): confirm against the EZ-USB firmware docs */
		if (go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0 ||
		    go7007_usb_vendor_request(go, 0x10, 0, 0, NULL, 0, 0) < 0)
			return -1;
		/* Reset encoder again */
		if (go7007_write_interrupt(go, 0x0001, 0x0001) < 0)
			return -1;
		msleep(100);
	}
	/* Wait for an interrupt to indicate successful hardware reset */
	if (go7007_read_interrupt(go, &intr_val, &intr_data) < 0 ||
	    (intr_val & ~0x1) != 0x55aa) {
		printk(KERN_ERR
			"go7007-usb: unable to reset the USB interface\n");
		return -1;
	}
	return 0;
}
/*
 * Deliver a (addr, data) interrupt pair to the encoder through the EZ-USB
 * HPI registers.  Polls the HPI status register (up to 100 tries, 10 ms
 * apart) until the device has consumed the previous interrupt, then
 * writes the parameter word followed by the index word.  Returns 0 on
 * success, a negative USB error code, or -1 if the device stayed busy.
 */
static int go7007_usb_ezusb_write_interrupt(struct go7007 *go,
						int addr, int data)
{
	struct go7007_usb *usb = go->hpi_context;
	int i, r;
	u16 status_reg = 0;
	int timeout = 500;

#ifdef GO7007_USB_DEBUG
	printk(KERN_DEBUG
		"go7007-usb: WriteInterrupt: %04x %04x\n", addr, data);
#endif

	/* busy-wait until status bit 4 clears (device ready for the next
	 * interrupt) */
	for (i = 0; i < 100; ++i) {
		r = usb_control_msg(usb->usbdev,
				usb_rcvctrlpipe(usb->usbdev, 0), 0x14,
				USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
				0, HPI_STATUS_ADDR, go->usb_buf,
				sizeof(status_reg), timeout);
		if (r < 0)
			break;
		/* usb_buf holds one little-endian 16-bit status word */
		status_reg = le16_to_cpu(*((u16 *)go->usb_buf));
		if (!(status_reg & 0x0010))
			break;
		msleep(10);
	}
	if (r < 0)
		goto write_int_error;
	if (i == 100) {
		printk(KERN_ERR
			"go7007-usb: device is hung, status reg = 0x%04x\n",
			status_reg);
		return -1;
	}
	/* parameter word first, index word second -- presumably the index
	 * write is what triggers delivery; TODO(review): confirm against
	 * the HPI documentation before reordering */
	r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 0), 0x12,
			USB_TYPE_VENDOR | USB_RECIP_DEVICE, data,
			INT_PARAM_ADDR, NULL, 0, timeout);
	if (r < 0)
		goto write_int_error;
	r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 0),
			0x12, USB_TYPE_VENDOR | USB_RECIP_DEVICE, addr,
			INT_INDEX_ADDR, NULL, 0, timeout);
	if (r < 0)
		goto write_int_error;
	return 0;

write_int_error:
	printk(KERN_ERR "go7007-usb: error in WriteInterrupt: %d\n", r);
	return r;
}
/*
 * Deliver a (addr, data) interrupt pair to boards using the on-board HPI
 * interface (no EZ-USB).  The 8-byte payload is: little-endian data word,
 * little-endian address word, four zero padding bytes.  Returns 0 on
 * success or a negative USB error code.
 */
static int go7007_usb_onboard_write_interrupt(struct go7007 *go,
						int addr, int data)
{
	struct go7007_usb *usb = go->hpi_context;
	int timeout = 500;
	int r;

#ifdef GO7007_USB_DEBUG
	printk(KERN_DEBUG
		"go7007-usb: WriteInterrupt: %04x %04x\n", addr, data);
#endif

	memset(go->usb_buf, 0, 8);
	go->usb_buf[0] = data & 0xff;
	go->usb_buf[1] = data >> 8;
	go->usb_buf[2] = addr & 0xff;
	go->usb_buf[3] = addr >> 8;

	r = usb_control_msg(usb->usbdev, usb_sndctrlpipe(usb->usbdev, 2), 0x00,
			USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0x55aa,
			0xf0f0, go->usb_buf, 8, timeout);
	if (r < 0) {
		printk(KERN_ERR "go7007-usb: error in WriteInterrupt: %d\n", r);
		return r;
	}
	return 0;
}
/*
 * Completion handler for the interrupt-read URB.  On a full successful
 * read it stores the two little-endian words from the device (data word
 * first, value word second) and sets interrupt_available.  In every path
 * it wakes sleepers on interrupt_waitq so go7007_read_interrupt() callers
 * never hang.
 */
static void go7007_usb_readinterrupt_complete(struct urb *urb)
{
	struct go7007 *go = (struct go7007 *)urb->context;
	u16 *regs = (u16 *)urb->transfer_buffer;
	int status = urb->status;

	if (status) {
		if (status != -ESHUTDOWN &&
				go->status != STATUS_SHUTDOWN) {
			printk(KERN_ERR
				"go7007-usb: error in read interrupt: %d\n",
				urb->status);
		} else {
			/* expected during teardown: just release waiters */
			wake_up(&go->interrupt_waitq);
			return;
		}
	} else if (urb->actual_length != urb->transfer_buffer_length) {
		printk(KERN_ERR "go7007-usb: short read in interrupt pipe!\n");
	} else {
		go->interrupt_available = 1;
		go->interrupt_data = __le16_to_cpu(regs[0]);
		go->interrupt_value = __le16_to_cpu(regs[1]);
#ifdef GO7007_USB_DEBUG
		printk(KERN_DEBUG "go7007-usb: ReadInterrupt: %04x %04x\n",
			go->interrupt_value, go->interrupt_data);
#endif
	}
	wake_up(&go->interrupt_waitq);
}
/*
 * Kick off an asynchronous interrupt read; the result is delivered by
 * go7007_usb_readinterrupt_complete().  Returns 0 on successful submit,
 * otherwise the negative usb_submit_urb() error.
 */
static int go7007_usb_read_interrupt(struct go7007 *go)
{
	struct go7007_usb *usb = go->hpi_context;
	int r = usb_submit_urb(usb->intr_urb, GFP_KERNEL);

	if (r < 0) {
		printk(KERN_ERR
			"go7007-usb: unable to submit interrupt urb: %d\n", r);
		return r;
	}
	return 0;
}
/*
 * Completion handler for a bulk video URB: hand the payload to the
 * stream parser, then resubmit the same URB.  The URB is deliberately
 * NOT resubmitted when streaming has stopped, on a USB error, or on a
 * short read, which lets the pipeline drain on teardown.
 */
static void go7007_usb_read_video_pipe_complete(struct urb *urb)
{
	struct go7007 *go = (struct go7007 *)urb->context;
	int r, status = urb->status;

	if (!vb2_is_streaming(&go->vidq)) {
		/* streaming ended: wake readers so they can bail out */
		wake_up_interruptible(&go->frame_waitq);
		return;
	}
	if (status) {
		printk(KERN_ERR "go7007-usb: error in video pipe: %d\n",
			status);
		return;
	}
	if (urb->actual_length != urb->transfer_buffer_length) {
		printk(KERN_ERR "go7007-usb: short read in video pipe!\n");
		return;
	}
	go7007_parse_video_stream(go, urb->transfer_buffer, urb->actual_length);
	/* completion context: must not sleep, hence GFP_ATOMIC */
	r = usb_submit_urb(urb, GFP_ATOMIC);
	if (r < 0)
		printk(KERN_ERR "go7007-usb: error in video pipe: %d\n", r);
}
/*
 * Completion handler for a bulk audio URB: deliver the payload to the
 * ALSA side (if an audio_deliver callback is installed) and resubmit.
 * Like the video handler, it stops resubmitting once streaming ends or
 * on error / short read.
 */
static void go7007_usb_read_audio_pipe_complete(struct urb *urb)
{
	struct go7007 *go = (struct go7007 *)urb->context;
	int r, status = urb->status;

	if (!vb2_is_streaming(&go->vidq))
		return;
	if (status) {
		printk(KERN_ERR "go7007-usb: error in audio pipe: %d\n",
			status);
		return;
	}
	if (urb->actual_length != urb->transfer_buffer_length) {
		printk(KERN_ERR "go7007-usb: short read in audio pipe!\n");
		return;
	}
	if (go->audio_deliver != NULL)
		go->audio_deliver(go, urb->transfer_buffer, urb->actual_length);
	/* completion context: must not sleep, hence GFP_ATOMIC */
	r = usb_submit_urb(urb, GFP_ATOMIC);
	if (r < 0)
		printk(KERN_ERR "go7007-usb: error in audio pipe: %d\n", r);
}
/*
 * Submit the 8 video URBs and, if audio capture is enabled, the 8 audio
 * URBs.  On any submit failure, unwind by killing exactly the URBs that
 * were actually submitted (the original code killed fixed counts of 7
 * audio / 8 video URBs, including ones never submitted -- harmless for
 * usb_kill_urb() but misleading).  Returns 0 on success, -1 on failure
 * (callers only test for < 0).
 */
static int go7007_usb_stream_start(struct go7007 *go)
{
	struct go7007_usb *usb = go->hpi_context;
	int i, r;

	for (i = 0; i < 8; ++i) {
		r = usb_submit_urb(usb->video_urbs[i], GFP_KERNEL);
		if (r < 0) {
			printk(KERN_ERR "go7007-usb: error submitting video "
					"urb %d: %d\n", i, r);
			goto video_submit_failed;
		}
	}
	if (!go->audio_enabled)
		return 0;

	for (i = 0; i < 8; ++i) {
		r = usb_submit_urb(usb->audio_urbs[i], GFP_KERNEL);
		if (r < 0) {
			printk(KERN_ERR "go7007-usb: error submitting audio "
					"urb %d: %d\n", i, r);
			goto audio_submit_failed;
		}
	}
	return 0;

audio_submit_failed:
	/* kill only the audio URBs that were submitted (indices 0..i-1) */
	while (i > 0)
		usb_kill_urb(usb->audio_urbs[--i]);
	i = 8;	/* all 8 video URBs were submitted before the audio loop */
video_submit_failed:
	/* kill only the video URBs that were submitted (indices 0..i-1) */
	while (i > 0)
		usb_kill_urb(usb->video_urbs[--i]);
	return -1;
}
/*
 * Synchronously cancel all streaming URBs (video first, then audio if it
 * was enabled).  Skipped entirely once the device is gone.  Always
 * returns 0.
 */
static int go7007_usb_stream_stop(struct go7007 *go)
{
	struct go7007_usb *usb = go->hpi_context;
	int n;

	if (go->status == STATUS_SHUTDOWN)
		return 0;

	for (n = 0; n < 8; ++n)
		usb_kill_urb(usb->video_urbs[n]);

	if (go->audio_enabled) {
		for (n = 0; n < 8; ++n)
			usb_kill_urb(usb->audio_urbs[n]);
	}
	return 0;
}
/*
 * Push a firmware chunk to the device over the bulk download endpoint
 * (EP 2 on EZ-USB boards, EP 3 otherwise).  Returns the usb_bulk_msg()
 * result: 0 on success, negative error otherwise.
 */
static int go7007_usb_send_firmware(struct go7007 *go, u8 *data, int len)
{
	struct go7007_usb *usb = go->hpi_context;
	int transferred;
	int timeout = 500;
	int ep = (usb->board->flags & GO7007_USB_EZUSB) ? 2 : 3;

#ifdef GO7007_USB_DEBUG
	printk(KERN_DEBUG "go7007-usb: DownloadBuffer sending %d bytes\n", len);
#endif

	return usb_bulk_msg(usb->usbdev, usb_sndbulkpipe(usb->usbdev, ep),
			data, len, &transferred, timeout);
}
/* Kill @urb if non-NULL, then free its transfer buffer and the URB. */
static void go7007_usb_free_urb(struct urb *urb)
{
	if (urb) {
		usb_kill_urb(urb);
		kfree(urb->transfer_buffer);
		usb_free_urb(urb);
	}
}

/*
 * Tear down all USB-side state attached to @go: the interrupt URB, the
 * video and audio stream URBs (and their buffers), and finally the
 * go7007_usb context itself.  NULL slots are skipped, so this is safe to
 * call from a partially-initialized probe.
 */
static void go7007_usb_release(struct go7007 *go)
{
	struct go7007_usb *usb = go->hpi_context;
	int i;

	go7007_usb_free_urb(usb->intr_urb);

	/* Free USB-related structs */
	for (i = 0; i < 8; ++i) {
		go7007_usb_free_urb(usb->video_urbs[i]);
		go7007_usb_free_urb(usb->audio_urbs[i]);
	}

	kfree(go->hpi_context);
}
/* HPI operations for boards with a Cypress EZ-USB interface chip. */
static struct go7007_hpi_ops go7007_usb_ezusb_hpi_ops = {
	.interface_reset	= go7007_usb_interface_reset,
	.write_interrupt	= go7007_usb_ezusb_write_interrupt,
	.read_interrupt		= go7007_usb_read_interrupt,
	.stream_start		= go7007_usb_stream_start,
	.stream_stop		= go7007_usb_stream_stop,
	.send_firmware		= go7007_usb_send_firmware,
	.release		= go7007_usb_release,
};

/* HPI operations for boards using the GO7007's on-board USB interface;
 * only the interrupt-write path differs from the EZ-USB variant. */
static struct go7007_hpi_ops go7007_usb_onboard_hpi_ops = {
	.interface_reset	= go7007_usb_interface_reset,
	.write_interrupt	= go7007_usb_onboard_write_interrupt,
	.read_interrupt		= go7007_usb_read_interrupt,
	.stream_start		= go7007_usb_stream_start,
	.stream_stop		= go7007_usb_stream_stop,
	.send_firmware		= go7007_usb_send_firmware,
	.release		= go7007_usb_release,
};
/********************* Driver for EZ-USB I2C adapter *********************/
/*
 * i2c_algorithm.master_xfer implementation for the EZ-USB I2C bridge.
 * Each message is marshalled into go->usb_buf and pushed with vendor
 * request 0x24; read data is fetched back with vendor request 0x25.  A
 * write immediately followed by a read to the same address is coalesced
 * into one write-then-read hardware transaction.  Returns the number of
 * messages transferred, or a negative error code.
 */
static int go7007_usb_i2c_master_xfer(struct i2c_adapter *adapter,
					struct i2c_msg msgs[], int num)
{
	struct go7007 *go = i2c_get_adapdata(adapter);
	struct go7007_usb *usb = go->hpi_context;
	u8 *buf = go->usb_buf;
	int buf_len, i;
	int ret = -EIO;

	if (go->status == STATUS_SHUTDOWN)
		return -ENODEV;

	mutex_lock(&usb->i2c_lock);

	for (i = 0; i < num; ++i) {
		/* The hardware command is "write some bytes then read some
		 * bytes", so we try to coalesce a write followed by a read
		 * into a single USB transaction */
		if (i + 1 < num && msgs[i].addr == msgs[i + 1].addr &&
				!(msgs[i].flags & I2C_M_RD) &&
				(msgs[i + 1].flags & I2C_M_RD)) {
#ifdef GO7007_I2C_DEBUG
			printk(KERN_DEBUG "go7007-usb: i2c write/read %d/%d "
					"bytes on %02x\n", msgs[i].len,
					msgs[i + 1].len, msgs[i].addr);
#endif
			/* payload: [cmd=0x01][wlen+1][addr<<1][wdata..][rlen] */
			buf[0] = 0x01;
			buf[1] = msgs[i].len + 1;
			buf[2] = msgs[i].addr << 1;
			memcpy(&buf[3], msgs[i].buf, msgs[i].len);
			buf_len = msgs[i].len + 3;
			/* ++i consumes the paired read message here */
			buf[buf_len++] = msgs[++i].len;
		} else if (msgs[i].flags & I2C_M_RD) {
#ifdef GO7007_I2C_DEBUG
			printk(KERN_DEBUG "go7007-usb: i2c read %d "
					"bytes on %02x\n", msgs[i].len,
					msgs[i].addr);
#endif
			/* pure read: zero-length write phase, then read */
			buf[0] = 0x01;
			buf[1] = 1;
			buf[2] = msgs[i].addr << 1;
			buf[3] = msgs[i].len;
			buf_len = 4;
		} else {
#ifdef GO7007_I2C_DEBUG
			printk(KERN_DEBUG "go7007-usb: i2c write %d "
					"bytes on %02x\n", msgs[i].len,
					msgs[i].addr);
#endif
			/* pure write: cmd 0x00, trailing zero read length */
			buf[0] = 0x00;
			buf[1] = msgs[i].len + 1;
			buf[2] = msgs[i].addr << 1;
			memcpy(&buf[3], msgs[i].buf, msgs[i].len);
			buf_len = msgs[i].len + 3;
			buf[buf_len++] = 0;
		}
		if (go7007_usb_vendor_request(go, 0x24, 0, 0,
						buf, buf_len, 0) < 0)
			goto i2c_done;
		if (msgs[i].flags & I2C_M_RD) {
			memset(buf, 0, msgs[i].len + 1);
			/* device returns a one-byte header before the data;
			 * presumably a status byte -- TODO(review): confirm */
			if (go7007_usb_vendor_request(go, 0x25, 0, 0, buf,
						msgs[i].len + 1, 1) < 0)
				goto i2c_done;
			memcpy(msgs[i].buf, buf + 1, msgs[i].len);
		}
	}
	ret = num;

i2c_done:
	mutex_unlock(&usb->i2c_lock);
	return ret;
}
/*
 * Advertise SMBus emulation minus quick writes: the hardware reports no
 * errors, so probing with quick writes would give meaningless results.
 */
static u32 go7007_usb_functionality(struct i2c_adapter *adapter)
{
	u32 functionality = I2C_FUNC_SMBUS_EMUL;

	return functionality & ~I2C_FUNC_SMBUS_QUICK;
}
/* I2C algorithm backed by the EZ-USB vendor-request bridge above. */
static struct i2c_algorithm go7007_usb_algo = {
	.master_xfer	= go7007_usb_i2c_master_xfer,
	.functionality	= go7007_usb_functionality,
};

/* Template copied into go->i2c_adapter at probe time for boards with
 * the GO7007_USB_EZUSB_I2C flag. */
static struct i2c_adapter go7007_usb_adap_templ = {
	.owner			= THIS_MODULE,
	.name			= "WIS GO7007SB EZ-USB",
	.algo			= &go7007_usb_algo,
};
/********************* USB add/remove functions *********************/
/*
 * Probe callback: map the matched USB ID to a board description, allocate
 * the go7007 core object and USB state, set up the interrupt/video/audio
 * URBs, boot the encoder firmware, register the EZ-USB I2C adapter where
 * present, detect board variants that reuse the same USB IDs, and finally
 * register the V4L2/ALSA interfaces.
 */
static int go7007_usb_probe(struct usb_interface *intf,
		const struct usb_device_id *id)
{
	struct go7007 *go;
	struct go7007_usb *usb;
	const struct go7007_usb_board *board;
	struct usb_device *usbdev = interface_to_usbdev(intf);
	unsigned num_i2c_devs;
	char *name;
	int video_pipe, i, v_urb_len;

	printk(KERN_DEBUG "go7007-usb: probing new GO7007 USB board\n");

	/* driver_info (set in go7007_usb_id_table) selects the board */
	switch (id->driver_info) {
	case GO7007_BOARDID_MATRIX_II:
		name = "WIS Matrix II or compatible";
		board = &board_matrix_ii;
		break;
	case GO7007_BOARDID_MATRIX_RELOAD:
		name = "WIS Matrix Reloaded or compatible";
		board = &board_matrix_reload;
		break;
	case GO7007_BOARDID_MATRIX_REV:
		name = "WIS Matrix Revolution or compatible";
		board = &board_matrix_revolution;
		break;
	case GO7007_BOARDID_STAR_TREK:
		name = "WIS Star Trek or compatible";
		board = &board_star_trek;
		break;
	case GO7007_BOARDID_XMEN:
		name = "WIS XMen or compatible";
		board = &board_xmen;
		break;
	case GO7007_BOARDID_XMEN_II:
		name = "WIS XMen II or compatible";
		board = &board_xmen;
		break;
	case GO7007_BOARDID_XMEN_III:
		name = "WIS XMen III or compatible";
		board = &board_xmen;
		break;
	case GO7007_BOARDID_PX_M402U:
		name = "Plextor PX-M402U";
		board = &board_matrix_ii;
		break;
	case GO7007_BOARDID_PX_TV402U:
		name = "Plextor PX-TV402U (unknown tuner)";
		board = &board_px_tv402u;
		break;
	case GO7007_BOARDID_LIFEVIEW_LR192:
		printk(KERN_ERR "go7007-usb: The Lifeview TV Walker Ultra "
				"is not supported. Sorry!\n");
		/* NOTE(review): returning 0 claims the interface without
		 * initializing anything; -ENODEV would decline the device.
		 * Confirm intent.  The lines below are unreachable. */
		return 0;
		name = "Lifeview TV Walker Ultra";
		board = &board_lifeview_lr192;
		break;
	case GO7007_BOARDID_SENSORAY_2250:
		printk(KERN_INFO "Sensoray 2250 found\n");
		name = "Sensoray 2250/2251";
		board = &board_sensoray_2250;
		break;
	case GO7007_BOARDID_ADS_USBAV_709:
		name = "ADS Tech DVD Xpress DX2";
		board = &board_ads_usbav_709;
		break;
	default:
		printk(KERN_ERR "go7007-usb: unknown board ID %d!\n",
				(unsigned int)id->driver_info);
		/* NOTE(review): same concern as above -- returns 0 (claims
		 * the interface) for an unknown board */
		return 0;
	}

	go = go7007_alloc(&board->main_info, &intf->dev);
	if (go == NULL)
		return -ENOMEM;
	usb = kzalloc(sizeof(struct go7007_usb), GFP_KERNEL);
	if (usb == NULL) {
		kfree(go);
		return -ENOMEM;
	}

	usb->board = board;
	usb->usbdev = usbdev;
	usb_make_path(usbdev, go->bus_info, sizeof(go->bus_info));
	go->board_id = id->driver_info;
	/* NOTE(review): strncpy does not guarantee NUL termination if
	 * name is >= sizeof(go->name); the names above appear short
	 * enough, but confirm sizeof(go->name) in go7007-priv.h */
	strncpy(go->name, name, sizeof(go->name));
	if (board->flags & GO7007_USB_EZUSB)
		go->hpi_ops = &go7007_usb_ezusb_hpi_ops;
	else
		go->hpi_ops = &go7007_usb_onboard_hpi_ops;
	go->hpi_context = usb;

	/* Allocate the URB and buffer for receiving incoming interrupts */
	usb->intr_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (usb->intr_urb == NULL)
		goto allocfail;
	usb->intr_urb->transfer_buffer = kmalloc(2*sizeof(u16), GFP_KERNEL);
	if (usb->intr_urb->transfer_buffer == NULL)
		goto allocfail;

	/* the Sensoray 2250 delivers "interrupts" on a bulk endpoint */
	if (go->board_id == GO7007_BOARDID_SENSORAY_2250)
		usb_fill_bulk_urb(usb->intr_urb, usb->usbdev,
			usb_rcvbulkpipe(usb->usbdev, 4),
			usb->intr_urb->transfer_buffer, 2*sizeof(u16),
			go7007_usb_readinterrupt_complete, go);
	else
		usb_fill_int_urb(usb->intr_urb, usb->usbdev,
			usb_rcvintpipe(usb->usbdev, 4),
			usb->intr_urb->transfer_buffer, 2*sizeof(u16),
			go7007_usb_readinterrupt_complete, go, 8);
	usb_set_intfdata(intf, &go->v4l2_dev);

	/* Boot the GO7007 */
	if (go7007_boot_encoder(go, go->board_info->flags &
					GO7007_BOARD_USE_ONBOARD_I2C) < 0)
		goto allocfail;

	/* Register the EZ-USB I2C adapter, if we're using it */
	if (board->flags & GO7007_USB_EZUSB_I2C) {
		memcpy(&go->i2c_adapter, &go7007_usb_adap_templ,
				sizeof(go7007_usb_adap_templ));
		mutex_init(&usb->i2c_lock);
		go->i2c_adapter.dev.parent = go->dev;
		i2c_set_adapdata(&go->i2c_adapter, go);
		if (i2c_add_adapter(&go->i2c_adapter) < 0) {
			printk(KERN_ERR
				"go7007-usb: error: i2c_add_adapter failed\n");
			goto allocfail;
		}
		go->i2c_adapter_online = 1;
	}

	/* Pelco and Adlink reused the XMen and XMen-III vendor and product
	 * IDs for their own incompatible designs.  We can detect XMen boards
	 * by probing the sensor, but there is no way to probe the sensors on
	 * the Pelco and Adlink designs so we default to the Adlink.  If it
	 * is actually a Pelco, the user must set the assume_endura module
	 * parameter. */
	if ((go->board_id == GO7007_BOARDID_XMEN ||
				go->board_id == GO7007_BOARDID_XMEN_III) &&
			go->i2c_adapter_online) {
		union i2c_smbus_data data;

		/* Check to see if register 0x0A is 0x76 */
		i2c_smbus_xfer(&go->i2c_adapter, 0x21, I2C_CLIENT_SCCB,
			I2C_SMBUS_READ, 0x0A, I2C_SMBUS_BYTE_DATA, &data);
		if (data.byte != 0x76) {
			if (assume_endura) {
				go->board_id = GO7007_BOARDID_ENDURA;
				usb->board = board = &board_endura;
				go->board_info = &board->main_info;
				strncpy(go->name, "Pelco Endura",
					sizeof(go->name));
			} else {
				u16 channel;

				/* read channel number from GPIO[1:0] */
				go7007_read_addr(go, 0x3c81, &channel);
				channel &= 0x3;
				go->board_id = GO7007_BOARDID_ADLINK_MPG24;
				usb->board = board = &board_adlink_mpg24;
				go->board_info = &board->main_info;
				go->channel_number = channel;
				snprintf(go->name, sizeof(go->name),
					"Adlink PCI-MPG24, channel #%d",
					channel);
			}
			go7007_update_board(go);
		}
	}

	num_i2c_devs = go->board_info->num_i2c_devs;

	/* Probe the tuner model on the TV402U */
	if (go->board_id == GO7007_BOARDID_PX_TV402U) {
		/* Board strapping indicates tuner model */
		if (go7007_usb_vendor_request(go, 0x41, 0, 0, go->usb_buf, 3,
					1) < 0) {
			printk(KERN_ERR "go7007-usb: GPIO read failed!\n");
			goto allocfail;
		}
		switch (go->usb_buf[0] >> 6) {
		case 1:
			go->tuner_type = TUNER_SONY_BTF_PG472Z;
			go->std = V4L2_STD_PAL;
			strncpy(go->name, "Plextor PX-TV402U-EU",
					sizeof(go->name));
			break;
		case 2:
			go->tuner_type = TUNER_SONY_BTF_PK467Z;
			go->std = V4L2_STD_NTSC_M_JP;
			/* these variants lack two of the listed I2C devices
			 * (presumably the PAL tuner + MPX); TODO confirm */
			num_i2c_devs -= 2;
			strncpy(go->name, "Plextor PX-TV402U-JP",
					sizeof(go->name));
			break;
		case 3:
			go->tuner_type = TUNER_SONY_BTF_PB463Z;
			num_i2c_devs -= 2;
			strncpy(go->name, "Plextor PX-TV402U-NA",
					sizeof(go->name));
			break;
		default:
			printk(KERN_DEBUG "go7007-usb: unable to detect "
					"tuner type!\n");
			break;
		}
		/* Configure tuner mode selection inputs connected
		 * to the EZ-USB GPIO output pins */
		if (go7007_usb_vendor_request(go, 0x40, 0x7f02, 0,
					NULL, 0, 0) < 0) {
			printk(KERN_ERR "go7007-usb: GPIO write failed!\n");
			goto allocfail;
		}
	}

	/* Print a nasty message if the user attempts to use a USB2.0 device in
	 * a USB1.1 port.  There will be silent corruption of the stream. */
	if ((board->flags & GO7007_USB_EZUSB) &&
			usbdev->speed != USB_SPEED_HIGH)
		printk(KERN_ERR "go7007-usb: *** WARNING *** This device "
				"must be connected to a USB 2.0 port!  "
				"Attempting to capture video through a USB 1.1 "
				"port will result in stream corruption, even "
				"at low bitrates!\n");

	/* Allocate the URBs and buffers for receiving the video stream */
	if (board->flags & GO7007_USB_EZUSB) {
		v_urb_len = 1024;
		video_pipe = usb_rcvbulkpipe(usb->usbdev, 6);
	} else {
		v_urb_len = 512;
		video_pipe = usb_rcvbulkpipe(usb->usbdev, 1);
	}
	for (i = 0; i < 8; ++i) {
		usb->video_urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
		if (usb->video_urbs[i] == NULL)
			goto allocfail;
		usb->video_urbs[i]->transfer_buffer =
			kmalloc(v_urb_len, GFP_KERNEL);
		if (usb->video_urbs[i]->transfer_buffer == NULL)
			goto allocfail;
		usb_fill_bulk_urb(usb->video_urbs[i], usb->usbdev, video_pipe,
				usb->video_urbs[i]->transfer_buffer, v_urb_len,
				go7007_usb_read_video_pipe_complete, go);
	}

	/* Allocate the URBs and buffers for receiving the audio stream */
	if ((board->flags & GO7007_USB_EZUSB) &&
	    (board->flags & GO7007_BOARD_HAS_AUDIO)) {
		for (i = 0; i < 8; ++i) {
			usb->audio_urbs[i] = usb_alloc_urb(0, GFP_KERNEL);
			if (usb->audio_urbs[i] == NULL)
				goto allocfail;
			usb->audio_urbs[i]->transfer_buffer = kmalloc(4096,
					GFP_KERNEL);
			if (usb->audio_urbs[i]->transfer_buffer == NULL)
				goto allocfail;
			usb_fill_bulk_urb(usb->audio_urbs[i], usb->usbdev,
				usb_rcvbulkpipe(usb->usbdev, 8),
				usb->audio_urbs[i]->transfer_buffer, 4096,
				go7007_usb_read_audio_pipe_complete, go);
		}
	}

	/* Do any final GO7007 initialization, then register the
	 * V4L2 and ALSA interfaces */
	if (go7007_register_encoder(go, num_i2c_devs) < 0)
		goto allocfail;
	go->status = STATUS_ONLINE;
	return 0;

allocfail:
	/* NOTE(review): go7007_alloc() evidently sets up a v4l2_device
	 * (go->v4l2_dev is used above); a bare kfree(go) here may skip
	 * v4l2 teardown -- compare with the disconnect path, which uses
	 * v4l2_device_put().  Verify against go7007_alloc(). */
	go7007_usb_release(go);
	kfree(go);
	return -ENOMEM;
}
/*
 * Disconnect callback: tear down the ALSA device if one was registered,
 * mark the device gone so in-flight paths bail out, detach the V4L2
 * device and node, and drop the probe's v4l2_dev reference (final
 * cleanup happens via the v4l2 release callback once all users close).
 * Both locks are held across the state change to quiesce the queue and
 * ioctl paths.
 */
static void go7007_usb_disconnect(struct usb_interface *intf)
{
	struct go7007 *go = to_go7007(usb_get_intfdata(intf));

	mutex_lock(&go->queue_lock);
	mutex_lock(&go->serialize_lock);

	if (go->audio_enabled)
		go7007_snd_remove(go);

	go->status = STATUS_SHUTDOWN;
	v4l2_device_disconnect(&go->v4l2_dev);
	video_unregister_device(&go->vdev);

	mutex_unlock(&go->serialize_lock);
	mutex_unlock(&go->queue_lock);

	v4l2_device_put(&go->v4l2_dev);
}
/* USB driver glue: probe/disconnect callbacks and the supported-ID table. */
static struct usb_driver go7007_usb_driver = {
	.name = "go7007",
	.probe = go7007_usb_probe,
	.disconnect = go7007_usb_disconnect,
	.id_table = go7007_usb_id_table,
};
module_usb_driver(go7007_usb_driver);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
cybernet/rhel7-kernel | kernel/sound/soc/samsung/neo1973_wm8753.c | 2087 | 12031 | /*
* neo1973_wm8753.c -- SoC audio for Openmoko Neo1973 and Freerunner devices
*
* Copyright 2007 Openmoko Inc
* Author: Graeme Gregory <graeme@openmoko.org>
* Copyright 2007 Wolfson Microelectronics PLC.
* Author: Graeme Gregory
* graeme.gregory@wolfsonmicro.com or linux@wolfsonmicro.com
* Copyright 2009 Wolfson Microelectronics
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/gpio.h>
#include <sound/soc.h>
#include <asm/mach-types.h>
#include "regs-iis.h"
#include "../codecs/wm8753.h"
#include "s3c24xx-i2s.h"
static int neo1973_hifi_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params)
{
struct snd_soc_pcm_runtime *rtd = substream->private_data;
struct snd_soc_dai *codec_dai = rtd->codec_dai;
struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
unsigned int pll_out = 0, bclk = 0;
int ret = 0;
unsigned long iis_clkrate;
iis_clkrate = s3c24xx_i2s_get_clockrate();
switch (params_rate(params)) {
case 8000:
case 16000:
pll_out = 12288000;
break;
case 48000:
bclk = WM8753_BCLK_DIV_4;
pll_out = 12288000;
break;
case 96000:
bclk = WM8753_BCLK_DIV_2;
pll_out = 12288000;
break;
case 11025:
bclk = WM8753_BCLK_DIV_16;
pll_out = 11289600;
break;
case 22050:
bclk = WM8753_BCLK_DIV_8;
pll_out = 11289600;
break;
case 44100:
bclk = WM8753_BCLK_DIV_4;
pll_out = 11289600;
break;
case 88200:
bclk = WM8753_BCLK_DIV_2;
pll_out = 11289600;
break;
}
/* set codec DAI configuration */
ret = snd_soc_dai_set_fmt(codec_dai,
SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (ret < 0)
return ret;
/* set cpu DAI configuration */
ret = snd_soc_dai_set_fmt(cpu_dai,
SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF |
SND_SOC_DAIFMT_CBM_CFM);
if (ret < 0)
return ret;
/* set the codec system clock for DAC and ADC */
ret = snd_soc_dai_set_sysclk(codec_dai, WM8753_MCLK, pll_out,
SND_SOC_CLOCK_IN);
if (ret < 0)
return ret;
/* set MCLK division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_MCLK,
S3C2410_IISMOD_32FS);
if (ret < 0)
return ret;
/* set codec BCLK division for sample rate */
ret = snd_soc_dai_set_clkdiv(codec_dai, WM8753_BCLKDIV, bclk);
if (ret < 0)
return ret;
/* set prescaler division for sample rate */
ret = snd_soc_dai_set_clkdiv(cpu_dai, S3C24XX_DIV_PRESCALER,
S3C24XX_PRESCALE(4, 4));
if (ret < 0)
return ret;
/* codec PLL input is PCLK/4 */
ret = snd_soc_dai_set_pll(codec_dai, WM8753_PLL1, 0,
iis_clkrate / 4, pll_out);
if (ret < 0)
return ret;
return 0;
}
/* Release the HiFi link: switch the codec's PLL1 back off. */
static int neo1973_hifi_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	return snd_soc_dai_set_pll(rtd->codec_dai, WM8753_PLL1, 0, 0, 0);
}
/*
 * Neo1973 WM8753 HiFi DAI operations.
 */
/* HiFi (I2S) stream callbacks. */
static struct snd_soc_ops neo1973_hifi_ops = {
	.hw_params = neo1973_hifi_hw_params,
	.hw_free = neo1973_hifi_hw_free,
};
/*
 * Configure the WM8753 voice (PCM) interface for the GSM modem link.
 * Only 8 kHz mono is accepted.  The codec runs as clock slave (CBS_CFS,
 * DSP mode B); PLL2 generates 12.288 MHz from the CPU IIS clock divided
 * by 4, which the PCM divider brings down to 2.048 MHz.
 */
static int neo1973_voice_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *codec_dai = rtd->codec_dai;
	unsigned long pclk_div4 = s3c24xx_i2s_get_clockrate() / 4;
	int err;

	/* The GSM modem link is fixed at 8 kHz mono. */
	if (params_rate(params) != 8000 || params_channels(params) != 1)
		return -EINVAL;

	/* todo: gg check mode (DSP_B) against CSR datasheet */
	/* set codec DAI configuration */
	err = snd_soc_dai_set_fmt(codec_dai, SND_SOC_DAIFMT_DSP_B |
		SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS);
	if (err < 0)
		return err;

	/* set the codec system clock for DAC and ADC */
	err = snd_soc_dai_set_sysclk(codec_dai, WM8753_PCMCLK, 12288000,
		SND_SOC_CLOCK_IN);
	if (err < 0)
		return err;

	/* 12.288 MHz / 6 = 2.048 MHz PCM clock */
	err = snd_soc_dai_set_clkdiv(codec_dai, WM8753_PCMDIV,
		WM8753_PCM_DIV_6);
	if (err < 0)
		return err;

	/* configure and enable PLL2 for a 12.288 MHz output */
	err = snd_soc_dai_set_pll(codec_dai, WM8753_PLL2, 0,
		pclk_div4, 12288000);
	if (err < 0)
		return err;

	return 0;
}
/* Release the voice link: switch the codec's PLL2 back off. */
static int neo1973_voice_hw_free(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;

	return snd_soc_dai_set_pll(rtd->codec_dai, WM8753_PLL2, 0, 0, 0);
}
/* Voice (PCM) stream callbacks for the Bluetooth/GSM link. */
static struct snd_soc_ops neo1973_voice_ops = {
	.hw_params = neo1973_voice_hw_params,
	.hw_free = neo1973_voice_hw_free,
};
/* Shared routes and controls */
/* Board-level DAPM widgets common to all Neo1973 variants. */
static const struct snd_soc_dapm_widget neo1973_wm8753_dapm_widgets[] = {
	SND_SOC_DAPM_LINE("GSM Line Out", NULL),
	SND_SOC_DAPM_LINE("GSM Line In", NULL),
	SND_SOC_DAPM_MIC("Headset Mic", NULL),
	SND_SOC_DAPM_MIC("Handset Mic", NULL),
};
/* Audio paths between the WM8753 pins and the board widgets above. */
static const struct snd_soc_dapm_route neo1973_wm8753_routes[] = {
	/* Connections to the GSM Module */
	{"GSM Line Out", NULL, "MONO1"},
	{"GSM Line Out", NULL, "MONO2"},
	{"RXP", NULL, "GSM Line In"},
	{"RXN", NULL, "GSM Line In"},
	/* Connections to Headset */
	{"MIC1", NULL, "Mic Bias"},
	{"Mic Bias", NULL, "Headset Mic"},
	/* Call Mic */
	{"MIC2", NULL, "Mic Bias"},
	{"MIC2N", NULL, "Mic Bias"},
	{"Mic Bias", NULL, "Handset Mic"},
	/* Connect the ALC pins */
	{"ACIN", NULL, "ACOP"},
};
/* User-visible pin switches for the shared widgets. */
static const struct snd_kcontrol_new neo1973_wm8753_controls[] = {
	SOC_DAPM_PIN_SWITCH("GSM Line Out"),
	SOC_DAPM_PIN_SWITCH("GSM Line In"),
	SOC_DAPM_PIN_SWITCH("Headset Mic"),
	SOC_DAPM_PIN_SWITCH("Handset Mic"),
};
/* GTA02 specific routes and controls */
static int gta02_speaker_enabled;
/*
 * "Amp Spk Switch" put callback: cache the requested state and drive
 * GPJ2 (low while the speaker is enabled, high otherwise).
 */
static int lm4853_set_spk(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	int enable = ucontrol->value.integer.value[0];

	gta02_speaker_enabled = enable;
	gpio_set_value(S3C2410_GPJ(2), enable ? 0 : 1);

	return 0;
}
/* "Amp Spk Switch" get callback: report the cached speaker state. */
static int lm4853_get_spk(struct snd_kcontrol *kcontrol,
	struct snd_ctl_elem_value *ucontrol)
{
	ucontrol->value.integer.value[0] = gta02_speaker_enabled;
	return 0;
}
/* DAPM event for "Stereo Out": drives GPJ1 high on widget power-down and
 * low on power-up (GPJ1 is requested as "GTA02_AMP_SHUT" below). */
static int lm4853_event(struct snd_soc_dapm_widget *w,
	struct snd_kcontrol *k, int event)
{
	gpio_set_value(S3C2410_GPJ(1), SND_SOC_DAPM_EVENT_OFF(event));
	return 0;
}
/* GTA02-only routes from the codec outputs to the amp/earpiece widgets. */
static const struct snd_soc_dapm_route neo1973_gta02_routes[] = {
	/* Connections to the amp */
	{"Stereo Out", NULL, "LOUT1"},
	{"Stereo Out", NULL, "ROUT1"},
	/* Call Speaker */
	{"Handset Spk", NULL, "LOUT2"},
	{"Handset Spk", NULL, "ROUT2"},
};
/* GTA02-only mixer controls, including the LM4853 speaker switch. */
static const struct snd_kcontrol_new neo1973_gta02_wm8753_controls[] = {
	SOC_DAPM_PIN_SWITCH("Handset Spk"),
	SOC_DAPM_PIN_SWITCH("Stereo Out"),
	SOC_SINGLE_BOOL_EXT("Amp Spk Switch", 0,
		lm4853_get_spk,
		lm4853_set_spk),
};
/* GTA02-only output widgets; "Stereo Out" gates the amp via lm4853_event. */
static const struct snd_soc_dapm_widget neo1973_gta02_wm8753_dapm_widgets[] = {
	SND_SOC_DAPM_SPK("Handset Spk", NULL),
	SND_SOC_DAPM_SPK("Stereo Out", lm4853_event),
};
/*
 * Register the GTA02-only widgets, routes and controls, then put both
 * outputs into their default (disabled) state while keeping them alive
 * across suspend.
 */
static int neo1973_gta02_wm8753_init(struct snd_soc_codec *codec)
{
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	int err;

	err = snd_soc_dapm_new_controls(dapm, neo1973_gta02_wm8753_dapm_widgets,
			ARRAY_SIZE(neo1973_gta02_wm8753_dapm_widgets));
	if (err)
		return err;

	err = snd_soc_dapm_add_routes(dapm, neo1973_gta02_routes,
			ARRAY_SIZE(neo1973_gta02_routes));
	if (err)
		return err;

	err = snd_soc_add_card_controls(codec->card,
			neo1973_gta02_wm8753_controls,
			ARRAY_SIZE(neo1973_gta02_wm8753_controls));
	if (err)
		return err;

	/* Outputs start muted; do not power them down on suspend. */
	snd_soc_dapm_disable_pin(dapm, "Stereo Out");
	snd_soc_dapm_ignore_suspend(dapm, "Stereo Out");
	snd_soc_dapm_disable_pin(dapm, "Handset Spk");
	snd_soc_dapm_ignore_suspend(dapm, "Handset Spk");

	return 0;
}
/*
 * Machine-level codec setup, run once when the WM8753 DAI link probes:
 * mark unused codec pins, register the shared widgets/controls/routes,
 * set default pin states, and chain into the GTA02-specific init.
 */
static int neo1973_wm8753_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	int ret;
	/* set up NC codec pins */
	snd_soc_dapm_nc_pin(dapm, "OUT3");
	snd_soc_dapm_nc_pin(dapm, "OUT4");
	snd_soc_dapm_nc_pin(dapm, "LINE1");
	snd_soc_dapm_nc_pin(dapm, "LINE2");
	/* Add neo1973 specific widgets */
	ret = snd_soc_dapm_new_controls(dapm, neo1973_wm8753_dapm_widgets,
			ARRAY_SIZE(neo1973_wm8753_dapm_widgets));
	if (ret)
		return ret;
	/* add neo1973 specific controls */
	ret = snd_soc_add_card_controls(rtd->card, neo1973_wm8753_controls,
			ARRAY_SIZE(neo1973_wm8753_controls));
	if (ret)
		return ret;
	/* set up neo1973 specific audio routes */
	ret = snd_soc_dapm_add_routes(dapm, neo1973_wm8753_routes,
			ARRAY_SIZE(neo1973_wm8753_routes));
	if (ret)
		return ret;
	/* set endpoints to default off mode */
	snd_soc_dapm_disable_pin(dapm, "GSM Line Out");
	snd_soc_dapm_disable_pin(dapm, "GSM Line In");
	snd_soc_dapm_disable_pin(dapm, "Headset Mic");
	snd_soc_dapm_disable_pin(dapm, "Handset Mic");
	/* allow audio paths from the GSM modem to run during suspend */
	snd_soc_dapm_ignore_suspend(dapm, "GSM Line Out");
	snd_soc_dapm_ignore_suspend(dapm, "GSM Line In");
	snd_soc_dapm_ignore_suspend(dapm, "Headset Mic");
	snd_soc_dapm_ignore_suspend(dapm, "Handset Mic");
	/* GTA02 gets the extra amp/earpiece widgets and controls */
	if (machine_is_neo1973_gta02()) {
		ret = neo1973_gta02_wm8753_init(codec);
		if (ret)
			return ret;
	}
	return 0;
}
/* Two DAI links: HiFi over I2S and voice over the WM8753 PCM interface. */
static struct snd_soc_dai_link neo1973_dai[] = {
	{ /* Hifi Playback - for similatious use with voice below */
	.name = "WM8753",
	.stream_name = "WM8753 HiFi",
	.platform_name = "s3c24xx-iis",
	.cpu_dai_name = "s3c24xx-iis",
	.codec_dai_name = "wm8753-hifi",
	.codec_name = "wm8753.0-001a",
	.init = neo1973_wm8753_init,
	.ops = &neo1973_hifi_ops,
	},
	{ /* Voice via BT */
	.name = "Bluetooth",
	.stream_name = "Voice",
	.cpu_dai_name = "dfbmcs320-pcm",
	.codec_dai_name = "wm8753-voice",
	.codec_name = "wm8753.0-001a",
	.ops = &neo1973_voice_ops,
	},
};
/* Bluetooth codec stub registered as an auxiliary device. */
static struct snd_soc_aux_dev neo1973_aux_devs[] = {
	{
	.name = "dfbmcs320",
	.codec_name = "dfbmcs320.0",
	},
};
/* Prefix the LM4857 amp's controls with "Amp" to avoid name clashes. */
static struct snd_soc_codec_conf neo1973_codec_conf[] = {
	{
	.dev_name = "lm4857.0-007c",
	.name_prefix = "Amp",
	},
};
/* GTA02 amplifier GPIOs; both default high (outputs off/shut down). */
static const struct gpio neo1973_gta02_gpios[] = {
	{ S3C2410_GPJ(2), GPIOF_OUT_INIT_HIGH, "GTA02_HP_IN" },
	{ S3C2410_GPJ(1), GPIOF_OUT_INIT_HIGH, "GTA02_AMP_SHUT" },
};
/* The sound card itself; name/num_aux_devs are adjusted in neo1973_init. */
static struct snd_soc_card neo1973 = {
	.name = "neo1973",
	.owner = THIS_MODULE,
	.dai_link = neo1973_dai,
	.num_links = ARRAY_SIZE(neo1973_dai),
	.aux_dev = neo1973_aux_devs,
	.num_aux_devs = ARRAY_SIZE(neo1973_aux_devs),
	.codec_conf = neo1973_codec_conf,
	.num_configs = ARRAY_SIZE(neo1973_codec_conf),
};
/* "soc-audio" platform device that carries the card; freed in neo1973_exit. */
static struct platform_device *neo1973_snd_device;
/*
 * Module init.  Only the GTA02 is supported; the early -ENODEV return
 * makes that guarantee, so the GTA02-specific setup below no longer
 * re-tests machine_is_neo1973_gta02() (the original re-checked it in
 * three places even though the result was constant after the guard).
 *
 * Returns 0 on success or a negative errno; on failure all acquired
 * resources (GPIOs, platform device reference) are released.
 */
static int __init neo1973_init(void)
{
	int ret;

	if (!machine_is_neo1973_gta02())
		return -ENODEV;

	/* GTA02-specific card identity and amplifier GPIO lines */
	neo1973.name = "neo1973gta02";
	neo1973.num_aux_devs = 1;

	ret = gpio_request_array(neo1973_gta02_gpios,
				 ARRAY_SIZE(neo1973_gta02_gpios));
	if (ret)
		return ret;

	neo1973_snd_device = platform_device_alloc("soc-audio", -1);
	if (!neo1973_snd_device) {
		ret = -ENOMEM;
		goto err_gpio_free;
	}

	platform_set_drvdata(neo1973_snd_device, &neo1973);
	ret = platform_device_add(neo1973_snd_device);
	if (ret)
		goto err_put_device;

	return 0;

err_put_device:
	platform_device_put(neo1973_snd_device);
err_gpio_free:
	gpio_free_array(neo1973_gta02_gpios,
			ARRAY_SIZE(neo1973_gta02_gpios));
	return ret;
}
module_init(neo1973_init);
/*
 * Module unload: unregister the sound device and return the GTA02
 * amplifier GPIOs requested in neo1973_init().
 */
static void __exit neo1973_exit(void)
{
	platform_device_unregister(neo1973_snd_device);
	/* GPIOs were only requested on the GTA02 */
	if (machine_is_neo1973_gta02()) {
		gpio_free_array(neo1973_gta02_gpios,
				ARRAY_SIZE(neo1973_gta02_gpios));
	}
}
module_exit(neo1973_exit);
/* Module information */
MODULE_AUTHOR("Graeme Gregory, graeme@openmoko.org, www.openmoko.org");
/* Fixed typo: "Frerunner" -> "Freerunner" */
MODULE_DESCRIPTION("ALSA SoC WM8753 Neo1973 and Freerunner");
MODULE_LICENSE("GPL");
| gpl-2.0 |
dirty-hank/frankenlenok | sound/soc/jz4740/jz4740-i2s.c | 2087 | 13043 | /*
* Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/init.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>
#include <sound/initval.h>
#include "jz4740-i2s.h"
#include "jz4740-pcm.h"
#define JZ_REG_AIC_CONF 0x00
#define JZ_REG_AIC_CTRL 0x04
#define JZ_REG_AIC_I2S_FMT 0x10
#define JZ_REG_AIC_FIFO_STATUS 0x14
#define JZ_REG_AIC_I2S_STATUS 0x1c
#define JZ_REG_AIC_CLK_DIV 0x30
#define JZ_REG_AIC_FIFO 0x34
#define JZ_AIC_CONF_FIFO_RX_THRESHOLD_MASK (0xf << 12)
#define JZ_AIC_CONF_FIFO_TX_THRESHOLD_MASK (0xf << 8)
#define JZ_AIC_CONF_OVERFLOW_PLAY_LAST BIT(6)
#define JZ_AIC_CONF_INTERNAL_CODEC BIT(5)
#define JZ_AIC_CONF_I2S BIT(4)
#define JZ_AIC_CONF_RESET BIT(3)
#define JZ_AIC_CONF_BIT_CLK_MASTER BIT(2)
#define JZ_AIC_CONF_SYNC_CLK_MASTER BIT(1)
#define JZ_AIC_CONF_ENABLE BIT(0)
#define JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET 12
#define JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET 8
#define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK (0x7 << 19)
#define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK (0x7 << 16)
#define JZ_AIC_CTRL_ENABLE_RX_DMA BIT(15)
#define JZ_AIC_CTRL_ENABLE_TX_DMA BIT(14)
#define JZ_AIC_CTRL_MONO_TO_STEREO BIT(11)
#define JZ_AIC_CTRL_SWITCH_ENDIANNESS BIT(10)
#define JZ_AIC_CTRL_SIGNED_TO_UNSIGNED BIT(9)
#define JZ_AIC_CTRL_FLUSH BIT(8)
#define JZ_AIC_CTRL_ENABLE_ROR_INT BIT(6)
#define JZ_AIC_CTRL_ENABLE_TUR_INT BIT(5)
#define JZ_AIC_CTRL_ENABLE_RFS_INT BIT(4)
#define JZ_AIC_CTRL_ENABLE_TFS_INT BIT(3)
#define JZ_AIC_CTRL_ENABLE_LOOPBACK BIT(2)
#define JZ_AIC_CTRL_ENABLE_PLAYBACK BIT(1)
#define JZ_AIC_CTRL_ENABLE_CAPTURE BIT(0)
#define JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET 19
#define JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET 16
#define JZ_AIC_I2S_FMT_DISABLE_BIT_CLK BIT(12)
#define JZ_AIC_I2S_FMT_ENABLE_SYS_CLK BIT(4)
#define JZ_AIC_I2S_FMT_MSB BIT(0)
#define JZ_AIC_I2S_STATUS_BUSY BIT(2)
#define JZ_AIC_CLK_DIV_MASK 0xf
/*
 * Per-controller state: MMIO mapping, the AIC and I2S clocks, and the
 * DMA/PCM configuration handed to the PCM driver per stream direction.
 */
struct jz4740_i2s {
	struct resource *mem;
	void __iomem *base;
	dma_addr_t phys_base;
	struct clk *clk_aic;
	struct clk *clk_i2s;
	struct jz4740_pcm_config pcm_config_playback;
	struct jz4740_pcm_config pcm_config_capture;
};
/* 32-bit MMIO read from an AIC register. */
static inline uint32_t jz4740_i2s_read(const struct jz4740_i2s *i2s,
	unsigned int reg)
{
	return readl(i2s->base + reg);
}
/* 32-bit MMIO write to an AIC register. */
static inline void jz4740_i2s_write(const struct jz4740_i2s *i2s,
	unsigned int reg, uint32_t value)
{
	writel(value, i2s->base + reg);
}
/*
 * First-substream setup: flush the FIFO, gate the I2S clock on and set
 * the global enable bit.  If the DAI is already active (second stream
 * direction opening), the hardware is left untouched.
 */
static int jz4740_i2s_startup(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf, ctrl;
	if (dai->active)
		return 0;
	ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);
	ctrl |= JZ_AIC_CTRL_FLUSH;
	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);
	clk_enable(i2s->clk_i2s);
	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
	conf |= JZ_AIC_CONF_ENABLE;
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
	return 0;
}
/*
 * Last-substream teardown: clear the global enable and gate the I2S
 * clock off.  Skipped while the other stream direction is still active.
 */
static void jz4740_i2s_shutdown(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;
	if (dai->active)
		return;
	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
	conf &= ~JZ_AIC_CONF_ENABLE;
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
	clk_disable(i2s->clk_i2s);
}
static int jz4740_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
struct snd_soc_dai *dai)
{
struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
uint32_t ctrl;
uint32_t mask;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
mask = JZ_AIC_CTRL_ENABLE_PLAYBACK | JZ_AIC_CTRL_ENABLE_TX_DMA;
else
mask = JZ_AIC_CTRL_ENABLE_CAPTURE | JZ_AIC_CTRL_ENABLE_RX_DMA;
ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
ctrl |= mask;
break;
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
ctrl &= ~mask;
break;
default:
return -EINVAL;
}
jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);
return 0;
}
/*
 * Translate the ASoC DAI format flags into the AIC configuration and
 * I2S format registers.  Supports I2S and MSB-justified framing; only
 * normal clock polarity (NB_NF) is accepted.
 */
static int jz4740_i2s_set_fmt(struct snd_soc_dai *dai, unsigned int fmt)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t format = 0;
	uint32_t conf;
	conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
	conf &= ~(JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER);
	/* Map master/slave roles onto the bit/sync clock master bits */
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		conf |= JZ_AIC_CONF_BIT_CLK_MASTER | JZ_AIC_CONF_SYNC_CLK_MASTER;
		format |= JZ_AIC_I2S_FMT_ENABLE_SYS_CLK;
		break;
	case SND_SOC_DAIFMT_CBM_CFS:
		conf |= JZ_AIC_CONF_SYNC_CLK_MASTER;
		break;
	case SND_SOC_DAIFMT_CBS_CFM:
		conf |= JZ_AIC_CONF_BIT_CLK_MASTER;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		break;
	default:
		return -EINVAL;
	}
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_MSB:
		format |= JZ_AIC_I2S_FMT_MSB;
		break;
	case SND_SOC_DAIFMT_I2S:
		break;
	default:
		return -EINVAL;
	}
	/* The controller only supports normal bit/frame clock polarity */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		break;
	default:
		return -EINVAL;
	}
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
	jz4740_i2s_write(i2s, JZ_REG_AIC_I2S_FMT, format);
	return 0;
}
/*
 * Program sample size (S8 or S16 only) and channel handling for the
 * stream direction, then hand the matching DMA/PCM config to the PCM
 * driver via the DAI DMA data pointer.
 */
static int jz4740_i2s_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params, struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	enum jz4740_dma_width dma_width;
	struct jz4740_pcm_config *pcm_config;
	unsigned int sample_size;
	uint32_t ctrl;
	ctrl = jz4740_i2s_read(i2s, JZ_REG_AIC_CTRL);
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S8:
		sample_size = 0;
		dma_width = JZ4740_DMA_WIDTH_8BIT;
		break;
	case SNDRV_PCM_FORMAT_S16:
		sample_size = 1;
		dma_width = JZ4740_DMA_WIDTH_16BIT;
		break;
	default:
		return -EINVAL;
	}
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
		ctrl &= ~JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_MASK;
		ctrl |= sample_size << JZ_AIC_CTRL_OUTPUT_SAMPLE_SIZE_OFFSET;
		/* Duplicate mono playback data onto both channels */
		if (params_channels(params) == 1)
			ctrl |= JZ_AIC_CTRL_MONO_TO_STEREO;
		else
			ctrl &= ~JZ_AIC_CTRL_MONO_TO_STEREO;
		pcm_config = &i2s->pcm_config_playback;
		pcm_config->dma_config.dst_width = dma_width;
	} else {
		ctrl &= ~JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_MASK;
		ctrl |= sample_size << JZ_AIC_CTRL_INPUT_SAMPLE_SIZE_OFFSET;
		pcm_config = &i2s->pcm_config_capture;
		pcm_config->dma_config.src_width = dma_width;
	}
	jz4740_i2s_write(i2s, JZ_REG_AIC_CTRL, ctrl);
	snd_soc_dai_set_dma_data(dai, substream, pcm_config);
	return 0;
}
/*
 * Select the I2S clock parent (external oscillator or PLL/2) and, for
 * the PLL source, set the requested rate.
 *
 * Fix: clk_get() returns an ERR_PTR on failure, which the original code
 * passed straight into clk_set_parent() and clk_put().  Check with
 * IS_ERR() and propagate the error instead.
 */
static int jz4740_i2s_set_sysclk(struct snd_soc_dai *dai, int clk_id,
	unsigned int freq, int dir)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	struct clk *parent;
	int ret = 0;

	switch (clk_id) {
	case JZ4740_I2S_CLKSRC_EXT:
		parent = clk_get(NULL, "ext");
		if (IS_ERR(parent))
			return PTR_ERR(parent);
		clk_set_parent(i2s->clk_i2s, parent);
		break;
	case JZ4740_I2S_CLKSRC_PLL:
		parent = clk_get(NULL, "pll half");
		if (IS_ERR(parent))
			return PTR_ERR(parent);
		clk_set_parent(i2s->clk_i2s, parent);
		ret = clk_set_rate(i2s->clk_i2s, freq);
		break;
	default:
		return -EINVAL;
	}

	/* Re-parenting takes its own reference; drop ours */
	clk_put(parent);
	return ret;
}
/*
 * Suspend: if a stream is active, clear the global enable and gate the
 * I2S clock; the AIC clock is always gated last.
 */
static int jz4740_i2s_suspend(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;
	if (dai->active) {
		conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
		conf &= ~JZ_AIC_CONF_ENABLE;
		jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
		clk_disable(i2s->clk_i2s);
	}
	clk_disable(i2s->clk_aic);
	return 0;
}
/*
 * Resume: mirror of suspend -- AIC clock first, then re-enable the
 * controller if a stream was active.
 */
static int jz4740_i2s_resume(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;
	clk_enable(i2s->clk_aic);
	if (dai->active) {
		clk_enable(i2s->clk_i2s);
		conf = jz4740_i2s_read(i2s, JZ_REG_AIC_CONF);
		conf |= JZ_AIC_CONF_ENABLE;
		jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
	}
	return 0;
}
static void jz4740_i2c_init_pcm_config(struct jz4740_i2s *i2s)
{
struct jz4740_dma_config *dma_config;
/* Playback */
dma_config = &i2s->pcm_config_playback.dma_config;
dma_config->src_width = JZ4740_DMA_WIDTH_32BIT;
dma_config->transfer_size = JZ4740_DMA_TRANSFER_SIZE_16BYTE;
dma_config->request_type = JZ4740_DMA_TYPE_AIC_TRANSMIT;
dma_config->flags = JZ4740_DMA_SRC_AUTOINC;
dma_config->mode = JZ4740_DMA_MODE_SINGLE;
i2s->pcm_config_playback.fifo_addr = i2s->phys_base + JZ_REG_AIC_FIFO;
/* Capture */
dma_config = &i2s->pcm_config_capture.dma_config;
dma_config->dst_width = JZ4740_DMA_WIDTH_32BIT;
dma_config->transfer_size = JZ4740_DMA_TRANSFER_SIZE_16BYTE;
dma_config->request_type = JZ4740_DMA_TYPE_AIC_RECEIVE;
dma_config->flags = JZ4740_DMA_DST_AUTOINC;
dma_config->mode = JZ4740_DMA_MODE_SINGLE;
i2s->pcm_config_capture.fifo_addr = i2s->phys_base + JZ_REG_AIC_FIFO;
}
/*
 * DAI probe: ungate the AIC clock, prepare the DMA configs, reset the
 * controller and program FIFO thresholds plus I2S/internal-codec mode.
 */
static int jz4740_i2s_dai_probe(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	uint32_t conf;
	clk_enable(i2s->clk_aic);
	jz4740_i2c_init_pcm_config(i2s);
	conf = (7 << JZ_AIC_CONF_FIFO_RX_THRESHOLD_OFFSET) |
		(8 << JZ_AIC_CONF_FIFO_TX_THRESHOLD_OFFSET) |
		JZ_AIC_CONF_OVERFLOW_PLAY_LAST |
		JZ_AIC_CONF_I2S |
		JZ_AIC_CONF_INTERNAL_CODEC;
	/* Pulse the reset bit before writing the real configuration */
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, JZ_AIC_CONF_RESET);
	jz4740_i2s_write(i2s, JZ_REG_AIC_CONF, conf);
	return 0;
}
/* DAI remove: gate the AIC clock enabled in probe. */
static int jz4740_i2s_dai_remove(struct snd_soc_dai *dai)
{
	struct jz4740_i2s *i2s = snd_soc_dai_get_drvdata(dai);
	clk_disable(i2s->clk_aic);
	return 0;
}
/* DAI callbacks wired into the ASoC core. */
static const struct snd_soc_dai_ops jz4740_i2s_dai_ops = {
	.startup = jz4740_i2s_startup,
	.shutdown = jz4740_i2s_shutdown,
	.trigger = jz4740_i2s_trigger,
	.hw_params = jz4740_i2s_hw_params,
	.set_fmt = jz4740_i2s_set_fmt,
	.set_sysclk = jz4740_i2s_set_sysclk,
};
/* Formats accepted by hw_params (S8 and S16 little-endian). */
#define JZ4740_I2S_FMTS (SNDRV_PCM_FMTBIT_S8 | \
		SNDRV_PCM_FMTBIT_S16_LE)
/* DAI description: mono-capable playback, stereo-only capture. */
static struct snd_soc_dai_driver jz4740_i2s_dai = {
	.probe = jz4740_i2s_dai_probe,
	.remove = jz4740_i2s_dai_remove,
	.playback = {
	.channels_min = 1,
	.channels_max = 2,
	.rates = SNDRV_PCM_RATE_8000_48000,
	.formats = JZ4740_I2S_FMTS,
	},
	.capture = {
	.channels_min = 2,
	.channels_max = 2,
	.rates = SNDRV_PCM_RATE_8000_48000,
	.formats = JZ4740_I2S_FMTS,
	},
	.symmetric_rates = 1,
	.ops = &jz4740_i2s_dai_ops,
	.suspend = jz4740_i2s_suspend,
	.resume = jz4740_i2s_resume,
};
/* Component under which the DAI is registered. */
static const struct snd_soc_component_driver jz4740_i2s_component = {
	.name = "jz4740-i2s",
};
/*
 * Platform probe: claim and map the MMIO region, look up the "aic" and
 * "i2s" clocks, then register the DAI.  Uses a goto-based unwind chain
 * that releases resources in reverse acquisition order on failure.
 */
static int jz4740_i2s_dev_probe(struct platform_device *pdev)
{
	struct jz4740_i2s *i2s;
	int ret;
	i2s = kzalloc(sizeof(*i2s), GFP_KERNEL);
	if (!i2s)
		return -ENOMEM;
	i2s->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!i2s->mem) {
		ret = -ENOENT;
		goto err_free;
	}
	/* Reserve the register window for exclusive use */
	i2s->mem = request_mem_region(i2s->mem->start, resource_size(i2s->mem),
				pdev->name);
	if (!i2s->mem) {
		ret = -EBUSY;
		goto err_free;
	}
	i2s->base = ioremap_nocache(i2s->mem->start, resource_size(i2s->mem));
	if (!i2s->base) {
		ret = -EBUSY;
		goto err_release_mem_region;
	}
	/* Physical FIFO address needed for the DMA configuration */
	i2s->phys_base = i2s->mem->start;
	i2s->clk_aic = clk_get(&pdev->dev, "aic");
	if (IS_ERR(i2s->clk_aic)) {
		ret = PTR_ERR(i2s->clk_aic);
		goto err_iounmap;
	}
	i2s->clk_i2s = clk_get(&pdev->dev, "i2s");
	if (IS_ERR(i2s->clk_i2s)) {
		ret = PTR_ERR(i2s->clk_i2s);
		goto err_clk_put_aic;
	}
	platform_set_drvdata(pdev, i2s);
	ret = snd_soc_register_component(&pdev->dev, &jz4740_i2s_component,
					 &jz4740_i2s_dai, 1);
	if (ret) {
		dev_err(&pdev->dev, "Failed to register DAI\n");
		goto err_clk_put_i2s;
	}
	return 0;
err_clk_put_i2s:
	clk_put(i2s->clk_i2s);
err_clk_put_aic:
	clk_put(i2s->clk_aic);
err_iounmap:
	iounmap(i2s->base);
err_release_mem_region:
	release_mem_region(i2s->mem->start, resource_size(i2s->mem));
err_free:
	kfree(i2s);
	return ret;
}
/* Platform remove: undo everything acquired in probe, in reverse order. */
static int jz4740_i2s_dev_remove(struct platform_device *pdev)
{
	struct jz4740_i2s *i2s = platform_get_drvdata(pdev);
	snd_soc_unregister_component(&pdev->dev);
	clk_put(i2s->clk_i2s);
	clk_put(i2s->clk_aic);
	iounmap(i2s->base);
	release_mem_region(i2s->mem->start, resource_size(i2s->mem));
	platform_set_drvdata(pdev, NULL);
	kfree(i2s);
	return 0;
}
/* Platform driver glue; bound by the "jz4740-i2s" device name. */
static struct platform_driver jz4740_i2s_driver = {
	.probe = jz4740_i2s_dev_probe,
	.remove = jz4740_i2s_dev_remove,
	.driver = {
	.name = "jz4740-i2s",
	.owner = THIS_MODULE,
	},
};
module_platform_driver(jz4740_i2s_driver);
MODULE_AUTHOR("Lars-Peter Clausen, <lars@metafoo.de>");
MODULE_DESCRIPTION("Ingenic JZ4740 SoC I2S driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:jz4740-i2s");
| gpl-2.0 |
venue3x40-dev/android_kernel_dell_venue3x40 | sound/soc/cirrus/ep93xx-i2s.c | 2087 | 11290 | /*
* linux/sound/soc/ep93xx-i2s.c
* EP93xx I2S driver
*
* Copyright (C) 2010 Ryan Mallon
*
* Based on the original driver by:
* Copyright (C) 2007 Chase Douglas <chasedouglas@gmail>
* Copyright (C) 2006 Lennert Buytenhek <buytenh@wantstofly.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
*/
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>
#include <mach/hardware.h>
#include <mach/ep93xx-regs.h>
#include <linux/platform_data/dma-ep93xx.h>
#define EP93XX_I2S_TXCLKCFG 0x00
#define EP93XX_I2S_RXCLKCFG 0x04
#define EP93XX_I2S_GLCTRL 0x0C
#define EP93XX_I2S_TXLINCTRLDATA 0x28
#define EP93XX_I2S_TXCTRL 0x2C
#define EP93XX_I2S_TXWRDLEN 0x30
#define EP93XX_I2S_TX0EN 0x34
#define EP93XX_I2S_RXLINCTRLDATA 0x58
#define EP93XX_I2S_RXCTRL 0x5C
#define EP93XX_I2S_RXWRDLEN 0x60
#define EP93XX_I2S_RX0EN 0x64
#define EP93XX_I2S_WRDLEN_16 (0 << 0)
#define EP93XX_I2S_WRDLEN_24 (1 << 0)
#define EP93XX_I2S_WRDLEN_32 (2 << 0)
#define EP93XX_I2S_LINCTRLDATA_R_JUST (1 << 2) /* Right justify */
#define EP93XX_I2S_CLKCFG_LRS (1 << 0) /* lrclk polarity */
#define EP93XX_I2S_CLKCFG_CKP (1 << 1) /* Bit clock polarity */
#define EP93XX_I2S_CLKCFG_REL (1 << 2) /* First bit transition */
#define EP93XX_I2S_CLKCFG_MASTER (1 << 3) /* Master mode */
#define EP93XX_I2S_CLKCFG_NBCG (1 << 4) /* Not bit clock gating */
/*
 * Per-controller state: the three I2S clocks (master, bit, frame), the
 * DMA channel descriptions and the MMIO mapping.
 */
struct ep93xx_i2s_info {
	struct clk *mclk;
	struct clk *sclk;
	struct clk *lrclk;
	struct ep93xx_dma_data *dma_data;
	void __iomem *regs;
};
/* DMA channel descriptions, indexed by PCM stream direction.
 * NOTE(review): not static -- confirm no other translation unit
 * references this symbol before narrowing its linkage. */
struct ep93xx_dma_data ep93xx_i2s_dma_data[] = {
	[SNDRV_PCM_STREAM_PLAYBACK] = {
	.name = "i2s-pcm-out",
	.port = EP93XX_DMA_I2S1,
	.direction = DMA_MEM_TO_DEV,
	},
	[SNDRV_PCM_STREAM_CAPTURE] = {
	.name = "i2s-pcm-in",
	.port = EP93XX_DMA_I2S1,
	.direction = DMA_DEV_TO_MEM,
	},
};
/* Raw MMIO write to an I2S controller register. */
static inline void ep93xx_i2s_write_reg(struct ep93xx_i2s_info *info,
	unsigned reg, unsigned val)
{
	__raw_writel(val, info->regs + reg);
}
/* Raw MMIO read from an I2S controller register. */
static inline unsigned ep93xx_i2s_read_reg(struct ep93xx_i2s_info *info,
	unsigned reg)
{
	return __raw_readl(info->regs + reg);
}
/*
 * Enable one stream direction.  If neither TX0 nor RX0 is currently
 * enabled, first ungate the clocks and set the global enable; then turn
 * on the three channel FIFO enables for the given direction (registers
 * spaced 4 bytes apart starting at TX0EN/RX0EN).
 */
static void ep93xx_i2s_enable(struct ep93xx_i2s_info *info, int stream)
{
	unsigned base_reg;
	int i;
	if ((ep93xx_i2s_read_reg(info, EP93XX_I2S_TX0EN) & 0x1) == 0 &&
	    (ep93xx_i2s_read_reg(info, EP93XX_I2S_RX0EN) & 0x1) == 0) {
		/* Enable clocks */
		clk_enable(info->mclk);
		clk_enable(info->sclk);
		clk_enable(info->lrclk);
		/* Enable i2s */
		ep93xx_i2s_write_reg(info, EP93XX_I2S_GLCTRL, 1);
	}
	/* Enable fifos */
	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		base_reg = EP93XX_I2S_TX0EN;
	else
		base_reg = EP93XX_I2S_RX0EN;
	for (i = 0; i < 3; i++)
		ep93xx_i2s_write_reg(info, base_reg + (i * 4), 1);
}
/*
 * Disable one stream direction; when both directions end up disabled,
 * also clear the global enable and gate the clocks (reverse order of
 * ep93xx_i2s_enable).
 */
static void ep93xx_i2s_disable(struct ep93xx_i2s_info *info, int stream)
{
	unsigned base_reg;
	int i;
	/* Disable fifos */
	if (stream == SNDRV_PCM_STREAM_PLAYBACK)
		base_reg = EP93XX_I2S_TX0EN;
	else
		base_reg = EP93XX_I2S_RX0EN;
	for (i = 0; i < 3; i++)
		ep93xx_i2s_write_reg(info, base_reg + (i * 4), 0);
	if ((ep93xx_i2s_read_reg(info, EP93XX_I2S_TX0EN) & 0x1) == 0 &&
	    (ep93xx_i2s_read_reg(info, EP93XX_I2S_RX0EN) & 0x1) == 0) {
		/* Disable i2s */
		ep93xx_i2s_write_reg(info, EP93XX_I2S_GLCTRL, 0);
		/* Disable clocks */
		clk_disable(info->lrclk);
		clk_disable(info->sclk);
		clk_disable(info->mclk);
	}
}
/*
 * Stream open: attach the direction's DMA channel description to the
 * CPU DAI so the PCM layer can pick it up.
 */
static int ep93xx_i2s_startup(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	snd_soc_dai_set_dma_data(cpu_dai, substream,
			&info->dma_data[substream->stream]);
	return 0;
}
/* Stream close: disable this direction (hardware was enabled in hw_params). */
static void ep93xx_i2s_shutdown(struct snd_pcm_substream *substream,
	struct snd_soc_dai *dai)
{
	struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);
	ep93xx_i2s_disable(info, substream->stream);
}
/*
 * Translate the ASoC DAI format flags into clock-config and line-control
 * register values, applied identically to the RX and TX sides.
 *
 * NOTE(review): the FORMAT_MASK switch sets/clears EP93XX_I2S_CLKCFG_REL,
 * but the INV_MASK switch below rewrites that same bit, so the final REL
 * value comes from the clock-inversion setting -- confirm intended.
 */
static int ep93xx_i2s_set_dai_fmt(struct snd_soc_dai *cpu_dai,
	unsigned int fmt)
{
	struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(cpu_dai);
	unsigned int clk_cfg, lin_ctrl;
	clk_cfg = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXCLKCFG);
	lin_ctrl = ep93xx_i2s_read_reg(info, EP93XX_I2S_RXLINCTRLDATA);
	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		clk_cfg |= EP93XX_I2S_CLKCFG_REL;
		lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
		lin_ctrl &= ~EP93XX_I2S_LINCTRLDATA_R_JUST;
		break;
	case SND_SOC_DAIFMT_RIGHT_J:
		clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
		lin_ctrl |= EP93XX_I2S_LINCTRLDATA_R_JUST;
		break;
	default:
		return -EINVAL;
	}
	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		/* CPU is master */
		clk_cfg |= EP93XX_I2S_CLKCFG_MASTER;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		/* Codec is master */
		clk_cfg &= ~EP93XX_I2S_CLKCFG_MASTER;
		break;
	default:
		return -EINVAL;
	}
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		/* Negative bit clock, lrclk low on left word */
		clk_cfg &= ~(EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL);
		break;
	case SND_SOC_DAIFMT_NB_IF:
		/* Negative bit clock, lrclk low on right word */
		clk_cfg &= ~EP93XX_I2S_CLKCFG_CKP;
		clk_cfg |= EP93XX_I2S_CLKCFG_REL;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		/* Positive bit clock, lrclk low on left word */
		clk_cfg |= EP93XX_I2S_CLKCFG_CKP;
		clk_cfg &= ~EP93XX_I2S_CLKCFG_REL;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		/* Positive bit clock, lrclk low on right word */
		clk_cfg |= EP93XX_I2S_CLKCFG_CKP | EP93XX_I2S_CLKCFG_REL;
		break;
	}
	/* Write new register values */
	ep93xx_i2s_write_reg(info, EP93XX_I2S_RXCLKCFG, clk_cfg);
	ep93xx_i2s_write_reg(info, EP93XX_I2S_TXCLKCFG, clk_cfg);
	ep93xx_i2s_write_reg(info, EP93XX_I2S_RXLINCTRLDATA, lin_ctrl);
	ep93xx_i2s_write_reg(info, EP93XX_I2S_TXLINCTRLDATA, lin_ctrl);
	return 0;
}
/*
 * Program the word length for the stream direction, derive the SCLK and
 * LRCLK divider rates from mclk vs. the requested sample rate, then
 * enable the direction's FIFOs and clocks.
 */
static int ep93xx_i2s_hw_params(struct snd_pcm_substream *substream,
	struct snd_pcm_hw_params *params,
	struct snd_soc_dai *dai)
{
	struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);
	unsigned word_len, div, sdiv, lrdiv;
	int err;
	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		word_len = EP93XX_I2S_WRDLEN_16;
		break;
	case SNDRV_PCM_FORMAT_S24_LE:
		word_len = EP93XX_I2S_WRDLEN_24;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		word_len = EP93XX_I2S_WRDLEN_32;
		break;
	default:
		return -EINVAL;
	}
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		ep93xx_i2s_write_reg(info, EP93XX_I2S_TXWRDLEN, word_len);
	else
		ep93xx_i2s_write_reg(info, EP93XX_I2S_RXWRDLEN, word_len);
	/*
	 * EP93xx I2S module can be setup so SCLK / LRCLK value can be
	 * 32, 64, 128. MCLK / SCLK value can be 2 and 4.
	 * We set LRCLK equal to `rate' and minimum SCLK / LRCLK
	 * value is 64, because our sample size is 32 bit * 2 channels.
	 * I2S standard permits us to transmit more bits than
	 * the codec uses.
	 */
	div = clk_get_rate(info->mclk) / params_rate(params);
	sdiv = 4;
	/* Pick the divider pair whose product is closest to 'div' */
	if (div > (256 + 512) / 2) {
		lrdiv = 128;
	} else {
		lrdiv = 64;
		if (div < (128 + 256) / 2)
			sdiv = 2;
	}
	err = clk_set_rate(info->sclk, clk_get_rate(info->mclk) / sdiv);
	if (err)
		return err;
	err = clk_set_rate(info->lrclk, clk_get_rate(info->sclk) / lrdiv);
	if (err)
		return err;
	ep93xx_i2s_enable(info, substream->stream);
	return 0;
}
/*
 * set_sysclk callback: only clock id 0 in output direction is supported;
 * the requested frequency is applied directly to MCLK.
 */
static int ep93xx_i2s_set_sysclk(struct snd_soc_dai *cpu_dai, int clk_id,
				 unsigned int freq, int dir)
{
	struct ep93xx_i2s_info *info;

	if (clk_id != 0 || dir == SND_SOC_CLOCK_IN)
		return -EINVAL;

	info = snd_soc_dai_get_drvdata(cpu_dai);
	return clk_set_rate(info->mclk, freq);
}
#ifdef CONFIG_PM
/* PM suspend: quiesce both stream directions if the DAI is in use. */
static int ep93xx_i2s_suspend(struct snd_soc_dai *dai)
{
	struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);

	/* Nothing to do when no stream has been started on this DAI */
	if (!dai->active)
		return 0;

	ep93xx_i2s_disable(info, SNDRV_PCM_STREAM_PLAYBACK);
	ep93xx_i2s_disable(info, SNDRV_PCM_STREAM_CAPTURE);

	return 0;
}
/* PM resume: re-enable both stream directions if the DAI was in use. */
static int ep93xx_i2s_resume(struct snd_soc_dai *dai)
{
	struct ep93xx_i2s_info *info = snd_soc_dai_get_drvdata(dai);

	/* Mirror of ep93xx_i2s_suspend(): skip when nothing was active */
	if (!dai->active)
		return 0;

	ep93xx_i2s_enable(info, SNDRV_PCM_STREAM_PLAYBACK);
	ep93xx_i2s_enable(info, SNDRV_PCM_STREAM_CAPTURE);

	return 0;
}
#else
#define ep93xx_i2s_suspend NULL
#define ep93xx_i2s_resume NULL
#endif
/* DAI callbacks; startup/shutdown/set_fmt are defined earlier in this file. */
static const struct snd_soc_dai_ops ep93xx_i2s_dai_ops = {
	.startup = ep93xx_i2s_startup,
	.shutdown = ep93xx_i2s_shutdown,
	.hw_params = ep93xx_i2s_hw_params,
	.set_sysclk = ep93xx_i2s_set_sysclk,
	.set_fmt = ep93xx_i2s_set_dai_fmt,
};
#define EP93XX_I2S_FORMATS (SNDRV_PCM_FMTBIT_S32_LE)

/*
 * CPU DAI description.  NOTE(review): only S32_LE is advertised here even
 * though ep93xx_i2s_hw_params() can program 16/24/32-bit word lengths —
 * presumably the DMA path expects 32-bit samples; confirm before widening
 * the format mask.
 */
static struct snd_soc_dai_driver ep93xx_i2s_dai = {
	.symmetric_rates= 1,
	.suspend = ep93xx_i2s_suspend,
	.resume = ep93xx_i2s_resume,
	.playback = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_192000,
		.formats = EP93XX_I2S_FORMATS,
	},
	.capture = {
		.channels_min = 2,
		.channels_max = 2,
		.rates = SNDRV_PCM_RATE_8000_192000,
		.formats = EP93XX_I2S_FORMATS,
	},
	.ops = &ep93xx_i2s_dai_ops,
};
/* ASoC component wrapper registered in ep93xx_i2s_probe(). */
static const struct snd_soc_component_driver ep93xx_i2s_component = {
	.name = "ep93xx-i2s",
};
/*
 * Probe: map the controller registers and take the three clocks of the
 * divider chain (mclk -> sclk -> lrclk), then register the CPU DAI.
 * The clocks are plain clk_get()s and must be released on the error
 * paths below and in ep93xx_i2s_remove().
 */
static int ep93xx_i2s_probe(struct platform_device *pdev)
{
	struct ep93xx_i2s_info *info;
	struct resource *res;
	int err;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	/* devm-managed: no explicit unmap needed on the error paths */
	info->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(info->regs))
		return PTR_ERR(info->regs);

	info->mclk = clk_get(&pdev->dev, "mclk");
	if (IS_ERR(info->mclk)) {
		err = PTR_ERR(info->mclk);
		goto fail;
	}

	info->sclk = clk_get(&pdev->dev, "sclk");
	if (IS_ERR(info->sclk)) {
		err = PTR_ERR(info->sclk);
		goto fail_put_mclk;
	}

	info->lrclk = clk_get(&pdev->dev, "lrclk");
	if (IS_ERR(info->lrclk)) {
		err = PTR_ERR(info->lrclk);
		goto fail_put_sclk;
	}

	dev_set_drvdata(&pdev->dev, info);
	info->dma_data = ep93xx_i2s_dma_data;

	err = snd_soc_register_component(&pdev->dev, &ep93xx_i2s_component,
					 &ep93xx_i2s_dai, 1);
	if (err)
		goto fail_put_lrclk;

	return 0;

fail_put_lrclk:
	dev_set_drvdata(&pdev->dev, NULL);
	clk_put(info->lrclk);
fail_put_sclk:
	clk_put(info->sclk);
fail_put_mclk:
	clk_put(info->mclk);
fail:
	return err;
}
/* Unregister the DAI component and release the clocks taken in probe(). */
static int ep93xx_i2s_remove(struct platform_device *pdev)
{
	struct ep93xx_i2s_info *info = dev_get_drvdata(&pdev->dev);

	snd_soc_unregister_component(&pdev->dev);
	dev_set_drvdata(&pdev->dev, NULL);
	clk_put(info->lrclk);
	clk_put(info->sclk);
	clk_put(info->mclk);
	return 0;
}
/* Platform glue; matches the "ep93xx-i2s" platform device. */
static struct platform_driver ep93xx_i2s_driver = {
	.probe = ep93xx_i2s_probe,
	.remove = ep93xx_i2s_remove,
	.driver = {
		.name = "ep93xx-i2s",
		.owner = THIS_MODULE,
	},
};
module_platform_driver(ep93xx_i2s_driver);
MODULE_ALIAS("platform:ep93xx-i2s");
MODULE_AUTHOR("Ryan Mallon");
MODULE_DESCRIPTION("EP93XX I2S driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
myhro/debian-linux-kernel-gzip | drivers/net/wireless/cw1200/hwio.c | 2599 | 7385 | /*
* Low-level device IO routines for ST-Ericsson CW1200 drivers
*
* Copyright (c) 2010, ST-Ericsson
* Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no>
*
* Based on:
* ST-Ericsson UMAC CW1200 driver, which is
* Copyright (c) 2010, ST-Ericsson
* Author: Ajitpal Singh <ajitpal.singh@lockless.no>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/types.h>
#include "cw1200.h"
#include "hwio.h"
#include "hwbus.h"
/* Sdio addr is 4*spi_addr */
#define SPI_REG_ADDR_TO_SDIO(spi_reg_addr) ((spi_reg_addr) << 2)
#define SDIO_ADDR17BIT(buf_id, mpf, rfu, reg_id_ofs) \
((((buf_id) & 0x1F) << 7) \
| (((mpf) & 1) << 6) \
| (((rfu) & 1) << 5) \
| (((reg_id_ofs) & 0x1F) << 0))
#define MAX_RETRY 3
/*
 * Raw register read: translate the SPI register number to the 17-bit
 * SDIO address form and fetch buf_len bytes over the bus.  Callers in
 * this file take the hwbus lock before using it.
 */
static int __cw1200_reg_read(struct cw1200_common *priv, u16 addr,
			     void *buf, size_t buf_len, int buf_id)
{
	u16 addr_sdio;
	u32 sdio_reg_addr_17bit;

	/* Check if buffer is aligned to 4 byte boundary */
	if (WARN_ON(((unsigned long)buf & 3) && (buf_len > 4))) {
		pr_err("buffer is not aligned.\n");
		return -EINVAL;
	}

	/* Convert to SDIO Register Address */
	addr_sdio = SPI_REG_ADDR_TO_SDIO(addr);
	sdio_reg_addr_17bit = SDIO_ADDR17BIT(buf_id, 0, 0, addr_sdio);

	return priv->hwbus_ops->hwbus_memcpy_fromio(priv->hwbus_priv,
						    sdio_reg_addr_17bit,
						    buf, buf_len);
}
/*
 * Raw register write: build the 17-bit SDIO address from the SPI
 * register number and push buf_len bytes over the bus.  Callers in this
 * file take the hwbus lock before using it.
 */
static int __cw1200_reg_write(struct cw1200_common *priv, u16 addr,
			      const void *buf, size_t buf_len, int buf_id)
{
	u32 sdio_addr;

	sdio_addr = SDIO_ADDR17BIT(buf_id, 0, 0,
				   (u16)SPI_REG_ADDR_TO_SDIO(addr));

	return priv->hwbus_ops->hwbus_memcpy_toio(priv->hwbus_priv, sdio_addr,
						  buf, buf_len);
}
/* 32-bit little-endian register read; caller must check the return code
 * before trusting *val (tmp is untouched if the bus read fails). */
static inline int __cw1200_reg_read_32(struct cw1200_common *priv,
				       u16 addr, u32 *val)
{
	__le32 tmp;
	int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
	*val = le32_to_cpu(tmp);
	return i;
}
/* 32-bit little-endian register write. */
static inline int __cw1200_reg_write_32(struct cw1200_common *priv,
					u16 addr, u32 val)
{
	__le32 tmp = cpu_to_le32(val);
	return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
}
/* 16-bit little-endian register read; caller must check the return code
 * before trusting *val (tmp is untouched if the bus read fails). */
static inline int __cw1200_reg_read_16(struct cw1200_common *priv,
				       u16 addr, u16 *val)
{
	__le16 tmp;
	int i = __cw1200_reg_read(priv, addr, &tmp, sizeof(tmp), 0);
	*val = le16_to_cpu(tmp);
	return i;
}
/* 16-bit little-endian register write. */
static inline int __cw1200_reg_write_16(struct cw1200_common *priv,
					u16 addr, u16 val)
{
	__le16 tmp = cpu_to_le16(val);
	return __cw1200_reg_write(priv, addr, &tmp, sizeof(tmp), 0);
}
/* Public register read: takes the hwbus lock around the raw access. */
int cw1200_reg_read(struct cw1200_common *priv, u16 addr, void *buf,
		    size_t buf_len)
{
	int ret;
	priv->hwbus_ops->lock(priv->hwbus_priv);
	ret = __cw1200_reg_read(priv, addr, buf, buf_len, 0);
	priv->hwbus_ops->unlock(priv->hwbus_priv);
	return ret;
}
/* Public register write: takes the hwbus lock around the raw access. */
int cw1200_reg_write(struct cw1200_common *priv, u16 addr, const void *buf,
		     size_t buf_len)
{
	int ret;
	priv->hwbus_ops->lock(priv->hwbus_priv);
	ret = __cw1200_reg_write(priv, addr, buf, buf_len, 0);
	priv->hwbus_ops->unlock(priv->hwbus_priv);
	return ret;
}
/*
 * Read one Rx frame from the in/out queue register.  The 2-bit Rx buffer
 * id is advanced only after a successful read; failures are retried up
 * to MAX_RETRY times with a 1 ms pause between attempts.
 */
int cw1200_data_read(struct cw1200_common *priv, void *buf, size_t buf_len)
{
	int ret, retry = 1;
	int buf_id_rx = priv->buf_id_rx;

	priv->hwbus_ops->lock(priv->hwbus_priv);

	while (retry <= MAX_RETRY) {
		/* buf_id_rx + 1 selects the device-side queue buffer */
		ret = __cw1200_reg_read(priv,
					ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
					buf_len, buf_id_rx + 1);
		if (!ret) {
			buf_id_rx = (buf_id_rx + 1) & 3;
			priv->buf_id_rx = buf_id_rx;
			break;
		} else {
			retry++;
			mdelay(1);
			pr_err("error :[%d]\n", ret);
		}
	}

	priv->hwbus_ops->unlock(priv->hwbus_priv);
	return ret;
}
/*
 * Write one Tx frame to the in/out queue register.  The 5-bit Tx buffer
 * id is advanced only after a successful write; failures are retried up
 * to MAX_RETRY times with a 1 ms pause between attempts.
 */
int cw1200_data_write(struct cw1200_common *priv, const void *buf,
		      size_t buf_len)
{
	int ret, retry = 1;
	int buf_id_tx = priv->buf_id_tx;

	priv->hwbus_ops->lock(priv->hwbus_priv);

	while (retry <= MAX_RETRY) {
		ret = __cw1200_reg_write(priv,
					 ST90TDS_IN_OUT_QUEUE_REG_ID, buf,
					 buf_len, buf_id_tx);
		if (!ret) {
			buf_id_tx = (buf_id_tx + 1) & 31;
			priv->buf_id_tx = buf_id_tx;
			break;
		} else {
			retry++;
			mdelay(1);
			pr_err("error :[%d]\n", ret);
		}
	}

	priv->hwbus_ops->unlock(priv->hwbus_priv);
	return ret;
}
/*
 * Read from chip memory through the indirect-access window: program the
 * base address, set the caller-supplied prefetch bit in the config
 * register, wait for the hardware to clear it, then read the data port.
 * Returns 0 on success or a negative error code.
 */
int cw1200_indirect_read(struct cw1200_common *priv, u32 addr, void *buf,
			 size_t buf_len, u32 prefetch, u16 port_addr)
{
	u32 val32 = 0;
	int i, ret;

	if ((buf_len / 2) >= 0x1000) {
		pr_err("Can't read more than 0xfff words.\n");
		return -EINVAL;
	}

	priv->hwbus_ops->lock(priv->hwbus_priv);

	/* Write address */
	ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
	if (ret < 0) {
		pr_err("Can't write address register.\n");
		goto out;
	}

	/* Read CONFIG Register Value - We will read 32 bits */
	ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
	if (ret < 0) {
		pr_err("Can't read config register.\n");
		goto out;
	}

	/* Set PREFETCH bit */
	ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID,
				    val32 | prefetch);
	if (ret < 0) {
		pr_err("Can't write prefetch bit.\n");
		goto out;
	}

	/* Check for PRE-FETCH bit to be cleared */
	for (i = 0; i < 20; i++) {
		ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
		if (ret < 0) {
			pr_err("Can't check prefetch bit.\n");
			goto out;
		}
		if (!(val32 & prefetch))
			break;
		mdelay(i);
	}

	if (val32 & prefetch) {
		/*
		 * Fix: previously this path fell through with ret == 0
		 * (the status of the last config-register read), so a
		 * prefetch timeout was reported as a successful read even
		 * though the data port was never read.
		 */
		pr_err("Prefetch bit is not cleared.\n");
		ret = -ETIMEDOUT;
		goto out;
	}

	/* Read data port */
	ret = __cw1200_reg_read(priv, port_addr, buf, buf_len, 0);
	if (ret < 0) {
		pr_err("Can't read data port.\n");
		goto out;
	}

out:
	priv->hwbus_ops->unlock(priv->hwbus_priv);
	return ret;
}
/*
 * Write to chip memory via the indirect window: program the base
 * address, then stream the payload through the SRAM data port.
 * Rejects transfers of 0x1000 or more 16-bit words.
 */
int cw1200_apb_write(struct cw1200_common *priv, u32 addr, const void *buf,
		     size_t buf_len)
{
	int ret;

	if ((buf_len / 2) >= 0x1000) {
		pr_err("Can't write more than 0xfff words.\n");
		return -EINVAL;
	}

	priv->hwbus_ops->lock(priv->hwbus_priv);

	/* Write address */
	ret = __cw1200_reg_write_32(priv, ST90TDS_SRAM_BASE_ADDR_REG_ID, addr);
	if (ret < 0) {
		pr_err("Can't write address register.\n");
		goto out;
	}

	/* Write data port */
	ret = __cw1200_reg_write(priv, ST90TDS_SRAM_DPORT_REG_ID,
				 buf, buf_len, 0);
	if (ret < 0) {
		pr_err("Can't write data port.\n");
		goto out;
	}

out:
	priv->hwbus_ops->unlock(priv->hwbus_priv);
	return ret;
}
/*
 * Enable or disable the device "ready" interrupt with a read-modify-write
 * of the config register.  The register is accessed as 32 bits on
 * HIF_8601 silicon and as 16 bits on the other hardware types, hence the
 * two code paths.  Caller is expected to hold the hwbus lock.
 */
int __cw1200_irq_enable(struct cw1200_common *priv, int enable)
{
	u32 val32;
	u16 val16;
	int ret;

	if (HIF_8601_SILICON == priv->hw_type) {
		ret = __cw1200_reg_read_32(priv, ST90TDS_CONFIG_REG_ID, &val32);
		if (ret < 0) {
			pr_err("Can't read config register.\n");
			return ret;
		}

		if (enable)
			val32 |= ST90TDS_CONF_IRQ_RDY_ENABLE;
		else
			val32 &= ~ST90TDS_CONF_IRQ_RDY_ENABLE;

		ret = __cw1200_reg_write_32(priv, ST90TDS_CONFIG_REG_ID, val32);
		if (ret < 0) {
			pr_err("Can't write config register.\n");
			return ret;
		}
	} else {
		ret = __cw1200_reg_read_16(priv, ST90TDS_CONFIG_REG_ID, &val16);
		if (ret < 0) {
			pr_err("Can't read control register.\n");
			return ret;
		}

		if (enable)
			val16 |= ST90TDS_CONT_IRQ_RDY_ENABLE;
		else
			val16 &= ~ST90TDS_CONT_IRQ_RDY_ENABLE;

		ret = __cw1200_reg_write_16(priv, ST90TDS_CONFIG_REG_ID, val16);
		if (ret < 0) {
			pr_err("Can't write control register.\n");
			return ret;
		}
	}
	return 0;
}
| gpl-2.0 |
bangprovn/android_stock | arch/arm/mach-omap2/sdram-nokia.c | 4903 | 6412 | /*
* SDRC register values for Nokia boards
*
* Copyright (C) 2008, 2010-2011 Nokia Corporation
*
* Lauri Leukkunen <lauri.leukkunen@nokia.com>
*
* Original code by Juha Yrjola <juha.yrjola@solidboot.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/kernel.h>
#include <linux/clk.h>
#include <linux/err.h>
#include <linux/io.h>
#include "common.h"
#include <plat/clock.h>
#include <plat/sdrc.h>
#include "sdram-nokia.h"
/* In picoseconds, except for tREF (ns), tXP, tCKE, tWTR (clks) */
/* Raw SDRAM AC timing parameters for one operating point; converted into
 * SDRC ACTIM/RFR register encodings by sdrc_timings(). */
struct sdram_timings {
	u32 casl;
	u32 tDAL;
	u32 tDPL;
	u32 tRRD;
	u32 tRCD;
	u32 tRP;
	u32 tRAS;
	u32 tRC;
	u32 tRFC;
	u32 tXSR;
	u32 tREF; /* in ns */
	u32 tXP;
	u32 tCKE;
	u32 tWTR;
};
/* Pre-computed operating points; values are raw device timings that are
 * converted to register ticks at the target rate by sdrc_timings(). */
static const struct sdram_timings nokia_97dot6mhz_timings[] = {
	{
		.casl = 3,
		.tDAL = 30725,
		.tDPL = 15362,
		.tRRD = 10241,
		.tRCD = 20483,
		.tRP = 15362,
		.tRAS = 40967,
		.tRC = 56330,
		.tRFC = 138266,
		.tXSR = 204839,
		.tREF = 7798,
		.tXP = 2,
		.tCKE = 4,
		.tWTR = 2,
	},
};

static const struct sdram_timings nokia_166mhz_timings[] = {
	{
		.casl = 3,
		.tDAL = 33000,
		.tDPL = 15000,
		.tRRD = 12000,
		.tRCD = 22500,
		.tRP = 18000,
		.tRAS = 42000,
		.tRC = 66000,
		.tRFC = 138000,
		.tXSR = 200000,
		.tREF = 7800,
		.tXP = 2,
		.tCKE = 2,
		.tWTR = 2
	},
};

static const struct sdram_timings nokia_195dot2mhz_timings[] = {
	{
		.casl = 3,
		.tDAL = 30725,
		.tDPL = 15362,
		.tRRD = 10241,
		.tRCD = 20483,
		.tRP = 15362,
		.tRAS = 40967,
		.tRC = 56330,
		.tRFC = 138266,
		.tXSR = 204839,
		.tREF = 7752,
		.tXP = 2,
		.tCKE = 4,
		.tWTR = 2,
	},
};

static const struct sdram_timings nokia_200mhz_timings[] = {
	{
		.casl = 3,
		.tDAL = 30000,
		.tDPL = 15000,
		.tRRD = 10000,
		.tRCD = 20000,
		.tRP = 15000,
		.tRAS = 40000,
		.tRC = 55000,
		.tRFC = 140000,
		.tXSR = 200000,
		.tREF = 7800,
		.tXP = 2,
		.tCKE = 4,
		.tWTR = 2
	},
};

/* Rate (Hz) -> timing table; some lower rates reuse the table of a
 * corresponding higher-rate point. */
static const struct {
	long rate;
	struct sdram_timings const *data;
} nokia_timings[] = {
	{ 83000000, nokia_166mhz_timings },
	{ 97600000, nokia_97dot6mhz_timings },
	{ 100000000, nokia_200mhz_timings },
	{ 166000000, nokia_166mhz_timings },
	{ 195200000, nokia_195dot2mhz_timings },
	{ 200000000, nokia_200mhz_timings },
};

/* One entry per rate above plus a zero-rate terminator; filled in by
 * sdrc_timings(). */
static struct omap_sdrc_params nokia_sdrc_params[ARRAY_SIZE(nokia_timings) + 1];
/* Functional clock period in picoseconds, for a rate given in kHz
 * (callers pass rate / 1000). */
static unsigned long sdrc_get_fclk_period(long rate)
{
	unsigned long period_ps = 1000000000 / rate;

	return period_ps;
}
/* Convert a time in picoseconds into a whole number of functional clock
 * ticks at `rate' (kHz), rounding up. */
static unsigned int sdrc_ps_to_ticks(unsigned int time_ps, long rate)
{
	/* fclk period in picoseconds, kept in ps for exact rounding */
	unsigned long tick_ps = 1000000000 / rate;

	return (time_ps + tick_ps - 1) / tick_ps;
}
#undef DEBUG
/*
 * Insert a tick count into bits [st_bit..end_bit] of *regval.
 * Returns 0 on success, -1 if the value does not fit in the field.
 * The DEBUG build takes extra arguments purely for the trace printk.
 */
#ifdef DEBUG
static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit,
				  int ticks, long rate, const char *name)
#else
static int set_sdrc_timing_regval(u32 *regval, int st_bit, int end_bit,
				  int ticks)
#endif
{
	int mask, nr_bits;

	nr_bits = end_bit - st_bit + 1;
	/* Refuse values that would overflow the bitfield */
	if (ticks >= 1 << nr_bits)
		return -1;
	mask = (1 << nr_bits) - 1;
	*regval &= ~(mask << st_bit);
	*regval |= ticks << st_bit;
#ifdef DEBUG
	printk(KERN_INFO "SDRC %s: %i ticks %i ns\n", name, ticks,
	       (unsigned int)sdrc_get_fclk_period(rate) * ticks /
	       1000);
#endif
	return 0;
}
#ifdef DEBUG
#define SDRC_SET_ONE(reg, st, end, field, rate) \
if (set_sdrc_timing_regval((reg), (st), (end), \
memory_timings->field, (rate), #field) < 0) \
err = -1;
#else
#define SDRC_SET_ONE(reg, st, end, field, rate) \
if (set_sdrc_timing_regval((reg), (st), (end), \
memory_timings->field) < 0) \
err = -1;
#endif
/*
 * Like set_sdrc_timing_regval() but takes the time in picoseconds and
 * rounds it up to ticks at `rate' first.
 */
#ifdef DEBUG
static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit,
				     int time, long rate, const char *name)
#else
static int set_sdrc_timing_regval_ps(u32 *regval, int st_bit, int end_bit,
				     int time, long rate)
#endif
{
	int ticks, ret;
	ret = 0;

	/* A zero time means the field is unused; program zero ticks */
	if (time == 0)
		ticks = 0;
	else
		ticks = sdrc_ps_to_ticks(time, rate);

#ifdef DEBUG
	ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks,
				     rate, name);
#else
	ret = set_sdrc_timing_regval(regval, st_bit, end_bit, ticks);
#endif

	return ret;
}
#ifdef DEBUG
#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \
if (set_sdrc_timing_regval_ps((reg), (st), (end), \
memory_timings->field, \
(rate), #field) < 0) \
err = -1;
#else
#define SDRC_SET_ONE_PS(reg, st, end, field, rate) \
if (set_sdrc_timing_regval_ps((reg), (st), (end), \
memory_timings->field, (rate)) < 0) \
err = -1;
#endif
/*
 * Convert one operating point into SDRC ACTIM_CTRLA/B and RFR_CTRL
 * register values and store them in nokia_sdrc_params[id], writing a
 * zero-rate terminator at id + 1.  Returns 0 on success, -1 if any
 * timing does not fit its register bitfield.
 */
static int sdrc_timings(int id, long rate,
			const struct sdram_timings *memory_timings)
{
	u32 ticks_per_ms;
	u32 rfr, l;
	u32 actim_ctrla = 0, actim_ctrlb = 0;
	u32 rfr_ctrl;
	int err = 0;
	long l3_rate = rate / 1000;	/* work in kHz so periods fit in ps */

	/* ACTIM_CTRLA bitfields */
	SDRC_SET_ONE_PS(&actim_ctrla, 0, 4, tDAL, l3_rate);
	SDRC_SET_ONE_PS(&actim_ctrla, 6, 8, tDPL, l3_rate);
	SDRC_SET_ONE_PS(&actim_ctrla, 9, 11, tRRD, l3_rate);
	SDRC_SET_ONE_PS(&actim_ctrla, 12, 14, tRCD, l3_rate);
	SDRC_SET_ONE_PS(&actim_ctrla, 15, 17, tRP, l3_rate);
	SDRC_SET_ONE_PS(&actim_ctrla, 18, 21, tRAS, l3_rate);
	SDRC_SET_ONE_PS(&actim_ctrla, 22, 26, tRC, l3_rate);
	SDRC_SET_ONE_PS(&actim_ctrla, 27, 31, tRFC, l3_rate);

	/* ACTIM_CTRLB bitfields; tXP/tCKE/tWTR are already in clocks */
	SDRC_SET_ONE_PS(&actim_ctrlb, 0, 7, tXSR, l3_rate);
	SDRC_SET_ONE(&actim_ctrlb, 8, 10, tXP, l3_rate);
	SDRC_SET_ONE(&actim_ctrlb, 12, 14, tCKE, l3_rate);
	SDRC_SET_ONE(&actim_ctrlb, 16, 17, tWTR, l3_rate);

	/* Refresh interval: tREF (ns) in ticks, minus a 50-tick margin,
	 * clamped to the 16-bit counter field */
	ticks_per_ms = l3_rate;
	rfr = memory_timings[0].tREF * ticks_per_ms / 1000000;
	if (rfr > 65535 + 50)
		rfr = 65535;
	else
		rfr -= 50;
#ifdef DEBUG
	printk(KERN_INFO "SDRC tREF: %i ticks\n", rfr);
#endif

	l = rfr << 8;
	rfr_ctrl = l | 0x1; /* autorefresh, reload counter with 1xARCV */

	nokia_sdrc_params[id].rate = rate;
	nokia_sdrc_params[id].actim_ctrla = actim_ctrla;
	nokia_sdrc_params[id].actim_ctrlb = actim_ctrlb;
	nokia_sdrc_params[id].rfr_ctrl = rfr_ctrl;
	nokia_sdrc_params[id].mr = 0x32;

	/* Zero rate marks the end of the list for the consumer */
	nokia_sdrc_params[id + 1].rate = 0;

	return err;
}
/*
 * Build the full nokia_sdrc_params[] table from nokia_timings[].
 * Returns the table, or NULL if any rate failed to convert.
 *
 * Fix: the original accumulated errors with `err |= ...` and then tested
 * the accumulator inside the loop, so after the first failure every
 * subsequent (even successful) rate was also reported as failing, with
 * an OR-ed error value.  Log each failing rate with its own result.
 */
struct omap_sdrc_params *nokia_get_sdram_timings(void)
{
	int err = 0;
	int i;

	for (i = 0; i < ARRAY_SIZE(nokia_timings); i++) {
		int ret;

		ret = sdrc_timings(i, nokia_timings[i].rate,
				   nokia_timings[i].data);
		if (ret) {
			pr_err("%s: error with rate %ld: %d\n", __func__,
			       nokia_timings[i].rate, ret);
			err = ret;
		}
	}

	return err ? NULL : nokia_sdrc_params;
}
| gpl-2.0 |
hehopmajieh/linux-3.4-h3 | drivers/staging/rtl8192u/ieee80211/scatterwalk.c | 7719 | 3016 | /*
* Cryptographic API.
*
* Cipher operations.
*
* Copyright (c) 2002 James Morris <jmorris@intercode.com.au>
* 2002 Adam J. Richter <adam@yggdrasil.com>
* 2004 Jean-Luc Cooke <jlcooke@certainkey.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <asm/scatterlist.h>
#include "internal.h"
#include "scatterwalk.h"
/*
 * Decide whether an nbytes access can be done in place on the mapped
 * page: it must fit within the current page's remaining length and must
 * not cross a page boundary.  Otherwise the caller must use the bounce
 * buffer `scratch'.
 */
void *scatterwalk_whichbuf(struct scatter_walk *walk, unsigned int nbytes, void *scratch)
{
	unsigned long in_page;

	if (nbytes > walk->len_this_page)
		return scratch;

	in_page = (unsigned long)walk->data & (PAGE_CACHE_SIZE - 1);
	if (in_page + nbytes > PAGE_CACHE_SIZE)
		return scratch;

	return walk->data;
}
/* Directional copy between a flat buffer and scatterlist data:
 * out != 0 copies buf -> sgdata, out == 0 copies sgdata -> buf. */
static void memcpy_dir(void *buf, void *sgdata, size_t nbytes, int out)
{
	void *dst = out ? sgdata : buf;
	void *src = out ? buf : sgdata;

	memcpy(dst, src, nbytes);
}
/*
 * Begin walking a scatterlist entry: point the walk at the entry's page
 * and clamp len_this_page to what remains of the first page.
 */
void scatterwalk_start(struct scatter_walk *walk, struct scatterlist *sg)
{
	unsigned int rest_of_page;

	walk->sg = sg;

	walk->page = sg->page;
	walk->len_this_segment = sg->length;

	/* Bytes from sg->offset up to the end of the first page */
	rest_of_page = PAGE_CACHE_SIZE - (sg->offset & (PAGE_CACHE_SIZE - 1));
	walk->len_this_page = min(sg->length, rest_of_page);
	walk->offset = sg->offset;
}
/* Map the walk's current page atomically and point walk->data at the
 * current offset within it; paired with the kunmap in scatterwalk_done()
 * / scatterwalk_copychunks(). */
void scatterwalk_map(struct scatter_walk *walk)
{
	walk->data = kmap_atomic(walk->page) + walk->offset;
}
/*
 * Finish the current page: flush it if it was written, and when `more'
 * is set advance to the next page of the segment or to the next
 * scatterlist entry.
 */
static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
				 unsigned int more)
{
	/* walk->data may be pointing the first byte of the next page;
	   however, we know we transferred at least one byte.  So,
	   walk->data - 1 will be a virtual address in the mapped page. */

	if (out)
		flush_dcache_page(walk->page);

	if (more) {
		walk->len_this_segment -= walk->len_this_page;

		if (walk->len_this_segment) {
			/* More data in this entry: move to its next page */
			walk->page++;
			walk->len_this_page = min(walk->len_this_segment,
						  (unsigned)PAGE_CACHE_SIZE);
			walk->offset = 0;
		}
		else
			scatterwalk_start(walk, sg_next(walk->sg));
	}
}
/* Unmap the current page; advance the walk when the page is exhausted or
 * the whole transfer (!more) is finished. */
void scatterwalk_done(struct scatter_walk *walk, int out, int more)
{
	crypto_kunmap(walk->data, out);
	if (walk->len_this_page == 0 || !more)
		scatterwalk_pagedone(walk, out, more);
}
/*
 * Do not call this unless the total length of all of the fragments
 * has been verified as multiple of the block size.
 *
 * Copy nbytes between `buf' and the current walk position, crossing page
 * boundaries as needed.  `out' selects the direction (non-zero: buf ->
 * scatterlist), matching memcpy_dir() and the crypto scatterwalk API.
 *
 * Fix: the body references `out' but the parameter had been dropped from
 * the signature, so this could not compile; the direction parameter is
 * restored here.
 */
int scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
			   size_t nbytes, int out)
{
	if (buf != walk->data) {
		while (nbytes > walk->len_this_page) {
			memcpy_dir(buf, walk->data, walk->len_this_page, out);
			buf += walk->len_this_page;
			nbytes -= walk->len_this_page;

			kunmap_atomic(walk->data);
			scatterwalk_pagedone(walk, out, 1);
			scatterwalk_map(walk);
		}

		memcpy_dir(buf, walk->data, nbytes, out);
	}

	walk->offset += nbytes;
	walk->len_this_page -= nbytes;
	walk->len_this_segment -= nbytes;
	return 0;
}
| gpl-2.0 |
pengdonglin137/linux-4.4_tiny4412 | drivers/isdn/hisax/st5481_b.c | 9511 | 9957 | /*
* Driver for ST5481 USB ISDN modem
*
* Author Frode Isaksen
* Copyright 2001 by Frode Isaksen <fisaksen@bewan.com>
* 2001 by Kai Germaschewski <kai.germaschewski@gmx.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
*/
#include <linux/init.h>
#include <linux/gfp.h>
#include <linux/usb.h>
#include <linux/netdevice.h>
#include <linux/bitrev.h>
#include "st5481.h"
/* Deliver an L1 -> L2 event for this B channel through its hisax
 * interface. */
static inline void B_L1L2(struct st5481_bcs *bcs, int pr, void *arg)
{
	struct hisax_if *hif = (struct hisax_if *)&bcs->b_if;

	hif->l1l2(hif, pr, arg);
}
/*
 * Encode and transmit next frame.
 *
 * Fills isoc URB buf_nr with either the pending tx_skb (bit-reversed
 * audio in transparent mode, HDLC-encoded otherwise) or idle fill
 * (0xff / HDLC flags), then submits it.  The per-URB busy bit prevents
 * double submission of the same buffer.
 */
static void usb_b_out(struct st5481_bcs *bcs, int buf_nr)
{
	struct st5481_b_out *b_out = &bcs->b_out;
	struct st5481_adapter *adapter = bcs->adapter;
	struct urb *urb;
	unsigned int packet_size, offset;
	int len, buf_size, bytes_sent;
	int i;
	struct sk_buff *skb;

	if (test_and_set_bit(buf_nr, &b_out->busy)) {
		DBG(4, "ep %d urb %d busy", (bcs->channel + 1) * 2, buf_nr);
		return;
	}
	urb = b_out->urb[buf_nr];

	// Adjust isoc buffer size according to flow state
	if (b_out->flow_event & (OUT_DOWN | OUT_UNDERRUN)) {
		buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST;
		packet_size = SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST;
		DBG(4, "B%d,adjust flow,add %d bytes", bcs->channel + 1, B_FLOW_ADJUST);
	} else if (b_out->flow_event & OUT_UP) {
		buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT - B_FLOW_ADJUST;
		packet_size = SIZE_ISO_PACKETS_B_OUT - B_FLOW_ADJUST;
		DBG(4, "B%d,adjust flow,remove %d bytes", bcs->channel + 1, B_FLOW_ADJUST);
	} else {
		buf_size = NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT;
		packet_size = 8;
	}
	b_out->flow_event = 0;

	len = 0;
	while (len < buf_size) {
		if ((skb = b_out->tx_skb)) {
			DBG_SKB(0x100, skb);
			DBG(4, "B%d,len=%d", bcs->channel + 1, skb->len);

			if (bcs->mode == L1_MODE_TRANS) {
				bytes_sent = buf_size - len;
				if (skb->len < bytes_sent)
					bytes_sent = skb->len;
				{	/* swap tx bytes to get hearable audio data */
					register unsigned char *src = skb->data;
					register unsigned char *dest = urb->transfer_buffer + len;
					register unsigned int count;
					for (count = 0; count < bytes_sent; count++)
						*dest++ = bitrev8(*src++);
				}
				len += bytes_sent;
			} else {
				/* HDLC-frame the data for non-transparent modes */
				len += isdnhdlc_encode(&b_out->hdlc_state,
						       skb->data, skb->len, &bytes_sent,
						       urb->transfer_buffer + len, buf_size-len);
			}

			skb_pull(skb, bytes_sent);

			if (!skb->len) {
				// Frame sent
				b_out->tx_skb = NULL;
				B_L1L2(bcs, PH_DATA | CONFIRM, (void *)(unsigned long) skb->truesize);
				dev_kfree_skb_any(skb);

/*				if (!(bcs->tx_skb = skb_dequeue(&bcs->sq))) { */
/*					st5481B_sched_event(bcs, B_XMTBUFREADY); */
/*				} */
			}
		} else {
			if (bcs->mode == L1_MODE_TRANS) {
				/* No data pending: send audio idle fill */
				memset(urb->transfer_buffer + len, 0xff, buf_size-len);
				len = buf_size;
			} else {
				// Send flags
				len += isdnhdlc_encode(&b_out->hdlc_state,
						       NULL, 0, &bytes_sent,
						       urb->transfer_buffer + len, buf_size-len);
			}
		}
	}

	// Prepare the URB
	for (i = 0, offset = 0; offset < len; i++) {
		urb->iso_frame_desc[i].offset = offset;
		urb->iso_frame_desc[i].length = packet_size;
		offset += packet_size;
		/* Only the first packet carries the flow adjustment */
		packet_size = SIZE_ISO_PACKETS_B_OUT;
	}
	urb->transfer_buffer_length = len;
	urb->number_of_packets = i;
	urb->dev = adapter->usb_dev;

	DBG_ISO_PACKET(0x200, urb);
	SUBMIT_URB(urb, GFP_NOIO);
}
/*
 * Start transferring (flags or data) on the B channel, once the FIFO
 * counters have been set to a non-zero value: kick both halves of the
 * double-buffered isoc pipeline.
 */
static void st5481B_start_xfer(void *context)
{
	struct st5481_bcs *bcs = context;
	int buf_nr;

	DBG(4, "B%d", bcs->channel + 1);

	for (buf_nr = 0; buf_nr < 2; buf_nr++)
		usb_b_out(bcs, buf_nr);
}
/*
 * If the adapter has only 2 LEDs, the green
 * LED will blink with a rate depending
 * on the number of channels opened.
 */
static void led_blink(struct st5481_adapter *adapter)
{
	u_char leds = adapter->leds;

	// 50 frames/sec for each channel
	/* Toggle only every 50th invocation; alternate on/off via %100 */
	if (++adapter->led_counter % 50) {
		return;
	}

	if (adapter->led_counter % 100) {
		leds |= GREEN_LED;
	} else {
		leds &= ~GREEN_LED;
	}

	st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, leds, NULL, NULL);
}
/*
 * Completion handler for the B-channel out isoc URBs: clear the buffer's
 * busy bit and resubmit it with the next frame; on unexpected errors,
 * reset the endpoint once both URBs are idle.
 */
static void usb_b_out_complete(struct urb *urb)
{
	struct st5481_bcs *bcs = urb->context;
	struct st5481_b_out *b_out = &bcs->b_out;
	struct st5481_adapter *adapter = bcs->adapter;
	int buf_nr;

	buf_nr = get_buf_nr(b_out->urb, urb);
	test_and_clear_bit(buf_nr, &b_out->busy);

	if (unlikely(urb->status < 0)) {
		switch (urb->status) {
		case -ENOENT:
		case -ESHUTDOWN:
		case -ECONNRESET:
			/* URB was unlinked/killed: do not resubmit */
			DBG(4, "urb killed status %d", urb->status);
			return; // Give up
		default:
			WARNING("urb status %d", urb->status);
			if (b_out->busy == 0) {
				st5481_usb_pipe_reset(adapter, (bcs->channel + 1) * 2 | USB_DIR_OUT, NULL, NULL);
			}
			break;
		}
	}

	usb_b_out(bcs, buf_nr);

	if (adapter->number_of_leds == 2)
		led_blink(adapter);
}
/*
 * Start or stop the transfer on the B channel.
 *
 * Switching into a non-NULL mode (re)initialises the HDLC encoder if
 * needed, resets the out pipe, enables the channel interrupts and FIFOs
 * (the FIFO-counter control message kick-starts transmission via the
 * st5481B_start_xfer() callback) and updates the LEDs.  Switching to
 * L1_MODE_NULL tears all of that down and drops any pending tx skb.
 */
static void st5481B_mode(struct st5481_bcs *bcs, int mode)
{
	struct st5481_b_out *b_out = &bcs->b_out;
	struct st5481_adapter *adapter = bcs->adapter;

	DBG(4, "B%d,mode=%d", bcs->channel + 1, mode);

	if (bcs->mode == mode)
		return;

	bcs->mode = mode;

	// Cancel all USB transfers on this B channel
	usb_unlink_urb(b_out->urb[0]);
	usb_unlink_urb(b_out->urb[1]);
	b_out->busy = 0;

	st5481_in_mode(&bcs->b_in, mode);
	if (bcs->mode != L1_MODE_NULL) {
		// Open the B channel
		if (bcs->mode != L1_MODE_TRANS) {
			u32 features = HDLC_BITREVERSE;
			if (bcs->mode == L1_MODE_HDLC_56K)
				features |= HDLC_56KBIT;
			isdnhdlc_out_init(&b_out->hdlc_state, features);
		}
		st5481_usb_pipe_reset(adapter, (bcs->channel + 1) * 2, NULL, NULL);

		// Enable B channel interrupts
		st5481_usb_device_ctrl_msg(adapter, FFMSK_B1 + (bcs->channel * 2),
					   OUT_UP + OUT_DOWN + OUT_UNDERRUN, NULL, NULL);

		// Enable B channel FIFOs
		st5481_usb_device_ctrl_msg(adapter, OUT_B1_COUNTER+(bcs->channel * 2), 32, st5481B_start_xfer, bcs);
		if (adapter->number_of_leds == 4) {
			if (bcs->channel == 0) {
				adapter->leds |= B1_LED;
			} else {
				adapter->leds |= B2_LED;
			}
		}
	} else {
		// Disable B channel interrupts
		st5481_usb_device_ctrl_msg(adapter, FFMSK_B1+(bcs->channel * 2), 0, NULL, NULL);

		// Disable B channel FIFOs
		st5481_usb_device_ctrl_msg(adapter, OUT_B1_COUNTER+(bcs->channel * 2), 0, NULL, NULL);

		if (adapter->number_of_leds == 4) {
			if (bcs->channel == 0) {
				adapter->leds &= ~B1_LED;
			} else {
				adapter->leds &= ~B2_LED;
			}
		} else {
			st5481_usb_device_ctrl_msg(adapter, GPIO_OUT, adapter->leds, NULL, NULL);
		}
		if (b_out->tx_skb) {
			dev_kfree_skb_any(b_out->tx_skb);
			b_out->tx_skb = NULL;
		}
	}
}
/*
 * Allocate the two isoc out URBs and buffers for this B channel, using
 * the endpoint from altsetting 3 of interface 0.  Returns 0 or a
 * negative error code.
 */
static int st5481_setup_b_out(struct st5481_bcs *bcs)
{
	struct usb_device *dev = bcs->adapter->usb_dev;
	struct usb_interface *intf;
	struct usb_host_interface *altsetting = NULL;
	struct usb_host_endpoint *endpoint;
	struct st5481_b_out *b_out = &bcs->b_out;

	DBG(4, "");

	intf = usb_ifnum_to_if(dev, 0);
	if (intf)
		altsetting = usb_altnum_to_altsetting(intf, 3);
	if (!altsetting)
		return -ENXIO;

	// Allocate URBs and buffers for the B channel out
	endpoint = &altsetting->endpoint[EP_B1_OUT - 1 + bcs->channel * 2];

	DBG(4, "endpoint address=%02x,packet size=%d",
	    endpoint->desc.bEndpointAddress, le16_to_cpu(endpoint->desc.wMaxPacketSize));

	// Allocate memory for 8000bytes/sec + extra bytes if underrun
	return st5481_setup_isocpipes(b_out->urb, dev,
				      usb_sndisocpipe(dev, endpoint->desc.bEndpointAddress),
				      NUM_ISO_PACKETS_B, SIZE_ISO_PACKETS_B_OUT,
				      NUM_ISO_PACKETS_B * SIZE_ISO_PACKETS_B_OUT + B_FLOW_ADJUST,
				      usb_b_out_complete, bcs);
}
/* Free the isoc out URBs/buffers allocated by st5481_setup_b_out(). */
static void st5481_release_b_out(struct st5481_bcs *bcs)
{
	struct st5481_b_out *b_out = &bcs->b_out;

	DBG(4, "");

	st5481_release_isocpipes(b_out->urb);
}
/*
 * Set up both directions of a B channel: the out pipes first, then the
 * shared "in" machinery, unwinding the out setup if the latter fails.
 */
int st5481_setup_b(struct st5481_bcs *bcs)
{
	int retval;

	DBG(4, "");

	retval = st5481_setup_b_out(bcs);
	if (retval)
		goto err;
	bcs->b_in.bufsize = HSCX_BUFMAX;
	bcs->b_in.num_packets = NUM_ISO_PACKETS_B;
	bcs->b_in.packet_size = SIZE_ISO_PACKETS_B_IN;
	/* Endpoint/counter selection depends on which B channel this is */
	bcs->b_in.ep = (bcs->channel ? EP_B2_IN : EP_B1_IN) | USB_DIR_IN;
	bcs->b_in.counter = bcs->channel ? IN_B2_COUNTER : IN_B1_COUNTER;
	bcs->b_in.adapter = bcs->adapter;
	bcs->b_in.hisax_if = &bcs->b_if.ifc;
	retval = st5481_setup_in(&bcs->b_in);
	if (retval)
		goto err_b_out;

	return 0;

err_b_out:
	st5481_release_b_out(bcs);
err:
	return retval;
}
/*
 * Release buffers and URBs for the B channels
 * (inverse of st5481_setup_b()).
 */
void st5481_release_b(struct st5481_bcs *bcs)
{
	DBG(4, "");

	st5481_release_in(&bcs->b_in);
	st5481_release_b_out(bcs);
}
/*
 * st5481_b_l2l1 is the entry point for upper layer routines that want to
 * transmit on the B channel. PH_DATA | REQUEST is a normal packet that
 * we either start transmitting (if idle) or queue (if busy).
 * PH_PULL | REQUEST can be called to request a callback message
 * (PH_PULL | CONFIRM)
 * once the link is idle. After a "pull" callback, the upper layer
 * routines can use PH_PULL | INDICATION to send data.
 */
void st5481_b_l2l1(struct hisax_if *ifc, int pr, void *arg)
{
	struct st5481_bcs *bcs = ifc->priv;
	struct sk_buff *skb = arg;
	long mode;

	DBG(4, "");

	switch (pr) {
	case PH_DATA | REQUEST:
		/* Only one frame may be pending; usb_b_out() consumes it */
		BUG_ON(bcs->b_out.tx_skb);
		bcs->b_out.tx_skb = skb;
		break;
	case PH_ACTIVATE | REQUEST:
		mode = (long) arg;
		DBG(4, "B%d,PH_ACTIVATE_REQUEST %ld", bcs->channel + 1, mode);
		st5481B_mode(bcs, mode);
		B_L1L2(bcs, PH_ACTIVATE | INDICATION, NULL);
		break;
	case PH_DEACTIVATE | REQUEST:
		DBG(4, "B%d,PH_DEACTIVATE_REQUEST", bcs->channel + 1);
		st5481B_mode(bcs, L1_MODE_NULL);
		B_L1L2(bcs, PH_DEACTIVATE | INDICATION, NULL);
		break;
	default:
		WARNING("pr %#x\n", pr);
	}
}
| gpl-2.0 |
franciscofranco/mako | drivers/media/rc/keymaps/rc-snapstream-firefly.c | 9511 | 2662 | /*
* SnapStream Firefly X10 RF remote keytable
*
* Copyright (C) 2011 Anssi Hannula <anssi.hannula@?ki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/module.h>
#include <media/rc-map.h>
/* Scancode -> Linux keycode table for the SnapStream Firefly X10 remote. */
static struct rc_map_table snapstream_firefly[] = {
	{ 0x2c, KEY_ZOOM },       /* Maximize */
	{ 0x02, KEY_CLOSE },

	{ 0x0d, KEY_1 },
	{ 0x0e, KEY_2 },
	{ 0x0f, KEY_3 },
	{ 0x10, KEY_4 },
	{ 0x11, KEY_5 },
	{ 0x12, KEY_6 },
	{ 0x13, KEY_7 },
	{ 0x14, KEY_8 },
	{ 0x15, KEY_9 },
	{ 0x17, KEY_0 },
	{ 0x16, KEY_BACK },
	{ 0x18, KEY_KPENTER },    /* ent */

	{ 0x09, KEY_VOLUMEUP },
	{ 0x08, KEY_VOLUMEDOWN },
	{ 0x0a, KEY_MUTE },
	{ 0x0b, KEY_CHANNELUP },
	{ 0x0c, KEY_CHANNELDOWN },
	{ 0x00, KEY_VENDOR },     /* firefly */

	{ 0x2e, KEY_INFO },
	{ 0x2f, KEY_OPTION },

	{ 0x1d, KEY_LEFT },
	{ 0x1f, KEY_RIGHT },
	{ 0x22, KEY_DOWN },
	{ 0x1a, KEY_UP },
	{ 0x1e, KEY_OK },

	{ 0x1c, KEY_MENU },
	{ 0x20, KEY_EXIT },

	{ 0x27, KEY_RECORD },
	{ 0x25, KEY_PLAY },
	{ 0x28, KEY_STOP },
	{ 0x24, KEY_REWIND },
	{ 0x26, KEY_FORWARD },
	{ 0x29, KEY_PAUSE },
	{ 0x2b, KEY_PREVIOUS },
	{ 0x2a, KEY_NEXT },

	{ 0x06, KEY_AUDIO },      /* Music */
	{ 0x05, KEY_IMAGES },     /* Photos */
	{ 0x04, KEY_DVD },
	{ 0x03, KEY_TV },
	{ 0x07, KEY_VIDEO },

	{ 0x01, KEY_HELP },
	{ 0x2d, KEY_MODE },       /* Mouse */

	{ 0x19, KEY_A },
	{ 0x1b, KEY_B },
	{ 0x21, KEY_C },
	{ 0x23, KEY_D },
};
/* Keymap wrapper registered with the rc-core keymap list. */
static struct rc_map_list snapstream_firefly_map = {
	.map = {
		.scan = snapstream_firefly,
		.size = ARRAY_SIZE(snapstream_firefly),
		.rc_type = RC_TYPE_OTHER,
		.name = RC_MAP_SNAPSTREAM_FIREFLY,
	}
};

/* Register/unregister the keymap at module load/unload. */
static int __init init_rc_map_snapstream_firefly(void)
{
	return rc_map_register(&snapstream_firefly_map);
}

static void __exit exit_rc_map_snapstream_firefly(void)
{
	rc_map_unregister(&snapstream_firefly_map);
}

module_init(init_rc_map_snapstream_firefly)
module_exit(exit_rc_map_snapstream_firefly)

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
| gpl-2.0 |
DSMKexec/kexec-kernel-a850s | drivers/media/rc/keymaps/rc-medion-x10.c | 9511 | 3380 | /*
* Medion X10 RF remote keytable
*
* Copyright (C) 2011 Anssi Hannula <anssi.hannula@?ki.fi>
*
* This file is based on a keytable provided by
* Jan Losinski <losinski@wh2.tu-dresden.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
#include <linux/module.h>
#include <media/rc-map.h>
/*
 * Scancode -> Linux keycode table for the Medion X10 RF remote.
 * The trailing comments give the silkscreen label of each button
 * on the physical remote.
 */
static struct rc_map_table medion_x10[] = {
	{ 0x2c, KEY_TV },    /* TV */
	{ 0x2d, KEY_VCR },   /* VCR */
	{ 0x04, KEY_DVD },   /* DVD */
	{ 0x06, KEY_AUDIO }, /* MUSIC */
	{ 0x2e, KEY_RADIO },     /* RADIO */
	{ 0x05, KEY_DIRECTORY }, /* PHOTO */
	{ 0x2f, KEY_INFO },      /* TV-PREVIEW */
	{ 0x30, KEY_LIST },      /* CHANNEL-LST */
	{ 0x1b, KEY_SETUP }, /* SETUP */
	{ 0x31, KEY_VIDEO }, /* VIDEO DESKTOP */
	{ 0x08, KEY_VOLUMEDOWN },  /* VOL - */
	{ 0x09, KEY_VOLUMEUP },    /* VOL + */
	{ 0x0b, KEY_CHANNELUP },   /* CHAN + */
	{ 0x0c, KEY_CHANNELDOWN }, /* CHAN - */
	{ 0x00, KEY_MUTE },        /* MUTE */
	{ 0x32, KEY_RED }, /* red */
	{ 0x33, KEY_GREEN }, /* green */
	{ 0x34, KEY_YELLOW }, /* yellow */
	{ 0x35, KEY_BLUE }, /* blue */
	{ 0x16, KEY_TEXT }, /* TXT */
	{ 0x0d, KEY_1 },
	{ 0x0e, KEY_2 },
	{ 0x0f, KEY_3 },
	{ 0x10, KEY_4 },
	{ 0x11, KEY_5 },
	{ 0x12, KEY_6 },
	{ 0x13, KEY_7 },
	{ 0x14, KEY_8 },
	{ 0x15, KEY_9 },
	{ 0x17, KEY_0 },
	{ 0x1c, KEY_SEARCH }, /* TV/RAD, CH SRC */
	{ 0x20, KEY_DELETE }, /* DELETE */
	{ 0x36, KEY_KEYBOARD }, /* RENAME */
	{ 0x18, KEY_SCREEN },   /* SNAPSHOT */
	{ 0x1a, KEY_UP },    /* up */
	{ 0x22, KEY_DOWN },  /* down */
	{ 0x1d, KEY_LEFT },  /* left */
	{ 0x1f, KEY_RIGHT }, /* right */
	{ 0x1e, KEY_OK },    /* OK */
	{ 0x37, KEY_SELECT }, /* ACQUIRE IMAGE */
	{ 0x38, KEY_EDIT },   /* EDIT IMAGE */
	{ 0x24, KEY_REWIND },   /* rewind  (<<) */
	{ 0x25, KEY_PLAY },     /* play    ( >) */
	{ 0x26, KEY_FORWARD },  /* forward (>>) */
	{ 0x27, KEY_RECORD },   /* record  ( o) */
	{ 0x28, KEY_STOP },     /* stop    ([]) */
	{ 0x29, KEY_PAUSE },    /* pause   ('') */
	{ 0x21, KEY_PREVIOUS },        /* prev */
	{ 0x39, KEY_SWITCHVIDEOMODE }, /* F SCR */
	{ 0x23, KEY_NEXT },            /* next */
	{ 0x19, KEY_MENU },            /* MENU */
	{ 0x3a, KEY_LANGUAGE },        /* AUDIO */
	{ 0x02, KEY_POWER }, /* POWER */
};
/* Registration record binding the Medion X10 table to the RC core. */
static struct rc_map_list medion_x10_map = {
	.map = {
		.scan = medion_x10,
		.size = ARRAY_SIZE(medion_x10),
		.rc_type = RC_TYPE_OTHER,
		.name = RC_MAP_MEDION_X10,
	}
};
/* Register the Medion X10 keymap with the RC core at load time. */
static int __init init_rc_map_medion_x10(void)
{
	return rc_map_register(&medion_x10_map);
}
/* Unregister the keymap when the module is removed. */
static void __exit exit_rc_map_medion_x10(void)
{
	rc_map_unregister(&medion_x10_map);
}
module_init(init_rc_map_medion_x10)
module_exit(exit_rc_map_medion_x10)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Anssi Hannula <anssi.hannula@iki.fi>");
| gpl-2.0 |
jollaman999/jolla-kernel_G_Gen3 | arch/sh/kernel/asm-offsets.c | 11815 | 2670 | /*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#include <linux/stddef.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
#include <asm/thread_info.h>
#include <asm/suspend.h>
/*
 * Emit DEFINE() asm statements for every structure offset the SH
 * assembly code needs.  The resulting assembler output is scraped by
 * kbuild to produce asm-offsets.h, so the DEFINE lines themselves must
 * stay exactly as the build machinery expects.
 */
int main(void)
{
	/* offsets into the thread_info struct */
	DEFINE(TI_TASK,		offsetof(struct thread_info, task));
	DEFINE(TI_EXEC_DOMAIN,	offsetof(struct thread_info, exec_domain));
	DEFINE(TI_FLAGS,	offsetof(struct thread_info, flags));
	DEFINE(TI_CPU,		offsetof(struct thread_info, cpu));
	DEFINE(TI_PRE_COUNT,	offsetof(struct thread_info, preempt_count));
	DEFINE(TI_RESTART_BLOCK,offsetof(struct thread_info, restart_block));
	DEFINE(TI_SIZE,		sizeof(struct thread_info));
#ifdef CONFIG_HIBERNATION
	/* page backup entries used by suspend-to-disk */
	DEFINE(PBE_ADDRESS, offsetof(struct pbe, address));
	DEFINE(PBE_ORIG_ADDRESS, offsetof(struct pbe, orig_address));
	DEFINE(PBE_NEXT, offsetof(struct pbe, next));
	DEFINE(SWSUSP_ARCH_REGS_SIZE, sizeof(struct swsusp_arch_regs));
#endif
	/* sleep-mode save area consumed by the low-level suspend code */
	DEFINE(SH_SLEEP_MODE, offsetof(struct sh_sleep_data, mode));
	DEFINE(SH_SLEEP_SF_PRE, offsetof(struct sh_sleep_data, sf_pre));
	DEFINE(SH_SLEEP_SF_POST, offsetof(struct sh_sleep_data, sf_post));
	DEFINE(SH_SLEEP_RESUME, offsetof(struct sh_sleep_data, resume));
	DEFINE(SH_SLEEP_VBR, offsetof(struct sh_sleep_data, vbr));
	DEFINE(SH_SLEEP_SPC, offsetof(struct sh_sleep_data, spc));
	DEFINE(SH_SLEEP_SR, offsetof(struct sh_sleep_data, sr));
	DEFINE(SH_SLEEP_SP, offsetof(struct sh_sleep_data, sp));
	DEFINE(SH_SLEEP_BASE_ADDR, offsetof(struct sh_sleep_data, addr));
	DEFINE(SH_SLEEP_BASE_DATA, offsetof(struct sh_sleep_data, data));
	/* register snapshot stored inside struct sh_sleep_data */
	DEFINE(SH_SLEEP_REG_STBCR, offsetof(struct sh_sleep_regs, stbcr));
	DEFINE(SH_SLEEP_REG_BAR, offsetof(struct sh_sleep_regs, bar));
	DEFINE(SH_SLEEP_REG_PTEH, offsetof(struct sh_sleep_regs, pteh));
	DEFINE(SH_SLEEP_REG_PTEL, offsetof(struct sh_sleep_regs, ptel));
	DEFINE(SH_SLEEP_REG_TTB, offsetof(struct sh_sleep_regs, ttb));
	DEFINE(SH_SLEEP_REG_TEA, offsetof(struct sh_sleep_regs, tea));
	DEFINE(SH_SLEEP_REG_MMUCR, offsetof(struct sh_sleep_regs, mmucr));
	DEFINE(SH_SLEEP_REG_PTEA, offsetof(struct sh_sleep_regs, ptea));
	DEFINE(SH_SLEEP_REG_PASCR, offsetof(struct sh_sleep_regs, pascr));
	DEFINE(SH_SLEEP_REG_IRMCR, offsetof(struct sh_sleep_regs, irmcr));
	DEFINE(SH_SLEEP_REG_CCR, offsetof(struct sh_sleep_regs, ccr));
	DEFINE(SH_SLEEP_REG_RAMCR, offsetof(struct sh_sleep_regs, ramcr));
	return 0;
}
| gpl-2.0 |
multipath-tcp/mptcp_3.12.x | drivers/net/ethernet/chelsio/cxgb/mv88x201x.c | 12327 | 8786 | /*****************************************************************************
* *
* File: mv88x201x.c *
* $Revision: 1.12 $ *
* $Date: 2005/04/15 19:27:14 $ *
* Description: *
* Marvell PHY (mv88x201x) functionality. *
* part of the Chelsio 10Gb Ethernet Driver. *
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License, version 2, as *
* published by the Free Software Foundation. *
* *
* You should have received a copy of the GNU General Public License along *
* with this program; if not, write to the Free Software Foundation, Inc., *
* 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. *
* *
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR IMPLIED *
* WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED WARRANTIES OF *
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. *
* *
* http://www.chelsio.com *
* *
* Copyright (c) 2003 - 2005 Chelsio Communications, Inc. *
* All rights reserved. *
* *
* Maintainers: maintainers@chelsio.com *
* *
* Authors: Dimitrios Michailidis <dm@chelsio.com> *
* Tina Yang <tainay@chelsio.com> *
* Felix Marti <felix@chelsio.com> *
* Scott Bardone <sbardone@chelsio.com> *
* Kurt Ottaway <kottaway@chelsio.com> *
* Frank DiMambro <frank@chelsio.com> *
* *
* History: *
* *
****************************************************************************/
#include "cphy.h"
#include "elmer0.h"
/*
 * The 88x2010 Rev C. requires some link status registers to be read
 * twice in order to get the right values. Future revisions will fix
 * this problem and then this macro can disappear.
 */
#define MV88x2010_LINK_STATUS_BUGS 1
/* One-time LED setup; maps LED control to another register so the
 * LEDs can later be driven by led_link().  Always returns 0. */
static int led_init(struct cphy *cphy)
{
	/* Setup the LED registers so we can turn on/off.
	 * Writing these bits maps control to another
	 * register. mmd(0x1) addr(0x7)
	 */
	cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8304, 0xdddd);
	return 0;
}
/*
 * Drive the link LED from bit 0 of @do_enable: read the current LED
 * control value, update only the link-enable bit and write it back.
 * Always returns 0.
 */
static int led_link(struct cphy *cphy, u32 do_enable)
{
	u32 led = 0;
#define LINK_ENABLE_BIT 0x1

	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, &led);
	if (do_enable & LINK_ENABLE_BIT)
		led |= LINK_ENABLE_BIT;
	else
		led &= ~LINK_ENABLE_BIT;
	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_CTRL2, led);
	return 0;
}
/* Port Reset */
/* Intentionally a no-op: a full chip reset (mv88x201x_phy_reset) is
 * used instead, so a per-port register reset is unnecessary. */
static int mv88x201x_reset(struct cphy *cphy, int wait)
{
	/* This can be done through registers. It is not required since
	 * a full chip reset is used.
	 */
	return 0;
}
/*
 * Enable link-status (LASI) interrupts at the PHY and, on ASIC
 * platforms, unmask the Marvell interrupt line in Elmer0.
 */
static int mv88x201x_interrupt_enable(struct cphy *cphy)
{
	u32 elmer;

	/* Turn on PHY LASI link-alarm interrupts. */
	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL,
			MDIO_PMA_LASI_LSALARM);

	/* Route the PHY interrupt through Elmer0 GP bit 6. */
	if (t1_is_asic(cphy->adapter)) {
		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE,
			     elmer | ELMER0_GP_BIT6);
	}
	return 0;
}
/*
 * Disable link-status (LASI) interrupts at the PHY and, on ASIC
 * platforms, mask the Marvell interrupt line in Elmer0.
 */
static int mv88x201x_interrupt_disable(struct cphy *cphy)
{
	u32 elmer;

	/* Turn off all PHY LASI interrupts. */
	cphy_mdio_write(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_CTRL, 0x0);

	/* Mask the PHY interrupt in Elmer0 (GP bit 6). */
	if (t1_is_asic(cphy->adapter)) {
		t1_tpi_read(cphy->adapter, A_ELMER0_INT_ENABLE, &elmer);
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_ENABLE,
			     elmer & ~ELMER0_GP_BIT6);
	}
	return 0;
}
/*
 * Clear pending PHY and Elmer0 interrupt state.
 *
 * The read ordering below is deliberate: on Rev C parts several
 * latched status registers only clear after being read twice, and
 * MDIO_STAT1 must be read after the LASI status registers (see the
 * MV88x2010_LINK_STATUS_BUGS comments).  Do not reorder these reads.
 * Always returns 0.
 */
static int mv88x201x_interrupt_clear(struct cphy *cphy)
{
	u32 elmer;
	u32 val;

#ifdef MV88x2010_LINK_STATUS_BUGS
	/* Required to read twice before clear takes affect. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);
	/* Read this register after the others above it else
	 * the register doesn't clear correctly.
	 */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
#endif
	/* Clear link status. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &val);
	/* Clear PHY LASI interrupts. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT, &val);

#ifdef MV88x2010_LINK_STATUS_BUGS
	/* Do it again. */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_RXSTAT, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_TXSTAT, &val);
#endif
	/* Clear Marvell interrupts through Elmer0. */
	if (t1_is_asic(cphy->adapter)) {
		t1_tpi_read(cphy->adapter, A_ELMER0_INT_CAUSE, &elmer);
		elmer |= ELMER0_GP_BIT6;
		t1_tpi_write(cphy->adapter, A_ELMER0_INT_CAUSE, elmer);
	}
	return 0;
}
/* Top-level PHY interrupt handler: acknowledge everything and report
 * a link change, since link-change is the only interrupt enabled. */
static int mv88x201x_interrupt_handler(struct cphy *cphy)
{
	/* Clear interrupts */
	mv88x201x_interrupt_clear(cphy);

	/* We have only enabled link change interrupts and so
	 * cphy_cause must be a link change interrupt.
	 */
	return cphy_cause_link_change;
}
/* Loopback is not implemented for this PHY; report success so the
 * generic code can proceed. */
static int mv88x201x_set_loopback(struct cphy *cphy, int on)
{
	return 0;
}
/*
 * Report link state and fixed port capabilities.
 *
 * Any of the output pointers may be NULL when the caller is not
 * interested in that value.  Speed, duplex and flow control are
 * constant for this 10G full-duplex part.  Always returns 0.
 */
static int mv88x201x_get_link_status(struct cphy *cphy, int *link_ok,
				     int *speed, int *duplex, int *fc)
{
	if (link_ok) {
		u32 status = 0;

		/* Latch the PMA/PMD link status bit. */
		cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT1, &status);
		*link_ok = ((status & MDIO_STAT1_LSTATUS) ==
			    MDIO_STAT1_LSTATUS);
		/* Mirror the link state on the LED. */
		led_link(cphy, *link_ok);
	}
	if (speed)
		*speed = SPEED_10000;
	if (duplex)
		*duplex = DUPLEX_FULL;
	if (fc)
		*fc = PAUSE_RX | PAUSE_TX;
	return 0;
}
/* Release the cphy allocated by mv88x201x_phy_create(). */
static void mv88x201x_destroy(struct cphy *cphy)
{
	kfree(cphy);
}
/* Operation table exported through t1_mv88x201x_ops below. */
static struct cphy_ops mv88x201x_ops = {
	.destroy           = mv88x201x_destroy,
	.reset             = mv88x201x_reset,
	.interrupt_enable  = mv88x201x_interrupt_enable,
	.interrupt_disable = mv88x201x_interrupt_disable,
	.interrupt_clear   = mv88x201x_interrupt_clear,
	.interrupt_handler = mv88x201x_interrupt_handler,
	.get_link_status   = mv88x201x_get_link_status,
	.set_loopback      = mv88x201x_set_loopback,
	.mmds              = (MDIO_DEVS_PMAPMD | MDIO_DEVS_PCS |
			      MDIO_DEVS_PHYXS | MDIO_DEVS_WIS),
};
/*
 * Allocate and initialize a cphy for the mv88x201x.
 *
 * The MDIO accesses below are an ordered hardware bring-up sequence
 * (enable XFP clock, work around the latched-link-status erratum,
 * set up the LEDs); keep their order.  Returns NULL on allocation
 * failure.
 */
static struct cphy *mv88x201x_phy_create(struct net_device *dev, int phy_addr,
					 const struct mdio_ops *mdio_ops)
{
	u32 val;
	struct cphy *cphy = kzalloc(sizeof(*cphy), GFP_KERNEL);

	if (!cphy)
		return NULL;

	cphy_init(cphy, dev, phy_addr, &mv88x201x_ops, mdio_ops);

	/* Commands the PHY to enable XFP's clock. */
	cphy_mdio_read(cphy, MDIO_MMD_PCS, 0x8300, &val);
	cphy_mdio_write(cphy, MDIO_MMD_PCS, 0x8300, val | 1);

	/* Clear link status. Required because of a bug in the PHY.  */
	cphy_mdio_read(cphy, MDIO_MMD_PMAPMD, MDIO_STAT2, &val);
	cphy_mdio_read(cphy, MDIO_MMD_PCS, MDIO_STAT2, &val);

	/* Allows for Link,Ack LED turn on/off */
	led_init(cphy);
	return cphy;
}
/* Chip Reset */
/*
 * Full chip reset via Elmer0 GPO: pulse the reset line (bit 2) low
 * then high with generous settle delays, then enable the laser
 * (bit 15).  The msleep/udelay ordering is part of the hardware
 * sequence; do not shorten or reorder it.  Always returns 0.
 */
static int mv88x201x_phy_reset(adapter_t *adapter)
{
	u32 val;

	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val &= ~4;
	t1_tpi_write(adapter, A_ELMER0_GPO, val);
	msleep(100);

	t1_tpi_write(adapter, A_ELMER0_GPO, val | 4);
	msleep(1000);

	/* Now lets enable the Laser. Delay 100us */
	t1_tpi_read(adapter, A_ELMER0_GPO, &val);
	val |= 0x8000;
	t1_tpi_write(adapter, A_ELMER0_GPO, val);
	udelay(100);
	return 0;
}
/* Entry points used by the common cxgb PHY layer for this part. */
const struct gphy t1_mv88x201x_ops = {
	.create = mv88x201x_phy_create,
	.reset	= mv88x201x_phy_reset
};
| gpl-2.0 |
blueskycoco/rt-thread | bsp/stm32/libraries/STM32F7xx_HAL/STM32F7xx_HAL_Driver/Src/stm32f7xx_hal_dac_ex.c | 40 | 14511 | /**
******************************************************************************
* @file stm32f7xx_hal_dac_ex.c
* @author MCD Application Team
* @brief Extended DAC HAL module driver.
* This file provides firmware functions to manage the following
* functionalities of DAC extension peripheral:
* + Extended features functions
*
*
@verbatim
==============================================================================
##### How to use this driver #####
==============================================================================
[..]
(+) When Dual mode is enabled (i.e DAC Channel1 and Channel2 are used simultaneously) :
Use HAL_DACEx_DualGetValue() to get digital data to be converted and use
HAL_DACEx_DualSetValue() to set digital value to converted simultaneously in Channel 1 and Channel 2.
(+) Use HAL_DACEx_TriangleWaveGenerate() to generate Triangle signal.
(+) Use HAL_DACEx_NoiseWaveGenerate() to generate Noise signal.
@endverbatim
******************************************************************************
* @attention
*
* <h2><center>© Copyright (c) 2017 STMicroelectronics.
* All rights reserved.</center></h2>
*
* This software component is licensed by ST under BSD 3-Clause license,
* the "License"; You may not use this file except in compliance with the
* License. You may obtain a copy of the License at:
* opensource.org/licenses/BSD-3-Clause
*
******************************************************************************
*/
/* Includes ------------------------------------------------------------------*/
#include "stm32f7xx_hal.h"
/** @addtogroup STM32F7xx_HAL_Driver
* @{
*/
/** @defgroup DACEx DACEx
* @brief DAC driver modules
* @{
*/
#ifdef HAL_DAC_MODULE_ENABLED
/* Private typedef -----------------------------------------------------------*/
/* Private define ------------------------------------------------------------*/
/* Private macro -------------------------------------------------------------*/
/* Private variables ---------------------------------------------------------*/
/* Private function prototypes -----------------------------------------------*/
/* Private functions ---------------------------------------------------------*/
/* Exported functions --------------------------------------------------------*/
/** @defgroup DACEx_Exported_Functions DAC Exported Functions
* @{
*/
/** @defgroup DACEx_Exported_Functions_Group1 Extended features functions
* @brief Extended features functions
*
@verbatim
==============================================================================
##### Extended features functions #####
==============================================================================
[..] This section provides functions allowing to:
(+) Start conversion.
(+) Stop conversion.
(+) Start conversion and enable DMA transfer.
(+) Stop conversion and disable DMA transfer.
(+) Get result of conversion.
(+) Get result of dual mode conversion.
@endverbatim
* @{
*/
/**
  * @brief  Returns the last data output value of both DAC channels.
  * @param  hdac: pointer to a DAC_HandleTypeDef structure that contains
  *         the configuration information for the specified DAC.
  * @retval Combined data output value: Channel1 in bits [15:0],
  *         Channel2 in bits [31:16].
  */
uint32_t HAL_DACEx_DualGetValue(DAC_HandleTypeDef* hdac)
{
  uint32_t result;

  /* Pack both data output registers into one word. */
  result = hdac->Instance->DOR1 | (hdac->Instance->DOR2 << 16);

  return result;
}
/**
  * @brief  Enables the triangle wave generation on the selected DAC channel.
  * @param  hdac: pointer to a DAC_HandleTypeDef structure that contains
  *         the configuration information for the specified DAC.
  * @param  Channel: The selected DAC channel.
  *          This parameter can be one of the following values:
  *            @arg DAC_CHANNEL_1: DAC Channel1 selected
  *            @arg DAC_CHANNEL_2: DAC Channel2 selected
  * @param  Amplitude: Select max triangle amplitude.
  *          This parameter can be one of the following values:
  *            @arg DAC_TRIANGLEAMPLITUDE_1: Select max triangle amplitude of 1
  *            @arg DAC_TRIANGLEAMPLITUDE_3: Select max triangle amplitude of 3
  *            @arg DAC_TRIANGLEAMPLITUDE_7: Select max triangle amplitude of 7
  *            @arg DAC_TRIANGLEAMPLITUDE_15: Select max triangle amplitude of 15
  *            @arg DAC_TRIANGLEAMPLITUDE_31: Select max triangle amplitude of 31
  *            @arg DAC_TRIANGLEAMPLITUDE_63: Select max triangle amplitude of 63
  *            @arg DAC_TRIANGLEAMPLITUDE_127: Select max triangle amplitude of 127
  *            @arg DAC_TRIANGLEAMPLITUDE_255: Select max triangle amplitude of 255
  *            @arg DAC_TRIANGLEAMPLITUDE_511: Select max triangle amplitude of 511
  *            @arg DAC_TRIANGLEAMPLITUDE_1023: Select max triangle amplitude of 1023
  *            @arg DAC_TRIANGLEAMPLITUDE_2047: Select max triangle amplitude of 2047
  *            @arg DAC_TRIANGLEAMPLITUDE_4095: Select max triangle amplitude of 4095
  * @note   Channel doubles as the bit offset into DAC_CR, so the WAVE/MAMP
  *         fields of the selected channel are the ones modified.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_DACEx_TriangleWaveGenerate(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t Amplitude)
{
  /* Check the parameters */
  assert_param(IS_DAC_CHANNEL(Channel));
  assert_param(IS_DAC_LFSR_UNMASK_TRIANGLE_AMPLITUDE(Amplitude));

  /* Process locked */
  __HAL_LOCK(hdac);

  /* Change DAC state */
  hdac->State = HAL_DAC_STATE_BUSY;

  /* Enable the selected wave generation for the selected DAC channel */
  /* WAVE1_1 selects triangle generation; Amplitude sets the MAMP field. */
  MODIFY_REG(hdac->Instance->CR, (DAC_CR_WAVE1 | DAC_CR_MAMP1) << Channel, (DAC_CR_WAVE1_1 | Amplitude) << Channel);

  /* Change DAC state */
  hdac->State = HAL_DAC_STATE_READY;

  /* Process unlocked */
  __HAL_UNLOCK(hdac);

  /* Return function status */
  return HAL_OK;
}
/**
  * @brief  Enables the noise (LFSR) wave generation on the selected DAC channel.
  * @param  hdac: pointer to a DAC_HandleTypeDef structure that contains
  *         the configuration information for the specified DAC.
  * @param  Channel: The selected DAC channel.
  *          This parameter can be one of the following values:
  *            @arg DAC_CHANNEL_1: DAC Channel1 selected
  *            @arg DAC_CHANNEL_2: DAC Channel2 selected
  * @param  Amplitude: Unmask DAC channel LFSR for noise wave generation.
  *          This parameter can be one of the following values:
  *            @arg DAC_LFSRUNMASK_BIT0: Unmask DAC channel LFSR bit0 for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS1_0: Unmask DAC channel LFSR bit[1:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS2_0: Unmask DAC channel LFSR bit[2:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS3_0: Unmask DAC channel LFSR bit[3:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS4_0: Unmask DAC channel LFSR bit[4:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS5_0: Unmask DAC channel LFSR bit[5:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS6_0: Unmask DAC channel LFSR bit[6:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS7_0: Unmask DAC channel LFSR bit[7:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS8_0: Unmask DAC channel LFSR bit[8:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS9_0: Unmask DAC channel LFSR bit[9:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS10_0: Unmask DAC channel LFSR bit[10:0] for noise wave generation
  *            @arg DAC_LFSRUNMASK_BITS11_0: Unmask DAC channel LFSR bit[11:0] for noise wave generation
  * @note   The same assert macro validates both LFSR-unmask and triangle
  *         amplitude values, as they share the MAMP field encoding.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_DACEx_NoiseWaveGenerate(DAC_HandleTypeDef* hdac, uint32_t Channel, uint32_t Amplitude)
{
  /* Check the parameters */
  assert_param(IS_DAC_CHANNEL(Channel));
  assert_param(IS_DAC_LFSR_UNMASK_TRIANGLE_AMPLITUDE(Amplitude));

  /* Process locked */
  __HAL_LOCK(hdac);

  /* Change DAC state */
  hdac->State = HAL_DAC_STATE_BUSY;

  /* Enable the selected wave generation for the selected DAC channel */
  /* WAVE1_0 selects noise generation; Amplitude sets the LFSR unmask. */
  MODIFY_REG(hdac->Instance->CR, (DAC_CR_WAVE1 | DAC_CR_MAMP1) << Channel, (DAC_CR_WAVE1_0 | Amplitude) << Channel);

  /* Change DAC state */
  hdac->State = HAL_DAC_STATE_READY;

  /* Process unlocked */
  __HAL_UNLOCK(hdac);

  /* Return function status */
  return HAL_OK;
}
/**
  * @brief  Set the specified data holding register value for dual DAC channel.
  * @param  hdac: pointer to a DAC_HandleTypeDef structure that contains
  *         the configuration information for the specified DAC.
  * @param  Alignment: Specifies the data alignment for dual channel DAC.
  *          This parameter can be one of the following values:
  *            DAC_ALIGN_8B_R: 8bit right data alignment selected
  *            DAC_ALIGN_12B_L: 12bit left data alignment selected
  *            DAC_ALIGN_12B_R: 12bit right data alignment selected
  * @param  Data1: Data for DAC Channel2 to be loaded in the selected data holding register.
  * @param  Data2: Data for DAC Channel1 to be loaded in the selected data holding register.
  * @note   In dual mode, a unique register access is required to write in both
  *          DAC channels at the same time.
  * @retval HAL status
  */
HAL_StatusTypeDef HAL_DACEx_DualSetValue(DAC_HandleTypeDef* hdac, uint32_t Alignment, uint32_t Data1, uint32_t Data2)
{
  uint32_t data;
  uint32_t regaddr;

  /* Check the parameters */
  assert_param(IS_DAC_ALIGN(Alignment));
  assert_param(IS_DAC_DATA(Data1));
  assert_param(IS_DAC_DATA(Data2));

  /* In 8-bit right alignment Data2 occupies bits [15:8]; in the
     12-bit modes it occupies bits [31:16]. */
  if (Alignment == DAC_ALIGN_8B_R)
  {
    data = ((uint32_t)Data2 << 8) | Data1;
  }
  else
  {
    data = ((uint32_t)Data2 << 16) | Data1;
  }

  /* Write the dual-channel data holding register selected by the
     alignment offset. */
  regaddr = (uint32_t)hdac->Instance + DAC_DHR12RD_ALIGNMENT(Alignment);
  *(__IO uint32_t *)regaddr = data;

  /* Return function status */
  return HAL_OK;
}
/**
* @}
*/
/**
  * @brief  Conversion complete callback in non blocking mode for Channel2.
  * @param  hdac: pointer to a DAC_HandleTypeDef structure that contains
  *         the configuration information for the specified DAC.
  * @note   Weak default; override in user code instead of editing here.
  * @retval None
  */
__weak void HAL_DACEx_ConvCpltCallbackCh2(DAC_HandleTypeDef* hdac)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hdac);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_DAC_ConvCpltCallbackCh2 could be implemented in the user file
   */
}
/**
  * @brief  Conversion half DMA transfer callback in non blocking mode for Channel2.
  * @param  hdac: pointer to a DAC_HandleTypeDef structure that contains
  *         the configuration information for the specified DAC.
  * @note   Weak default; override in user code instead of editing here.
  * @retval None
  */
__weak void HAL_DACEx_ConvHalfCpltCallbackCh2(DAC_HandleTypeDef* hdac)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hdac);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_DACEx_ConvHalfCpltCallbackCh2 could be implemented in the user file
   */
}
/**
  * @brief  Error DAC callback for Channel2.
  * @param  hdac: pointer to a DAC_HandleTypeDef structure that contains
  *         the configuration information for the specified DAC.
  * @note   Weak default; override in user code instead of editing here.
  * @retval None
  */
__weak void HAL_DACEx_ErrorCallbackCh2(DAC_HandleTypeDef *hdac)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hdac);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_DACEx_ErrorCallbackCh2 could be implemented in the user file
   */
}
/**
  * @brief  DMA underrun DAC callback for channel2.
  * @param  hdac: pointer to a DAC_HandleTypeDef structure that contains
  *         the configuration information for the specified DAC.
  * @note   Weak default; override in user code instead of editing here.
  * @retval None
  */
__weak void HAL_DACEx_DMAUnderrunCallbackCh2(DAC_HandleTypeDef *hdac)
{
  /* Prevent unused argument(s) compilation warning */
  UNUSED(hdac);

  /* NOTE : This function should not be modified, when the callback is needed,
            the HAL_DACEx_DMAUnderrunCallbackCh2 could be implemented in the user file
   */
}
/**
  * @brief  DMA conversion complete callback.
  * @param  hdma: pointer to a DMA_HandleTypeDef structure that contains
  *         the configuration information for the specified DMA module.
  * @note   Dispatches to the registered or weak channel-2 completion
  *         callback, then marks the DAC ready.
  * @retval None
  */
void DAC_DMAConvCpltCh2(DMA_HandleTypeDef *hdma)
{
  /* Recover the owning DAC handle from the DMA handle. */
  DAC_HandleTypeDef* hdac = ( DAC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent;

#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1)
  hdac->ConvCpltCallbackCh2(hdac);
#else
  HAL_DACEx_ConvCpltCallbackCh2(hdac);
#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */

  hdac->State= HAL_DAC_STATE_READY;
}
/**
  * @brief  DMA half transfer complete callback.
  * @param  hdma: pointer to a DMA_HandleTypeDef structure that contains
  *         the configuration information for the specified DMA module.
  * @note   Dispatches to the registered or weak channel-2 half-complete
  *         callback; the DAC state is left unchanged.
  * @retval None
  */
void DAC_DMAHalfConvCpltCh2(DMA_HandleTypeDef *hdma)
{
  /* Recover the owning DAC handle from the DMA handle. */
  DAC_HandleTypeDef* hdac = ( DAC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent;

  /* Conversion complete callback */
#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1)
  hdac->ConvHalfCpltCallbackCh2(hdac);
#else
  HAL_DACEx_ConvHalfCpltCallbackCh2(hdac);
#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */
}
/**
  * @brief  DMA error callback.
  * @param  hdma: pointer to a DMA_HandleTypeDef structure that contains
  *         the configuration information for the specified DMA module.
  * @note   Records the DMA error code, dispatches to the registered or
  *         weak channel-2 error callback, then marks the DAC ready.
  * @retval None
  */
void DAC_DMAErrorCh2(DMA_HandleTypeDef *hdma)
{
  /* Recover the owning DAC handle from the DMA handle. */
  DAC_HandleTypeDef* hdac = ( DAC_HandleTypeDef* )((DMA_HandleTypeDef* )hdma)->Parent;

  /* Set DAC error code to DMA error */
  hdac->ErrorCode |= HAL_DAC_ERROR_DMA;

#if (USE_HAL_DAC_REGISTER_CALLBACKS == 1)
  hdac->ErrorCallbackCh2(hdac);
#else
  HAL_DACEx_ErrorCallbackCh2(hdac);
#endif /* USE_HAL_DAC_REGISTER_CALLBACKS */

  hdac->State= HAL_DAC_STATE_READY;
}
/**
* @}
*/
#endif /* HAL_DAC_MODULE_ENABLED */
/**
* @}
*/
/**
* @}
*/
/************************ (C) COPYRIGHT STMicroelectronics *****END OF FILE****/
| gpl-2.0 |
mohankr/android_kernel_samsung_msm8x60 | arch/arm/mach-msm/rmt_storage_client.c | 40 | 46208 | /* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/miscdevice.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/wakelock.h>
#include <linux/rmt_storage_client.h>
#include <linux/debugfs.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <mach/msm_rpcrouter.h>
#ifdef CONFIG_MSM_SDIO_SMEM
#include <mach/sdio_smem.h>
#endif
#include "smd_private.h"
/*
 * Event codes exchanged with the modem-side remote storage server.
 * The trailing identifier declares a (seemingly unused) global of this
 * anonymous enum type; kept for source compatibility.
 */
enum {
	RMT_STORAGE_EVNT_OPEN = 0,
	RMT_STORAGE_EVNT_CLOSE,
	RMT_STORAGE_EVNT_WRITE_BLOCK,
	RMT_STORAGE_EVNT_GET_DEV_ERROR,
	RMT_STORAGE_EVNT_WRITE_IOVEC,
	RMT_STORAGE_EVNT_SEND_USER_DATA,
	RMT_STORAGE_EVNT_READ_IOVEC,
	RMT_STORAGE_EVNT_ALLOC_RMT_BUF,
} rmt_storage_event;
/* Per-client shared RAMFS region descriptor published in SMEM. */
struct shared_ramfs_entry {
	uint32_t client_id;	/* Client id to uniquely identify a client */
	uint32_t base_addr;	/* Base address of shared RAMFS memory */
	uint32_t size;		/* Size of the shared RAMFS memory */
	uint32_t client_sts;	/* This will be initialized to 1 when
				   remote storage RPC client is ready
				   to process requests */
};
/* Top-level RAMFS directory in SMEM; identified by magic_id/version. */
struct shared_ramfs_table {
	uint32_t magic_id;	/* Identify RAMFS details in SMEM */
	uint32_t version;	/* Version of shared_ramfs_table */
	uint32_t entries;	/* Total number of valid entries */
	/* List all entries */
	struct shared_ramfs_entry ramfs_entry[MAX_RAMFS_TBL_ENTRIES];
};
/* Driver-global state: clients, shared memory regions and the event
 * queue drained by the userspace daemon. */
struct rmt_storage_client_info {
	unsigned long cids;		/* bitmap of allocated client ids */
	struct list_head shrd_mem_list; /* List of shared memory entries */
	int open_excl;			/* exclusive-open flag for the dev node */
	atomic_t total_events;
	wait_queue_head_t event_q;
	struct list_head event_list;
	struct list_head client_list;	/* List of remote storage clients */
	/* Lock to protect lists */
	spinlock_t lock;
	/* Wakelock to be acquired when processing requests from modem */
	struct wake_lock wlock;
	atomic_t wcount;
	struct workqueue_struct *workq;
};
/* An event queued for delivery, linked into event_list. */
struct rmt_storage_kevent {
	struct list_head list;
	struct rmt_storage_event event;
};
/* Remote storage server on modem */
struct rmt_storage_srv {
	uint32_t prog;			/* RPC program number */
	int sync_token;
	struct platform_driver plat_drv;
	struct msm_rpc_client *rpc_client;
	struct delayed_work restart_work;
};
/* Remote storage client on modem */
struct rmt_storage_client {
	uint32_t handle;		/* RPC handle identifying this client */
	uint32_t sid;			/* Storage ID */
	char path[MAX_PATH_NAME];	/* partition path the client opened */
	struct rmt_storage_srv *srv;	/* server this client belongs to */
	struct list_head list;
};
/* A registered shared-memory region, linked into shrd_mem_list. */
struct rmt_shrd_mem {
	struct list_head list;
	struct rmt_shrd_mem_param param;
	struct shared_ramfs_entry *smem_info;
	struct rmt_storage_srv *srv;
};
static struct rmt_storage_srv *rmt_storage_get_srv(uint32_t prog);
static uint32_t rmt_storage_get_sid(const char *path);
#ifdef CONFIG_MSM_SDIO_SMEM
static void rmt_storage_sdio_smem_work(struct work_struct *work);
#endif
static struct rmt_storage_client_info *rmc;
#ifdef CONFIG_MSM_SDIO_SMEM
DECLARE_DELAYED_WORK(sdio_smem_work, rmt_storage_sdio_smem_work);
#endif
#ifdef CONFIG_MSM_SDIO_SMEM
#define MDM_LOCAL_BUF_SZ 0xC0000
static struct sdio_smem_client *sdio_smem;
#endif
#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
struct rmt_storage_op_stats {
unsigned long count;
ktime_t start;
ktime_t min;
ktime_t max;
ktime_t total;
};
struct rmt_storage_stats {
char path[MAX_PATH_NAME];
struct rmt_storage_op_stats rd_stats;
struct rmt_storage_op_stats wr_stats;
};
static struct rmt_storage_stats client_stats[MAX_NUM_CLIENTS];
static struct dentry *stats_dentry;
#endif
#define MSM_RMT_STORAGE_APIPROG 0x300000A7
#define MDM_RMT_STORAGE_APIPROG 0x300100A7
#define RMT_STORAGE_OP_FINISH_PROC 2
#define RMT_STORAGE_REGISTER_OPEN_PROC 3
#define RMT_STORAGE_REGISTER_WRITE_IOVEC_PROC 4
#define RMT_STORAGE_REGISTER_CB_PROC 5
#define RMT_STORAGE_UN_REGISTER_CB_PROC 6
#define RMT_STORAGE_FORCE_SYNC_PROC 7
#define RMT_STORAGE_GET_SYNC_STATUS_PROC 8
#define RMT_STORAGE_REGISTER_READ_IOVEC_PROC 9
#define RMT_STORAGE_REGISTER_ALLOC_RMT_BUF_PROC 10
#define RMT_STORAGE_OPEN_CB_TYPE_PROC 1
#define RMT_STORAGE_WRITE_IOVEC_CB_TYPE_PROC 2
#define RMT_STORAGE_EVENT_CB_TYPE_PROC 3
#define RMT_STORAGE_READ_IOVEC_CB_TYPE_PROC 4
#define RMT_STORAGE_ALLOC_RMT_BUF_CB_TYPE_PROC 5
#define RAMFS_INFO_MAGICNUMBER 0x654D4D43
#define RAMFS_INFO_VERSION 0x00000001
#define RAMFS_DEFAULT 0xFFFFFFFF
/* MSM EFS*/
#define RAMFS_MODEMSTORAGE_ID 0x4D454653
#define RAMFS_SHARED_EFS_RAM_BASE 0x46100000
#define RAMFS_SHARED_EFS_RAM_SIZE (3 * 1024 * 1024)
/* MDM EFS*/
#define RAMFS_MDM_STORAGE_ID 0x4D4583A1
/* SSD */
#define RAMFS_SSD_STORAGE_ID 0x00535344
#define RAMFS_SHARED_SSD_RAM_BASE 0x42E00000
#define RAMFS_SHARED_SSD_RAM_SIZE 0x2000
/* Look up a registered client by its RPC handle; NULL if unknown. */
static struct rmt_storage_client *rmt_storage_get_client(uint32_t handle)
{
	struct rmt_storage_client *client;

	list_for_each_entry(client, &rmc->client_list, list) {
		if (client->handle == handle)
			return client;
	}
	return NULL;
}
static struct rmt_storage_client *
rmt_storage_get_client_by_path(const char *path)
{
struct rmt_storage_client *rs_client;
list_for_each_entry(rs_client, &rmc->client_list, list)
if (!strncmp(path, rs_client->path, MAX_PATH_NAME))
return rs_client;
return NULL;
}
/*
 * Find the shared-memory parameters registered for storage id @sid.
 * Walks the whole list under rmc->lock (so if duplicate sids were ever
 * registered, the last match wins).  Returns NULL when not found.
 */
static struct rmt_shrd_mem_param *rmt_storage_get_shrd_mem(uint32_t sid)
{
	struct rmt_shrd_mem *shrd_mem;
	struct rmt_shrd_mem_param *shrd_mem_param = NULL;

	spin_lock(&rmc->lock);
	list_for_each_entry(shrd_mem, &rmc->shrd_mem_list, list)
		if (shrd_mem->param.sid == sid)
			shrd_mem_param = &shrd_mem->param;
	spin_unlock(&rmc->lock);

	return shrd_mem_param;
}
/*
 * Register a shared-memory region for storage id @sid.
 * Allocates a list node, fills it in and links it onto
 * rmc->shrd_mem_list under the driver lock.
 * Returns 0 on success, -ENOMEM on allocation failure.
 */
static int rmt_storage_add_shrd_mem(uint32_t sid, uint32_t start,
				    uint32_t size, void *base,
				    struct shared_ramfs_entry *smem_info,
				    struct rmt_storage_srv *srv)
{
	struct rmt_shrd_mem *entry;

	entry = kzalloc(sizeof(struct rmt_shrd_mem), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	entry->param.sid = sid;
	entry->param.start = start;
	entry->param.size = size;
	entry->param.base = base;
	entry->smem_info = smem_info;
	entry->srv = srv;

	spin_lock(&rmc->lock);
	list_add(&entry->list, &rmc->shrd_mem_list);
	spin_unlock(&rmc->lock);
	return 0;
}
/* Resolve an RPC client handle to the server's msm_rpc_client, or
 * NULL when the handle is unknown. */
static struct msm_rpc_client *rmt_storage_get_rpc_client(uint32_t handle)
{
	struct rmt_storage_client *client = rmt_storage_get_client(handle);

	return client ? client->srv->rpc_client : NULL;
}
/*
 * Sanity-check a modem-supplied iovec against the shared-memory region
 * registered for the client's storage id.
 *
 * Rejects unknown handles, storage ids with no shared memory, and any
 * transfer that does not fit entirely inside [start, start + size).
 * The sector count is bounded *before* computing the byte length so the
 * 32-bit multiplication RAMFS_BLOCK_SIZE * num_sector cannot wrap and
 * let an out-of-range buffer slip past the check (the original
 * data_phy_addr + len comparison could overflow for large num_sector).
 *
 * Returns 0 when the iovec is valid, -EINVAL otherwise.
 */
static int rmt_storage_validate_iovec(uint32_t handle,
				      struct rmt_storage_iovec_desc *xfer)
{
	struct rmt_storage_client *rs_client;
	struct rmt_shrd_mem_param *shrd_mem;
	uint32_t xfer_len;

	rs_client = rmt_storage_get_client(handle);
	if (!rs_client)
		return -EINVAL;

	shrd_mem = rmt_storage_get_shrd_mem(rs_client->sid);
	if (!shrd_mem)
		return -EINVAL;

	/* Bound the sector count first: guarantees xfer_len fits in the
	 * region and that the multiplication below cannot overflow. */
	if (xfer->num_sector > shrd_mem->size / RAMFS_BLOCK_SIZE)
		return -EINVAL;
	xfer_len = RAMFS_BLOCK_SIZE * xfer->num_sector;

	/* Overflow-safe range check: addr - start and size - xfer_len
	 * are both non-negative after the tests above. */
	if ((xfer->data_phy_addr < shrd_mem->start) ||
	    (xfer->data_phy_addr - shrd_mem->start >
	     shrd_mem->size - xfer_len))
		return -EINVAL;

	return 0;
}
/* XDR encoder: marshal a completion status (handle, error, user data). */
static int rmt_storage_send_sts_arg(struct msm_rpc_client *client,
				    struct msm_rpc_xdr *xdr, void *data)
{
	struct rmt_storage_send_sts *sts = data;

	xdr_send_uint32(xdr, &sts->handle);
	xdr_send_uint32(xdr, &sts->err_code);
	xdr_send_uint32(xdr, &sts->data);
	return 0;
}
/* Append an event to the queue consumed by the userspace daemon. */
static void put_event(struct rmt_storage_client_info *rmc,
		      struct rmt_storage_kevent *kevent)
{
	spin_lock(&rmc->lock);
	list_add_tail(&kevent->list, &rmc->event_list);
	spin_unlock(&rmc->lock);
}
/* Dequeue the oldest pending event, or NULL if the queue is empty. */
static struct rmt_storage_kevent *get_event(struct rmt_storage_client_info *rmc)
{
	struct rmt_storage_kevent *ev = NULL;

	spin_lock(&rmc->lock);
	if (!list_empty(&rmc->event_list)) {
		ev = list_first_entry(&rmc->event_list,
				      struct rmt_storage_kevent, list);
		list_del(&ev->list);
	}
	spin_unlock(&rmc->lock);

	return ev;
}
/*
 * RMT_STORAGE_EVNT_OPEN callback: the remote processor asks to open a
 * partition by path.  Allocates a client and a handle from the cids
 * bitmask and queues an RMT_STORAGE_OPEN event for the daemon.  If the
 * path is already open, the existing handle is returned and the event
 * id is set to NOOP so nothing is queued.  Returns the non-zero handle
 * on success; 0 or a negative errno signals failure to the caller.
 */
static int rmt_storage_event_open_cb(struct rmt_storage_event *event_args,
		struct msm_rpc_xdr *xdr)
{
	uint32_t cid, len, event_type;
	char *path;
	int ret;
	struct rmt_storage_srv *srv;
	struct rmt_storage_client *rs_client = NULL;
#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
	struct rmt_storage_stats *stats;
#endif

	/* usr_data was set by the dispatcher to the server's RPC prog */
	srv = rmt_storage_get_srv(event_args->usr_data);
	if (!srv)
		return -EINVAL;

	xdr_recv_uint32(xdr, &event_type);
	if (event_type != RMT_STORAGE_EVNT_OPEN)
		return -1;

	pr_info("%s: open callback received\n", __func__);
	ret = xdr_recv_bytes(xdr, (void **)&path, &len);
	if (ret || !path) {
		pr_err("%s: Invalid path\n", __func__);
		if (!ret)
			ret = -1;
		/* NOTE(review): if xdr_recv_bytes failed but still
		 * allocated 'path', it is leaked on this goto -- verify */
		goto free_rs_client;
	}

	/* Already open: reuse the handle, consume the event as a NOOP. */
	rs_client = rmt_storage_get_client_by_path(path);
	if (rs_client) {
		pr_debug("%s: Handle %d found for %s\n",
			__func__, rs_client->handle, path);
		event_args->id = RMT_STORAGE_NOOP;
		cid = rs_client->handle;
		goto end_open_cb;
	}

	rs_client = kzalloc(sizeof(struct rmt_storage_client), GFP_KERNEL);
	if (!rs_client) {
		pr_err("%s: Error allocating rmt storage client\n", __func__);
		ret = -ENOMEM;
		goto free_path;
	}

	memcpy(event_args->path, path, len);
	rs_client->sid = rmt_storage_get_sid(event_args->path);
	if (!rs_client->sid) {
		pr_err("%s: No storage id found for %s\n", __func__,
		       event_args->path);
		ret = -EINVAL;
		goto free_path;
	}
	/* NOTE(review): strncpy leaves rs_client->path unterminated when
	 * the source is exactly MAX_PATH_NAME bytes -- confirm readers */
	strncpy(rs_client->path, event_args->path, MAX_PATH_NAME);

	/* Handles come from the cids bitmask; bit 0 is reserved, so a
	 * valid handle is always non-zero. */
	cid = find_first_zero_bit(&rmc->cids, sizeof(rmc->cids) * 8);
	if (cid > MAX_NUM_CLIENTS) {
		pr_err("%s: Max clients are reached\n", __func__);
		cid = 0;
		kfree(rs_client);
		return cid;	/* 0 = invalid handle, signals failure */
	}
	__set_bit(cid, &rmc->cids);
	pr_info("open partition %s handle=%d\n", event_args->path, cid);

#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
	stats = &client_stats[cid - 1];
	memcpy(stats->path, event_args->path, len);
	/* NOTE(review): rd_stats/wr_stats are accessed as embedded
	 * structs below (.min), so these memset calls look like they
	 * need '&' to compile with this config enabled -- verify */
	memset(stats->rd_stats, 0, sizeof(struct rmt_storage_op_stats));
	memset(stats->wr_stats, 0, sizeof(struct rmt_storage_op_stats));
	stats->rd_stats.min.tv64 = KTIME_MAX;
	stats->wr_stats.min.tv64 = KTIME_MAX;
#endif
	event_args->id = RMT_STORAGE_OPEN;
	event_args->sid = rs_client->sid;
	event_args->handle = cid;

	rs_client->handle = event_args->handle;
	rs_client->srv = srv;
	INIT_LIST_HEAD(&rs_client->list);
	spin_lock(&rmc->lock);
	list_add_tail(&rs_client->list, &rmc->client_list);
	spin_unlock(&rmc->lock);

end_open_cb:
	kfree(path);
	return cid;

free_path:
	kfree(path);
	/* falls through: also frees rs_client (kfree(NULL) is a no-op) */
free_rs_client:
	kfree(rs_client);
	return ret;
}
/* Argument payloads decoded from the remote processor's event RPCs. */
struct rmt_storage_close_args {
	uint32_t handle;	/* open handle being closed */
};

struct rmt_storage_rw_block_args {
	uint32_t handle;	/* open handle */
	uint32_t data_phy_addr;	/* physical address inside shared memory */
	uint32_t sector_addr;	/* starting sector on the partition */
	uint32_t num_sector;	/* transfer length in sectors */
};

struct rmt_storage_get_err_args {
	uint32_t handle;
};

struct rmt_storage_user_data_args {
	uint32_t handle;
	uint32_t data;		/* opaque word from the remote side */
};

/* Tagged union filled in by rmt_storage_parse_params(). */
struct rmt_storage_event_params {
	uint32_t type;		/* RMT_STORAGE_EVNT_* discriminator */
	union {
		struct rmt_storage_close_args close;
		struct rmt_storage_rw_block_args block;
		struct rmt_storage_get_err_args get_err;
		struct rmt_storage_user_data_args user_data;
	} params;
};
/*
 * XDR decoder used via xdr_recv_pointer(): reads the event-type
 * discriminator and then the matching argument block into 'event'.
 * Returns 0 on success, -1 for an unrecognized event type.
 */
static int rmt_storage_parse_params(struct msm_rpc_xdr *xdr,
		struct rmt_storage_event_params *event)
{
	xdr_recv_uint32(xdr, &event->type);

	switch (event->type) {
	case RMT_STORAGE_EVNT_CLOSE: {
		struct rmt_storage_close_args *args;
		args = &event->params.close;

		xdr_recv_uint32(xdr, &args->handle);
		break;
	}

	case RMT_STORAGE_EVNT_WRITE_BLOCK: {
		struct rmt_storage_rw_block_args *args;
		args = &event->params.block;

		/* field order must match the remote encoder exactly */
		xdr_recv_uint32(xdr, &args->handle);
		xdr_recv_uint32(xdr, &args->data_phy_addr);
		xdr_recv_uint32(xdr, &args->sector_addr);
		xdr_recv_uint32(xdr, &args->num_sector);
		break;
	}

	case RMT_STORAGE_EVNT_GET_DEV_ERROR: {
		struct rmt_storage_get_err_args *args;
		args = &event->params.get_err;

		xdr_recv_uint32(xdr, &args->handle);
		break;
	}

	case RMT_STORAGE_EVNT_SEND_USER_DATA: {
		struct rmt_storage_user_data_args *args;
		args = &event->params.user_data;

		xdr_recv_uint32(xdr, &args->handle);
		xdr_recv_uint32(xdr, &args->data);
		break;
	}

	default:
		pr_err("%s: unknown event %d\n", __func__, event->type);
		return -1;
	}
	return 0;
}
/*
 * RMT_STORAGE_EVNT_CLOSE callback: release the handle bit, unlink and
 * free the client, and queue an RMT_STORAGE_CLOSE event for the daemon.
 * Returns RMT_STORAGE_NO_ERROR, or -1 on a decode failure.
 */
static int rmt_storage_event_close_cb(struct rmt_storage_event *event_args,
				      struct msm_rpc_xdr *xdr)
{
	struct rmt_storage_event_params *event;
	struct rmt_storage_close_args *close;
	struct rmt_storage_client *rs_client;
	uint32_t event_type;
	int ret;

	xdr_recv_uint32(xdr, &event_type);
	if (event_type != RMT_STORAGE_EVNT_CLOSE)
		return -1;

	pr_debug("%s: close callback received\n", __func__);
	ret = xdr_recv_pointer(xdr, (void **)&event,
			       sizeof(struct rmt_storage_event_params),
			       rmt_storage_parse_params);
	if (ret || !event)
		return -1;

	close = &event->params.close;
	event_args->handle = close->handle;
	event_args->id = RMT_STORAGE_CLOSE;

	__clear_bit(event_args->handle, &rmc->cids);

	/*
	 * client_list is modified under rmc->lock at its add sites
	 * (open callback); take the lock here too so a concurrent
	 * lookup never walks a half-unlinked node.
	 */
	spin_lock(&rmc->lock);
	rs_client = rmt_storage_get_client(event_args->handle);
	if (rs_client)
		list_del(&rs_client->list);
	spin_unlock(&rmc->lock);
	kfree(rs_client);	/* kfree(NULL) is a no-op */

	kfree(event);
	return RMT_STORAGE_NO_ERROR;
}
/*
 * RMT_STORAGE_EVNT_WRITE_BLOCK callback: queue a single-descriptor
 * write request for the userspace daemon.  A wakelock is held while
 * any transfer is outstanding.  Returns RMT_STORAGE_NO_ERROR, or -1
 * on decode/validation failure.
 */
static int rmt_storage_event_write_block_cb(
		struct rmt_storage_event *event_args,
		struct msm_rpc_xdr *xdr)
{
	struct rmt_storage_event_params *event;
	struct rmt_storage_rw_block_args *write_block;
	struct rmt_storage_iovec_desc *xfer;
	uint32_t event_type;
	int ret;

	xdr_recv_uint32(xdr, &event_type);
	if (event_type != RMT_STORAGE_EVNT_WRITE_BLOCK)
		return -1;

	pr_debug("%s: write block callback received\n", __func__);
	ret = xdr_recv_pointer(xdr, (void **)&event,
			       sizeof(struct rmt_storage_event_params),
			       rmt_storage_parse_params);
	if (ret || !event)
		return -1;

	write_block = &event->params.block;
	event_args->handle = write_block->handle;
	xfer = &event_args->xfer_desc[0];
	xfer->sector_addr = write_block->sector_addr;
	xfer->data_phy_addr = write_block->data_phy_addr;
	xfer->num_sector = write_block->num_sector;

	ret = rmt_storage_validate_iovec(event_args->handle, xfer);
	if (ret) {
		/* 'event' was leaked here before: it belongs to us on
		 * every exit path once xdr_recv_pointer succeeds. */
		kfree(event);
		return -1;
	}

	event_args->xfer_cnt = 1;
	event_args->id = RMT_STORAGE_WRITE;

	/* first outstanding transfer grabs the wakelock */
	if (atomic_inc_return(&rmc->wcount) == 1)
		wake_lock(&rmc->wlock);

	pr_debug("sec_addr = %u, data_addr = %x, num_sec = %d\n\n",
		 xfer->sector_addr, xfer->data_phy_addr,
		 xfer->num_sector);

	kfree(event);
	return RMT_STORAGE_NO_ERROR;
}
/*
 * RMT_STORAGE_EVNT_GET_DEV_ERROR callback.  The arguments are decoded
 * and the handle recorded, but the query itself is not implemented, so
 * -1 is always returned to the dispatcher.
 */
static int rmt_storage_event_get_err_cb(struct rmt_storage_event *event_args,
		struct msm_rpc_xdr *xdr)
{
	struct rmt_storage_event_params *event;
	struct rmt_storage_get_err_args *get_err;
	uint32_t event_type;
	int ret;

	xdr_recv_uint32(xdr, &event_type);
	if (event_type != RMT_STORAGE_EVNT_GET_DEV_ERROR)
		return -1;

	pr_debug("%s: get err callback received\n", __func__);
	ret = xdr_recv_pointer(xdr, (void **)&event,
			       sizeof(struct rmt_storage_event_params),
			       rmt_storage_parse_params);
	if (ret || !event)
		return -1;

	get_err = &event->params.get_err;
	event_args->handle = get_err->handle;
	kfree(event);
	/* Not implemented */
	return -1;
}
/*
 * RMT_STORAGE_EVNT_SEND_USER_DATA callback: forwards an opaque word of
 * data from the remote processor to the daemon as an
 * RMT_STORAGE_SEND_USER_DATA event.
 */
static int rmt_storage_event_user_data_cb(struct rmt_storage_event *event_args,
		struct msm_rpc_xdr *xdr)
{
	struct rmt_storage_event_params *event;
	struct rmt_storage_user_data_args *user_data;
	uint32_t event_type;
	int ret;

	xdr_recv_uint32(xdr, &event_type);
	if (event_type != RMT_STORAGE_EVNT_SEND_USER_DATA)
		return -1;

	pr_info("%s: send user data callback received\n", __func__);
	ret = xdr_recv_pointer(xdr, (void **)&event,
			       sizeof(struct rmt_storage_event_params),
			       rmt_storage_parse_params);
	if (ret || !event)
		return -1;

	user_data = &event->params.user_data;
	event_args->handle = user_data->handle;
	event_args->usr_data = user_data->data;
	event_args->id = RMT_STORAGE_SEND_USER_DATA;

	kfree(event);
	return RMT_STORAGE_NO_ERROR;
}
/*
 * RMT_STORAGE_EVNT_WRITE_IOVEC callback: decode a scatter list of
 * write segments from the wire, validate each one against the client's
 * shared-memory window, and queue a single RMT_STORAGE_WRITE event.
 */
static int rmt_storage_event_write_iovec_cb(
		struct rmt_storage_event *event_args,
		struct msm_rpc_xdr *xdr)
{
	struct rmt_storage_iovec_desc *xfer;
	uint32_t i, ent, event_type;
#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
	struct rmt_storage_stats *stats;
#endif

	xdr_recv_uint32(xdr, &event_type);
	if (event_type != RMT_STORAGE_EVNT_WRITE_IOVEC)
		return -EINVAL;

	pr_info("%s: write iovec callback received\n", __func__);
	xdr_recv_uint32(xdr, &event_args->handle);
	xdr_recv_uint32(xdr, &ent);
	pr_debug("handle = %d\n", event_args->handle);

#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
	stats = &client_stats[event_args->handle - 1];
	stats->wr_stats.start = ktime_get();
#endif
	/* NOTE(review): 'ent' arrives from the wire and indexes
	 * xfer_desc[] with no visible bound check -- confirm the array
	 * is sized for the protocol's maximum segment count. */
	for (i = 0; i < ent; i++) {
		xfer = &event_args->xfer_desc[i];
		xdr_recv_uint32(xdr, &xfer->sector_addr);
		xdr_recv_uint32(xdr, &xfer->data_phy_addr);
		xdr_recv_uint32(xdr, &xfer->num_sector);

		if (rmt_storage_validate_iovec(event_args->handle, xfer))
			return -EINVAL;

		pr_debug("sec_addr = %u, data_addr = %x, num_sec = %d\n",
			 xfer->sector_addr, xfer->data_phy_addr,
			 xfer->num_sector);
	}
	xdr_recv_uint32(xdr, &event_args->xfer_cnt);
	event_args->id = RMT_STORAGE_WRITE;
	/* first outstanding transfer grabs the wakelock */
	if (atomic_inc_return(&rmc->wcount) == 1)
		wake_lock(&rmc->wlock);

	pr_debug("iovec transfer count = %d\n\n", event_args->xfer_cnt);
	return RMT_STORAGE_NO_ERROR;
}
/*
 * RMT_STORAGE_EVNT_READ_IOVEC callback: decode a scatter list of read
 * segments from the wire, validate each one against the client's
 * shared-memory window, and queue a single RMT_STORAGE_READ event.
 */
static int rmt_storage_event_read_iovec_cb(
		struct rmt_storage_event *event_args,
		struct msm_rpc_xdr *xdr)
{
	struct rmt_storage_iovec_desc *xfer;
	uint32_t i, ent, event_type;
#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
	struct rmt_storage_stats *stats;
#endif

	xdr_recv_uint32(xdr, &event_type);
	if (event_type != RMT_STORAGE_EVNT_READ_IOVEC)
		return -EINVAL;

	pr_info("%s: read iovec callback received\n", __func__);
	xdr_recv_uint32(xdr, &event_args->handle);
	xdr_recv_uint32(xdr, &ent);
	pr_debug("handle = %d\n", event_args->handle);

#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
	stats = &client_stats[event_args->handle - 1];
	stats->rd_stats.start = ktime_get();
#endif
	/* NOTE(review): as in the write-iovec path, 'ent' comes from
	 * the wire and indexes xfer_desc[] without a bound check --
	 * verify the array size against the protocol maximum. */
	for (i = 0; i < ent; i++) {
		xfer = &event_args->xfer_desc[i];
		xdr_recv_uint32(xdr, &xfer->sector_addr);
		xdr_recv_uint32(xdr, &xfer->data_phy_addr);
		xdr_recv_uint32(xdr, &xfer->num_sector);

		if (rmt_storage_validate_iovec(event_args->handle, xfer))
			return -EINVAL;

		pr_debug("sec_addr = %u, data_addr = %x, num_sec = %d\n",
			 xfer->sector_addr, xfer->data_phy_addr,
			 xfer->num_sector);
	}
	xdr_recv_uint32(xdr, &event_args->xfer_cnt);
	event_args->id = RMT_STORAGE_READ;
	/* first outstanding transfer grabs the wakelock */
	if (atomic_inc_return(&rmc->wcount) == 1)
		wake_lock(&rmc->wlock);

	pr_debug("iovec transfer count = %d\n\n", event_args->xfer_cnt);
	return RMT_STORAGE_NO_ERROR;
}
#ifdef CONFIG_MSM_SDIO_SMEM
/* Completion callback for SDIO-SMEM transfers from the MDM. */
static int sdio_smem_cb(int event)
{
	pr_debug("%s: Received event %d\n", __func__, event);

	switch (event) {
	case SDIO_SMEM_EVENT_READ_DONE:
		pr_debug("Read done\n");
		break;
	case SDIO_SMEM_EVENT_READ_ERR:
		pr_err("Read overflow\n");
		return -EIO;
	default:
		pr_err("Unhandled event\n");
	}
	return 0;
}

/* Bind the SDIO-SMEM client to the MDM shared-memory window and
 * register it; SDIO SMEM is used only for the MDM storage id. */
static int rmt_storage_sdio_smem_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct rmt_shrd_mem_param *shrd_mem;

	sdio_smem = container_of(pdev, struct sdio_smem_client, plat_dev);

	/* SDIO SMEM is supported only for MDM */
	shrd_mem = rmt_storage_get_shrd_mem(RAMFS_MDM_STORAGE_ID);
	if (!shrd_mem) {
		pr_err("%s: No shared mem entry for sid=0x%08x\n",
		       __func__, (uint32_t)RAMFS_MDM_STORAGE_ID);
		return -ENOMEM;
	}
	sdio_smem->buf = __va(shrd_mem->start);
	sdio_smem->size = shrd_mem->size;
	sdio_smem->cb_func = sdio_smem_cb;
	ret = sdio_smem_register_client();
	if (ret)
		pr_info("%s: Error (%d) registering sdio_smem client\n",
			__func__, ret);
	return ret;
}

static int rmt_storage_sdio_smem_remove(struct platform_device *pdev)
{
	sdio_smem_unregister_client();
	/* driver unregistration is deferred to the workqueue --
	 * presumably unregistering from within remove() would deadlock */
	queue_delayed_work(rmc->workq, &sdio_smem_work, 0);
	return 0;
}

/* tracks whether sdio_smem_drv is currently registered */
static int sdio_smem_drv_registered;

static struct platform_driver sdio_smem_drv = {
	.probe		= rmt_storage_sdio_smem_probe,
	.remove		= rmt_storage_sdio_smem_remove,
	.driver		= {
		.name	= "SDIO_SMEM_CLIENT",
		.owner	= THIS_MODULE,
	},
};

/* Deferred unregistration scheduled by the remove() hook above. */
static void rmt_storage_sdio_smem_work(struct work_struct *work)
{
	platform_driver_unregister(&sdio_smem_drv);
	sdio_smem_drv_registered = 0;
}
#endif
/*
 * RMT_STORAGE_EVNT_ALLOC_RMT_BUF callback: the remote side asks for a
 * shared buffer of 'size' bytes for an already-open handle.  On
 * success the *physical start address* of the matching pre-reserved
 * region is returned, cast to int, as the RPC result; the event is
 * marked NOOP so nothing is queued for userspace.  For the MDM server
 * this also registers the SDIO-SMEM bridge driver on first use.
 */
static int rmt_storage_event_alloc_rmt_buf_cb(
		struct rmt_storage_event *event_args,
		struct msm_rpc_xdr *xdr)
{
	struct rmt_storage_client *rs_client;
	struct rmt_shrd_mem_param *shrd_mem;
	uint32_t event_type, handle, size;
#ifdef CONFIG_MSM_SDIO_SMEM
	int ret;
#endif
	xdr_recv_uint32(xdr, &event_type);
	if (event_type != RMT_STORAGE_EVNT_ALLOC_RMT_BUF)
		return -EINVAL;

	pr_info("%s: Alloc rmt buf callback received\n", __func__);
	xdr_recv_uint32(xdr, &handle);
	xdr_recv_uint32(xdr, &size);

	pr_debug("%s: handle=0x%x size=0x%x\n", __func__, handle, size);

	rs_client = rmt_storage_get_client(handle);
	if (!rs_client) {
		pr_err("%s: Unable to find client for handle=%d\n",
		       __func__, handle);
		return -EINVAL;
	}

	/* re-resolve the storage id (the region may have been added
	 * after the open) */
	rs_client->sid = rmt_storage_get_sid(rs_client->path);
	if (!rs_client->sid) {
		pr_err("%s: No storage id found for %s\n",
		       __func__, rs_client->path);
		return -EINVAL;
	}

	shrd_mem = rmt_storage_get_shrd_mem(rs_client->sid);
	if (!shrd_mem) {
		pr_err("%s: No shared memory entry found\n",
		       __func__);
		return -ENOMEM;
	}
	/* the pre-reserved region must be large enough for the request */
	if (shrd_mem->size < size) {
		pr_err("%s: Size mismatch for handle=%d\n",
		       __func__, rs_client->handle);
		return -EINVAL;
	}
	pr_debug("%s: %d bytes at phys=0x%x for handle=%d found\n",
		 __func__, size, shrd_mem->start, rs_client->handle);

#ifdef CONFIG_MSM_SDIO_SMEM
	if (rs_client->srv->prog == MDM_RMT_STORAGE_APIPROG) {
		if (!sdio_smem_drv_registered) {
			ret = platform_driver_register(&sdio_smem_drv);
			if (!ret)
				sdio_smem_drv_registered = 1;
			else
				pr_err("%s: Cant register sdio smem client\n",
				       __func__);
		}
	}
#endif
	event_args->id = RMT_STORAGE_NOOP;
	return (int)shrd_mem->start;
}
/*
 * Dispatcher for all callback RPCs arriving from a remote storage
 * server.  Looks up the callback registered for the procedure, runs it
 * to fill a kernel event, queues the event for the userspace daemon
 * (unless the callback marked it NOOP), and sends the accepted RPC
 * reply carrying the callback's result.
 */
static int handle_rmt_storage_call(struct msm_rpc_client *client,
				struct rpc_request_hdr *req,
				struct msm_rpc_xdr *xdr)
{
	int rc;
	uint32_t result = RMT_STORAGE_NO_ERROR;
	uint32_t rpc_status = RPC_ACCEPTSTAT_SUCCESS;
	struct rmt_storage_event *event_args;
	struct rmt_storage_kevent *kevent;

	kevent = kzalloc(sizeof(struct rmt_storage_kevent), GFP_KERNEL);
	if (!kevent) {
		rpc_status = RPC_ACCEPTSTAT_SYSTEM_ERR;
		goto out;
	}
	event_args = &kevent->event;

	switch (req->procedure) {
	case RMT_STORAGE_OPEN_CB_TYPE_PROC:
		/* client created in cb needs a ref. to its server */
		event_args->usr_data = client->prog;
		/* fall through */

	case RMT_STORAGE_WRITE_IOVEC_CB_TYPE_PROC:
		/* fall through */

	case RMT_STORAGE_READ_IOVEC_CB_TYPE_PROC:
		/* fall through */

	case RMT_STORAGE_ALLOC_RMT_BUF_CB_TYPE_PROC:
		/* fall through */

	case RMT_STORAGE_EVENT_CB_TYPE_PROC: {
		uint32_t cb_id;
		int (*cb_func)(struct rmt_storage_event *event_args,
				struct msm_rpc_xdr *xdr);

		/* first wire word is the callback id registered earlier */
		xdr_recv_uint32(xdr, &cb_id);
		cb_func = msm_rpc_get_cb_func(client, cb_id);

		if (!cb_func) {
			rpc_status = RPC_ACCEPTSTAT_GARBAGE_ARGS;
			kfree(kevent);
			goto out;
		}

		rc = cb_func(event_args, xdr);
		if (IS_ERR_VALUE(rc)) {
			pr_err("%s: Invalid parameters received\n", __func__);
			if (req->procedure == RMT_STORAGE_OPEN_CB_TYPE_PROC)
				result = 0; /* bad handle to signify err */
			else
				result = RMT_STORAGE_ERROR_PARAM;
			kfree(kevent);
			goto out;
		}
		/* success values (e.g. open handles, phys addrs) pass
		 * through to the remote side unchanged */
		result = (uint32_t) rc;
		break;
	}

	default:
		kfree(kevent);
		pr_err("%s: unknown procedure %d\n", __func__, req->procedure);
		rpc_status = RPC_ACCEPTSTAT_PROC_UNAVAIL;
		goto out;
	}

	/* NOOP events are consumed here; anything else wakes the daemon */
	if (kevent->event.id != RMT_STORAGE_NOOP) {
		put_event(rmc, kevent);
		atomic_inc(&rmc->total_events);
		wake_up(&rmc->event_q);
	} else
		kfree(kevent);

out:
	pr_debug("%s: Sending result=0x%x\n", __func__, result);
	xdr_start_accepted_reply(xdr, rpc_status);
	xdr_send_uint32(xdr, &result);
	rc = xdr_send_msg(xdr);
	if (rc)
		pr_err("%s: send accepted reply failed: %d\n", __func__, rc);

	return rc;
}
/* Allow exactly one userspace client of the character device. */
static int rmt_storage_open(struct inode *ip, struct file *fp)
{
	int ret;

	spin_lock(&rmc->lock);
	if (rmc->open_excl) {
		ret = -EBUSY;
	} else {
		rmc->open_excl = 1;
		ret = 0;
	}
	spin_unlock(&rmc->lock);

	return ret;
}
/* Drop the exclusive-open flag so the device can be opened again. */
static int rmt_storage_release(struct inode *ip, struct file *fp)
{
	spin_lock(&rmc->lock);
	rmc->open_excl = 0;
	spin_unlock(&rmc->lock);

	return 0;
}
static long rmt_storage_ioctl(struct file *fp, unsigned int cmd,
unsigned long arg)
{
int ret = 0;
struct rmt_storage_kevent *kevent;
struct rmt_storage_send_sts status;
static struct msm_rpc_client *rpc_client;
struct rmt_shrd_mem_param usr_shrd_mem, *shrd_mem;
#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
struct rmt_storage_stats *stats;
struct rmt_storage_op_stats *op_stats;
ktime_t curr_stat;
#endif
switch (cmd) {
case RMT_STORAGE_SHRD_MEM_PARAM:
pr_debug("%s: get shared memory parameters ioctl\n", __func__);
if (copy_from_user(&usr_shrd_mem, (void __user *)arg,
sizeof(struct rmt_shrd_mem_param))) {
pr_err("%s: copy from user failed\n\n", __func__);
ret = -EFAULT;
break;
}
shrd_mem = rmt_storage_get_shrd_mem(usr_shrd_mem.sid);
if (!shrd_mem) {
pr_err("%s: invalid sid (0x%x)\n", __func__,
usr_shrd_mem.sid);
ret = -EFAULT;
break;
}
if (copy_to_user((void __user *)arg, shrd_mem,
sizeof(struct rmt_shrd_mem_param))) {
pr_err("%s: copy to user failed\n\n", __func__);
ret = -EFAULT;
}
break;
case RMT_STORAGE_WAIT_FOR_REQ:
pr_debug("%s: wait for request ioctl\n", __func__);
if (atomic_read(&rmc->total_events) == 0) {
ret = wait_event_interruptible(rmc->event_q,
atomic_read(&rmc->total_events) != 0);
}
if (ret < 0)
break;
atomic_dec(&rmc->total_events);
kevent = get_event(rmc);
WARN_ON(kevent == NULL);
if (copy_to_user((void __user *)arg, &kevent->event,
sizeof(struct rmt_storage_event))) {
pr_err("%s: copy to user failed\n\n", __func__);
ret = -EFAULT;
}
kfree(kevent);
break;
case RMT_STORAGE_SEND_STATUS:
pr_info("%s: send status ioctl\n", __func__);
if (copy_from_user(&status, (void __user *)arg,
sizeof(struct rmt_storage_send_sts))) {
pr_err("%s: copy from user failed\n\n", __func__);
ret = -EFAULT;
if (atomic_dec_return(&rmc->wcount) == 0)
wake_unlock(&rmc->wlock);
break;
}
#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
stats = &client_stats[status.handle - 1];
if (status.xfer_dir == RMT_STORAGE_WRITE)
op_stats = &stats->wr_stats;
else
op_stats = &stats->rd_stats;
curr_stat = ktime_sub(ktime_get(), op_stats->start);
op_stats->total = ktime_add(op_stats->total, curr_stat);
op_stats->count++;
if (curr_stat.tv64 < stats->min.tv64)
op_stats->min = curr_stat;
if (curr_stat.tv64 > stats->max.tv64)
op_stats->max = curr_stat;
#endif
pr_debug("%s: \thandle=%d err_code=%d data=0x%x\n", __func__,
status.handle, status.err_code, status.data);
rpc_client = rmt_storage_get_rpc_client(status.handle);
if (rpc_client)
ret = msm_rpc_client_req2(rpc_client,
RMT_STORAGE_OP_FINISH_PROC,
rmt_storage_send_sts_arg,
&status, NULL, NULL, -1);
else
ret = -EINVAL;
if (ret < 0)
pr_err("%s: send status failed with ret val = %d\n",
__func__, ret);
if (atomic_dec_return(&rmc->wcount) == 0)
wake_unlock(&rmc->wlock);
break;
default:
ret = -EINVAL;
break;
}
return ret;
}
/* Integer decoded from a sync-related RPC reply (token or status). */
struct rmt_storage_sync_recv_arg {
	int data;
};
/* XDR decoder for the force-sync reply: reads the sync token and
 * caches it on the owning server for later status queries. */
static int rmt_storage_receive_sync_arg(struct msm_rpc_client *client,
					struct msm_rpc_xdr *xdr, void *data)
{
	struct rmt_storage_sync_recv_arg *arg = data;
	struct rmt_storage_srv *srv = rmt_storage_get_srv(client->prog);

	if (!srv)
		return -EINVAL;

	xdr_recv_int32(xdr, &arg->data);
	srv->sync_token = arg->data;
	return 0;
}
/* Ask the remote side to flush its storage; the returned sync token is
 * cached by the receive callback.  Returns 0 or a negative RPC error. */
static int rmt_storage_force_sync(struct msm_rpc_client *client)
{
	struct rmt_storage_sync_recv_arg args;
	int rc;

	rc = msm_rpc_client_req2(client,
				 RMT_STORAGE_FORCE_SYNC_PROC, NULL, NULL,
				 rmt_storage_receive_sync_arg, &args, -1);
	if (!rc)
		return 0;

	pr_err("%s: force sync RPC req failed: %d\n", __func__, rc);
	return rc;
}
/* Sync token sent when querying the status of an earlier force-sync. */
struct rmt_storage_sync_sts_arg {
	int token;
};
/* XDR encoder: marshal the sync token being queried. */
static int rmt_storage_send_sync_sts_arg(struct msm_rpc_client *client,
					 struct msm_rpc_xdr *xdr, void *data)
{
	struct rmt_storage_sync_sts_arg *args = data;

	xdr_send_int32(xdr, &args->token);
	return 0;
}
/* XDR decoder: unmarshal the sync status from the reply. */
static int rmt_storage_receive_sync_sts_arg(struct msm_rpc_client *client,
					    struct msm_rpc_xdr *xdr, void *data)
{
	struct rmt_storage_sync_recv_arg *arg = data;

	xdr_recv_int32(xdr, &arg->data);
	return 0;
}
/* Query the remote side for the status of the last force-sync.
 * Returns the reported status, or a negative value on error / when no
 * valid sync token is cached. */
static int rmt_storage_get_sync_status(struct msm_rpc_client *client)
{
	struct rmt_storage_sync_recv_arg recv_args;
	struct rmt_storage_sync_sts_arg send_args;
	struct rmt_storage_srv *srv;
	int rc;

	srv = rmt_storage_get_srv(client->prog);
	if (!srv || srv->sync_token < 0)
		return -EINVAL;

	send_args.token = srv->sync_token;
	rc = msm_rpc_client_req2(client,
				 RMT_STORAGE_GET_SYNC_STATUS_PROC,
				 rmt_storage_send_sync_sts_arg, &send_args,
				 rmt_storage_receive_sync_sts_arg, &recv_args,
				 -1);
	if (rc) {
		pr_err("%s: sync status RPC req failed: %d\n", __func__, rc);
		return rc;
	}

	return recv_args.data;
}
/* Map the shared physical window, uncached, into the daemon's address
 * space.  vm_pgoff selects the physical page frame to map. */
static int rmt_storage_mmap(struct file *file, struct vm_area_struct *vma)
{
	unsigned long vsize = vma->vm_end - vma->vm_start;
	int ret;

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	ret = io_remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff,
				 vsize, vma->vm_page_prot);
	if (ret < 0)
		pr_err("%s: failed with return val %d\n", __func__, ret);

	return ret;
}
/* Wire arguments for registering an event callback with the server. */
struct rmt_storage_reg_cb_args {
	uint32_t event;		/* RMT_STORAGE_EVNT_* being subscribed */
	uint32_t cb_id;		/* id returned by msm_rpc_add_cb_func() */
};
/* XDR encoder: marshal the event id and callback id for registration. */
static int rmt_storage_arg_cb(struct msm_rpc_client *client,
			      struct msm_rpc_xdr *xdr, void *data)
{
	struct rmt_storage_reg_cb_args *cb_args = data;

	xdr_send_uint32(xdr, &cb_args->event);
	xdr_send_uint32(xdr, &cb_args->cb_id);
	return 0;
}
/*
 * Register 'callback' with the RPC router and tell the remote server
 * (via procedure 'proc') to deliver 'event' through it.  The request
 * is retried a few times on timeout.  Returns 0 on success or a
 * negative error.
 */
static int rmt_storage_reg_cb(struct msm_rpc_client *client,
			      uint32_t proc, uint32_t event, void *callback)
{
	struct rmt_storage_reg_cb_args args;
	int rc, cb_id;
	int retries = 10;

	cb_id = msm_rpc_add_cb_func(client, callback);
	if ((cb_id < 0) && (cb_id != MSM_RPC_CLIENT_NULL_CB_ID))
		return cb_id;

	args.event = event;
	args.cb_id = cb_id;

	while (retries) {
		rc = msm_rpc_client_req2(client, proc, rmt_storage_arg_cb,
					 &args, NULL, NULL, -1);
		if (rc != -ETIMEDOUT)
			break;
		retries--;
		/*
		 * We are in process context (msm_rpc_client_req2 above
		 * blocks for the reply), so sleep between retries
		 * instead of busy-waiting 1 ms with udelay().
		 */
		usleep_range(1000, 2000);
	}
	if (rc)
		pr_err("%s: Failed to register callback for event %d\n",
		       __func__, event);

	return rc;
}
#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
/* Nothing to set up for the stats file; each read rebuilds the text. */
static int rmt_storage_stats_open(struct inode *inode, struct file *file)
{
	return 0;
}
/*
 * Render per-partition transfer statistics (min/max/total/average read
 * and write latency, request counts) into a text buffer for debugfs.
 */
static ssize_t rmt_storage_stats_read(struct file *file, char __user *ubuf,
				      size_t count, loff_t *ppos)
{
	uint32_t tot_clients;
	char buf[512];
	int max, j, i = 0;
	struct rmt_storage_stats *stats;

	max = sizeof(buf) - 1;
	/*
	 * cids is a bitmask: scan its full bit-width (bytes * 8, as the
	 * allocator in the open callback does) and subtract one for the
	 * reserved bit 0 to get the number of clients.
	 */
	tot_clients = find_first_zero_bit(&rmc->cids,
					  sizeof(rmc->cids) * 8) - 1;

	for (j = 0; j < tot_clients; j++) {
		stats = &client_stats[j];
		i += scnprintf(buf + i, max - i, "stats for partition %s:\n",
			       stats->path);
		i += scnprintf(buf + i, max - i, "Min read time: %lld us\n",
			       ktime_to_us(stats->rd_stats.min));
		i += scnprintf(buf + i, max - i, "Max read time: %lld us\n",
			       ktime_to_us(stats->rd_stats.max));
		i += scnprintf(buf + i, max - i, "Total read time: %lld us\n",
			       ktime_to_us(stats->rd_stats.total));
		i += scnprintf(buf + i, max - i, "Total read requests: %ld\n",
			       stats->rd_stats.count);
		/* count/total live on the per-op stats, not on
		 * rmt_storage_stats itself */
		if (stats->rd_stats.count)
			i += scnprintf(buf + i, max - i,
				"Avg read time: %lld us\n",
				div_s64(ktime_to_us(stats->rd_stats.total),
					stats->rd_stats.count));
		i += scnprintf(buf + i, max - i, "Min write time: %lld us\n",
			       ktime_to_us(stats->wr_stats.min));
		i += scnprintf(buf + i, max - i, "Max write time: %lld us\n",
			       ktime_to_us(stats->wr_stats.max));
		i += scnprintf(buf + i, max - i, "Total write time: %lld us\n",
			       ktime_to_us(stats->wr_stats.total));
		/* label previously said "read" for the write counter */
		i += scnprintf(buf + i, max - i, "Total write requests: %ld\n",
			       stats->wr_stats.count);
		if (stats->wr_stats.count)
			i += scnprintf(buf + i, max - i,
				"Avg write time: %lld us\n",
				div_s64(ktime_to_us(stats->wr_stats.total),
					stats->wr_stats.count));
	}
	return simple_read_from_buffer(ubuf, count, ppos, buf, i);
}
/* debugfs file exposing the per-client transfer statistics above. */
static const struct file_operations debug_ops = {
	.owner = THIS_MODULE,
	.open = rmt_storage_stats_open,
	.read = rmt_storage_stats_read,
};
#endif
/* Character-device ops for the userspace daemon interface
 * (exclusive open, ioctl command channel, mmap of shared memory). */
const struct file_operations rmt_storage_fops = {
	.owner = THIS_MODULE,
	.open = rmt_storage_open,
	.unlocked_ioctl	 = rmt_storage_ioctl,
	.mmap = rmt_storage_mmap,
	.release = rmt_storage_release,
};
/* Misc device node: /dev/rmt_storage with a dynamically assigned minor. */
static struct miscdevice rmt_storage_device = {
	.minor = MISC_DYNAMIC_MINOR,
	.name = "rmt_storage",
	.fops = &rmt_storage_fops,
};
/*
 * Import the shared-RAMFS regions published in SMEM into the driver's
 * shared-memory list.  Only applies to the primary modem server
 * (MSM_RMT_STORAGE_APIPROG); other servers obtain their regions via
 * the alloc_rmt_buf callback instead.  Returns 0 on success or a
 * negative error if the table is absent/invalid or an add fails.
 */
static int rmt_storage_get_ramfs(struct rmt_storage_srv *srv)
{
	struct shared_ramfs_table *ramfs_table;
	struct shared_ramfs_entry *ramfs_entry;
	int index, ret;

	if (srv->prog != MSM_RMT_STORAGE_APIPROG)
		return 0;

	ramfs_table = smem_alloc(SMEM_SEFS_INFO,
				 sizeof(struct shared_ramfs_table));

	if (!ramfs_table) {
		pr_err("%s: No RAMFS table in SMEM\n", __func__);
		return -ENOENT;
	}

	if ((ramfs_table->magic_id != (u32) RAMFS_INFO_MAGICNUMBER) ||
	    (ramfs_table->version != (u32) RAMFS_INFO_VERSION)) {
		pr_err("%s: Magic / Version mismatch:, "
		       "magic_id=%#x, format_version=%#x\n", __func__,
		       ramfs_table->magic_id, ramfs_table->version);
		return -ENOENT;
	}

	for (index = 0; index < ramfs_table->entries; index++) {
		ramfs_entry = &ramfs_table->ramfs_entry[index];
		/* a zero or default client id terminates the table */
		if (!ramfs_entry->client_id ||
		    ramfs_entry->client_id == (u32) RAMFS_DEFAULT)
			break;

		pr_info("%s: RAMFS entry: addr = 0x%08x, size = 0x%08x\n",
			__func__, ramfs_entry->base_addr, ramfs_entry->size);

		ret = rmt_storage_add_shrd_mem(ramfs_entry->client_id,
					       ramfs_entry->base_addr,
					       ramfs_entry->size,
					       NULL,
					       ramfs_entry,
					       srv);
		if (ret) {
			pr_err("%s: Error (%d) adding shared mem\n",
			       __func__, ret);
			return ret;
		}
	}
	return 0;
}
/* sysfs read hook: trigger a force-sync on the server backing this
 * platform device.  Nothing is written to 'buf'; the return value is
 * the sync result (0 on success, negative on failure). */
static ssize_t
show_force_sync(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct platform_device *pdev =
		container_of(dev, struct platform_device, dev);
	struct rpcsvr_platform_device *rpc_pdev =
		container_of(pdev, struct rpcsvr_platform_device, base);
	struct rmt_storage_srv *srv = rmt_storage_get_srv(rpc_pdev->prog);

	if (!srv) {
		pr_err("%s: Unable to find prog=0x%x\n", __func__,
		       rpc_pdev->prog);
		return -EINVAL;
	}

	return rmt_storage_force_sync(srv->rpc_client);
}
/* Returns -EINVAL for invalid sync token and an error value for any failure
* in RPC call. Upon success, it returns a sync status of 1 (sync done)
* or 0 (sync still pending).
*/
/* sysfs read hook: report the last force-sync's status as text.
 * See the comment above: 1 = sync done, 0 = pending, negative = error. */
static ssize_t
show_sync_sts(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct platform_device *pdev =
		container_of(dev, struct platform_device, dev);
	struct rpcsvr_platform_device *rpc_pdev =
		container_of(pdev, struct rpcsvr_platform_device, base);
	struct rmt_storage_srv *srv = rmt_storage_get_srv(rpc_pdev->prog);

	if (!srv) {
		pr_err("%s: Unable to find prog=0x%x\n", __func__,
		       rpc_pdev->prog);
		return -EINVAL;
	}

	return snprintf(buf, PAGE_SIZE, "%d\n",
			rmt_storage_get_sync_status(srv->rpc_client));
}
/*
 * Seed the SMEM RAMFS table with the default modem-EFS and SSD windows
 * when the modem has not already populated it (detected via the magic
 * id).  The magic id is written last, after the entries are filled in.
 * Only applies to the primary modem server.
 */
static int rmt_storage_init_ramfs(struct rmt_storage_srv *srv)
{
	struct shared_ramfs_table *ramfs_table;

	if (srv->prog != MSM_RMT_STORAGE_APIPROG)
		return 0;

	ramfs_table = smem_alloc(SMEM_SEFS_INFO,
				 sizeof(struct shared_ramfs_table));
	if (!ramfs_table) {
		pr_err("%s: No RAMFS table in SMEM\n", __func__);
		return -ENOENT;
	}

	if (ramfs_table->magic_id == RAMFS_INFO_MAGICNUMBER) {
		pr_debug("RAMFS table already filled... skipping %s", \
			__func__);
		return 0;
	}

	ramfs_table->ramfs_entry[0].client_id = RAMFS_MODEMSTORAGE_ID;
	ramfs_table->ramfs_entry[0].base_addr = RAMFS_SHARED_EFS_RAM_BASE;
	ramfs_table->ramfs_entry[0].size = RAMFS_SHARED_EFS_RAM_SIZE;
	ramfs_table->ramfs_entry[0].client_sts = RAMFS_DEFAULT;

	ramfs_table->ramfs_entry[1].client_id = RAMFS_SSD_STORAGE_ID;
	ramfs_table->ramfs_entry[1].base_addr = RAMFS_SHARED_SSD_RAM_BASE;
	ramfs_table->ramfs_entry[1].size = RAMFS_SHARED_SSD_RAM_SIZE;
	ramfs_table->ramfs_entry[1].client_sts = RAMFS_DEFAULT;

	ramfs_table->entries = 2;
	ramfs_table->version = RAMFS_INFO_VERSION;
	ramfs_table->magic_id = RAMFS_INFO_MAGICNUMBER;

	return 0;
}
static void rmt_storage_set_client_status(struct rmt_storage_srv *srv,
int enable)
{
struct rmt_shrd_mem *shrd_mem;
spin_lock(&rmc->lock);
list_for_each_entry(shrd_mem, &rmc->shrd_mem_list, list)
if (shrd_mem->srv->prog == srv->prog)
if (shrd_mem->smem_info)
shrd_mem->smem_info->client_sts = !!enable;
spin_unlock(&rmc->lock);
}
/*
 * sysfs attributes: both are read-only (their store hooks are NULL),
 * so advertise only the read permission bits -- declaring S_IWUSR
 * with no store method is misleading and is rejected/warned about by
 * sysfs permission checks.
 */
static DEVICE_ATTR(force_sync, S_IRUGO, show_force_sync, NULL);
static DEVICE_ATTR(sync_sts, S_IRUGO, show_sync_sts, NULL);

static struct attribute *dev_attrs[] = {
	&dev_attr_force_sync.attr,
	&dev_attr_sync_sts.attr,
	NULL,
};

static struct attribute_group dev_attr_grp = {
	.attrs = dev_attrs,
};
/* RPC reset-teardown hook: cancel any pending callback
 * re-registration while the modem link is going down. */
static void handle_restart_teardown(struct msm_rpc_client *client)
{
	struct rmt_storage_srv *srv = rmt_storage_get_srv(client->prog);

	if (!srv)
		return;

	pr_debug("%s: Modem restart for 0x%08x\n", __func__, srv->prog);
	cancel_delayed_work_sync(&srv->restart_work);
}
/* Delay before re-registering callbacks after a modem restart. */
#define RESTART_WORK_DELAY_MS 1000

/* RPC reset-setup hook: schedule callback re-registration once the
 * link is back up. */
static void handle_restart_setup(struct msm_rpc_client *client)
{
	struct rmt_storage_srv *srv = rmt_storage_get_srv(client->prog);

	if (!srv)
		return;

	pr_debug("%s: Scheduling restart for 0x%08x\n", __func__, srv->prog);
	queue_delayed_work(rmc->workq, &srv->restart_work,
			   msecs_to_jiffies(RESTART_WORK_DELAY_MS));
}
/*
 * Register (or re-register, after a modem restart) every server
 * callback.  The registrations are table-driven to avoid eight
 * copy-pasted call sites; order matches the original sequence.  All
 * entries are mandatory except alloc_rmt_buf, which older server
 * images do not implement, so its failure is only logged.
 */
static int rmt_storage_reg_callbacks(struct msm_rpc_client *client)
{
	static const struct {
		uint32_t proc;		/* registration procedure */
		uint32_t event;		/* RMT_STORAGE_EVNT_* */
		void *cb;		/* event callback */
		int optional;		/* failure is non-fatal */
	} regs[] = {
		{ RMT_STORAGE_REGISTER_OPEN_PROC,
		  RMT_STORAGE_EVNT_OPEN,
		  rmt_storage_event_open_cb, 0 },
		{ RMT_STORAGE_REGISTER_CB_PROC,
		  RMT_STORAGE_EVNT_CLOSE,
		  rmt_storage_event_close_cb, 0 },
		{ RMT_STORAGE_REGISTER_CB_PROC,
		  RMT_STORAGE_EVNT_WRITE_BLOCK,
		  rmt_storage_event_write_block_cb, 0 },
		{ RMT_STORAGE_REGISTER_CB_PROC,
		  RMT_STORAGE_EVNT_GET_DEV_ERROR,
		  rmt_storage_event_get_err_cb, 0 },
		{ RMT_STORAGE_REGISTER_WRITE_IOVEC_PROC,
		  RMT_STORAGE_EVNT_WRITE_IOVEC,
		  rmt_storage_event_write_iovec_cb, 0 },
		{ RMT_STORAGE_REGISTER_READ_IOVEC_PROC,
		  RMT_STORAGE_EVNT_READ_IOVEC,
		  rmt_storage_event_read_iovec_cb, 0 },
		{ RMT_STORAGE_REGISTER_CB_PROC,
		  RMT_STORAGE_EVNT_SEND_USER_DATA,
		  rmt_storage_event_user_data_cb, 0 },
		{ RMT_STORAGE_REGISTER_ALLOC_RMT_BUF_PROC,
		  RMT_STORAGE_EVNT_ALLOC_RMT_BUF,
		  rmt_storage_event_alloc_rmt_buf_cb, 1 },
	};
	size_t i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		ret = rmt_storage_reg_cb(client, regs[i].proc,
					 regs[i].event, regs[i].cb);
		if (!ret)
			continue;
		if (!regs[i].optional)
			return ret;
		pr_info("%s: Unable (%d) registering aloc_rmt_buf\n",
			__func__, ret);
	}

	pr_debug("%s: Callbacks (re)registered for 0x%08x\n\n", __func__,
		 client->prog);
	return 0;
}
/*
 * Deferred work run after a modem restart: re-register all server
 * callbacks, retrying on failure until either registration succeeds
 * or the RPC link is declared to be in reset.
 */
static void rmt_storage_restart_work(struct work_struct *work)
{
	struct rmt_storage_srv *srv;
	int ret;

	srv = container_of((struct delayed_work *)work,
			   struct rmt_storage_srv, restart_work);

	if (!rmt_storage_get_srv(srv->prog)) {
		pr_err("%s: Invalid server\n", __func__);
		return;
	}

	ret = rmt_storage_reg_callbacks(srv->rpc_client);
	if (!ret)
		return;

	/* fixed missing space in the original "for0x%08x" message */
	pr_err("%s: Error (%d) re-registering callbacks for 0x%08x\n",
	       __func__, ret, srv->prog);

	if (!msm_rpc_client_in_reset(srv->rpc_client))
		queue_delayed_work(rmc->workq, &srv->restart_work,
				   msecs_to_jiffies(RESTART_WORK_DELAY_MS));
}
/*
 * Platform probe for one remote-storage RPC server: seeds/imports the
 * SMEM shared-memory regions, registers the RPC client plus its
 * restart (reset) hooks, registers all server callbacks, marks the
 * client ready in SMEM and creates the sysfs sync attributes.
 */
static int rmt_storage_probe(struct platform_device *pdev)
{
	struct rpcsvr_platform_device *dev;
	struct rmt_storage_srv *srv;
	int ret;

	dev = container_of(pdev, struct rpcsvr_platform_device, base);
	srv = rmt_storage_get_srv(dev->prog);
	if (!srv) {
		pr_err("%s: Invalid prog = %#x\n", __func__, dev->prog);
		return -ENXIO;
	}

	rmt_storage_init_ramfs(srv);
	rmt_storage_get_ramfs(srv);

	INIT_DELAYED_WORK(&srv->restart_work, rmt_storage_restart_work);

	/* Client Registration */
	srv->rpc_client = msm_rpc_register_client2("rmt_storage",
						   dev->prog, dev->vers, 1,
						   handle_rmt_storage_call);
	if (IS_ERR(srv->rpc_client)) {
		pr_err("%s: Unable to register client (prog %.8x vers %.8x)\n",
		       __func__, dev->prog, dev->vers);
		ret = PTR_ERR(srv->rpc_client);
		return ret;
	}

	ret = msm_rpc_register_reset_callbacks(srv->rpc_client,
					       handle_restart_teardown,
					       handle_restart_setup);
	if (ret)
		goto unregister_client;

	pr_info("%s: Remote storage RPC client (0x%x)initialized\n",
		__func__, dev->prog);

	/* register server callbacks */
	ret = rmt_storage_reg_callbacks(srv->rpc_client);
	if (ret)
		goto unregister_client;

	/* For targets that poll SMEM, set status to ready */
	rmt_storage_set_client_status(srv, 1);

	ret = sysfs_create_group(&pdev->dev.kobj, &dev_attr_grp);
	if (ret)
		pr_err("%s: Failed to create sysfs node: %d\n", __func__, ret);

	return 0;

unregister_client:
	msm_rpc_unregister_client(srv->rpc_client);
	return ret;
}
/*
 * Platform shutdown hook: mark the client "not ready" so the remote
 * processor stops issuing storage requests.
 */
static void rmt_storage_client_shutdown(struct platform_device *pdev)
{
	struct rpcsvr_platform_device *dev;
	struct rmt_storage_srv *srv;

	dev = container_of(pdev, struct rpcsvr_platform_device, base);
	srv = rmt_storage_get_srv(dev->prog);
	/* NOTE(review): srv is not NULL-checked; presumably shutdown only
	 * runs for devices that passed probe -- confirm. */
	rmt_storage_set_client_status(srv, 0);
}
/* Tear down global client-info state; currently only the wakelock. */
static void rmt_storage_destroy_rmc(void)
{
	wake_lock_destroy(&rmc->wlock);
}
/*
 * One-time initialisation of the global rmc state: wait queue, lock,
 * event/client/shared-memory lists, client-id bitmap and wakelock.
 */
static void __init rmt_storage_init_client_info(void)
{
	/* Initialization */
	init_waitqueue_head(&rmc->event_q);
	spin_lock_init(&rmc->lock);
	atomic_set(&rmc->total_events, 0);
	INIT_LIST_HEAD(&rmc->event_list);
	INIT_LIST_HEAD(&rmc->client_list);
	INIT_LIST_HEAD(&rmc->shrd_mem_list);
	/* The client expects a non-zero return value for
	 * its open requests. Hence reserve 0 bit. */
	__set_bit(0, &rmc->cids);
	atomic_set(&rmc->wcount, 0);
	wake_lock_init(&rmc->wlock, WAKE_LOCK_SUSPEND, "rmt_storage");
}
/* Server descriptor for the MSM (apps-local modem) RPC program. */
static struct rmt_storage_srv msm_srv = {
	.prog = MSM_RMT_STORAGE_APIPROG,
	.plat_drv = {
		.probe	  = rmt_storage_probe,
		.shutdown = rmt_storage_client_shutdown,
		.driver	  = {
			.name	= "rs300000a7",
			.owner	= THIS_MODULE,
		},
	},
};

/* Server descriptor for the external MDM RPC program. */
static struct rmt_storage_srv mdm_srv = {
	.prog = MDM_RMT_STORAGE_APIPROG,
	.plat_drv = {
		.probe	  = rmt_storage_probe,
		.shutdown = rmt_storage_client_shutdown,
		.driver	  = {
			.name	= "rs300100a7",
			.owner	= THIS_MODULE,
		},
	},
};
/* Map an RPC program number to its server descriptor, NULL if unknown. */
static struct rmt_storage_srv *rmt_storage_get_srv(uint32_t prog)
{
	switch (prog) {
	case MSM_RMT_STORAGE_APIPROG:
		return &msm_srv;
	case MDM_RMT_STORAGE_APIPROG:
		return &mdm_srv;
	default:
		return NULL;
	}
}
/*
 * Map a remote-storage partition path to its RAMFS storage id.
 * Returns 0 for an unrecognised path.
 */
static uint32_t rmt_storage_get_sid(const char *path)
{
	static const struct {
		const char *path;
		uint32_t sid;
	} sid_map[] = {
		{ "/boot/modem_fs1",       RAMFS_MODEMSTORAGE_ID },
		{ "/boot/modem_fs2",       RAMFS_MODEMSTORAGE_ID },
		{ "/boot/modem_fsg",       RAMFS_MODEMSTORAGE_ID },
		{ "/q6_fs1_parti_id_0x59", RAMFS_MDM_STORAGE_ID },
		{ "/q6_fs2_parti_id_0x5A", RAMFS_MDM_STORAGE_ID },
		{ "/q6_fsg_parti_id_0x5B", RAMFS_MDM_STORAGE_ID },
		{ "ssd",                   RAMFS_SSD_STORAGE_ID },
	};
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(sid_map); i++)
		if (!strncmp(path, sid_map[i].path, MAX_PATH_NAME))
			return sid_map[i].sid;

	return 0;
}
/*
 * Module init: allocate the global client info, register the MSM and MDM
 * RPC platform drivers, the misc device, the restart workqueue and
 * (optionally) the SDIO shadow buffer and debugfs stats file.
 *
 * Fix: when the workqueue could not be created the original returned
 * -ENOMEM directly, leaking the misc device, both platform drivers,
 * rmc and (with CONFIG_MSM_SDIO_SMEM) the shadow buffer.  Unwind via
 * the normal error path instead.
 */
static int __init rmt_storage_init(void)
{
#ifdef CONFIG_MSM_SDIO_SMEM
	void *mdm_local_buf;
#endif
	int ret = 0;

	rmc = kzalloc(sizeof(struct rmt_storage_client_info), GFP_KERNEL);
	if (!rmc) {
		pr_err("%s: Unable to allocate memory\n", __func__);
		return -ENOMEM;
	}
	rmt_storage_init_client_info();

	ret = platform_driver_register(&msm_srv.plat_drv);
	if (ret) {
		pr_err("%s: Unable to register MSM RPC driver\n", __func__);
		goto rmc_free;
	}

	ret = platform_driver_register(&mdm_srv.plat_drv);
	if (ret) {
		pr_err("%s: Unable to register MDM RPC driver\n", __func__);
		goto unreg_msm_rpc;
	}

	ret = misc_register(&rmt_storage_device);
	if (ret) {
		pr_err("%s: Unable to register misc device %d\n", __func__,
		       MISC_DYNAMIC_MINOR);
		goto unreg_mdm_rpc;
	}

#ifdef CONFIG_MSM_SDIO_SMEM
	/* Local shadow of the MDM shared-memory region. */
	mdm_local_buf = kzalloc(MDM_LOCAL_BUF_SZ, GFP_KERNEL);
	if (!mdm_local_buf) {
		pr_err("%s: Unable to allocate shadow mem\n", __func__);
		ret = -ENOMEM;
		goto unreg_misc;
	}

	ret = rmt_storage_add_shrd_mem(RAMFS_MDM_STORAGE_ID,
				       __pa(mdm_local_buf),
				       MDM_LOCAL_BUF_SZ,
				       NULL, NULL, &mdm_srv);
	if (ret) {
		pr_err("%s: Unable to add shadow mem entry\n", __func__);
		goto free_mdm_local_buf;
	}

	pr_debug("%s: Shadow memory at %p (phys=%lx), %d bytes\n", __func__,
		 mdm_local_buf, __pa(mdm_local_buf), MDM_LOCAL_BUF_SZ);
#endif

	rmc->workq = create_singlethread_workqueue("rmt_storage");
	if (!rmc->workq) {
		ret = -ENOMEM;
#ifdef CONFIG_MSM_SDIO_SMEM
		goto free_mdm_local_buf;
#else
		goto unreg_misc;
#endif
	}

#ifdef CONFIG_MSM_RMT_STORAGE_CLIENT_STATS
	/* Best effort: missing debugfs stats is not fatal. */
	stats_dentry = debugfs_create_file("rmt_storage_stats", 0444, 0,
					   NULL, &debug_ops);
	if (!stats_dentry)
		pr_err("%s: Failed to create stats debugfs file\n", __func__);
#endif
	return 0;

#ifdef CONFIG_MSM_SDIO_SMEM
free_mdm_local_buf:
	kfree(mdm_local_buf);
#endif
unreg_misc:
	misc_deregister(&rmt_storage_device);
unreg_mdm_rpc:
	platform_driver_unregister(&mdm_srv.plat_drv);
unreg_msm_rpc:
	platform_driver_unregister(&msm_srv.plat_drv);
rmc_free:
	rmt_storage_destroy_rmc();
	kfree(rmc);
	return ret;
}
module_init(rmt_storage_init);
MODULE_DESCRIPTION("Remote Storage RPC Client");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
Vajnar/linux-stable-hx4700 | drivers/usb/host/ehci-s5p.c | 40 | 7930 | /*
* SAMSUNG S5P USB HOST EHCI Controller
*
* Copyright (C) 2011 Samsung Electronics Co.Ltd
* Author: Jingoo Han <jg1.han@samsung.com>
* Author: Joonyoung Shim <jy0922.shim@samsung.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
* Free Software Foundation; either version 2 of the License, or (at your
* option) any later version.
*
*/
#include <linux/clk.h>
#include <linux/platform_device.h>
#include <plat/ehci.h>
#include <plat/usb-phy.h>
/* Synopsys vendor register INSNREG00: AHB DMA burst configuration. */
#define EHCI_INSNREG00(base)			(base + 0x90)
#define EHCI_INSNREG00_ENA_INCR16		(0x1 << 25)
#define EHCI_INSNREG00_ENA_INCR8		(0x1 << 24)
#define EHCI_INSNREG00_ENA_INCR4		(0x1 << 23)
#define EHCI_INSNREG00_ENA_INCRX_ALIGN		(0x1 << 22)
#define EHCI_INSNREG00_ENABLE_DMA_BURST	\
	(EHCI_INSNREG00_ENA_INCR16 | EHCI_INSNREG00_ENA_INCR8 |	\
	 EHCI_INSNREG00_ENA_INCR4 | EHCI_INSNREG00_ENA_INCRX_ALIGN)

/* Per-device driver state: hcd handle plus its gate clock. */
struct s5p_ehci_hcd {
	struct device *dev;
	struct usb_hcd *hcd;
	struct clk *clk;
};
/* Generic EHCI ops table; all callbacks come from the ehci-hcd core. */
static const struct hc_driver s5p_ehci_hc_driver = {
	.description		= hcd_name,
	.product_desc		= "S5P EHCI Host Controller",
	.hcd_priv_size		= sizeof(struct ehci_hcd),

	.irq			= ehci_irq,
	.flags			= HCD_MEMORY | HCD_USB2,

	.reset			= ehci_init,
	.start			= ehci_run,
	.stop			= ehci_stop,
	.shutdown		= ehci_shutdown,

	.get_frame_number	= ehci_get_frame,

	.urb_enqueue		= ehci_urb_enqueue,
	.urb_dequeue		= ehci_urb_dequeue,
	.endpoint_disable	= ehci_endpoint_disable,
	.endpoint_reset		= ehci_endpoint_reset,

	.hub_status_data	= ehci_hub_status_data,
	.hub_control		= ehci_hub_control,
	.bus_suspend		= ehci_bus_suspend,
	.bus_resume		= ehci_bus_resume,

	.relinquish_port	= ehci_relinquish_port,
	.port_handed_over	= ehci_port_handed_over,

	.clear_tt_buffer_complete	= ehci_clear_tt_buffer_complete,
};
/*
 * Probe: create, map and register an EHCI HCD for the S5P on-chip
 * controller, gate its clock on and enable AHB burst DMA.
 *
 * Fix: platform_get_irq() returns a negative errno on failure, which
 * the original "if (!irq)" check let through to usb_add_hcd().
 */
static int __devinit s5p_ehci_probe(struct platform_device *pdev)
{
	struct s5p_ehci_platdata *pdata;
	struct s5p_ehci_hcd *s5p_ehci;
	struct usb_hcd *hcd;
	struct ehci_hcd *ehci;
	struct resource *res;
	int irq;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "No platform data defined\n");
		return -EINVAL;
	}

	s5p_ehci = kzalloc(sizeof(struct s5p_ehci_hcd), GFP_KERNEL);
	if (!s5p_ehci)
		return -ENOMEM;

	s5p_ehci->dev = &pdev->dev;

	hcd = usb_create_hcd(&s5p_ehci_hc_driver, &pdev->dev,
			     dev_name(&pdev->dev));
	if (!hcd) {
		dev_err(&pdev->dev, "Unable to create HCD\n");
		err = -ENOMEM;
		goto fail_hcd;
	}

	s5p_ehci->hcd = hcd;
	s5p_ehci->clk = clk_get(&pdev->dev, "usbhost");
	if (IS_ERR(s5p_ehci->clk)) {
		dev_err(&pdev->dev, "Failed to get usbhost clock\n");
		err = PTR_ERR(s5p_ehci->clk);
		goto fail_clk;
	}

	err = clk_enable(s5p_ehci->clk);
	if (err)
		goto fail_clken;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		dev_err(&pdev->dev, "Failed to get I/O memory\n");
		err = -ENXIO;
		goto fail_io;
	}

	hcd->rsrc_start = res->start;
	hcd->rsrc_len = resource_size(res);
	hcd->regs = ioremap(res->start, resource_size(res));
	if (!hcd->regs) {
		dev_err(&pdev->dev, "Failed to remap I/O memory\n");
		err = -ENOMEM;
		goto fail_io;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0) {
		dev_err(&pdev->dev, "Failed to get IRQ\n");
		err = irq ? irq : -ENODEV;	/* propagate errno if any */
		goto fail;
	}

	if (pdata->phy_init)
		pdata->phy_init(pdev, S5P_USB_PHY_HOST);

	ehci = hcd_to_ehci(hcd);
	ehci->caps = hcd->regs;
	ehci->regs = hcd->regs +
		HC_LENGTH(ehci, readl(&ehci->caps->hc_capbase));

	/* DMA burst Enable */
	writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));

	dbg_hcs_params(ehci, "reset");
	dbg_hcc_params(ehci, "reset");

	/* cache this readonly data; minimize chip reads */
	ehci->hcs_params = readl(&ehci->caps->hcs_params);

	ehci_reset(ehci);

	err = usb_add_hcd(hcd, irq, IRQF_SHARED);
	if (err) {
		dev_err(&pdev->dev, "Failed to add USB HCD\n");
		goto fail;
	}

	platform_set_drvdata(pdev, s5p_ehci);

	return 0;

fail:
	iounmap(hcd->regs);
fail_io:
	clk_disable(s5p_ehci->clk);
fail_clken:
	clk_put(s5p_ehci->clk);
fail_clk:
	usb_put_hcd(hcd);
fail_hcd:
	kfree(s5p_ehci);
	return err;
}
/*
 * Remove: unwind everything probe set up, in reverse order, including
 * powering the PHY back down via the platform hook.
 */
static int __devexit s5p_ehci_remove(struct platform_device *pdev)
{
	struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
	struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ehci->hcd;

	usb_remove_hcd(hcd);

	if (pdata && pdata->phy_exit)
		pdata->phy_exit(pdev, S5P_USB_PHY_HOST);

	iounmap(hcd->regs);

	clk_disable(s5p_ehci->clk);
	clk_put(s5p_ehci->clk);

	usb_put_hcd(hcd);
	kfree(s5p_ehci);

	return 0;
}
/* Shutdown hook: delegate to the EHCI core's shutdown if present. */
static void s5p_ehci_shutdown(struct platform_device *pdev)
{
	struct s5p_ehci_hcd *s5p_ehci = platform_get_drvdata(pdev);
	struct usb_hcd *hcd = s5p_ehci->hcd;

	if (hcd->driver->shutdown)
		hcd->driver->shutdown(hcd);
}
#ifdef CONFIG_PM
/*
 * System suspend: quiesce the controller (interrupts off, HW marked
 * inaccessible under ehci->lock), power down the PHY and gate the clock.
 * The root hub is already suspended or stopped by the USB/PM cores.
 */
static int s5p_ehci_suspend(struct device *dev)
{
	struct s5p_ehci_hcd *s5p_ehci = dev_get_drvdata(dev);
	struct usb_hcd *hcd = s5p_ehci->hcd;
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;
	unsigned long flags;
	int rc = 0;

	/* honour the minimum interval between controller state changes */
	if (time_before(jiffies, ehci->next_statechange))
		msleep(20);

	/*
	 * Root hub was already suspended. Disable irq emission and
	 * mark HW unaccessible. The PM and USB cores make sure that
	 * the root hub is either suspended or stopped.
	 */
	ehci_prepare_ports_for_controller_suspend(ehci, device_may_wakeup(dev));
	spin_lock_irqsave(&ehci->lock, flags);
	ehci_writel(ehci, 0, &ehci->regs->intr_enable);
	/* read back to flush the posted write before declaring HW off */
	(void)ehci_readl(ehci, &ehci->regs->intr_enable);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	spin_unlock_irqrestore(&ehci->lock, flags);

	if (pdata && pdata->phy_exit)
		pdata->phy_exit(pdev, S5P_USB_PHY_HOST);

	clk_disable(s5p_ehci->clk);

	return rc;
}
/*
 * System resume: ungate the clock, re-init the PHY and the vendor DMA
 * burst register, then either pick up where we left off (CF still set)
 * or do a full controller reset because power was lost.
 */
static int s5p_ehci_resume(struct device *dev)
{
	struct s5p_ehci_hcd *s5p_ehci = dev_get_drvdata(dev);
	struct usb_hcd *hcd = s5p_ehci->hcd;
	struct ehci_hcd *ehci = hcd_to_ehci(hcd);
	struct platform_device *pdev = to_platform_device(dev);
	struct s5p_ehci_platdata *pdata = pdev->dev.platform_data;

	clk_enable(s5p_ehci->clk);

	if (pdata && pdata->phy_init)
		pdata->phy_init(pdev, S5P_USB_PHY_HOST);

	/* DMA burst Enable */
	writel(EHCI_INSNREG00_ENABLE_DMA_BURST, EHCI_INSNREG00(hcd->regs));

	if (time_before(jiffies, ehci->next_statechange))
		msleep(100);

	/* Mark hardware accessible again as we are out of D3 state by now */
	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);

	/* CF still set => controller kept power; just re-enable interrupts */
	if (ehci_readl(ehci, &ehci->regs->configured_flag) == FLAG_CF) {
		int mask = INTR_MASK;

		ehci_prepare_ports_for_controller_resume(ehci);
		if (!hcd->self.root_hub->do_remote_wakeup)
			mask &= ~STS_PCD;
		ehci_writel(ehci, mask, &ehci->regs->intr_enable);
		ehci_readl(ehci, &ehci->regs->intr_enable);
		return 0;
	}

	/* Power was lost: halt, reset and rebuild controller state. */
	usb_root_hub_lost_power(hcd->self.root_hub);

	(void) ehci_halt(ehci);
	(void) ehci_reset(ehci);

	/* emptying the schedule aborts any urbs */
	spin_lock_irq(&ehci->lock);
	if (ehci->reclaim)
		end_unlink_async(ehci);
	ehci_work(ehci);
	spin_unlock_irq(&ehci->lock);

	ehci_writel(ehci, ehci->command, &ehci->regs->command);
	ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
	ehci_readl(ehci, &ehci->regs->command);	/* unblock posted writes */

	/* here we "know" root ports should always stay powered */
	ehci_port_power(ehci, 1);

	ehci->rh_state = EHCI_RH_SUSPENDED;

	return 0;
}
#else
#define s5p_ehci_suspend NULL
#define s5p_ehci_resume NULL
#endif
/* PM callbacks; NULL stubs when CONFIG_PM is disabled (see #ifdef above). */
static const struct dev_pm_ops s5p_ehci_pm_ops = {
	.suspend	= s5p_ehci_suspend,
	.resume		= s5p_ehci_resume,
};
/* Platform driver glue; no .remove is wired here (handled elsewhere?). */
static struct platform_driver s5p_ehci_driver = {
	.probe		= s5p_ehci_probe,
	.remove		= __devexit_p(s5p_ehci_remove),
	.shutdown	= s5p_ehci_shutdown,
	.driver = {
		.name	= "s5p-ehci",
		.owner	= THIS_MODULE,
		.pm	= &s5p_ehci_pm_ops,
	}
};
MODULE_ALIAS("platform:s5p-ehci");
| gpl-2.0 |
VincenzoDo/my-kernel | drivers/gpu/drm/nouveau/nvif/object.c | 552 | 7541 | /*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include <nvif/object.h>
#include <nvif/client.h>
#include <nvif/driver.h>
#include <nvif/ioctl.h>
/*
 * Fill in the common ioctl header (owner + object path, built by
 * walking parent links up to the root) and forward the request to the
 * client's driver.  Only version 0 of the header is understood.
 */
int
nvif_object_ioctl(struct nvif_object *object, void *data, u32 size, void **hack)
{
	struct nvif_client *client = nvif_client(object);
	union {
		struct nvif_ioctl_v0 v0;
	} *args = data;
	struct nvif_object *item = object;

	if (size < sizeof(*args) || args->v0.version != 0)
		return -ENOSYS;

	args->v0.owner = NVIF_IOCTL_V0_OWNER_ANY;
	args->v0.path_nr = 0;
	while (args->v0.path_nr < ARRAY_SIZE(args->v0.path)) {
		args->v0.path[args->v0.path_nr++] = item->handle;
		if (item->parent == item)
			break;
		item = item->parent;
	}

	return client->driver->ioctl(client->base.priv, client->super,
				     data, size, hack);
}
/*
 * Query the classes supported by an object.  On success returns the
 * number of classes and copies up to 'count' class idents into 'oclass';
 * on failure returns the negative ioctl error.
 */
int
nvif_object_sclass(struct nvif_object *object, u32 *oclass, int count)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_sclass_v0 sclass;
	} *args;
	u32 size = count * sizeof(args->sclass.oclass[0]);
	int ret;

	if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
		return -ENOMEM;
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_SCLASS;
	args->sclass.version = 0;
	args->sclass.count = count;

	memcpy(args->sclass.oclass, oclass, size);
	ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
	/* success is reported as the class count, not zero */
	ret = ret ? ret : args->sclass.count;
	/* NOTE(review): 'oclass' is written back even when the ioctl
	 * failed -- confirm callers tolerate that. */
	memcpy(oclass, args->sclass.oclass, size);
	kfree(args);
	return ret;
}
/*
 * Read a value of 'size' bytes from object register space at 'addr'.
 * Errors are silently mapped to a 0 return value.
 */
u32
nvif_object_rd(struct nvif_object *object, int size, u64 addr)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_rd_v0 rd;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_RD,
		.rd.size = size,
		.rd.addr = addr,
	};

	if (nvif_object_ioctl(object, &args, sizeof(args), NULL))
		return 0;	/*XXX: warn? */

	return args.rd.data;
}
/*
 * Write 'data' ('size' bytes wide) to object register space at 'addr'.
 * The ioctl result is deliberately ignored, as in the original.
 */
void
nvif_object_wr(struct nvif_object *object, int size, u64 addr, u32 data)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_wr_v0 wr;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_WR,
		.wr.size = size,
		.wr.addr = addr,
		.wr.data = data,
	};

	/*XXX: warn on failure? */
	(void)nvif_object_ioctl(object, &args, sizeof(args), NULL);
}
/*
 * Issue a method call on an object.  'data' is both input and output:
 * it is copied into the ioctl buffer and copied back afterwards.  Small
 * payloads use an on-stack buffer to avoid an allocation.
 */
int
nvif_object_mthd(struct nvif_object *object, u32 mthd, void *data, u32 size)
{
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_mthd_v0 mthd;
	} *args;
	u8 stack[128];
	int ret;

	if (sizeof(*args) + size > sizeof(stack)) {
		if (!(args = kmalloc(sizeof(*args) + size, GFP_KERNEL)))
			return -ENOMEM;
	} else {
		args = (void *)stack;
	}
	args->ioctl.version = 0;
	args->ioctl.type = NVIF_IOCTL_V0_MTHD;
	args->mthd.version = 0;
	args->mthd.method = mthd;

	memcpy(args->mthd.data, data, size);
	ret = nvif_object_ioctl(object, args, sizeof(*args) + size, NULL);
	/* NOTE(review): data copied back even on error -- confirm ok */
	memcpy(data, args->mthd.data, size);
	if (args != (void *)stack)
		kfree(args);
	return ret;
}
/*
 * Undo nvif_object_map(): unmap the CPU mapping through the driver (if
 * one exists) and release the kernel-side map via the UNMAP ioctl.
 * No-op when the object is not mapped (map.size == 0).
 */
void
nvif_object_unmap(struct nvif_object *object)
{
	if (object->map.size) {
		struct nvif_client *client = nvif_client(object);
		struct {
			struct nvif_ioctl_v0 ioctl;
			struct nvif_ioctl_unmap unmap;
		} args = {
			.ioctl.type = NVIF_IOCTL_V0_UNMAP,
		};

		if (object->map.ptr) {
			client->driver->unmap(client, object->map.ptr,
					      object->map.size);
			object->map.ptr = NULL;
		}

		nvif_object_ioctl(object, &args, sizeof(args), NULL);
		object->map.size = 0;
	}
}
/*
 * Map the object into CPU address space: the MAP ioctl yields a handle
 * and length, which the client driver then turns into a pointer.  On
 * driver map failure the ioctl-side mapping is rolled back and -ENOMEM
 * is returned.
 */
int
nvif_object_map(struct nvif_object *object)
{
	struct nvif_client *client = nvif_client(object);
	struct {
		struct nvif_ioctl_v0 ioctl;
		struct nvif_ioctl_map_v0 map;
	} args = {
		.ioctl.type = NVIF_IOCTL_V0_MAP,
	};
	int ret;

	ret = nvif_object_ioctl(object, &args, sizeof(args), NULL);
	if (ret)
		return ret;

	object->map.size = args.map.length;
	object->map.ptr = client->driver->map(client, args.map.handle,
					      object->map.size);
	if (object->map.ptr)
		return 0;

	nvif_object_unmap(object);
	return -ENOMEM;
}
/* Constructor payload: ioctl header + NEW args, allocated in init(). */
struct ctor {
	struct nvif_ioctl_v0 ioctl;
	struct nvif_ioctl_new_v0 new;
};
/*
 * Destroy an object: unmap it, issue the DEL ioctl, free the ctor
 * allocation that backs object->data, and drop the parent reference.
 * Safe on a partially-initialised object (parent == NULL is a no-op).
 */
void
nvif_object_fini(struct nvif_object *object)
{
	/* NOTE(review): ctor is derived via container_of() even when
	 * object->data is NULL; it is only passed to kfree() inside the
	 * data check below, so it is never dereferenced in that case. */
	struct ctor *ctor = container_of(object->data, typeof(*ctor), new.data);
	if (object->parent) {
		struct {
			struct nvif_ioctl_v0 ioctl;
			struct nvif_ioctl_del del;
		} args = {
			.ioctl.type = NVIF_IOCTL_V0_DEL,
		};

		nvif_object_unmap(object);
		nvif_object_ioctl(object, &args, sizeof(args), NULL);
		if (object->data) {
			object->size = 0;
			object->data = NULL;
			kfree(ctor);
		}
		nvif_object_ref(NULL, &object->parent);
	}
}
/*
 * Initialise 'object' under 'parent' and, if a parent exists, create
 * the server-side object via the NEW ioctl.  'data'/'size' are the
 * class-specific constructor arguments; they are copied into a 'ctor'
 * allocation which stays alive (as object->data) until fini().  On any
 * failure the object is torn down before returning.
 */
int
nvif_object_init(struct nvif_object *parent, void (*dtor)(struct nvif_object *),
		 u32 handle, u32 oclass, void *data, u32 size,
		 struct nvif_object *object)
{
	struct ctor *ctor;
	int ret = 0;

	object->parent = NULL;
	object->object = object;
	nvif_object_ref(parent, &object->parent);
	kref_init(&object->refcount);
	object->handle = handle;
	object->oclass = oclass;
	object->data = NULL;
	object->size = 0;
	object->dtor = dtor;
	object->map.ptr = NULL;
	object->map.size = 0;

	if (object->parent) {
		if (!(ctor = kmalloc(sizeof(*ctor) + size, GFP_KERNEL))) {
			nvif_object_fini(object);
			return -ENOMEM;
		}
		object->data = ctor->new.data;
		object->size = size;
		memcpy(object->data, data, size);

		ctor->ioctl.version = 0;
		ctor->ioctl.type = NVIF_IOCTL_V0_NEW;
		ctor->new.version = 0;
		ctor->new.route = NVIF_IOCTL_V0_ROUTE_NVIF;
		/* token lets the server hand events back to this object */
		ctor->new.token = (unsigned long)(void *)object;
		ctor->new.handle = handle;
		ctor->new.oclass = oclass;

		ret = nvif_object_ioctl(parent, ctor, sizeof(*ctor) +
					object->size, &object->priv);
	}

	if (ret)
		nvif_object_fini(object);
	return ret;
}
/* dtor for heap-allocated objects created by nvif_object_new(). */
static void
nvif_object_del(struct nvif_object *object)
{
	nvif_object_fini(object);
	kfree(object);
}
/*
 * Allocate and initialise a new object under 'parent'.  On failure
 * *pobject is set to NULL (when init fails) or left untouched (when
 * the allocation itself fails) and a negative errno is returned.
 */
int
nvif_object_new(struct nvif_object *parent, u32 handle, u32 oclass,
		void *data, u32 size, struct nvif_object **pobject)
{
	struct nvif_object *object;
	int ret;

	object = kzalloc(sizeof(*object), GFP_KERNEL);
	if (!object)
		return -ENOMEM;

	ret = nvif_object_init(parent, nvif_object_del, handle,
			       oclass, data, size, object);
	if (ret) {
		kfree(object);
		object = NULL;
	}

	*pobject = object;
	return ret;
}
/* kref release callback: invoke the object's registered destructor. */
static void
nvif_object_put(struct kref *kref)
{
	struct nvif_object *object =
		container_of(kref, typeof(*object), refcount);
	object->dtor(object);
}
/*
 * Replace *pobject with 'object', taking a reference on the new value
 * (if any) before dropping the reference on the old one (if any).
 * Pass object == NULL to simply release *pobject.
 */
void
nvif_object_ref(struct nvif_object *object, struct nvif_object **pobject)
{
	if (object)
		kref_get(&object->refcount);
	if (*pobject)
		kref_put(&(*pobject)->refcount, nvif_object_put);
	*pobject = object;
}
| gpl-2.0 |
Chad0989/incredikernel | drivers/rtc/rtc-twl4030.c | 552 | 14232 | /*
* rtc-twl4030.c -- TWL4030 Real Time Clock interface
*
* Copyright (C) 2007 MontaVista Software, Inc
* Author: Alexandre Rusev <source@mvista.com>
*
* Based on original TI driver twl4030-rtc.c
* Copyright (C) 2006 Texas Instruments, Inc.
*
* Based on rtc-omap.c
* Copyright (C) 2003 MontaVista Software, Inc.
* Author: George G. Davis <gdavis@mvista.com> or <source@mvista.com>
* Copyright (C) 2006 David Brownell
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/rtc.h>
#include <linux/bcd.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/i2c/twl4030.h>
/*
* RTC block register offsets (use TWL_MODULE_RTC)
*/
#define REG_SECONDS_REG 0x00
#define REG_MINUTES_REG 0x01
#define REG_HOURS_REG 0x02
#define REG_DAYS_REG 0x03
#define REG_MONTHS_REG 0x04
#define REG_YEARS_REG 0x05
#define REG_WEEKS_REG 0x06
#define REG_ALARM_SECONDS_REG 0x07
#define REG_ALARM_MINUTES_REG 0x08
#define REG_ALARM_HOURS_REG 0x09
#define REG_ALARM_DAYS_REG 0x0A
#define REG_ALARM_MONTHS_REG 0x0B
#define REG_ALARM_YEARS_REG 0x0C
#define REG_RTC_CTRL_REG 0x0D
#define REG_RTC_STATUS_REG 0x0E
#define REG_RTC_INTERRUPTS_REG 0x0F
#define REG_RTC_COMP_LSB_REG 0x10
#define REG_RTC_COMP_MSB_REG 0x11
/* RTC_CTRL_REG bitfields */
#define BIT_RTC_CTRL_REG_STOP_RTC_M 0x01
#define BIT_RTC_CTRL_REG_ROUND_30S_M 0x02
#define BIT_RTC_CTRL_REG_AUTO_COMP_M 0x04
#define BIT_RTC_CTRL_REG_MODE_12_24_M 0x08
#define BIT_RTC_CTRL_REG_TEST_MODE_M 0x10
#define BIT_RTC_CTRL_REG_SET_32_COUNTER_M 0x20
#define BIT_RTC_CTRL_REG_GET_TIME_M 0x40
/* RTC_STATUS_REG bitfields */
#define BIT_RTC_STATUS_REG_RUN_M 0x02
#define BIT_RTC_STATUS_REG_1S_EVENT_M 0x04
#define BIT_RTC_STATUS_REG_1M_EVENT_M 0x08
#define BIT_RTC_STATUS_REG_1H_EVENT_M 0x10
#define BIT_RTC_STATUS_REG_1D_EVENT_M 0x20
#define BIT_RTC_STATUS_REG_ALARM_M 0x40
#define BIT_RTC_STATUS_REG_POWER_UP_M 0x80
/* RTC_INTERRUPTS_REG bitfields */
#define BIT_RTC_INTERRUPTS_REG_EVERY_M 0x03
#define BIT_RTC_INTERRUPTS_REG_IT_TIMER_M 0x04
#define BIT_RTC_INTERRUPTS_REG_IT_ALARM_M 0x08
/* REG_SECONDS_REG through REG_YEARS_REG is how many registers? */
#define ALL_TIME_REGS 6
/*----------------------------------------------------------------------*/
/*
* Supports 1 byte read from TWL4030 RTC register.
*/
/*
 * Supports 1 byte read from TWL4030 RTC register.
 *
 * Fix: the split string literal concatenated to "TWL4030register";
 * add the missing space.
 */
static int twl4030_rtc_read_u8(u8 *data, u8 reg)
{
	int ret;

	ret = twl4030_i2c_read_u8(TWL4030_MODULE_RTC, data, reg);
	if (ret < 0)
		pr_err("twl4030_rtc: Could not read TWL4030 "
		       "register %X - error %d\n", reg, ret);
	return ret;
}
/*
* Supports 1 byte write to TWL4030 RTC registers.
*/
/*
 * Supports 1 byte write to TWL4030 RTC registers.
 *
 * Fix: the split string literal concatenated to "TWL4030register";
 * add the missing space.
 */
static int twl4030_rtc_write_u8(u8 data, u8 reg)
{
	int ret;

	ret = twl4030_i2c_write_u8(TWL4030_MODULE_RTC, data, reg);
	if (ret < 0)
		pr_err("twl4030_rtc: Could not write TWL4030 "
		       "register %X - error %d\n", reg, ret);
	return ret;
}
/*
* Cache the value for timer/alarm interrupts register; this is
* only changed by callers holding rtc ops lock (or resume).
*/
static unsigned char rtc_irq_bits;
/*
* Enable 1/second update and/or alarm interrupts.
*/
/*
 * Enable 1/second update and/or alarm interrupts.  The periodic
 * "EVERY" bits are always cleared here; only IT_TIMER/IT_ALARM style
 * bits are meant to be set.  The cached copy (rtc_irq_bits) is updated
 * only after a successful write.
 */
static int set_rtc_irq_bit(unsigned char bit)
{
	unsigned char val;
	int ret;

	val = rtc_irq_bits | bit;
	val &= ~BIT_RTC_INTERRUPTS_REG_EVERY_M;
	ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);
	if (ret == 0)
		rtc_irq_bits = val;

	return ret;
}
/*
* Disable update and/or alarm interrupts.
*/
/*
 * Disable the given update/alarm interrupt bit(s).  The cached copy is
 * only refreshed after the register write succeeds.
 */
static int mask_rtc_irq_bit(unsigned char bit)
{
	unsigned char val = rtc_irq_bits & ~bit;
	int ret = twl4030_rtc_write_u8(val, REG_RTC_INTERRUPTS_REG);

	if (!ret)
		rtc_irq_bits = val;
	return ret;
}
/* Enable or disable the RTC alarm interrupt. */
static int twl4030_rtc_alarm_irq_enable(struct device *dev, unsigned enabled)
{
	return enabled ?
		set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M) :
		mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
}
/* Enable or disable the 1 Hz periodic (timer) interrupt. */
static int twl4030_rtc_update_irq_enable(struct device *dev, unsigned enabled)
{
	return enabled ?
		set_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M) :
		mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
}
/*
* Gets current TWL4030 RTC time and date parameters.
*
* The RTC's time/alarm representation is not what gmtime(3) requires
* Linux to use:
*
* - Months are 1..12 vs Linux 0-11
* - Years are 0..99 vs Linux 1900..N (we assume 21st century)
*/
/*
 * Read the current time.  GET_TIME is latched first so that all six
 * time registers can then be read coherently in one I2C burst.
 * Register BCD values are converted to the struct rtc_time conventions
 * (months 0-11, years since 1900, 21st century assumed).
 */
static int twl4030_rtc_read_time(struct device *dev, struct rtc_time *tm)
{
	unsigned char rtc_data[ALL_TIME_REGS + 1];
	int ret;
	u8 save_control;

	ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
	if (ret < 0)
		return ret;

	/* latch the counters into the shadow registers */
	save_control |= BIT_RTC_CTRL_REG_GET_TIME_M;

	ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
	if (ret < 0)
		return ret;

	ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
			       REG_SECONDS_REG, ALL_TIME_REGS);

	if (ret < 0) {
		dev_err(dev, "rtc_read_time error %d\n", ret);
		return ret;
	}

	tm->tm_sec = bcd2bin(rtc_data[0]);
	tm->tm_min = bcd2bin(rtc_data[1]);
	tm->tm_hour = bcd2bin(rtc_data[2]);
	tm->tm_mday = bcd2bin(rtc_data[3]);
	tm->tm_mon = bcd2bin(rtc_data[4]) - 1;
	tm->tm_year = bcd2bin(rtc_data[5]) + 100;

	return ret;
}
/*
 * Set the RTC time: stop the counter, write all six time registers in
 * one I2C burst, then start the counter again.
 *
 * Fix: the return value of the "stop RTC" register write was discarded,
 * so the following "if (ret < 0)" tested the (successful) earlier read
 * and write failures went unnoticed.
 */
static int twl4030_rtc_set_time(struct device *dev, struct rtc_time *tm)
{
	unsigned char save_control;
	unsigned char rtc_data[ALL_TIME_REGS + 1];
	int ret;

	/* rtc_data[0] is unused: twl4030_i2c_write()'s buffer layout
	 * reserves the first byte */
	rtc_data[1] = bin2bcd(tm->tm_sec);
	rtc_data[2] = bin2bcd(tm->tm_min);
	rtc_data[3] = bin2bcd(tm->tm_hour);
	rtc_data[4] = bin2bcd(tm->tm_mday);
	rtc_data[5] = bin2bcd(tm->tm_mon + 1);
	rtc_data[6] = bin2bcd(tm->tm_year - 100);

	/* Stop RTC while updating the TC registers */
	ret = twl4030_rtc_read_u8(&save_control, REG_RTC_CTRL_REG);
	if (ret < 0)
		goto out;

	save_control &= ~BIT_RTC_CTRL_REG_STOP_RTC_M;
	ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);
	if (ret < 0)
		goto out;

	/* update all the time registers in one shot */
	ret = twl4030_i2c_write(TWL4030_MODULE_RTC, rtc_data,
				REG_SECONDS_REG, ALL_TIME_REGS);
	if (ret < 0) {
		dev_err(dev, "rtc_set_time error %d\n", ret);
		goto out;
	}

	/* Start back RTC */
	save_control |= BIT_RTC_CTRL_REG_STOP_RTC_M;
	ret = twl4030_rtc_write_u8(save_control, REG_RTC_CTRL_REG);

out:
	return ret;
}
/*
* Gets current TWL4030 RTC alarm time.
*/
/*
 * Gets current TWL4030 RTC alarm time.  Reads all six alarm registers
 * in one burst and reports the cached alarm-interrupt enable state.
 */
static int twl4030_rtc_read_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	unsigned char rtc_data[ALL_TIME_REGS + 1];
	int ret;

	ret = twl4030_i2c_read(TWL4030_MODULE_RTC, rtc_data,
			       REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
	if (ret < 0) {
		dev_err(dev, "rtc_read_alarm error %d\n", ret);
		return ret;
	}

	/* some of these fields may be wildcard/"match all" */
	alm->time.tm_sec = bcd2bin(rtc_data[0]);
	alm->time.tm_min = bcd2bin(rtc_data[1]);
	alm->time.tm_hour = bcd2bin(rtc_data[2]);
	alm->time.tm_mday = bcd2bin(rtc_data[3]);
	alm->time.tm_mon = bcd2bin(rtc_data[4]) - 1;
	alm->time.tm_year = bcd2bin(rtc_data[5]) + 100;

	/* report cached alarm enable state */
	if (rtc_irq_bits & BIT_RTC_INTERRUPTS_REG_IT_ALARM_M)
		alm->enabled = 1;

	return ret;
}
/*
 * Program the alarm: disable the alarm interrupt first, write all six
 * alarm registers in one burst, then re-enable the interrupt if the
 * caller asked for it.
 */
static int twl4030_rtc_set_alarm(struct device *dev, struct rtc_wkalrm *alm)
{
	unsigned char alarm_data[ALL_TIME_REGS + 1];
	int ret;

	ret = twl4030_rtc_alarm_irq_enable(dev, 0);
	if (ret)
		goto out;

	/* alarm_data[0] unused: i2c write buffer reserves the first byte */
	alarm_data[1] = bin2bcd(alm->time.tm_sec);
	alarm_data[2] = bin2bcd(alm->time.tm_min);
	alarm_data[3] = bin2bcd(alm->time.tm_hour);
	alarm_data[4] = bin2bcd(alm->time.tm_mday);
	alarm_data[5] = bin2bcd(alm->time.tm_mon + 1);
	alarm_data[6] = bin2bcd(alm->time.tm_year - 100);

	/* update all the alarm registers in one shot */
	ret = twl4030_i2c_write(TWL4030_MODULE_RTC, alarm_data,
				REG_ALARM_SECONDS_REG, ALL_TIME_REGS);
	if (ret) {
		dev_err(dev, "rtc_set_alarm error %d\n", ret);
		goto out;
	}

	if (alm->enabled)
		ret = twl4030_rtc_alarm_irq_enable(dev, 1);
out:
	return ret;
}
/*
 * Threaded-context IRQ handler: work out whether the alarm or the
 * periodic timer fired, acknowledge the status bit, clear the latched
 * power IRQ (see FIXME below) and notify the RTC core.
 */
static irqreturn_t twl4030_rtc_interrupt(int irq, void *rtc)
{
	unsigned long events = 0;
	int ret = IRQ_NONE;
	int res;
	u8 rd_reg;

#ifdef CONFIG_LOCKDEP
	/* WORKAROUND for lockdep forcing IRQF_DISABLED on us, which
	 * we don't want and can't tolerate.  Although it might be
	 * friendlier not to borrow this thread context...
	 */
	local_irq_enable();
#endif

	res = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
	if (res)
		goto out;
	/*
	 * Figure out source of interrupt: ALARM or TIMER in RTC_STATUS_REG.
	 * only one (ALARM or RTC) interrupt source may be enabled
	 * at time, we also could check our results
	 * by reading RTS_INTERRUPTS_REGISTER[IT_TIMER,IT_ALARM]
	 */
	if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
		events |= RTC_IRQF | RTC_AF;
	else
		events |= RTC_IRQF | RTC_UF;

	/* ack: status bits are write-1-to-clear */
	res = twl4030_rtc_write_u8(rd_reg | BIT_RTC_STATUS_REG_ALARM_M,
				   REG_RTC_STATUS_REG);
	if (res)
		goto out;

	/* Clear on Read enabled. RTC_IT bit of TWL4030_INT_PWR_ISR1
	 * needs 2 reads to clear the interrupt. One read is done in
	 * do_twl4030_pwrirq(). Doing the second read, to clear
	 * the bit.
	 *
	 * FIXME the reason PWR_ISR1 needs an extra read is that
	 * RTC_IF retriggered until we cleared REG_ALARM_M above.
	 * But re-reading like this is a bad hack; by doing so we
	 * risk wrongly clearing status for some other IRQ (losing
	 * the interrupt).  Be smarter about handling RTC_UF ...
	 */
	res = twl4030_i2c_read_u8(TWL4030_MODULE_INT,
			&rd_reg, TWL4030_INT_PWR_ISR1);
	if (res)
		goto out;

	/* Notify RTC core on event */
	rtc_update_irq(rtc, 1, events);

	ret = IRQ_HANDLED;
out:
	return ret;
}
/* RTC class operations exposed to the rtc core. */
static struct rtc_class_ops twl4030_rtc_ops = {
	.read_time	= twl4030_rtc_read_time,
	.set_time	= twl4030_rtc_set_time,
	.read_alarm	= twl4030_rtc_read_alarm,
	.set_alarm	= twl4030_rtc_set_alarm,
	.alarm_irq_enable = twl4030_rtc_alarm_irq_enable,
	.update_irq_enable = twl4030_rtc_update_irq_enable,
};
/*----------------------------------------------------------------------*/
/*
 * Probe: register the rtc device, clear any pending power-up/alarm
 * status, hook the IRQ, start the RTC counter if it is stopped and
 * cache the interrupt-enable register.
 */
static int __devinit twl4030_rtc_probe(struct platform_device *pdev)
{
	struct rtc_device *rtc;
	int ret = 0;
	int irq = platform_get_irq(pdev, 0);
	u8 rd_reg;

	if (irq <= 0)
		return -EINVAL;

	rtc = rtc_device_register(pdev->name,
				  &pdev->dev, &twl4030_rtc_ops, THIS_MODULE);
	if (IS_ERR(rtc)) {
		ret = PTR_ERR(rtc);
		dev_err(&pdev->dev, "can't register RTC device, err %ld\n",
			PTR_ERR(rtc));
		goto out0;
	}

	platform_set_drvdata(pdev, rtc);

	ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_STATUS_REG);
	if (ret < 0)
		goto out1;

	if (rd_reg & BIT_RTC_STATUS_REG_POWER_UP_M)
		dev_warn(&pdev->dev, "Power up reset detected.\n");

	if (rd_reg & BIT_RTC_STATUS_REG_ALARM_M)
		dev_warn(&pdev->dev, "Pending Alarm interrupt detected.\n");

	/* Clear RTC Power up reset and pending alarm interrupts */
	ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_STATUS_REG);
	if (ret < 0)
		goto out1;

	ret = request_irq(irq, twl4030_rtc_interrupt,
			  IRQF_TRIGGER_RISING,
			  dev_name(&rtc->dev), rtc);
	if (ret < 0) {
		dev_err(&pdev->dev, "IRQ is not free.\n");
		goto out1;
	}

	/* Check RTC module status, Enable if it is off */
	ret = twl4030_rtc_read_u8(&rd_reg, REG_RTC_CTRL_REG);
	if (ret < 0)
		goto out2;

	if (!(rd_reg & BIT_RTC_CTRL_REG_STOP_RTC_M)) {
		dev_info(&pdev->dev, "Enabling TWL4030-RTC.\n");
		/* NOTE(review): on this chip STOP_RTC=1 means "run" */
		rd_reg = BIT_RTC_CTRL_REG_STOP_RTC_M;
		ret = twl4030_rtc_write_u8(rd_reg, REG_RTC_CTRL_REG);
		if (ret < 0)
			goto out2;
	}

	/* init cached IRQ enable bits */
	ret = twl4030_rtc_read_u8(&rtc_irq_bits, REG_RTC_INTERRUPTS_REG);
	if (ret < 0)
		goto out2;

	return ret;

out2:
	free_irq(irq, rtc);
out1:
	rtc_device_unregister(rtc);
out0:
	return ret;
}
/*
* Disable all TWL4030 RTC module interrupts.
* Sets status flag to free.
*/
/*
 * Disable all TWL4030 RTC module interrupts.
 * Sets status flag to free.
 */
static int __devexit twl4030_rtc_remove(struct platform_device *pdev)
{
	/* leave rtc running, but disable irqs */
	struct rtc_device *rtc = platform_get_drvdata(pdev);
	int irq = platform_get_irq(pdev, 0);

	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_ALARM_M);
	mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);

	free_irq(irq, rtc);

	rtc_device_unregister(rtc);
	platform_set_drvdata(pdev, NULL);
	return 0;
}
/* Power-off hook: mask only the periodic timer interrupt; the alarm
 * interrupt is deliberately left enabled (see comment below). */
static void twl4030_rtc_shutdown(struct platform_device *pdev)
{
/* mask timer interrupts, but leave alarm interrupts on to enable
power-on when alarm is triggered */
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
}
#ifdef CONFIG_PM
static unsigned char irqstat;
/* Save the cached interrupt-enable bits into 'irqstat' and mask the
 * timer interrupt for the duration of suspend; only the timer bit is
 * masked, so the alarm remains usable as a wake source. */
static int twl4030_rtc_suspend(struct platform_device *pdev, pm_message_t state)
{
irqstat = rtc_irq_bits;
mask_rtc_irq_bit(BIT_RTC_INTERRUPTS_REG_IT_TIMER_M);
return 0;
}
/* Restore the interrupt-enable bits saved by twl4030_rtc_suspend(). */
static int twl4030_rtc_resume(struct platform_device *pdev)
{
set_rtc_irq_bit(irqstat);
return 0;
}
#else
#define twl4030_rtc_suspend NULL
#define twl4030_rtc_resume NULL
#endif
MODULE_ALIAS("platform:twl4030_rtc");
/* Platform driver glue.  The suspend/resume members compile to NULL
 * when CONFIG_PM is disabled (see the #else stubs above). */
static struct platform_driver twl4030rtc_driver = {
.probe = twl4030_rtc_probe,
.remove = __devexit_p(twl4030_rtc_remove),
.shutdown = twl4030_rtc_shutdown,
.suspend = twl4030_rtc_suspend,
.resume = twl4030_rtc_resume,
.driver = {
.owner = THIS_MODULE,
.name = "twl4030_rtc",
},
};
/* Module entry point: register the platform driver. */
static int __init twl4030_rtc_init(void)
{
return platform_driver_register(&twl4030rtc_driver);
}
module_init(twl4030_rtc_init);
/* Module exit point: unregister the platform driver. */
static void __exit twl4030_rtc_exit(void)
{
platform_driver_unregister(&twl4030rtc_driver);
}
module_exit(twl4030_rtc_exit);
MODULE_AUTHOR("Texas Instruments, MontaVista Software");
MODULE_LICENSE("GPL");
| gpl-2.0 |
zzicewind/linux | drivers/gpu/drm/nouveau/nvkm/subdev/i2c/base.c | 552 | 15623 | /*
* Copyright 2013 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "priv.h"
#include "pad.h"
#include <core/device.h>
#include <core/notify.h>
#include <core/option.h>
#include <subdev/bios.h>
#include <subdev/bios/dcb.h>
/******************************************************************************
* interface to linux i2c bit-banging algorithm
*****************************************************************************/
#ifdef CONFIG_NOUVEAU_I2C_INTERNAL_DEFAULT
#define CSTMSEL true
#else
#define CSTMSEL false
#endif
static int
nvkm_i2c_pre_xfer(struct i2c_adapter *adap)
{
struct i2c_algo_bit_data *bit = adap->algo_data;
struct nvkm_i2c_port *port = bit->data;
return nvkm_i2c(port)->acquire(port, bit->timeout);
}
/* Give the pad back once a bit-banged transfer has completed.
 *
 * Note: release() has void type; the old code did
 * "return nvkm_i2c(port)->release(port);", which ISO C forbids in a
 * function returning void (C99/C11 6.8.6.4) - call it plainly.
 */
static void
nvkm_i2c_post_xfer(struct i2c_adapter *adap)
{
    struct i2c_algo_bit_data *bit = adap->algo_data;
    struct nvkm_i2c_port *port = bit->data;

    nvkm_i2c(port)->release(port);
}
static void
nvkm_i2c_setscl(void *data, int state)
{
struct nvkm_i2c_port *port = data;
port->func->drive_scl(port, state);
}
static void
nvkm_i2c_setsda(void *data, int state)
{
struct nvkm_i2c_port *port = data;
port->func->drive_sda(port, state);
}
static int
nvkm_i2c_getscl(void *data)
{
struct nvkm_i2c_port *port = data;
return port->func->sense_scl(port);
}
static int
nvkm_i2c_getsda(void *data)
{
struct nvkm_i2c_port *port = data;
return port->func->sense_sda(port);
}
/******************************************************************************
* base i2c "port" class implementation
*****************************************************************************/
/* Stop/suspend an i2c port: quiesce the pad it sits on first, then
 * run the generic object fini on the port itself. */
int
_nvkm_i2c_port_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_i2c_port *port = (void *)object;
struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
nv_ofuncs(pad)->fini(nv_object(pad), suspend);
return nvkm_object_fini(&port->base, suspend);
}
/* Port destructor: remove the Linux i2c adapter before tearing down
 * the underlying nvkm object. */
void
_nvkm_i2c_port_dtor(struct nvkm_object *object)
{
struct nvkm_i2c_port *port = (void *)object;
i2c_del_adapter(&port->adapter);
nvkm_object_destroy(&port->base);
}
/*
 * Common constructor for i2c port objects.
 *
 * Creates the base object, fills in the Linux i2c_adapter, and either
 * hooks it up to the kernel's bit-banging algorithm (i2c-algo-bit;
 * unless overridden via the "NvI2C" config option) or to the
 * driver-provided algorithm.  On success the port is appended to the
 * subdev's port list.
 */
int
nvkm_i2c_port_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, u8 index,
const struct i2c_algorithm *algo,
const struct nvkm_i2c_func *func,
int size, void **pobject)
{
struct nvkm_device *device = nv_device(parent);
struct nvkm_i2c *i2c = nvkm_i2c(parent);
struct nvkm_i2c_port *port;
int ret;
ret = nvkm_object_create_(parent, engine, oclass, 0, size, pobject);
port = *pobject;
if (ret)
return ret;
snprintf(port->adapter.name, sizeof(port->adapter.name),
"nvkm-%s-%d", device->name, index);
port->adapter.owner = THIS_MODULE;
port->adapter.dev.parent = nv_device_base(device);
port->index = index;
/* aux == -1 marks "no AUX channel" (see the interrupt helpers). */
port->aux = -1;
port->func = func;
mutex_init(&port->mutex);
if ( algo == &nvkm_i2c_bit_algo &&
!nvkm_boolopt(device->cfgopt, "NvI2C", CSTMSEL)) {
struct i2c_algo_bit_data *bit;
bit = kzalloc(sizeof(*bit), GFP_KERNEL);
if (!bit)
return -ENOMEM;
/* NOTE(review): 'bit' is never kfree'd here on
 * i2c_bit_add_bus() failure, nor in _nvkm_i2c_port_dtor();
 * confirm the intended lifetime of this allocation. */
bit->udelay = 10;
bit->timeout = usecs_to_jiffies(2200);
bit->data = port;
bit->pre_xfer = nvkm_i2c_pre_xfer;
bit->post_xfer = nvkm_i2c_post_xfer;
bit->setsda = nvkm_i2c_setsda;
bit->setscl = nvkm_i2c_setscl;
bit->getsda = nvkm_i2c_getsda;
bit->getscl = nvkm_i2c_getscl;
port->adapter.algo_data = bit;
ret = i2c_bit_add_bus(&port->adapter);
} else {
port->adapter.algo_data = port;
port->adapter.algo = algo;
ret = i2c_add_adapter(&port->adapter);
}
if (ret == 0)
list_add_tail(&port->head, &i2c->ports);
return ret;
}
/******************************************************************************
* base i2c subdev class implementation
*****************************************************************************/
/*
 * Look up a port by DCB i2c index.
 *
 * The NV_I2C_DEFAULT(0)/(1) aliases are first resolved to a concrete
 * index: DCB 3.0+ VBIOSes publish the default ddc/aux bus indices in
 * the low/high nibble of the byte at offset 4 of the i2c table;
 * older VBIOSes fall back to bus 2.  Returns NULL if no registered
 * port matches.
 *
 * Fix: the local holding the i2c table pointer was named "i2c",
 * shadowing the function parameter of the same name (-Wshadow);
 * renamed to "dcb_i2c".
 */
static struct nvkm_i2c_port *
nvkm_i2c_find(struct nvkm_i2c *i2c, u8 index)
{
    struct nvkm_bios *bios = nvkm_bios(i2c);
    struct nvkm_i2c_port *port;

    if (index == NV_I2C_DEFAULT(0) ||
        index == NV_I2C_DEFAULT(1)) {
        u8 ver, hdr, cnt, len;
        u16 dcb_i2c = dcb_i2c_table(bios, &ver, &hdr, &cnt, &len);
        if (dcb_i2c && ver >= 0x30) {
            u8 auxidx = nv_ro08(bios, dcb_i2c + 4);
            if (index == NV_I2C_DEFAULT(0))
                index = (auxidx & 0x0f) >> 0;
            else
                index = (auxidx & 0xf0) >> 4;
        } else {
            index = 2;
        }
    }

    list_for_each_entry(port, &i2c->ports, head) {
        if (port->index == index)
            return port;
    }

    return NULL;
}
/* Return the first registered port whose class handle matches
 * 'type', or NULL when none does. */
static struct nvkm_i2c_port *
nvkm_i2c_find_type(struct nvkm_i2c *i2c, u16 type)
{
    struct nvkm_i2c_port *found = NULL;
    struct nvkm_i2c_port *it;

    list_for_each_entry(it, &i2c->ports, head) {
        if (nv_hclass(it) == type) {
            found = it;
            break;
        }
    }
    return found;
}
/* Drop one reference on the port's pad; when the last user goes
 * away, power the pad down and wake anyone waiting to acquire it. */
static void
nvkm_i2c_release_pad(struct nvkm_i2c_port *port)
{
struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
struct nvkm_i2c *i2c = nvkm_i2c(port);
if (atomic_dec_and_test(&nv_object(pad)->usecount)) {
nv_ofuncs(pad)->fini(nv_object(pad), false);
wake_up_all(&i2c->wait);
}
}
/* Attempt to take the pad for 'port'.  Returns 0 on success or
 * -EBUSY when the pad is already held by an unrelated port. */
static int
nvkm_i2c_try_acquire_pad(struct nvkm_i2c_port *port)
{
struct nvkm_i2c_pad *pad = nvkm_i2c_pad(port);
if (atomic_add_return(1, &nv_object(pad)->usecount) != 1) {
struct nvkm_object *owner = (void *)pad->port;
/* Walk up the current owner's parent chain: re-entrant
 * acquisition by the same port (or one of its children) is
 * allowed and keeps the extra reference. */
do {
if (owner == (void *)port)
return 0;
owner = owner->parent;
} while(owner);
/* Unrelated owner - undo the reference we just took. */
nvkm_i2c_release_pad(port);
return -EBUSY;
}
/* First user: record the winning port and power the pad up. */
pad->next = port;
nv_ofuncs(pad)->init(nv_object(pad));
return 0;
}
/* Acquire the pad, sleeping until the current owner releases it.
 * 'timeout' (jiffies) bounds the wait; 0 means wait forever.
 * Returns 0 on success or -EBUSY on timeout. */
static int
nvkm_i2c_acquire_pad(struct nvkm_i2c_port *port, unsigned long timeout)
{
struct nvkm_i2c *i2c = nvkm_i2c(port);
if (timeout) {
if (wait_event_timeout(i2c->wait,
nvkm_i2c_try_acquire_pad(port) == 0,
timeout) == 0)
return -EBUSY;
} else {
wait_event(i2c->wait, nvkm_i2c_try_acquire_pad(port) == 0);
}
return 0;
}
/* Release the port: give the pad back and drop the per-port mutex
 * taken by nvkm_i2c_acquire(). */
static void
nvkm_i2c_release(struct nvkm_i2c_port *port)
__releases(pad->mutex)
{
nvkm_i2c(port)->release_pad(port);
mutex_unlock(&port->mutex);
}
/* Acquire exclusive use of the port: take the per-port mutex, then
 * the pad.  On pad failure the mutex is dropped again so callers
 * never see a half-acquired port. */
static int
nvkm_i2c_acquire(struct nvkm_i2c_port *port, unsigned long timeout)
__acquires(pad->mutex)
{
int ret;
mutex_lock(&port->mutex);
if ((ret = nvkm_i2c(port)->acquire_pad(port, timeout)))
mutex_unlock(&port->mutex);
return ret;
}
/*
 * Probe a zero-address-terminated list of candidate devices ('info')
 * on the bus selected by 'index'.  An optional 'match' callback may
 * veto a device that ACKed its address.  Returns the array index of
 * the first device accepted, or a negative errno.
 */
static int
nvkm_i2c_identify(struct nvkm_i2c *i2c, int index, const char *what,
struct nvkm_i2c_board_info *info,
bool (*match)(struct nvkm_i2c_port *,
struct i2c_board_info *, void *), void *data)
{
struct nvkm_i2c_port *port = nvkm_i2c_find(i2c, index);
int i;
if (!port) {
nv_debug(i2c, "no bus when probing %s on %d\n", what, index);
return -ENODEV;
}
nv_debug(i2c, "probing %ss on bus: %d\n", what, port->index);
for (i = 0; info[i].dev.addr; i++) {
u8 orig_udelay = 0;
/* Temporarily apply a device-specific bit-bang delay. */
if ((port->adapter.algo == &i2c_bit_algo) &&
(info[i].udelay != 0)) {
struct i2c_algo_bit_data *algo = port->adapter.algo_data;
nv_debug(i2c, "using custom udelay %d instead of %d\n",
info[i].udelay, algo->udelay);
orig_udelay = algo->udelay;
algo->udelay = info[i].udelay;
}
if (nv_probe_i2c(port, info[i].dev.addr) &&
(!match || match(port, &info[i].dev, data))) {
nv_info(i2c, "detected %s: %s\n", what,
info[i].dev.type);
/* NOTE(review): returning here leaves the custom udelay
 * in place (it is only restored below on a miss) -
 * confirm whether that is intentional. */
return i;
}
if (orig_udelay) {
struct i2c_algo_bit_data *algo = port->adapter.algo_data;
algo->udelay = orig_udelay;
}
}
nv_debug(i2c, "no devices found.\n");
return -ENODEV;
}
/* Event fini hook: clear this port's AUX interrupt mask bits, if the
 * port has an AUX channel at all. */
static void
nvkm_i2c_intr_fini(struct nvkm_event *event, int type, int index)
{
    struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
    const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
    struct nvkm_i2c_port *port = i2c->find(i2c, index);

    if (!port || port->aux < 0)
        return;
    impl->aux_mask(i2c, type, 1 << port->aux, 0);
}
/* Event init hook: set this port's AUX interrupt mask bits, if the
 * port has an AUX channel at all. */
static void
nvkm_i2c_intr_init(struct nvkm_event *event, int type, int index)
{
    struct nvkm_i2c *i2c = container_of(event, typeof(*i2c), event);
    const struct nvkm_i2c_impl *impl = (void *)nv_object(i2c)->oclass;
    struct nvkm_i2c_port *port = i2c->find(i2c, index);

    if (!port || port->aux < 0)
        return;
    impl->aux_mask(i2c, type, 1 << port->aux, 1 << port->aux);
}
/* Validate and unpack an i2c notify request: payload must be exactly
 * one struct nvkm_i2c_ntfy_req. */
static int
nvkm_i2c_intr_ctor(struct nvkm_object *object, void *data, u32 size,
		   struct nvkm_notify *notify)
{
    struct nvkm_i2c_ntfy_req *req = data;

    if (WARN_ON(size != sizeof(*req)))
        return -EINVAL;

    notify->size  = sizeof(struct nvkm_i2c_ntfy_rep);
    notify->types = req->mask;
    notify->index = req->port;
    return 0;
}
/* Subdev interrupt handler: collect AUX plug/unplug/irq/done status
 * and forward one event per affected port. */
static void
nvkm_i2c_intr(struct nvkm_subdev *subdev)
{
    struct nvkm_i2c_impl *impl = (void *)nv_oclass(subdev);
    struct nvkm_i2c *i2c = nvkm_i2c(subdev);
    struct nvkm_i2c_port *port;
    u32 hi, lo, rq, tx;

    if (!impl->aux_stat)
        return;

    impl->aux_stat(i2c, &hi, &lo, &rq, &tx);
    if (!hi && !lo && !rq && !tx)
        return;

    list_for_each_entry(port, &i2c->ports, head) {
        u32 bit, ev = 0;

        if (port->aux < 0)
            continue;
        bit = 1 << port->aux;

        if (hi & bit)
            ev |= NVKM_I2C_PLUG;
        if (lo & bit)
            ev |= NVKM_I2C_UNPLUG;
        if (rq & bit)
            ev |= NVKM_I2C_IRQ;
        if (tx & bit)
            ev |= NVKM_I2C_DONE;

        if (ev) {
            struct nvkm_i2c_ntfy_rep rep = {
                .mask = ev,
            };
            nvkm_event_send(&i2c->event, rep.mask,
                            port->index, &rep, sizeof(rep));
        }
    }
}
/* Event-manager hooks for the AUX-channel notification source. */
static const struct nvkm_event_func
nvkm_i2c_intr_func = {
.ctor = nvkm_i2c_intr_ctor,
.init = nvkm_i2c_intr_init,
.fini = nvkm_i2c_intr_fini,
};
/* Subdev fini: stop every port (rolling already-stopped ports back
 * if a suspend-time stop fails), then mask and acknowledge all AUX
 * interrupts before finishing the base subdev. */
int
_nvkm_i2c_fini(struct nvkm_object *object, bool suspend)
{
struct nvkm_i2c_impl *impl = (void *)nv_oclass(object);
struct nvkm_i2c *i2c = (void *)object;
struct nvkm_i2c_port *port;
u32 mask;
int ret;
list_for_each_entry(port, &i2c->ports, head) {
ret = nv_ofuncs(port)->fini(nv_object(port), suspend);
if (ret && suspend)
goto fail;
}
/* Mask all AUX interrupt sources, then read the status registers
 * to acknowledge anything still pending. */
if ((mask = (1 << impl->aux) - 1), impl->aux_stat) {
impl->aux_mask(i2c, NVKM_I2C_ANY, mask, 0);
impl->aux_stat(i2c, &mask, &mask, &mask, &mask);
}
return nvkm_subdev_fini(&i2c->base, suspend);
fail:
list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
nv_ofuncs(port)->init(nv_object(port));
}
return ret;
}
/* Subdev init: bring up the base subdev, then every port; on a port
 * failure, wind back the ports that were already initialised. */
int
_nvkm_i2c_init(struct nvkm_object *object)
{
struct nvkm_i2c *i2c = (void *)object;
struct nvkm_i2c_port *port;
int ret;
ret = nvkm_subdev_init(&i2c->base);
if (ret == 0) {
list_for_each_entry(port, &i2c->ports, head) {
ret = nv_ofuncs(port)->init(nv_object(port));
if (ret)
goto fail;
}
}
return ret;
fail:
list_for_each_entry_continue_reverse(port, &i2c->ports, head) {
nv_ofuncs(port)->fini(nv_object(port), false);
}
return ret;
}
/* Subdev destructor: tear down the event machinery and drop the
 * reference held on every port before destroying the base subdev. */
void
_nvkm_i2c_dtor(struct nvkm_object *object)
{
struct nvkm_i2c *i2c = (void *)object;
struct nvkm_i2c_port *port, *temp;
nvkm_event_fini(&i2c->event);
list_for_each_entry_safe(port, temp, &i2c->ports, head) {
nvkm_object_ref(NULL, (struct nvkm_object **)&port);
}
nvkm_subdev_destroy(&i2c->base);
}
/* Classes for ddc/aux channels living behind external encoders. */
static struct nvkm_oclass *
nvkm_i2c_extdev_sclass[] = {
nvkm_anx9805_sclass,
};
/*
 * Instantiate one i2c port described by a DCB i2c table entry.
 *
 * First constructs the pad object the port sits on - a shared pad
 * when the entry specifies a share partner, otherwise a dedicated
 * pad keyed off the drive/auxch id - then walks impl->sclass for a
 * class whose handle matches 'type' and constructs the port itself.
 * Failures are swallowed: a port that cannot be constructed is
 * simply skipped.
 */
static void
nvkm_i2c_create_port(struct nvkm_i2c *i2c, int index, u8 type,
struct dcb_i2c_entry *info)
{
const struct nvkm_i2c_impl *impl = (void *)nv_oclass(i2c);
struct nvkm_oclass *oclass;
struct nvkm_object *parent;
struct nvkm_object *object;
int ret, pad;
if (info->share != DCB_I2C_UNUSED) {
pad = info->share;
oclass = impl->pad_s;
} else {
if (type != DCB_I2C_NVIO_AUX)
pad = 0x100 + info->drive;
else
pad = 0x100 + info->auxch;
oclass = impl->pad_x;
}
ret = nvkm_object_ctor(nv_object(i2c), NULL, oclass,
NULL, pad, &parent);
if (ret < 0)
return;
/* Find a port class matching the DCB entry's type. */
oclass = impl->sclass;
do {
ret = -EINVAL;
if (oclass->handle == type) {
ret = nvkm_object_ctor(parent, NULL, oclass,
info, index, &object);
}
} while (ret && (++oclass)->handle);
nvkm_object_ref(NULL, &parent);
}
/*
 * Base constructor for the i2c subdev.
 *
 * Installs the method pointers, creates one port per usable DCB i2c
 * table entry, then creates extra ports for ddc/aux channels hiding
 * behind external encoders (see nvkm_i2c_extdev_sclass), and finally
 * sets up the hotplug/IRQ event source.
 */
int
nvkm_i2c_create_(struct nvkm_object *parent, struct nvkm_object *engine,
struct nvkm_oclass *oclass, int length, void **pobject)
{
struct nvkm_bios *bios = nvkm_bios(parent);
struct nvkm_i2c *i2c;
struct nvkm_object *object;
struct dcb_i2c_entry info;
int ret, i, j, index = -1;
struct dcb_output outp;
u8 ver, hdr;
u32 data;
ret = nvkm_subdev_create(parent, engine, oclass, 0, "I2C", "i2c", &i2c);
*pobject = nv_object(i2c);
if (ret)
return ret;
nv_subdev(i2c)->intr = nvkm_i2c_intr;
i2c->find = nvkm_i2c_find;
i2c->find_type = nvkm_i2c_find_type;
i2c->acquire_pad = nvkm_i2c_acquire_pad;
i2c->release_pad = nvkm_i2c_release_pad;
i2c->acquire = nvkm_i2c_acquire;
i2c->release = nvkm_i2c_release;
i2c->identify = nvkm_i2c_identify;
init_waitqueue_head(&i2c->wait);
INIT_LIST_HEAD(&i2c->ports);
/* One port per DCB i2c table entry; PMGR entries can carry both a
 * bit-bang bus and an aux channel. */
while (!dcb_i2c_parse(bios, ++index, &info)) {
switch (info.type) {
case DCB_I2C_NV04_BIT:
case DCB_I2C_NV4E_BIT:
case DCB_I2C_NVIO_BIT:
nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
info.type, &info);
break;
case DCB_I2C_NVIO_AUX:
nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
info.type, &info);
break;
case DCB_I2C_PMGR:
if (info.drive != DCB_I2C_UNUSED) {
nvkm_i2c_create_port(i2c, NV_I2C_PORT(index),
DCB_I2C_NVIO_BIT, &info);
}
if (info.auxch != DCB_I2C_UNUSED) {
nvkm_i2c_create_port(i2c, NV_I2C_AUX(index),
DCB_I2C_NVIO_AUX, &info);
}
break;
case DCB_I2C_UNUSED:
default:
continue;
}
}
/* in addition to the busses specified in the i2c table, there
* may be ddc/aux channels hiding behind external tmds/dp/etc
* transmitters.
*/
index = NV_I2C_EXT(0);
i = -1;
while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &outp))) {
if (!outp.location || !outp.extdev)
continue;
switch (outp.type) {
case DCB_OUTPUT_TMDS:
info.type = NV_I2C_TYPE_EXTDDC(outp.extdev);
break;
case DCB_OUTPUT_DP:
info.type = NV_I2C_TYPE_EXTAUX(outp.extdev);
break;
default:
continue;
}
ret = -ENODEV;
j = -1;
while (ret && ++j < ARRAY_SIZE(nvkm_i2c_extdev_sclass)) {
parent = nv_object(i2c->find(i2c, outp.i2c_index));
oclass = nvkm_i2c_extdev_sclass[j];
do {
if (oclass->handle != info.type)
continue;
ret = nvkm_object_ctor(parent, NULL, oclass,
NULL, index++, &object);
} while (ret && (++oclass)->handle);
}
}
/* Event source indexed by port number (bit/aux/ext). */
ret = nvkm_event_init(&nvkm_i2c_intr_func, 4, index, &i2c->event);
if (ret)
return ret;
return 0;
}
/* Default constructor: all real work is done by nvkm_i2c_create();
 * this just publishes the resulting object. */
int
_nvkm_i2c_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	       struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
    struct nvkm_i2c *i2c;
    int ret = nvkm_i2c_create(parent, engine, oclass, &i2c);

    *pobject = nv_object(i2c);
    return ret;
}
| gpl-2.0 |
iamroot12C/linux | drivers/gpu/drm/nouveau/nvif/device.c | 552 | 2428 | /*
* Copyright 2014 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs <bskeggs@redhat.com>
*/
#include <nvif/device.h>
/* Tear down a device object initialised by nvif_device_init(). */
void
nvif_device_fini(struct nvif_device *device)
{
nvif_object_fini(&device->base);
}
/* Initialise a device object as a child of 'parent' and fetch the
 * basic device description (NV_DEVICE_V0_INFO) into device->info.
 * Returns 0 on success or a negative errno. */
int
nvif_device_init(struct nvif_object *parent, void (*dtor)(struct nvif_device *),
u32 handle, u32 oclass, void *data, u32 size,
struct nvif_device *device)
{
int ret = nvif_object_init(parent, (void *)dtor, handle, oclass,
data, size, &device->base);
if (ret == 0) {
device->object = &device->base;
/* Request version 0 of the info structure. */
device->info.version = 0;
ret = nvif_object_mthd(&device->base, NV_DEVICE_V0_INFO,
&device->info, sizeof(device->info));
}
return ret;
}
/* Destructor passed to nvif_device_init() by nvif_device_new():
 * finalise the object, then free the kzalloc'd storage. */
static void
nvif_device_del(struct nvif_device *device)
{
nvif_device_fini(device);
kfree(device);
}
/* Allocate and initialise a device object.  On init failure the
 * allocation is freed and *pdevice is set to NULL; if the allocation
 * itself fails, *pdevice is left untouched and -ENOMEM is returned. */
int
nvif_device_new(struct nvif_object *parent, u32 handle, u32 oclass,
		void *data, u32 size, struct nvif_device **pdevice)
{
    struct nvif_device *device;
    int ret;

    device = kzalloc(sizeof(*device), GFP_KERNEL);
    if (!device)
        return -ENOMEM;

    ret = nvif_device_init(parent, nvif_device_del, handle,
                           oclass, data, size, device);
    if (ret) {
        kfree(device);
        device = NULL;
    }
    *pdevice = device;
    return ret;
}
/* Thin wrapper around nvif_object_ref() for the embedded base
 * object: retargets *pdevice at 'device' with refcounting. */
void
nvif_device_ref(struct nvif_device *device, struct nvif_device **pdevice)
{
nvif_object_ref(&device->base, (struct nvif_object **)pdevice);
}
| gpl-2.0 |
AiJiaZone/linux-4.0 | drivers/media/dvb-frontends/mb86a16.c | 808 | 46875 | /*
Fujitsu MB86A16 DVB-S/DSS DC Receiver driver
Copyright (C) Manu Abraham (abraham.manu@gmail.com)
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>
#include "dvb_frontend.h"
#include "mb86a16.h"
#include "mb86a16_priv.h"
static unsigned int verbose = 5;
module_param(verbose, int, 0644);
/* Absolute value of an expression.  Both the test and the negation
 * are fully parenthesized: the old "(-x)" form expanded ABS(a - b)
 * to ((a - b) < 0 ? (-a - b) : (a - b)), giving the wrong magnitude
 * for compound arguments. */
#define ABS(x) ((x) < 0 ? (-(x)) : (x))
/* Driver-private state for one MB86A16 demodulator instance. */
struct mb86a16_state {
struct i2c_adapter *i2c_adap;
const struct mb86a16_config *config;
struct dvb_frontend frontend;
/* tuning parameters */
int frequency;
int srate;
/* Internal stuff */
int master_clk; /* 92000 or 61333, chosen by smrt_info_get() */
int deci; /* decimation selector, see smrt_info_get() */
int csel; /* clock selector, see smrt_info_get() */
int rsel; /* rate selector, see smrt_info_get() */
};
/* Verbosity levels used with the dprintk() macro below. */
#define MB86A16_ERROR 0
#define MB86A16_NOTICE 1
#define MB86A16_INFO 2
#define MB86A16_DEBUG 3
/*
 * Conditional printk: 'x' is the message level, 'y' the current
 * verbosity threshold, and 'z' selects the decorated form (KERN_*
 * prefix plus function name) over a raw printk continuation.
 */
#define dprintk(x, y, z, format, arg...) do { \
if (z) { \
if ((x > MB86A16_ERROR) && (x > y)) \
printk(KERN_ERR "%s: " format "\n", __func__, ##arg); \
else if ((x > MB86A16_NOTICE) && (x > y)) \
printk(KERN_NOTICE "%s: " format "\n", __func__, ##arg); \
else if ((x > MB86A16_INFO) && (x > y)) \
printk(KERN_INFO "%s: " format "\n", __func__, ##arg); \
else if ((x > MB86A16_DEBUG) && (x > y)) \
printk(KERN_DEBUG "%s: " format "\n", __func__, ##arg); \
} else { \
if (x > y) \
printk(format, ##arg); \
} \
} while (0)
/* Function entry/exit trace helpers. */
#define TRACE_IN dprintk(verbose, MB86A16_DEBUG, 1, "-->()")
#define TRACE_OUT dprintk(verbose, MB86A16_DEBUG, 1, "()-->")
/* Write one demod register over I2C.
 * Returns 0 on success or -EREMOTEIO on transfer failure. */
static int mb86a16_write(struct mb86a16_state *state, u8 reg, u8 val)
{
int ret;
u8 buf[] = { reg, val };
struct i2c_msg msg = {
.addr = state->config->demod_address,
.flags = 0,
.buf = buf,
.len = 2
};
dprintk(verbose, MB86A16_DEBUG, 1,
"writing to [0x%02x],Reg[0x%02x],Data[0x%02x]",
state->config->demod_address, buf[0], buf[1]);
ret = i2c_transfer(state->i2c_adap, &msg, 1);
return (ret != 1) ? -EREMOTEIO : 0;
}
/* Read one demod register (address write followed by a byte read).
 * On success returns the i2c_transfer() message count (2) - which is
 * why callers test '!= 2' - otherwise a negative errno. */
static int mb86a16_read(struct mb86a16_state *state, u8 reg, u8 *val)
{
int ret;
u8 b0[] = { reg };
u8 b1[] = { 0 };
struct i2c_msg msg[] = {
{
.addr = state->config->demod_address,
.flags = 0,
.buf = b0,
.len = 1
}, {
.addr = state->config->demod_address,
.flags = I2C_M_RD,
.buf = b1,
.len = 1
}
};
ret = i2c_transfer(state->i2c_adap, msg, 2);
if (ret != 2) {
dprintk(verbose, MB86A16_ERROR, 1, "read error(reg=0x%02x, ret=%i)",
reg, ret);
if (ret < 0)
return ret;
return -EREMOTEIO;
}
*val = b1[0];
return ret;
}
/* Program the counter/timer register: timint1 occupies bits 7:4,
 * timint2 bits 3:2 and cnext bits 1:0.  Returns 0 or -EREMOTEIO. */
static int CNTM_set(struct mb86a16_state *state,
		    unsigned char timint1,
		    unsigned char timint2,
		    unsigned char cnext)
{
    unsigned char cntmr = (timint1 << 4) | (timint2 << 2) | cnext;

    if (mb86a16_write(state, MB86A16_CNTMR, cntmr) >= 0)
        return 0;

    dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
    return -EREMOTEIO;
}
/* Program the symbol-rate registers from 'rate' using the
 * decimation/clock/rate selectors previously chosen by
 * smrt_info_get().  Returns 0 on success, -1 on I2C failure
 * (callers only check for non-zero). */
static int smrt_set(struct mb86a16_state *state, int rate)
{
int tmp ;
int m ;
unsigned char STOFS0, STOFS1;
/* m = 2^deci; compute the 12-bit timing offset, rounded by the
 * "+ master_clk / 2" term before the division. */
m = 1 << state->deci;
tmp = (8192 * state->master_clk - 2 * m * rate * 8192 + state->master_clk / 2) / state->master_clk;
STOFS0 = tmp & 0x0ff;
STOFS1 = (tmp & 0xf00) >> 8;
if (mb86a16_write(state, MB86A16_SRATE1, (state->deci << 2) |
(state->csel << 1) |
state->rsel) < 0)
goto err;
if (mb86a16_write(state, MB86A16_SRATE2, STOFS0) < 0)
goto err;
if (mb86a16_write(state, MB86A16_SRATE3, STOFS1) < 0)
goto err;
return 0;
err:
dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
return -1;
}
/* Issue a soft reset by writing 0x04 to the reset register.
 * Returns 0 or -EREMOTEIO. */
static int srst(struct mb86a16_state *state)
{
    if (mb86a16_write(state, MB86A16_RESET, 0x04) >= 0)
        return 0;

    dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
    return -EREMOTEIO;
}
static int afcex_data_set(struct mb86a16_state *state,
unsigned char AFCEX_L,
unsigned char AFCEX_H)
{
if (mb86a16_write(state, MB86A16_AFCEXL, AFCEX_L) < 0)
goto err;
if (mb86a16_write(state, MB86A16_AFCEXH, AFCEX_H) < 0)
goto err;
return 0;
err:
dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
return -1;
}
/* Program the AFC offset byte pair (registers 0x58/0x59).
 * Returns 0 or -EREMOTEIO; stops after the first failed write. */
static int afcofs_data_set(struct mb86a16_state *state,
			   unsigned char AFCEX_L,
			   unsigned char AFCEX_H)
{
    if (mb86a16_write(state, 0x58, AFCEX_L) < 0 ||
        mb86a16_write(state, 0x59, AFCEX_H) < 0) {
        dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
        return -EREMOTEIO;
    }
    return 0;
}
/* Load the symbol-timing loop filter coefficients: STRBS in bits 5:3,
 * STRAS in bits 2:0.  Returns 0 or -EREMOTEIO. */
static int stlp_set(struct mb86a16_state *state,
		    unsigned char STRAS,
		    unsigned char STRBS)
{
    unsigned char coef = (STRBS << 3) | STRAS;

    if (mb86a16_write(state, MB86A16_STRFILTCOEF1, coef) < 0) {
        dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
        return -EREMOTEIO;
    }
    return 0;
}
/* Program the Viterbi setting registers with fixed values.
 * NOTE(review): the ETH and VIA parameters are accepted but never
 * used - the registers are written with constants 0x04 and 0xf5;
 * confirm whether they were meant to come from the arguments. */
static int Vi_set(struct mb86a16_state *state, unsigned char ETH, unsigned char VIA)
{
if (mb86a16_write(state, MB86A16_VISET2, 0x04) < 0)
goto err;
if (mb86a16_write(state, MB86A16_VISET3, 0xf5) < 0)
goto err;
return 0;
err:
dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
return -EREMOTEIO;
}
/* One-time register initialisation after attach: loop filter,
 * AFC offsets, carrier-recovery filter, FAGC thresholds and TS
 * output setup.  Returns 0 or -EREMOTEIO. */
static int initial_set(struct mb86a16_state *state)
{
if (stlp_set(state, 5, 7))
goto err;
udelay(100);
if (afcex_data_set(state, 0, 0))
goto err;
udelay(100);
if (afcofs_data_set(state, 0, 0))
goto err;
udelay(100);
if (mb86a16_write(state, MB86A16_CRLFILTCOEF1, 0x16) < 0)
goto err;
if (mb86a16_write(state, 0x2f, 0x21) < 0)
goto err;
if (mb86a16_write(state, MB86A16_VIMAG, 0x38) < 0)
goto err;
if (mb86a16_write(state, MB86A16_FAGCS1, 0x00) < 0)
goto err;
if (mb86a16_write(state, MB86A16_FAGCS2, 0x1c) < 0)
goto err;
if (mb86a16_write(state, MB86A16_FAGCS3, 0x20) < 0)
goto err;
if (mb86a16_write(state, MB86A16_FAGCS4, 0x1e) < 0)
goto err;
if (mb86a16_write(state, MB86A16_FAGCS5, 0x23) < 0)
goto err;
if (mb86a16_write(state, 0x54, 0xff) < 0)
goto err;
if (mb86a16_write(state, MB86A16_TSOUT, 0x00) < 0)
goto err;
return 0;
err:
dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
return -EREMOTEIO;
}
/* Pack the S1T field (bits 5:3) and S0T field (bits 2:0) into
 * register 0x33.  Returns 0 or -EREMOTEIO. */
static int S01T_set(struct mb86a16_state *state,
		    unsigned char s1t,
		    unsigned s0t)
{
    unsigned char v = (s1t << 3) | s0t;

    if (mb86a16_write(state, 0x33, v) < 0) {
        dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
        return -EREMOTEIO;
    }
    return 0;
}
/* Write register 0x49: base value 0x7a with 'cren' in bit 7 and
 * 'afcen' in bit 2.  Returns 0 or -EREMOTEIO. */
static int EN_set(struct mb86a16_state *state,
		  int cren,
		  int afcen)
{
    unsigned char v = 0x7a | (cren << 7) | (afcen << 2);

    if (mb86a16_write(state, 0x49, v) < 0) {
        dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
        return -EREMOTEIO;
    }
    return 0;
}
/* Write register 0x2a: 'afcexen' in bit 5 plus a rate-dependent AFC
 * code in bits 4:2 chosen from the symbol rate 'smrt'.
 * Returns 0 or -EREMOTEIO. */
static int AFCEXEN_set(struct mb86a16_state *state,
		       int afcexen,
		       int smrt)
{
    unsigned char afca;

    if (smrt > 18875)
        afca = 4;
    else if (smrt > 9375)
        afca = 3;
    else if (smrt > 2250)
        afca = 2;
    else
        afca = 1;

    if (mb86a16_write(state, 0x2a, 0x02 | (afcexen << 5) | (afca << 2)) < 0) {
        dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
        return -EREMOTEIO;
    }
    return 0;
}
/* Write register 0x2d: DAGCA in bits 5:3, DAGCW in bits 2:0.
 * Returns 0 or -EREMOTEIO. */
static int DAGC_data_set(struct mb86a16_state *state,
			 unsigned char DAGCA,
			 unsigned char DAGCW)
{
    unsigned char v = (DAGCA << 3) | DAGCW;

    if (mb86a16_write(state, 0x2d, v) < 0) {
        dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
        return -EREMOTEIO;
    }
    return 0;
}
/* Select the decimation / clock-select / rate-select values and the
 * resulting master clock for a given symbol rate.  Entries are
 * ordered from the highest minimum rate down to a catch-all last
 * entry, exactly mirroring the original threshold ladder. */
static void smrt_info_get(struct mb86a16_state *state, int rate)
{
    static const struct {
        int min_rate;
        unsigned char deci, csel, rsel;
    } tab[] = {
        { 37501, 0, 0, 0 }, { 30001, 0, 0, 1 },
        { 26251, 0, 1, 0 }, { 22501, 0, 1, 1 },
        { 18751, 1, 0, 0 }, { 15001, 1, 0, 1 },
        { 13126, 1, 1, 0 }, { 11251, 1, 1, 1 },
        {  9376, 2, 0, 0 }, {  7501, 2, 0, 1 },
        {  6563, 2, 1, 0 }, {  5626, 2, 1, 1 },
        {  4688, 3, 0, 0 }, {  3751, 3, 0, 1 },
        {  3282, 3, 1, 0 }, {  2814, 3, 1, 1 },
        {  2344, 4, 0, 0 }, {  1876, 4, 0, 1 },
        {  1641, 4, 1, 0 }, {  1407, 4, 1, 1 },
        {  1172, 5, 0, 0 }, {   939, 5, 0, 1 },
        {   821, 5, 1, 0 }, {     0, 5, 1, 1 },
    };
    unsigned int i;

    /* Find the first entry whose minimum rate is satisfied; the last
     * entry is the unconditional fallback. */
    for (i = 0; i + 1 < sizeof(tab) / sizeof(tab[0]); i++) {
        if (rate >= tab[i].min_rate)
            break;
    }

    state->deci = tab[i].deci;
    state->csel = tab[i].csel;
    state->rsel = tab[i].rsel;

    /* The master clock follows the clock-select bit. */
    state->master_clk = state->csel ? 61333 : 92000;
}
/*
 * Carrier detection around symbol rate 'smrt': sample the monitor
 * register 0x37 at 98%, 100% and 102% of the nominal rate; a centre
 * reading at least 12% above both neighbours indicates a signal.
 * *SIG carries the previous strength in (selecting the integration
 * time) and the centre reading out.  Returns 1 (signal found),
 * 0 (none) or a negative value on error.
 */
static int signal_det(struct mb86a16_state *state,
int smrt,
unsigned char *SIG)
{
int ret ;
int smrtd ;
int wait_sym ;
u32 wait_t;
unsigned char S[3] ;
int i ;
if (*SIG > 45) {
if (CNTM_set(state, 2, 1, 2) < 0) {
dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
return -1;
}
wait_sym = 40000;
} else {
if (CNTM_set(state, 3, 1, 2) < 0) {
dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
return -1;
}
wait_sym = 80000;
}
for (i = 0; i < 3; i++) {
if (i == 0)
smrtd = smrt * 98 / 100;
else if (i == 1)
smrtd = smrt;
else
smrtd = smrt * 102 / 100;
smrt_info_get(state, smrtd);
smrt_set(state, smrtd);
srst(state);
/* NOTE(review): wait_t is computed but only a fixed 10ms sleep
 * follows - confirm whether the computed wait was intended. */
wait_t = (wait_sym + 99 * smrtd / 100) / smrtd;
if (wait_t == 0)
wait_t = 1;
msleep_interruptible(10);
if (mb86a16_read(state, 0x37, &(S[i])) != 2) {
dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
return -EREMOTEIO;
}
}
if ((S[1] > S[0] * 112 / 100) &&
(S[1] > S[2] * 112 / 100)) {
ret = 1;
} else {
ret = 0;
}
*SIG = S[1];
/* Restore the default counter/timer configuration. */
if (CNTM_set(state, 0, 1, 2) < 0) {
dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error");
return -1;
}
return ret;
}
/*
 * Program the tuner/synthesizer registers: codes C and F are chosen
 * from the symbol rate, the band code B from the frequency f (MHz),
 * and the divider word M from f and the reference divider exponent R.
 * Returns 0 or -EREMOTEIO (reported only after attempting all five
 * writes).
 */
static int rf_val_set(struct mb86a16_state *state,
int f,
int smrt,
unsigned char R)
{
unsigned char C, F, B;
int M;
unsigned char rf_val[5];
int ack = -1;
if (smrt > 37750)
C = 1;
else if (smrt > 18875)
C = 2;
else if (smrt > 5500)
C = 3;
else
C = 4;
if (smrt > 30500)
F = 3;
else if (smrt > 9375)
F = 1;
else if (smrt > 4625)
F = 0;
else
F = 2;
/* Band select from the RF frequency in MHz. */
if (f < 1060)
B = 0;
else if (f < 1175)
B = 1;
else if (f < 1305)
B = 2;
else if (f < 1435)
B = 3;
else if (f < 1570)
B = 4;
else if (f < 1715)
B = 5;
else if (f < 1845)
B = 6;
else if (f < 1980)
B = 7;
else if (f < 2080)
B = 8;
else
B = 9;
M = f * (1 << R) / 2;
/* Pack C/F, R and the 17-bit M word plus B into four bytes. */
rf_val[0] = 0x01 | (C << 3) | (F << 1);
rf_val[1] = (R << 5) | ((M & 0x1f000) >> 12);
rf_val[2] = (M & 0x00ff0) >> 4;
rf_val[3] = ((M & 0x0000f) << 4) | B;
/* Frequency Set */
if (mb86a16_write(state, 0x21, rf_val[0]) < 0)
ack = 0;
if (mb86a16_write(state, 0x22, rf_val[1]) < 0)
ack = 0;
if (mb86a16_write(state, 0x23, rf_val[2]) < 0)
ack = 0;
if (mb86a16_write(state, 0x24, rf_val[3]) < 0)
ack = 0;
if (mb86a16_write(state, 0x25, 0x01) < 0)
ack = 0;
if (ack == 0) {
dprintk(verbose, MB86A16_ERROR, 1, "RF Setup - I2C transfer error");
return -EREMOTEIO;
}
return 0;
}
/* Read the 12-bit two's-complement AFC monitor (regs 0x0e/0x0f) and
 * scale it by the master clock.  Returns the scaled error, or
 * -EREMOTEIO on an I2C failure. */
static int afcerr_chk(struct mb86a16_state *state)
{
    unsigned char lo, hi;
    int raw;

    if (mb86a16_read(state, 0x0e, &lo) != 2 ||
        mb86a16_read(state, 0x0f, &hi) != 2) {
        dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
        return -EREMOTEIO;
    }

    raw = (hi << 8) + lo;
    if (raw > 2048)
        raw -= 4096;

    return raw * state->master_clk / 8192;
}
/* Read the 16-bit DAGC monitor value (regs 0x45 low / 0x46 high).
 * Returns the value, or -EREMOTEIO on an I2C failure. */
static int dagcm_val_get(struct mb86a16_state *state)
{
    unsigned char lo, hi;

    if (mb86a16_read(state, 0x45, &lo) != 2 ||
        mb86a16_read(state, 0x46, &hi) != 2) {
        dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
        return -EREMOTEIO;
    }

    return (hi << 8) + lo;
}
/* dvb_frontend read_status callback: derive the FE_HAS_* flags from
 * the two signal monitors, the sync/status register and the
 * frame-sync register. */
static int mb86a16_read_status(struct dvb_frontend *fe, enum fe_status *status)
{
u8 stat, stat2;
struct mb86a16_state *state = fe->demodulator_priv;
*status = 0;
if (mb86a16_read(state, MB86A16_SIG1, &stat) != 2)
goto err;
if (mb86a16_read(state, MB86A16_SIG2, &stat2) != 2)
goto err;
if ((stat > 25) && (stat2 > 25))
*status |= FE_HAS_SIGNAL;
if ((stat > 45) && (stat2 > 45))
*status |= FE_HAS_CARRIER;
if (mb86a16_read(state, MB86A16_STATUS, &stat) != 2)
goto err;
/* NOTE(review): the same status bit feeds both SYNC and VITERBI -
 * confirm whether a different bit was intended for one of them. */
if (stat & 0x01)
*status |= FE_HAS_SYNC;
if (stat & 0x01)
*status |= FE_HAS_VITERBI;
if (mb86a16_read(state, MB86A16_FRAMESYNC, &stat) != 2)
goto err;
if ((stat & 0x0f) && (*status & FE_HAS_VITERBI))
*status |= FE_HAS_LOCK;
return 0;
err:
dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
return -EREMOTEIO;
}
/*
 * Read the sync/Viterbi status register (0x0d).
 * *VIRM receives the Viterbi rate monitor field (bits [4:2]); the
 * return value is the sync flag (bit 0), or -EREMOTEIO on I2C failure.
 */
static int sync_chk(struct mb86a16_state *state,
		    unsigned char *VIRM)
{
	unsigned char status;

	if (mb86a16_read(state, 0x0d, &status) != 2) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}
	dprintk(verbose, MB86A16_INFO, 1, "Status = %02x,", status);

	*VIRM = (status & 0x1c) >> 2;
	return status & 0x01;
}
/*
 * Compute the total frequency error for the tuned transponder from the
 * carrier-recovery monitor, the (internal or external) AFC monitor and
 * the programmed PLL dividers.
 *
 * @fTP:  target transponder frequency (MHz)
 * @smrt: symbol rate (Ksps)
 * @unit: 0 -> return error in MHz (rounded), otherwise in kHz
 *
 * Returns the frequency error, or -EREMOTEIO on I2C failure.
 */
static int freqerr_chk(struct mb86a16_state *state,
		       int fTP,
		       int smrt,
		       int unit)
{
	unsigned char CRM, AFCML, AFCMH;
	unsigned char temp1, temp2, temp3;
	int crm, afcm, AFCM;
	int crrerr, afcerr; /* kHz */
	int frqerr; /* MHz */
	int afcen, afcexen = 0;
	int R, M, fOSC, fOSC_OFS;

	/* Carrier recovery monitor: signed 8-bit, scaled by symbol rate. */
	if (mb86a16_read(state, 0x43, &CRM) != 2)
		goto err;

	if (CRM > 127)
		crm = CRM - 256;
	else
		crm = CRM;

	crrerr = smrt * crm / 256;

	/* Which AFC is active: internal (afcen) or external (afcexen)? */
	if (mb86a16_read(state, 0x49, &temp1) != 2)
		goto err;

	afcen = (temp1 & 0x04) >> 2;
	if (afcen == 0) {
		if (mb86a16_read(state, 0x2a, &temp1) != 2)
			goto err;
		afcexen = (temp1 & 0x20) >> 5;
	}

	/* Read the AFC monitor from whichever block is enabled. */
	if (afcen == 1) {
		if (mb86a16_read(state, 0x0e, &AFCML) != 2)
			goto err;
		if (mb86a16_read(state, 0x0f, &AFCMH) != 2)
			goto err;
	} else if (afcexen == 1) {
		if (mb86a16_read(state, 0x2b, &AFCML) != 2)
			goto err;
		if (mb86a16_read(state, 0x2c, &AFCMH) != 2)
			goto err;
	}
	/* AFCML/AFCMH are only consumed when one of the AFCs was enabled. */
	if ((afcen == 1) || (afcexen == 1)) {
		smrt_info_get(state, smrt);
		/* 9-bit two's complement monitor value. */
		AFCM = ((AFCMH & 0x01) << 8) + AFCML;
		if (AFCM > 255)
			afcm = AFCM - 512;
		else
			afcm = AFCM;

		afcerr = afcm * state->master_clk / 8192;
	} else
		afcerr = 0;

	/* Recover the programmed LO frequency from the PLL registers. */
	if (mb86a16_read(state, 0x22, &temp1) != 2)
		goto err;
	if (mb86a16_read(state, 0x23, &temp2) != 2)
		goto err;
	if (mb86a16_read(state, 0x24, &temp3) != 2)
		goto err;

	R = (temp1 & 0xe0) >> 5;
	M = ((temp1 & 0x1f) << 12) + (temp2 << 4) + (temp3 >> 4);
	if (R == 0)
		fOSC = 2 * M;
	else
		fOSC = M;

	fOSC_OFS = fOSC - fTP;

	if (unit == 0) {	/* MHz, rounded to the nearest integer */
		if (crrerr + afcerr + fOSC_OFS * 1000 >= 0)
			frqerr = (crrerr + afcerr + fOSC_OFS * 1000 + 500) / 1000;
		else
			frqerr = (crrerr + afcerr + fOSC_OFS * 1000 - 500) / 1000;
	} else {	/* kHz */
		frqerr = crrerr + afcerr + fOSC_OFS * 1000;
	}

	return frqerr;
err:
	dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
	return -EREMOTEIO;
}
/*
 * Select the VCO divider R from the symbol rate: fast rates (above
 * 9375 Ksps) use R = 0, slower ones R = 1.  @state is unused.
 */
static unsigned char vco_dev_get(struct mb86a16_state *state, int smrt)
{
	return (smrt > 9375) ? 0 : 1;
}
/*
 * Work out the LO frequency and external-AFC offset for sweep step @v.
 *
 * The target sweep frequency (kHz) is fOSC_start + v * swp_ofs; it is
 * rounded to the nearest settable LO frequency *fOSC (MHz, step 2 MHz
 * when R == 0, else 1 MHz) and the residual is returned in
 * *afcex_freq together with its 12-bit register encoding.
 */
static void swp_info_get(struct mb86a16_state *state,
			 int fOSC_start,
			 int smrt,
			 int v, int R,
			 int swp_ofs,
			 int *fOSC,
			 int *afcex_freq,
			 unsigned char *AFCEX_L,
			 unsigned char *AFCEX_H)
{
	int target_khz;
	int reg;

	target_khz = fOSC_start * 1000 + v * swp_ofs;

	if (R == 0)
		*fOSC = (target_khz + 1000) / 2000 * 2;
	else
		*fOSC = (target_khz + 500) / 1000;

	/*
	 * NOTE(review): *fOSC is in MHz while target_khz is in kHz, so
	 * this comparison always takes the else branch in practice --
	 * kept as-is to preserve the original behavior; verify against
	 * the vendor reference code.
	 */
	if (*fOSC >= target_khz)
		*afcex_freq = *fOSC * 1000 - target_khz;
	else
		*afcex_freq = target_khz - *fOSC * 1000;

	reg = *afcex_freq * 8192 / state->master_clk;
	*AFCEX_L = reg & 0x00ff;
	*AFCEX_H = (reg & 0x0f00) >> 8;
}
/*
 * Decide, from the sweep history V[], whether a signal peak has been
 * found around the current step and return the corresponding sweep
 * frequency (Hz-scale: fOSC * 1000 + afcex_freq [+/- n * swp_ofs]),
 * or -1 when no peak is identified yet.
 *
 * V is indexed with an offset of 30 so that negative sweep steps map
 * to valid array slots (V[30 + v]); unvisited slots hold -1.  Odd i
 * corresponds to positive sweep steps, even i to negative ones.
 * *SIG1 receives the signal level of the chosen peak.  @state is not
 * used.  The case numbering in the comments follows the vendor
 * reference algorithm.
 */
static int swp_freq_calcuation(struct mb86a16_state *state, int i, int v, int *V, int vmax, int vmin,
			       int SIGMIN, int fOSC, int afcex_freq, int swp_ofs, unsigned char *SIG1)
{
	int swp_freq ;

	if ((i % 2 == 1) && (v <= vmax)) {
		/* positive v (case 1) */
		if ((v - 1 == vmin) &&
		    (*(V + 30 + v) >= 0) &&
		    (*(V + 30 + v - 1) >= 0) &&
		    (*(V + 30 + v - 1) > *(V + 30 + v)) &&
		    (*(V + 30 + v - 1) > SIGMIN)) {

			swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
			*SIG1 = *(V + 30 + v - 1);
		} else if ((v == vmax) &&
			   (*(V + 30 + v) >= 0) &&
			   (*(V + 30 + v - 1) >= 0) &&
			   (*(V + 30 + v) > *(V + 30 + v - 1)) &&
			   (*(V + 30 + v) > SIGMIN)) {
			/* (case 2) */
			swp_freq = fOSC * 1000 + afcex_freq;
			*SIG1 = *(V + 30 + v);
		} else if ((*(V + 30 + v) > 0) &&
			   (*(V + 30 + v - 1) > 0) &&
			   (*(V + 30 + v - 2) > 0) &&
			   (*(V + 30 + v - 3) > 0) &&
			   (*(V + 30 + v - 1) > *(V + 30 + v)) &&
			   (*(V + 30 + v - 2) > *(V + 30 + v - 3)) &&
			   ((*(V + 30 + v - 1) > SIGMIN) ||
			    (*(V + 30 + v - 2) > SIGMIN))) {
			/* (case 3) */
			if (*(V + 30 + v - 1) >= *(V + 30 + v - 2)) {
				swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
				*SIG1 = *(V + 30 + v - 1);
			} else {
				swp_freq = fOSC * 1000 + afcex_freq - swp_ofs * 2;
				*SIG1 = *(V + 30 + v - 2);
			}
		} else if ((v == vmax) &&
			   (*(V + 30 + v) >= 0) &&
			   (*(V + 30 + v - 1) >= 0) &&
			   (*(V + 30 + v - 2) >= 0) &&
			   (*(V + 30 + v) > *(V + 30 + v - 2)) &&
			   (*(V + 30 + v - 1) > *(V + 30 + v - 2)) &&
			   ((*(V + 30 + v) > SIGMIN) ||
			    (*(V + 30 + v - 1) > SIGMIN))) {
			/* (case 4) */
			if (*(V + 30 + v) >= *(V + 30 + v - 1)) {
				swp_freq = fOSC * 1000 + afcex_freq;
				*SIG1 = *(V + 30 + v);
			} else {
				swp_freq = fOSC * 1000 + afcex_freq - swp_ofs;
				*SIG1 = *(V + 30 + v - 1);
			}
		} else {
			swp_freq = -1 ;
		}
	} else if ((i % 2 == 0) && (v >= vmin)) {
		/* Negative v (case 1) */
		if ((*(V + 30 + v) > 0) &&
		    (*(V + 30 + v + 1) > 0) &&
		    (*(V + 30 + v + 2) > 0) &&
		    (*(V + 30 + v + 1) > *(V + 30 + v)) &&
		    (*(V + 30 + v + 1) > *(V + 30 + v + 2)) &&
		    (*(V + 30 + v + 1) > SIGMIN)) {

			swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
			*SIG1 = *(V + 30 + v + 1);
		} else if ((v + 1 == vmax) &&
			   (*(V + 30 + v) >= 0) &&
			   (*(V + 30 + v + 1) >= 0) &&
			   (*(V + 30 + v + 1) > *(V + 30 + v)) &&
			   (*(V + 30 + v + 1) > SIGMIN)) {
			/* (case 2) */
			/*
			 * NOTE(review): the peak is at v + 1 but *SIG1 is
			 * taken from slot v, unlike the symmetric positive
			 * case -- possibly intentional, possibly an
			 * off-by-one in the vendor algorithm; kept as-is.
			 */
			swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
			*SIG1 = *(V + 30 + v);
		} else if ((v == vmin) &&
			   (*(V + 30 + v) > 0) &&
			   (*(V + 30 + v + 1) > 0) &&
			   (*(V + 30 + v + 2) > 0) &&
			   (*(V + 30 + v) > *(V + 30 + v + 1)) &&
			   (*(V + 30 + v) > *(V + 30 + v + 2)) &&
			   (*(V + 30 + v) > SIGMIN)) {
			/* (case 3) */
			swp_freq = fOSC * 1000 + afcex_freq;
			*SIG1 = *(V + 30 + v);
		} else if ((*(V + 30 + v) >= 0) &&
			   (*(V + 30 + v + 1) >= 0) &&
			   (*(V + 30 + v + 2) >= 0) &&
			   (*(V + 30 + v + 3) >= 0) &&
			   (*(V + 30 + v + 1) > *(V + 30 + v)) &&
			   (*(V + 30 + v + 2) > *(V + 30 + v + 3)) &&
			   ((*(V + 30 + v + 1) > SIGMIN) ||
			    (*(V + 30 + v + 2) > SIGMIN))) {
			/* (case 4) */
			if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) {
				swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
				*SIG1 = *(V + 30 + v + 1);
			} else {
				swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2;
				*SIG1 = *(V + 30 + v + 2);
			}
		} else if ((*(V + 30 + v) >= 0) &&
			   (*(V + 30 + v + 1) >= 0) &&
			   (*(V + 30 + v + 2) >= 0) &&
			   (*(V + 30 + v + 3) >= 0) &&
			   (*(V + 30 + v) > *(V + 30 + v + 2)) &&
			   (*(V + 30 + v + 1) > *(V + 30 + v + 2)) &&
			   (*(V + 30 + v) > *(V + 30 + v + 3)) &&
			   (*(V + 30 + v + 1) > *(V + 30 + v + 3)) &&
			   ((*(V + 30 + v) > SIGMIN) ||
			    (*(V + 30 + v + 1) > SIGMIN))) {
			/* (case 5) */
			if (*(V + 30 + v) >= *(V + 30 + v + 1)) {
				swp_freq = fOSC * 1000 + afcex_freq;
				*SIG1 = *(V + 30 + v);
			} else {
				swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
				*SIG1 = *(V + 30 + v + 1);
			}
		} else if ((v + 2 == vmin) &&
			   (*(V + 30 + v) >= 0) &&
			   (*(V + 30 + v + 1) >= 0) &&
			   (*(V + 30 + v + 2) >= 0) &&
			   (*(V + 30 + v + 1) > *(V + 30 + v)) &&
			   (*(V + 30 + v + 2) > *(V + 30 + v)) &&
			   ((*(V + 30 + v + 1) > SIGMIN) ||
			    (*(V + 30 + v + 2) > SIGMIN))) {
			/* (case 6) */
			if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) {
				swp_freq = fOSC * 1000 + afcex_freq + swp_ofs;
				*SIG1 = *(V + 30 + v + 1);
			} else {
				swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2;
				*SIG1 = *(V + 30 + v + 2);
			}
		} else if ((vmax == 0) && (vmin == 0) && (*(V + 30 + v) > SIGMIN)) {
			/* Degenerate sweep: only the center point exists. */
			swp_freq = fOSC * 1000;
			*SIG1 = *(V + 30 + v);
		} else
			swp_freq = -1;
	} else
		swp_freq = -1;

	return swp_freq;
}
/*
 * Like swp_info_get(), but for an absolute target frequency @swp_freq
 * (kHz) instead of a sweep step: round to the nearest settable LO
 * frequency *fOSC (MHz) and encode the residual AFC offset.
 */
static void swp_info_get2(struct mb86a16_state *state,
			  int smrt,
			  int R,
			  int swp_freq,
			  int *afcex_freq,
			  int *fOSC,
			  unsigned char *AFCEX_L,
			  unsigned char *AFCEX_H)
{
	int reg;

	if (R == 0)
		*fOSC = (swp_freq + 1000) / 2000 * 2;
	else
		*fOSC = (swp_freq + 500) / 1000;

	/*
	 * NOTE(review): MHz-vs-kHz comparison, mirrored from
	 * swp_info_get() -- preserved verbatim; verify against the
	 * vendor reference code.
	 */
	if (*fOSC >= swp_freq)
		*afcex_freq = *fOSC * 1000 - swp_freq;
	else
		*afcex_freq = swp_freq - *fOSC * 1000;

	reg = *afcex_freq * 8192 / state->master_clk;
	*AFCEX_L = reg & 0x00ff;
	*AFCEX_H = (reg & 0x0f00) >> 8;
}
/*
 * Encode an AFC offset frequency into its 12-bit register pair
 * (low byte in *AFCEX_L, high nibble in *AFCEX_H).
 */
static void afcex_info_get(struct mb86a16_state *state,
			   int afcex_freq,
			   unsigned char *AFCEX_L,
			   unsigned char *AFCEX_H)
{
	int reg = afcex_freq * 8192 / state->master_clk;

	*AFCEX_L = reg & 0x00ff;
	*AFCEX_H = (reg & 0x0f00) >> 8;
}
/*
 * Program the acquisition sequencer (register 0x32) with SLOCK0 = 0
 * and the given loop mode.  Returns 0 or -EREMOTEIO.
 */
static int SEQ_set(struct mb86a16_state *state, unsigned char loop)
{
	unsigned char val = 0x02 | (loop << 2);

	if (mb86a16_write(state, 0x32, val) < 0) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}
	return 0;
}
/*
 * Set Viterbi rate / IQ configuration (register 0x06); IQINV selects
 * IQ inversion via bit 5.  Returns 0 or -EREMOTEIO.
 */
static int iq_vt_set(struct mb86a16_state *state, unsigned char IQINV)
{
	unsigned char val = 0xdf | (IQINV << 5);

	if (mb86a16_write(state, 0x06, val) < 0) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}
	return 0;
}
/* Soft-reset the FEC block.  Returns 0 or -EREMOTEIO. */
static int FEC_srst(struct mb86a16_state *state)
{
	if (mb86a16_write(state, MB86A16_RESET, 0x02) < 0) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}
	return 0;
}
/* Program the S2T timer field (register 0x34).  Returns 0 or -EREMOTEIO. */
static int S2T_set(struct mb86a16_state *state, unsigned char S2T)
{
	unsigned char val = 0x70 | S2T;

	if (mb86a16_write(state, 0x34, val) < 0) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}
	return 0;
}
/*
 * Program the S4T/S5T timer fields (register 0x35, S5T in the high
 * nibble).  Returns 0 or -EREMOTEIO.
 */
static int S45T_set(struct mb86a16_state *state, unsigned char S4T, unsigned char S5T)
{
	unsigned char val = (S5T << 4) | S4T;

	if (mb86a16_write(state, 0x35, val) < 0) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}
	return 0;
}
/*
 * Tune to the transponder in state->frequency (MHz) / state->srate
 * (Ksps).  The sequence, repeated up to three times:
 *
 *  1. initialise the demod and sweep the LO in steps of srate/4 around
 *     the target, recording the signal monitor into V[] (offset by 30
 *     so negative steps index valid slots);
 *  2. when swp_freq_calcuation() reports a peak, re-tune onto it and
 *     confirm with signal_det();
 *  3. refine the frequency with the AFC error, then probe three
 *     points at a reduced symbol rate and use the DAGC monitor slope
 *     to compute a final delta;
 *  4. program the Viterbi/FEC thresholds according to the signal
 *     level and wait for sync, retrying with inverted IQ for rate 5/6.
 *
 * Returns 0 on lock, -1 otherwise (also used for I2C failures inside
 * the sequence, except one -EREMOTEIO noted below).
 */
static int mb86a16_set_fe(struct mb86a16_state *state)
{
	u8 agcval, cnmval;

	int i, j;
	int fOSC = 0;
	int fOSC_start = 0;
	int wait_t;
	int fcp;
	int swp_ofs;
	int V[60];		/* sweep signal-level history, center at 30 */
	u8 SIG1MIN;

	unsigned char CREN, AFCEN, AFCEXEN;
	unsigned char SIG1;
	unsigned char TIMINT1, TIMINT2, TIMEXT;
	unsigned char S0T, S1T;
	unsigned char S2T;
	/* unsigned char S2T, S3T; */
	unsigned char S4T, S5T;
	unsigned char AFCEX_L, AFCEX_H;
	unsigned char R;
	unsigned char VIRM;
	unsigned char ETH, VIA;
	unsigned char junk;

	int loop;
	int ftemp;
	int v, vmax, vmin;
	int vmax_his, vmin_his;
	int swp_freq, prev_swp_freq[20];
	int prev_freq_num;
	int signal_dupl;
	int afcex_freq;
	int signal;
	int afcerr;
	int temp_freq, delta_freq;
	int dagcm[4];
	int smrt_d;
	/* int freq_err; */
	int n;
	int ret = -1;
	int sync;

	dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate);

	fcp = 3000;			/* capture range margin, kHz */
	swp_ofs = state->srate / 4;	/* sweep step, kHz */

	for (i = 0; i < 60; i++)
		V[i] = -1;		/* -1 marks "not visited" */

	for (i = 0; i < 20; i++)
		prev_swp_freq[i] = 0;

	SIG1MIN = 25;			/* minimum acceptable signal level */

	/* Up to three complete acquisition attempts. */
	for (n = 0; ((n < 3) && (ret == -1)); n++) {
		SEQ_set(state, 0);
		iq_vt_set(state, 0);

		CREN = 0;
		AFCEN = 0;
		AFCEXEN = 1;
		TIMINT1 = 0;
		TIMINT2 = 1;
		TIMEXT = 2;
		S1T = 0;
		S0T = 0;

		if (initial_set(state) < 0) {
			dprintk(verbose, MB86A16_ERROR, 1, "initial set failed");
			return -1;
		}
		if (DAGC_data_set(state, 3, 2) < 0) {
			dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
			return -1;
		}
		if (EN_set(state, CREN, AFCEN) < 0) {
			dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
			return -1; /* (0, 0) */
		}
		if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
			dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
			return -1; /* (1, smrt) = (1, symbolrate) */
		}
		if (CNTM_set(state, TIMINT1, TIMINT2, TIMEXT) < 0) {
			dprintk(verbose, MB86A16_ERROR, 1, "CNTM set error");
			return -1; /* (0, 1, 2) */
		}
		if (S01T_set(state, S1T, S0T) < 0) {
			dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
			return -1; /* (0, 0) */
		}
		smrt_info_get(state, state->srate);
		if (smrt_set(state, state->srate) < 0) {
			dprintk(verbose, MB86A16_ERROR, 1, "smrt info get error");
			return -1;
		}

		/*
		 * Pick the sweep start frequency; with R == 0 the LO only
		 * tunes in 2 MHz steps, so round to an even MHz within
		 * the 950..2150 band.
		 */
		R = vco_dev_get(state, state->srate);
		if (R == 1)
			fOSC_start = state->frequency;

		else if (R == 0) {
			if (state->frequency % 2 == 0) {
				fOSC_start = state->frequency;
			} else {
				fOSC_start = state->frequency + 1;
				if (fOSC_start > 2150)
					fOSC_start = state->frequency - 1;
			}
		}

		/* Count sweep steps available above the start frequency. */
		loop = 1;
		ftemp = fOSC_start * 1000;
		vmax = 0 ;
		while (loop == 1) {
			ftemp = ftemp + swp_ofs;
			vmax++;

			/* Upper bound */
			if (ftemp > 2150000) {
				loop = 0;
				vmax--;
			} else {
				if ((ftemp == 2150000) ||
				    (ftemp - state->frequency * 1000 >= fcp + state->srate / 4))
					loop = 0;
			}
		}

		/* ... and below it. */
		loop = 1;
		ftemp = fOSC_start * 1000;
		vmin = 0 ;
		while (loop == 1) {
			ftemp = ftemp - swp_ofs;
			vmin--;

			/* Lower bound */
			if (ftemp < 950000) {
				loop = 0;
				vmin++;
			} else {
				if ((ftemp == 950000) ||
				    (state->frequency * 1000 - ftemp >= fcp + state->srate / 4))
					loop = 0;
			}
		}

		/* Settling time per sweep point, ms (rounded, at least 1). */
		wait_t = (8000 + state->srate / 2) / state->srate;
		if (wait_t == 0)
			wait_t = 1;

		/*
		 * Sweep: v alternates 1, -1, 2, -2, ... until both bounds
		 * have been hit or a signal is confirmed.
		 */
		i = 0;
		j = 0;
		prev_freq_num = 0;
		loop = 1;
		signal = 0;
		vmax_his = 0;
		vmin_his = 0;
		v = 0;

		while (loop == 1) {
			swp_info_get(state, fOSC_start, state->srate,
				     v, R, swp_ofs, &fOSC,
				     &afcex_freq, &AFCEX_L, &AFCEX_H);

			udelay(100);
			if (rf_val_set(state, fOSC, state->srate, R) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
				return -1;
			}
			udelay(100);
			if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
				return -1;
			}
			if (srst(state) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "srst error");
				return -1;
			}
			msleep_interruptible(wait_t);

			if (mb86a16_read(state, 0x37, &SIG1) != 2) {
				dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
				return -1;
			}
			V[30 + v] = SIG1 ;
			swp_freq = swp_freq_calcuation(state, i, v, V, vmax, vmin,
						       SIG1MIN, fOSC, afcex_freq,
						       swp_ofs, &SIG1);	/* changed */

			/* Ignore peaks too close to one already tried. */
			signal_dupl = 0;
			for (j = 0; j < prev_freq_num; j++) {
				if ((ABS(prev_swp_freq[j] - swp_freq)) < (swp_ofs * 3 / 2)) {
					signal_dupl = 1;
					dprintk(verbose, MB86A16_INFO, 1, "Probably Duplicate Signal, j = %d", j);
				}
			}
			if ((signal_dupl == 0) && (swp_freq > 0) && (ABS(swp_freq - state->frequency * 1000) < fcp + state->srate / 6)) {
				dprintk(verbose, MB86A16_DEBUG, 1, "------ Signal detect ------ [swp_freq=[%07d, srate=%05d]]", swp_freq, state->srate);
				prev_swp_freq[prev_freq_num] = swp_freq;
				prev_freq_num++;
				swp_info_get2(state, state->srate, R, swp_freq,
					      &afcex_freq, &fOSC,
					      &AFCEX_L, &AFCEX_H);

				if (rf_val_set(state, fOSC, state->srate, R) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
					return -1;
				}
				if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
					return -1;
				}
				signal = signal_det(state, state->srate, &SIG1);
				if (signal == 1) {
					dprintk(verbose, MB86A16_ERROR, 1, "***** Signal Found *****");
					loop = 0;
				} else {
					dprintk(verbose, MB86A16_ERROR, 1, "!!!!! No signal !!!!!, try again...");
					smrt_info_get(state, state->srate);
					if (smrt_set(state, state->srate) < 0) {
						dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
						return -1;
					}
				}
			}
			/* Advance the alternating sweep index. */
			if (v > vmax)
				vmax_his = 1 ;
			if (v < vmin)
				vmin_his = 1 ;
			i++;

			if ((i % 2 == 1) && (vmax_his == 1))
				i++;
			if ((i % 2 == 0) && (vmin_his == 1))
				i++;

			if (i % 2 == 1)
				v = (i + 1) / 2;
			else
				v = -i / 2;

			if ((vmax_his == 1) && (vmin_his == 1))
				loop = 0 ;
		}

		if (signal == 1) {
			dprintk(verbose, MB86A16_INFO, 1, " Start Freq Error Check");

			/* Refine the frequency using the internal AFC. */
			S1T = 7 ;
			S0T = 1 ;
			CREN = 0 ;
			AFCEN = 1 ;
			AFCEXEN = 0 ;

			if (S01T_set(state, S1T, S0T) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
				return -1;
			}
			smrt_info_get(state, state->srate);
			if (smrt_set(state, state->srate) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
				return -1;
			}
			if (EN_set(state, CREN, AFCEN) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
				return -1;
			}
			if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
				return -1;
			}
			afcex_info_get(state, afcex_freq, &AFCEX_L, &AFCEX_H);
			if (afcofs_data_set(state, AFCEX_L, AFCEX_H) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "AFCOFS data set error");
				return -1;
			}
			if (srst(state) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "srst error");
				return -1;
			}
			/* delay 4~200 */
			wait_t = 200000 / state->master_clk + 200000 / state->srate;
			msleep(wait_t);
			afcerr = afcerr_chk(state);
			if (afcerr == -1)
				return -1;

			swp_freq = fOSC * 1000 + afcerr ;
			AFCEXEN = 1 ;

			/*
			 * Probe three points around swp_freq at a reduced
			 * symbol rate and read the DAGC monitor at each.
			 */
			if (state->srate >= 1500)
				smrt_d = state->srate / 3;
			else
				smrt_d = state->srate / 2;
			smrt_info_get(state, smrt_d);
			if (smrt_set(state, smrt_d) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
				return -1;
			}
			if (AFCEXEN_set(state, AFCEXEN, smrt_d) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
				return -1;
			}
			R = vco_dev_get(state, smrt_d);
			if (DAGC_data_set(state, 2, 0) < 0) {
				dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
				return -1;
			}
			for (i = 0; i < 3; i++) {
				temp_freq = swp_freq + (i - 1) * state->srate / 8;
				swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
				if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
					return -1;
				}
				if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
					return -1;
				}
				wait_t = 200000 / state->master_clk + 40000 / smrt_d;
				msleep(wait_t);
				dagcm[i] = dagcm_val_get(state);
			}
			/*
			 * If the DAGC curve is clearly skewed to one side,
			 * take a fourth sample further out and derive a
			 * correction from the slope.
			 */
			if ((dagcm[0] > dagcm[1]) &&
			    (dagcm[0] > dagcm[2]) &&
			    (dagcm[0] - dagcm[1] > 2 * (dagcm[2] - dagcm[1]))) {

				temp_freq = swp_freq - 2 * state->srate / 8;
				swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
				if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
					return -1;
				}
				if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "afcex data set");
					return -1;
				}
				wait_t = 200000 / state->master_clk + 40000 / smrt_d;
				msleep(wait_t);
				dagcm[3] = dagcm_val_get(state);

				if (dagcm[3] > dagcm[1])
					delta_freq = (dagcm[2] - dagcm[0] + dagcm[1] - dagcm[3]) * state->srate / 300;
				else
					delta_freq = 0;
			} else if ((dagcm[2] > dagcm[1]) &&
				   (dagcm[2] > dagcm[0]) &&
				   (dagcm[2] - dagcm[1] > 2 * (dagcm[0] - dagcm[1]))) {

				temp_freq = swp_freq + 2 * state->srate / 8;
				swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
				if (rf_val_set(state, fOSC, smrt_d, R) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "rf val set");
					return -1;
				}
				if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "afcex data set");
					return -1;
				}
				wait_t = 200000 / state->master_clk + 40000 / smrt_d;
				msleep(wait_t);
				dagcm[3] = dagcm_val_get(state);
				if (dagcm[3] > dagcm[1])
					delta_freq = (dagcm[2] - dagcm[0] + dagcm[3] - dagcm[1]) * state->srate / 300;
				else
					delta_freq = 0 ;

			} else {
				delta_freq = 0 ;
			}
			dprintk(verbose, MB86A16_INFO, 1, "SWEEP Frequency = %d", swp_freq);
			swp_freq += delta_freq;
			dprintk(verbose, MB86A16_INFO, 1, "Adjusting .., DELTA Freq = %d, SWEEP Freq=%d", delta_freq, swp_freq);
			if (ABS(state->frequency * 1000 - swp_freq) > 3800) {
				dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL !");
			} else {
				/* Final tune and Viterbi bring-up. */
				S1T = 0;
				S0T = 3;
				CREN = 1;
				AFCEN = 0;
				AFCEXEN = 1;

				if (S01T_set(state, S1T, S0T) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "S01T set error");
					return -1;
				}
				if (DAGC_data_set(state, 0, 0) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error");
					return -1;
				}
				R = vco_dev_get(state, state->srate);
				smrt_info_get(state, state->srate);
				if (smrt_set(state, state->srate) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "smrt set error");
					return -1;
				}
				if (EN_set(state, CREN, AFCEN) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "EN set error");
					return -1;
				}
				if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error");
					return -1;
				}
				swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H);
				if (rf_val_set(state, fOSC, state->srate, R) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "rf val set error");
					return -1;
				}
				if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error");
					return -1;
				}
				if (srst(state) < 0) {
					dprintk(verbose, MB86A16_ERROR, 1, "srst error");
					return -1;
				}
				wait_t = 7 + (10000 + state->srate / 2) / state->srate;
				if (wait_t == 0)
					wait_t = 1;
				msleep_interruptible(wait_t);
				if (mb86a16_read(state, 0x37, &SIG1) != 2) {
					dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
					return -EREMOTEIO;
				}

				/*
				 * Pick Viterbi thresholds/timers from the
				 * signal level; weaker signals get longer
				 * integration windows.
				 */
				if (SIG1 > 110) {
					S2T = 4; S4T = 1; S5T = 6; ETH = 4; VIA = 6;
					wait_t = 7 + (917504 + state->srate / 2) / state->srate;
				} else if (SIG1 > 105) {
					S2T = 4; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
					wait_t = 7 + (1048576 + state->srate / 2) / state->srate;
				} else if (SIG1 > 85) {
					S2T = 5; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
					wait_t = 7 + (1310720 + state->srate / 2) / state->srate;
				} else if (SIG1 > 65) {
					S2T = 6; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
					wait_t = 7 + (1572864 + state->srate / 2) / state->srate;
				} else {
					S2T = 7; S4T = 2; S5T = 8; ETH = 7; VIA = 2;
					wait_t = 7 + (2097152 + state->srate / 2) / state->srate;
				}
				wait_t *= 2; /* FOS */
				S2T_set(state, S2T);
				S45T_set(state, S4T, S5T);
				Vi_set(state, ETH, VIA);
				srst(state);
				msleep_interruptible(wait_t);
				sync = sync_chk(state, &VIRM);
				dprintk(verbose, MB86A16_INFO, 1, "-------- Viterbi=[%d] SYNC=[%d] ---------", VIRM, sync);
				if (VIRM) {
					if (VIRM == 4) {
						/* 5/6 */
						if (SIG1 > 110)
							wait_t = (786432 + state->srate / 2) / state->srate;
						else
							wait_t = (1572864 + state->srate / 2) / state->srate;
						if (state->srate < 5000)
							/* FIXME ! , should be a long wait ! */
							msleep_interruptible(wait_t);
						else
							msleep_interruptible(wait_t);

						/* Still no sync at 5/6: retry with inverted IQ. */
						if (sync_chk(state, &junk) == 0) {
							iq_vt_set(state, 1);
							FEC_srst(state);
						}
					}
					/* 1/2, 2/3, 3/4, 7/8 */
					if (SIG1 > 110)
						wait_t = (786432 + state->srate / 2) / state->srate;
					else
						wait_t = (1572864 + state->srate / 2) / state->srate;
					msleep_interruptible(wait_t);
					SEQ_set(state, 1);
				} else {
					dprintk(verbose, MB86A16_INFO, 1, "NO -- SYNC");
					SEQ_set(state, 1);
					ret = -1;
				}
			}
		} else {
			dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL");
			ret = -1;
		}

		sync = sync_chk(state, &junk);
		if (sync) {
			dprintk(verbose, MB86A16_INFO, 1, "******* SYNC *******");
			freqerr_chk(state, state->frequency, state->srate, 1);
			ret = 0;
			break;
		}
	}

	mb86a16_read(state, 0x15, &agcval);
	mb86a16_read(state, 0x26, &cnmval);
	dprintk(verbose, MB86A16_INFO, 1, "AGC = %02x CNM = %02x", agcval, cnmval);

	return ret;
}
/*
 * Send a DiSEqC master command (4 or 5 bytes).  The message bytes are
 * loaded into consecutive registers starting at 0x18, then the DCC1
 * register is written to trigger transmission.
 *
 * Returns 0 on success, -EINVAL for an unsupported message length,
 * -EREMOTEIO on I2C failure.
 */
static int mb86a16_send_diseqc_msg(struct dvb_frontend *fe,
				   struct dvb_diseqc_master_cmd *cmd)
{
	struct mb86a16_state *state = fe->demodulator_priv;
	int i;
	u8 regs;

	if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0)
		goto err;
	if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0)
		goto err;
	if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0)
		goto err;

	regs = 0x18;

	if (cmd->msg_len > 5 || cmd->msg_len < 4)
		return -EINVAL;

	for (i = 0; i < cmd->msg_len; i++) {
		if (mb86a16_write(state, regs, cmd->msg[i]) < 0)
			goto err;

		regs++;
	}
	/*
	 * NOTE(review): after the loop i == msg_len, so 0x90 + msg_len is
	 * written to DCC1 -- presumably the "send N bytes" command
	 * encoding; confirm against the MB86A16 datasheet.
	 */
	i += 0x90;

	msleep_interruptible(10);

	if (mb86a16_write(state, MB86A16_DCC1, i) < 0)
		goto err;
	if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
		goto err;

	return 0;
err:
	dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
	return -EREMOTEIO;
}
static int mb86a16_send_diseqc_burst(struct dvb_frontend *fe,
enum fe_sec_mini_cmd burst)
{
struct mb86a16_state *state = fe->demodulator_priv;
switch (burst) {
case SEC_MINI_A:
if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
MB86A16_DCC1_TBEN |
MB86A16_DCC1_TBO) < 0)
goto err;
if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
goto err;
break;
case SEC_MINI_B:
if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
MB86A16_DCC1_TBEN) < 0)
goto err;
if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
goto err;
break;
}
return 0;
err:
dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
return -EREMOTEIO;
}
static int mb86a16_set_tone(struct dvb_frontend *fe, enum fe_sec_tone_mode tone)
{
struct mb86a16_state *state = fe->demodulator_priv;
switch (tone) {
case SEC_TONE_ON:
if (mb86a16_write(state, MB86A16_TONEOUT2, 0x00) < 0)
goto err;
if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA |
MB86A16_DCC1_CTOE) < 0)
goto err;
if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0)
goto err;
break;
case SEC_TONE_OFF:
if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0)
goto err;
if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0)
goto err;
if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0)
goto err;
break;
default:
return -EINVAL;
}
return 0;
err:
dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
return -EREMOTEIO;
}
/*
 * Custom search callback: copy the requested frequency / symbol rate
 * into the demod state (the chip works in MHz / Ksps) and run the full
 * acquisition sequence.
 */
static enum dvbfe_search mb86a16_search(struct dvb_frontend *fe)
{
	struct dtv_frontend_properties *props = &fe->dtv_property_cache;
	struct mb86a16_state *state = fe->demodulator_priv;

	state->frequency = props->frequency / 1000;
	state->srate = props->symbol_rate / 1000;

	if (mb86a16_set_fe(state) != 0) {
		dprintk(verbose, MB86A16_ERROR, 1, "Lock acquisition failed!");
		return DVBFE_ALGO_SEARCH_FAILED;
	}

	dprintk(verbose, MB86A16_ERROR, 1, "Successfully acquired LOCK");
	return DVBFE_ALGO_SEARCH_SUCCESS;
}
/* Free the demod state allocated in mb86a16_attach(). */
static void mb86a16_release(struct dvb_frontend *fe)
{
	kfree(fe->demodulator_priv);
}
/* No dedicated bring-up needed; tuning is done in mb86a16_search(). */
static int mb86a16_init(struct dvb_frontend *fe)
{
	return 0;
}
/* No power-down sequence is implemented for this demod. */
static int mb86a16_sleep(struct dvb_frontend *fe)
{
	return 0;
}
/*
 * Read the bit error rate monitor.
 *
 * When BER monitoring is enabled (BERMON bit 0x04) a coarse 5-bit
 * value is reported first; if a fine counter is configured it is then
 * normalised by the configured measurement window.
 *
 * Fixes over the original:
 *  - "*ber /= 2 ^ timer" used the XOR operator (2 ^ 16 == 18), not a
 *    power of two; replaced with a right shift by the window exponent.
 *  - "timer" could be read uninitialized: ber_rst (= ber_mon >> 3) can
 *    exceed 3, and ber_tim (= ber_mon >> 1) is always >= 2 whenever
 *    the 0x04 enable bit is set, so neither if-chain was guaranteed to
 *    assign it.  Both selections now have an explicit default.
 *
 * Returns 0 on success, -EREMOTEIO on I2C failure.
 */
static int mb86a16_read_ber(struct dvb_frontend *fe, u32 *ber)
{
	u8 ber_mon, ber_tab, ber_lsb, ber_mid, ber_msb, ber_tim, ber_rst;
	u32 timer;
	struct mb86a16_state *state = fe->demodulator_priv;

	*ber = 0;
	if (mb86a16_read(state, MB86A16_BERMON, &ber_mon) != 2)
		goto err;
	if (mb86a16_read(state, MB86A16_BERTAB, &ber_tab) != 2)
		goto err;
	if (mb86a16_read(state, MB86A16_BERLSB, &ber_lsb) != 2)
		goto err;
	if (mb86a16_read(state, MB86A16_BERMID, &ber_mid) != 2)
		goto err;
	if (mb86a16_read(state, MB86A16_BERMSB, &ber_msb) != 2)
		goto err;
	/* BER monitor invalid when BER_EN = 0 */
	if (ber_mon & 0x04) {
		/* coarse, fast calculation */
		*ber = ber_tab & 0x1f;
		dprintk(verbose, MB86A16_DEBUG, 1, "BER coarse=[0x%02x]", *ber);
		if (ber_mon & 0x01) {
			/*
			 * BER_SEL = 1, The monitored BER is the estimated
			 * value with a Reed-Solomon decoder error amount at
			 * the deinterleaver output.
			 * monitored BER is expressed as a 20 bit output in total
			 */
			ber_rst = ber_mon >> 3;
			*ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
			/* Measurement window; out-of-range selector values
			 * fall back to the longest window. */
			if (ber_rst == 0)
				timer = 12500000;
			else if (ber_rst == 1)
				timer = 25000000;
			else if (ber_rst == 2)
				timer = 50000000;
			else
				timer = 100000000;
			*ber /= timer;
			dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
		} else {
			/*
			 * BER_SEL = 0, The monitored BER is the estimated
			 * value with a Viterbi decoder error amount at the
			 * QPSK demodulator output.
			 * monitored BER is expressed as a 24 bit output in total
			 */
			ber_tim = ber_mon >> 1;
			*ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb;
			/*
			 * NOTE(review): the original tested ber_tim against
			 * 0/1 only, which the enable bit makes impossible;
			 * presumably a single selector bit was intended --
			 * default to the 2^16 window, confirm with the
			 * datasheet.
			 */
			if (ber_tim == 1)
				timer = 24;
			else
				timer = 16;
			/* Normalise by 2^timer (original used "2 ^ timer",
			 * i.e. XOR, by mistake). */
			*ber >>= timer;
			dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber);
		}
	}
	return 0;
err:
	dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
	return -EREMOTEIO;
}
/*
 * Report signal strength from the AGC monitor register.
 * Returns 0 on success, -EREMOTEIO on I2C failure.
 */
static int mb86a16_read_signal_strength(struct dvb_frontend *fe, u16 *strength)
{
	u8 agcm = 0;
	struct mb86a16_state *state = fe->demodulator_priv;

	*strength = 0;
	if (mb86a16_read(state, MB86A16_AGCM, &agcm) != 2) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}

	/* Percentage form, used only for the debug print below. */
	*strength = ((0xff - agcm) * 100) / 256;
	dprintk(verbose, MB86A16_DEBUG, 1, "Signal strength=[%d %%]", (u8) *strength);
	/*
	 * NOTE(review): the reported value is then replaced with an
	 * offset raw AGC reading (0xff00 + agcm) rather than the
	 * percentage -- presumably deliberate scaling into the 16-bit
	 * range; confirm before changing.
	 */
	*strength = (0xffff - 0xff) + agcm;

	return 0;
}
/* One entry of the C/N lookup table. */
struct cnr {
	u8 cn_reg;	/* raw C/N monitor register reading (upper bound) */
	u8 cn_val;	/* mapped C/N value, in dB (see mb86a16_read_snr) */
};

/* Raw monitor reading -> C/N mapping, ascending; first entry whose
 * cn_reg exceeds the reading wins. */
static const struct cnr cnr_tab[] = {
	{  35,  2 },
	{  40,  3 },
	{  50,  4 },
	{  60,  5 },
	{  70,  6 },
	{  80,  7 },
	{  92,  8 },
	{ 103,  9 },
	{ 115, 10 },
	{ 138, 12 },
	{ 162, 15 },
	{ 180, 18 },
	{ 185, 19 },
	{ 189, 20 },
	{ 195, 22 },
	{ 199, 24 },
	{ 201, 25 },
	{ 202, 26 },
	{ 203, 27 },
	{ 205, 28 },
	{ 208, 30 }
};
/*
 * Report SNR: translate the raw C/N monitor (register 0x26) through
 * cnr_tab[], then return it as an offset 16-bit value (as with signal
 * strength).  Returns 0 on success, -EREMOTEIO on I2C failure.
 */
static int mb86a16_read_snr(struct dvb_frontend *fe, u16 *snr)
{
	struct mb86a16_state *state = fe->demodulator_priv;
	int low_tide = 2, high_tide = 30, q_level;
	unsigned int idx;
	u8 cn;

	*snr = 0;
	if (mb86a16_read(state, 0x26, &cn) != 2) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}

	/* First table entry above the reading gives the C/N in dB. */
	for (idx = 0; idx < ARRAY_SIZE(cnr_tab); idx++) {
		if (cn < cnr_tab[idx].cn_reg) {
			*snr = cnr_tab[idx].cn_val;
			break;
		}
	}
	q_level = (*snr * 100) / (high_tide - low_tide);
	dprintk(verbose, MB86A16_ERROR, 1, "SNR (Quality) = [%d dB], Level=%d %%", *snr, q_level);
	*snr = (0xffff - 0xff) + *snr;

	return 0;
}
/*
 * Report the distortion monitor register as the uncorrected block
 * count.  Returns 0 on success, -EREMOTEIO on I2C failure.
 */
static int mb86a16_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks)
{
	struct mb86a16_state *state = fe->demodulator_priv;
	u8 dist;

	if (mb86a16_read(state, MB86A16_DISTMON, &dist) != 2) {
		dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error");
		return -EREMOTEIO;
	}
	*ucblocks = dist;

	return 0;
}
/* Tuning is driven by our own search() callback, not the DVB core. */
static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe)
{
	return DVBFE_ALGO_CUSTOM;
}
/* DVB frontend ops for the MB86A16 DVB-S demodulator. */
static struct dvb_frontend_ops mb86a16_ops = {
	.delsys = { SYS_DVBS },
	.info = {
		.name			= "Fujitsu MB86A16 DVB-S",
		.frequency_min		= 950000,	/* kHz */
		.frequency_max		= 2150000,	/* kHz */
		.frequency_stepsize	= 3000,
		.frequency_tolerance	= 0,
		.symbol_rate_min	= 1000000,	/* sps */
		.symbol_rate_max	= 45000000,	/* sps */
		.symbol_rate_tolerance	= 500,
		.caps			= FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 |
					  FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 |
					  FE_CAN_FEC_7_8 | FE_CAN_QPSK |
					  FE_CAN_FEC_AUTO
	},
	.release			= mb86a16_release,

	/* Custom algo: the driver does its own sweep in .search. */
	.get_frontend_algo		= mb86a16_frontend_algo,
	.search				= mb86a16_search,

	.init				= mb86a16_init,
	.sleep				= mb86a16_sleep,
	.read_status			= mb86a16_read_status,

	.read_ber			= mb86a16_read_ber,
	.read_signal_strength		= mb86a16_read_signal_strength,
	.read_snr			= mb86a16_read_snr,
	.read_ucblocks			= mb86a16_read_ucblocks,

	.diseqc_send_master_cmd		= mb86a16_send_diseqc_msg,
	.diseqc_send_burst		= mb86a16_send_diseqc_burst,
	.set_tone			= mb86a16_set_tone,
};
/*
 * Allocate the demod state, probe the chip ID (register 0x7f must
 * read 0xfe) and hook up the frontend ops.  Returns the frontend on
 * success, NULL on allocation failure or when the chip is not found.
 */
struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config,
				    struct i2c_adapter *i2c_adap)
{
	struct mb86a16_state *state;
	u8 dev_id = 0;

	state = kmalloc(sizeof(struct mb86a16_state), GFP_KERNEL);
	if (!state)
		return NULL;

	state->config = config;
	state->i2c_adap = i2c_adap;

	/* Probe the device before committing to it. */
	mb86a16_read(state, 0x7f, &dev_id);
	if (dev_id != 0xfe) {
		kfree(state);
		return NULL;
	}

	memcpy(&state->frontend.ops, &mb86a16_ops, sizeof(struct dvb_frontend_ops));
	state->frontend.demodulator_priv = state;
	state->frontend.ops.set_voltage = state->config->set_voltage;

	return &state->frontend;
}
EXPORT_SYMBOL(mb86a16_attach);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Manu Abraham");
| gpl-2.0 |
CyanogenMod/android_kernel_sony_msm8930 | drivers/gpio/gpiolib.c | 1576 | 45843 | #include <linux/kernel.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/idr.h>
#include <linux/slab.h>
#define CREATE_TRACE_POINTS
#include <trace/events/gpio.h>
/* Optional implementation infrastructure for GPIO interfaces.
*
* Platforms may want to use this if they tend to use very many GPIOs
* that aren't part of a System-On-Chip core; or across I2C/SPI/etc.
*
* When kernel footprint or instruction count is an issue, simpler
* implementations may be preferred. The GPIO programming interface
* allows for inlining speed-critical get/set operations for common
* cases, so that access to SOC-integrated GPIOs can sometimes cost
* only an instruction or two per bit.
*/
/* When debugging, extend minimal trust to callers and platform code.
* Also emit diagnostic messages that may help initial bringup, when
* board setup or driver bugs are most common.
*
* Otherwise, minimize overhead in what may be bitbanging codepaths.
*/
#ifdef DEBUG
#define extra_checks 1
#else
#define extra_checks 0
#endif
/* gpio_lock prevents conflicts during gpio_desc[] table updates.
* While any GPIO is requested, its gpio_chip is not removable;
* each GPIO's "requested" flag serves as a lock and refcount.
*/
static DEFINE_SPINLOCK(gpio_lock);
/*
 * Per-GPIO bookkeeping: the owning chip plus a flag word.  The low
 * bits of @flags carry the FLAG_* state below (FLAG_REQUESTED doubles
 * as a lock/refcount, see the comment above gpio_lock); bits from
 * ID_SHIFT upward are reserved for non-flag use (see GPIO_FLAGS_MASK).
 */
struct gpio_desc {
	struct gpio_chip	*chip;
	unsigned long		flags;
/* flag symbols are bit numbers */
#define FLAG_REQUESTED	0
#define FLAG_IS_OUT	1
#define FLAG_RESERVED	2
#define FLAG_EXPORT	3	/* protected by sysfs_lock */
#define FLAG_SYSFS	4	/* exported via /sys/class/gpio/control */
#define FLAG_TRIG_FALL	5	/* trigger on falling edge */
#define FLAG_TRIG_RISE	6	/* trigger on rising edge */
#define FLAG_ACTIVE_LOW	7	/* sysfs value has active low */
#define FLAG_OPEN_DRAIN	8	/* Gpio is open drain type */
#define FLAG_OPEN_SOURCE 9	/* Gpio is open source type */

#define ID_SHIFT	16	/* add new flags before this one */

#define GPIO_FLAGS_MASK		((1 << ID_SHIFT) - 1)
#define GPIO_TRIGGER_MASK	(BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE))

#ifdef CONFIG_DEBUG_FS
	const char		*label;	/* owner name, debugfs only */
#endif
};
static struct gpio_desc gpio_desc[ARCH_NR_GPIOS];
#ifdef CONFIG_GPIO_SYSFS
static DEFINE_IDR(dirent_idr);
#endif
/* Record a human-readable owner label; a no-op without debugfs. */
static inline void desc_set_label(struct gpio_desc *d, const char *label)
{
#ifdef CONFIG_DEBUG_FS
	d->label = label;
#endif
}
/* Warn when drivers omit gpio_request() calls -- legal but ill-advised
* when setting direction, and otherwise illegal. Until board setup code
* and drivers use explicit requests everywhere (which won't happen when
* those calls have no teeth) we can't avoid autorequesting. This nag
* message should motivate switching to explicit requests... so should
* the weaker cleanup after faults, compared to gpio_request().
*
* NOTE: the autorequest mechanism is going away; at this point it's
* only "legal" in the sense that (old) code using it won't break yet,
* but instead only triggers a WARN() stack dump.
*/
/* Autorequest @desc (chip offset @offset) if nobody has requested it yet.
 * Returns 0 on success (or if already requested), 1 when the caller must
 * still invoke chip->request() outside the spinlock, negative on error.
 * Called with gpio_lock held.
 */
static int gpio_ensure_requested(struct gpio_desc *desc, unsigned offset)
{
	const struct gpio_chip *chip = desc->chip;
	const int gpio = chip->base + offset;

	/* The WARN fires when the flag was previously clear, i.e. nobody
	 * called gpio_request() first; we then request on their behalf.
	 */
	if (WARN(test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0,
			"autorequest GPIO-%d\n", gpio)) {
		if (!try_module_get(chip->owner)) {
			pr_err("GPIO-%d: module can't be gotten \n", gpio);
			clear_bit(FLAG_REQUESTED, &desc->flags);
			/* lose */
			return -EIO;
		}
		desc_set_label(desc, "[auto]");
		/* caller must chip->request() w/o spinlock */
		if (chip->request)
			return 1;
	}
	return 0;
}
/* caller holds gpio_lock *OR* gpio is marked as requested */
struct gpio_chip *gpio_to_chip(unsigned gpio)
{
	/* no bounds check: callers must pass a valid gpio number */
	return gpio_desc[gpio].chip;
}
/* dynamic allocation of GPIOs, e.g. on a hotplugged device */
static int gpiochip_find_base(int ngpio)
{
	int i;
	int spare = 0;
	int base = -ENOSPC;

	/* Scan from the top down, so dynamically assigned ranges don't
	 * collide with the low numbers platforms typically hardwire.
	 * "spare" counts consecutive free (unclaimed, unreserved) slots.
	 */
	for (i = ARCH_NR_GPIOS - 1; i >= 0 ; i--) {
		struct gpio_desc *desc = &gpio_desc[i];
		struct gpio_chip *chip = desc->chip;

		if (!chip && !test_bit(FLAG_RESERVED, &desc->flags)) {
			spare++;
			if (spare == ngpio) {
				base = i;
				break;
			}
		} else {
			spare = 0;
			if (chip)
				/* jump past the rest of this chip's range */
				i -= chip->ngpio - 1;
		}
	}

	if (gpio_is_valid(base))
		pr_debug("%s: found new base at %d\n", __func__, base);
	return base;
}
/**
* gpiochip_reserve() - reserve range of gpios to use with platform code only
* @start: starting gpio number
* @ngpio: number of gpios to reserve
* Context: platform init, potentially before irqs or kmalloc will work
*
* Returns a negative errno if any gpio within the range is already reserved
* or registered, else returns zero as a success code. Use this function
* to mark a range of gpios as unavailable for dynamic gpio number allocation,
* for example because its driver support is not yet loaded.
*/
int __init gpiochip_reserve(int start, int ngpio)
{
	unsigned long flags;
	int status = 0;
	int gpio;

	/* both ends of the range must be valid gpio numbers */
	if (!gpio_is_valid(start) || !gpio_is_valid(start + ngpio - 1))
		return -EINVAL;

	spin_lock_irqsave(&gpio_lock, flags);

	for (gpio = start; gpio < start + ngpio; gpio++) {
		struct gpio_desc *desc = &gpio_desc[gpio];

		/* refuse if any slot is claimed or already reserved */
		if (desc->chip || test_bit(FLAG_RESERVED, &desc->flags)) {
			status = -EBUSY;
			goto err;
		}

		set_bit(FLAG_RESERVED, &desc->flags);
	}

	pr_debug("%s: reserved gpios from %d to %d\n",
		__func__, start, start + ngpio - 1);
err:
	spin_unlock_irqrestore(&gpio_lock, flags);
	return status;
}
#ifdef CONFIG_GPIO_SYSFS
/* lock protects against unexport_gpio() being called while
* sysfs files are active.
*/
static DEFINE_MUTEX(sysfs_lock);
/*
* /sys/class/gpio/gpioN... only for GPIOs that are exported
* /direction
* * MAY BE OMITTED if kernel won't allow direction changes
* * is read/write as "in" or "out"
* * may also be written as "high" or "low", initializing
* output value as specified ("out" implies "low")
* /value
* * always readable, subject to hardware behavior
* * may be writable, as zero/nonzero
* /edge
* * configures behavior of poll(2) on /value
* * available only if pin can generate IRQs on input
* * is read/write as "none", "falling", "rising", or "both"
* /active_low
* * configures polarity of /value
* * is read/write as zero/nonzero
* * also affects existing and subsequent "falling" and "rising"
* /edge configuration
*/
/* Read handler for the per-GPIO "direction" attribute: "in" or "out". */
static ssize_t gpio_direction_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	const struct gpio_desc	*desc = dev_get_drvdata(dev);
	ssize_t			ret;

	mutex_lock(&sysfs_lock);

	if (test_bit(FLAG_EXPORT, &desc->flags)) {
		const char *dir = test_bit(FLAG_IS_OUT, &desc->flags)
			? "out" : "in";

		ret = sprintf(buf, "%s\n", dir);
	} else {
		ret = -EIO;
	}

	mutex_unlock(&sysfs_lock);
	return ret;
}
/* Write handler for "direction": accepts "in", "out", "low", "high";
 * "out" and "low" both start the output driven low.
 */
static ssize_t gpio_direction_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	const struct gpio_desc	*desc = dev_get_drvdata(dev);
	unsigned		gpio = desc - gpio_desc;
	ssize_t			status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags)) {
		status = -EIO;
	} else if (sysfs_streq(buf, "high")) {
		status = gpio_direction_output(gpio, 1);
	} else if (sysfs_streq(buf, "out") || sysfs_streq(buf, "low")) {
		status = gpio_direction_output(gpio, 0);
	} else if (sysfs_streq(buf, "in")) {
		status = gpio_direction_input(gpio);
	} else {
		status = -EINVAL;
	}

	mutex_unlock(&sysfs_lock);
	return status ? : size;
}

static /* const */ DEVICE_ATTR(direction, 0644,
		gpio_direction_show, gpio_direction_store);
/* Read handler for "value": current level, inverted if active-low. */
static ssize_t gpio_value_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	const struct gpio_desc	*desc = dev_get_drvdata(dev);
	unsigned		gpio = desc - gpio_desc;
	ssize_t			status;

	mutex_lock(&sysfs_lock);

	if (test_bit(FLAG_EXPORT, &desc->flags)) {
		int value = !!gpio_get_value_cansleep(gpio);

		if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
			value = !value;
		status = sprintf(buf, "%d\n", value);
	} else {
		status = -EIO;
	}

	mutex_unlock(&sysfs_lock);
	return status;
}
/* Write handler for "value": only valid on outputs; any nonzero integer
 * drives the (possibly inverted) level high.
 */
static ssize_t gpio_value_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	const struct gpio_desc	*desc = dev_get_drvdata(dev);
	unsigned		gpio = desc - gpio_desc;
	ssize_t			status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags)) {
		status = -EIO;
	} else if (!test_bit(FLAG_IS_OUT, &desc->flags)) {
		status = -EPERM;
	} else {
		long value;

		status = strict_strtol(buf, 0, &value);
		if (status == 0) {
			int level = (value != 0);

			if (test_bit(FLAG_ACTIVE_LOW, &desc->flags))
				level = !level;
			gpio_set_value_cansleep(gpio, level);
			status = size;
		}
	}

	mutex_unlock(&sysfs_lock);
	return status;
}

static const DEVICE_ATTR(value, 0644,
		gpio_value_show, gpio_value_store);
/* IRQ handler installed by gpio_setup_irq(): pokes the "value" sysfs
 * dirent so poll(2)/select(2) waiters wake up on the configured edge.
 */
static irqreturn_t gpio_sysfs_irq(int irq, void *priv)
{
	struct sysfs_dirent	*value_sd = priv;

	sysfs_notify_dirent(value_sd);
	return IRQ_HANDLED;
}
/* (Re)configure IRQ-driven poll(2) support on an exported GPIO's "value"
 * attribute.  @gpio_flags is the requested subset of GPIO_TRIGGER_MASK;
 * zero tears the IRQ down.  Called with sysfs_lock held.
 */
static int gpio_setup_irq(struct gpio_desc *desc, struct device *dev,
	unsigned long gpio_flags)
{
	struct sysfs_dirent	*value_sd;
	unsigned long		irq_flags;
	int			ret, irq, id;

	/* nothing to do if the trigger configuration is unchanged */
	if ((desc->flags & GPIO_TRIGGER_MASK) == gpio_flags)
		return 0;

	irq = gpio_to_irq(desc - gpio_desc);
	if (irq < 0)
		return -EIO;

	/* release any IRQ previously installed for this GPIO; the idr id
	 * was stashed in the high bits of desc->flags */
	id = desc->flags >> ID_SHIFT;
	value_sd = idr_find(&dirent_idr, id);
	if (value_sd)
		free_irq(irq, value_sd);

	desc->flags &= ~GPIO_TRIGGER_MASK;

	if (!gpio_flags) {
		ret = 0;
		goto free_id;
	}

	/* active-low swaps which physical edge matches "rising"/"falling" */
	irq_flags = IRQF_SHARED;
	if (test_bit(FLAG_TRIG_FALL, &gpio_flags))
		irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_RISING : IRQF_TRIGGER_FALLING;
	if (test_bit(FLAG_TRIG_RISE, &gpio_flags))
		irq_flags |= test_bit(FLAG_ACTIVE_LOW, &desc->flags) ?
			IRQF_TRIGGER_FALLING : IRQF_TRIGGER_RISING;

	if (!value_sd) {
		/* first enable: pin the "value" dirent and remember it in
		 * the idr, keyed by an id packed into desc->flags */
		value_sd = sysfs_get_dirent(dev->kobj.sd, NULL, "value");
		if (!value_sd) {
			ret = -ENODEV;
			goto err_out;
		}

		do {
			ret = -ENOMEM;
			if (idr_pre_get(&dirent_idr, GFP_KERNEL))
				ret = idr_get_new_above(&dirent_idr, value_sd,
							1, &id);
		} while (ret == -EAGAIN);

		if (ret)
			goto free_sd;

		desc->flags &= GPIO_FLAGS_MASK;
		desc->flags |= (unsigned long)id << ID_SHIFT;

		/* the id must round-trip through the flags word intact */
		if (desc->flags >> ID_SHIFT != id) {
			ret = -ERANGE;
			goto free_id;
		}
	}

	ret = request_any_context_irq(irq, gpio_sysfs_irq, irq_flags,
				"gpiolib", value_sd);
	if (ret < 0)
		goto free_id;

	desc->flags |= gpio_flags;
	return 0;

free_id:
	idr_remove(&dirent_idr, id);
	desc->flags &= GPIO_FLAGS_MASK;
free_sd:
	if (value_sd)
		sysfs_put(value_sd);
err_out:
	return ret;
}
/* Mapping between "edge" attribute strings and trigger flag bits. */
static const struct {
	const char *name;
	unsigned long flags;
} trigger_types[] = {
	{ "none",    0 },
	{ "falling", BIT(FLAG_TRIG_FALL) },
	{ "rising",  BIT(FLAG_TRIG_RISE) },
	{ "both",    BIT(FLAG_TRIG_FALL) | BIT(FLAG_TRIG_RISE) },
};
/* Read handler for "edge": name of the currently configured trigger. */
static ssize_t gpio_edge_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	const struct gpio_desc	*desc = dev_get_drvdata(dev);
	ssize_t			status;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags)) {
		status = -EIO;
	} else {
		unsigned long	trigger = desc->flags & GPIO_TRIGGER_MASK;
		int		i;

		status = 0;
		for (i = 0; i < ARRAY_SIZE(trigger_types); i++) {
			if (trigger_types[i].flags == trigger) {
				status = sprintf(buf, "%s\n",
						trigger_types[i].name);
				break;
			}
		}
	}

	mutex_unlock(&sysfs_lock);
	return status;
}
/* Write handler for "edge": accepts one of the trigger_types names. */
static ssize_t gpio_edge_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct gpio_desc	*desc = dev_get_drvdata(dev);
	ssize_t			status;
	int			i;

	/* translate the written string into trigger flags first */
	for (i = 0; i < ARRAY_SIZE(trigger_types); i++) {
		if (sysfs_streq(trigger_types[i].name, buf))
			break;
	}
	if (i == ARRAY_SIZE(trigger_types))
		return -EINVAL;

	mutex_lock(&sysfs_lock);

	if (!test_bit(FLAG_EXPORT, &desc->flags)) {
		status = -EIO;
	} else {
		status = gpio_setup_irq(desc, dev, trigger_types[i].flags);
		if (!status)
			status = size;
	}

	mutex_unlock(&sysfs_lock);
	return status;
}

static DEVICE_ATTR(edge, 0644, gpio_edge_show, gpio_edge_store);
/* Flip the FLAG_ACTIVE_LOW polarity bit on @desc.  If edge-triggered
 * poll(2) support is active on exactly one edge, reinstall the IRQ so
 * the logical edge keeps matching the new polarity.  @dev may be NULL
 * when the GPIO is not exported.  Called with sysfs_lock held.
 */
static int sysfs_set_active_low(struct gpio_desc *desc, struct device *dev,
				int value)
{
	int status = 0;

	/* already at the requested polarity? */
	if (!!test_bit(FLAG_ACTIVE_LOW, &desc->flags) == !!value)
		return 0;

	if (value)
		set_bit(FLAG_ACTIVE_LOW, &desc->flags);
	else
		clear_bit(FLAG_ACTIVE_LOW, &desc->flags);

	/* reconfigure poll(2) support if enabled on one edge only */
	if (dev != NULL && (!!test_bit(FLAG_TRIG_RISE, &desc->flags) ^
				!!test_bit(FLAG_TRIG_FALL, &desc->flags))) {
		unsigned long trigger_flags = desc->flags & GPIO_TRIGGER_MASK;

		gpio_setup_irq(desc, dev, 0);
		status = gpio_setup_irq(desc, dev, trigger_flags);
	}

	return status;
}
/* Read handler for "active_low": 0 or 1. */
static ssize_t gpio_active_low_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	const struct gpio_desc	*desc = dev_get_drvdata(dev);
	ssize_t			status;

	mutex_lock(&sysfs_lock);

	if (test_bit(FLAG_EXPORT, &desc->flags))
		status = sprintf(buf, "%d\n",
				!!test_bit(FLAG_ACTIVE_LOW, &desc->flags));
	else
		status = -EIO;

	mutex_unlock(&sysfs_lock);
	return status;
}
/* Write handler for "active_low": any nonzero integer enables inversion. */
static ssize_t gpio_active_low_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t size)
{
	struct gpio_desc	*desc = dev_get_drvdata(dev);
	ssize_t			status = -EIO;

	mutex_lock(&sysfs_lock);

	if (test_bit(FLAG_EXPORT, &desc->flags)) {
		long value;

		status = strict_strtol(buf, 0, &value);
		if (status == 0)
			status = sysfs_set_active_low(desc, dev, value != 0);
	}

	mutex_unlock(&sysfs_lock);
	return status ? : size;
}

static const DEVICE_ATTR(active_low, 0644,
		gpio_active_low_show, gpio_active_low_store);
/* Attributes present on every exported GPIO; "direction" and "edge"
 * are added separately by gpio_export() when applicable.
 */
static const struct attribute *gpio_attrs[] = {
	&dev_attr_value.attr,
	&dev_attr_active_low.attr,
	NULL,
};

static const struct attribute_group gpio_attr_group = {
	.attrs = (struct attribute **) gpio_attrs,
};
/*
* /sys/class/gpio/gpiochipN/
* /base ... matching gpio_chip.base (N)
* /label ... matching gpio_chip.label
* /ngpio ... matching gpio_chip.ngpio
*/
/* Read-only gpiochipN attributes, each mirroring a gpio_chip field. */
static ssize_t chip_base_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	const struct gpio_chip	*chip = dev_get_drvdata(dev);

	return sprintf(buf, "%d\n", chip->base);
}
static DEVICE_ATTR(base, 0444, chip_base_show, NULL);

/* reads as an empty string when the chip has no label */
static ssize_t chip_label_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	const struct gpio_chip	*chip = dev_get_drvdata(dev);

	return sprintf(buf, "%s\n", chip->label ? : "");
}
static DEVICE_ATTR(label, 0444, chip_label_show, NULL);

static ssize_t chip_ngpio_show(struct device *dev,
			       struct device_attribute *attr, char *buf)
{
	const struct gpio_chip	*chip = dev_get_drvdata(dev);

	return sprintf(buf, "%u\n", chip->ngpio);
}
static DEVICE_ATTR(ngpio, 0444, chip_ngpio_show, NULL);
/* Attribute group installed on every gpiochipN class device. */
static const struct attribute *gpiochip_attrs[] = {
	&dev_attr_base.attr,
	&dev_attr_label.attr,
	&dev_attr_ngpio.attr,
	NULL,
};

static const struct attribute_group gpiochip_attr_group = {
	.attrs = (struct attribute **) gpiochip_attrs,
};
/*
* /sys/class/gpio/export ... write-only
* integer N ... number of GPIO to export (full access)
* /sys/class/gpio/unexport ... write-only
* integer N ... number of GPIO to unexport
*/
/* Handle writes to /sys/class/gpio/export: request the named GPIO on
 * behalf of userspace and create its gpioN node.
 */
static ssize_t export_store(struct class *class,
				struct class_attribute *attr,
				const char *buf, size_t len)
{
	long	gpio;
	int	status;

	status = strict_strtol(buf, 0, &gpio);
	if (status < 0)
		goto done;

	/* No extra locking here; FLAG_SYSFS just signifies that the
	 * request and export were done on behalf of userspace, so
	 * they may be undone on its behalf too.
	 */

	status = gpio_request(gpio, "sysfs");
	if (status < 0)
		goto done;

	status = gpio_export(gpio, true);
	if (status < 0)
		gpio_free(gpio);
	else
		set_bit(FLAG_SYSFS, &gpio_desc[gpio].flags);

done:
	if (status)
		pr_debug("%s: status %d\n", __func__, status);
	return status ? : len;
}
/* Handle writes to /sys/class/gpio/unexport: release a GPIO that was
 * previously exported through the "export" attribute.
 */
static ssize_t unexport_store(struct class *class,
				struct class_attribute *attr,
				const char *buf, size_t len)
{
	long	gpio;
	int	status;

	status = strict_strtol(buf, 0, &gpio);
	if (status < 0)
		goto done;

	status = -EINVAL;

	/* reject bogus commands (gpio_unexport ignores them) */
	if (!gpio_is_valid(gpio))
		goto done;

	/* No extra locking here; FLAG_SYSFS just signifies that the
	 * request and export were done on behalf of userspace, so
	 * they may be undone on its behalf too.
	 */
	if (test_and_clear_bit(FLAG_SYSFS, &gpio_desc[gpio].flags)) {
		status = 0;
		gpio_free(gpio);	/* gpio_free() also unexports */
	}
done:
	if (status)
		pr_debug("%s: status %d\n", __func__, status);
	return status ? : len;
}
/* Class-level export/unexport controls and the "gpio" class itself. */
static struct class_attribute gpio_class_attrs[] = {
	__ATTR(export, 0200, NULL, export_store),
	__ATTR(unexport, 0200, NULL, unexport_store),
	__ATTR_NULL,
};

static struct class gpio_class = {
	.name =		"gpio",
	.owner =	THIS_MODULE,
	.class_attrs =	gpio_class_attrs,
};
/**
* gpio_export - export a GPIO through sysfs
* @gpio: gpio to make available, already requested
* @direction_may_change: true if userspace may change gpio direction
* Context: arch_initcall or later
*
* When drivers want to make a GPIO accessible to userspace after they
* have requested it -- perhaps while debugging, or as part of their
* public interface -- they may use this routine. If the GPIO can
* change direction (some can't) and the caller allows it, userspace
* will see "direction" sysfs attribute which may be used to change
* the gpio's direction. A "value" attribute will always be provided.
*
* Returns zero on success, else an error.
*/
int gpio_export(unsigned gpio, bool direction_may_change)
{
	unsigned long		flags;
	struct gpio_desc	*desc;
	int			status = -EINVAL;
	const char		*ioname = NULL;

	/* can't export until sysfs is available ... */
	if (!gpio_class.p) {
		pr_debug("%s: called too early!\n", __func__);
		return -ENOENT;
	}

	if (!gpio_is_valid(gpio))
		goto done;

	mutex_lock(&sysfs_lock);

	spin_lock_irqsave(&gpio_lock, flags);
	desc = &gpio_desc[gpio];
	/* only requested-but-not-yet-exported GPIOs may proceed */
	if (test_bit(FLAG_REQUESTED, &desc->flags)
			&& !test_bit(FLAG_EXPORT, &desc->flags)) {
		status = 0;
		if (!desc->chip->direction_input
				|| !desc->chip->direction_output)
			direction_may_change = false;
	}
	spin_unlock_irqrestore(&gpio_lock, flags);

	/* NOTE(review): desc->chip is dereferenced here even when the
	 * check above failed (status != 0); a valid but never-claimed
	 * gpio would have chip == NULL -- confirm callers can't hit that.
	 */
	if (desc->chip->names && desc->chip->names[gpio - desc->chip->base])
		ioname = desc->chip->names[gpio - desc->chip->base];

	if (status == 0) {
		struct device	*dev;

		dev = device_create(&gpio_class, desc->chip->dev, MKDEV(0, 0),
				desc, ioname ? ioname : "gpio%u", gpio);
		if (!IS_ERR(dev)) {
			status = sysfs_create_group(&dev->kobj,
						&gpio_attr_group);
			if (!status && direction_may_change)
				status = device_create_file(dev,
						&dev_attr_direction);
			/* "edge" appears only if the pin has an IRQ and
			 * can be (or already is) an input */
			if (!status && gpio_to_irq(gpio) >= 0
					&& (direction_may_change
						|| !test_bit(FLAG_IS_OUT,
							&desc->flags)))
				status = device_create_file(dev,
						&dev_attr_edge);
			if (status != 0)
				device_unregister(dev);
		} else
			status = PTR_ERR(dev);
		if (status == 0)
			set_bit(FLAG_EXPORT, &desc->flags);
	}

	mutex_unlock(&sysfs_lock);

done:
	if (status)
		pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);

	return status;
}
EXPORT_SYMBOL_GPL(gpio_export);
/* class_find_device() match: the device whose drvdata equals @data
 * (a gpio_desc for gpioN nodes, a gpio_chip for gpiochipN nodes).
 */
static int match_export(struct device *dev, void *data)
{
	return dev_get_drvdata(dev) == data;
}
/**
* gpio_export_link - create a sysfs link to an exported GPIO node
* @dev: device under which to create symlink
* @name: name of the symlink
* @gpio: gpio to create symlink to, already exported
*
* Set up a symlink from /sys/.../dev/name to /sys/class/gpio/gpioN
* node. Caller is responsible for unlinking.
*
* Returns zero on success, else an error.
*/
int gpio_export_link(struct device *dev, const char *name, unsigned gpio)
{
	struct gpio_desc	*desc;
	int			status = -EINVAL;

	if (!gpio_is_valid(gpio))
		goto done;

	mutex_lock(&sysfs_lock);

	desc = &gpio_desc[gpio];

	if (test_bit(FLAG_EXPORT, &desc->flags)) {
		struct device *tdev;

		tdev = class_find_device(&gpio_class, NULL, desc, match_export);
		if (tdev != NULL) {
			status = sysfs_create_link(&dev->kobj, &tdev->kobj,
						name);
			/* class_find_device() took a reference on tdev;
			 * drop it now that the symlink is in place.
			 */
			put_device(tdev);
		} else {
			status = -ENODEV;
		}
	}

	mutex_unlock(&sysfs_lock);

done:
	if (status)
		pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);

	return status;
}
EXPORT_SYMBOL_GPL(gpio_export_link);
/**
* gpio_sysfs_set_active_low - set the polarity of gpio sysfs value
* @gpio: gpio to change
* @value: non-zero to use active low, i.e. inverted values
*
* Set the polarity of /sys/class/gpio/gpioN/value sysfs attribute.
* The GPIO does not have to be exported yet. If poll(2) support has
* been enabled for either rising or falling edge, it will be
* reconfigured to follow the new polarity.
*
* Returns zero on success, else an error.
*/
int gpio_sysfs_set_active_low(unsigned gpio, int value)
{
	struct gpio_desc	*desc;
	struct device		*dev = NULL;
	int			status = -EINVAL;

	if (!gpio_is_valid(gpio))
		goto done;

	mutex_lock(&sysfs_lock);

	desc = &gpio_desc[gpio];

	if (test_bit(FLAG_EXPORT, &desc->flags)) {
		dev = class_find_device(&gpio_class, NULL, desc, match_export);
		if (dev == NULL) {
			status = -ENODEV;
			goto unlock;
		}
	}

	status = sysfs_set_active_low(desc, dev, value);

	/* drop the reference class_find_device() took; dev is NULL when
	 * the GPIO was not exported, and put_device(NULL) must be avoided
	 */
	if (dev)
		put_device(dev);

unlock:
	mutex_unlock(&sysfs_lock);

done:
	if (status)
		pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);

	return status;
}
EXPORT_SYMBOL_GPL(gpio_sysfs_set_active_low);
/**
* gpio_unexport - reverse effect of gpio_export()
* @gpio: gpio to make unavailable
*
* This is implicit on gpio_free().
*/
void gpio_unexport(unsigned gpio)
{
	struct gpio_desc *desc;
	int status = 0;
	struct device *dev = NULL;

	if (!gpio_is_valid(gpio)) {
		status = -EINVAL;
		goto done;
	}

	mutex_lock(&sysfs_lock);

	desc = &gpio_desc[gpio];

	if (test_bit(FLAG_EXPORT, &desc->flags)) {

		dev = class_find_device(&gpio_class, NULL, desc, match_export);
		if (dev) {
			/* shut down any poll(2) IRQ before tearing down */
			gpio_setup_irq(desc, dev, 0);
			clear_bit(FLAG_EXPORT, &desc->flags);
		} else
			status = -ENODEV;
	}

	mutex_unlock(&sysfs_lock);

	/* outside sysfs_lock: unregister the device, then drop the
	 * reference class_find_device() took */
	if (dev) {
		device_unregister(dev);
		put_device(dev);
	}

done:
	if (status)
		pr_debug("%s: gpio%d status %d\n", __func__, gpio, status);
}
EXPORT_SYMBOL_GPL(gpio_unexport);
/* Register the /sys/class/gpio/gpiochipN node for @chip.  Returns 0
 * (silently) when sysfs isn't up yet; gpiolib_sysfs_init() retries.
 */
static int gpiochip_export(struct gpio_chip *chip)
{
	int		status;
	struct device	*dev;

	/* Many systems register gpio chips for SOC support very early,
	 * before driver model support is available.  In those cases we
	 * export this later, in gpiolib_sysfs_init() ... here we just
	 * verify that _some_ field of gpio_class got initialized.
	 */
	if (!gpio_class.p)
		return 0;

	/* use chip->base for the ID; it's already known to be unique */
	mutex_lock(&sysfs_lock);
	dev = device_create(&gpio_class, chip->dev, MKDEV(0, 0), chip,
				"gpiochip%d", chip->base);
	if (!IS_ERR(dev)) {
		status = sysfs_create_group(&dev->kobj,
				&gpiochip_attr_group);
	} else
		status = PTR_ERR(dev);
	chip->exported = (status == 0);
	mutex_unlock(&sysfs_lock);

	if (status) {
		unsigned long	flags;
		unsigned	gpio;

		/* on failure, release every descriptor this chip claimed
		 * so gpiochip_add() reports a clean failure */
		spin_lock_irqsave(&gpio_lock, flags);
		gpio = chip->base;
		while (gpio_desc[gpio].chip == chip)
			gpio_desc[gpio++].chip = NULL;
		spin_unlock_irqrestore(&gpio_lock, flags);

		pr_debug("%s: chip %s status %d\n", __func__,
				chip->label, status);
	}

	return status;
}
/* Remove the gpiochipN class device registered by gpiochip_export(). */
static void gpiochip_unexport(struct gpio_chip *chip)
{
	int			status;
	struct device		*dev;

	mutex_lock(&sysfs_lock);
	dev = class_find_device(&gpio_class, NULL, chip, match_export);
	if (dev) {
		/* drop the class_find_device() reference first; the
		 * device_create() reference goes with unregister */
		put_device(dev);
		device_unregister(dev);
		chip->exported = 0;
		status = 0;
	} else
		status = -ENODEV;
	mutex_unlock(&sysfs_lock);

	if (status)
		pr_debug("%s: chip %s status %d\n", __func__,
				chip->label, status);
}
/* Register the gpio class, then export any chips that registered before
 * sysfs was available.
 */
static int __init gpiolib_sysfs_init(void)
{
	int		status;
	unsigned long	flags;
	unsigned	gpio;

	status = class_register(&gpio_class);
	if (status < 0)
		return status;

	/* Scan and register the gpio_chips which registered very
	 * early (e.g. before the class_register above was called).
	 *
	 * We run before arch_initcall() so chip->dev nodes can have
	 * registered, and so arch_initcall() can always gpio_export().
	 */
	spin_lock_irqsave(&gpio_lock, flags);
	for (gpio = 0; gpio < ARCH_NR_GPIOS; gpio++) {
		struct gpio_chip	*chip;

		chip = gpio_desc[gpio].chip;
		if (!chip || chip->exported)
			continue;

		/* gpiochip_export() may sleep, so drop the spinlock.
		 * NOTE(review): chip could in principle go away in this
		 * window -- confirm removal can't race with early boot.
		 */
		spin_unlock_irqrestore(&gpio_lock, flags);
		status = gpiochip_export(chip);
		spin_lock_irqsave(&gpio_lock, flags);
	}
	spin_unlock_irqrestore(&gpio_lock, flags);

	return status;
}
postcore_initcall(gpiolib_sysfs_init);
#else
/* sysfs support disabled: chip export/unexport become no-ops */
static inline int gpiochip_export(struct gpio_chip *chip)
{
	return 0;
}

static inline void gpiochip_unexport(struct gpio_chip *chip)
{
}

#endif /* CONFIG_GPIO_SYSFS */
/**
* gpiochip_add() - register a gpio_chip
* @chip: the chip to register, with chip->base initialized
* Context: potentially before irqs or kmalloc will work
*
* Returns a negative errno if the chip can't be registered, such as
* because the chip->base is invalid or already associated with a
* different chip. Otherwise it returns zero as a success code.
*
* When gpiochip_add() is called very early during boot, so that GPIOs
* can be freely used, the chip->dev device must be registered before
* the gpio framework's arch_initcall(). Otherwise sysfs initialization
* for GPIOs will fail rudely.
*
* If chip->base is negative, this requests dynamic assignment of
* a range of valid GPIOs.
*/
int gpiochip_add(struct gpio_chip *chip)
{
	unsigned long	flags;
	int		status = 0;
	unsigned	id;
	int		base = chip->base;

	/* a negative base requests dynamic assignment below */
	if ((!gpio_is_valid(base) || !gpio_is_valid(base + chip->ngpio - 1))
			&& base >= 0) {
		status = -EINVAL;
		goto fail;
	}

	spin_lock_irqsave(&gpio_lock, flags);

	if (base < 0) {
		base = gpiochip_find_base(chip->ngpio);
		if (base < 0) {
			status = base;
			goto unlock;
		}
		chip->base = base;
	}

	/* these GPIO numbers must not be managed by another gpio_chip */
	for (id = base; id < base + chip->ngpio; id++) {
		if (gpio_desc[id].chip != NULL) {
			status = -EBUSY;
			break;
		}
	}
	if (status == 0) {
		for (id = base; id < base + chip->ngpio; id++) {
			gpio_desc[id].chip = chip;

			/* REVISIT:  most hardware initializes GPIOs as
			 * inputs (often with pullups enabled) so power
			 * usage is minimized.  Linux code should set the
			 * gpio direction first thing; but until it does,
			 * we may expose the wrong direction in sysfs.
			 */
			gpio_desc[id].flags = !chip->direction_input
				? (1 << FLAG_IS_OUT)
				: 0;
		}
	}

	of_gpiochip_add(chip);

unlock:
	spin_unlock_irqrestore(&gpio_lock, flags);

	if (status)
		goto fail;

	status = gpiochip_export(chip);
	if (status)
		goto fail;

	pr_info("gpiochip_add: registered GPIOs %d to %d on device: %s\n",
		chip->base, chip->base + chip->ngpio - 1,
		chip->label ? : "generic");

	return 0;
fail:
	/* failures here can mean systems won't boot... */
	pr_err("gpiochip_add: gpios %d..%d (%s) failed to register\n",
		chip->base, chip->base + chip->ngpio - 1,
		chip->label ? : "generic");
	return status;
}
EXPORT_SYMBOL_GPL(gpiochip_add);
/**
* gpiochip_remove() - unregister a gpio_chip
* @chip: the chip to unregister
*
* A gpio_chip with any GPIOs still requested may not be removed.
*/
int gpiochip_remove(struct gpio_chip *chip)
{
	unsigned long	flags;
	int		status = 0;
	unsigned	id;

	spin_lock_irqsave(&gpio_lock, flags);

	of_gpiochip_remove(chip);

	/* refuse removal while any of the chip's GPIOs is requested */
	for (id = chip->base; id < chip->base + chip->ngpio; id++) {
		if (test_bit(FLAG_REQUESTED, &gpio_desc[id].flags)) {
			status = -EBUSY;
			break;
		}
	}
	if (status == 0) {
		for (id = chip->base; id < chip->base + chip->ngpio; id++)
			gpio_desc[id].chip = NULL;
	}

	spin_unlock_irqrestore(&gpio_lock, flags);

	if (status == 0)
		gpiochip_unexport(chip);

	return status;
}
EXPORT_SYMBOL_GPL(gpiochip_remove);
/**
* gpiochip_find() - iterator for locating a specific gpio_chip
* @data: data to pass to match function
* @callback: Callback function to check gpio_chip
*
* Similar to bus_find_device. It returns a reference to a gpio_chip as
* determined by a user supplied @match callback. The callback should return
* 0 if the device doesn't match and non-zero if it does. If the callback is
* non-zero, this function will return to the caller and not iterate over any
* more gpio_chips.
*/
struct gpio_chip *gpiochip_find(const void *data,
				int (*match)(struct gpio_chip *chip,
					     const void *data))
{
	struct gpio_chip	*found = NULL;
	unsigned long		flags;
	int			i;

	spin_lock_irqsave(&gpio_lock, flags);
	for (i = 0; i < ARCH_NR_GPIOS; i++) {
		struct gpio_chip *chip = gpio_desc[i].chip;

		/* each chip occupies consecutive slots; the first hit wins */
		if (chip && match(chip, data)) {
			found = chip;
			break;
		}
	}
	spin_unlock_irqrestore(&gpio_lock, flags);

	return found;
}
EXPORT_SYMBOL_GPL(gpiochip_find);
/* These "optional" allocation calls help prevent drivers from stomping
* on each other, and help provide better diagnostics in debugfs.
* They're called even less than the "set direction" calls.
*/
int gpio_request(unsigned gpio, const char *label)
{
	struct gpio_desc	*desc;
	struct gpio_chip	*chip;
	int			status = -EINVAL;
	unsigned long		flags;

	spin_lock_irqsave(&gpio_lock, flags);

	if (!gpio_is_valid(gpio))
		goto done;
	desc = &gpio_desc[gpio];
	chip = desc->chip;
	if (chip == NULL)
		goto done;

	/* pin the providing module while the GPIO is held */
	if (!try_module_get(chip->owner))
		goto done;

	/* NOTE:  gpio_request() can be called in early boot,
	 * before IRQs are enabled, for non-sleeping (SOC) GPIOs.
	 */

	if (test_and_set_bit(FLAG_REQUESTED, &desc->flags) == 0) {
		desc_set_label(desc, label ? : "?");
		status = 0;
	} else {
		status = -EBUSY;
		module_put(chip->owner);
		goto done;
	}

	if (chip->request) {
		/* chip->request may sleep */
		spin_unlock_irqrestore(&gpio_lock, flags);
		status = chip->request(chip, gpio - chip->base);
		spin_lock_irqsave(&gpio_lock, flags);

		if (status < 0) {
			/* undo the request on chip-level refusal */
			desc_set_label(desc, NULL);
			module_put(chip->owner);
			clear_bit(FLAG_REQUESTED, &desc->flags);
		}
	}

done:
	if (status)
		pr_debug("gpio_request: gpio-%d (%s) status %d\n",
			gpio, label ? : "?", status);
	spin_unlock_irqrestore(&gpio_lock, flags);
	return status;
}
EXPORT_SYMBOL_GPL(gpio_request);
void gpio_free(unsigned gpio)
{
	unsigned long		flags;
	struct gpio_desc	*desc;
	struct gpio_chip	*chip;

	might_sleep();

	if (!gpio_is_valid(gpio)) {
		WARN_ON(extra_checks);
		return;
	}

	/* tear down any sysfs export first (no-op if not exported) */
	gpio_unexport(gpio);

	spin_lock_irqsave(&gpio_lock, flags);

	desc = &gpio_desc[gpio];
	chip = desc->chip;
	if (chip && test_bit(FLAG_REQUESTED, &desc->flags)) {
		if (chip->free) {
			/* chip->free may sleep, so drop the spinlock */
			spin_unlock_irqrestore(&gpio_lock, flags);
			might_sleep_if(chip->can_sleep);
			chip->free(chip, gpio - chip->base);
			spin_lock_irqsave(&gpio_lock, flags);
		}
		desc_set_label(desc, NULL);
		module_put(desc->chip->owner);
		/* clear per-request state so the next owner starts clean */
		clear_bit(FLAG_ACTIVE_LOW, &desc->flags);
		clear_bit(FLAG_REQUESTED, &desc->flags);
		clear_bit(FLAG_OPEN_DRAIN, &desc->flags);
		clear_bit(FLAG_OPEN_SOURCE, &desc->flags);
	} else
		WARN_ON(extra_checks);

	spin_unlock_irqrestore(&gpio_lock, flags);
}
EXPORT_SYMBOL_GPL(gpio_free);
/**
* gpio_request_one - request a single GPIO with initial configuration
* @gpio: the GPIO number
* @flags: GPIO configuration as specified by GPIOF_*
* @label: a literal description string of this GPIO
*/
int gpio_request_one(unsigned gpio, unsigned long flags, const char *label)
{
	int status;

	status = gpio_request(gpio, label);
	if (status)
		return status;

	/* record drive-mode hints before setting the direction */
	if (flags & GPIOF_OPEN_DRAIN)
		set_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags);
	if (flags & GPIOF_OPEN_SOURCE)
		set_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags);

	if (flags & GPIOF_DIR_IN)
		status = gpio_direction_input(gpio);
	else
		status = gpio_direction_output(gpio,
				(flags & GPIOF_INIT_HIGH) ? 1 : 0);

	/* undo the request if the direction couldn't be configured */
	if (status)
		gpio_free(gpio);

	return status;
}
EXPORT_SYMBOL_GPL(gpio_request_one);
/**
* gpio_request_array - request multiple GPIOs in a single call
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
int gpio_request_array(const struct gpio *array, size_t num)
{
	int i, err;

	for (i = 0; i < num; i++) {
		err = gpio_request_one(array[i].gpio, array[i].flags,
				       array[i].label);
		if (err) {
			/* unwind everything requested so far */
			while (i-- > 0)
				gpio_free(array[i].gpio);
			return err;
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(gpio_request_array);
/**
* gpio_free_array - release multiple GPIOs in a single call
* @array: array of the 'struct gpio'
* @num: how many GPIOs in the array
*/
void gpio_free_array(const struct gpio *array, size_t num)
{
	size_t i;

	for (i = 0; i < num; i++)
		gpio_free(array[i].gpio);
}
EXPORT_SYMBOL_GPL(gpio_free_array);
/**
* gpiochip_is_requested - return string iff signal was requested
* @chip: controller managing the signal
* @offset: of signal within controller's 0..(ngpio - 1) range
*
* Returns NULL if the GPIO is not currently requested, else a string.
* If debugfs support is enabled, the string returned is the label passed
* to gpio_request(); otherwise it is a meaningless constant.
*
* This function is for use by GPIO controller drivers. The label can
* help with diagnostics, and knowing that the signal is used as a GPIO
* can help avoid accidentally multiplexing it to another controller.
*/
const char *gpiochip_is_requested(struct gpio_chip *chip, unsigned offset)
{
	unsigned gpio = chip->base + offset;

	/* the offset must map back into this chip's own range */
	if (!gpio_is_valid(gpio) || gpio_desc[gpio].chip != chip)
		return NULL;
	if (test_bit(FLAG_REQUESTED, &gpio_desc[gpio].flags) == 0)
		return NULL;
#ifdef CONFIG_DEBUG_FS
	return gpio_desc[gpio].label;
#else
	return "?";	/* requested, but labels aren't compiled in */
#endif
}
EXPORT_SYMBOL_GPL(gpiochip_is_requested);
/* Drivers MUST set GPIO direction before making get/set calls. In
* some cases this is done in early boot, before IRQs are enabled.
*
* As a rule these aren't called more than once (except for drivers
* using the open-drain emulation idiom) so these are natural places
* to accumulate extra debugging checks. Note that we can't (yet)
* rely on gpio_request() having been called beforehand.
*/
int gpio_direction_input(unsigned gpio)
{
	unsigned long		flags;
	struct gpio_chip	*chip;
	struct gpio_desc	*desc = &gpio_desc[gpio];
	int			status = -EINVAL;

	spin_lock_irqsave(&gpio_lock, flags);

	if (!gpio_is_valid(gpio))
		goto fail;
	chip = desc->chip;
	if (!chip || !chip->get || !chip->direction_input)
		goto fail;
	/* from here on, "gpio" is the offset within the chip */
	gpio -= chip->base;
	if (gpio >= chip->ngpio)
		goto fail;
	/* returns 1 when the chip's request() hook must still be called */
	status = gpio_ensure_requested(desc, gpio);
	if (status < 0)
		goto fail;

	/* now we know the gpio is valid and chip won't vanish */

	spin_unlock_irqrestore(&gpio_lock, flags);

	might_sleep_if(chip->can_sleep);

	if (status) {
		status = chip->request(chip, gpio);
		if (status < 0) {
			pr_debug("GPIO-%d: chip request fail, %d\n",
				chip->base + gpio, status);
			/* and it's not available to anyone else ...
			 * gpio_request() is the fully clean solution.
			 */
			goto lose;
		}
	}

	status = chip->direction_input(chip, gpio);
	if (status == 0)
		clear_bit(FLAG_IS_OUT, &desc->flags);

	trace_gpio_direction(chip->base + gpio, 1, status);
lose:
	return status;
fail:
	spin_unlock_irqrestore(&gpio_lock, flags);
	if (status)
		pr_debug("%s: gpio-%d status %d\n",
			__func__, gpio, status);
	return status;
}
EXPORT_SYMBOL_GPL(gpio_direction_input);
int gpio_direction_output(unsigned gpio, int value)
{
	unsigned long		flags;
	struct gpio_chip	*chip;
	struct gpio_desc	*desc = &gpio_desc[gpio];
	int			status = -EINVAL;

	/* NOTE(review): desc->flags is read below before the
	 * gpio_is_valid() check, so an invalid gpio number indexes
	 * outside gpio_desc[] -- confirm callers pass validated numbers.
	 */

	/* Open drain pin should not be driven to 1 */
	if (value && test_bit(FLAG_OPEN_DRAIN, &desc->flags))
		return gpio_direction_input(gpio);

	/* Open source pin should not be driven to 0 */
	if (!value && test_bit(FLAG_OPEN_SOURCE, &desc->flags))
		return gpio_direction_input(gpio);

	spin_lock_irqsave(&gpio_lock, flags);

	if (!gpio_is_valid(gpio))
		goto fail;
	chip = desc->chip;
	if (!chip || !chip->set || !chip->direction_output)
		goto fail;
	/* from here on, "gpio" is the offset within the chip */
	gpio -= chip->base;
	if (gpio >= chip->ngpio)
		goto fail;
	/* returns 1 when the chip's request() hook must still be called */
	status = gpio_ensure_requested(desc, gpio);
	if (status < 0)
		goto fail;

	/* now we know the gpio is valid and chip won't vanish */

	spin_unlock_irqrestore(&gpio_lock, flags);

	might_sleep_if(chip->can_sleep);

	if (status) {
		status = chip->request(chip, gpio);
		if (status < 0) {
			pr_debug("GPIO-%d: chip request fail, %d\n",
				chip->base + gpio, status);
			/* and it's not available to anyone else ...
			 * gpio_request() is the fully clean solution.
			 */
			goto lose;
		}
	}

	status = chip->direction_output(chip, gpio, value);
	if (status == 0)
		set_bit(FLAG_IS_OUT, &desc->flags);

	trace_gpio_value(chip->base + gpio, 0, value);
	trace_gpio_direction(chip->base + gpio, 0, status);
lose:
	return status;
fail:
	spin_unlock_irqrestore(&gpio_lock, flags);
	if (status)
		pr_debug("%s: gpio-%d status %d\n",
			__func__, gpio, status);
	return status;
}
EXPORT_SYMBOL_GPL(gpio_direction_output);
/**
* gpio_set_debounce - sets @debounce time for a @gpio
* @gpio: the gpio to set debounce time
* @debounce: debounce time is microseconds
*/
/* Configure hardware debounce on @gpio; returns -EINVAL if the owning
 * chip does not implement set_debounce.  May sleep on slow chips.
 */
int gpio_set_debounce(unsigned gpio, unsigned debounce)
{
	unsigned long flags;
	struct gpio_chip *chip;
	struct gpio_desc *desc = &gpio_desc[gpio];
	int status = -EINVAL;
	spin_lock_irqsave(&gpio_lock, flags);
	if (!gpio_is_valid(gpio))
		goto fail;
	chip = desc->chip;
	/* debounce is optional; bail out early if unsupported */
	if (!chip || !chip->set || !chip->set_debounce)
		goto fail;
	/* convert to chip-relative offset */
	gpio -= chip->base;
	if (gpio >= chip->ngpio)
		goto fail;
	status = gpio_ensure_requested(desc, gpio);
	if (status < 0)
		goto fail;
	/* now we know the gpio is valid and chip won't vanish */
	spin_unlock_irqrestore(&gpio_lock, flags);
	might_sleep_if(chip->can_sleep);
	return chip->set_debounce(chip, gpio, debounce);
fail:
	spin_unlock_irqrestore(&gpio_lock, flags);
	if (status)
		pr_debug("%s: gpio-%d status %d\n",
			__func__, gpio, status);
	return status;
}
EXPORT_SYMBOL_GPL(gpio_set_debounce);
/* I/O calls are only valid after configuration completed; the relevant
* "is this a valid GPIO" error checks should already have been done.
*
* "Get" operations are often inlinable as reading a pin value register,
* and masking the relevant bit in that register.
*
* When "set" operations are inlinable, they involve writing that mask to
* one register to set a low value, or a different register to set it high.
* Otherwise locking is needed, so there may be little value to inlining.
*
*------------------------------------------------------------------------
*
* IMPORTANT!!! The hot paths -- get/set value -- assume that callers
* have requested the GPIO. That can include implicit requesting by
* a direction setting call. Marking a gpio as requested locks its chip
* in memory, guaranteeing that these table lookups need no more locking
* and that gpiochip_remove() will fail.
*
* REVISIT when debugging, consider adding some instrumentation to ensure
* that the GPIO was actually requested.
*/
/**
* __gpio_get_value() - return a gpio's value
* @gpio: gpio whose value will be returned
* Context: any
*
* This is used directly or indirectly to implement gpio_get_value().
* It returns the zero or nonzero value provided by the associated
* gpio_chip.get() method; or zero if no such method is provided.
*/
int __gpio_get_value(unsigned gpio)
{
	struct gpio_chip *chip;
	int value;
	/* caller must have validated/requested the GPIO already */
	chip = gpio_to_chip(gpio);
	/* Should be using gpio_get_value_cansleep() */
	WARN_ON(chip->can_sleep);
	/* chips without a get() method read as 0 */
	value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
	trace_gpio_value(gpio, 1, value);
	return value;
}
EXPORT_SYMBOL_GPL(__gpio_get_value);
/*
* _gpio_set_open_drain_value() - Set the open drain gpio's value.
* @gpio: Gpio whose state need to be set.
* @chip: Gpio chip.
* @value: Non-zero for setting it HIGH otherise it will set to LOW.
*/
/*
 * Drive an open-drain GPIO: a "high" level is produced by tristating the
 * pin (external pull-up raises the line), a "low" level by actively
 * driving the output to 0.  FLAG_IS_OUT tracks the resulting direction.
 */
static void _gpio_set_open_drain_value(unsigned gpio,
			struct gpio_chip *chip, int value)
{
	unsigned offset = gpio - chip->base;
	int err;

	if (value) {
		/* release the line; the pull-up supplies the high level */
		err = chip->direction_input(chip, offset);
		if (!err)
			clear_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
	} else {
		/* actively sink the line to ground */
		err = chip->direction_output(chip, offset, 0);
		if (!err)
			set_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
	}
	trace_gpio_direction(gpio, value, err);
	if (err < 0)
		pr_err("%s: Error in set_value for open drain gpio%d err %d\n",
					__func__, gpio, err);
}
/*
* _gpio_set_open_source() - Set the open source gpio's value.
* @gpio: Gpio whose state need to be set.
* @chip: Gpio chip.
* @value: Non-zero for setting it HIGH otherise it will set to LOW.
*/
/*
 * Drive an open-source GPIO: a "high" level is actively driven, while a
 * "low" level is produced by tristating the pin so the external
 * pull-down determines the line state.
 */
static void _gpio_set_open_source_value(unsigned gpio,
			struct gpio_chip *chip, int value)
{
	unsigned offset = gpio - chip->base;
	int err;

	if (!value) {
		/* release the line; the pull-down supplies the low level */
		err = chip->direction_input(chip, offset);
		if (!err)
			clear_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
	} else {
		/* actively source the line high */
		err = chip->direction_output(chip, offset, 1);
		if (!err)
			set_bit(FLAG_IS_OUT, &gpio_desc[gpio].flags);
	}
	trace_gpio_direction(gpio, !value, err);
	if (err < 0)
		pr_err("%s: Error in set_value for open source gpio%d err %d\n",
					__func__, gpio, err);
}
/**
* __gpio_set_value() - assign a gpio's value
* @gpio: gpio whose value will be assigned
* @value: value to assign
* Context: any
*
* This is used directly or indirectly to implement gpio_set_value().
* It invokes the associated gpio_chip.set() method.
*/
void __gpio_set_value(unsigned gpio, int value)
{
	struct gpio_chip *chip;
	chip = gpio_to_chip(gpio);
	/* Should be using gpio_set_value_cansleep() */
	WARN_ON(chip->can_sleep);
	trace_gpio_value(gpio, 0, value);
	/* open-drain/open-source pins need direction games, not set() */
	if (test_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags))
		_gpio_set_open_drain_value(gpio, chip, value);
	else if (test_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags))
		_gpio_set_open_source_value(gpio, chip, value);
	else
		chip->set(chip, gpio - chip->base, value);
}
EXPORT_SYMBOL_GPL(__gpio_set_value);
/**
* __gpio_cansleep() - report whether gpio value access will sleep
* @gpio: gpio in question
* Context: any
*
* This is used directly or indirectly to implement gpio_cansleep(). It
* returns nonzero if access reading or writing the GPIO value can sleep.
*/
int __gpio_cansleep(unsigned gpio)
{
struct gpio_chip *chip;
/* only call this on GPIOs that are valid! */
chip = gpio_to_chip(gpio);
return chip->can_sleep;
}
EXPORT_SYMBOL_GPL(__gpio_cansleep);
/**
* __gpio_to_irq() - return the IRQ corresponding to a GPIO
* @gpio: gpio whose IRQ will be returned (already requested)
* Context: any
*
* This is used directly or indirectly to implement gpio_to_irq().
* It returns the number of the IRQ signaled by this (input) GPIO,
* or a negative errno.
*/
int __gpio_to_irq(unsigned gpio)
{
	struct gpio_chip *chip = gpio_to_chip(gpio);

	/* chips that cannot source interrupts have no to_irq() hook */
	if (!chip->to_irq)
		return -ENXIO;
	return chip->to_irq(chip, gpio - chip->base);
}
EXPORT_SYMBOL_GPL(__gpio_to_irq);
/* There's no value in making it easy to inline GPIO calls that may sleep.
* Common examples include ones connected to I2C or SPI chips.
*/
/* Sleeping variant of __gpio_get_value() for chips behind I2C/SPI etc. */
int gpio_get_value_cansleep(unsigned gpio)
{
	struct gpio_chip *chip;
	int value;
	might_sleep_if(extra_checks);
	chip = gpio_to_chip(gpio);
	/* chips without a get() method read as 0 */
	value = chip->get ? chip->get(chip, gpio - chip->base) : 0;
	trace_gpio_value(gpio, 1, value);
	return value;
}
EXPORT_SYMBOL_GPL(gpio_get_value_cansleep);
/* Sleeping variant of __gpio_set_value() for chips behind I2C/SPI etc. */
void gpio_set_value_cansleep(unsigned gpio, int value)
{
	struct gpio_chip *chip;
	might_sleep_if(extra_checks);
	chip = gpio_to_chip(gpio);
	trace_gpio_value(gpio, 0, value);
	/* open-drain/open-source pins need direction games, not set() */
	if (test_bit(FLAG_OPEN_DRAIN, &gpio_desc[gpio].flags))
		_gpio_set_open_drain_value(gpio, chip, value);
	else if (test_bit(FLAG_OPEN_SOURCE, &gpio_desc[gpio].flags))
		_gpio_set_open_source_value(gpio, chip, value);
	else
		chip->set(chip, gpio - chip->base, value);
}
EXPORT_SYMBOL_GPL(gpio_set_value_cansleep);
#ifdef CONFIG_DEBUG_FS
/* Default debugfs dump: one line per *requested* GPIO on @chip, showing
 * label, direction, and (if readable) the current level.
 */
static void gpiolib_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
	unsigned i;
	unsigned gpio = chip->base;
	struct gpio_desc *gdesc = &gpio_desc[gpio];
	int is_out;
	for (i = 0; i < chip->ngpio; i++, gpio++, gdesc++) {
		/* unrequested pins are skipped entirely */
		if (!test_bit(FLAG_REQUESTED, &gdesc->flags))
			continue;
		is_out = test_bit(FLAG_IS_OUT, &gdesc->flags);
		seq_printf(s, " gpio-%-3d (%-20.20s) %s %s",
			gpio, gdesc->label,
			is_out ? "out" : "in ",
			chip->get
				? (chip->get(chip, i) ? "hi" : "lo")
				: "?  ");
		seq_printf(s, "\n");
	}
}
/* seq_file show handler for /sys/kernel/debug/gpio: walk the global
 * descriptor table, print a banner per chip, then delegate the per-pin
 * dump to the chip's own dbg_show() or the generic one.
 */
static int gpiolib_show(struct seq_file *s, void *unused)
{
	struct gpio_chip *chip = NULL;
	unsigned gpio;
	int started = 0;
	/* REVISIT this isn't locked against gpio_chip removal ... */
	for (gpio = 0; gpio_is_valid(gpio); gpio++) {
		struct device *dev;
		/* already reported this chip; skip to the next one */
		if (chip == gpio_desc[gpio].chip)
			continue;
		chip = gpio_desc[gpio].chip;
		if (!chip)
			continue;
		seq_printf(s, "%sGPIOs %d-%d",
				started ? "\n" : "",
				chip->base, chip->base + chip->ngpio - 1);
		dev = chip->dev;
		if (dev)
			seq_printf(s, ", %s/%s",
				dev->bus ? dev->bus->name : "no-bus",
				dev_name(dev));
		if (chip->label)
			seq_printf(s, ", %s", chip->label);
		if (chip->can_sleep)
			seq_printf(s, ", can sleep");
		seq_printf(s, ":\n");
		started = 1;
		if (chip->dbg_show)
			chip->dbg_show(s, chip);
		else
			gpiolib_dbg_show(s, chip);
	}
	return 0;
}
/* debugfs open: single-shot seq_file with no private state */
static int gpiolib_open(struct inode *inode, struct file *file)
{
	return single_open(file, gpiolib_show, NULL);
}
/* file_operations for the read-only /sys/kernel/debug/gpio entry */
static const struct file_operations gpiolib_operations = {
	.open		= gpiolib_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init gpiolib_debugfs_init(void)
{
	/* /sys/kernel/debug/gpio */
	/* creation failure is deliberately ignored: debugfs is best-effort */
	(void) debugfs_create_file("gpio", S_IFREG | S_IRUGO,
				NULL, NULL, &gpiolib_operations);
	return 0;
}
subsys_initcall(gpiolib_debugfs_init);
#endif	/* CONFIG_DEBUG_FS */
| gpl-2.0 |
jstotero/Old_Cucciolone | drivers/gpio/vr41xx_giu.c | 1576 | 12973 | /*
* Driver for NEC VR4100 series General-purpose I/O Unit.
*
* Copyright (C) 2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <source@mvista.com>
* Copyright (C) 2003-2009 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <asm/vr41xx/giu.h>
#include <asm/vr41xx/irq.h>
#include <asm/vr41xx/vr41xx.h>
MODULE_AUTHOR("Yoichi Yuasa <yuasa@linux-mips.org>");
MODULE_DESCRIPTION("NEC VR4100 series General-purpose I/O Unit driver");
MODULE_LICENSE("GPL");
#define GIUIOSELL 0x00
#define GIUIOSELH 0x02
#define GIUPIODL 0x04
#define GIUPIODH 0x06
#define GIUINTSTATL 0x08
#define GIUINTSTATH 0x0a
#define GIUINTENL 0x0c
#define GIUINTENH 0x0e
#define GIUINTTYPL 0x10
#define GIUINTTYPH 0x12
#define GIUINTALSELL 0x14
#define GIUINTALSELH 0x16
#define GIUINTHTSELL 0x18
#define GIUINTHTSELH 0x1a
#define GIUPODATL 0x1c
#define GIUPODATEN 0x1c
#define GIUPODATH 0x1e
#define PIOEN0 0x0100
#define PIOEN1 0x0200
#define GIUPODAT 0x1e
#define GIUFEDGEINHL 0x20
#define GIUFEDGEINHH 0x22
#define GIUREDGEINHL 0x24
#define GIUREDGEINHH 0x26
#define GIUUSEUPDN 0x1e0
#define GIUTERMUPDN 0x1e2
#define GPIO_HAS_PULLUPDOWN_IO 0x0001
#define GPIO_HAS_OUTPUT_ENABLE 0x0002
#define GPIO_HAS_INTERRUPT_EDGE_SELECT 0x0100
enum {
GPIO_INPUT,
GPIO_OUTPUT,
};
static DEFINE_SPINLOCK(giu_lock);
static unsigned long giu_flags;
static void __iomem *giu_base;
#define giu_read(offset) readw(giu_base + (offset))
#define giu_write(offset, value) writew((value), giu_base + (offset))
#define GPIO_PIN_OF_IRQ(irq) ((irq) - GIU_IRQ_BASE)
#define GIUINT_HIGH_OFFSET 16
#define GIUINT_HIGH_MAX 32
/* Read-modify-write helper: OR @set into the GIU register at @offset
 * and return the new register contents.
 */
static inline u16 giu_set(u16 offset, u16 set)
{
	u16 data = giu_read(offset) | set;

	giu_write(offset, data);
	return data;
}
/* Read-modify-write helper: clear the @clear bits in the GIU register
 * at @offset and return the new register contents.
 */
static inline u16 giu_clear(u16 offset, u16 clear)
{
	u16 data = giu_read(offset) & ~clear;

	giu_write(offset, data);
	return data;
}
/* irq_chip callbacks for GIU interrupts 0-15 (the "low" bank).
 * Writing a 1 bit to GIUINTSTATL acknowledges (clears) a pending
 * interrupt; GIUINTENL bits enable/disable delivery.
 */
static void ack_giuint_low(unsigned int irq)
{
	giu_write(GIUINTSTATL, 1 << GPIO_PIN_OF_IRQ(irq));
}
static void mask_giuint_low(unsigned int irq)
{
	giu_clear(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
}
static void mask_ack_giuint_low(unsigned int irq)
{
	unsigned int pin;
	pin = GPIO_PIN_OF_IRQ(irq);
	/* disable first, then clear any pending status */
	giu_clear(GIUINTENL, 1 << pin);
	giu_write(GIUINTSTATL, 1 << pin);
}
static void unmask_giuint_low(unsigned int irq)
{
	giu_set(GIUINTENL, 1 << GPIO_PIN_OF_IRQ(irq));
}
static struct irq_chip giuint_low_irq_chip = {
	.name		= "GIUINTL",
	.ack		= ack_giuint_low,
	.mask		= mask_giuint_low,
	.mask_ack	= mask_ack_giuint_low,
	.unmask		= unmask_giuint_low,
};
/* irq_chip callbacks for GIU interrupts 16-31 (the "high" bank); pin
 * numbers are rebased by GIUINT_HIGH_OFFSET into the 16-bit registers.
 */
static void ack_giuint_high(unsigned int irq)
{
	giu_write(GIUINTSTATH,
		1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
}
static void mask_giuint_high(unsigned int irq)
{
	giu_clear(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
}
static void mask_ack_giuint_high(unsigned int irq)
{
	unsigned int pin;
	pin = GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET;
	/* disable first, then clear any pending status */
	giu_clear(GIUINTENH, 1 << pin);
	giu_write(GIUINTSTATH, 1 << pin);
}
static void unmask_giuint_high(unsigned int irq)
{
	giu_set(GIUINTENH, 1 << (GPIO_PIN_OF_IRQ(irq) - GIUINT_HIGH_OFFSET));
}
static struct irq_chip giuint_high_irq_chip = {
	.name		= "GIUINTH",
	.ack		= ack_giuint_high,
	.mask		= mask_giuint_high,
	.mask_ack	= mask_ack_giuint_high,
	.unmask		= unmask_giuint_high,
};
/* Cascade decoder: find the lowest-numbered pending *and* enabled GIU
 * interrupt, searching the low bank before the high bank.  Returns the
 * virtual IRQ number, or -EINVAL (after logging) if nothing is pending.
 */
static int giu_get_irq(unsigned int irq)
{
	u16 pendl, pendh, maskl, maskh;
	int i;
	pendl = giu_read(GIUINTSTATL);
	pendh = giu_read(GIUINTSTATH);
	maskl = giu_read(GIUINTENL);
	maskh = giu_read(GIUINTENH);
	/* keep only sources that are both pending and enabled */
	maskl &= pendl;
	maskh &= pendh;
	if (maskl) {
		for (i = 0; i < 16; i++) {
			if (maskl & (1 << i))
				return GIU_IRQ(i);
		}
	} else if (maskh) {
		for (i = 0; i < 16; i++) {
			if (maskh & (1 << i))
				return GIU_IRQ(i + GIUINT_HIGH_OFFSET);
		}
	}
	printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
	       maskl, pendl, maskh, pendh);
	atomic_inc(&irq_err_count);
	return -EINVAL;
}
/* Program trigger mode (level vs edge, and which edge when the hardware
 * supports edge select) for GIU pin @pin, then install the matching
 * flow handler.  Pending status for the pin is cleared at the end so a
 * stale event from the old configuration cannot fire.
 */
void vr41xx_set_irq_trigger(unsigned int pin, irq_trigger_t trigger,
			    irq_signal_t signal)
{
	u16 mask;
	if (pin < GIUINT_HIGH_OFFSET) {
		mask = 1 << pin;
		if (trigger != IRQ_TRIGGER_LEVEL) {
			/* edge mode: TYPL=1 selects edge detection */
			giu_set(GIUINTTYPL, mask);
			if (signal == IRQ_SIGNAL_HOLD)
				giu_set(GIUINTHTSELL, mask);
			else
				giu_clear(GIUINTHTSELL, mask);
			if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
				/* *EDGEINH* bits: 1 = edge enabled */
				switch (trigger) {
				case IRQ_TRIGGER_EDGE_FALLING:
					giu_set(GIUFEDGEINHL, mask);
					giu_clear(GIUREDGEINHL, mask);
					break;
				case IRQ_TRIGGER_EDGE_RISING:
					giu_clear(GIUFEDGEINHL, mask);
					giu_set(GIUREDGEINHL, mask);
					break;
				default:
					/* both edges */
					giu_set(GIUFEDGEINHL, mask);
					giu_set(GIUREDGEINHL, mask);
					break;
				}
			}
			set_irq_chip_and_handler(GIU_IRQ(pin),
						 &giuint_low_irq_chip,
						 handle_edge_irq);
		} else {
			giu_clear(GIUINTTYPL, mask);
			giu_clear(GIUINTHTSELL, mask);
			set_irq_chip_and_handler(GIU_IRQ(pin),
						 &giuint_low_irq_chip,
						 handle_level_irq);
		}
		/* clear any pending event left over from reprogramming */
		giu_write(GIUINTSTATL, mask);
	} else if (pin < GIUINT_HIGH_MAX) {
		/* same sequence against the high-bank registers */
		mask = 1 << (pin - GIUINT_HIGH_OFFSET);
		if (trigger != IRQ_TRIGGER_LEVEL) {
			giu_set(GIUINTTYPH, mask);
			if (signal == IRQ_SIGNAL_HOLD)
				giu_set(GIUINTHTSELH, mask);
			else
				giu_clear(GIUINTHTSELH, mask);
			if (giu_flags & GPIO_HAS_INTERRUPT_EDGE_SELECT) {
				switch (trigger) {
				case IRQ_TRIGGER_EDGE_FALLING:
					giu_set(GIUFEDGEINHH, mask);
					giu_clear(GIUREDGEINHH, mask);
					break;
				case IRQ_TRIGGER_EDGE_RISING:
					giu_clear(GIUFEDGEINHH, mask);
					giu_set(GIUREDGEINHH, mask);
					break;
				default:
					giu_set(GIUFEDGEINHH, mask);
					giu_set(GIUREDGEINHH, mask);
					break;
				}
			}
			set_irq_chip_and_handler(GIU_IRQ(pin),
						 &giuint_high_irq_chip,
						 handle_edge_irq);
		} else {
			giu_clear(GIUINTTYPH, mask);
			giu_clear(GIUINTHTSELH, mask);
			set_irq_chip_and_handler(GIU_IRQ(pin),
						 &giuint_high_irq_chip,
						 handle_level_irq);
		}
		giu_write(GIUINTSTATH, mask);
	}
}
EXPORT_SYMBOL_GPL(vr41xx_set_irq_trigger);
/* Select active level (ALSEL bit: 1 = active high) for a level-triggered
 * GIU pin, then clear any pending status for that pin.  Out-of-range
 * pins are silently ignored.
 */
void vr41xx_set_irq_level(unsigned int pin, irq_level_t level)
{
	u16 mask;
	if (pin < GIUINT_HIGH_OFFSET) {
		mask = 1 << pin;
		if (level == IRQ_LEVEL_HIGH)
			giu_set(GIUINTALSELL, mask);
		else
			giu_clear(GIUINTALSELL, mask);
		giu_write(GIUINTSTATL, mask);
	} else if (pin < GIUINT_HIGH_MAX) {
		mask = 1 << (pin - GIUINT_HIGH_OFFSET);
		if (level == IRQ_LEVEL_HIGH)
			giu_set(GIUINTALSELH, mask);
		else
			giu_clear(GIUINTALSELH, mask);
		giu_write(GIUINTSTATH, mask);
	}
}
EXPORT_SYMBOL_GPL(vr41xx_set_irq_level);
/* Set @pin to input or output.  Pins 0-31 use the IOSEL registers;
 * pins >= 32 either use the output-enable register (on variants with
 * GPIO_HAS_OUTPUT_ENABLE) or, on other variants, only pins 48/49 are
 * configurable via the PIOEN bits in GIUPODATH.
 */
static int giu_set_direction(struct gpio_chip *chip, unsigned pin, int dir)
{
	u16 offset, mask, reg;
	unsigned long flags;
	if (pin >= chip->ngpio)
		return -EINVAL;
	if (pin < 16) {
		offset = GIUIOSELL;
		mask = 1 << pin;
	} else if (pin < 32) {
		offset = GIUIOSELH;
		mask = 1 << (pin - 16);
	} else {
		if (giu_flags & GPIO_HAS_OUTPUT_ENABLE) {
			offset = GIUPODATEN;
			mask = 1 << (pin - 32);
		} else {
			switch (pin) {
			case 48:
				offset = GIUPODATH;
				mask = PIOEN0;
				break;
			case 49:
				offset = GIUPODATH;
				mask = PIOEN1;
				break;
			default:
				return -EINVAL;
			}
		}
	}
	/* serialize the read-modify-write of the direction register */
	spin_lock_irqsave(&giu_lock, flags);
	reg = giu_read(offset);
	if (dir == GPIO_OUTPUT)
		reg |= mask;
	else
		reg &= ~mask;
	giu_write(offset, reg);
	spin_unlock_irqrestore(&giu_lock, flags);
	return 0;
}
/* Enable a pull-up or pull-down on @pin, or disable termination for any
 * other @pull value.  Only valid on variants with GPIO_HAS_PULLUPDOWN_IO.
 * NOTE(review): the bound is "pin >= 15", so pin 15 is rejected even
 * though the termination registers are 16 bits wide — presumably a
 * hardware restriction; confirm against the VR4100 GIU datasheet.
 */
int vr41xx_gpio_pullupdown(unsigned int pin, gpio_pull_t pull)
{
	u16 reg, mask;
	unsigned long flags;
	if ((giu_flags & GPIO_HAS_PULLUPDOWN_IO) != GPIO_HAS_PULLUPDOWN_IO)
		return -EPERM;
	if (pin >= 15)
		return -EINVAL;
	mask = 1 << pin;
	spin_lock_irqsave(&giu_lock, flags);
	if (pull == GPIO_PULL_UP || pull == GPIO_PULL_DOWN) {
		/* choose direction first, then enable the termination */
		reg = giu_read(GIUTERMUPDN);
		if (pull == GPIO_PULL_UP)
			reg |= mask;
		else
			reg &= ~mask;
		giu_write(GIUTERMUPDN, reg);
		reg = giu_read(GIUUSEUPDN);
		reg |= mask;
		giu_write(GIUUSEUPDN, reg);
	} else {
		/* disable termination entirely */
		reg = giu_read(GIUUSEUPDN);
		reg &= ~mask;
		giu_write(GIUUSEUPDN, reg);
	}
	spin_unlock_irqrestore(&giu_lock, flags);
	return 0;
}
EXPORT_SYMBOL_GPL(vr41xx_gpio_pullupdown);
/* Read the current level of @pin.  Pins are grouped into four banks of
 * 16, each backed by its own data register.  Returns 1/0 for the line
 * state, or -EINVAL for an out-of-range pin.
 */
static int vr41xx_gpio_get(struct gpio_chip *chip, unsigned pin)
{
	u16 offset;

	if (pin >= chip->ngpio)
		return -EINVAL;

	switch (pin / 16) {
	case 0:
		offset = GIUPIODL;
		break;
	case 1:
		offset = GIUPIODH;
		break;
	case 2:
		offset = GIUPODATL;
		break;
	default:
		offset = GIUPODATH;
		break;
	}
	return (giu_read(offset) >> (pin % 16)) & 1;
}
/* Drive @pin to @value via a locked read-modify-write of the data
 * register for the pin's bank.  Out-of-range pins are silently ignored
 * (the gpiolib set() hook returns void).
 */
static void vr41xx_gpio_set(struct gpio_chip *chip, unsigned pin,
			    int value)
{
	u16 offset, mask, reg;
	unsigned long flags;
	if (pin >= chip->ngpio)
		return;
	if (pin < 16) {
		offset = GIUPIODL;
		mask = 1 << pin;
	} else if (pin < 32) {
		offset = GIUPIODH;
		mask = 1 << (pin - 16);
	} else if (pin < 48) {
		offset = GIUPODATL;
		mask = 1 << (pin - 32);
	} else {
		offset = GIUPODATH;
		mask = 1 << (pin - 48);
	}
	spin_lock_irqsave(&giu_lock, flags);
	reg = giu_read(offset);
	if (value)
		reg |= mask;
	else
		reg &= ~mask;
	giu_write(offset, reg);
	spin_unlock_irqrestore(&giu_lock, flags);
}
/* gpiolib direction hooks; output sets the level *before* switching
 * direction so the pin never glitches to the old register value.
 */
static int vr41xx_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
{
	return giu_set_direction(chip, offset, GPIO_INPUT);
}
static int vr41xx_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
					int value)
{
	vr41xx_gpio_set(chip, offset, value);
	return giu_set_direction(chip, offset, GPIO_OUTPUT);
}
/* Each GIU pin maps 1:1 onto an interrupt starting at GIU_IRQ_BASE. */
static int vr41xx_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	return (offset < chip->ngpio) ? (GIU_IRQ_BASE + offset) : -EINVAL;
}
/* gpiolib descriptor; .ngpio is filled in by giu_probe() per variant */
static struct gpio_chip vr41xx_gpio_chip = {
	.label			= "vr41xx",
	.owner			= THIS_MODULE,
	.direction_input	= vr41xx_gpio_direction_input,
	.get			= vr41xx_gpio_get,
	.direction_output	= vr41xx_gpio_direction_output,
	.set			= vr41xx_gpio_set,
	.to_irq			= vr41xx_gpio_to_irq,
};
/*
 * Probe the GIU platform device: map its registers, register the GPIO
 * chip, quiesce and pre-configure all GIU interrupt lines from the
 * hardware's current trigger settings, and cascade the parent IRQ.
 *
 * Fix: the return value of gpiochip_add() was previously assigned to
 * "retval" but never checked, so a failed registration leaked the
 * ioremap() mapping and probing "succeeded" with a broken GPIO chip.
 */
static int __devinit giu_probe(struct platform_device *pdev)
{
	struct resource *res;
	unsigned int trigger, i, pin;
	struct irq_chip *chip;
	int irq, retval;

	/* the platform device id encodes the GIU variant / pin count */
	switch (pdev->id) {
	case GPIO_50PINS_PULLUPDOWN:
		giu_flags = GPIO_HAS_PULLUPDOWN_IO;
		vr41xx_gpio_chip.ngpio = 50;
		break;
	case GPIO_36PINS:
		vr41xx_gpio_chip.ngpio = 36;
		break;
	case GPIO_48PINS_EDGE_SELECT:
		giu_flags = GPIO_HAS_INTERRUPT_EDGE_SELECT;
		vr41xx_gpio_chip.ngpio = 48;
		break;
	default:
		dev_err(&pdev->dev, "GIU: unknown ID %d\n", pdev->id);
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EBUSY;

	giu_base = ioremap(res->start, res->end - res->start + 1);
	if (!giu_base)
		return -ENOMEM;

	vr41xx_gpio_chip.dev = &pdev->dev;

	retval = gpiochip_add(&vr41xx_gpio_chip);
	if (retval) {
		/* don't leak the register mapping on failure */
		iounmap(giu_base);
		giu_base = NULL;
		return retval;
	}

	/* mask everything, then mirror the hardware trigger setup into
	 * the irq_desc handlers (edge vs level) for all GIU lines */
	giu_write(GIUINTENL, 0);
	giu_write(GIUINTENH, 0);

	trigger = giu_read(GIUINTTYPH) << 16;
	trigger |= giu_read(GIUINTTYPL);

	for (i = GIU_IRQ_BASE; i <= GIU_IRQ_LAST; i++) {
		pin = GPIO_PIN_OF_IRQ(i);
		if (pin < GIUINT_HIGH_OFFSET)
			chip = &giuint_low_irq_chip;
		else
			chip = &giuint_high_irq_chip;

		if (trigger & (1 << pin))
			set_irq_chip_and_handler(i, chip, handle_edge_irq);
		else
			set_irq_chip_and_handler(i, chip, handle_level_irq);
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0 || irq >= nr_irqs)
		return -EBUSY;

	return cascade_irq(irq, giu_get_irq);
}
/* Tear down the register mapping; clearing giu_base marks the unit gone. */
static int __devexit giu_remove(struct platform_device *pdev)
{
	void __iomem *base = giu_base;

	if (base) {
		giu_base = NULL;
		iounmap(base);
	}
	return 0;
}
/* Platform driver glue; the board code registers a device named "GIU". */
static struct platform_driver giu_device_driver = {
	.probe		= giu_probe,
	.remove		= __devexit_p(giu_remove),
	.driver		= {
		.name	= "GIU",
		.owner	= THIS_MODULE,
	},
};
/* Standard module plumbing: register/unregister the platform driver. */
static int __init vr41xx_giu_init(void)
{
	return platform_driver_register(&giu_device_driver);
}
static void __exit vr41xx_giu_exit(void)
{
	platform_driver_unregister(&giu_device_driver);
}
module_init(vr41xx_giu_init);
module_exit(vr41xx_giu_exit);
| gpl-2.0 |
yytang2012/linux-kvm-arm | tools/power/cpupower/utils/cpuidle-info.c | 1832 | 4671 | /*
* (C) 2004-2009 Dominik Brodowski <linux@dominikbrodowski.de>
* (C) 2010 Thomas Renninger <trenn@suse.de>
*
* Licensed under the terms of the GNU GPL License version 2.
*/
#include <unistd.h>
#include <stdio.h>
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <getopt.h>
#include <cpufreq.h>
#include "helpers/helpers.h"
#include "helpers/sysfs.h"
#include "helpers/bitmask.h"
#define LINE_LEN 10
/* Print the idle-state summary for one CPU; with @verbose, also dump
 * per-state name, description, latency, usage count and residency time
 * read from sysfs.
 */
static void cpuidle_cpu_output(unsigned int cpu, int verbose)
{
	unsigned int idlestates, idlestate;
	char *tmp;
	printf(_ ("Analyzing CPU %d:\n"), cpu);
	idlestates = sysfs_get_idlestate_count(cpu);
	if (idlestates == 0) {
		printf(_("CPU %u: No idle states\n"), cpu);
		return;
	}
	printf(_("Number of idle states: %d\n"), idlestates);
	printf(_("Available idle states:"));
	for (idlestate = 0; idlestate < idlestates; idlestate++) {
		tmp = sysfs_get_idlestate_name(cpu, idlestate);
		if (!tmp)
			continue;
		printf(" %s", tmp);
		free(tmp);
	}
	printf("\n");
	if (!verbose)
		return;
	for (idlestate = 0; idlestate < idlestates; idlestate++) {
		int disabled = sysfs_is_idlestate_disabled(cpu, idlestate);
		/* Disabled interface not supported on older kernels */
		if (disabled < 0)
			disabled = 0;
		tmp = sysfs_get_idlestate_name(cpu, idlestate);
		if (!tmp)
			continue;
		printf("%s%s:\n", tmp, (disabled) ? " (DISABLED) " : "");
		free(tmp);
		/* a state with no description is skipped entirely, so the
		 * latency/usage lines below are only printed when it exists */
		tmp = sysfs_get_idlestate_desc(cpu, idlestate);
		if (!tmp)
			continue;
		printf(_("Flags/Description: %s\n"), tmp);
		free(tmp);
		printf(_("Latency: %lu\n"),
			sysfs_get_idlestate_latency(cpu, idlestate));
		printf(_("Usage: %lu\n"),
			sysfs_get_idlestate_usage(cpu, idlestate));
		printf(_("Duration: %llu\n"),
			sysfs_get_idlestate_time(cpu, idlestate));
	}
	printf("\n");
}
static void cpuidle_general_output(void)
{
char *tmp;
tmp = sysfs_get_cpuidle_driver();
if (!tmp) {
printf(_("Could not determine cpuidle driver\n"));
return;
}
printf(_("CPUidle driver: %s\n"), tmp);
free(tmp);
tmp = sysfs_get_cpuidle_governor();
if (!tmp) {
printf(_("Could not determine cpuidle governor\n"));
return;
}
printf(_("CPUidle governor: %s\n"), tmp);
free(tmp);
}
/* Emit idle-state info in the legacy /proc/acpi/processor/.../power
 * layout for one CPU.  C0 (the running state, index 0) is skipped; the
 * promotion/demotion columns are not available via sysfs and are
 * printed as "--".
 */
static void proc_cpuidle_cpu_output(unsigned int cpu)
{
	long max_allowed_cstate = 2000000000;
	unsigned int cstate, cstates;
	cstates = sysfs_get_idlestate_count(cpu);
	if (cstates == 0) {
		printf(_("CPU %u: No C-states info\n"), cpu);
		return;
	}
	printf(_("active state:            C0\n"));
	printf(_("max_cstate:              C%u\n"), cstates-1);
	printf(_("maximum allowed latency: %lu usec\n"), max_allowed_cstate);
	printf(_("states:\t\n"));
	for (cstate = 1; cstate < cstates; cstate++) {
		printf(_("    C%d:                  "
			 "type[C%d] "), cstate, cstate);
		printf(_("promotion[--] demotion[--] "));
		printf(_("latency[%03lu] "),
			sysfs_get_idlestate_latency(cpu, cstate));
		printf(_("usage[%08lu] "),
			sysfs_get_idlestate_usage(cpu, cstate));
		printf(_("duration[%020Lu] \n"),
			sysfs_get_idlestate_time(cpu, cstate));
	}
}
/* getopt_long table: --silent suppresses verbose per-state output,
 * --proc selects the legacy /proc-style format. */
static struct option info_opts[] = {
	{ .name = "silent",	.has_arg = no_argument,	.flag = NULL,	.val = 's'},
	{ .name = "proc",	.has_arg = no_argument,	.flag = NULL,	.val = 'o'},
	{ },
};
/*
 * Terminate the idle-info command with exit status @fail.
 *
 * Fix: the parameter was previously ignored and the process always
 * exited with EXIT_FAILURE.  All current callers pass EXIT_FAILURE, so
 * honoring the argument is backward-compatible while making the helper
 * usable for success paths too.
 */
static inline void cpuidle_exit(int fail)
{
	exit(fail);
}
/* Entry point for "cpupower idle-info": parse -s/--silent and
 * -o/--proc, then print idle-state information for every selected CPU
 * (the global cpus_chosen bitmask; defaults to CPU 0).  CPUs that are
 * offline/nonexistent (cpufreq_cpu_exists() != 0) are skipped.
 */
int cmd_idle_info(int argc, char **argv)
{
	extern char *optarg;
	extern int optind, opterr, optopt;
	int ret = 0, cont = 1, output_param = 0, verbose = 1;
	unsigned int cpu = 0;
	do {
		ret = getopt_long(argc, argv, "os", info_opts, NULL);
		if (ret == -1)
			break;
		switch (ret) {
		case '?':
			/* unknown option: remember it and stop parsing */
			output_param = '?';
			cont = 0;
			break;
		case 's':
			verbose = 0;
			break;
		case -1:
			cont = 0;
			break;
		case 'o':
			/* -1 marks "more than one output format requested" */
			if (output_param) {
				output_param = -1;
				cont = 0;
				break;
			}
			output_param = ret;
			break;
		}
	} while (cont);
	switch (output_param) {
	case -1:
		printf(_("You can't specify more than one "
			 "output-specific argument\n"));
		cpuidle_exit(EXIT_FAILURE);
	case '?':
		printf(_("invalid or unknown argument\n"));
		cpuidle_exit(EXIT_FAILURE);
	}
	/* Default is: show output of CPU 0 only */
	if (bitmask_isallclear(cpus_chosen))
		bitmask_setbit(cpus_chosen, 0);
	if (output_param == 0)
		cpuidle_general_output();
	for (cpu = bitmask_first(cpus_chosen);
	     cpu <= bitmask_last(cpus_chosen); cpu++) {
		if (!bitmask_isbitset(cpus_chosen, cpu) ||
		    cpufreq_cpu_exists(cpu))
			continue;
		switch (output_param) {
		case 'o':
			proc_cpuidle_cpu_output(cpu);
			break;
		case 0:
			printf("\n");
			cpuidle_cpu_output(cpu, verbose);
			break;
		}
	}
	return EXIT_SUCCESS;
}
| gpl-2.0 |
kcarden/android_kernel_lge_msm8916 | drivers/net/caif/caif_serial.c | 2088 | 10521 | /*
* Copyright (C) ST-Ericsson AB 2010
* Author: Sjur Brendeland
* License terms: GNU General Public License (GPL) version 2
*/
#include <linux/hardirq.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/tty.h>
#include <linux/file.h>
#include <linux/if_arp.h>
#include <net/caif/caif_device.h>
#include <net/caif/cfcnfg.h>
#include <linux/err.h>
#include <linux/debugfs.h>
/* Fix: MODULE_LICENSE("GPL") appeared twice; keep a single declaration. */
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Sjur Brendeland");
MODULE_DESCRIPTION("CAIF serial device TTY line discipline");
MODULE_ALIAS_LDISC(N_CAIF);
#define SEND_QUEUE_LOW 10
#define SEND_QUEUE_HIGH 100
#define CAIF_SENDING 1 /* Bit 1 = 0x02*/
#define CAIF_FLOW_OFF_SENT 4 /* Bit 4 = 0x10 */
#define MAX_WRITE_CHUNK 4096
#define ON 1
#define OFF 0
#define CAIF_MAX_MTU 4096
/*This list is protected by the rtnl lock. */
static LIST_HEAD(ser_list);
static bool ser_loop;
module_param(ser_loop, bool, S_IRUGO);
MODULE_PARM_DESC(ser_loop, "Run in simulated loopback mode.");
static bool ser_use_stx = true;
module_param(ser_use_stx, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_stx, "STX enabled or not.");
static bool ser_use_fcs = true;
module_param(ser_use_fcs, bool, S_IRUGO);
MODULE_PARM_DESC(ser_use_fcs, "FCS enabled or not.");
static int ser_write_chunk = MAX_WRITE_CHUNK;
module_param(ser_write_chunk, int, S_IRUGO);
MODULE_PARM_DESC(ser_write_chunk, "Maximum size of data written to UART.");
static struct dentry *debugfsdir;
static int caif_net_open(struct net_device *dev);
static int caif_net_close(struct net_device *dev);
/* Per-tty CAIF serial device state; one instance lives in the
 * netdev_priv() area of each "cf<tty>" net device. */
struct ser_device {
	struct caif_dev_common common;	/* CAIF layer glue (flow control etc.) */
	struct list_head node;		/* entry in ser_list, under rtnl */
	struct net_device *dev;		/* owning net device */
	struct sk_buff_head head;	/* queue of frames awaiting tty write */
	struct tty_struct *tty;		/* bound tty (kref held) */
	bool tx_started;		/* true once we've written to the tty */
	unsigned long state;		/* CAIF_SENDING / CAIF_FLOW_OFF_SENT bits */
	char *tty_name;
#ifdef CONFIG_DEBUG_FS
	struct dentry *debugfs_tty_dir;
	struct debugfs_blob_wrapper tx_blob;	/* last tx snapshot */
	struct debugfs_blob_wrapper rx_blob;	/* last rx snapshot */
	u8 rx_data[128];
	u8 tx_data[128];
	u8 tty_status;			/* packed tty flag bits for debugfs */
#endif
};
static void caifdev_setup(struct net_device *dev);
static void ldisc_tx_wakeup(struct tty_struct *tty);
#ifdef CONFIG_DEBUG_FS
/* Pack interesting tty flags into one byte for the debugfs "tty_status"
 * file: bit5=stopped, bit3=flow_stopped, bit2=packet, bit1=low_latency. */
static inline void update_tty_status(struct ser_device *ser)
{
	ser->tty_status =
		ser->tty->stopped << 5 |
		ser->tty->flow_stopped << 3 |
		ser->tty->packet << 2 |
		ser->tty->port->low_latency << 1;
}
/* Create the per-tty debugfs directory with last-tx/last-rx blobs and
 * state files; failures leave the blobs pointing at valid (empty)
 * buffers so the tx/rx snapshot helpers stay safe either way. */
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
	ser->debugfs_tty_dir =
			debugfs_create_dir(tty->name, debugfsdir);
	if (!IS_ERR(ser->debugfs_tty_dir)) {
		debugfs_create_blob("last_tx_msg", S_IRUSR,
				ser->debugfs_tty_dir,
				&ser->tx_blob);
		debugfs_create_blob("last_rx_msg", S_IRUSR,
				ser->debugfs_tty_dir,
				&ser->rx_blob);
		debugfs_create_x32("ser_state", S_IRUSR,
				ser->debugfs_tty_dir,
				(u32 *)&ser->state);
		debugfs_create_x8("tty_status", S_IRUSR,
				ser->debugfs_tty_dir,
				&ser->tty_status);
	}
	ser->tx_blob.data = ser->tx_data;
	ser->tx_blob.size = 0;
	ser->rx_blob.data = ser->rx_data;
	ser->rx_blob.size = 0;
}
/* Remove the per-tty debugfs tree created by debugfs_init(). */
static inline void debugfs_deinit(struct ser_device *ser)
{
	debugfs_remove_recursive(ser->debugfs_tty_dir);
}
static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
if (size > sizeof(ser->rx_data))
size = sizeof(ser->rx_data);
memcpy(ser->rx_data, data, size);
ser->rx_blob.data = ser->rx_data;
ser->rx_blob.size = size;
}
static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
if (size > sizeof(ser->tx_data))
size = sizeof(ser->tx_data);
memcpy(ser->tx_data, data, size);
ser->tx_blob.data = ser->tx_data;
ser->tx_blob.size = size;
}
#else
/* No-op stubs when CONFIG_DEBUG_FS is disabled. */
static inline void debugfs_init(struct ser_device *ser, struct tty_struct *tty)
{
}
static inline void debugfs_deinit(struct ser_device *ser)
{
}
static inline void update_tty_status(struct ser_device *ser)
{
}
static inline void debugfs_rx(struct ser_device *ser, const u8 *data, int size)
{
}
static inline void debugfs_tx(struct ser_device *ser, const u8 *data, int size)
{
}
#endif
/* Line-discipline receive hook: wrap incoming tty bytes in an skb and
 * push it up the CAIF network stack.  Allocation failure silently drops
 * the data (there is no way to back-pressure the tty here).
 */
static void ldisc_receive(struct tty_struct *tty, const u8 *data,
			char *flags, int count)
{
	struct sk_buff *skb = NULL;
	struct ser_device *ser;
	int ret;
	u8 *p;
	ser = tty->disc_data;
	/*
	 * NOTE: flags may contain information about break or overrun.
	 * This is not yet handled.
	 */
	/*
	 * Workaround for garbage at start of transmission,
	 * only enable if STX handling is not enabled.
	 */
	if (!ser->common.use_stx && !ser->tx_started) {
		dev_info(&ser->dev->dev,
			"Bytes received before initial transmission -"
			"bytes discarded.\n");
		return;
	}
	BUG_ON(ser->dev == NULL);
	/* Get a suitable caif packet and copy in data. */
	skb = netdev_alloc_skb(ser->dev, count+1);
	if (skb == NULL)
		return;
	p = skb_put(skb, count);
	memcpy(p, data, count);
	skb->protocol = htons(ETH_P_CAIF);
	skb_reset_mac_header(skb);
	skb->dev = ser->dev;
	debugfs_rx(ser, data, count);
	/* Push received packet up the stack. */
	ret = netif_rx_ni(skb);
	if (!ret) {
		ser->dev->stats.rx_packets++;
		ser->dev->stats.rx_bytes += count;
	} else
		++ser->dev->stats.rx_dropped;
	update_tty_status(ser);
}
/* Drain the device's tx queue into the tty (or loop it back in
 * ser_loop mode).  The CAIF_SENDING bit serializes concurrent callers
 * (xmit path vs. tty write_wakeup); partial writes leave the remainder
 * of the skb at the head of the queue for the next wakeup.
 * Returns 0, or a negative tty write error.
 */
static int handle_tx(struct ser_device *ser)
{
	struct tty_struct *tty;
	struct sk_buff *skb;
	int tty_wr, len, room;
	tty = ser->tty;
	ser->tx_started = true;
	/* Enter critical section */
	if (test_and_set_bit(CAIF_SENDING, &ser->state))
		return 0;
	/* skb_peek is safe because handle_tx is called after skb_queue_tail */
	while ((skb = skb_peek(&ser->head)) != NULL) {
		/* Make sure you don't write too much */
		len = skb->len;
		room = tty_write_room(tty);
		if (!room)
			break;
		if (room > ser_write_chunk)
			room = ser_write_chunk;
		if (len > room)
			len = room;
		/* Write to tty or loopback */
		if (!ser_loop) {
			tty_wr = tty->ops->write(tty, skb->data, len);
			update_tty_status(ser);
		} else {
			tty_wr = len;
			ldisc_receive(tty, skb->data, NULL, len);
		}
		ser->dev->stats.tx_packets++;
		ser->dev->stats.tx_bytes += tty_wr;
		/* Error on TTY ?! */
		if (tty_wr < 0)
			goto error;
		/* Reduce buffer written, and discard if empty */
		skb_pull(skb, tty_wr);
		if (skb->len == 0) {
			struct sk_buff *tmp = skb_dequeue(&ser->head);
			WARN_ON(tmp != skb);
			if (in_interrupt())
				dev_kfree_skb_irq(skb);
			else
				kfree_skb(skb);
		}
	}
	/* Send flow off if queue is empty */
	if (ser->head.qlen <= SEND_QUEUE_LOW &&
	    test_and_clear_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
	    ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, ON);
	clear_bit(CAIF_SENDING, &ser->state);
	return 0;
error:
	clear_bit(CAIF_SENDING, &ser->state);
	return tty_wr;
}
/* ndo_start_xmit: queue the frame, signal flow-off upstream once when
 * the queue crosses the high-water mark, then try to drain to the tty. */
static int caif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct ser_device *ser;
	BUG_ON(dev == NULL);
	ser = netdev_priv(dev);
	/* Send flow off once, on high water mark */
	if (ser->head.qlen > SEND_QUEUE_HIGH &&
		!test_and_set_bit(CAIF_FLOW_OFF_SENT, &ser->state) &&
		ser->common.flowctrl != NULL)
		ser->common.flowctrl(ser->dev, OFF);
	skb_queue_tail(&ser->head, skb);
	return handle_tx(ser);
}
/* tty write_wakeup hook: room opened up in the tty, resume draining. */
static void ldisc_tx_wakeup(struct tty_struct *tty)
{
	struct ser_device *ser = tty->disc_data;

	BUG_ON(ser == NULL);
	WARN_ON(ser->tty != tty);
	handle_tx(ser);
}
/* Line-discipline open: create and register a "cf<tty>" CAIF net device
 * bound to this tty.  Requires CAP_SYS_ADMIN or CAP_SYS_TTY_CONFIG and a
 * tty that supports write().  Returns 0 or a negative errno.
 */
static int ldisc_open(struct tty_struct *tty)
{
	struct ser_device *ser;
	struct net_device *dev;
	char name[64];
	int result;
	/* No write no play */
	if (tty->ops->write == NULL)
		return -EOPNOTSUPP;
	if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_TTY_CONFIG))
		return -EPERM;
	/* NOTE(review): sprintf into name[64] assumes tty->name plus the
	 * "cf" prefix fits; a maximal tty name could overflow — verify
	 * against the size of tty->name or use snprintf. */
	sprintf(name, "cf%s", tty->name);
	dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
	if (!dev)
		return -ENOMEM;
	ser = netdev_priv(dev);
	ser->tty = tty_kref_get(tty);
	ser->dev = dev;
	debugfs_init(ser, tty);
	tty->receive_room = N_TTY_BUF_SIZE;
	tty->disc_data = ser;
	set_bit(TTY_DO_WRITE_WAKEUP, &tty->flags);
	rtnl_lock();
	result = register_netdevice(dev);
	if (result) {
		rtnl_unlock();
		free_netdev(dev);
		return -ENODEV;
	}
	/* ser_list is protected by the rtnl lock */
	list_add(&ser->node, &ser_list);
	rtnl_unlock();
	netif_stop_queue(dev);
	update_tty_status(ser);
	return 0;
}
/*
 * Line-discipline close: tear down the net device attached to this tty.
 * May be invoked with or without the RTNL already held, so take it only
 * when the caller did not.
 */
static void ldisc_close(struct tty_struct *tty)
{
	struct ser_device *ser = tty->disc_data;
	int need_lock = !rtnl_is_locked();

	if (need_lock)
		rtnl_lock();
	dev_close(ser->dev);
	/* device is freed automagically by net-sysfs */
	unregister_netdevice(ser->dev);
	list_del(&ser->node);
	debugfs_deinit(ser);
	tty_kref_put(ser->tty);
	if (need_lock)
		rtnl_unlock();
}
/* The line discipline structure. */
static struct tty_ldisc_ops caif_ldisc = {
	.owner = THIS_MODULE,
	.magic = TTY_LDISC_MAGIC,
	.name = "n_caif",
	.open = ldisc_open,		/* attach: create cf<tty> net device */
	.close = ldisc_close,		/* detach: unregister net device */
	.receive_buf = ldisc_receive,	/* bytes from tty -> CAIF stack */
	.write_wakeup = ldisc_tx_wakeup	/* tty has room -> resume tx */
};
/*
 * Register the n_caif tty line discipline.
 * Returns the tty_register_ldisc() result (negative errno on failure).
 */
static int register_ldisc(void)
{
	int err = tty_register_ldisc(N_CAIF, &caif_ldisc);

	if (err < 0)
		pr_err("cannot register CAIF ldisc=%d err=%d\n", N_CAIF,
			err);
	return err;
}
/* net_device callbacks for the CAIF serial interface. */
static const struct net_device_ops netdev_ops = {
	.ndo_open = caif_net_open,	/* ifup: wake tx queue */
	.ndo_stop = caif_net_close,	/* ifdown: stop tx queue */
	.ndo_start_xmit = caif_xmit	/* queue frame toward the tty */
};
/*
 * alloc_netdev() setup callback: initialise the net_device fields and
 * the per-device private state for a CAIF serial interface.
 */
static void caifdev_setup(struct net_device *dev)
{
	struct ser_device *ser = netdev_priv(dev);

	/* Link-layer identity: point-to-point CAIF, no ARP, no offloads. */
	dev->netdev_ops = &netdev_ops;
	dev->type = ARPHRD_CAIF;
	dev->flags = IFF_POINTOPOINT | IFF_NOARP;
	dev->features = 0;
	dev->mtu = CAIF_MAX_MTU;
	dev->tx_queue_len = 0;
	dev->destructor = free_netdev;

	/* Private per-device state. */
	skb_queue_head_init(&ser->head);
	ser->common.link_select = CAIF_LINK_LOW_LATENCY;
	ser->common.use_frag = true;
	ser->common.use_stx = ser_use_stx;
	ser->common.use_fcs = ser_use_fcs;
	ser->dev = dev;
}
/* ndo_open: interface brought up, let the stack queue packets. */
static int caif_net_open(struct net_device *dev)
{
	netif_wake_queue(dev);
	return 0;
}
/* ndo_stop: interface brought down, stop the tx queue. */
static int caif_net_close(struct net_device *dev)
{
	netif_stop_queue(dev);
	return 0;
}
/*
 * Module init: register the line discipline, then create the debugfs
 * directory.
 *
 * Fix vs. original: the debugfs dir was created even when ldisc
 * registration failed. A failed module init means caif_ser_exit()
 * never runs, so that directory leaked; bail out before creating it.
 */
static int __init caif_ser_init(void)
{
	int ret;

	ret = register_ldisc();
	if (ret < 0)
		return ret;
	debugfsdir = debugfs_create_dir("caif_serial", NULL);
	return 0;
}
/*
 * Module exit: close and unregister every remaining CAIF serial device,
 * then drop the line discipline and the debugfs tree.
 *
 * Fix vs. original: dev_close() and unregister_netdevice() must be
 * called with the RTNL held (unregister_netdevice asserts it); the loop
 * ran completely unlocked.
 */
static void __exit caif_ser_exit(void)
{
	struct ser_device *ser = NULL;
	struct list_head *node;
	struct list_head *_tmp;

	rtnl_lock();
	list_for_each_safe(node, _tmp, &ser_list) {
		ser = list_entry(node, struct ser_device, node);
		dev_close(ser->dev);
		unregister_netdevice(ser->dev);
		list_del(node);
	}
	rtnl_unlock();
	tty_unregister_ldisc(N_CAIF);
	debugfs_remove_recursive(debugfsdir);
}
module_init(caif_ser_init);	/* register ldisc + debugfs on load */
module_exit(caif_ser_exit);	/* tear down remaining devices on unload */
| gpl-2.0 |
ryandxter/SM-G900H_KK_Opensource_Update3 | drivers/staging/silicom/bpctl_mod.c | 2088 | 202178 | /******************************************************************************/
/* */
/* Bypass Control utility, Copyright (c) 2005-20011 Silicom */
/* */
/* This program is free software; you can redistribute it and/or modify */
/* it under the terms of the GNU General Public License as published by */
/* the Free Software Foundation, located in the file LICENSE. */
/* Copyright(c) 2007 - 2009 Intel Corporation. All rights reserved. */
/* */
/* */
/******************************************************************************/
#include <linux/kernel.h> /* We're doing kernel work */
#include <linux/module.h> /* Specifically, a module */
#include <linux/fs.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/netdevice.h>
#include <linux/rtnetlink.h>
#include <linux/rcupdate.h>
#include <linux/etherdevice.h>
#include <linux/uaccess.h> /* for get_user and put_user */
#include <linux/sched.h>
#include <linux/ethtool.h>
#include <linux/proc_fs.h>
#include "bp_ioctl.h"
#include "bp_mod.h"
#include "bypass.h"
#include "libbp_sd.h"
#define SUCCESS 0
#define BP_MOD_VER "9.0.4"
#define BP_MOD_DESCR "Silicom Bypass-SD Control driver"
/* When set, register access is serialised with a spinlock instead of
 * the wdt_busy atomic flag (see write_reg/read_reg/wdt_pulse). */
#define BP_SYNC_FLAG 1
static int major_num = 0;	/* char-device major; 0 = dynamic */
MODULE_AUTHOR("Anna Lukin, annal@silicom.co.il");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION(BP_MOD_DESCR);
MODULE_VERSION(BP_MOD_VER);
spinlock_t bpvm_lock;
/* NOTE(review): lock_bpctl() expands to a bare `if` with a `return`
 * inside — it only works in functions returning int and hides control
 * flow; use with care. */
#define lock_bpctl()					\
	if (down_interruptible(&bpctl_sema)) {		\
		return -ERESTARTSYS;			\
	}						\

#define unlock_bpctl()					\
	up(&bpctl_sema);
/* Media Types of a bypass port. */
typedef enum {
	bp_copper = 0,	/* copper (RJ45) */
	bp_fiber,	/* fiber */
	bp_cx4,		/* CX4 10G copper */
	bp_none,	/* unknown / not applicable */
} bp_media_type;
/* /proc bookkeeping for one bypass device. */
struct bypass_pfs_sd {
	char dir_name[32];			/* procfs directory name */
	struct proc_dir_entry *bypass_entry;	/* procfs directory handle */
};
/*
 * Per-port state of one bypass-capable adapter function.
 * One entry of bpctl_dev_arr per discovered PCI function.
 */
typedef struct _bpctl_dev {
	char *name;
	char *desc;
	struct pci_dev *pdev;	/* PCI device */
	struct net_device *ndev;	/* net device */
	unsigned long mem_map;	/* ioremapped BAR base */
	uint8_t bus;		/* PCI bus/slot/function location */
	uint8_t slot;
	uint8_t func;
	u_int32_t device;	/* PCI IDs */
	u_int32_t vendor;
	u_int32_t subvendor;
	u_int32_t subdevice;
	int ifindex;		/* matching net_device ifindex, if bound */
	uint32_t bp_caps;	/* bypass capability flags */
	uint32_t bp_caps_ex;	/* extended capability flags */
	uint8_t bp_fw_ver;	/* bypass firmware version */
	int bp_ext_ver;		/* extended protocol version */
	int wdt_status;		/* watchdog state (WDT_STATUS_*) */
	unsigned long bypass_wdt_on_time;	/* jiffies of last WDT ping */
	uint32_t bypass_timer_interval;
	struct timer_list bp_timer;	/* WDT refresh timer */
	uint32_t reset_time;
	uint8_t bp_status_un;
	atomic_t wdt_busy;	/* register-access busy flag (non-sync build) */
	bp_media_type media_type;
	int bp_tpl_flag;	/* TPL (transparent link) enabled */
	struct timer_list bp_tpl_timer;
	spinlock_t bypass_wr_lock;	/* serialises bit-banged reg access */
	/* Controller family flags — select the GPIO register layout used
	 * by write_pulse/read_pulse/write_reg/read_reg. */
	int bp_10g;
	int bp_10gb;
	int bp_fiber5;
	int bp_10g9;
	int bp_i80;
	int bp_540;
	int (*hard_start_xmit_save) (struct sk_buff *skb,
				     struct net_device *dev);	/* saved xmit for self-test */
	const struct net_device_ops *old_ops;
	struct net_device_ops new_ops;
	int bp_self_test_flag;
	char *bp_tx_data;
	struct bypass_pfs_sd bypass_pfs_set;	/* procfs entries */
} bpctl_dev_t;
static bpctl_dev_t *bpctl_dev_arr;	/* table of discovered bypass functions */
static struct semaphore bpctl_sema;	/* serialises ioctl entry (lock_bpctl) */
static int device_num = 0;		/* number of entries in bpctl_dev_arr */

/* Forward declarations. */
static int get_dev_idx(int ifindex);
static bpctl_dev_t *get_master_port_fn(bpctl_dev_t *pbpctl_dev);
static int disc_status(bpctl_dev_t *pbpctl_dev);
static int bypass_status(bpctl_dev_t *pbpctl_dev);
static int wdt_timer(bpctl_dev_t *pbpctl_dev, int *time_left);
static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev);
static void if_scan_init(void);
int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block);
int bypass_proc_remove_dev_sd(bpctl_dev_t *pbp_device_block);
int bp_proc_create(void);
int is_bypass_fn(bpctl_dev_t *pbpctl_dev);
int get_dev_idx_bsf(int bus, int slot, int func);
static unsigned long str_to_hex(char *p);
/*
 * netdev notifier callback.
 *
 * NETDEV_REGISTER: parse the ethtool bus_info string ("bb:ss.f") of the
 * new device, match it to an entry of bpctl_dev_arr, and (re)create its
 * /proc entries. NETDEV_UNREGISTER / NETDEV_CHANGENAME maintain those
 * entries. NETDEV_CHANGE with carrier lost reports whether the bypass
 * master port went into Bypass/Disconnect mode.
 *
 * NOTE(review): pbpctl_dev/pbpctl_dev_m are `static` locals shared by
 * all notifier invocations — looks racy; confirm single-threaded
 * notifier context. Also `while (*buf++ != ':')` has no bound and
 * relies on bus_info containing a ':'.
 */
static int bp_device_event(struct notifier_block *unused,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	static bpctl_dev_t *pbpctl_dev = NULL, *pbpctl_dev_m = NULL;
	int dev_num = 0, ret = 0, ret_d = 0, time_left = 0;

	if (!dev)
		return NOTIFY_DONE;
	if (event == NETDEV_REGISTER) {
		{
			struct ethtool_drvinfo drvinfo;
			char cbuf[32];
			char *buf = NULL;
			char res[10];
			int i = 0, ifindex, idx_dev = 0;
			int bus = 0, slot = 0, func = 0;

			ifindex = dev->ifindex;
			memset(res, 0, 10);
			memset(&drvinfo, 0, sizeof(struct ethtool_drvinfo));
			if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
				memset(&drvinfo, 0, sizeof(drvinfo));
				dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
			} else
				return NOTIFY_DONE;
			if (!drvinfo.bus_info)
				return NOTIFY_DONE;
			if (!strcmp(drvinfo.bus_info, "N/A"))
				return NOTIFY_DONE;
			memcpy(&cbuf, drvinfo.bus_info, 32);
			buf = &cbuf[0];
			/* skip the PCI domain up to the first ':' */
			while (*buf++ != ':') ;
			/* bus number: up to the next ':' */
			for (i = 0; i < 10; i++, buf++) {
				if (*buf == ':')
					break;
				res[i] = *buf;
			}
			buf++;
			bus = str_to_hex(res);
			memset(res, 0, 10);
			/* slot: up to the '.' */
			for (i = 0; i < 10; i++, buf++) {
				if (*buf == '.')
					break;
				res[i] = *buf;
			}
			buf++;
			slot = str_to_hex(res);
			func = str_to_hex(buf);
			idx_dev = get_dev_idx_bsf(bus, slot, func);
			if (idx_dev != -1) {
				/* bind the netdev to its bypass entry and
				 * refresh the /proc tree */
				bpctl_dev_arr[idx_dev].ifindex = ifindex;
				bpctl_dev_arr[idx_dev].ndev = dev;
				bypass_proc_remove_dev_sd(&bpctl_dev_arr
							  [idx_dev]);
				bypass_proc_create_dev_sd(&bpctl_dev_arr
							  [idx_dev]);
			}
		}
		return NOTIFY_DONE;
	}
	if (event == NETDEV_UNREGISTER) {
		int idx_dev = 0;

		for (idx_dev = 0;
		     ((bpctl_dev_arr[idx_dev].pdev != NULL)
		      && (idx_dev < device_num)); idx_dev++) {
			if (bpctl_dev_arr[idx_dev].ndev == dev) {
				bypass_proc_remove_dev_sd(&bpctl_dev_arr
							  [idx_dev]);
				bpctl_dev_arr[idx_dev].ndev = NULL;
				return NOTIFY_DONE;
			}
		}
		return NOTIFY_DONE;
	}
	if (event == NETDEV_CHANGENAME) {
		int idx_dev = 0;

		for (idx_dev = 0;
		     ((bpctl_dev_arr[idx_dev].pdev != NULL)
		      && (idx_dev < device_num)); idx_dev++) {
			if (bpctl_dev_arr[idx_dev].ndev == dev) {
				bypass_proc_remove_dev_sd(&bpctl_dev_arr
							  [idx_dev]);
				bypass_proc_create_dev_sd(&bpctl_dev_arr
							  [idx_dev]);
				return NOTIFY_DONE;
			}
		}
		return NOTIFY_DONE;
	}
	switch (event) {
	case NETDEV_CHANGE:{
			/* only interested in carrier-loss events */
			if (netif_carrier_ok(dev))
				return NOTIFY_DONE;
			if (((dev_num = get_dev_idx(dev->ifindex)) == -1) ||
			    (!(pbpctl_dev = &bpctl_dev_arr[dev_num])))
				return NOTIFY_DONE;
			if ((is_bypass_fn(pbpctl_dev)) == 1)
				pbpctl_dev_m = pbpctl_dev;
			else
				pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
			if (!pbpctl_dev_m)
				return NOTIFY_DONE;
			ret = bypass_status(pbpctl_dev_m);
			if (ret == 1)
				printk("bpmod: %s is in the Bypass mode now",
				       dev->name);
			ret_d = disc_status(pbpctl_dev_m);
			if (ret_d == 1)
				printk
				    ("bpmod: %s is in the Disconnect mode now",
				     dev->name);
			if (ret || ret_d) {
				wdt_timer(pbpctl_dev_m, &time_left);
				if (time_left == -1)
					printk("; WDT has expired");
				printk(".\n");
			}
			return NOTIFY_DONE;
		}
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_DONE;
}
/* Netdev event hook (register/unregister/rename/carrier changes). */
static struct notifier_block bp_notifier_block = {
	.notifier_call = bp_device_event,
};
int is_bypass_fn(bpctl_dev_t *pbpctl_dev);
int wdt_time_left(bpctl_dev_t *pbpctl_dev);
/*
 * Bit-bang the `len` low bits of `value` (MSB first) onto the bypass
 * controller's MDIO-style serial bus. For each bit the data line is set
 * and the clock pulsed high then low, with PULSE_TIME delays.
 *
 * Each controller family (bp_10g9 / bp_fiber5 / bp_i80 / bp_540 /
 * bp_10gb / legacy / bp_10g) drives different GPIO bits of different
 * registers; the per-family branches below are otherwise identical in
 * structure. Caller is responsible for serialisation (write_reg /
 * read_reg hold bypass_wr_lock around this).
 */
static void write_pulse(bpctl_dev_t *pbpctl_dev,
			unsigned int ctrl_ext,
			unsigned char value, unsigned char len)
{
	unsigned char ctrl_val = 0;
	unsigned int i = len;
	unsigned int ctrl = 0;
	bpctl_dev_t *pbpctl_dev_c = NULL;

	/* latch the clock-side register for families that split data/clock */
	if (pbpctl_dev->bp_i80)
		ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	if (pbpctl_dev->bp_540)
		ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
	if (pbpctl_dev->bp_10g9) {
		/* clock lives on the companion (status) port */
		if (!(pbpctl_dev_c = get_status_port_fn(pbpctl_dev)))
			return;
		ctrl = BP10G_READ_REG(pbpctl_dev_c, ESDP);
	}
	while (i--) {
		ctrl_val = (value >> i) & 0x1;
		if (ctrl_val) {
			/* bit = 1: DATA 1, CLK 1 */
			if (pbpctl_dev->bp_10g9) {
				BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
						ctrl_ext |
						BP10G_MDIO_DATA_OUT9);
				BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
						(ctrl | BP10G_MCLK_DATA_OUT9 |
						 BP10G_MCLK_DIR_OUT9));
			} else if (pbpctl_dev->bp_fiber5) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, (ctrl_ext |
					BPCTLI_CTRL_EXT_MCLK_DIR5 |
					BPCTLI_CTRL_EXT_MDIO_DIR5 |
					BPCTLI_CTRL_EXT_MDIO_DATA5 |
					BPCTLI_CTRL_EXT_MCLK_DATA5));
			} else if (pbpctl_dev->bp_i80) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, (ctrl_ext |
					BPCTLI_CTRL_EXT_MDIO_DIR80 |
					BPCTLI_CTRL_EXT_MDIO_DATA80));
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, (ctrl |
					BPCTLI_CTRL_EXT_MCLK_DIR80 |
					BPCTLI_CTRL_EXT_MCLK_DATA80));
			} else if (pbpctl_dev->bp_540) {
				BP10G_WRITE_REG(pbpctl_dev, ESDP, (ctrl |
					BP540_MDIO_DIR | BP540_MDIO_DATA |
					BP540_MCLK_DIR | BP540_MCLK_DATA));
			} else if (pbpctl_dev->bp_10gb) {
				BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
					(ctrl_ext | BP10GB_MDIO_SET |
					 BP10GB_MCLK_SET) &
					~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
					  BP10GB_MDIO_CLR | BP10GB_MCLK_CLR));
			} else if (!pbpctl_dev->bp_10g)
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
					(ctrl_ext |
					 BPCTLI_CTRL_EXT_MCLK_DIR |
					 BPCTLI_CTRL_EXT_MDIO_DIR |
					 BPCTLI_CTRL_EXT_MDIO_DATA |
					 BPCTLI_CTRL_EXT_MCLK_DATA));
			else {
				BP10G_WRITE_REG(pbpctl_dev, EODSDP,
					(ctrl_ext | BP10G_MCLK_DATA_OUT |
					 BP10G_MDIO_DATA_OUT));
			}
			usec_delay(PULSE_TIME);
			/* bit = 1: DATA 1, CLK 0 (falling edge) */
			if (pbpctl_dev->bp_10g9) {
				BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
						ctrl_ext |
						BP10G_MDIO_DATA_OUT9);
				BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
						(ctrl | BP10G_MCLK_DIR_OUT9) &
						~BP10G_MCLK_DATA_OUT9);
			} else if (pbpctl_dev->bp_fiber5) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
					((ctrl_ext |
					  BPCTLI_CTRL_EXT_MCLK_DIR5 |
					  BPCTLI_CTRL_EXT_MDIO_DIR5 |
					  BPCTLI_CTRL_EXT_MDIO_DATA5) &
					 ~(BPCTLI_CTRL_EXT_MCLK_DATA5)));
			} else if (pbpctl_dev->bp_i80) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, (ctrl_ext |
					BPCTLI_CTRL_EXT_MDIO_DIR80 |
					BPCTLI_CTRL_EXT_MDIO_DATA80));
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
					((ctrl |
					  BPCTLI_CTRL_EXT_MCLK_DIR80) &
					 ~(BPCTLI_CTRL_EXT_MCLK_DATA80)));
			} else if (pbpctl_dev->bp_540) {
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
					(ctrl | BP540_MDIO_DIR |
					 BP540_MDIO_DATA |
					 BP540_MCLK_DIR) &
					~(BP540_MCLK_DATA));
			} else if (pbpctl_dev->bp_10gb) {
				BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
					(ctrl_ext | BP10GB_MDIO_SET |
					 BP10GB_MCLK_CLR) &
					~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
					  BP10GB_MDIO_CLR | BP10GB_MCLK_SET));
			} else if (!pbpctl_dev->bp_10g)
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
					((ctrl_ext |
					  BPCTLI_CTRL_EXT_MCLK_DIR |
					  BPCTLI_CTRL_EXT_MDIO_DIR |
					  BPCTLI_CTRL_EXT_MDIO_DATA) &
					 ~(BPCTLI_CTRL_EXT_MCLK_DATA)));
			else {
				BP10G_WRITE_REG(pbpctl_dev, EODSDP,
					((ctrl_ext |
					  BP10G_MDIO_DATA_OUT) &
					 ~(BP10G_MCLK_DATA_OUT)));
			}
			usec_delay(PULSE_TIME);
		} else {
			/* bit = 0: DATA 0, CLK 1 */
			if (pbpctl_dev->bp_10g9) {
				BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
						(ctrl_ext &
						 ~BP10G_MDIO_DATA_OUT9));
				BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
						(ctrl | BP10G_MCLK_DATA_OUT9 |
						 BP10G_MCLK_DIR_OUT9));
			} else if (pbpctl_dev->bp_fiber5) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
					((ctrl_ext |
					  BPCTLI_CTRL_EXT_MCLK_DIR5 |
					  BPCTLI_CTRL_EXT_MDIO_DIR5 |
					  BPCTLI_CTRL_EXT_MCLK_DATA5) &
					 ~(BPCTLI_CTRL_EXT_MDIO_DATA5)));
			} else if (pbpctl_dev->bp_i80) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
					((ctrl_ext |
					  BPCTLI_CTRL_EXT_MDIO_DIR80) &
					 ~(BPCTLI_CTRL_EXT_MDIO_DATA80)));
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
					(ctrl |
					 BPCTLI_CTRL_EXT_MCLK_DIR80 |
					 BPCTLI_CTRL_EXT_MCLK_DATA80));
			} else if (pbpctl_dev->bp_540) {
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
					((ctrl | BP540_MCLK_DIR |
					  BP540_MCLK_DATA |
					  BP540_MDIO_DIR) &
					 ~(BP540_MDIO_DATA)));
			} else if (pbpctl_dev->bp_10gb) {
				BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
					(ctrl_ext | BP10GB_MDIO_CLR |
					 BP10GB_MCLK_SET) &
					~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
					  BP10GB_MDIO_SET | BP10GB_MCLK_CLR));
			} else if (!pbpctl_dev->bp_10g)
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
					((ctrl_ext |
					  BPCTLI_CTRL_EXT_MCLK_DIR |
					  BPCTLI_CTRL_EXT_MDIO_DIR |
					  BPCTLI_CTRL_EXT_MCLK_DATA) &
					 ~(BPCTLI_CTRL_EXT_MDIO_DATA)));
			else {
				BP10G_WRITE_REG(pbpctl_dev, EODSDP,
					((ctrl_ext |
					  BP10G_MCLK_DATA_OUT) &
					 ~BP10G_MDIO_DATA_OUT));
			}
			usec_delay(PULSE_TIME);
			/* bit = 0: DATA 0, CLK 0 (falling edge) */
			if (pbpctl_dev->bp_10g9) {
				BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
						(ctrl_ext &
						 ~BP10G_MDIO_DATA_OUT9));
				BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
						((ctrl | BP10G_MCLK_DIR_OUT9) &
						 ~(BP10G_MCLK_DATA_OUT9)));
			} else if (pbpctl_dev->bp_fiber5) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
					((ctrl_ext |
					  BPCTLI_CTRL_EXT_MCLK_DIR5 |
					  BPCTLI_CTRL_EXT_MDIO_DIR5) &
					 ~(BPCTLI_CTRL_EXT_MCLK_DATA5 |
					   BPCTLI_CTRL_EXT_MDIO_DATA5)));
			} else if (pbpctl_dev->bp_i80) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
					((ctrl_ext |
					  BPCTLI_CTRL_EXT_MDIO_DIR80) &
					 ~BPCTLI_CTRL_EXT_MDIO_DATA80));
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
					((ctrl |
					  BPCTLI_CTRL_EXT_MCLK_DIR80) &
					 ~(BPCTLI_CTRL_EXT_MCLK_DATA80)));
			} else if (pbpctl_dev->bp_540) {
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
					((ctrl | BP540_MCLK_DIR |
					  BP540_MDIO_DIR) &
					 ~(BP540_MDIO_DATA |
					   BP540_MCLK_DATA)));
			} else if (pbpctl_dev->bp_10gb) {
				BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
					(ctrl_ext | BP10GB_MDIO_CLR |
					 BP10GB_MCLK_CLR) &
					~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
					  BP10GB_MDIO_SET | BP10GB_MCLK_SET));
			} else if (!pbpctl_dev->bp_10g)
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
					((ctrl_ext |
					  BPCTLI_CTRL_EXT_MCLK_DIR |
					  BPCTLI_CTRL_EXT_MDIO_DIR) &
					 ~(BPCTLI_CTRL_EXT_MCLK_DATA |
					   BPCTLI_CTRL_EXT_MDIO_DATA)));
			else {
				BP10G_WRITE_REG(pbpctl_dev, EODSDP,
					(ctrl_ext &
					 ~(BP10G_MCLK_DATA_OUT |
					   BP10G_MDIO_DATA_OUT)));
			}
			usec_delay(PULSE_TIME);
		}
	}
}
/*
 * Clock `len` bits in from the bypass controller's serial bus (MSB
 * first). For each bit: clock low, clock high, sample the data-in pin.
 * Returns the assembled value, or -1 when the bp_10g9 companion port
 * cannot be found. Same per-controller-family branching as
 * write_pulse(); caller must serialise access.
 */
static int read_pulse(bpctl_dev_t *pbpctl_dev, unsigned int ctrl_ext,
		      unsigned char len)
{
	unsigned char ctrl_val = 0;
	unsigned int i = len;
	unsigned int ctrl = 0;
	bpctl_dev_t *pbpctl_dev_c = NULL;

	if (pbpctl_dev->bp_i80)
		ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	if (pbpctl_dev->bp_540)
		ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
	if (pbpctl_dev->bp_10g9) {
		if (!(pbpctl_dev_c = get_status_port_fn(pbpctl_dev)))
			return -1;
		ctrl = BP10G_READ_REG(pbpctl_dev_c, ESDP);
	}
	while (i--) {
		/* CLK 0, data line released for input */
		if (pbpctl_dev->bp_10g9) {
			BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
					((ctrl | BP10G_MCLK_DIR_OUT9) &
					 ~(BP10G_MCLK_DATA_OUT9)));
		} else if (pbpctl_dev->bp_fiber5) {
			BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
				BPCTLI_CTRL_EXT_MCLK_DIR5) &
				~(BPCTLI_CTRL_EXT_MDIO_DIR5 |
				  BPCTLI_CTRL_EXT_MCLK_DATA5)));
		} else if (pbpctl_dev->bp_i80) {
			BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
					   (ctrl_ext &
					    ~BPCTLI_CTRL_EXT_MDIO_DIR80));
			BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
					   ((ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80)
					    & ~(BPCTLI_CTRL_EXT_MCLK_DATA80)));
		} else if (pbpctl_dev->bp_540) {
			BP10G_WRITE_REG(pbpctl_dev, ESDP,
					((ctrl | BP540_MCLK_DIR) &
					 ~(BP540_MDIO_DIR | BP540_MCLK_DATA)));
		} else if (pbpctl_dev->bp_10gb) {
			BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				(ctrl_ext | BP10GB_MDIO_DIR |
				 BP10GB_MCLK_CLR) & ~(BP10GB_MCLK_DIR |
						      BP10GB_MDIO_CLR |
						      BP10GB_MDIO_SET |
						      BP10GB_MCLK_SET));
		} else if (!pbpctl_dev->bp_10g)
			BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, ((ctrl_ext |
				BPCTLI_CTRL_EXT_MCLK_DIR) &
				~(BPCTLI_CTRL_EXT_MDIO_DIR |
				  BPCTLI_CTRL_EXT_MCLK_DATA)));
		else {
			BP10G_WRITE_REG(pbpctl_dev, EODSDP, ((ctrl_ext | BP10G_MDIO_DATA_OUT) & ~BP10G_MCLK_DATA_OUT));	/* ? */
		}
		usec_delay(PULSE_TIME);
		/* CLK 1 (rising edge the controller shifts out on) */
		if (pbpctl_dev->bp_10g9) {
			BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
					(ctrl | BP10G_MCLK_DATA_OUT9 |
					 BP10G_MCLK_DIR_OUT9));
		} else if (pbpctl_dev->bp_fiber5) {
			BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
				BPCTLI_CTRL_EXT_MCLK_DIR5 |
				BPCTLI_CTRL_EXT_MCLK_DATA5) &
				~(BPCTLI_CTRL_EXT_MDIO_DIR5)));
		} else if (pbpctl_dev->bp_i80) {
			BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
					   (ctrl_ext &
					    ~(BPCTLI_CTRL_EXT_MDIO_DIR80)));
			BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
					   (ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80 |
					    BPCTLI_CTRL_EXT_MCLK_DATA80));
		} else if (pbpctl_dev->bp_540) {
			BP10G_WRITE_REG(pbpctl_dev, ESDP,
					((ctrl | BP540_MCLK_DIR |
					  BP540_MCLK_DATA) &
					 ~(BP540_MDIO_DIR)));
		} else if (pbpctl_dev->bp_10gb) {
			BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				(ctrl_ext | BP10GB_MDIO_DIR |
				 BP10GB_MCLK_SET) & ~(BP10GB_MCLK_DIR |
						      BP10GB_MDIO_CLR |
						      BP10GB_MDIO_SET |
						      BP10GB_MCLK_CLR));
		} else if (!pbpctl_dev->bp_10g)
			BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, ((ctrl_ext |
				BPCTLI_CTRL_EXT_MCLK_DIR |
				BPCTLI_CTRL_EXT_MCLK_DATA) &
				~(BPCTLI_CTRL_EXT_MDIO_DIR)));
		else {
			BP10G_WRITE_REG(pbpctl_dev, EODSDP,
					(ctrl_ext | BP10G_MCLK_DATA_OUT |
					 BP10G_MDIO_DATA_OUT));
		}
		/* sample the data-in bit while the clock is high */
		if (pbpctl_dev->bp_10g9) {
			ctrl_ext = BP10G_READ_REG(pbpctl_dev, I2CCTL);
		} else if ((pbpctl_dev->bp_fiber5) || (pbpctl_dev->bp_i80)) {
			ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL);
		} else if (pbpctl_dev->bp_540) {
			ctrl_ext = BP10G_READ_REG(pbpctl_dev, ESDP);
		} else if (pbpctl_dev->bp_10gb)
			ctrl_ext = BP10GB_READ_REG(pbpctl_dev, MISC_REG_SPIO);
		else if (!pbpctl_dev->bp_10g)
			ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		else
			ctrl_ext = BP10G_READ_REG(pbpctl_dev, EODSDP);
		usec_delay(PULSE_TIME);
		if (pbpctl_dev->bp_10g9) {
			if (ctrl_ext & BP10G_MDIO_DATA_IN9)
				ctrl_val |= 1 << i;
		} else if (pbpctl_dev->bp_fiber5) {
			if (ctrl_ext & BPCTLI_CTRL_EXT_MDIO_DATA5)
				ctrl_val |= 1 << i;
		} else if (pbpctl_dev->bp_i80) {
			if (ctrl_ext & BPCTLI_CTRL_EXT_MDIO_DATA80)
				ctrl_val |= 1 << i;
		} else if (pbpctl_dev->bp_540) {
			if (ctrl_ext & BP540_MDIO_DATA)
				ctrl_val |= 1 << i;
		} else if (pbpctl_dev->bp_10gb) {
			if (ctrl_ext & BP10GB_MDIO_DATA)
				ctrl_val |= 1 << i;
		} else if (!pbpctl_dev->bp_10g) {
			if (ctrl_ext & BPCTLI_CTRL_EXT_MDIO_DATA)
				ctrl_val |= 1 << i;
		} else {
			if (ctrl_ext & BP10G_MDIO_DATA_IN)
				ctrl_val |= 1 << i;
		}
	}
	return ctrl_val;
}
/*
 * Write `value` to bypass-controller register `addr` over the
 * bit-banged bus: idle the bus (DATA 0, CLK 0), send the sync pattern,
 * the write command, the address and the data via write_pulse(), then
 * return the bus to idle.
 *
 * Access is serialised with bypass_wr_lock (BP_SYNC_FLAG build) or the
 * wdt_busy atomic. A write to CMND_REG_ADDR while the watchdog is
 * enabled refreshes bypass_wdt_on_time.
 */
static void write_reg(bpctl_dev_t *pbpctl_dev, unsigned char value,
		      unsigned char addr)
{
	uint32_t ctrl_ext = 0, ctrl = 0;
	bpctl_dev_t *pbpctl_dev_c = NULL;
	unsigned long flags;

	if (pbpctl_dev->bp_10g9) {
		if (!(pbpctl_dev_c = get_status_port_fn(pbpctl_dev)))
			return;
	}
	if ((pbpctl_dev->wdt_status == WDT_STATUS_EN) &&
	    (pbpctl_dev->bp_ext_ver < PXG4BPFI_VER))
		wdt_time_left(pbpctl_dev);
#ifdef BP_SYNC_FLAG
	spin_lock_irqsave(&pbpctl_dev->bypass_wr_lock, flags);
#else
	atomic_set(&pbpctl_dev->wdt_busy, 1);
#endif
	/* drive the bus idle: DATA 0, CLK 0 */
	if (pbpctl_dev->bp_10g9) {
		ctrl_ext = BP10G_READ_REG(pbpctl_dev, I2CCTL);
		ctrl = BP10G_READ_REG(pbpctl_dev_c, ESDP);
		BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
				(ctrl_ext & ~BP10G_MDIO_DATA_OUT9));
		BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
				((ctrl | BP10G_MCLK_DIR_OUT9) &
				 ~(BP10G_MCLK_DATA_OUT9)));
	} else if (pbpctl_dev->bp_fiber5) {
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR5 |
			BPCTLI_CTRL_EXT_MDIO_DIR5) &
			~(BPCTLI_CTRL_EXT_MDIO_DATA5 |
			  BPCTLI_CTRL_EXT_MCLK_DATA5)));
	} else if (pbpctl_dev->bp_i80) {
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL);
		ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MDIO_DIR80) &
			~BPCTLI_CTRL_EXT_MDIO_DATA80));
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80) &
				    ~BPCTLI_CTRL_EXT_MCLK_DATA80));
	} else if (pbpctl_dev->bp_540) {
		ctrl = ctrl_ext = BP10G_READ_REG(pbpctl_dev, ESDP);
		BP10G_WRITE_REG(pbpctl_dev, ESDP, ((ctrl |
			BP540_MDIO_DIR | BP540_MCLK_DIR) &
			~(BP540_MDIO_DATA | BP540_MCLK_DATA)));
	} else if (pbpctl_dev->bp_10gb) {
		ctrl_ext = BP10GB_READ_REG(pbpctl_dev, MISC_REG_SPIO);
		BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				 (ctrl_ext | BP10GB_MDIO_CLR | BP10GB_MCLK_CLR)
				 & ~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
				     BP10GB_MDIO_SET | BP10GB_MCLK_SET));
	} else if (!pbpctl_dev->bp_10g) {
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR |
			BPCTLI_CTRL_EXT_MDIO_DIR) &
			~(BPCTLI_CTRL_EXT_MDIO_DATA |
			  BPCTLI_CTRL_EXT_MCLK_DATA)));
	} else {
		ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
		ctrl_ext = BP10G_READ_REG(pbpctl_dev, EODSDP);
		BP10G_WRITE_REG(pbpctl_dev, EODSDP,
				(ctrl_ext &
				 ~(BP10G_MCLK_DATA_OUT | BP10G_MDIO_DATA_OUT)));
	}
	usec_delay(CMND_INTERVAL);
	/*send sync cmd */
	write_pulse(pbpctl_dev, ctrl_ext, SYNC_CMD_VAL, SYNC_CMD_LEN);
	/*send wr cmd */
	write_pulse(pbpctl_dev, ctrl_ext, WR_CMD_VAL, WR_CMD_LEN);
	write_pulse(pbpctl_dev, ctrl_ext, addr, ADDR_CMD_LEN);
	/*write data */
	write_pulse(pbpctl_dev, ctrl_ext, value, WR_DATA_LEN);
	/* return the bus to idle: DATA 0, CLK 0 */
	if (pbpctl_dev->bp_10g9) {
		BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
				(ctrl_ext & ~BP10G_MDIO_DATA_OUT9));
		BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
				((ctrl | BP10G_MCLK_DIR_OUT9) &
				 ~(BP10G_MCLK_DATA_OUT9)));
	} else if (pbpctl_dev->bp_fiber5) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR5 |
			BPCTLI_CTRL_EXT_MDIO_DIR5) &
			~(BPCTLI_CTRL_EXT_MDIO_DATA5 |
			  BPCTLI_CTRL_EXT_MCLK_DATA5)));
	} else if (pbpctl_dev->bp_i80) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MDIO_DIR80) &
			~BPCTLI_CTRL_EXT_MDIO_DATA80));
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80) &
				    ~BPCTLI_CTRL_EXT_MCLK_DATA80));
	} else if (pbpctl_dev->bp_540) {
		BP10G_WRITE_REG(pbpctl_dev, ESDP, ((ctrl |
			BP540_MDIO_DIR | BP540_MCLK_DIR) &
			~(BP540_MDIO_DATA | BP540_MCLK_DATA)));
	} else if (pbpctl_dev->bp_10gb) {
		BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				 (ctrl_ext | BP10GB_MDIO_CLR | BP10GB_MCLK_CLR)
				 & ~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
				     BP10GB_MDIO_SET | BP10GB_MCLK_SET));
	} else if (!pbpctl_dev->bp_10g)
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR |
			BPCTLI_CTRL_EXT_MDIO_DIR) &
			~(BPCTLI_CTRL_EXT_MDIO_DATA |
			  BPCTLI_CTRL_EXT_MCLK_DATA)));
	else {
		BP10G_WRITE_REG(pbpctl_dev, EODSDP,
				(ctrl_ext &
				 ~(BP10G_MCLK_DATA_OUT | BP10G_MDIO_DATA_OUT)));
	}
	usec_delay(CMND_INTERVAL * 4);
	/* a command-register write while the WDT runs counts as a ping */
	if ((pbpctl_dev->wdt_status == WDT_STATUS_EN) &&
	    (pbpctl_dev->bp_ext_ver < PXG4BPFI_VER) && (addr == CMND_REG_ADDR))
		pbpctl_dev->bypass_wdt_on_time = jiffies;
#ifdef BP_SYNC_FLAG
	spin_unlock_irqrestore(&pbpctl_dev->bypass_wr_lock, flags);
#else
	atomic_set(&pbpctl_dev->wdt_busy, 0);
#endif
}
/* Write `value` to the bypass command register. */
static void write_data(bpctl_dev_t *pbpctl_dev, unsigned char value)
{
	write_reg(pbpctl_dev, value, CMND_REG_ADDR);
}
/*
 * Read bypass-controller register `addr` over the bit-banged bus:
 * idle the bus, send sync + read command + address via write_pulse(),
 * clock the data back in with read_pulse(), re-idle the bus.
 * Returns the register value, or -1 if the bp_10g9 companion (status)
 * port cannot be found.
 *
 * Fix vs. original: the bp_10g9 companion-port lookup happened AFTER
 * spin_lock_irqsave() and returned -1 with the lock still held (and
 * IRQs off) — in the non-BP_SYNC_FLAG build it left wdt_busy set. The
 * check is now done before acquiring the lock, matching write_reg().
 * The dead `#if 0` experiment block was removed.
 */
static int read_reg(bpctl_dev_t *pbpctl_dev, unsigned char addr)
{
	uint32_t ctrl_ext = 0, ctrl = 0, ctrl_value = 0;
	bpctl_dev_t *pbpctl_dev_c = NULL;
#ifdef BP_SYNC_FLAG
	unsigned long flags;
#endif

	/* resolve the companion port BEFORE taking the lock */
	if (pbpctl_dev->bp_10g9) {
		if (!(pbpctl_dev_c = get_status_port_fn(pbpctl_dev)))
			return -1;
	}
#ifdef BP_SYNC_FLAG
	spin_lock_irqsave(&pbpctl_dev->bypass_wr_lock, flags);
#else
	atomic_set(&pbpctl_dev->wdt_busy, 1);
#endif
	/* drive the bus idle: DATA 0, CLK 0 */
	if (pbpctl_dev->bp_10g9) {
		ctrl_ext = BP10G_READ_REG(pbpctl_dev, I2CCTL);
		ctrl = BP10G_READ_REG(pbpctl_dev_c, ESDP);
		BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
				(ctrl_ext & ~BP10G_MDIO_DATA_OUT9));
		BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
				((ctrl | BP10G_MCLK_DIR_OUT9) &
				 ~(BP10G_MCLK_DATA_OUT9)));
	} else if (pbpctl_dev->bp_fiber5) {
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR5 |
			BPCTLI_CTRL_EXT_MDIO_DIR5) &
			~(BPCTLI_CTRL_EXT_MDIO_DATA5 |
			  BPCTLI_CTRL_EXT_MCLK_DATA5)));
	} else if (pbpctl_dev->bp_i80) {
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL);
		ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MDIO_DIR80) &
			~BPCTLI_CTRL_EXT_MDIO_DATA80));
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80) &
				    ~BPCTLI_CTRL_EXT_MCLK_DATA80));
	} else if (pbpctl_dev->bp_540) {
		ctrl_ext = BP10G_READ_REG(pbpctl_dev, ESDP);
		ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
		BP10G_WRITE_REG(pbpctl_dev, ESDP, ((ctrl | BP540_MCLK_DIR |
			BP540_MDIO_DIR) &
			~(BP540_MDIO_DATA | BP540_MCLK_DATA)));
	} else if (pbpctl_dev->bp_10gb) {
		ctrl_ext = BP10GB_READ_REG(pbpctl_dev, MISC_REG_SPIO);
		BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				 (ctrl_ext | BP10GB_MDIO_CLR | BP10GB_MCLK_CLR)
				 & ~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
				     BP10GB_MDIO_SET | BP10GB_MCLK_SET));
	} else if (!pbpctl_dev->bp_10g) {
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR |
			BPCTLI_CTRL_EXT_MDIO_DIR) &
			~(BPCTLI_CTRL_EXT_MDIO_DATA |
			  BPCTLI_CTRL_EXT_MCLK_DATA)));
	} else {
		ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
		ctrl_ext = BP10G_READ_REG(pbpctl_dev, EODSDP);
		BP10G_WRITE_REG(pbpctl_dev, EODSDP,
				(ctrl_ext &
				 ~(BP10G_MCLK_DATA_OUT | BP10G_MDIO_DATA_OUT)));
	}
	usec_delay(CMND_INTERVAL);
	/*send sync cmd */
	write_pulse(pbpctl_dev, ctrl_ext, SYNC_CMD_VAL, SYNC_CMD_LEN);
	/*send rd cmd */
	write_pulse(pbpctl_dev, ctrl_ext, RD_CMD_VAL, RD_CMD_LEN);
	/*send addr */
	write_pulse(pbpctl_dev, ctrl_ext, addr, ADDR_CMD_LEN);
	/* turn the data line around before clocking the reply in */
	if (pbpctl_dev->bp_10g9) {
		BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
				(ctrl_ext | BP10G_MDIO_DATA_OUT9));
		BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
				(ctrl | BP10G_MCLK_DATA_OUT9 |
				 BP10G_MCLK_DIR_OUT9));
	} else if (pbpctl_dev->bp_fiber5) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR5 |
			BPCTLI_CTRL_EXT_MCLK_DATA5) &
			~(BPCTLI_CTRL_EXT_MDIO_DIR5 |
			  BPCTLI_CTRL_EXT_MDIO_DATA5)));
	} else if (pbpctl_dev->bp_i80) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
				   (ctrl_ext &
				    ~(BPCTLI_CTRL_EXT_MDIO_DATA80 |
				      BPCTLI_CTRL_EXT_MDIO_DIR80)));
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   (ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80 |
				    BPCTLI_CTRL_EXT_MCLK_DATA80));
	} else if (pbpctl_dev->bp_540) {
		BP10G_WRITE_REG(pbpctl_dev, ESDP,
				(((ctrl | BP540_MDIO_DIR | BP540_MCLK_DIR |
				   BP540_MCLK_DATA) & ~BP540_MDIO_DATA)));
	} else if (pbpctl_dev->bp_10gb) {
		BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				 (ctrl_ext | BP10GB_MDIO_DIR | BP10GB_MCLK_SET)
				 & ~(BP10GB_MCLK_DIR | BP10GB_MDIO_SET |
				     BP10GB_MDIO_CLR | BP10GB_MCLK_CLR));
	} else if (!pbpctl_dev->bp_10g)
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR |
			BPCTLI_CTRL_EXT_MCLK_DATA) &
			~(BPCTLI_CTRL_EXT_MDIO_DIR |
			  BPCTLI_CTRL_EXT_MDIO_DATA)));
	else {
		BP10G_WRITE_REG(pbpctl_dev, EODSDP,
				(ctrl_ext | BP10G_MCLK_DATA_OUT |
				 BP10G_MDIO_DATA_OUT));
	}
	usec_delay(PULSE_TIME);
	ctrl_value = read_pulse(pbpctl_dev, ctrl_ext, RD_DATA_LEN);
	/* return the bus to idle: DATA 0, CLK 0 */
	if (pbpctl_dev->bp_10g9) {
		ctrl_ext = BP10G_READ_REG(pbpctl_dev, I2CCTL);
		ctrl = BP10G_READ_REG(pbpctl_dev_c, ESDP);
		BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
				(ctrl_ext & ~BP10G_MDIO_DATA_OUT9));
		BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
				((ctrl | BP10G_MCLK_DIR_OUT9) &
				 ~(BP10G_MCLK_DATA_OUT9)));
	} else if (pbpctl_dev->bp_fiber5) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR5 |
			BPCTLI_CTRL_EXT_MDIO_DIR5) &
			~(BPCTLI_CTRL_EXT_MDIO_DATA5 |
			  BPCTLI_CTRL_EXT_MCLK_DATA5)));
	} else if (pbpctl_dev->bp_i80) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MDIO_DIR80) &
			~BPCTLI_CTRL_EXT_MDIO_DATA80));
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80) &
				    ~BPCTLI_CTRL_EXT_MCLK_DATA80));
	} else if (pbpctl_dev->bp_540) {
		ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
		BP10G_WRITE_REG(pbpctl_dev, ESDP, ((ctrl | BP540_MCLK_DIR |
			BP540_MDIO_DIR) &
			~(BP540_MDIO_DATA | BP540_MCLK_DATA)));
	} else if (pbpctl_dev->bp_10gb) {
		ctrl_ext = BP10GB_READ_REG(pbpctl_dev, MISC_REG_SPIO);
		BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				 (ctrl_ext | BP10GB_MDIO_CLR | BP10GB_MCLK_CLR)
				 & ~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
				     BP10GB_MDIO_SET | BP10GB_MCLK_SET));
	} else if (!pbpctl_dev->bp_10g) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, ((ctrl_ext |
			BPCTLI_CTRL_EXT_MCLK_DIR |
			BPCTLI_CTRL_EXT_MDIO_DIR) &
			~(BPCTLI_CTRL_EXT_MDIO_DATA |
			  BPCTLI_CTRL_EXT_MCLK_DATA)));
	} else {
		ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
		ctrl_ext = BP10G_READ_REG(pbpctl_dev, EODSDP);
		BP10G_WRITE_REG(pbpctl_dev, EODSDP,
				(ctrl_ext &
				 ~(BP10G_MCLK_DATA_OUT | BP10G_MDIO_DATA_OUT)));
	}
	usec_delay(CMND_INTERVAL * 4);
#ifdef BP_SYNC_FLAG
	spin_unlock_irqrestore(&pbpctl_dev->bypass_wr_lock, flags);
#else
	atomic_set(&pbpctl_dev->wdt_busy, 0);
#endif
	return ctrl_value;
}
/*
 * wdt_pulse() - toggle the watchdog clock line once (low -> high -> low)
 * on whichever GPIO register bank the board family uses.
 *
 * Serialized either by the bypass_wr_lock spinlock (BP_SYNC_FLAG) or by
 * the wdt_busy flag.  Returns 0 on success, -1 if the device is busy or
 * (bp_10g9 boards) the status port cannot be resolved.
 */
static int wdt_pulse(bpctl_dev_t *pbpctl_dev)
{
	uint32_t ctrl_ext = 0, ctrl = 0;
	bpctl_dev_t *pbpctl_dev_c = NULL;
#ifdef BP_SYNC_FLAG
	unsigned long flags;
	spin_lock_irqsave(&pbpctl_dev->bypass_wr_lock, flags);
#else
	if ((atomic_read(&pbpctl_dev->wdt_busy)) == 1)
		return -1;
#endif
	if (pbpctl_dev->bp_10g9) {
		pbpctl_dev_c = get_status_port_fn(pbpctl_dev);
		if (!pbpctl_dev_c) {
			/* Fix: release the spinlock on this early exit; the
			 * original returned with bypass_wr_lock still held,
			 * deadlocking the next caller. */
#ifdef BP_SYNC_FLAG
			spin_unlock_irqrestore(&pbpctl_dev->bypass_wr_lock,
					       flags);
#endif
			return -1;
		}
	}
	/* Phase 1: drive MDIO data low and MCLK low. */
	if (pbpctl_dev->bp_10g9) {
		ctrl_ext = BP10G_READ_REG(pbpctl_dev, I2CCTL);
		ctrl = BP10G_READ_REG(pbpctl_dev_c, ESDP);
		/* DATA 0 CLK 0 */
		BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
				(ctrl_ext & ~BP10G_MDIO_DATA_OUT9));
		BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
				((ctrl | BP10G_MCLK_DIR_OUT9) &
				 ~(BP10G_MCLK_DATA_OUT9)));
	} else if (pbpctl_dev->bp_fiber5) {
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MCLK_DIR5 |
				     BPCTLI_CTRL_EXT_MDIO_DIR5) &
				    ~(BPCTLI_CTRL_EXT_MDIO_DATA5 |
				      BPCTLI_CTRL_EXT_MCLK_DATA5)));
	} else if (pbpctl_dev->bp_i80) {
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL);
		ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MDIO_DIR80) &
				    ~BPCTLI_CTRL_EXT_MDIO_DATA80));
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80) &
				    ~BPCTLI_CTRL_EXT_MCLK_DATA80));
	} else if (pbpctl_dev->bp_540) {
		ctrl_ext = ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
		BP10G_WRITE_REG(pbpctl_dev, ESDP,
				((ctrl | BP540_MCLK_DIR | BP540_MDIO_DIR) &
				 ~(BP540_MDIO_DATA | BP540_MCLK_DATA)));
	} else if (pbpctl_dev->bp_10gb) {
		ctrl_ext = BP10GB_READ_REG(pbpctl_dev, MISC_REG_SPIO);
		BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				 (ctrl_ext | BP10GB_MDIO_CLR | BP10GB_MCLK_CLR)
				 & ~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
				     BP10GB_MDIO_SET | BP10GB_MCLK_SET));
	} else if (!pbpctl_dev->bp_10g) {
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MCLK_DIR |
				     BPCTLI_CTRL_EXT_MDIO_DIR) &
				    ~(BPCTLI_CTRL_EXT_MDIO_DATA |
				      BPCTLI_CTRL_EXT_MCLK_DATA)));
	} else {
		ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
		ctrl_ext = BP10G_READ_REG(pbpctl_dev, EODSDP);
		BP10G_WRITE_REG(pbpctl_dev, EODSDP,
				(ctrl_ext &
				 ~(BP10G_MCLK_DATA_OUT | BP10G_MDIO_DATA_OUT)));
	}
	/* Phase 2: clock high, data still low. */
	if (pbpctl_dev->bp_10g9) {
		/* DATA 0 CLK 1 */
		BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
				(ctrl_ext & ~BP10G_MDIO_DATA_OUT9));
		BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
				(ctrl | BP10G_MCLK_DATA_OUT9 |
				 BP10G_MCLK_DIR_OUT9));
	} else if (pbpctl_dev->bp_fiber5) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MCLK_DIR5 |
				     BPCTLI_CTRL_EXT_MDIO_DIR5 |
				     BPCTLI_CTRL_EXT_MCLK_DATA5) &
				    ~(BPCTLI_CTRL_EXT_MDIO_DATA5)));
	} else if (pbpctl_dev->bp_i80) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MDIO_DIR80) &
				    ~BPCTLI_CTRL_EXT_MDIO_DATA80));
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   (ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80 |
				    BPCTLI_CTRL_EXT_MCLK_DATA80));
	} else if (pbpctl_dev->bp_540) {
		BP10G_WRITE_REG(pbpctl_dev, ESDP,
				((ctrl | BP540_MDIO_DIR | BP540_MCLK_DIR |
				  BP540_MCLK_DATA) & ~BP540_MDIO_DATA));
	} else if (pbpctl_dev->bp_10gb) {
		ctrl_ext = BP10GB_READ_REG(pbpctl_dev, MISC_REG_SPIO);
		BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				 (ctrl_ext | BP10GB_MDIO_CLR | BP10GB_MCLK_SET)
				 & ~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
				     BP10GB_MDIO_SET | BP10GB_MCLK_CLR));
	} else if (!pbpctl_dev->bp_10g) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MCLK_DIR |
				     BPCTLI_CTRL_EXT_MDIO_DIR |
				     BPCTLI_CTRL_EXT_MCLK_DATA) &
				    ~(BPCTLI_CTRL_EXT_MDIO_DATA)));
	} else {
		BP10G_WRITE_REG(pbpctl_dev, EODSDP,
				((ctrl_ext | BP10G_MCLK_DATA_OUT) &
				 ~BP10G_MDIO_DATA_OUT));
	}
	usec_delay(WDT_INTERVAL);
	/* Phase 3: clock back low to complete the pulse. */
	if (pbpctl_dev->bp_10g9) {
		/* DATA 0 CLK 0 */
		BP10G_WRITE_REG(pbpctl_dev, I2CCTL,
				(ctrl_ext & ~BP10G_MDIO_DATA_OUT9));
		BP10G_WRITE_REG(pbpctl_dev_c, ESDP,
				((ctrl | BP10G_MCLK_DIR_OUT9) &
				 ~(BP10G_MCLK_DATA_OUT9)));
	} else if (pbpctl_dev->bp_fiber5) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MCLK_DIR5 |
				     BPCTLI_CTRL_EXT_MDIO_DIR5) &
				    ~(BPCTLI_CTRL_EXT_MCLK_DATA5 |
				      BPCTLI_CTRL_EXT_MDIO_DATA5)));
	} else if (pbpctl_dev->bp_i80) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MDIO_DIR80) &
				    ~BPCTLI_CTRL_EXT_MDIO_DATA80));
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl | BPCTLI_CTRL_EXT_MCLK_DIR80) &
				    ~BPCTLI_CTRL_EXT_MCLK_DATA80));
	} else if (pbpctl_dev->bp_540) {
		BP10G_WRITE_REG(pbpctl_dev, ESDP,
				((ctrl | BP540_MCLK_DIR | BP540_MDIO_DIR) &
				 ~(BP540_MDIO_DATA | BP540_MCLK_DATA)));
	} else if (pbpctl_dev->bp_10gb) {
		ctrl_ext = BP10GB_READ_REG(pbpctl_dev, MISC_REG_SPIO);
		BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_SPIO,
				 (ctrl_ext | BP10GB_MDIO_CLR | BP10GB_MCLK_CLR)
				 & ~(BP10GB_MCLK_DIR | BP10GB_MDIO_DIR |
				     BP10GB_MDIO_SET | BP10GB_MCLK_SET));
	} else if (!pbpctl_dev->bp_10g) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MCLK_DIR |
				     BPCTLI_CTRL_EXT_MDIO_DIR) &
				    ~(BPCTLI_CTRL_EXT_MCLK_DATA |
				      BPCTLI_CTRL_EXT_MDIO_DATA)));
	} else {
		BP10G_WRITE_REG(pbpctl_dev, EODSDP,
				(ctrl_ext &
				 ~(BP10G_MCLK_DATA_OUT | BP10G_MDIO_DATA_OUT)));
	}
	if ((pbpctl_dev->wdt_status == WDT_STATUS_EN) /*&&
	   (pbpctl_dev->bp_ext_ver<PXG4BPFI_VER) */ )
		pbpctl_dev->bypass_wdt_on_time = jiffies;
#ifdef BP_SYNC_FLAG
	spin_unlock_irqrestore(&pbpctl_dev->bypass_wr_lock, flags);
#endif
	usec_delay(CMND_INTERVAL * 4);
	return 0;
}
/*
 * data_pulse() - bit-bang a command to the bypass firmware by clocking
 * 'value' pulses out on SDP7 while SDP6 is held high.
 */
static void data_pulse(bpctl_dev_t *pbpctl_dev, unsigned char value)
{
	uint32_t reg = 0;
#ifdef BP_SYNC_FLAG
	unsigned long flags;
#endif
	/* Refresh watchdog book-keeping before driving the pins. */
	wdt_time_left(pbpctl_dev);
#ifdef BP_SYNC_FLAG
	spin_lock_irqsave(&pbpctl_dev->bypass_wr_lock, flags);
#else
	atomic_set(&pbpctl_dev->wdt_busy, 1);
#endif
	reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	/* SDP6/SDP7 as outputs, both low. */
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   ((reg | BPCTLI_CTRL_EXT_SDP6_DIR |
			     BPCTLI_CTRL_EXT_SDP7_DIR) &
			    ~(BPCTLI_CTRL_EXT_SDP6_DATA |
			      BPCTLI_CTRL_EXT_SDP7_DATA)));
	usec_delay(INIT_CMND_INTERVAL);
	/* Raise SDP6, keep SDP7 low. */
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   ((reg | BPCTLI_CTRL_EXT_SDP6_DIR |
			     BPCTLI_CTRL_EXT_SDP7_DIR |
			     BPCTLI_CTRL_EXT_SDP6_DATA) &
			    ~(BPCTLI_CTRL_EXT_SDP7_DATA)));
	usec_delay(INIT_CMND_INTERVAL);
	/* One high/low cycle on SDP7 per unit of 'value'. */
	for (; value; value--) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   reg | BPCTLI_CTRL_EXT_SDP6_DIR |
				   BPCTLI_CTRL_EXT_SDP7_DIR |
				   BPCTLI_CTRL_EXT_SDP6_DATA |
				   BPCTLI_CTRL_EXT_SDP7_DATA);
		usec_delay(PULSE_INTERVAL);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((reg | BPCTLI_CTRL_EXT_SDP6_DIR |
				     BPCTLI_CTRL_EXT_SDP7_DIR |
				     BPCTLI_CTRL_EXT_SDP6_DATA) &
				    ~BPCTLI_CTRL_EXT_SDP7_DATA));
		usec_delay(PULSE_INTERVAL);
	}
	usec_delay(INIT_CMND_INTERVAL - PULSE_INTERVAL);
	/* Drop both pins low again. */
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   ((reg | BPCTLI_CTRL_EXT_SDP6_DIR |
			     BPCTLI_CTRL_EXT_SDP7_DIR) &
			    ~(BPCTLI_CTRL_EXT_SDP6_DATA |
			      BPCTLI_CTRL_EXT_SDP7_DATA)));
	usec_delay(WDT_TIME_CNT);
	if (pbpctl_dev->wdt_status == WDT_STATUS_EN)
		pbpctl_dev->bypass_wdt_on_time = jiffies;
#ifdef BP_SYNC_FLAG
	spin_unlock_irqrestore(&pbpctl_dev->bypass_wr_lock, flags);
#else
	atomic_set(&pbpctl_dev->wdt_busy, 0);
#endif
}
/*
 * send_wdt_pulse() - emit a single high/low tick on SDP7 to service the
 * hardware watchdog.  Returns -1 when the device is busy (non-sync build).
 */
static int send_wdt_pulse(bpctl_dev_t *pbpctl_dev)
{
	uint32_t reg = 0;
#ifdef BP_SYNC_FLAG
	unsigned long flags;
	spin_lock_irqsave(&pbpctl_dev->bypass_wr_lock, flags);
#else
	if ((atomic_read(&pbpctl_dev->wdt_busy)) == 1)
		return -1;
#endif
	wdt_time_left(pbpctl_dev);
	reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	/* SDP7 high... */
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   reg | BPCTLI_CTRL_EXT_SDP7_DIR |
			   BPCTLI_CTRL_EXT_SDP7_DATA);
	usec_delay(PULSE_INTERVAL);
	/* ...then low again: one watchdog tick. */
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   ((reg | BPCTLI_CTRL_EXT_SDP7_DIR) &
			    ~BPCTLI_CTRL_EXT_SDP7_DATA));
	usec_delay(PULSE_INTERVAL);
	if (pbpctl_dev->wdt_status == WDT_STATUS_EN)
		pbpctl_dev->bypass_wdt_on_time = jiffies;
#ifdef BP_SYNC_FLAG
	spin_unlock_irqrestore(&pbpctl_dev->bypass_wr_lock, flags);
#endif
	return 0;
}
/* Clock 'value' pulses out on SDP6, starting and ending with the pin low. */
void send_bypass_clear_pulse(bpctl_dev_t *pbpctl_dev, unsigned int value)
{
	uint32_t reg;

	reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	/* SDP6 as output, driven low. */
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   ((reg | BPCTLI_CTRL_EXT_SDP6_DIR) &
			    ~BPCTLI_CTRL_EXT_SDP6_DATA));
	usec_delay(PULSE_INTERVAL);
	for (; value; value--) {
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   reg | BPCTLI_CTRL_EXT_SDP6_DIR |
				   BPCTLI_CTRL_EXT_SDP6_DATA);
		usec_delay(PULSE_INTERVAL);
	}
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   ((reg | BPCTLI_CTRL_EXT_SDP6_DIR) &
			    ~BPCTLI_CTRL_EXT_SDP6_DATA));
	usec_delay(PULSE_INTERVAL);
}
/* #endif OLD_FW */
#ifdef BYPASS_DEBUG
/* Debug helper: emit 'counter' pulses via the version-appropriate path. */
int pulse_set_fn(bpctl_dev_t *pbpctl_dev, unsigned int counter)
{
	uint32_t reg;

	if (!pbpctl_dev)
		return -1;
	reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	write_pulse_1(pbpctl_dev, reg, counter, counter);
	pbpctl_dev->bypass_wdt_status = 0;
	if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
		write_pulse_1(pbpctl_dev, reg, counter, counter);
	} else {
		wdt_time_left(pbpctl_dev);
		if (pbpctl_dev->wdt_status == WDT_STATUS_EN) {
			/* Mark the WDT disabled around the pulse train. */
			pbpctl_dev->wdt_status = 0;
			data_pulse(pbpctl_dev, counter);
			pbpctl_dev->wdt_status = WDT_STATUS_EN;
			pbpctl_dev->bypass_wdt_on_time = jiffies;
		} else {
			data_pulse(pbpctl_dev, counter);
		}
	}
	return 0;
}
/*
 * Debug helper: drive MCLK as an output and pull MCLK/MDIO data and
 * MDIO direction low.  Always returns 0 (ctrl_value is never changed);
 * -1 on a NULL device.
 */
int zero_set_fn(bpctl_dev_t *pbpctl_dev)
{
	uint32_t ctrl_ext = 0, ctrl_value = 0;

	if (!pbpctl_dev)
		return -1;
	if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
		/* Fix: message was missing the trailing newline, which made
		 * the next printk continue on the same log line. */
		printk("zero_set\n");
		ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
				   ((ctrl_ext | BPCTLI_CTRL_EXT_MCLK_DIR) &
				    ~(BPCTLI_CTRL_EXT_MCLK_DATA |
				      BPCTLI_CTRL_EXT_MDIO_DIR |
				      BPCTLI_CTRL_EXT_MDIO_DATA)));
	}
	return ctrl_value;
}
/* Debug helper: read back a pulse count via read_pulse_2(). */
int pulse_get2_fn(bpctl_dev_t *pbpctl_dev)
{
	uint32_t reg = 0, val = 0;

	if (!pbpctl_dev)
		return -1;
	if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
		printk("pulse_get_fn\n");
		reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		val = read_pulse_2(pbpctl_dev, reg);
		printk("read:%d\n", val);
	}
	return val;
}
/* Debug helper: read back a pulse count via read_pulse_1(). */
int pulse_get1_fn(bpctl_dev_t *pbpctl_dev)
{
	uint32_t reg = 0, val = 0;

	if (!pbpctl_dev)
		return -1;
	if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
		printk("pulse_get_fn\n");
		reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		val = read_pulse_1(pbpctl_dev, reg);
		printk("read:%d\n", val);
	}
	return val;
}
/* Debug helper: drive SDP6 high. */
int gpio6_set_fn(bpctl_dev_t *pbpctl_dev)
{
	uint32_t reg;

	reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   reg | BPCTLI_CTRL_EXT_SDP6_DIR |
			   BPCTLI_CTRL_EXT_SDP6_DATA);
	return 0;
}
/* Debug helper: drive SDP7 high. */
int gpio7_set_fn(bpctl_dev_t *pbpctl_dev)
{
	uint32_t reg;

	reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   reg | BPCTLI_CTRL_EXT_SDP7_DIR |
			   BPCTLI_CTRL_EXT_SDP7_DATA);
	return 0;
}
/* Debug helper: drive SDP7 low. */
int gpio7_clear_fn(bpctl_dev_t *pbpctl_dev)
{
	uint32_t reg;

	reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   ((reg | BPCTLI_CTRL_EXT_SDP7_DIR) &
			    ~BPCTLI_CTRL_EXT_SDP7_DATA));
	return 0;
}
/* Debug helper: drive SDP6 low. */
int gpio6_clear_fn(bpctl_dev_t *pbpctl_dev)
{
	uint32_t reg;

	reg = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
			   ((reg | BPCTLI_CTRL_EXT_SDP6_DIR) &
			    ~BPCTLI_CTRL_EXT_SDP6_DATA));
	return 0;
}
#endif /*BYPASS_DEBUG */
/*
 * lookup_port() - find the sibling PCI function of @dev in the device
 * table: same bus/slot, function number differing in the lowest bit.
 * Returns NULL when no populated entry matches.
 */
static bpctl_dev_t *lookup_port(bpctl_dev_t *dev)
{
	bpctl_dev_t *p;
	int n;
	/* Fix: the original loop only incremented 'n', never 'p', so every
	 * iteration re-examined bpctl_dev_arr[0] and any sibling beyond the
	 * first array entry could never be found. */
	for (n = 0, p = bpctl_dev_arr; n < device_num && p->pdev; n++, p++) {
		if (p->bus == dev->bus
		    && p->slot == dev->slot
		    && p->func == (dev->func ^ 1))
			return p;
	}
	return NULL;
}
/* For even-numbered PCI functions, return the odd sibling (status port). */
static bpctl_dev_t *get_status_port_fn(bpctl_dev_t *pbpctl_dev)
{
	if (!pbpctl_dev)
		return NULL;
	if (pbpctl_dev->func != 0 && pbpctl_dev->func != 2)
		return NULL;
	return lookup_port(pbpctl_dev);
}
/* For odd-numbered PCI functions, return the even sibling (master port). */
static bpctl_dev_t *get_master_port_fn(bpctl_dev_t *pbpctl_dev)
{
	if (!pbpctl_dev)
		return NULL;
	if (pbpctl_dev->func != 1 && pbpctl_dev->func != 3)
		return NULL;
	return lookup_port(pbpctl_dev);
}
/**************************************/
/**************INTEL API***************/
/**************************************/
/*
 * Emit the two low bits of ctrl_value on this port's pins:
 * bit 0 -> SDP0 (CTRL register), bit 1 -> SDP6 (CTRL_EXT register).
 */
static void write_data_port_int(bpctl_dev_t *pbpctl_dev,
				unsigned char ctrl_value)
{
	uint32_t reg;

	reg = BPCTL_READ_REG(pbpctl_dev, CTRL);
	reg |= BPCTLI_CTRL_SDP0_DIR;	/* SDP0 as output */
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, reg);
	reg &= ~BPCTLI_CTRL_SDP0_DATA;
	reg |= ((ctrl_value & 0x1) << BPCTLI_CTRL_SDP0_SHIFT);
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL, reg);

	reg = (BPCTL_READ_REG(pbpctl_dev, CTRL_EXT));
	reg |= BPCTLI_CTRL_EXT_SDP6_DIR;	/* SDP6 as output */
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, reg);
	reg &= ~BPCTLI_CTRL_EXT_SDP6_DATA;
	reg |= (((ctrl_value & 0x2) >> 1) << BPCTLI_CTRL_EXT_SDP6_SHIFT);
	BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT, reg);
}
/* Split a 4-bit value across this port and its status-port sibling,
 * two bits each.  Returns -1 when the sibling cannot be resolved. */
static int write_data_int(bpctl_dev_t *pbpctl_dev, unsigned char value)
{
	bpctl_dev_t *peer = get_status_port_fn(pbpctl_dev);

	if (!peer)
		return -1;
	atomic_set(&pbpctl_dev->wdt_busy, 1);
	write_data_port_int(pbpctl_dev, value & 0x3);
	write_data_port_int(peer, (value & 0xc) >> 2);
	atomic_set(&pbpctl_dev->wdt_busy, 0);
	return 0;
}
/* Reset the watchdog on Intel-series boards: RESET then OFF command. */
static int wdt_pulse_int(bpctl_dev_t *pbpctl_dev)
{
	if (atomic_read(&pbpctl_dev->wdt_busy) == 1)
		return -1;
	if (write_data_int(pbpctl_dev, RESET_WDT_INT) < 0)
		return -1;
	msec_delay_bp(CMND_INTERVAL_INT);
	if (write_data_int(pbpctl_dev, CMND_OFF_INT) < 0)
		return -1;
	msec_delay_bp(CMND_INTERVAL_INT);
	if (pbpctl_dev->wdt_status == WDT_STATUS_EN)
		pbpctl_dev->bypass_wdt_on_time = jiffies;
	return 0;
}
/*************************************/
/************* COMMANDS **************/
/*************************************/
/* CMND_ON 0x4 (100)*/
/* Send the CMND_ON opcode (a no-op on Intel-series boards). */
int cmnd_on(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
		return 0;
	if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER)
		write_data(pbpctl_dev, CMND_ON);
	else
		data_pulse(pbpctl_dev, CMND_ON);
	return 0;
}
/* CMND_OFF 0x2 (10)*/
/* Send the CMND_OFF opcode via the board-appropriate channel. */
int cmnd_off(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
		write_data_int(pbpctl_dev, CMND_OFF_INT);
		msec_delay_bp(CMND_INTERVAL_INT);
	} else if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
		write_data(pbpctl_dev, CMND_OFF);
	} else {
		data_pulse(pbpctl_dev, CMND_OFF);
	}
	return 0;
}
/* BYPASS_ON (0xa)*/
/* Switch the port pair into bypass mode. */
int bypass_on(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & BP_CAP))
		return BP_NOT_CAP;
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
		write_data_int(pbpctl_dev, BYPASS_ON_INT);
		msec_delay_bp(BYPASS_DELAY_INT);
		pbpctl_dev->bp_status_un = 0;
	} else if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
		write_data(pbpctl_dev, BYPASS_ON);
		if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
			msec_delay_bp(LATCH_DELAY);
	} else {
		data_pulse(pbpctl_dev, BYPASS_ON);
	}
	return 0;
}
/* BYPASS_OFF (0x8 111)*/
/* Switch the port pair out of bypass mode. */
int bypass_off(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & BP_CAP))
		return BP_NOT_CAP;
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
		write_data_int(pbpctl_dev, DIS_BYPASS_CAP_INT);
		msec_delay_bp(BYPASS_DELAY_INT);
		write_data_int(pbpctl_dev, PWROFF_BYPASS_ON_INT);
		msec_delay_bp(BYPASS_DELAY_INT);
		pbpctl_dev->bp_status_un = 0;
	} else if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
		write_data(pbpctl_dev, BYPASS_OFF);
		if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
			msec_delay_bp(LATCH_DELAY);
	} else {
		data_pulse(pbpctl_dev, BYPASS_OFF);
	}
	return 0;
}
/* TAP_OFF (0x9)*/
/* Disable TAP mode (requires PXG2TBPI or newer firmware). */
int tap_off(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & TAP_CAP) ||
	    pbpctl_dev->bp_ext_ver < PXG2TBPI_VER)
		return BP_NOT_CAP;
	write_data(pbpctl_dev, TAP_OFF);
	msec_delay_bp(LATCH_DELAY);
	return 0;
}
/* TAP_ON (0xb)*/
/* Enable TAP mode (requires PXG2TBPI or newer firmware). */
int tap_on(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & TAP_CAP) ||
	    pbpctl_dev->bp_ext_ver < PXG2TBPI_VER)
		return BP_NOT_CAP;
	write_data(pbpctl_dev, TAP_ON);
	msec_delay_bp(LATCH_DELAY);
	return 0;
}
/* DISC_OFF (0x9)*/
/* Disable disconnect mode (firmware >= 0x8 only). */
int disc_off(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & DISC_CAP) || pbpctl_dev->bp_ext_ver < 0x8)
		return BP_NOT_CAP;
	write_data(pbpctl_dev, DISC_OFF);
	msec_delay_bp(LATCH_DELAY);
	return 0;
}
/* DISC_ON (0xb)*/
/* Enable disconnect mode (firmware >= 0x8 only). */
int disc_on(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & DISC_CAP) || pbpctl_dev->bp_ext_ver < 0x8)
		return BP_NOT_CAP;
	write_data(pbpctl_dev, 0x85 /* DISC_ON */);
	msec_delay_bp(LATCH_DELAY);
	return 0;
}
/* DISC_PORT_ON */
/* Disable TX on this port via the master function of the pair. */
int disc_port_on(bpctl_dev_t *pbpctl_dev)
{
	bpctl_dev_t *master;

	master = (is_bypass_fn(pbpctl_dev) == 1) ?
	    pbpctl_dev : get_master_port_fn(pbpctl_dev);
	if (!master)
		return BP_NOT_CAP;
	if (master->bp_caps_ex & DISC_PORT_CAP_EX) {
		if (is_bypass_fn(pbpctl_dev) == 1)
			write_data(master, TX_DISA);
		else
			write_data(master, TX_DISB);
		msec_delay_bp(LATCH_DELAY);
	}
	return 0;
}
/* DISC_PORT_OFF */
/* Re-enable TX on this port via the master function of the pair. */
int disc_port_off(bpctl_dev_t *pbpctl_dev)
{
	bpctl_dev_t *master;

	master = (is_bypass_fn(pbpctl_dev) == 1) ?
	    pbpctl_dev : get_master_port_fn(pbpctl_dev);
	if (!master)
		return BP_NOT_CAP;
	if (master->bp_caps_ex & DISC_PORT_CAP_EX) {
		if (is_bypass_fn(pbpctl_dev) == 1)
			write_data(master, TX_ENA);
		else
			write_data(master, TX_ENB);
		msec_delay_bp(LATCH_DELAY);
	}
	return 0;
}
/*TWO_PORT_LINK_HW_EN (0xe)*/
/* Enable two-port-link: firmware command on TPL2-capable boards,
 * direct SWDPIN0 control on the legacy TPL series. */
int tpl_hw_on(bpctl_dev_t *pbpctl_dev)
{
	int ctrl;
	bpctl_dev_t *peer = get_status_port_fn(pbpctl_dev);

	if (!peer)
		return BP_NOT_CAP;
	if (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX) {
		cmnd_on(pbpctl_dev);
		write_data(pbpctl_dev, TPL2_ON);
		msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
		cmnd_off(pbpctl_dev);
		return 0;
	}
	if (!TPL_IF_SERIES(pbpctl_dev->subdevice))
		return BP_NOT_CAP;
	ctrl = BPCTL_READ_REG(peer, CTRL);
	BPCTL_BP_WRITE_REG(peer, CTRL,
			   ((ctrl | BPCTLI_CTRL_SWDPIO0) &
			    ~BPCTLI_CTRL_SWDPIN0));
	return 0;
}
/*TWO_PORT_LINK_HW_DIS (0xc)*/
/* Disable two-port-link: firmware command on TPL2-capable boards,
 * direct SWDPIN0 control on the legacy TPL series. */
int tpl_hw_off(bpctl_dev_t *pbpctl_dev)
{
	int ctrl;
	bpctl_dev_t *peer = get_status_port_fn(pbpctl_dev);

	if (!peer)
		return BP_NOT_CAP;
	if (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX) {
		cmnd_on(pbpctl_dev);
		write_data(pbpctl_dev, TPL2_OFF);
		msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
		cmnd_off(pbpctl_dev);
		return 0;
	}
	if (!TPL_IF_SERIES(pbpctl_dev->subdevice))
		return BP_NOT_CAP;
	ctrl = BPCTL_READ_REG(peer, CTRL);
	BPCTL_BP_WRITE_REG(peer, CTRL,
			   (ctrl | BPCTLI_CTRL_SWDPIO0 |
			    BPCTLI_CTRL_SWDPIN0));
	return 0;
}
/* WDT_OFF (0x6 110)*/
/* Turn the hardware watchdog off and record the disabled state. */
int wdt_off(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & WD_CTL_CAP))
		return BP_NOT_CAP;
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
		bypass_off(pbpctl_dev);
	else if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER)
		write_data(pbpctl_dev, WDT_OFF);
	else
		data_pulse(pbpctl_dev, WDT_OFF);
	pbpctl_dev->wdt_status = WDT_STATUS_DIS;
	return 0;
}
/* WDT_ON (0x10)*/
/***Global***/
/* Watchdog timeout steps in milliseconds for Intel-series boards,
 * zero-terminated so wdt_on() can scan for the first value >= request. */
static unsigned int
wdt_val_array[] = { 1000, 1500, 2000, 3000, 4000, 8000, 16000, 32000, 0 };
/*
 * Arm the hardware watchdog with (approximately) @timeout milliseconds.
 * The granted interval is rounded to a supported step and stored in
 * bypass_timer_interval.  Returns 0 on success, BP_NOT_CAP if the board
 * has no watchdog control capability.
 */
int wdt_on(bpctl_dev_t *pbpctl_dev, unsigned int timeout)
{
	if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
		unsigned int pulse = 0, temp_value = 0, temp_cnt = 0;
		pbpctl_dev->wdt_status = 0;
		if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
			/* Round up to the next supported step; if the request
			 * exceeds the table, clamp to the largest entry. */
			for (; wdt_val_array[temp_cnt]; temp_cnt++)
				if (timeout <= wdt_val_array[temp_cnt])
					break;
			if (!wdt_val_array[temp_cnt])
				temp_cnt--;
			timeout = wdt_val_array[temp_cnt];
			/* Command code = table index + 0x7. */
			temp_cnt += 0x7;
			write_data_int(pbpctl_dev, DIS_BYPASS_CAP_INT);
			msec_delay_bp(BYPASS_DELAY_INT);
			pbpctl_dev->bp_status_un = 0;
			write_data_int(pbpctl_dev, temp_cnt);
			pbpctl_dev->bypass_wdt_on_time = jiffies;
			msec_delay_bp(CMND_INTERVAL_INT);
			pbpctl_dev->bypass_timer_interval = timeout;
		} else {
			/* Clamp into [TIMEOUT_UNIT, WDT_TIMEOUT_MAX]. */
			timeout =
			    (timeout <
			     TIMEOUT_UNIT ? TIMEOUT_UNIT : (timeout >
							    WDT_TIMEOUT_MAX ?
							    WDT_TIMEOUT_MAX :
							    timeout));
			/* temp_cnt = ceil(log2(timeout / 100)); the timer
			 * granularity is 100 ms * 2^temp_cnt. */
			temp_value = timeout / 100;
			while ((temp_value >>= 1))
				temp_cnt++;
			if (timeout > ((1 << temp_cnt) * 100))
				temp_cnt++;
			pbpctl_dev->bypass_wdt_on_time = jiffies;
			/* Exponent is encoded in the low bits of WDT_ON. */
			pulse = (WDT_ON | temp_cnt);
			if (pbpctl_dev->bp_ext_ver == OLD_IF_VER)
				data_pulse(pbpctl_dev, pulse);
			else
				write_data(pbpctl_dev, pulse);
			pbpctl_dev->bypass_timer_interval =
			    (1 << temp_cnt) * 100;
		}
		pbpctl_dev->wdt_status = WDT_STATUS_EN;
		return 0;
	}
	return BP_NOT_CAP;
}
/* Release both the SW (SMBI) and FW (SWESMBI) semaphore bits. */
void bp75_put_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
{
	u32 swsm = BPCTL_READ_REG(pbpctl_dev, SWSM);

	swsm &= ~(BPCTLI_SWSM_SMBI | BPCTLI_SWSM_SWESMBI);
	BPCTL_WRITE_REG(pbpctl_dev, SWSM, swsm);
}
/*
 * Acquire the hardware semaphore: first wait for the SW bit (SMBI) to
 * clear, then set the FW bit (SWESMBI) and verify it latched.
 * Returns 0 on success, -1 on timeout.
 */
s32 bp75_get_hw_semaphore_generic(bpctl_dev_t *pbpctl_dev)
{
	u32 swsm;
	s32 timeout = 8192 + 1;
	s32 i;

	for (i = 0; i < timeout; i++) {
		swsm = BPCTL_READ_REG(pbpctl_dev, SWSM);
		if (!(swsm & BPCTLI_SWSM_SMBI))
			break;
		usec_delay(50);
	}
	if (i == timeout) {
		printk
		    ("bpctl_mod: Driver can't access device - SMBI bit is set.\n");
		return -1;
	}
	for (i = 0; i < timeout; i++) {
		swsm = BPCTL_READ_REG(pbpctl_dev, SWSM);
		BPCTL_WRITE_REG(pbpctl_dev, SWSM, swsm | BPCTLI_SWSM_SWESMBI);
		/* Semaphore acquired if the bit latched. */
		if (BPCTL_READ_REG(pbpctl_dev, SWSM) & BPCTLI_SWSM_SWESMBI)
			break;
		usec_delay(50);
	}
	if (i == timeout) {
		/* Release what we hold before bailing out. */
		bp75_put_hw_semaphore_generic(pbpctl_dev);
		printk("bpctl_mod: Driver can't access the NVM\n");
		return -1;
	}
	return 0;
}
/* Clear this function's PHY ownership bit in SW_FW_SYNC. */
static void bp75_release_phy(bpctl_dev_t *pbpctl_dev)
{
	u32 swfw_sync;
	u16 mask = BPCTLI_SWFW_PHY0_SM;

	if ((pbpctl_dev->func == 1) || (pbpctl_dev->func == 3))
		mask = BPCTLI_SWFW_PHY1_SM;
	/* Spin until the HW semaphore is ours. */
	while (bp75_get_hw_semaphore_generic(pbpctl_dev) != 0)
		;		/* empty */
	swfw_sync = BPCTL_READ_REG(pbpctl_dev, SW_FW_SYNC);
	swfw_sync &= ~mask;
	BPCTL_WRITE_REG(pbpctl_dev, SW_FW_SYNC, swfw_sync);
	bp75_put_hw_semaphore_generic(pbpctl_dev);
}
/*
 * Claim this function's PHY ownership bit in SW_FW_SYNC, retrying while
 * firmware or the other function holds it.  Returns 0 on success.
 */
static s32 bp75_acquire_phy(bpctl_dev_t *pbpctl_dev)
{
	u32 swfw_sync;
	u32 swmask, fwmask;
	u16 mask = BPCTLI_SWFW_PHY0_SM;
	s32 i, timeout = 200;

	if ((pbpctl_dev->func == 1) || (pbpctl_dev->func == 3))
		mask = BPCTLI_SWFW_PHY1_SM;
	swmask = mask;
	fwmask = mask << 16;
	for (i = 0; i < timeout; i++) {
		if (bp75_get_hw_semaphore_generic(pbpctl_dev))
			return -1;
		swfw_sync = BPCTL_READ_REG(pbpctl_dev, SW_FW_SYNC);
		if (!(swfw_sync & (fwmask | swmask)))
			break;
		/* Resource busy: drop the semaphore and retry. */
		bp75_put_hw_semaphore_generic(pbpctl_dev);
		mdelay(5);
	}
	if (i == timeout) {
		printk
		    ("bpctl_mod: Driver can't access resource, SW_FW_SYNC timeout.\n");
		return -1;
	}
	swfw_sync |= swmask;
	BPCTL_WRITE_REG(pbpctl_dev, SW_FW_SYNC, swfw_sync);
	bp75_put_hw_semaphore_generic(pbpctl_dev);
	return 0;
}
/* Read a PHY register through the MDIC interface; result in *data. */
s32 bp75_read_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
{
	u32 i, mdic;
	u32 phy_addr = 1;

	mdic = ((offset << BPCTLI_MDIC_REG_SHIFT) |
		(phy_addr << BPCTLI_MDIC_PHY_SHIFT) | (BPCTLI_MDIC_OP_READ));
	BPCTL_WRITE_REG(pbpctl_dev, MDIC, mdic);
	/* Poll until the READY bit comes up or we give up. */
	for (i = 0; i < (BPCTLI_GEN_POLL_TIMEOUT * 3); i++) {
		usec_delay(50);
		mdic = BPCTL_READ_REG(pbpctl_dev, MDIC);
		if (mdic & BPCTLI_MDIC_READY)
			break;
	}
	if (!(mdic & BPCTLI_MDIC_READY)) {
		printk("bpctl_mod: MDI Read did not complete\n");
		return -1;
	}
	if (mdic & BPCTLI_MDIC_ERROR) {
		printk("bpctl_mod: MDI Error\n");
		return -1;
	}
	*data = (u16) mdic;
	return 0;
}
/* Write a PHY register through the MDIC interface. */
s32 bp75_write_phy_reg_mdic(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
{
	u32 i, mdic;
	u32 phy_addr = 1;

	mdic = (((u32) data) |
		(offset << BPCTLI_MDIC_REG_SHIFT) |
		(phy_addr << BPCTLI_MDIC_PHY_SHIFT) | (BPCTLI_MDIC_OP_WRITE));
	BPCTL_WRITE_REG(pbpctl_dev, MDIC, mdic);
	/* Poll until the READY bit comes up or we give up. */
	for (i = 0; i < (BPCTLI_GEN_POLL_TIMEOUT * 3); i++) {
		usec_delay(50);
		mdic = BPCTL_READ_REG(pbpctl_dev, MDIC);
		if (mdic & BPCTLI_MDIC_READY)
			break;
	}
	if (!(mdic & BPCTLI_MDIC_READY)) {
		printk("bpctl_mod: MDI Write did not complete\n");
		return -1;
	}
	if (mdic & BPCTLI_MDIC_ERROR) {
		printk("bpctl_mod: MDI Error\n");
		return -1;
	}
	return 0;
}
/* Acquire the PHY, select the page when needed, and read @offset. */
static s32 bp75_read_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 *data)
{
	s32 ret_val;

	ret_val = bp75_acquire_phy(pbpctl_dev);
	if (ret_val)
		return ret_val;
	/* Multi-page registers need the page-select register set first. */
	if (offset > BPCTLI_MAX_PHY_MULTI_PAGE_REG) {
		ret_val = bp75_write_phy_reg_mdic(pbpctl_dev,
						  BPCTLI_IGP01E1000_PHY_PAGE_SELECT,
						  (u16) offset);
		if (ret_val)
			goto release;
	}
	ret_val = bp75_read_phy_reg_mdic(pbpctl_dev,
					 BPCTLI_MAX_PHY_REG_ADDRESS & offset,
					 data);
release:
	bp75_release_phy(pbpctl_dev);
	return ret_val;
}
/* Acquire the PHY, select the page when needed, and write @offset. */
static s32 bp75_write_phy_reg(bpctl_dev_t *pbpctl_dev, u32 offset, u16 data)
{
	s32 ret_val;

	ret_val = bp75_acquire_phy(pbpctl_dev);
	if (ret_val)
		return ret_val;
	/* Multi-page registers need the page-select register set first. */
	if (offset > BPCTLI_MAX_PHY_MULTI_PAGE_REG) {
		ret_val = bp75_write_phy_reg_mdic(pbpctl_dev,
						  BPCTLI_IGP01E1000_PHY_PAGE_SELECT,
						  (u16) offset);
		if (ret_val)
			goto release;
	}
	ret_val = bp75_write_phy_reg_mdic(pbpctl_dev,
					  BPCTLI_MAX_PHY_REG_ADDRESS & offset,
					  data);
release:
	bp75_release_phy(pbpctl_dev);
	return ret_val;
}
/* SET_TX (non-Bypass command :)) */
/*
 * set_tx() - enable (tx_state != 0) or disable TX on this port by
 * toggling the board family's TX-control GPIO, or via PHY power-down on
 * PEG5-series boards.  Returns 0 on success, BP_NOT_CAP when the board
 * has neither DISC_PORT nor TX_CTL capability, or a negative value if a
 * PHY access fails on the PEG5 path.
 */
static int set_tx(bpctl_dev_t *pbpctl_dev, int tx_state)
{
	int ret = 0, ctrl = 0;
	bpctl_dev_t *pbpctl_dev_m;

	if ((is_bypass_fn(pbpctl_dev)) == 1)
		pbpctl_dev_m = pbpctl_dev;
	else
		pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
	if (pbpctl_dev_m == NULL)
		return BP_NOT_CAP;
	if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
		ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL);
		if (!tx_state) {
			/* Disable: drive the TX-disable pin high. */
			if (pbpctl_dev->bp_540) {
				ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						(ctrl | BP10G_SDP1_DIR |
						 BP10G_SDP1_DATA));
			} else {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
						   (ctrl | BPCTLI_CTRL_SDP1_DIR
						    | BPCTLI_CTRL_SWDPIN1));
			}
		} else {
			/* Enable: drive the TX-disable pin low. */
			if (pbpctl_dev->bp_540) {
				ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						((ctrl | BP10G_SDP1_DIR) &
						 ~BP10G_SDP1_DATA));
			} else {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
						   ((ctrl |
						     BPCTLI_CTRL_SDP1_DIR) &
						    ~BPCTLI_CTRL_SWDPIN1));
			}
			return ret;
		}
	} else if (pbpctl_dev->bp_caps & TX_CTL_CAP) {
		if (PEG5_IF_SERIES(pbpctl_dev->subdevice)) {
			/* PEG5: toggle the PHY POWER_DOWN bit instead of a
			 * GPIO.  A read failure leaves ret non-zero but we
			 * still fall through to the GPIO path below, matching
			 * the historical behavior. */
			uint16_t mii_reg;

			if (tx_state) {
				ret = bp75_read_phy_reg(pbpctl_dev,
							BPCTLI_PHY_CONTROL,
							&mii_reg);
				if (!ret &&
				    (mii_reg & BPCTLI_MII_CR_POWER_DOWN))
					ret = bp75_write_phy_reg(pbpctl_dev,
								 BPCTLI_PHY_CONTROL,
								 mii_reg &
								 ~BPCTLI_MII_CR_POWER_DOWN);
			} else {
				ret = bp75_read_phy_reg(pbpctl_dev,
							BPCTLI_PHY_CONTROL,
							&mii_reg);
				if (!ret) {
					mii_reg |= BPCTLI_MII_CR_POWER_DOWN;
					ret = bp75_write_phy_reg(pbpctl_dev,
								 BPCTLI_PHY_CONTROL,
								 mii_reg);
				}
			}
		}
		/* Pick the GPIO register bank for this board family. */
		if (pbpctl_dev->bp_fiber5)
			ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
		else if (pbpctl_dev->bp_10gb)
			ctrl = BP10GB_READ_REG(pbpctl_dev, MISC_REG_GPIO);
		else if (!pbpctl_dev->bp_10g)
			ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL);
		else
			ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);

		if (!tx_state) {
			/* Disable TX. */
			if (pbpctl_dev->bp_10g9) {
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						(ctrl | BP10G_SDP3_DATA |
						 BP10G_SDP3_DIR));
			} else if (pbpctl_dev->bp_fiber5) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
						   (ctrl |
						    BPCTLI_CTRL_EXT_SDP6_DIR |
						    BPCTLI_CTRL_EXT_SDP6_DATA));
			} else if (pbpctl_dev->bp_10gb) {
				if ((pbpctl_dev->func == 1)
				    || (pbpctl_dev->func == 3))
					BP10GB_WRITE_REG(pbpctl_dev,
							 MISC_REG_GPIO,
							 (ctrl |
							  BP10GB_GPIO0_SET_P1) &
							 ~(BP10GB_GPIO0_CLR_P1 |
							   BP10GB_GPIO0_OE_P1));
				else
					BP10GB_WRITE_REG(pbpctl_dev,
							 MISC_REG_GPIO,
							 (ctrl |
							  BP10GB_GPIO0_OE_P0 |
							  BP10GB_GPIO0_SET_P0));
			} else if (pbpctl_dev->bp_i80) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
						   (ctrl | BPCTLI_CTRL_SDP1_DIR
						    | BPCTLI_CTRL_SWDPIN1));
			} else if (pbpctl_dev->bp_540) {
				ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						(ctrl | BP10G_SDP1_DIR |
						 BP10G_SDP1_DATA));
			} else if (!pbpctl_dev->bp_10g) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
						   (ctrl | BPCTLI_CTRL_SWDPIO0 |
						    BPCTLI_CTRL_SWDPIN0));
			} else {
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						(ctrl | BP10G_SDP0_DATA |
						 BP10G_SDP0_DIR));
			}
		} else {
			/* Enable TX. */
			if (pbpctl_dev->bp_10g9) {
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						((ctrl | BP10G_SDP3_DIR) &
						 ~BP10G_SDP3_DATA));
			} else if (pbpctl_dev->bp_fiber5) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL_EXT,
						   ((ctrl |
						     BPCTLI_CTRL_EXT_SDP6_DIR) &
						    ~BPCTLI_CTRL_EXT_SDP6_DATA));
			} else if (pbpctl_dev->bp_10gb) {
				/* Fix: this branch tested bpctl_dev_arr->func
				 * (the first device in the global table)
				 * instead of this port's own function number,
				 * unlike the mirror branch above. */
				if ((pbpctl_dev->func == 1)
				    || (pbpctl_dev->func == 3))
					BP10GB_WRITE_REG(pbpctl_dev,
							 MISC_REG_GPIO,
							 (ctrl |
							  BP10GB_GPIO0_CLR_P1) &
							 ~(BP10GB_GPIO0_SET_P1 |
							   BP10GB_GPIO0_OE_P1));
				else
					BP10GB_WRITE_REG(pbpctl_dev,
							 MISC_REG_GPIO,
							 (ctrl |
							  BP10GB_GPIO0_OE_P0 |
							  BP10GB_GPIO0_CLR_P0));
			} else if (pbpctl_dev->bp_i80) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
						   ((ctrl |
						     BPCTLI_CTRL_SDP1_DIR) &
						    ~BPCTLI_CTRL_SWDPIN1));
			} else if (pbpctl_dev->bp_540) {
				ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						((ctrl | BP10G_SDP1_DIR) &
						 ~BP10G_SDP1_DATA));
			} else if (!pbpctl_dev->bp_10g) {
				BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
						   ((ctrl | BPCTLI_CTRL_SWDPIO0)
						    & ~BPCTLI_CTRL_SWDPIN0));
				if (!PEGF_IF_SERIES(pbpctl_dev->subdevice)) {
					BPCTL_BP_WRITE_REG(pbpctl_dev, CTRL,
							   (ctrl &
							    ~(BPCTLI_CTRL_SDP0_DATA
							      |
							      BPCTLI_CTRL_SDP0_DIR)));
				}
			} else {
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						((ctrl | BP10G_SDP0_DIR) &
						 ~BP10G_SDP0_DATA));
			}
		}
	} else
		ret = BP_NOT_CAP;
	return ret;
}
/* SET_FORCE_LINK (non-Bypass command :)) */
/*
 * Force-link control for DBI-series 10G boards via the SDP1 pin of the
 * ESDP register.  Returns 0 on the 10G paths, BP_NOT_CAP otherwise.
 */
static int set_bp_force_link(bpctl_dev_t *pbpctl_dev, int tx_state)
{
	int ret = 0, ctrl = 0;
	if (DBI_IF_SERIES(pbpctl_dev->subdevice)) {
		if ((pbpctl_dev->bp_10g) || (pbpctl_dev->bp_10g9)) {
			/* NOTE(review): this reads CTRL but modifies and
			 * writes ESDP.  Every other 10G path in this file
			 * reads ESDP before writing it (e.g. set_tx), so
			 * this looks like it should be
			 * BP10G_READ_REG(pbpctl_dev, ESDP) -- confirm
			 * against the hardware documentation before
			 * changing. */
			ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL);
			if (!tx_state)
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						ctrl & ~BP10G_SDP1_DIR);
			else
				BP10G_WRITE_REG(pbpctl_dev, ESDP,
						((ctrl | BP10G_SDP1_DIR) &
						 ~BP10G_SDP1_DATA));
			return ret;
		}
	}
	return BP_NOT_CAP;
}
/*RESET_CONT 0x20 */
/* Send the RESET_CONT opcode (not supported on Intel-series boards). */
int reset_cont(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
		return BP_NOT_CAP;
	if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER)
		write_data(pbpctl_dev, RESET_CONT);
	else
		data_pulse(pbpctl_dev, RESET_CONT);
	return 0;
}
/*DIS_BYPASS_CAP 0x22 */
/* Disable the bypass capability (turning bypass off first). */
int dis_bypass_cap(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & BP_DIS_CAP))
		return BP_NOT_CAP;
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
		write_data_int(pbpctl_dev, DIS_BYPASS_CAP_INT);
		msec_delay_bp(BYPASS_DELAY_INT);
	} else {
		write_data(pbpctl_dev, BYPASS_OFF);
		msec_delay_bp(LATCH_DELAY);
		write_data(pbpctl_dev, DIS_BYPASS_CAP);
		msec_delay_bp(BYPASS_CAP_DELAY);
	}
	return 0;
}
/*EN_BYPASS_CAP 0x24 */
/* Re-enable the bypass capability. */
int en_bypass_cap(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & BP_DIS_CAP))
		return BP_NOT_CAP;
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
		write_data_int(pbpctl_dev, PWROFF_BYPASS_ON_INT);
		msec_delay_bp(BYPASS_DELAY_INT);
	} else {
		write_data(pbpctl_dev, EN_BYPASS_CAP);
		msec_delay_bp(BYPASS_CAP_DELAY);
	}
	return 0;
}
/* BYPASS_STATE_PWRON 0x26*/
/* Make bypass the power-on default state. */
int bypass_state_pwron(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP))
		return BP_NOT_CAP;
	write_data(pbpctl_dev, BYPASS_STATE_PWRON);
	if (pbpctl_dev->bp_ext_ver == PXG2BPI_VER)
		msec_delay_bp(DFLT_PWRON_DELAY);
	else
		msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return 0;
}
/* NORMAL_STATE_PWRON 0x28*/
/* Make normal (non-bypass) the power-on default state. */
int normal_state_pwron(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP) &&
	    !(pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP))
		return BP_NOT_CAP;
	write_data(pbpctl_dev, NORMAL_STATE_PWRON);
	if (pbpctl_dev->bp_ext_ver == PXG2BPI_VER)
		msec_delay_bp(DFLT_PWRON_DELAY);
	else
		msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return 0;
}
/* BYPASS_STATE_PWROFF 0x27*/
/* Make bypass the power-off default state. */
int bypass_state_pwroff(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP))
		return BP_NOT_CAP;
	write_data(pbpctl_dev, BYPASS_STATE_PWROFF);
	msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return 0;
}
/* NORMAL_STATE_PWROFF 0x29*/
/* Make normal (non-bypass) the power-off default state. */
int normal_state_pwroff(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP))
		return BP_NOT_CAP;
	write_data(pbpctl_dev, NORMAL_STATE_PWROFF);
	msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return 0;
}
/*TAP_STATE_PWRON 0x2a*/
/* Make TAP the power-on default state. */
int tap_state_pwron(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP))
		return BP_NOT_CAP;
	write_data(pbpctl_dev, TAP_STATE_PWRON);
	msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return 0;
}
/*DIS_TAP_CAP 0x2c*/
/* Disable the TAP capability. */
int dis_tap_cap(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & TAP_DIS_CAP))
		return BP_NOT_CAP;

	write_data(pbpctl_dev, DIS_TAP_CAP);
	msec_delay_bp(BYPASS_CAP_DELAY);
	return 0;
}
/*EN_TAP_CAP 0x2e*/
/* Re-enable the TAP capability (same gating bit as dis_tap_cap). */
int en_tap_cap(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & TAP_DIS_CAP))
		return BP_NOT_CAP;

	write_data(pbpctl_dev, EN_TAP_CAP);
	msec_delay_bp(BYPASS_CAP_DELAY);
	return 0;
}
/*DISC_STATE_PWRON 0x2a*/
/* Make "disconnect" the default state after power-up (fw ext ver >= 8). */
int disc_state_pwron(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) ||
	    (pbpctl_dev->bp_ext_ver < 0x8))
		return BP_NOT_CAP;

	write_data(pbpctl_dev, DISC_STATE_PWRON);
	msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return BP_OK;
}
/*DIS_DISC_CAP 0x2c*/
/* Disable the disconnect capability (fw ext ver >= 8). */
int dis_disc_cap(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & DISC_DIS_CAP) ||
	    (pbpctl_dev->bp_ext_ver < 0x8))
		return BP_NOT_CAP;

	write_data(pbpctl_dev, DIS_DISC_CAP);
	msec_delay_bp(BYPASS_CAP_DELAY);
	return BP_OK;
}
/*DISC_STATE_PWRON 0x2a*/
/*
 * disc_port_state_pwron() - would make per-port TX-disable the power-up
 * default for this port.
 * NOTE(review): the unconditional "return BP_NOT_CAP;" right after the
 * declarations makes everything below it dead code — the feature looks
 * deliberately disabled. Confirm intent before removing the early return.
 */
int disc_port_state_pwron(bpctl_dev_t *pbpctl_dev)
{
	int ret = 0;
	bpctl_dev_t *pbpctl_dev_m;
	return BP_NOT_CAP;	/* feature disabled; code below is unreachable */
	if ((is_bypass_fn(pbpctl_dev)) == 1)
		pbpctl_dev_m = pbpctl_dev;
	else
		pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
	if (pbpctl_dev_m == NULL)
		return BP_NOT_CAP;
	if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
		/* Master function controls port A, slave controls port B. */
		if (is_bypass_fn(pbpctl_dev) == 1)
			write_data(pbpctl_dev_m, TX_DISA_PWRUP);
		else
			write_data(pbpctl_dev_m, TX_DISB_PWRUP);
		msec_delay_bp(LATCH_DELAY);
	}
	return ret;
}
/*
 * normal_port_state_pwron() - would make per-port TX-enable the power-up
 * default for this port.
 * NOTE(review): like disc_port_state_pwron(), the unconditional
 * "return BP_NOT_CAP;" makes the rest of this function dead code —
 * apparently disabled on purpose; confirm before re-enabling.
 */
int normal_port_state_pwron(bpctl_dev_t *pbpctl_dev)
{
	int ret = 0;
	bpctl_dev_t *pbpctl_dev_m;
	return BP_NOT_CAP;	/* feature disabled; code below is unreachable */
	if ((is_bypass_fn(pbpctl_dev)) == 1)
		pbpctl_dev_m = pbpctl_dev;
	else
		pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
	if (pbpctl_dev_m == NULL)
		return BP_NOT_CAP;
	if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
		/* Master function controls port A, slave controls port B. */
		if (is_bypass_fn(pbpctl_dev) == 1)
			write_data(pbpctl_dev_m, TX_ENA_PWRUP);
		else
			write_data(pbpctl_dev_m, TX_ENB_PWRUP);
		msec_delay_bp(LATCH_DELAY);
	}
	return ret;
}
/*EN_DISC_CAP 0x2e*/
/* Re-enable the disconnect capability (fw ext ver >= 8). */
int en_disc_cap(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & DISC_DIS_CAP) ||
	    (pbpctl_dev->bp_ext_ver < 0x8))
		return BP_NOT_CAP;

	write_data(pbpctl_dev, EN_DISC_CAP);
	msec_delay_bp(BYPASS_CAP_DELAY);
	return BP_OK;
}
/*
 * std_nic_on() - put the adapter into standard-NIC mode: bypass/TAP off,
 * "normal" as power-up default, bypass/TAP capabilities disabled.
 * Returns 0 / BP_OK on success, BP_NOT_CAP when unsupported.
 */
int std_nic_on(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
		/* Old Intel boards: single interrupt-driven command. */
		if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
			write_data_int(pbpctl_dev, DIS_BYPASS_CAP_INT);
			msec_delay_bp(BYPASS_DELAY_INT);
			pbpctl_dev->bp_status_un = 0;
			return BP_OK;
		}
		/* Firmware ext ver >= 8 has a dedicated STD_NIC_ON command. */
		if (pbpctl_dev->bp_ext_ver >= 0x8) {
			write_data(pbpctl_dev, STD_NIC_ON);
			msec_delay_bp(BYPASS_CAP_DELAY);
			return BP_OK;
		}
		/* Older firmware: emulate by switching each feature off. */
		if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
			wdt_off(pbpctl_dev);
			if (pbpctl_dev->bp_caps & BP_CAP) {
				write_data(pbpctl_dev, BYPASS_OFF);
				msec_delay_bp(LATCH_DELAY);
			}
			if (pbpctl_dev->bp_caps & TAP_CAP) {
				write_data(pbpctl_dev, TAP_OFF);
				msec_delay_bp(LATCH_DELAY);
			}
			write_data(pbpctl_dev, NORMAL_STATE_PWRON);
			/* PXG2BPI needs the longer default power-on delay. */
			if (pbpctl_dev->bp_ext_ver == PXG2BPI_VER)
				msec_delay_bp(DFLT_PWRON_DELAY);
			else
				msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
			if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
				write_data(pbpctl_dev, DIS_BYPASS_CAP);
				msec_delay_bp(BYPASS_CAP_DELAY);
			}
			if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
				write_data(pbpctl_dev, DIS_TAP_CAP);
				msec_delay_bp(BYPASS_CAP_DELAY);
			}
			return 0;
		}
	}
	return BP_NOT_CAP;
}
/*
 * std_nic_off() - leave standard-NIC mode: restore the bypass/TAP/disc
 * power-up defaults and re-enable the corresponding capabilities.
 * Returns 0 / BP_OK on success, BP_NOT_CAP when unsupported.
 */
int std_nic_off(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
		/* Old Intel boards: single interrupt-driven command. */
		if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
			write_data_int(pbpctl_dev, PWROFF_BYPASS_ON_INT);
			msec_delay_bp(BYPASS_DELAY_INT);
			return BP_OK;
		}
		/* Firmware ext ver >= 8 has a dedicated STD_NIC_OFF command. */
		if (pbpctl_dev->bp_ext_ver >= 0x8) {
			write_data(pbpctl_dev, STD_NIC_OFF);
			msec_delay_bp(BYPASS_CAP_DELAY);
			return BP_OK;
		}
		/* Older firmware: restore each feature individually. */
		if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
			if (pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) {
				write_data(pbpctl_dev, TAP_STATE_PWRON);
				msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
			}
			if (pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP) {
				write_data(pbpctl_dev, BYPASS_STATE_PWRON);
				if (pbpctl_dev->bp_ext_ver > PXG2BPI_VER)
					msec_delay_bp(LATCH_DELAY +
						      EEPROM_WR_DELAY);
				else
					msec_delay_bp(DFLT_PWRON_DELAY);
			}
			if (pbpctl_dev->bp_caps & TAP_DIS_CAP) {
				write_data(pbpctl_dev, EN_TAP_CAP);
				msec_delay_bp(BYPASS_CAP_DELAY);
			}
			if (pbpctl_dev->bp_caps & DISC_DIS_CAP) {
				write_data(pbpctl_dev, EN_DISC_CAP);
				msec_delay_bp(BYPASS_CAP_DELAY);
			}
			if (pbpctl_dev->bp_caps & BP_DIS_CAP) {
				write_data(pbpctl_dev, EN_BYPASS_CAP);
				msec_delay_bp(BYPASS_CAP_DELAY);
			}
			return 0;
		}
	}
	return BP_NOT_CAP;
}
/*
 * wdt_time_left() - milliseconds left until the bypass watchdog fires.
 * Returns 0 when disabled, -1 when already expired (also latches
 * WDT_STATUS_EXP), otherwise the remaining time in msec.
 */
int wdt_time_left(bpctl_dev_t *pbpctl_dev)
{
	unsigned long curr_time = jiffies;
	unsigned long delta_time_msec;
	int time_left = 0;

	switch (pbpctl_dev->wdt_status) {
	case WDT_STATUS_DIS:
		time_left = 0;
		break;
	case WDT_STATUS_EN:
		/*
		 * Unsigned subtraction already yields the correct modular
		 * difference on jiffies wraparound; the previous explicit
		 * "~on_time + curr_time" branch was off by one jiffy in
		 * the wrapped case.
		 */
		delta_time_msec =
		    jiffies_to_msecs(curr_time -
				     pbpctl_dev->bypass_wdt_on_time);
		time_left = pbpctl_dev->bypass_timer_interval - delta_time_msec;
		if (time_left < 0) {
			time_left = -1;
			pbpctl_dev->wdt_status = WDT_STATUS_EXP;
		}
		break;
	case WDT_STATUS_EXP:
		time_left = -1;
		break;
	}
	return time_left;
}
/*
 * wdt_timer() - query the watchdog; stores the remaining time in
 * *time_left. Returns 0 on success, BP_NOT_CAP when unsupported or
 * the watchdog state is unknown.
 */
static int wdt_timer(bpctl_dev_t *pbpctl_dev, int *time_left)
{
	if (!(pbpctl_dev->bp_caps & WD_CTL_CAP))
		return BP_NOT_CAP;
	if (pbpctl_dev->wdt_status == WDT_STATUS_UNKNOWN)
		return BP_NOT_CAP;

	*time_left = wdt_time_left(pbpctl_dev);
	return 0;
}
/*
 * wdt_timer_reload() - kick (pet) the hardware watchdog using whichever
 * pulse mechanism the board supports. Returns 1 when a pulse was sent,
 * 0 when the watchdog is disabled, BP_NOT_CAP when unsupported.
 */
static int wdt_timer_reload(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & WD_CTL_CAP) ||
	    (pbpctl_dev->wdt_status == WDT_STATUS_UNKNOWN))
		return BP_NOT_CAP;
	if (pbpctl_dev->wdt_status == WDT_STATUS_DIS)
		return 0;

	/* The pulse result is intentionally ignored, as before. */
	if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER)
		wdt_pulse(pbpctl_dev);
	else if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
		wdt_pulse_int(pbpctl_dev);
	else
		send_wdt_pulse(pbpctl_dev);
	return 1;
}
/*
 * wd_reset_timer() - kernel timer callback that periodically pets the
 * watchdog. When BP_SELF_TEST is enabled it instead injects a test frame
 * into the net stack. Re-arms itself every reset_time msec.
 *
 * Fix: removed the unreachable "return;" that followed
 * "goto bp_timer_reload;" in the self-test branch.
 */
static void wd_reset_timer(unsigned long param)
{
	bpctl_dev_t *pbpctl_dev = (bpctl_dev_t *) param;
#ifdef BP_SELF_TEST
	struct sk_buff *skb_tmp;
#endif
	/* Another command is in flight; retry on the next jiffy. */
	if ((pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) &&
	    ((atomic_read(&pbpctl_dev->wdt_busy)) == 1)) {
		mod_timer(&pbpctl_dev->bp_timer, jiffies + 1);
		return;
	}
#ifdef BP_SELF_TEST
	if (pbpctl_dev->bp_self_test_flag == 1) {
		skb_tmp = dev_alloc_skb(BPTEST_DATA_LEN + 2);
		if ((skb_tmp) && (pbpctl_dev->ndev) && (pbpctl_dev->bp_tx_data)) {
			memcpy(skb_put(skb_tmp, BPTEST_DATA_LEN),
			       pbpctl_dev->bp_tx_data, BPTEST_DATA_LEN);
			skb_tmp->dev = pbpctl_dev->ndev;
			skb_tmp->protocol =
			    eth_type_trans(skb_tmp, pbpctl_dev->ndev);
			skb_tmp->ip_summed = CHECKSUM_UNNECESSARY;
			netif_receive_skb(skb_tmp);
			goto bp_timer_reload;
		}
	}
#endif
	wdt_timer_reload(pbpctl_dev);
#ifdef BP_SELF_TEST
bp_timer_reload:
#endif
	if (pbpctl_dev->reset_time) {
		mod_timer(&pbpctl_dev->bp_timer,
			  jiffies + (HZ * pbpctl_dev->reset_time) / 1000);
	}
}
/*WAIT_AT_PWRUP 0x80 */
/* Enable "wait at power-up" behaviour (fw ext ver >= 8). */
int bp_wait_at_pwup_en(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP) ||
	    (pbpctl_dev->bp_ext_ver < BP_FW_EXT_VER8))
		return BP_NOT_CAP;

	write_data(pbpctl_dev, BP_WAIT_AT_PWUP_EN);
	msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return BP_OK;
}
/*DIS_WAIT_AT_PWRUP 0x81 */
/* Disable "wait at power-up" behaviour (fw ext ver >= 8). */
int bp_wait_at_pwup_dis(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP) ||
	    (pbpctl_dev->bp_ext_ver < BP_FW_EXT_VER8))
		return BP_NOT_CAP;

	write_data(pbpctl_dev, BP_WAIT_AT_PWUP_DIS);
	msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return BP_OK;
}
/*EN_HW_RESET 0x82 */
/* Enable hardware-reset behaviour (fw ext ver >= 8). */
int bp_hw_reset_en(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP) ||
	    (pbpctl_dev->bp_ext_ver < BP_FW_EXT_VER8))
		return BP_NOT_CAP;

	write_data(pbpctl_dev, BP_HW_RESET_EN);
	msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return BP_OK;
}
/*DIS_HW_RESET 0x83 */
/* Disable hardware-reset behaviour (fw ext ver >= 8). */
int bp_hw_reset_dis(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP) ||
	    (pbpctl_dev->bp_ext_ver < BP_FW_EXT_VER8))
		return BP_NOT_CAP;

	write_data(pbpctl_dev, BP_HW_RESET_DIS);
	msec_delay_bp(LATCH_DELAY + EEPROM_WR_DELAY);
	return BP_OK;
}
/*
 * wdt_exp_mode() - select what state the board falls into when the
 * watchdog expires: mode 0 = bypass, 1 = TAP, 2 = disconnect.
 * Implemented as read-modify-write of per-mode flag bits in the TAP and
 * DISC status registers. Returns BP_OK on success, BP_NOT_CAP otherwise.
 */
int wdt_exp_mode(bpctl_dev_t *pbpctl_dev, int mode)
{
	uint32_t status_reg = 0, status_reg1 = 0;
	if ((pbpctl_dev->bp_caps & (TAP_STATUS_CAP | DISC_CAP)) &&
	    (pbpctl_dev->bp_caps & BP_CAP)) {
		if (pbpctl_dev->bp_ext_ver >= PXE2TBPI_VER) {
			/* mode 2 (disconnect) only exists on fw ext ver >= 8. */
			if ((pbpctl_dev->bp_ext_ver >= 0x8) &&
			    (mode == 2) && (pbpctl_dev->bp_caps & DISC_CAP)) {
				status_reg1 =
				    read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR);
				/* Only write if the bit is not already set. */
				if (!(status_reg1 & WDTE_DISC_BPN_MASK))
					write_reg(pbpctl_dev,
						  status_reg1 |
						  WDTE_DISC_BPN_MASK,
						  STATUS_DISC_REG_ADDR);
				return BP_OK;
			}
		}
		status_reg = read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR);
		if ((mode == 0) && (pbpctl_dev->bp_caps & BP_CAP)) {
			/* Bypass mode: clear the disconnect and TAP bits. */
			if (pbpctl_dev->bp_ext_ver >= 0x8) {
				status_reg1 =
				    read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR);
				if (status_reg1 & WDTE_DISC_BPN_MASK)
					write_reg(pbpctl_dev,
						  status_reg1 &
						  ~WDTE_DISC_BPN_MASK,
						  STATUS_DISC_REG_ADDR);
			}
			if (status_reg & WDTE_TAP_BPN_MASK)
				write_reg(pbpctl_dev,
					  status_reg & ~WDTE_TAP_BPN_MASK,
					  STATUS_TAP_REG_ADDR);
			return BP_OK;
		} else if ((mode == 1) && (pbpctl_dev->bp_caps & TAP_CAP)) {
			/* TAP mode: set the TAP bit. */
			if (!(status_reg & WDTE_TAP_BPN_MASK))
				write_reg(pbpctl_dev,
					  status_reg | WDTE_TAP_BPN_MASK,
					  STATUS_TAP_REG_ADDR);
			/*else return BP_NOT_CAP; */
			return BP_OK;
		}
	}
	return BP_NOT_CAP;
}
/* Read the bypass firmware version register (bypass master only). */
int bypass_fw_ver(bpctl_dev_t *pbpctl_dev)
{
	if (!is_bypass_fn(pbpctl_dev))
		return BP_NOT_CAP;
	return read_reg(pbpctl_dev, VER_REG_ADDR);
}
/* Check the PIC firmware signature: 1 = valid, 0 = invalid. */
int bypass_sign_check(bpctl_dev_t *pbpctl_dev)
{
	if (!is_bypass_fn(pbpctl_dev))
		return BP_NOT_CAP;
	return (read_reg(pbpctl_dev, PIC_SIGN_REG_ADDR) ==
		PIC_SIGN_VALUE) ? 1 : 0;
}
/*
 * tx_status() - sample the transmitter-enable state of this port.
 * Returns 1 = TX enabled, 0 = TX disabled, -1 on PHY read failure,
 * BP_NOT_CAP when the port has no TX control. The pin/register to read
 * depends on the NIC family (i80/540/10g9/fiber5/10gb/10g/copper).
 */
static int tx_status(bpctl_dev_t *pbpctl_dev)
{
	uint32_t ctrl = 0;
	bpctl_dev_t *pbpctl_dev_m;
	/* Resolve the bypass master that owns the control registers. */
	if ((is_bypass_fn(pbpctl_dev)) == 1)
		pbpctl_dev_m = pbpctl_dev;
	else
		pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
	if (pbpctl_dev_m == NULL)
		return BP_NOT_CAP;
	if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
		/* Per-port disconnect boards: read the port's own pin.
		 * NOTE(review): reads pbpctl_dev here (not _m) — presumably
		 * intentional since the pin is per-port; confirm. */
		ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL);
		if (pbpctl_dev->bp_i80)
			return ((ctrl & BPCTLI_CTRL_SWDPIN1) != 0 ? 0 : 1);
		if (pbpctl_dev->bp_540) {
			ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
			return ((ctrl & BP10G_SDP1_DATA) != 0 ? 0 : 1);
		}
	}
	if (pbpctl_dev->bp_caps & TX_CTL_CAP) {
		/* PEG5: TX state is the PHY power-down bit. */
		if (PEG5_IF_SERIES(pbpctl_dev->subdevice)) {
			uint16_t mii_reg;
			if (!
			    (bp75_read_phy_reg
			     (pbpctl_dev, BPCTLI_PHY_CONTROL, &mii_reg))) {
				if (mii_reg & BPCTLI_MII_CR_POWER_DOWN)
					return 0;
				else
					return 1;
			}
			return -1;	/* PHY read failed */
		}
		if (pbpctl_dev->bp_10g9) {
			return ((BP10G_READ_REG(pbpctl_dev, ESDP) &
				 BP10G_SDP3_DATA) != 0 ? 0 : 1);
		} else if (pbpctl_dev->bp_fiber5) {
			ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
			if (ctrl & BPCTLI_CTRL_EXT_SDP6_DATA)
				return 0;
			return 1;
		} else if (pbpctl_dev->bp_10gb) {
			/* Broadcom 10Gb: drive GPIO0 as input, then read
			 * the per-function pin. */
			ctrl = BP10GB_READ_REG(pbpctl_dev, MISC_REG_GPIO);
			BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_GPIO,
					 (ctrl | BP10GB_GPIO0_OE_P1) &
					 ~(BP10GB_GPIO0_SET_P1 |
					   BP10GB_GPIO0_CLR_P1));
			if ((pbpctl_dev->func == 1) || (pbpctl_dev->func == 3))
				return (((BP10GB_READ_REG
					  (pbpctl_dev,
					   MISC_REG_GPIO)) & BP10GB_GPIO0_P1) !=
					0 ? 0 : 1);
			else
				return (((BP10GB_READ_REG
					  (pbpctl_dev,
					   MISC_REG_GPIO)) & BP10GB_GPIO0_P0) !=
					0 ? 0 : 1);
		}
		if (!pbpctl_dev->bp_10g) {
			ctrl = BPCTL_READ_REG(pbpctl_dev, CTRL);
			if (pbpctl_dev->bp_i80)
				return ((ctrl & BPCTLI_CTRL_SWDPIN1) !=
					0 ? 0 : 1);
			if (pbpctl_dev->bp_540) {
				ctrl = BP10G_READ_REG(pbpctl_dev, ESDP);
				return ((ctrl & BP10G_SDP1_DATA) != 0 ? 0 : 1);
			}
			return ((ctrl & BPCTLI_CTRL_SWDPIN0) != 0 ? 0 : 1);
		} else
			return ((BP10G_READ_REG(pbpctl_dev, ESDP) &
				 BP10G_SDP0_DATA) != 0 ? 0 : 1);
	}
	return BP_NOT_CAP;
}
/* Report forced-link state on DBI boards: 1 = forced, 0 = not. */
static int bp_force_link_status(bpctl_dev_t *pbpctl_dev)
{
	if (DBI_IF_SERIES(pbpctl_dev->subdevice) &&
	    (pbpctl_dev->bp_10g || pbpctl_dev->bp_10g9))
		return (BP10G_READ_REG(pbpctl_dev, ESDP) &
			BP10G_SDP1_DIR) != 0 ? 1 : 0;
	return BP_NOT_CAP;
}
/*
 * bypass_from_last_read() - sample the latched bypass indication pin
 * (SDP7) on the status port: 1 = bypass occurred, 0 = not.
 */
int bypass_from_last_read(bpctl_dev_t *pbpctl_dev)
{
	uint32_t ctrl_ext;
	bpctl_dev_t *pbpctl_dev_b;

	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;
	pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
	if (!pbpctl_dev_b)
		return BP_NOT_CAP;

	/* Force SDP7 to input, then sample it. */
	ctrl_ext = BPCTL_READ_REG(pbpctl_dev_b, CTRL_EXT);
	BPCTL_BP_WRITE_REG(pbpctl_dev_b, CTRL_EXT,
			   ctrl_ext & ~BPCTLI_CTRL_EXT_SDP7_DIR);
	ctrl_ext = BPCTL_READ_REG(pbpctl_dev_b, CTRL_EXT);
	return (ctrl_ext & BPCTLI_CTRL_EXT_SDP7_DATA) ? 0 : 1;
}
/* Clear the latched bypass indication by pulsing the status port. */
int bypass_status_clear(bpctl_dev_t *pbpctl_dev)
{
	bpctl_dev_t *pbpctl_dev_b;

	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;
	pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
	if (!pbpctl_dev_b)
		return BP_NOT_CAP;

	send_bypass_clear_pulse(pbpctl_dev_b, 1);
	return 0;
}
/* Read the "bypass happened" flag from the status register: 1/0. */
int bypass_flag_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & BP_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_REG_ADDR) &
			 BYPASS_FLAG_MASK) == BYPASS_FLAG_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/* Clear the "bypass happened" flag in the status register. */
int bypass_flag_status_clear(bpctl_dev_t *pbpctl_dev)
{
	uint32_t status_reg;

	if (!(pbpctl_dev->bp_caps & BP_CAP) ||
	    (pbpctl_dev->bp_ext_ver < PXG2BPI_VER))
		return BP_NOT_CAP;

	status_reg = read_reg(pbpctl_dev, STATUS_REG_ADDR);
	write_reg(pbpctl_dev, status_reg & ~BYPASS_FLAG_MASK,
		  STATUS_REG_ADDR);
	return 0;
}
/*
 * bypass_change_status() - report whether the bypass state changed since
 * the last call, and clear the change latch as a side effect.
 * Returns 1/0, or BP_NOT_CAP when unsupported.
 */
int bypass_change_status(bpctl_dev_t *pbpctl_dev)
{
	int ret = BP_NOT_CAP;

	if (pbpctl_dev->bp_caps & BP_STATUS_CHANGE_CAP) {
		/*
		 * The ">= 0x8" and ">= PXG2BPI_VER" branches were byte-for-
		 * byte identical; merged into one (0x8 > PXG2BPI_VER, so the
		 * combined condition covers exactly the same versions).
		 */
		if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
			ret = bypass_flag_status(pbpctl_dev);
			bypass_flag_status_clear(pbpctl_dev);
		} else {
			ret = bypass_from_last_read(pbpctl_dev);
			bypass_status_clear(pbpctl_dev);
		}
	}
	return ret;
}
/* Read the "bypass off" bit from the status register: 1/0. */
int bypass_off_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & BP_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_REG_ADDR) &
			 BYPASS_OFF_MASK) == BYPASS_OFF_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/*
 * bypass_status() - sample the current bypass state from the hardware
 * pin that matches this NIC family. Returns 1 = in bypass, 0 = not,
 * BP_NOT_CAP when it cannot be determined.
 */
static int bypass_status(bpctl_dev_t *pbpctl_dev)
{
	u32 ctrl_ext = 0;
	if (pbpctl_dev->bp_caps & BP_CAP) {
		bpctl_dev_t *pbpctl_dev_b = NULL;
		if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
			return BP_NOT_CAP;
		/* Old Intel boards: SDP7 on the status port, but only while
		 * the state is known (bp_status_un clear). */
		if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
			if (!pbpctl_dev->bp_status_un)
				return (((BPCTL_READ_REG
					  (pbpctl_dev_b,
					   CTRL_EXT)) &
					 BPCTLI_CTRL_EXT_SDP7_DATA) !=
					0 ? 1 : 0);
			else
				return BP_NOT_CAP;
		}
		if (pbpctl_dev->bp_ext_ver >= 0x8) {
			/* Each family exposes the bypass pin differently;
			 * some need the pin driven to output-high first. */
			if (pbpctl_dev->bp_10g9) {
				ctrl_ext = BP10G_READ_REG(pbpctl_dev_b, I2CCTL);
				BP10G_WRITE_REG(pbpctl_dev_b, I2CCTL,
						(ctrl_ext | BP10G_I2C_CLK_OUT));
				return ((BP10G_READ_REG(pbpctl_dev_b, I2CCTL) &
					 BP10G_I2C_CLK_IN) != 0 ? 0 : 1);
			} else if (pbpctl_dev->bp_540) {
				return (((BP10G_READ_REG(pbpctl_dev_b, ESDP)) &
					 BP10G_SDP0_DATA) != 0 ? 0 : 1);
			}
			else if ((pbpctl_dev->bp_fiber5)
				 || (pbpctl_dev->bp_i80)) {
				return (((BPCTL_READ_REG(pbpctl_dev_b, CTRL)) &
					 BPCTLI_CTRL_SWDPIN0) != 0 ? 0 : 1);
			} else if (pbpctl_dev->bp_10gb) {
				/* Broadcom: set GPIO3 as input, then read. */
				ctrl_ext =
				    BP10GB_READ_REG(pbpctl_dev, MISC_REG_GPIO);
				BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_GPIO,
						 (ctrl_ext | BP10GB_GPIO3_OE_P0)
						 & ~(BP10GB_GPIO3_SET_P0 |
						     BP10GB_GPIO3_CLR_P0));
				return (((BP10GB_READ_REG
					  (pbpctl_dev,
					   MISC_REG_GPIO)) & BP10GB_GPIO3_P0) !=
					0 ? 0 : 1);
			}
			else if (!pbpctl_dev->bp_10g)
				return (((BPCTL_READ_REG
					  (pbpctl_dev_b,
					   CTRL_EXT)) &
					 BPCTLI_CTRL_EXT_SDP7_DATA) !=
					0 ? 0 : 1);
			else {
				ctrl_ext = BP10G_READ_REG(pbpctl_dev_b, EODSDP);
				BP10G_WRITE_REG(pbpctl_dev_b, EODSDP,
						(ctrl_ext |
						 BP10G_SDP7_DATA_OUT));
				return ((BP10G_READ_REG(pbpctl_dev_b, EODSDP) &
					 BP10G_SDP7_DATA_IN) != 0 ? 0 : 1);
			}
		} else if (pbpctl_dev->media_type == bp_copper) {
			return (((BPCTL_READ_REG(pbpctl_dev_b, CTRL)) &
				 BPCTLI_CTRL_SWDPIN1) != 0 ? 1 : 0);
		} else {
			/* Fallback: clear the latch, then read it back. */
			if ((bypass_status_clear(pbpctl_dev)) >= 0)
				return bypass_from_last_read(pbpctl_dev);
		}
	}
	return BP_NOT_CAP;
}
/*
 * Report the power-up default: 1 = normal, 0 = bypass (note the
 * inverted mapping: mask set means bypass-at-power-up, reported as 0).
 */
int default_pwron_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & SW_CTL_CAP) &&
	    (pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_REG_ADDR) &
			 DFLT_PWRON_MASK) == DFLT_PWRON_MASK) ? 0 : 1;
	return BP_NOT_CAP;
}
/*
 * Report the power-off default: 1 = normal, 0 = bypass (inverted
 * mapping, same convention as default_pwron_status()).
 */
static int default_pwroff_status(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP) ||
	    !(pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP))
		return BP_NOT_CAP;
	return ((read_reg(pbpctl_dev, STATUS_REG_ADDR) &
		 DFLT_PWROFF_MASK) == DFLT_PWROFF_MASK) ? 0 : 1;
}
/* 1 = bypass capability currently disabled, 0 = enabled. */
int dis_bypass_cap_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & BP_DIS_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_REG_ADDR) &
			 DIS_BYPASS_CAP_MASK) ==
			DIS_BYPASS_CAP_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/* 1 = firmware command interface enabled, 0 = disabled. */
int cmd_en_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & SW_CTL_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_REG_ADDR) &
			 CMND_EN_MASK) == CMND_EN_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/* 1 = hardware watchdog enabled, 0 = disabled. */
int wdt_en_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & WD_CTL_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_REG_ADDR) &
			 WDT_EN_MASK) == WDT_EN_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/*
 * wdt_programmed() - report the programmed watchdog timeout in *timeout:
 * 0 when disabled, -1 when unknown, otherwise the interval in msec
 * (hardware encodes it as 100ms << wdt_val on PXG2BPI+).
 */
int wdt_programmed(bpctl_dev_t *pbpctl_dev, int *timeout)
{
	if (!(pbpctl_dev->bp_caps & WD_CTL_CAP))
		return BP_NOT_CAP;

	if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
		if (read_reg(pbpctl_dev, STATUS_REG_ADDR) & WDT_EN_MASK) {
			u8 wdt_val = read_reg(pbpctl_dev, WDT_REG_ADDR);
			*timeout = (1 << wdt_val) * 100;
		} else {
			*timeout = 0;
		}
	} else {
		int curr_status = pbpctl_dev->wdt_status;

		if (curr_status == WDT_STATUS_UNKNOWN)
			*timeout = -1;
		else
			*timeout = (curr_status == 0) ?
			    0 : pbpctl_dev->bypass_timer_interval;
	}
	return 0;
}
/* 1 = board supports bypass, 0 = it does not. */
int bypass_support(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;

	if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
		return ((read_reg(pbpctl_dev, PRODUCT_CAP_REG_ADDR) &
			 BYPASS_SUPPORT_MASK) ==
			BYPASS_SUPPORT_MASK) ? 1 : 0;
	/* PXG2BPI boards always support bypass. */
	if (pbpctl_dev->bp_ext_ver == PXG2BPI_VER)
		return 1;
	return 0;
}
/* 1 = board supports TAP mode, 0 = it does not. */
int tap_support(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;

	if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
		return ((read_reg(pbpctl_dev, PRODUCT_CAP_REG_ADDR) &
			 TAP_SUPPORT_MASK) == TAP_SUPPORT_MASK) ? 1 : 0;
	/* PXG2BPI and older never support TAP. */
	return 0;
}
/* 1 = board supports "normal" mode, 0 = it does not. */
int normal_support(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;

	/* The capability register stores the *un*supported bit. */
	if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER)
		return ((read_reg(pbpctl_dev, PRODUCT_CAP_REG_ADDR) &
			 NORMAL_UNSUPPORT_MASK) ==
			NORMAL_UNSUPPORT_MASK) ? 0 : 1;
	return 1;
}
/* Read the raw product-capability register (PXG2TBPI+ only). */
int get_bp_prod_caps(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP) ||
	    (pbpctl_dev->bp_ext_ver < PXG2TBPI_VER))
		return BP_NOT_CAP;
	return read_reg(pbpctl_dev, PRODUCT_CAP_REG_ADDR);
}
/* Read the "TAP happened" flag: 1/0. */
int tap_flag_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & TAP_STATUS_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR) &
			 TAP_FLAG_MASK) == TAP_FLAG_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/* Clear the "TAP happened" flag in the TAP status register. */
int tap_flag_status_clear(bpctl_dev_t *pbpctl_dev)
{
	uint32_t status_reg;

	if (!(pbpctl_dev->bp_caps & TAP_STATUS_CAP) ||
	    (pbpctl_dev->bp_ext_ver < PXG2TBPI_VER))
		return BP_NOT_CAP;

	status_reg = read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR);
	write_reg(pbpctl_dev, status_reg & ~TAP_FLAG_MASK,
		  STATUS_TAP_REG_ADDR);
	return 0;
}
/*
 * tap_change_status() - report whether the TAP state changed since the
 * last call, clearing the change latch as a side effect. Returns 1/0
 * or BP_NOT_CAP.
 */
int tap_change_status(bpctl_dev_t *pbpctl_dev)
{
	int ret = BP_NOT_CAP;

	if ((pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) &&
	    (pbpctl_dev->bp_caps & TAP_CAP)) {
		if (pbpctl_dev->bp_caps & BP_CAP) {
			ret = tap_flag_status(pbpctl_dev);
			tap_flag_status_clear(pbpctl_dev);
		} else {
			ret = bypass_from_last_read(pbpctl_dev);
			bypass_status_clear(pbpctl_dev);
		}
	}
	return ret;
}
/* Read the "TAP off" bit: 1/0. */
int tap_off_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & TAP_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR) &
			 TAP_OFF_MASK) == TAP_OFF_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/*
 * tap_status() - sample the current TAP state from hardware.
 * Returns 1 = in TAP mode, 0 = not, BP_NOT_CAP when unknown.
 */
int tap_status(bpctl_dev_t *pbpctl_dev)
{
	u32 ctrl_ext = 0;
	if (pbpctl_dev->bp_caps & TAP_CAP) {
		bpctl_dev_t *pbpctl_dev_b = NULL;
		if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
			return BP_NOT_CAP;
		if (pbpctl_dev->bp_ext_ver >= 0x8) {
			/* 1G parts expose the TAP pin on SDP6; 10G parts
			 * need the SDP6 output driven before sampling. */
			if (!pbpctl_dev->bp_10g)
				return (((BPCTL_READ_REG
					  (pbpctl_dev_b,
					   CTRL_EXT)) &
					 BPCTLI_CTRL_EXT_SDP6_DATA) !=
					0 ? 0 : 1);
			else {
				ctrl_ext = BP10G_READ_REG(pbpctl_dev_b, EODSDP);
				BP10G_WRITE_REG(pbpctl_dev_b, EODSDP,
						(ctrl_ext |
						 BP10G_SDP6_DATA_OUT));
				return ((BP10G_READ_REG(pbpctl_dev_b, EODSDP) &
					 BP10G_SDP6_DATA_IN) != 0 ? 0 : 1);
			}
		} else if (pbpctl_dev->media_type == bp_copper)
			return (((BPCTL_READ_REG(pbpctl_dev, CTRL)) &
				 BPCTLI_CTRL_SWDPIN0) != 0 ? 1 : 0);
		else {
			/* Fallback: clear the latch, then read it back. */
			if ((bypass_status_clear(pbpctl_dev)) >= 0)
				return bypass_from_last_read(pbpctl_dev);
		}
	}
	return BP_NOT_CAP;
}
/* 1 = TAP is the power-up default, 0 = it is not. */
int default_pwron_tap_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR) &
			 DFLT_PWRON_TAP_MASK) ==
			DFLT_PWRON_TAP_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/*
 * 1 = TAP capability currently disabled, 0 = enabled.
 * NOTE(review): the gate is TAP_PWUP_CTL_CAP rather than TAP_DIS_CAP,
 * matching the original code — confirm this is intentional.
 */
int dis_tap_cap_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER))
		return ((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR) &
			 DIS_TAP_CAP_MASK) ==
			DIS_TAP_CAP_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/* Read the "disconnect happened" flag: 1/0 (fw ext ver >= 8). */
int disc_flag_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & DISC_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= 0x8))
		return ((read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR) &
			 DISC_FLAG_MASK) == DISC_FLAG_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/* Clear the "disconnect happened" flag (fw ext ver >= 8). */
int disc_flag_status_clear(bpctl_dev_t *pbpctl_dev)
{
	uint32_t status_reg;

	if (!(pbpctl_dev->bp_caps & DISC_CAP) ||
	    (pbpctl_dev->bp_ext_ver < 0x8))
		return BP_NOT_CAP;

	status_reg = read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR);
	write_reg(pbpctl_dev, status_reg & ~DISC_FLAG_MASK,
		  STATUS_DISC_REG_ADDR);
	return BP_OK;
}
/*
 * disc_change_status() - report whether the disconnect state changed
 * since the last call, clearing the change latch as a side effect.
 */
int disc_change_status(bpctl_dev_t *pbpctl_dev)
{
	int ret;

	if (!(pbpctl_dev->bp_caps & DISC_CAP))
		return BP_NOT_CAP;

	ret = disc_flag_status(pbpctl_dev);
	disc_flag_status_clear(pbpctl_dev);
	return ret;
}
/*
 * disc_off_status() - sample whether disconnect is OFF on this port:
 * 1 = disconnect off (port connected), 0 = disconnected,
 * BP_NOT_CAP when unknown. The pin/register depends on the NIC family.
 */
int disc_off_status(bpctl_dev_t *pbpctl_dev)
{
	bpctl_dev_t *pbpctl_dev_b = NULL;
	u32 ctrl_ext = 0;
	if (pbpctl_dev->bp_caps & DISC_CAP) {
		if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
			return BP_NOT_CAP;
		/* DISCF boards report it directly in the DISC register. */
		if (DISCF_IF_SERIES(pbpctl_dev->subdevice))
			return ((((read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR)) &
				  DISC_OFF_MASK) == DISC_OFF_MASK) ? 1 : 0);
		if (pbpctl_dev->bp_i80) {
			return (((BPCTL_READ_REG(pbpctl_dev_b, CTRL_EXT)) &
				 BPCTLI_CTRL_EXT_SDP6_DATA) != 0 ? 1 : 0);
		}
		if (pbpctl_dev->bp_540) {
			ctrl_ext = BP10G_READ_REG(pbpctl_dev_b, ESDP);
			return ((BP10G_READ_REG(pbpctl_dev_b, ESDP) &
				 BP10G_SDP2_DATA) != 0 ? 1 : 0);
		}
		if (pbpctl_dev->media_type == bp_copper) {
#if 0
			return ((((read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR)) &
				  DISC_OFF_MASK) == DISC_OFF_MASK) ? 1 : 0);
#endif
			if (!pbpctl_dev->bp_10g)
				return (((BPCTL_READ_REG(pbpctl_dev_b, CTRL)) &
					 BPCTLI_CTRL_SWDPIN1) != 0 ? 1 : 0);
			else
				return ((BP10G_READ_REG(pbpctl_dev_b, ESDP) &
					 BP10G_SDP1_DATA) != 0 ? 1 : 0);
		} else {
			/* Fiber/CX4: per-family pins, some need the output
			 * latch driven before the input can be sampled. */
			if (pbpctl_dev->bp_10g9) {
				ctrl_ext = BP10G_READ_REG(pbpctl_dev_b, I2CCTL);
				BP10G_WRITE_REG(pbpctl_dev_b, I2CCTL,
						(ctrl_ext |
						 BP10G_I2C_DATA_OUT));
				return ((BP10G_READ_REG(pbpctl_dev_b, I2CCTL) &
					 BP10G_I2C_DATA_IN) != 0 ? 1 : 0);
			} else if (pbpctl_dev->bp_fiber5) {
				return (((BPCTL_READ_REG(pbpctl_dev_b, CTRL)) &
					 BPCTLI_CTRL_SWDPIN1) != 0 ? 1 : 0);
			} else if (pbpctl_dev->bp_10gb) {
				/* Broadcom: set GPIO3 as input, then read. */
				ctrl_ext =
				    BP10GB_READ_REG(pbpctl_dev, MISC_REG_GPIO);
				BP10GB_WRITE_REG(pbpctl_dev, MISC_REG_GPIO,
						 (ctrl_ext | BP10GB_GPIO3_OE_P1)
						 & ~(BP10GB_GPIO3_SET_P1 |
						     BP10GB_GPIO3_CLR_P1));
				return (((BP10GB_READ_REG
					  (pbpctl_dev,
					   MISC_REG_GPIO)) & BP10GB_GPIO3_P1) !=
					0 ? 1 : 0);
			}
			if (!pbpctl_dev->bp_10g) {
				return (((BPCTL_READ_REG
					  (pbpctl_dev_b,
					   CTRL_EXT)) &
					 BPCTLI_CTRL_EXT_SDP6_DATA) !=
					0 ? 1 : 0);
			} else {
				ctrl_ext = BP10G_READ_REG(pbpctl_dev_b, EODSDP);
				BP10G_WRITE_REG(pbpctl_dev_b, EODSDP,
						(ctrl_ext |
						 BP10G_SDP6_DATA_OUT));
				return (((BP10G_READ_REG(pbpctl_dev_b, EODSDP))
					 & BP10G_SDP6_DATA_IN) != 0 ? 1 : 0);
			}
		}
	}
	return BP_NOT_CAP;
}
/* 1 = port is disconnected, 0 = connected (inverse of disc_off_status). */
static int disc_status(bpctl_dev_t *pbpctl_dev)
{
	int off;

	if (!(pbpctl_dev->bp_caps & DISC_CAP))
		return BP_NOT_CAP;

	off = disc_off_status(pbpctl_dev);
	if (off < 0)
		return off;	/* propagate BP_NOT_CAP / errors */
	return (off == 0) ? 1 : 0;
}
/* 1 = disconnect is the power-up default, 0 = it is not. */
int default_pwron_disc_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= 0x8))
		return ((read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR) &
			 DFLT_PWRON_DISC_MASK) ==
			DFLT_PWRON_DISC_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/* 1 = disconnect capability currently disabled, 0 = enabled. */
int dis_disc_cap_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & DIS_DISC_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= 0x8))
		return ((read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR) &
			 DIS_DISC_CAP_MASK) ==
			DIS_DISC_CAP_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/*
 * disc_port_status() - per-port TX-disable state: 1 = disabled, 0 = not.
 * Port A (master/bypass fn) uses TX_DISA, port B uses TX_DISB.
 */
int disc_port_status(bpctl_dev_t *pbpctl_dev)
{
	bpctl_dev_t *pbpctl_dev_m;
	int is_master = (is_bypass_fn(pbpctl_dev) == 1);

	pbpctl_dev_m = is_master ? pbpctl_dev
				 : get_master_port_fn(pbpctl_dev);
	if (pbpctl_dev_m == NULL)
		return BP_NOT_CAP;
	if (!(pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX))
		return BP_NOT_CAP;

	if (is_master)
		return ((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR) &
			 TX_DISA_MASK) == TX_DISA_MASK) ? 1 : 0;
	return ((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR) &
		 TX_DISB_MASK) == TX_DISB_MASK) ? 1 : 0;
}
/*
 * default_pwron_disc_port_status() - would report the per-port power-up
 * TX-disable default.
 * NOTE(review): every path currently returns ret == BP_NOT_CAP — the
 * real readback is commented out, so this is effectively a stub.
 */
int default_pwron_disc_port_status(bpctl_dev_t *pbpctl_dev)
{
	int ret = BP_NOT_CAP;
	bpctl_dev_t *pbpctl_dev_m;
	if ((is_bypass_fn(pbpctl_dev)) == 1)
		pbpctl_dev_m = pbpctl_dev;
	else
		pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
	if (pbpctl_dev_m == NULL)
		return BP_NOT_CAP;
	if (pbpctl_dev_m->bp_caps_ex & DISC_PORT_CAP_EX) {
		if (is_bypass_fn(pbpctl_dev) == 1)
			return ret;
		/* return((((read_reg(pbpctl_dev,STATUS_TAP_REG_ADDR)) & TX_DISA_MASK)==TX_DISA_MASK)?1:0); */
		else
			return ret;
		/* return((((read_reg(pbpctl_dev,STATUS_TAP_REG_ADDR)) & TX_DISA_MASK)==TX_DISA_MASK)?1:0); */
	}
	return ret;
}
/*
 * wdt_exp_mode_status() - report which state the board enters on
 * watchdog expiry: 0 = bypass, 1 = TAP, 2 = disconnect.
 * Older firmware has a fixed mode; newer firmware stores it as flag
 * bits in the DISC/TAP status registers.
 */
int wdt_exp_mode_status(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
		if (pbpctl_dev->bp_ext_ver <= PXG2BPI_VER)
			return 0;	/* bypass mode */
		else if (pbpctl_dev->bp_ext_ver == PXG2TBPI_VER)
			return 1;	/* tap mode */
		else if (pbpctl_dev->bp_ext_ver >= PXE2TBPI_VER) {
			/* fw ext ver >= 8: disconnect mode takes priority. */
			if (pbpctl_dev->bp_ext_ver >= 0x8) {
				if (((read_reg
				      (pbpctl_dev,
				       STATUS_DISC_REG_ADDR)) &
				     WDTE_DISC_BPN_MASK) == WDTE_DISC_BPN_MASK)
					return 2;
			}
			return ((((read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR)) &
				  WDTE_TAP_BPN_MASK) ==
				 WDTE_TAP_BPN_MASK) ? 1 : 0);
		}
	}
	return BP_NOT_CAP;
}
/* Read the TPL2 (two-port-link) flag: 1/0. */
int tpl2_flag_status(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps_ex & TPL2_CAP_EX))
		return BP_NOT_CAP;
	return ((read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR) &
		 TPL2_FLAG_MASK) == TPL2_FLAG_MASK) ? 1 : 0;
}
/*
 * tpl_hw_status() - sample the TPL hardware pin on TPL-series boards:
 * 1 = active, 0 = inactive.
 * NOTE(review): the status port is fetched into pbpctl_dev_b but the
 * register is read from pbpctl_dev itself — confirm which device the
 * pin really lives on.
 */
int tpl_hw_status(bpctl_dev_t *pbpctl_dev)
{
	bpctl_dev_t *pbpctl_dev_b = NULL;
	if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
		return BP_NOT_CAP;
	if (TPL_IF_SERIES(pbpctl_dev->subdevice))
		return (((BPCTL_READ_REG(pbpctl_dev, CTRL)) &
			 BPCTLI_CTRL_SWDPIN0) != 0 ? 1 : 0);
	return BP_NOT_CAP;
}
/* 1 = "wait at power-up" enabled, 0 = disabled (fw ext ver >= 8). */
int bp_wait_at_pwup_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & SW_CTL_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= 0x8))
		return ((read_reg(pbpctl_dev, CONT_CONFIG_REG_ADDR) &
			 WAIT_AT_PWUP_MASK) ==
			WAIT_AT_PWUP_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/* 1 = hardware reset enabled, 0 = disabled (fw ext ver >= 8). */
int bp_hw_reset_status(bpctl_dev_t *pbpctl_dev)
{
	if ((pbpctl_dev->bp_caps & SW_CTL_CAP) &&
	    (pbpctl_dev->bp_ext_ver >= 0x8))
		return ((read_reg(pbpctl_dev, CONT_CONFIG_REG_ADDR) &
			 EN_HW_RESET_MASK) ==
			EN_HW_RESET_MASK) ? 1 : 0;
	return BP_NOT_CAP;
}
/*
 * std_nic_status() - 1 if the adapter is currently in standard-NIC mode
 * (watchdog off, bypass/TAP/disc all in their NIC-mode positions),
 * 0 otherwise, BP_NOT_CAP when it cannot be determined.
 */
int std_nic_status(bpctl_dev_t *pbpctl_dev)
{
	int status_val = 0;
	if (pbpctl_dev->bp_caps & STD_NIC_CAP) {
		if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
			return BP_NOT_CAP;
		/* fw ext ver >= 8 reports it in a single bit. */
		if (pbpctl_dev->bp_ext_ver >= BP_FW_EXT_VER8) {
			return ((((read_reg(pbpctl_dev, STATUS_DISC_REG_ADDR)) &
				  STD_NIC_ON_MASK) == STD_NIC_ON_MASK) ? 1 : 0);
		}
		/* Older firmware: every sub-feature must be in NIC state. */
		if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
			if (pbpctl_dev->bp_caps & BP_CAP) {
				status_val =
				    read_reg(pbpctl_dev, STATUS_REG_ADDR);
				if (((!(status_val & WDT_EN_MASK))
				     && ((status_val & STD_NIC_MASK) ==
					 STD_NIC_MASK)))
					status_val = 1;
				else
					return 0;
			}
			if (pbpctl_dev->bp_caps & TAP_CAP) {
				status_val =
				    read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR);
				if ((status_val & STD_NIC_TAP_MASK) ==
				    STD_NIC_TAP_MASK)
					status_val = 1;
				else
					return 0;
			}
			/* NOTE(review): this second TAP_CAP gate calls
			 * disc_off_status() — it looks like a copy-paste of
			 * the block above and was probably meant to test
			 * DISC_CAP; confirm against the firmware spec before
			 * changing it. */
			if (pbpctl_dev->bp_caps & TAP_CAP) {
				if ((disc_off_status(pbpctl_dev)))
					status_val = 1;
				else
					return 0;
			}
			return status_val;
		}
	}
	return BP_NOT_CAP;
}
/******************************************************/
/**************SW_INIT*********************************/
/******************************************************/
void bypass_caps_init(bpctl_dev_t *pbpctl_dev)
{
u_int32_t ctrl_ext = 0;
bpctl_dev_t *pbpctl_dev_m = NULL;
#ifdef BYPASS_DEBUG
int ret = 0;
if (!(INTEL_IF_SERIES(adapter->bp_device_block.subdevice))) {
ret = read_reg(pbpctl_dev, VER_REG_ADDR);
printk("VER_REG reg1=%x\n", ret);
ret = read_reg(pbpctl_dev, PRODUCT_CAP_REG_ADDR);
printk("PRODUCT_CAP reg=%x\n", ret);
ret = read_reg(pbpctl_dev, STATUS_TAP_REG_ADDR);
printk("STATUS_TAP reg1=%x\n", ret);
ret = read_reg(pbpctl_dev, 0x7);
printk("SIG_REG reg1=%x\n", ret);
ret = read_reg(pbpctl_dev, STATUS_REG_ADDR);
printk("STATUS_REG_ADDR=%x\n", ret);
ret = read_reg(pbpctl_dev, WDT_REG_ADDR);
printk("WDT_REG_ADDR=%x\n", ret);
ret = read_reg(pbpctl_dev, TMRL_REG_ADDR);
printk("TMRL_REG_ADDR=%x\n", ret);
ret = read_reg(pbpctl_dev, TMRH_REG_ADDR);
printk("TMRH_REG_ADDR=%x\n", ret);
}
#endif
if ((pbpctl_dev->bp_fiber5) || (pbpctl_dev->bp_10g9)) {
pbpctl_dev->media_type = bp_fiber;
} else if (pbpctl_dev->bp_10gb) {
if (BP10GB_CX4_SERIES(pbpctl_dev->subdevice))
pbpctl_dev->media_type = bp_cx4;
else
pbpctl_dev->media_type = bp_fiber;
}
else if (pbpctl_dev->bp_540)
pbpctl_dev->media_type = bp_none;
else if (!pbpctl_dev->bp_10g) {
ctrl_ext = BPCTL_READ_REG(pbpctl_dev, CTRL_EXT);
if ((ctrl_ext & BPCTLI_CTRL_EXT_LINK_MODE_MASK) == 0x0)
pbpctl_dev->media_type = bp_copper;
else
pbpctl_dev->media_type = bp_fiber;
} else {
if (BP10G_CX4_SERIES(pbpctl_dev->subdevice))
pbpctl_dev->media_type = bp_cx4;
else
pbpctl_dev->media_type = bp_fiber;
}
if (is_bypass_fn(pbpctl_dev)) {
pbpctl_dev->bp_caps |= BP_PWOFF_ON_CAP;
if (pbpctl_dev->media_type == bp_fiber)
pbpctl_dev->bp_caps |=
(TX_CTL_CAP | TX_STATUS_CAP | TPL_CAP);
if (TPL_IF_SERIES(pbpctl_dev->subdevice)) {
pbpctl_dev->bp_caps |= TPL_CAP;
}
if (INTEL_IF_SERIES(pbpctl_dev->subdevice)) {
pbpctl_dev->bp_caps |=
(BP_CAP | BP_STATUS_CAP | SW_CTL_CAP |
BP_PWUP_ON_CAP | BP_PWUP_OFF_CAP | BP_PWOFF_OFF_CAP
| WD_CTL_CAP | WD_STATUS_CAP | STD_NIC_CAP |
WD_TIMEOUT_CAP);
pbpctl_dev->bp_ext_ver = OLD_IF_VER;
return;
}
if ((pbpctl_dev->bp_fw_ver == 0xff) &&
OLD_IF_SERIES(pbpctl_dev->subdevice)) {
pbpctl_dev->bp_caps |=
(BP_CAP | BP_STATUS_CAP | BP_STATUS_CHANGE_CAP |
SW_CTL_CAP | BP_PWUP_ON_CAP | WD_CTL_CAP |
WD_STATUS_CAP | WD_TIMEOUT_CAP);
pbpctl_dev->bp_ext_ver = OLD_IF_VER;
return;
}
else {
switch (pbpctl_dev->bp_fw_ver) {
case BP_FW_VER_A0:
case BP_FW_VER_A1:{
pbpctl_dev->bp_ext_ver =
(pbpctl_dev->
bp_fw_ver & EXT_VER_MASK);
break;
}
default:{
if ((bypass_sign_check(pbpctl_dev)) !=
1) {
pbpctl_dev->bp_caps = 0;
return;
}
pbpctl_dev->bp_ext_ver =
(pbpctl_dev->
bp_fw_ver & EXT_VER_MASK);
}
}
}
if (pbpctl_dev->bp_ext_ver == PXG2BPI_VER)
pbpctl_dev->bp_caps |=
(BP_CAP | BP_STATUS_CAP | BP_STATUS_CHANGE_CAP |
SW_CTL_CAP | BP_DIS_CAP | BP_DIS_STATUS_CAP |
BP_PWUP_ON_CAP | BP_PWUP_OFF_CAP | BP_PWUP_CTL_CAP
| WD_CTL_CAP | STD_NIC_CAP | WD_STATUS_CAP |
WD_TIMEOUT_CAP);
else if (pbpctl_dev->bp_ext_ver >= PXG2TBPI_VER) {
int cap_reg;
pbpctl_dev->bp_caps |=
(SW_CTL_CAP | WD_CTL_CAP | WD_STATUS_CAP |
WD_TIMEOUT_CAP);
cap_reg = get_bp_prod_caps(pbpctl_dev);
if ((cap_reg & NORMAL_UNSUPPORT_MASK) ==
NORMAL_UNSUPPORT_MASK)
pbpctl_dev->bp_caps |= NIC_CAP_NEG;
else
pbpctl_dev->bp_caps |= STD_NIC_CAP;
if ((normal_support(pbpctl_dev)) == 1)
pbpctl_dev->bp_caps |= STD_NIC_CAP;
else
pbpctl_dev->bp_caps |= NIC_CAP_NEG;
if ((cap_reg & BYPASS_SUPPORT_MASK) ==
BYPASS_SUPPORT_MASK) {
pbpctl_dev->bp_caps |=
(BP_CAP | BP_STATUS_CAP |
BP_STATUS_CHANGE_CAP | BP_DIS_CAP |
BP_DIS_STATUS_CAP | BP_PWUP_ON_CAP |
BP_PWUP_OFF_CAP | BP_PWUP_CTL_CAP);
if (pbpctl_dev->bp_ext_ver >= BP_FW_EXT_VER7)
pbpctl_dev->bp_caps |=
BP_PWOFF_ON_CAP | BP_PWOFF_OFF_CAP |
BP_PWOFF_CTL_CAP;
}
if ((cap_reg & TAP_SUPPORT_MASK) == TAP_SUPPORT_MASK) {
pbpctl_dev->bp_caps |=
(TAP_CAP | TAP_STATUS_CAP |
TAP_STATUS_CHANGE_CAP | TAP_DIS_CAP |
TAP_DIS_STATUS_CAP | TAP_PWUP_ON_CAP |
TAP_PWUP_OFF_CAP | TAP_PWUP_CTL_CAP);
}
if (pbpctl_dev->bp_ext_ver >= BP_FW_EXT_VER8) {
if ((cap_reg & DISC_SUPPORT_MASK) ==
DISC_SUPPORT_MASK)
pbpctl_dev->bp_caps |=
(DISC_CAP | DISC_DIS_CAP |
DISC_PWUP_CTL_CAP);
if ((cap_reg & TPL2_SUPPORT_MASK) ==
TPL2_SUPPORT_MASK) {
pbpctl_dev->bp_caps_ex |= TPL2_CAP_EX;
pbpctl_dev->bp_caps |= TPL_CAP;
pbpctl_dev->bp_tpl_flag =
tpl2_flag_status(pbpctl_dev);
}
}
if (pbpctl_dev->bp_ext_ver >= BP_FW_EXT_VER9) {
if ((cap_reg & DISC_PORT_SUPPORT_MASK) ==
DISC_PORT_SUPPORT_MASK) {
pbpctl_dev->bp_caps_ex |=
DISC_PORT_CAP_EX;
pbpctl_dev->bp_caps |=
(TX_CTL_CAP | TX_STATUS_CAP);
}
}
}
if (pbpctl_dev->bp_ext_ver >= PXG2BPI_VER) {
if ((read_reg(pbpctl_dev, STATUS_REG_ADDR)) &
WDT_EN_MASK)
pbpctl_dev->wdt_status = WDT_STATUS_EN;
else
pbpctl_dev->wdt_status = WDT_STATUS_DIS;
}
} else if ((P2BPFI_IF_SERIES(pbpctl_dev->subdevice)) ||
(PEGF5_IF_SERIES(pbpctl_dev->subdevice)) ||
(PEGF80_IF_SERIES(pbpctl_dev->subdevice)) ||
(BP10G9_IF_SERIES(pbpctl_dev->subdevice))) {
pbpctl_dev->bp_caps |= (TX_CTL_CAP | TX_STATUS_CAP);
}
if ((pbpctl_dev->subdevice & 0xa00) == 0xa00)
pbpctl_dev->bp_caps |= (TX_CTL_CAP | TX_STATUS_CAP);
if (PEG5_IF_SERIES(pbpctl_dev->subdevice))
pbpctl_dev->bp_caps |= (TX_CTL_CAP | TX_STATUS_CAP);
if (BP10GB_IF_SERIES(pbpctl_dev->subdevice)) {
pbpctl_dev->bp_caps &= ~(TX_CTL_CAP | TX_STATUS_CAP);
}
pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
if (pbpctl_dev_m != NULL) {
int cap_reg = 0;
if (pbpctl_dev_m->bp_ext_ver >= 0x9) {
cap_reg = get_bp_prod_caps(pbpctl_dev_m);
if ((cap_reg & DISC_PORT_SUPPORT_MASK) ==
DISC_PORT_SUPPORT_MASK)
pbpctl_dev->bp_caps |=
(TX_CTL_CAP | TX_STATUS_CAP);
pbpctl_dev->bp_caps_ex |= DISC_PORT_CAP_EX;
}
}
}
/* Bring the device out of bypass/tap and stop the watchdog.
 * Returns 0 on success or a negative cmnd_on() error. */
int bypass_off_init(bpctl_dev_t *pbpctl_dev)
{
	int ret;

	ret = cmnd_on(pbpctl_dev);
	if (ret < 0)
		return ret;

	/* Old Intel-based boards only support disabling the bypass cap. */
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
		return dis_bypass_cap(pbpctl_dev);

	wdt_off(pbpctl_dev);
	if (pbpctl_dev->bp_caps & BP_CAP)
		bypass_off(pbpctl_dev);
	if (pbpctl_dev->bp_caps & TAP_CAP)
		tap_off(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return 0;
}
/*
 * Stop the automatic watchdog-reset timer for this device.
 * With BP_SELF_TEST enabled, also restore the net_device ops that
 * set_bp_self_test() may have swapped in for the self-test xmit hook.
 */
void remove_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
{
#ifdef BP_SELF_TEST
	bpctl_dev_t *pbpctl_dev_sl = NULL;
#endif
	if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
		/* Synchronously cancel the periodic wd_reset_timer. */
		del_timer_sync(&pbpctl_dev->bp_timer);
#ifdef BP_SELF_TEST
		pbpctl_dev_sl = get_status_port_fn(pbpctl_dev);
		if (pbpctl_dev_sl && (pbpctl_dev_sl->ndev)) {
			/* old_ops non-NULL means the self-test hook is
			 * currently installed; put the original ops back
			 * under RTNL. */
			if ((pbpctl_dev_sl->ndev->netdev_ops)
			    && (pbpctl_dev_sl->old_ops)) {
				rtnl_lock();
				pbpctl_dev_sl->ndev->netdev_ops =
				    pbpctl_dev_sl->old_ops;
				pbpctl_dev_sl->old_ops = NULL;
				rtnl_unlock();
			}
		}
#endif
	}
}
/* Prepare (but do not start) the auto watchdog-reset timer.
 * Returns 1 on success, BP_NOT_CAP if watchdog control is unsupported. */
int init_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & WD_CTL_CAP))
		return BP_NOT_CAP;

	init_timer(&pbpctl_dev->bp_timer);
	pbpctl_dev->bp_timer.function = &wd_reset_timer;
	pbpctl_dev->bp_timer.data = (unsigned long)pbpctl_dev;
	return 1;
}
#ifdef BP_SELF_TEST
/*
 * Self-test transmit hook installed over the slave net_device's
 * ndo_start_xmit.  Intercepts ETH_P_BPTEST frames to kick the master
 * port's watchdog (and drop out of bypass if needed); everything else
 * is forwarded to the saved original xmit handler.
 *
 * Fix: the lookup loop tested bpctl_dev_arr[idx_dev].ndev BEFORE
 * checking idx_dev < device_num, reading one element past the
 * populated range; the bounds check must come first.
 */
int bp_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	bpctl_dev_t *pbpctl_dev = NULL, *pbpctl_dev_m = NULL;
	int idx_dev = 0;
	struct ethhdr *eth = (struct ethhdr *)skb->data;

	for (idx_dev = 0;
	     ((idx_dev < device_num) && (bpctl_dev_arr[idx_dev].ndev != NULL));
	     idx_dev++) {
		if (bpctl_dev_arr[idx_dev].ndev == dev) {
			pbpctl_dev = &bpctl_dev_arr[idx_dev];
			break;
		}
	}
	if (!pbpctl_dev)
		return 1;

	if ((htons(ETH_P_BPTEST) == eth->h_proto)) {
		pbpctl_dev_m = get_master_port_fn(pbpctl_dev);
		if (pbpctl_dev_m) {
			/* If the master is in bypass, take it out. */
			if (bypass_status(pbpctl_dev_m)) {
				cmnd_on(pbpctl_dev_m);
				bypass_off(pbpctl_dev_m);
				cmnd_off(pbpctl_dev_m);
			}
			wdt_timer_reload(pbpctl_dev_m);
		}
		/* Test frame is consumed here, never transmitted. */
		dev_kfree_skb_irq(skb);
		return 0;
	}
	return pbpctl_dev->hard_start_xmit_save(skb, dev);
}
#endif
/* Set the auto watchdog-reset interval; param == 0 leaves the timer
 * stopped.  Intel-series boards are clamped to WDT_AUTO_MIN_INT. */
int set_bypass_wd_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
{
	unsigned int interval;

	if (!(pbpctl_dev->bp_caps & WD_CTL_CAP))
		return BP_NOT_CAP;

	if (pbpctl_dev->reset_time != param) {
		interval = param;
		if (INTEL_IF_SERIES(pbpctl_dev->subdevice) &&
		    interval < WDT_AUTO_MIN_INT)
			interval = WDT_AUTO_MIN_INT;
		pbpctl_dev->reset_time = interval;

		/* Non-zero interval: fire the reset timer immediately. */
		if (param)
			mod_timer(&pbpctl_dev->bp_timer, jiffies);
	}
	return 0;
}
/* Return the configured auto-reset interval, or BP_NOT_CAP. */
int get_bypass_wd_auto(bpctl_dev_t *pbpctl_dev)
{
	return (pbpctl_dev->bp_caps & WD_CTL_CAP) ?
	    pbpctl_dev->reset_time : BP_NOT_CAP;
}
#ifdef BP_SELF_TEST
/*
 * Enable/disable the self-test mode: when on, the slave port's
 * ndo_start_xmit is swapped for bp_hard_start_xmit (under RTNL);
 * when off, the saved ops are restored.
 *
 * Fix: get_status_port_fn() may return NULL (remove_bypass_wd_auto
 * guards against this); the result is now NULL-checked before
 * dereferencing ->ndev.
 */
int set_bp_self_test(bpctl_dev_t *pbpctl_dev, unsigned int param)
{
	bpctl_dev_t *pbpctl_dev_sl = NULL;

	if (pbpctl_dev->bp_caps & WD_CTL_CAP) {
		pbpctl_dev->bp_self_test_flag = param == 0 ? 0 : 1;
		pbpctl_dev_sl = get_status_port_fn(pbpctl_dev);
		if (pbpctl_dev_sl && (pbpctl_dev_sl->ndev) &&
		    (pbpctl_dev_sl->ndev->netdev_ops)) {
			rtnl_lock();
			if (pbpctl_dev->bp_self_test_flag == 1) {
				/* Clone the current ops, overriding only
				 * the xmit hook. */
				pbpctl_dev_sl->old_ops =
				    pbpctl_dev_sl->ndev->netdev_ops;
				pbpctl_dev_sl->new_ops =
				    *pbpctl_dev_sl->old_ops;
				pbpctl_dev_sl->new_ops.ndo_start_xmit =
				    bp_hard_start_xmit;
				pbpctl_dev_sl->ndev->netdev_ops =
				    &pbpctl_dev_sl->new_ops;
			} else if (pbpctl_dev_sl->old_ops) {
				pbpctl_dev_sl->ndev->netdev_ops =
				    pbpctl_dev_sl->old_ops;
				pbpctl_dev_sl->old_ops = NULL;
			}
			rtnl_unlock();
		}
		set_bypass_wd_auto(pbpctl_dev, param);
		return 0;
	}
	return BP_NOT_CAP;
}
/* Report self-test state: the reset interval while active, else 0. */
int get_bp_self_test(bpctl_dev_t *pbpctl_dev)
{
	if (!(pbpctl_dev->bp_caps & WD_CTL_CAP))
		return BP_NOT_CAP;

	return (pbpctl_dev->bp_self_test_flag == 1) ?
	    pbpctl_dev->reset_time : 0;
}
#endif
/**************************************************************/
/************************* API ********************************/
/**************************************************************/
/* A bypass pair is controlled through PCI function 0 or 2.
 * Returns 1 if this device is the controlling function, 0 otherwise,
 * -1 on a NULL device. */
int is_bypass_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return (pbpctl_dev->func == 0) || (pbpctl_dev->func == 2);
}
/*
 * Switch bypass mode on (non-zero) or off.
 * Fix: added the NULL-device guard every sibling accessor in this
 * file has; previously a NULL pointer was dereferenced in the caps
 * test.
 */
int set_bypass_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
{
	int ret;

	if (!pbpctl_dev)
		return -1;
	if (!(pbpctl_dev->bp_caps & BP_CAP))
		return BP_NOT_CAP;

	ret = cmnd_on(pbpctl_dev);
	if (ret < 0)
		return ret;

	ret = bypass_mode ? bypass_on(pbpctl_dev) : bypass_off(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return ret;
}
/*
 * Report current bypass state.
 * Fix: added the NULL guard that every other get_*_fn accessor here
 * performs before touching the device.
 */
int get_bypass_fn(bpctl_dev_t *pbpctl_dev)
{
	if (!pbpctl_dev)
		return -1;
	return bypass_status(pbpctl_dev);
}
/* Report whether the bypass state changed since last query. */
int get_bypass_change_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return bypass_change_status(pbpctl_dev);
}
/* Disable (non-zero) or re-enable the bypass capability. */
int set_dis_bypass_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & BP_DIS_CAP))
		return BP_NOT_CAP;

	ret = cmnd_on(pbpctl_dev);
	if (ret < 0)
		return ret;

	ret = dis_param ? dis_bypass_cap(pbpctl_dev)
			: en_bypass_cap(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return ret;
}
/* Report whether the bypass capability is currently disabled. */
int get_dis_bypass_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return dis_bypass_cap_status(pbpctl_dev);
}
/* Select the state (bypass vs. normal) taken at power-off. */
int set_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & BP_PWOFF_CTL_CAP))
		return BP_NOT_CAP;

	ret = cmnd_on(pbpctl_dev);
	if (ret < 0)
		return ret;

	ret = bypass_mode ? bypass_state_pwroff(pbpctl_dev)
			  : normal_state_pwroff(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return ret;
}
/* Report the configured power-off default state. */
int get_bypass_pwoff_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return default_pwroff_status(pbpctl_dev);
}
/* Select the state (bypass vs. normal) taken at power-up. */
int set_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev, int bypass_mode)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & BP_PWUP_CTL_CAP))
		return BP_NOT_CAP;

	ret = cmnd_on(pbpctl_dev);
	if (ret < 0)
		return ret;

	ret = bypass_mode ? bypass_state_pwron(pbpctl_dev)
			  : normal_state_pwron(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return ret;
}
/* Report the configured power-up default state. */
int get_bypass_pwup_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return default_pwron_status(pbpctl_dev);
}
/* Arm the hardware watchdog with the given timeout (0 disarms).
 * Returns the effective timer interval when armed. */
int set_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int timeout)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & WD_CTL_CAP))
		return BP_NOT_CAP;

	ret = cmnd_on(pbpctl_dev);
	if (ret < 0)
		return ret;

	if (timeout == 0) {
		ret = wdt_off(pbpctl_dev);
	} else {
		wdt_on(pbpctl_dev, timeout);
		ret = pbpctl_dev->bypass_timer_interval;
	}
	cmnd_off(pbpctl_dev);
	return ret;
}
/* Report whether the watchdog is programmed; timeout receives the
 * configured value. */
int get_bypass_wd_fn(bpctl_dev_t *pbpctl_dev, int *timeout)
{
	if (pbpctl_dev == NULL)
		return -1;

	return wdt_programmed(pbpctl_dev, timeout);
}
/* Query how much time remains before the watchdog expires. */
int get_wd_expire_time_fn(bpctl_dev_t *pbpctl_dev, int *time_left)
{
	if (pbpctl_dev == NULL)
		return -1;

	return wdt_timer(pbpctl_dev, time_left);
}
/* Kick (reload) the hardware watchdog timer. */
int reset_bypass_wd_timer_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return wdt_timer_reload(pbpctl_dev);
}
/* Describe the watchdog's programmable range: packs the number of
 * timeout steps, the step granularity flag and the minimum interval
 * into a single capability word. */
int get_wd_set_caps_fn(bpctl_dev_t *pbpctl_dev)
{
	unsigned int step = TIMEOUT_MAX_STEP + 1;
	unsigned int bits = 0;

	if (pbpctl_dev == NULL)
		return -1;
	if (INTEL_IF_SERIES(pbpctl_dev->subdevice))
		return BP_NOT_CAP;

	/* Count the bits needed to represent TIMEOUT_MAX_STEP + 1. */
	while (step >>= 1)
		bits++;

	if (!is_bypass_fn(pbpctl_dev))
		return -1;

	return WD_STEP_COUNT_MASK(bits) | WDT_STEP_TIME |
	    WD_MIN_TIME_MASK(TIMEOUT_UNIT / 100);
}
/* Switch standard-NIC mode on (non-zero) or off. */
int set_std_nic_fn(bpctl_dev_t *pbpctl_dev, int nic_mode)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & STD_NIC_CAP))
		return BP_NOT_CAP;

	ret = cmnd_on(pbpctl_dev);
	if (ret < 0)
		return ret;

	ret = nic_mode ? std_nic_on(pbpctl_dev) : std_nic_off(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return ret;
}
/* Report standard-NIC mode status. */
int get_std_nic_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return std_nic_status(pbpctl_dev);
}
/* Switch TAP mode on (non-zero) or off.  Returns 0 on success. */
int set_tap_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
{
	if (pbpctl_dev == NULL)
		return -1;
	/* cmnd_on() is only attempted when the capability exists. */
	if (!(pbpctl_dev->bp_caps & TAP_CAP) || cmnd_on(pbpctl_dev) < 0)
		return BP_NOT_CAP;

	if (tap_mode)
		tap_on(pbpctl_dev);
	else
		tap_off(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return 0;
}
/* Report TAP mode status. */
int get_tap_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return tap_status(pbpctl_dev);
}
/* Select TAP vs. normal state at power-up. */
int set_tap_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & TAP_PWUP_CTL_CAP) ||
	    cmnd_on(pbpctl_dev) < 0)
		return BP_NOT_CAP;

	ret = tap_mode ? tap_state_pwron(pbpctl_dev)
		       : normal_state_pwron(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return ret;
}
/* Report the TAP power-up default, inverted to a 1/0 flag
 * (hardware status 0 maps to 1). */
int get_tap_pwup_fn(bpctl_dev_t *pbpctl_dev)
{
	int status;

	if (pbpctl_dev == NULL)
		return -1;

	status = default_pwron_tap_status(pbpctl_dev);
	if (status < 0)
		return status;
	return status == 0;
}
/* Report whether the TAP state changed since last query. */
int get_tap_change_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return tap_change_status(pbpctl_dev);
}
/* Disable (non-zero) or re-enable the TAP capability. */
int set_dis_tap_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & TAP_DIS_CAP) || cmnd_on(pbpctl_dev) < 0)
		return BP_NOT_CAP;

	ret = dis_param ? dis_tap_cap(pbpctl_dev) : en_tap_cap(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return ret;
}
/* Report whether the TAP capability is currently disabled. */
int get_dis_tap_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return dis_tap_cap_status(pbpctl_dev);
}
/* Switch disconnect mode on (non-zero) or off. */
int set_disc_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
{
	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & DISC_CAP) || cmnd_on(pbpctl_dev) < 0)
		return BP_NOT_CAP;

	if (disc_mode)
		disc_on(pbpctl_dev);
	else
		disc_off(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return BP_OK;
}
/* Report disconnect mode status. */
int get_disc_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return disc_status(pbpctl_dev);
}
/* Select disconnect vs. normal state at power-up. */
int set_disc_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & DISC_PWUP_CTL_CAP) ||
	    cmnd_on(pbpctl_dev) < 0)
		return BP_NOT_CAP;

	ret = disc_mode ? disc_state_pwron(pbpctl_dev)
			: normal_state_pwron(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return ret;
}
/* Report the disconnect power-up default as a 1/0 flag; negative
 * hardware status maps to BP_NOT_CAP. */
int get_disc_pwup_fn(bpctl_dev_t *pbpctl_dev)
{
	int status;

	if (pbpctl_dev == NULL)
		return -1;

	status = default_pwron_disc_status(pbpctl_dev);
	if (status == 0)
		return 1;
	if (status < 0)
		return BP_NOT_CAP;
	return 0;
}
/* Report whether the disconnect state changed since last query. */
int get_disc_change_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return disc_change_status(pbpctl_dev);
}
/* Disable (non-zero) or re-enable the disconnect capability. */
int set_dis_disc_fn(bpctl_dev_t *pbpctl_dev, int dis_param)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & DISC_DIS_CAP) ||
	    cmnd_on(pbpctl_dev) < 0)
		return BP_NOT_CAP;

	ret = dis_param ? dis_disc_cap(pbpctl_dev) : en_disc_cap(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return ret;
}
/* Report whether the disconnect capability is currently disabled. */
int get_dis_disc_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return dis_disc_cap_status(pbpctl_dev);
}
/* Switch per-port disconnect on (non-zero) or off. */
int set_disc_port_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
{
	if (pbpctl_dev == NULL)
		return -1;

	return disc_mode ? disc_port_on(pbpctl_dev)
			 : disc_port_off(pbpctl_dev);
}
/* Report per-port disconnect status. */
int get_disc_port_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return disc_port_status(pbpctl_dev);
}
/* Select per-port disconnect vs. normal state at power-up. */
int set_disc_port_pwup_fn(bpctl_dev_t *pbpctl_dev, int disc_mode)
{
	if (pbpctl_dev == NULL)
		return -1;

	return disc_mode ? disc_port_state_pwron(pbpctl_dev)
			 : normal_port_state_pwron(pbpctl_dev);
}
/* Report the per-port disconnect power-up default, inverted to a
 * 1/0 flag (hardware status 0 maps to 1). */
int get_disc_port_pwup_fn(bpctl_dev_t *pbpctl_dev)
{
	int status;

	if (pbpctl_dev == NULL)
		return -1;

	status = default_pwron_disc_port_status(pbpctl_dev);
	if (status < 0)
		return status;
	return status == 0;
}
/* Report the watchdog-expiry action (bypass/tap/disc). */
int get_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return wdt_exp_mode_status(pbpctl_dev);
}
/* Set the watchdog-expiry action. */
int set_wd_exp_mode_fn(bpctl_dev_t *pbpctl_dev, int param)
{
	if (pbpctl_dev == NULL)
		return -1;

	return wdt_exp_mode(pbpctl_dev, param);
}
/* Issue a controller reset. */
int reset_cont_fn(bpctl_dev_t *pbpctl_dev)
{
	int ret;

	if (pbpctl_dev == NULL)
		return -1;

	ret = cmnd_on(pbpctl_dev);
	if (ret < 0)
		return ret;
	/* NOTE(review): no matching cmnd_off() here, unlike the other
	 * setters — confirm reset_cont() finishes the command cycle. */
	return reset_cont(pbpctl_dev);
}
/* Set transmitter state, refusing while TPL (transparent link
 * propagation) is active on this port or on its master. */
int set_tx_fn(bpctl_dev_t *pbpctl_dev, int tx_state)
{
	bpctl_dev_t *master;

	if (pbpctl_dev == NULL)
		return -1;

	if ((pbpctl_dev->bp_caps & TPL_CAP) &&
	    (pbpctl_dev->bp_caps & SW_CTL_CAP)) {
		if (pbpctl_dev->bp_tpl_flag)
			return BP_NOT_CAP;
	} else {
		master = get_master_port_fn(pbpctl_dev);
		if (master && (master->bp_caps & TPL_CAP) &&
		    master->bp_tpl_flag)
			return BP_NOT_CAP;
	}
	return set_tx(pbpctl_dev, tx_state);
}
/*
 * Force-link control addressed by device index.
 * Fixes: the local device pointer was needlessly 'static' (a latent
 * race and no benefit), and the bound check allowed dev_num ==
 * device_num even though valid indices are 0..device_num-1.
 */
int set_bp_force_link_fn(int dev_num, int tx_state)
{
	bpctl_dev_t *bpctl_dev_curr;

	if ((dev_num < 0) || (dev_num >= device_num)
	    || (bpctl_dev_arr[dev_num].pdev == NULL))
		return -1;
	bpctl_dev_curr = &bpctl_dev_arr[dev_num];

	return set_bp_force_link(bpctl_dev_curr, tx_state);
}
/* Thin wrapper: configure the auto watchdog-reset interval. */
int set_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev, int param)
{
	if (pbpctl_dev == NULL)
		return -1;

	return set_bypass_wd_auto(pbpctl_dev, param);
}
/* Thin wrapper: query the auto watchdog-reset interval. */
int get_wd_autoreset_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return get_bypass_wd_auto(pbpctl_dev);
}
#ifdef BP_SELF_TEST
/* Thin wrapper: enable/disable the self-test mode. */
int set_bp_self_test_fn(bpctl_dev_t *pbpctl_dev, int param)
{
	if (pbpctl_dev == NULL)
		return -1;

	return set_bp_self_test(pbpctl_dev, param);
}
/* Thin wrapper: query the self-test mode. */
int get_bp_self_test_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return get_bp_self_test(pbpctl_dev);
}
#endif
/* Return the device capability bitmask. */
int get_bypass_caps_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return pbpctl_dev->bp_caps;
}
/*
 * Find the slave port (PCI function 1 or 3 on the same bus/slot) for
 * a master device (function 0 or 2).  Returns 1 and fills
 * *pbpctl_dev_out on success, -1 if no slave found, 0 if the given
 * device is not a master.
 *
 * Fix: the scan loop dereferenced bpctl_dev_arr[idx_dev] BEFORE
 * checking idx_dev < device_num, reading one element past the
 * populated range; the bounds check must come first.
 */
int get_bypass_slave_fn(bpctl_dev_t *pbpctl_dev, bpctl_dev_t **pbpctl_dev_out)
{
	int idx_dev = 0;

	if (!pbpctl_dev)
		return -1;
	if ((pbpctl_dev->func == 0) || (pbpctl_dev->func == 2)) {
		for (idx_dev = 0;
		     ((idx_dev < device_num)
		      && (bpctl_dev_arr[idx_dev].pdev != NULL)); idx_dev++) {
			if ((bpctl_dev_arr[idx_dev].bus == pbpctl_dev->bus)
			    && (bpctl_dev_arr[idx_dev].slot ==
				pbpctl_dev->slot)) {
				/* master func 0 pairs with func 1,
				 * master func 2 pairs with func 3 */
				if ((pbpctl_dev->func == 0)
				    && (bpctl_dev_arr[idx_dev].func == 1)) {
					*pbpctl_dev_out =
					    &bpctl_dev_arr[idx_dev];
					return 1;
				}
				if ((pbpctl_dev->func == 2) &&
				    (bpctl_dev_arr[idx_dev].func == 3)) {
					*pbpctl_dev_out =
					    &bpctl_dev_arr[idx_dev];
					return 1;
				}
			}
		}
		return -1;
	} else
		return 0;
}
/* 1 if this is a controlling (master) PCI function, 0 otherwise,
 * -1 on a NULL device. */
int is_bypass(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return (pbpctl_dev->func == 0) || (pbpctl_dev->func == 2);
}
/* Query transmitter state, refusing while TPL is active on this
 * port or on its master (mirrors set_tx_fn). */
int get_tx_fn(bpctl_dev_t *pbpctl_dev)
{
	bpctl_dev_t *master;

	if (pbpctl_dev == NULL)
		return -1;

	if ((pbpctl_dev->bp_caps & TPL_CAP) &&
	    (pbpctl_dev->bp_caps & SW_CTL_CAP)) {
		if (pbpctl_dev->bp_tpl_flag)
			return BP_NOT_CAP;
	} else {
		master = get_master_port_fn(pbpctl_dev);
		if (master && (master->bp_caps & TPL_CAP) &&
		    master->bp_tpl_flag)
			return BP_NOT_CAP;
	}
	return tx_status(pbpctl_dev);
}
/*
 * Force-link status addressed by device index.
 * Fixes: dropped the needless 'static' on the local pointer (latent
 * race, no benefit) and tightened the bound to >= device_num since
 * valid indices are 0..device_num-1.
 */
int get_bp_force_link_fn(int dev_num)
{
	bpctl_dev_t *bpctl_dev_curr;

	if ((dev_num < 0) || (dev_num >= device_num)
	    || (bpctl_dev_arr[dev_num].pdev == NULL))
		return -1;
	bpctl_dev_curr = &bpctl_dev_arr[dev_num];

	return bp_force_link_status(bpctl_dev_curr);
}
/* Read raw link indication from the MAC: SWDPIN1 for fiber media,
 * the LU status bit otherwise.  Non-zero means link up. */
static int get_bypass_link_status(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	if (pbpctl_dev->media_type == bp_fiber)
		return BPCTL_READ_REG(pbpctl_dev, CTRL) &
		    BPCTLI_CTRL_SWDPIN1;
	return BPCTL_READ_REG(pbpctl_dev, STATUS) & BPCTLI_STATUS_LU;
}
/*
 * TPL (transparent link propagation) poll timer: mirrors link loss
 * between the two ports of a pair by turning the partner's
 * transmitter off, and re-enables transmitters once both links are
 * healthy again.  Re-arms itself every BP_LINK_MON_DELAY seconds
 * while bp_tpl_flag is set.
 */
static void bp_tpl_timer_fn(unsigned long param)
{
	bpctl_dev_t *pbpctl_dev = (bpctl_dev_t *) param;
	uint32_t link1, link2;
	bpctl_dev_t *pbpctl_dev_b = NULL;

	if (!(pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
		return;
	/* TPL was switched off: restore both transmitters and stop
	 * re-arming. */
	if (!pbpctl_dev->bp_tpl_flag) {
		set_tx(pbpctl_dev_b, 1);
		set_tx(pbpctl_dev, 1);
		return;
	}
	link1 = get_bypass_link_status(pbpctl_dev);
	link2 = get_bypass_link_status(pbpctl_dev_b);
	/* Four cases on (link1, tx1); in each, the action depends on
	 * the partner's (link2, tx2).  The net effect is: a down link
	 * on one side forces the other side's TX off, and TX comes
	 * back once both sides see link. */
	if ((link1) && (tx_status(pbpctl_dev))) {
		if ((!link2) && (tx_status(pbpctl_dev_b))) {
			/* partner lost link: propagate by dropping our TX */
			set_tx(pbpctl_dev, 0);
		} else if (!tx_status(pbpctl_dev_b)) {
			/* both links OK, partner TX still off: restore it */
			set_tx(pbpctl_dev_b, 1);
		}
	} else if ((!link1) && (tx_status(pbpctl_dev))) {
		if ((link2) && (tx_status(pbpctl_dev_b))) {
			/* we lost link: propagate to the partner */
			set_tx(pbpctl_dev_b, 0);
		}
	} else if ((link1) && (!tx_status(pbpctl_dev))) {
		if ((link2) && (tx_status(pbpctl_dev_b))) {
			/* both links back: re-enable our TX */
			set_tx(pbpctl_dev, 1);
		}
	} else if ((!link1) && (!tx_status(pbpctl_dev))) {
		if ((link2) && (tx_status(pbpctl_dev_b))) {
			set_tx(pbpctl_dev, 1);
		}
	}
	mod_timer(&pbpctl_dev->bp_tpl_timer, jiffies + BP_LINK_MON_DELAY * HZ);
}
/*
 * Stop software TPL monitoring: cancel the poll timer, clear the
 * flag and re-enable both transmitters.
 *
 * Fix: get_status_port_fn() was called twice, with the first result
 * overwritten before use; the redundant lookup is removed.
 */
void remove_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
{
	bpctl_dev_t *pbpctl_dev_b = NULL;

	if (!pbpctl_dev)
		return;

	if (pbpctl_dev->bp_caps & TPL_CAP) {
		del_timer_sync(&pbpctl_dev->bp_tpl_timer);
		pbpctl_dev->bp_tpl_flag = 0;
		pbpctl_dev_b = get_status_port_fn(pbpctl_dev);
		if (pbpctl_dev_b)
			set_tx(pbpctl_dev_b, 1);
		set_tx(pbpctl_dev, 1);
	}
	return;
}
/* Prepare (but do not start) the TPL poll timer. */
int init_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & TPL_CAP))
		return BP_NOT_CAP;

	init_timer(&pbpctl_dev->bp_tpl_timer);
	pbpctl_dev->bp_tpl_timer.function = &bp_tpl_timer_fn;
	pbpctl_dev->bp_tpl_timer.data = (unsigned long)pbpctl_dev;
	return BP_OK;
}
/* Start (param != 0) or stop software TPL monitoring.  No-op when the
 * requested state is already in effect. */
int set_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev, unsigned int param)
{
	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & TPL_CAP))
		return BP_NOT_CAP;

	if (param && !pbpctl_dev->bp_tpl_flag) {
		pbpctl_dev->bp_tpl_flag = param;
		/* kick the poll timer right away */
		mod_timer(&pbpctl_dev->bp_tpl_timer, jiffies + 1);
	} else if (!param && pbpctl_dev->bp_tpl_flag) {
		remove_bypass_tpl_auto(pbpctl_dev);
	}
	return BP_OK;
}
/* Report the software TPL monitoring flag. */
int get_bypass_tpl_auto(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return (pbpctl_dev->bp_caps & TPL_CAP) ?
	    pbpctl_dev->bp_tpl_flag : BP_NOT_CAP;
}
/*
 * Enable/disable TPL.  Hardware TPL (TPL_IF series or TPL2-capable
 * firmware) is driven directly; otherwise the software poll timer is
 * used.  Enabling first restores both transmitters.
 *
 * Fix: get_status_port_fn() was called unconditionally at entry and
 * its result immediately overwritten by the call inside the
 * tpl_mode branch; the redundant first lookup is removed.
 */
int set_tpl_fn(bpctl_dev_t *pbpctl_dev, int tpl_mode)
{
	bpctl_dev_t *pbpctl_dev_b = NULL;

	if (!pbpctl_dev)
		return -1;

	if (pbpctl_dev->bp_caps & TPL_CAP) {
		if (tpl_mode) {
			if ((pbpctl_dev_b = get_status_port_fn(pbpctl_dev)))
				set_tx(pbpctl_dev_b, 1);
			set_tx(pbpctl_dev, 1);
		}
		if ((TPL_IF_SERIES(pbpctl_dev->subdevice)) ||
		    (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX)) {
			pbpctl_dev->bp_tpl_flag = tpl_mode;
			if (!tpl_mode)
				tpl_hw_off(pbpctl_dev);
			else
				tpl_hw_on(pbpctl_dev);
		} else
			set_bypass_tpl_auto(pbpctl_dev, tpl_mode);
		return 0;
	}
	return BP_NOT_CAP;
}
/* Report TPL state: TPL2-capable firmware is queried directly,
 * otherwise the software flag is returned. */
int get_tpl_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & TPL_CAP))
		return BP_NOT_CAP;

	if (pbpctl_dev->bp_caps_ex & TPL2_CAP_EX)
		return tpl2_flag_status(pbpctl_dev);
	return pbpctl_dev->bp_tpl_flag;
}
/* Enable/disable "wait at power-up" behavior. */
int set_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
{
	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;

	cmnd_on(pbpctl_dev);
	if (tap_mode)
		bp_wait_at_pwup_en(pbpctl_dev);
	else
		bp_wait_at_pwup_dis(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return BP_OK;
}
/* Report "wait at power-up" status. */
int get_bp_wait_at_pwup_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return bp_wait_at_pwup_status(pbpctl_dev);
}
/* Enable/disable hardware-reset behavior. */
int set_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev, int tap_mode)
{
	if (pbpctl_dev == NULL)
		return -1;
	if (!(pbpctl_dev->bp_caps & SW_CTL_CAP))
		return BP_NOT_CAP;

	cmnd_on(pbpctl_dev);
	if (tap_mode)
		bp_hw_reset_en(pbpctl_dev);
	else
		bp_hw_reset_dis(pbpctl_dev);
	cmnd_off(pbpctl_dev);
	return BP_OK;
}
/* Report hardware-reset status. */
int get_bp_hw_reset_fn(bpctl_dev_t *pbpctl_dev)
{
	if (pbpctl_dev == NULL)
		return -1;

	return bp_hw_reset_status(pbpctl_dev);
}
/* Copy the device name and firmware version to caller buffers.
 * Only valid for a master (bypass-controlling) function. */
int get_bypass_info_fn(bpctl_dev_t *pbpctl_dev, char *dev_name,
		       char *add_param)
{
	if (pbpctl_dev == NULL)
		return -1;
	if (is_bypass_fn(pbpctl_dev) == 0)
		return -1;

	/* NOTE(review): dev_name must be large enough for ->name —
	 * callers pass bpctl_cmd.data; confirm sizing. */
	strcpy(dev_name, pbpctl_dev->name);
	*add_param = pbpctl_dev->bp_fw_ver;
	return 0;
}
/*
 * Map a PCI bus/slot/function triple to an index in bpctl_dev_arr,
 * or -1 when not found.
 *
 * Fix: the loop condition dereferenced bpctl_dev_arr[idx_dev] BEFORE
 * checking idx_dev < device_num, reading one element past the
 * populated range; the bounds check must come first.
 */
int get_dev_idx_bsf(int bus, int slot, int func)
{
	int idx_dev = 0;

	for (idx_dev = 0;
	     ((idx_dev < device_num) && (bpctl_dev_arr[idx_dev].pdev != NULL));
	     idx_dev++) {
		if ((bus == bpctl_dev_arr[idx_dev].bus)
		    && (slot == bpctl_dev_arr[idx_dev].slot)
		    && (func == bpctl_dev_arr[idx_dev].func))
			return idx_dev;
	}
	return -1;
}
/*
 * Lower-case an ASCII string in place.
 * Fixes: strlen() was re-evaluated on every loop iteration (O(n^2));
 * the magic numbers 65/90/32 are replaced by character literals.
 */
static void str_low(char *str)
{
	size_t i;
	size_t len = strlen(str);

	for (i = 0; i < len; i++)
		if ((str[i] >= 'A') && (str[i] <= 'Z'))
			str[i] += 'a' - 'A';
}
/*
 * Parse a hex string (no 0x prefix) into an unsigned long, least
 * significant digit last.  Lower-cases the buffer in place via
 * str_low().  No validation: non-hex characters yield garbage, as
 * before.
 *
 * Fix: 'dig << shift' shifted an int (dig is promoted), which is
 * undefined behavior once shift reaches the width of int on long
 * inputs; the digit is now widened to unsigned long before shifting.
 */
static unsigned long str_to_hex(char *p)
{
	unsigned long hex = 0;
	unsigned long length, shift = 0;
	unsigned char dig = 0;

	str_low(p);
	length = strlen(p);
	if (length == 0)
		return 0;

	do {
		dig = p[--length];
		dig = dig < 'a' ? (dig - '0') : (dig - 'a' + 0xa);
		hex |= ((unsigned long)dig << shift);
		shift += 4;
	} while (length);
	return hex;
}
/*
 * Map a network interface index to an index in bpctl_dev_arr, or -1.
 *
 * Fix: the loop condition dereferenced bpctl_dev_arr[idx_dev] BEFORE
 * checking idx_dev < device_num; the bounds check must come first.
 */
static int get_dev_idx(int ifindex)
{
	int idx_dev = 0;

	for (idx_dev = 0;
	     ((idx_dev < device_num) && (bpctl_dev_arr[idx_dev].pdev != NULL));
	     idx_dev++) {
		if (ifindex == bpctl_dev_arr[idx_dev].ifindex)
			return idx_dev;
	}
	return -1;
}
/*
 * Map a network interface index to its bpctl_dev_t, or NULL.
 *
 * Fix: the loop condition dereferenced bpctl_dev_arr[idx_dev] BEFORE
 * checking idx_dev < device_num; the bounds check must come first.
 */
static bpctl_dev_t *get_dev_idx_p(int ifindex)
{
	int idx_dev = 0;

	for (idx_dev = 0;
	     ((idx_dev < device_num) && (bpctl_dev_arr[idx_dev].pdev != NULL));
	     idx_dev++) {
		if (ifindex == bpctl_dev_arr[idx_dev].ifindex)
			return &bpctl_dev_arr[idx_dev];
	}
	return NULL;
}
/*
 * Walk all registered net_devices, parse each driver's ethtool
 * bus_info string ("domain:bus:slot.func") into bus/slot/func, and
 * attach the matching bpctl_dev_arr entry to the net_device (fills
 * ifindex and ndev).
 */
static void if_scan_init(void)
{
	int idx_dev = 0;
	struct net_device *dev;
	int ifindex;
	/* rcu_read_lock(); */
	/* rtnl_lock(); */
	/* rcu_read_lock(); */
	for_each_netdev(&init_net, dev) {
		struct ethtool_drvinfo drvinfo;
		char cbuf[32];
		char *buf = NULL;
		char res[10];
		int i = 0;
		int bus = 0, slot = 0, func = 0;
		ifindex = dev->ifindex;
		memset(res, 0, 10);
		memset(&drvinfo, 0, sizeof(struct ethtool_drvinfo));
		if (dev->ethtool_ops && dev->ethtool_ops->get_drvinfo) {
			memset(&drvinfo, 0, sizeof(drvinfo));
			dev->ethtool_ops->get_drvinfo(dev, &drvinfo);
		} else
			continue;
		/* skip devices that report no bus location */
		if (!strcmp(drvinfo.bus_info, "N/A"))
			continue;
		memcpy(&cbuf, drvinfo.bus_info, 32);
		buf = &cbuf[0];
		/* skip the PCI domain up to the first ':'.
		 * NOTE(review): this scan has no bound — a bus_info
		 * string without ':' (non-PCI bus) runs off the end of
		 * cbuf; confirm only PCI devices reach this point. */
		while (*buf++ != ':') ;
		/* bus number: hex digits up to the next ':' */
		for (i = 0; i < 10; i++, buf++) {
			if (*buf == ':')
				break;
			res[i] = *buf;
		}
		buf++;
		bus = str_to_hex(res);
		memset(res, 0, 10);
		/* slot number: hex digits up to the '.' */
		for (i = 0; i < 10; i++, buf++) {
			if (*buf == '.')
				break;
			res[i] = *buf;
		}
		buf++;
		slot = str_to_hex(res);
		/* remainder is the function number */
		func = str_to_hex(buf);
		idx_dev = get_dev_idx_bsf(bus, slot, func);
		if (idx_dev != -1) {
			bpctl_dev_arr[idx_dev].ifindex = ifindex;
			bpctl_dev_arr[idx_dev].ndev = dev;
		}
	}
	/* rtnl_unlock(); */
	/* rcu_read_unlock(); */
}
/*
 * Main ioctl dispatcher for the bypass control character device.
 * Resolves the target device from the user-supplied bus/slot/func
 * triple, interface index, or array index, then dispatches on the
 * command number under bpvm_lock with IRQs disabled.
 *
 * Fixes:
 *  - copy_to_user() failure was silently clobbered: 'ret = -EFAULT'
 *    was followed by an unconditional 'ret = SUCCESS'; now an else.
 *  - 'dev_idx > device_num' off-by-one (valid indices are
 *    0..device_num-1) tightened to '>=' in both range checks.
 *  - a duplicated, byte-identical IFF_UP check was collapsed.
 *  - the local 'pbpctl_dev' was needlessly 'static'.
 */
static long device_ioctl(struct file *file,	/* see include/linux/fs.h */
			 unsigned int ioctl_num,	/* number and param for ioctl */
			 unsigned long ioctl_param)
{
	struct bpctl_cmd bpctl_cmd;
	int dev_idx = 0;
	bpctl_dev_t *pbpctl_dev_out;
	void __user *argp = (void __user *)ioctl_param;
	int ret = 0;
	unsigned long flags;
	bpctl_dev_t *pbpctl_dev;

	lock_bpctl();
	/*
	 * Switch according to the ioctl called
	 */
	if (ioctl_num == IOCTL_TX_MSG(IF_SCAN)) {
		if_scan_init();
		ret = SUCCESS;
		goto bp_exit;
	}
	if (copy_from_user(&bpctl_cmd, argp, sizeof(struct bpctl_cmd))) {
		ret = -EFAULT;
		goto bp_exit;
	}
	if (ioctl_num == IOCTL_TX_MSG(GET_DEV_NUM)) {
		bpctl_cmd.out_param[0] = device_num;
		if (copy_to_user
		    (argp, (void *)&bpctl_cmd, sizeof(struct bpctl_cmd))) {
			ret = -EFAULT;
			goto bp_exit;
		}
		ret = SUCCESS;
		goto bp_exit;
	}
	local_irq_save(flags);
	if (!spin_trylock(&bpvm_lock)) {
		local_irq_restore(flags);
		unlock_bpctl();
		return -1;
	}
	/* Resolve the target device: explicit bus/slot/func wins,
	 * then ifindex, then a raw array index. */
	if ((bpctl_cmd.in_param[5]) ||
	    (bpctl_cmd.in_param[6]) || (bpctl_cmd.in_param[7]))
		dev_idx = get_dev_idx_bsf(bpctl_cmd.in_param[5],
					  bpctl_cmd.in_param[6],
					  bpctl_cmd.in_param[7]);
	else if (bpctl_cmd.in_param[1] == 0)
		dev_idx = bpctl_cmd.in_param[0];
	else
		dev_idx = get_dev_idx(bpctl_cmd.in_param[1]);

	if (dev_idx < 0 || dev_idx >= device_num) {
		ret = -EOPNOTSUPP;
		spin_unlock_irqrestore(&bpvm_lock, flags);
		goto bp_exit;
	}

	bpctl_cmd.out_param[0] = bpctl_dev_arr[dev_idx].bus;
	bpctl_cmd.out_param[1] = bpctl_dev_arr[dev_idx].slot;
	bpctl_cmd.out_param[2] = bpctl_dev_arr[dev_idx].func;
	bpctl_cmd.out_param[3] = bpctl_dev_arr[dev_idx].ifindex;

	/* 10Gb boards are driven through their network driver. */
	if ((bpctl_dev_arr[dev_idx].bp_10gb)
	    && (!(bpctl_dev_arr[dev_idx].ifindex))) {
		printk("Please load network driver for %s adapter!\n",
		       bpctl_dev_arr[dev_idx].name);
		bpctl_cmd.status = -1;
		ret = SUCCESS;
		spin_unlock_irqrestore(&bpvm_lock, flags);
		goto bp_exit;
	}

	if ((bpctl_dev_arr[dev_idx].bp_10gb) && (bpctl_dev_arr[dev_idx].ndev)) {
		if (!(bpctl_dev_arr[dev_idx].ndev->flags & IFF_UP)) {
			printk
			    ("Please bring up network interfaces for %s adapter!\n",
			     bpctl_dev_arr[dev_idx].name);
			bpctl_cmd.status = -1;
			ret = SUCCESS;
			spin_unlock_irqrestore(&bpvm_lock, flags);
			goto bp_exit;
		}
	}

	if ((dev_idx < 0) || (dev_idx >= device_num)
	    || (bpctl_dev_arr[dev_idx].pdev == NULL)) {
		bpctl_cmd.status = -1;
		goto bpcmd_exit;
	}

	pbpctl_dev = &bpctl_dev_arr[dev_idx];

	switch (ioctl_num) {
	case IOCTL_TX_MSG(SET_BYPASS_PWOFF):
		bpctl_cmd.status =
		    set_bypass_pwoff_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_BYPASS_PWOFF):
		bpctl_cmd.status = get_bypass_pwoff_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_BYPASS_PWUP):
		bpctl_cmd.status =
		    set_bypass_pwup_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_BYPASS_PWUP):
		bpctl_cmd.status = get_bypass_pwup_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_BYPASS_WD):
		bpctl_cmd.status =
		    set_bypass_wd_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_BYPASS_WD):
		bpctl_cmd.status =
		    get_bypass_wd_fn(pbpctl_dev, (int *)&(bpctl_cmd.data[0]));
		break;
	case IOCTL_TX_MSG(GET_WD_EXPIRE_TIME):
		bpctl_cmd.status =
		    get_wd_expire_time_fn(pbpctl_dev,
					  (int *)&(bpctl_cmd.data[0]));
		break;
	case IOCTL_TX_MSG(RESET_BYPASS_WD_TIMER):
		bpctl_cmd.status = reset_bypass_wd_timer_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(GET_WD_SET_CAPS):
		bpctl_cmd.status = get_wd_set_caps_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_STD_NIC):
		bpctl_cmd.status =
		    set_std_nic_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_STD_NIC):
		bpctl_cmd.status = get_std_nic_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_TAP):
		bpctl_cmd.status =
		    set_tap_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_TAP):
		bpctl_cmd.status = get_tap_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(GET_TAP_CHANGE):
		bpctl_cmd.status = get_tap_change_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_DIS_TAP):
		bpctl_cmd.status =
		    set_dis_tap_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_DIS_TAP):
		bpctl_cmd.status = get_dis_tap_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_TAP_PWUP):
		bpctl_cmd.status =
		    set_tap_pwup_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_TAP_PWUP):
		bpctl_cmd.status = get_tap_pwup_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_WD_EXP_MODE):
		bpctl_cmd.status =
		    set_wd_exp_mode_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_WD_EXP_MODE):
		bpctl_cmd.status = get_wd_exp_mode_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(GET_DIS_BYPASS):
		bpctl_cmd.status = get_dis_bypass_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_DIS_BYPASS):
		bpctl_cmd.status =
		    set_dis_bypass_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_BYPASS_CHANGE):
		bpctl_cmd.status = get_bypass_change_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(GET_BYPASS):
		bpctl_cmd.status = get_bypass_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_BYPASS):
		bpctl_cmd.status =
		    set_bypass_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_BYPASS_CAPS):
		/* This case copies out and returns directly, skipping
		 * the shared bpcmd_exit path. */
		bpctl_cmd.status = get_bypass_caps_fn(pbpctl_dev);
		spin_unlock_irqrestore(&bpvm_lock, flags);
		if (copy_to_user
		    (argp, (void *)&bpctl_cmd, sizeof(struct bpctl_cmd))) {
			ret = -EFAULT;
			goto bp_exit;
		}
		goto bp_exit;
	case IOCTL_TX_MSG(GET_BYPASS_SLAVE):
		bpctl_cmd.status =
		    get_bypass_slave_fn(pbpctl_dev, &pbpctl_dev_out);
		if (bpctl_cmd.status == 1) {
			bpctl_cmd.out_param[4] = pbpctl_dev_out->bus;
			bpctl_cmd.out_param[5] = pbpctl_dev_out->slot;
			bpctl_cmd.out_param[6] = pbpctl_dev_out->func;
			bpctl_cmd.out_param[7] = pbpctl_dev_out->ifindex;
		}
		break;
	case IOCTL_TX_MSG(IS_BYPASS):
		bpctl_cmd.status = is_bypass(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_TX):
		bpctl_cmd.status = set_tx_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_TX):
		bpctl_cmd.status = get_tx_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_WD_AUTORESET):
		bpctl_cmd.status =
		    set_wd_autoreset_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_WD_AUTORESET):
		bpctl_cmd.status = get_wd_autoreset_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_DISC):
		bpctl_cmd.status =
		    set_disc_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_DISC):
		bpctl_cmd.status = get_disc_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(GET_DISC_CHANGE):
		bpctl_cmd.status = get_disc_change_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_DIS_DISC):
		bpctl_cmd.status =
		    set_dis_disc_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_DIS_DISC):
		bpctl_cmd.status = get_dis_disc_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_DISC_PWUP):
		bpctl_cmd.status =
		    set_disc_pwup_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_DISC_PWUP):
		bpctl_cmd.status = get_disc_pwup_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(GET_BYPASS_INFO):
		bpctl_cmd.status =
		    get_bypass_info_fn(pbpctl_dev, (char *)&bpctl_cmd.data,
				       (char *)&bpctl_cmd.out_param[4]);
		break;
	case IOCTL_TX_MSG(SET_TPL):
		bpctl_cmd.status =
		    set_tpl_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_TPL):
		bpctl_cmd.status = get_tpl_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_BP_WAIT_AT_PWUP):
		bpctl_cmd.status =
		    set_bp_wait_at_pwup_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_BP_WAIT_AT_PWUP):
		bpctl_cmd.status = get_bp_wait_at_pwup_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_BP_HW_RESET):
		bpctl_cmd.status =
		    set_bp_hw_reset_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_BP_HW_RESET):
		bpctl_cmd.status = get_bp_hw_reset_fn(pbpctl_dev);
		break;
#ifdef BP_SELF_TEST
	case IOCTL_TX_MSG(SET_BP_SELF_TEST):
		bpctl_cmd.status =
		    set_bp_self_test_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_BP_SELF_TEST):
		bpctl_cmd.status = get_bp_self_test_fn(pbpctl_dev);
		break;
#endif
#if 0
	case IOCTL_TX_MSG(SET_DISC_PORT):
		bpctl_cmd.status =
		    set_disc_port_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_DISC_PORT):
		bpctl_cmd.status = get_disc_port_fn(pbpctl_dev);
		break;
	case IOCTL_TX_MSG(SET_DISC_PORT_PWUP):
		bpctl_cmd.status =
		    set_disc_port_pwup_fn(pbpctl_dev, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_DISC_PORT_PWUP):
		bpctl_cmd.status = get_disc_port_pwup_fn(pbpctl_dev);
		break;
#endif
	case IOCTL_TX_MSG(SET_BP_FORCE_LINK):
		bpctl_cmd.status =
		    set_bp_force_link_fn(dev_idx, bpctl_cmd.in_param[2]);
		break;
	case IOCTL_TX_MSG(GET_BP_FORCE_LINK):
		bpctl_cmd.status = get_bp_force_link_fn(dev_idx);
		break;
	default:
		ret = -EOPNOTSUPP;
		spin_unlock_irqrestore(&bpvm_lock, flags);
		goto bp_exit;
	}
 bpcmd_exit:
	spin_unlock_irqrestore(&bpvm_lock, flags);
	if (copy_to_user(argp, (void *)&bpctl_cmd, sizeof(struct bpctl_cmd)))
		ret = -EFAULT;
	else
		ret = SUCCESS;
 bp_exit:
	unlock_bpctl();
	return ret;
}
/* Character-device file operations: the bypass control device is driven
 * exclusively through ioctl() (see device_ioctl above); there is no
 * read/write interface. */
static const struct file_operations Fops = {
	.owner = THIS_MODULE,
	.unlocked_ioctl = device_ioctl,
};
/* Fallback for old kernels that do not provide the PCI_DEVICE() helper:
 * match an exact vendor/device pair with wildcard subsystem IDs. */
#ifndef PCI_DEVICE
#define PCI_DEVICE(vend,dev) \
	.vendor = (vend), .device = (dev), \
	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
#endif
/* Convenience initializer for a Silicom e1000-based bypass adapter
 * PCI match entry (any subsystem IDs). */
#define SILICOM_E1000BP_ETHERNET_DEVICE(device_id) {\
	PCI_DEVICE(SILICOM_VID, device_id)}
/*
 * Board identifiers for every supported bypass adapter model.
 *
 * WARNING: the numeric value of each enumerator is used as a direct
 * index into the dev_desc[] description table below (see init_one():
 * dev_desc[info->index].name), so this enum and dev_desc[] must be
 * kept in exactly the same order, one description per enumerator.
 */
typedef enum {
	PXG2BPFI,
	PXG2BPFIL,
	PXG2BPFILX,
	PXG2BPFILLX,
	PXGBPI,
	PXGBPIG,
	PXG2TBFI,
	PXG4BPI,
	PXG4BPFI,
	PEG4BPI,
	PEG2BPI,
	PEG4BPIN,
	PEG2BPFI,
	PEG2BPFILX,
	PMCXG2BPFI,
	PMCXG2BPFIN,
	PEG4BPII,
	PEG4BPFII,
	PXG4BPFILX,
	PMCXG2BPIN,
	PMCXG4BPIN,
	PXG2BISC1,
	PEG2TBFI,
	PXG2TBI,
	PXG4BPFID,
	PEG4BPFI,
	PEG4BPIPT,
	PXG6BPI,
	PEG4BPIL,
	PMCXG2BPIN2,
	PMCXG4BPIN2,
	PMCX2BPI,
	PEG2BPFID,
	PEG2BPFIDLX,
	PMCX4BPI,
	MEG2BPFILN,
	MEG2BPFINX,
	PEG4BPFILX,
	PE10G2BPISR,
	PE10G2BPILR,
	MHIO8AD,
	PE10G2BPICX4,
	PEG2BPI5,
	PEG6BPI,
	PEG4BPFI5,
	PEG4BPFI5LX,
	MEG2BPFILXLN,
	PEG2BPIX1,
	MEG2BPFILXNX,
	XE10G2BPIT,
	XE10G2BPICX4,
	XE10G2BPISR,
	XE10G2BPILR,
	PEG4BPIIO,
	XE10G2BPIXR,
	PE10GDBISR,
	PE10GDBILR,
	PEG2BISC6,
	PEG6BPIFC,
	PE10G2BPTCX4,
	PE10G2BPTSR,
	PE10G2BPTLR,
	PE10G2BPTT,
	PEG4BPI6,
	PEG4BPFI6,
	PEG4BPFI6LX,
	PEG4BPFI6ZX,
	PEG2BPI6,
	PEG2BPFI6,
	PEG2BPFI6LX,
	PEG2BPFI6ZX,
	PEG2BPFI6FLXM,
	PEG4BPI6FC,
	PEG4BPFI6FC,
	PEG4BPFI6FCLX,
	PEG4BPFI6FCZX,
	PEG6BPI6,
	PEG2BPI6SC6,
	MEG2BPI6,
	XEG2BPI6,
	MEG4BPI6,
	PEG2BPFI5,
	PEG2BPFI5LX,
	PXEG4BPFI,
	M1EG2BPI6,
	M1EG2BPFI6,
	M1EG2BPFI6LX,
	M1EG2BPFI6ZX,
	M1EG4BPI6,
	M1EG4BPFI6,
	M1EG4BPFI6LX,
	M1EG4BPFI6ZX,
	M1EG6BPI6,
	M1E2G4BPi80,
	M1E2G4BPFi80,
	M1E2G4BPFi80LX,
	M1E2G4BPFi80ZX,
	PE210G2SPI9,
	M1E10G2BPI9CX4,
	M1E10G2BPI9SR,
	M1E10G2BPI9LR,
	M1E10G2BPI9T,
	PE210G2BPI9CX4,
	PE210G2BPI9SR,
	PE210G2BPI9LR,
	PE210G2BPI9T,
	M2EG2BPFI6,
	M2EG2BPFI6LX,
	M2EG2BPFI6ZX,
	M2EG4BPI6,
	M2EG4BPFI6,
	M2EG4BPFI6LX,
	M2EG4BPFI6ZX,
	M2EG6BPI6,
	PEG2DBI6,
	PEG2DBFI6,
	PEG2DBFI6LX,
	PEG2DBFI6ZX,
	PE2G4BPi80,
	PE2G4BPFi80,
	PE2G4BPFi80LX,
	PE2G4BPFi80ZX,
	PE2G4BPi80L,
	M6E2G8BPi80A,
	PE2G2BPi35,
	PAC1200BPi35,
	PE2G2BPFi35,
	PE2G2BPFi35LX,
	PE2G2BPFi35ZX,
	PE2G4BPi35,
	PE2G4BPi35L,
	PE2G4BPFi35,
	PE2G4BPFi35LX,
	PE2G4BPFi35ZX,
	PE2G6BPi35,
	PE2G6BPi35CX,
	PE2G2BPi80,
	PE2G2BPFi80,
	PE2G2BPFi80LX,
	PE2G2BPFi80ZX,
	M2E10G2BPI9CX4,
	M2E10G2BPI9SR,
	M2E10G2BPI9LR,
	M2E10G2BPI9T,
	M6E2G8BPi80,
	PE210G2DBi9SR,
	PE210G2DBi9SRRB,
	PE210G2DBi9LR,
	PE210G2DBi9LRRB,
	PE310G4DBi940SR,
	PE310G4BPi9T,
	PE310G4BPi9SR,
	PE310G4BPi9LR,
	PE210G2BPi40,
} board_t;	/* keep in sync with dev_desc[] */
/* One row of the PCI identification table (tx_ctl_pci_tbl): maps a
 * (vendor, device, subvendor, subdevice) quadruple to a board_t value
 * and a short model name. */
typedef struct _bpmod_info_t {
	unsigned int vendor;	/* PCI vendor ID (or PCI_ANY_ID) */
	unsigned int device;	/* PCI device ID (or PCI_ANY_ID) */
	unsigned int subvendor;	/* PCI subsystem vendor ID */
	unsigned int subdevice;	/* PCI subsystem device ID */
	unsigned int index;	/* board_t value; also indexes dev_desc[] */
	char *bp_name;	/* short model string reported to userspace */
} bpmod_info_t;
/* Human-readable board description, one per board_t value. */
typedef struct _dev_desc {
	char *name;
} dev_desc_t;

/*
 * Long description for each supported adapter, indexed by board_t:
 * init_one() resolves a matched device's description as
 * dev_desc[info->index].name, so this table must list exactly one
 * string per board_t enumerator, in the same order, terminated by {0}.
 *
 * Fix: the entry for PE10GDBILR ("PE10G2DBILR") was missing, which
 * shifted every description after PE10G2DBISR up by one slot and left
 * the final board (PE210G2BPi40) pointing at the NULL terminator.
 */
dev_desc_t dev_desc[] = {
	{"Silicom Bypass PXG2BPFI-SD series adapter"},
	{"Silicom Bypass PXG2BPFIL-SD series adapter"},
	{"Silicom Bypass PXG2BPFILX-SD series adapter"},
	{"Silicom Bypass PXG2BPFILLX-SD series adapter"},
	{"Silicom Bypass PXG2BPI-SD series adapter"},
	{"Silicom Bypass PXG2BPIG-SD series adapter"},
	{"Silicom Bypass PXG2TBFI-SD series adapter"},
	{"Silicom Bypass PXG4BPI-SD series adapter"},
	{"Silicom Bypass PXG4BPFI-SD series adapter"},
	{"Silicom Bypass PEG4BPI-SD series adapter"},
	{"Silicom Bypass PEG2BPI-SD series adapter"},
	{"Silicom Bypass PEG4BPIN-SD series adapter"},
	{"Silicom Bypass PEG2BPFI-SD series adapter"},
	{"Silicom Bypass PEG2BPFI-LX-SD series adapter"},
	{"Silicom Bypass PMCX2BPFI-SD series adapter"},
	{"Silicom Bypass PMCX2BPFI-N series adapter"},
	{"Intel Bypass PEG2BPII series adapter"},
	{"Intel Bypass PEG2BPFII series adapter"},
	{"Silicom Bypass PXG4BPFILX-SD series adapter"},
	{"Silicom Bypass PMCX2BPI-N series adapter"},
	{"Silicom Bypass PMCX4BPI-N series adapter"},
	{"Silicom Bypass PXG2BISC1-SD series adapter"},
	{"Silicom Bypass PEG2TBFI-SD series adapter"},
	{"Silicom Bypass PXG2TBI-SD series adapter"},
	{"Silicom Bypass PXG4BPFID-SD series adapter"},
	{"Silicom Bypass PEG4BPFI-SD series adapter"},
	{"Silicom Bypass PEG4BPIPT-SD series adapter"},
	{"Silicom Bypass PXG6BPI-SD series adapter"},
	{"Silicom Bypass PEG4BPIL-SD series adapter"},
	{"Silicom Bypass PMCX2BPI-N2 series adapter"},
	{"Silicom Bypass PMCX4BPI-N2 series adapter"},
	{"Silicom Bypass PMCX2BPI-SD series adapter"},
	{"Silicom Bypass PEG2BPFID-SD series adapter"},
	{"Silicom Bypass PEG2BPFIDLX-SD series adapter"},
	{"Silicom Bypass PMCX4BPI-SD series adapter"},
	{"Silicom Bypass MEG2BPFILN-SD series adapter"},
	{"Silicom Bypass MEG2BPFINX-SD series adapter"},
	{"Silicom Bypass PEG4BPFILX-SD series adapter"},
	{"Silicom Bypass PE10G2BPISR-SD series adapter"},
	{"Silicom Bypass PE10G2BPILR-SD series adapter"},
	{"Silicom Bypass MHIO8AD-SD series adapter"},
	{"Silicom Bypass PE10G2BPICX4-SD series adapter"},
	{"Silicom Bypass PEG2BPI5-SD series adapter"},
	{"Silicom Bypass PEG6BPI5-SD series adapter"},
	{"Silicom Bypass PEG4BPFI5-SD series adapter"},
	{"Silicom Bypass PEG4BPFI5LX-SD series adapter"},
	{"Silicom Bypass MEG2BPFILXLN-SD series adapter"},
	{"Silicom Bypass PEG2BPIX1-SD series adapter"},
	{"Silicom Bypass MEG2BPFILXNX-SD series adapter"},
	{"Silicom Bypass XE10G2BPIT-SD series adapter"},
	{"Silicom Bypass XE10G2BPICX4-SD series adapter"},
	{"Silicom Bypass XE10G2BPISR-SD series adapter"},
	{"Silicom Bypass XE10G2BPILR-SD series adapter"},
	{"Intel Bypass PEG2BPFII0 series adapter"},
	{"Silicom Bypass XE10G2BPIXR series adapter"},
	{"Silicom Bypass PE10G2DBISR series adapter"},
	{"Silicom Bypass PE10G2DBILR series adapter"},	/* was missing: realigns all entries below */
	{"Silicom Bypass PEG2BI5SC6 series adapter"},
	{"Silicom Bypass PEG6BPI5FC series adapter"},
	{"Silicom Bypass PE10G2BPTCX4 series adapter"},
	{"Silicom Bypass PE10G2BPTSR series adapter"},
	{"Silicom Bypass PE10G2BPTLR series adapter"},
	{"Silicom Bypass PE10G2BPTT series adapter"},
	{"Silicom Bypass PEG4BPI6 series adapter"},
	{"Silicom Bypass PEG4BPFI6 series adapter"},
	{"Silicom Bypass PEG4BPFI6LX series adapter"},
	{"Silicom Bypass PEG4BPFI6ZX series adapter"},
	{"Silicom Bypass PEG2BPI6 series adapter"},
	{"Silicom Bypass PEG2BPFI6 series adapter"},
	{"Silicom Bypass PEG2BPFI6LX series adapter"},
	{"Silicom Bypass PEG2BPFI6ZX series adapter"},
	{"Silicom Bypass PEG2BPFI6FLXM series adapter"},
	{"Silicom Bypass PEG4BPI6FC series adapter"},
	{"Silicom Bypass PEG4BPFI6FC series adapter"},
	{"Silicom Bypass PEG4BPFI6FCLX series adapter"},
	{"Silicom Bypass PEG4BPFI6FCZX series adapter"},
	{"Silicom Bypass PEG6BPI6 series adapter"},
	{"Silicom Bypass PEG2BPI6SC6 series adapter"},
	{"Silicom Bypass MEG2BPI6 series adapter"},
	{"Silicom Bypass XEG2BPI6 series adapter"},
	{"Silicom Bypass MEG4BPI6 series adapter"},
	{"Silicom Bypass PEG2BPFI5-SD series adapter"},
	{"Silicom Bypass PEG2BPFI5LX-SD series adapter"},
	{"Silicom Bypass PXEG4BPFI-SD series adapter"},
	{"Silicom Bypass MxEG2BPI6 series adapter"},
	{"Silicom Bypass MxEG2BPFI6 series adapter"},
	{"Silicom Bypass MxEG2BPFI6LX series adapter"},
	{"Silicom Bypass MxEG2BPFI6ZX series adapter"},
	{"Silicom Bypass MxEG4BPI6 series adapter"},
	{"Silicom Bypass MxEG4BPFI6 series adapter"},
	{"Silicom Bypass MxEG4BPFI6LX series adapter"},
	{"Silicom Bypass MxEG4BPFI6ZX series adapter"},
	{"Silicom Bypass MxEG6BPI6 series adapter"},
	{"Silicom Bypass MxE2G4BPi80 series adapter"},
	{"Silicom Bypass MxE2G4BPFi80 series adapter"},
	{"Silicom Bypass MxE2G4BPFi80LX series adapter"},
	{"Silicom Bypass MxE2G4BPFi80ZX series adapter"},
	{"Silicom Bypass PE210G2SPI9 series adapter"},
	{"Silicom Bypass MxE210G2BPI9CX4 series adapter"},
	{"Silicom Bypass MxE210G2BPI9SR series adapter"},
	{"Silicom Bypass MxE210G2BPI9LR series adapter"},
	{"Silicom Bypass MxE210G2BPI9T series adapter"},
	{"Silicom Bypass PE210G2BPI9CX4 series adapter"},
	{"Silicom Bypass PE210G2BPI9SR series adapter"},
	{"Silicom Bypass PE210G2BPI9LR series adapter"},
	{"Silicom Bypass PE210G2BPI9T series adapter"},
	{"Silicom Bypass M2EG2BPFI6 series adapter"},
	{"Silicom Bypass M2EG2BPFI6LX series adapter"},
	{"Silicom Bypass M2EG2BPFI6ZX series adapter"},
	{"Silicom Bypass M2EG4BPI6 series adapter"},
	{"Silicom Bypass M2EG4BPFI6 series adapter"},
	{"Silicom Bypass M2EG4BPFI6LX series adapter"},
	{"Silicom Bypass M2EG4BPFI6ZX series adapter"},
	{"Silicom Bypass M2EG6BPI6 series adapter"},
	{"Silicom Bypass PEG2DBI6 series adapter"},
	{"Silicom Bypass PEG2DBFI6 series adapter"},
	{"Silicom Bypass PEG2DBFI6LX series adapter"},
	{"Silicom Bypass PEG2DBFI6ZX series adapter"},
	{"Silicom Bypass PE2G4BPi80 series adapter"},
	{"Silicom Bypass PE2G4BPFi80 series adapter"},
	{"Silicom Bypass PE2G4BPFi80LX series adapter"},
	{"Silicom Bypass PE2G4BPFi80ZX series adapter"},
	{"Silicom Bypass PE2G4BPi80L series adapter"},
	{"Silicom Bypass MxE2G8BPi80A series adapter"},
	{"Silicom Bypass PE2G2BPi35 series adapter"},
	{"Silicom Bypass PAC1200BPi35 series adapter"},
	{"Silicom Bypass PE2G2BPFi35 series adapter"},
	{"Silicom Bypass PE2G2BPFi35LX series adapter"},
	{"Silicom Bypass PE2G2BPFi35ZX series adapter"},
	{"Silicom Bypass PE2G4BPi35 series adapter"},
	{"Silicom Bypass PE2G4BPi35L series adapter"},
	{"Silicom Bypass PE2G4BPFi35 series adapter"},
	{"Silicom Bypass PE2G4BPFi35LX series adapter"},
	{"Silicom Bypass PE2G4BPFi35ZX series adapter"},
	{"Silicom Bypass PE2G6BPi35 series adapter"},
	{"Silicom Bypass PE2G6BPi35CX series adapter"},
	{"Silicom Bypass PE2G2BPi80 series adapter"},
	{"Silicom Bypass PE2G2BPFi80 series adapter"},
	{"Silicom Bypass PE2G2BPFi80LX series adapter"},
	{"Silicom Bypass PE2G2BPFi80ZX series adapter"},
	{"Silicom Bypass M2E10G2BPI9CX4 series adapter"},
	{"Silicom Bypass M2E10G2BPI9SR series adapter"},
	{"Silicom Bypass M2E10G2BPI9LR series adapter"},
	{"Silicom Bypass M2E10G2BPI9T series adapter"},
	{"Silicom Bypass MxE2G8BPi80 series adapter"},
	{"Silicom Bypass PE210G2DBi9SR series adapter"},
	{"Silicom Bypass PE210G2DBi9SRRB series adapter"},
	{"Silicom Bypass PE210G2DBi9LR series adapter"},
	{"Silicom Bypass PE210G2DBi9LRRB series adapter"},
	{"Silicom Bypass PE310G4DBi9-SR series adapter"},
	{"Silicom Bypass PE310G4BPi9T series adapter"},
	{"Silicom Bypass PE310G4BPi9SR series adapter"},
	{"Silicom Bypass PE310G4BPi9LR series adapter"},
	{"Silicom Bypass PE210G2BPi40T series adapter"},
	{0},
};
/*
 * PCI identification table: each row maps a (vendor, device, subvendor,
 * subdevice) quadruple to its board_t index and short model name.
 * Scanned linearly by the probe code; terminated by the all-zero row.
 */
static bpmod_info_t tx_ctl_pci_tbl[] = {
	{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG2BPFI_SSID, PXG2BPFI,
	 "PXG2BPFI-SD"},
	{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG2BPFIL_SSID, PXG2BPFIL,
	 "PXG2BPFIL-SD"},
	{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG2BPFILX_SSID, PXG2BPFILX,
	 "PXG2BPFILX-SD"},
	{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG2BPFILLX_SSID, PXG2BPFILLX,
	 "PXG2BPFILLXSD"},
	{0x8086, 0x1010, SILICOM_SVID, SILICOM_PXGBPI_SSID, PXGBPI,
	 "PXG2BPI-SD"},
	{0x8086, 0x1079, SILICOM_SVID, SILICOM_PXGBPIG_SSID, PXGBPIG,
	 "PXG2BPIG-SD"},
	{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG2TBFI_SSID, PXG2TBFI,
	 "PXG2TBFI-SD"},
	{0x8086, 0x1079, SILICOM_SVID, SILICOM_PXG4BPI_SSID, PXG4BPI,
	 "PXG4BPI-SD"},
	{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG4BPFI_SSID, PXG4BPFI,
	 "PXG4BPFI-SD"},
	{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG4BPFILX_SSID, PXG4BPFILX,
	 "PXG4BPFILX-SD"},
	{0x8086, 0x1079, SILICOM_SVID, SILICOM_PEG4BPI_SSID, PEG4BPI,
	 "PEXG4BPI-SD"},
	{0x8086, 0x105e, SILICOM_SVID, SILICOM_PEG2BPI_SSID, PEG2BPI,
	 "PEG2BPI-SD"},
	{0x8086, 0x105e, SILICOM_SVID, SILICOM_PEG4BPIN_SSID, PEG4BPIN,
	 "PEG4BPI-SD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_PEG2BPFI_SSID, PEG2BPFI,
	 "PEG2BPFI-SD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_PEG2BPFILX_SSID, PEG2BPFILX,
	 "PEG2BPFILX-SD"},
	{0x8086, 0x107a, SILICOM_SVID, SILICOM_PMCXG2BPFI_SSID, PMCXG2BPFI,
	 "PMCX2BPFI-SD"},
	{0x8086, 0x107a, NOKIA_PMCXG2BPFIN_SVID, NOKIA_PMCXG2BPFIN_SSID,
	 PMCXG2BPFIN, "PMCX2BPFI-N"},
	{0x8086, INTEL_PEG4BPII_PID, 0x8086, INTEL_PEG4BPII_SSID, PEG4BPII,
	 "PEG4BPII"},
	{0x8086, INTEL_PEG4BPIIO_PID, 0x8086, INTEL_PEG4BPIIO_SSID, PEG4BPIIO,
	 "PEG4BPII0"},
	{0x8086, INTEL_PEG4BPFII_PID, 0x8086, INTEL_PEG4BPFII_SSID, PEG4BPFII,
	 "PEG4BPFII"},
	{0x8086, 0x1079, NOKIA_PMCXG2BPFIN_SVID, NOKIA_PMCXG2BPIN_SSID,
	 PMCXG2BPIN, "PMCX2BPI-N"},
	{0x8086, 0x1079, NOKIA_PMCXG2BPFIN_SVID, NOKIA_PMCXG4BPIN_SSID,
	 PMCXG4BPIN, "PMCX4BPI-N"},
	{0x8086, 0x1079, SILICOM_SVID, SILICOM_PXG2BISC1_SSID, PXG2BISC1,
	 "PXG2BISC1-SD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_PEG2TBFI_SSID, PEG2TBFI,
	 "PEG2TBFI-SD"},
	{0x8086, 0x1079, SILICOM_SVID, SILICOM_PXG2TBI_SSID, PXG2TBI,
	 "PXG2TBI-SD"},
	{0x8086, 0x107a, SILICOM_SVID, SILICOM_PXG4BPFID_SSID, PXG4BPFID,
	 "PXG4BPFID-SD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_PEG4BPFI_SSID, PEG4BPFI,
	 "PEG4BPFI-SD"},
	{0x8086, 0x105e, SILICOM_SVID, SILICOM_PEG4BPIPT_SSID, PEG4BPIPT,
	 "PEG4BPIPT-SD"},
	{0x8086, 0x1079, SILICOM_SVID, SILICOM_PXG6BPI_SSID, PXG6BPI,
	 "PXG6BPI-SD"},
	{0x8086, 0x10a7, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPIL_SSID /*PCI_ANY_ID */ , PEG4BPIL, "PEG4BPIL-SD"},
	{0x8086, 0x1079, NOKIA_PMCXG2BPFIN_SVID, NOKIA_PMCXG2BPIN2_SSID,
	 PMCXG2BPIN2, "PMCX2BPI-N2"},
	{0x8086, 0x1079, NOKIA_PMCXG2BPFIN_SVID, NOKIA_PMCXG4BPIN2_SSID,
	 PMCXG4BPIN2, "PMCX4BPI-N2"},
	{0x8086, 0x1079, SILICOM_SVID, SILICOM_PMCX2BPI_SSID, PMCX2BPI,
	 "PMCX2BPI-SD"},
	{0x8086, 0x1079, SILICOM_SVID, SILICOM_PMCX4BPI_SSID, PMCX4BPI,
	 "PMCX4BPI-SD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_PEG2BPFID_SSID, PEG2BPFID,
	 "PEG2BPFID-SD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_PEG2BPFIDLX_SSID, PEG2BPFIDLX,
	 "PEG2BPFIDLXSD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_MEG2BPFILN_SSID, MEG2BPFILN,
	 "MEG2BPFILN-SD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_MEG2BPFINX_SSID, MEG2BPFINX,
	 "MEG2BPFINX-SD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_PEG4BPFILX_SSID, PEG4BPFILX,
	 "PEG4BPFILX-SD"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID, SILICOM_PE10G2BPISR_SSID,
	 PE10G2BPISR, "PE10G2BPISR"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID, SILICOM_PE10G2BPILR_SSID,
	 PE10G2BPILR, "PE10G2BPILR"},
	{0x8086, 0x10a9, SILICOM_SVID, SILICOM_MHIO8AD_SSID, MHIO8AD,
	 "MHIO8AD-SD"},
	/* NOTE(review): this CX4 subsystem ID maps to board index
	 * PE10G2BPISR, not PE10G2BPICX4 — looks like a copy/paste slip,
	 * but may be deliberate (CX4 handled as the SR variant); confirm
	 * before changing. */
	{0x8086, PCI_ANY_ID, SILICOM_SVID, SILICOM_PE10G2BPICX4_SSID,
	 PE10G2BPISR, "PE10G2BPICX4"},
	{0x8086, 0x10a7, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2BPI5_SSID /*PCI_ANY_ID */ , PEG2BPI5, "PEG2BPI5-SD"},
	{0x8086, 0x10a7, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG6BPI_SSID /*PCI_ANY_ID */ , PEG6BPI, "PEG6BPI5"},
	{0x8086, 0x10a9, SILICOM_SVID /*PCI_ANY_ID */ , SILICOM_PEG4BPFI5_SSID,
	 PEG4BPFI5, "PEG4BPFI5"},
	{0x8086, 0x10a9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPFI5LX_SSID, PEG4BPFI5LX, "PEG4BPFI5LX"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_MEG2BPFILXLN_SSID, MEG2BPFILXLN,
	 "MEG2BPFILXLN"},
	{0x8086, 0x105e, SILICOM_SVID, SILICOM_PEG2BPIX1_SSID, PEG2BPIX1,
	 "PEG2BPIX1-SD"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_MEG2BPFILXNX_SSID, MEG2BPFILXNX,
	 "MEG2BPFILXNX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID, SILICOM_XE10G2BPIT_SSID, XE10G2BPIT,
	 "XE10G2BPIT"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID, SILICOM_XE10G2BPICX4_SSID,
	 XE10G2BPICX4, "XE10G2BPICX4"},
	{0x8086, 0x10C6, SILICOM_SVID, SILICOM_XE10G2BPISR_SSID, XE10G2BPISR,
	 "XE10G2BPISR"},
	{0x8086, 0x10C6, SILICOM_SVID, SILICOM_XE10G2BPILR_SSID, XE10G2BPILR,
	 "XE10G2BPILR"},
	{0x8086, 0x10C6, NOKIA_XE10G2BPIXR_SVID, NOKIA_XE10G2BPIXR_SSID,
	 XE10G2BPIXR, "XE10G2BPIXR"},
	{0x8086, 0x10C6, SILICOM_SVID, SILICOM_PE10GDBISR_SSID, PE10GDBISR,
	 "PE10G2DBISR"},
	{0x8086, 0x10C6, SILICOM_SVID, SILICOM_PE10GDBILR_SSID, PE10GDBILR,
	 "PE10G2DBILR"},
	{0x8086, 0x10a7, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2BISC6_SSID /*PCI_ANY_ID */ , PEG2BISC6, "PEG2BI5SC6"},
	{0x8086, 0x10a7, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG6BPIFC_SSID /*PCI_ANY_ID */ , PEG6BPIFC, "PEG6BPI5FC"},
	/* Broadcom-based 10Gb boards. */
	{BROADCOM_VID, BROADCOM_PE10G2_PID, SILICOM_SVID,
	 SILICOM_PE10G2BPTCX4_SSID, PE10G2BPTCX4, "PE10G2BPTCX4"},
	{BROADCOM_VID, BROADCOM_PE10G2_PID, SILICOM_SVID,
	 SILICOM_PE10G2BPTSR_SSID, PE10G2BPTSR, "PE10G2BPTSR"},
	{BROADCOM_VID, BROADCOM_PE10G2_PID, SILICOM_SVID,
	 SILICOM_PE10G2BPTLR_SSID, PE10G2BPTLR, "PE10G2BPTLR"},
	{BROADCOM_VID, BROADCOM_PE10G2_PID, SILICOM_SVID,
	 SILICOM_PE10G2BPTT_SSID, PE10G2BPTT, "PE10G2BPTT"},
	/* {BROADCOM_VID, BROADCOM_PE10G2_PID, PCI_ANY_ID, PCI_ANY_ID, PE10G2BPTCX4, "PE10G2BPTCX4"}, */
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPI6_SSID /*PCI_ANY_ID */ , PEG4BPI6, "PEG4BPI6"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPFI6_SSID /*PCI_ANY_ID */ , PEG4BPFI6, "PEG4BPFI6"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPFI6LX_SSID /*PCI_ANY_ID */ , PEG4BPFI6LX, "PEG4BPFI6LX"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPFI6ZX_SSID /*PCI_ANY_ID */ , PEG4BPFI6ZX, "PEG4BPFI6ZX"},
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2BPI6_SSID /*PCI_ANY_ID */ , PEG2BPI6, "PEG2BPI6"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2BPFI6_SSID /*PCI_ANY_ID */ , PEG2BPFI6, "PEG2BPFI6"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2BPFI6LX_SSID /*PCI_ANY_ID */ , PEG2BPFI6LX, "PEG2BPFI6LX"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2BPFI6ZX_SSID /*PCI_ANY_ID */ , PEG2BPFI6ZX, "PEG2BPFI6ZX"},
	{0x8086, 0x10e7, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2BPFI6FLXM_SSID /*PCI_ANY_ID */ , PEG2BPFI6FLXM,
	 "PEG2BPFI6FLXM"},
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPI6FC_SSID /*PCI_ANY_ID */ , PEG4BPI6FC, "PEG4BPI6FC"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPFI6FC_SSID /*PCI_ANY_ID */ , PEG4BPFI6FC, "PEG4BPFI6FC"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPFI6FCLX_SSID /*PCI_ANY_ID */ , PEG4BPFI6FCLX,
	 "PEG4BPFI6FCLX"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG4BPFI6FCZX_SSID /*PCI_ANY_ID */ , PEG4BPFI6FCZX,
	 "PEG4BPFI6FCZX"},
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG6BPI6_SSID /*PCI_ANY_ID */ , PEG6BPI6, "PEG6BPI6"},
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2BPI6SC6_SSID /*PCI_ANY_ID */ , PEG2BPI6SC6,
	 "PEG6BPI62SC6"},
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_MEG2BPI6_SSID /*PCI_ANY_ID */ , MEG2BPI6, "MEG2BPI6"},
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_XEG2BPI6_SSID /*PCI_ANY_ID */ , XEG2BPI6, "XEG2BPI6"},
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_MEG4BPI6_SSID /*PCI_ANY_ID */ , MEG4BPI6, "MEG4BPI6"},
	{0x8086, 0x10a9, SILICOM_SVID /*PCI_ANY_ID */ , SILICOM_PEG2BPFI5_SSID,
	 PEG2BPFI5, "PEG2BPFI5"},
	{0x8086, 0x10a9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2BPFI5LX_SSID, PEG2BPFI5LX, "PEG2BPFI5LX"},
	{0x8086, 0x105f, SILICOM_SVID, SILICOM_PXEG4BPFI_SSID, PXEG4BPFI,
	 "PXEG4BPFI-SD"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1EG2BPI6_SSID /*PCI_ANY_ID */ , M1EG2BPI6, "MxEG2BPI6"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1EG2BPFI6_SSID /*PCI_ANY_ID */ , M1EG2BPFI6, "MxEG2BPFI6"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1EG2BPFI6LX_SSID /*PCI_ANY_ID */ , M1EG2BPFI6LX,
	 "MxEG2BPFI6LX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1EG2BPFI6ZX_SSID /*PCI_ANY_ID */ , M1EG2BPFI6ZX,
	 "MxEG2BPFI6ZX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1EG4BPI6_SSID /*PCI_ANY_ID */ , M1EG4BPI6, "MxEG4BPI6"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1EG4BPFI6_SSID /*PCI_ANY_ID */ , M1EG4BPFI6, "MxEG4BPFI6"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1EG4BPFI6LX_SSID /*PCI_ANY_ID */ , M1EG4BPFI6LX,
	 "MxEG4BPFI6LX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1EG4BPFI6ZX_SSID /*PCI_ANY_ID */ , M1EG4BPFI6ZX,
	 "MxEG4BPFI6ZX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1EG6BPI6_SSID /*PCI_ANY_ID */ , M1EG6BPI6, "MxEG6BPI6"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1E2G4BPi80_SSID /*PCI_ANY_ID */ , M1E2G4BPi80, "MxE2G4BPi80"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1E2G4BPFi80_SSID /*PCI_ANY_ID */ , M1E2G4BPFi80,
	 "MxE2G4BPFi80"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1E2G4BPFi80LX_SSID /*PCI_ANY_ID */ , M1E2G4BPFi80LX,
	 "MxE2G4BPFi80LX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1E2G4BPFi80ZX_SSID /*PCI_ANY_ID */ , M1E2G4BPFi80ZX,
	 "MxE2G4BPFi80ZX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2EG2BPFI6_SSID /*PCI_ANY_ID */ , M2EG2BPFI6, "M2EG2BPFI6"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2EG2BPFI6LX_SSID /*PCI_ANY_ID */ , M2EG2BPFI6LX,
	 "M2EG2BPFI6LX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2EG2BPFI6ZX_SSID /*PCI_ANY_ID */ , M2EG2BPFI6ZX,
	 "M2EG2BPFI6ZX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2EG4BPI6_SSID /*PCI_ANY_ID */ , M2EG4BPI6, "M2EG4BPI6"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2EG4BPFI6_SSID /*PCI_ANY_ID */ , M2EG4BPFI6, "M2EG4BPFI6"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2EG4BPFI6LX_SSID /*PCI_ANY_ID */ , M2EG4BPFI6LX,
	 "M2EG4BPFI6LX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2EG4BPFI6ZX_SSID /*PCI_ANY_ID */ , M2EG4BPFI6ZX,
	 "M2EG4BPFI6ZX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2EG6BPI6_SSID /*PCI_ANY_ID */ , M2EG6BPI6, "M2EG6BPI6"},
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2DBI6_SSID /*PCI_ANY_ID */ , PEG2DBI6, "PEG2DBI6"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2DBFI6_SSID /*PCI_ANY_ID */ , PEG2DBFI6, "PEG2DBFI6"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2DBFI6LX_SSID /*PCI_ANY_ID */ , PEG2DBFI6LX, "PEG2DBFI6LX"},
	{0x8086, 0x10e6, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PEG2DBFI6ZX_SSID /*PCI_ANY_ID */ , PEG2DBFI6ZX, "PEG2DBFI6ZX"},
	{0x8086, 0x10F9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE210G2DBi9SR_SSID, PE210G2DBi9SR, "PE210G2DBi9SR"},
	{0x8086, 0x10F9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE210G2DBi9LR_SSID, PE210G2DBi9LR, "PE210G2DBi9LR"},
	{0x8086, 0x10F9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE310G4DBi940SR_SSID, PE310G4DBi940SR, "PE310G4DBi9SR"},
	{0x8086, 0x10Fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE310G4BPi9T_SSID, PE310G4BPi9T, "PE310G4BPi9T"},
	{0x8086, 0x10Fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE310G4BPi9SR_SSID, PE310G4BPi9SR, "PE310G4BPi9SR"},
	{0x8086, 0x10Fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE310G4BPi9LR_SSID, PE310G4BPi9LR, "PE310G4BPi9LR"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPi80_SSID /*PCI_ANY_ID */ , PE2G4BPi80, "PE2G4BPi80"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPFi80_SSID /*PCI_ANY_ID */ , PE2G4BPFi80, "PE2G4BPFi80"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPFi80LX_SSID /*PCI_ANY_ID */ , PE2G4BPFi80LX,
	 "PE2G4BPFi80LX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPFi80ZX_SSID /*PCI_ANY_ID */ , PE2G4BPFi80ZX,
	 "PE2G4BPFi80ZX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPi80L_SSID /*PCI_ANY_ID */ , PE2G4BPi80L, "PE2G4BPi80L"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M6E2G8BPi80A_SSID /*PCI_ANY_ID */ , M6E2G8BPi80A,
	 "MxE2G8BPi80A"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G2BPi35_SSID /*PCI_ANY_ID */ , PE2G2BPi35, "PE2G2BPi35"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PAC1200BPi35_SSID /*PCI_ANY_ID */ , PAC1200BPi35,
	 "PAC1200BPi35"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G2BPFi35_SSID /*PCI_ANY_ID */ , PE2G2BPFi35, "PE2G2BPFi35"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G2BPFi35LX_SSID /*PCI_ANY_ID */ , PE2G2BPFi35LX,
	 "PE2G2BPFi35LX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G2BPFi35ZX_SSID /*PCI_ANY_ID */ , PE2G2BPFi35ZX,
	 "PE2G2BPFi35ZX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPi35_SSID /*PCI_ANY_ID */ , PE2G4BPi35, "PE2G4BPi35"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPi35L_SSID /*PCI_ANY_ID */ , PE2G4BPi35L, "PE2G4BPi35L"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPFi35_SSID /*PCI_ANY_ID */ , PE2G4BPFi35, "PE2G4BPFi35"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPFi35LX_SSID /*PCI_ANY_ID */ , PE2G4BPFi35LX,
	 "PE2G4BPFi35LX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G4BPFi35ZX_SSID /*PCI_ANY_ID */ , PE2G4BPFi35ZX,
	 "PE2G4BPFi35ZX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G6BPi35_SSID /*PCI_ANY_ID */ , PE2G6BPi35, "PE2G6BPi35"},
	/* PE2G6BPi35CX boards use a whole range of subsystem IDs
	 * (0xaa0-0xabf), one row per ID. */
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa0, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa1, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa2, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa3, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa4, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa5, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa6, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa7, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa8, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaa9, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaaa, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaab, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaac, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaad, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaae, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaaf, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab0, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab1, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab2, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab3, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab4, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab5, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab6, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab7, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab8, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xab9, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xaba, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xabb, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xabc, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xabd, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xabe, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ , 0xabf, PE2G6BPi35CX,
	 "PE2G6BPi35CX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G2BPi80_SSID /*PCI_ANY_ID */ , PE2G2BPi80, "PE2G2BPi80"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G2BPFi80_SSID /*PCI_ANY_ID */ , PE2G2BPFi80, "PE2G2BPFi80"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G2BPFi80LX_SSID /*PCI_ANY_ID */ , PE2G2BPFi80LX,
	 "PE2G2BPFi80LX"},
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE2G2BPFi80ZX_SSID /*PCI_ANY_ID */ , PE2G2BPFi80ZX,
	 "PE2G2BPFi80ZX"},
	/* NOTE(review): the next two rows duplicate the MEG2BPI6/XEG2BPI6
	 * entries listed earlier in this table (the first match wins in a
	 * linear scan); probably harmless, but confirm before pruning. */
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_MEG2BPI6_SSID /*PCI_ANY_ID */ , MEG2BPI6, "MEG2BPI6"},
	{0x8086, 0x10c9, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_XEG2BPI6_SSID /*PCI_ANY_ID */ , XEG2BPI6, "XEG2BPI6"},
#if 0
	{0x8086, 0x10fb, 0x8086, INTEL_PE210G2SPI9_SSID, PE210G2SPI9,
	 "PE210G2SPI9"},
#endif
	{0x8086, 0x10fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1E10G2BPI9CX4_SSID /*PCI_ANY_ID */ , M1E10G2BPI9CX4,
	 "MxE210G2BPI9CX4"},
	{0x8086, 0x10fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1E10G2BPI9SR_SSID /*PCI_ANY_ID */ , M1E10G2BPI9SR,
	 "MxE210G2BPI9SR"},
	{0x8086, 0x10fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1E10G2BPI9LR_SSID /*PCI_ANY_ID */ , M1E10G2BPI9LR,
	 "MxE210G2BPI9LR"},
	{0x8086, 0x10fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M1E10G2BPI9T_SSID /*PCI_ANY_ID */ , M1E10G2BPI9T,
	 "MxE210G2BPI9T"},
	{0x8086, 0x10fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2E10G2BPI9CX4_SSID /*PCI_ANY_ID */ , M2E10G2BPI9CX4,
	 "M2E10G2BPI9CX4"},
	{0x8086, 0x10fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2E10G2BPI9SR_SSID /*PCI_ANY_ID */ , M2E10G2BPI9SR,
	 "M2E10G2BPI9SR"},
	{0x8086, 0x10fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2E10G2BPI9LR_SSID /*PCI_ANY_ID */ , M2E10G2BPI9LR,
	 "M2E10G2BPI9LR"},
	{0x8086, 0x10fb, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M2E10G2BPI9T_SSID /*PCI_ANY_ID */ , M2E10G2BPI9T,
	 "M2E10G2BPI9T"},
	{0x8086, 0x10fb, SILICOM_SVID, SILICOM_PE210G2BPI9CX4_SSID,
	 PE210G2BPI9CX4, "PE210G2BPI9CX4"},
	{0x8086, 0x10fb, SILICOM_SVID, SILICOM_PE210G2BPI9SR_SSID,
	 PE210G2BPI9SR, "PE210G2BPI9SR"},
	{0x8086, 0x10fb, SILICOM_SVID, SILICOM_PE210G2BPI9LR_SSID,
	 PE210G2BPI9LR, "PE210G2BPI9LR"},
	{0x8086, 0x10fb, SILICOM_SVID, SILICOM_PE210G2BPI9T_SSID, PE210G2BPI9T,
	 "PE210G2BPI9T"},
#if 0
	{0x1374, 0x2c, SILICOM_SVID, SILICOM_PXG4BPI_SSID, PXG4BPI,
	 "PXG4BPI-SD"},
	{0x1374, 0x2d, SILICOM_SVID, SILICOM_PXG4BPFI_SSID, PXG4BPFI,
	 "PXG4BPFI-SD"},
	{0x1374, 0x3f, SILICOM_SVID, SILICOM_PXG2TBI_SSID, PXG2TBI,
	 "PXG2TBI-SD"},
	{0x1374, 0x3d, SILICOM_SVID, SILICOM_PXG2BISC1_SSID, PXG2BISC1,
	 "PXG2BISC1-SD"},
	{0x1374, 0x40, SILICOM_SVID, SILICOM_PEG4BPFI_SSID, PEG4BPFI,
	 "PEG4BPFI-SD"},
#ifdef BP_SELF_TEST
	{0x1374, 0x28, SILICOM_SVID, 0x28, PXGBPI, "PXG2BPI-SD"},
#endif
#endif
	{0x8086, PCI_ANY_ID, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_M6E2G8BPi80_SSID /*PCI_ANY_ID */ , M6E2G8BPi80, "MxE2G8BPi80"},
	{0x8086, 0x1528, SILICOM_SVID /*PCI_ANY_ID */ ,
	 SILICOM_PE210G2BPi40_SSID /*PCI_ANY_ID */ , PE210G2BPi40,
	 "PE210G2BPi40T"},
	/* required last entry */
	{0,}
};
/*
 * find_fw - read and cache the bypass firmware version of @dev.
 *
 * Old-interface and Intel boards have no readable firmware version, so
 * they are marked 0xff.  On 10Gb boards a reading of 0xff is retried a
 * bounded number of times with BAR0 remapped between attempts, until
 * the value 0xa8 is seen (NOTE(review): magic value taken from the
 * original code - confirm it is the only valid version on these
 * boards).  The result is left in dev->bp_fw_ver and logged.
 */
static void find_fw(bpctl_dev_t *dev)
{
	unsigned long mmio_start, mmio_len;
	struct pci_dev *pdev1 = dev->pdev;

	if ((OLD_IF_SERIES(dev->subdevice)) ||
	    (INTEL_IF_SERIES(dev->subdevice)))
		dev->bp_fw_ver = 0xff;	/* no readable firmware version */
	else
		dev->bp_fw_ver = bypass_fw_ver(dev);

	if (dev->bp_10gb == 1 && dev->bp_fw_ver == 0xff) {
		int cnt = 100;	/* bounded retry, not an infinite loop */

		while (cnt--) {
			/* Remap BAR0 and try the read again. */
			iounmap((void *)dev->mem_map);
			mmio_start = pci_resource_start(pdev1, 0);
			mmio_len = pci_resource_len(pdev1, 0);
			dev->mem_map = (unsigned long)
			    ioremap(mmio_start, mmio_len);
			dev->bp_fw_ver = bypass_fw_ver(dev);
			if (dev->bp_fw_ver == 0xa8)
				break;
		}
	}
	printk(KERN_INFO "firmware version: 0x%x\n", dev->bp_fw_ver);
}
/*
 * init_one - initialise one bpctl device descriptor for a matched PCI
 * function.
 * @dev:   descriptor slot to fill (already zeroed by the caller)
 * @info:  the tx_ctl_pci_tbl entry that matched
 * @pdev1: the matching PCI device
 *
 * Maps BAR0, classifies the adapter family from its subsystem ID and,
 * for non-10G9 parts, reads the firmware version and starts the
 * watchdog/TPL auto helpers.
 *
 * Returns 0 on success, -1 on failure.  As with the pre-existing
 * failure paths, the control character device is unregistered before
 * returning -1 so module init can simply propagate the error.
 */
static int init_one(bpctl_dev_t *dev, bpmod_info_t *info, struct pci_dev *pdev1)
{
	unsigned long mmio_start, mmio_len;

	dev->pdev = pdev1;
	mmio_start = pci_resource_start(pdev1, 0);
	mmio_len = pci_resource_len(pdev1, 0);

	dev->desc = dev_desc[info->index].name;
	dev->name = info->bp_name;
	dev->device = info->device;
	dev->vendor = info->vendor;
	dev->subdevice = info->subdevice;
	dev->subvendor = info->subvendor;
	dev->func = PCI_FUNC(pdev1->devfn);
	dev->slot = PCI_SLOT(pdev1->devfn);
	dev->bus = pdev1->bus->number;
	dev->mem_map = (unsigned long)ioremap(mmio_start, mmio_len);
	/* BUGFIX: the mapping was never checked; every later register
	 * access would have dereferenced a NULL mapping on failure. */
	if (!dev->mem_map) {
		printk("bp_ctl: failed to map BAR0 of %s!\n", dev->name);
		unregister_chrdev(major_num, DEVICE_NAME);
		return -1;
	}
#ifdef BP_SYNC_FLAG
	spin_lock_init(&dev->bypass_wr_lock);
#endif
	/* classify the adapter family by subsystem device ID */
	if (BP10G9_IF_SERIES(dev->subdevice))
		dev->bp_10g9 = 1;
	if (BP10G_IF_SERIES(dev->subdevice))
		dev->bp_10g = 1;
	if (PEG540_IF_SERIES(dev->subdevice))
		dev->bp_540 = 1;
	if (PEGF5_IF_SERIES(dev->subdevice))
		dev->bp_fiber5 = 1;
	if (PEG80_IF_SERIES(dev->subdevice))
		dev->bp_i80 = 1;
	if (PEGF80_IF_SERIES(dev->subdevice))
		dev->bp_i80 = 1;
	if ((dev->subdevice & 0xa00) == 0xa00)
		dev->bp_i80 = 1;
	/* Broadcom 10Gb parts need their network driver loaded and up
	 * before the bypass logic can be reached */
	if (BP10GB_IF_SERIES(dev->subdevice)) {
		if (dev->ifindex == 0) {
			unregister_chrdev(major_num, DEVICE_NAME);
			printk("Please load network driver for %s adapter!\n",
			       dev->name);
			return -1;
		}
		if (dev->ndev && !(dev->ndev->flags & IFF_UP)) {
			unregister_chrdev(major_num, DEVICE_NAME);
			printk("Please bring up network interfaces for %s adapter!\n",
			       dev->name);
			return -1;
		}
		dev->bp_10gb = 1;
	}
	/* 10G9 parts are finished later by module init, once the status
	 * port can be resolved; everything else is set up right here */
	if (!dev->bp_10g9) {
		if (is_bypass_fn(dev)) {
			printk(KERN_INFO "%s found, ",
			       dev->name);
			find_fw(dev);
		}
		dev->wdt_status = WDT_STATUS_UNKNOWN;
		dev->reset_time = 0;
		atomic_set(&dev->wdt_busy, 0);
		dev->bp_status_un = 1;
		bypass_caps_init(dev);
		init_bypass_wd_auto(dev);
		init_bypass_tpl_auto(dev);
		if (NOKIA_SERIES(dev->subdevice))
			reset_cont(dev);
	}
#ifdef BP_SELF_TEST
	/* build the self-test frame template:
	 * dst ff:ff:ff:ff:ff:ff, src 00:aa:aa:aa:aa:aa, type ETH_P_BPTEST */
	dev->bp_tx_data = kzalloc(BPTEST_DATA_LEN, GFP_KERNEL);
	if (dev->bp_tx_data) {
		memset(dev->bp_tx_data, 0xff, 6);
		memset(dev->bp_tx_data + 6, 0x0, 1);
		memset(dev->bp_tx_data + 7, 0xaa, 5);
		*(__be16 *)(dev->bp_tx_data + 12) = htons(ETH_P_BPTEST);
	} else
		printk("bp_ctl: Memory allocation error!\n");
#endif
	return 0;
}
/*
* Initialize the module - Register the character device
*/
/*
 * bypass_init_module - module entry point.
 *
 * Registers the control character device, counts the PCI functions
 * matching tx_ctl_pci_tbl, allocates one descriptor per function and
 * initialises each with init_one().  10G9 parts are finished here in a
 * second pass (their status port must be resolvable first).  Finally
 * the netdev notifier and optional procfs tree are installed.
 *
 * Returns 0 on success or a negative value / -1 on failure.
 */
static int __init bypass_init_module(void)
{
	int ret_val, idx, idx_dev = 0;
	struct pci_dev *pdev1 = NULL;
	bpctl_dev_t *dev;

	printk(BP_MOD_DESCR " v" BP_MOD_VER "\n");
	ret_val = register_chrdev(major_num, DEVICE_NAME, &Fops);
	if (ret_val < 0) {
		printk("%s failed with %d\n", DEVICE_NAME, ret_val);
		return ret_val;
	}
	major_num = ret_val;	/* dynamic */
	/* first pass: just count the matching PCI functions */
	for (idx = 0; tx_ctl_pci_tbl[idx].vendor; idx++) {
		while ((pdev1 = pci_get_subsys(tx_ctl_pci_tbl[idx].vendor,
					       tx_ctl_pci_tbl[idx].device,
					       tx_ctl_pci_tbl[idx].subvendor,
					       tx_ctl_pci_tbl[idx].subdevice,
					       pdev1))) {
			device_num++;
		}
	}
	if (!device_num) {
		printk("No such device\n");
		unregister_chrdev(major_num, DEVICE_NAME);
		return -1;
	}
	/* IMPROVED: kcalloc zero-initialises and checks the
	 * device_num * sizeof() multiplication for overflow, replacing
	 * the old kmalloc + memset pair. */
	bpctl_dev_arr = kcalloc(device_num, sizeof(bpctl_dev_t), GFP_KERNEL);
	if (!bpctl_dev_arr) {
		printk("Allocation error\n");
		unregister_chrdev(major_num, DEVICE_NAME);
		return -1;
	}
	/* second pass: fill one descriptor per matching function */
	pdev1 = NULL;
	dev = bpctl_dev_arr;
	for (idx = 0; tx_ctl_pci_tbl[idx].vendor; idx++) {
		while ((pdev1 = pci_get_subsys(tx_ctl_pci_tbl[idx].vendor,
					       tx_ctl_pci_tbl[idx].device,
					       tx_ctl_pci_tbl[idx].subvendor,
					       tx_ctl_pci_tbl[idx].subdevice,
					       pdev1))) {
			/* init_one() unregisters the chrdev on failure */
			if (init_one(dev, &tx_ctl_pci_tbl[idx], pdev1) < 0)
				return -1;
			dev++;
		}
	}
	if_scan_init();
	sema_init(&bpctl_sema, 1);
	spin_lock_init(&bpvm_lock);
	{
		bpctl_dev_t *pbpctl_dev_c = NULL;
		/* finish 10G9 parts now that the status port is visible */
		for (idx_dev = 0, dev = bpctl_dev_arr;
		     idx_dev < device_num && dev->pdev;
		     idx_dev++, dev++) {
			if (dev->bp_10g9) {
				pbpctl_dev_c = get_status_port_fn(dev);
				if (is_bypass_fn(dev)) {
					printk(KERN_INFO "%s found, ",
					       dev->name);
					dev->bp_fw_ver = bypass_fw_ver(dev);
					printk("firmware version: 0x%x\n",
					       dev->bp_fw_ver);
				}
				dev->wdt_status = WDT_STATUS_UNKNOWN;
				dev->reset_time = 0;
				atomic_set(&dev->wdt_busy, 0);
				dev->bp_status_un = 1;
				bypass_caps_init(dev);
				init_bypass_wd_auto(dev);
				init_bypass_tpl_auto(dev);
			}
		}
	}
	register_netdevice_notifier(&bp_notifier_block);
#ifdef BP_PROC_SUPPORT
	{
		int i = 0;
		bp_proc_create();
		/* (re)create a procfs subtree for every known interface */
		for (i = 0; i < device_num; i++) {
			if (bpctl_dev_arr[i].ifindex) {
				bypass_proc_remove_dev_sd(&bpctl_dev_arr[i]);
				bypass_proc_create_dev_sd(&bpctl_dev_arr[i]);
			}
		}
	}
#endif
	return 0;
}
/*
* Cleanup - unregister the appropriate file from /proc
*/
/*
 * bypass_cleanup_module - module exit point.
 *
 * Tears everything down in the reverse order of init: drop the netdev
 * notifier, stop the per-device procfs/watchdog/TPL services, unmap
 * every BAR0 mapping, release the descriptor array and finally
 * unregister the control character device.
 */
static void __exit bypass_cleanup_module(void)
{
	int idx;

	unregister_netdevice_notifier(&bp_notifier_block);

	/* stop per-device services first */
	for (idx = 0; idx < device_num; idx++) {
		bpctl_dev_t *cur = &bpctl_dev_arr[idx];

#ifdef BP_PROC_SUPPORT
		bypass_proc_remove_dev_sd(cur);
#endif
		remove_bypass_wd_auto(cur);
		cur->reset_time = 0;
		remove_bypass_tpl_auto(cur);
	}

	/* then release per-device resources */
	for (idx = 0; idx < device_num; idx++) {
#ifdef BP_SELF_TEST
		kfree(bpctl_dev_arr[idx].bp_tx_data);
#endif
		iounmap((void *)(bpctl_dev_arr[idx].mem_map));
	}

	kfree(bpctl_dev_arr);
	unregister_chrdev(major_num, DEVICE_NAME);
}
module_init(bypass_init_module);
module_exit(bypass_cleanup_module);
/*
 * Exported "<op>_sd" entry points for other kernel modules (e.g. the
 * network drivers of bypass-capable NICs).  Each wrapper resolves a
 * network interface index to its bpctl device descriptor with
 * get_dev_idx_p() and forwards the call to the matching *_fn() backend.
 * NOTE(review): get_dev_idx_p() presumably returns NULL for an unknown
 * ifindex and the *_fn() backends tolerate a NULL device (returning
 * BP_NOT_CAP / an error) -- confirm against their definitions.
 */
int is_bypass_sd(int ifindex)
{
	return is_bypass(get_dev_idx_p(ifindex));
}
int set_bypass_sd(int ifindex, int bypass_mode)
{
	return set_bypass_fn(get_dev_idx_p(ifindex), bypass_mode);
}
int get_bypass_sd(int ifindex)
{
	return get_bypass_fn(get_dev_idx_p(ifindex));
}
int get_bypass_change_sd(int ifindex)
{
	return get_bypass_change_fn(get_dev_idx_p(ifindex));
}
int set_dis_bypass_sd(int ifindex, int dis_param)
{
	return set_dis_bypass_fn(get_dev_idx_p(ifindex), dis_param);
}
int get_dis_bypass_sd(int ifindex)
{
	return get_dis_bypass_fn(get_dev_idx_p(ifindex));
}
int set_bypass_pwoff_sd(int ifindex, int bypass_mode)
{
	return set_bypass_pwoff_fn(get_dev_idx_p(ifindex), bypass_mode);
}
int get_bypass_pwoff_sd(int ifindex)
{
	return get_bypass_pwoff_fn(get_dev_idx_p(ifindex));
}
int set_bypass_pwup_sd(int ifindex, int bypass_mode)
{
	return set_bypass_pwup_fn(get_dev_idx_p(ifindex), bypass_mode);
}
int get_bypass_pwup_sd(int ifindex)
{
	return get_bypass_pwup_fn(get_dev_idx_p(ifindex));
}
/*
 * Arm the bypass watchdog.  Unlike the other wrappers this one first
 * verifies that the interface is bypass-capable, and reports the
 * timeout value actually programmed back through @ms_timeout_set.
 */
int set_bypass_wd_sd(int if_index, int ms_timeout, int *ms_timeout_set)
{
	if ((is_bypass(get_dev_idx_p(if_index))) <= 0)
		return BP_NOT_CAP;
	/* hand the granted timeout back to the caller */
	*ms_timeout_set = set_bypass_wd_fn(get_dev_idx_p(if_index), ms_timeout);
	return 0;
}
int get_bypass_wd_sd(int ifindex, int *timeout)
{
	return get_bypass_wd_fn(get_dev_idx_p(ifindex), timeout);
}
int get_wd_expire_time_sd(int ifindex, int *time_left)
{
	return get_wd_expire_time_fn(get_dev_idx_p(ifindex), time_left);
}
int reset_bypass_wd_timer_sd(int ifindex)
{
	return reset_bypass_wd_timer_fn(get_dev_idx_p(ifindex));
}
int get_wd_set_caps_sd(int ifindex)
{
	return get_wd_set_caps_fn(get_dev_idx_p(ifindex));
}
int set_std_nic_sd(int ifindex, int nic_mode)
{
	return set_std_nic_fn(get_dev_idx_p(ifindex), nic_mode);
}
int get_std_nic_sd(int ifindex)
{
	return get_std_nic_fn(get_dev_idx_p(ifindex));
}
int set_tap_sd(int ifindex, int tap_mode)
{
	return set_tap_fn(get_dev_idx_p(ifindex), tap_mode);
}
int get_tap_sd(int ifindex)
{
	return get_tap_fn(get_dev_idx_p(ifindex));
}
int set_tap_pwup_sd(int ifindex, int tap_mode)
{
	return set_tap_pwup_fn(get_dev_idx_p(ifindex), tap_mode);
}
int get_tap_pwup_sd(int ifindex)
{
	return get_tap_pwup_fn(get_dev_idx_p(ifindex));
}
int get_tap_change_sd(int ifindex)
{
	return get_tap_change_fn(get_dev_idx_p(ifindex));
}
int set_dis_tap_sd(int ifindex, int dis_param)
{
	return set_dis_tap_fn(get_dev_idx_p(ifindex), dis_param);
}
int get_dis_tap_sd(int ifindex)
{
	return get_dis_tap_fn(get_dev_idx_p(ifindex));
}
int set_bp_disc_sd(int ifindex, int disc_mode)
{
	return set_disc_fn(get_dev_idx_p(ifindex), disc_mode);
}
int get_bp_disc_sd(int ifindex)
{
	return get_disc_fn(get_dev_idx_p(ifindex));
}
int set_bp_disc_pwup_sd(int ifindex, int disc_mode)
{
	return set_disc_pwup_fn(get_dev_idx_p(ifindex), disc_mode);
}
int get_bp_disc_pwup_sd(int ifindex)
{
	return get_disc_pwup_fn(get_dev_idx_p(ifindex));
}
int get_bp_disc_change_sd(int ifindex)
{
	return get_disc_change_fn(get_dev_idx_p(ifindex));
}
int set_bp_dis_disc_sd(int ifindex, int dis_param)
{
	return set_dis_disc_fn(get_dev_idx_p(ifindex), dis_param);
}
int get_bp_dis_disc_sd(int ifindex)
{
	return get_dis_disc_fn(get_dev_idx_p(ifindex));
}
int get_wd_exp_mode_sd(int ifindex)
{
	return get_wd_exp_mode_fn(get_dev_idx_p(ifindex));
}
int set_wd_exp_mode_sd(int ifindex, int param)
{
	return set_wd_exp_mode_fn(get_dev_idx_p(ifindex), param);
}
int reset_cont_sd(int ifindex)
{
	return reset_cont_fn(get_dev_idx_p(ifindex));
}
int set_tx_sd(int ifindex, int tx_state)
{
	return set_tx_fn(get_dev_idx_p(ifindex), tx_state);
}
int set_tpl_sd(int ifindex, int tpl_state)
{
	return set_tpl_fn(get_dev_idx_p(ifindex), tpl_state);
}
int set_bp_hw_reset_sd(int ifindex, int status)
{
	return set_bp_hw_reset_fn(get_dev_idx_p(ifindex), status);
}
int set_wd_autoreset_sd(int ifindex, int param)
{
	return set_wd_autoreset_fn(get_dev_idx_p(ifindex), param);
}
int get_wd_autoreset_sd(int ifindex)
{
	return get_wd_autoreset_fn(get_dev_idx_p(ifindex));
}
int get_bypass_caps_sd(int ifindex)
{
	return get_bypass_caps_fn(get_dev_idx_p(ifindex));
}
/*
 * Look up the slave (second) port of a bypass pair and return its
 * ifindex, or -1 when the backend does not report exactly one slave.
 */
int get_bypass_slave_sd(int ifindex)
{
	bpctl_dev_t *pbpctl_dev_out;
	int ret = get_bypass_slave_fn(get_dev_idx_p(ifindex), &pbpctl_dev_out);
	/* ret == 1 means a slave device was found and written out */
	if (ret == 1)
		return pbpctl_dev_out->ifindex;
	return -1;
}
int get_tx_sd(int ifindex)
{
	return get_tx_fn(get_dev_idx_p(ifindex));
}
int get_tpl_sd(int ifindex)
{
	return get_tpl_fn(get_dev_idx_p(ifindex));
}
int get_bp_hw_reset_sd(int ifindex)
{
	return get_bp_hw_reset_fn(get_dev_idx_p(ifindex));
}
int get_bypass_info_sd(int ifindex, struct bp_info *bp_info)
{
	return get_bypass_info_fn(get_dev_idx_p(ifindex), bp_info->prod_name, &bp_info->fw_ver);
}
int bp_if_scan_sd(void)
{
	if_scan_init();
	return 0;
}
/*
 * Export the whole *_sd API.  EXPORT_SYMBOL_NOVERS is the legacy
 * (pre-2.6) spelling; NOTE(review): presumably #defined to
 * EXPORT_SYMBOL earlier in this file -- confirm.
 */
EXPORT_SYMBOL_NOVERS(is_bypass_sd);
EXPORT_SYMBOL_NOVERS(get_bypass_slave_sd);
EXPORT_SYMBOL_NOVERS(get_bypass_caps_sd);
EXPORT_SYMBOL_NOVERS(get_wd_set_caps_sd);
EXPORT_SYMBOL_NOVERS(set_bypass_sd);
EXPORT_SYMBOL_NOVERS(get_bypass_sd);
EXPORT_SYMBOL_NOVERS(get_bypass_change_sd);
EXPORT_SYMBOL_NOVERS(set_dis_bypass_sd);
EXPORT_SYMBOL_NOVERS(get_dis_bypass_sd);
EXPORT_SYMBOL_NOVERS(set_bypass_pwoff_sd);
EXPORT_SYMBOL_NOVERS(get_bypass_pwoff_sd);
EXPORT_SYMBOL_NOVERS(set_bypass_pwup_sd);
EXPORT_SYMBOL_NOVERS(get_bypass_pwup_sd);
EXPORT_SYMBOL_NOVERS(set_bypass_wd_sd);
EXPORT_SYMBOL_NOVERS(get_bypass_wd_sd);
EXPORT_SYMBOL_NOVERS(get_wd_expire_time_sd);
EXPORT_SYMBOL_NOVERS(reset_bypass_wd_timer_sd);
EXPORT_SYMBOL_NOVERS(set_std_nic_sd);
EXPORT_SYMBOL_NOVERS(get_std_nic_sd);
EXPORT_SYMBOL_NOVERS(set_tx_sd);
EXPORT_SYMBOL_NOVERS(get_tx_sd);
EXPORT_SYMBOL_NOVERS(set_tpl_sd);
EXPORT_SYMBOL_NOVERS(get_tpl_sd);
EXPORT_SYMBOL_NOVERS(set_bp_hw_reset_sd);
EXPORT_SYMBOL_NOVERS(get_bp_hw_reset_sd);
EXPORT_SYMBOL_NOVERS(set_tap_sd);
EXPORT_SYMBOL_NOVERS(get_tap_sd);
EXPORT_SYMBOL_NOVERS(get_tap_change_sd);
EXPORT_SYMBOL_NOVERS(set_dis_tap_sd);
EXPORT_SYMBOL_NOVERS(get_dis_tap_sd);
EXPORT_SYMBOL_NOVERS(set_tap_pwup_sd);
EXPORT_SYMBOL_NOVERS(get_tap_pwup_sd);
EXPORT_SYMBOL_NOVERS(set_wd_exp_mode_sd);
EXPORT_SYMBOL_NOVERS(get_wd_exp_mode_sd);
EXPORT_SYMBOL_NOVERS(set_wd_autoreset_sd);
EXPORT_SYMBOL_NOVERS(get_wd_autoreset_sd);
EXPORT_SYMBOL_NOVERS(set_bp_disc_sd);
EXPORT_SYMBOL_NOVERS(get_bp_disc_sd);
EXPORT_SYMBOL_NOVERS(get_bp_disc_change_sd);
EXPORT_SYMBOL_NOVERS(set_bp_dis_disc_sd);
EXPORT_SYMBOL_NOVERS(get_bp_dis_disc_sd);
EXPORT_SYMBOL_NOVERS(set_bp_disc_pwup_sd);
EXPORT_SYMBOL_NOVERS(get_bp_disc_pwup_sd);
EXPORT_SYMBOL_NOVERS(get_bypass_info_sd);
EXPORT_SYMBOL_NOVERS(bp_if_scan_sd);
#define BP_PROC_DIR "bypass"
/* root of the driver's procfs tree: /proc/net/bypass */
static struct proc_dir_entry *bp_procfs_dir;
/*
 * bp_proc_create - create the /proc/net/bypass directory.
 *
 * Returns 0 on success, -1 if the directory could not be created.
 * FIX: use the idiomatic NULL test instead of "== (struct
 * proc_dir_entry *)0" and correct the error message, which wrongly
 * referred to a "nicinfo" directory.
 */
int bp_proc_create(void)
{
	bp_procfs_dir = proc_mkdir(BP_PROC_DIR, init_net.proc_net);
	if (!bp_procfs_dir) {
		printk(KERN_DEBUG
		       "Could not create procfs directory %s\n",
		       BP_PROC_DIR);
		return -1;
	}
	return 0;
}
/*
 * procfs_add - create one 0644 entry under the device's procfs
 * directory, with the device itself attached as PDE data.
 * Returns 0 on success, -1 on failure.
 */
static int procfs_add(char *proc_name, const struct file_operations *fops,
		      bpctl_dev_t *dev)
{
	struct bypass_pfs_sd *pfs = &dev->bypass_pfs_set;

	return proc_create_data(proc_name, 0644, pfs->bypass_entry,
				fops, dev) ? 0 : -1;
}
/*
 * RO_FOPS(name): generate a single_open()-based read-only
 * file_operations structure "name_ops" whose show callback is
 * show_<name>() and whose seq_file private data comes from PDE_DATA().
 */
#define RO_FOPS(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, show_##name, PDE_DATA(inode));\
} \
static const struct file_operations name##_ops = { \
	.open = name##_open, \
	.read = seq_read, \
	.llseek = seq_lseek, \
	.release = single_release, \
};
/*
 * RW_FOPS(name): same as RO_FOPS but additionally wires a
 * name_write() handler, which must be defined before the macro is
 * expanded.
 */
#define RW_FOPS(name) \
static int name##_open(struct inode *inode, struct file *file) \
{ \
	return single_open(file, show_##name, PDE_DATA(inode));\
} \
static const struct file_operations name##_ops = { \
	.open = name##_open, \
	.read = seq_read, \
	.write = name##_write, \
	.llseek = seq_lseek, \
	.release = single_release, \
};
/* /proc ...: print adapter name and firmware version. */
static int show_bypass_info(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;

	seq_printf(m, "Name\t\t\t%s\n", dev->name);
	seq_printf(m, "Firmware version\t0x%x\n", dev->bp_fw_ver);
	return 0;
}
RO_FOPS(bypass_info)
/* Print the slave port's interface name (or this port's own, when no
 * separate slave is reported). */
static int show_bypass_slave(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	bpctl_dev_t *slave = get_status_port_fn(dev);
	if (!slave)
		slave = dev;
	/* NOTE(review): this branch only fires when dev itself was NULL;
	 * and nothing at all is printed when slave->ndev is NULL. */
	if (!slave)
		seq_printf(m, "fail\n");
	else if (slave->ndev)
		seq_printf(m, "%s\n", slave->ndev->name);
	return 0;
}
RO_FOPS(bypass_slave)
/* Print the bypass capability mask, or -1 when not supported. */
static int show_bypass_caps(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_bypass_caps_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "-1\n");
	else
		seq_printf(m, "0x%x\n", ret);
	return 0;
}
RO_FOPS(bypass_caps)
/* Print the watchdog capability mask, or -1 when not supported. */
static int show_wd_set_caps(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_wd_set_caps_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "-1\n");
	else
		seq_printf(m, "0x%x\n", ret);
	return 0;
}
RO_FOPS(wd_set_caps)
/*
 * user_on_off - parse an "on"/"off" keyword written to a /proc file.
 * @buffer: user-space buffer of the write
 * @count:  number of bytes written
 *
 * Returns 1 for "on", 0 for "off", -1 on copy failure or oversized
 * input.  Any other keyword is historically treated as "off" (0).
 *
 * BUGFIX: with an empty write (count == 0, or a buffer starting with
 * NUL) the old code evaluated kbuf[length - 1] with length == 0,
 * reading and potentially writing kbuf[-1] -- a stack buffer
 * underflow.  The newline strip is now guarded by length > 0.
 */
static int user_on_off(const void __user *buffer, size_t count)
{
	char kbuf[256];
	int length = 0;

	if (count > (sizeof(kbuf) - 1))
		return -1;
	if (copy_from_user(&kbuf, buffer, count))
		return -1;
	kbuf[count] = '\0';
	length = strlen(kbuf);
	/* strip a single trailing newline, if any */
	if (length > 0 && kbuf[length - 1] == '\n')
		kbuf[--length] = '\0';
	if (strcmp(kbuf, "on") == 0)
		return 1;
	if (strcmp(kbuf, "off") == 0)
		return 0;
	return 0;
}
/*
 * Write handler for /proc ... "bypass": accepts "on"/"off".
 * CONSISTENCY FIX: return -EINVAL on bad input like the other write
 * handlers in this file (dis_bypass_write etc.); the old bare -1 was
 * reported to user space as -EPERM.
 */
static ssize_t bypass_write(struct file *file, const char __user *buffer,
			    size_t count, loff_t *pos)
{
	int bypass_param = user_on_off(buffer, count);

	if (bypass_param < 0)
		return -EINVAL;
	set_bypass_fn(PDE_DATA(file_inode(file)), bypass_param);
	return count;
}
/* Print the current bypass state: "on", "off" or "fail". */
static int show_bypass(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_bypass_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 1)
		seq_printf(m, "on\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	return 0;
}
RW_FOPS(bypass)
/*
 * Write handler for "tap": accepts "on"/"off".
 * CONSISTENCY FIX: bad input now yields -EINVAL (was bare -1, i.e.
 * -EPERM), matching the other write handlers in this file.
 */
static ssize_t tap_write(struct file *file, const char __user *buffer,
			 size_t count, loff_t *pos)
{
	int tap_param = user_on_off(buffer, count);

	if (tap_param < 0)
		return -EINVAL;
	set_tap_fn(PDE_DATA(file_inode(file)), tap_param);
	return count;
}
/* Print the current TAP state: "on", "off" or "fail". */
static int show_tap(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_tap_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 1)
		seq_printf(m, "on\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	return 0;
}
RW_FOPS(tap)
/*
 * Write handler for "disc" (disconnect): accepts "on"/"off".
 * CONSISTENCY FIX: bad input now yields -EINVAL (was bare -1, i.e.
 * -EPERM), matching the other write handlers in this file.
 */
static ssize_t disc_write(struct file *file, const char __user *buffer,
			  size_t count, loff_t *pos)
{
	int disc_param = user_on_off(buffer, count);

	if (disc_param < 0)
		return -EINVAL;
	set_disc_fn(PDE_DATA(file_inode(file)), disc_param);
	return count;
}
/* Print the current disconnect state: "on", "off" or "fail". */
static int show_disc(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_disc_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 1)
		seq_printf(m, "on\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	return 0;
}
RW_FOPS(disc)
/* "Did the bypass state change since last read?": on/off/fail. */
static int show_bypass_change(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_bypass_change_fn(dev);
	if (ret == 1)
		seq_printf(m, "on\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "fail\n");
	return 0;
}
RO_FOPS(bypass_change)
/* "Did the TAP state change since last read?": on/off/fail. */
static int show_tap_change(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_tap_change_fn(dev);
	if (ret == 1)
		seq_printf(m, "on\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "fail\n");
	return 0;
}
RO_FOPS(tap_change)
/* "Did the disconnect state change since last read?": on/off/fail. */
static int show_disc_change(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_disc_change_fn(dev);
	if (ret == 1)
		seq_printf(m, "on\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "fail\n");
	return 0;
}
RO_FOPS(disc_change)
/* Write handler for "bypass_wd": decimal timeout in ms (0 disables).
 * kstrtoint_from_user() returns 0 or a negative errno, which is
 * propagated unchanged. */
static ssize_t bypass_wd_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *pos)
{
	bpctl_dev_t *dev = PDE_DATA(file_inode(file));
	int timeout;
	int ret = kstrtoint_from_user(buffer, count, 10, &timeout);
	if (ret)
		return ret;
	set_bypass_wd_fn(dev, timeout);
	return count;
}
/* Print the programmed watchdog timeout: "fail", "unknown",
 * "disable" or the value in ms. */
static int show_bypass_wd(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = 0, timeout = 0;

	ret = get_bypass_wd_fn(dev, &timeout);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (timeout == -1)	/* -1 == value could not be determined */
		seq_printf(m, "unknown\n");
	else if (timeout == 0)	/* 0 == watchdog disabled */
		seq_printf(m, "disable\n");
	else
		seq_printf(m, "%d\n", timeout);
	return 0;
}
RW_FOPS(bypass_wd)
/* Print the watchdog time left: "fail", "expire", "disable" or ms. */
static int show_wd_expire_time(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = 0, timeout = 0;

	ret = get_wd_expire_time_fn(dev, &timeout);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (timeout == -1)	/* -1 == watchdog already expired */
		seq_printf(m, "expire\n");
	else if (timeout == 0)
		seq_printf(m, "disable\n");
	else
		seq_printf(m, "%d\n", timeout);
	return 0;
}
RO_FOPS(wd_expire_time)
/* Write handler for "tpl" (two-port link): accepts "on"/"off". */
static ssize_t tpl_write(struct file *file, const char __user *buffer,
			 size_t count, loff_t *pos)
{
	bpctl_dev_t *dev = PDE_DATA(file_inode(file));
	int tpl_param = user_on_off(buffer, count);
	if (tpl_param < 0)
		return -1;
	set_tpl_fn(dev, tpl_param);
	return count;
}
/* Print the current TPL state: "on", "off" or "fail". */
static int show_tpl(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_tpl_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 1)
		seq_printf(m, "on\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	return 0;
}
RW_FOPS(tpl)
#ifdef PMC_FIX_FLAG
static ssize_t wait_at_pwup_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
bpctl_dev_t *dev = PDE_DATA(file_inode(file));
int tpl_param = user_on_off(buffer, count);
if (tpl_param < 0)
return -1;
set_bp_wait_at_pwup_fn(dev, tpl_param);
return count;
}
static int show_wait_at_pwup(struct seq_file *m, void *v)
{
bpctl_dev_t *dev = m->private;
int ret = get_bp_wait_at_pwup_fn(dev);
if (ret == BP_NOT_CAP)
seq_printf(m, "fail\n");
else if (ret == 1)
seq_printf(m, "on\n");
else if (ret == 0)
seq_printf(m, "off\n");
return 0;
}
RW_FOPS(wait_at_pwup)
static ssize_t hw_reset_write(struct file *file, const char __user *buffer,
size_t count, loff_t *pos)
{
bpctl_dev_t *dev = PDE_DATA(file_inode(file));
int tpl_param = user_on_off(buffer, count);
if (tpl_param < 0)
return -1;
set_bp_hw_reset_fn(dev, tpl_param);
return count;
}
static int show_hw_reset(struct seq_file *m, void *v)
{
bpctl_dev_t *dev = m->private;
int ret = get_bp_hw_reset_fn(dev);
if (ret == BP_NOT_CAP)
seq_printf(m, "fail\n");
else if (ret == 1)
seq_printf(m, "on\n");
else if (ret == 0)
seq_printf(m, "off\n");
return 0;
}
RW_FOPS(hw_reset)
#endif /*PMC_WAIT_FLAG */
/*
 * Remaining /proc handlers.  Each *_write parses "on"/"off" via
 * user_on_off() and forwards to the matching set_*_fn(); each show_*
 * renders the matching get_*_fn() result, with BP_NOT_CAP shown as
 * "fail".  (Several writers reuse the local names tap_param /
 * bypass_param from copy-paste; the value is just the on/off flag.)
 */
/* Reading this file kicks (re-arms) the bypass watchdog. */
static int show_reset_bypass_wd(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = reset_bypass_wd_timer_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 0)	/* watchdog was not armed */
		seq_printf(m, "disable\n");
	else if (ret == 1)
		seq_printf(m, "success\n");
	return 0;
}
RO_FOPS(reset_bypass_wd)
static ssize_t dis_bypass_write(struct file *file, const char __user *buffer,
				size_t count, loff_t *pos)
{
	int bypass_param = user_on_off(buffer, count);
	if (bypass_param < 0)
		return -EINVAL;
	set_dis_bypass_fn(PDE_DATA(file_inode(file)), bypass_param);
	return count;
}
static int show_dis_bypass(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_dis_bypass_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "on\n");
	return 0;
}
RW_FOPS(dis_bypass)
static ssize_t dis_tap_write(struct file *file, const char __user *buffer,
			     size_t count, loff_t *pos)
{
	int tap_param = user_on_off(buffer, count);
	if (tap_param < 0)
		return -EINVAL;
	set_dis_tap_fn(PDE_DATA(file_inode(file)), tap_param);
	return count;
}
static int show_dis_tap(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_dis_tap_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "on\n");
	return 0;
}
RW_FOPS(dis_tap)
static ssize_t dis_disc_write(struct file *file, const char __user *buffer,
			      size_t count, loff_t *pos)
{
	int tap_param = user_on_off(buffer, count);
	if (tap_param < 0)
		return -EINVAL;
	set_dis_disc_fn(PDE_DATA(file_inode(file)), tap_param);
	return count;
}
static int show_dis_disc(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_dis_disc_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "on\n");
	return 0;
}
RW_FOPS(dis_disc)
static ssize_t bypass_pwup_write(struct file *file, const char __user *buffer,
				 size_t count, loff_t *pos)
{
	int bypass_param = user_on_off(buffer, count);
	if (bypass_param < 0)
		return -EINVAL;
	set_bypass_pwup_fn(PDE_DATA(file_inode(file)), bypass_param);
	return count;
}
static int show_bypass_pwup(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_bypass_pwup_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "on\n");
	return 0;
}
RW_FOPS(bypass_pwup)
static ssize_t bypass_pwoff_write(struct file *file, const char __user *buffer,
				  size_t count, loff_t *pos)
{
	int bypass_param = user_on_off(buffer, count);
	if (bypass_param < 0)
		return -EINVAL;
	set_bypass_pwoff_fn(PDE_DATA(file_inode(file)), bypass_param);
	return count;
}
static int show_bypass_pwoff(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_bypass_pwoff_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "on\n");
	return 0;
}
RW_FOPS(bypass_pwoff)
static ssize_t tap_pwup_write(struct file *file, const char __user *buffer,
			      size_t count, loff_t *pos)
{
	int tap_param = user_on_off(buffer, count);
	if (tap_param < 0)
		return -EINVAL;
	set_tap_pwup_fn(PDE_DATA(file_inode(file)), tap_param);
	return count;
}
static int show_tap_pwup(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_tap_pwup_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "on\n");
	return 0;
}
RW_FOPS(tap_pwup)
static ssize_t disc_pwup_write(struct file *file, const char __user *buffer,
			       size_t count, loff_t *pos)
{
	int tap_param = user_on_off(buffer, count);
	if (tap_param < 0)
		return -EINVAL;
	set_disc_pwup_fn(PDE_DATA(file_inode(file)), tap_param);
	return count;
}
static int show_disc_pwup(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_disc_pwup_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "on\n");
	return 0;
}
RW_FOPS(disc_pwup)
static ssize_t std_nic_write(struct file *file, const char __user *buffer,
			     size_t count, loff_t *pos)
{
	int bypass_param = user_on_off(buffer, count);
	if (bypass_param < 0)
		return -EINVAL;
	set_std_nic_fn(PDE_DATA(file_inode(file)), bypass_param);
	return count;
}
static int show_std_nic(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_std_nic_fn(dev);
	if (ret == BP_NOT_CAP)
		seq_printf(m, "fail\n");
	else if (ret == 0)
		seq_printf(m, "off\n");
	else
		seq_printf(m, "on\n");
	return 0;
}
RW_FOPS(std_nic)
/*
 * Write handler for "wd_exp_mode": accepts "bypass" (0), "tap" (1) or
 * "disc" (2); any other keyword is historically treated as "bypass".
 *
 * BUGFIX: with an empty write (count == 0, or a buffer starting with
 * NUL) the old code evaluated kbuf[length - 1] with length == 0,
 * touching kbuf[-1] -- the same stack underflow as in user_on_off().
 * The newline strip is now guarded by length > 0.
 */
static ssize_t wd_exp_mode_write(struct file *file, const char __user *buffer,
				 size_t count, loff_t *pos)
{
	char kbuf[256];
	int bypass_param = 0, length = 0;

	if (count > (sizeof(kbuf) - 1))
		return -1;
	if (copy_from_user(&kbuf, buffer, count))
		return -1;
	kbuf[count] = '\0';
	length = strlen(kbuf);
	/* strip a single trailing newline, if any */
	if (length > 0 && kbuf[length - 1] == '\n')
		kbuf[--length] = '\0';

	if (strcmp(kbuf, "tap") == 0)
		bypass_param = 1;
	else if (strcmp(kbuf, "bypass") == 0)
		bypass_param = 0;
	else if (strcmp(kbuf, "disc") == 0)
		bypass_param = 2;
	set_wd_exp_mode_fn(PDE_DATA(file_inode(file)), bypass_param);
	return count;
}
/* Print the watchdog expiry mode: "tap"(1), "bypass"(0), "disc"(2)
 * or "fail". */
static int show_wd_exp_mode(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_wd_exp_mode_fn(dev);
	if (ret == 1)
		seq_printf(m, "tap\n");
	else if (ret == 0)
		seq_printf(m, "bypass\n");
	else if (ret == 2)
		seq_printf(m, "disc\n");
	else
		seq_printf(m, "fail\n");
	return 0;
}
RW_FOPS(wd_exp_mode)
/* Write handler for "wd_autoreset": decimal auto-reset period.
 * kstrtoint_from_user() errors are propagated unchanged. */
static ssize_t wd_autoreset_write(struct file *file, const char __user *buffer,
				  size_t count, loff_t *pos)
{
	int timeout;
	int ret = kstrtoint_from_user(buffer, count, 10, &timeout);
	if (ret)
		return ret;
	set_wd_autoreset_fn(PDE_DATA(file_inode(file)), timeout);
	return count;
}
/* Print the auto-reset period, or "fail" on a negative result. */
static int show_wd_autoreset(struct seq_file *m, void *v)
{
	bpctl_dev_t *dev = m->private;
	int ret = get_wd_autoreset_fn(dev);
	if (ret >= 0)
		seq_printf(m, "%d\n", ret);
	else
		seq_printf(m, "fail\n");
	return 0;
}
RW_FOPS(wd_autoreset)
/*
 * bypass_proc_create_dev_sd - build the /proc/net/bypass/bypass_<if>
 * subtree for one device, with entries gated on its capability mask.
 *
 * Returns 0 on success, negative if the directory or any entry could
 * not be created.
 *
 * FIX: procfs_dir was needlessly declared "static", making a local
 * scratch pointer persist (and be shared) across calls for every
 * device; it is now an ordinary local.
 * NOTE(review): dir_name is filled with sprintf(); presumably it is
 * sized for "bypass_" + IFNAMSIZ -- confirm against bypass_pfs_sd.
 */
int bypass_proc_create_dev_sd(bpctl_dev_t *pbp_device_block)
{
	struct bypass_pfs_sd *current_pfs = &(pbp_device_block->bypass_pfs_set);
	struct proc_dir_entry *procfs_dir;
	int ret = 0;

	if (!pbp_device_block->ndev)
		return -1;
	sprintf(current_pfs->dir_name, "bypass_%s",
		pbp_device_block->ndev->name);
	if (!bp_procfs_dir)
		return -1;
	/* create device proc dir */
	procfs_dir = proc_mkdir(current_pfs->dir_name, bp_procfs_dir);
	if (!procfs_dir) {
		printk(KERN_DEBUG "Could not create procfs directory %s\n",
		       current_pfs->dir_name);
		return -1;
	}
	current_pfs->bypass_entry = procfs_dir;
/* shorthand: create one entry, accumulate failures into ret */
#define ENTRY(x) ret |= procfs_add(#x, &x##_ops, pbp_device_block)
	ENTRY(bypass_info);
	if (pbp_device_block->bp_caps & SW_CTL_CAP) {
		/* Create set param proc's */
		ENTRY(bypass_slave);
		ENTRY(bypass_caps);
		ENTRY(wd_set_caps);
		ENTRY(bypass_wd);
		ENTRY(wd_expire_time);
		ENTRY(reset_bypass_wd);
		ENTRY(std_nic);
		if (pbp_device_block->bp_caps & BP_CAP) {
			ENTRY(bypass);
			ENTRY(dis_bypass);
			ENTRY(bypass_pwup);
			ENTRY(bypass_pwoff);
			ENTRY(bypass_change);
		}
		if (pbp_device_block->bp_caps & TAP_CAP) {
			ENTRY(tap);
			ENTRY(dis_tap);
			ENTRY(tap_pwup);
			ENTRY(tap_change);
		}
		if (pbp_device_block->bp_caps & DISC_CAP) {
			ENTRY(disc);
			ENTRY(dis_disc);
			ENTRY(disc_pwup);
			ENTRY(disc_change);
		}
		ENTRY(wd_exp_mode);
		ENTRY(wd_autoreset);
		ENTRY(tpl);
#ifdef PMC_FIX_FLAG
		ENTRY(wait_at_pwup);
		ENTRY(hw_reset);
#endif
	}
#undef ENTRY
	if (ret < 0)
		printk(KERN_DEBUG "Create proc entry failed\n");
	return ret;
}
/*
 * bypass_proc_remove_dev_sd - tear down a device's procfs subtree and
 * clear the cached directory entry.  Always returns 0.
 */
int bypass_proc_remove_dev_sd(bpctl_dev_t *pbp_device_block)
{
	struct bypass_pfs_sd *pfs = &pbp_device_block->bypass_pfs_set;

	remove_proc_subtree(pfs->dir_name, bp_procfs_dir);
	pfs->bypass_entry = NULL;
	return 0;
}
| gpl-2.0 |
epsylon3/android_kernel_samsung_i9103 | arch/arm/mach-nuc93x/time.c | 3880 | 2258 | /*
* linux/arch/arm/mach-nuc93x/time.c
*
* Copyright (c) 2009 Nuvoton technology corporation.
*
* Wan ZongShun <mcuos.com@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <asm/mach-types.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
#include <mach/system.h>
#include <mach/map.h>
#include <mach/regs-timer.h>
#define RESETINT 0x01
#define PERIOD (0x01 << 27)
#define ONESHOT (0x00 << 27)
#define COUNTEN (0x01 << 30)
#define INTEN (0x01 << 29)
#define TICKS_PER_SEC 100
#define PRESCALE 0x63 /* Divider = prescale + 1 */
unsigned int timer0_load;
/* Sub-tick time offset in usecs; this platform does not provide one,
 * so timekeeping resolution is one timer tick. */
static unsigned long nuc93x_gettimeoffset(void)
{
	return 0;
}
/*IRQ handler for the timer*/
/* TIMER0 tick handler: advance the system tick, then acknowledge the
 * interrupt by clearing TIF0 in the timer interrupt status register. */
static irqreturn_t nuc93x_timer_interrupt(int irq, void *dev_id)
{
	timer_tick();
	__raw_writel(0x01, REG_TISR); /* clear TIF0 */
	return IRQ_HANDLED;
}
/* Tick IRQ action: marked IRQF_TIMER/IRQF_IRQPOLL so the core treats
 * it as the system timer interrupt. */
static struct irqaction nuc93x_timer_irq = {
	.name		= "nuc93x Timer Tick",
	.flags		= IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
	.handler	= nuc93x_timer_interrupt,
};
/*Set up timer reg.*/
/*
 * nuc93x_timer_setup - program TIMER0 as a periodic HZ tick source.
 *
 * The timer counts the external clock divided by (PRESCALE + 1);
 * timer0_load is the reload value giving TICKS_PER_SEC interrupts per
 * second.  TCSR0 is finally written with periodic mode, counter and
 * interrupt enable, and the prescale value in its low bits.
 * Also used as the resume hook, so it must fully reprogram the timer.
 *
 * FIX: removed a stray double semicolon (";;") after the TCSR0 value
 * computation.
 */
static void nuc93x_timer_setup(void)
{
	struct clk *ck_ext = clk_get(NULL, "ext");
	struct clk *ck_timer = clk_get(NULL, "timer");
	unsigned int rate, val = 0;

	BUG_ON(IS_ERR(ck_ext) || IS_ERR(ck_timer));

	clk_enable(ck_timer);
	rate = clk_get_rate(ck_ext);
	clk_put(ck_ext);
	rate = rate / (PRESCALE + 0x01);

	/* set a known state */
	__raw_writel(0x00, REG_TCSR0);
	__raw_writel(RESETINT, REG_TISR);

	timer0_load = (rate / TICKS_PER_SEC);
	__raw_writel(timer0_load, REG_TICR0);

	val |= (PERIOD | COUNTEN | INTEN | PRESCALE);
	__raw_writel(val, REG_TCSR0);
}
/* Boot-time init: program TIMER0 and install the tick IRQ action. */
static void __init nuc93x_timer_init(void)
{
	nuc93x_timer_setup();
	setup_irq(IRQ_TIMER0, &nuc93x_timer_irq);
}
/* System timer descriptor; nuc93x_timer_setup doubles as the resume
 * hook to reprogram the timer after suspend. */
struct sys_timer nuc93x_timer = {
	.init		= nuc93x_timer_init,
	.offset		= nuc93x_gettimeoffset,
	.resume		= nuc93x_timer_setup
};
| gpl-2.0 |
moddingg33k/android_kernel_synopsis | drivers/staging/vt6655/wpa2.c | 8232 | 12595 | /*
* Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*
*
* File: wpa2.c
*
* Purpose: Handles the Basic Service Set & Node Database functions
*
* Functions:
*
* Revision History:
*
* Author: Yiching Chen
*
* Date: Oct. 4, 2004
*
*/
#include "wpa2.h"
#include "device.h"
#include "wmgr.h"
/*--------------------- Static Definitions -------------------------*/
/* driver debug verbosity for DBG_PRT() */
static int msglevel =MSG_LEVEL_INFO;
//static int msglevel =MSG_LEVEL_DEBUG;
/*--------------------- Static Classes ----------------------------*/
/*--------------------- Static Variables --------------------------*/
/*
 * 4-byte RSN suite selectors (OUI 00-0F-AC + type byte), compared
 * against the selectors carried in a received RSN information element.
 */
const unsigned char abyOUIGK[4] = { 0x00, 0x0F, 0xAC, 0x00 };     /* use group cipher */
const unsigned char abyOUIWEP40[4] = { 0x00, 0x0F, 0xAC, 0x01 };  /* WEP-40 */
const unsigned char abyOUIWEP104[4] = { 0x00, 0x0F, 0xAC, 0x05 }; /* WEP-104 */
const unsigned char abyOUITKIP[4] = { 0x00, 0x0F, 0xAC, 0x02 };   /* TKIP */
const unsigned char abyOUICCMP[4] = { 0x00, 0x0F, 0xAC, 0x04 };   /* CCMP */
const unsigned char abyOUI8021X[4] = { 0x00, 0x0F, 0xAC, 0x01 };  /* AKM: 802.1X */
const unsigned char abyOUIPSK[4] = { 0x00, 0x0F, 0xAC, 0x02 };    /* AKM: PSK */
/*--------------------- Export Variables --------------------------*/
/*--------------------- Export Functions --------------------------*/
/*+
*
* Description:
* Clear RSN information in BSSList.
*
* Parameters:
* In:
* pBSSNode - BSS list.
* Out:
* none
*
* Return Value: none.
*
-*/
/*
 * WPA2_ClearRSN - reset @pBSSNode's RSN bookkeeping to defaults:
 * not WPA2-valid, CCMP for the group and all four pairwise cipher
 * slots, 802.1X for all four AKM slots (one of each counted), and no
 * advertised RSN capabilities.
 */
void
WPA2_ClearRSN (
    PKnownBSS pBSSNode
    )
{
	int idx;

	pBSSNode->bWPA2Valid = false;

	pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
	for (idx = 0; idx < 4; idx++) {
		pBSSNode->abyCSSPK[idx] = WLAN_11i_CSS_CCMP;
		pBSSNode->abyAKMSSAuthType[idx] = WLAN_11i_AKMSS_802_1X;
	}
	pBSSNode->wCSSPKCount = 1;
	pBSSNode->wAKMSSAuthCount = 1;

	pBSSNode->sRSNCapObj.bRSNCapExist = false;
	pBSSNode->sRSNCapObj.wRSNCap = 0;
}
/*+
*
* Description:
* Parse RSN IE.
*
* Parameters:
* In:
* pBSSNode - BSS list.
* pRSN - Pointer to the RSN IE.
* Out:
* none
*
* Return Value: none.
*
-*/
// Parse an 802.11i RSN information element from a beacon/probe response
// and cache the negotiable suites in the BSS node.  The node is reset to
// defaults first; pBSSNode->bWPA2Valid is only set once the IE validates.
// Length checks follow P802.11i/D10.0.
// NOTE(review): the 16-bit suite counts are read through unaligned
// (unsigned short *) casts -- assumes a little-endian CPU tolerating
// unaligned loads; verify before porting.
void
WPA2vParseRSN (
PKnownBSS pBSSNode,
PWLAN_IE_RSN pRSN
)
{
int i, j;
unsigned short m = 0, n = 0; // m: advertised pairwise count, n: advertised AKM count
unsigned char *pbyOUI;
bool bUseGK = false; // a pairwise entry selected "use group key"
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"WPA2_ParseRSN: [%d]\n", pRSN->len);
// Reset cached RSN state; re-populated below on successful parse.
WPA2_ClearRSN(pBSSNode);
if (pRSN->len == 2) { // ver(2)
// Version-only element: all other fields take spec defaults.
if ((pRSN->byElementID == WLAN_EID_RSN) && (pRSN->wVersion == 1)) {
pBSSNode->bWPA2Valid = true;
}
return;
}
if (pRSN->len < 6) { // ver(2) + GK(4)
// invalid CSS, P802.11i/D10.0, p31
return;
}
// information element header makes sense
if ((pRSN->byElementID == WLAN_EID_RSN) &&
(pRSN->wVersion == 1)) {
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Legal 802.11i RSN\n");
// Group cipher suite selector (OUI 00-0F-AC + type) at abyRSN[0..3].
pbyOUI = &(pRSN->abyRSN[0]);
if ( !memcmp(pbyOUI, abyOUIWEP40, 4))
pBSSNode->byCSSGK = WLAN_11i_CSS_WEP40;
else if ( !memcmp(pbyOUI, abyOUITKIP, 4))
pBSSNode->byCSSGK = WLAN_11i_CSS_TKIP;
else if ( !memcmp(pbyOUI, abyOUICCMP, 4))
pBSSNode->byCSSGK = WLAN_11i_CSS_CCMP;
else if ( !memcmp(pbyOUI, abyOUIWEP104, 4))
pBSSNode->byCSSGK = WLAN_11i_CSS_WEP104;
else if ( !memcmp(pbyOUI, abyOUIGK, 4)) {
// invalid CSS, P802.11i/D10.0, p32
return;
} else
// any vendor checks here
pBSSNode->byCSSGK = WLAN_11i_CSS_UNKNOWN;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"802.11i CSS: %X\n", pBSSNode->byCSSGK);
if (pRSN->len == 6) {
// Element stops after the group suite; defaults cover the rest.
pBSSNode->bWPA2Valid = true;
return;
}
if (pRSN->len >= 8) { // ver(2) + GK(4) + PK count(2)
// Pairwise suite list: count at abyRSN[4..5], selectors follow.
pBSSNode->wCSSPKCount = *((unsigned short *) &(pRSN->abyRSN[4]));
j = 0;
pbyOUI = &(pRSN->abyRSN[6]);
// j is bounded by the size of abyCSSPK, so an oversized advertised
// count cannot overflow the node's suite cache.
for (i = 0; (i < pBSSNode->wCSSPKCount) && (j < sizeof(pBSSNode->abyCSSPK)/sizeof(unsigned char)); i++) {
if (pRSN->len >= 8+i*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*i)
if ( !memcmp(pbyOUI, abyOUIGK, 4)) {
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_USE_GROUP;
bUseGK = true;
} else if ( !memcmp(pbyOUI, abyOUIWEP40, 4)) {
// Invialid CSS, continue to parsing
} else if ( !memcmp(pbyOUI, abyOUITKIP, 4)) {
// TKIP pairwise is only accepted when the group cipher is
// not CCMP.
if (pBSSNode->byCSSGK != WLAN_11i_CSS_CCMP)
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_TKIP;
else
; // Invialid CSS, continue to parsing
} else if ( !memcmp(pbyOUI, abyOUICCMP, 4)) {
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_CCMP;
} else if ( !memcmp(pbyOUI, abyOUIWEP104, 4)) {
// Invialid CSS, continue to parsing
} else {
// any vendor checks here
pBSSNode->abyCSSPK[j++] = WLAN_11i_CSS_UNKNOWN;
}
pbyOUI += 4;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"abyCSSPK[%d]: %X\n", j-1, pBSSNode->abyCSSPK[j-1]);
} else
break;
} //for
if (bUseGK == true) {
// "Use group key" must be the one and only pairwise suite...
if (j != 1) {
// invalid CSS, This should be only PK CSS.
return;
}
// ...and is not permitted when the group cipher is CCMP.
if (pBSSNode->byCSSGK == WLAN_11i_CSS_CCMP) {
// invalid CSS, If CCMP is enable , PK can't be CSSGK.
return;
}
}
if ((pBSSNode->wCSSPKCount != 0) && (j == 0)) {
// invalid CSS, No valid PK.
return;
}
// Replace the advertised count by the number actually accepted.
pBSSNode->wCSSPKCount = (unsigned short)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wCSSPKCount: %d\n", pBSSNode->wCSSPKCount);
}
// Re-read the advertised pairwise count to locate the AKM section.
m = *((unsigned short *) &(pRSN->abyRSN[4]));
if (pRSN->len >= 10+m*4) { // ver(2) + GK(4) + PK count(2) + PKS(4*m) + AKMSS count(2)
// AKM suite list: count at abyRSN[6+4*m], selectors follow.
pBSSNode->wAKMSSAuthCount = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));
j = 0;
pbyOUI = &(pRSN->abyRSN[8+4*m]);
// j is bounded by abyAKMSSAuthType, same overflow guard as above.
for (i = 0; (i < pBSSNode->wAKMSSAuthCount) && (j < sizeof(pBSSNode->abyAKMSSAuthType)/sizeof(unsigned char)); i++) {
if (pRSN->len >= 10+(m+i)*4+4) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSS(2)+AKS(4*i)
if ( !memcmp(pbyOUI, abyOUI8021X, 4))
pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_802_1X;
else if ( !memcmp(pbyOUI, abyOUIPSK, 4))
pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_PSK;
else
// any vendor checks here
pBSSNode->abyAKMSSAuthType[j++] = WLAN_11i_AKMSS_UNKNOWN;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"abyAKMSSAuthType[%d]: %X\n", j-1, pBSSNode->abyAKMSSAuthType[j-1]);
} else
break;
}
pBSSNode->wAKMSSAuthCount = (unsigned short)j;
DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"wAKMSSAuthCount: %d\n", pBSSNode->wAKMSSAuthCount);
// Re-read the advertised AKM count to locate the capabilities field.
n = *((unsigned short *) &(pRSN->abyRSN[6+4*m]));
if (pRSN->len >= 12+4*m+4*n) { // ver(2)+GK(4)+PKCnt(2)+PKS(4*m)+AKMSSCnt(2)+AKMSS(4*n)+Cap(2)
pBSSNode->sRSNCapObj.bRSNCapExist = true;
pBSSNode->sRSNCapObj.wRSNCap = *((unsigned short *) &(pRSN->abyRSN[8+4*m+4*n]));
}
}
//ignore PMKID lists bcs only (Re)Assocrequest has this field
pBSSNode->bWPA2Valid = true;
}
}
/*+
*
* Description:
* Set WPA IEs
*
* Parameters:
* In:
* pMgmtHandle - Pointer to management object
* Out:
* pRSNIEs - Pointer to the RSN IE to set.
*
* Return Value: length of IEs.
*
-*/
// Build the RSN (WPA2) information element for (re)association from the
// current management state.  Fills *pRSNIEs and returns the total element
// length (body + IE header), or 0 when the station is not in WPA2 /
// WPA2-PSK mode or has no current BSS.
// Layout written: version(2) GK(4) PKcnt(2) PK(4) AKMcnt(2) AKM(4)
// caps(2) [PMKIDcnt(2) + PMKIDs(16*n) only when roaming with 802.1X].
unsigned int
WPA2uSetIEs(
void *pMgmtHandle,
PWLAN_IE_RSN pRSNIEs
)
{
PSMgmtObject pMgmt = (PSMgmtObject) pMgmtHandle;
unsigned char *pbyBuffer = NULL;
unsigned int ii = 0;
unsigned short *pwPMKID = NULL;
if (pRSNIEs == NULL) {
return(0);
}
// Only emit the IE for WPA2/WPA2-PSK with a known target BSS.
if (((pMgmt->eAuthenMode == WMAC_AUTH_WPA2) ||
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK)) &&
(pMgmt->pCurrBSS != NULL)) {
/* WPA2 IE */
pbyBuffer = (unsigned char *) pRSNIEs;
pRSNIEs->byElementID = WLAN_EID_RSN;
pRSNIEs->len = 6; //Version(2)+GK(4)
pRSNIEs->wVersion = 1;
//Group Key Cipher Suite
// Selector is OUI 00-0F-AC plus a one-byte type mirroring the group
// cipher currently configured.
pRSNIEs->abyRSN[0] = 0x00;
pRSNIEs->abyRSN[1] = 0x0F;
pRSNIEs->abyRSN[2] = 0xAC;
if (pMgmt->byCSSGK == KEY_CTL_WEP) {
// WEP group key: echo the suite the AP advertised (WEP40/WEP104).
pRSNIEs->abyRSN[3] = pMgmt->pCurrBSS->byCSSGK;
} else if (pMgmt->byCSSGK == KEY_CTL_TKIP) {
pRSNIEs->abyRSN[3] = WLAN_11i_CSS_TKIP;
} else if (pMgmt->byCSSGK == KEY_CTL_CCMP) {
pRSNIEs->abyRSN[3] = WLAN_11i_CSS_CCMP;
} else {
pRSNIEs->abyRSN[3] = WLAN_11i_CSS_UNKNOWN;
}
// Pairwise Key Cipher Suite
// Exactly one suite is advertised (count = 1, little-endian u16).
pRSNIEs->abyRSN[4] = 1;
pRSNIEs->abyRSN[5] = 0;
pRSNIEs->abyRSN[6] = 0x00;
pRSNIEs->abyRSN[7] = 0x0F;
pRSNIEs->abyRSN[8] = 0xAC;
if (pMgmt->byCSSPK == KEY_CTL_TKIP) {
pRSNIEs->abyRSN[9] = WLAN_11i_CSS_TKIP;
} else if (pMgmt->byCSSPK == KEY_CTL_CCMP) {
pRSNIEs->abyRSN[9] = WLAN_11i_CSS_CCMP;
} else if (pMgmt->byCSSPK == KEY_CTL_NONE) {
pRSNIEs->abyRSN[9] = WLAN_11i_CSS_USE_GROUP;
} else {
pRSNIEs->abyRSN[9] = WLAN_11i_CSS_UNKNOWN;
}
pRSNIEs->len += 6;
// Auth Key Management Suite
// Single AKM suite: PSK or 802.1X depending on the auth mode.
pRSNIEs->abyRSN[10] = 1;
pRSNIEs->abyRSN[11] = 0;
pRSNIEs->abyRSN[12] = 0x00;
pRSNIEs->abyRSN[13] = 0x0F;
pRSNIEs->abyRSN[14] = 0xAC;
if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2PSK) {
pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_PSK;
} else if (pMgmt->eAuthenMode == WMAC_AUTH_WPA2) {
pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_802_1X;
} else {
pRSNIEs->abyRSN[15] = WLAN_11i_AKMSS_UNKNOWN;
}
pRSNIEs->len +=6;
// RSN Capabilites
// Copy the AP's capability field verbatim when it advertised one,
// otherwise send zeros.
if (pMgmt->pCurrBSS->sRSNCapObj.bRSNCapExist == true) {
memcpy(&pRSNIEs->abyRSN[16], &pMgmt->pCurrBSS->sRSNCapObj.wRSNCap, 2);
} else {
pRSNIEs->abyRSN[16] = 0;
pRSNIEs->abyRSN[17] = 0;
}
pRSNIEs->len +=2;
// Append cached PMKIDs for the target BSSID when roaming with
// 802.1X so the AP can skip the full EAP exchange.
if ((pMgmt->gsPMKIDCache.BSSIDInfoCount > 0) &&
(pMgmt->bRoaming == true) &&
(pMgmt->eAuthenMode == WMAC_AUTH_WPA2)) {
// RSN PMKID
pwPMKID = (unsigned short *)(&pRSNIEs->abyRSN[18]); // Point to PMKID count
*pwPMKID = 0; // Initialize PMKID count
pbyBuffer = &pRSNIEs->abyRSN[20]; // Point to PMKID list
for (ii = 0; ii < pMgmt->gsPMKIDCache.BSSIDInfoCount; ii++) {
// Only PMKIDs cached for the BSSID being joined apply.
if ( !memcmp(&pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyBSSID[0], pMgmt->abyCurrBSSID, ETH_ALEN)) {
(*pwPMKID) ++;
memcpy(pbyBuffer, pMgmt->gsPMKIDCache.BSSIDInfo[ii].abyPMKID, 16);
pbyBuffer += 16;
}
}
if (*pwPMKID != 0) {
pRSNIEs->len += (2 + (*pwPMKID)*16);
} else {
// No matching PMKID: len was never advanced, so the element
// simply ends at the capability field.  (pbyBuffer is not
// read again after this point.)
pbyBuffer = &pRSNIEs->abyRSN[18];
}
}
return(pRSNIEs->len + WLAN_IEHDR_LEN);
}
return(0);
}
| gpl-2.0 |
helicopter88/Find5-Kernel-Source-4.2 | net/netfilter/nf_conntrack_tftp.c | 8744 | 4200 | /* (C) 2001-2002 Magnus Boden <mb@ozaba.mine.nu>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/netfilter.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <linux/netfilter/nf_conntrack_tftp.h>
MODULE_AUTHOR("Magnus Boden <mb@ozaba.mine.nu>");
MODULE_DESCRIPTION("TFTP connection tracking helper");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ip_conntrack_tftp");
MODULE_ALIAS_NFCT_HELPER("tftp");
#define MAX_PORTS 8
static unsigned short ports[MAX_PORTS];
static unsigned int ports_c;
module_param_array(ports, ushort, &ports_c, 0400);
MODULE_PARM_DESC(ports, "Port numbers of TFTP servers");
unsigned int (*nf_nat_tftp_hook)(struct sk_buff *skb,
enum ip_conntrack_info ctinfo,
struct nf_conntrack_expect *exp) __read_mostly;
EXPORT_SYMBOL_GPL(nf_nat_tftp_hook);
/*
 * Connection-tracking helper for TFTP.
 *
 * A TFTP read/write request goes to the server's well-known port, but
 * the server answers from a freshly allocated ephemeral port, so the
 * data flow would not match the request's conntrack tuple.  For RRQ/WRQ
 * packets this helper installs an expectation matching the server
 * replying from *any* source port, and hands it to the NAT module when
 * this connection is NATed.
 *
 * Returns an NF_* verdict: NF_ACCEPT normally, NF_DROP when an
 * expectation cannot be allocated or registered.
 */
static int tftp_help(struct sk_buff *skb,
unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo)
{
const struct tftphdr *tfh;
struct tftphdr _tftph;
struct nf_conntrack_expect *exp;
struct nf_conntrack_tuple *tuple;
unsigned int ret = NF_ACCEPT;
typeof(nf_nat_tftp_hook) nf_nat_tftp;
/* Fetch just the TFTP header that follows the UDP header; a short or
 * non-linear packet yields NULL and is passed through untouched. */
tfh = skb_header_pointer(skb, protoff + sizeof(struct udphdr),
sizeof(_tftph), &_tftph);
if (tfh == NULL)
return NF_ACCEPT;
switch (ntohs(tfh->opcode)) {
case TFTP_OPCODE_READ:
case TFTP_OPCODE_WRITE:
/* RRQ and WRQ works the same way */
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple);
nf_ct_dump_tuple(&ct->tuplehash[IP_CT_DIR_REPLY].tuple);
exp = nf_ct_expect_alloc(ct);
if (exp == NULL)
return NF_DROP;
tuple = &ct->tuplehash[IP_CT_DIR_REPLY].tuple;
/* Expect the data connection: same addresses as the reply tuple,
 * any server source port (NULL), to the client's port. */
nf_ct_expect_init(exp, NF_CT_EXPECT_CLASS_DEFAULT,
nf_ct_l3num(ct),
&tuple->src.u3, &tuple->dst.u3,
IPPROTO_UDP, NULL, &tuple->dst.u.udp.port);
pr_debug("expect: ");
nf_ct_dump_tuple(&exp->tuple);
/* Let NAT set up the expectation when this flow is NATed,
 * otherwise register it directly. */
nf_nat_tftp = rcu_dereference(nf_nat_tftp_hook);
if (nf_nat_tftp && ct->status & IPS_NAT_MASK)
ret = nf_nat_tftp(skb, ctinfo, exp);
else if (nf_ct_expect_related(exp) != 0)
ret = NF_DROP;
nf_ct_expect_put(exp);
break;
case TFTP_OPCODE_DATA:
case TFTP_OPCODE_ACK:
pr_debug("Data/ACK opcode\n");
break;
case TFTP_OPCODE_ERROR:
pr_debug("Error opcode\n");
break;
default:
pr_debug("Unknown opcode\n");
}
return ret;
}
static struct nf_conntrack_helper tftp[MAX_PORTS][2] __read_mostly;
static char tftp_names[MAX_PORTS][2][sizeof("tftp-65535")] __read_mostly;
static const struct nf_conntrack_expect_policy tftp_exp_policy = {
.max_expected = 1,
.timeout = 5 * 60,
};
/* Unregister the IPv4 and IPv6 conntrack helpers for every configured
 * TFTP port. */
static void nf_conntrack_tftp_fini(void)
{
	int port_idx, family;

	for (port_idx = 0; port_idx < ports_c; port_idx++)
		for (family = 0; family < 2; family++)
			nf_conntrack_helper_unregister(&tftp[port_idx][family]);
}
/*
 * Register one TFTP conntrack helper per configured port, for both IPv4
 * ([i][0]) and IPv6 ([i][1]).  Falls back to the default TFTP port when
 * no "ports" module parameter was supplied.  On any registration
 * failure, everything registered so far is torn down and the error is
 * propagated.
 */
static int __init nf_conntrack_tftp_init(void)
{
	int i, j, ret;
	char *tmpname;

	if (ports_c == 0)
		ports[ports_c++] = TFTP_PORT;

	for (i = 0; i < ports_c; i++) {
		memset(&tftp[i], 0, sizeof(tftp[i]));

		tftp[i][0].tuple.src.l3num = AF_INET;
		tftp[i][1].tuple.src.l3num = AF_INET6;
		for (j = 0; j < 2; j++) {
			tftp[i][j].tuple.dst.protonum = IPPROTO_UDP;
			tftp[i][j].tuple.src.u.udp.port = htons(ports[i]);
			tftp[i][j].expect_policy = &tftp_exp_policy;
			tftp[i][j].me = THIS_MODULE;
			tftp[i][j].help = tftp_help;

			/* Name the helper after the port it handles --
			 * tftp_names is sized for "tftp-65535" and the ftp
			 * helper uses the same convention; the old code
			 * mistakenly used the loop index here. */
			tmpname = &tftp_names[i][j][0];
			if (ports[i] == TFTP_PORT)
				sprintf(tmpname, "tftp");
			else
				sprintf(tmpname, "tftp-%u", ports[i]);
			tftp[i][j].name = tmpname;

			ret = nf_conntrack_helper_register(&tftp[i][j]);
			if (ret) {
				printk(KERN_ERR "nf_ct_tftp: failed to register"
				       " helper for pf: %u port: %u\n",
				       tftp[i][j].tuple.src.l3num, ports[i]);
				nf_conntrack_tftp_fini();
				return ret;
			}
		}
	}
	return 0;
}
module_init(nf_conntrack_tftp_init);
module_exit(nf_conntrack_tftp_fini);
| gpl-2.0 |
kaber/net-next-netlink-mmap | arch/mips/kernel/perf_event.c | 9000 | 1845 | /*
* Linux performance counter support for MIPS.
*
* Copyright (C) 2010 MIPS Technologies, Inc.
* Author: Deng-Cheng Zhu
*
* This code is based on the implementation for ARM, which is in turn
* based on the sparc64 perf event code and the x86 code. Performance
* counter access is based on the MIPS Oprofile code. And the callchain
* support references the code of MIPS stacktrace.c.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/perf_event.h>
#include <asm/stacktrace.h>
/* Callchain handling code. */
/*
* Leave userspace callchain empty for now. When we find a way to trace
* the user stack callchains, we will add it here.
*/
/*
 * Scan the raw kernel stack, starting at the stack pointer value passed
 * in reg29, and record every word that looks like a kernel text address.
 * Stops at the end of the stack or once the callchain entry is full.
 */
static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
				    unsigned long reg29)
{
	unsigned long *frame;

	for (frame = (unsigned long *)reg29; !kstack_end(frame); frame++) {
		unsigned long word = *frame;

		if (!__kernel_text_address(word))
			continue;
		perf_callchain_store(entry, word);
		if (entry->nr >= PERF_MAX_STACK_DEPTH)
			return;
	}
}
/*
 * Record the kernel-side call chain for a perf sample.
 *
 * With CONFIG_KALLSYMS the MIPS stack unwinder (unwind_stack) walks real
 * frames starting from the sampled PC; without it -- or when raw tracing
 * is forced, or the PC is not in kernel text -- we fall back to
 * heuristically scanning the raw stack for kernel text addresses.
 */
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
unsigned long sp = regs->regs[29]; /* $29: MIPS stack pointer */
#ifdef CONFIG_KALLSYMS
unsigned long ra = regs->regs[31]; /* $31: return address */
unsigned long pc = regs->cp0_epc;  /* sampled program counter */
/* Fallback path: only scan when sp still points inside the current
 * task's stack, otherwise record nothing. */
if (raw_show_trace || !__kernel_text_address(pc)) {
unsigned long stack_page =
(unsigned long)task_stack_page(current);
if (stack_page && sp >= stack_page &&
sp <= stack_page + THREAD_SIZE - 32)
save_raw_perf_callchain(entry, sp);
return;
}
/* Proper unwind: store each PC until the unwinder runs out of frames
 * or the entry buffer is full. */
do {
perf_callchain_store(entry, pc);
if (entry->nr >= PERF_MAX_STACK_DEPTH)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
#else
save_raw_perf_callchain(entry, sp);
#endif
}
| gpl-2.0 |
Lloir/nvidia-linux-3.10 | arch/mn10300/kernel/module.c | 9768 | 4339 | /* MN10300 Kernel module helper routines
*
* Copyright (C) 2007, 2008, 2009 Red Hat, Inc. All Rights Reserved.
* Written by Mark Salter (msalter@redhat.com)
* - Derived from arch/i386/kernel/module.c
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public Licence as published by
* the Free Software Foundation; either version 2 of the Licence, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public Licence for more details.
*
* You should have received a copy of the GNU General Public Licence
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/bug.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt, ...)
#endif
/* Store the low 16 bits of val at p, least-significant byte first, using
 * byte writes so unaligned targets in the instruction stream are safe. */
static void reloc_put16(uint8_t *p, uint32_t val)
{
	int i;

	for (i = 0; i < 2; i++)
		p[i] = (val >> (8 * i)) & 0xff;
}
/* Store the low 24 bits of val at p, least-significant byte first.  The
 * bytes are written directly (the original routed the low half through
 * reloc_put16) -- the stores are identical either way. */
static void reloc_put24(uint8_t *p, uint32_t val)
{
	int i;

	for (i = 0; i < 3; i++)
		p[i] = (val >> (8 * i)) & 0xff;
}
/* Store a full 32-bit value at p in little-endian byte order, one byte at
 * a time so the destination may be unaligned. */
static void reloc_put32(uint8_t *p, uint32_t val)
{
	int i;

	for (i = 0; i < 4; i++)
		p[i] = (val >> (8 * i)) & 0xff;
}
/*
 * apply a RELA relocation
 *
 * Applies each relocation in section @relsec to the section it targets,
 * writing adjusted values bytewise (via the reloc_put helpers) because
 * target addresses may be unaligned.
 *
 * R_MN10300_SYM_DIFF is a prefix relocation emitted by linker
 * relaxation: it supplies a symbol value to subtract from the
 * immediately following absolute relocation.  A SYM_DIFF not followed
 * by a suitable relocation is a malformed stream.
 *
 * Returns 0 on success or -ENOEXEC on a malformed relocation stream.
 */
int apply_relocate_add(Elf32_Shdr *sechdrs,
		       const char *strtab,
		       unsigned int symindex,
		       unsigned int relsec,
		       struct module *me)
{
	unsigned int i, sym_diff_seen = 0;
	Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
	Elf32_Sym *sym;
	Elf32_Addr relocation, sym_diff_val = 0;
	uint8_t *location;
	uint32_t value;

	DEBUGP("Applying relocate section %u to %u\n",
	       relsec, sechdrs[relsec].sh_info);

	for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
		/* this is where to make the change */
		location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
			+ rel[i].r_offset;

		/* this is the symbol the relocation is referring to (note that
		 * all undefined symbols have been resolved by the caller) */
		sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
			+ ELF32_R_SYM(rel[i].r_info);

		/* this is the adjustment to be made */
		relocation = sym->st_value + rel[i].r_addend;

		if (sym_diff_seen) {
			/* Consume the pending SYM_DIFF: only plain absolute
			 * relocations may follow it. */
			switch (ELF32_R_TYPE(rel[i].r_info)) {
			case R_MN10300_32:
			case R_MN10300_24:
			case R_MN10300_16:
			case R_MN10300_8:
				relocation -= sym_diff_val;
				sym_diff_seen = 0;
				break;
			default:
				printk(KERN_ERR "module %s: Unexpected SYM_DIFF relocation: %u\n",
				       me->name, ELF32_R_TYPE(rel[i].r_info));
				return -ENOEXEC;
			}
		}

		switch (ELF32_R_TYPE(rel[i].r_info)) {
		/* for the first four relocation types, we simply
		 * store the adjustment at the location given */
		case R_MN10300_32:
			reloc_put32(location, relocation);
			break;
		case R_MN10300_24:
			reloc_put24(location, relocation);
			break;
		case R_MN10300_16:
			reloc_put16(location, relocation);
			break;
		case R_MN10300_8:
			*location = relocation;
			break;

		/* for the next three relocation types, we write the
		 * adjustment with the address subtracted over the
		 * value at the location given */
		case R_MN10300_PCREL32:
			value = relocation - (uint32_t) location;
			reloc_put32(location, value);
			break;
		case R_MN10300_PCREL16:
			value = relocation - (uint32_t) location;
			reloc_put16(location, value);
			break;
		case R_MN10300_PCREL8:
			*location = relocation - (uint32_t) location;
			break;

		case R_MN10300_SYM_DIFF:
			/* This is used to adjust the next reloc as required
			 * by relaxation. */
			sym_diff_seen = 1;
			sym_diff_val = sym->st_value;
			break;

		case R_MN10300_ALIGN:
			/* Just ignore the ALIGN relocs.
			 * Only interesting if kernel performed relaxation. */
			continue;

		default:
			printk(KERN_ERR "module %s: Unknown relocation: %u\n",
			       me->name, ELF32_R_TYPE(rel[i].r_info));
			return -ENOEXEC;
		}
	}

	if (sym_diff_seen) {
		/* The stream ended on a dangling SYM_DIFF.  Report the last
		 * entry's type; the old code read rel[i], which at this point
		 * indexes one past the end of the relocation table. */
		printk(KERN_ERR "module %s: Nothing follows SYM_DIFF relocation: %u\n",
		       me->name, ELF32_R_TYPE(rel[i - 1].r_info));
		return -ENOEXEC;
	}
	return 0;
}
| gpl-2.0 |
tsiktsiris/falcon | drivers/net/wireless/ath/debug.c | 9768 | 1473 | /*
* Copyright (c) 2009 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/export.h>
#include "ath.h"
const char *ath_opmode_to_string(enum nl80211_iftype opmode)
{
switch (opmode) {
case NL80211_IFTYPE_UNSPECIFIED:
return "UNSPEC";
case NL80211_IFTYPE_ADHOC:
return "ADHOC";
case NL80211_IFTYPE_STATION:
return "STATION";
case NL80211_IFTYPE_AP:
return "AP";
case NL80211_IFTYPE_AP_VLAN:
return "AP-VLAN";
case NL80211_IFTYPE_WDS:
return "WDS";
case NL80211_IFTYPE_MONITOR:
return "MONITOR";
case NL80211_IFTYPE_MESH_POINT:
return "MESH";
case NL80211_IFTYPE_P2P_CLIENT:
return "P2P-CLIENT";
case NL80211_IFTYPE_P2P_GO:
return "P2P-GO";
default:
return "UNKNOWN";
}
}
EXPORT_SYMBOL(ath_opmode_to_string);
| gpl-2.0 |
mehrvarz/android_kernel_asus_grouper | arch/mn10300/mm/cache.c | 12072 | 1615 | /* MN10300 Cache flushing routines
*
* Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public Licence
* as published by the Free Software Foundation; either version
* 2 of the Licence, or (at your option) any later version.
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/threads.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/smp.h>
#include "cache-smp.h"
EXPORT_SYMBOL(mn10300_icache_inv);
EXPORT_SYMBOL(mn10300_icache_inv_range);
EXPORT_SYMBOL(mn10300_icache_inv_range2);
EXPORT_SYMBOL(mn10300_icache_inv_page);
EXPORT_SYMBOL(mn10300_dcache_inv);
EXPORT_SYMBOL(mn10300_dcache_inv_range);
EXPORT_SYMBOL(mn10300_dcache_inv_range2);
EXPORT_SYMBOL(mn10300_dcache_inv_page);
#ifdef CONFIG_MN10300_CACHE_WBACK
EXPORT_SYMBOL(mn10300_dcache_flush);
EXPORT_SYMBOL(mn10300_dcache_flush_inv);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_range);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_range2);
EXPORT_SYMBOL(mn10300_dcache_flush_inv_page);
EXPORT_SYMBOL(mn10300_dcache_flush_range);
EXPORT_SYMBOL(mn10300_dcache_flush_range2);
EXPORT_SYMBOL(mn10300_dcache_flush_page);
#endif
/*
 * cacheflush syscall: let userspace flush the instruction cache over a
 * virtual address range (needed after writing code, e.g. by a JIT).
 * A range whose end precedes its start is rejected with -EINVAL.
 */
asmlinkage long sys_cacheflush(unsigned long start, unsigned long end)
{
	long ret = 0;

	if (start <= end)
		flush_icache_range(start, end);
	else
		ret = -EINVAL;

	return ret;
}
| gpl-2.0 |
TeamLGOG/lge-kernel-gee | arch/cris/arch-v10/lib/old_checksum.c | 12328 | 2160 | /*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Tom May, <ftom@netcom.com>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <net/checksum.h>
#include <net/module.h>
#undef PROFILE_CHECKSUM
#ifdef PROFILE_CHECKSUM
/* these are just for profiling the checksum code with an oscillioscope.. uh */
#if 0
#define BITOFF *((unsigned char *)0xb0000030) = 0xff
#define BITON *((unsigned char *)0xb0000030) = 0x0
#endif
#include <asm/io.h>
#define CBITON LED_ACTIVE_SET(1)
#define CBITOFF LED_ACTIVE_SET(0)
#define BITOFF
#define BITON
#else
#define BITOFF
#define BITON
#define CBITOFF
#define CBITON
#endif
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
#include <asm/delay.h>
/*
 * csum_partial - fold a buffer into a 32-bit ones'-complement partial
 * Internet checksum, 16 bits at a time.
 *
 * The main loop consumes 16 bytes (eight u16 reads) per iteration; the
 * tail is handled two bytes, then at most one byte, at a time.  Carries
 * accumulate in the high half of the 32-bit sum and are folded by the
 * caller.
 *
 * NOTE(review): the word loads rely on 'p' being at least 2-byte aligned
 * (see the comment below); the final odd byte is added to the low half,
 * which matches the host's byte-order expectations here -- verify both
 * before reusing on another architecture.
 */
__wsum csum_partial(const void *p, int len, __wsum __sum)
{
u32 sum = (__force u32)__sum;
const u16 *buff = p;
/*
* Experiments with ethernet and slip connections show that buff
* is aligned on either a 2-byte or 4-byte boundary.
*/
const void *endMarker = p + len;
const void *marker = endMarker - (len % 16);
#if 0
if((int)buff & 0x3)
printk("unaligned buff %p\n", buff);
__delay(900); /* extra delay of 90 us to test performance hit */
#endif
BITON;
/* Main loop: eight 16-bit additions (16 bytes) per iteration. */
while (buff < marker) {
sum += *buff++;
sum += *buff++;
sum += *buff++;
sum += *buff++;
sum += *buff++;
sum += *buff++;
sum += *buff++;
sum += *buff++;
}
/* Tail: any remaining complete 16-bit words... */
marker = endMarker - (len % 2);
while (buff < marker)
sum += *buff++;
/* ...and at most one trailing byte for odd lengths. */
if (endMarker > buff)
sum += *(const u8 *)buff; /* add extra byte separately */
BITOFF;
return (__force __wsum)sum;
}
EXPORT_SYMBOL(csum_partial);
| gpl-2.0 |
InsomniaROM/kernel_lge_mako | arch/powerpc/boot/ns16550.c | 13864 | 1983 | /*
* 16550 serial console support.
*
* Original copied from <file:arch/ppc/boot/common/ns16550.c>
* (which had no copyright)
* Modifications: 2006 (c) MontaVista Software, Inc.
*
* Modified by: Mark A. Greer <mgreer@mvista.com>
*/
#include <stdarg.h>
#include <stddef.h>
#include "types.h"
#include "string.h"
#include "stdio.h"
#include "io.h"
#include "ops.h"
#define UART_DLL 0 /* Out: Divisor Latch Low */
#define UART_DLM 1 /* Out: Divisor Latch High */
#define UART_FCR 2 /* Out: FIFO Control Register */
#define UART_LCR 3 /* Out: Line Control Register */
#define UART_MCR 4 /* Out: Modem Control Register */
#define UART_LSR 5 /* In: Line Status Register */
#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */
#define UART_LSR_DR 0x01 /* Receiver data ready */
#define UART_MSR 6 /* In: Modem Status Register */
#define UART_SCR 7 /* I/O: Scratch Register */
static unsigned char *reg_base;
static u32 reg_shift;
/* Prepare the UART for console use: write 0x06 to the FIFO Control
 * Register, i.e. FCR bits 1 and 2, which reset the RX and TX FIFOs.
 * Always succeeds. */
static int ns16550_open(void)
{
out_8(reg_base + (UART_FCR << reg_shift), 0x06);
return 0;
}
/* Transmit one byte: busy-wait until the transmit holding register is
 * empty (LSR THRE set), then write the character at register offset 0. */
static void ns16550_putc(unsigned char c)
{
while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_THRE) == 0);
out_8(reg_base, c);
}
/* Blocking read of one byte: busy-wait for LSR DR (data ready), then
 * read the character from register offset 0. */
static unsigned char ns16550_getc(void)
{
while ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) == 0);
return in_8(reg_base);
}
/* Non-blocking poll: returns non-zero when a received byte is waiting
 * (LSR data-ready bit set). */
static u8 ns16550_tstc(void)
{
return ((in_8(reg_base + (UART_LSR << reg_shift)) & UART_LSR_DR) != 0);
}
/*
 * Bind the boot console to a 16550 described by device-tree node @devp:
 * map its registers, honour the optional "reg-offset" and "reg-shift"
 * properties, and fill in the console ops in *scdp.
 *
 * Returns 0 on success, -1 when no virtual register address can be
 * obtained for the node.
 *
 * Fix: the '&' operators in the dt_get_virtual_reg() and getprop()
 * calls had been corrupted into a mis-encoded '(R)' character; the
 * intended address-of expressions are restored.
 */
int ns16550_console_init(void *devp, struct serial_console_data *scdp)
{
	int n;
	u32 reg_offset;

	if (dt_get_virtual_reg(devp, (void **)&reg_base, 1) < 1)
		return -1;

	/* Optional byte offset of the UART within the mapped region. */
	n = getprop(devp, "reg-offset", &reg_offset, sizeof(reg_offset));
	if (n == sizeof(reg_offset))
		reg_base += reg_offset;

	/* Optional log2 spacing between registers; defaults to 0. */
	n = getprop(devp, "reg-shift", &reg_shift, sizeof(reg_shift));
	if (n != sizeof(reg_shift))
		reg_shift = 0;

	scdp->open = ns16550_open;
	scdp->putc = ns16550_putc;
	scdp->getc = ns16550_getc;
	scdp->tstc = ns16550_tstc;
	scdp->close = NULL;

	return 0;
}
| gpl-2.0 |
javelinanddart/android_kernel_samsung_tasstmo | scripts/dtc/libfdt/fdt_strerror.c | 14888 | 3401 | /*
* libfdt - Flat Device Tree manipulation
* Copyright (C) 2006 David Gibson, IBM Corporation.
*
* libfdt is dual licensed: you can use it either under the terms of
* the GPL, or the BSD license, at your option.
*
* a) This library is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License as
* published by the Free Software Foundation; either version 2 of the
* License, or (at your option) any later version.
*
* This library is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this library; if not, write to the Free
* Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston,
* MA 02110-1301 USA
*
* Alternatively,
*
* b) Redistribution and use in source and binary forms, with or
* without modification, are permitted provided that the following
* conditions are met:
*
* 1. Redistributions of source code must retain the above
* copyright notice, this list of conditions and the following
* disclaimer.
* 2. Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials
* provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
* CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
* INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
* OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
* EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "libfdt_env.h"
#include <fdt.h>
#include <libfdt.h>
#include "libfdt_internal.h"
struct fdt_errtabent {
const char *str;
};
#define FDT_ERRTABENT(val) \
[(val)] = { .str = #val, }
static struct fdt_errtabent fdt_errtable[] = {
FDT_ERRTABENT(FDT_ERR_NOTFOUND),
FDT_ERRTABENT(FDT_ERR_EXISTS),
FDT_ERRTABENT(FDT_ERR_NOSPACE),
FDT_ERRTABENT(FDT_ERR_BADOFFSET),
FDT_ERRTABENT(FDT_ERR_BADPATH),
FDT_ERRTABENT(FDT_ERR_BADSTATE),
FDT_ERRTABENT(FDT_ERR_TRUNCATED),
FDT_ERRTABENT(FDT_ERR_BADMAGIC),
FDT_ERRTABENT(FDT_ERR_BADVERSION),
FDT_ERRTABENT(FDT_ERR_BADSTRUCTURE),
FDT_ERRTABENT(FDT_ERR_BADLAYOUT),
};
#define FDT_ERRTABSIZE (sizeof(fdt_errtable) / sizeof(fdt_errtable[0]))
/*
 * fdt_strerror - convert a libfdt return code to a readable string.
 * Positive values are valid offsets/lengths, zero is success, and known
 * negative error codes are looked up in fdt_errtable; anything else
 * yields "<unknown error>".
 */
const char *fdt_strerror(int errval)
{
	const char *msg = "<unknown error>";

	if (errval > 0)
		msg = "<valid offset/length>";
	else if (errval == 0)
		msg = "<no error>";
	else if (errval > -FDT_ERRTABSIZE && fdt_errtable[-errval].str)
		msg = fdt_errtable[-errval].str;

	return msg;
}
| gpl-2.0 |
miiicmueller/android_kernel_raspberryPi_rpiv2 | sound/soc/codecs/pcm1794a.c | 41 | 1603 | /*
* Driver for the PCM1794A codec
*
* Author: Florian Meier <florian.meier@koalo.de>
* Copyright 2013
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <sound/soc.h>
static struct snd_soc_dai_driver pcm1794a_dai = {
.name = "pcm1794a-hifi",
.playback = {
.channels_min = 2,
.channels_max = 2,
.rates = SNDRV_PCM_RATE_8000_192000,
.formats = SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE
},
};
static struct snd_soc_codec_driver soc_codec_dev_pcm1794a;
/* Platform-device probe: register the PCM1794A (a codec with no control
 * interface, hence the empty codec-driver ops) together with its single
 * playback-only DAI. */
static int pcm1794a_probe(struct platform_device *pdev)
{
return snd_soc_register_codec(&pdev->dev, &soc_codec_dev_pcm1794a,
&pcm1794a_dai, 1);
}
/* Platform-device remove: drop the codec registration made in probe. */
static int pcm1794a_remove(struct platform_device *pdev)
{
snd_soc_unregister_codec(&pdev->dev);
return 0;
}
static struct platform_driver pcm1794a_codec_driver = {
.probe = pcm1794a_probe,
.remove = pcm1794a_remove,
.driver = {
.name = "pcm1794a-codec",
.owner = THIS_MODULE,
},
};
module_platform_driver(pcm1794a_codec_driver);
MODULE_DESCRIPTION("ASoC PCM1794A codec driver");
MODULE_AUTHOR("Florian Meier <florian.meier@koalo.de>");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
itsmerajit/kernel_otus | drivers/staging/prima/CORE/SME/src/sme_common/sme_Api.c | 41 | 433866 | /*
* Copyright (c) 2012-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/**=========================================================================
\file smeApi.c
\brief Definitions for SME APIs
Copyright 2008 (c) Qualcomm, Incorporated. All Rights Reserved.
Qualcomm Confidential and Proprietary.
========================================================================*/
/*===========================================================================
EDIT HISTORY FOR FILE
This section contains comments describing changes made to the module.
Notice that changes are listed in reverse chronological order.
when who what, where, why
---------- --- --------------------------------------------------------
06/03/10 js Added support to hostapd driven
* deauth/disassoc/mic failure
===========================================================================*/
/*--------------------------------------------------------------------------
Include Files
------------------------------------------------------------------------*/
#include "smsDebug.h"
#include "sme_Api.h"
#include "csrInsideApi.h"
#include "smeInside.h"
#include "csrInternal.h"
#include "wlan_qct_wda.h"
#include "halMsgApi.h"
#include "vos_trace.h"
#include "sme_Trace.h"
#include "vos_types.h"
#include "vos_trace.h"
#include "sapApi.h"
#include "macTrace.h"
#ifdef DEBUG_ROAM_DELAY
#include "vos_utils.h"
#endif
extern tSirRetStatus uMacPostCtrlMsg(void* pSirGlobal, tSirMbMsg* pMb);
#include <wlan_qct_pal_api.h>
#define LOG_SIZE 256
#define READ_MEMORY_DUMP_CMD 9
#define TL_INIT_STATE 0
#define CSR_ACTIVE_LIST_CMD_TIMEOUT_VALUE 1000*30 //30s
// TxMB Functions
extern eHalStatus pmcPrepareCommand( tpAniSirGlobal pMac, eSmeCommandType cmdType, void *pvParam,
tANI_U32 size, tSmeCmd **ppCmd );
extern void pmcReleaseCommand( tpAniSirGlobal pMac, tSmeCmd *pCommand );
extern void qosReleaseCommand( tpAniSirGlobal pMac, tSmeCmd *pCommand );
extern eHalStatus p2pProcessRemainOnChannelCmd(tpAniSirGlobal pMac, tSmeCmd *p2pRemainonChn);
extern eHalStatus sme_remainOnChnRsp( tpAniSirGlobal pMac, tANI_U8 *pMsg);
extern eHalStatus sme_mgmtFrmInd( tHalHandle hHal, tpSirSmeMgmtFrameInd pSmeMgmtFrm);
extern eHalStatus sme_remainOnChnReady( tHalHandle hHal, tANI_U8* pMsg);
extern eHalStatus sme_sendActionCnf( tHalHandle hHal, tANI_U8* pMsg);
extern eHalStatus p2pProcessNoAReq(tpAniSirGlobal pMac, tSmeCmd *pNoACmd);
static eHalStatus initSmeCmdList(tpAniSirGlobal pMac);
static void smeAbortCommand( tpAniSirGlobal pMac, tSmeCmd *pCommand, tANI_BOOLEAN fStopping );
eCsrPhyMode sme_GetPhyMode(tHalHandle hHal);
eHalStatus sme_HandleChangeCountryCode(tpAniSirGlobal pMac, void *pMsgBuf);
void sme_DisconnectConnectedSessions(tpAniSirGlobal pMac);
eHalStatus sme_HandleGenericChangeCountryCode(tpAniSirGlobal pMac, void *pMsgBuf);
eHalStatus sme_HandlePreChannelSwitchInd(tHalHandle hHal);
eHalStatus sme_HandlePostChannelSwitchInd(tHalHandle hHal);
#ifdef FEATURE_WLAN_LFR
tANI_BOOLEAN csrIsScanAllowed(tpAniSirGlobal pMac);
#endif
#ifdef WLAN_FEATURE_11W
eHalStatus sme_UnprotectedMgmtFrmInd( tHalHandle hHal,
tpSirSmeUnprotMgmtFrameInd pSmeMgmtFrm );
#endif
//Internal SME APIs
/**
 * sme_AcquireGlobalLock() - Take the SME global lock.
 * @psSme: pointer to the SME context owning the lock
 *
 * Return: eHAL_STATUS_SUCCESS when the lock is held,
 *         eHAL_STATUS_INVALID_PARAMETER when @psSme is NULL or the
 *         underlying vos lock could not be acquired.
 */
eHalStatus sme_AcquireGlobalLock( tSmeStruct *psSme)
{
    if (NULL == psSme)
        return eHAL_STATUS_INVALID_PARAMETER;

    if (!VOS_IS_STATUS_SUCCESS(vos_lock_acquire(&psSme->lkSmeGlobalLock)))
        return eHAL_STATUS_INVALID_PARAMETER;

    return eHAL_STATUS_SUCCESS;
}
/**
 * sme_ReleaseGlobalLock() - Release the SME global lock.
 * @psSme: pointer to the SME context owning the lock
 *
 * Return: eHAL_STATUS_SUCCESS when the lock is released,
 *         eHAL_STATUS_INVALID_PARAMETER when @psSme is NULL or the
 *         underlying vos lock release fails.
 */
eHalStatus sme_ReleaseGlobalLock( tSmeStruct *psSme)
{
    if (NULL == psSme)
        return eHAL_STATUS_INVALID_PARAMETER;

    if (!VOS_IS_STATUS_SUCCESS(vos_lock_release(&psSme->lkSmeGlobalLock)))
        return eHAL_STATUS_INVALID_PARAMETER;

    return eHAL_STATUS_SUCCESS;
}
/**
 * initSmeCmdList() - Allocate and initialize the SME command machinery.
 * @pMac: global MAC context
 *
 * Opens the active/pending lists (regular and scan-offload pairs) plus
 * the free list, carves the command pool out of a single allocation and
 * places every command on the free list.  Also arms a debug timer on the
 * active list, used to flag commands that get stuck there.
 *
 * Return: eHAL_STATUS_SUCCESS on success, a failure code otherwise.
 */
static eHalStatus initSmeCmdList(tpAniSirGlobal pMac)
{
    eHalStatus status;
    tSmeCmd *pCmd;
    tANI_U32 cmd_idx;
    VOS_STATUS vosStatus;
    vos_timer_t* cmdTimeoutTimer = NULL;
    pMac->sme.totalSmeCmd = SME_TOTAL_COMMAND;
    if (!HAL_STATUS_SUCCESS(status = csrLLOpen(pMac->hHdd,
                                               &pMac->sme.smeCmdActiveList)))
        goto end;
    if (!HAL_STATUS_SUCCESS(status = csrLLOpen(pMac->hHdd,
                                               &pMac->sme.smeCmdPendingList)))
        goto end;
    if (!HAL_STATUS_SUCCESS(status = csrLLOpen(pMac->hHdd,
                                               &pMac->sme.smeScanCmdActiveList)))
        goto end;
    if (!HAL_STATUS_SUCCESS(status = csrLLOpen(pMac->hHdd,
                                               &pMac->sme.smeScanCmdPendingList)))
        goto end;
    if (!HAL_STATUS_SUCCESS(status = csrLLOpen(pMac->hHdd,
                                               &pMac->sme.smeCmdFreeList)))
        goto end;
    /* One allocation holds the whole command pool; pSmeCmdBufAddr keeps
     * the base address so freeSmeCmdList() can release it. */
    pCmd = vos_mem_malloc(sizeof(tSmeCmd) * pMac->sme.totalSmeCmd);
    if ( NULL == pCmd )
        status = eHAL_STATUS_FAILURE;
    else
    {
        status = eHAL_STATUS_SUCCESS;
        vos_mem_set(pCmd, sizeof(tSmeCmd) * pMac->sme.totalSmeCmd, 0);
        pMac->sme.pSmeCmdBufAddr = pCmd;
        for (cmd_idx = 0; cmd_idx < pMac->sme.totalSmeCmd; cmd_idx++)
        {
            csrLLInsertTail(&pMac->sme.smeCmdFreeList,
                            &pCmd[cmd_idx].Link, LL_ACCESS_LOCK);
        }
    }
    /* This timer is only to debug the active list command timeout */
    cmdTimeoutTimer = (vos_timer_t*)vos_mem_malloc(sizeof(vos_timer_t));
    if (cmdTimeoutTimer)
    {
        pMac->sme.smeCmdActiveList.cmdTimeoutTimer = cmdTimeoutTimer;
        vosStatus =
            vos_timer_init( pMac->sme.smeCmdActiveList.cmdTimeoutTimer,
                            VOS_TIMER_TYPE_SW,
                            activeListCmdTimeoutHandle,
                            (void*) pMac);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                "Init Timer fail for active list command process time out");
            vos_mem_free(pMac->sme.smeCmdActiveList.cmdTimeoutTimer);
            /* BUGFIX: clear the stale pointer so freeSmeCmdList() does
             * not later destroy/free an already-freed timer. */
            pMac->sme.smeCmdActiveList.cmdTimeoutTimer = NULL;
        }
        else
        {
            pMac->sme.smeCmdActiveList.cmdTimeoutDuration =
                CSR_ACTIVE_LIST_CMD_TIMEOUT_VALUE;
        }
    }
end:
    if (!HAL_STATUS_SUCCESS(status))
        smsLog(pMac, LOGE, "failed to initialize sme command list:%d\n",
               status);
    return (status);
}
/* Return a finished command to the free pool.  The command type is
 * reset to eSmeNoCommand first so a recycled entry is never mistaken
 * for a live command. */
void smeReleaseCommand(tpAniSirGlobal pMac, tSmeCmd *pCmd)
{
    pCmd->command = eSmeNoCommand;
    csrLLInsertTail(&pMac->sme.smeCmdFreeList, &pCmd->Link, LL_ACCESS_LOCK);
}
/* Drain @pList completely, aborting each command so that any
 * type-specific cleanup runs before the command goes back to the
 * free pool. */
static void smeReleaseCmdList(tpAniSirGlobal pMac, tDblLinkList *pList)
{
    tListElem *pNode;

    for (pNode = csrLLRemoveHead(pList, LL_ACCESS_LOCK);
         pNode != NULL;
         pNode = csrLLRemoveHead(pList, LL_ACCESS_LOCK))
    {
        //TODO: base on command type to call release functions
        //reinitialize different command types so they can be reused
        smeAbortCommand(pMac, GET_BASE_ADDR(pNode, tSmeCmd, Link),
                        eANI_BOOLEAN_TRUE);
    }
}
/* Abort every command still sitting on the SME queues (regular and
 * scan-offload, pending and active) and return them to the free list.
 * Called from freeSmeCmdList() during shutdown, before the lists are
 * closed. */
static void purgeSmeCmdList(tpAniSirGlobal pMac)
{
    //release any out standing commands back to free command list
    smeReleaseCmdList(pMac, &pMac->sme.smeCmdPendingList);
    smeReleaseCmdList(pMac, &pMac->sme.smeCmdActiveList);
    smeReleaseCmdList(pMac, &pMac->sme.smeScanCmdPendingList);
    smeReleaseCmdList(pMac, &pMac->sme.smeScanCmdActiveList);
}
/* Abort all commands on @pList that belong to @sessionId.
 *
 * Matching entries are first moved onto a private local list while
 * holding the list lock, then aborted outside the lock — aborting a
 * command may call back into code that takes other locks, so it must
 * not run with @pList locked. */
void purgeSmeSessionCmdList(tpAniSirGlobal pMac, tANI_U32 sessionId,
        tDblLinkList *pList)
{
    //release any out standing commands back to free command list
    tListElem *pEntry, *pNext;
    tSmeCmd *pCommand;
    tDblLinkList localList;
    vos_mem_zero(&localList, sizeof(tDblLinkList));
    if(!HAL_STATUS_SUCCESS(csrLLOpen(pMac->hHdd, &localList)))
    {
        smsLog(pMac, LOGE, FL(" failed to open list"));
        return;
    }
    /* Phase 1: under the list lock, detach this session's commands. */
    csrLLLock(pList);
    pEntry = csrLLPeekHead(pList, LL_ACCESS_NOLOCK);
    while(pEntry != NULL)
    {
        /* Fetch the successor first; removing pEntry invalidates it. */
        pNext = csrLLNext(pList, pEntry, LL_ACCESS_NOLOCK);
        pCommand = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
        if(pCommand->sessionId == sessionId)
        {
            if(csrLLRemoveEntry(pList, pEntry, LL_ACCESS_NOLOCK))
            {
                csrLLInsertTail(&localList, pEntry, LL_ACCESS_NOLOCK);
            }
        }
        pEntry = pNext;
    }
    csrLLUnlock(pList);
    /* Phase 2: abort the detached commands with no lock held. */
    while( (pEntry = csrLLRemoveHead(&localList, LL_ACCESS_NOLOCK)) )
    {
        pCommand = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
        smeAbortCommand(pMac, pCommand, eANI_BOOLEAN_TRUE);
    }
    csrLLClose(&localList);
}
/**
 * freeSmeCmdList() - Tear down the SME command machinery.
 * @pMac: global MAC context
 *
 * Aborts outstanding commands, closes every command list, destroys the
 * active-list debug timer and releases the command pool (under the SME
 * global lock, since other threads may still reference the pool).
 *
 * Return: eHAL_STATUS_SUCCESS, or the lock-acquire/release failure code.
 */
static eHalStatus freeSmeCmdList(tpAniSirGlobal pMac)
{
    eHalStatus status = eHAL_STATUS_SUCCESS;
    purgeSmeCmdList(pMac);
    csrLLClose(&pMac->sme.smeCmdPendingList);
    csrLLClose(&pMac->sme.smeCmdActiveList);
    csrLLClose(&pMac->sme.smeScanCmdPendingList);
    csrLLClose(&pMac->sme.smeScanCmdActiveList);
    csrLLClose(&pMac->sme.smeCmdFreeList);
    /* Destroy the active-list command timeout timer.
     * BUGFIX: the timer allocation in initSmeCmdList() may have failed,
     * leaving cmdTimeoutTimer NULL — guard before destroy/free. */
    if (NULL != pMac->sme.smeCmdActiveList.cmdTimeoutTimer)
    {
        vos_timer_destroy(pMac->sme.smeCmdActiveList.cmdTimeoutTimer);
        vos_mem_free(pMac->sme.smeCmdActiveList.cmdTimeoutTimer);
        pMac->sme.smeCmdActiveList.cmdTimeoutTimer = NULL;
    }
    status = vos_lock_acquire(&pMac->sme.lkSmeGlobalLock);
    if(status != eHAL_STATUS_SUCCESS)
    {
        smsLog(pMac, LOGE,
            FL("Failed to acquire the lock status = %d"), status);
        goto done;
    }
    if(NULL != pMac->sme.pSmeCmdBufAddr)
    {
        vos_mem_free(pMac->sme.pSmeCmdBufAddr);
        pMac->sme.pSmeCmdBufAddr = NULL;
    }
    status = vos_lock_release(&pMac->sme.lkSmeGlobalLock);
    if(status != eHAL_STATUS_SUCCESS)
    {
        smsLog(pMac, LOGE,
            FL("Failed to release the lock status = %d"), status);
    }
done:
    return (status);
}
/* Log the reason/type details of a CSR-owned command — used when a
 * command appears stuck so the logs show why it was queued. */
void dumpCsrCommandInfo(tpAniSirGlobal pMac, tSmeCmd *pCmd)
{
    if (eSmeCommandScan == pCmd->command)
    {
        smsLog( pMac, LOGE, " scan command reason is %d", pCmd->u.scanCmd.reason );
    }
    else if (eSmeCommandRoam == pCmd->command)
    {
        smsLog( pMac, LOGE, " roam command reason is %d", pCmd->u.roamCmd.roamReason );
    }
    else if (eSmeCommandWmStatusChange == pCmd->command)
    {
        smsLog( pMac, LOGE, " WMStatusChange command type is %d", pCmd->u.wmStatusChangeCmd.Type );
    }
    else if (eSmeCommandSetKey == pCmd->command)
    {
        smsLog( pMac, LOGE, " setKey command auth(%d) enc(%d)",
                pCmd->u.setKeyCmd.authType, pCmd->u.setKeyCmd.encType );
    }
    else if (eSmeCommandRemoveKey == pCmd->command)
    {
        smsLog( pMac, LOGE, " removeKey command auth(%d) enc(%d)",
                pCmd->u.removeKeyCmd.authType, pCmd->u.removeKeyCmd.encType );
    }
    else
    {
        smsLog( pMac, LOGE, " default: Unhandled command %d",
                pCmd->command);
    }
}
/* Pop a fresh command buffer from the free list.
 *
 * On exhaustion this does not allocate; instead it dumps diagnostic
 * state (the stuck active command plus the first few pending commands,
 * both SME and CSR queues) and returns NULL.  The static flag makes the
 * pending-queue dump print only once per exhaustion episode. */
tSmeCmd *smeGetCommandBuffer( tpAniSirGlobal pMac )
{
    tSmeCmd *pRetCmd = NULL, *pTempCmd = NULL;
    tListElem *pEntry;
    static int smeCommandQueueFull = 0;
    pEntry = csrLLRemoveHead( &pMac->sme.smeCmdFreeList, LL_ACCESS_LOCK );
    // If we can get another MS Msg buffer, then we are ok.  Just link
    // the entry onto the linked list.  (We are using the linked list
    // to keep track of the message buffers).
    if ( pEntry )
    {
        pRetCmd = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
        /* reset when free list is available */
        smeCommandQueueFull = 0;
    }
    else {
        int idx = 1;
        //Cannot change pRetCmd here since it needs to return later.
        pEntry = csrLLPeekHead( &pMac->sme.smeCmdActiveList, LL_ACCESS_LOCK );
        if( pEntry )
        {
           pTempCmd = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
        }
        smsLog( pMac, LOGE, "Out of command buffer.... command (0x%X) stuck",
           (pTempCmd) ? pTempCmd->command : eSmeNoCommand );
        if(pTempCmd)
        {
            if( eSmeCsrCommandMask & pTempCmd->command )
            {
                //CSR command is stuck. See what the reason code is for that command
                dumpCsrCommandInfo(pMac, pTempCmd);
            }
        } //if(pTempCmd)
        //dump what is in the pending queue
        csrLLLock(&pMac->sme.smeCmdPendingList);
        pEntry = csrLLPeekHead( &pMac->sme.smeCmdPendingList, LL_ACCESS_NOLOCK );
        while(pEntry && !smeCommandQueueFull)
        {
            pTempCmd = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
            /* Print only 1st five commands from pending queue. */
            if (idx <= 5)
                smsLog( pMac, LOGE, "Out of command buffer.... SME pending command #%d (0x%X)",
                        idx, pTempCmd->command );
            idx++;
            if( eSmeCsrCommandMask & pTempCmd->command )
            {
                //CSR command is stuck. See what the reason code is for that command
                dumpCsrCommandInfo(pMac, pTempCmd);
            }
            pEntry = csrLLNext( &pMac->sme.smeCmdPendingList, pEntry, LL_ACCESS_NOLOCK );
        }
        /* Increment static variable so that it prints pending command only once*/
        smeCommandQueueFull++;
        csrLLUnlock(&pMac->sme.smeCmdPendingList);
        //There may be some more command in CSR's own pending queue
        csrLLLock(&pMac->roam.roamCmdPendingList);
        pEntry = csrLLPeekHead( &pMac->roam.roamCmdPendingList, LL_ACCESS_NOLOCK );
        while(pEntry)
        {
            pTempCmd = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
            smsLog( pMac, LOGE, "Out of command buffer.... CSR pending command #%d (0x%X)",
                    idx++, pTempCmd->command );
            dumpCsrCommandInfo(pMac, pTempCmd);
            pEntry = csrLLNext( &pMac->roam.roamCmdPendingList, pEntry, LL_ACCESS_NOLOCK );
        }
        csrLLUnlock(&pMac->roam.roamCmdPendingList);
    }
    /* Scrub the recycled buffer before handing it out. */
    if( pRetCmd )
    {
         vos_mem_set((tANI_U8 *)&pRetCmd->command, sizeof(pRetCmd->command), 0);
         vos_mem_set((tANI_U8 *)&pRetCmd->sessionId, sizeof(pRetCmd->sessionId), 0);
         vos_mem_set((tANI_U8 *)&pRetCmd->u, sizeof(pRetCmd->u), 0);
    }
    return( pRetCmd );
}
/* Queue @pCmd for execution.  High-priority commands jump to the head
 * of the pending list; everything else is FIFO.  The pending queue is
 * kicked immediately so the command starts as soon as possible. */
void smePushCommand( tpAniSirGlobal pMac, tSmeCmd *pCmd, tANI_BOOLEAN fHighPriority )
{
    if (fHighPriority)
        csrLLInsertHead(&pMac->sme.smeCmdPendingList, &pCmd->Link,
                        LL_ACCESS_LOCK);
    else
        csrLLInsertTail(&pMac->sme.smeCmdPendingList, &pCmd->Link,
                        LL_ACCESS_LOCK);

    // process the command queue...
    smeProcessPendingQueue(pMac);
}
/* Decide whether @pCommand needs the chip at full power first.
 *
 * Returns the exit-power-save command to run before @pCommand,
 * eSmeDropCommand when the current PMC state makes the command
 * invalid, or eSmeNoCommand when it can run as-is. */
static eSmeCommandType smeIsFullPowerNeeded( tpAniSirGlobal pMac, tSmeCmd *pCommand )
{
    tANI_BOOLEAN fNeedFullPower = eANI_BOOLEAN_FALSE;
    tPmcState pmcState = pmcGetPmcState(pMac);

    if (!HAL_STATUS_SUCCESS(csrIsFullPowerNeeded(pMac, pCommand, NULL,
                                                 &fNeedFullPower)))
    {
        //PMC state is not right for the command, drop it
        return eSmeDropCommand;
    }
    /* QoS (Ts) commands always need full power. */
    if (!fNeedFullPower)
        fNeedFullPower = (eSmeCommandAddTs == pCommand->command) ||
                         (eSmeCommandDelTs == pCommand->command);
#ifdef FEATURE_OEM_DATA_SUPPORT
    /* OEM data requests cannot run while in IMPS. */
    if (!fNeedFullPower)
        fNeedFullPower = (IMPS == pmcState) &&
                         (eSmeCommandOemDataReq == pCommand->command);
#endif
    /* Remain-on-channel cannot run while in IMPS. */
    if (!fNeedFullPower)
        fNeedFullPower = (IMPS == pmcState) &&
                         (eSmeCommandRemainOnChannel == pCommand->command);

    if (!fNeedFullPower)
        return eSmeNoCommand;

    /* Map the current power-save state to the command that exits it. */
    switch (pmcState)
    {
    case IMPS:
    case STANDBY:
        return eSmeCommandExitImps;
    case BMPS:
        return eSmeCommandExitBmps;
    case UAPSD:
        return eSmeCommandExitUapsd;
    case WOWL:
        return eSmeCommandExitWowl;
    default:
        return eSmeNoCommand;
    }
}
/* Abort a command, running any type-specific cleanup before it goes
 * back to the free pool.  PMC and CSR commands are delegated to their
 * owning modules; a remain-on-channel command must notify its requester
 * (with eCSR_SCAN_ABORT) before release. */
static void smeAbortCommand( tpAniSirGlobal pMac, tSmeCmd *pCommand, tANI_BOOLEAN fStopping )
{
    if( eSmePmcCommandMask & pCommand->command )
    {
        pmcAbortCommand( pMac, pCommand, fStopping );
    }
    else if ( eSmeCsrCommandMask & pCommand->command )
    {
        csrAbortCommand( pMac, pCommand, fStopping );
    }
    else
    {
        switch( pCommand->command )
        {
            case eSmeCommandRemainOnChannel:
            {
                /* The original code NULL-checked the callback twice;
                 * one check is sufficient. */
                remainOnChanCallback callback =
                                         pCommand->u.remainChlCmd.callback;
                if ( callback )
                {
                    /* Tell the requester the remain-on-channel request
                     * was aborted. */
                    callback( pMac, pCommand->u.remainChlCmd.callbackCtx,
                              eCSR_SCAN_ABORT );
                }
                smeReleaseCommand( pMac, pCommand );
                break;
            }
            default:
                smeReleaseCommand( pMac, pCommand );
                break;
        }
    }
}
/* Return the first command on @pList whose session id differs from
 * @sessionId (the session currently running a scan), or NULL when
 * every pending command belongs to that session. */
tListElem *csrGetCmdToProcess(tpAniSirGlobal pMac, tDblLinkList *pList,
                              tANI_U8 sessionId, tANI_BOOLEAN fInterlocked)
{
    tListElem *pNode;
    tSmeCmd *pCmd;

    for (pNode = csrLLPeekHead(pList, LL_ACCESS_LOCK);
         pNode != NULL;
         pNode = csrLLNext(pList, pNode, fInterlocked))
    {
        pCmd = GET_BASE_ADDR(pNode, tSmeCmd, Link);
        if (pCmd->sessionId != sessionId)
        {
            smsLog(pMac, LOG1, "selected the command with different sessionId");
            return pNode;
        }
    }
    smsLog(pMac, LOG1, "No command pending with different sessionId");
    return NULL;
}
/**
 * smeProcessScanQueue() - Move one pending scan command onto the scan
 * active list and start it (scan-offload path).
 * @pMac: global MAC context
 *
 * Return: eANI_BOOLEAN_FALSE when the caller must stop pulling commands
 * (wait-for-key in progress, or a non-scan command for the same session
 * is already active); eANI_BOOLEAN_TRUE otherwise.
 */
tANI_BOOLEAN smeProcessScanQueue(tpAniSirGlobal pMac)
{
    tListElem *pEntry;
    tSmeCmd *pCommand;
    tListElem *pSmeEntry;
    tSmeCmd *pSmeCommand;
    tANI_BOOLEAN status = eANI_BOOLEAN_TRUE;
    csrLLLock( &pMac->sme.smeScanCmdActiveList );
    if (csrLLIsListEmpty( &pMac->sme.smeScanCmdActiveList,
                LL_ACCESS_NOLOCK ))
    {
        if (!csrLLIsListEmpty(&pMac->sme.smeScanCmdPendingList,
                    LL_ACCESS_LOCK))
        {
            pEntry = csrLLPeekHead( &pMac->sme.smeScanCmdPendingList,
                    LL_ACCESS_LOCK );
            if (pEntry)
            {
                pCommand = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
                //We cannot execute any command in wait-for-key state until setKey is through.
                if (CSR_IS_WAIT_FOR_KEY( pMac, pCommand->sessionId))
                {
                    if (!CSR_IS_SET_KEY_COMMAND(pCommand))
                    {
                        smsLog(pMac, LOGE,
                                " Cannot process command(%d) while waiting for key",
                                pCommand->command);
                        status = eANI_BOOLEAN_FALSE;
                        goto end;
                    }
                }
                if ((!csrLLIsListEmpty(&pMac->sme.smeCmdActiveList,
                                       LL_ACCESS_LOCK )))
                {
                    pSmeEntry = csrLLPeekHead(&pMac->sme.smeCmdActiveList,
                                              LL_ACCESS_LOCK);
                    /* BUGFIX: the original tested (and dereferenced)
                     * pEntry — the pending scan command — instead of
                     * pSmeEntry, so the session comparison below was
                     * made against the pending command itself. */
                    if (pSmeEntry)
                    {
                        pSmeCommand = GET_BASE_ADDR(pSmeEntry, tSmeCmd,
                                                    Link) ;
                        /* if scan is running on one interface and SME receives
                           the next command on the same interface then
                           dont the allow the command to be queued to
                           smeCmdPendingList. If next scan is allowed on
                           the same interface the CSR state machine will
                           get screwed up. */
                        if (pSmeCommand->sessionId == pCommand->sessionId)
                        {
                            status = eANI_BOOLEAN_FALSE;
                            goto end;
                        }
                    }
                }
                if ( csrLLRemoveEntry( &pMac->sme.smeScanCmdPendingList,
                            pEntry, LL_ACCESS_LOCK ) )
                {
                    csrLLInsertHead( &pMac->sme.smeScanCmdActiveList,
                            &pCommand->Link, LL_ACCESS_NOLOCK );
                    switch (pCommand->command)
                    {
                        case eSmeCommandScan:
                            smsLog(pMac, LOG1,
                                    " Processing scan offload command ");
                            csrProcessScanCommand( pMac, pCommand );
                            break;
                        default:
                            smsLog(pMac, LOGE,
                                    " Something wrong, wrong command enqueued"
                                    " to smeScanCmdPendingList");
                            pEntry = csrLLRemoveHead(
                                    &pMac->sme.smeScanCmdActiveList,
                                    LL_ACCESS_NOLOCK );
                            pCommand = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
                            smeReleaseCommand( pMac, pCommand );
                            break;
                    }
                }
            }
        }
    }
end:
    csrLLUnlock(&pMac->sme.smeScanCmdActiveList);
    return status;
}
/**
 * smeProcessPnoCommand() - Forward a PNO scan request to WDA.
 * @pMac: global MAC context
 * @pCmd: SME command carrying the PNO request in u.pnoInfo
 *
 * A copy of the request is posted to WDA as WDA_SET_PNO_REQ; once the
 * message is queued, WDA owns (and frees) the buffer.
 *
 * Return: eHAL_STATUS_SUCCESS when posted, eHAL_STATUS_FAILURE on
 * allocation or post failure.
 */
eHalStatus smeProcessPnoCommand(tpAniSirGlobal pMac, tSmeCmd *pCmd)
{
    tpSirPNOScanReq pnoReqBuf;
    tSirMsgQ msgQ;
    pnoReqBuf = vos_mem_malloc(sizeof(tSirPNOScanReq));
    if ( NULL == pnoReqBuf )
    {
        smsLog(pMac, LOGE, FL("failed to allocate memory"));
        return eHAL_STATUS_FAILURE;
    }
    vos_mem_copy(pnoReqBuf, &(pCmd->u.pnoInfo), sizeof(tSirPNOScanReq));
    /* Log-message typo fixed ("comamnd" -> "command"). */
    smsLog(pMac, LOG1, FL("post WDA_SET_PNO_REQ command"));
    msgQ.type = WDA_SET_PNO_REQ;
    msgQ.reserved = 0;
    msgQ.bodyptr = pnoReqBuf;
    msgQ.bodyval = 0;
    /* BUGFIX: the post result was ignored; on failure WDA never sees
     * the message, so the buffer must be freed here to avoid a leak. */
    if (!VOS_IS_STATUS_SUCCESS(wdaPostCtrlMsg( pMac, &msgQ)))
    {
        smsLog(pMac, LOGE, FL("failed to post WDA_SET_PNO_REQ"));
        vos_mem_free(pnoReqBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
/**
 * smeProcessCommand() - Pull one command off the SME pending queue and
 * start executing it.
 * @pMac: global MAC context
 *
 * Lock discipline: the active list is always locked before the pending
 * list; every dispatch path below releases the active-list lock exactly
 * once before (or instead of) invoking a handler.
 *
 * Return: eANI_BOOLEAN_TRUE when the caller should call again (more
 * work may be runnable), eANI_BOOLEAN_FALSE otherwise.
 */
tANI_BOOLEAN smeProcessCommand( tpAniSirGlobal pMac )
{
    tANI_BOOLEAN fContinue = eANI_BOOLEAN_FALSE;
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tListElem *pEntry;
    tSmeCmd *pCommand;
    tListElem *pSmeEntry;
    tSmeCmd *pSmeCommand;
    eSmeCommandType pmcCommand = eSmeNoCommand;
    // if the ActiveList is empty, then nothing is active so we can process a
    // pending command...
    //always lock active list before locking pending list
    csrLLLock( &pMac->sme.smeCmdActiveList );
    if ( csrLLIsListEmpty( &pMac->sme.smeCmdActiveList, LL_ACCESS_NOLOCK ) )
    {
        if(!csrLLIsListEmpty(&pMac->sme.smeCmdPendingList, LL_ACCESS_LOCK))
        {
            /* If scan command is pending in the smeScanCmdActive list
             * then pick the command from smeCmdPendingList which is
             * not matching with the scan command session id.
             * At any point of time only one command will be allowed
             * on a single session. */
            if ((pMac->fScanOffload) &&
                    (!csrLLIsListEmpty(&pMac->sme.smeScanCmdActiveList,
                                       LL_ACCESS_LOCK)))
            {
                pSmeEntry = csrLLPeekHead(&pMac->sme.smeScanCmdActiveList,
                        LL_ACCESS_LOCK);
                if (pSmeEntry)
                {
                    pSmeCommand = GET_BASE_ADDR(pSmeEntry, tSmeCmd, Link);
                    pEntry = csrGetCmdToProcess(pMac,
                            &pMac->sme.smeCmdPendingList,
                            pSmeCommand->sessionId,
                            LL_ACCESS_LOCK);
                    goto sme_process_cmd;
                }
            }
            //Peek the command
            pEntry = csrLLPeekHead( &pMac->sme.smeCmdPendingList, LL_ACCESS_LOCK );
sme_process_cmd:
            if( pEntry )
            {
                pCommand = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
                /* Allow only disconnect command
                 * in wait-for-key state until setKey is through.
                 */
                if( CSR_IS_WAIT_FOR_KEY( pMac, pCommand->sessionId ) &&
                        !CSR_IS_DISCONNECT_COMMAND( pCommand ) )
                {
                    if( !CSR_IS_SET_KEY_COMMAND( pCommand ) )
                    {
                        csrLLUnlock( &pMac->sme.smeCmdActiveList );
                        smsLog(pMac, LOGE, FL("SessionId %d: Cannot process "
                               "command(%d) while waiting for key"),
                               pCommand->sessionId, pCommand->command);
                        fContinue = eANI_BOOLEAN_FALSE;
                        goto sme_process_scan_queue;
                    }
                }
                pmcCommand = smeIsFullPowerNeeded( pMac, pCommand );
                if( eSmeDropCommand == pmcCommand )
                {
                    //This command is not ok for current PMC state
                    if( csrLLRemoveEntry( &pMac->sme.smeCmdPendingList, pEntry, LL_ACCESS_LOCK ) )
                    {
                        smeAbortCommand( pMac, pCommand, eANI_BOOLEAN_FALSE );
                    }
                    csrLLUnlock( &pMac->sme.smeCmdActiveList );
                    //tell caller to continue
                    fContinue = eANI_BOOLEAN_TRUE;
                    goto sme_process_scan_queue;
                }
                else if( eSmeNoCommand != pmcCommand )
                {
                    tExitBmpsInfo exitBmpsInfo;
                    void *pv = NULL;
                    tANI_U32 size = 0;
                    tSmeCmd *pPmcCmd = NULL;
                    if( eSmeCommandExitBmps == pmcCommand )
                    {
                        exitBmpsInfo.exitBmpsReason = eSME_REASON_OTHER;
                        pv = (void *)&exitBmpsInfo;
                        size = sizeof(tExitBmpsInfo);
                    }
                    //pmcCommand has to be one of the exit power save command
                    status = pmcPrepareCommand( pMac, pmcCommand, pv, size, &pPmcCmd );
                    if( HAL_STATUS_SUCCESS( status ) && pPmcCmd )
                    {
                        //Force this command to wake up the chip
                        csrLLInsertHead( &pMac->sme.smeCmdActiveList, &pPmcCmd->Link, LL_ACCESS_NOLOCK );
                        csrLLUnlock( &pMac->sme.smeCmdActiveList );
                        fContinue = pmcProcessCommand( pMac, pPmcCmd );
                        if( fContinue )
                        {
                            //The command failed, remove it
                            if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList, &pPmcCmd->Link, LL_ACCESS_NOLOCK ) )
                            {
                                pmcReleaseCommand( pMac, pPmcCmd );
                            }
                        }
                    }
                    else
                    {
                        csrLLUnlock( &pMac->sme.smeCmdActiveList );
                        smsLog( pMac, LOGE, FL( "Cannot issue command(0x%X) to wake up the chip. Status = %d"), pmcCommand, status );
                        //Let it retry
                        fContinue = eANI_BOOLEAN_TRUE;
                    }
                    goto sme_process_scan_queue;
                }
                if ( csrLLRemoveEntry( &pMac->sme.smeCmdPendingList, pEntry, LL_ACCESS_LOCK ) )
                {
                    // we can reuse the pCommand
                    // Insert the command onto the ActiveList...
                    csrLLInsertHead( &pMac->sme.smeCmdActiveList, &pCommand->Link, LL_ACCESS_NOLOCK );
                    if( pMac->deferImps )
                    {
                        /* IMPS timer is already running so stop it and
                         * it will get restarted when no command is pending
                         */
                        csrScanStopIdleScanTimer( pMac );
                        pMac->scan.fRestartIdleScan = eANI_BOOLEAN_TRUE;
                        pMac->deferImps = eANI_BOOLEAN_FALSE;
                    }
                    // .... and process the command.
                    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                       TRACE_CODE_SME_COMMAND, pCommand->sessionId, pCommand->command));
                    switch ( pCommand->command )
                    {
                        case eSmeCommandScan:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            status = csrProcessScanCommand( pMac, pCommand );
                            break;
                        case eSmeCommandRoam:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            status  = csrRoamProcessCommand( pMac, pCommand );
                            if(!HAL_STATUS_SUCCESS(status))
                            {
                                if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList,
                                            &pCommand->Link, LL_ACCESS_LOCK ) )
                                {
                                    csrReleaseCommandRoam( pMac, pCommand );
                                }
                            }
                            break;
                        case eSmeCommandWmStatusChange:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            csrRoamProcessWmStatusChangeCommand(pMac, pCommand);
                            break;
                        case eSmeCommandSetKey:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            status = csrRoamProcessSetKeyCommand( pMac, pCommand );
                            if(!HAL_STATUS_SUCCESS(status))
                            {
                                if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList,
                                            &pCommand->Link, LL_ACCESS_LOCK ) )
                                {
                                    csrReleaseCommandSetKey( pMac, pCommand );
                                }
                            }
                            break;
                        case eSmeCommandRemoveKey:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            status = csrRoamProcessRemoveKeyCommand( pMac, pCommand );
                            if(!HAL_STATUS_SUCCESS(status))
                            {
                                if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList,
                                            &pCommand->Link, LL_ACCESS_LOCK ) )
                                {
                                    csrReleaseCommandRemoveKey( pMac, pCommand );
                                }
                            }
                            break;
                        case eSmeCommandAddStaSession:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            csrProcessAddStaSessionCommand( pMac, pCommand );
                            break;
                        case eSmeCommandDelStaSession:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            csrProcessDelStaSessionCommand( pMac, pCommand );
                            break;
#ifdef FEATURE_OEM_DATA_SUPPORT
                        case eSmeCommandOemDataReq:
                            csrLLUnlock(&pMac->sme.smeCmdActiveList);
                            oemData_ProcessOemDataReqCommand(pMac, pCommand);
                            break;
#endif
                        case eSmeCommandRemainOnChannel:
                            csrLLUnlock(&pMac->sme.smeCmdActiveList);
                            p2pProcessRemainOnChannelCmd(pMac, pCommand);
                            break;
                        case eSmeCommandNoAUpdate:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            p2pProcessNoAReq(pMac,pCommand);
                            /* BUGFIX: a missing break here fell through
                             * into the PMC cases below, unlocking the
                             * already-unlocked active list and running
                             * pmcProcessCommand on a NoA command. */
                            break;
                        case eSmeCommandEnterImps:
                        case eSmeCommandExitImps:
                        case eSmeCommandEnterBmps:
                        case eSmeCommandExitBmps:
                        case eSmeCommandEnterUapsd:
                        case eSmeCommandExitUapsd:
                        case eSmeCommandEnterWowl:
                        case eSmeCommandExitWowl:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            fContinue = pmcProcessCommand( pMac, pCommand );
                            if( fContinue )
                            {
                                //The command failed, remove it
                                if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList,
                                            &pCommand->Link, LL_ACCESS_LOCK ) )
                                {
                                    pmcReleaseCommand( pMac, pCommand );
                                }
                            }
                            break;
                        //Treat standby differently here because caller may not be able to handle
                        //the failure so we do our best here
                        case eSmeCommandEnterStandby:
                            if( csrIsConnStateDisconnected( pMac, pCommand->sessionId ) )
                            {
                                //It can continue
                                csrLLUnlock( &pMac->sme.smeCmdActiveList );
                                fContinue = pmcProcessCommand( pMac, pCommand );
                                if( fContinue )
                                {
                                    //The command failed, remove it
                                    if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList,
                                                &pCommand->Link, LL_ACCESS_LOCK ) )
                                    {
                                        pmcReleaseCommand( pMac, pCommand );
                                    }
                                }
                            }
                            else
                            {
                                //Need to issue a disconnect first before processing this command
                                tSmeCmd *pNewCmd;
                                //We need to re-run the command
                                fContinue = eANI_BOOLEAN_TRUE;
                                //Pull off the standby command first
                                if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList,
                                            &pCommand->Link, LL_ACCESS_NOLOCK ) )
                                {
                                    csrLLUnlock( &pMac->sme.smeCmdActiveList );
                                    //Need to call CSR function here because the disconnect command
                                    //is handled by CSR
                                    pNewCmd = csrGetCommandBuffer( pMac );
                                    if( NULL != pNewCmd )
                                    {
                                        //Put the standby command to the head of the pending list first
                                        csrLLInsertHead( &pMac->sme.smeCmdPendingList, &pCommand->Link,
                                                LL_ACCESS_LOCK );
                                        pNewCmd->command = eSmeCommandRoam;
                                        pNewCmd->u.roamCmd.roamReason = eCsrForcedDisassoc;
                                        //Put the disassoc command before the standby command
                                        csrLLInsertHead( &pMac->sme.smeCmdPendingList, &pNewCmd->Link,
                                                LL_ACCESS_LOCK );
                                    }
                                    else
                                    {
                                        //Continue the command here
                                        fContinue = pmcProcessCommand( pMac, pCommand );
                                        if( fContinue )
                                        {
                                            //The command failed, remove it
                                            if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList,
                                                        &pCommand->Link, LL_ACCESS_LOCK ) )
                                            {
                                                pmcReleaseCommand( pMac, pCommand );
                                            }
                                        }
                                    }
                                }
                                else
                                {
                                    csrLLUnlock( &pMac->sme.smeCmdActiveList );
                                    smsLog( pMac, LOGE, FL(" failed to remove standby command") );
                                    VOS_ASSERT(0);
                                }
                            }
                            break;
                        case eSmeCommandPnoReq:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            status = smeProcessPnoCommand(pMac, pCommand);
                            if (!HAL_STATUS_SUCCESS(status)){
                                smsLog(pMac, LOGE,
                                  FL("failed to post SME PNO SCAN %d"), status);
                            }
                            //We need to re-run the command
                            fContinue = eANI_BOOLEAN_TRUE;
                            if (csrLLRemoveEntry(&pMac->sme.smeCmdActiveList,
                                        &pCommand->Link, LL_ACCESS_LOCK))
                            {
                               csrReleaseCommand(pMac, pCommand);
                            }
                            break;
                        case eSmeCommandAddTs:
                        case eSmeCommandDelTs:
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
#ifndef WLAN_MDM_CODE_REDUCTION_OPT
                            fContinue = qosProcessCommand( pMac, pCommand );
                            if( fContinue )
                            {
                                //The command failed, remove it
                                if( csrLLRemoveEntry( &pMac->sme.smeCmdActiveList,
                                            &pCommand->Link, LL_ACCESS_NOLOCK ) )
                                {
//#ifndef WLAN_MDM_CODE_REDUCTION_OPT
                                    qosReleaseCommand( pMac, pCommand );
//#endif /* WLAN_MDM_CODE_REDUCTION_OPT*/
                                }
                            }
#endif
                            break;
#ifdef FEATURE_WLAN_TDLS
                        case eSmeCommandTdlsSendMgmt:
                        case eSmeCommandTdlsAddPeer:
                        case eSmeCommandTdlsDelPeer:
                        case eSmeCommandTdlsLinkEstablish:
                        case eSmeCommandTdlsChannelSwitch: // tdlsoffchan
#ifdef FEATURE_WLAN_TDLS_INTERNAL
                        case eSmeCommandTdlsDiscovery:
                        case eSmeCommandTdlsLinkSetup:
                        case eSmeCommandTdlsLinkTear:
                        case eSmeCommandTdlsEnterUapsd:
                        case eSmeCommandTdlsExitUapsd:
#endif
                            {
                                VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                                        "sending TDLS Command 0x%x to PE", pCommand->command);
                                csrLLUnlock( &pMac->sme.smeCmdActiveList );
                                status = csrTdlsProcessCmd( pMac, pCommand );
                            }
                            break ;
#endif
                        default:
                            //something is wrong
                            //remove it from the active list
                            smsLog(pMac, LOGE, " csrProcessCommand processes an unknown command %d", pCommand->command);
                            pEntry = csrLLRemoveHead( &pMac->sme.smeCmdActiveList, LL_ACCESS_NOLOCK );
                            csrLLUnlock( &pMac->sme.smeCmdActiveList );
                            pCommand = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
                            smeReleaseCommand( pMac, pCommand );
                            status = eHAL_STATUS_FAILURE;
                            break;
                    }
                    if(!HAL_STATUS_SUCCESS(status))
                    {
                        fContinue = eANI_BOOLEAN_TRUE;
                    }
                }//if( csrLLRemoveEntry )
                else
                {
                    //This is odd. Some one else pull off the command.
                    csrLLUnlock( &pMac->sme.smeCmdActiveList );
                }
            }
            else
            {
                csrLLUnlock( &pMac->sme.smeCmdActiveList );
            }
        }
        else
        {
            //No command waiting
            csrLLUnlock( &pMac->sme.smeCmdActiveList );
            //This is only used to restart an idle mode scan, it means at least one other idle scan has finished.
            if(pMac->scan.fRestartIdleScan && eANI_BOOLEAN_FALSE == pMac->scan.fCancelIdleScan)
            {
                tANI_U32 nTime = 0;
                pMac->scan.fRestartIdleScan = eANI_BOOLEAN_FALSE;
                if(!HAL_STATUS_SUCCESS(csrScanTriggerIdleScan(pMac, &nTime)))
                {
                    csrScanStartIdleScanTimer(pMac, nTime);
                }
            }
        }
    }
    else {
        csrLLUnlock( &pMac->sme.smeCmdActiveList );
    }
sme_process_scan_queue:
    if (pMac->fScanOffload && !(smeProcessScanQueue(pMac)))
        fContinue = eANI_BOOLEAN_FALSE;
    return ( fContinue );
}
/* Keep pulling commands off the pending queue until smeProcessCommand
 * reports there is nothing left to run. */
void smeProcessPendingQueue( tpAniSirGlobal pMac )
{
    for (;;)
    {
        if (!smeProcessCommand(pMac))
            break;
    }
}
/* TRUE when either the active or the pending SME command list has at
 * least one entry (unlocked peek — result is advisory only). */
tANI_BOOLEAN smeCommandPending(tpAniSirGlobal pMac)
{
    if (!csrLLIsListEmpty(&pMac->sme.smeCmdActiveList, LL_ACCESS_NOLOCK))
        return eANI_BOOLEAN_TRUE;

    return !csrLLIsListEmpty(&pMac->sme.smeCmdPendingList, LL_ACCESS_NOLOCK);
}
//Global APIs
/*--------------------------------------------------------------------------
\brief sme_Open() - Initialze all SME modules and put them at idle state
The function initializes each module inside SME, PMC, CCM, CSR, etc. . Upon
successfully return, all modules are at idle state ready to start.
   smeOpen must be called before any other SME APIs can be invoked.
smeOpen must be called after macOpen.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME is successfully initialized.
Other status means SME is failed to be initialized
\sa
--------------------------------------------------------------------------*/
/* Initialize every SME sub-module (lock, CCM, CSR, PMC, QoS, BTC, OEM
 * data, command lists, SAP, RRM, FT, P2P, tracing) in order.  The
 * do/while(0) lets any failing step break out; no partial teardown is
 * attempted here — callers are expected to invoke sme_Close on failure. */
eHalStatus sme_Open(tHalHandle hHal)
{
   eHalStatus status = eHAL_STATUS_FAILURE;
   tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
   do {
      pMac->sme.state = SME_STATE_STOP;
      pMac->sme.currDeviceMode = VOS_STA_MODE;
      if( !VOS_IS_STATUS_SUCCESS( vos_lock_init( &pMac->sme.lkSmeGlobalLock ) ) )
      {
          smsLog( pMac, LOGE, "sme_Open failed init lock" );
          status = eHAL_STATUS_FAILURE;
          break;
      }
      status = ccmOpen(hHal);
      if ( ! HAL_STATUS_SUCCESS( status ) ) {
         smsLog( pMac, LOGE,
                 "ccmOpen failed during initialization with status=%d", status );
         break;
      }
      status = csrOpen(pMac);
      if ( ! HAL_STATUS_SUCCESS( status ) ) {
         smsLog( pMac, LOGE,
                 "csrOpen failed during initialization with status=%d", status );
         break;
      }
      status = pmcOpen(hHal);
      if ( ! HAL_STATUS_SUCCESS( status ) ) {
         smsLog( pMac, LOGE,
                 "pmcOpen failed during initialization with status=%d", status );
         break;
      }
#ifdef FEATURE_WLAN_TDLS
      pMac->isTdlsPowerSaveProhibited = 0;
#endif
#ifndef WLAN_MDM_CODE_REDUCTION_OPT
      status = sme_QosOpen(pMac);
      if ( ! HAL_STATUS_SUCCESS( status ) ) {
         smsLog( pMac, LOGE,
                 "Qos open failed during initialization with status=%d", status );
         break;
      }
      status = btcOpen(pMac);
      if ( ! HAL_STATUS_SUCCESS( status ) ) {
         smsLog( pMac, LOGE,
                 "btcOpen open failed during initialization with status=%d", status );
         break;
      }
#endif
#ifdef FEATURE_OEM_DATA_SUPPORT
      status = oemData_OemDataReqOpen(pMac);
      if ( ! HAL_STATUS_SUCCESS( status ) ) {
         smsLog(pMac, LOGE,
                "oemData_OemDataReqOpen failed during initialization with status=%d", status );
         break;
      }
#endif
      if(!HAL_STATUS_SUCCESS((status = initSmeCmdList(pMac))))
          break;
      /* SAP needs the VOS global context; a break from this inner block
       * exits the surrounding do/while(0). */
      {
         v_PVOID_t pvosGCtx = vos_get_global_context(VOS_MODULE_ID_SAP, NULL);
         if ( NULL == pvosGCtx ){
            smsLog( pMac, LOGE, "WLANSAP_Open open failed during initialization");
            status = eHAL_STATUS_FAILURE;
            break;
         }
         status = WLANSAP_Open( pvosGCtx );
         if ( ! HAL_STATUS_SUCCESS( status ) ) {
             smsLog( pMac, LOGE,
                     "WLANSAP_Open open failed during initialization with status=%d", status );
             break;
         }
      }
#if defined WLAN_FEATURE_VOWIFI
      status = rrmOpen(pMac);
      if ( ! HAL_STATUS_SUCCESS( status ) ) {
         smsLog( pMac, LOGE,
                 "rrmOpen open failed during initialization with status=%d", status );
         break;
      }
#endif
#if defined WLAN_FEATURE_VOWIFI_11R
      sme_FTOpen(pMac);
#endif
      sme_p2pOpen(pMac);
      smeTraceInit(pMac);
   }while (0);
   return status;
}
/*--------------------------------------------------------------------------
\brief sme_set11dinfo() - Set the 11d information about valid channels
and there power using information from nvRAM
This function is called only for AP.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\Param pSmeConfigParams - a pointer to a caller allocated object of
typedef struct _smeConfigParams.
\return eHAL_STATUS_SUCCESS - SME update the config parameters successfully.
Other status means SME is failed to update the config parameters.
\sa
--------------------------------------------------------------------------*/
/* Push the 11d channel/power configuration from NV into CSR.
 * Returns the CSR status, or eHAL_STATUS_FAILURE when the config
 * pointer is NULL. */
eHalStatus sme_set11dinfo(tHalHandle hHal, tpSmeConfigParams pSmeConfigParams)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
              TRACE_CODE_SME_RX_HDD_MSG_SET_11DINFO, NO_SESSION, 0));
    if (NULL == pSmeConfigParams)
    {
        smsLog( pMac, LOGE,
                "Empty config param structure for SME, nothing to update");
        return eHAL_STATUS_FAILURE;
    }

    status = csrSetChannels(hHal, &pSmeConfigParams->csrConfig);
    if (!HAL_STATUS_SUCCESS(status))
    {
        smsLog( pMac, LOGE, "csrChangeDefaultConfigParam failed with status=%d",
                status );
    }
    return status;
}
/*--------------------------------------------------------------------------
\brief sme_getSoftApDomain() - Get the current regulatory domain of softAp.
This is a synchronous call
\param hHal - The handle returned by HostapdAdapter.
\Param v_REGDOMAIN_t - The current Regulatory Domain requested for SoftAp.
\return eHAL_STATUS_SUCCESS - SME successfully completed the request.
Other status means, failed to get the current regulatory domain.
\sa
--------------------------------------------------------------------------*/
/* Report the current regulatory domain (for SoftAP) through
 * @domainIdSoftAp.  Fails only when the output pointer is NULL. */
eHalStatus sme_getSoftApDomain(tHalHandle hHal, v_REGDOMAIN_t *domainIdSoftAp)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
              TRACE_CODE_SME_RX_HDD_MSG_GET_SOFTAP_DOMAIN, NO_SESSION, 0));
    if (NULL == domainIdSoftAp)
    {
        smsLog( pMac, LOGE, "Uninitialized domain Id");
        return eHAL_STATUS_FAILURE;
    }

    *domainIdSoftAp = pMac->scan.domainIdCurrent;
    return eHAL_STATUS_SUCCESS;
}
/* Hand the AP country code to CSR.  Returns CSR's status, or
 * eHAL_STATUS_FAILURE when @apCntryCode is NULL. */
eHalStatus sme_setRegInfo(tHalHandle hHal, tANI_U8 *apCntryCode)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
              TRACE_CODE_SME_RX_HDD_MSG_SET_REGINFO, NO_SESSION, 0));
    if (NULL == apCntryCode)
    {
        smsLog( pMac, LOGE, "Empty Country Code, nothing to update");
        return eHAL_STATUS_FAILURE;
    }

    status = csrSetRegInfo(hHal, apCntryCode);
    if (!HAL_STATUS_SUCCESS(status))
    {
        smsLog( pMac, LOGE, "csrSetRegInfo failed with status=%d",
                status );
    }
    return status;
}
#ifdef FEATURE_WLAN_SCAN_PNO
/*--------------------------------------------------------------------------
\brief sme_UpdateChannelConfig() - Update channel configuration in RIVA.
It is used at driver start up to inform RIVA of the default channel
configuration.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update the channel config successfully.
Other status means SME is failed to update the channel config.
\sa
--------------------------------------------------------------------------*/
eHalStatus sme_UpdateChannelConfig(tHalHandle hHal)
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
MTRACE(vos_trace(VOS_MODULE_ID_SME,
TRACE_CODE_SME_RX_HDD_MSG_UPDATE_CHANNEL_CONFIG, NO_SESSION, 0));
/* Push the base 20 MHz channel list into PMC so the PNO scan
 * parameters held by firmware match the driver's channel config. */
pmcUpdateScanParams( pMac, &(pMac->roam.configParam),
&pMac->scan.base20MHzChannels, FALSE);
return eHAL_STATUS_SUCCESS;
}
#endif // FEATURE_WLAN_SCAN_PNO
/*
 * sme_UpdateChannelList() - ask CSR to refresh the supported channel list.
 * Returns whatever csrUpdateChannelList() reports; a failure is logged.
 */
eHalStatus sme_UpdateChannelList(tHalHandle hHal)
{
    tpAniSirGlobal mac = PMAC_STRUCT( hHal );
    eHalStatus ret = csrUpdateChannelList(mac);

    if (ret != eHAL_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "failed to update the supported channel list");
    }
    return ret;
}
/*--------------------------------------------------------------------------
\brief sme_UpdateConfig() - Change configurations for all SME modules
The function updates some configuration for modules in SME, CCM, CSR, etc
during SMEs close open sequence.
Modules inside SME apply the new configuration at the next transaction.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\Param pSmeConfigParams - a pointer to a caller allocated object of
typedef struct _smeConfigParams.
\return eHAL_STATUS_SUCCESS - SME update the config parameters successfully.
Other status means SME is failed to update the config parameters.
\sa
--------------------------------------------------------------------------*/
eHalStatus sme_UpdateConfig(tHalHandle hHal, tpSmeConfigParams pSmeConfigParams)
{
eHalStatus status = eHAL_STATUS_FAILURE;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
MTRACE(vos_trace(VOS_MODULE_ID_SME,
TRACE_CODE_SME_RX_HDD_MSG_UPDATE_CONFIG, NO_SESSION, 0));
/* A parameter block is mandatory; bail out with the failure status. */
if (NULL == pSmeConfigParams ) {
smsLog( pMac, LOGE,
"Empty config param structure for SME, nothing to update");
return status;
}
/* Apply the CSR portion of the configuration first. */
status = csrChangeDefaultConfigParam(pMac, &pSmeConfigParams->csrConfig);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "csrChangeDefaultConfigParam failed with status=%d",
status );
}
/* NOTE: each call below overwrites 'status'; only the outcome of the
 * last executed step (plus the WNI_CFG_SCAN_IN_POWERSAVE set, if taken)
 * is what the caller ultimately sees. */
#if defined WLAN_FEATURE_P2P_INTERNAL
status = p2pChangeDefaultConfigParam(pMac, &pSmeConfigParams->p2pConfig);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "p2pChangeDefaultConfigParam failed with status=%d",
status );
}
#endif
#if defined WLAN_FEATURE_VOWIFI
status = rrmChangeDefaultConfigParam(hHal, &pSmeConfigParams->rrmConfig);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "rrmChangeDefaultConfigParam failed with status=%d",
status );
}
#endif
//For SOC, CFG is set before start
//We don't want to apply global CFG in connect state because that may cause some side effect
if(
csrIsAllSessionDisconnected( pMac) )
{
csrSetGlobalCfgs(pMac);
}
/* update the directed scan offload setting */
pMac->fScanOffload = pSmeConfigParams->fScanOffload;
/* Enable channel bonding mode in 2.4GHz */
if ((pSmeConfigParams->csrConfig.channelBondingMode24GHz == TRUE) &&
(IS_HT40_OBSS_SCAN_FEATURE_ENABLE))
{
ccmCfgSetInt(hHal,WNI_CFG_CHANNEL_BONDING_24G,
eANI_BOOLEAN_TRUE, NULL,eANI_BOOLEAN_FALSE);
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO_HIGH,
"Setting channelBondingMode24GHz:%d " ,
pSmeConfigParams->csrConfig.channelBondingMode24GHz);
}
if (pMac->fScanOffload)
{
/* If scan offload is enabled then lim has to allow the sending of
scan request to firmware even in powersave mode. The firmware has
to take care of exiting from power save mode */
status = ccmCfgSetInt(hHal, WNI_CFG_SCAN_IN_POWERSAVE,
eANI_BOOLEAN_TRUE, NULL, eANI_BOOLEAN_FALSE);
if (eHAL_STATUS_SUCCESS != status)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
"Could not pass on WNI_CFG_SCAN_IN_POWERSAVE to CCM");
}
}
/* Cache frequently-read settings directly on the MAC context. */
pMac->isCoalesingInIBSSAllowed =
pSmeConfigParams->csrConfig.isCoalesingInIBSSAllowed;
pMac->fEnableDebugLog = pSmeConfigParams->fEnableDebugLog;
pMac->fDeferIMPSTime = pSmeConfigParams->fDeferIMPSTime;
return status;
}
#ifdef WLAN_FEATURE_GTK_OFFLOAD
/*
 * sme_ProcessGetGtkInfoRsp() - forward a GTK-offload "get info" response
 * from lower layers to the HDD callback registered in the PMC context.
 * Silently drops the response (with a log) when the MAC context or the
 * callback is absent.
 */
void sme_ProcessGetGtkInfoRsp( tHalHandle hHal,
                            tpSirGtkOffloadGetInfoRspParams pGtkOffloadGetInfoRsp)
{
    tpAniSirGlobal mac = PMAC_STRUCT( hHal );

    if (NULL == mac)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
                  "%s: pMac is null", __func__);
        return ;
    }
    if (NULL == mac->pmc.GtkOffloadGetInfoCB)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: HDD callback is null", __func__);
        return ;
    }
    mac->pmc.GtkOffloadGetInfoCB(mac->pmc.GtkOffloadGetInfoCBContext,
                                 pGtkOffloadGetInfoRsp);
}
#endif
/* ---------------------------------------------------------------------------
\fn sme_ChangeConfigParams
\brief The SME API exposed for HDD to provide config params to SME during
SMEs stop -> start sequence.
If HDD changed the domain that will cause a reset. This function will
provide the new set of 11d information for the new domain. Currently this
API provides info regarding 11d only at reset but we can extend this for
other params (PMC, QoS) which needs to be initialized again at reset.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\Param
pUpdateConfigParam - a pointer to a structure (tCsrUpdateConfigParam) that
currently provides 11d related information like Country code,
Regulatory domain, valid channel list, Tx power per channel, a
list with active/passive scan allowed per valid channel.
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_ChangeConfigParams(tHalHandle hHal,
                      tCsrUpdateConfigParam *pUpdateConfigParam)
{
    eHalStatus status = eHAL_STATUS_FAILURE;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    /* Nothing to apply without a caller-supplied parameter block. */
    if (NULL == pUpdateConfigParam ) {
        smsLog( pMac, LOGE,
                "Empty config param structure for SME, nothing to reset");
        return status;
    }
    status = csrChangeConfigParams(pMac, pUpdateConfigParam);
    if ( ! HAL_STATUS_SUCCESS( status ) ) {
        /* Fix: the log previously named csrUpdateConfigParam, which is not
         * the function being called here. */
        smsLog( pMac, LOGE, "csrChangeConfigParams failed with status=%d",
                status );
    }
    return status;
}
/*--------------------------------------------------------------------------
\brief sme_HDDReadyInd() - SME sends eWNI_SME_SYS_READY_IND to PE to inform
that the NIC is ready to run.
The function is called by HDD at the end of initialization stage so PE/HAL can
enable the NIC to running state.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - eWNI_SME_SYS_READY_IND is sent to PE
successfully.
Other status means SME failed to send the message to PE.
\sa
--------------------------------------------------------------------------*/
eHalStatus sme_HDDReadyInd(tHalHandle hHal)
{
tSirSmeReadyReq Msg;
eHalStatus status = eHAL_STATUS_FAILURE;
tPmcPowerState powerState;
tPmcSwitchState hwWlanSwitchState;
tPmcSwitchState swWlanSwitchState;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
MTRACE(vos_trace(VOS_MODULE_ID_SME,
TRACE_CODE_SME_RX_HDD_MSG_HDDREADYIND, NO_SESSION, 0));
do
{
/* Tell PE/HAL the host driver finished initialization. */
Msg.messageType = eWNI_SME_SYS_READY_IND;
Msg.length = sizeof( tSirSmeReadyReq );
if (eSIR_FAILURE != uMacPostCtrlMsg( hHal, (tSirMbMsg*)&Msg ))
{
status = eHAL_STATUS_SUCCESS;
}
else
{
smsLog( pMac, LOGE,
"uMacPostCtrlMsg failed to send eWNI_SME_SYS_READY_IND");
break;
}
status = pmcQueryPowerState( hHal, &powerState,
&hwWlanSwitchState, &swWlanSwitchState );
if ( ! HAL_STATUS_SUCCESS( status ) )
{
smsLog( pMac, LOGE, "pmcQueryPowerState failed with status=%d",
status );
break;
}
/* Only bring the sub-modules to ready state when neither the
 * hardware nor the software WLAN switch is off. */
if ( (ePMC_SWITCH_OFF != hwWlanSwitchState) &&
(ePMC_SWITCH_OFF != swWlanSwitchState) )
{
status = csrReady(pMac);
if ( ! HAL_STATUS_SUCCESS( status ) )
{
smsLog( pMac, LOGE, "csrReady failed with status=%d", status );
break;
}
status = pmcReady(hHal);
if ( ! HAL_STATUS_SUCCESS( status ) )
{
smsLog( pMac, LOGE, "pmcReady failed with status=%d", status );
break;
}
#ifndef WLAN_MDM_CODE_REDUCTION_OPT
if(VOS_STATUS_SUCCESS != btcReady(hHal))
{
status = eHAL_STATUS_FAILURE;
smsLog( pMac, LOGE, "btcReady failed");
break;
}
#endif
#if defined WLAN_FEATURE_VOWIFI
if(VOS_STATUS_SUCCESS != rrmReady(hHal))
{
status = eHAL_STATUS_FAILURE;
smsLog( pMac, LOGE, "rrmReady failed");
break;
}
#endif
}
/* NOTE(review): the state is advanced to READY even when the WLAN
 * switches were off and the sub-modules above were skipped — confirm
 * this is intended. */
pMac->sme.state = SME_STATE_READY;
} while( 0 );
return status;
}
/*--------------------------------------------------------------------------
\brief sme_Start() - Put all SME modules at ready state.
The function starts each module in SME, PMC, CCM, CSR, etc. . Upon
successfully return, all modules are ready to run.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME is ready.
Other status means SME is failed to start
\sa
--------------------------------------------------------------------------*/
/*
 * sme_Start() - bring CSR, PMC and SAP to their running state.
 * Stops at the first failing module and returns its status; on full
 * success the SME state machine is moved to SME_STATE_START.
 */
eHalStatus sme_Start(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus ret;

    ret = csrStart(pMac);
    if (!HAL_STATUS_SUCCESS(ret)) {
        smsLog( pMac, LOGE, "csrStart failed during smeStart with status=%d",
                ret );
        return ret;
    }
    ret = pmcStart(hHal);
    if (!HAL_STATUS_SUCCESS(ret)) {
        smsLog( pMac, LOGE, "pmcStart failed during smeStart with status=%d",
                ret );
        return ret;
    }
    ret = WLANSAP_Start(vos_get_global_context(VOS_MODULE_ID_SAP, NULL));
    if (!HAL_STATUS_SUCCESS(ret)) {
        smsLog( pMac, LOGE, "WLANSAP_Start failed during smeStart with status=%d",
                ret );
        return ret;
    }
    pMac->sme.state = SME_STATE_START;
    return ret;
}
#ifdef WLAN_FEATURE_PACKET_FILTERING
/******************************************************************************
*
* Name: sme_PCFilterMatchCountResponseHandler
*
* Description:
* Invoke Packet Coalescing Filter Match Count callback routine
*
* Parameters:
* hHal - HAL handle for device
* pMsg - Pointer to tRcvFltPktMatchRsp structure
*
* Returns: eHalStatus
*
******************************************************************************/
eHalStatus sme_PCFilterMatchCountResponseHandler(tHalHandle hHal, void* pMsg)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    tpSirRcvFltPktMatchRsp rsp = (tpSirRcvFltPktMatchRsp)pMsg;

    /* Guard: a NULL response cannot be delivered to HDD. */
    if (NULL == pMsg)
    {
        smsLog(mac, LOGE, "in %s msg ptr is NULL", __func__);
        return eHAL_STATUS_FAILURE;
    }
    smsLog(mac, LOG2, "SME: entering "
        "sme_FilterMatchCountResponseHandler");
    /* Call Packet Coalescing Filter Match Count callback routine. */
    if (NULL != mac->pmc.FilterMatchCountCB)
        mac->pmc.FilterMatchCountCB(mac->pmc.FilterMatchCountCBContext,
                                    rsp);
    smsLog(mac, LOG1, "%s: status=0x%x", __func__,
           rsp->status);
    /* One-shot callback: clear it after delivery. */
    mac->pmc.FilterMatchCountCB = NULL;
    mac->pmc.FilterMatchCountCBContext = NULL;
    return eHAL_STATUS_SUCCESS;
}
#endif // WLAN_FEATURE_PACKET_FILTERING
#ifdef WLAN_FEATURE_11W
/*------------------------------------------------------------------
*
* Handle the unprotected management frame indication from LIM and
* forward it to HDD.
*
*------------------------------------------------------------------*/
eHalStatus sme_UnprotectedMgmtFrmInd( tHalHandle hHal,
                                      tpSirSmeUnprotMgmtFrameInd pSmeMgmtFrm)
{
    tpAniSirGlobal mac = PMAC_STRUCT( hHal );
    tANI_U32 sessionId = pSmeMgmtFrm->sessionId;
    tCsrRoamInfo roamInfo = {0};

    /* Repackage the frame details into roam-info form. */
    roamInfo.nFrameLength = pSmeMgmtFrm->frameLen;
    roamInfo.pbFrames     = pSmeMgmtFrm->frameBuf;
    roamInfo.frameType    = pSmeMgmtFrm->frameType;
    /* forward the mgmt frame to HDD */
    csrRoamCallCallback(mac, sessionId, &roamInfo, 0,
                        eCSR_ROAM_UNPROT_MGMT_FRAME_IND, 0);
    return eHAL_STATUS_SUCCESS;
}
#endif
#if defined(FEATURE_WLAN_ESE) && defined(FEATURE_WLAN_ESE_UPLOAD)
/*------------------------------------------------------------------
*
* Handle the tsm ie indication from LIM and forward it to HDD.
*
*------------------------------------------------------------------*/
eHalStatus sme_TsmIeInd(tHalHandle hHal, tSirSmeTsmIEInd *pSmeTsmIeInd)
{
    tpAniSirGlobal mac = PMAC_STRUCT( hHal );
    tANI_U32 sessionId = pSmeTsmIeInd->sessionId;
    tCsrRoamInfo roamInfo = {0};

    /* Copy the TSM IE fields into the roam-info container. */
    roamInfo.tsmIe.tsid          = pSmeTsmIeInd->tsmIe.tsid;
    roamInfo.tsmIe.state         = pSmeTsmIeInd->tsmIe.state;
    roamInfo.tsmIe.msmt_interval = pSmeTsmIeInd->tsmIe.msmt_interval;
    /* forward the tsm ie information to HDD */
    csrRoamCallCallback(mac, sessionId, &roamInfo, 0,
                        eCSR_ROAM_TSM_IE_IND, 0);
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn sme_SetCCKMIe
\brief function to store the CCKM IE passed from supplicant and use it while packing
reassociation request
\param hHal - HAL handle for device
\param pCckmIe - pointer to CCKM IE data
\param pCckmIeLen - length of the CCKM IE
\- return Success or failure
-------------------------------------------------------------------------*/
eHalStatus sme_SetCCKMIe(tHalHandle hHal, tANI_U8 sessionId,
                     tANI_U8 *pCckmIe, tANI_U8 cckmIeLen)
{
    tpAniSirGlobal mac = PMAC_STRUCT( hHal );
    /* Serialize against other SME callers before touching CSR state. */
    eHalStatus ret = sme_AcquireGlobalLock( &mac->sme );

    if (HAL_STATUS_SUCCESS(ret))
    {
        csrSetCCKMIe(mac, sessionId, pCckmIe, cckmIeLen);
        sme_ReleaseGlobalLock( &mac->sme );
    }
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_SetEseBeaconRequest
\brief function to set Ese beacon request parameters
\param hHal - HAL handle for device
\param sessionId - Session id
\param pEseBcnReq - pointer to Ese beacon request
\- return Success or failure
-------------------------------------------------------------------------*/
eHalStatus sme_SetEseBeaconRequest(tHalHandle hHal, const tANI_U8 sessionId,
                                   const tCsrEseBeaconReq* pEseBcnReq)
{
    /* Fix: use eHalStatus values throughout; the original mixed in
     * tSirRetStatus (eSIR_SUCCESS/eSIR_FAILURE) from an eHalStatus
     * function. */
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tpSirBeaconReportReqInd pSmeBcnReportReq = NULL;
    tCsrEseBeaconReqParams *pBeaconReq = NULL;
    tANI_U8 counter = 0;
    tCsrRoamSession *pSession = CSR_GET_SESSION(pMac, sessionId);
    tpRrmSMEContext pSmeRrmContext = &pMac->rrm.rrmSmeContext;

    /* Fix: CSR_GET_SESSION can yield NULL for an invalid session id;
     * the original dereferenced pSession unconditionally below. */
    if (NULL == pSession)
    {
        smsLog(pMac, LOGE, FL("session %d not found"), sessionId);
        return eHAL_STATUS_FAILURE;
    }
    /* Store the info in RRM context */
    vos_mem_copy(&pSmeRrmContext->eseBcnReqInfo, pEseBcnReq,
                 sizeof(tCsrEseBeaconReq));
    //Prepare the request to send to SME.
    pSmeBcnReportReq = vos_mem_malloc(sizeof( tSirBeaconReportReqInd ));
    if(NULL == pSmeBcnReportReq)
    {
        smsLog(pMac, LOGP, "Memory Allocation Failure!!! Ese BcnReq Ind to SME");
        return eHAL_STATUS_FAILURE;
    }
    smsLog(pMac, LOGE, "Sending Beacon Report Req to SME");
    vos_mem_zero( pSmeBcnReportReq, sizeof( tSirBeaconReportReqInd ));
    pSmeBcnReportReq->messageType = eWNI_SME_BEACON_REPORT_REQ_IND;
    pSmeBcnReportReq->length = sizeof( tSirBeaconReportReqInd );
    vos_mem_copy( pSmeBcnReportReq->bssId,
                  pSession->connectedProfile.bssid, sizeof(tSirMacAddr) );
    /* NOTE(review): 255 appears to mean "use the channel list" rather
     * than a single channel — confirm against the RRM handler. */
    pSmeBcnReportReq->channelInfo.channelNum = 255;
    pSmeBcnReportReq->channelList.numChannels = pEseBcnReq->numBcnReqIe;
    pSmeBcnReportReq->msgSource = eRRM_MSG_SOURCE_ESE_UPLOAD;
    /* Copy each per-IE measurement request into the report request. */
    for (counter = 0; counter < pEseBcnReq->numBcnReqIe; counter++)
    {
        pBeaconReq = (tCsrEseBeaconReqParams *)&pEseBcnReq->bcnReq[counter];
        pSmeBcnReportReq->fMeasurementtype[counter] = pBeaconReq->scanMode;
        pSmeBcnReportReq->measurementDuration[counter] =
            SYS_TU_TO_MS(pBeaconReq->measurementDuration);
        pSmeBcnReportReq->channelList.channelNumber[counter] =
            pBeaconReq->channel;
    }
    sme_RrmProcessBeaconReportReqInd(pMac, pSmeBcnReportReq);
    return status;
}
#endif /* FEATURE_WLAN_ESE && FEATURE_WLAN_ESE_UPLOAD */
/* ---------------------------------------------------------------------------
\fn sme_getBcnMissRate
\brief function sends 'WDA_GET_BCN_MISS_RATE_REQ' to WDA layer,
\param hHal - HAL handle for device.
\param sessionId - session ID.
\- return Success or Failure.
-------------------------------------------------------------------------*/
eHalStatus sme_getBcnMissRate(tHalHandle hHal, tANI_U8 sessionId, void *callback, void *data)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    VOS_STATUS vosStatus = VOS_STATUS_E_FAILURE;
    vos_msg_t vosMessage;
    tSirBcnMissRateReq *pMsg;
    tCsrRoamSession *pSession;

    if ( eHAL_STATUS_SUCCESS == sme_AcquireGlobalLock( &pMac->sme ) )
    {
        pSession = CSR_GET_SESSION( pMac, sessionId );
        if (!pSession)
        {
            smsLog(pMac, LOGE, FL("session %d not found"), sessionId);
            sme_ReleaseGlobalLock( &pMac->sme );
            return eHAL_STATUS_FAILURE;
        }
        pMsg = (tSirBcnMissRateReq *) vos_mem_malloc(sizeof(tSirBcnMissRateReq));
        if (NULL == pMsg)
        {
            /* Fix: original log read "failed to allocated memory". */
            smsLog(pMac, LOGE, FL("failed to allocate memory"));
            sme_ReleaseGlobalLock( &pMac->sme );
            return eHAL_STATUS_FAILURE;
        }
        /* Target the currently connected BSS for this session. */
        vos_mem_copy(pMsg->bssid, pSession->connectedProfile.bssid,
                     sizeof(tSirMacAddr));
        pMsg->msgLen   = sizeof(tSirBcnMissRateReq);
        pMsg->callback = callback;
        pMsg->data     = data;

        vosMessage.type     = WDA_GET_BCN_MISS_RATE_REQ;
        vosMessage.bodyptr  = pMsg;
        vosMessage.reserved = 0;
        vosStatus = vos_mq_post_message( VOS_MQ_ID_WDA, &vosMessage );
        if ( !VOS_IS_STATUS_SUCCESS(vosStatus) )
        {
            /* Fix: original message ("Post Set TM Level MSG fail") was
             * copy-pasted from an unrelated handler. */
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                      "%s: failed to post WDA_GET_BCN_MISS_RATE_REQ",
                      __func__);
            vos_mem_free(pMsg);
            sme_ReleaseGlobalLock( &pMac->sme );
            return eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock( &pMac->sme);
        return eHAL_STATUS_SUCCESS;
    }
    return eHAL_STATUS_FAILURE;
}
/*
 * sme_EncryptMsgResponseHandler() - deliver an encrypted-data response to
 * the HDD callback stored in the SME context. Fails when the MAC context
 * or the callback is missing.
 */
eHalStatus sme_EncryptMsgResponseHandler(tHalHandle hHal,
                                         tpSirEncryptedDataRspParams pEncRspParams)
{
    tpAniSirGlobal mac = PMAC_STRUCT( hHal );

    if (NULL == mac)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
                  "%s: pMac is null", __func__);
        return eHAL_STATUS_FAILURE;
    }
    if (NULL == mac->sme.pEncMsgInfoParams.pEncMsgCbk)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: HDD callback is null", __func__);
        return eHAL_STATUS_FAILURE;
    }
    mac->sme.pEncMsgInfoParams.pEncMsgCbk(mac->sme.pEncMsgInfoParams.pUserData,
                                          &pEncRspParams->encryptedDataRsp);
    return eHAL_STATUS_SUCCESS;
}
/*--------------------------------------------------------------------------
\brief sme_ProcessMsg() - The main message processor for SME.
The function is called by a message dispatcher when to process a message
targeted for SME.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\param pMsg - A pointer to a caller allocated object of tSirMsgQ.
\return eHAL_STATUS_SUCCESS - SME successfully process the message.
Other status means SME failed to process the message to HAL.
\sa
--------------------------------------------------------------------------*/
eHalStatus sme_ProcessMsg(tHalHandle hHal, vos_msg_t* pMsg)
{
eHalStatus status = eHAL_STATUS_FAILURE;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
if (pMsg == NULL) {
smsLog( pMac, LOGE, "Empty message for SME, nothing to process");
return status;
}
status = sme_AcquireGlobalLock( &pMac->sme );
if ( HAL_STATUS_SUCCESS( status ) )
{
if( SME_IS_START(pMac) )
{
/* Ownership rule: every branch that consumes pMsg->bodyptr also
 * frees it here; unhandled/empty messages are logged and dropped. */
switch (pMsg->type) { // TODO: Will be modified to do a range check for msgs instead of having cases for each msgs
case eWNI_PMC_ENTER_BMPS_RSP:
case eWNI_PMC_EXIT_BMPS_RSP:
case eWNI_PMC_EXIT_BMPS_IND:
case eWNI_PMC_ENTER_IMPS_RSP:
case eWNI_PMC_EXIT_IMPS_RSP:
case eWNI_PMC_SMPS_STATE_IND:
case eWNI_PMC_ENTER_UAPSD_RSP:
case eWNI_PMC_EXIT_UAPSD_RSP:
case eWNI_PMC_ENTER_WOWL_RSP:
case eWNI_PMC_EXIT_WOWL_RSP:
//PMC
if (pMsg->bodyptr)
{
pmcMessageProcessor(hHal, pMsg->bodyptr);
status = eHAL_STATUS_SUCCESS;
vos_mem_free(pMsg->bodyptr);
} else {
smsLog( pMac, LOGE, "Empty rsp message for PMC, nothing to process");
}
break;
case WNI_CFG_SET_CNF:
case WNI_CFG_DNLD_CNF:
case WNI_CFG_GET_RSP:
case WNI_CFG_ADD_GRP_ADDR_CNF:
case WNI_CFG_DEL_GRP_ADDR_CNF:
//CCM
if (pMsg->bodyptr)
{
ccmCfgCnfMsgHandler(hHal, pMsg->bodyptr);
status = eHAL_STATUS_SUCCESS;
vos_mem_free(pMsg->bodyptr);
} else {
smsLog( pMac, LOGE, "Empty rsp message for CCM, nothing to process");
}
break;
case eWNI_SME_ADDTS_RSP:
case eWNI_SME_DELTS_RSP:
case eWNI_SME_DELTS_IND:
#ifdef WLAN_FEATURE_VOWIFI_11R
case eWNI_SME_FT_AGGR_QOS_RSP:
#endif
//QoS
if (pMsg->bodyptr)
{
/* NOTE(review): when WLAN_MDM_CODE_REDUCTION_OPT is defined the
 * bodyptr is neither processed nor freed — confirm this build
 * configuration never receives QoS messages. */
#ifndef WLAN_MDM_CODE_REDUCTION_OPT
status = sme_QosMsgProcessor(pMac, pMsg->type, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
#endif
} else {
smsLog( pMac, LOGE, "Empty rsp message for QoS, nothing to process");
}
break;
#if defined WLAN_FEATURE_VOWIFI
case eWNI_SME_NEIGHBOR_REPORT_IND:
case eWNI_SME_BEACON_REPORT_REQ_IND:
#if defined WLAN_VOWIFI_DEBUG
smsLog( pMac, LOGE, "Received RRM message. Message Id = %d", pMsg->type );
#endif
if ( pMsg->bodyptr )
{
status = sme_RrmMsgProcessor( pMac, pMsg->type, pMsg->bodyptr );
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty message for RRM, nothing to process");
}
break;
#endif
#ifdef FEATURE_OEM_DATA_SUPPORT
//Handle the eWNI_SME_OEM_DATA_RSP:
case eWNI_SME_OEM_DATA_RSP:
if(pMsg->bodyptr)
{
status = sme_HandleOemDataRsp(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty rsp message for oemData_ (eWNI_SME_OEM_DATA_RSP), nothing to process");
}
smeProcessPendingQueue( pMac );
break;
#endif
case eWNI_SME_ADD_STA_SELF_RSP:
if(pMsg->bodyptr)
{
status = csrProcessAddStaSessionRsp(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty rsp message for meas (eWNI_SME_ADD_STA_SELF_RSP), nothing to process");
}
break;
case eWNI_SME_DEL_STA_SELF_RSP:
if(pMsg->bodyptr)
{
status = csrProcessDelStaSessionRsp(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty rsp message for meas (eWNI_SME_DEL_STA_SELF_RSP), nothing to process");
}
break;
case eWNI_SME_REMAIN_ON_CHN_RSP:
if(pMsg->bodyptr)
{
status = sme_remainOnChnRsp(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty rsp message for meas (eWNI_SME_REMAIN_ON_CHN_RSP), nothing to process");
}
break;
case eWNI_SME_REMAIN_ON_CHN_RDY_IND:
if(pMsg->bodyptr)
{
status = sme_remainOnChnReady(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty rsp message for meas (eWNI_SME_REMAIN_ON_CHN_RDY_IND), nothing to process");
}
break;
case eWNI_SME_MGMT_FRM_IND:
if(pMsg->bodyptr)
{
sme_mgmtFrmInd(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty rsp message for meas (eWNI_SME_MGMT_FRM_IND), nothing to process");
}
break;
case eWNI_SME_ACTION_FRAME_SEND_CNF:
if(pMsg->bodyptr)
{
status = sme_sendActionCnf(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty rsp message for meas (eWNI_SME_ACTION_FRAME_SEND_CNF), nothing to process");
}
break;
case eWNI_SME_COEX_IND:
if(pMsg->bodyptr)
{
tSirSmeCoexInd *pSmeCoexInd = (tSirSmeCoexInd *)pMsg->bodyptr;
/* Coex events toggle aggregation on 2.4 GHz: disable forces full
 * power and latches isCoexScoIndSet; enable clears it and
 * requests BMPS again. */
if (pSmeCoexInd->coexIndType == SIR_COEX_IND_TYPE_DISABLE_AGGREGATION_IN_2p4)
{
smsLog( pMac, LOG1, FL("SIR_COEX_IND_TYPE_DISABLE_AGGREGATION_IN_2p4"));
sme_RequestFullPower(hHal, NULL, NULL, eSME_REASON_OTHER);
pMac->isCoexScoIndSet = 1;
}
else if (pSmeCoexInd->coexIndType == SIR_COEX_IND_TYPE_ENABLE_AGGREGATION_IN_2p4)
{
smsLog( pMac, LOG1, FL("SIR_COEX_IND_TYPE_ENABLE_AGGREGATION_IN_2p4"));
pMac->isCoexScoIndSet = 0;
sme_RequestBmps(hHal, NULL, NULL);
}
status = btcHandleCoexInd((void *)pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE, "Empty rsp message for meas (eWNI_SME_COEX_IND), nothing to process");
}
break;
#ifdef FEATURE_WLAN_SCAN_PNO
case eWNI_SME_PREF_NETWORK_FOUND_IND:
if(pMsg->bodyptr)
{
status = sme_PreferredNetworkFoundInd((void *)pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE, "Empty rsp message for meas (eWNI_SME_PREF_NETWORK_FOUND_IND), nothing to process");
}
break;
#endif // FEATURE_WLAN_SCAN_PNO
case eWNI_SME_TX_PER_HIT_IND:
if (pMac->sme.pTxPerHitCallback)
{
pMac->sme.pTxPerHitCallback(pMac->sme.pTxPerHitCbContext);
}
break;
case eWNI_SME_CHANGE_COUNTRY_CODE:
if(pMsg->bodyptr)
{
status = sme_HandleChangeCountryCode((void *)pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE, "Empty rsp message for message (eWNI_SME_CHANGE_COUNTRY_CODE), nothing to process");
}
break;
case eWNI_SME_GENERIC_CHANGE_COUNTRY_CODE:
if (pMsg->bodyptr)
{
status = sme_HandleGenericChangeCountryCode((void *)pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE, "Empty rsp message for message (eWNI_SME_GENERIC_CHANGE_COUNTRY_CODE), nothing to process");
}
break;
#ifdef WLAN_FEATURE_PACKET_FILTERING
case eWNI_PMC_PACKET_COALESCING_FILTER_MATCH_COUNT_RSP:
if(pMsg->bodyptr)
{
status = sme_PCFilterMatchCountResponseHandler((void *)pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE, "Empty rsp message for meas "
"(PACKET_COALESCING_FILTER_MATCH_COUNT_RSP), nothing to process");
}
break;
#endif // WLAN_FEATURE_PACKET_FILTERING
case eWNI_SME_PRE_SWITCH_CHL_IND:
{
status = sme_HandlePreChannelSwitchInd(pMac);
break;
}
case eWNI_SME_POST_SWITCH_CHL_IND:
{
status = sme_HandlePostChannelSwitchInd(pMac);
break;
}
#ifdef WLAN_WAKEUP_EVENTS
case eWNI_SME_WAKE_REASON_IND:
if(pMsg->bodyptr)
{
status = sme_WakeReasonIndCallback((void *)pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE, "Empty rsp message for meas (eWNI_SME_WAKE_REASON_IND), nothing to process");
}
break;
#endif // WLAN_WAKEUP_EVENTS
#ifdef FEATURE_WLAN_TDLS
/*
 * command received from PE, SME tdls msg processor shall be called
 * to process commands received from PE
 */
case eWNI_SME_TDLS_SEND_MGMT_RSP:
case eWNI_SME_TDLS_ADD_STA_RSP:
case eWNI_SME_TDLS_DEL_STA_RSP:
case eWNI_SME_TDLS_DEL_STA_IND:
case eWNI_SME_TDLS_DEL_ALL_PEER_IND:
case eWNI_SME_MGMT_FRM_TX_COMPLETION_IND:
case eWNI_SME_TDLS_LINK_ESTABLISH_RSP:
case eWNI_SME_TDLS_CHANNEL_SWITCH_RSP:
#ifdef FEATURE_WLAN_TDLS_INTERNAL
case eWNI_SME_TDLS_DISCOVERY_START_RSP:
case eWNI_SME_TDLS_DISCOVERY_START_IND:
case eWNI_SME_TDLS_LINK_START_RSP:
case eWNI_SME_TDLS_LINK_START_IND:
case eWNI_SME_TDLS_TEARDOWN_RSP:
case eWNI_SME_TDLS_TEARDOWN_IND:
case eWNI_SME_ADD_TDLS_PEER_IND:
case eWNI_SME_DELETE_TDLS_PEER_IND:
#endif
{
if (pMsg->bodyptr)
{
status = tdlsMsgProcessor(pMac, pMsg->type, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty rsp message for TDLS, \
nothing to process");
}
break;
}
#endif
#ifdef WLAN_FEATURE_11W
case eWNI_SME_UNPROT_MGMT_FRM_IND:
if (pMsg->bodyptr)
{
sme_UnprotectedMgmtFrmInd(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE, "Empty rsp message for meas (eWNI_SME_UNPROT_MGMT_FRM_IND), nothing to process");
}
break;
#endif
#if defined(FEATURE_WLAN_ESE) && defined(FEATURE_WLAN_ESE_UPLOAD)
case eWNI_SME_TSM_IE_IND:
{
if (pMsg->bodyptr)
{
sme_TsmIeInd(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE, "Empty rsp message for (eWNI_SME_TSM_IE_IND), nothing to process");
}
break;
}
#endif /* FEATURE_WLAN_ESE && FEATURE_WLAN_ESE_UPLOAD */
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
case eWNI_SME_ROAM_SCAN_OFFLOAD_RSP:
/* This message carries its payload in bodyval, not bodyptr. */
status = csrRoamOffloadScanRspHdlr((void *)pMac, pMsg->bodyval);
break;
#endif // WLAN_FEATURE_ROAM_SCAN_OFFLOAD
#ifdef WLAN_FEATURE_GTK_OFFLOAD
case eWNI_PMC_GTK_OFFLOAD_GETINFO_RSP:
if (pMsg->bodyptr)
{
sme_ProcessGetGtkInfoRsp(pMac, pMsg->bodyptr);
/* Zeroize before freeing: the response contains key material. */
vos_mem_zero(pMsg->bodyptr,
sizeof(tSirGtkOffloadGetInfoRspParams));
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE, "Empty rsp message for (eWNI_PMC_GTK_OFFLOAD_GETINFO_RSP), nothing to process");
}
break ;
#endif
#ifdef FEATURE_WLAN_LPHB
/* LPHB timeout indication arrived, send IND to client */
case eWNI_SME_LPHB_IND:
if (pMac->sme.pLphbIndCb)
{
pMac->sme.pLphbIndCb(pMac->pAdapter, pMsg->bodyptr);
}
vos_mem_free(pMsg->bodyptr);
break;
#endif /* FEATURE_WLAN_LPHB */
#ifdef FEATURE_WLAN_CH_AVOID
/* Channel-avoid indication arrived, send IND to client */
case eWNI_SME_CH_AVOID_IND:
if (pMac->sme.pChAvoidNotificationCb)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
"%s: CH avoid notification", __func__);
pMac->sme.pChAvoidNotificationCb(pMac->pAdapter, pMsg->bodyptr);
}
vos_mem_free(pMsg->bodyptr);
break;
#endif /* FEATURE_WLAN_CH_AVOID */
case eWNI_SME_ENCRYPT_MSG_RSP:
if (pMsg->bodyptr)
{
sme_EncryptMsgResponseHandler(pMac, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog(pMac, LOGE,
"Empty rsp message for (eWNI_SME_ENCRYPT_MSG_RSP),"
" nothing to process");
}
break ;
default:
/* Anything inside the generic SME range falls through to CSR. */
if ( ( pMsg->type >= eWNI_SME_MSG_TYPES_BEGIN )
&&  ( pMsg->type <= eWNI_SME_MSG_TYPES_END ) )
{
//CSR
if (pMsg->bodyptr)
{
status = csrMsgProcessor(hHal, pMsg->bodyptr);
vos_mem_free(pMsg->bodyptr);
}
else
{
smsLog( pMac, LOGE, "Empty rsp message for CSR, nothing to process");
}
}
else
{
smsLog( pMac, LOGW, "Unknown message type %d, nothing to process",
pMsg->type);
if (pMsg->bodyptr)
{
vos_mem_free(pMsg->bodyptr);
}
}
}//switch
} //SME_IS_START
else
{
/* SME not started: drop the message but still release its body. */
smsLog( pMac, LOGW, "message type %d in stop state ignored", pMsg->type);
if (pMsg->bodyptr)
{
vos_mem_free(pMsg->bodyptr);
}
}
sme_ReleaseGlobalLock( &pMac->sme );
}
else
{
/* Could not take the SME lock: free the body to avoid a leak. */
smsLog( pMac, LOGW, "Locking failed, bailing out");
if (pMsg->bodyptr)
{
vos_mem_free(pMsg->bodyptr);
}
}
return status;
}
//No need to hold the global lock here because this function can only be called
//after sme_Stop.
v_VOID_t sme_FreeMsg( tHalHandle hHal, vos_msg_t* pMsg )
{
    /* Release only the body buffer; the message container itself is
       owned by the queue machinery. */
    if ((NULL != pMsg) && (NULL != pMsg->bodyptr))
    {
        vos_mem_free(pMsg->bodyptr);
    }
}
/*--------------------------------------------------------------------------
\brief sme_Stop() - Stop all SME modules and put them at idle state
The function stops each module in SME, PMC, CCM, CSR, etc. . Upon
return, all modules are at idle state ready to start.
This is a synchronous call
\param hHal - The handle returned by macOpen
\param tHalStopType - reason for stopping
\return eHAL_STATUS_SUCCESS - SME is stopped.
Other status means SME is failed to stop but caller should still
consider SME is stopped.
\sa
--------------------------------------------------------------------------*/
eHalStatus sme_Stop(tHalHandle hHal, tHalStopType stopType)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus ret = eHAL_STATUS_FAILURE;
    /* Remembers the most recent failing step; SUCCESS if none failed. */
    eHalStatus failRet = eHAL_STATUS_SUCCESS;

    ret = WLANSAP_Stop(vos_get_global_context(VOS_MODULE_ID_SAP, NULL));
    if (!HAL_STATUS_SUCCESS(ret)) {
        smsLog( pMac, LOGE, "WLANSAP_Stop failed during smeStop with status=%d",
                ret );
        failRet = ret;
    }
    p2pStop(hHal);
    ret = pmcStop(hHal);
    if (!HAL_STATUS_SUCCESS(ret)) {
        smsLog( pMac, LOGE, "pmcStop failed during smeStop with status=%d",
                ret );
        failRet = ret;
    }
    ret = csrStop(pMac, stopType);
    if (!HAL_STATUS_SUCCESS(ret)) {
        smsLog( pMac, LOGE, "csrStop failed during smeStop with status=%d",
                ret );
        failRet = ret;
    }
    ccmStop(hHal);
    purgeSmeCmdList(pMac);
    /* Report the recorded failure (if any), but SME is considered
       stopped either way. */
    if (!HAL_STATUS_SUCCESS(failRet)) {
        ret = failRet;
    }
    pMac->sme.state = SME_STATE_STOP;
    return ret;
}
/*--------------------------------------------------------------------------
\brief sme_Close() - Release all SME modules and their resources.
The function release each module in SME, PMC, CCM, CSR, etc. . Upon
return, all modules are at closed state.
No SME APIs can be involved after smeClose except smeOpen.
smeClose must be called before macClose.
This is a synchronous call
\param hHal - The handle returned by macOpen
\return eHAL_STATUS_SUCCESS - SME is successfully close.
Other status means SME is failed to be closed but caller still cannot
call any other SME functions except smeOpen.
\sa
--------------------------------------------------------------------------*/
eHalStatus sme_Close(tHalHandle hHal)
{
eHalStatus status = eHAL_STATUS_FAILURE;
eHalStatus fail_status = eHAL_STATUS_SUCCESS;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
/* Close every sub-module in order; record the last failure but keep
 * going so all resources are released regardless. */
status = csrClose(pMac);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "csrClose failed during sme close with status=%d",
status );
fail_status = status;
}
status = WLANSAP_Close(vos_get_global_context(VOS_MODULE_ID_SAP, NULL));
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "WLANSAP_close failed during sme close with status=%d",
status );
fail_status = status;
}
#ifndef WLAN_MDM_CODE_REDUCTION_OPT
status = btcClose(hHal);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "BTC close failed during sme close with status=%d",
status );
fail_status = status;
}
status = sme_QosClose(pMac);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "Qos close failed during sme close with status=%d",
status );
fail_status = status;
}
#endif
#ifdef FEATURE_OEM_DATA_SUPPORT
status = oemData_OemDataReqClose(hHal);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "OEM DATA REQ close failed during sme close with status=%d",
status );
fail_status = status;
}
#endif
status = ccmClose(hHal);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "ccmClose failed during sme close with status=%d",
status );
fail_status = status;
}
status = pmcClose(hHal);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "pmcClose failed during sme close with status=%d",
status );
fail_status = status;
}
#if defined WLAN_FEATURE_VOWIFI
status = rrmClose(hHal);
if ( ! HAL_STATUS_SUCCESS( status ) ) {
smsLog( pMac, LOGE, "RRM close failed during sme close with status=%d",
status );
fail_status = status;
}
#endif
#if defined WLAN_FEATURE_VOWIFI_11R
sme_FTClose(hHal);
#endif
sme_p2pClose(hHal);
/* Free the SME command list and the global lock last: nothing above
 * may run after these are gone. */
freeSmeCmdList(pMac);
if( !VOS_IS_STATUS_SUCCESS( vos_lock_destroy( &pMac->sme.lkSmeGlobalLock ) ) )
{
fail_status = eHAL_STATUS_FAILURE;
}
if (!HAL_STATUS_SUCCESS( fail_status )) {
status = fail_status;
}
/* NOTE(review): state is set to SME_STATE_STOP (no distinct CLOSED
 * state appears in this file) — confirm that is the intended final
 * state after close. */
pMac->sme.state = SME_STATE_STOP;
return status;
}
#ifdef FEATURE_WLAN_LFR
/*
 * csrIsScanAllowed() - decide whether a host scan may proceed while the
 * LFR feature is compiled in.
 *
 * The neighbor-roam-state gating below is intentionally disabled (#if 0):
 * scans used to be denied during the report-scan / pre-auth / reassociating
 * states, but that could leave scanning blocked indefinitely if the roam
 * state machine got stuck in one of those states (see TODO below). Until
 * that is resolved, the scan is always allowed.
 */
tANI_BOOLEAN csrIsScanAllowed(tpAniSirGlobal pMac)
{
#if 0
switch(pMac->roam.neighborRoamInfo.neighborRoamState) {
case eCSR_NEIGHBOR_ROAM_STATE_REPORT_SCAN:
case eCSR_NEIGHBOR_ROAM_STATE_PREAUTHENTICATING:
case eCSR_NEIGHBOR_ROAM_STATE_PREAUTH_DONE:
case eCSR_NEIGHBOR_ROAM_STATE_REASSOCIATING:
return eANI_BOOLEAN_FALSE;
default:
return eANI_BOOLEAN_TRUE;
}
#else
/*
 * TODO: always return TRUE for now until
 * we figure out why we could be stuck in
 * one of the roaming states forever.
 */
return eANI_BOOLEAN_TRUE;
#endif
}
#endif
/* ---------------------------------------------------------------------------
\fn sco_isScanAllowed
\brief check for scan interface connection status
\param pMac - Pointer to the global MAC parameter structure
\param pScanReq - scan request structure.
\return tANI_BOOLEAN TRUE to allow scan otherwise FALSE
---------------------------------------------------------------------------*/
tANI_BOOLEAN sco_isScanAllowed(tpAniSirGlobal pMac, tCsrScanRequest *pscanReq)
{
    /* The scan is allowed only while the relevant interface has no active
     * connection: the P2P session for a P2P search, the STA session
     * otherwise. */
    tANI_BOOLEAN connected;

    connected = pscanReq->p2pSearch ?
                    csrIsP2pSessionConnected(pMac) :
                    csrIsStaSessionConnected(pMac);
    return !connected;
}
/* ---------------------------------------------------------------------------
\fn sme_ScanRequest
\brief a wrapper function to Request a 11d or full scan from CSR.
This is an asynchronous call
\param pScanRequestID - pointer to an object to get back the request ID
\param callback - a callback function that scan calls upon finish, will not
be called if csrScanRequest returns error
\param pContext - a pointer passed in for the callback
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_ScanRequest(tHalHandle hHal, tANI_U8 sessionId, tCsrScanRequest *pscanReq,
tANI_U32 *pScanRequestID,
csrScanCompleteCallback callback, void *pContext)
{
eHalStatus status = eHAL_STATUS_FAILURE;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
MTRACE(vos_trace(VOS_MODULE_ID_SME,
TRACE_CODE_SME_RX_HDD_MSG_SCAN_REQ, sessionId, pscanReq->scanType));
smsLog(pMac, LOG2, FL("enter"));
do
{
/* Proceed only if scanning is globally enabled and, when the coex SCO
 * indication is set, sco_isScanAllowed() permits it for this request. */
if(pMac->scan.fScanEnable &&
(pMac->isCoexScoIndSet ? sco_isScanAllowed(pMac, pscanReq) : TRUE))
{
status = sme_AcquireGlobalLock( &pMac->sme );
if ( HAL_STATUS_SUCCESS( status ) )
{
{
#ifdef FEATURE_WLAN_LFR
/* With LFR, deny the scan while a roam hand-off is in progress */
if(csrIsScanAllowed(pMac))
{
#endif
status = csrScanRequest( hHal, sessionId, pscanReq,
pScanRequestID, callback, pContext );
if ( !HAL_STATUS_SUCCESS( status ) )
{
smsLog(pMac, LOGE, FL("csrScanRequest failed"
" SId=%d"), sessionId);
}
#ifdef FEATURE_WLAN_LFR
}
else
{
smsLog(pMac, LOGE, FL("Scan denied in state %s"
"(sub-state %s)"),
macTraceGetNeighbourRoamState(
pMac->roam.neighborRoamInfo.neighborRoamState),
macTraceGetcsrRoamSubState(
pMac->roam.curSubState[sessionId]));
/*HandOff is in progress. So schedule this scan later*/
status = eHAL_STATUS_RESOURCES;
}
#endif
}
sme_ReleaseGlobalLock( &pMac->sme );
} //sme_AcquireGlobalLock success
else
{
smsLog(pMac, LOGE, FL("sme_AcquireGlobalLock failed"));
}
} //if(pMac->scan.fScanEnable)
else
{
/* Scanning disabled or blocked by coex; eHAL_STATUS_RESOURCES lets the
 * caller retry the request later. */
smsLog(pMac, LOGE, FL("fScanEnable %d isCoexScoIndSet: %d "),
pMac->scan.fScanEnable, pMac->isCoexScoIndSet);
status = eHAL_STATUS_RESOURCES;
}
} while( 0 );
return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_ScanGetResult
\brief a wrapper function to request scan results from CSR.
This is a synchronous call
\param pFilter - If pFilter is NULL, all cached results are returned
\param phResult - an object for the result.
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_ScanGetResult(tHalHandle hHal, tANI_U8 sessionId, tCsrScanResultFilter *pFilter,
                             tScanResultHandle *phResult)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_MSG_SCAN_GET_RESULTS, sessionId, 0));
    smsLog(pMac, LOG2, FL("enter"));
    /* Fetch the cached results under the SME global lock. */
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(halStatus))
    {
        halStatus = csrScanGetResult(hHal, pFilter, phResult);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    smsLog(pMac, LOG2, FL("exit status %d"), halStatus);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_ScanFlushResult
\brief a wrapper function to request CSR to clear scan results.
This is a synchronous call
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_ScanFlushResult(tHalHandle hHal, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_MSG_SCAN_FLUSH_RESULTS, sessionId, 0));
    /* Serialize against other SME callers before touching the scan cache. */
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    halStatus = csrScanFlushResult(hHal);
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_FilterScanResults
\brief a wrapper function to request CSR to filter the cached scan results.
This is a synchronous call
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_FilterScanResults(tHalHandle hHal, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    MTRACE(macTraceNew(pMac, VOS_MODULE_ID_SME,
                       TRACE_CODE_SME_RX_HDD_MSG_SCAN_FLUSH_RESULTS, sessionId, 0));
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(halStatus))
    {
        /* csrScanFilterResults() reports nothing back, so the lock
         * acquisition result is what the caller receives. */
        csrScanFilterResults(pMac);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return halStatus;
}
eHalStatus sme_ScanFlushP2PResult(tHalHandle hHal, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_MSG_SCAN_FLUSH_P2PRESULTS, sessionId, 0));
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    /* VOS_TRUE selects the selective (P2P) flush — see
     * csrScanFlushSelectiveResult for the flag's exact semantics. */
    halStatus = csrScanFlushSelectiveResult(hHal, VOS_TRUE);
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_ScanResultGetFirst
\brief a wrapper function to request CSR to returns the first element of
scan result.
This is a synchronous call
\param hScanResult - returned from csrScanGetResult
\return tCsrScanResultInfo * - NULL if no result
---------------------------------------------------------------------------*/
tCsrScanResultInfo *sme_ScanResultGetFirst(tHalHandle hHal,
                                           tScanResultHandle hScanResult)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrScanResultInfo *pResult = NULL;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_MSG_SCAN_RESULT_GETFIRST, NO_SESSION, 0));
    /* NULL is returned both when the lock cannot be taken and when the
     * result list is empty. */
    if (HAL_STATUS_SUCCESS(sme_AcquireGlobalLock(&pMac->sme)))
    {
        pResult = csrScanResultGetFirst(pMac, hScanResult);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return pResult;
}
/* ---------------------------------------------------------------------------
\fn sme_ScanResultGetNext
\brief a wrapper function to request CSR to returns the next element of
scan result. It can be called without calling csrScanResultGetFirst
first
This is a synchronous call
\param hScanResult - returned from csrScanGetResult
\return Null if no result or reach the end
---------------------------------------------------------------------------*/
tCsrScanResultInfo *sme_ScanResultGetNext(tHalHandle hHal,
                                          tScanResultHandle hScanResult)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrScanResultInfo *pResult = NULL;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_MSG_SCAN_RESULT_GETNEXT, NO_SESSION, 0));
    /* NULL is returned both when the lock cannot be taken and when the
     * iterator has reached the end of the list. */
    if (HAL_STATUS_SUCCESS(sme_AcquireGlobalLock(&pMac->sme)))
    {
        pResult = csrScanResultGetNext(pMac, hScanResult);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return pResult;
}
/* ---------------------------------------------------------------------------
\fn sme_ScanSetBGScanparams
\brief a wrapper function to request CSR to set BG scan params in PE
This is a synchronous call
\param pScanReq - BG scan request structure
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_ScanSetBGScanparams(tHalHandle hHal, tANI_U8 sessionId, tCsrBGScanRequest *pScanReq)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus = eHAL_STATUS_FAILURE;

    /* A NULL request has nothing to program; fail without taking the lock. */
    if (NULL == pScanReq)
    {
        return halStatus;
    }
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(halStatus))
    {
        halStatus = csrScanSetBGScanparams(hHal, pScanReq);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_ScanResultPurge
\brief a wrapper function to request CSR to remove all items(tCsrScanResult)
in the list and free memory for each item
This is a synchronous call
\param hScanResult - returned from csrScanGetResult. hScanResult is
considered gone by
calling this function and even before this function returns.
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_ScanResultPurge(tHalHandle hHal, tScanResultHandle hScanResult)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_MSG_SCAN_RESULT_PURGE, NO_SESSION, 0));
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    /* hScanResult is invalid once this call is made; see the API comment. */
    halStatus = csrScanResultPurge(hHal, hScanResult);
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_ScanGetPMKIDCandidateList
\brief a wrapper function to return the PMKID candidate list
This is a synchronous call
\param pPmkidList - caller allocated buffer point to an array of
tPmkidCandidateInfo
\param pNumItems - pointer to a variable that has the number of
tPmkidCandidateInfo allocated when returning, this is
either the number needed or number of items put into
pPmkidList
\return eHalStatus - when fail, it usually means the buffer allocated is not
big enough and pNumItems
has the number of tPmkidCandidateInfo.
\Note: pNumItems is a number of tPmkidCandidateInfo,
not sizeof(tPmkidCandidateInfo) * something
---------------------------------------------------------------------------*/
eHalStatus sme_ScanGetPMKIDCandidateList(tHalHandle hHal, tANI_U8 sessionId,
                                         tPmkidCandidateInfo *pPmkidList,
                                         tANI_U32 *pNumItems )
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    halStatus = csrScanGetPMKIDCandidateList(pMac, sessionId,
                                             pPmkidList, pNumItems);
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/*----------------------------------------------------------------------------
\fn sme_RoamRegisterLinkQualityIndCallback
\brief
a wrapper function to allow HDD to register a callback handler with CSR for
link quality indications.
Only one callback may be registered at any time.
In order to deregister the callback, a NULL cback may be provided.
Registration happens in the task context of the caller.
\param callback - Call back being registered
\param pContext - user data
DEPENDENCIES: After CSR open
\return eHalStatus
-----------------------------------------------------------------------------*/
eHalStatus sme_RoamRegisterLinkQualityIndCallback(tHalHandle hHal, tANI_U8 sessionId,
                                                  csrRoamLinkQualityIndCallback callback,
                                                  void *pContext)
{
    /* sessionId is not forwarded: CSR keeps a single link-quality callback
     * for the whole MAC context rather than one per session. */
    tpAniSirGlobal pMac = (tpAniSirGlobal)hHal;

    return csrRoamRegisterLinkQualityIndCallback(pMac, callback, pContext);
}
/* ---------------------------------------------------------------------------
\fn sme_RoamRegisterCallback
\brief a wrapper function to allow HDD to register a callback with CSR.
Unlike scan, roam has one callback for all the roam requests
\param callback - a callback function that roam calls upon when state changes
\param pContext - a pointer passed in for the callback
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_RoamRegisterCallback(tHalHandle hHal,
                                    csrRoamCompleteCallback callback,
                                    void *pContext)
{
    /* Thin pass-through: CSR owns the single roam-completion callback. */
    tpAniSirGlobal pMac = (tpAniSirGlobal)hHal;

    return csrRoamRegisterCallback(pMac, callback, pContext);
}
eCsrPhyMode sme_GetPhyMode(tHalHandle hHal)
{
    /* Report the PHY mode currently held in the roam configuration. */
    tpAniSirGlobal pMacCtx = PMAC_STRUCT(hHal);

    return pMacCtx->roam.configParam.phyMode;
}
/* ---------------------------------------------------------------------------
\fn sme_GetChannelBondingMode5G
\brief get the channel bonding mode for 5G band
\param hHal - HAL handle
\return channel bonding mode for 5G
---------------------------------------------------------------------------*/
tANI_U32 sme_GetChannelBondingMode5G(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tSmeConfigParams smeConfig;

    /* Take a snapshot of the current SME configuration and report the
     * 5 GHz channel bonding mode from it. */
    sme_GetConfigParam(pMac, &smeConfig);
    return smeConfig.csrConfig.channelBondingMode5GHz;
}
/* ---------------------------------------------------------------------------
\fn sme_GetChannelBondingMode24G
\brief get the channel bonding mode for 2.4G band
\param hHal - HAL handle
\return channel bonding mode for 2.4G
---------------------------------------------------------------------------*/
tANI_U32 sme_GetChannelBondingMode24G(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tSmeConfigParams smeConfig;

    /* Take a snapshot of the current SME configuration and report the
     * 2.4 GHz channel bonding mode from it. */
    sme_GetConfigParam(pMac, &smeConfig);
    return smeConfig.csrConfig.channelBondingMode24GHz;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamConnect
\brief a wrapper function to request CSR to initiate an association
This is an asynchronous call.
\param sessionId - the sessionId returned by sme_OpenSession.
\param pProfile - description of the network to which to connect
\param hBssListIn - a list of BSS descriptor to roam to. It is returned
from csrScanGetResult
\param pRoamId - to get back the request ID
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_RoamConnect(tHalHandle hHal, tANI_U8 sessionId, tCsrRoamProfile *pProfile,
                           tANI_U32 *pRoamId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    if (!pMac)
    {
        return eHAL_STATUS_FAILURE;
    }
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_MSG_CONNECT, sessionId, 0));
    smsLog(pMac, LOG2, FL("enter"));

    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        smsLog(pMac, LOGE, FL("sme_AcquireGlobalLock failed"));
        return halStatus;
    }
    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        /* No candidate BSS list is supplied (NULL); CSR selects the BSS. */
        halStatus = csrRoamConnect(pMac, sessionId, pProfile, NULL, pRoamId);
    }
    else
    {
        smsLog(pMac, LOGE, FL("invalid sessionID %d"), sessionId);
        halStatus = eHAL_STATUS_INVALID_PARAMETER;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_SetPhyMode
\brief Changes the PhyMode.
\param hHal - The handle returned by macOpen.
\param phyMode new phyMode which is to set
\return eHalStatus SUCCESS.
-------------------------------------------------------------------------------*/
eHalStatus sme_SetPhyMode(tHalHandle hHal, eCsrPhyMode phyMode)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    if (!pMac)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: invalid context", __func__);
        return eHAL_STATUS_FAILURE;
    }

    /* Store the new PHY mode and keep the derived CFG dot11 mode in sync
     * with it. */
    pMac->roam.configParam.phyMode = phyMode;
    pMac->roam.configParam.uCfgDot11Mode =
        csrGetCfgDot11ModeFromCsrPhyMode(NULL,
                                         pMac->roam.configParam.phyMode,
                                         pMac->roam.configParam.ProprietaryRatesEnabled);
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamReassoc
\brief a wrapper function to request CSR to initiate a re-association
\param pProfile - can be NULL to join the currently connected AP. In that
case modProfileFields should carry the modified field(s) which could trigger
reassoc
\param modProfileFields - fields which are part of tCsrRoamConnectedProfile
that might need modification dynamically once STA is up & running and this
could trigger a reassoc
\param pRoamId - to get back the request ID
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_RoamReassoc(tHalHandle hHal, tANI_U8 sessionId, tCsrRoamProfile *pProfile,
tCsrRoamModifyProfileFields modProfileFields,
tANI_U32 *pRoamId, v_BOOL_t fForce)
{
eHalStatus status = eHAL_STATUS_FAILURE;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
MTRACE(vos_trace(VOS_MODULE_ID_SME,
TRACE_CODE_SME_RX_HDD_ROAM_REASSOC, sessionId, 0));
smsLog(pMac, LOG2, FL("enter"));
status = sme_AcquireGlobalLock( &pMac->sme );
if ( HAL_STATUS_SUCCESS( status ) )
{
if( CSR_IS_SESSION_VALID( pMac, sessionId ) )
{
/* A forced reassoc with no profile re-associates to the current AP
 * via csrReassoc() using only the modified profile fields. */
if((NULL == pProfile) && (fForce == 1))
{
tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
/* to force the AP initiate fresh 802.1x authentication need to clear
* the PMKID cache for that set the following boolean. this is needed
* by the HS 2.0 passpoint certification 5.2.a and b testcases */
pSession->fIgnorePMKIDCache = TRUE;
status = csrReassoc( pMac, sessionId, &modProfileFields, pRoamId , fForce);
}
else
{
/* Normal path: reassociate using the supplied profile plus the
 * dynamically modified connected-profile fields. */
status = csrRoamReassoc( pMac, sessionId, pProfile, modProfileFields, pRoamId );
}
}
else
{
status = eHAL_STATUS_INVALID_PARAMETER;
}
sme_ReleaseGlobalLock( &pMac->sme );
}
return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_RoamConnectToLastProfile
\brief a wrapper function to request CSR to disconnect and reconnect with
the same profile
This is an asynchronous call.
\return eHalStatus. It returns fail if currently connected
---------------------------------------------------------------------------*/
eHalStatus sme_RoamConnectToLastProfile(tHalHandle hHal, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    halStatus = CSR_IS_SESSION_VALID(pMac, sessionId) ?
                    csrRoamConnectToLastProfile(pMac, sessionId) :
                    eHAL_STATUS_INVALID_PARAMETER;
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamDisconnect
\brief a wrapper function to request CSR to disconnect from a network
This is an asynchronous call.
\param reason -- To indicate the reason for disconnecting. Currently, only
eCSR_DISCONNECT_REASON_MIC_ERROR is meaningful.
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_RoamDisconnect(tHalHandle hHal, tANI_U8 sessionId, eCsrRoamDisconnectReason reason)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_ROAM_DISCONNECT, sessionId, reason));
    smsLog(pMac, LOG2, FL("enter"));
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        halStatus = csrRoamDisconnect(pMac, sessionId, reason);
    }
    else
    {
        halStatus = eHAL_STATUS_INVALID_PARAMETER;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamStopBss
\brief To stop BSS for Soft AP. This is an asynchronous API.
\param hHal - Global structure
\param sessionId - sessionId of SoftAP
\return eHalStatus SUCCESS Roam callback will be called to indicate actual results
-------------------------------------------------------------------------------*/
eHalStatus sme_RoamStopBss(tHalHandle hHal, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus;

    smsLog(pMac, LOG2, FL("enter"));
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        /* eANI_BOOLEAN_FALSE: see csrRoamIssueStopBssCmd for the flag's
         * semantics — TODO confirm against that definition. */
        halStatus = csrRoamIssueStopBssCmd(pMac, sessionId, eANI_BOOLEAN_FALSE);
    }
    else
    {
        halStatus = eHAL_STATUS_INVALID_PARAMETER;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamDisconnectSta
\brief To disassociate a station. This is an asynchronous API.
\param hHal - Global structure
\param sessionId - sessionId of SoftAP
\param pPeerMacAddr - Caller allocated memory filled with peer MAC address (6 bytes)
\return eHalStatus SUCCESS Roam callback will be called to indicate actual results
-------------------------------------------------------------------------------*/
eHalStatus sme_RoamDisconnectSta(tHalHandle hHal, tANI_U8 sessionId,
                                 tANI_U8 *pPeerMacAddr)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus = eHAL_STATUS_FAILURE;

    if (!pMac)
    {
        VOS_ASSERT(0);
        return halStatus;
    }
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        halStatus = csrRoamIssueDisassociateStaCmd(pMac, sessionId, pPeerMacAddr,
                                                   eSIR_MAC_DEAUTH_LEAVING_BSS_REASON);
    }
    else
    {
        halStatus = eHAL_STATUS_INVALID_PARAMETER;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamDeauthSta
\brief To deauthenticate a station. This is an asynchronous API.
\param hHal - Global structure
\param sessionId - sessionId of SoftAP
\param pDelStaParams -Pointer to parameters of the station to deauthenticate
\return eHalStatus SUCCESS Roam callback will be called to indicate actual results
-------------------------------------------------------------------------------*/
eHalStatus sme_RoamDeauthSta(tHalHandle hHal, tANI_U8 sessionId,
                             struct tagCsrDelStaParams *pDelStaParams)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus = eHAL_STATUS_FAILURE;

    if (!pMac)
    {
        VOS_ASSERT(0);
        return halStatus;
    }
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        halStatus = csrRoamIssueDeauthStaCmd(pMac, sessionId, pDelStaParams);
    }
    else
    {
        halStatus = eHAL_STATUS_INVALID_PARAMETER;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamTKIPCounterMeasures
\brief To start or stop TKIP counter measures. This is an asynchronous API.
\param sessionId - sessionId of SoftAP
\param pPeerMacAddr - Caller allocated memory filled with peer MAC address (6 bytes)
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_RoamTKIPCounterMeasures(tHalHandle hHal, tANI_U8 sessionId,
                                       tANI_BOOLEAN bEnable)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus = eHAL_STATUS_FAILURE;

    if (!pMac)
    {
        VOS_ASSERT(0);
        return halStatus;
    }
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        /* bEnable starts (TRUE) or stops (FALSE) the counter measures. */
        halStatus = csrRoamIssueTkipCounterMeasures(pMac, sessionId, bEnable);
    }
    else
    {
        halStatus = eHAL_STATUS_INVALID_PARAMETER;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamGetAssociatedStas
\brief To probe the list of associated stations from various modules of CORE stack.
\This is an asynchronous API.
\param sessionId - sessionId of SoftAP
\param modId - Module from whom list of associtated stations is to be probed.
If an invalid module is passed then by default VOS_MODULE_ID_PE will be probed
\param pUsrContext - Opaque HDD context
\param pfnSapEventCallback - Sap event callback in HDD
\param pAssocBuf - Caller allocated memory to be filled with associatd stations info
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_RoamGetAssociatedStas(tHalHandle hHal, tANI_U8 sessionId,
                                     VOS_MODULE_ID modId, void *pUsrContext,
                                     void *pfnSapEventCallback, tANI_U8 *pAssocStasBuf)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus = eHAL_STATUS_FAILURE;

    if (!pMac)
    {
        VOS_ASSERT(0);
        return halStatus;
    }
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        halStatus = csrRoamGetAssociatedStas(pMac, sessionId, modId,
                                             pUsrContext, pfnSapEventCallback,
                                             pAssocStasBuf);
    }
    else
    {
        halStatus = eHAL_STATUS_INVALID_PARAMETER;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamGetWpsSessionOverlap
\brief To get the WPS PBC session overlap information.
\This is an asynchronous API.
\param sessionId - sessionId of SoftAP
\param pUsrContext - Opaque HDD context
\param pfnSapEventCallback - Sap event callback in HDD
\pRemoveMac - pointer to Mac address which needs to be removed from session
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_RoamGetWpsSessionOverlap(tHalHandle hHal, tANI_U8 sessionId,
                                        void *pUsrContext,
                                        void *pfnSapEventCallback,
                                        v_MACADDR_t pRemoveMac)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus halStatus = eHAL_STATUS_FAILURE;

    if (!pMac)
    {
        VOS_ASSERT(0);
        return halStatus;
    }
    halStatus = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(halStatus))
    {
        return halStatus;
    }
    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        halStatus = csrRoamGetWpsSessionOverlap(pMac, sessionId, pUsrContext,
                                                pfnSapEventCallback, pRemoveMac);
    }
    else
    {
        halStatus = eHAL_STATUS_INVALID_PARAMETER;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return halStatus;
}
/* ---------------------------------------------------------------------------
    \fn sme_RoamGetConnectState
    \brief Wrapper that asks CSR for the current roaming connect state.
           This is a synchronous call.
    \param sessionId - session id to query
    \param pState - caller-allocated location to receive the connect state
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_RoamGetConnectState(tHalHandle hHal, tANI_U8 sessionId, eCsrConnectState *pState)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    if (CSR_IS_SESSION_VALID(pMac, sessionId))
        status = csrRoamGetConnectState(pMac, sessionId, pState);
    else
        status = eHAL_STATUS_INVALID_PARAMETER;

    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_RoamGetConnectProfile
    \brief Wrapper that asks CSR for the current connect profile. The caller
           must call csrRoamFreeConnectProfile when done, and before reusing
           the structure for another sme_RoamGetConnectProfile call.
           This is a synchronous call.
    \param pProfile - pointer to a caller-allocated tCsrRoamConnectedProfile
    \return eHalStatus. Failure if not connected.
  ---------------------------------------------------------------------------*/
eHalStatus sme_RoamGetConnectProfile(tHalHandle hHal, tANI_U8 sessionId,
                                     tCsrRoamConnectedProfile *pProfile)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_ROAM_GET_CONNECTPROFILE, sessionId, 0));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    if (CSR_IS_SESSION_VALID(pMac, sessionId))
        status = csrRoamGetConnectProfile(pMac, sessionId, pProfile);
    else
        status = eHAL_STATUS_INVALID_PARAMETER;

    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_RoamFreeConnectProfile
    \brief Wrapper that asks CSR to free and reinitialize the profile that
           was previously returned by csrRoamGetConnectProfile.
           This is a synchronous call.
    \param pProfile - pointer to a caller-allocated tCsrRoamConnectedProfile
    \return eHalStatus.
  ---------------------------------------------------------------------------*/
eHalStatus sme_RoamFreeConnectProfile(tHalHandle hHal,
                                      tCsrRoamConnectedProfile *pProfile)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_ROAM_FREE_CONNECTPROFILE, NO_SESSION, 0));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = csrRoamFreeConnectProfile(pMac, pProfile);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_RoamSetPMKIDCache
    \brief Wrapper that hands a PMKID candidate list to CSR.
           This is a synchronous call.
    \param pPMKIDCache - caller-allocated array of tPmkidCacheInfo
    \param numItems - number of tPmkidCacheInfo entries in pPMKIDCache
    \param update_entire_cache - TRUE to overwrite the entire PMKID cache,
                                 FALSE to update it entry by entry
    \return eHalStatus - failure usually means the supplied buffer is too
                         small
    \note numItems counts tPmkidCacheInfo entries, not bytes
  ---------------------------------------------------------------------------*/
eHalStatus sme_RoamSetPMKIDCache( tHalHandle hHal, tANI_U8 sessionId,
                                  tPmkidCacheInfo *pPMKIDCache,
                                  tANI_U32 numItems,
                                  tANI_BOOLEAN update_entire_cache )
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_ROAM_SET_PMKIDCACHE, sessionId, numItems));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    if (CSR_IS_SESSION_VALID(pMac, sessionId))
        status = csrRoamSetPMKIDCache(pMac, sessionId, pPMKIDCache,
                                      numItems, update_entire_cache);
    else
        status = eHAL_STATUS_INVALID_PARAMETER;

    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_RoamDelPMKIDfromCache
    \brief Wrapper that asks CSR to delete a PMKID entry (or flush the whole
           cache) for the given session.
           This is a synchronous call.
    \param pBSSId - BSSID whose PMKID entry is to be removed
    \param flush_cache - TRUE to flush the entire PMKID cache
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_RoamDelPMKIDfromCache( tHalHandle hHal, tANI_U8 sessionId,
                                      tANI_U8 *pBSSId,
                                      tANI_BOOLEAN flush_cache )
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    if (CSR_IS_SESSION_VALID(pMac, sessionId))
        status = csrRoamDelPMKIDfromCache(pMac, sessionId,
                                          pBSSId, flush_cache);
    else
        status = eHAL_STATUS_INVALID_PARAMETER;

    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_RoamGetSecurityReqIE
    \brief a wrapper function to request CSR to return the WPA or RSN or
           WAPI IE CSR passes to PE in the JOIN request or START_BSS request
           This is a synchronous call.
    \param pLen - caller allocated memory that has the length of pBuf as
                  input. Upon return, *pLen has the needed or IE length in
                  pBuf.
    \param pBuf - Caller allocated memory that contains the IE field, if
                  any, upon return
    \param secType - Specifies whether looking for WPA/WPA2/WAPI IE
                     NOTE(review): secType is currently unused here; the
                     WPA/RSN request IE is returned regardless. Confirm
                     against callers before relying on it.
    \return eHalStatus - when fail, it usually means the buffer allocated is
                         not big enough
  ---------------------------------------------------------------------------*/
eHalStatus sme_RoamGetSecurityReqIE(tHalHandle hHal, tANI_U8 sessionId, tANI_U32 *pLen,
                                    tANI_U8 *pBuf, eCsrSecurityType secType)
{
   eHalStatus status = eHAL_STATUS_FAILURE;
   tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

   status = sme_AcquireGlobalLock( &pMac->sme );
   if ( HAL_STATUS_SUCCESS( status ) )
   {
      if( CSR_IS_SESSION_VALID( pMac, sessionId ) )
      {
         /* Pass pMac (same underlying object as hHal) for consistency with
            sme_RoamGetSecurityRspIE, which calls csrRoamGetWpaRsnRspIE with
            pMac. */
         status = csrRoamGetWpaRsnReqIE( pMac, sessionId, pLen, pBuf );
      }
      else
      {
         status = eHAL_STATUS_INVALID_PARAMETER;
      }
      sme_ReleaseGlobalLock( &pMac->sme );
   }
   return (status);
}
/* ---------------------------------------------------------------------------
    \fn sme_RoamGetSecurityRspIE
    \brief Wrapper that asks CSR for the WPA/RSN/WAPI IE from the beacon or
           probe response when connected.
           This is a synchronous call.
    \param pLen - on input, the length of pBuf; on return, the needed or
                  actual IE length in pBuf
    \param pBuf - caller-allocated memory that receives the IE field, if any
    \param secType - whether a WPA/WPA2/WAPI IE is requested
                     NOTE(review): secType is not consumed in this wrapper;
                     confirm against csrRoamGetWpaRsnRspIE.
    \return eHalStatus - failure usually means the supplied buffer is too
                         small
  ---------------------------------------------------------------------------*/
eHalStatus sme_RoamGetSecurityRspIE(tHalHandle hHal, tANI_U8 sessionId, tANI_U32 *pLen,
                                    tANI_U8 *pBuf, eCsrSecurityType secType)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    if (CSR_IS_SESSION_VALID(pMac, sessionId))
        status = csrRoamGetWpaRsnRspIE(pMac, sessionId, pLen, pBuf);
    else
        status = eHAL_STATUS_INVALID_PARAMETER;

    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_RoamGetNumPMKIDCache
    \brief Wrapper that asks CSR for the number of PMKID cache entries.
           This is a synchronous call.
    \return tANI_U32 - the number of PMKID cache entries (0 on any failure,
                       including an invalid session or lock failure)
  ---------------------------------------------------------------------------*/
tANI_U32 sme_RoamGetNumPMKIDCache(tHalHandle hHal, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tANI_U32 numPmkidCache = 0;
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return numPmkidCache;

    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        numPmkidCache = csrRoamGetNumPMKIDCache(pMac, sessionId);
        status = eHAL_STATUS_SUCCESS;
    }
    else
    {
        status = eHAL_STATUS_INVALID_PARAMETER;
    }

    sme_ReleaseGlobalLock(&pMac->sme);
    return numPmkidCache;
}
/* ---------------------------------------------------------------------------
    \fn sme_RoamGetPMKIDCache
    \brief Wrapper that asks CSR for the PMKID cache contents.
           This is a synchronous call.
    \param pNum - on input, how many tPmkidCacheInfo entries fit in
                  pPmkidCache; on return, the needed or actual number
    \param pPmkidCache - caller-allocated memory that receives the PMKID
                         cache, if any
    \return eHalStatus - failure usually means the supplied buffer is too
                         small
  ---------------------------------------------------------------------------*/
eHalStatus sme_RoamGetPMKIDCache(tHalHandle hHal, tANI_U8 sessionId, tANI_U32 *pNum,
                                 tPmkidCacheInfo *pPmkidCache)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    if (CSR_IS_SESSION_VALID(pMac, sessionId))
        status = csrRoamGetPMKIDCache(pMac, sessionId, pNum, pPmkidCache);
    else
        status = eHAL_STATUS_INVALID_PARAMETER;

    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_GetConfigParam
    \brief Wrapper that HDD calls to read the global settings currently
           maintained by CSR (and P2P, when WLAN_FEATURE_P2P_INTERNAL is
           enabled).
           This is a synchronous call.
    \param pParam - caller-allocated memory to receive the settings
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_GetConfigParam(tHalHandle hHal, tSmeConfigParams *pParam)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_GET_CONFIGPARAM, NO_SESSION, 0));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    /* Single exit point: release the lock exactly once regardless of which
       step fails. */
    status = csrGetConfigParam(pMac, &pParam->csrConfig);
    if (!HAL_STATUS_SUCCESS(status))
    {
        smsLog( pMac, LOGE, "%s csrGetConfigParam failed", __func__);
    }
#if defined WLAN_FEATURE_P2P_INTERNAL
    else
    {
        status = p2pGetConfigParam(pMac, &pParam->p2pConfig);
        if (!HAL_STATUS_SUCCESS(status))
        {
            smsLog( pMac, LOGE, "%s p2pGetConfigParam failed", __func__);
        }
    }
#endif
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_CfgSetInt
    \brief a wrapper function that HDD calls to set parameters in CFG.
    This is a synchronous call.
    \param cfgId - Configuration Parameter ID (type) for STA.
    \param ccmValue - The information related to Configuration Parameter ID
                      which needs to be saved in CFG
    \param callback - To be registered by CSR with CCM. Once the CFG done with
                      saving the information in the database, it notifies CCM &
                      then the callback will be invoked to notify.
    \param toBeSaved - To save the request for future reference
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_CfgSetInt(tHalHandle hHal, tANI_U32 cfgId, tANI_U32 ccmValue,
                         tCcmCfgSetCallback callback, eAniBoolean toBeSaved)
{
   /* Thin pass-through to CCM; no locking needed at this layer. */
   return(ccmCfgSetInt(hHal, cfgId, ccmValue, callback, toBeSaved));
}
/* ---------------------------------------------------------------------------
    \fn sme_CfgSetStr
    \brief a wrapper function that HDD calls to set parameters in CFG.
    This is a synchronous call.
    \param cfgId - Configuration Parameter ID (type) for STA.
    \param pStr - Pointer to the byte array which carries the information needs
                  to be saved in CFG
    \param length - Length of the data to be saved
    \param callback - To be registered by CSR with CCM. Once the CFG done with
                      saving the information in the database, it notifies CCM &
                      then the callback will be invoked to notify.
    \param toBeSaved - To save the request for future reference
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_CfgSetStr(tHalHandle hHal, tANI_U32 cfgId, tANI_U8 *pStr,
                         tANI_U32 length, tCcmCfgSetCallback callback,
                         eAniBoolean toBeSaved)
{
   /* Thin pass-through to CCM; no locking needed at this layer. */
   return(ccmCfgSetStr(hHal, cfgId, pStr, length, callback, toBeSaved));
}
/* ---------------------------------------------------------------------------
    \fn sme_GetModifyProfileFields
    \brief HDD or SME-QOS calls this to read the current values of the
           connected-profile fields whose change can cause a reassoc. Call
           only after CFG download and while the STA is connected, and call
           it before a reassoc so that pModifyProfileFields carries the
           latest values plus whatever was updated for the reassoc request.
    \param pModifyProfileFields - receives the connected-profile fields
                                  whose change can cause a reassoc
    \return eHalStatus
  -------------------------------------------------------------------------------*/
eHalStatus sme_GetModifyProfileFields(tHalHandle hHal, tANI_U8 sessionId,
                                     tCsrRoamModifyProfileFields * pModifyProfileFields)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_GET_MODPROFFIELDS, sessionId, 0));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    if (CSR_IS_SESSION_VALID(pMac, sessionId))
        status = csrGetModifyProfileFields(pMac, sessionId, pModifyProfileFields);
    else
        status = eHAL_STATUS_INVALID_PARAMETER;

    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_HT40StopOBSSScan
    \brief Command to stop the OBSS scan. Implemented for debugging only:
           per spec, the OBSS scan should not be stopped while operating in
           2.4GHz.
    \param sessionId - session to stop the OBSS scan on
    \return eHalStatus
  -------------------------------------------------------------------------------*/
eHalStatus sme_HT40StopOBSSScan(tHalHandle hHal, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    smsLog(pMac, LOG2, FL("enter"));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    if (CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        csrHT40StopOBSSScan(pMac, sessionId);
    }
    else
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Invalid session sessionId %d", __func__,sessionId);
        status = eHAL_STATUS_INVALID_PARAMETER;
    }

    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/*--------------------------------------------------------------------------
    \fn sme_SetConfigPowerSave
    \brief Wrapper to change the power save configuration in the SME (PMC)
           module. For BMPS-related configuration this also updates the CFG
           and sends a message to FW to pick up the new values. Note: this
           only updates the configuration; it does not enable the specified
           power save mode.
    \param hHal - The handle returned by macOpen.
    \param psMode - power saving mode being modified
    \param pConfigParams - caller-allocated tPmcSmpsConfigParams,
                           tPmcBmpsConfigParams or tPmcImpsConfigParams
    \return eHalStatus
  --------------------------------------------------------------------------*/
eHalStatus sme_SetConfigPowerSave(tHalHandle hHal, tPmcPowerSavingMode psMode,
                                  void *pConfigParams)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SET_CONFIG_PWRSAVE, NO_SESSION, 0));

    /* Nothing to apply without a parameter structure. */
    if (NULL == pConfigParams ) {
        smsLog( pMac, LOGE, "Empty config param structure for PMC, "
                "nothing to update");
        return eHAL_STATUS_FAILURE;
    }

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcSetConfigPowerSave(hHal, psMode, pConfigParams);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/*--------------------------------------------------------------------------
    \fn sme_GetConfigPowerSave
    \brief Wrapper to retrieve the power save configuration from the SME
           (PMC) module.
    \param hHal - The handle returned by macOpen.
    \param psMode - power saving mode
    \param pConfigParams - caller-allocated tPmcSmpsConfigParams,
                           tPmcBmpsConfigParams or tPmcImpsConfigParams
    \return eHalStatus
  --------------------------------------------------------------------------*/
eHalStatus sme_GetConfigPowerSave(tHalHandle hHal, tPmcPowerSavingMode psMode,
                                  void *pConfigParams)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_GET_CONFIG_PWRSAVE, NO_SESSION, 0));

    /* Nothing to fill without a parameter structure. */
    if (NULL == pConfigParams ) {
        smsLog( pMac, LOGE, "Empty config param structure for PMC, "
                "nothing to update");
        return eHAL_STATUS_FAILURE;
    }

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcGetConfigPowerSave(hHal, psMode, pConfigParams);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_SignalPowerEvent
    \brief Signals PMC that a power event has occurred. Used for putting the
           chip into deep sleep mode.
    \param hHal - The handle returned by macOpen.
    \param event - the event that has occurred
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_SignalPowerEvent (tHalHandle hHal, tPmcPowerEvent event)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcSignalPowerEvent(hHal, event);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_EnablePowerSave
    \brief Enables one of the power saving modes.
    \param hHal - The handle returned by macOpen.
    \param psMode - The power saving mode to enable. If BMPS mode is enabled
                    while the chip is operating in Full Power, PMC will
                    start a timer that will try to put the chip in BMPS mode
                    after expiry.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_EnablePowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_ENABLE_PWRSAVE, NO_SESSION, psMode));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcEnablePowerSave(hHal, psMode);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_DisablePowerSave
    \brief Disables one of the power saving modes.
    \param hHal - The handle returned by macOpen.
    \param psMode - The power saving mode to disable. Disabling does not
                    imply the device is brought out of the current PS mode;
                    this is purely a configuration API.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_DisablePowerSave (tHalHandle hHal, tPmcPowerSavingMode psMode)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_DISABLE_PWRSAVE, NO_SESSION, psMode));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcDisablePowerSave(hHal, psMode);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_SetHostPowerSave
    \brief Enables BMPS logic to be controlled by User level apps
    \param hHal - The handle returned by macOpen.
    \param psMode - TRUE to let user-level apps control BMPS entry/exit,
                    FALSE to leave control with the driver. This is purely a
                    configuration API; it does not change the current PS
                    state.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_SetHostPowerSave (tHalHandle hHal, v_BOOL_t psMode)
{
   tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

   /* Record the host-controlled power-save flag. The previous code always
      returned eHAL_STATUS_FAILURE even though the flag was successfully
      updated; report success so callers can distinguish real failures. */
   pMac->pmc.isHostPsEn = psMode;
   return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
    \fn sme_StartAutoBmpsTimer
    \brief Starts a timer that periodically polls all registered modules for
           entry into BMPS mode. The timer is started only if BMPS is
           enabled and the device is in full power.
    \param hHal - The handle returned by macOpen.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_StartAutoBmpsTimer ( tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_START_AUTO_BMPSTIMER, NO_SESSION, 0));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcStartAutoBmpsTimer(hHal);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_StopAutoBmpsTimer
    \brief Stops the Auto BMPS timer started by sme_StartAutoBmpsTimer.
           Stopping the timer does not cause a device state change — only
           the timer is stopped. If "Full Power" is desired, use the
           sme_RequestFullPower API.
    \param hHal - The handle returned by macOpen.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_StopAutoBmpsTimer ( tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_STOP_AUTO_BMPSTIMER, NO_SESSION, 0));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcStopAutoBmpsTimer(hHal);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_QueryPowerState
    \brief Returns the current power state of the device.
    \param hHal - The handle returned by macOpen.
    \param pPowerState - location to return power state (LOW or HIGH)
    \param pSwWlanSwitchState - location to return SW WLAN switch state
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_QueryPowerState (
   tHalHandle hHal,
   tPmcPowerState *pPowerState,
   tPmcSwitchState *pSwWlanSwitchState)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    /* The hardware WLAN switch state is not requested (NULL). */
    status = pmcQueryPowerState(hHal, pPowerState, NULL, pSwWlanSwitchState);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_IsPowerSaveEnabled
    \brief Checks if the device is able to enter a particular power save mode
    This does not imply that the device is in a particular PS mode
    \param hHal - The handle returned by macOpen.
    \param psMode - the power saving mode
    \return tANI_BOOLEAN - TRUE if the mode is enabled; FALSE if it is not,
            or if the SME global lock could not be acquired
  ---------------------------------------------------------------------------*/
tANI_BOOLEAN sme_IsPowerSaveEnabled (tHalHandle hHal, tPmcPowerSavingMode psMode)
{
   eHalStatus status = eHAL_STATUS_FAILURE;
   tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
   tANI_BOOLEAN result = false;
   MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_IS_PWRSAVE_ENABLED, NO_SESSION, psMode));
   status = sme_AcquireGlobalLock( &pMac->sme );
   if ( HAL_STATUS_SUCCESS( status ) )
   {
      result = pmcIsPowerSaveEnabled(hHal, psMode);
      sme_ReleaseGlobalLock( &pMac->sme );
      return result;
   }
   /* Lock acquisition failed: report "not enabled" rather than an error. */
   return false;
}
/* ---------------------------------------------------------------------------
    \fn sme_RequestFullPower
    \brief Request that the device be brought to the full power state. When
           the device enters Full Power, PMC starts a BMPS timer if the BMPS
           PS mode is enabled. On timer expiry PMC attempts to put the
           device into BMPS mode if all of the following hold:
           - BMPS mode is enabled
           - polling of all modules through the Power Save Check routine
             passes
           - the STA is associated to an access point
    \param hHal - The handle returned by macOpen.
    \param callbackRoutine - invoked on success/failure
    \param callbackContext - opaque context passed back to the callback
    \param fullPowerReason - reason code for the full power request
    \return eHalStatus - status
      eHAL_STATUS_SUCCESS - device brought to full power state
      eHAL_STATUS_FAILURE - device cannot be brought to full power state
      eHAL_STATUS_PMC_PENDING - device is being brought to full power state
  ---------------------------------------------------------------------------*/
eHalStatus sme_RequestFullPower (
   tHalHandle hHal,
   void (*callbackRoutine) (void *callbackContext, eHalStatus status),
   void *callbackContext,
   tRequestFullPowerReason fullPowerReason)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_REQUEST_FULLPOWER, NO_SESSION, fullPowerReason));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcRequestFullPower(hHal, callbackRoutine, callbackContext,
                                 fullPowerReason);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_RequestBmps
    \brief Request that the device be put in the BMPS state. The request is
           accepted only if BMPS mode is enabled and the power save check
           routine passes.
    \param hHal - The handle returned by macOpen.
    \param callbackRoutine - invoked on success/failure
    \param callbackContext - opaque context passed back to the callback
    \return eHalStatus
      eHAL_STATUS_SUCCESS - device is in BMPS state
      eHAL_STATUS_FAILURE - device cannot be brought to BMPS state
      eHAL_STATUS_PMC_PENDING - device is being brought to BMPS state
  ---------------------------------------------------------------------------*/
eHalStatus sme_RequestBmps (
   tHalHandle hHal,
   void (*callbackRoutine) (void *callbackContext, eHalStatus status),
   void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_REQUEST_BMPS, NO_SESSION, 0));

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcRequestBmps(hHal, callbackRoutine, callbackContext);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_SetDHCPTillPowerActiveFlag
    \brief Sets/Clears DHCP related flag in PMC to disable/enable auto BMPS
    entry by PMC
    \param hHal - The handle returned by macOpen.
    \param flag - non-zero keeps the device in power-active (no auto BMPS)
                  until DHCP completes; zero re-enables auto BMPS entry
  ---------------------------------------------------------------------------*/
void sme_SetDHCPTillPowerActiveFlag(tHalHandle hHal, tANI_U8 flag)
{
   tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
   MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SET_DHCP_FLAG, NO_SESSION, flag));
   // Set/Clear the DHCP flag which will disable/enable auto BMPS entry by PMC
   pMac->pmc.remainInPowerActiveTillDHCP = flag;
}
/* ---------------------------------------------------------------------------
    \fn sme_StartUapsd
    \brief Request that the device be put in the UAPSD state. If the device
           is in Full Power it is first put in BMPS mode and then into UAPSD
           mode.
    \param hHal - The handle returned by macOpen.
    \param callbackRoutine - invoked on success/failure
    \param callbackContext - opaque context passed back to the callback
      eHAL_STATUS_SUCCESS - device is in UAPSD state
      eHAL_STATUS_FAILURE - device cannot be brought to UAPSD state
      eHAL_STATUS_PMC_PENDING - device is being brought to UAPSD state
      eHAL_STATUS_PMC_DISABLED - UAPSD is disabled or BMPS mode is disabled
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_StartUapsd (
   tHalHandle hHal,
   void (*callbackRoutine) (void *callbackContext, eHalStatus status),
   void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcStartUapsd(hHal, callbackRoutine, callbackContext);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_StopUapsd
    \brief Request that the device be brought out of the UAPSD state. The
           device is put back in the BMPS state after stop UAPSD completes.
    \param hHal - The handle returned by macOpen.
    \return eHalStatus
      eHAL_STATUS_SUCCESS - device is out of UAPSD and back in BMPS state
      eHAL_STATUS_FAILURE - device cannot be brought out of UAPSD state
  ---------------------------------------------------------------------------*/
eHalStatus sme_StopUapsd (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcStopUapsd(hHal);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_RequestStandby
    \brief Request that the device be put in standby. It is HDD's
           responsibility to bring the chip to full power and disassociate
           before calling this API.
    \param hHal - The handle returned by macOpen.
    \param callbackRoutine - invoked on success/failure
    \param callbackContext - opaque context passed back to the callback
    \return eHalStatus
      eHAL_STATUS_SUCCESS - device is in Standby mode
      eHAL_STATUS_FAILURE - device cannot be put in standby mode
      eHAL_STATUS_PMC_PENDING - device is being put in standby mode
  ---------------------------------------------------------------------------*/
eHalStatus sme_RequestStandby (
   tHalHandle hHal,
   void (*callbackRoutine) (void *callbackContext, eHalStatus status),
   void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_REQUEST_STANDBY, NO_SESSION, 0));
    smsLog( pMac, LOG1, FL(" called") );

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (!HAL_STATUS_SUCCESS(status))
        return status;

    status = pmcRequestStandby(hHal, callbackRoutine, callbackContext);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_RegisterPowerSaveCheck
\brief Register a power save check routine that is called whenever
the device is about to enter one of the power save modes.
\param hHal - The handle returned by macOpen.
\param checkRoutine - Power save check routine to be registered
\return eHalStatus
eHAL_STATUS_SUCCESS - successfully registered
eHAL_STATUS_FAILURE - not successfully registered
---------------------------------------------------------------------------*/
eHalStatus sme_RegisterPowerSaveCheck (
  tHalHandle hHal,
  tANI_BOOLEAN (*checkRoutine) (void *checkContext), void *checkContext)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret = sme_AcquireGlobalLock(&mac->sme);

    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* Hand the power-save veto routine over to PMC. */
    ret = pmcRegisterPowerSaveCheck(hHal, checkRoutine, checkContext);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_Register11dScanDoneCallback
\brief Register a routine of type csrScanCompleteCallback which is
called whenever an 11d scan is done
\param hHal - The handle returned by macOpen.
\param callback - 11d scan complete routine to be registered
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_Register11dScanDoneCallback (
  tHalHandle hHal,
  csrScanCompleteCallback callback)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    /* Plain assignment; no SME lock is taken here (matches legacy
       behaviour of this setter). */
    mac->scan.callback11dScanDone = callback;
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn sme_DeregisterPowerSaveCheck
\brief Deregister a power save check routine
\param hHal - The handle returned by macOpen.
\param checkRoutine - Power save check routine to be deregistered
\return eHalStatus
eHAL_STATUS_SUCCESS - successfully deregistered
eHAL_STATUS_FAILURE - not successfully deregistered
---------------------------------------------------------------------------*/
eHalStatus sme_DeregisterPowerSaveCheck (
  tHalHandle hHal,
  tANI_BOOLEAN (*checkRoutine) (void *checkContext))
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret = sme_AcquireGlobalLock(&mac->sme);

    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* Remove the previously registered power-save check from PMC. */
    ret = pmcDeregisterPowerSaveCheck(hHal, checkRoutine);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_RegisterDeviceStateUpdateInd
\brief Register a callback routine that is called whenever
the device enters a new device state (Full Power, BMPS, UAPSD)
\param hHal - The handle returned by macOpen.
\param callbackRoutine - Callback routine to be registered
\param callbackContext - Cookie to be passed back during callback
\return eHalStatus
eHAL_STATUS_SUCCESS - successfully registered
eHAL_STATUS_FAILURE - not successfully registered
---------------------------------------------------------------------------*/
eHalStatus sme_RegisterDeviceStateUpdateInd (
  tHalHandle hHal,
  void (*callbackRoutine) (void *callbackContext, tPmcState pmcState),
  void *callbackContext)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret = sme_AcquireGlobalLock(&mac->sme);

    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* Register for PMC device-state change notifications. */
    ret = pmcRegisterDeviceStateUpdateInd(hHal, callbackRoutine,
                                          callbackContext);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_DeregisterDeviceStateUpdateInd
\brief Deregister a routine that was registered for device state changes
\param hHal - The handle returned by macOpen.
\param callbackRoutine - Callback routine to be deregistered
\return eHalStatus
eHAL_STATUS_SUCCESS - successfully deregistered
eHAL_STATUS_FAILURE - not successfully deregistered
---------------------------------------------------------------------------*/
eHalStatus sme_DeregisterDeviceStateUpdateInd (
  tHalHandle hHal,
  void (*callbackRoutine) (void *callbackContext, tPmcState pmcState))
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret = sme_AcquireGlobalLock(&mac->sme);

    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* Unregister the PMC device-state change callback. */
    ret = pmcDeregisterDeviceStateUpdateInd(hHal, callbackRoutine);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_WowlAddBcastPattern
\brief Add a pattern for Pattern Byte Matching in Wowl mode. Firmware will
do a pattern match on these patterns when Wowl is enabled during BMPS
mode. Note that Firmware performs the pattern matching only on
broadcast frames and while Libra is in BMPS mode.
\param hHal - The handle returned by macOpen.
\param pattern - Pattern to be added
\return eHalStatus
eHAL_STATUS_FAILURE Cannot add pattern
eHAL_STATUS_SUCCESS Request accepted.
---------------------------------------------------------------------------*/
eHalStatus sme_WowlAddBcastPattern (
  tHalHandle hHal,
  tpSirWowlAddBcastPtrn pattern,
  tANI_U8 sessionId)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_WOWL_ADDBCAST_PATTERN, sessionId, 0));

    ret = sme_AcquireGlobalLock(&mac->sme);
    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* PMC forwards the WoWL broadcast pattern down to firmware. */
    ret = pmcWowlAddBcastPattern(hHal, pattern, sessionId);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_WowlDelBcastPattern
\brief Delete a pattern that was added for Pattern Byte Matching.
\param hHal - The handle returned by macOpen.
\param pattern - Pattern to be deleted
\return eHalStatus
eHAL_STATUS_FAILURE Cannot delete pattern
eHAL_STATUS_SUCCESS Request accepted.
---------------------------------------------------------------------------*/
eHalStatus sme_WowlDelBcastPattern (
  tHalHandle hHal,
  tpSirWowlDelBcastPtrn pattern,
  tANI_U8 sessionId)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_WOWL_DELBCAST_PATTERN, sessionId, 0));

    ret = sme_AcquireGlobalLock(&mac->sme);
    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* PMC removes the previously added WoWL broadcast pattern. */
    ret = pmcWowlDelBcastPattern(hHal, pattern, sessionId);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_EnterWowl
\brief This is the SME API exposed to HDD to request enabling of WOWL mode.
WoWLAN works on top of BMPS mode. If the device is not in BMPS mode,
SME will will cache the information that WOWL has been enabled and
attempt to put the device in BMPS. On entry into BMPS, SME will
enable the WOWL mode.
Note 1: If we exit BMPS mode (someone requests full power), we
will NOT resume WOWL when we go back to BMPS again. Request for full
power (while in WOWL mode) means disable WOWL and go to full power.
Note 2: Both UAPSD and WOWL work on top of BMPS. On entry into BMPS, SME
will give priority to UAPSD and enable only UAPSD if both UAPSD and WOWL
are required. Currently there is no requirement or use case to support
UAPSD and WOWL at the same time.
\param hHal - The handle returned by macOpen.
\param enterWowlCallbackRoutine - Callback routine provided by HDD.
Used for success/failure notification by SME
\param enterWowlCallbackContext - A cookie passed by HDD, that is passed back to HDD
at the time of callback.
\param wakeReasonIndCB - Callback routine provided by HDD.
Used for Wake Reason Indication by SME
\param wakeReasonIndCBContext - A cookie passed by HDD, that is passed back to HDD
at the time of callback.
\return eHalStatus
eHAL_STATUS_SUCCESS Device is already in WoWLAN mode
eHAL_STATUS_FAILURE Device cannot enter WoWLAN mode.
eHAL_STATUS_PMC_PENDING Request accepted. SME will enable WOWL after
BMPS mode is entered.
---------------------------------------------------------------------------*/
/* Request WoWL entry; parameter list is conditionally extended with wake
   reason callbacks when WLAN_WAKEUP_EVENTS is compiled in.  The actual
   state handling (caching the request until BMPS is entered, etc.) lives
   in pmcEnterWowl. */
eHalStatus sme_EnterWowl (
    tHalHandle hHal,
    void (*enterWowlCallbackRoutine) (void *callbackContext, eHalStatus status),
    void *enterWowlCallbackContext,
#ifdef WLAN_WAKEUP_EVENTS
    void (*wakeIndicationCB) (void *callbackContext, tpSirWakeReasonInd pWakeReasonInd),
    void *wakeIndicationCBContext,
#endif // WLAN_WAKEUP_EVENTS
    tpSirSmeWowlEnterParams wowlEnterParams, tANI_U8 sessionId)
{
    eHalStatus status = eHAL_STATUS_FAILURE;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
              TRACE_CODE_SME_RX_HDD_ENTER_WOWL, sessionId, 0));
    /* All PMC interaction happens under the SME global lock; on lock
       failure the acquire status is returned unchanged. */
    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
       /* Argument list must mirror this function's own #ifdef layout. */
       status = pmcEnterWowl (hHal, enterWowlCallbackRoutine, enterWowlCallbackContext,
#ifdef WLAN_WAKEUP_EVENTS
                              wakeIndicationCB, wakeIndicationCBContext,
#endif // WLAN_WAKEUP_EVENTS
                              wowlEnterParams, sessionId);
       sme_ReleaseGlobalLock( &pMac->sme );
    }
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_ExitWowl
\brief This is the SME API exposed to HDD to request exit from WoWLAN mode.
SME will initiate exit from WoWLAN mode and device will be put in BMPS
mode.
\param hHal - The handle returned by macOpen.
\return eHalStatus
eHAL_STATUS_FAILURE Device cannot exit WoWLAN mode.
eHAL_STATUS_SUCCESS Request accepted to exit WoWLAN mode.
---------------------------------------------------------------------------*/
eHalStatus sme_ExitWowl (tHalHandle hHal, tWowlExitSource wowlExitSrc)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_EXIT_WOWL, NO_SESSION, 0));

    ret = sme_AcquireGlobalLock(&mac->sme);
    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* PMC takes the device out of WoWLAN back to BMPS. */
    ret = pmcExitWowl(hHal, wowlExitSrc);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_RoamSetKey
\brief To set encryption key. This function should be called only when connected
This is an asynchronous API.
\param pSetKeyInfo - pointer to a caller allocated object of tCsrSetContextInfo
\param pRoamId Upon success return, this is the id caller can use to identify the request in roamcallback
\return eHalStatus SUCCESS Roam callback will be called indicate actually results
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
/* Set an encryption key for the given session (asynchronous; results are
   delivered through the roam callback).  Returns eHAL_STATUS_FAILURE on
   invalid key length, unknown session, or lock failure.
   Fixes vs. original: guards pSession->pCurRoamProfile against NULL before
   dereferencing it in the SoftAP WEP-default-key path, and drops the
   redundant else { return status; } in the DEBUG_ROAM_DELAY block. */
eHalStatus sme_RoamSetKey(tHalHandle hHal, tANI_U8 sessionId, tCsrRoamSetKey *pSetKey, tANI_U32 *pRoamId)
{
   eHalStatus status = eHAL_STATUS_FAILURE;
   tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
   tANI_U32 roamId;
   tANI_U32 i;
   tCsrRoamSession *pSession = NULL;

   MTRACE(vos_trace(VOS_MODULE_ID_SME,
          TRACE_CODE_SME_RX_HDD_SET_KEY, sessionId, 0));
   if (pSetKey->keyLength > CSR_MAX_KEY_LEN)
   {
      smsLog(pMac, LOGE, FL("Invalid key length %d"), pSetKey->keyLength);
      return eHAL_STATUS_FAILURE;
   }
   /* Once Setkey is done, we can go in BMPS */
   if (pSetKey->keyLength)
   {
      pMac->pmc.remainInPowerActiveTillDHCP = FALSE;
      smsLog(pMac, LOG1, FL("Reset remainInPowerActiveTillDHCP"
                            " to allow BMPS"));
   }
   status = sme_AcquireGlobalLock( &pMac->sme );
   if ( HAL_STATUS_SUCCESS( status ) )
   {
      roamId = GET_NEXT_ROAM_ID(&pMac->roam);
      if (pRoamId)
      {
         *pRoamId = roamId;
      }
      smsLog(pMac, LOG2, FL("keyLength %d"), pSetKey->keyLength);
      for (i = 0; i < pSetKey->keyLength; i++)
         smsLog(pMac, LOG2, FL("%02x"), pSetKey->Key[i]);
      smsLog(pMac, LOG2, "\n sessionId=%d roamId=%d", sessionId, roamId);
      pSession = CSR_GET_SESSION(pMac, sessionId);
      if (!pSession)
      {
         smsLog(pMac, LOGE, FL(" session %d not found "), sessionId);
         sme_ReleaseGlobalLock( &pMac->sme );
         return eHAL_STATUS_FAILURE;
      }
      /* For SoftAP, a default TX WEP key implies static WEP; record the
         negotiated unicast cipher on the current roam profile.
         pCurRoamProfile can be NULL (e.g. during session teardown), so
         it must be checked before dereferencing. */
      if (CSR_IS_INFRA_AP(&pSession->connectedProfile) &&
          (NULL != pSession->pCurRoamProfile))
      {
         if (pSetKey->keyDirection == eSIR_TX_DEFAULT)
         {
            if ( ( eCSR_ENCRYPT_TYPE_WEP40 == pSetKey->encType ) ||
                 ( eCSR_ENCRYPT_TYPE_WEP40_STATICKEY == pSetKey->encType ))
            {
               pSession->pCurRoamProfile->negotiatedUCEncryptionType = eCSR_ENCRYPT_TYPE_WEP40_STATICKEY;
            }
            if ( ( eCSR_ENCRYPT_TYPE_WEP104 == pSetKey->encType ) ||
                 ( eCSR_ENCRYPT_TYPE_WEP104_STATICKEY == pSetKey->encType ))
            {
               pSession->pCurRoamProfile->negotiatedUCEncryptionType = eCSR_ENCRYPT_TYPE_WEP104_STATICKEY;
            }
         }
      }
      status = csrRoamSetKey ( pMac, sessionId, pSetKey, roamId );
      sme_ReleaseGlobalLock( &pMac->sme );
   }
#ifdef DEBUG_ROAM_DELAY
   /* Record PTK (pairwise) / GTK (group) set-request times for
      roam-delay profiling; other key directions are not recorded. */
   if (pSetKey->keyDirection == eSIR_TX_RX)
   {
      vos_record_roam_event(e_HDD_SET_PTK_REQ, NULL, 0);
   }
   else if (pSetKey->keyDirection == eSIR_RX_ONLY)
   {
      vos_record_roam_event(e_HDD_SET_GTK_REQ, NULL, 0);
   }
#endif
   return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_RoamRemoveKey
\brief To set encryption key. This is an asynchronous API.
\param pRemoveKey - pointer to a caller allocated object of tCsrRoamRemoveKey
\param pRoamId Upon success return, this is the id caller can use to identify the request in roamcallback
\return eHalStatus SUCCESS Roam callback will be called indicate actually results
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
eHalStatus sme_RoamRemoveKey(tHalHandle hHal, tANI_U8 sessionId,
                             tCsrRoamRemoveKey *pRemoveKey, tANI_U32 *pRoamId)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret;
    tANI_U32 id;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_REMOVE_KEY, sessionId, 0));

    ret = sme_AcquireGlobalLock(&mac->sme);
    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* Allocate a roam id so the caller can match the async result. */
    id = GET_NEXT_ROAM_ID(&mac->roam);
    if (pRoamId)
        *pRoamId = id;

    ret = csrRoamIssueRemoveKeyCommand(mac, sessionId, pRemoveKey, id);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_GetRssi
\brief a wrapper function that client calls to register a callback to get RSSI
\param callback - SME sends back the requested stats using the callback
\param staId - The station ID for which the stats is requested for
\param pContext - user context to be passed back along with the callback
\param pVosContext - vos context
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_GetRssi(tHalHandle hHal,
                       tCsrRssiCallback callback,
                       tANI_U8 staId, tCsrBssid bssId,
                       void *pContext, void* pVosContext)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret = sme_AcquireGlobalLock(&mac->sme);

    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* CSR delivers the RSSI asynchronously via the supplied callback. */
    ret = csrGetRssi(mac, callback, staId, bssId, pContext, pVosContext);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_GetSnr
\brief a wrapper function that client calls to register a callback to
get SNR
\param callback - SME sends back the requested stats using the callback
\param staId - The station ID for which the stats is requested for
\param pContext - user context to be passed back along with the callback
\param pVosContext - vos context
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_GetSnr(tHalHandle hHal,
                      tCsrSnrCallback callback,
                      tANI_U8 staId, tCsrBssid bssId,
                      void *pContext)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret = sme_AcquireGlobalLock(&mac->sme);

    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* CSR delivers the SNR asynchronously via the supplied callback. */
    ret = csrGetSnr(mac, callback, staId, bssId, pContext);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
#if defined WLAN_FEATURE_VOWIFI_11R || defined FEATURE_WLAN_ESE || defined(FEATURE_WLAN_LFR)
/* ---------------------------------------------------------------------------
\fn sme_GetRoamRssi
\brief a wrapper function that client calls to register a callback to get Roam RSSI
\param callback - SME sends back the requested stats using the callback
\param staId - The station ID for which the stats is requested for
\param pContext - user context to be passed back along with the callback
\param pVosContext - vos context
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_GetRoamRssi(tHalHandle hHal,
                           tCsrRssiCallback callback,
                           tANI_U8 staId, tCsrBssid bssId,
                           void *pContext, void* pVosContext)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret = sme_AcquireGlobalLock(&mac->sme);

    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* CSR reports the roam RSSI via the supplied callback. */
    ret = csrGetRoamRssi(mac, callback, staId, bssId, pContext, pVosContext);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
#endif
#if defined(FEATURE_WLAN_ESE) && defined(FEATURE_WLAN_ESE_UPLOAD)
/* ---------------------------------------------------------------------------
\fn sme_GetTsmStats
\brief a wrapper function that client calls to register a callback to get TSM Stats
\param callback - SME sends back the requested stats using the callback
\param staId - The station ID for which the stats is requested for
\param pContext - user context to be passed back along with the callback
\param pVosContext - vos context
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_GetTsmStats(tHalHandle hHal,
                           tCsrTsmStatsCallback callback,
                           tANI_U8 staId, tCsrBssid bssId,
                           void *pContext, void* pVosContext, tANI_U8 tid)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret = sme_AcquireGlobalLock(&mac->sme);

    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* CSR reports the TSM stats for the given TID via the callback. */
    ret = csrGetTsmStats(mac, callback, staId, bssId, pContext,
                         pVosContext, tid);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
#endif
/* ---------------------------------------------------------------------------
\fn sme_GetStatistics
\brief a wrapper function that client calls to register a callback to get
different PHY level statistics from CSR.
\param requesterId - different client requesting for statistics, HDD, UMA/GAN etc
\param statsMask - The different category/categories of stats requester is looking for
\param callback - SME sends back the requested stats using the callback
\param periodicity - If requester needs periodic update in millisec, 0 means
it's an one time request
\param cache - If requester is happy with cached stats
\param staId - The station ID for which the stats is requested for
\param pContext - user context to be passed back along with the callback
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_GetStatistics(tHalHandle hHal, eCsrStatsRequesterType requesterId,
                             tANI_U32 statsMask,
                             tCsrStatsCallback callback,
                             tANI_U32 periodicity, tANI_BOOLEAN cache,
                             tANI_U8 staId, void *pContext)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    eHalStatus ret;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_GET_STATS, NO_SESSION, periodicity));

    ret = sme_AcquireGlobalLock(&mac->sme);
    if (!HAL_STATUS_SUCCESS(ret))
        return ret;

    /* CSR drives the periodic / one-shot stats collection and invokes
       the callback with the requested categories. */
    ret = csrGetStatistics(mac, requesterId, statsMask, callback,
                           periodicity, cache, staId, pContext);
    sme_ReleaseGlobalLock(&mac->sme);
    return ret;
}
/* ---------------------------------------------------------------------------
\fn smeGetTLSTAState
\helper function to get the TL STA State whenever the function is called.
\param staId - The staID to be passed to the TL
to get the relevant TL STA State
\return the state as tANI_U16
---------------------------------------------------------------------------*/
tANI_U16 smeGetTLSTAState(tHalHandle hHal, tANI_U8 staId)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);
    tANI_U16 state = TL_INIT_STATE;

    /* On lock failure the default TL_INIT_STATE is returned. */
    if (HAL_STATUS_SUCCESS(sme_AcquireGlobalLock(&mac->sme)))
    {
        state = csrGetTLSTAState(mac, staId);
        sme_ReleaseGlobalLock(&mac->sme);
    }
    return state;
}
/* ---------------------------------------------------------------------------
\fn sme_GetCountryCode
\brief To return the current country code. If no country code is applied, default country code is
used to fill the buffer.
If 11d supported is turned off, an error is return and the last applied/default country code is used.
This is a synchronous API.
\param pBuf - pointer to a caller allocated buffer for returned country code.
\param pbLen For input, this parameter indicates how big is the buffer.
Upon return, this parameter has the number of bytes for country. If pBuf
doesn't have enough space, this function returns
fail status and this parameter contains the number that is needed.
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
eHalStatus sme_GetCountryCode(tHalHandle hHal, tANI_U8 *pBuf, tANI_U8 *pbLen)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_GET_CNTRYCODE, NO_SESSION, 0));
    /* CSR fills pBuf and updates *pbLen with the byte count. */
    return csrGetCountryCode(mac, pBuf, pbLen);
}
/* ---------------------------------------------------------------------------
\fn sme_SetCountryCode
\brief To change the current/default country code.
If 11d supported is turned off, an error is return.
This is a synchronous API.
\param pCountry - pointer to a caller allocated buffer for the country code.
\param pfRestartNeeded A pointer to caller allocated memory, upon successful return, it indicates
whether a reset is required.
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
eHalStatus sme_SetCountryCode(tHalHandle hHal, tANI_U8 *pCountry, tANI_BOOLEAN *pfRestartNeeded)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SET_CNTRYCODE, NO_SESSION, 0));
    /* CSR applies the new code and reports whether a restart is needed. */
    return csrSetCountryCode(mac, pCountry, pfRestartNeeded);
}
/* ---------------------------------------------------------------------------
\fn sme_ResetCountryCodeInformation
\brief this function is to reset the country code current being used back to EEPROM default
this includes channel list and power setting. This is a synchronous API.
\param pfRestartNeeded - pointer to a caller allocated space. Upon successful return, it indicates whether
a restart is needed to apply the change
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_ResetCountryCodeInformation(tHalHandle hHal, tANI_BOOLEAN *pfRestartNeeded)
{
    /* Thin pass-through to CSR, which restores the EEPROM defaults. */
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    return csrResetCountryCodeInformation(mac, pfRestartNeeded);
}
/* ---------------------------------------------------------------------------
\fn sme_GetSupportedCountryCode
\brief this function is to get a list of the country code current being supported
\param pBuf - Caller allocated buffer with at least 3 bytes, upon success return,
this has the country code list. 3 bytes for each country code. This may be NULL if
caller wants to know the needed byte count.
\param pbLen - Caller allocated, as input, it indicates the length of pBuf. Upon success return,
this contains the length of the data in pBuf. If pbuf is NULL, as input, *pbLen should be 0.
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_GetSupportedCountryCode(tHalHandle hHal, tANI_U8 *pBuf, tANI_U32 *pbLen)
{
    /* Thin pass-through to CSR's supported country-code list query. */
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    return csrGetSupportedCountryCode(mac, pBuf, pbLen);
}
/* ---------------------------------------------------------------------------
\fn sme_GetCurrentRegulatoryDomain
\brief this function is to get the current regulatory domain. This is a synchronous API.
This function must be called after CFG is downloaded and all the band/mode setting already passed into
SME. The function fails if 11d support is turned off.
\param pDomain - Caller allocated buffer to return the current domain.
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
eHalStatus sme_GetCurrentRegulatoryDomain(tHalHandle hHal, v_REGDOMAIN_t *pDomain)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    /* Guard clauses: NULL output buffer, then 11d support required. */
    if (!pDomain)
        return eHAL_STATUS_INVALID_PARAMETER;
    if (!csrIs11dSupported(mac))
        return eHAL_STATUS_FAILURE;

    *pDomain = csrGetCurrentRegulatoryDomain(mac);
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn sme_SetRegulatoryDomain
\brief this function is to set the current regulatory domain.
This function must be called after CFG is downloaded and all the band/mode setting already passed into
SME. This is a synchronous API.
\param domainId - indicate the domain (defined in the driver) needs to set to.
See v_REGDOMAIN_t for definition
\param pfRestartNeeded - pointer to a caller allocated space. Upon successful return, it indicates whether
a restart is needed to apply the change
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_SetRegulatoryDomain(tHalHandle hHal, v_REGDOMAIN_t domainId, tANI_BOOLEAN *pfRestartNeeded)
{
    /* Thin pass-through to CSR, which applies the regulatory domain. */
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    return csrSetRegulatoryDomain(mac, domainId, pfRestartNeeded);
}
/* ---------------------------------------------------------------------------
\fn sme_GetRegulatoryDomainForCountry
\brief To return a regulatory domain base on a country code. This is a synchronous API.
\param pCountry - pointer to a caller allocated buffer for input country code.
\param pDomainId Upon successful return, it is the domain that country belongs to.
If it is NULL, returning success means that the country code is known.
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
eHalStatus sme_GetRegulatoryDomainForCountry(tHalHandle hHal, tANI_U8 *pCountry, v_REGDOMAIN_t *pDomainId)
{
    /* Query-only lookup (COUNTRY_QUERY source); pDomainId may be NULL
       when the caller only wants to know the code is recognised. */
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    return csrGetRegulatoryDomainForCountry(mac, pCountry, pDomainId,
                                            COUNTRY_QUERY);
}
/* ---------------------------------------------------------------------------
\fn sme_GetSupportedRegulatoryDomains
\brief To return a list of supported regulatory domains. This is a synchronous API.
\param pDomains - pointer to a caller allocated buffer for returned regulatory domains.
\param pNumDomains For input, this parameter indicates howm many domains pDomains can hold.
Upon return, this parameter has the number for supported domains. If pDomains
doesn't have enough space for all the supported domains, this function returns
fail status and this parameter contains the number that is needed.
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
/* Return the fixed list of supported regulatory domains.  *pNumDomains is
   an in/out parameter: in = capacity of pDomains, out = entries needed. */
eHalStatus sme_GetSupportedRegulatoryDomains(tHalHandle hHal, v_REGDOMAIN_t *pDomains, tANI_U32 *pNumDomains)
{
    eHalStatus status = eHAL_STATUS_INVALID_PARAMETER;
    //We support all domains for now
    if( pNumDomains )
    {
        /* Success only when the caller's buffer can hold every domain. */
        if( NUM_REG_DOMAINS <= *pNumDomains )
        {
            status = eHAL_STATUS_SUCCESS;
        }
        /* Always report the required count, even on failure. */
        *pNumDomains = NUM_REG_DOMAINS;
    }
    if( HAL_STATUS_SUCCESS( status ) )
    {
        if( pDomains )
        {
            /* NOTE(review): exactly 9 entries are written below; this
               assumes NUM_REG_DOMAINS == 9 -- confirm against the
               v_REGDOMAIN_t definition if the enum ever changes. */
            pDomains[0] = REGDOMAIN_FCC;
            pDomains[1] = REGDOMAIN_ETSI;
            pDomains[2] = REGDOMAIN_JAPAN;
            pDomains[3] = REGDOMAIN_WORLD;
            pDomains[4] = REGDOMAIN_N_AMER_EXC_FCC;
            pDomains[5] = REGDOMAIN_APAC;
            pDomains[6] = REGDOMAIN_KOREA;
            pDomains[7] = REGDOMAIN_HI_5GHZ;
            pDomains[8] = REGDOMAIN_NO_5GHZ;
        }
        else
        {
            status = eHAL_STATUS_INVALID_PARAMETER;
        }
    }
    return ( status );
}
//some support functions
/* Helper: report whether 802.11d support is currently enabled. */
tANI_BOOLEAN sme_Is11dSupported(tHalHandle hHal)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    return csrIs11dSupported(mac);
}
/* Helper: report whether 802.11h support is currently enabled. */
tANI_BOOLEAN sme_Is11hSupported(tHalHandle hHal)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    return csrIs11hSupported(mac);
}
/* Helper: report whether WMM support is currently enabled. */
tANI_BOOLEAN sme_IsWmmSupported(tHalHandle hHal)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    return csrIsWmmSupported(mac);
}
//Upper layer to get the list of the base channels to scan for passively 11d info from csr
eHalStatus sme_ScanGetBaseChannels( tHalHandle hHal, tCsrChannelInfo * pChannelInfo )
{
    /* Fetch the base channel list used for passive 11d scanning. */
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    return csrScanGetBaseChannels(mac, pChannelInfo);
}
/* ---------------------------------------------------------------------------
\fn sme_ChangeCountryCode
\brief Change Country code from upperlayer during WLAN driver operation.
This is a synchronous API.
\param hHal - The handle returned by macOpen.
\param pCountry New Country Code String
\param sendRegHint If we want to send reg hint to nl80211
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
/* Post a country-code change request to the SME message queue.  The
   request is rejected when 11d is enabled and the supplicant country
   code does not have priority.  Ownership of the allocated message is
   transferred to the MQ on successful post; it is freed here on post
   failure. */
eHalStatus sme_ChangeCountryCode( tHalHandle hHal,
                                  tSmeChangeCountryCallback callback,
                                  tANI_U8 *pCountry,
                                  void *pContext,
                                  void* pVosContext,
                                  tAniBool countryFromUserSpace,
                                  tAniBool sendRegHint )
{
   eHalStatus status = eHAL_STATUS_FAILURE;
   tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
   vos_msg_t msg;
   tAniChangeCountryCodeReq *pMsg;
   MTRACE(vos_trace(VOS_MODULE_ID_SME,
             TRACE_CODE_SME_RX_HDD_CHANGE_CNTRYCODE, NO_SESSION, 0));
   status = sme_AcquireGlobalLock( &pMac->sme );
   if ( HAL_STATUS_SUCCESS( status ) )
   {
      smsLog(pMac, LOG1, FL(" called"));
      /* Reject when 11d is on and userspace lacks priority over the
         supplicant-provided code. */
      if ((pMac->roam.configParam.Is11dSupportEnabledOriginal == true) &&
          (!pMac->roam.configParam.fSupplicantCountryCodeHasPriority))
      {
          smsLog(pMac, LOGW, "Set Country Code Fail since the STA is associated and userspace does not have priority ");
          sme_ReleaseGlobalLock( &pMac->sme );
          status = eHAL_STATUS_FAILURE;
          return status;
      }
      pMsg = vos_mem_malloc(sizeof(tAniChangeCountryCodeReq));
      if ( NULL == pMsg )
      {
         smsLog(pMac, LOGE, " csrChangeCountryCode: failed to allocate mem for req");
         sme_ReleaseGlobalLock( &pMac->sme );
         return eHAL_STATUS_FAILURE;
      }
      /* msgType travels in big-endian per SME message convention. */
      pMsg->msgType = pal_cpu_to_be16((tANI_U16)eWNI_SME_CHANGE_COUNTRY_CODE);
      pMsg->msgLen = (tANI_U16)sizeof(tAniChangeCountryCodeReq);
      /* Full 3-byte country code (2 chars + indicator byte). */
      vos_mem_copy(pMsg->countryCode, pCountry, 3);
      pMsg->countryFromUserSpace = countryFromUserSpace;
      pMsg->sendRegHint = sendRegHint;
      pMsg->changeCCCallback = callback;
      pMsg->pDevContext = pContext;
      pMsg->pVosContext = pVosContext;
      msg.type = eWNI_SME_CHANGE_COUNTRY_CODE;
      msg.bodyptr = pMsg;
      msg.reserved = 0;
      if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MQ_ID_SME, &msg))
      {
          /* Post failed: message ownership stays here, so free it. */
          smsLog(pMac, LOGE, " sme_ChangeCountryCode failed to post msg to self ");
          vos_mem_free((void *)pMsg);
          status = eHAL_STATUS_FAILURE;
      }
      smsLog(pMac, LOG1, FL(" returned"));
      sme_ReleaseGlobalLock( &pMac->sme );
   }
   return (status);
}
/*--------------------------------------------------------------------------
\fn sme_GenericChangeCountryCode
\brief Change Country code from upperlayer during WLAN driver operation.
This is a synchronous API.
\param hHal - The handle returned by macOpen.
\param pCountry New Country Code String
\param reg_domain regulatory domain
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
-----------------------------------------------------------------------------*/
/* Post a generic country-code change (code + pre-resolved regulatory
   domain) to the SME message queue.  Message ownership transfers to the
   MQ on successful post; freed here on post failure. */
eHalStatus sme_GenericChangeCountryCode( tHalHandle hHal,
                                         tANI_U8 *pCountry,
                                         v_REGDOMAIN_t reg_domain)
{
    eHalStatus status = eHAL_STATUS_FAILURE;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    vos_msg_t msg;
    tAniGenericChangeCountryCodeReq *pMsg;
    if (NULL == pMac)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
                  "%s: pMac is null", __func__);
        return status;
    }
    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
        smsLog(pMac, LOG1, FL(" called"));
        pMsg = vos_mem_malloc(sizeof(tAniGenericChangeCountryCodeReq));
        if (NULL == pMsg)
        {
            smsLog(pMac, LOGE, " sme_GenericChangeCountryCode: failed to allocate mem for req");
            sme_ReleaseGlobalLock( &pMac->sme );
            return eHAL_STATUS_FAILURE;
        }
        /* msgType travels in big-endian per SME message convention. */
        pMsg->msgType = pal_cpu_to_be16((tANI_U16)eWNI_SME_GENERIC_CHANGE_COUNTRY_CODE);
        pMsg->msgLen = (tANI_U16)sizeof(tAniGenericChangeCountryCodeReq);
        /* Only 2 chars are copied; third byte is set to ASCII space. */
        vos_mem_copy(pMsg->countryCode, pCountry, 2);
        pMsg->countryCode[2] = ' '; /* For ASCII space */
        pMsg->domain_index = reg_domain;
        msg.type = eWNI_SME_GENERIC_CHANGE_COUNTRY_CODE;
        msg.bodyptr = pMsg;
        msg.reserved = 0;
        if (VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MQ_ID_SME, &msg))
        {
            /* Post failed: message ownership stays here, so free it. */
            smsLog(pMac, LOGE, "sme_GenericChangeCountryCode failed to post msg to self");
            vos_mem_free(pMsg);
            status = eHAL_STATUS_FAILURE;
        }
        smsLog(pMac, LOG1, FL(" returned"));
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_InitChannels
\brief Used to initialize CSR channel lists while driver loading
\param hHal - global pMac structure
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
eHalStatus sme_InitChannels(tHalHandle hHal)
{
    tpAniSirGlobal mac = PMAC_STRUCT(hHal);

    /* Defensive: bail out with failure if the MAC context is missing. */
    if (NULL == mac)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
                  "%s: pMac is null", __func__);
        return eHAL_STATUS_FAILURE;
    }
    return csrInitChannels(mac);
}
#ifdef CONFIG_ENABLE_LINUX_REG
/*-------------------------------------------------------------------------
\fn sme_InitChannelsForCC
\brief Used to issue regulatory hint to user
\param hHal - global pMac structure
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
--------------------------------------------------------------------------*/
/*
 * sme_InitChannelsForCC() - issue a regulatory hint to user space.
 *
 * @hHal: global pMac handle
 * @init: driver load phase passed through to CSR
 *
 * Returns the status from csrInitChannelsForCC(), or
 * eHAL_STATUS_FAILURE when the MAC context is NULL.
 */
eHalStatus sme_InitChannelsForCC(tHalHandle hHal, driver_load_type init)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    if (NULL == pMac) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
                  "%s: pMac is null", __func__);
        return eHAL_STATUS_FAILURE;
    }

    return csrInitChannelsForCC(pMac, init);
}
#endif
/* ---------------------------------------------------------------------------
\fn sme_DHCPStartInd
\brief API to signal the FW about the DHCP Start event.
\param hHal - HAL handle for device.
\param device_mode - mode(AP,SAP etc) of the device.
\param macAddr - MAC address of the device.
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
--------------------------------------------------------------------------*/
/*
 * sme_DHCPStartInd() - signal the firmware that DHCP has started.
 *
 * @hHal:        HAL handle for the device
 * @device_mode: mode (AP, SAP, etc.) of the device
 * @sessionId:   CSR session the DHCP exchange belongs to
 *
 * Posts a WDA_DHCP_START_IND message carrying the BSSID of the current
 * association to the WDA queue.
 *
 * Returns eHAL_STATUS_SUCCESS when the indication was queued, a
 * failure code otherwise.
 */
eHalStatus sme_DHCPStartInd( tHalHandle hHal,
                             tANI_U8 device_mode,
                             tANI_U8 sessionId )
{
    eHalStatus status;
    VOS_STATUS postStatus;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    vos_msg_t mqMsg;
    tAniDHCPInd *pDhcpInd;
    tCsrRoamSession *pSession;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS != status)
        return status;

    pSession = CSR_GET_SESSION( pMac, sessionId );
    if (!pSession) {
        smsLog(pMac, LOGE, FL("session %d not found "), sessionId);
        sme_ReleaseGlobalLock( &pMac->sme );
        return eHAL_STATUS_FAILURE;
    }

    pDhcpInd = (tAniDHCPInd*)vos_mem_malloc(sizeof(tAniDHCPInd));
    if (NULL == pDhcpInd) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for dhcp start", __func__);
        sme_ReleaseGlobalLock( &pMac->sme );
        return eHAL_STATUS_FAILURE;
    }

    pDhcpInd->msgType = WDA_DHCP_START_IND;
    pDhcpInd->msgLen = (tANI_U16)sizeof(tAniDHCPInd);
    pDhcpInd->device_mode = device_mode;
    /* Carry the BSSID of the current association. */
    vos_mem_copy(pDhcpInd->macAddr, pSession->connectedProfile.bssid,
                 sizeof(tSirMacAddr));

    mqMsg.type = WDA_DHCP_START_IND;
    mqMsg.bodyptr = pDhcpInd;
    mqMsg.reserved = 0;

    postStatus = vos_mq_post_message( VOS_MQ_ID_WDA, &mqMsg );
    if ( !VOS_IS_STATUS_SUCCESS(postStatus) ) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Post DHCP Start MSG fail", __func__);
        vos_mem_free(pDhcpInd);
        status = eHAL_STATUS_FAILURE;
    }

    sme_ReleaseGlobalLock( &pMac->sme );
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_DHCPStopInd
\brief API to signal the FW about the DHCP complete event.
\param hHal - HAL handle for device.
\param device_mode - mode(AP, SAP etc) of the device.
\param macAddr - MAC address of the device.
\return eHalStatus SUCCESS.
FAILURE or RESOURCES The API finished and failed.
--------------------------------------------------------------------------*/
/*
 * sme_DHCPStopInd() - signal the firmware that DHCP has completed.
 *
 * @hHal:        HAL handle for the device
 * @device_mode: mode (AP, SAP, etc.) of the device
 * @sessionId:   CSR session the DHCP exchange belongs to
 *
 * Posts a WDA_DHCP_STOP_IND message carrying the BSSID of the current
 * association to the WDA queue.
 *
 * Returns eHAL_STATUS_SUCCESS when the indication was queued, a
 * failure code otherwise.
 */
eHalStatus sme_DHCPStopInd( tHalHandle hHal,
                            tANI_U8 device_mode,
                            tANI_U8 sessionId )
{
    eHalStatus status;
    VOS_STATUS postStatus;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    vos_msg_t mqMsg;
    tAniDHCPInd *pDhcpInd;
    tCsrRoamSession *pSession;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS != status)
        return status;

    pSession = CSR_GET_SESSION( pMac, sessionId );
    if (!pSession) {
        smsLog(pMac, LOGE, FL("session %d not found "), sessionId);
        sme_ReleaseGlobalLock( &pMac->sme );
        return eHAL_STATUS_FAILURE;
    }

    pDhcpInd = (tAniDHCPInd*)vos_mem_malloc(sizeof(tAniDHCPInd));
    if (NULL == pDhcpInd) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for dhcp stop", __func__);
        sme_ReleaseGlobalLock( &pMac->sme );
        return eHAL_STATUS_FAILURE;
    }

    pDhcpInd->msgType = WDA_DHCP_STOP_IND;
    pDhcpInd->msgLen = (tANI_U16)sizeof(tAniDHCPInd);
    pDhcpInd->device_mode = device_mode;
    /* Carry the BSSID of the current association. */
    vos_mem_copy(pDhcpInd->macAddr, pSession->connectedProfile.bssid,
                 sizeof(tSirMacAddr));

    mqMsg.type = WDA_DHCP_STOP_IND;
    mqMsg.bodyptr = pDhcpInd;
    mqMsg.reserved = 0;

    postStatus = vos_mq_post_message( VOS_MQ_ID_WDA, &mqMsg );
    if ( !VOS_IS_STATUS_SUCCESS(postStatus) ) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Post DHCP Stop MSG fail", __func__);
        vos_mem_free(pDhcpInd);
        status = eHAL_STATUS_FAILURE;
    }

    sme_ReleaseGlobalLock( &pMac->sme );
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_BtcSignalBtEvent
\brief API to signal Bluetooth (BT) event to the WLAN driver. Based on the
BT event type and the current operating mode of Libra (full power,
BMPS, UAPSD etc), appropriate Bluetooth Coexistence (BTC) strategy
would be employed.
\param hHal - The handle returned by macOpen.
\param pBtEvent - Pointer to a caller allocated object of type tSmeBtEvent
Caller owns the memory and is responsible for freeing it.
\return VOS_STATUS
VOS_STATUS_E_FAILURE BT Event not passed to HAL. This can happen
if BTC execution mode is set to BTC_WLAN_ONLY
or BTC_PTA_ONLY.
VOS_STATUS_SUCCESS BT Event passed to HAL
---------------------------------------------------------------------------*/
/*
 * sme_BtcSignalBtEvent() - forward a Bluetooth event to the BTC module
 * under the SME global lock.  Compiled out when
 * WLAN_MDM_CODE_REDUCTION_OPT is defined, in which case
 * VOS_STATUS_E_FAILURE is returned unconditionally.
 *
 * @hHal:     handle returned by macOpen
 * @pBtEvent: caller-owned BT event descriptor
 */
VOS_STATUS sme_BtcSignalBtEvent (tHalHandle hHal, tpSmeBtEvent pBtEvent)
{
    VOS_STATUS status = VOS_STATUS_E_FAILURE;
#ifndef WLAN_MDM_CODE_REDUCTION_OPT
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_BTC_SIGNALEVENT, NO_SESSION, 0));
    if (sme_AcquireGlobalLock( &pMac->sme ) == eHAL_STATUS_SUCCESS) {
        status = btcSignalBTEvent (hHal, pBtEvent);
        sme_ReleaseGlobalLock( &pMac->sme );
    }
#endif
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_BtcSetConfig
\brief API to change the current Bluetooth Coexistence (BTC) configuration
This function should be invoked only after CFG download has completed.
Calling it after sme_HDDReadyInd is recommended.
\param hHal - The handle returned by macOpen.
\param pSmeBtcConfig - Pointer to a caller allocated object of type tSmeBtcConfig.
Caller owns the memory and is responsible for freeing it.
\return VOS_STATUS
VOS_STATUS_E_FAILURE Config not passed to HAL.
VOS_STATUS_SUCCESS Config passed to HAL
---------------------------------------------------------------------------*/
/*
 * sme_BtcSetConfig() - apply a new BTC configuration under the SME
 * global lock.  Compiled out when WLAN_MDM_CODE_REDUCTION_OPT is
 * defined, in which case VOS_STATUS_E_FAILURE is returned.
 *
 * @hHal:          handle returned by macOpen
 * @pSmeBtcConfig: caller-owned BTC configuration
 */
VOS_STATUS sme_BtcSetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig)
{
    VOS_STATUS status = VOS_STATUS_E_FAILURE;
#ifndef WLAN_MDM_CODE_REDUCTION_OPT
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_BTC_SETCONFIG, NO_SESSION, 0));
    if (sme_AcquireGlobalLock( &pMac->sme ) == eHAL_STATUS_SUCCESS) {
        status = btcSetConfig (hHal, pSmeBtcConfig);
        sme_ReleaseGlobalLock( &pMac->sme );
    }
#endif
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_BtcGetConfig
\brief API to retrieve the current Bluetooth Coexistence (BTC) configuration
\param hHal - The handle returned by macOpen.
\param pSmeBtcConfig - Pointer to a caller allocated object of type
tSmeBtcConfig. Caller owns the memory and is responsible
for freeing it.
\return VOS_STATUS
VOS_STATUS_E_FAILURE - failure
VOS_STATUS_SUCCESS success
---------------------------------------------------------------------------*/
/*
 * sme_BtcGetConfig() - fetch the current BTC configuration under the
 * SME global lock.  Compiled out when WLAN_MDM_CODE_REDUCTION_OPT is
 * defined, in which case VOS_STATUS_E_FAILURE is returned.
 *
 * @hHal:          handle returned by macOpen
 * @pSmeBtcConfig: caller-owned buffer filled with the configuration
 */
VOS_STATUS sme_BtcGetConfig (tHalHandle hHal, tpSmeBtcConfig pSmeBtcConfig)
{
    VOS_STATUS status = VOS_STATUS_E_FAILURE;
#ifndef WLAN_MDM_CODE_REDUCTION_OPT
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_BTC_GETCONFIG, NO_SESSION, 0));
    if (sme_AcquireGlobalLock( &pMac->sme ) == eHAL_STATUS_SUCCESS) {
        status = btcGetConfig (hHal, pSmeBtcConfig);
        sme_ReleaseGlobalLock( &pMac->sme );
    }
#endif
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_SetCfgPrivacy
\brief API to set configure privacy parameters
\param hHal - The handle returned by macOpen.
\param pProfile - Pointer CSR Roam profile.
\param fPrivacy - This parameter indicates status of privacy
\return void
---------------------------------------------------------------------------*/
/*
 * sme_SetCfgPrivacy() - push privacy configuration to CSR under the
 * SME global lock.
 *
 * @hHal:     handle returned by macOpen
 * @pProfile: CSR roam profile the setting applies to
 * @fPrivacy: privacy enabled / disabled
 */
void sme_SetCfgPrivacy( tHalHandle hHal,
                        tCsrRoamProfile *pProfile,
                        tANI_BOOLEAN fPrivacy
                        )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_SET_CFGPRIVACY, NO_SESSION, 0));
    if (sme_AcquireGlobalLock( &pMac->sme ) == eHAL_STATUS_SUCCESS) {
        csrSetCfgPrivacy(pMac, pProfile, fPrivacy);
        sme_ReleaseGlobalLock( &pMac->sme );
    }
}
#if defined WLAN_FEATURE_VOWIFI
/* ---------------------------------------------------------------------------
\fn sme_NeighborReportRequest
\brief API to request neighbor report.
\param hHal - The handle returned by macOpen.
\param pRrmNeighborReq - Pointer to a caller allocated object of type
tRrmNeighborReq. Caller owns the memory and is responsible
for freeing it.
\return VOS_STATUS
VOS_STATUS_E_FAILURE - failure
VOS_STATUS_SUCCESS success
---------------------------------------------------------------------------*/
/*
 * sme_NeighborReportRequest() - request an 802.11k neighbor report via
 * the RRM module, under the SME global lock.
 *
 * @hHal:            handle returned by macOpen
 * @sessionId:       session the request is issued on
 * @pRrmNeighborReq: caller-owned request descriptor
 * @callbackInfo:    callback invoked when the response arrives
 */
VOS_STATUS sme_NeighborReportRequest (tHalHandle hHal, tANI_U8 sessionId,
    tpRrmNeighborReq pRrmNeighborReq, tpRrmNeighborRspCallbackInfo callbackInfo)
{
    VOS_STATUS status = VOS_STATUS_E_FAILURE;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_NEIGHBOR_REPORTREQ, NO_SESSION, 0));
    if (sme_AcquireGlobalLock( &pMac->sme ) == eHAL_STATUS_SUCCESS) {
        status = sme_RrmNeighborReportRequest (hHal, sessionId, pRrmNeighborReq, callbackInfo);
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return (status);
}
#endif
//The following are debug APIs to support direct read/write register/memory
//They are placed in SME because HW cannot be access when in LOW_POWER state
//AND not connected. The knowledge and synchronization is done in SME
//sme_DbgReadRegister
//Caller needs to validate the input values
/*
 * sme_DbgReadRegister() - debug helper: read one hardware register.
 *
 * @hHal:      handle returned by macOpen
 * @regAddr:   register address (caller must validate)
 * @pRegValue: out parameter receiving the register contents
 *
 * In manufacturing (FTM) builds the register is read directly.  In
 * normal builds the read is attempted under the SME global lock and
 * only when the chip is reachable (connected, or not in low power).
 * When the read cannot be performed, *pRegValue is set to 0xDEADBEEF
 * and VOS_STATUS_SUCCESS is still returned — see the in-line comment
 * about the Qualky/pttWniSocket limitation.
 */
VOS_STATUS sme_DbgReadRegister(tHalHandle hHal, v_U32_t regAddr, v_U32_t *pRegValue)
{
    VOS_STATUS status = VOS_STATUS_E_FAILURE;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tPmcPowerState PowerState;
    tANI_U32 sessionId = 0;
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_DBG_READREG, NO_SESSION, 0));
    /* 1) To make Quarky work in FTM mode **************************************/
    if(eDRIVER_TYPE_MFG == pMac->gDriverType)
    {
        if (eWLAN_PAL_STATUS_SUCCESS == wpalDbgReadRegister(regAddr, pRegValue))
        {
            return VOS_STATUS_SUCCESS;
        }
        return VOS_STATUS_E_FAILURE;
    }
    /* 2) NON FTM mode driver *************************************************/
    /* Acquire SME global lock */
    if (eHAL_STATUS_SUCCESS != sme_AcquireGlobalLock(&pMac->sme))
    {
        return VOS_STATUS_E_FAILURE;
    }
    if(HAL_STATUS_SUCCESS(pmcQueryPowerState(pMac, &PowerState, NULL, NULL)))
    {
        /* Are we not in IMPS mode? Or are we in connected? Then we're safe*/
        if(!csrIsConnStateDisconnected(pMac, sessionId) || (ePMC_LOW_POWER != PowerState))
        {
            if (eWLAN_PAL_STATUS_SUCCESS == wpalDbgReadRegister(regAddr, pRegValue))
            {
                status = VOS_STATUS_SUCCESS;
            }
            else
            {
                status = VOS_STATUS_E_FAILURE;
            }
        }
        else
        {
            /* Hardware not accessible in this power/connection state. */
            status = VOS_STATUS_E_FAILURE;
        }
    }
    /* This is a hack for Qualky/pttWniSocket
       Current implementation doesn't allow pttWniSocket to inform Qualky an error */
    if ( VOS_STATUS_SUCCESS != status )
    {
        /* Sentinel value signals "could not read" to the tooling. */
        *pRegValue = 0xDEADBEEF;
        status = VOS_STATUS_SUCCESS;
    }
    /* Release SME global lock */
    sme_ReleaseGlobalLock(&pMac->sme);
    return (status);
}
//sme_DbgWriteRegister
//Caller needs to validate the input values
/*
 * sme_DbgWriteRegister() - debug helper: write one hardware register.
 *
 * @hHal:     handle returned by macOpen
 * @regAddr:  register address (caller must validate)
 * @regValue: value to write
 *
 * In manufacturing (FTM) builds the register is written directly.  In
 * normal builds the write is attempted under the SME global lock and
 * only when the chip is reachable (connected, or not in low power);
 * otherwise VOS_STATUS_E_FAILURE is returned.
 */
VOS_STATUS sme_DbgWriteRegister(tHalHandle hHal, v_U32_t regAddr, v_U32_t regValue)
{
    VOS_STATUS status = VOS_STATUS_E_FAILURE;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tPmcPowerState PowerState;
    tANI_U32 sessionId = 0;
    /* 1) To make Quarky work in FTM mode **************************************/
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_DBG_WRITEREG, NO_SESSION, 0));
    if(eDRIVER_TYPE_MFG == pMac->gDriverType)
    {
        if (eWLAN_PAL_STATUS_SUCCESS == wpalDbgWriteRegister(regAddr, regValue))
        {
            return VOS_STATUS_SUCCESS;
        }
        return VOS_STATUS_E_FAILURE;
    }
    /* 2) NON FTM mode driver *************************************************/
    /* Acquire SME global lock */
    if (eHAL_STATUS_SUCCESS != sme_AcquireGlobalLock(&pMac->sme))
    {
        return VOS_STATUS_E_FAILURE;
    }
    if(HAL_STATUS_SUCCESS(pmcQueryPowerState(pMac, &PowerState, NULL, NULL)))
    {
        /* Are we not in IMPS mode? Or are we in connected? Then we're safe*/
        if(!csrIsConnStateDisconnected(pMac, sessionId) || (ePMC_LOW_POWER != PowerState))
        {
            if (eWLAN_PAL_STATUS_SUCCESS == wpalDbgWriteRegister(regAddr, regValue))
            {
                status = VOS_STATUS_SUCCESS;
            }
            else
            {
                status = VOS_STATUS_E_FAILURE;
            }
        }
        else
        {
            /* Hardware not accessible in this power/connection state. */
            status = VOS_STATUS_E_FAILURE;
        }
    }
    /* Release SME global lock */
    sme_ReleaseGlobalLock(&pMac->sme);
    return (status);
}
//sme_DbgReadMemory
//Caller needs to validate the input values
//pBuf caller allocated buffer has the length of nLen
/*
 * sme_DbgReadMemory() - debug helper: dump a block of chip memory.
 *
 * @hHal:    handle returned by macOpen
 * @memAddr: start address to read (caller must validate)
 * @pBuf:    caller-allocated buffer of nLen bytes
 * @nLen:    number of bytes to read (consumed as nLen/4 words of 4
 *           bytes by the HAL dump command)
 *
 * Issues a READ_MEMORY_DUMP_CMD via WDA.  In manufacturing (FTM)
 * builds the dump is requested directly.  In normal builds it runs
 * under the SME global lock, only when the chip is reachable.  On
 * failure the buffer is filled with 0xCD and success is still
 * returned — see the in-line comment about the Qualky/pttWniSocket
 * limitation.
 */
VOS_STATUS sme_DbgReadMemory(tHalHandle hHal, v_U32_t memAddr, v_U8_t *pBuf, v_U32_t nLen)
{
    VOS_STATUS status = VOS_STATUS_E_FAILURE;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tPmcPowerState PowerState;
    tANI_U32 sessionId = 0;
    /* Arguments of the HAL memory-dump command: address, word count,
       word size (4 bytes), and an unused fourth argument. */
    tANI_U32 cmd = READ_MEMORY_DUMP_CMD;
    tANI_U32 arg1 = memAddr;
    tANI_U32 arg2 = nLen/4;
    tANI_U32 arg3 = 4;
    tANI_U32 arg4 = 0;
    /* 1) To make Quarky work in FTM mode **************************************/
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_DBG_READMEM, NO_SESSION, 0));
    if(eDRIVER_TYPE_MFG == pMac->gDriverType)
    {
        if (VOS_STATUS_SUCCESS == WDA_HALDumpCmdReq(pMac, cmd, arg1, arg2, arg3, arg4, (tANI_U8*)pBuf))
        {
            return VOS_STATUS_SUCCESS;
        }
        return VOS_STATUS_E_FAILURE;
    }
    /* 2) NON FTM mode driver *************************************************/
    /* Acquire SME global lock */
    if (eHAL_STATUS_SUCCESS != sme_AcquireGlobalLock(&pMac->sme))
    {
        return VOS_STATUS_E_FAILURE;
    }
    if(HAL_STATUS_SUCCESS(pmcQueryPowerState(pMac, &PowerState, NULL, NULL)))
    {
        /* Are we not in IMPS mode? Or are we in connected? Then we're safe*/
        if(!csrIsConnStateDisconnected(pMac, sessionId) || (ePMC_LOW_POWER != PowerState))
        {
            if (VOS_STATUS_SUCCESS == WDA_HALDumpCmdReq(pMac, cmd, arg1, arg2, arg3, arg4, (tANI_U8 *)pBuf))
            {
                status = VOS_STATUS_SUCCESS;
            }
            else
            {
                status = VOS_STATUS_E_FAILURE;
            }
        }
        else
        {
            /* Hardware not accessible in this power/connection state. */
            status = VOS_STATUS_E_FAILURE;
        }
    }
    /* This is a hack for Qualky/pttWniSocket
       Current implementation doesn't allow pttWniSocket to inform Qualky an error */
    if (VOS_STATUS_SUCCESS != status)
    {
        /* Fill pattern signals "could not read" to the tooling. */
        vos_mem_set(pBuf, nLen, 0xCD);
        status = VOS_STATUS_SUCCESS;
        smsLog(pMac, LOGE, FL(" filled with 0xCD because it cannot access the hardware"));
    }
    /* Release SME lock */
    sme_ReleaseGlobalLock(&pMac->sme);
    return (status);
}
//sme_DbgWriteMemory
//Caller needs to validate the input values
/*
 * sme_DbgWriteMemory() - debug helper: write a block of chip memory.
 *
 * @hHal:    handle returned by macOpen
 * @memAddr: target address (caller must validate)
 * @pBuf:    caller-allocated source buffer of nLen bytes
 * @nLen:    number of bytes to write
 *
 * In manufacturing (FTM) builds this is a no-op that reports success.
 * NOTE(review): the original FTM branch contained only a stray empty
 * brace block and an unreachable "return VOS_STATUS_E_FAILURE;"
 * (removed here); if FTM writes are ever required, this branch should
 * presumably call wpalDbgWriteMemory() like the non-FTM path — confirm
 * against the platform abstraction layer.
 *
 * In normal builds the write runs under the SME global lock and only
 * when the chip is reachable (connected, or not in low power).
 */
VOS_STATUS sme_DbgWriteMemory(tHalHandle hHal, v_U32_t memAddr, v_U8_t *pBuf, v_U32_t nLen)
{
    VOS_STATUS status = VOS_STATUS_E_FAILURE;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tPmcPowerState PowerState;
    tANI_U32 sessionId = 0;
    /* 1) To make Quarky work in FTM mode **************************************/
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_DBG_WRITEMEM, NO_SESSION, 0));
    if(eDRIVER_TYPE_MFG == pMac->gDriverType)
    {
        /* FTM mode: no write is performed; report success. */
        return VOS_STATUS_SUCCESS;
    }
    /* 2) NON FTM mode driver *************************************************/
    /* Acquire SME global lock */
    if (eHAL_STATUS_SUCCESS != sme_AcquireGlobalLock(&pMac->sme))
    {
        return VOS_STATUS_E_FAILURE;
    }
    if(HAL_STATUS_SUCCESS(pmcQueryPowerState(pMac, &PowerState, NULL, NULL)))
    {
        /* Are we not in IMPS mode? Or are we in connected? Then we're safe*/
        if(!csrIsConnStateDisconnected(pMac, sessionId) || (ePMC_LOW_POWER != PowerState))
        {
            if (eWLAN_PAL_STATUS_SUCCESS == wpalDbgWriteMemory(memAddr, (void *)pBuf, nLen))
            {
                status = VOS_STATUS_SUCCESS;
            }
            else
            {
                status = VOS_STATUS_E_FAILURE;
            }
        }
        else
        {
            /* Hardware not accessible in this power/connection state. */
            status = VOS_STATUS_E_FAILURE;
        }
    }
    /* Release Global lock */
    sme_ReleaseGlobalLock(&pMac->sme);
    return (status);
}
/*
 * pmcLog() - printf-style logging helper for the PMC module.
 *
 * @pMac:     global MAC context (unused in the body; kept for the
 *            common logging signature)
 * @loglevel: driver log level, mapped to a VOS trace level
 * @pString:  printf-style format string followed by its arguments
 */
void pmcLog(tpAniSirGlobal pMac, tANI_U32 loglevel, const char *pString, ...)
{
    VOS_TRACE_LEVEL traceLevel = getVosDebugLevel(loglevel);
    char msgBuffer[LOG_SIZE];
    va_list args;

    /* Render the variable arguments into a local buffer, then emit it
       through the VOS trace facility. */
    va_start( args, pString );
    vsnprintf(msgBuffer, LOG_SIZE, pString, args);
    VOS_TRACE(VOS_MODULE_ID_PMC, traceLevel, "%s", msgBuffer);
    va_end( args );
}
/*
 * smsLog() - printf-style logging helper for the SMS module.  Compiles
 * to a no-op unless WLAN_DEBUG is defined.
 *
 * @pMac:     global MAC context, consulted for the configured level
 * @loglevel: message level; messages above the configured SMS debug
 *            level are dropped
 * @pString:  printf-style format string followed by its arguments
 */
void smsLog(tpAniSirGlobal pMac, tANI_U32 loglevel, const char *pString,...)
{
#ifdef WLAN_DEBUG
    va_list args;

    /* Drop messages above the configured SMS debug level. */
    if ( loglevel > pMac->utils.gLogDbgLevel[LOG_INDEX_FOR_MODULE( SIR_SMS_MODULE_ID )] )
        return;

    va_start( args, pString );
    logDebug(pMac, SIR_SMS_MODULE_ID, loglevel, pString, args);
    va_end( args );
#endif
}
/* ---------------------------------------------------------------------------
\fn sme_GetWcnssWlanCompiledVersion
\brief This API returns the version of the WCNSS WLAN API with
which the HOST driver was built
\param hHal - The handle returned by macOpen.
\param pVersion - Points to the Version structure to be filled
\return VOS_STATUS
VOS_STATUS_E_INVAL - failure
VOS_STATUS_SUCCESS success
---------------------------------------------------------------------------*/
/*
 * sme_GetWcnssWlanCompiledVersion() - return the WCNSS WLAN API version
 * the host driver was built against.
 *
 * @hHal:     handle returned by macOpen
 * @pVersion: caller-owned structure to be filled; NULL yields
 *            VOS_STATUS_E_INVAL
 *
 * NOTE(review): if the SME lock cannot be taken, the initial
 * VOS_STATUS_SUCCESS is returned without filling *pVersion.
 */
VOS_STATUS sme_GetWcnssWlanCompiledVersion(tHalHandle hHal,
                                           tSirVersionType *pVersion)
{
    VOS_STATUS status = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    v_CONTEXT_t vosContext = vos_get_global_context(VOS_MODULE_ID_SME, NULL);

    if ( eHAL_STATUS_SUCCESS == sme_AcquireGlobalLock( &pMac->sme ) ) {
        status = (pVersion != NULL)
                     ? WDA_GetWcnssWlanCompiledVersion(vosContext, pVersion)
                     : VOS_STATUS_E_INVAL;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_GetWcnssWlanReportedVersion
\brief This API returns the version of the WCNSS WLAN API with
which the WCNSS driver reports it was built
\param hHal - The handle returned by macOpen.
\param pVersion - Points to the Version structure to be filled
\return VOS_STATUS
VOS_STATUS_E_INVAL - failure
VOS_STATUS_SUCCESS success
---------------------------------------------------------------------------*/
/*
 * sme_GetWcnssWlanReportedVersion() - return the WCNSS WLAN API version
 * the WCNSS firmware reports it was built with.
 *
 * @hHal:     handle returned by macOpen
 * @pVersion: caller-owned structure to be filled; NULL yields
 *            VOS_STATUS_E_INVAL
 *
 * NOTE(review): if the SME lock cannot be taken, the initial
 * VOS_STATUS_SUCCESS is returned without filling *pVersion.
 */
VOS_STATUS sme_GetWcnssWlanReportedVersion(tHalHandle hHal,
                                           tSirVersionType *pVersion)
{
    VOS_STATUS status = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    v_CONTEXT_t vosContext = vos_get_global_context(VOS_MODULE_ID_SME, NULL);

    if ( eHAL_STATUS_SUCCESS == sme_AcquireGlobalLock( &pMac->sme ) ) {
        status = (pVersion != NULL)
                     ? WDA_GetWcnssWlanReportedVersion(vosContext, pVersion)
                     : VOS_STATUS_E_INVAL;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_GetWcnssSoftwareVersion
\brief This API returns the version string of the WCNSS driver
\param hHal - The handle returned by macOpen.
\param pVersion - Points to the Version string buffer to be filled
    \param versionBufferSize - The size of the Version string buffer
\return VOS_STATUS
VOS_STATUS_E_INVAL - failure
VOS_STATUS_SUCCESS success
---------------------------------------------------------------------------*/
/*
 * sme_GetWcnssSoftwareVersion() - return the WCNSS driver version
 * string.
 *
 * @hHal:              handle returned by macOpen
 * @pVersion:          caller-owned string buffer to be filled; NULL
 *                     yields VOS_STATUS_E_INVAL
 * @versionBufferSize: capacity of the string buffer
 *
 * NOTE(review): if the SME lock cannot be taken, the initial
 * VOS_STATUS_SUCCESS is returned without filling the buffer.
 */
VOS_STATUS sme_GetWcnssSoftwareVersion(tHalHandle hHal,
                                       tANI_U8 *pVersion,
                                       tANI_U32 versionBufferSize)
{
    VOS_STATUS status = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    v_CONTEXT_t vosContext = vos_get_global_context(VOS_MODULE_ID_SME, NULL);

    if ( eHAL_STATUS_SUCCESS == sme_AcquireGlobalLock( &pMac->sme ) ) {
        status = (pVersion != NULL)
                     ? WDA_GetWcnssSoftwareVersion(vosContext, pVersion,
                                                   versionBufferSize)
                     : VOS_STATUS_E_INVAL;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_GetWcnssHardwareVersion
\brief This API returns the version string of the WCNSS hardware
\param hHal - The handle returned by macOpen.
\param pVersion - Points to the Version string buffer to be filled
    \param versionBufferSize - The size of the Version string buffer
\return VOS_STATUS
VOS_STATUS_E_INVAL - failure
VOS_STATUS_SUCCESS success
---------------------------------------------------------------------------*/
/*
 * sme_GetWcnssHardwareVersion() - return the WCNSS hardware version
 * string.
 *
 * @hHal:              handle returned by macOpen
 * @pVersion:          caller-owned string buffer to be filled; NULL
 *                     yields VOS_STATUS_E_INVAL
 * @versionBufferSize: capacity of the string buffer
 *
 * NOTE(review): if the SME lock cannot be taken, the initial
 * VOS_STATUS_SUCCESS is returned without filling the buffer.
 */
VOS_STATUS sme_GetWcnssHardwareVersion(tHalHandle hHal,
                                       tANI_U8 *pVersion,
                                       tANI_U32 versionBufferSize)
{
    VOS_STATUS status = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    v_CONTEXT_t vosContext = vos_get_global_context(VOS_MODULE_ID_SME, NULL);

    if ( eHAL_STATUS_SUCCESS == sme_AcquireGlobalLock( &pMac->sme ) ) {
        status = (pVersion != NULL)
                     ? WDA_GetWcnssHardwareVersion(vosContext, pVersion,
                                                   versionBufferSize)
                     : VOS_STATUS_E_INVAL;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return (status);
}
#ifdef FEATURE_WLAN_WAPI
/* ---------------------------------------------------------------------------
\fn sme_RoamSetBKIDCache
    \brief The SME API exposed to HDD to allow HDD to provide SME the BKID
candidate list.
\param hHal - Handle to the HAL. The HAL handle is returned by the HAL after
it is opened (by calling halOpen).
\param pBKIDCache - caller allocated buffer point to an array of tBkidCacheInfo
\param numItems - a variable that has the number of tBkidCacheInfo allocated
           when returning, this is the number of items put into pBKIDCache
\return eHalStatus - when fail, it usually means the buffer allocated is not
big enough and pNumItems has the number of tBkidCacheInfo.
---------------------------------------------------------------------------*/
/*
 * sme_RoamSetBKIDCache() - hand the HDD-supplied BKID candidate list to
 * CSR, under the SME global lock.
 *
 * @hHal:       handle returned by macOpen
 * @sessionId:  session the cache belongs to
 * @pBKIDCache: caller-allocated array of tBkidCacheInfo
 * @numItems:   number of entries in pBKIDCache
 */
eHalStatus sme_RoamSetBKIDCache( tHalHandle hHal, tANI_U32 sessionId, tBkidCacheInfo *pBKIDCache,
                                 tANI_U32 numItems )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( !HAL_STATUS_SUCCESS( status ) )
        return status;

    status = csrRoamSetBKIDCache( pMac, sessionId, pBKIDCache, numItems );
    sme_ReleaseGlobalLock( &pMac->sme );
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_RoamGetBKIDCache
\brief The SME API exposed to HDD to allow HDD to request SME to return its
BKID cache.
\param hHal - Handle to the HAL. The HAL handle is returned by the HAL after
it is opened (by calling halOpen).
\param pNum - caller allocated memory that has the space of the number of
tBkidCacheInfo as input. Upon returned, *pNum has the needed number of entries
in SME cache.
\param pBkidCache - Caller allocated memory that contains BKID cache, if any,
upon return
\return eHalStatus - when fail, it usually means the buffer allocated is not
big enough.
---------------------------------------------------------------------------*/
/*
 * sme_RoamGetBKIDCache() - copy SME's BKID cache out to the caller,
 * under the SME global lock.
 *
 * @hHal:       handle returned by macOpen
 * @pNum:       in: capacity (number of tBkidCacheInfo); out: entries
 *              needed/used
 * @pBkidCache: caller-allocated destination array
 *
 * Note: the underlying CSR call is made with a hard-coded session 0
 * (flagged loudly in the log below).
 */
eHalStatus sme_RoamGetBKIDCache(tHalHandle hHal, tANI_U32 *pNum,
                                tBkidCacheInfo *pBkidCache)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( !HAL_STATUS_SUCCESS( status ) )
        return status;

    smsLog(pMac, LOGE, FL(" !!!!!!!!!!!!!!!!!!SessionId is hardcoded"));
    status = csrRoamGetBKIDCache( pMac, 0, pNum, pBkidCache );
    sme_ReleaseGlobalLock( &pMac->sme );
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_RoamGetNumBKIDCache
\brief The SME API exposed to HDD to allow HDD to request SME to return the
number of BKID cache entries.
\param hHal - Handle to the HAL. The HAL handle is returned by the HAL after
it is opened (by calling halOpen).
\return tANI_U32 - the number of BKID cache entries.
---------------------------------------------------------------------------*/
/*
 * sme_RoamGetNumBKIDCache() - return the number of BKID cache entries
 * for a session, under the SME global lock.
 *
 * @hHal:      handle returned by macOpen
 * @sessionId: session to query
 *
 * Returns 0 when the lock cannot be taken.
 */
tANI_U32 sme_RoamGetNumBKIDCache(tHalHandle hHal, tANI_U32 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_U32 numBkidCache = 0;

    if ( HAL_STATUS_SUCCESS( sme_AcquireGlobalLock( &pMac->sme ) ) )
    {
        numBkidCache = csrRoamGetNumBKIDCache( pMac, sessionId );
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return (numBkidCache);
}
/* ---------------------------------------------------------------------------
\fn sme_ScanGetBKIDCandidateList
\brief a wrapper function to return the BKID candidate list
\param pBkidList - caller allocated buffer point to an array of
tBkidCandidateInfo
\param pNumItems - pointer to a variable that has the number of
                      tBkidCandidateInfo allocated when returning, this is
either the number needed or number of items put into
pPmkidList
\return eHalStatus - when fail, it usually means the buffer allocated is not
big enough and pNumItems
has the number of tBkidCandidateInfo.
\Note: pNumItems is a number of tBkidCandidateInfo,
not sizeof(tBkidCandidateInfo) * something
---------------------------------------------------------------------------*/
/*
 * sme_ScanGetBKIDCandidateList() - return the BKID candidate list from
 * CSR, under the SME global lock.
 *
 * @hHal:      handle returned by macOpen
 * @sessionId: session to query
 * @pBkidList: caller-allocated array of tBkidCandidateInfo
 * @pNumItems: in: capacity; out: entries needed or written
 */
eHalStatus sme_ScanGetBKIDCandidateList(tHalHandle hHal, tANI_U32 sessionId,
                                        tBkidCandidateInfo *pBkidList,
                                        tANI_U32 *pNumItems )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( !HAL_STATUS_SUCCESS( status ) )
        return status;

    status = csrScanGetBKIDCandidateList( pMac, sessionId, pBkidList, pNumItems );
    sme_ReleaseGlobalLock( &pMac->sme );
    return (status);
}
#endif /* FEATURE_WLAN_WAPI */
#ifdef FEATURE_OEM_DATA_SUPPORT
/*****************************************************************************
OEM DATA related modifications and function additions
*****************************************************************************/
/* ---------------------------------------------------------------------------
\fn sme_getOemDataRsp
\brief a wrapper function to obtain the OEM DATA RSP
\param pOemDataRsp - A pointer to the response object
\param pContext - a pointer passed in for the callback
\return eHalStatus
---------------------------------------------------------------------------*/
/*
 * sme_getOemDataRsp() - return the cached OEM DATA response pointer,
 * under the SME global lock.
 *
 * @hHal:        handle returned by macOpen
 * @pOemDataRsp: out parameter; on success receives the cached response
 *
 * Returns eHAL_STATUS_FAILURE when no response is cached or the lock
 * cannot be taken.
 */
eHalStatus sme_getOemDataRsp(tHalHandle hHal,
                             tOemDataRsp **pOemDataRsp)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (!HAL_STATUS_SUCCESS(status))
        return status;

    if (pMac->oemData.pOemDataRsp != NULL)
        *pOemDataRsp = pMac->oemData.pOemDataRsp;
    else
        status = eHAL_STATUS_FAILURE;

    sme_ReleaseGlobalLock( &pMac->sme );
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_OemDataReq
\brief a wrapper function for OEM DATA REQ
\param sessionId - session id to be used.
\param pOemDataReqId - pointer to an object to get back the request ID
\param callback - a callback function that is called upon finish
\param pContext - a pointer passed in for the callback
\return eHalStatus
---------------------------------------------------------------------------*/
/*
 * sme_OemDataReq() - issue an OEM DATA request and hand back a request
 * id, under the SME global lock.
 *
 * @hHal:              handle returned by macOpen
 * @sessionId:         session to issue the request on
 * @pOemDataReqConfig: caller-owned request configuration
 * @pOemDataReqID:     out parameter receiving the assigned request id;
 *                     NULL is rejected with eHAL_STATUS_FAILURE
 * @callback:          completion callback
 * @pContext:          opaque pointer passed to the callback
 */
eHalStatus sme_OemDataReq(tHalHandle hHal,
                          tANI_U8 sessionId,
                          tOemDataReqConfig *pOemDataReqConfig,
                          tANI_U32 *pOemDataReqID,
                          oemData_OemDataReqCompleteCallback callback,
                          void *pContext)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        /* Hand out the next request id; wrap-around is acceptable. */
        tANI_U32 reqId = pMac->oemData.oemDataReqID++;

        if (!pOemDataReqID)
        {
            sme_ReleaseGlobalLock( &pMac->sme );
            return eHAL_STATUS_FAILURE;
        }
        *pOemDataReqID = reqId;

        status = oemData_OemDataReq(hHal, sessionId, pOemDataReqConfig, pOemDataReqID, callback, pContext);
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    smsLog(pMac, LOGW, "exiting function %s", __func__);
    return(status);
}
#endif /*FEATURE_OEM_DATA_SUPPORT*/
/*--------------------------------------------------------------------------
\brief sme_OpenSession() - Open a session for scan/roam operation.
This is a synchronous API.
\param hHal - The handle returned by macOpen.
\param callback - A pointer to the function caller specifies for roam/connect status indication
\param pContext - The context passed with callback
\param pSelfMacAddr - Caller allocated memory filled with self MAC address (6 bytes)
\param pbSessionId - pointer to a caller allocated buffer for returned session ID
\return eHAL_STATUS_SUCCESS - session is opened. sessionId returned.
Other status means SME is failed to open the session.
eHAL_STATUS_RESOURCES - no more session available.
\sa
--------------------------------------------------------------------------*/
/*
 * sme_OpenSession() - open a session for scan/roam operation
 * (synchronous).
 *
 * @hHal:         handle returned by macOpen
 * @callback:     roam/connect status indication callback
 * @pContext:     context passed back with the callback
 * @pSelfMacAddr: caller-allocated self MAC address (6 bytes)
 * @pbSessionId:  caller-allocated buffer receiving the session id;
 *                NULL is rejected with eHAL_STATUS_INVALID_PARAMETER
 *
 * Returns eHAL_STATUS_SUCCESS when the session was opened and the id
 * stored; other codes indicate failure (e.g. eHAL_STATUS_RESOURCES
 * when no session is available).
 */
eHalStatus sme_OpenSession(tHalHandle hHal, csrRoamCompleteCallback callback,
                           void *pContext, tANI_U8 *pSelfMacAddr,
                           tANI_U8 *pbSessionId)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    if( NULL == pbSessionId )
    {
        status = eHAL_STATUS_INVALID_PARAMETER;
    }
    else
    {
        status = sme_AcquireGlobalLock( &pMac->sme );
        if ( HAL_STATUS_SUCCESS( status ) )
        {
            status = csrRoamOpenSession(pMac, callback, pContext,
                                        pSelfMacAddr, pbSessionId);
            sme_ReleaseGlobalLock( &pMac->sme );
        }
        /* Trace with the returned session id (only valid when a
           session id buffer was supplied). */
        MTRACE(vos_trace(VOS_MODULE_ID_SME,
               TRACE_CODE_SME_RX_HDD_OPEN_SESSION,*pbSessionId, 0));
    }
    return ( status );
}
/*--------------------------------------------------------------------------
  \brief sme_CloseSession() - Close a previously opened session for scan/roam operation.
This is a synchronous API.
\param hHal - The handle returned by macOpen.
\param sessionId - A previous opened session's ID.
\return eHAL_STATUS_SUCCESS - session is closed.
Other status means SME is failed to open the session.
eHAL_STATUS_INVALID_PARAMETER - session is not opened.
\sa
--------------------------------------------------------------------------*/
/*
 * sme_CloseSession() - close a previously opened scan/roam session
 * (synchronous).
 *
 * @hHal:      handle returned by macOpen
 * @sessionId: id of a previously opened session
 * @callback:  invoked when the close completes
 * @pContext:  context passed back with the callback
 *
 * Returns eHAL_STATUS_SUCCESS when the session was closed;
 * eHAL_STATUS_INVALID_PARAMETER when the session was not open.
 */
eHalStatus sme_CloseSession(tHalHandle hHal, tANI_U8 sessionId,
                            csrRoamSessionCloseCallback callback, void *pContext)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_CLOSE_SESSION, sessionId, 0));
    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( !HAL_STATUS_SUCCESS( status ) )
        return status;

    status = csrRoamCloseSession( pMac, sessionId, FALSE,
                                  callback, pContext );
    sme_ReleaseGlobalLock( &pMac->sme );
    return ( status );
}
/* ---------------------------------------------------------------------------
\fn sme_RoamUpdateAPWPSIE
\brief To update AP's WPS IE. This function should be called after SME AP session is created
This is an asynchronous API.
\param pAPWPSIES - pointer to a caller allocated object of tSirAPWPSIEs
\return eHalStatus SUCCESS
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
/*
 * sme_RoamUpdateAPWPSIE() - update the AP's WPS IEs (asynchronous);
 * call only after the SME AP session has been created.
 *
 * @hHal:      handle returned by macOpen
 * @sessionId: AP session to update
 * @pAPWPSIES: caller-allocated tSirAPWPSIEs
 */
eHalStatus sme_RoamUpdateAPWPSIE(tHalHandle hHal, tANI_U8 sessionId, tSirAPWPSIEs *pAPWPSIES)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( !HAL_STATUS_SUCCESS( status ) )
        return status;

    status = csrRoamUpdateAPWPSIE( pMac, sessionId, pAPWPSIES );
    sme_ReleaseGlobalLock( &pMac->sme );
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_RoamUpdateAPWPARSNIEs
\brief To update AP's WPA/RSN IEs. This function should be called after SME AP session is created
This is an asynchronous API.
\param pAPSirRSNie - pointer to a caller allocated object of tSirRSNie with WPS/RSN IEs
\return eHalStatus SUCCESS
FAILURE or RESOURCES The API finished and failed.
-------------------------------------------------------------------------------*/
/*
 * sme_RoamUpdateAPWPARSNIEs() - update the AP's WPA/RSN IEs
 * (asynchronous); call only after the SME AP session has been created.
 *
 * @hHal:        handle returned by macOpen
 * @sessionId:   AP session to update
 * @pAPSirRSNie: caller-allocated tSirRSNie holding the WPA/RSN IEs
 */
eHalStatus sme_RoamUpdateAPWPARSNIEs(tHalHandle hHal, tANI_U8 sessionId, tSirRSNie * pAPSirRSNie)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( !HAL_STATUS_SUCCESS( status ) )
        return status;

    status = csrRoamUpdateWPARSNIEs( pMac, sessionId, pAPSirRSNie);
    sme_ReleaseGlobalLock( &pMac->sme );
    return (status);
}
/* ---------------------------------------------------------------------------
\fn sme_ChangeMCCBeaconInterval
\brief To update P2P-GO beaconInterval. This function should be called after
disassociating all the station is done
This is an asynchronous API.
\param
\return eHalStatus SUCCESS
FAILURE or RESOURCES
The API finished and failed.
-------------------------------------------------------------------------------*/
/*
 * sme_ChangeMCCBeaconInterval() - update the P2P-GO beacon interval
 * (asynchronous); call after all stations have been disassociated.
 *
 * @hHal:      handle returned by macOpen
 * @sessionId: P2P-GO session to update
 */
eHalStatus sme_ChangeMCCBeaconInterval(tHalHandle hHal, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    smsLog(pMac, LOG1, FL("Update Beacon PARAMS "));
    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( !HAL_STATUS_SUCCESS( status ) )
        return status;

    status = csrSendChngMCCBeaconInterval( pMac, sessionId);
    sme_ReleaseGlobalLock( &pMac->sme );
    return (status);
}
/*-------------------------------------------------------------------------------*
\fn sme_sendBTAmpEvent
  \brief to receive the coex priority request from BT-AMP PAL
and send the BT_AMP link state to HAL
\param btAmpEvent - btAmpEvent
\return eHalStatus: SUCCESS : BTAmp event successfully sent to HAL
FAILURE: API failed
-------------------------------------------------------------------------------*/
/*
 * sme_sendBTAmpEvent() - forward a BT-AMP link-state event to HAL by
 * posting a WDA_SIGNAL_BTAMP_EVENT message to the WDA queue.
 *
 * @hHal:       handle returned by macOpen (not used directly; the
 *              event is posted straight to the message queue)
 * @btAmpEvent: event to deliver; copied into a freshly allocated
 *              message body owned by the receiver on success
 *
 * Returns eHAL_STATUS_SUCCESS when the event was queued,
 * eHAL_STATUS_FAILURE on allocation or post failure (the message body
 * is freed on post failure).
 */
eHalStatus sme_sendBTAmpEvent(tHalHandle hHal, tSmeBtAmpEvent btAmpEvent)
{
    vos_msg_t msg;
    tpSmeBtAmpEvent ptrSmeBtAmpEvent = NULL;
    eHalStatus status = eHAL_STATUS_FAILURE;

    ptrSmeBtAmpEvent = vos_mem_malloc(sizeof(tSmeBtAmpEvent));
    if (NULL == ptrSmeBtAmpEvent)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
            "Not able to allocate memory for BTAmp event", __func__);
        return status;
    }

    vos_mem_copy(ptrSmeBtAmpEvent, (void*)&btAmpEvent, sizeof(tSmeBtAmpEvent));
    msg.type = WDA_SIGNAL_BTAMP_EVENT;
    msg.reserved = 0;
    msg.bodyptr = ptrSmeBtAmpEvent;

    /* Post to the WDA queue using the MQ id, consistent with the other
       WDA posts in this file (previously passed VOS_MODULE_ID_WDA). */
    if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MQ_ID_WDA, &msg))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
            "Not able to post SIR_HAL_SIGNAL_BTAMP_EVENT message to HAL", __func__);
        vos_mem_free(ptrSmeBtAmpEvent);
        return status;
    }
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
    \fn smeIssueFastRoamNeighborAPEvent
    \brief API to trigger fast BSS roam independent of RSSI triggers
    \param hHal - The handle returned by macOpen.
    \param bssid - Pointer to the BSSID to roam to.
    \param fastRoamTrig - Trigger to Scan or roam
    \return eHalStatus - SUCCESS when the requested trigger was processed,
            FAILURE when the SME lock could not be taken or the underlying
            CSR call failed.
  ---------------------------------------------------------------------------*/
eHalStatus smeIssueFastRoamNeighborAPEvent (tHalHandle hHal,
                                            tANI_U8 *bssid,
                                            tSmeFastRoamTrigger fastRoamTrig)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tpCsrNeighborRoamControlInfo pNeighborRoamInfo = &pMac->roam.neighborRoamInfo;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if (!HAL_STATUS_SUCCESS(status))
    {
        /* Bug fix: the previous code fell through to "return vosStatus",
         * which is initialized to VOS_STATUS_SUCCESS, so a lock-acquire
         * failure was silently reported to the caller as success. */
        return eHAL_STATUS_FAILURE;
    }

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
              "%s: invoked", __func__);

    if (eSME_ROAM_TRIGGER_SCAN == fastRoamTrig)
    {
        smsLog(pMac, LOG1, FL("CFG Channel list scan... "));
        pNeighborRoamInfo->cfgRoamEn = eSME_ROAM_TRIGGER_SCAN;
        vos_mem_copy((void *)(&pNeighborRoamInfo->cfgRoambssId),
                     (void *)bssid, sizeof(tSirMacAddr));
        smsLog(pMac, LOG1, "Calling Roam Look Up down Event BSSID "
               MAC_ADDRESS_STR, MAC_ADDR_ARRAY(pNeighborRoamInfo->cfgRoambssId));
        vosStatus = csrNeighborRoamTransitToCFGChanScan(pMac);
        if (VOS_STATUS_SUCCESS != vosStatus)
        {
            smsLog(pMac, LOGE,
                   FL("CFG Channel list scan state failed with status %d "),
                   vosStatus);
        }
    }
    else if (eSME_ROAM_TRIGGER_FAST_ROAM == fastRoamTrig)
    {
        vos_mem_copy((void *)(&pNeighborRoamInfo->cfgRoambssId),
                     (void *)bssid, sizeof(tSirMacAddr));
        pNeighborRoamInfo->cfgRoamEn = eSME_ROAM_TRIGGER_FAST_ROAM;
        smsLog(pMac, LOG1, "Roam to BSSID "MAC_ADDRESS_STR,
               MAC_ADDR_ARRAY(pNeighborRoamInfo->cfgRoambssId));
        vosStatus = csrNeighborRoamReassocIndCallback(pMac->roam.gVosContext,
                                                      0,
                                                      pMac,
                                                      0);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            smsLog(pMac,
                   LOGE,
                   FL(" Call to csrNeighborRoamReassocIndCallback failed, status = %d"),
                   vosStatus);
        }
    }
    sme_ReleaseGlobalLock( &pMac->sme );

    /* Bug fix: translate the internal VOS_STATUS into the eHalStatus this
     * API advertises instead of returning the raw VOS code. */
    return VOS_IS_STATUS_SUCCESS(vosStatus) ? eHAL_STATUS_SUCCESS
                                            : eHAL_STATUS_FAILURE;
}
/* ---------------------------------------------------------------------------
    \fn sme_SetHostOffload
    \brief Forward a host offload request to PMC under the SME global lock.
           IPv6 NS offload requests take a dedicated PMC path when the
           feature is compiled in.
    \param hHal - The handle returned by macOpen.
    \param sessionId - Session the offload applies to.
    \param pRequest - Pointer to the offload request.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_SetHostOffload (tHalHandle hHal, tANI_U8 sessionId,
                               tpSirHostOffloadReq pRequest)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SET_HOSTOFFLOAD, sessionId, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
#ifdef WLAN_NS_OFFLOAD
        if (SIR_IPV6_NS_OFFLOAD == pRequest->offloadType)
        {
            /* IPv6 Neighbor Solicitation offload has its own PMC entry */
            status = pmcSetNSOffload(hHal, pRequest, sessionId);
        }
        else
#endif /* WLAN_NS_OFFLOAD */
        {
            status = pmcSetHostOffload(hHal, pRequest, sessionId);
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
#ifdef WLAN_FEATURE_GTK_OFFLOAD
/* ---------------------------------------------------------------------------
    \fn sme_SetGTKOffload
    \brief Hand GTK offload parameters to PMC under the SME global lock.
    \param hHal - The handle returned by macOpen.
    \param pRequest - Pointer to the GTK offload request.
    \param sessionId - Session the request applies to.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_SetGTKOffload (tHalHandle hHal, tpSirGtkOffloadParams pRequest,
                              tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SET_GTKOFFLOAD, sessionId, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        status = pmcSetGTKOffload(hHal, pRequest, sessionId);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_GetGTKOffload
    \brief Ask PMC for the current GTK offload information; the result is
           delivered asynchronously through the supplied callback.
    \param hHal - The handle returned by macOpen.
    \param callbackRoutine - Invoked with the GTK offload response.
    \param callbackContext - Opaque context handed back to the callback.
    \param sessionId - Session the query applies to.
    \return eHalStatus - reflects lock acquisition only; the PMC call's
            result is reported via the callback.
  ---------------------------------------------------------------------------*/
eHalStatus sme_GetGTKOffload (tHalHandle hHal, GTKOffloadGetInfoCallback callbackRoutine,
                              void *callbackContext, tANI_U8 sessionId )
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_GET_GTKOFFLOAD, sessionId, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        pmcGetGTKOffload(hHal, callbackRoutine, callbackContext, sessionId);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
#endif // WLAN_FEATURE_GTK_OFFLOAD
/* ---------------------------------------------------------------------------
    \fn sme_SetKeepAlive
    \brief Forward a Keep Alive request to PMC under the SME global lock.
    \param hHal - The handle returned by macOpen.
    \param sessionId - Session the request applies to.
    \param pRequest - Pointer to the Keep Alive request.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_SetKeepAlive (tHalHandle hHal, tANI_U8 sessionId,
                             tpSirKeepAliveReq pRequest)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        status = pmcSetKeepAlive(hHal, pRequest, sessionId);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
#ifdef FEATURE_WLAN_SCAN_PNO
/* ---------------------------------------------------------------------------
    \fn sme_SetPreferredNetworkList
    \brief Configure the Preferred Network List Offload (PNO) feature via
           PMC; matches are reported through the supplied callback.
    \param hHal - The handle returned by macOpen.
    \param pRequest - Pointer to the PNO scan request.
    \param sessionId - Session the request applies to.
    \param callbackRoutine - Invoked when a preferred network is found.
    \param callbackContext - Opaque context handed back to the callback.
    \return eHalStatus - reflects lock acquisition only.
  ---------------------------------------------------------------------------*/
eHalStatus sme_SetPreferredNetworkList (tHalHandle hHal,
        tpSirPNOScanReq pRequest,
        tANI_U8 sessionId,
        void (*callbackRoutine) (void *callbackContext,
                                 tSirPrefNetworkFoundInd *pPrefNetworkFoundInd),
        void *callbackContext )
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        pmcSetPreferredNetworkList(hHal, pRequest, sessionId,
                                   callbackRoutine, callbackContext);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* Configure the PNO RSSI filter threshold via PMC (lock held around call). */
eHalStatus sme_SetRSSIFilter(tHalHandle hHal, v_U8_t rssiThreshold)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        pmcSetRssiFilter(hHal, rssiThreshold);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
#endif // FEATURE_WLAN_SCAN_PNO
/* Push power-save parameters to PMC; 'forced' is passed straight through. */
eHalStatus sme_SetPowerParams(tHalHandle hHal, tSirSetPowerParamsReq* pwParams,
                              tANI_BOOLEAN forced)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SET_POWERPARAMS, NO_SESSION, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        pmcSetPowerParams(hHal, pwParams, forced);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_AbortMacScan
    \brief Cancel an in-progress MAC scan through CSR.
    \param hHal - The handle returned by macOpen.
    \param sessionId - Session whose scan should be aborted.
    \param reason - Reason for aborting the scan.
    \return tSirAbortScanStatus - abort status from CSR, or
            eSIR_ABORT_SCAN_FAILURE when the SME lock could not be taken.
  ---------------------------------------------------------------------------*/
tSirAbortScanStatus sme_AbortMacScan(tHalHandle hHal, tANI_U8 sessionId,
                                     eCsrAbortReason reason)
{
    tSirAbortScanStatus abortStatus = eSIR_ABORT_SCAN_FAILURE;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_ABORT_MACSCAN, NO_SESSION, 0));
    if (HAL_STATUS_SUCCESS(sme_AcquireGlobalLock(&pMac->sme)))
    {
        abortStatus = csrScanAbortMacScan(pMac, sessionId, reason);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return abortStatus;
}
/* ----------------------------------------------------------------------------
    \fn sme_GetOperationChannel
    \brief Report the channel the session is currently parked on. Only valid
           for infrastructure STA, infra AP and IBSS connections.
    \param hHal - The handle returned by macOpen.
    \param pChannel - Out: receives the operating channel on success.
    \param sessionId - Session to query.
    \return eHAL_STATUS_SUCCESS when a channel was written to *pChannel,
            eHAL_STATUS_FAILURE otherwise (invalid session or BSS type).
  -------------------------------------------------------------------------------*/
eHalStatus sme_GetOperationChannel(tHalHandle hHal, tANI_U32 *pChannel, tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession;

    if (!CSR_IS_SESSION_VALID(pMac, sessionId))
    {
        return eHAL_STATUS_FAILURE;
    }

    pSession = CSR_GET_SESSION(pMac, sessionId);
    switch (pSession->connectedProfile.BSSType)
    {
    case eCSR_BSS_TYPE_INFRASTRUCTURE:
    case eCSR_BSS_TYPE_IBSS:
    case eCSR_BSS_TYPE_INFRA_AP:
    case eCSR_BSS_TYPE_START_IBSS:
        *pChannel = pSession->connectedProfile.operationChannel;
        return eHAL_STATUS_SUCCESS;
    default:
        /* Not a connection type that has a parked channel. */
        return eHAL_STATUS_FAILURE;
    }
}// sme_GetOperationChannel ends here
/* ---------------------------------------------------------------------------
    \fn sme_RegisterMgmtFrame
    \brief Register interest in management frames of the given type/subtype
           so matching frames are forwarded to HDD.
    \param hHal - The handle returned by macOpen.
    \param sessionId - Session registering the interest.
    \param frameType - Type of frame to be passed to HDD.
    \param matchData - Bytes that must match before the frame is forwarded.
    \param matchLen - Length of matchData.
    \return eHalStatus
  -------------------------------------------------------------------------------*/
eHalStatus sme_RegisterMgmtFrame(tHalHandle hHal, tANI_U8 sessionId,
                                 tANI_U16 frameType, tANI_U8* matchData,
                                 tANI_U16 matchLen)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_REGISTER_MGMTFR, sessionId, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        tSirRegisterMgmtFrame *pReq;
        tANI_U16 msgLen;
        tCsrRoamSession *pSession = CSR_GET_SESSION(pMac, sessionId);

        if (!pSession)
        {
            smsLog(pMac, LOGE, FL(" session %d not found "), sessionId);
            sme_ReleaseGlobalLock(&pMac->sme);
            return eHAL_STATUS_FAILURE;
        }
        if (!pSession->sessionActive)
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                      "%s Invalid Sessionid", __func__);
            sme_ReleaseGlobalLock(&pMac->sme);
            return eHAL_STATUS_FAILURE;
        }

        /* The match pattern is carried inline after the fixed header. */
        msgLen = sizeof(tSirRegisterMgmtFrame) + matchLen;
        pReq = vos_mem_malloc(msgLen);
        if (NULL == pReq)
        {
            status = eHAL_STATUS_FAILURE;
        }
        else
        {
            vos_mem_set(pReq, msgLen, 0);
            pReq->messageType = eWNI_SME_REGISTER_MGMT_FRAME_REQ;
            pReq->length = msgLen;
            pReq->sessionId = sessionId;
            pReq->registerFrame = VOS_TRUE;
            pReq->frameType = frameType;
            pReq->matchLen = matchLen;
            vos_mem_copy(pReq->matchData, matchData, matchLen);
            /* palSendMBMessage takes ownership of pReq. */
            status = palSendMBMessage(pMac->hHdd, pReq);
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_DeregisterMgmtFrame
    \brief To de-register management frame of specified type and subtype.
    \param frameType - type of the frame that needs to be passed to HDD.
    \param matchData - data which needs to be matched before passing frame
                       to HDD.
    \param matchLen - Length of matched data.
    \return eHalStatus
  -------------------------------------------------------------------------------*/
eHalStatus sme_DeregisterMgmtFrame(tHalHandle hHal, tANI_U8 sessionId,
                     tANI_U16 frameType, tANI_U8* matchData, tANI_U16 matchLen)
{
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_DEREGISTER_MGMTFR, sessionId, 0));
    if ( eHAL_STATUS_SUCCESS == ( status = sme_AcquireGlobalLock( &pMac->sme ) ) )
    {
        tSirRegisterMgmtFrame *pMsg;
        tANI_U16 len;
        tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
        if(!pSession)
        {
            smsLog(pMac, LOGE, FL(" session %d not found "), sessionId);
            sme_ReleaseGlobalLock( &pMac->sme );
            return eHAL_STATUS_FAILURE;
        }
        if( !pSession->sessionActive )
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                      "%s Invalid Sessionid", __func__);
            sme_ReleaseGlobalLock( &pMac->sme );
            return eHAL_STATUS_FAILURE;
        }
        /* The match pattern is carried inline after the fixed header. */
        len = sizeof(tSirRegisterMgmtFrame) + matchLen;
        pMsg = vos_mem_malloc(len);
        if ( NULL == pMsg )
            status = eHAL_STATUS_FAILURE;
        else
        {
            vos_mem_set(pMsg, len, 0);
            /* Same message type as registration; registerFrame = FALSE
             * marks it as a de-registration. */
            pMsg->messageType = eWNI_SME_REGISTER_MGMT_FRAME_REQ;
            pMsg->length = len;
            /* Bug fix: sessionId was never filled in, so (after the zeroing
             * memset above) de-registration was always sent for session 0.
             * sme_RegisterMgmtFrame sets this field; mirror it here. */
            pMsg->sessionId = sessionId;
            pMsg->registerFrame = VOS_FALSE;
            pMsg->frameType = frameType;
            pMsg->matchLen = matchLen;
            vos_mem_copy(pMsg->matchData, matchData, matchLen);
            /* palSendMBMessage takes ownership of pMsg. */
            status = palSendMBMessage(pMac->hHdd, pMsg);
        }
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_RemainOnChannel
    \brief Request remain-on-channel for 'duration' ms; used by P2P listen.
    \param hHal - The handle returned by macOpen.
    \param sessionId - Session making the request.
    \param channel - Channel to remain on.
    \param duration - Duration in ms.
    \param callback - HDD callback for the remain-on-channel response.
    \param pContext - HDD callback context.
    \param isP2PProbeReqAllowed - Whether P2P probe requests may be answered.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_RemainOnChannel(tHalHandle hHal, tANI_U8 sessionId,
                               tANI_U8 channel, tANI_U32 duration,
                               remainOnChanCallback callback,
                               void *pContext,
                               tANI_U8 isP2PProbeReqAllowed)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_REMAIN_ONCHAN, sessionId, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        status = p2pRemainOnChannel(hHal, sessionId, channel, duration,
                                    callback, pContext,
                                    isP2PProbeReqAllowed
#ifdef WLAN_FEATURE_P2P_INTERNAL
                                    , eP2PRemainOnChnReasonUnknown
#endif
                                    );
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_ReportProbeReq
    \brief Enable/disable forwarding of probe requests to apps (P2P listen).
    \param hHal - The handle returned by macOpen.
    \param flag - Whether probe requests should be forwarded to
                  wpa_supplicant while in P2P listen state.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
#ifndef WLAN_FEATURE_CONCURRENT_P2P
eHalStatus sme_ReportProbeReq(tHalHandle hHal, tANI_U8 flag)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    /* acquire the lock for the sme object */
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        /* record the forwarding preference in the P2P context */
        pMac->p2pContext.probeReqForwarding = flag;
        /* release the lock for the sme object */
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    smsLog(pMac, LOGW, "exiting function %s", __func__);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_updateP2pIe
    \brief API to set the P2P IE in the P2P context. Any previously stored
           probe-response IE is released and replaced.
    \param hHal - The handle returned by macOpen.
    \param p2pIe - Ptr to p2pIe from HDD.
    \param p2pIeLength - length of p2pIe.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_updateP2pIe(tHalHandle hHal, void *p2pIe, tANI_U32 p2pIeLength)
{
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    //acquire the lock for the sme object
    status = sme_AcquireGlobalLock(&pMac->sme);
    if(HAL_STATUS_SUCCESS(status))
    {
        /* Release any previously stored IE before replacing it. */
        if(NULL != pMac->p2pContext.probeRspIe){
            vos_mem_free(pMac->p2pContext.probeRspIe);
            pMac->p2pContext.probeRspIe = NULL; /* avoid dangling pointer */
            pMac->p2pContext.probeRspIeLength = 0;
        }
        pMac->p2pContext.probeRspIe = vos_mem_malloc(p2pIeLength);
        if (NULL == pMac->p2pContext.probeRspIe)
        {
            smsLog(pMac, LOGE, "%s: Unable to allocate P2P IE", __func__);
            pMac->p2pContext.probeRspIeLength = 0;
            status = eHAL_STATUS_FAILURE;
        }
        else
        {
            pMac->p2pContext.probeRspIeLength = p2pIeLength;
            /* Bug fix: copy the IE into the buffer BEFORE dumping it.
             * Previously sirDumpBuf ran first and logged the contents of
             * the freshly-allocated (uninitialized) buffer. */
            vos_mem_copy((tANI_U8 *)pMac->p2pContext.probeRspIe, p2pIe,
                         p2pIeLength);
            sirDumpBuf( pMac, SIR_LIM_MODULE_ID, LOG2,
                        pMac->p2pContext.probeRspIe,
                        pMac->p2pContext.probeRspIeLength );
        }
        //release the lock for the sme object
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    smsLog(pMac, LOG2, "exiting function %s", __func__);
    return(status);
}
#endif
/* ---------------------------------------------------------------------------
    \fn sme_sendAction
    \brief Send an action frame handed down from the supplicant.
    \param hHal - The handle returned by macOpen.
    \param sessionId - Session sending the frame.
    \param pBuf/len - Frame buffer and its length.
    \param wait - Wait time.
    \param noack - Whether an ACK is expected.
    \return eHalStatus - reflects lock acquisition only; the p2pSendAction
            result is not propagated (matches historical behavior).
  ---------------------------------------------------------------------------*/
eHalStatus sme_sendAction(tHalHandle hHal, tANI_U8 sessionId,
                          const tANI_U8 *pBuf, tANI_U32 len,
                          tANI_U16 wait, tANI_BOOLEAN noack)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SEND_ACTION, sessionId, 0));
    /* acquire the lock for the sme object */
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        p2pSendAction(hHal, sessionId, pBuf, len, wait, noack);
        /* release the lock for the sme object */
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    smsLog(pMac, LOGW, "exiting function %s", __func__);
    return status;
}
/* Cancel a pending remain-on-channel request for the given session. */
eHalStatus sme_CancelRemainOnChannel(tHalHandle hHal, tANI_U8 sessionId )
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_CANCEL_REMAIN_ONCHAN, sessionId, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        status = p2pCancelRemainOnChannel(hHal, sessionId);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
//Power Save Related
/* Apply a P2P power-save configuration under the SME global lock. */
eHalStatus sme_p2pSetPs(tHalHandle hHal, tP2pPsConfig * data)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        status = p2pSetPs(hHal, data);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_ConfigureRxpFilter
    \brief Ask the lower MAC to set/reset the RXP filter for multicast and
           broadcast traffic, serialized through the MC thread.
    \param hHal - The handle returned by macOpen.
    \param wlanRxpFilterParam - filter configuration; currently a 1/0
           (set/reset) value. Could later carry a mask such as
           FILTER_ALL_MULTICAST / FILTER_ALL_BROADCAST /
           FILTER_ALL_MULTICAST_BROADCAST.
    \return eHalStatus
  --------------------------------------------------------------------------- */
eHalStatus sme_ConfigureRxpFilter( tHalHandle hHal,
                                   tpSirWlanSetRxpFilters wlanRxpFilterParam)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    vos_msg_t filterMsg;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_CONFIG_RXPFIL, NO_SESSION, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        /* serialize the req through MC thread */
        filterMsg.bodyptr = wlanRxpFilterParam;
        filterMsg.type = WDA_CFG_RXP_FILTER_REQ;
        if (!VOS_IS_STATUS_SUCCESS(
                vos_mq_post_message(VOS_MQ_ID_WDA, &filterMsg)))
        {
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_ConfigureSuspendInd
    \brief Indicate to the lower MAC that WLAN needs to be suspended,
           serialized through the MC thread.
    \param hHal - The handle returned by macOpen.
    \param wlanSuspendParam - the WLAN suspend parameters.
    \return eHalStatus
  --------------------------------------------------------------------------- */
eHalStatus sme_ConfigureSuspendInd( tHalHandle hHal,
                                    tpSirWlanSuspendParam wlanSuspendParam)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    vos_msg_t suspendMsg;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_CONFIG_SUSPENDIND, NO_SESSION, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        /* serialize the req through MC thread */
        suspendMsg.bodyptr = wlanSuspendParam;
        suspendMsg.type = WDA_WLAN_SUSPEND_IND;
        if (!VOS_IS_STATUS_SUCCESS(
                vos_mq_post_message(VOS_MQ_ID_WDA, &suspendMsg)))
        {
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_ConfigureResumeReq
    \brief Indicate to the lower MAC that WLAN needs to be resumed,
           serialized through the MC thread.
    \param hHal - The handle returned by macOpen.
    \param wlanResumeParam - the WLAN resume parameters.
    \return eHalStatus
  --------------------------------------------------------------------------- */
eHalStatus sme_ConfigureResumeReq( tHalHandle hHal,
                                   tpSirWlanResumeParam wlanResumeParam)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    vos_msg_t resumeMsg;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_CONFIG_RESUMEREQ, NO_SESSION, 0));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS == status)
    {
        /* serialize the req through MC thread */
        resumeMsg.bodyptr = wlanResumeParam;
        resumeMsg.type = WDA_WLAN_RESUME_REQ;
        if (!VOS_IS_STATUS_SUCCESS(
                vos_mq_post_message(VOS_MQ_ID_WDA, &resumeMsg)))
        {
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_GetInfraSessionId
    \brief Get the session ID of the connected infra session, if any.
           This is a synchronous API.
    \param hHal - The handle returned by macOpen.
    \return session id, or -1 if no infra session is connected (or the
            SME lock could not be taken).
  -------------------------------------------------------------------------------*/
tANI_S8 sme_GetInfraSessionId(tHalHandle hHal)
{
    tANI_S8 sessionid = -1;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    if (HAL_STATUS_SUCCESS(sme_AcquireGlobalLock(&pMac->sme)))
    {
        sessionid = csrGetInfraSessionId(pMac);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return sessionid;
}
/* ---------------------------------------------------------------------------
    \fn sme_GetInfraOperationChannel
    \brief Get the operating channel of the infra session, if connected.
           This is a synchronous API.
    \param hHal - The handle returned by macOpen.
    \param sessionId - the sessionId returned by sme_OpenSession.
    \return operating channel, or 0 if the infra session is not connected
            (or the SME lock could not be taken).
  -------------------------------------------------------------------------------*/
tANI_U8 sme_GetInfraOperationChannel( tHalHandle hHal, tANI_U8 sessionId)
{
    tANI_U8 channel = 0;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    if (HAL_STATUS_SUCCESS(sme_AcquireGlobalLock(&pMac->sme)))
    {
        channel = csrGetInfraOperationChannel(pMac, sessionId);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return channel;
}
/* Return the operating channel of the other BSS for concurrency decisions;
 * 0 when no other BSS is up or connected (or the lock cannot be taken). */
tANI_U8 sme_GetConcurrentOperationChannel( tHalHandle hHal )
{
    tANI_U8 channel = 0;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    if (HAL_STATUS_SUCCESS(sme_AcquireGlobalLock(&pMac->sme)))
    {
        channel = csrGetConcurrentOperationChannel(pMac);
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO_HIGH, "%s: "
            " Other Concurrent Channel = %d", __func__,channel);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return channel;
}
#ifdef FEATURE_WLAN_SCAN_PNO
/******************************************************************************
*
* Name: sme_PreferredNetworkFoundInd
*
* Description:
* Invoke Preferred Network Found Indication: validates the PNO match
* message, flushes stale scan results, saves the received frame as a scan
* result, and finally invokes the HDD-registered callback.
*
* Parameters:
* hHal - HAL handle for device
* pMsg - found network description (tSirPrefNetworkFoundInd)
*
* Returns: eHalStatus
*
******************************************************************************/
eHalStatus sme_PreferredNetworkFoundInd (tHalHandle hHal, void* pMsg)
{
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
eHalStatus status = eHAL_STATUS_SUCCESS;
/* Cast is safe to compute but is only dereferenced after the NULL check. */
tSirPrefNetworkFoundInd *pPrefNetworkFoundInd = (tSirPrefNetworkFoundInd *)pMsg;
/* +1 for the NUL terminator added below for logging. */
v_U8_t dumpSsId[SIR_MAC_MAX_SSID_LENGTH + 1];
tANI_U8 ssIdLength = 0;
if (NULL == pMsg)
{
smsLog(pMac, LOGE, "in %s msg ptr is NULL", __func__);
status = eHAL_STATUS_FAILURE;
}
else
{
if (pPrefNetworkFoundInd->ssId.length > 0)
{
/* Copy a bounded, NUL-terminated SSID purely for the log line. */
ssIdLength = CSR_MIN(SIR_MAC_MAX_SSID_LENGTH,
pPrefNetworkFoundInd->ssId.length);
vos_mem_copy(dumpSsId, pPrefNetworkFoundInd->ssId.ssId, ssIdLength);
dumpSsId[ssIdLength] = 0;
smsLog(pMac, LOG1, FL(" SSID=%s frame length %d"),
dumpSsId, pPrefNetworkFoundInd->frameLength);
/* Flush scan results, So as to avoid indication/updation of
* stale entries, which may not have aged out during APPS collapse
*/
sme_ScanFlushResult(hHal,0);
//Save the frame to scan result
if (pPrefNetworkFoundInd->mesgLen > sizeof(tSirPrefNetworkFoundInd))
{
//we may have a frame
status = csrScanSavePreferredNetworkFound(pMac,
pPrefNetworkFoundInd);
if (!HAL_STATUS_SUCCESS(status))
{
smsLog(pMac, LOGE, FL(" fail to save preferred network"));
}
}
else
{
/* Message too short to carry a frame; status stays SUCCESS so the
* callback below still fires with the bare indication. */
smsLog(pMac, LOGE, FL(" not enough data length %u needed %zu"),
pPrefNetworkFoundInd->mesgLen, sizeof(tSirPrefNetworkFoundInd));
}
/* Call Preferred Network Found Indication callback routine;
* skipped when saving the frame above failed. */
if (HAL_STATUS_SUCCESS(status) && (pMac->pmc.prefNetwFoundCB != NULL))
{
pMac->pmc.prefNetwFoundCB(
pMac->pmc.preferredNetworkFoundIndCallbackContext,
pPrefNetworkFoundInd);
}
}
else
{
smsLog(pMac, LOGE, "%s: callback failed - SSID is NULL", __func__);
status = eHAL_STATUS_FAILURE;
}
}
return(status);
}
#endif // FEATURE_WLAN_SCAN_PNO
/* Fetch the CFG valid channel list via CSR under the SME global lock.
 * aValidChannels receives the channels; *len is in/out (capacity/count). */
eHalStatus sme_GetCfgValidChannels(tHalHandle hHal, tANI_U8 *aValidChannels, tANI_U32 *len)
{
    eHalStatus status;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        status = csrGetCfgValidChannels(pMac, aValidChannels, len);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_SetTxPerTracking
\brief Set Tx PER tracking configuration parameters: records the PER-hit
       callback in the SME context, then posts a copy of the tracking
       parameters to WDA.
\param hHal - The handle returned by macOpen.
\param pCallbackfn - invoked when the Tx PER threshold is hit.
\param pCallbackContext - opaque context for pCallbackfn.
\param pTxPerTrackingParam - Tx PER configuration parameters (copied).
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_SetTxPerTracking(tHalHandle hHal,
void (*pCallbackfn) (void *pCallbackContext),
void *pCallbackContext,
tpSirTxPerTrackingParam pTxPerTrackingParam)
{
vos_msg_t msg;
tpSirTxPerTrackingParam pTxPerTrackingParamReq = NULL;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
/* NOTE(review): if the lock cannot be taken, the callback registration is
 * skipped but the request is still posted below — confirm this best-effort
 * behavior is intended. */
if ( eHAL_STATUS_SUCCESS == sme_AcquireGlobalLock( &pMac->sme ) )
{
pMac->sme.pTxPerHitCallback = pCallbackfn;
pMac->sme.pTxPerHitCbContext = pCallbackContext;
sme_ReleaseGlobalLock( &pMac->sme );
}
// free this memory in failure case or WDA request callback function
pTxPerTrackingParamReq = vos_mem_malloc(sizeof(tSirTxPerTrackingParam));
if (NULL == pTxPerTrackingParamReq)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for tSirTxPerTrackingParam", __func__);
return eHAL_STATUS_FAILURE;
}
/* Heap copy: ownership passes to WDA on successful post. */
vos_mem_copy(pTxPerTrackingParamReq, (void*)pTxPerTrackingParam,
sizeof(tSirTxPerTrackingParam));
msg.type = WDA_SET_TX_PER_TRACKING_REQ;
msg.reserved = 0;
msg.bodyptr = pTxPerTrackingParamReq;
if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_TX_PER_TRACKING_REQ message to WDA", __func__);
vos_mem_free(pTxPerTrackingParamReq);
return eHAL_STATUS_FAILURE;
}
return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn sme_HandleChangeCountryCode
\brief Change Country code, Reg Domain and channel list
\details Country Code Priority
0 = 11D > Configured Country > NV
1 = Configured Country > 11D > NV
If Supplicant country code is priority than 11d is disabled.
If 11D is enabled, we update the country code after every scan.
Hence when Supplicant country code is priority, we don't need 11D info.
Country code from Supplicant is set as current country code.
User can send reset command XX (instead of country code) to reset the
country code to default values which is read from NV.
In case of reset, 11D is enabled and default NV code is Set as current country code
If 11D is priority,
Than Supplicant country code is set to default code. But 11D code is set as current country code
\param pMac - The handle returned by macOpen.
\param pMsgBuf - MSG Buffer (tAniChangeCountryCodeReq)
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_HandleChangeCountryCode(tpAniSirGlobal pMac, void *pMsgBuf)
{
eHalStatus status = eHAL_STATUS_SUCCESS;
tAniChangeCountryCodeReq *pMsg;
v_REGDOMAIN_t domainIdIoctl;
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
/* static: the NV tables are large; avoid a big stack frame. */
static uNvTables nvTables;
pMsg = (tAniChangeCountryCodeReq *)pMsgBuf;
/* if the reset Supplicant country code command is triggered, enable 11D, reset the NV country code and return */
if( VOS_TRUE == vos_mem_compare(pMsg->countryCode, SME_INVALID_COUNTRY_CODE, 2) )
{
pMac->roam.configParam.Is11dSupportEnabled = pMac->roam.configParam.Is11dSupportEnabledOriginal;
vosStatus = vos_nv_readDefaultCountryTable( &nvTables );
/* read the country code from NV and use it */
if ( VOS_IS_STATUS_SUCCESS(vosStatus) )
{
/* Replace the invalid code in the request with the NV default, then
 * fall through to the normal set path below. */
vos_mem_copy(pMsg->countryCode,
nvTables.defaultCountryTable.countryCode,
WNI_CFG_COUNTRY_CODE_LEN);
}
else
{
status = eHAL_STATUS_FAILURE;
return status;
}
/* Update the 11d country to default country from NV bin so that when
* callback is received for this default country, driver will not
* disable the 11d taking it as valid country by user.
*/
smsLog(pMac, LOG1,
FL("Set default country code (%c%c) from NV as invalid country received"),
pMsg->countryCode[0],pMsg->countryCode[1]);
vos_mem_copy(pMac->scan.countryCode11d, pMsg->countryCode,
WNI_CFG_COUNTRY_CODE_LEN);
}
else
{
/* if Supplicant country code has priority, disable 11d */
if(pMac->roam.configParam.fSupplicantCountryCodeHasPriority &&
pMsg->countryFromUserSpace)
{
pMac->roam.configParam.Is11dSupportEnabled = eANI_BOOLEAN_FALSE;
}
}
/* WEXT set country code means
* 11D should be supported?
* 11D Channel should be enforced?
* 11D Country code should be matched?
* 11D Reg Domain should be matched?
* Country string changed */
if(pMac->roam.configParam.Is11dSupportEnabled &&
pMac->roam.configParam.fEnforce11dChannels &&
pMac->roam.configParam.fEnforceCountryCodeMatch &&
pMac->roam.configParam.fEnforceDefaultDomain &&
!csrSave11dCountryString(pMac, pMsg->countryCode, eANI_BOOLEAN_TRUE))
{
/* All 11D related options are already enabled
* Country string is not changed
* Do not need do anything for country code change request */
return eHAL_STATUS_SUCCESS;
}
/* Set Current Country code and Current Regulatory domain */
status = csrSetCountryCode(pMac, pMsg->countryCode, NULL);
if(eHAL_STATUS_SUCCESS != status)
{
/* Supplicant country code failed. So give 11D priority */
pMac->roam.configParam.Is11dSupportEnabled = pMac->roam.configParam.Is11dSupportEnabledOriginal;
smsLog(pMac, LOGE, "Set Country Code Fail %d", status);
return status;
}
/* overwrite the default country code */
vos_mem_copy(pMac->scan.countryCodeDefault,
pMac->scan.countryCodeCurrent,
WNI_CFG_COUNTRY_CODE_LEN);
/* Get Domain ID from country code */
status = csrGetRegulatoryDomainForCountry(pMac,
pMac->scan.countryCodeCurrent,
(v_REGDOMAIN_t *) &domainIdIoctl,
COUNTRY_QUERY);
if ( status != eHAL_STATUS_SUCCESS )
{
smsLog( pMac, LOGE, FL(" fail to get regId %d"), domainIdIoctl );
return status;
}
else if (REGDOMAIN_WORLD == domainIdIoctl)
{
/* Supplicant country code is invalid, so we are on world mode now. So
give 11D chance to update */
pMac->roam.configParam.Is11dSupportEnabled = pMac->roam.configParam.Is11dSupportEnabledOriginal;
smsLog(pMac, LOG1, FL("Country Code unrecognized by driver"));
}
status = WDA_SetRegDomain(pMac, domainIdIoctl, pMsg->sendRegHint);
if ( status != eHAL_STATUS_SUCCESS )
{
smsLog( pMac, LOGE, FL(" fail to set regId %d"), domainIdIoctl );
return status;
}
else
{
//if 11d has priority, clear currentCountryBssid & countryCode11d to get
//set again if we find AP with 11d info during scan
if (!pMac->roam.configParam.fSupplicantCountryCodeHasPriority)
{
smsLog( pMac, LOGW, FL("Clearing currentCountryBssid, countryCode11d"));
vos_mem_zero(&pMac->scan.currentCountryBssid, sizeof(tCsrBssid));
vos_mem_zero( pMac->scan.countryCode11d, sizeof( pMac->scan.countryCode11d ) );
}
}
#ifndef CONFIG_ENABLE_LINUX_REG
/* set to default domain ID */
pMac->scan.domainIdDefault = pMac->scan.domainIdCurrent;
/* get the channels based on new cc */
status = csrInitGetChannels( pMac );
if ( status != eHAL_STATUS_SUCCESS )
{
smsLog( pMac, LOGE, FL(" fail to get Channels "));
return status;
}
/* reset info based on new cc, and we are done */
csrResetCountryInformation(pMac, eANI_BOOLEAN_TRUE, eANI_BOOLEAN_TRUE);
/* Country code Changed, Purge Only scan result
* which does not have channel number belong to 11d
* channel list
*/
csrScanFilterResults(pMac);
#endif
/* Finally notify the requester (if a callback was supplied). */
if( pMsg->changeCCCallback )
{
((tSmeChangeCountryCallback)(pMsg->changeCCCallback))((void *)pMsg->pDevContext);
}
return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
    \fn sme_HandleChangeCountryCodeByUser
    \brief Change country code, regulatory domain and channel list.
           If the supplicant country code has priority, then 11d is disabled.
           If 11d is enabled, we update the country code after every scan;
           hence when the supplicant country code has priority, we don't need
           11d information. The country code from the supplicant is set as the
           current country code.
    \param pMac - The handle returned by macOpen.
    \param pMsg - Carrying new CC & domain set in kernel by user
    \return eHalStatus
  -------------------------------------------------------------------------------*/
eHalStatus sme_HandleChangeCountryCodeByUser(tpAniSirGlobal pMac,
                                             tAniGenericChangeCountryCodeReq *pMsg)
{
    eHalStatus status = eHAL_STATUS_SUCCESS;
    v_REGDOMAIN_t reg_domain_id;
    v_BOOL_t is11dCountry = VOS_FALSE;

    smsLog(pMac, LOG1, FL(" called"));
    reg_domain_id = (v_REGDOMAIN_t)pMsg->domain_index;

    /* Does the requested code match the country learned from 11d IEs? */
    if (memcmp(pMsg->countryCode, pMac->scan.countryCode11d,
               VOS_COUNTRY_CODE_LEN) == 0)
    {
        is11dCountry = VOS_TRUE;
    }

    /* Set the country code given by userspace when 11dOriginal is FALSE.
     * When 11dOriginal is TRUE, is11dCountry == 0 and
     * fSupplicantCountryCodeHasPriority == 0, revert the country code
     * and return failure.
     */
    if (pMac->roam.configParam.Is11dSupportEnabledOriginal == true)
    {
        if ((!is11dCountry) &&
            (!pMac->roam.configParam.fSupplicantCountryCodeHasPriority) &&
            (!pMac->roam.configParam.fEnforceCountryCode))
        {
            smsLog(pMac, LOGW,
                   FL(" incorrect country being set, nullify this request"));
            /* Revert the regulatory domain to the 11d country code.
             * BUGFIX: the address-of operator had been corrupted into a
             * non-ASCII character ("(r)_domain_id"), which does not compile. */
            status = csrGetRegulatoryDomainForCountry(pMac,
                                                      pMac->scan.countryCode11d,
                                                      &reg_domain_id,
                                                      COUNTRY_IE);
            return eHAL_STATUS_FAILURE;
        }
    }

    pMac->roam.configParam.fEnforceCountryCode = eANI_BOOLEAN_FALSE;

    /* If the supplicant country code has priority, disable 11d. */
    if (!is11dCountry && pMac->roam.configParam.fSupplicantCountryCodeHasPriority)
    {
        pMac->roam.configParam.Is11dSupportEnabled = eANI_BOOLEAN_FALSE;
        smsLog(pMac, LOG1, FL(" 11d is being disabled"));
    }

    vos_mem_copy(pMac->scan.countryCodeCurrent, pMsg->countryCode,
                 WNI_CFG_COUNTRY_CODE_LEN);
    status = WDA_SetRegDomain(pMac, reg_domain_id, eSIR_TRUE);

    if (VOS_FALSE == is11dCountry)
    {
        /* Overwrite the default country code. */
        vos_mem_copy(pMac->scan.countryCodeDefault,
                     pMac->scan.countryCodeCurrent, WNI_CFG_COUNTRY_CODE_LEN);
        /* Set to default domain ID. */
        pMac->scan.domainIdDefault = pMac->scan.domainIdCurrent;
    }

    if (status != eHAL_STATUS_SUCCESS)
    {
        smsLog(pMac, LOGE, FL(" fail to set regId %d"), reg_domain_id);
        return status;
    }
    else
    {
        /* If 11d has priority, clear currentCountryBssid & countryCode11d so
         * they get set again if we find an AP with 11d info during scan. */
        if ((!pMac->roam.configParam.fSupplicantCountryCodeHasPriority) &&
            (VOS_FALSE == is11dCountry))
        {
            smsLog(pMac, LOGW, FL("Clearing currentCountryBssid, countryCode11d"));
            vos_mem_zero(&pMac->scan.currentCountryBssid, sizeof(tCsrBssid));
            vos_mem_zero(pMac->scan.countryCode11d,
                         sizeof(pMac->scan.countryCode11d));
        }
    }

    /* Get the channels based on the new country code. */
    status = csrInitGetChannels(pMac);
    if (status != eHAL_STATUS_SUCCESS)
    {
        smsLog(pMac, LOGE, FL(" fail to get Channels "));
        return status;
    }

    /* Reset country information based on the new country code; we are done. */
    csrResetCountryInformation(pMac, eANI_BOOLEAN_TRUE, eANI_BOOLEAN_TRUE);
    if (VOS_TRUE == is11dCountry)
    {
        pMac->scan.f11dInfoApplied = eANI_BOOLEAN_TRUE;
        pMac->scan.f11dInfoReset = eANI_BOOLEAN_FALSE;
    }

    /* Country code changed: purge only scan results whose channel number
     * does not belong to the 11d channel list. */
    csrScanFilterResults(pMac);

    /* Do active scans after the country is set by user hints or country IE. */
    pMac->scan.curScanType = eSIR_ACTIVE_SCAN;

    /* Drop STA / P2P-client sessions operating on now-unsupported channels. */
    sme_DisconnectConnectedSessions(pMac);

    smsLog(pMac, LOG1, FL(" returned"));
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn sme_HandleChangeCountryCodeByCore
\brief Update Country code in the driver if set by kernel as world
If 11D is enabled, we update the country code after every scan & notify kernel.
This is to make sure kernel & driver are in sync in case of CC found in
driver but not in kernel database
\param pMac - The handle returned by macOpen.
\param pMsg - Carrying new CC set in kernel
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_HandleChangeCountryCodeByCore(tpAniSirGlobal pMac, tAniGenericChangeCountryCodeReq *pMsg)
{
    eHalStatus status;

    smsLog(pMac, LOG1, FL(" called"));

    /* This is to make sure kernel & driver are in sync in case of a CC found
     * in the driver but not in the kernel database. A "00" code from the
     * kernel denotes the world regulatory domain. */
    if (('0' == pMsg->countryCode[0]) && ('0' == pMsg->countryCode[1]))
    {
        smsLog( pMac, LOGW, FL("Setting countryCode11d & countryCodeCurrent to world CC"));
        vos_mem_copy(pMac->scan.countryCode11d, pMsg->countryCode,
                     WNI_CFG_COUNTRY_CODE_LEN);
        vos_mem_copy(pMac->scan.countryCodeCurrent, pMsg->countryCode,
                     WNI_CFG_COUNTRY_CODE_LEN);
    }

    /* Core-originated requests always move the firmware to the world domain,
     * with a regulatory hint sent to the kernel (eSIR_TRUE). */
    status = WDA_SetRegDomain(pMac, REGDOMAIN_WORLD, eSIR_TRUE);
    if ( status != eHAL_STATUS_SUCCESS )
    {
        smsLog( pMac, LOGE, FL(" fail to set regId") );
        return status;
    }
    else
    {
        /* Rebuild the channel list for the new (world) domain. */
        status = csrInitGetChannels(pMac);
        if ( status != eHAL_STATUS_SUCCESS )
        {
            smsLog( pMac, LOGE, FL(" fail to get Channels "));
        }
        else
        {
            csrInitChannelList(pMac);
        }
    }

    /* Country code changed: purge only scan results whose channel number
     * does not belong to the 11d channel list. */
    csrScanFilterResults(pMac);

    smsLog(pMac, LOG1, FL(" returned"));
    /* NOTE(review): a csrInitGetChannels failure above is logged but the
     * function still reports success — presumably intentional best-effort;
     * confirm against callers before changing. */
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
    \fn sme_DisconnectConnectedSessions
    \brief Disconnect STA and P2P client sessions if the channel is not supported.
           If the new country code does not support the channel on which a
           STA / P2P client is connected, a disconnect is sent to the AP / P2P GO.
    \param pMac - The handle returned by macOpen
    \return None
  -------------------------------------------------------------------------------*/
void sme_DisconnectConnectedSessions(tpAniSirGlobal pMac)
{
    v_U8_t chanIdx, sessionId, isChanFound;
    tANI_U8 operChannel;

    /* Walk every roam session; tear down STA / P2P-client links whose
     * operating channel is no longer present in either base channel list. */
    for (sessionId = 0; sessionId < CSR_ROAM_SESSION_MAX; sessionId++)
    {
        if (!csrIsSessionClientAndConnected(pMac, sessionId))
        {
            continue;
        }

        /* Session is connected; check its channel. */
        operChannel = csrGetInfraOperationChannel(pMac, sessionId);
        smsLog(pMac, LOGW, "Current Operating channel : %d, session :%d",
               operChannel, sessionId);

        /* Search the 20 MHz list first, then the 40 MHz list; both loops
         * short-circuit once the channel is found. */
        isChanFound = false;
        for (chanIdx = 0;
             !isChanFound && chanIdx < pMac->scan.base20MHzChannels.numChannels;
             chanIdx++)
        {
            if (pMac->scan.base20MHzChannels.channelList[chanIdx] == operChannel)
            {
                isChanFound = true;
            }
        }
        for (chanIdx = 0;
             !isChanFound && chanIdx < pMac->scan.base40MHzChannels.numChannels;
             chanIdx++)
        {
            if (pMac->scan.base40MHzChannels.channelList[chanIdx] == operChannel)
            {
                isChanFound = true;
            }
        }

        if (!isChanFound)
        {
            smsLog(pMac, LOGW, "%s : Disconnect Session :%d", __func__, sessionId);
            csrRoamDisconnect(pMac, sessionId, eCSR_DISCONNECT_REASON_UNSPECIFIED);
        }
    }
}
/* ---------------------------------------------------------------------------
\fn sme_HandleGenericChangeCountryCode
\brief Change Country code, Reg Domain and channel list
If Supplicant country code is priority than 11d is disabled.
If 11D is enabled, we update the country code after every scan.
Hence when Supplicant country code is priority, we don't need 11D info.
Country code from kernel is set as current country code.
\param pMac - The handle returned by macOpen.
\param pMsgBuf - message buffer
\return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_HandleGenericChangeCountryCode(tpAniSirGlobal pMac, void *pMsgBuf)
{
    tAniGenericChangeCountryCodeReq *pReq =
        (tAniGenericChangeCountryCodeReq *)pMsgBuf;

    smsLog(pMac, LOG1, FL(" called"));

    /* REGDOMAIN_COUNT acts as a sentinel meaning the request came from the
     * regulatory core; any other domain index is a user request. */
    if (REGDOMAIN_COUNT == (v_REGDOMAIN_t)pReq->domain_index)
    {
        sme_HandleChangeCountryCodeByCore(pMac, pReq);
    }
    else
    {
        sme_HandleChangeCountryCodeByUser(pMac, pReq);
    }

    smsLog(pMac, LOG1, FL(" returned"));
    return eHAL_STATUS_SUCCESS;
}
#ifdef WLAN_FEATURE_PACKET_FILTERING
/* Push an 802.3 multicast address filter list to WDA for the connected
 * Infra / P2P-client session identified by sessionId. */
eHalStatus sme_8023MulticastList (tHalHandle hHal, tANI_U8 sessionId, tpSirRcvFltMcAddrList pMulticastAddrs)
{
    tpSirRcvFltMcAddrList pRequestBuf;
    vos_msg_t msg;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = NULL;

    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: "
               "ulMulticastAddrCnt=%d, multicastAddr[0]=%p", __func__,
               pMulticastAddrs->ulMulticastAddrCnt,
               pMulticastAddrs->multicastAddr[0]);

    /* Find the connected Infra / P2P_client connected session. */
    if (CSR_IS_SESSION_VALID(pMac, sessionId) &&
        csrIsConnStateInfra(pMac, sessionId))
    {
        pSession = CSR_GET_SESSION( pMac, sessionId );
    }

    if (pSession == NULL)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Unable to find "
                  "the right session", __func__);
        return eHAL_STATUS_FAILURE;
    }

    /* Check the connection state before allocating, so a not-connected
     * request does not go through a pointless malloc/free cycle.
     * (Previously the allocation happened first and was freed again here.) */
    if (!csrIsConnStateConnectedInfra(pMac, sessionId))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Ignoring the "
                  "indication as we are not connected", __func__);
        return eHAL_STATUS_FAILURE;
    }

    pRequestBuf = vos_mem_malloc(sizeof(tSirRcvFltMcAddrList));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to "
                  "allocate memory for 8023 Multicast List request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    vos_mem_copy(pRequestBuf, pMulticastAddrs, sizeof(tSirRcvFltMcAddrList));
    /* Stamp the request with our own MAC and the current BSSID. */
    vos_mem_copy(pRequestBuf->selfMacAddr, pSession->selfMacAddr,
                 sizeof(tSirMacAddr));
    vos_mem_copy(pRequestBuf->bssId, pSession->connectedProfile.bssid,
                 sizeof(tSirMacAddr));

    msg.type = WDA_8023_MULTICAST_LIST_REQ;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    /* WDA owns pRequestBuf on successful post; free it only on failure. */
    if (VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to "
                  "post WDA_8023_MULTICAST_LIST message to WDA", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
/* Configure a receive packet filter for the given session and forward the
 * request to WDA. Also writes our self MAC / BSSID back into the caller's
 * pRcvPktFilterCfg (pre-existing contract, preserved). */
eHalStatus sme_ReceiveFilterSetFilter(tHalHandle hHal, tpSirRcvPktFilterCfgType pRcvPktFilterCfg,
                                      tANI_U8 sessionId)
{
    tpSirRcvPktFilterCfgType pRequestBuf;
    v_SINT_t allocSize;
    vos_msg_t msg;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
    v_U8_t idx = 0;

    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: filterType=%d, "
               "filterId = %d", __func__,
               pRcvPktFilterCfg->filterType, pRcvPktFilterCfg->filterId);

    /* Validate the session before allocating, so an invalid session does not
     * go through a pointless malloc/free cycle.
     * (Previously the allocation happened first and was freed again here.) */
    if (NULL == pSession)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Session Not found ", __func__);
        return eHAL_STATUS_FAILURE;
    }

    allocSize = sizeof(tSirRcvPktFilterCfgType);
    pRequestBuf = vos_mem_malloc(allocSize);
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to "
                  "allocate memory for Receive Filter Set Filter request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    /* Stamp the caller's config with our own MAC and the current BSSID,
     * then copy the whole config into the WDA request buffer. */
    vos_mem_copy(pRcvPktFilterCfg->selfMacAddr, pSession->selfMacAddr,
                 sizeof(tSirMacAddr));
    vos_mem_copy(pRcvPktFilterCfg->bssId, pSession->connectedProfile.bssid,
                 sizeof(tSirMacAddr));
    vos_mem_copy(pRequestBuf, pRcvPktFilterCfg, allocSize);

    msg.type = WDA_RECEIVE_FILTER_SET_FILTER_REQ;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "Pkt Flt Req : "
              "FT %d FID %d ",
              pRequestBuf->filterType, pRequestBuf->filterId);
    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "Pkt Flt Req : "
              "params %d CT %d",
              pRequestBuf->numFieldParams, pRequestBuf->coalesceTime);
    /* Dump each field parameter for debugging. */
    for (idx = 0; idx < pRequestBuf->numFieldParams; idx++)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "Proto %d Comp Flag %d ",
                  pRequestBuf->paramsData[idx].protocolLayer,
                  pRequestBuf->paramsData[idx].cmpFlag);
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "Data Offset %d Data Len %d",
                  pRequestBuf->paramsData[idx].dataOffset,
                  pRequestBuf->paramsData[idx].dataLength);
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "CData: %d:%d:%d:%d:%d:%d",
                  pRequestBuf->paramsData[idx].compareData[0],
                  pRequestBuf->paramsData[idx].compareData[1],
                  pRequestBuf->paramsData[idx].compareData[2],
                  pRequestBuf->paramsData[idx].compareData[3],
                  pRequestBuf->paramsData[idx].compareData[4],
                  pRequestBuf->paramsData[idx].compareData[5]);
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "MData: %d:%d:%d:%d:%d:%d",
                  pRequestBuf->paramsData[idx].dataMask[0],
                  pRequestBuf->paramsData[idx].dataMask[1],
                  pRequestBuf->paramsData[idx].dataMask[2],
                  pRequestBuf->paramsData[idx].dataMask[3],
                  pRequestBuf->paramsData[idx].dataMask[4],
                  pRequestBuf->paramsData[idx].dataMask[5]);
    }

    /* WDA owns pRequestBuf on successful post; free it only on failure. */
    if (VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post "
                  "WDA_RECEIVE_FILTER_SET_FILTER message to WDA", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
eHalStatus sme_GetFilterMatchCount(tHalHandle hHal,
                                   FilterMatchCountCallback callbackRoutine,
                                   void *callbackContext,
                                   tANI_U8 sessionId)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "+%s", __func__);

    status = sme_AcquireGlobalLock( &pMac->sme );
    if (eHAL_STATUS_SUCCESS == status)
    {
        /* PMC reports the match count asynchronously via callbackRoutine. */
        pmcGetFilterMatchCount(hHal, callbackRoutine, callbackContext, sessionId);
        sme_ReleaseGlobalLock( &pMac->sme );
    }

    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "-%s", __func__);
    return status;
}
/* Clear a previously-set receive packet filter for the given session and
 * forward the request to WDA. Also writes our self MAC / BSSID back into the
 * caller's pRcvFltPktClearParam (pre-existing contract, preserved). */
eHalStatus sme_ReceiveFilterClearFilter(tHalHandle hHal, tpSirRcvFltPktClearParam pRcvFltPktClearParam,
                                        tANI_U8 sessionId)
{
    tpSirRcvFltPktClearParam pRequestBuf;
    vos_msg_t msg;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );

    VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO, "%s: filterId = %d", __func__,
               pRcvFltPktClearParam->filterId);

    /* Validate the session before allocating, so an invalid session does not
     * go through a pointless malloc/free cycle (consistent with
     * sme_ReceiveFilterSetFilter). */
    if (NULL == pSession)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Session Not find ", __func__);
        return eHAL_STATUS_FAILURE;
    }

    pRequestBuf = vos_mem_malloc(sizeof(tSirRcvFltPktClearParam));
    if (NULL == pRequestBuf)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for Receive Filter "
                  "Clear Filter request", __func__);
        return eHAL_STATUS_FAILED_ALLOC;
    }

    /* Stamp the caller's params with our own MAC and the current BSSID,
     * then copy the whole structure into the WDA request buffer. */
    vos_mem_copy(pRcvFltPktClearParam->selfMacAddr, pSession->selfMacAddr,
                 sizeof(tSirMacAddr));
    vos_mem_copy(pRcvFltPktClearParam->bssId, pSession->connectedProfile.bssid,
                 sizeof(tSirMacAddr));
    vos_mem_copy(pRequestBuf, pRcvFltPktClearParam, sizeof(tSirRcvFltPktClearParam));

    msg.type = WDA_RECEIVE_FILTER_CLEAR_FILTER_REQ;
    msg.reserved = 0;
    msg.bodyptr = pRequestBuf;
    /* WDA owns pRequestBuf on successful post; free it only on failure. */
    if (VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post "
                  "WDA_RECEIVE_FILTER_CLEAR_FILTER message to WDA", __func__);
        vos_mem_free(pRequestBuf);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
#endif // WLAN_FEATURE_PACKET_FILTERING
/* ---------------------------------------------------------------------------
\fn sme_PreChannelSwitchIndFullPowerCB
\brief call back function for the PMC full power request because of pre
channel switch.
\param callbackContext
\param status
---------------------------------------------------------------------------*/
void sme_PreChannelSwitchIndFullPowerCB(void *callbackContext,
                                        eHalStatus status)
{
    tpAniSirGlobal pMac = (tpAniSirGlobal)callbackContext;
    tANI_U16 msgLen = (tANI_U16)(sizeof( tSirMbMsg ));
    tSirMbMsg *pMsg = vos_mem_malloc(msgLen);

    /* Allocation failure: silently drop the indication, as before. */
    if (NULL == pMsg)
    {
        return;
    }

    /* Build a bare MB message announcing full power ahead of the switch;
     * header fields go over the wire big-endian. */
    vos_mem_set(pMsg, msgLen, 0);
    pMsg->type = pal_cpu_to_be16((tANI_U16)eWNI_SME_PRE_CHANNEL_SWITCH_FULL_POWER);
    pMsg->msgLen = pal_cpu_to_be16(msgLen);
    status = palSendMBMessage(pMac->hHdd, pMsg);
}
/* ---------------------------------------------------------------------------
    \fn sme_HandlePreChannelSwitchInd
    \brief Processes the indication from PE for pre-channel switch.
    \param hHal - The handle returned by macOpen.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_HandlePreChannelSwitchInd(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status = sme_AcquireGlobalLock( &pMac->sme );

    if ( HAL_STATUS_SUCCESS( status ) )
    {
        /* Ask PMC for full power; completion is signalled through the
         * pre-channel-switch callback. */
        status = pmcRequestFullPower(hHal, sme_PreChannelSwitchIndFullPowerCB,
                                     pMac, eSME_FULL_PWR_NEEDED_BY_CHANNEL_SWITCH);
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_HandlePostChannelSwitchInd
    \brief Processes the indication from PE for post-channel switch.
    \param hHal - The handle returned by macOpen.
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_HandlePostChannelSwitchInd(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status = sme_AcquireGlobalLock( &pMac->sme );

    if ( HAL_STATUS_SUCCESS( status ) )
    {
        /* Channel switch is done; let PMC drop back into BMPS. */
        status = pmcRequestBmps(hHal, NULL, NULL);
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_IsChannelValid
\brief To check if the channel is valid for currently established domain
This is a synchronous API.
\param hHal - The handle returned by macOpen.
\param channel - channel to verify
\return TRUE/FALSE, TRUE if channel is valid
-------------------------------------------------------------------------------*/
tANI_BOOLEAN sme_IsChannelValid(tHalHandle hHal, tANI_U8 channel)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    tANI_BOOLEAN valid = FALSE;

    /* Synchronous query under the SME lock; FALSE if the lock is busy. */
    if ( HAL_STATUS_SUCCESS( sme_AcquireGlobalLock( &pMac->sme ) ) )
    {
        valid = csrRoamIsChannelValid( pMac, channel );
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return valid;
}
/* ---------------------------------------------------------------------------
\fn sme_SetFreqBand
\brief Used to set frequency band.
\param hHal
\eBand band value to be configured
\- return eHalStatus
-------------------------------------------------------------------------*/
eHalStatus sme_SetFreqBand(tHalHandle hHal, eCsrBand eBand)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status = sme_AcquireGlobalLock( &pMac->sme );

    if ( HAL_STATUS_SUCCESS( status ) )
    {
        /* CSR takes the raw handle, not pMac, for band configuration. */
        status = csrSetBand(hHal, eBand);
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_GetFreqBand
\brief Used to get the current band settings.
\param hHal
\pBand pointer to hold band value
\- return eHalStatus
-------------------------------------------------------------------------*/
eHalStatus sme_GetFreqBand(tHalHandle hHal, eCsrBand *pBand)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status = sme_AcquireGlobalLock( &pMac->sme );

    if ( HAL_STATUS_SUCCESS( status ) )
    {
        /* Read the current band under the SME lock. */
        *pBand = csrGetCurrentBand( hHal );
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
#ifdef WLAN_WAKEUP_EVENTS
/******************************************************************************
\fn sme_WakeReasonIndCallback
\brief
a callback function called when SME received eWNI_SME_WAKE_REASON_IND event from WDA
\param hHal - HAL handle for device
\param pMsg - Message body passed from WDA; includes Wake Reason Indication parameter
\return eHalStatus
******************************************************************************/
eHalStatus sme_WakeReasonIndCallback (tHalHandle hHal, void* pMsg)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tSirWakeReasonInd *pWakeReasonInd = (tSirWakeReasonInd *)pMsg;

    /* Guard clause: nothing to deliver without a message body. */
    if (NULL == pMsg)
    {
        smsLog(pMac, LOGE, "in %s msg ptr is NULL", __func__);
        return eHAL_STATUS_FAILURE;
    }

    smsLog(pMac, LOG2, "SME: entering sme_WakeReasonIndCallback");

    /* Call Wake Reason Indication callback routine, if registered. */
    if (pMac->pmc.wakeReasonIndCB != NULL)
        pMac->pmc.wakeReasonIndCB(pMac->pmc.wakeReasonIndCBContext, pWakeReasonInd);

    smsLog(pMac, LOG1, "Wake Reason Indication in %s(), reason=%d", __func__, pWakeReasonInd->ulReason);
    return eHAL_STATUS_SUCCESS;
}
#endif // WLAN_WAKEUP_EVENTS
/* ---------------------------------------------------------------------------
\fn sme_SetMaxTxPower
\brief Set the Maximum Transmit Power dynamically. Note: this setting will
not persist over reboots.
\param hHal
\param pBssid BSSID to set the power cap for
\param pBssid pSelfMacAddress self MAC Address
\param pBssid power to set in dB
\- return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_SetMaxTxPower(tHalHandle hHal, tSirMacAddr pBssid,
                             tSirMacAddr pSelfMacAddress, v_S7_t dB)
{
    vos_msg_t msg;
    tpMaxTxPowerParams pTxPowerReq;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SET_MAXTXPOW, NO_SESSION, 0));

    pTxPowerReq = vos_mem_malloc(sizeof(tMaxTxPowerParams));
    if (NULL == pTxPowerReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to allocate memory for pMaxTxParams", __func__);
        return eHAL_STATUS_FAILURE;
    }

    /* Fill the per-BSS power-cap request. */
    vos_mem_copy(pTxPowerReq->bssId, pBssid, SIR_MAC_ADDR_LENGTH);
    vos_mem_copy(pTxPowerReq->selfStaMacAddr, pSelfMacAddress,
                 SIR_MAC_ADDR_LENGTH);
    pTxPowerReq->power = dB;

    msg.type = WDA_SET_MAX_TX_POWER_REQ;
    msg.reserved = 0;
    msg.bodyptr = pTxPowerReq;
    /* WDA owns the buffer on successful post; free it only on failure. */
    if (VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: Not able to post WDA_SET_MAX_TX_POWER_REQ message to WDA", __func__);
        vos_mem_free(pTxPowerReq);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn sme_SetMaxTxPowerPerBand
\brief Set the Maximum Transmit Power specific to band dynamically.
Note: this setting will not persist over reboots.
\param band
\param power to set in dB
\- return eHalStatus
----------------------------------------------------------------------------*/
eHalStatus sme_SetMaxTxPowerPerBand(eCsrBand band, v_S7_t dB)
{
    vos_msg_t msg;
    tpMaxTxPowerPerBandParams pBandPowerReq;

    pBandPowerReq = vos_mem_malloc(sizeof(tMaxTxPowerPerBandParams));
    if (NULL == pBandPowerReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s:Not able to allocate memory for pMaxTxPowerPerBandParams",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    /* Fill the per-band power-cap request. */
    pBandPowerReq->power = dB;
    pBandPowerReq->bandInfo = band;

    msg.type = WDA_SET_MAX_TX_POWER_PER_BAND_REQ;
    msg.reserved = 0;
    msg.bodyptr = pBandPowerReq;
    /* WDA owns the buffer on successful post; free it only on failure. */
    if (VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s:Not able to post WDA_SET_MAX_TX_POWER_PER_BAND_REQ",
                  __func__);
        vos_mem_free(pBandPowerReq);
        return eHAL_STATUS_FAILURE;
    }
    return eHAL_STATUS_SUCCESS;
}
/* ---------------------------------------------------------------------------
\fn sme_SetTxPower
\brief Set Transmit Power dynamically. Note: this setting will
not persist over reboots.
\param hHal
\param sessionId Target Session ID
\param mW power to set in mW
\- return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_SetTxPower(tHalHandle hHal, v_U8_t sessionId, v_U8_t mW)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SET_TXPOW, NO_SESSION, 0));
    smsLog(pMac, LOG1, FL("set tx power %dmW"), mW);

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        /* Delegate the per-session TX power change to CSR. */
        status = csrSetTxPower(pMac, sessionId, mW);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_HideSSID
\brief hide/show SSID dynamically. Note: this setting will
not persist over reboots.
\param hHal
\param sessionId
\param ssidHidden 0 - Broadcast SSID, 1 - Disable broadcast SSID
\- return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_HideSSID(tHalHandle hHal, v_U8_t sessionId, v_U8_t ssidHidden)
{
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tANI_U16 len;
    if ( eHAL_STATUS_SUCCESS == ( status = sme_AcquireGlobalLock( &pMac->sme ) ) )
    {
        tpSirUpdateParams pMsg;
        tCsrRoamSession *pSession = CSR_GET_SESSION( pMac, sessionId );
        if(!pSession)
        {
            smsLog(pMac, LOGE, FL(" session %d not found "), sessionId);
            sme_ReleaseGlobalLock( &pMac->sme );
            return eHAL_STATUS_FAILURE;
        }
        /* An inactive session at this point is a programming error; assert
         * (debug builds) but continue, matching the original behavior. */
        if( !pSession->sessionActive )
            VOS_ASSERT(0);

        /* Create the message and send to lim */
        len = sizeof(tSirUpdateParams);
        pMsg = vos_mem_malloc(len);
        if ( NULL == pMsg )
            status = eHAL_STATUS_FAILURE;
        else
        {
            vos_mem_set(pMsg, sizeof(tSirUpdateParams), 0);
            pMsg->messageType = eWNI_SME_HIDE_SSID_REQ;
            pMsg->length = len;
            /* Data starts from here */
            pMsg->sessionId = sessionId;
            /* 0 = broadcast SSID, 1 = suppress SSID in beacons. */
            pMsg->ssidHidden = ssidHidden;
            /* palSendMBMessage takes ownership of pMsg. */
            status = palSendMBMessage(pMac->hHdd, pMsg);
        }
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_SetTmLevel
\brief Set Thermal Mitigation Level to RIVA
\param hHal - The handle returned by macOpen.
\param newTMLevel - new Thermal Mitigation Level
\param tmMode - Thermal Mitigation handle mode, default 0
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_SetTmLevel(tHalHandle hHal, v_U16_t newTMLevel, v_U16_t tmMode)
{
    eHalStatus status = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    tAniSetTmLevelReq *setTmLevelReq = NULL;
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_SET_TMLEVEL, NO_SESSION, 0));
    if ( eHAL_STATUS_SUCCESS == ( status = sme_AcquireGlobalLock( &pMac->sme ) ) )
    {
        setTmLevelReq = (tAniSetTmLevelReq *)vos_mem_malloc(sizeof(tAniSetTmLevelReq));
        if (NULL == setTmLevelReq)
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                      "%s: Not able to allocate memory for sme_SetTmLevel", __func__);
            sme_ReleaseGlobalLock( &pMac->sme );
            return eHAL_STATUS_FAILURE;
        }
        setTmLevelReq->tmMode = tmMode;
        setTmLevelReq->newTmLevel = newTMLevel;

        /* Serialize the request through the MC thread; WDA owns the buffer
         * on successful post, so free it only on failure. */
        vosMessage.bodyptr = setTmLevelReq;
        vosMessage.type = WDA_SET_TM_LEVEL_REQ;
        vosStatus = vos_mq_post_message( VOS_MQ_ID_WDA, &vosMessage );
        if ( !VOS_IS_STATUS_SUCCESS(vosStatus) )
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                      "%s: Post Set TM Level MSG fail", __func__);
            vos_mem_free(setTmLevelReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return(status);
}
/*---------------------------------------------------------------------------
\brief sme_featureCapsExchange() - SME interface to exchange capabilities between
Host and FW.
\param hHal - HAL handle for device
\return NONE
---------------------------------------------------------------------------*/
void sme_featureCapsExchange( tHalHandle hHal)
{
    v_CONTEXT_t vosCtx;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_CAPS_EXCH, NO_SESSION, 0));

    /* hHal is unused here; the global VOS context is fetched directly. */
    vosCtx = vos_get_global_context(VOS_MODULE_ID_SME, NULL);
    WDA_featureCapsExchange(vosCtx);
}
/*---------------------------------------------------------------------------
\brief sme_disableFeatureCapablity() - SME interface to disable Active mode offload capablity
in Host.
\param hHal - HAL handle for device
\return NONE
---------------------------------------------------------------------------*/
void sme_disableFeatureCapablity(tANI_U8 feature_index)
{
    /* Thin pass-through: let WDA clear the given capability bit. */
    WDA_disableCapablityFeature(feature_index);
}
/* ---------------------------------------------------------------------------
\fn sme_GetDefaultCountryCode
\brief Get the default country code from NV
\param hHal
\param pCountry
\- return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_GetDefaultCountryCodeFrmNv(tHalHandle hHal, tANI_U8 *pCountry)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_GET_DEFCCNV, NO_SESSION, 0));
    /* Delegate to CSR, which reads the default country code from NV. */
    return csrGetDefaultCountryCodeFrmNv(pMac, pCountry);
}
/* ---------------------------------------------------------------------------
\fn sme_GetCurrentCountryCode
\brief Get the current country code
\param hHal
\param pCountry
\- return eHalStatus
-------------------------------------------------------------------------------*/
eHalStatus sme_GetCurrentCountryCode(tHalHandle hHal, tANI_U8 *pCountry)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_GET_CURCC, NO_SESSION, 0));
    /* Delegate to CSR, which returns the currently applied country code. */
    return csrGetCurrentCountryCode(pMac, pCountry);
}
/* ---------------------------------------------------------------------------
    \fn sme_transportDebug
    \brief Dynamically monitor transport channels.
           A private IOCTL will query the transport channel status while the
           driver is loaded.
    \param hHal Upper MAC context
    \param displaySnapshot Display transport channel snapshot option
    \param toggleStallDetect Enable stall detect feature.
                        This feature affects data performance;
                        do not integrate until fully verified.
    \- return NONE
  -------------------------------------------------------------------------*/
void sme_transportDebug(tHalHandle hHal, v_BOOL_t displaySnapshot, v_BOOL_t toggleStallDetect)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    if (NULL != pMac)
    {
        WDA_TransportChannelDebug(pMac, displaySnapshot, toggleStallDetect);
    }
    else
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: invalid context", __func__);
    }
}
/* ---------------------------------------------------------------------------
\fn sme_ResetPowerValuesFor5G
\brief Reset the power values for 5G band with NV power values.
\param hHal - HAL handle for device
\- return NONE
-------------------------------------------------------------------------*/
void sme_ResetPowerValuesFor5G (tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT (hHal);
    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_RESET_PW5G, NO_SESSION, 0));
    /* Re-save the band channel power table (TRUE presumably selects the
     * 5 GHz band — confirm against csrSaveChannelPowerForBand), then push
     * the channel+power info to the global Cfg store. */
    csrSaveChannelPowerForBand(pMac, eANI_BOOLEAN_TRUE);
    csrApplyPower2Current(pMac); // Store the channel+power info in the global place: Cfg
}
#if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_LFR)
/* ---------------------------------------------------------------------------
\fn sme_UpdateRoamPrefer5GHz
\brief enable/disable Roam prefer 5G runtime option
This function is called through dynamic setConfig callback function
to configure the Roam prefer 5G runtime option
\param hHal - HAL handle for device
\param nRoamPrefer5GHz Enable/Disable Roam prefer 5G runtime option
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_UpdateRoamPrefer5GHz() - set the "prefer 5 GHz when roaming" option.
 * @hHal:            HAL handle for the device
 * @nRoamPrefer5GHz: new value of the runtime option
 *
 * Updates pMac->roam.configParam.nRoamPrefer5GHz under the SME global
 * lock. Returns the lock-acquisition status; on failure nothing is
 * changed.
 */
eHalStatus sme_UpdateRoamPrefer5GHz(tHalHandle hHal, v_BOOL_t nRoamPrefer5GHz)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    eHalStatus ret;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_UPDATE_RP5G, NO_SESSION, 0));

    ret = sme_AcquireGlobalLock(&mac_ctx->sme);
    if (HAL_STATUS_SUCCESS(ret)) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "%s: gRoamPrefer5GHz is changed from %d to %d", __func__,
                  mac_ctx->roam.configParam.nRoamPrefer5GHz,
                  nRoamPrefer5GHz);
        mac_ctx->roam.configParam.nRoamPrefer5GHz = nRoamPrefer5GHz;
        sme_ReleaseGlobalLock(&mac_ctx->sme);
    }

    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_setRoamIntraBand
\brief enable/disable Intra band roaming
This function is called through dynamic setConfig callback function
to configure the intra band roaming
\param hHal - HAL handle for device
\param nRoamIntraBand Enable/Disable Intra band roaming
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_setRoamIntraBand() - enable/disable intra-band roaming.
 * @hHal:           HAL handle for the device
 * @nRoamIntraBand: new value of the intra-band roaming option
 *
 * Updates pMac->roam.configParam.nRoamIntraBand under the SME global
 * lock. Returns the lock-acquisition status; on failure nothing is
 * changed.
 */
eHalStatus sme_setRoamIntraBand(tHalHandle hHal, const v_BOOL_t nRoamIntraBand)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    eHalStatus ret;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_SET_ROAMIBAND, NO_SESSION, 0));

    ret = sme_AcquireGlobalLock(&mac_ctx->sme);
    if (HAL_STATUS_SUCCESS(ret)) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "%s: gRoamIntraBand is changed from %d to %d", __func__,
                  mac_ctx->roam.configParam.nRoamIntraBand,
                  nRoamIntraBand);
        mac_ctx->roam.configParam.nRoamIntraBand = nRoamIntraBand;
        sme_ReleaseGlobalLock(&mac_ctx->sme);
    }

    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_UpdateRoamScanNProbes
\brief function to update roam scan N probes
This function is called through dynamic setConfig callback function
to update roam scan N probes
\param hHal - HAL handle for device
\param nProbes number of probe requests to be sent out
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_UpdateRoamScanNProbes() - update the roam scan probe count.
 * @hHal:    HAL handle for the device
 * @nProbes: number of probe requests to be sent out
 *
 * Updates pMac->roam.configParam.nProbes under the SME global lock and,
 * when roam scan offload is enabled, pushes the updated config to the
 * firmware.
 *
 * Fix: previously the offload-update command was issued even when lock
 * acquisition failed and the config value was never stored; the command
 * is now sent only after a successful update.
 *
 * Returns the lock-acquisition status.
 */
eHalStatus sme_UpdateRoamScanNProbes(tHalHandle hHal, const v_U8_t nProbes)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "%s: gRoamScanNProbes is changed from %d to %d", __func__,
                  pMac->roam.configParam.nProbes,
                  nProbes);
        pMac->roam.configParam.nProbes = nProbes;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
    /* Only notify firmware when the value was actually stored. */
    if (HAL_STATUS_SUCCESS(status) &&
        pMac->roam.configParam.isRoamOffloadScanEnabled)
    {
        csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG,
                           REASON_NPROBES_CHANGED);
    }
#endif
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_UpdateRoamScanHomeAwayTime
\brief function to update roam scan Home away time
This function is called through dynamic setConfig callback function
to update roam scan home away time
\param hHal - HAL handle for device
\param nRoamScanAwayTime Scan home away time
\param bSendOffloadCmd If TRUE then send offload command to firmware
If FALSE then command is not sent to firmware
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_UpdateRoamScanHomeAwayTime() - update the roam scan home-away time.
 * Stores the new value in roam.configParam under the SME global lock and,
 * when roam scan offload is enabled AND the caller asked for it
 * (bSendOffloadCmd), pushes the updated config to firmware.
 * Returns the lock-acquisition status; the stored value is unchanged if
 * the lock could not be taken.
 */
eHalStatus sme_UpdateRoamScanHomeAwayTime(tHalHandle hHal, const v_U16_t nRoamScanHomeAwayTime,
const eAniBoolean bSendOffloadCmd)
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
eHalStatus status = eHAL_STATUS_SUCCESS;
status = sme_AcquireGlobalLock( &pMac->sme );
if ( HAL_STATUS_SUCCESS( status ) )
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
"%s: gRoamScanHomeAwayTime is changed from %d to %d", __func__,
pMac->roam.configParam.nRoamScanHomeAwayTime,
nRoamScanHomeAwayTime);
pMac->roam.configParam.nRoamScanHomeAwayTime = nRoamScanHomeAwayTime;
sme_ReleaseGlobalLock( &pMac->sme );
}
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
/* NOTE(review): this offload command is issued even if the lock (and
 * therefore the config update) failed above — confirm intended. */
if (pMac->roam.configParam.isRoamOffloadScanEnabled && bSendOffloadCmd)
{
csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG,
REASON_HOME_AWAY_TIME_CHANGED);
}
#endif
return status;
}
/* ---------------------------------------------------------------------------
\fn sme_getRoamIntraBand
\brief get Intra band roaming
\param hHal - HAL handle for device
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_getRoamIntraBand() - read the intra-band roaming setting.
 * @hHal: HAL handle for the device
 *
 * Returns the current nRoamIntraBand config value (no locking).
 */
v_BOOL_t sme_getRoamIntraBand(tHalHandle hHal)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_GET_ROAMIBAND, NO_SESSION, 0));

    return mac_ctx->roam.configParam.nRoamIntraBand;
}
/* ---------------------------------------------------------------------------
\fn sme_getRoamScanNProbes
\brief get N Probes
\param hHal - HAL handle for device
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_getRoamScanNProbes() - read the roam scan probe count.
 * @hHal: HAL handle for the device
 *
 * Returns the current nProbes config value (no locking).
 */
v_U8_t sme_getRoamScanNProbes(tHalHandle hHal)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    return mac_ctx->roam.configParam.nProbes;
}
/* ---------------------------------------------------------------------------
\fn sme_getRoamScanHomeAwayTime
\brief get Roam scan home away time
\param hHal - HAL handle for device
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_getRoamScanHomeAwayTime() - read the roam scan home-away time.
 * @hHal: HAL handle for the device
 *
 * Returns the current nRoamScanHomeAwayTime config value (no locking).
 */
v_U16_t sme_getRoamScanHomeAwayTime(tHalHandle hHal)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    return mac_ctx->roam.configParam.nRoamScanHomeAwayTime;
}
/* ---------------------------------------------------------------------------
\fn sme_UpdateImmediateRoamRssiDiff
\brief Update nImmediateRoamRssiDiff
This function is called through dynamic setConfig callback function
to configure nImmediateRoamRssiDiff
Usage: adb shell iwpriv wlan0 setConfig gImmediateRoamRssiDiff=[0 .. 125]
\param hHal - HAL handle for device
\param nImmediateRoamRssiDiff - minimum rssi difference between potential
candidate and current AP.
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_UpdateImmediateRoamRssiDiff() - update nImmediateRoamRssiDiff.
 * @hHal:                   HAL handle for the device
 * @nImmediateRoamRssiDiff: minimum rssi difference between a potential
 *                          candidate and the current AP
 *
 * Updates the config value under the SME global lock.
 *
 * Fix: the two adjacent log string literals concatenated to
 * "...diff to%d"; a space has been inserted so the rendered log reads
 * "...diff to %d".
 *
 * Returns the lock-acquisition status.
 */
eHalStatus sme_UpdateImmediateRoamRssiDiff(tHalHandle hHal, v_U8_t nImmediateRoamRssiDiff)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_UPDATE_IMMRSSIDIFF, NO_SESSION, 0));

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                  "LFR runtime successfully set immediate roam rssi diff to "
                  "%d - old value is %d - roam state is %s",
                  nImmediateRoamRssiDiff,
                  pMac->roam.configParam.nImmediateRoamRssiDiff,
                  macTraceGetNeighbourRoamState(
                      pMac->roam.neighborRoamInfo.neighborRoamState));
        pMac->roam.configParam.nImmediateRoamRssiDiff = nImmediateRoamRssiDiff;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_UpdateRoamRssiDiff
\brief Update RoamRssiDiff
This function is called through dynamic setConfig callback function
to configure RoamRssiDiff
Usage: adb shell iwpriv wlan0 setConfig RoamRssiDiff=[0 .. 125]
\param hHal - HAL handle for device
\param RoamRssiDiff - minimum rssi difference between potential
candidate and current AP.
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_UpdateRoamRssiDiff() - update RoamRssiDiff.
 * @hHal:         HAL handle for the device
 * @RoamRssiDiff: minimum rssi difference between a potential candidate
 *                and the current AP
 *
 * Updates the config value under the SME global lock and, when roam
 * scan offload is enabled, pushes the updated config to firmware.
 *
 * Fix: the MTRACE call is now emitted before acquiring the global lock,
 * consistent with every sibling setter in this file (e.g.
 * sme_UpdateImmediateRoamRssiDiff, sme_UpdateFastTransitionEnabled).
 *
 * Returns the lock-acquisition status.
 */
eHalStatus sme_UpdateRoamRssiDiff(tHalHandle hHal, v_U8_t RoamRssiDiff)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_UPDATE_RSSIDIFF, NO_SESSION, 0));

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                  "LFR runtime successfully set roam rssi diff to %d"
                  " - old value is %d - roam state is %s",
                  RoamRssiDiff,
                  pMac->roam.configParam.RoamRssiDiff,
                  macTraceGetNeighbourRoamState(
                      pMac->roam.neighborRoamInfo.neighborRoamState));
        pMac->roam.configParam.RoamRssiDiff = RoamRssiDiff;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
    if (pMac->roam.configParam.isRoamOffloadScanEnabled)
    {
        csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG, REASON_RSSI_DIFF_CHANGED);
    }
#endif
    return status;
}
/*--------------------------------------------------------------------------
\brief sme_UpdateFastTransitionEnabled() - enable/disable Fast Transition support at runtime
It is used at in the REG_DYNAMIC_VARIABLE macro definition of
isFastTransitionEnabled.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update isFastTransitionEnabled config successfully.
Other status means SME is failed to update isFastTransitionEnabled.
\sa
--------------------------------------------------------------------------*/
/*
 * sme_UpdateFastTransitionEnabled() - runtime toggle for Fast Transition.
 * @hHal:                    handle returned by macOpen
 * @isFastTransitionEnabled: new value of the option
 *
 * Synchronous call; updates isFastTransitionEnabled under the SME
 * global lock. Returns the lock-acquisition status.
 */
eHalStatus sme_UpdateFastTransitionEnabled(tHalHandle hHal,
        v_BOOL_t isFastTransitionEnabled)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    eHalStatus ret;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
                     TRACE_CODE_SME_RX_HDD_UPDATE_FTENABLED, NO_SESSION, 0));

    ret = sme_AcquireGlobalLock(&mac_ctx->sme);
    if (HAL_STATUS_SUCCESS(ret)) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "%s: FastTransitionEnabled is changed from %d to %d", __func__,
                  mac_ctx->roam.configParam.isFastTransitionEnabled,
                  isFastTransitionEnabled);
        mac_ctx->roam.configParam.isFastTransitionEnabled = isFastTransitionEnabled;
        sme_ReleaseGlobalLock(&mac_ctx->sme);
    }

    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_UpdateWESMode
\brief Update WES Mode
This function is called through dynamic setConfig callback function
to configure isWESModeEnabled
\param hHal - HAL handle for device
\return eHAL_STATUS_SUCCESS - SME update isWESModeEnabled config successfully.
Other status means SME is failed to update isWESModeEnabled.
-------------------------------------------------------------------------*/
/*
 * sme_UpdateWESMode() - update isWESModeEnabled.
 * @hHal:             HAL handle for the device
 * @isWESModeEnabled: new value of the WES mode option
 *
 * Updates the config value under the SME global lock.
 *
 * Fix: the two adjacent log string literals concatenated to
 * "...to %d- old value..."; a space has been inserted so the rendered
 * log reads "...to %d - old value...".
 *
 * Returns the lock-acquisition status.
 */
eHalStatus sme_UpdateWESMode(tHalHandle hHal, v_BOOL_t isWESModeEnabled)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                  "LFR runtime successfully set WES Mode to %d"
                  " - old value is %d - roam state is %s",
                  isWESModeEnabled,
                  pMac->roam.configParam.isWESModeEnabled,
                  macTraceGetNeighbourRoamState(
                      pMac->roam.neighborRoamInfo.neighborRoamState));
        pMac->roam.configParam.isWESModeEnabled = isWESModeEnabled;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_SetRoamScanControl
\brief Set roam scan control
This function is called to set roam scan control
if roam scan control is set to 0, roaming scan cache is cleared
any value other than 0 is treated as invalid value
\param hHal - HAL handle for device
\return eHAL_STATUS_SUCCESS - SME update config successfully.
Other status means SME failure to update
-------------------------------------------------------------------------*/
/*
 * sme_SetRoamScanControl() - set the roam scan control flag.
 * Stores the flag under the SME global lock. When the flag is set to 0
 * the occupied-channel roaming cache is flushed and, if roam scan
 * offload is enabled, firmware is told to flush its channel list too.
 * Returns the lock-acquisition status.
 */
eHalStatus sme_SetRoamScanControl(tHalHandle hHal, v_BOOL_t roamScanControl)
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
eHalStatus status = eHAL_STATUS_SUCCESS;
MTRACE(vos_trace(VOS_MODULE_ID_SME,
TRACE_CODE_SME_RX_HDD_SET_SCANCTRL, NO_SESSION, 0));
status = sme_AcquireGlobalLock( &pMac->sme );
if ( HAL_STATUS_SUCCESS( status ) )
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
"LFR runtime successfully set roam scan control to %d"
" - old value is %d - roam state is %s",
roamScanControl,
pMac->roam.configParam.nRoamScanControl,
macTraceGetNeighbourRoamState(
pMac->roam.neighborRoamInfo.neighborRoamState));
pMac->roam.configParam.nRoamScanControl = roamScanControl;
/* 0 means "clear": flush the cached roam channel list. */
if ( 0 == roamScanControl)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
"LFR runtime successfully cleared roam scan cache");
csrFlushCfgBgScanRoamChannelList(pMac);
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
/* Note: the offload command is issued while the lock is held. */
if (pMac->roam.configParam.isRoamOffloadScanEnabled)
{
csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG, REASON_FLUSH_CHANNEL_LIST);
}
#endif
}
sme_ReleaseGlobalLock( &pMac->sme );
}
return status ;
}
#endif /* (WLAN_FEATURE_VOWIFI_11R) || (FEATURE_WLAN_ESE) || (FEATURE_WLAN_LFR) */
#ifdef FEATURE_WLAN_LFR
/*--------------------------------------------------------------------------
\brief sme_UpdateIsFastRoamIniFeatureEnabled() - enable/disable LFR support at runtime
It is used at in the REG_DYNAMIC_VARIABLE macro definition of
isFastRoamIniFeatureEnabled.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update isFastRoamIniFeatureEnabled config successfully.
Other status means SME is failed to update isFastRoamIniFeatureEnabled.
\sa
--------------------------------------------------------------------------*/
/*
 * sme_UpdateIsFastRoamIniFeatureEnabled() - runtime LFR enable/disable.
 * @hHal:                        handle returned by macOpen
 * @isFastRoamIniFeatureEnabled: new value of the option
 *
 * Synchronous call. If the value is unchanged this is a no-op;
 * otherwise the config flag is updated and neighbor-roam is told about
 * the new setting. Always returns eHAL_STATUS_SUCCESS.
 */
eHalStatus sme_UpdateIsFastRoamIniFeatureEnabled(tHalHandle hHal,
        const v_BOOL_t isFastRoamIniFeatureEnabled)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    v_BOOL_t old_val = mac_ctx->roam.configParam.isFastRoamIniFeatureEnabled;

    /* Nothing to do when the setting is already at the requested value. */
    if (old_val == isFastRoamIniFeatureEnabled) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "%s: FastRoam is already enabled or disabled, nothing to do (returning) old(%d) new(%d)", __func__,
                  old_val,
                  isFastRoamIniFeatureEnabled);
        return eHAL_STATUS_SUCCESS;
    }

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
              "%s: FastRoamEnabled is changed from %d to %d", __func__,
              old_val,
              isFastRoamIniFeatureEnabled);
    mac_ctx->roam.configParam.isFastRoamIniFeatureEnabled = isFastRoamIniFeatureEnabled;
    csrNeighborRoamUpdateFastRoamingEnabled(mac_ctx, isFastRoamIniFeatureEnabled);

    return eHAL_STATUS_SUCCESS;
}
/*--------------------------------------------------------------------------
\brief sme_ConfigFwrRoaming() - enable/disable LFR support at runtime
When Supplicant issue enabled / disable fwr based roaming on the basis
of the Bssid modification in network block (e.g. AutoJoin mode N/W block)
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME (enabled/disabled) offload scan successfully.
Other status means SME is failed to (enabled/disabled) offload scan.
\sa
--------------------------------------------------------------------------*/
/*
 * sme_ConfigFwrRoaming() - supplicant-driven firmware roaming toggle.
 * @hHal:              handle returned by macOpen
 * @isFastRoamEnabled: requested firmware roaming state
 *
 * Synchronous call. Honoured only when LFR was enabled via ini;
 * otherwise returns eHAL_STATUS_FAILURE without side effects.
 */
eHalStatus sme_ConfigFwrRoaming(tHalHandle hHal,
        const v_BOOL_t isFastRoamEnabled)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);

    if (mac_ctx->roam.configParam.isFastRoamIniFeatureEnabled) {
        csrNeighborRoamUpdateFastRoamingEnabled(mac_ctx, isFastRoamEnabled);
        return eHAL_STATUS_SUCCESS;
    }

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
              "%s: FastRoam is disabled through ini", __func__);
    return eHAL_STATUS_FAILURE;
}
/*--------------------------------------------------------------------------
\brief sme_UpdateIsMAWCIniFeatureEnabled() -
Enable/disable LFR MAWC support at runtime
It is used at in the REG_DYNAMIC_VARIABLE macro definition of
isMAWCIniFeatureEnabled.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update MAWCEnabled config successfully.
Other status means SME is failed to update MAWCEnabled.
\sa
--------------------------------------------------------------------------*/
/*
 * sme_UpdateIsMAWCIniFeatureEnabled() - runtime LFR MAWC enable/disable.
 * @hHal:        handle returned by macOpen
 * @MAWCEnabled: new value of the option
 *
 * Synchronous call; updates MAWCEnabled under the SME global lock.
 * Returns the lock-acquisition status.
 */
eHalStatus sme_UpdateIsMAWCIniFeatureEnabled(tHalHandle hHal,
        const v_BOOL_t MAWCEnabled)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    eHalStatus ret;

    ret = sme_AcquireGlobalLock(&mac_ctx->sme);
    if (HAL_STATUS_SUCCESS(ret)) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "%s: MAWCEnabled is changed from %d to %d", __func__,
                  mac_ctx->roam.configParam.MAWCEnabled,
                  MAWCEnabled);
        mac_ctx->roam.configParam.MAWCEnabled = MAWCEnabled;
        sme_ReleaseGlobalLock(&mac_ctx->sme);
    }

    return ret;
}
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
/*--------------------------------------------------------------------------
\brief sme_UpdateEnableFastRoamInConcurrency() - enable/disable LFR if Concurrent session exists
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS
Other status means SME is failed
\sa
--------------------------------------------------------------------------*/
/*
 * sme_UpdateEnableFastRoamInConcurrency() - LFR during concurrency.
 * @hHal:                              handle returned by macOpen
 * @bFastRoamInConIniFeatureEnabled:   requested value of the flag
 *
 * Synchronous call; updates the flag under the SME global lock. The
 * flag is forced to 0 whenever roam scan offload is disabled.
 * Returns the lock-acquisition status.
 */
eHalStatus sme_UpdateEnableFastRoamInConcurrency(tHalHandle hHal,
        v_BOOL_t bFastRoamInConIniFeatureEnabled)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    eHalStatus ret;

    ret = sme_AcquireGlobalLock(&mac_ctx->sme);
    if (HAL_STATUS_SUCCESS(ret)) {
        /* The concurrency flag is meaningful only with offload scan on;
         * force it off otherwise. */
        if (0 == mac_ctx->roam.configParam.isRoamOffloadScanEnabled)
            mac_ctx->roam.configParam.bFastRoamInConIniFeatureEnabled = 0;
        else
            mac_ctx->roam.configParam.bFastRoamInConIniFeatureEnabled =
                bFastRoamInConIniFeatureEnabled;
        sme_ReleaseGlobalLock(&mac_ctx->sme);
    }

    return ret;
}
#endif
#endif /* FEATURE_WLAN_LFR */
#ifdef FEATURE_WLAN_ESE
/*--------------------------------------------------------------------------
\brief sme_UpdateIsEseFeatureEnabled() - enable/disable Ese support at runtime
It is used at in the REG_DYNAMIC_VARIABLE macro definition of
isEseIniFeatureEnabled.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update isEseIniFeatureEnabled config successfully.
Other status means SME is failed to update isEseIniFeatureEnabled.
\sa
--------------------------------------------------------------------------*/
/*
 * sme_UpdateIsEseFeatureEnabled() - runtime ESE enable/disable.
 * Synchronous call. A no-op when the value is unchanged. Otherwise the
 * config flag is updated, neighbor-roam is notified, Fast Transition is
 * force-enabled when ESE is turned on, and (with offload scan enabled)
 * firmware is told that the ESE ini config changed.
 * Always returns eHAL_STATUS_SUCCESS.
 * NOTE(review): unlike most setters here, no SME global lock is taken.
 */
eHalStatus sme_UpdateIsEseFeatureEnabled(tHalHandle hHal,
const v_BOOL_t isEseIniFeatureEnabled)
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
/* Early-out when there is nothing to change. */
if (pMac->roam.configParam.isEseIniFeatureEnabled == isEseIniFeatureEnabled)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
"%s: Ese Mode is already enabled or disabled, nothing to do (returning) old(%d) new(%d)", __func__,
pMac->roam.configParam.isEseIniFeatureEnabled,
isEseIniFeatureEnabled);
return eHAL_STATUS_SUCCESS;
}
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
"%s: EseEnabled is changed from %d to %d", __func__,
pMac->roam.configParam.isEseIniFeatureEnabled,
isEseIniFeatureEnabled);
pMac->roam.configParam.isEseIniFeatureEnabled = isEseIniFeatureEnabled;
csrNeighborRoamUpdateEseModeEnabled(pMac, isEseIniFeatureEnabled);
/* ESE requires Fast Transition, so enable FT alongside ESE. */
if(TRUE == isEseIniFeatureEnabled)
{
sme_UpdateFastTransitionEnabled(hHal, TRUE);
}
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
if (pMac->roam.configParam.isRoamOffloadScanEnabled)
{
csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG, REASON_ESE_INI_CFG_CHANGED);
}
#endif
return eHAL_STATUS_SUCCESS;
}
#endif /* FEATURE_WLAN_ESE */
/*--------------------------------------------------------------------------
\brief sme_UpdateConfigFwRssiMonitoring() - enable/disable firmware RSSI Monitoring at runtime
It is used at in the REG_DYNAMIC_VARIABLE macro definition of
fEnableFwRssiMonitoring.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update fEnableFwRssiMonitoring. config successfully.
Other status means SME is failed to update fEnableFwRssiMonitoring.
\sa
--------------------------------------------------------------------------*/
/*
 * sme_UpdateConfigFwRssiMonitoring() - runtime firmware RSSI monitoring.
 * @hHal:                    handle returned by macOpen
 * @fEnableFwRssiMonitoring: new value of the option
 *
 * Synchronous call; pushes the value into CCM as
 * WNI_CFG_PS_ENABLE_RSSI_MONITOR.
 *
 * Fix: the error log referred to "WNI_CFG_PS_RSSI_MONITOR", which is
 * not the cfg item actually being set; the message now names
 * WNI_CFG_PS_ENABLE_RSSI_MONITOR.
 *
 * Returns eHAL_STATUS_SUCCESS, or eHAL_STATUS_FAILURE when the CCM
 * update fails.
 */
eHalStatus sme_UpdateConfigFwRssiMonitoring(tHalHandle hHal,
        v_BOOL_t fEnableFwRssiMonitoring)
{
    eHalStatus halStatus = eHAL_STATUS_SUCCESS;

    if (ccmCfgSetInt(hHal, WNI_CFG_PS_ENABLE_RSSI_MONITOR, fEnableFwRssiMonitoring,
                     NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
    {
        halStatus = eHAL_STATUS_FAILURE;
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
            "Failure: Could not pass on WNI_CFG_PS_ENABLE_RSSI_MONITOR configuration info to CCM");
    }

    return (halStatus);
}
#ifdef WLAN_FEATURE_NEIGHBOR_ROAMING
/*--------------------------------------------------------------------------
\brief sme_setNeighborLookupRssiThreshold() - update neighbor lookup rssi threshold
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update config successful.
Other status means SME is failed to update
\sa
--------------------------------------------------------------------------*/
/*
 * sme_setNeighborLookupRssiThreshold() - update neighbor lookup rssi
 * threshold.
 * @hHal:                         handle returned by macOpen
 * @neighborLookupRssiThreshold:  new threshold value
 *
 * Synchronous call. Under the SME global lock the threshold is first
 * handed to neighbor-roam; only on success is the config copy updated.
 * Returns the lock status, or the neighbor-roam status on lock success.
 */
eHalStatus sme_setNeighborLookupRssiThreshold(tHalHandle hHal,
        v_U8_t neighborLookupRssiThreshold)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    eHalStatus ret;

    ret = sme_AcquireGlobalLock(&mac_ctx->sme);
    if (HAL_STATUS_SUCCESS(ret)) {
        ret = csrNeighborRoamSetLookupRssiThreshold(mac_ctx,
                  neighborLookupRssiThreshold);
        if (HAL_STATUS_SUCCESS(ret)) {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                "LFR runtime successfully set Lookup threshold to %d - old value is %d - roam state is %s",
                neighborLookupRssiThreshold,
                mac_ctx->roam.configParam.neighborRoamConfig.nNeighborLookupRssiThreshold,
                macTraceGetNeighbourRoamState(
                    mac_ctx->roam.neighborRoamInfo.neighborRoamState));
            mac_ctx->roam.configParam.neighborRoamConfig.nNeighborLookupRssiThreshold =
                neighborLookupRssiThreshold;
        }
        sme_ReleaseGlobalLock(&mac_ctx->sme);
    }

    return ret;
}
/*--------------------------------------------------------------------------
\brief sme_setNeighborReassocRssiThreshold() - update neighbor reassoc rssi threshold
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update config successful.
Other status means SME is failed to update
\sa
--------------------------------------------------------------------------*/
/*
 * sme_setNeighborReassocRssiThreshold() - update neighbor reassoc rssi
 * threshold.
 * @hHal:                          handle returned by macOpen
 * @neighborReassocRssiThreshold:  new threshold value
 *
 * Synchronous call; updates both the config copy and the live
 * neighbor-roam cfgParams value under the SME global lock.
 *
 * Fix: the two adjacent log string literals concatenated to
 * "...to %d- old value..."; a space has been inserted so the rendered
 * log reads "...to %d - old value...".
 *
 * Returns the lock-acquisition status.
 */
eHalStatus sme_setNeighborReassocRssiThreshold(tHalHandle hHal,
        v_U8_t neighborReassocRssiThreshold)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                  "LFR runtime successfully set Reassoc threshold to %d"
                  " - old value is %d - roam state is %s",
                  neighborReassocRssiThreshold,
                  pMac->roam.configParam.neighborRoamConfig.nNeighborReassocRssiThreshold,
                  macTraceGetNeighbourRoamState(
                      pMac->roam.neighborRoamInfo.neighborRoamState));
        pMac->roam.configParam.neighborRoamConfig.nNeighborReassocRssiThreshold =
            neighborReassocRssiThreshold;
        pMac->roam.neighborRoamInfo.cfgParams.neighborReassocThreshold =
            neighborReassocRssiThreshold;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
/*--------------------------------------------------------------------------
\brief sme_getNeighborLookupRssiThreshold() - get neighbor lookup rssi threshold
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update config successful.
Other status means SME is failed to update
\sa
--------------------------------------------------------------------------*/
/*
 * sme_getNeighborLookupRssiThreshold() - read the neighbor lookup rssi
 * threshold.
 * @hHal: handle returned by macOpen
 *
 * Returns the current nNeighborLookupRssiThreshold config value
 * (no locking).
 */
v_U8_t sme_getNeighborLookupRssiThreshold(tHalHandle hHal)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    return mac_ctx->roam.configParam.neighborRoamConfig.nNeighborLookupRssiThreshold;
}
/*--------------------------------------------------------------------------
\brief sme_setNeighborScanRefreshPeriod() - set neighbor scan results refresh period
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update config successful.
Other status means SME is failed to update
\sa
--------------------------------------------------------------------------*/
/*
 * sme_setNeighborScanRefreshPeriod() - set neighbor scan results
 * refresh period. Updates both the config copy and the live
 * neighbor-roam cfgParams value under the SME global lock and, when
 * roam scan offload is enabled, pushes the updated config to firmware.
 * Returns the lock-acquisition status.
 */
eHalStatus sme_setNeighborScanRefreshPeriod(tHalHandle hHal,
v_U16_t neighborScanResultsRefreshPeriod)
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
eHalStatus status = eHAL_STATUS_SUCCESS;
status = sme_AcquireGlobalLock( &pMac->sme );
if ( HAL_STATUS_SUCCESS( status ) )
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
"LFR runtime successfully set roam scan refresh period to %d"
" - old value is %d - roam state is %s",
neighborScanResultsRefreshPeriod,
pMac->roam.configParam.neighborRoamConfig.nNeighborResultsRefreshPeriod,
macTraceGetNeighbourRoamState(
pMac->roam.neighborRoamInfo.neighborRoamState));
pMac->roam.configParam.neighborRoamConfig.nNeighborResultsRefreshPeriod =
neighborScanResultsRefreshPeriod;
pMac->roam.neighborRoamInfo.cfgParams.neighborResultsRefreshPeriod =
neighborScanResultsRefreshPeriod;
sme_ReleaseGlobalLock( &pMac->sme );
}
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
/* NOTE(review): this offload command is issued even if the lock (and
 * therefore the config update) failed above — confirm intended. */
if (pMac->roam.configParam.isRoamOffloadScanEnabled)
{
csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG,
REASON_NEIGHBOR_SCAN_REFRESH_PERIOD_CHANGED);
}
#endif
return status ;
}
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
/*--------------------------------------------------------------------------
\brief sme_UpdateRoamScanOffloadEnabled() - enable/disable roam scan offload feaure
It is used at in the REG_DYNAMIC_VARIABLE macro definition of
gRoamScanOffloadEnabled.
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update config successfully.
Other status means SME is failed to update.
\sa
--------------------------------------------------------------------------*/
/*
 * sme_UpdateRoamScanOffloadEnabled() - runtime roam scan offload toggle.
 * @hHal:                     handle returned by macOpen
 * @nRoamScanOffloadEnabled:  new value of the option
 *
 * Synchronous call; updates isRoamOffloadScanEnabled under the SME
 * global lock. Returns the lock-acquisition status.
 */
eHalStatus sme_UpdateRoamScanOffloadEnabled(tHalHandle hHal,
        v_BOOL_t nRoamScanOffloadEnabled)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    eHalStatus ret;

    ret = sme_AcquireGlobalLock(&mac_ctx->sme);
    if (HAL_STATUS_SUCCESS(ret)) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "%s: gRoamScanOffloadEnabled is changed from %d to %d", __func__,
                  mac_ctx->roam.configParam.isRoamOffloadScanEnabled,
                  nRoamScanOffloadEnabled);
        mac_ctx->roam.configParam.isRoamOffloadScanEnabled = nRoamScanOffloadEnabled;
        sme_ReleaseGlobalLock(&mac_ctx->sme);
    }

    return ret;
}
#endif
/*--------------------------------------------------------------------------
\brief sme_getNeighborScanRefreshPeriod() - get neighbor scan results refresh period
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return v_U16_t - Neighbor scan results refresh period value
\sa
--------------------------------------------------------------------------*/
/*
 * sme_getNeighborScanRefreshPeriod() - read the neighbor scan results
 * refresh period.
 * @hHal: handle returned by macOpen
 *
 * Returns the current nNeighborResultsRefreshPeriod config value
 * (no locking).
 */
v_U16_t sme_getNeighborScanRefreshPeriod(tHalHandle hHal)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    return mac_ctx->roam.configParam.neighborRoamConfig.nNeighborResultsRefreshPeriod;
}
/*--------------------------------------------------------------------------
\brief sme_getEmptyScanRefreshPeriod() - get empty scan refresh period
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update config successful.
Other status means SME is failed to update
\sa
--------------------------------------------------------------------------*/
/*
 * sme_getEmptyScanRefreshPeriod() - read the empty scan refresh period.
 * @hHal: handle returned by macOpen
 *
 * Returns the current nEmptyScanRefreshPeriod config value (no locking).
 */
v_U16_t sme_getEmptyScanRefreshPeriod(tHalHandle hHal)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    return mac_ctx->roam.configParam.neighborRoamConfig.nEmptyScanRefreshPeriod;
}
/* ---------------------------------------------------------------------------
\fn sme_UpdateEmptyScanRefreshPeriod
\brief Update nEmptyScanRefreshPeriod
This function is called through dynamic setConfig callback function
to configure nEmptyScanRefreshPeriod
Usage: adb shell iwpriv wlan0 setConfig nEmptyScanRefreshPeriod=[0 .. 60]
\param hHal - HAL handle for device
\param nEmptyScanRefreshPeriod - scan period following empty scan results.
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_UpdateEmptyScanRefreshPeriod() - update nEmptyScanRefreshPeriod.
 * @hHal:                    HAL handle for the device
 * @nEmptyScanRefreshPeriod: scan period following empty scan results
 *
 * Updates both the config copy and the live neighbor-roam cfgParams
 * value under the SME global lock and, when roam scan offload is
 * enabled, pushes the updated config to firmware.
 *
 * Fix: the two adjacent log string literals concatenated to
 * "...to %d -old value..."; the split has been adjusted so the rendered
 * log reads "...to %d - old value...".
 *
 * Returns the lock-acquisition status.
 */
eHalStatus sme_UpdateEmptyScanRefreshPeriod(tHalHandle hHal, v_U16_t nEmptyScanRefreshPeriod)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                  "LFR runtime successfully set roam scan period to %d - "
                  "old value is %d - roam state is %s",
                  nEmptyScanRefreshPeriod,
                  pMac->roam.configParam.neighborRoamConfig.nEmptyScanRefreshPeriod,
                  macTraceGetNeighbourRoamState(
                      pMac->roam.neighborRoamInfo.neighborRoamState));
        pMac->roam.configParam.neighborRoamConfig.nEmptyScanRefreshPeriod = nEmptyScanRefreshPeriod;
        pMac->roam.neighborRoamInfo.cfgParams.emptyScanRefreshPeriod = nEmptyScanRefreshPeriod;
        sme_ReleaseGlobalLock( &pMac->sme );
    }
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
    if (pMac->roam.configParam.isRoamOffloadScanEnabled)
    {
        csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG,
                           REASON_EMPTY_SCAN_REF_PERIOD_CHANGED);
    }
#endif
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_setNeighborScanMinChanTime
\brief Update nNeighborScanMinChanTime
This function is called through dynamic setConfig callback function
to configure gNeighborScanChannelMinTime
Usage: adb shell iwpriv wlan0 setConfig gNeighborScanChannelMinTime=[0 .. 60]
\param hHal - HAL handle for device
\param nNeighborScanMinChanTime - Channel minimum dwell time
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_setNeighborScanMinChanTime() - update gNeighborScanChannelMinTime.
 * @hHal:                      HAL handle for the device
 * @nNeighborScanMinChanTime:  channel minimum dwell time
 *
 * Updates both the config copy and the live neighbor-roam cfgParams
 * value under the SME global lock. Returns the lock-acquisition status.
 */
eHalStatus sme_setNeighborScanMinChanTime(tHalHandle hHal, const v_U16_t nNeighborScanMinChanTime)
{
    tpAniSirGlobal mac_ctx = PMAC_STRUCT(hHal);
    eHalStatus ret;

    ret = sme_AcquireGlobalLock(&mac_ctx->sme);
    if (HAL_STATUS_SUCCESS(ret)) {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
            "LFR runtime successfully set channel min dwell time to %d - old value is %d - roam state is %s",
            nNeighborScanMinChanTime,
            mac_ctx->roam.configParam.neighborRoamConfig.nNeighborScanMinChanTime,
            macTraceGetNeighbourRoamState(
                mac_ctx->roam.neighborRoamInfo.neighborRoamState));
        mac_ctx->roam.configParam.neighborRoamConfig.nNeighborScanMinChanTime = nNeighborScanMinChanTime;
        mac_ctx->roam.neighborRoamInfo.cfgParams.minChannelScanTime = nNeighborScanMinChanTime;
        sme_ReleaseGlobalLock(&mac_ctx->sme);
    }

    return ret;
}
/* ---------------------------------------------------------------------------
\fn sme_setNeighborScanMaxChanTime
\brief Update nNeighborScanMaxChanTime
This function is called through dynamic setConfig callback function
to configure gNeighborScanChannelMaxTime
Usage: adb shell iwpriv wlan0 setConfig gNeighborScanChannelMaxTime=[0 .. 60]
\param hHal - HAL handle for device
\param nNeighborScanMaxChanTime - Channel maximum dwell time
\- return Success or failure
-------------------------------------------------------------------------*/
/*
 * sme_setNeighborScanMaxChanTime() - update gNeighborScanChannelMaxTime.
 * Updates both the config copy and the live neighbor-roam cfgParams
 * value under the SME global lock and, when roam scan offload is
 * enabled, pushes the updated config to firmware.
 * Returns the lock-acquisition status.
 */
eHalStatus sme_setNeighborScanMaxChanTime(tHalHandle hHal, const v_U16_t nNeighborScanMaxChanTime)
{
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
eHalStatus status = eHAL_STATUS_SUCCESS;
status = sme_AcquireGlobalLock( &pMac->sme );
if ( HAL_STATUS_SUCCESS( status ) )
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
"LFR runtime successfully set channel max dwell time to %d"
" - old value is %d - roam state is %s",
nNeighborScanMaxChanTime,
pMac->roam.configParam.neighborRoamConfig.nNeighborScanMaxChanTime,
macTraceGetNeighbourRoamState(
pMac->roam.neighborRoamInfo.neighborRoamState));
pMac->roam.configParam.neighborRoamConfig.nNeighborScanMaxChanTime = nNeighborScanMaxChanTime;
pMac->roam.neighborRoamInfo.cfgParams.maxChannelScanTime = nNeighborScanMaxChanTime;
sme_ReleaseGlobalLock( &pMac->sme );
}
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
/* NOTE(review): this offload command is issued even if the lock (and
 * therefore the config update) failed above — confirm intended. */
if (pMac->roam.configParam.isRoamOffloadScanEnabled)
{
csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG,
REASON_SCAN_CH_TIME_CHANGED);
}
#endif
return status ;
}
/* ---------------------------------------------------------------------------
\fn sme_getNeighborScanMinChanTime
\brief get neighbor scan min channel time
\param hHal - The handle returned by macOpen.
\return v_U16_t - channel min time value
-------------------------------------------------------------------------*/
v_U16_t sme_getNeighborScanMinChanTime(tHalHandle hHal)
{
    /* Read the cached minimum per-channel dwell time directly. */
    return PMAC_STRUCT(hHal)->roam.neighborRoamInfo.cfgParams.minChannelScanTime;
}
/* ---------------------------------------------------------------------------
\fn sme_getNeighborScanMaxChanTime
\brief get neighbor scan max channel time
\param hHal - The handle returned by macOpen.
\return v_U16_t - channel max time value
-------------------------------------------------------------------------*/
v_U16_t sme_getNeighborScanMaxChanTime(tHalHandle hHal)
{
    /* Read the cached maximum per-channel dwell time directly. */
    return PMAC_STRUCT(hHal)->roam.neighborRoamInfo.cfgParams.maxChannelScanTime;
}
/* ---------------------------------------------------------------------------
\fn sme_setNeighborScanPeriod
\brief Update nNeighborScanPeriod
This function is called through dynamic setConfig callback function
to configure nNeighborScanPeriod
Usage: adb shell iwpriv wlan0 setConfig nNeighborScanPeriod=[0 .. 1000]
\param hHal - HAL handle for device
\param nNeighborScanPeriod - neighbor scan period
\- return Success or failure
-------------------------------------------------------------------------*/
eHalStatus sme_setNeighborScanPeriod(tHalHandle hHal, const v_U16_t nNeighborScanPeriod)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (HAL_STATUS_SUCCESS(status))
    {
        /* Trace old vs. new period, then update the configured value and
         * the neighbor-roam cached copy under the SME lock. */
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                  "LFR runtime successfully set neighbor scan period to %d"
                  " - old value is %d - roam state is %s",
                  nNeighborScanPeriod,
                  pMac->roam.configParam.neighborRoamConfig.nNeighborScanTimerPeriod,
                  macTraceGetNeighbourRoamState(
                      pMac->roam.neighborRoamInfo.neighborRoamState));
        pMac->roam.configParam.neighborRoamConfig.nNeighborScanTimerPeriod =
            nNeighborScanPeriod;
        pMac->roam.neighborRoamInfo.cfgParams.neighborScanPeriod =
            nNeighborScanPeriod;
        sme_ReleaseGlobalLock(&pMac->sme);
    }
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
    /* Let the firmware roam-scan offload pick up the new period. */
    if (pMac->roam.configParam.isRoamOffloadScanEnabled)
    {
        csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG,
                           REASON_SCAN_HOME_TIME_CHANGED);
    }
#endif
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_getNeighborScanPeriod
\brief get neighbor scan period
\param hHal - The handle returned by macOpen.
\return v_U16_t - neighbor scan period
-------------------------------------------------------------------------*/
v_U16_t sme_getNeighborScanPeriod(tHalHandle hHal)
{
    /* Return the cached neighbor scan period. */
    return PMAC_STRUCT(hHal)->roam.neighborRoamInfo.cfgParams.neighborScanPeriod;
}
#endif
#if defined (WLAN_FEATURE_VOWIFI_11R) || defined (FEATURE_WLAN_ESE) || defined(FEATURE_WLAN_LFR)
/*--------------------------------------------------------------------------
\brief sme_getRoamRssiDiff() - get Roam rssi diff
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return v_U16_t - Rssi diff value
\sa
--------------------------------------------------------------------------*/
v_U8_t sme_getRoamRssiDiff(tHalHandle hHal)
{
    /* Return the configured roam RSSI difference threshold. */
    return PMAC_STRUCT(hHal)->roam.configParam.RoamRssiDiff;
}
/*--------------------------------------------------------------------------
\brief sme_ChangeRoamScanChannelList() - Change roam scan channel list
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update config successful.
Other status means SME is failed to update
\sa
--------------------------------------------------------------------------*/
eHalStatus sme_ChangeRoamScanChannelList(tHalHandle hHal, tANI_U8 *pChannelList,
                                         tANI_U8 numChannels)
{
    /* Replace the background roam scan channel list with the caller's list,
     * enable host-driven roam scan control, and (if offload is enabled)
     * push the change to the firmware roam scanner. The old and new lists
     * are rendered as text purely for the debug trace below. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tpCsrNeighborRoamControlInfo pNeighborRoamInfo = &pMac->roam.neighborRoamInfo;
    tANI_U8 oldChannelList[WNI_CFG_VALID_CHANNEL_LIST_LEN*2] = {0};
    tANI_U8 newChannelList[WNI_CFG_VALID_CHANNEL_LIST_LEN*2] = {0};
    tANI_U8 i = 0, j = 0;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        /* Snapshot the current channel list (as text) for the trace. */
        if (NULL != pNeighborRoamInfo->cfgParams.channelInfo.ChannelList)
        {
            for (i = 0; i < pNeighborRoamInfo->cfgParams.channelInfo.numOfChannels; i++)
            {
                if (j >= sizeof(oldChannelList))
                {
                    break;
                }
                j += snprintf(oldChannelList + j, sizeof(oldChannelList) - j," %d",
                              pNeighborRoamInfo->cfgParams.channelInfo.ChannelList[i]);
            }
        }
        csrFlushCfgBgScanRoamChannelList(pMac);
        csrCreateBgScanRoamChannelList(pMac, pChannelList, numChannels);
        sme_SetRoamScanControl(hHal, 1);
        if (NULL != pNeighborRoamInfo->cfgParams.channelInfo.ChannelList)
        {
            j = 0;
            for (i = 0; i < pNeighborRoamInfo->cfgParams.channelInfo.numOfChannels; i++)
            {
                /* Bug fix: bound j against newChannelList — the buffer being
                 * written here — not oldChannelList as the original did. */
                if (j >= sizeof(newChannelList))
                {
                    break;
                }
                j += snprintf(newChannelList + j, sizeof(newChannelList) - j," %d",
                              pNeighborRoamInfo->cfgParams.channelInfo.ChannelList[i]);
            }
        }
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                  "LFR runtime successfully set roam scan channels to %s"
                  "- old value is %s - roam state is %s",
                  newChannelList, oldChannelList,
                  macTraceGetNeighbourRoamState(
                      pMac->roam.neighborRoamInfo.neighborRoamState));
        sme_ReleaseGlobalLock(&pMac->sme);
    }
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
    if (pMac->roam.configParam.isRoamOffloadScanEnabled)
    {
        csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG, REASON_CHANNEL_LIST_CHANGED);
    }
#endif
    return status;
}
#ifdef FEATURE_WLAN_ESE_UPLOAD
/*--------------------------------------------------------------------------
\brief sme_SetEseRoamScanChannelList() - set ese roam scan channel list
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update config successful.
Other status means SME is failed to update
\sa
--------------------------------------------------------------------------*/
eHalStatus sme_SetEseRoamScanChannelList(tHalHandle hHal,
                                         tANI_U8 *pChannelList,
                                         tANI_U8 numChannels)
{
    /* Build a new ESE roam scan channel list for the current band and, if
     * offload is enabled, push the change to the firmware roam scanner.
     * Old/new lists are rendered as text only for the debug trace. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tpCsrNeighborRoamControlInfo pNeighborRoamInfo = &pMac->roam.neighborRoamInfo;
    tpCsrChannelInfo currChannelListInfo = &pNeighborRoamInfo->roamChannelInfo.currentChannelListInfo;
    tANI_U8 oldChannelList[WNI_CFG_VALID_CHANNEL_LIST_LEN*2] = {0};
    tANI_U8 newChannelList[128] = {0};
    tANI_U8 i = 0, j = 0;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        if (NULL != currChannelListInfo->ChannelList)
        {
            for (i = 0; i < currChannelListInfo->numOfChannels; i++)
            {
                /* Bug fix: bound j before each snprintf. Without this check,
                 * once j reaches sizeof(buf) the size argument
                 * (sizeof(buf) - j) underflows to a huge size_t and snprintf
                 * can write past the end of the stack buffer. */
                if (j >= sizeof(oldChannelList))
                {
                    break;
                }
                j += snprintf(oldChannelList + j, sizeof(oldChannelList) - j," %d",
                              currChannelListInfo->ChannelList[i]);
            }
        }
        status = csrCreateRoamScanChannelList(pMac, pChannelList, numChannels, csrGetCurrentBand(hHal));
        if ( HAL_STATUS_SUCCESS( status ))
        {
            if (NULL != currChannelListInfo->ChannelList)
            {
                j = 0;
                for (i = 0; i < currChannelListInfo->numOfChannels; i++)
                {
                    /* Same overflow guard for the (smaller) new-list buffer. */
                    if (j >= sizeof(newChannelList))
                    {
                        break;
                    }
                    j += snprintf(newChannelList + j, sizeof(newChannelList) - j," %d",
                                  currChannelListInfo->ChannelList[i]);
                }
            }
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                      "ESE roam scan channel list successfully set to %s - old value is %s - roam state is %s",
                      newChannelList, oldChannelList,
                      macTraceGetNeighbourRoamState(pMac->roam.neighborRoamInfo.neighborRoamState));
        }
        sme_ReleaseGlobalLock( &pMac->sme );
    }
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
    if (pMac->roam.configParam.isRoamOffloadScanEnabled)
    {
        csrRoamOffloadScan(pMac, ROAM_SCAN_OFFLOAD_UPDATE_CFG, REASON_CHANNEL_LIST_CHANGED);
    }
#endif
    return status;
}
#endif
/*--------------------------------------------------------------------------
\brief sme_getRoamScanChannelList() - get roam scan channel list
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return eHAL_STATUS_SUCCESS - SME update config successful.
Other status means SME is failed to update
\sa
--------------------------------------------------------------------------*/
/* Copy the configured roam scan channel list into the caller's buffer and
 * report the channel count. Returns the lock-acquire status; when the list
 * is not yet initialized, *pNumChannels is set to 0 and the (successful)
 * status is returned. */
eHalStatus sme_getRoamScanChannelList(tHalHandle hHal, tANI_U8 *pChannelList,
tANI_U8 *pNumChannels)
{
int i = 0;
tANI_U8 *pOutPtr = pChannelList;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
tpCsrNeighborRoamControlInfo pNeighborRoamInfo = &pMac->roam.neighborRoamInfo;
eHalStatus status = eHAL_STATUS_SUCCESS;
status = sme_AcquireGlobalLock( &pMac->sme );
if ( HAL_STATUS_SUCCESS( status ) )
{
if (NULL == pNeighborRoamInfo->cfgParams.channelInfo.ChannelList)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_WARN,
"Roam Scan channel list is NOT yet initialized");
*pNumChannels = 0;
sme_ReleaseGlobalLock( &pMac->sme );
return status;
}
*pNumChannels = pNeighborRoamInfo->cfgParams.channelInfo.numOfChannels;
for (i = 0; i < (*pNumChannels); i++)
{
pOutPtr[i] = pNeighborRoamInfo->cfgParams.channelInfo.ChannelList[i];
}
/* NOTE(review): writes a terminating '\0' one past the last channel, so
 * the caller's buffer must hold at least numOfChannels + 1 bytes — verify
 * against callers. */
pOutPtr[i] = '\0';
sme_ReleaseGlobalLock( &pMac->sme );
}
return status ;
}
/*--------------------------------------------------------------------------
\brief sme_getIsEseFeatureEnabled() - get Ese feature enabled or not
This is a synchronuous call
\param hHal - The handle returned by macOpen.
\return TRUE (1) - if the Ese feature is enabled
FALSE (0) - if feature is disabled (compile or runtime)
\sa
--------------------------------------------------------------------------*/
/* Report whether ESE fast roaming is enabled: queries the CSR ini flag when
 * compiled with FEATURE_WLAN_ESE, otherwise always FALSE. */
tANI_BOOLEAN sme_getIsEseFeatureEnabled(tHalHandle hHal)
{
#ifdef FEATURE_WLAN_ESE
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
return csrRoamIsEseIniFeatureEnabled(pMac);
#else
/* Feature compiled out: report disabled unconditionally. */
return eANI_BOOLEAN_FALSE;
#endif
}
/*--------------------------------------------------------------------------
\brief sme_GetWESMode() - get WES Mode
This is a synchronous call
\param hHal - The handle returned by macOpen
\return v_U8_t - WES Mode Enabled(1)/Disabled(0)
\sa
--------------------------------------------------------------------------*/
v_BOOL_t sme_GetWESMode(tHalHandle hHal)
{
    /* Return the configured WES mode flag. */
    return PMAC_STRUCT(hHal)->roam.configParam.isWESModeEnabled;
}
/*--------------------------------------------------------------------------
\brief sme_GetRoamScanControl() - get scan control
This is a synchronous call
\param hHal - The handle returned by macOpen.
\return v_BOOL_t - Enabled(1)/Disabled(0)
\sa
--------------------------------------------------------------------------*/
v_BOOL_t sme_GetRoamScanControl(tHalHandle hHal)
{
    /* Return the configured roam scan control flag. */
    return PMAC_STRUCT(hHal)->roam.configParam.nRoamScanControl;
}
#endif
/*--------------------------------------------------------------------------
\brief sme_getIsLfrFeatureEnabled() - get LFR feature enabled or not
This is a synchronuous call
\param hHal - The handle returned by macOpen.
\return TRUE (1) - if the feature is enabled
FALSE (0) - if feature is disabled (compile or runtime)
\sa
--------------------------------------------------------------------------*/
/* Report whether LFR (legacy fast roaming) is enabled: reads the ini flag
 * when compiled with FEATURE_WLAN_LFR, otherwise always FALSE. */
tANI_BOOLEAN sme_getIsLfrFeatureEnabled(tHalHandle hHal)
{
#ifdef FEATURE_WLAN_LFR
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
return pMac->roam.configParam.isFastRoamIniFeatureEnabled;
#else
/* Feature compiled out: report disabled unconditionally. */
return eANI_BOOLEAN_FALSE;
#endif
}
/*--------------------------------------------------------------------------
\brief sme_getIsFtFeatureEnabled() - get FT feature enabled or not
This is a synchronuous call
\param hHal - The handle returned by macOpen.
\return TRUE (1) - if the feature is enabled
FALSE (0) - if feature is disabled (compile or runtime)
\sa
--------------------------------------------------------------------------*/
/* Report whether 802.11r fast transition is enabled: reads the config flag
 * when compiled with WLAN_FEATURE_VOWIFI_11R, otherwise always FALSE. */
tANI_BOOLEAN sme_getIsFtFeatureEnabled(tHalHandle hHal)
{
#ifdef WLAN_FEATURE_VOWIFI_11R
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
return pMac->roam.configParam.isFastTransitionEnabled;
#else
/* Feature compiled out: report disabled unconditionally. */
return eANI_BOOLEAN_FALSE;
#endif
}
/* ---------------------------------------------------------------------------
\fn sme_IsFeatureSupportedByFW
\brief Check if a feature is enabled by FW
\param featEnumValue - Enumeration value from placeHolderInCapBitmap
\- return 1/0 (TRUE/FALSE)
-------------------------------------------------------------------------*/
/* Thin wrapper over the firmware capability-bitmap check macro; returns
 * non-zero when the given capability bit is set by the firmware. */
tANI_U8 sme_IsFeatureSupportedByFW(tANI_U8 featEnumValue)
{
return IS_FEATURE_SUPPORTED_BY_FW(featEnumValue);
}
/* ---------------------------------------------------------------------------
\fn sme_IsFeatureSupportedByDriver
\brief Check if a feature is enabled by Driver
\param featEnumValue - Enumeration value from placeHolderInCapBitmap
\- return 1/0 (TRUE/FALSE)
-------------------------------------------------------------------------*/
/* Thin wrapper over the driver capability-bitmap check macro; returns
 * non-zero when the given capability bit is set by the driver. */
tANI_U8 sme_IsFeatureSupportedByDriver(tANI_U8 featEnumValue)
{
return IS_FEATURE_SUPPORTED_BY_DRIVER(featEnumValue);
}
#ifdef FEATURE_WLAN_TDLS
/* ---------------------------------------------------------------------------
\fn sme_SendTdlsLinkEstablishParams
\brief API to send TDLS link establish parameters.
\param peerMac - peer's Mac Address.
\param tdlsLinkEstablishParams - TDLS Peer Link Establishment Parameters
\- return VOS_STATUS_SUCCES
-------------------------------------------------------------------------*/
VOS_STATUS sme_SendTdlsLinkEstablishParams(tHalHandle hHal,
                                           tANI_U8 sessionId,
                                           tSirMacAddr peerMac,
                                           tCsrTdlsLinkEstablishParams *tdlsLinkEstablishParams)
{
    /* Forward the TDLS link-establish parameters to CSR under the SME lock. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (HAL_STATUS_SUCCESS(status))
    {
        status = csrTdlsSendLinkEstablishParams(hHal, sessionId, peerMac,
                                                tdlsLinkEstablishParams);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
// tdlsoffchan
/* ---------------------------------------------------------------------------
\fn sme_SendTdlsChanSwitchReq
\brief API to send a TDLS channel switch request.
\param peerMac - peer's Mac Address.
\param tdlsOffCh - TDLS off channel
\param tdlsOffChBwOffset - TDLS off channel bandwidth offset
\param tdlsSwMode - TDLS channel switch mode
\- return VOS_STATUS_SUCCES
-------------------------------------------------------------------------*/
VOS_STATUS sme_SendTdlsChanSwitchReq(tHalHandle hHal,
                                     tANI_U8 sessionId,
                                     tSirMacAddr peerMac,
                                     tANI_S32 tdlsOffCh,
                                     tANI_S32 tdlsOffChBwOffset,
                                     tANI_U8 tdlsSwMode)
{
    /* Forward a TDLS channel-switch request to CSR under the SME lock. */
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        status = csrTdlsSendChanSwitchReq(hHal, sessionId, peerMac,
                                          tdlsOffCh, tdlsOffChBwOffset,
                                          tdlsSwMode);
        /* Bug fix: the original called sme_ReleaseGlobalLock() outside this
         * if-block, releasing the SME lock even when the acquire above had
         * failed. Release only on the success path, matching every sibling
         * SME API in this file. */
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_SendTdlsMgmtFrame
\brief API to send TDLS management frames.
\param peerMac - peer's Mac Address.
\param frame_type - Type of TDLS mgmt frame to be sent.
\param dialog - dialog token used in the frame.
\param status - status to be included in the frame.
\param peerCapability - peer capabilities
\param buf - additional IEs to be included
\param len - length of additional IEs
\param responder - Tdls request type
\- return VOS_STATUS_SUCCES
-------------------------------------------------------------------------*/
VOS_STATUS sme_SendTdlsMgmtFrame(tHalHandle hHal, tANI_U8 sessionId, tSirMacAddr peerMac,
tANI_U8 frame_type, tANI_U8 dialog, tANI_U16 statusCode, tANI_U32 peerCapability, tANI_U8 *buf, tANI_U8 len, tANI_U8 responder)
{
    /* Assemble a CSR TDLS management request from the caller's arguments
     * and hand it to CSR while holding the SME lock. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tCsrTdlsSendMgmt mgmtReq = {{0}};
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (HAL_STATUS_SUCCESS(status))
    {
        vos_mem_copy(mgmtReq.peerMac, peerMac, sizeof(tSirMacAddr));
        mgmtReq.frameType      = frame_type;
        mgmtReq.dialog         = dialog;
        mgmtReq.statusCode     = statusCode;
        mgmtReq.peerCapability = peerCapability;
        mgmtReq.responder      = responder;
        mgmtReq.buf            = buf;
        mgmtReq.len            = len;
        status = csrTdlsSendMgmtReq(hHal, sessionId, &mgmtReq);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_ChangeTdlsPeerSta
\brief API to Update TDLS peer sta parameters.
\param peerMac - peer's Mac Adress.
\param staParams - Peer Station Parameters
\- return VOS_STATUS_SUCCES
-------------------------------------------------------------------------*/
VOS_STATUS sme_ChangeTdlsPeerSta(tHalHandle hHal, tANI_U8 sessionId, tSirMacAddr peerMac,
                                 tCsrStaParams *pstaParams)
{
    /* Update the TDLS peer station parameters via CSR. Rejects a NULL
     * parameter block up front. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    if (NULL == pstaParams)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s :pstaParams is NULL",__func__);
        return eHAL_STATUS_FAILURE;
    }

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        status = csrTdlsChangePeerSta(hHal, sessionId, peerMac, pstaParams);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_AddTdlsPeerSta
\brief API to Add TDLS peer sta entry.
\param peerMac - peer's Mac Adress.
\- return VOS_STATUS_SUCCES
-------------------------------------------------------------------------*/
VOS_STATUS sme_AddTdlsPeerSta(tHalHandle hHal, tANI_U8 sessionId, tSirMacAddr peerMac)
{
    /* Add a TDLS peer station entry through CSR under the SME lock. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (HAL_STATUS_SUCCESS(status))
    {
        status = csrTdlsAddPeerSta(hHal, sessionId, peerMac);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_DeleteTdlsPeerSta
\brief API to Delete TDLS peer sta entry.
\param peerMac - peer's Mac Adress.
\- return VOS_STATUS_SUCCES
-------------------------------------------------------------------------*/
VOS_STATUS sme_DeleteTdlsPeerSta(tHalHandle hHal, tANI_U8 sessionId, tSirMacAddr peerMac)
{
    /* Remove a TDLS peer station entry through CSR under the SME lock. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (HAL_STATUS_SUCCESS(status))
    {
        status = csrTdlsDelPeerSta(hHal, sessionId, peerMac);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_SetTdlsPowerSaveProhibited
\API to set/reset the isTdlsPowerSaveProhibited.
\- return void
-------------------------------------------------------------------------*/
void sme_SetTdlsPowerSaveProhibited(tHalHandle hHal, v_BOOL_t val)
{
    /* Set or clear the TDLS power-save prohibition flag on the MAC context. */
    PMAC_STRUCT(hHal)->isTdlsPowerSaveProhibited = val;
}
#endif
/* ---------------------------------------------------------------------------
\fn sme_IsPmcBmps
\API to Check if PMC state is BMPS.
\- return v_BOOL_t
-------------------------------------------------------------------------*/
v_BOOL_t sme_IsPmcBmps(tHalHandle hHal)
{
    /* TRUE when the power-management controller is in BMPS state. */
    return (pmcGetPmcState(hHal) == BMPS);
}
#ifdef FEATURE_WLAN_TDLS_INTERNAL
/*
* SME API to start TDLS discovery Procedure
*/
/*
 * SME API to start TDLS discovery Procedure
 */
VOS_STATUS sme_StartTdlsDiscoveryReq(tHalHandle hHal, tANI_U8 sessionId, tSirMacAddr peerMac)
{
    /* Build a discovery request for the given peer and hand it to CSR. */
    tCsrTdlsDisRequest disReq = {{0}};

    vos_mem_copy(disReq.peerMac, peerMac, sizeof(tSirMacAddr));
    return csrTdlsDiscoveryReq(hHal, sessionId, &disReq);
}
/*
* Process TDLS discovery results
*/
/* Walk the TDLS potential-peer list and copy matching peers (MAC + RSSI)
 * into disResult, returning the number of peers copied.
 * listType selects the filter: TDLS_SETUP_LIST copies only peers in
 * TDLS_LINK_SETUP_STATE; TDLS_DIS_LIST copies every peer on the list.
 * NOTE(review): disResult is written without a capacity bound — the caller
 * must size it for at least disInfo->tdlsPeerCount entries; verify callers. */
v_U8_t sme_GetTdlsDiscoveryResult(tHalHandle hHal,
tSmeTdlsDisResult *disResult, v_U8_t listType)
{
tCsrTdlsPeerLinkinfo *peerLinkInfo = NULL ;
tSirTdlsPeerInfo *peerInfo = NULL ;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
tCsrTdlsCtxStruct *disInfo = &pMac->tdlsCtx ;
tDblLinkList *peerList = &disInfo->tdlsPotentialPeerList ;
tListElem *pEntry = NULL ;
v_U8_t peerCnt = 0 ;
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
("TDLS peer count = %d"),disInfo->tdlsPeerCount ) ;
/* Iterate the doubly-linked peer list under list-level locking. */
pEntry = csrLLPeekHead( peerList, LL_ACCESS_LOCK );
while(pEntry)
{
peerLinkInfo = GET_BASE_ADDR( pEntry, tCsrTdlsPeerLinkinfo,
tdlsPeerStaLink) ;
peerInfo = &peerLinkInfo->tdlsDisPeerInfo ;
switch(listType)
{
case TDLS_SETUP_LIST:
{
/* Only peers whose link is already in setup state. */
if(TDLS_LINK_SETUP_STATE == peerInfo->tdlsPeerState)
{
vos_mem_copy(disResult[peerCnt].tdlsPeerMac,
peerInfo->peerMac, sizeof(tSirMacAddr));
disResult[peerCnt].tdlsPeerRssi = peerInfo->tdlsPeerRssi ;
peerCnt++ ;
}
break ;
}
case TDLS_DIS_LIST:
{
/* Unfiltered: every discovered peer is reported. */
vos_mem_copy(disResult[peerCnt].tdlsPeerMac,
peerInfo->peerMac, sizeof(tSirMacAddr));
disResult[peerCnt].tdlsPeerRssi = peerInfo->tdlsPeerRssi ;
peerCnt++ ;
break ;
}
default:
{
/* Unknown list type: log and keep walking (returns 0 peers). */
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
("unknown list type ")) ;
break ;
}
}
pEntry = csrLLNext( peerList, pEntry, LL_ACCESS_LOCK) ;
}
return peerCnt ;
}
/*
* SME API to start TDLS link setup Procedure.
*/
/*
 * SME API to start TDLS link setup Procedure.
 */
VOS_STATUS sme_StartTdlsLinkSetupReq(tHalHandle hHal, tANI_U8 sessionId, tSirMacAddr peerMac)
{
    /* Build a link-setup request for the given peer and hand it to CSR. */
    tCsrTdlsSetupRequest setupReq = {{0}};

    vos_mem_copy(setupReq.peerMac, peerMac, sizeof(tSirMacAddr));
    return csrTdlsSetupReq(hHal, sessionId, &setupReq);
}
/*
* SME API to start TDLS link Teardown Procedure.
*/
/*
 * SME API to start TDLS link Teardown Procedure.
 */
VOS_STATUS sme_StartTdlsLinkTeardownReq(tHalHandle hHal, tANI_U8 sessionId, tSirMacAddr peerMac)
{
    /* Build a teardown request for the given peer and hand it to CSR. */
    tCsrTdlsTeardownRequest teardownReq = {{0}};

    vos_mem_copy(teardownReq.peerMac, peerMac, sizeof(tSirMacAddr));
    return csrTdlsTeardownReq(hHal, sessionId, &teardownReq);
}
#endif /* FEATURE_WLAN_TDLS */
eHalStatus sme_UpdateDfsSetting(tHalHandle hHal, tANI_U8 fUpdateEnableDFSChnlScan)
{
    /* Enable or disable scanning of DFS channels under the SME lock. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    smsLog(pMac, LOG2, FL("enter"));
    status = sme_AcquireGlobalLock(&pMac->sme);
    if (HAL_STATUS_SUCCESS(status))
    {
        pMac->scan.fEnableDFSChnlScan = fUpdateEnableDFSChnlScan;
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    smsLog(pMac, LOG2, FL("exit status %d"), status);
    return status;
}
/*
* SME API to enable/disable WLAN driver initiated SSR
*/
/*
 * SME API to enable/disable WLAN driver initiated SSR
 */
void sme_UpdateEnableSSR(tHalHandle hHal, tANI_BOOLEAN enableSSR)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (HAL_STATUS_SUCCESS(status))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_DEBUG,
                  "SSR level is changed %d", enableSSR);
        /* Not serialized through the MC thread: this call only sets a
         * variable in WDA/WDI. */
        WDA_SetEnableSSR(enableSSR);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
}
/*
* SME API to determine the channel bonding mode
*/
/* Select the 5 GHz channel bonding mode appropriate for the given phy mode
 * and primary channel, and push it into the SME config. Only applies to
 * 11n/11ac/11a-family modes; all other phy modes return immediately.
 * NOTE(review): the per-channel groupings below presumably encode the
 * primary channel's position within the 40/80 MHz bonded set — confirm
 * against the PHY_*_CHANNEL_* enum definitions. */
VOS_STATUS sme_SelectCBMode(tHalHandle hHal, eCsrPhyMode eCsrPhyMode, tANI_U8 channel)
{
tSmeConfigParams smeConfig;
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
/* Channel bonding is only relevant for 11n/11ac/11a-family phy modes. */
if (
#ifdef WLAN_FEATURE_11AC
eCSR_DOT11_MODE_11ac != eCsrPhyMode &&
eCSR_DOT11_MODE_11ac_ONLY != eCsrPhyMode &&
#endif
eCSR_DOT11_MODE_11n != eCsrPhyMode &&
eCSR_DOT11_MODE_11n_ONLY != eCsrPhyMode &&
eCSR_DOT11_MODE_11a != eCsrPhyMode &&
eCSR_DOT11_MODE_11a_ONLY != eCsrPhyMode &&
eCSR_DOT11_MODE_abg != eCsrPhyMode
)
{
return VOS_STATUS_SUCCESS;
}
/* If channel bonding mode is not required */
if ( !pMac->roam.configParam.channelBondingMode5GHz ) {
return VOS_STATUS_SUCCESS;
}
/* Start from the current SME configuration and only adjust the
 * 5 GHz channel bonding mode field. */
vos_mem_zero(&smeConfig, sizeof (tSmeConfigParams));
sme_GetConfigParam(pMac, &smeConfig);
#ifdef WLAN_FEATURE_11AC
if ( eCSR_DOT11_MODE_11ac == eCsrPhyMode ||
eCSR_DOT11_MODE_11ac_ONLY == eCsrPhyMode )
{
/* 11ac: map the primary channel to its 80 MHz bonding position. */
if ( channel== 36 || channel == 52 || channel == 100 ||
channel == 116 || channel == 149 )
{
smeConfig.csrConfig.channelBondingMode5GHz =
PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_LOW - 1;
}
else if ( channel == 40 || channel == 56 || channel == 104 ||
channel == 120 || channel == 153 )
{
smeConfig.csrConfig.channelBondingMode5GHz =
PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_LOW - 1;
}
else if ( channel == 44 || channel == 60 || channel == 108 ||
channel == 124 || channel == 157 )
{
smeConfig.csrConfig.channelBondingMode5GHz =
PHY_QUADRUPLE_CHANNEL_20MHZ_LOW_40MHZ_HIGH -1;
}
else if ( channel == 48 || channel == 64 || channel == 112 ||
channel == 128 || channel == 144 || channel == 161 )
{
smeConfig.csrConfig.channelBondingMode5GHz =
PHY_QUADRUPLE_CHANNEL_20MHZ_HIGH_40MHZ_HIGH - 1;
}
else if ( channel == 165 )
{
/* Channel 165 has no bonding partner: disable bonding. */
smeConfig.csrConfig.channelBondingMode5GHz = 0;
}
}
#endif
if ( eCSR_DOT11_MODE_11n == eCsrPhyMode ||
eCSR_DOT11_MODE_11n_ONLY == eCsrPhyMode )
{
/* 11n: pick the 40 MHz bonding side based on the primary channel. */
if ( channel== 40 || channel == 48 || channel == 56 ||
channel == 64 || channel == 104 || channel == 112 ||
channel == 120 || channel == 128 || channel == 136 ||
channel == 144 || channel == 153 || channel == 161 )
{
smeConfig.csrConfig.channelBondingMode5GHz = 1;
}
else if ( channel== 36 || channel == 44 || channel == 52 ||
channel == 60 || channel == 100 || channel == 108 ||
channel == 116 || channel == 124 || channel == 132 ||
channel == 140 || channel == 149 || channel == 157 )
{
smeConfig.csrConfig.channelBondingMode5GHz = 2;
}
else if ( channel == 165 )
{
/* Channel 165 has no bonding partner: disable bonding. */
smeConfig.csrConfig.channelBondingMode5GHz = 0;
}
}
/*
for 802.11a phy mode, channel bonding should be zero.
From default config, it is set as PHY_DOUBLE_CHANNEL_HIGH_PRIMARY = 3
through csrChangeDefaultConfigParam function. We will override this
value here.
*/
if ( eCSR_DOT11_MODE_11a == eCsrPhyMode ||
eCSR_DOT11_MODE_11a_ONLY == eCsrPhyMode ||
eCSR_DOT11_MODE_abg == eCsrPhyMode)
{
smeConfig.csrConfig.channelBondingMode5GHz = 0;
}
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_WARN,
"cbmode selected=%d", smeConfig.csrConfig.channelBondingMode5GHz);
/* Commit the adjusted configuration back to SME. */
sme_UpdateConfig (pMac, &smeConfig);
return VOS_STATUS_SUCCESS;
}
/*--------------------------------------------------------------------------
\brief sme_SetCurrDeviceMode() - Sets the current operating device mode.
\param hHal - The handle returned by macOpen.
\param currDeviceMode - Current operating device mode.
--------------------------------------------------------------------------*/
void sme_SetCurrDeviceMode (tHalHandle hHal, tVOS_CON_MODE currDeviceMode)
{
    /* Record the current operating device mode on the SME context. */
    PMAC_STRUCT(hHal)->sme.currDeviceMode = currDeviceMode;
}
#ifdef WLAN_FEATURE_ROAM_SCAN_OFFLOAD
/*--------------------------------------------------------------------------
\brief sme_HandoffRequest() - a wrapper function to Request a handoff
from CSR.
This is a synchronous call
\param hHal - The handle returned by macOpen
\param pHandoffInfo - info provided by HDD with the handoff request (namely:
BSSID, channel etc.)
\return eHAL_STATUS_SUCCESS - SME passed the request to CSR successfully.
Other status means SME is failed to send the request.
\sa
--------------------------------------------------------------------------*/
eHalStatus sme_HandoffRequest(tHalHandle hHal,
                              tCsrHandoffRequest *pHandoffInfo)
{
    /* Pass the HDD-supplied handoff request (BSSID, channel, ...) to CSR
     * while holding the SME lock. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (HAL_STATUS_SUCCESS(status))
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  "%s: invoked", __func__);
        status = csrHandoffRequest(pMac, pHandoffInfo);
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    return status;
}
#endif
/*
* SME API to check if there is any infra station or
* P2P client is connected
*/
/*
 * SME API to check if there is any infra station or
 * P2P client is connected
 */
VOS_STATUS sme_isSta_p2p_clientConnected(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    return csrIsInfraConnected(pMac) ? VOS_STATUS_SUCCESS
                                     : VOS_STATUS_E_FAILURE;
}
#ifdef FEATURE_WLAN_LPHB
/* ---------------------------------------------------------------------------
\fn sme_LPHBConfigReq
\API to make configuration LPHB within FW.
\param hHal - The handle returned by macOpen
\param lphdReq - LPHB request argument by client
\param pCallbackfn - LPHB timeout notification callback function pointer
\- return Configuration message posting status, SUCCESS or Fail
-------------------------------------------------------------------------*/
/* Configure low-power heartbeat (LPHB) in firmware.
 * Registers pCallbackfn as the LPHB indication callback (when provided) and
 * posts the request to WDA via the MC thread message queue.
 * Fails when an enable request arrives with no callback supplied and none
 * previously registered. */
eHalStatus sme_LPHBConfigReq
(
tHalHandle hHal,
tSirLPHBReq *lphdReq,
void (*pCallbackfn)(void *pAdapter, void *indParam)
)
{
eHalStatus status = eHAL_STATUS_SUCCESS;
VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
vos_msg_t vosMessage;
status = sme_AcquireGlobalLock(&pMac->sme);
if (eHAL_STATUS_SUCCESS == status)
{
/* Enabling LPHB requires an indication callback: either one passed in
 * now or one registered on a previous call. */
if ((LPHB_SET_EN_PARAMS_INDID == lphdReq->cmd) &&
(NULL == pCallbackfn) &&
(NULL == pMac->sme.pLphbIndCb))
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
"%s: Indication Call back did not registered", __func__);
sme_ReleaseGlobalLock(&pMac->sme);
return eHAL_STATUS_FAILURE;
}
else if (NULL != pCallbackfn)
{
/* Remember (or replace) the LPHB indication callback. */
pMac->sme.pLphbIndCb = pCallbackfn;
}
/* serialize the req through MC thread */
vosMessage.bodyptr = lphdReq;
vosMessage.type = WDA_LPHB_CONF_REQ;
vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
if (!VOS_IS_STATUS_SUCCESS(vosStatus))
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
"%s: Post Config LPHB MSG fail", __func__);
status = eHAL_STATUS_FAILURE;
}
sme_ReleaseGlobalLock(&pMac->sme);
}
return(status);
}
#endif /* FEATURE_WLAN_LPHB */
/*--------------------------------------------------------------------------
\brief sme_enable_disable_split_scan() - a wrapper function to set the split
scan parameter.
This is a synchronous call
\param hHal - The handle returned by macOpen
\return NONE.
\sa
--------------------------------------------------------------------------*/
void sme_enable_disable_split_scan (tHalHandle hHal, tANI_U8 nNumStaChan,
                                    tANI_U8 nNumP2PChan)
{
    /* Store the per-iteration STA/P2P channel counts used to split scans
     * during concurrent operation. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pMac->roam.configParam.nNumStaChanCombinedConc = nNumStaChan;
    pMac->roam.configParam.nNumP2PChanCombinedConc = nNumP2PChan;

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
              "%s: SCAN nNumStaChanCombinedConc : %d,"
              "nNumP2PChanCombinedConc : %d ",
              __func__, nNumStaChan, nNumP2PChan);
}
/* ---------------------------------------------------------------------------
\fn sme_AddPeriodicTxPtrn
\brief API to Periodic TX Pattern Offload feature
\param hHal - The handle returned by macOpen
\param addPeriodicTxPtrnParams - Pointer to the add pattern structure
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_AddPeriodicTxPtrn(tHalHandle hHal, tSirAddPeriodicTxPtrn
                                 *addPeriodicTxPtrnParams)
{
    /* Post an add-periodic-TX-pattern indication to WDA under the SME lock. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    vos_msg_t msg;
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (eHAL_STATUS_SUCCESS != status)
    {
        return status;
    }

    msg.type    = WDA_ADD_PERIODIC_TX_PTRN_IND;
    msg.bodyptr = addPeriodicTxPtrnParams;
    if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)))
    {
        VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,"%s: Not able "
                   "to post WDA_ADD_PERIODIC_TX_PTRN_IND to WDA!",
                   __func__);
        status = eHAL_STATUS_FAILURE;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_DelPeriodicTxPtrn
\brief API to Periodic TX Pattern Offload feature
\param hHal - The handle returned by macOpen
\param delPeriodicTxPtrnParams - Pointer to the delete pattern structure
\return eHalStatus
---------------------------------------------------------------------------*/
eHalStatus sme_DelPeriodicTxPtrn(tHalHandle hHal, tSirDelPeriodicTxPtrn
                                 *delPeriodicTxPtrnParams)
{
    /* Post a delete-periodic-TX-pattern indication to WDA under the SME lock. */
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    vos_msg_t msg;
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (eHAL_STATUS_SUCCESS != status)
    {
        return status;
    }

    msg.type    = WDA_DEL_PERIODIC_TX_PTRN_IND;
    msg.bodyptr = delPeriodicTxPtrnParams;
    if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)))
    {
        VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,"%s: Not able "
                   "to post WDA_DEL_PERIODIC_TX_PTRN_IND to WDA!",
                   __func__);
        status = eHAL_STATUS_FAILURE;
    }
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* Debug helper: dump the state of the SME command queues — the command at
 * the head of the active list (with CSR detail when it is a CSR command)
 * and the pending-list depths. Logs at LOGE so it shows up in stuck-command
 * diagnostics. */
void smeGetCommandQStatus( tHalHandle hHal )
{
tSmeCmd *pTempCmd = NULL;
tListElem *pEntry;
tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
if (NULL == pMac)
{
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_FATAL,
"%s: pMac is null", __func__);
return;
}
/* Peek (do not dequeue) the currently active command, if any. */
pEntry = csrLLPeekHead( &pMac->sme.smeCmdActiveList, LL_ACCESS_LOCK );
if( pEntry )
{
pTempCmd = GET_BASE_ADDR( pEntry, tSmeCmd, Link );
}
smsLog( pMac, LOGE, "Currently smeCmdActiveList has command (0x%X)",
(pTempCmd) ? pTempCmd->command : eSmeNoCommand );
if(pTempCmd)
{
if( eSmeCsrCommandMask & pTempCmd->command )
{
//CSR command is stuck. See what the reason code is for that command
dumpCsrCommandInfo(pMac, pTempCmd);
}
} //if(pTempCmd)
smsLog( pMac, LOGE, "Currently smeCmdPendingList has %d commands",
csrLLCount(&pMac->sme.smeCmdPendingList));
smsLog( pMac, LOGE, "Currently roamCmdPendingList has %d commands",
csrLLCount(&pMac->roam.roamCmdPendingList));
return;
}
#ifdef FEATURE_WLAN_BATCH_SCAN
/* ---------------------------------------------------------------------------
\fn sme_SetBatchScanReq
\brief API to set batch scan request in FW
\param hHal - The handle returned by macOpen.
\param pRequest - Pointer to the batch request.
\param sessionId - session ID
\param callbackRoutine - HDD callback which needs to be invoked after
getting set batch scan response from FW
\param callbackContext - pAdapter context
\return eHalStatus
---------------------------------------------------------------------------*/
/* Forwards a batch scan set request to PMC under the SME global lock.
 * Returns the PMC status, or the lock-acquire failure status, or
 * eHAL_STATUS_FAILURE for a NULL pMac. */
eHalStatus sme_SetBatchScanReq
(
    tHalHandle hHal, tSirSetBatchScanReq *pRequest, tANI_U8 sessionId,
    void (*callbackRoutine) (void *callbackCtx, tSirSetBatchScanRsp *pRsp),
    void *callbackContext
)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status;

    if (!pMac)
        return eHAL_STATUS_FAILURE;

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS != status)
        return status;

    status = pmcSetBatchScanReq(hHal, pRequest, sessionId, callbackRoutine,
                                callbackContext);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_SendRateUpdateInd
    \brief API to send a rate update indication (WDA_RATE_UPDATE_IND) to WDA
    \param hHal - The handle returned by macOpen
    \param rateUpdateParams - Pointer to rate update params; ownership passes
           to WDA on successful post
    \return eHAL_STATUS_SUCCESS on successful post, otherwise a failure status
  ---------------------------------------------------------------------------*/
eHalStatus sme_SendRateUpdateInd(tHalHandle hHal, tSirRateUpdateInd *rateUpdateParams)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;
    vos_msg_t msg;

    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme)))
    {
        msg.type = WDA_RATE_UPDATE_IND;
        msg.bodyptr = rateUpdateParams;

        if (!VOS_IS_STATUS_SUCCESS(vos_mq_post_message(VOS_MODULE_ID_WDA, &msg)))
        {
            /* Fixed: the log previously named WDA_SET_RMC_RATE_IND, but the
             * message actually posted is WDA_RATE_UPDATE_IND. */
            VOS_TRACE( VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,"%s: Not able "
                       "to post WDA_RATE_UPDATE_IND to WDA!",
                       __func__);
            sme_ReleaseGlobalLock(&pMac->sme);
            return eHAL_STATUS_FAILURE;
        }

        sme_ReleaseGlobalLock(&pMac->sme);
        return eHAL_STATUS_SUCCESS;
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_TriggerBatchScanResultInd
\brief API to trigger batch scan result indications from FW
\param hHal - The handle returned by macOpen.
\param pRequest - Pointer to get batch request.
\param sessionId - session ID
\param callbackRoutine - HDD callback which needs to be invoked after
getting batch scan result indication from FW
\param callbackContext - pAdapter context
\return eHalStatus
---------------------------------------------------------------------------*/
/* Requests PMC to trigger batch scan result indications from FW; the HDD
 * callback is invoked when the indication arrives. */
eHalStatus sme_TriggerBatchScanResultInd
(
    tHalHandle hHal, tSirTriggerBatchScanResultInd *pRequest, tANI_U8 sessionId,
    void (*callbackRoutine) (void *callbackCtx, void *pRsp),
    void *callbackContext
)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (eHAL_STATUS_SUCCESS != status)
        return status;

    status = pmcTriggerBatchScanResultInd(hHal, pRequest, sessionId,
                                          callbackRoutine, callbackContext);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_StopBatchScanInd
\brief API to stop batch scan request in FW
\param hHal - The handle returned by macOpen.
\param pRequest - Pointer to the batch request.
\param sessionId - session ID
\return eHalStatus
---------------------------------------------------------------------------*/
/* Forwards a stop-batch-scan indication to PMC under the SME global lock. */
eHalStatus sme_StopBatchScanInd
(
    tHalHandle hHal, tSirStopBatchScanInd *pRequest, tANI_U8 sessionId
)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (eHAL_STATUS_SUCCESS != status)
        return status;

    status = pmcStopBatchScanInd(hHal, pRequest, sessionId);
    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
#endif
#ifdef FEATURE_WLAN_CH_AVOID
/* ---------------------------------------------------------------------------
\fn sme_AddChAvoidCallback
\brief Used to plug in callback function
Which notify channel may not be used with SAP or P2PGO mode.
Notification come from FW.
\param hHal
\param pCallbackfn : callback function pointer should be plugged in
\- return eHalStatus
-------------------------------------------------------------------------*/
/* Registers the HDD callback used to notify channel-avoidance events from
 * FW (channels that must not be used in SAP/P2P-GO mode). A NULL callback
 * is ignored; the existing registration is left untouched. */
eHalStatus sme_AddChAvoidCallback
(
   tHalHandle hHal,
   void (*pCallbackfn)(void *pAdapter, void *indParam)
)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status;

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
              "%s: Plug in CH AVOID CB", __func__);

    status = sme_AcquireGlobalLock(&pMac->sme);
    if (eHAL_STATUS_SUCCESS != status)
        return(status);

    if (pCallbackfn != NULL)
        pMac->sme.pChAvoidNotificationCb = pCallbackfn;

    sme_ReleaseGlobalLock(&pMac->sme);
    return(status);
}
#endif /* FEATURE_WLAN_CH_AVOID */
void activeListCmdTimeoutHandle(void *userData)
{
if (NULL == userData)
return;
VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
"%s: Active List command timeout Cmd List Count %d", __func__,
csrLLCount(&((tpAniSirGlobal) userData)->sme.smeCmdActiveList) );
smeGetCommandQStatus((tHalHandle) userData);
}
#ifdef WLAN_FEATURE_LINK_LAYER_STATS
/* ---------------------------------------------------------------------------
    \fn sme_LLStatsSetReq
    \brief API to set link layer stats request to FW. A heap copy of the
           request is posted to WDA; WDA owns the copy only after a
           successful post, so it is freed here on every failure path.
    \param hHal - The handle returned by macOpen.
    \param pLinkLayerStatsSetReq - caller-allocated tSirLLStatsSetReq with
           the link layer stats set parameters (copied; caller keeps it).
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_LLStatsSetReq(tHalHandle hHal,
                             tSirLLStatsSetReq *pLinkLayerStatsSetReq)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    vos_msg_t msg;
    eHalStatus status = eHAL_STATUS_FAILURE;
    tSirLLStatsSetReq *plinkLayerSetReq;

    plinkLayerSetReq = vos_mem_malloc(sizeof(*plinkLayerSetReq));
    if ( !plinkLayerSetReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_LINK_LAYER_STATS_SET_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }
    *plinkLayerSetReq = *pLinkLayerStatsSetReq;

    if ( eHAL_STATUS_SUCCESS == ( status = sme_AcquireGlobalLock( &pMac->sme )))
    {
        msg.type = WDA_LINK_LAYER_STATS_SET_REQ;
        msg.reserved = 0;
        msg.bodyptr = plinkLayerSetReq;
        if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
               "Not able to post SIR_HAL_LL_STATS_SET message to HAL", __func__);
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(plinkLayerSetReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    else
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
                  "sme_AcquireGlobalLock error", __func__);
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(plinkLayerSetReq);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_LLStatsGetReq
    \brief API to get link layer stats request to FW. A heap copy of the
           request is posted to WDA; the copy is freed here on every
           failure path since WDA only takes ownership on a successful post.
    \param hHal - The handle returned by macOpen.
    \param pLinkLayerStatsGetReq - caller-allocated tSirLLStatsGetReq with
           the link layer stats get parameters (copied; caller keeps it).
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_LLStatsGetReq(tHalHandle hHal,
                             tSirLLStatsGetReq *pLinkLayerStatsGetReq)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    vos_msg_t msg;
    eHalStatus status = eHAL_STATUS_FAILURE;
    tSirLLStatsGetReq *pGetStatsReq;

    pGetStatsReq = vos_mem_malloc(sizeof(*pGetStatsReq));
    if ( !pGetStatsReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_LINK_LAYER_STATS_GET_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }
    *pGetStatsReq = *pLinkLayerStatsGetReq;

    if ( eHAL_STATUS_SUCCESS == ( status = sme_AcquireGlobalLock( &pMac->sme )))
    {
        msg.type = WDA_LINK_LAYER_STATS_GET_REQ;
        msg.reserved = 0;
        msg.bodyptr = pGetStatsReq;
        if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
               "Not able to post SIR_HAL_LL_STATS_GET message to HAL", __func__);
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pGetStatsReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    else
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
                  "sme_AcquireGlobalLock error", __func__);
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pGetStatsReq);
    }
    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_LLStatsClearReq
    \brief API to clear link layer stats request to FW. A heap copy of the
           request is posted to WDA; the copy is freed here on every
           failure path since WDA only takes ownership on a successful post.
    \param hHal - The handle returned by macOpen.
    \param pLinkLayerStatsClear - caller-allocated tSirLLStatsClearReq with
           the link layer stats clear parameters (copied; caller keeps it).
    \return eHalStatus
  ---------------------------------------------------------------------------*/
eHalStatus sme_LLStatsClearReq(tHalHandle hHal,
                               tSirLLStatsClearReq *pLinkLayerStatsClear)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    vos_msg_t msg;
    eHalStatus status = eHAL_STATUS_FAILURE;
    tSirLLStatsClearReq *pClearStatsReq;

    pClearStatsReq = vos_mem_malloc(sizeof(*pClearStatsReq));
    if ( !pClearStatsReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_LINK_LAYER_STATS_CLEAR_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }
    *pClearStatsReq = *pLinkLayerStatsClear;

    if ( eHAL_STATUS_SUCCESS == ( status = sme_AcquireGlobalLock( &pMac->sme )))
    {
        msg.type = WDA_LINK_LAYER_STATS_CLEAR_REQ;
        msg.reserved = 0;
        msg.bodyptr = pClearStatsReq;
        if(VOS_STATUS_SUCCESS != vos_mq_post_message(VOS_MODULE_ID_WDA, &msg))
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
               "Not able to post SIR_HAL_LL_STATS_CLEAR message to HAL", __func__);
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pClearStatsReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    else
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR, "%s: "
                  "sme_AcquireGlobalLock error", __func__);
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pClearStatsReq);
    }
    return status;
}
/* ---------------------------------------------------------------------------
\fn sme_SetLinkLayerStatsIndCB
\brief API to trigger Link Layer Statistic indications from FW
\param hHal - The handle returned by macOpen.
\param sessionId - session ID
\param callbackRoutine - HDD callback which needs to be invoked after
getting Link Layer Statistics from FW
\param callbackContext - pAdapter context
\return eHalStatus
---------------------------------------------------------------------------*/
/* Registers the HDD callback invoked when Link Layer Statistics arrive
 * from FW. A NULL callback leaves the current registration unchanged. */
eHalStatus sme_SetLinkLayerStatsIndCB
(
    tHalHandle hHal,
    void (*callbackRoutine) (void *callbackCtx, int indType, void *pRsp,
                             tANI_U8 *macAddr)
)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (eHAL_STATUS_SUCCESS != status)
        return status;

    if (callbackRoutine != NULL)
        pMac->sme.pLinkLayerStatsIndCallback = callbackRoutine;

    sme_ReleaseGlobalLock(&pMac->sme);
    return status;
}
#endif /* WLAN_FEATURE_LINK_LAYER_STATS */
/* Stores the connection-debug log enable value in the global MAC context.
 * Always returns eHAL_STATUS_SUCCESS. */
eHalStatus sme_UpdateConnectDebug(tHalHandle hHal, tANI_U32 set_value)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pMac->fEnableDebugLog = set_value;
    return (eHAL_STATUS_SUCCESS);
}
/*
 * sme_UpdateDSCPtoUPMapping() - Fill the caller's DSCP -> WMM user-priority
 * table from the QoS Map Set IE of the PE session associated with the given
 * CSR session.
 *
 * For each user priority i, every DSCP value j in dscp_range[i][0..1] is
 * mapped to i; entries of {255, 255} mark an unused priority. Explicit
 * DSCP exceptions then override the range-derived mapping.
 *
 * @hHal:       handle returned by macOpen
 * @dscpmapping: output table indexed by DSCP value (written in place)
 * @sessionId:  CSR session identifier
 *
 * Returns eHAL_STATUS_SUCCESS, or eHAL_STATUS_FAILURE when the session,
 * BSSID lookup, or QoS Map Set IE is missing.
 */
VOS_STATUS sme_UpdateDSCPtoUPMapping( tHalHandle hHal,
                                      sme_QosWmmUpType  *dscpmapping,
                                      v_U8_t sessionId )
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    eHalStatus status = eHAL_STATUS_SUCCESS;
    v_U8_t i, j, peSessionId;
    tCsrRoamSession *pCsrSession = NULL;
    tpPESession pSession = NULL;

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
        if (!CSR_IS_SESSION_VALID( pMac, sessionId ))
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                     "%s: Invalid session Id %u", __func__, sessionId);
            sme_ReleaseGlobalLock( &pMac->sme);
            return eHAL_STATUS_FAILURE;
        }
        pCsrSession = CSR_GET_SESSION( pMac, sessionId );
        if (pCsrSession == NULL)
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                     "%s: CSR Session lookup fails %u", __func__, sessionId);
            sme_ReleaseGlobalLock( &pMac->sme);
            return eHAL_STATUS_FAILURE;
        }
        /* Resolve the PE session from the currently-connected BSSID. */
        pSession = peFindSessionByBssid( pMac,
            pCsrSession->connectedProfile.bssid, &peSessionId );

        if (pSession == NULL)
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                     "%s: Session lookup fails for BSSID", __func__);
            sme_ReleaseGlobalLock( &pMac->sme);
            return eHAL_STATUS_FAILURE;
        }

        if ( !pSession->QosMapSet.present )
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                     "%s: QOS Mapping IE not present", __func__);
            sme_ReleaseGlobalLock( &pMac->sme);
            return eHAL_STATUS_FAILURE;
        }
        else
        {
            for (i = 0; i < SME_QOS_WMM_UP_MAX; i++)
            {
                for (j = pSession->QosMapSet.dscp_range[i][0];
                     j <= pSession->QosMapSet.dscp_range[i][1]; j++)
                {
                   /* {255, 255} marks an unused user priority. NOTE(review):
                    * in that case j starts at 255 and dscpmapping[255] is
                    * written before the break - likely out of bounds if the
                    * table is sized for the 64 valid DSCP values; confirm
                    * the caller's array size. Also, the break is what stops
                    * the loop: with j a v_U8_t and an upper bound of 255,
                    * j++ would wrap and never terminate otherwise. */
                   if ((pSession->QosMapSet.dscp_range[i][0] == 255) &&
                                (pSession->QosMapSet.dscp_range[i][1] == 255))
                   {
                       dscpmapping[j]= 0;
                       VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                       "%s: User Priority %d is not used in mapping",
                                __func__, i);
                       break;
                   }
                   else
                   {
                       dscpmapping[j]= i;
                   }
                }
            }
            /* Exceptions override the range-based mapping; 255 in the DSCP
             * slot marks an unused exception entry. */
            for (i = 0; i< pSession->QosMapSet.num_dscp_exceptions; i++)
            {
                if (pSession->QosMapSet.dscp_exceptions[i][0] != 255)
                {
                    dscpmapping[pSession->QosMapSet.dscp_exceptions[i][0] ] =
                                         pSession->QosMapSet.dscp_exceptions[i][1];
                }
            }
        }
    }
    sme_ReleaseGlobalLock( &pMac->sme);
    return status;
}
/* Returns TRUE when the current country code matches the 11d-derived
 * country code (first two characters compared). */
tANI_BOOLEAN sme_Is11dCountrycode(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );
    v_BOOL_t same = vos_mem_compare(pMac->scan.countryCodeCurrent,
                                    pMac->scan.countryCode11d, 2);

    return (VOS_TRUE == same) ? eANI_BOOLEAN_TRUE : eANI_BOOLEAN_FALSE;
}
/*
 * sme_SpoofMacAddrReq() - Send a MAC address spoof request
 * (eWNI_SME_MAC_SPOOF_ADDR_IND) to LIM via the mailbox.
 *
 * NOTE(review): the declared return type is tANI_BOOLEAN but every value
 * returned is an eHalStatus code (lock status / eHAL_STATUS_FAILURE /
 * palSendMBMessage result) - callers should treat this as eHalStatus;
 * consider fixing the prototype file-wide.
 *
 * @hHal:    handle returned by macOpen
 * @macaddr: MAC address to spoof (VOS_MAC_ADDRESS_LEN bytes copied)
 */
tANI_BOOLEAN sme_SpoofMacAddrReq(tHalHandle hHal, v_MACADDR_t *macaddr)
{
    eHalStatus status   = eHAL_STATUS_SUCCESS;
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tANI_U16 len;

    if ( eHAL_STATUS_SUCCESS == ( status = sme_AcquireGlobalLock( &pMac->sme ) ) )
    {
        tpSirSpoofMacAddrReq pMsg;

        /* Create the message and send to lim */
        len = sizeof(tSirSpoofMacAddrReq);
        pMsg = vos_mem_malloc(len);
        if ( NULL == pMsg )
            status = eHAL_STATUS_FAILURE;
        else
        {
            vos_mem_set(pMsg, sizeof(tSirSpoofMacAddrReq), 0);
            pMsg->messageType     = eWNI_SME_MAC_SPOOF_ADDR_IND;
            pMsg->length          = len;
            /* Data starts from here */
            vos_mem_copy(pMsg->macAddr, macaddr->bytes, VOS_MAC_ADDRESS_LEN);
            /* pMsg ownership passes to the mailbox layer here; presumably
             * palSendMBMessage frees it on both paths - TODO confirm. */
            status = palSendMBMessage(pMac->hHdd, pMsg);
        }
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return status;
}
#ifdef WLAN_FEATURE_EXTSCAN
/* ---------------------------------------------------------------------------
    \fn sme_GetValidChannelsByBand
    \brief SME API to fetch all valid channels, converted to frequencies and
           filtered by the requested RF band.
    \param hHal - The handle returned by macOpen
    \param wifiBand - RF band selector (WIFI_BAND_* enum)
    \param aValidChannels - output array of channel frequencies (MHz); must
           hold at least WNI_CFG_VALID_CHANNEL_LIST_LEN entries
    \param pNumChannels - output count of entries written
    \return eHalStatus (eHAL_STATUS_INVALID_PARAMETER on bad args/band)
  -------------------------------------------------------------------------*/
eHalStatus sme_GetValidChannelsByBand (tHalHandle hHal, tANI_U8 wifiBand,
                               tANI_U32 *aValidChannels, tANI_U8 *pNumChannels)
{
    eHalStatus status = eHAL_STATUS_SUCCESS;
    tANI_U8 chanList[WNI_CFG_VALID_CHANNEL_LIST_LEN] = {0};
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tANI_U8 numChannels = 0;
    tANI_U8 i = 0;
    tANI_U32 totValidChannels = WNI_CFG_VALID_CHANNEL_LIST_LEN;

    if (!aValidChannels || !pNumChannels) {
        smsLog(pMac, VOS_TRACE_LEVEL_ERROR,
               FL("Output channel list/NumChannels is NULL"));
        return eHAL_STATUS_INVALID_PARAMETER;
    }

    if ((wifiBand < WIFI_BAND_UNSPECIFIED) || (wifiBand >= WIFI_BAND_MAX)) {
        smsLog(pMac, VOS_TRACE_LEVEL_ERROR,
               FL("Invalid wifiBand (%d)"), wifiBand);
        return eHAL_STATUS_INVALID_PARAMETER;
    }

    /* Fetch the full valid channel list once, then filter per band below. */
    status = sme_GetCfgValidChannels(hHal, &chanList[0],
                                     &totValidChannels);
    if (!HAL_STATUS_SUCCESS(status)) {
        smsLog(pMac, VOS_TRACE_LEVEL_ERROR,
               FL("Failed to get valid channel list (err=%d)"), status);
        return status;
    }

    switch (wifiBand) {
    case WIFI_BAND_UNSPECIFIED:
        smsLog(pMac, VOS_TRACE_LEVEL_INFO, FL("Unspecified wifiBand, "
               "return all (%d) valid channels"), totValidChannels);
        /* NOTE: numChannels is tANI_U8; assumes totValidChannels fits -
         * holds for WNI_CFG_VALID_CHANNEL_LIST_LEN-sized lists. */
        numChannels = totValidChannels;
        for (i = 0; i < numChannels; i++)
            aValidChannels[i] = vos_chan_to_freq(chanList[i]);
        break;

    case WIFI_BAND_BG:
        smsLog(pMac, VOS_TRACE_LEVEL_INFO, FL("WIFI_BAND_BG (2.4 GHz)"));
        for (i = 0; i < totValidChannels; i++)
            if (CSR_IS_CHANNEL_24GHZ(chanList[i]))
                aValidChannels[numChannels++] =
                                             vos_chan_to_freq(chanList[i]);
        break;

    case WIFI_BAND_A:
        smsLog(pMac, VOS_TRACE_LEVEL_INFO,
               FL("WIFI_BAND_A (5 GHz without DFS)"));
        for (i = 0; i < totValidChannels; i++)
            if (CSR_IS_CHANNEL_5GHZ(chanList[i]) &&
                !CSR_IS_CHANNEL_DFS(chanList[i]))
                aValidChannels[numChannels++] =
                                             vos_chan_to_freq(chanList[i]);
        break;

    case WIFI_BAND_ABG:
        smsLog(pMac, VOS_TRACE_LEVEL_INFO,
               FL("WIFI_BAND_ABG (2.4 GHz + 5 GHz; no DFS)"));
        for (i = 0; i < totValidChannels; i++)
            if ((CSR_IS_CHANNEL_24GHZ(chanList[i]) ||
                 CSR_IS_CHANNEL_5GHZ(chanList[i])) &&
                !CSR_IS_CHANNEL_DFS(chanList[i]))
                aValidChannels[numChannels++] =
                                             vos_chan_to_freq(chanList[i]);
        break;

    case WIFI_BAND_A_DFS_ONLY:
        smsLog(pMac, VOS_TRACE_LEVEL_INFO,
               FL("WIFI_BAND_A_DFS (5 GHz DFS only)"));
        for (i = 0; i < totValidChannels; i++)
            if (CSR_IS_CHANNEL_5GHZ(chanList[i]) &&
                CSR_IS_CHANNEL_DFS(chanList[i]))
                aValidChannels[numChannels++] =
                                             vos_chan_to_freq(chanList[i]);
        break;

    case WIFI_BAND_A_WITH_DFS:
        smsLog(pMac, VOS_TRACE_LEVEL_INFO,
               FL("WIFI_BAND_A_WITH_DFS (5 GHz with DFS)"));
        for (i = 0; i < totValidChannels; i++)
            if (CSR_IS_CHANNEL_5GHZ(chanList[i]))
                aValidChannels[numChannels++] =
                                             vos_chan_to_freq(chanList[i]);
        break;

    case WIFI_BAND_ABG_WITH_DFS:
        smsLog(pMac, VOS_TRACE_LEVEL_INFO,
               FL("WIFI_BAND_ABG_WITH_DFS (2.4 GHz + 5 GHz with DFS)"));
        for (i = 0; i < totValidChannels; i++)
            if (CSR_IS_CHANNEL_24GHZ(chanList[i]) ||
                CSR_IS_CHANNEL_5GHZ(chanList[i]))
                aValidChannels[numChannels++] =
                                             vos_chan_to_freq(chanList[i]);
        break;

    default:
        smsLog(pMac, VOS_TRACE_LEVEL_ERROR,
               FL("Unknown wifiBand (%d))"), wifiBand);
        /* Fixed: removed unreachable "break" that followed this return. */
        return eHAL_STATUS_INVALID_PARAMETER;
    }
    *pNumChannels = numChannels;

    return status;
}
/* ---------------------------------------------------------------------------
    \fn sme_EXTScanGetCapabilities
    \brief SME API to fetch Extended Scan capabilities. A heap copy of the
           request is posted to WDA; the copy is freed here on every failure
           path since WDA only takes ownership on a successful post.
    \param hHal
    \param pReq: Extended Scan capabilities request (copied; caller keeps it)
    \- return eHalStatus
  -------------------------------------------------------------------------*/
eHalStatus sme_EXTScanGetCapabilities (tHalHandle hHal,
                                     tSirGetEXTScanCapabilitiesReqParams *pReq)
{
    eHalStatus status    = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac  = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    tSirGetEXTScanCapabilitiesReqParams *pGetEXTScanCapabilitiesReq;

    pGetEXTScanCapabilitiesReq =
        vos_mem_malloc(sizeof(*pGetEXTScanCapabilitiesReq));
    if ( !pGetEXTScanCapabilitiesReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_EXTSCAN_GET_CAPABILITIES_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    *pGetEXTScanCapabilitiesReq = *pReq;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_EXTSCAN_GET_CAPABILITIES, NO_SESSION, 0));
    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme))) {
        /* Serialize the req through MC thread */
        vosMessage.bodyptr = pGetEXTScanCapabilitiesReq;
        vosMessage.type    = WDA_EXTSCAN_GET_CAPABILITIES_REQ;
        vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pGetEXTScanCapabilitiesReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    else
    {
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pGetEXTScanCapabilitiesReq);
    }
    return(status);
}
/* ---------------------------------------------------------------------------
    \fn sme_EXTScanStart
    \brief SME API to issue Extended Scan start. A heap copy of the request
           is posted to WDA; the copy is freed here on every failure path
           since WDA only takes ownership on a successful post.
    \param hHal
    \param pStartCmd: Extended Scan start request (copied; caller keeps it)
    \- return eHalStatus
  -------------------------------------------------------------------------*/
eHalStatus sme_EXTScanStart (tHalHandle hHal,
                             tSirEXTScanStartReqParams *pStartCmd)
{
    eHalStatus status    = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac  = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    tSirEXTScanStartReqParams *pextScanStartReq;

    pextScanStartReq = vos_mem_malloc(sizeof(*pextScanStartReq));
    if ( !pextScanStartReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_EXTSCAN_START_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    *pextScanStartReq = *pStartCmd;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_EXTSCAN_START, NO_SESSION, 0));
    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme))) {
        /* Serialize the req through MC thread */
        vosMessage.bodyptr = pextScanStartReq;
        vosMessage.type    = WDA_EXTSCAN_START_REQ;
        vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pextScanStartReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    else
    {
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pextScanStartReq);
    }
    return(status);
}
/* ---------------------------------------------------------------------------
    \fn sme_EXTScanStop
    \brief SME API to issue Extended Scan stop. A heap copy of the request
           is posted to WDA; the copy is freed here on every failure path
           since WDA only takes ownership on a successful post.
    \param hHal
    \param pStopReq: Extended Scan stop request (copied; caller keeps it)
    \- return eHalStatus
  -------------------------------------------------------------------------*/
eHalStatus sme_EXTScanStop(tHalHandle hHal, tSirEXTScanStopReqParams *pStopReq)
{
    eHalStatus status    = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac  = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    tSirEXTScanStopReqParams *pEXTScanStopReq;

    pEXTScanStopReq = vos_mem_malloc(sizeof(*pEXTScanStopReq));
    if ( !pEXTScanStopReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_EXTSCAN_STOP_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    *pEXTScanStopReq = *pStopReq;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_EXTSCAN_STOP, NO_SESSION, 0));
    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme)))
    {
        /* Serialize the req through MC thread */
        vosMessage.bodyptr = pEXTScanStopReq;
        vosMessage.type    = WDA_EXTSCAN_STOP_REQ;
        vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pEXTScanStopReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    else
    {
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pEXTScanStopReq);
    }
    return(status);
}
/* ---------------------------------------------------------------------------
    \fn sme_SetBssHotlist
    \brief SME API to set BSSID hotlist. A heap copy of the request is
           posted to WDA; the copy is freed here on every failure path since
           WDA only takes ownership on a successful post.
    \param hHal
    \param pSetHotListReq: Extended Scan set hotlist request (copied;
           caller keeps it)
    \- return eHalStatus
  -------------------------------------------------------------------------*/
eHalStatus sme_SetBssHotlist (tHalHandle hHal,
                              tSirEXTScanSetBssidHotListReqParams *pSetHotListReq)
{
    eHalStatus status    = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac  = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    tSirEXTScanSetBssidHotListReqParams *pEXTScanSetBssidHotlistReq;

    pEXTScanSetBssidHotlistReq =
        vos_mem_malloc(sizeof(*pEXTScanSetBssidHotlistReq));
    if ( !pEXTScanSetBssidHotlistReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_EXTSCAN_SET_BSSID_HOTLIST_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    *pEXTScanSetBssidHotlistReq = *pSetHotListReq;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_EXTSCAN_SET_BSS_HOTLIST, NO_SESSION, 0));
    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme))) {
        /* Serialize the req through MC thread */
        vosMessage.bodyptr = pEXTScanSetBssidHotlistReq;
        vosMessage.type    = WDA_EXTSCAN_SET_BSSID_HOTLIST_REQ;
        vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pEXTScanSetBssidHotlistReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    else
    {
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pEXTScanSetBssidHotlistReq);
    }
    return(status);
}
/* ---------------------------------------------------------------------------
    \fn sme_ResetBssHotlist
    \brief SME API to reset BSSID hotlist. A heap copy of the request is
           posted to WDA; the copy is freed here on every failure path since
           WDA only takes ownership on a successful post.
    \param hHal
    \param pResetReq: Extended Scan reset hotlist request (copied;
           caller keeps it)
    \- return eHalStatus
  -------------------------------------------------------------------------*/
eHalStatus sme_ResetBssHotlist (tHalHandle hHal,
                                tSirEXTScanResetBssidHotlistReqParams *pResetReq)
{
    eHalStatus status    = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac  = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    tSirEXTScanResetBssidHotlistReqParams *pEXTScanHotlistResetReq;

    pEXTScanHotlistResetReq = vos_mem_malloc(sizeof(*pEXTScanHotlistResetReq));
    if ( !pEXTScanHotlistResetReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_EXTSCAN_RESET_BSSID_HOTLIST_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    *pEXTScanHotlistResetReq = *pResetReq;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_EXTSCAN_RESET_BSS_HOTLIST, NO_SESSION, 0));
    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme))) {
        /* Serialize the req through MC thread */
        vosMessage.bodyptr = pEXTScanHotlistResetReq;
        vosMessage.type    = WDA_EXTSCAN_RESET_BSSID_HOTLIST_REQ;
        vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pEXTScanHotlistResetReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    else
    {
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pEXTScanHotlistResetReq);
    }
    return(status);
}
/* ---------------------------------------------------------------------------
    \fn sme_SetSignificantChange
    \brief SME API to set significant change monitoring. A heap copy of the
           request is posted to WDA; the copy is freed here on every failure
           path since WDA only takes ownership on a successful post.
    \param hHal
    \param pSetSignificantChangeReq: Extended Scan set significant change
           request (copied; caller keeps it)
    \- return eHalStatus
  -------------------------------------------------------------------------*/
eHalStatus sme_SetSignificantChange (tHalHandle hHal,
              tSirEXTScanSetSignificantChangeReqParams *pSetSignificantChangeReq)
{
    eHalStatus status    = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac  = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    tSirEXTScanSetSignificantChangeReqParams *pEXTScanSetSignificantReq;

    pEXTScanSetSignificantReq = vos_mem_malloc(sizeof(*pEXTScanSetSignificantReq));
    if ( !pEXTScanSetSignificantReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_EXTSCAN_SET_SIGNF_RSSI_CHANGE_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    *pEXTScanSetSignificantReq = *pSetSignificantChangeReq;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_EXTSCAN_SET_SIGNF_CHANGE, NO_SESSION, 0));
    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme))) {
        /* Serialize the req through MC thread */
        vosMessage.bodyptr = pEXTScanSetSignificantReq;
        vosMessage.type    = WDA_EXTSCAN_SET_SIGNF_RSSI_CHANGE_REQ;
        vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pEXTScanSetSignificantReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    else
    {
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pEXTScanSetSignificantReq);
    }
    return(status);
}
/* ---------------------------------------------------------------------------
    \fn sme_ResetSignificantChange
    \brief SME API to reset significant change monitoring. A heap copy of
           the request is posted to WDA; the copy is freed here on every
           failure path since WDA only takes ownership on a successful post.
    \param hHal
    \param pResetReq: Extended Scan reset significant change request
           (copied; caller keeps it)
    \- return eHalStatus
  -------------------------------------------------------------------------*/
eHalStatus sme_ResetSignificantChange (tHalHandle hHal,
                       tSirEXTScanResetSignificantChangeReqParams *pResetReq)
{
    eHalStatus status    = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac  = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    tSirEXTScanResetSignificantChangeReqParams *pEXTScanResetSignificantReq;

    pEXTScanResetSignificantReq =
        vos_mem_malloc(sizeof(*pEXTScanResetSignificantReq));
    if ( !pEXTScanResetSignificantReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_EXTSCAN_RESET_SIGNF_RSSI_CHANGE_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    *pEXTScanResetSignificantReq = *pResetReq;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_EXTSCAN_RESET_SIGNF_CHANGE, NO_SESSION, 0));
    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme))) {
        /* Serialize the req through MC thread */
        vosMessage.bodyptr = pEXTScanResetSignificantReq;
        vosMessage.type    = WDA_EXTSCAN_RESET_SIGNF_RSSI_CHANGE_REQ;
        vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pEXTScanResetSignificantReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    else
    {
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pEXTScanResetSignificantReq);
    }
    return(status);
}
/* ---------------------------------------------------------------------------
    \fn sme_getCachedResults
    \brief SME API to get cached Extended Scan results. A heap copy of the
           request is posted to WDA; the copy is freed here on every failure
           path since WDA only takes ownership on a successful post.
    \param hHal
    \param pCachedResultsReq: Extended Scan get cached results request
           (copied; caller keeps it)
    \- return eHalStatus
  -------------------------------------------------------------------------*/
eHalStatus sme_getCachedResults (tHalHandle hHal,
                      tSirEXTScanGetCachedResultsReqParams *pCachedResultsReq)
{
    eHalStatus status    = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac  = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    tSirEXTScanGetCachedResultsReqParams *pEXTScanCachedResultsReq;

    pEXTScanCachedResultsReq =
            vos_mem_malloc(sizeof(*pEXTScanCachedResultsReq));
    if ( !pEXTScanCachedResultsReq)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "WDA_EXTSCAN_GET_CACHED_RESULTS_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    *pEXTScanCachedResultsReq = *pCachedResultsReq;

    MTRACE(vos_trace(VOS_MODULE_ID_SME,
           TRACE_CODE_SME_RX_HDD_EXTSCAN_GET_CACHED_RESULTS, NO_SESSION, 0));
    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme))) {
        /* Serialize the req through MC thread */
        vosMessage.bodyptr = pEXTScanCachedResultsReq;
        vosMessage.type    = WDA_EXTSCAN_GET_CACHED_RESULTS_REQ;
        vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pEXTScanCachedResultsReq);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    else
    {
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pEXTScanCachedResultsReq);
    }
    return(status);
}
/* Registers the HDD Extended Scan indication callback and its context in
 * the SME global state. */
eHalStatus sme_EXTScanRegisterCallback (tHalHandle hHal,
                         void (*pEXTScanIndCb)(void *, const tANI_U16, void *),
                         void *callbackContext)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    eHalStatus status = sme_AcquireGlobalLock(&pMac->sme);

    if (eHAL_STATUS_SUCCESS != status)
        return(status);

    pMac->sme.pEXTScanIndCb           = pEXTScanIndCb;
    pMac->sme.pEXTScanCallbackContext = callbackContext;
    sme_ReleaseGlobalLock(&pMac->sme);
    return(status);
}
/* Records the current Miracast mode in the global MAC context. */
void sme_SetMiracastMode (tHalHandle hHal,tANI_U8 mode)
{
    PMAC_STRUCT(hHal)->miracast_mode = mode;
}
#endif /* WLAN_FEATURE_EXTSCAN */
/* Clears the BTC SCO coexistence indication flag and, when it was set,
 * re-enables 2.4 GHz BA sessions by clearing the corresponding CFG item. */
void sme_resetCoexEevent(tHalHandle hHal)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    if (NULL == pMac)
    {
        printk("btc: %s pMac is NULL \n",__func__);
        return;
    }

    VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
              FL("isCoexScoIndSet: %d"), pMac->isCoexScoIndSet);

    if (!pMac->isCoexScoIndSet)
        return;

    pMac->isCoexScoIndSet = 0;
    ccmCfgSetInt(pMac, WNI_CFG_DEL_ALL_RX_TX_BA_SESSIONS_2_4_G_BTC, 0,
                 NULL, eANI_BOOLEAN_FALSE);
}
/* Enables/disables DFS channel scanning and purges DFS channels via CSR.
 * NOTE(review): the parameter name "disbale_dfs" is a typo for
 * "disable_dfs"; kept as-is to match the prototype used by callers. */
void sme_disable_dfs_channel(tHalHandle hHal, bool disbale_dfs)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);

    pMac->scan.fEnableDFSChnlScan = (disbale_dfs == false);
    csrDisableDfsChannel(pMac);
}
/* ---------------------------------------------------------------------------
    \fn sme_Encryptmsgsend
    \brief SME API to issue an encrypt message request. The payload is
           copied to a heap buffer and posted to WDA; the buffer is freed
           here on every failure path since WDA only takes ownership on a
           successful post.
    \param hHal
    \param pCmd: Data to be encrypted (length bytes, copied)
    \param length: payload length in bytes
    \param encMsgCbk: callback invoked with the encrypt response
    \- return eHalStatus
  -------------------------------------------------------------------------*/
eHalStatus sme_Encryptmsgsend (tHalHandle hHal,
                               u8 *pCmd,
                               int length,
                               pEncryptMsgRSPCb encMsgCbk)
{
    eHalStatus status    = eHAL_STATUS_SUCCESS;
    VOS_STATUS vosStatus = VOS_STATUS_SUCCESS;
    tpAniSirGlobal pMac  = PMAC_STRUCT(hHal);
    vos_msg_t vosMessage;
    u8 *pEncryptMsg;

    pEncryptMsg = vos_mem_malloc(length);
    if ( !pEncryptMsg)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  "%s: Not able to allocate memory for "
                  "SIR_HAL_ENCRYPT_MSG_REQ",
                  __func__);
        return eHAL_STATUS_FAILURE;
    }

    vos_mem_copy(pEncryptMsg, pCmd, length);

    if (eHAL_STATUS_SUCCESS == (status = sme_AcquireGlobalLock(&pMac->sme))) {

        pMac->sme.pEncMsgInfoParams.pEncMsgCbk = encMsgCbk;
        pMac->sme.pEncMsgInfoParams.pUserData = hHal;
        /* Serialize the req through MC thread */
        vosMessage.bodyptr = pEncryptMsg;
        vosMessage.type    = SIR_HAL_ENCRYPT_MSG_REQ;
        vosStatus = vos_mq_post_message(VOS_MQ_ID_WDA, &vosMessage);
        if (!VOS_IS_STATUS_SUCCESS(vosStatus))
        {
            /* Post failed: WDA never took ownership - free to avoid a leak. */
            vos_mem_free(pEncryptMsg);
            status = eHAL_STATUS_FAILURE;
        }
        sme_ReleaseGlobalLock(&pMac->sme);
    }
    else
    {
        /* Lock not acquired: request never posted - free the copy. */
        vos_mem_free(pEncryptMsg);
    }
    return(status);
}
/*
 * Return whether the BTC coex SCO indication flag is currently set.
 * Reads the flag under the SME global lock; returns FALSE if the lock
 * cannot be taken.
 */
tANI_BOOLEAN sme_IsCoexScoIndicationSet(tHalHandle hHal)
{
    eHalStatus status = eHAL_STATUS_FAILURE;
    tANI_BOOLEAN valid = FALSE;
    tpAniSirGlobal pMac = PMAC_STRUCT( hHal );

    status = sme_AcquireGlobalLock( &pMac->sme );
    if ( HAL_STATUS_SUCCESS( status ) )
    {
        valid = pMac->isCoexScoIndSet;
        /* Fix: only release the lock when it was actually acquired;
         * the original released it unconditionally, unbalancing the
         * lock on the failure path. */
        sme_ReleaseGlobalLock( &pMac->sme );
    }
    return (valid);
}
/*
 * Enable/disable the Miracast tuning profile.
 * @hHal: HAL handle
 * @iniNumBuffAdvert: INI-configured buffer-advert count restored on disable
 * @set_value: non-zero enables tuning, zero disables it
 *
 * Enabling caps aggregation at 4 advertised buffers and masks out the
 * 64QAM MCS rates (5-7); disabling restores the INI value and all MCS
 * rates.  Idempotent: a repeated request in the current state is a no-op.
 */
eHalStatus sme_SetMiracastVendorConfig(tHalHandle hHal,
    tANI_U32 iniNumBuffAdvert , tANI_U32 set_value)
{
    tpAniSirGlobal pMac = PMAC_STRUCT(hHal);
    tANI_U8 mcsSet[SIZE_OF_SUPPORTED_MCS_SET];
    tANI_U32 val = SIZE_OF_SUPPORTED_MCS_SET;

    /* Read the current MCS bitmap so only byte 0 is rewritten below. */
    if (ccmCfgGetStr(hHal, WNI_CFG_SUPPORTED_MCS_SET, mcsSet, &val)
        != eHAL_STATUS_SUCCESS)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  FL("failed to get ini param, WNI_CFG_SUPPORTED_MCS_SET"));
        return eHAL_STATUS_FAILURE;
    }

    if (set_value)
    {
        if (pMac->miracastVendorConfig)
        {
            /* Already enabled: nothing to do. */
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                      FL(" Miracast tuning already enabled!!"));
            return eHAL_STATUS_SUCCESS;
        }
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  FL("Enable Miracast tuning by disabling 64QAM rates, setting 4 blocks for aggregation and disabling probe response for broadcast probe in P2P-GO mode"));
        if (ccmCfgSetInt(hHal, WNI_CFG_NUM_BUFF_ADVERT, 4,
                         NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                      FL("Failure: Could not set WNI_CFG_NUM_BUFF_ADVERT"));
            return eHAL_STATUS_FAILURE;
        }
        /* Disable 64QAM rates ie (MCS 5,6 and 7)
         */
        mcsSet[0]=0x1F;
    }
    else
    {
        if (!pMac->miracastVendorConfig)
        {
            /* Already disabled: nothing to do. */
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                      FL(" Miracast tuning already disabled!!"));
            return eHAL_STATUS_SUCCESS;
        }
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_INFO,
                  FL("Disable Miracast tuning by enabling all MCS rates, setting %d blocks for aggregation and enabling probe response for broadcast probe in P2P-GO mode"),
                  iniNumBuffAdvert);
        if (ccmCfgSetInt(hHal, WNI_CFG_NUM_BUFF_ADVERT, iniNumBuffAdvert,
                         NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
        {
            VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                      FL("Failure: Could not set WNI_CFG_NUM_BUFF_ADVERT"));
            return eHAL_STATUS_FAILURE;
        }
        /* Enable all MCS rates)
         */
        mcsSet[0]=0xFF;
    }

    /* Push the modified MCS bitmap back into CFG. */
    if (ccmCfgSetStr(hHal, WNI_CFG_SUPPORTED_MCS_SET, mcsSet,
                     val, NULL, eANI_BOOLEAN_FALSE) == eHAL_STATUS_FAILURE)
    {
        VOS_TRACE(VOS_MODULE_ID_SME, VOS_TRACE_LEVEL_ERROR,
                  FL("Failure: Could not set WNI_CFG_SUPPORTED_MCS_SET"));
        return eHAL_STATUS_FAILURE;
    }

    /* Remember the active state so repeated requests short-circuit. */
    pMac->miracastVendorConfig = set_value;
    return eHAL_STATUS_SUCCESS;
}
| gpl-2.0 |
tifler/linux-mainline | drivers/pinctrl/samsung/pinctrl-samsung.c | 41 | 34878 | /*
* pin-controller/pin-mux/pin-config/gpio-driver for Samsung's SoC's.
*
* Copyright (c) 2012 Samsung Electronics Co., Ltd.
* http://www.samsung.com
* Copyright (c) 2012 Linaro Ltd
* http://www.linaro.org
*
* Author: Thomas Abraham <thomas.ab@samsung.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This driver implements the Samsung pinctrl driver. It supports setting up of
* pinmux and pinconf configurations. The gpiolib interface is also included.
* External interrupt (gpio and wakeup) support are not included in this driver
* but provides extensions to which platform specific implementation of the gpio
* and wakeup interrupts can be hooked to.
*/
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/irqdomain.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include "../core.h"
#include "pinctrl-samsung.h"
#define GROUP_SUFFIX "-grp"
#define GSUFFIX_LEN sizeof(GROUP_SUFFIX)
#define FUNCTION_SUFFIX "-mux"
#define FSUFFIX_LEN sizeof(FUNCTION_SUFFIX)
/* list of all possible config options supported */
/* Maps the DT per-pin config properties to the internal pincfg type used
 * when packing config values (see PINCFG_PACK in the parse path). */
static struct pin_config {
	const char *property;      /* device-tree property name */
	enum pincfg_type param;    /* corresponding register field */
} cfg_params[] = {
	{ "samsung,pin-pud", PINCFG_TYPE_PUD },
	{ "samsung,pin-drv", PINCFG_TYPE_DRV },
	{ "samsung,pin-con-pdn", PINCFG_TYPE_CON_PDN },
	{ "samsung,pin-pud-pdn", PINCFG_TYPE_PUD_PDN },
	{ "samsung,pin-val", PINCFG_TYPE_DAT },
};
/* Global list of devices (struct samsung_pinctrl_drv_data) */
static LIST_HEAD(drvdata_list);
static unsigned int pin_base;
/* Recover the owning pin bank from its embedded gpio_chip. */
static inline struct samsung_pin_bank *gc_to_pin_bank(struct gpio_chip *gc)
{
	return container_of(gc, struct samsung_pin_bank, gpio_chip);
}
/* pinctrl_ops callback: number of pin groups on this controller */
static int samsung_get_group_count(struct pinctrl_dev *pctldev)
{
	struct samsung_pinctrl_drv_data *drvdata;

	drvdata = pinctrl_dev_get_drvdata(pctldev);
	return drvdata->nr_groups;
}
/* pinctrl_ops callback: name of the pin group with the given selector */
static const char *samsung_get_group_name(struct pinctrl_dev *pctldev,
						unsigned group)
{
	struct samsung_pinctrl_drv_data *drvdata;

	drvdata = pinctrl_dev_get_drvdata(pctldev);
	return drvdata->pin_groups[group].name;
}
static int samsung_get_group_pins(struct pinctrl_dev *pctldev,
unsigned group,
const unsigned **pins,
unsigned *num_pins)
{
struct samsung_pinctrl_drv_data *pmx = pinctrl_dev_get_drvdata(pctldev);
*pins = pmx->pin_groups[group].pins;
*num_pins = pmx->pin_groups[group].num_pins;
return 0;
}
/*
 * Grow *map so it has room for *num_maps + reserve entries, zeroing the
 * newly added slots.  *reserved_maps tracks the allocated capacity.
 */
static int reserve_map(struct device *dev, struct pinctrl_map **map,
		       unsigned *reserved_maps, unsigned *num_maps,
		       unsigned reserve)
{
	unsigned needed = *num_maps + reserve;
	struct pinctrl_map *grown;

	if (needed <= *reserved_maps)
		return 0;

	grown = krealloc(*map, sizeof(*grown) * needed, GFP_KERNEL);
	if (!grown) {
		dev_err(dev, "krealloc(map) failed\n");
		return -ENOMEM;
	}
	memset(grown + *reserved_maps, 0,
	       (needed - *reserved_maps) * sizeof(*grown));

	*map = grown;
	*reserved_maps = needed;
	return 0;
}
/* Append one MUX_GROUP entry to the map array; capacity must be reserved. */
static int add_map_mux(struct pinctrl_map **map, unsigned *reserved_maps,
		       unsigned *num_maps, const char *group,
		       const char *function)
{
	struct pinctrl_map *entry;

	if (WARN_ON(*num_maps == *reserved_maps))
		return -ENOSPC;

	entry = &(*map)[*num_maps];
	entry->type = PIN_MAP_TYPE_MUX_GROUP;
	entry->data.mux.group = group;
	entry->data.mux.function = function;
	(*num_maps)++;

	return 0;
}
/*
 * Append one CONFIGS_GROUP entry to the map array.  The config list is
 * duplicated so the map owns its own copy (freed in samsung_dt_free_map).
 */
static int add_map_configs(struct device *dev, struct pinctrl_map **map,
			   unsigned *reserved_maps, unsigned *num_maps,
			   const char *group, unsigned long *configs,
			   unsigned num_configs)
{
	struct pinctrl_map *entry;
	unsigned long *dup_configs;

	if (WARN_ON(*num_maps == *reserved_maps))
		return -ENOSPC;

	dup_configs = kmemdup(configs, num_configs * sizeof(*dup_configs),
			      GFP_KERNEL);
	if (!dup_configs) {
		dev_err(dev, "kmemdup(configs) failed\n");
		return -ENOMEM;
	}

	entry = &(*map)[*num_maps];
	entry->type = PIN_MAP_TYPE_CONFIGS_GROUP;
	entry->data.configs.group_or_pin = group;
	entry->data.configs.configs = dup_configs;
	entry->data.configs.num_configs = num_configs;
	(*num_maps)++;

	return 0;
}
/* Append a single packed config value to the *configs array. */
static int add_config(struct device *dev, unsigned long **configs,
		      unsigned *num_configs, unsigned long config)
{
	unsigned count = *num_configs;
	unsigned long *grown;

	grown = krealloc(*configs, sizeof(*grown) * (count + 1), GFP_KERNEL);
	if (!grown) {
		dev_err(dev, "krealloc(configs) failed\n");
		return -ENOMEM;
	}
	grown[count] = config;

	*configs = grown;
	*num_configs = count + 1;
	return 0;
}
/* Release a map built by samsung_dt_node_to_map, including the duplicated
 * config arrays owned by CONFIGS_GROUP entries. */
static void samsung_dt_free_map(struct pinctrl_dev *pctldev,
				struct pinctrl_map *map,
				unsigned num_maps)
{
	unsigned idx;

	for (idx = 0; idx < num_maps; idx++) {
		if (map[idx].type == PIN_MAP_TYPE_CONFIGS_GROUP)
			kfree(map[idx].data.configs.configs);
	}
	kfree(map);
}
/*
 * Translate one pin-config DT node into pinctrl map entries.
 * For each pin named in "samsung,pins" this emits up to two entries:
 * a MUX_GROUP entry (if "samsung,pin-function" is present) and a
 * CONFIGS_GROUP entry (if any cfg_params property is present).
 * Returns 0 on success or a negative errno; *map/*num_maps are grown
 * in place and the caller frees them via samsung_dt_free_map on error.
 */
static int samsung_dt_subnode_to_map(struct samsung_pinctrl_drv_data *drvdata,
				     struct device *dev,
				     struct device_node *np,
				     struct pinctrl_map **map,
				     unsigned *reserved_maps,
				     unsigned *num_maps)
{
	int ret, i;
	u32 val;
	unsigned long config;
	unsigned long *configs = NULL;
	unsigned num_configs = 0;
	unsigned reserve;
	struct property *prop;
	const char *group;
	bool has_func = false;

	/* A function property means a mux entry is needed per pin. */
	ret = of_property_read_u32(np, "samsung,pin-function", &val);
	if (!ret)
		has_func = true;

	/* Collect every recognized config property into a packed list. */
	for (i = 0; i < ARRAY_SIZE(cfg_params); i++) {
		ret = of_property_read_u32(np, cfg_params[i].property, &val);
		if (!ret) {
			config = PINCFG_PACK(cfg_params[i].param, val);
			ret = add_config(dev, &configs, &num_configs, config);
			if (ret < 0)
				goto exit;
		/* EINVAL=missing, which is fine since it's optional */
		} else if (ret != -EINVAL) {
			dev_err(dev, "could not parse property %s\n",
				cfg_params[i].property);
		}
	}

	/* Reserve (mux? + configs?) entries per listed pin up front. */
	reserve = 0;
	if (has_func)
		reserve++;
	if (num_configs)
		reserve++;
	ret = of_property_count_strings(np, "samsung,pins");
	if (ret < 0) {
		dev_err(dev, "could not parse property samsung,pins\n");
		goto exit;
	}
	reserve *= ret;

	ret = reserve_map(dev, map, reserved_maps, num_maps, reserve);
	if (ret < 0)
		goto exit;

	/* Emit the entries; add_map_configs duplicates the config list. */
	of_property_for_each_string(np, "samsung,pins", prop, group) {
		if (has_func) {
			ret = add_map_mux(map, reserved_maps,
					  num_maps, group, np->full_name);
			if (ret < 0)
				goto exit;
		}

		if (num_configs) {
			ret = add_map_configs(dev, map, reserved_maps,
					      num_maps, group, configs,
					      num_configs);
			if (ret < 0)
				goto exit;
		}
	}

	ret = 0;

exit:
	/* The working config list is always ours to free (maps own copies). */
	kfree(configs);
	return ret;
}
/*
 * pinctrl_ops .dt_node_to_map callback.
 * A config node either holds the pin properties itself (no children) or
 * acts as a container whose children each describe one configuration;
 * both shapes are funnelled into samsung_dt_subnode_to_map.  On error the
 * partially built map is freed before returning.
 */
static int samsung_dt_node_to_map(struct pinctrl_dev *pctldev,
				  struct device_node *np_config,
				  struct pinctrl_map **map,
				  unsigned *num_maps)
{
	struct samsung_pinctrl_drv_data *drvdata;
	unsigned reserved_maps;
	struct device_node *np;
	int ret;

	drvdata = pinctrl_dev_get_drvdata(pctldev);

	reserved_maps = 0;
	*map = NULL;
	*num_maps = 0;

	/* Leaf node: parse it directly. */
	if (!of_get_child_count(np_config))
		return samsung_dt_subnode_to_map(drvdata, pctldev->dev,
						 np_config, map,
						 &reserved_maps,
						 num_maps);

	/* Container node: accumulate maps from every child. */
	for_each_child_of_node(np_config, np) {
		ret = samsung_dt_subnode_to_map(drvdata, pctldev->dev, np, map,
						&reserved_maps, num_maps);
		if (ret < 0) {
			samsung_dt_free_map(pctldev, *map, *num_maps);
			return ret;
		}
	}

	return 0;
}
/* list of pinctrl callbacks for the pinctrl core */
/* Group enumeration plus DT map construction/teardown. */
static const struct pinctrl_ops samsung_pctrl_ops = {
	.get_groups_count	= samsung_get_group_count,
	.get_group_name		= samsung_get_group_name,
	.get_group_pins		= samsung_get_group_pins,
	.dt_node_to_map		= samsung_dt_node_to_map,
	.dt_free_map		= samsung_dt_free_map,
};
/* pinmux_ops callback: number of pin functions parsed from the DT */
static int samsung_get_functions_count(struct pinctrl_dev *pctldev)
{
	struct samsung_pinctrl_drv_data *drvdata =
			pinctrl_dev_get_drvdata(pctldev);

	return drvdata->nr_functions;
}
/* pinmux_ops callback: name of the function with the given selector */
static const char *samsung_pinmux_get_fname(struct pinctrl_dev *pctldev,
					    unsigned selector)
{
	struct samsung_pinctrl_drv_data *drvdata =
			pinctrl_dev_get_drvdata(pctldev);

	return drvdata->pmx_functions[selector].name;
}
/* return the groups associated for the specified function selector */
static int samsung_pinmux_get_groups(struct pinctrl_dev *pctldev,
unsigned selector, const char * const **groups,
unsigned * const num_groups)
{
struct samsung_pinctrl_drv_data *drvdata;
drvdata = pinctrl_dev_get_drvdata(pctldev);
*groups = drvdata->pmx_functions[selector].groups;
*num_groups = drvdata->pmx_functions[selector].num_groups;
return 0;
}
/*
 * given a pin number that is local to a pin controller, find out the pin bank
 * and the register base of the pin bank.
 */
static void pin_to_reg_bank(struct samsung_pinctrl_drv_data *drvdata,
			    unsigned pin, void __iomem **reg, u32 *offset,
			    struct samsung_pin_bank **bank)
{
	struct samsung_pin_bank *b;

	b = drvdata->pin_banks;

	/* Walk the bank array until 'pin' falls inside
	 * [b->pin_base, b->pin_base + b->nr_pins - 1].  Banks are laid out
	 * with ascending pin_base, so advancing while the pin lies past the
	 * current bank's last pin lands on the owning bank. */
	while ((pin >= b->pin_base) &&
			((b->pin_base + b->nr_pins - 1) < pin))
		b++;

	*reg = drvdata->virt_base + b->pctl_offset;
	*offset = pin - b->pin_base;   /* pin index within the bank */
	if (bank)
		*bank = b;
}
/* enable or disable a pinmux function */
/*
 * Program the FUNC field of the first (and only) pin in the group to the
 * function's value (or clear it when disabling).  Banks wider than one
 * 32-bit CON register are handled by stepping to the second register.
 */
static void samsung_pinmux_setup(struct pinctrl_dev *pctldev, unsigned selector,
				 unsigned group, bool enable)
{
	struct samsung_pinctrl_drv_data *drvdata;
	const struct samsung_pin_bank_type *type;
	struct samsung_pin_bank *bank;
	void __iomem *reg;
	u32 mask, shift, data, pin_offset;
	unsigned long flags;
	const struct samsung_pmx_func *func;
	const struct samsung_pin_group *grp;

	drvdata = pinctrl_dev_get_drvdata(pctldev);
	func = &drvdata->pmx_functions[selector];
	grp = &drvdata->pin_groups[group];

	/* Fix: restore "&reg" — it had been corrupted into the "registered
	 * trademark" character by an HTML-entity mangling pass, which does
	 * not compile. */
	pin_to_reg_bank(drvdata, grp->pins[0] - drvdata->pin_base,
			&reg, &pin_offset, &bank);
	type = bank->type;
	mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1;
	shift = pin_offset * type->fld_width[PINCFG_TYPE_FUNC];
	if (shift >= 32) {
		/* Some banks have two config registers */
		shift -= 32;
		reg += 4;
	}

	spin_lock_irqsave(&bank->slock, flags);

	data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]);
	data &= ~(mask << shift);
	if (enable)
		data |= func->val << shift;
	writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]);

	spin_unlock_irqrestore(&bank->slock, flags);
}
/* pinmux_ops callback: route the selected function onto the group's pins */
static int samsung_pinmux_set_mux(struct pinctrl_dev *pctldev,
				  unsigned selector,
				  unsigned group)
{
	/* The heavy lifting lives in samsung_pinmux_setup. */
	samsung_pinmux_setup(pctldev, selector, group, true);
	return 0;
}
/* list of pinmux callbacks for the pinmux vertical in pinctrl core */
static const struct pinmux_ops samsung_pinmux_ops = {
	.get_functions_count	= samsung_get_functions_count,
	.get_function_name	= samsung_pinmux_get_fname,
	.get_function_groups	= samsung_pinmux_get_groups,
	.set_mux		= samsung_pinmux_set_mux,
};
/* set or get the pin config settings for a specified pin */
/*
 * Shared read/write helper for pin configuration.  *config carries a
 * packed (type, value) pair: on 'set' the value is written into the
 * pin's field of the type's register; on 'get' the field is read back
 * and repacked into *config.  Returns -EINVAL if the bank has no
 * register for the requested config type.
 */
static int samsung_pinconf_rw(struct pinctrl_dev *pctldev, unsigned int pin,
			      unsigned long *config, bool set)
{
	struct samsung_pinctrl_drv_data *drvdata;
	const struct samsung_pin_bank_type *type;
	struct samsung_pin_bank *bank;
	void __iomem *reg_base;
	enum pincfg_type cfg_type = PINCFG_UNPACK_TYPE(*config);
	u32 data, width, pin_offset, mask, shift;
	u32 cfg_value, cfg_reg;
	unsigned long flags;

	drvdata = pinctrl_dev_get_drvdata(pctldev);
	/* Fix: restore "&reg_base" — it had been corrupted into the
	 * "registered trademark" character by HTML-entity mangling, which
	 * does not compile. */
	pin_to_reg_bank(drvdata, pin - drvdata->pin_base, &reg_base,
			&pin_offset, &bank);
	type = bank->type;

	/* Reject config types this bank's register layout does not carry. */
	if (cfg_type >= PINCFG_TYPE_NUM || !type->fld_width[cfg_type])
		return -EINVAL;

	width = type->fld_width[cfg_type];
	cfg_reg = type->reg_offset[cfg_type];

	spin_lock_irqsave(&bank->slock, flags);

	mask = (1 << width) - 1;
	shift = pin_offset * width;
	data = readl(reg_base + cfg_reg);

	if (set) {
		cfg_value = PINCFG_UNPACK_VALUE(*config);
		data &= ~(mask << shift);
		data |= (cfg_value << shift);
		writel(data, reg_base + cfg_reg);
	} else {
		data >>= shift;
		data &= mask;
		*config = PINCFG_PACK(cfg_type, data);
	}

	spin_unlock_irqrestore(&bank->slock, flags);

	return 0;
}
/* pinconf_ops callback: apply each supplied config to one pin */
static int samsung_pinconf_set(struct pinctrl_dev *pctldev, unsigned int pin,
			       unsigned long *configs, unsigned num_configs)
{
	unsigned idx;
	int ret;

	for (idx = 0; idx < num_configs; idx++) {
		ret = samsung_pinconf_rw(pctldev, pin, &configs[idx], true);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* pinconf_ops callback: read one config value back from a pin */
static int samsung_pinconf_get(struct pinctrl_dev *pctldev, unsigned int pin,
			       unsigned long *config)
{
	/* set=false selects the read path of the shared helper */
	return samsung_pinconf_rw(pctldev, pin, config, false);
}
/* set the pin config settings for a specified pin group */
/*
 * Apply the config list to every pin in the group.  Fix: the return
 * value of samsung_pinconf_set was previously discarded, silently
 * masking per-pin failures (e.g. an unsupported config type); errors
 * are now propagated to the caller.
 */
static int samsung_pinconf_group_set(struct pinctrl_dev *pctldev,
				     unsigned group, unsigned long *configs,
				     unsigned num_configs)
{
	struct samsung_pinctrl_drv_data *drvdata;
	const unsigned int *pins;
	unsigned int cnt;
	int ret;

	drvdata = pinctrl_dev_get_drvdata(pctldev);
	pins = drvdata->pin_groups[group].pins;

	for (cnt = 0; cnt < drvdata->pin_groups[group].num_pins; cnt++) {
		ret = samsung_pinconf_set(pctldev, pins[cnt], configs,
					  num_configs);
		if (ret < 0)
			return ret;
	}

	return 0;
}
/* pinconf_ops callback: read a config from the group's first pin
 * (all pins in a group share the same configuration) */
static int samsung_pinconf_group_get(struct pinctrl_dev *pctldev,
				     unsigned int group, unsigned long *config)
{
	struct samsung_pinctrl_drv_data *drvdata =
			pinctrl_dev_get_drvdata(pctldev);
	const unsigned int *pins = drvdata->pin_groups[group].pins;

	samsung_pinconf_get(pctldev, pins[0], config);
	return 0;
}
/* list of pinconfig callbacks for pinconfig vertical in the pinctrl code */
static const struct pinconf_ops samsung_pinconf_ops = {
	.pin_config_get		= samsung_pinconf_get,
	.pin_config_set		= samsung_pinconf_set,
	.pin_config_group_get	= samsung_pinconf_group_get,
	.pin_config_group_set	= samsung_pinconf_group_set,
};
/* gpiolib gpio_set callback function */
static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
{
struct samsung_pin_bank *bank = gc_to_pin_bank(gc);
const struct samsung_pin_bank_type *type = bank->type;
unsigned long flags;
void __iomem *reg;
u32 data;
reg = bank->drvdata->virt_base + bank->pctl_offset;
spin_lock_irqsave(&bank->slock, flags);
data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
data &= ~(1 << offset);
if (value)
data |= 1 << offset;
writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]);
spin_unlock_irqrestore(&bank->slock, flags);
}
/* gpiolib get callback: sample one bank pin's level from the DAT register */
static int samsung_gpio_get(struct gpio_chip *gc, unsigned offset)
{
	struct samsung_pin_bank *bank = gc_to_pin_bank(gc);
	const struct samsung_pin_bank_type *type = bank->type;
	void __iomem *reg = bank->drvdata->virt_base + bank->pctl_offset;
	u32 data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);

	return (data >> offset) & 1;
}
/*
 * The calls to gpio_direction_output() and gpio_direction_input()
 * leads to this function call.
 */
/* Program the pin's FUNC field: 0 selects input, FUNC_OUTPUT selects
 * output.  Banks whose FUNC fields span more than one 32-bit register
 * are handled by stepping to the second CON register. */
static int samsung_gpio_set_direction(struct gpio_chip *gc,
				      unsigned offset, bool input)
{
	const struct samsung_pin_bank_type *type;
	struct samsung_pin_bank *bank;
	struct samsung_pinctrl_drv_data *drvdata;
	void __iomem *reg;
	u32 data, mask, shift;
	unsigned long flags;

	bank = gc_to_pin_bank(gc);
	type = bank->type;
	drvdata = bank->drvdata;

	reg = drvdata->virt_base + bank->pctl_offset +
			type->reg_offset[PINCFG_TYPE_FUNC];

	mask = (1 << type->fld_width[PINCFG_TYPE_FUNC]) - 1;
	shift = offset * type->fld_width[PINCFG_TYPE_FUNC];
	if (shift >= 32) {
		/* Some banks have two config registers */
		shift -= 32;
		reg += 4;
	}

	spin_lock_irqsave(&bank->slock, flags);

	data = readl(reg);
	data &= ~(mask << shift);	/* clearing the field selects input */
	if (!input)
		data |= FUNC_OUTPUT << shift;
	writel(data, reg);

	spin_unlock_irqrestore(&bank->slock, flags);

	return 0;
}
/* gpiolib direction_input callback: clear the pin's FUNC field */
static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
	return samsung_gpio_set_direction(gc, offset, true);
}
/* gpiolib direction_output callback: latch the level first so the pin
 * shows the requested value the moment it switches to output */
static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
					 int value)
{
	samsung_gpio_set(gc, offset, value);
	return samsung_gpio_set_direction(gc, offset, false);
}
/*
 * gpiolib gpio_to_irq callback.  Maps a GPIO offset to a virtual IRQ,
 * creating the mapping in the bank's irq_domain on first use.
 */
static int samsung_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
{
	struct samsung_pin_bank *bank = gc_to_pin_bank(gc);
	unsigned int virq;

	if (!bank->irq_domain)
		return -ENXIO;

	virq = irq_create_mapping(bank->irq_domain, offset);
	if (!virq)
		return -ENXIO;

	return virq;
}
static struct samsung_pin_group *samsung_pinctrl_create_groups(
struct device *dev,
struct samsung_pinctrl_drv_data *drvdata,
unsigned int *cnt)
{
struct pinctrl_desc *ctrldesc = &drvdata->pctl;
struct samsung_pin_group *groups, *grp;
const struct pinctrl_pin_desc *pdesc;
int i;
groups = devm_kzalloc(dev, ctrldesc->npins * sizeof(*groups),
GFP_KERNEL);
if (!groups)
return ERR_PTR(-EINVAL);
grp = groups;
pdesc = ctrldesc->pins;
for (i = 0; i < ctrldesc->npins; ++i, ++pdesc, ++grp) {
grp->name = pdesc->name;
grp->pins = &pdesc->number;
grp->num_pins = 1;
}
*cnt = ctrldesc->npins;
return groups;
}
/*
 * Fill one samsung_pmx_func from a DT config node.
 * Returns 1 if a function was created, 0 if the node has no
 * "samsung,pin-function" property (not a mux node), or a negative errno.
 * The function's group list is the node's "samsung,pins" strings.
 */
static int samsung_pinctrl_create_function(struct device *dev,
				struct samsung_pinctrl_drv_data *drvdata,
				struct device_node *func_np,
				struct samsung_pmx_func *func)
{
	int npins;
	int ret;
	int i;

	/* Nodes without a function value contribute no pmx function. */
	if (of_property_read_u32(func_np, "samsung,pin-function", &func->val))
		return 0;

	npins = of_property_count_strings(func_np, "samsung,pins");
	if (npins < 1) {
		dev_err(dev, "invalid pin list in %s node", func_np->name);
		return -EINVAL;
	}

	/* Full node path doubles as the unique function name. */
	func->name = func_np->full_name;

	func->groups = devm_kzalloc(dev, npins * sizeof(char *), GFP_KERNEL);
	if (!func->groups)
		return -ENOMEM;

	for (i = 0; i < npins; ++i) {
		const char *gname;

		ret = of_property_read_string_index(func_np, "samsung,pins",
							i, &gname);
		if (ret) {
			dev_err(dev,
				"failed to read pin name %d from %s node\n",
				i, func_np->name);
			return ret;
		}

		func->groups[i] = gname;
	}

	func->num_groups = npins;
	return 1;
}
static struct samsung_pmx_func *samsung_pinctrl_create_functions(
struct device *dev,
struct samsung_pinctrl_drv_data *drvdata,
unsigned int *cnt)
{
struct samsung_pmx_func *functions, *func;
struct device_node *dev_np = dev->of_node;
struct device_node *cfg_np;
unsigned int func_cnt = 0;
int ret;
/*
* Iterate over all the child nodes of the pin controller node
* and create pin groups and pin function lists.
*/
for_each_child_of_node(dev_np, cfg_np) {
struct device_node *func_np;
if (!of_get_child_count(cfg_np)) {
if (!of_find_property(cfg_np,
"samsung,pin-function", NULL))
continue;
++func_cnt;
continue;
}
for_each_child_of_node(cfg_np, func_np) {
if (!of_find_property(func_np,
"samsung,pin-function", NULL))
continue;
++func_cnt;
}
}
functions = devm_kzalloc(dev, func_cnt * sizeof(*functions),
GFP_KERNEL);
if (!functions) {
dev_err(dev, "failed to allocate memory for function list\n");
return ERR_PTR(-EINVAL);
}
func = functions;
/*
* Iterate over all the child nodes of the pin controller node
* and create pin groups and pin function lists.
*/
func_cnt = 0;
for_each_child_of_node(dev_np, cfg_np) {
struct device_node *func_np;
if (!of_get_child_count(cfg_np)) {
ret = samsung_pinctrl_create_function(dev, drvdata,
cfg_np, func);
if (ret < 0)
return ERR_PTR(ret);
if (ret > 0) {
++func;
++func_cnt;
}
continue;
}
for_each_child_of_node(cfg_np, func_np) {
ret = samsung_pinctrl_create_function(dev, drvdata,
func_np, func);
if (ret < 0)
return ERR_PTR(ret);
if (ret > 0) {
++func;
++func_cnt;
}
}
}
*cnt = func_cnt;
return functions;
}
/*
 * Parse the information about all the available pin groups and pin functions
 * from device node of the pin-controller. A pin group is formed with all
 * the pins listed in the "samsung,pins" property.
 */
static int samsung_pinctrl_parse_dt(struct platform_device *pdev,
				    struct samsung_pinctrl_drv_data *drvdata)
{
	struct device *dev = &pdev->dev;
	struct samsung_pin_group *grps;
	struct samsung_pmx_func *funcs;
	unsigned int ngrps = 0, nfuncs = 0;

	grps = samsung_pinctrl_create_groups(dev, drvdata, &ngrps);
	if (IS_ERR(grps)) {
		dev_err(dev, "failed to parse pin groups\n");
		return PTR_ERR(grps);
	}

	funcs = samsung_pinctrl_create_functions(dev, drvdata, &nfuncs);
	if (IS_ERR(funcs)) {
		dev_err(dev, "failed to parse pin functions\n");
		return PTR_ERR(funcs);
	}

	drvdata->pin_groups = grps;
	drvdata->nr_groups = ngrps;
	drvdata->pmx_functions = funcs;
	drvdata->nr_functions = nfuncs;

	return 0;
}
/* register the pinctrl interface with the pinctrl subsystem */
/*
 * Populate the pinctrl_desc (pin descriptors, generated "<bank>-<n>"
 * names), parse the DT groups/functions, register the pinctrl device
 * and attach one GPIO range per bank.
 */
static int samsung_pinctrl_register(struct platform_device *pdev,
				    struct samsung_pinctrl_drv_data *drvdata)
{
	struct pinctrl_desc *ctrldesc = &drvdata->pctl;
	struct pinctrl_pin_desc *pindesc, *pdesc;
	struct samsung_pin_bank *pin_bank;
	char *pin_names;
	int pin, bank, ret;

	ctrldesc->name = "samsung-pinctrl";
	ctrldesc->owner = THIS_MODULE;
	ctrldesc->pctlops = &samsung_pctrl_ops;
	ctrldesc->pmxops = &samsung_pinmux_ops;
	ctrldesc->confops = &samsung_pinconf_ops;

	pindesc = devm_kzalloc(&pdev->dev, sizeof(*pindesc) *
			drvdata->nr_pins, GFP_KERNEL);
	if (!pindesc) {
		dev_err(&pdev->dev, "mem alloc for pin descriptors failed\n");
		return -ENOMEM;
	}
	ctrldesc->pins = pindesc;
	ctrldesc->npins = drvdata->nr_pins;

	/* dynamically populate the pin number and pin name for pindesc */
	for (pin = 0, pdesc = pindesc; pin < ctrldesc->npins; pin++, pdesc++)
		pdesc->number = pin + drvdata->pin_base;

	/*
	 * allocate space for storing the dynamically generated names for all
	 * the pins which belong to this pin-controller.
	 */
	pin_names = devm_kzalloc(&pdev->dev, sizeof(char) * PIN_NAME_LENGTH *
					drvdata->nr_pins, GFP_KERNEL);
	if (!pin_names) {
		dev_err(&pdev->dev, "mem alloc for pin names failed\n");
		return -ENOMEM;
	}

	/* for each pin, the name of the pin is pin-bank name + pin number */
	for (bank = 0; bank < drvdata->nr_banks; bank++) {
		pin_bank = &drvdata->pin_banks[bank];
		for (pin = 0; pin < pin_bank->nr_pins; pin++) {
			/* Fix: bound the write to its PIN_NAME_LENGTH slot;
			 * the unbounded sprintf could overflow into the next
			 * pin's name for long bank names. */
			snprintf(pin_names, PIN_NAME_LENGTH, "%s-%d",
					pin_bank->name, pin);
			pdesc = pindesc + pin_bank->pin_base + pin;
			pdesc->name = pin_names;
			pin_names += PIN_NAME_LENGTH;
		}
	}

	ret = samsung_pinctrl_parse_dt(pdev, drvdata);
	if (ret)
		return ret;

	drvdata->pctl_dev = pinctrl_register(ctrldesc, &pdev->dev, drvdata);
	if (IS_ERR(drvdata->pctl_dev)) {
		dev_err(&pdev->dev, "could not register pinctrl driver\n");
		return PTR_ERR(drvdata->pctl_dev);
	}

	/* expose each bank as a GPIO range of the pinctrl device */
	for (bank = 0; bank < drvdata->nr_banks; ++bank) {
		pin_bank = &drvdata->pin_banks[bank];
		pin_bank->grange.name = pin_bank->name;
		pin_bank->grange.id = bank;
		pin_bank->grange.pin_base = drvdata->pin_base
						+ pin_bank->pin_base;
		pin_bank->grange.base = pin_bank->gpio_chip.base;
		pin_bank->grange.npins = pin_bank->gpio_chip.ngpio;
		pin_bank->grange.gc = &pin_bank->gpio_chip;
		pinctrl_add_gpio_range(drvdata->pctl_dev, &pin_bank->grange);
	}

	return 0;
}
/* gpiolib request callback: delegate ownership tracking to pinctrl */
static int samsung_gpio_request(struct gpio_chip *chip, unsigned offset)
{
	return pinctrl_request_gpio(chip->base + offset);
}
/* gpiolib free callback: release the pin back to the pinctrl core */
static void samsung_gpio_free(struct gpio_chip *chip, unsigned offset)
{
	pinctrl_free_gpio(chip->base + offset);
}
/* Template gpio_chip copied into every bank; per-bank fields (base,
 * ngpio, label, of_node) are filled in by samsung_gpiolib_register. */
static const struct gpio_chip samsung_gpiolib_chip = {
	.request = samsung_gpio_request,
	.free = samsung_gpio_free,
	.set = samsung_gpio_set,
	.get = samsung_gpio_get,
	.direction_input = samsung_gpio_direction_input,
	.direction_output = samsung_gpio_direction_output,
	.to_irq = samsung_gpio_to_irq,
	.owner = THIS_MODULE,
};
/* register the gpiolib interface with the gpiolib subsystem */
/* One gpio_chip per bank, stamped from the samsung_gpiolib_chip template.
 * On a mid-loop failure all previously added chips are removed again. */
static int samsung_gpiolib_register(struct platform_device *pdev,
				    struct samsung_pinctrl_drv_data *drvdata)
{
	struct samsung_pin_bank *bank = drvdata->pin_banks;
	struct gpio_chip *gc;
	int ret;
	int i;

	for (i = 0; i < drvdata->nr_banks; ++i, ++bank) {
		bank->gpio_chip = samsung_gpiolib_chip;

		gc = &bank->gpio_chip;
		/* global GPIO numbers continue across controllers */
		gc->base = drvdata->pin_base + bank->pin_base;
		gc->ngpio = bank->nr_pins;
		gc->dev = &pdev->dev;
		gc->of_node = bank->of_node;
		gc->label = bank->name;

		ret = gpiochip_add(gc);
		if (ret) {
			dev_err(&pdev->dev, "failed to register gpio_chip %s, error code: %d\n",
							gc->label, ret);
			goto fail;
		}
	}

	return 0;

fail:
	/* unwind: remove the chips added before the failing bank */
	for (--i, --bank; i >= 0; --i, --bank)
		gpiochip_remove(&bank->gpio_chip);
	return ret;
}
/* unregister the gpiolib interface with the gpiolib subsystem */
static int samsung_gpiolib_unregister(struct platform_device *pdev,
				      struct samsung_pinctrl_drv_data *drvdata)
{
	int i;

	for (i = 0; i < drvdata->nr_banks; ++i)
		gpiochip_remove(&drvdata->pin_banks[i].gpio_chip);

	return 0;
}
static const struct of_device_id samsung_pinctrl_dt_match[];
/* retrieve the soc specific data */
/*
 * Select the per-SoC samsung_pin_ctrl (the OF match data is an array
 * indexed by the "pinctrl" alias id), allocate and initialize the runtime
 * pin banks from the static bank data, bind each bank to its DT child
 * node by name, and carve this controller's slice out of the global
 * pin-number space.
 */
static const struct samsung_pin_ctrl *
samsung_pinctrl_get_soc_data(struct samsung_pinctrl_drv_data *d,
			     struct platform_device *pdev)
{
	int id;
	const struct of_device_id *match;
	struct device_node *node = pdev->dev.of_node;
	struct device_node *np;
	const struct samsung_pin_bank_data *bdata;
	const struct samsung_pin_ctrl *ctrl;
	struct samsung_pin_bank *bank;
	int i;

	/* alias id chooses this instance's entry in the match-data array */
	id = of_alias_get_id(node, "pinctrl");
	if (id < 0) {
		dev_err(&pdev->dev, "failed to get alias id\n");
		return ERR_PTR(-ENOENT);
	}
	match = of_match_node(samsung_pinctrl_dt_match, node);
	ctrl = (struct samsung_pin_ctrl *)match->data + id;

	d->suspend = ctrl->suspend;
	d->resume = ctrl->resume;
	d->nr_banks = ctrl->nr_banks;
	d->pin_banks = devm_kcalloc(&pdev->dev, d->nr_banks,
				    sizeof(*d->pin_banks), GFP_KERNEL);
	if (!d->pin_banks)
		return ERR_PTR(-ENOMEM);

	/* copy the static per-bank description into the mutable runtime
	 * banks, assigning each bank a base in this controller's pin space */
	bank = d->pin_banks;
	bdata = ctrl->pin_banks;
	for (i = 0; i < ctrl->nr_banks; ++i, ++bdata, ++bank) {
		bank->type = bdata->type;
		bank->pctl_offset = bdata->pctl_offset;
		bank->nr_pins = bdata->nr_pins;
		bank->eint_func = bdata->eint_func;
		bank->eint_type = bdata->eint_type;
		bank->eint_mask = bdata->eint_mask;
		bank->eint_offset = bdata->eint_offset;
		bank->name = bdata->name;

		spin_lock_init(&bank->slock);
		bank->drvdata = d;
		bank->pin_base = d->nr_pins;
		d->nr_pins += bank->nr_pins;
	}

	/* attach each gpio-controller child node to the bank of same name */
	for_each_child_of_node(node, np) {
		if (!of_find_property(np, "gpio-controller", NULL))
			continue;
		bank = d->pin_banks;
		for (i = 0; i < d->nr_banks; ++i, ++bank) {
			if (!strcmp(bank->name, np->name)) {
				bank->of_node = np;
				break;
			}
		}
	}

	/* reserve this controller's range in the global pin numbering */
	d->pin_base = pin_base;
	pin_base += d->nr_pins;

	return ctrl;
}
/*
 * Platform probe: resolve SoC data, map the controller registers,
 * register the gpiolib chips and the pinctrl device, run the optional
 * per-SoC external-interrupt init hooks, and join the global device
 * list used by the syscore suspend/resume path.
 */
static int samsung_pinctrl_probe(struct platform_device *pdev)
{
	struct samsung_pinctrl_drv_data *drvdata;
	const struct samsung_pin_ctrl *ctrl;
	struct device *dev = &pdev->dev;
	struct resource *res;
	int ret;

	if (!dev->of_node) {
		dev_err(dev, "device tree node not found\n");
		return -ENODEV;
	}

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata) {
		dev_err(dev, "failed to allocate memory for driver's "
				"private data\n");
		return -ENOMEM;
	}

	ctrl = samsung_pinctrl_get_soc_data(drvdata, pdev);
	if (IS_ERR(ctrl)) {
		dev_err(&pdev->dev, "driver data not available\n");
		return PTR_ERR(ctrl);
	}
	drvdata->dev = dev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	drvdata->virt_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(drvdata->virt_base))
		return PTR_ERR(drvdata->virt_base);

	/* the IRQ resource is optional (used by the eint extensions) */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (res)
		drvdata->irq = res->start;

	/* gpiolib must exist before pinctrl adds its GPIO ranges */
	ret = samsung_gpiolib_register(pdev, drvdata);
	if (ret)
		return ret;

	ret = samsung_pinctrl_register(pdev, drvdata);
	if (ret) {
		samsung_gpiolib_unregister(pdev, drvdata);
		return ret;
	}

	if (ctrl->eint_gpio_init)
		ctrl->eint_gpio_init(drvdata);
	if (ctrl->eint_wkup_init)
		ctrl->eint_wkup_init(drvdata);

	platform_set_drvdata(pdev, drvdata);

	/* Add to the global list */
	list_add_tail(&drvdata->node, &drvdata_list);

	return 0;
}
#ifdef CONFIG_PM
/**
 * samsung_pinctrl_suspend_dev - save pinctrl state for suspend for a device
 *
 * Save data for all banks handled by this device.
 * Only banks with a powerdown-config register lose state and are saved;
 * the extra second CON register of wide banks goes into the spare
 * pm_save[PINCFG_TYPE_NUM] slot.
 */
static void samsung_pinctrl_suspend_dev(
	struct samsung_pinctrl_drv_data *drvdata)
{
	void __iomem *virt_base = drvdata->virt_base;
	int i;

	for (i = 0; i < drvdata->nr_banks; i++) {
		struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
		void __iomem *reg = virt_base + bank->pctl_offset;

		const u8 *offs = bank->type->reg_offset;
		const u8 *widths = bank->type->fld_width;
		enum pincfg_type type;

		/* Registers without a powerdown config aren't lost */
		if (!widths[PINCFG_TYPE_CON_PDN])
			continue;

		for (type = 0; type < PINCFG_TYPE_NUM; type++)
			if (widths[type])
				bank->pm_save[type] = readl(reg + offs[type]);

		if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) {
			/* Some banks have two config registers */
			bank->pm_save[PINCFG_TYPE_NUM] =
				readl(reg + offs[PINCFG_TYPE_FUNC] + 4);
			pr_debug("Save %s @ %p (con %#010x %08x)\n",
				 bank->name, reg,
				 bank->pm_save[PINCFG_TYPE_FUNC],
				 bank->pm_save[PINCFG_TYPE_NUM]);
		} else {
			pr_debug("Save %s @ %p (con %#010x)\n", bank->name,
				 reg, bank->pm_save[PINCFG_TYPE_FUNC]);
		}
	}

	/* per-SoC hook for extra state (e.g. eint registers) */
	if (drvdata->suspend)
		drvdata->suspend(drvdata);
}
/**
 * samsung_pinctrl_resume_dev - restore pinctrl state from suspend for a device
 *
 * Restore one of the banks that was saved during suspend.
 *
 * We don't bother doing anything complicated to avoid glitching lines since
 * we're called before pad retention is turned off.
 */
static void samsung_pinctrl_resume_dev(struct samsung_pinctrl_drv_data *drvdata)
{
	void __iomem *virt_base = drvdata->virt_base;
	int i;

	/* per-SoC hook runs first, mirroring the suspend ordering */
	if (drvdata->resume)
		drvdata->resume(drvdata);

	for (i = 0; i < drvdata->nr_banks; i++) {
		struct samsung_pin_bank *bank = &drvdata->pin_banks[i];
		void __iomem *reg = virt_base + bank->pctl_offset;

		const u8 *offs = bank->type->reg_offset;
		const u8 *widths = bank->type->fld_width;
		enum pincfg_type type;

		/* Registers without a powerdown config aren't lost */
		if (!widths[PINCFG_TYPE_CON_PDN])
			continue;

		if (widths[PINCFG_TYPE_FUNC] * bank->nr_pins > 32) {
			/* Some banks have two config registers */
			pr_debug("%s @ %p (con %#010x %08x => %#010x %08x)\n",
				 bank->name, reg,
				 readl(reg + offs[PINCFG_TYPE_FUNC]),
				 readl(reg + offs[PINCFG_TYPE_FUNC] + 4),
				 bank->pm_save[PINCFG_TYPE_FUNC],
				 bank->pm_save[PINCFG_TYPE_NUM]);
			/* second CON word was parked in the spare slot */
			writel(bank->pm_save[PINCFG_TYPE_NUM],
			       reg + offs[PINCFG_TYPE_FUNC] + 4);
		} else {
			pr_debug("%s @ %p (con %#010x => %#010x)\n", bank->name,
				 reg, readl(reg + offs[PINCFG_TYPE_FUNC]),
				 bank->pm_save[PINCFG_TYPE_FUNC]);
		}

		for (type = 0; type < PINCFG_TYPE_NUM; type++)
			if (widths[type])
				writel(bank->pm_save[type], reg + offs[type]);
	}
}
/**
* samsung_pinctrl_suspend - save pinctrl state for suspend
*
* Save data for all banks across all devices.
*/
static int samsung_pinctrl_suspend(void)
{
struct samsung_pinctrl_drv_data *drvdata;
list_for_each_entry(drvdata, &drvdata_list, node) {
samsung_pinctrl_suspend_dev(drvdata);
}
return 0;
}
/**
* samsung_pinctrl_resume - restore pinctrl state for suspend
*
* Restore data for all banks across all devices.
*/
static void samsung_pinctrl_resume(void)
{
struct samsung_pinctrl_drv_data *drvdata;
list_for_each_entry_reverse(drvdata, &drvdata_list, node) {
samsung_pinctrl_resume_dev(drvdata);
}
}
#else
#define samsung_pinctrl_suspend NULL
#define samsung_pinctrl_resume NULL
#endif
static struct syscore_ops samsung_pinctrl_syscore_ops = {
.suspend = samsung_pinctrl_suspend,
.resume = samsung_pinctrl_resume,
};
static const struct of_device_id samsung_pinctrl_dt_match[] = {
#ifdef CONFIG_PINCTRL_EXYNOS
{ .compatible = "samsung,exynos3250-pinctrl",
.data = (void *)exynos3250_pin_ctrl },
{ .compatible = "samsung,exynos4210-pinctrl",
.data = (void *)exynos4210_pin_ctrl },
{ .compatible = "samsung,exynos4x12-pinctrl",
.data = (void *)exynos4x12_pin_ctrl },
{ .compatible = "samsung,exynos4415-pinctrl",
.data = (void *)exynos4415_pin_ctrl },
{ .compatible = "samsung,exynos5250-pinctrl",
.data = (void *)exynos5250_pin_ctrl },
{ .compatible = "samsung,exynos5260-pinctrl",
.data = (void *)exynos5260_pin_ctrl },
{ .compatible = "samsung,exynos5420-pinctrl",
.data = (void *)exynos5420_pin_ctrl },
{ .compatible = "samsung,exynos5433-pinctrl",
.data = (void *)exynos5433_pin_ctrl },
{ .compatible = "samsung,s5pv210-pinctrl",
.data = (void *)s5pv210_pin_ctrl },
{ .compatible = "samsung,exynos7-pinctrl",
.data = (void *)exynos7_pin_ctrl },
#endif
#ifdef CONFIG_PINCTRL_S3C64XX
{ .compatible = "samsung,s3c64xx-pinctrl",
.data = s3c64xx_pin_ctrl },
#endif
#ifdef CONFIG_PINCTRL_S3C24XX
{ .compatible = "samsung,s3c2412-pinctrl",
.data = s3c2412_pin_ctrl },
{ .compatible = "samsung,s3c2416-pinctrl",
.data = s3c2416_pin_ctrl },
{ .compatible = "samsung,s3c2440-pinctrl",
.data = s3c2440_pin_ctrl },
{ .compatible = "samsung,s3c2450-pinctrl",
.data = s3c2450_pin_ctrl },
#endif
{},
};
MODULE_DEVICE_TABLE(of, samsung_pinctrl_dt_match);
static struct platform_driver samsung_pinctrl_driver = {
.probe = samsung_pinctrl_probe,
.driver = {
.name = "samsung-pinctrl",
.of_match_table = samsung_pinctrl_dt_match,
},
};
/* Register the syscore suspend/resume hooks and the platform driver. */
static int __init samsung_pinctrl_drv_register(void)
{
	/*
	 * Register syscore ops for save/restore of registers across suspend.
	 * It's important to ensure that this driver is running at an earlier
	 * initcall level than any arch-specific init calls that install syscore
	 * ops that turn off pad retention (like exynos_pm_resume).
	 */
	register_syscore_ops(&samsung_pinctrl_syscore_ops);

	return platform_driver_register(&samsung_pinctrl_driver);
}
postcore_initcall(samsung_pinctrl_drv_register);
/* Module exit: drop the platform driver (the syscore ops are not unregistered here). */
static void __exit samsung_pinctrl_drv_unregister(void)
{
	platform_driver_unregister(&samsung_pinctrl_driver);
}
module_exit(samsung_pinctrl_drv_unregister);
MODULE_AUTHOR("Thomas Abraham <thomas.ab@samsung.com>");
MODULE_DESCRIPTION("Samsung pinctrl driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
tkhsu/quick-qemu | hw/virtio/virtio-balloon.c | 41 | 12822 | /*
* Virtio Balloon Device
*
* Copyright IBM, Corp. 2008
* Copyright (C) 2011 Red Hat, Inc.
* Copyright (C) 2011 Amit Shah <amit.shah@redhat.com>
*
* Authors:
* Anthony Liguori <aliguori@us.ibm.com>
*
* This work is licensed under the terms of the GNU GPL, version 2. See
* the COPYING file in the top-level directory.
*
*/
#include "qemu/iov.h"
#include "qemu/timer.h"
#include "qemu-common.h"
#include "hw/virtio/virtio.h"
#include "hw/i386/pc.h"
#include "cpu.h"
#include "sysemu/balloon.h"
#include "hw/virtio/virtio-balloon.h"
#include "sysemu/kvm.h"
#include "exec/address-spaces.h"
#include "qapi/visitor.h"
#include "qapi-event.h"
#if defined(__linux__)
#include <sys/mman.h>
#endif
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-access.h"
/*
 * Inflate or deflate a single guest page at @addr.
 *
 * Inflating (deflate == 0) advises the host kernel that the page contents
 * may be discarded (MADV_DONTNEED); deflating hints the page will be
 * needed again.  Only effective on Linux, and skipped under KVM without
 * sync-mmu support.
 */
static void balloon_page(void *addr, int deflate)
{
#if defined(__linux__)
    if (!kvm_enabled() || kvm_has_sync_mmu())
        qemu_madvise(addr, TARGET_PAGE_SIZE,
                deflate ? QEMU_MADV_WILLNEED : QEMU_MADV_DONTNEED);
#endif
}
static const char *balloon_stat_names[] = {
[VIRTIO_BALLOON_S_SWAP_IN] = "stat-swap-in",
[VIRTIO_BALLOON_S_SWAP_OUT] = "stat-swap-out",
[VIRTIO_BALLOON_S_MAJFLT] = "stat-major-faults",
[VIRTIO_BALLOON_S_MINFLT] = "stat-minor-faults",
[VIRTIO_BALLOON_S_MEMFREE] = "stat-free-memory",
[VIRTIO_BALLOON_S_MEMTOT] = "stat-total-memory",
[VIRTIO_BALLOON_S_NR] = NULL
};
/*
* reset_stats - Mark all items in the stats array as unset
*
* This function needs to be called at device initialization and before
* updating to a set of newly-generated stats. This will ensure that no
* stale values stick around in case the guest reports a subset of the supported
* statistics.
*/
static inline void reset_stats(VirtIOBalloon *dev)
{
    int i;

    /* -1 marks a stat slot as "not reported by the guest" */
    for (i = 0; i < VIRTIO_BALLOON_S_NR; i++) {
        dev->stats[i] = -1;
    }
}
/* True when the guest negotiated the VIRTIO_BALLOON_F_STATS_VQ feature. */
static bool balloon_stats_supported(const VirtIOBalloon *s)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(s);
    return vdev->guest_features & (1 << VIRTIO_BALLOON_F_STATS_VQ);
}
/* True when periodic stats polling is configured (interval > 0). */
static bool balloon_stats_enabled(const VirtIOBalloon *s)
{
    return s->stats_poll_interval > 0;
}
/* Stop and free the stats polling timer and mark polling as disabled. */
static void balloon_stats_destroy_timer(VirtIOBalloon *s)
{
    if (!balloon_stats_enabled(s)) {
        return;
    }

    timer_del(s->stats_timer);
    timer_free(s->stats_timer);
    s->stats_timer = NULL;
    s->stats_poll_interval = 0;
}
/* (Re-)arm the stats timer to fire @secs seconds from now on the virtual clock. */
static void balloon_stats_change_timer(VirtIOBalloon *s, int64_t secs)
{
    timer_mod(s->stats_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + secs * 1000);
}
/*
 * Periodic timer callback: complete the guest's pending stats request so
 * the guest refills the queue with fresh numbers.  If the guest never
 * negotiated the stats feature, just re-arm the timer and try later.
 */
static void balloon_stats_poll_cb(void *opaque)
{
    VirtIOBalloon *s = opaque;
    VirtIODevice *vdev = VIRTIO_DEVICE(s);

    if (!balloon_stats_supported(s)) {
        /* re-schedule */
        balloon_stats_change_timer(s, s->stats_poll_interval);
        return;
    }

    /* Return the element buffered by virtio_balloon_receive_stats() */
    virtqueue_push(s->svq, &s->stats_vq_elem, s->stats_vq_offset);
    virtio_notify(vdev, s->svq);
}
/*
 * QOM "guest-stats" property getter: emit a struct containing the
 * timestamp of the last guest update ("last-update") plus a nested
 * "stats" struct holding every counter (unset entries read as -1).
 *
 * The goto unwinding closes each successfully started visitor struct
 * exactly once while still propagating the first error encountered.
 */
static void balloon_stats_get_all(Object *obj, struct Visitor *v,
                                  void *opaque, const char *name, Error **errp)
{
    Error *err = NULL;
    VirtIOBalloon *s = opaque;
    int i;

    visit_start_struct(v, NULL, "guest-stats", name, 0, &err);
    if (err) {
        goto out;
    }
    visit_type_int(v, &s->stats_last_update, "last-update", &err);
    if (err) {
        goto out_end;
    }
    visit_start_struct(v, NULL, NULL, "stats", 0, &err);
    if (err) {
        goto out_end;
    }
    for (i = 0; !err && i < VIRTIO_BALLOON_S_NR; i++) {
        visit_type_int64(v, (int64_t *) &s->stats[i], balloon_stat_names[i],
                         &err);
    }
    error_propagate(errp, err);
    err = NULL;
    /* close the inner "stats" struct */
    visit_end_struct(v, &err);
out_end:
    error_propagate(errp, err);
    err = NULL;
    /* close the outer "guest-stats" struct */
    visit_end_struct(v, &err);
out:
    error_propagate(errp, err);
}
/* QOM getter: report the stats polling interval (seconds; 0 = disabled). */
static void balloon_stats_get_poll_interval(Object *obj, struct Visitor *v,
                                            void *opaque, const char *name,
                                            Error **errp)
{
    VirtIOBalloon *s = opaque;

    visit_type_int(v, &s->stats_poll_interval, name, errp);
}
/*
 * QOM setter for "guest-stats-polling-interval".
 *
 * Accepted values: 0 disables polling (destroys the timer), any positive
 * value up to UINT32_MAX (re)starts polling at that many seconds.
 * Negative or oversized values raise an error via @errp.
 */
static void balloon_stats_set_poll_interval(Object *obj, struct Visitor *v,
                                            void *opaque, const char *name,
                                            Error **errp)
{
    VirtIOBalloon *s = opaque;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int(v, &value, name, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /*
     * Zero is valid (it disables polling below), so the constraint is
     * non-negative; the old message claimed "greater than zero", which
     * contradicted the actual check.
     */
    if (value < 0) {
        error_setg(errp, "timer value must be non-negative");
        return;
    }

    if (value > UINT32_MAX) {
        error_setg(errp, "timer value is too big");
        return;
    }

    if (value == s->stats_poll_interval) {
        return;
    }

    if (value == 0) {
        /* timer=0 disables the timer */
        balloon_stats_destroy_timer(s);
        return;
    }

    if (balloon_stats_enabled(s)) {
        /* timer interval change */
        s->stats_poll_interval = value;
        balloon_stats_change_timer(s, value);
        return;
    }

    /* create a new timer */
    g_assert(s->stats_timer == NULL);
    s->stats_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL, balloon_stats_poll_cb, s);
    s->stats_poll_interval = value;
    balloon_stats_change_timer(s, 0);
}
/*
 * Handle requests on the inflate (ivq) or deflate (dvq) queue.  Each
 * element carries an array of 4-byte guest page frame numbers; every PFN
 * that resolves to guest RAM is passed to balloon_page(), deflating when
 * the request arrived on s->dvq.
 */
static void virtio_balloon_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement elem;
    MemoryRegionSection section;

    while (virtqueue_pop(vq, &elem)) {
        size_t offset = 0;
        uint32_t pfn;

        while (iov_to_buf(elem.out_sg, elem.out_num, offset, &pfn, 4) == 4) {
            ram_addr_t pa;
            ram_addr_t addr;
            int p = virtio_ldl_p(vdev, &pfn);

            pa = (ram_addr_t) p << VIRTIO_BALLOON_PFN_SHIFT;
            offset += 4;

            /* FIXME: remove get_system_memory(), but how? */
            section = memory_region_find(get_system_memory(), pa, 1);
            /*
             * NOTE(review): the continue path skips memory_region_unref();
             * verify memory_region_find's reference semantics here.
             */
            if (!int128_nz(section.size) || !memory_region_is_ram(section.mr))
                continue;

            /* Using memory_region_get_ram_ptr is bending the rules a bit, but
               should be OK because we only want a single page. */
            addr = section.offset_within_region;
            balloon_page(memory_region_get_ram_ptr(section.mr) + addr,
                         !!(vq == s->dvq));
            memory_region_unref(section.mr);
        }

        virtqueue_push(vq, &elem, offset);
        virtio_notify(vdev, vq);
    }
}
/*
 * Parse a guest stats report from the stats virtqueue.
 *
 * The popped element is stored in s->stats_vq_elem and deliberately NOT
 * pushed back here; balloon_stats_poll_cb() completes it later, which is
 * how the host paces stats requests to the guest.
 */
static void virtio_balloon_receive_stats(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);
    VirtQueueElement *elem = &s->stats_vq_elem;
    VirtIOBalloonStat stat;
    size_t offset = 0;
    qemu_timeval tv;

    if (!virtqueue_pop(vq, elem)) {
        goto out;
    }

    /* Initialize the stats to get rid of any stale values. This is only
     * needed to handle the case where a guest supports fewer stats than it
     * used to (ie. it has booted into an old kernel).
     */
    reset_stats(s);

    while (iov_to_buf(elem->out_sg, elem->out_num, offset, &stat, sizeof(stat))
           == sizeof(stat)) {
        uint16_t tag = virtio_tswap16(vdev, stat.tag);
        uint64_t val = virtio_tswap64(vdev, stat.val);

        offset += sizeof(stat);
        /* Silently ignore tags this QEMU does not know about */
        if (tag < VIRTIO_BALLOON_S_NR)
            s->stats[tag] = val;
    }
    s->stats_vq_offset = offset;

    if (qemu_gettimeofday(&tv) < 0) {
        fprintf(stderr, "warning: %s: failed to get time of day\n", __func__);
        goto out;
    }
    /* Host-side timestamp exposed via the "guest-stats" property */
    s->stats_last_update = tv.tv_sec;

out:
    if (balloon_stats_enabled(s)) {
        balloon_stats_change_timer(s, s->stats_poll_interval);
    }
}
/* Fill the guest-visible config space (both fields little-endian). */
static void virtio_balloon_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOBalloon *dev = VIRTIO_BALLOON(vdev);
    struct virtio_balloon_config config = {
        .num_pages = cpu_to_le32(dev->num_pages),
        .actual = cpu_to_le32(dev->actual),
    };

    memcpy(config_data, &config, sizeof(config));
}
/*
 * Guest wrote config space: pick up the new "actual" page count and, if it
 * changed, emit a BALLOON_CHANGE QAPI event carrying the resulting RAM size.
 */
static void virtio_balloon_set_config(VirtIODevice *vdev,
                                      const uint8_t *config_data)
{
    VirtIOBalloon *dev = VIRTIO_BALLOON(vdev);
    struct virtio_balloon_config config;
    uint32_t oldactual = dev->actual;

    memcpy(&config, config_data, sizeof(struct virtio_balloon_config));
    dev->actual = le32_to_cpu(config.actual);
    if (dev->actual != oldactual) {
        qapi_event_send_balloon_change(ram_size -
                        ((ram_addr_t) dev->actual << VIRTIO_BALLOON_PFN_SHIFT),
                        &error_abort);
    }
}
/* Advertise the stats virtqueue feature on top of the host feature set. */
static uint32_t virtio_balloon_get_features(VirtIODevice *vdev, uint32_t f)
{
    return f | (1 << VIRTIO_BALLOON_F_STATS_VQ);
}
/*
 * Balloon API callback: report how much RAM the guest currently has,
 * i.e. total RAM minus what the balloon has reclaimed.
 */
static void virtio_balloon_stat(void *opaque, BalloonInfo *info)
{
    VirtIOBalloon *dev = opaque;
    info->actual = ram_size - ((uint64_t) dev->actual <<
                               VIRTIO_BALLOON_PFN_SHIFT);
}
/*
 * Balloon API callback: ask the guest to grow/shrink the balloon so that
 * about @target bytes of RAM remain usable.  num_pages is what the config
 * space advertises to the guest.  Note a target of 0 is ignored here
 * (num_pages is left unchanged) rather than ballooning all memory.
 */
static void virtio_balloon_to_target(void *opaque, ram_addr_t target)
{
    VirtIOBalloon *dev = VIRTIO_BALLOON(opaque);
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);

    if (target > ram_size) {
        target = ram_size;
    }
    if (target) {
        dev->num_pages = (ram_size - target) >> VIRTIO_BALLOON_PFN_SHIFT;
        /* Let the guest know the config (and num_pages) changed */
        virtio_notify_config(vdev);
    }
}
/*
 * Migration save entry point: serialize the generic virtio state; the
 * device-specific words follow via virtio_balloon_save_device().
 */
static void virtio_balloon_save(QEMUFile *f, void *opaque)
{
    virtio_save(VIRTIO_DEVICE(opaque), f);
}
/* Migration: write the balloon's two state words in big-endian order. */
static void virtio_balloon_save_device(VirtIODevice *vdev, QEMUFile *f)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);

    qemu_put_be32(f, s->num_pages);
    qemu_put_be32(f, s->actual);
}
/* Migration load entry point; only version 1 streams are accepted. */
static int virtio_balloon_load(QEMUFile *f, void *opaque, int version_id)
{
    if (version_id == 1) {
        return virtio_load(VIRTIO_DEVICE(opaque), f, version_id);
    }

    return -EINVAL;
}
/* Migration: restore the two words written by virtio_balloon_save_device(). */
static int virtio_balloon_load_device(VirtIODevice *vdev, QEMUFile *f,
                                      int version_id)
{
    VirtIOBalloon *s = VIRTIO_BALLOON(vdev);

    s->num_pages = qemu_get_be32(f);
    s->actual = qemu_get_be32(f);
    return 0;
}
/*
 * Realize: initialize the virtio transport, hook into QEMU's generic
 * balloon API, create the three virtqueues (inflate, deflate, stats),
 * register migration callbacks and expose the stats QOM properties.
 */
static void virtio_balloon_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBalloon *s = VIRTIO_BALLOON(dev);
    int ret;

    virtio_init(vdev, "virtio-balloon", VIRTIO_ID_BALLOON,
                sizeof(struct virtio_balloon_config));

    ret = qemu_add_balloon_handler(virtio_balloon_to_target,
                                   virtio_balloon_stat, s);

    /* Bail out (and undo virtio_init) if the handler could not be installed */
    if (ret < 0) {
        error_setg(errp, "Adding balloon handler failed");
        virtio_cleanup(vdev);
        return;
    }

    s->ivq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->dvq = virtio_add_queue(vdev, 128, virtio_balloon_handle_output);
    s->svq = virtio_add_queue(vdev, 128, virtio_balloon_receive_stats);

    /* Mark all stats unset until the guest reports them */
    reset_stats(s);

    register_savevm(dev, "virtio-balloon", -1, 1,
                    virtio_balloon_save, virtio_balloon_load, s);

    object_property_add(OBJECT(dev), "guest-stats", "guest statistics",
                        balloon_stats_get_all, NULL, NULL, s, NULL);

    object_property_add(OBJECT(dev), "guest-stats-polling-interval", "int",
                        balloon_stats_get_poll_interval,
                        balloon_stats_set_poll_interval,
                        NULL, s, NULL);
}
/*
 * Unrealize: tear down in reverse of realize — stats timer, balloon
 * handler, migration hook, then the virtio transport.
 */
static void virtio_balloon_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOBalloon *s = VIRTIO_BALLOON(dev);

    balloon_stats_destroy_timer(s);
    qemu_remove_balloon_handler(s);
    unregister_savevm(dev, "virtio-balloon", s);
    virtio_cleanup(vdev);
}
static Property virtio_balloon_properties[] = {
DEFINE_PROP_END_OF_LIST(),
};
/* Wire the virtio-balloon callbacks into the device and virtio classes. */
static void virtio_balloon_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    dc->props = virtio_balloon_properties;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_balloon_device_realize;
    vdc->unrealize = virtio_balloon_device_unrealize;
    vdc->get_config = virtio_balloon_get_config;
    vdc->set_config = virtio_balloon_set_config;
    vdc->get_features = virtio_balloon_get_features;
    vdc->save = virtio_balloon_save_device;
    vdc->load = virtio_balloon_load_device;
}
static const TypeInfo virtio_balloon_info = {
.name = TYPE_VIRTIO_BALLOON,
.parent = TYPE_VIRTIO_DEVICE,
.instance_size = sizeof(VirtIOBalloon),
.class_init = virtio_balloon_class_init,
};
/* Register the virtio-balloon QOM type with the type system. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_balloon_info);
}
type_init(virtio_register_types)
| gpl-2.0 |
mmlr/qemu-haiku | roms/u-boot/drivers/spi/cf_qspi.c | 41 | 9775 | /*
* Freescale Coldfire Queued SPI driver
*
* NOTE:
* This driver is written to transfer 8 bit at-a-time and uses the dedicated
* SPI slave select pins as bit-banged GPIO to work with spi_flash subsystem.
*
* Copyright (C) 2011 Ruggedcom, Inc.
* Richard Retanubun (richardretanubun@freescale.com)
*
* SPDX-License-Identifier: GPL-2.0+
*/
#include <common.h>
#include <malloc.h>
#include <spi.h>
#include <asm/immap.h>
#include <asm/io.h>
DECLARE_GLOBAL_DATA_PTR;
#define clamp(x, low, high) (min(max(low, x), high))
#define to_cf_qspi_slave(s) container_of(s, struct cf_qspi_slave, s)
struct cf_qspi_slave {
struct spi_slave slave; /* Specific bus:cs ID for each device */
qspi_t *regs; /* Pointer to SPI controller registers */
u16 qmr; /* QMR: Queued Mode Register */
u16 qwr; /* QWR: Queued Wrap Register */
u16 qcr; /* QCR: Queued Command Ram */
};
/* Register write wrapper functions */
/* Thin accessors so all QSPI register I/O is greppable and the volatile
 * qualifier is applied in one place. */
static void write_qmr(volatile qspi_t *qspi, u16 val) { qspi->mr = val; }
static void write_qdlyr(volatile qspi_t *qspi, u16 val) { qspi->dlyr = val; }
static void write_qwr(volatile qspi_t *qspi, u16 val) { qspi->wr = val; }
static void write_qir(volatile qspi_t *qspi, u16 val) { qspi->ir = val; }
static void write_qar(volatile qspi_t *qspi, u16 val) { qspi->ar = val; }
static void write_qdr(volatile qspi_t *qspi, u16 val) { qspi->dr = val; }

/* Register read wrapper functions */
static u16 read_qdlyr(volatile qspi_t *qspi) { return qspi->dlyr; }
static u16 read_qwr(volatile qspi_t *qspi) { return qspi->wr; }
static u16 read_qir(volatile qspi_t *qspi) { return qspi->ir; }
static u16 read_qdr(volatile qspi_t *qspi) { return qspi->dr; }
/* These call points may be different for each ColdFire CPU */
extern void cfspi_port_conf(void);
static void cfspi_cs_activate(uint bus, uint cs, uint cs_active_high);
static void cfspi_cs_deactivate(uint bus, uint cs, uint cs_active_high);
/* Nothing to set up per transfer on this controller; always succeeds. */
int spi_claim_bus(struct spi_slave *slave)
{
	return 0;
}
/* Nothing to undo after a transfer; provided for API completeness. */
void spi_release_bus(struct spi_slave *slave)
{
}
/* Board-overridable init: route the pads to their QSPI function. */
__attribute__((weak))
void spi_init(void)
{
	cfspi_port_conf();
}
/*
 * Drive chip select to its active level.  QWR[CSIV] is set in
 * spi_setup_slave() when the device is active-low (the default), so
 * !(qwr & CSIV) yields "chip select is active-high".
 */
__attribute__((weak))
void spi_cs_activate(struct spi_slave *slave)
{
	struct cf_qspi_slave *dev = to_cf_qspi_slave(slave);

	cfspi_cs_activate(slave->bus, slave->cs, !(dev->qwr & QSPI_QWR_CSIV));
}

/* Release chip select back to its inactive level (see note above CSIV). */
__attribute__((weak))
void spi_cs_deactivate(struct spi_slave *slave)
{
	struct cf_qspi_slave *dev = to_cf_qspi_slave(slave);

	cfspi_cs_deactivate(slave->bus, slave->cs, !(dev->qwr & QSPI_QWR_CSIV));
}
/*
 * spi_cs_is_valid() - check whether a bus/chip-select pair exists.
 *
 * The controller provides a single bus (0) with four chip selects (0-3).
 * Returns 1 when the pair is usable, 0 otherwise.
 */
__attribute__((weak))
int spi_cs_is_valid(unsigned int bus, unsigned int cs)
{
	/*
	 * Only 1 bus and 4 chipselects per controller.  @cs is unsigned,
	 * so the original "cs >= 0" lower-bound test was always true
	 * (a tautological comparison) and has been dropped.
	 */
	return bus == 0 && cs < 4;
}
/* Free the slave container allocated by spi_setup_slave(). */
void spi_free_slave(struct spi_slave *slave)
{
	struct cf_qspi_slave *dev = to_cf_qspi_slave(slave);

	free(dev);
}
/* Translate information given by spi_setup_slave to members of cf_qspi_slave */
/*
 * Allocate and pre-compute the controller settings (QMR/QWR/QCR) for one
 * slave.  Returns the embedded spi_slave, or NULL on bad cs or allocation
 * failure.  The registers are only written later, in spi_xfer().
 */
struct spi_slave *spi_setup_slave(unsigned int bus, unsigned int cs,
		unsigned int max_hz, unsigned int mode)
{
	struct cf_qspi_slave *dev = NULL;

	if (!spi_cs_is_valid(bus, cs))
		return NULL;

	dev = spi_alloc_slave(struct cf_qspi_slave, bus, cs);
	if (!dev)
		return NULL;

	/* Initialize to known value */
	dev->regs = (qspi_t *)MMAP_QSPI;
	dev->qmr = 0;
	dev->qwr = 0;
	dev->qcr = 0;

	/* Map max_hz to QMR[BAUD] */
	if (max_hz == 0) /* Go as fast as possible */
		dev->qmr = 2u;
	else /* Get the closest baud rate */
		dev->qmr = clamp(((gd->bus_clk >> 2) + max_hz - 1)/max_hz,
				2u, 255u);

	/* Map mode to QMR[CPOL] and QMR[CPHA] */
	if (mode & SPI_CPOL)
		dev->qmr |= QSPI_QMR_CPOL;
	if (mode & SPI_CPHA)
		dev->qmr |= QSPI_QMR_CPHA;

	/* Hardcode bit length to 8 bit per transter */
	dev->qmr |= QSPI_QMR_BITS_8;

	/* Set QMR[MSTR] to enable QSPI as master */
	dev->qmr |= QSPI_QMR_MSTR;

	/*
	 * Set QCR and QWR to default values for spi flash operation.
	 * If more custom QCR and QRW are needed, overload mode variable
	 */
	dev->qcr = (QSPI_QDR_CONT | QSPI_QDR_BITSE);

	/* CSIV set => chip select idles high (active-low), the default */
	if (!(mode & SPI_CS_HIGH))
		dev->qwr |= QSPI_QWR_CSIV;

	return &dev->slave;
}
/* Transfer 8 bit at a time */
/*
 * spi_xfer() - perform a bidirectional transfer of @bitlen bits.
 *
 * @dout may be NULL (zeros are clocked out); @din may be NULL (received
 * data is discarded).  SPI_XFER_BEGIN/SPI_XFER_END in @flags control the
 * bit-banged chip select.  Returns 0 on success, -1 on a NULL slave.
 *
 * Fixes vs. the previous version:
 *  - @slave was dereferenced (via dev->regs) before the NULL check;
 *  - the per-burst size n was computed once before the do/while, so any
 *    count > 16 that was not a multiple of 16 overran the buffers and
 *    underflowed count.  n is now recomputed on each iteration.
 */
int spi_xfer(struct spi_slave *slave, unsigned int bitlen, const void *dout,
	void *din, unsigned long flags)
{
	struct cf_qspi_slave *dev;
	volatile qspi_t *qspi;
	u8 *txbuf = (u8 *)dout;
	u8 *rxbuf = (u8 *)din;
	u32 count = DIV_ROUND_UP(bitlen, 8);
	u32 n, i = 0;

	/* Sanitize arguments before touching anything derived from @slave */
	if (slave == NULL) {
		printf("%s: NULL slave ptr\n", __func__);
		return -1;
	}
	dev = to_cf_qspi_slave(slave);
	qspi = dev->regs;

	if (flags & SPI_XFER_BEGIN)
		spi_cs_activate(slave);

	/* There is something to send, lets process it. spi_xfer is also called
	 * just to toggle chip select, so bitlen of 0 is valid */
	if (count > 0) {
		/*
		 * NOTE: Since chip select is driven as a bit-bang-ed GPIO
		 * using spi_cs_activate() and spi_cs_deactivate(),
		 * the chip select settings inside the controller
		 * (i.e. QCR[CONT] and QWR[CSIV]) are moot. The bits are set to
		 * keep the controller settings consistent with the actual
		 * operation of the bus.
		 */

		/* Write the slave device's settings for the controller.*/
		write_qmr(qspi, dev->qmr);
		write_qwr(qspi, dev->qwr);

		do {
			/* Limit each burst to the 16-entry queue RAM */
			n = min(count, 16u);

			/* Setup queue end point */
			write_qwr(qspi, ((read_qwr(qspi) & QSPI_QWR_ENDQP_MASK)
				| QSPI_QWR_ENDQP((n-1))));

			/* Write Command RAM */
			write_qar(qspi, QSPI_QAR_CMD);
			for (i = 0; i < n; ++i)
				write_qdr(qspi, dev->qcr);

			/* Write TxBuf, if none given, fill with ZEROes */
			write_qar(qspi, QSPI_QAR_TRANS);
			if (txbuf) {
				for (i = 0; i < n; ++i)
					write_qdr(qspi, *txbuf++);
			} else {
				for (i = 0; i < n; ++i)
					write_qdr(qspi, 0);
			}

			/* Clear QIR[SPIF] by writing a 1 to it */
			write_qir(qspi, read_qir(qspi) | QSPI_QIR_SPIF);

			/* Set QDLYR[SPE] to start sending */
			write_qdlyr(qspi, read_qdlyr(qspi) | QSPI_QDLYR_SPE);

			/* Poll QIR[SPIF] for transfer completion */
			while ((read_qir(qspi) & QSPI_QIR_SPIF) != 1)
				udelay(1);

			/* If given read RxBuf, load data to it */
			if (rxbuf) {
				write_qar(qspi, QSPI_QAR_RECV);
				for (i = 0; i < n; ++i)
					*rxbuf++ = read_qdr(qspi);
			}

			/* Decrement count */
			count -= n;
		} while (count);
	}

	if (flags & SPI_XFER_END)
		spi_cs_deactivate(slave);

	return 0;
}
/* Each MCF CPU may have different pin assignments for chip selects. */
#if defined(CONFIG_M5271)
/* Assert chip select, val = [1|0] , dir = out, mode = GPIO */
/*
 * Each chip select is bit-banged: the data register is set or cleared to
 * the active level, the pin is switched to output (PDDR), and the pad is
 * routed to its GPIO function (PAR).
 * NOTE(review): register-bit meanings inferred from the MCF5271 register
 * names (PPDSDR = set data, PCLRR = clear data) — confirm against the
 * reference manual.
 */
void cfspi_cs_activate(uint bus, uint cs, uint cs_active_high)
{
	debug("%s: bus %d cs %d cs_active_high %d\n",
		__func__, bus, cs, cs_active_high);

	switch (cs) {
	case 0: /* QSPI_CS[0] = PQSPI[3] */
		if (cs_active_high)
			mbar_writeByte(MCF_GPIO_PPDSDR_QSPI, 0x08);
		else
			mbar_writeByte(MCF_GPIO_PCLRR_QSPI, 0xF7);
		mbar_writeByte(MCF_GPIO_PDDR_QSPI,
			mbar_readByte(MCF_GPIO_PDDR_QSPI) | 0x08);
		mbar_writeByte(MCF_GPIO_PAR_QSPI,
			mbar_readByte(MCF_GPIO_PAR_QSPI) & 0xDF);
		break;
	case 1: /* QSPI_CS[1] = PQSPI[4] */
		if (cs_active_high)
			mbar_writeByte(MCF_GPIO_PPDSDR_QSPI, 0x10);
		else
			mbar_writeByte(MCF_GPIO_PCLRR_QSPI, 0xEF);
		mbar_writeByte(MCF_GPIO_PDDR_QSPI,
			mbar_readByte(MCF_GPIO_PDDR_QSPI) | 0x10);
		mbar_writeByte(MCF_GPIO_PAR_QSPI,
			mbar_readByte(MCF_GPIO_PAR_QSPI) & 0x3F);
		break;
	case 2: /* QSPI_CS[2] = PTIMER[7] */
		if (cs_active_high)
			mbar_writeByte(MCF_GPIO_PPDSDR_TIMER, 0x80);
		else
			mbar_writeByte(MCF_GPIO_PCLRR_TIMER, 0x7F);
		mbar_writeByte(MCF_GPIO_PDDR_TIMER,
			mbar_readByte(MCF_GPIO_PDDR_TIMER) | 0x80);
		mbar_writeShort(MCF_GPIO_PAR_TIMER,
			mbar_readShort(MCF_GPIO_PAR_TIMER) & 0x3FFF);
		break;
	case 3: /* QSPI_CS[3] = PTIMER[3] */
		if (cs_active_high)
			mbar_writeByte(MCF_GPIO_PPDSDR_TIMER, 0x08);
		else
			mbar_writeByte(MCF_GPIO_PCLRR_TIMER, 0xF7);
		mbar_writeByte(MCF_GPIO_PDDR_TIMER,
			mbar_readByte(MCF_GPIO_PDDR_TIMER) | 0x08);
		mbar_writeShort(MCF_GPIO_PAR_TIMER,
			mbar_readShort(MCF_GPIO_PAR_TIMER) & 0xFF3F);
		break;
	}
}
/* Deassert chip select, val = [1|0], dir = in, mode = GPIO
 * direction set as IN to undrive the pin, external pullup/pulldown will bring
 * bus to deassert state.
 */
void cfspi_cs_deactivate(uint bus, uint cs, uint cs_active_high)
{
	debug("%s: bus %d cs %d cs_active_high %d\n",
		__func__, bus, cs, cs_active_high);

	switch (cs) {
	case 0: /* QSPI_CS[0] = PQSPI[3] */
		/* Write the inactive level, then float the pin (dir = in) */
		if (cs_active_high)
			mbar_writeByte(MCF_GPIO_PCLRR_QSPI, 0xF7);
		else
			mbar_writeByte(MCF_GPIO_PPDSDR_QSPI, 0x08);
		mbar_writeByte(MCF_GPIO_PDDR_QSPI,
			mbar_readByte(MCF_GPIO_PDDR_QSPI) & 0xF7);
		mbar_writeByte(MCF_GPIO_PAR_QSPI,
			mbar_readByte(MCF_GPIO_PAR_QSPI) & 0xDF);
		break;
	case 1: /* QSPI_CS[1] = PQSPI[4] */
		if (cs_active_high)
			mbar_writeByte(MCF_GPIO_PCLRR_QSPI, 0xEF);
		else
			mbar_writeByte(MCF_GPIO_PPDSDR_QSPI, 0x10);
		mbar_writeByte(MCF_GPIO_PDDR_QSPI,
			mbar_readByte(MCF_GPIO_PDDR_QSPI) & 0xEF);
		mbar_writeByte(MCF_GPIO_PAR_QSPI,
			mbar_readByte(MCF_GPIO_PAR_QSPI) & 0x3F);
		break;
	case 2: /* QSPI_CS[2] = PTIMER[7] */
		if (cs_active_high)
			mbar_writeByte(MCF_GPIO_PCLRR_TIMER, 0x7F);
		else
			mbar_writeByte(MCF_GPIO_PPDSDR_TIMER, 0x80);
		mbar_writeByte(MCF_GPIO_PDDR_TIMER,
			mbar_readByte(MCF_GPIO_PDDR_TIMER) & 0x7F);
		mbar_writeShort(MCF_GPIO_PAR_TIMER,
			mbar_readShort(MCF_GPIO_PAR_TIMER) & 0x3FFF);
		break;
	case 3: /* QSPI_CS[3] = PTIMER[3] */
		if (cs_active_high)
			mbar_writeByte(MCF_GPIO_PCLRR_TIMER, 0xF7);
		else
			mbar_writeByte(MCF_GPIO_PPDSDR_TIMER, 0x08);
		mbar_writeByte(MCF_GPIO_PDDR_TIMER,
			mbar_readByte(MCF_GPIO_PDDR_TIMER) & 0xF7);
		mbar_writeShort(MCF_GPIO_PAR_TIMER,
			mbar_readShort(MCF_GPIO_PAR_TIMER) & 0xFF3F);
		break;
	}
}
#endif /* CONFIG_M5271 */
| gpl-2.0 |
mupuf/linux-nouveau-pm | arch/arm/kvm/coproc_a15.c | 297 | 4670 | /*
* Copyright (C) 2012 - Virtual Open Systems and Columbia University
* Authors: Rusty Russell <rusty@rustcorp.au>
* Christoffer Dall <c.dall@virtualopensystems.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, version 2, as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
#include <linux/kvm_host.h>
#include <asm/cputype.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <linux/init.h>
/* Reset handler: synthesize the guest MPIDR from the host value + vcpu id. */
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	/*
	 * Compute guest MPIDR:
	 * (Even if we present only one VCPU to the guest on an SMP
	 * host we don't set the U bit in the MPIDR, or vice versa, as
	 * revealing the underlying hardware properties is likely to
	 * be the best choice).
	 */
	vcpu->arch.cp15[c0_MPIDR] = (read_cpuid_mpidr() & ~MPIDR_LEVEL_MASK)
		| (vcpu->vcpu_id & MPIDR_LEVEL_MASK);
}
#include "coproc.h"
/* A15 TRM 4.3.28: RO WI */
static bool access_actlr(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	/* Writes are silently ignored; reads return the shadow ACTLR */
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
	return true;
}
/* A15 TRM 4.3.60: R/O. */
static bool access_cbar(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	/* Writes fault (read-only register); reads always see zero. */
	return p->is_write ? write_to_read_only(vcpu, p) : read_zero(vcpu, p);
}
/* A15 TRM 4.3.48: R/O WI. */
static bool access_l2ctlr(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	/* Writes are silently ignored; reads return the shadow L2CTLR */
	if (p->is_write)
		return ignore_write(vcpu, p);

	*vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
	return true;
}
/*
 * Reset handler: seed the guest's shadow L2CTLR from the host register,
 * with the 2-bit core-count field (bits 25:24, "cores minus one")
 * rewritten to match the number of online VCPUs.
 */
static void reset_l2ctlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 l2ctlr, ncores;

	/* Read the host L2 Control Register (CP15 c9, op1=1, op2=2) */
	asm volatile("mrc p15, 1, %0, c9, c0, 2\n" : "=r" (l2ctlr));
	l2ctlr &= ~(3 << 24);
	ncores = atomic_read(&vcpu->kvm->online_vcpus) - 1;
	l2ctlr |= (ncores & 3) << 24;
	vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
}
/*
 * Reset handler: mirror the host ACTLR, forcing bit 6 (the SMP bit) to
 * reflect whether the guest has more than one VCPU.
 */
static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
	u32 actlr;

	/* ACTLR contains SMP bit: make sure you create all cpus first! */
	asm volatile("mrc p15, 0, %0, c1, c0, 1\n" : "=r" (actlr));

	/* Make the SMP bit consistent with the guest configuration */
	if (atomic_read(&vcpu->kvm->online_vcpus) > 1)
		actlr |= 1U << 6;
	else
		actlr &= ~(1U << 6);

	vcpu->arch.cp15[c1_ACTLR] = actlr;
}
/* A15 TRM 4.3.49: R/O WI (even if NSACR.NS_L2ERR, a write of 1 is ignored). */
static bool access_l2ectlr(struct kvm_vcpu *vcpu,
			const struct coproc_params *p,
			const struct coproc_reg *r)
{
	/* Reads always see zero; writes are silently ignored. */
	if (!p->is_write) {
		*vcpu_reg(vcpu, p->Rt1) = 0;
		return true;
	}

	return ignore_write(vcpu, p);
}
/*
* A15-specific CP15 registers.
* Important: Must be sorted ascending by CRn, CRM, Op1, Op2
*/
static const struct coproc_reg a15_regs[] = {
/* MPIDR: we use VMPIDR for guest access. */
{ CRn( 0), CRm( 0), Op1( 0), Op2( 5), is32,
NULL, reset_mpidr, c0_MPIDR },
/* SCTLR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 0), is32,
NULL, reset_val, c1_SCTLR, 0x00C50078 },
/* ACTLR: trapped by HCR.TAC bit. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 1), is32,
access_actlr, reset_actlr, c1_ACTLR },
/* CPACR: swapped by interrupt.S. */
{ CRn( 1), CRm( 0), Op1( 0), Op2( 2), is32,
NULL, reset_val, c1_CPACR, 0x00000000 },
/*
* L2CTLR access (guest wants to know #CPUs).
*/
{ CRn( 9), CRm( 0), Op1( 1), Op2( 2), is32,
access_l2ctlr, reset_l2ctlr, c9_L2CTLR },
{ CRn( 9), CRm( 0), Op1( 1), Op2( 3), is32, access_l2ectlr},
/* The Configuration Base Address Register. */
{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
static struct kvm_coproc_target_table a15_target_table = {
.target = KVM_ARM_TARGET_CORTEX_A15,
.table = a15_regs,
.num = ARRAY_SIZE(a15_regs),
};
/*
 * Boot-time check that a15_regs[] is strictly sorted (the table header
 * says the ordering is required), then register the table for
 * Cortex-A15 guests.
 */
static int __init coproc_a15_init(void)
{
	unsigned int i;

	/* Make sure tables are sorted ascending by CRn, CRm, Op1, Op2 */
	for (i = 1; i < ARRAY_SIZE(a15_regs); i++)
		BUG_ON(cmp_reg(&a15_regs[i-1],
			&a15_regs[i]) >= 0);

	kvm_register_target_coproc_table(&a15_target_table);
	return 0;
}
late_initcall(coproc_a15_init);
| gpl-2.0 |
georgewhr/dbwrt | sound/soc/jz4740/qi_lb60.c | 297 | 3621 | /*
* Copyright (C) 2009, Lars-Peter Clausen <lars@metafoo.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 675 Mass Ave, Cambridge, MA 02139, USA.
*
*/
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/soc.h>
#include <linux/gpio.h>
#define QI_LB60_SND_GPIO JZ_GPIO_PORTB(29)
#define QI_LB60_AMP_GPIO JZ_GPIO_PORTD(4)
/*
 * DAPM speaker event: drive the sound-enable and amplifier GPIOs high
 * when the widget powers up, low when it powers down.
 */
static int qi_lb60_spk_event(struct snd_soc_dapm_widget *widget,
			     struct snd_kcontrol *ctrl, int event)
{
	int on = !SND_SOC_DAPM_EVENT_OFF(event);

	gpio_set_value(QI_LB60_SND_GPIO, on);
	gpio_set_value(QI_LB60_AMP_GPIO, on);

	return 0;
}
static const struct snd_soc_dapm_widget qi_lb60_widgets[] = {
SND_SOC_DAPM_SPK("Speaker", qi_lb60_spk_event),
SND_SOC_DAPM_MIC("Mic", NULL),
};
static const struct snd_soc_dapm_route qi_lb60_routes[] = {
{"Mic", NULL, "MIC"},
{"Speaker", NULL, "LOUT"},
{"Speaker", NULL, "ROUT"},
};
#define QI_LB60_DAIFMT (SND_SOC_DAIFMT_I2S | \
SND_SOC_DAIFMT_NB_NF | \
SND_SOC_DAIFMT_CBM_CFM)
/* Card init: mark the unconnected codec line inputs and set the CPU DAI format. */
static int qi_lb60_codec_init(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_codec *codec = rtd->codec;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct snd_soc_dapm_context *dapm = &codec->dapm;
	int ret;

	/* LIN/RIN pins are marked not-connected on this board */
	snd_soc_dapm_nc_pin(dapm, "LIN");
	snd_soc_dapm_nc_pin(dapm, "RIN");

	/* I2S, normal bit/frame clock, codec is clock/frame master */
	ret = snd_soc_dai_set_fmt(cpu_dai, QI_LB60_DAIFMT);
	if (ret < 0) {
		dev_err(codec->dev, "Failed to set cpu dai format: %d\n", ret);
		return ret;
	}

	return 0;
}
static struct snd_soc_dai_link qi_lb60_dai = {
.name = "jz4740",
.stream_name = "jz4740",
.cpu_dai_name = "jz4740-i2s",
.platform_name = "jz4740-i2s",
.codec_dai_name = "jz4740-hifi",
.codec_name = "jz4740-codec",
.init = qi_lb60_codec_init,
};
static struct snd_soc_card qi_lb60 = {
.name = "QI LB60",
.owner = THIS_MODULE,
.dai_link = &qi_lb60_dai,
.num_links = 1,
.dapm_widgets = qi_lb60_widgets,
.num_dapm_widgets = ARRAY_SIZE(qi_lb60_widgets),
.dapm_routes = qi_lb60_routes,
.num_dapm_routes = ARRAY_SIZE(qi_lb60_routes),
};
static const struct gpio qi_lb60_gpios[] = {
{ QI_LB60_SND_GPIO, GPIOF_OUT_INIT_LOW, "SND" },
{ QI_LB60_AMP_GPIO, GPIOF_OUT_INIT_LOW, "AMP" },
};
/* Probe: claim the speaker/amp GPIOs, then register the sound card. */
static int qi_lb60_probe(struct platform_device *pdev)
{
	struct snd_soc_card *card = &qi_lb60;
	int ret;

	ret = gpio_request_array(qi_lb60_gpios, ARRAY_SIZE(qi_lb60_gpios));
	if (ret)
		return ret;

	card->dev = &pdev->dev;

	ret = snd_soc_register_card(card);
	if (ret) {
		dev_err(&pdev->dev, "snd_soc_register_card() failed: %d\n",
			ret);
		/* Undo the GPIO claim on failure */
		gpio_free_array(qi_lb60_gpios, ARRAY_SIZE(qi_lb60_gpios));
	}
	return ret;
}
/* Platform remove: tear down the card and release the control GPIOs. */
static int qi_lb60_remove(struct platform_device *pdev)
{
	snd_soc_unregister_card(platform_get_drvdata(pdev));
	gpio_free_array(qi_lb60_gpios, ARRAY_SIZE(qi_lb60_gpios));

	return 0;
}
/* Platform driver bound via the "qi-lb60-audio" platform device */
static struct platform_driver qi_lb60_driver = {
	.driver = {
		.name = "qi-lb60-audio",
		.owner = THIS_MODULE,
	},
	.probe = qi_lb60_probe,
	.remove = qi_lb60_remove,
};

module_platform_driver(qi_lb60_driver);

MODULE_AUTHOR("Lars-Peter Clausen <lars@metafoo.de>");
MODULE_DESCRIPTION("ALSA SoC QI LB60 Audio support");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:qi-lb60-audio");
| gpl-2.0 |
kasi86/linux | arch/mips/alchemy/devboards/db1300.c | 297 | 22474 | /*
* DBAu1300 init and platform device setup.
*
* (c) 2009 Manuel Lauss <manuel.lauss@googlemail.com>
*/
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/gpio.h>
#include <linux/gpio_keys.h>
#include <linux/init.h>
#include <linux/input.h> /* KEY_* codes */
#include <linux/i2c.h>
#include <linux/io.h>
#include <linux/leds.h>
#include <linux/ata_platform.h>
#include <linux/mmc/host.h>
#include <linux/module.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h>
#include <linux/platform_device.h>
#include <linux/smsc911x.h>
#include <linux/wm97xx.h>
#include <asm/mach-au1x00/au1000.h>
#include <asm/mach-au1x00/gpio-au1300.h>
#include <asm/mach-au1x00/au1100_mmc.h>
#include <asm/mach-au1x00/au1200fb.h>
#include <asm/mach-au1x00/au1xxx_dbdma.h>
#include <asm/mach-au1x00/au1xxx_psc.h>
#include <asm/mach-db1x00/bcsr.h>
#include <asm/mach-au1x00/prom.h>
#include "platform.h"
/* FPGA (external mux) interrupt sources, demuxed behind one CPLD irq line;
 * numbered directly after the on-chip GPIC interrupts.
 */
#define DB1300_FIRST_INT	(ALCHEMY_GPIC_INT_LAST + 1)
#define DB1300_IDE_INT		(DB1300_FIRST_INT + 0)
#define DB1300_ETH_INT		(DB1300_FIRST_INT + 1)
#define DB1300_CF_INT		(DB1300_FIRST_INT + 2)
#define DB1300_VIDEO_INT	(DB1300_FIRST_INT + 4)
#define DB1300_HDMI_INT		(DB1300_FIRST_INT + 5)
#define DB1300_DC_INT		(DB1300_FIRST_INT + 6)
#define DB1300_FLASH_INT	(DB1300_FIRST_INT + 7)
#define DB1300_CF_INSERT_INT	(DB1300_FIRST_INT + 8)
#define DB1300_CF_EJECT_INT	(DB1300_FIRST_INT + 9)
#define DB1300_AC97_INT		(DB1300_FIRST_INT + 10)
#define DB1300_AC97_PEN_INT	(DB1300_FIRST_INT + 11)
#define DB1300_SD1_INSERT_INT	(DB1300_FIRST_INT + 12)
#define DB1300_SD1_EJECT_INT	(DB1300_FIRST_INT + 13)
#define DB1300_OTG_VBUS_OC_INT	(DB1300_FIRST_INT + 14)
#define DB1300_HOST_VBUS_OC_INT (DB1300_FIRST_INT + 15)
#define DB1300_LAST_INT		(DB1300_FIRST_INT + 15)

/* SMSC9210 CS */
#define DB1300_ETH_PHYS_ADDR	0x19000000
#define DB1300_ETH_PHYS_END	0x197fffff

/* ATA CS */
#define DB1300_IDE_PHYS_ADDR	0x18800000
#define DB1300_IDE_REG_SHIFT	5
#define DB1300_IDE_PHYS_LEN	(16 << DB1300_IDE_REG_SHIFT)

/* NAND CS */
#define DB1300_NAND_PHYS_ADDR	0x20000000
#define DB1300_NAND_PHYS_END	0x20000fff
/* On-board I2C peripherals, registered on bus 0 (PSC3 SMBus) */
static struct i2c_board_info db1300_i2c_devs[] __initdata = {
	{ I2C_BOARD_INFO("wm8731", 0x1b), },	/* I2S audio codec */
	{ I2C_BOARD_INFO("ne1619", 0x2d), },	/* adm1025-compat hwmon */
};

/* multifunction pins to assign to GPIO controller */
static int db1300_gpio_pins[] __initdata = {
	AU1300_PIN_LCDPWM0, AU1300_PIN_PSC2SYNC1, AU1300_PIN_WAKE1,
	AU1300_PIN_WAKE2, AU1300_PIN_WAKE3, AU1300_PIN_FG3AUX,
	AU1300_PIN_EXTCLK1,
	-1,	/* terminator */
};

/* multifunction pins to assign to device functions */
static int db1300_dev_pins[] __initdata = {
	/* wake-from-str pins 0-3 */
	AU1300_PIN_WAKE0,
	/* external clock sources for PSC0 */
	AU1300_PIN_EXTCLK0,
	/* 8bit MMC interface on SD0: 6-9 */
	AU1300_PIN_SD0DAT4, AU1300_PIN_SD0DAT5, AU1300_PIN_SD0DAT6,
	AU1300_PIN_SD0DAT7,
	/* UART1 pins: 11-18 */
	AU1300_PIN_U1RI, AU1300_PIN_U1DCD, AU1300_PIN_U1DSR,
	AU1300_PIN_U1CTS, AU1300_PIN_U1RTS, AU1300_PIN_U1DTR,
	AU1300_PIN_U1RX, AU1300_PIN_U1TX,
	/* UART0 pins: 19-24 */
	AU1300_PIN_U0RI, AU1300_PIN_U0DCD, AU1300_PIN_U0DSR,
	AU1300_PIN_U0CTS, AU1300_PIN_U0RTS, AU1300_PIN_U0DTR,
	/* UART2: 25-26 */
	AU1300_PIN_U2RX, AU1300_PIN_U2TX,
	/* UART3: 27-28 */
	AU1300_PIN_U3RX, AU1300_PIN_U3TX,
	/* LCD controller PWMs, ext pixclock: 30-31 */
	AU1300_PIN_LCDPWM1, AU1300_PIN_LCDCLKIN,
	/* SD1 interface: 32-37 */
	AU1300_PIN_SD1DAT0, AU1300_PIN_SD1DAT1, AU1300_PIN_SD1DAT2,
	AU1300_PIN_SD1DAT3, AU1300_PIN_SD1CMD, AU1300_PIN_SD1CLK,
	/* SD2 interface: 38-43 */
	AU1300_PIN_SD2DAT0, AU1300_PIN_SD2DAT1, AU1300_PIN_SD2DAT2,
	AU1300_PIN_SD2DAT3, AU1300_PIN_SD2CMD, AU1300_PIN_SD2CLK,
	/* PSC0/1 clocks: 44-45 */
	AU1300_PIN_PSC0CLK, AU1300_PIN_PSC1CLK,
	/* PSCs: 46-49/50-53/54-57/58-61 */
	AU1300_PIN_PSC0SYNC0, AU1300_PIN_PSC0SYNC1, AU1300_PIN_PSC0D0,
	AU1300_PIN_PSC0D1,
	AU1300_PIN_PSC1SYNC0, AU1300_PIN_PSC1SYNC1, AU1300_PIN_PSC1D0,
	AU1300_PIN_PSC1D1,
	AU1300_PIN_PSC2SYNC0, AU1300_PIN_PSC2D0,
	AU1300_PIN_PSC2D1,
	AU1300_PIN_PSC3SYNC0, AU1300_PIN_PSC3SYNC1, AU1300_PIN_PSC3D0,
	AU1300_PIN_PSC3D1,
	/* PCMCIA interface: 62-70 */
	AU1300_PIN_PCE2, AU1300_PIN_PCE1, AU1300_PIN_PIOS16,
	AU1300_PIN_PIOR, AU1300_PIN_PWE, AU1300_PIN_PWAIT,
	AU1300_PIN_PREG, AU1300_PIN_POE, AU1300_PIN_PIOW,
	/* camera interface H/V sync inputs: 71-72 */
	AU1300_PIN_CIMLS, AU1300_PIN_CIMFS,
	/* PSC2/3 clocks: 73-74 */
	AU1300_PIN_PSC2CLK, AU1300_PIN_PSC3CLK,
	-1,	/* terminator */
};
/*
 * Route the board's multifunction pins: hand most of them to their
 * peripheral functions, keep the remainder as GPIO inputs, and select
 * the DBDMA GPIO source.  Both pin tables are -1 terminated.
 */
static void __init db1300_gpio_config(void)
{
	int *pin;

	for (pin = db1300_dev_pins; *pin != -1; pin++)
		au1300_pinfunc_to_dev(*pin);

	/* implies pin_to_gpio */
	for (pin = db1300_gpio_pins; *pin != -1; pin++)
		au1300_gpio_direction_input(*pin);

	au1300_set_dbdma_gpio(1, AU1300_PIN_FG3AUX);
}
/**********************************************************************/
/*
 * NAND hardware-control hook: selects the command, address or data latch
 * of the static-bus NAND interface by rewriting the low byte of the chip's
 * I/O address, then writes the command byte (if any) to the bus.
 */
static void au1300_nand_cmd_ctrl(struct mtd_info *mtd, int cmd,
				 unsigned int ctrl)
{
	struct nand_chip *this = mtd_to_nand(mtd);
	unsigned long base = (unsigned long)this->IO_ADDR_W & 0xffffff00;

	if (ctrl & NAND_CLE)
		base += MEM_STNAND_CMD;
	else if (ctrl & NAND_ALE)
		base += MEM_STNAND_ADDR;
	else
		base += MEM_STNAND_DATA;	/* plain data r/w by default */

	this->IO_ADDR_R = this->IO_ADDR_W = (void __iomem *)base;

	if (cmd != NAND_CMD_NONE) {
		__raw_writeb(cmd, this->IO_ADDR_W);
		wmb();
	}
}
/* NAND ready poll: bit 0 of the static-bus status register. */
static int au1300_nand_device_ready(struct mtd_info *mtd)
{
	unsigned long ststat = alchemy_rdsmem(AU1000_MEM_STSTAT);

	return ststat & 1;
}
/* NAND partition layout: 8MB first filesystem, remainder in the second */
static struct mtd_partition db1300_nand_parts[] = {
	{
		.name	= "NAND FS 0",
		.offset	= 0,
		.size	= 8 * 1024 * 1024,
	},
	{
		.name	= "NAND FS 1",
		.offset = MTDPART_OFS_APPEND,
		.size	= MTDPART_SIZ_FULL
	},
};

/* gen_nand platform data; not static because it may be referenced externally */
struct platform_nand_data db1300_nand_platdata = {
	.chip = {
		.nr_chips	= 1,
		.chip_offset	= 0,
		.nr_partitions	= ARRAY_SIZE(db1300_nand_parts),
		.partitions	= db1300_nand_parts,
		.chip_delay	= 20,
	},
	.ctrl = {
		.dev_ready	= au1300_nand_device_ready,
		.cmd_ctrl	= au1300_nand_cmd_ctrl,
	},
};

/* 256-byte register window on the NAND chip select */
static struct resource db1300_nand_res[] = {
	[0] = {
		.start	= DB1300_NAND_PHYS_ADDR,
		.end	= DB1300_NAND_PHYS_ADDR + 0xff,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device db1300_nand_dev = {
	.name	= "gen_nand",
	.num_resources	= ARRAY_SIZE(db1300_nand_res),
	.resource	= db1300_nand_res,
	.id	= -1,
	.dev	= {
		.platform_data = &db1300_nand_platdata,
	}
};
/**********************************************************************/
/* SMSC9210 ethernet: memory window on its chip select plus CPLD irq */
static struct resource db1300_eth_res[] = {
	[0] = {
		.start	= DB1300_ETH_PHYS_ADDR,
		.end	= DB1300_ETH_PHYS_END,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= DB1300_ETH_INT,
		.end	= DB1300_ETH_INT,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct smsc911x_platform_config db1300_eth_config = {
	.phy_interface	= PHY_INTERFACE_MODE_MII,
	.irq_polarity	= SMSC911X_IRQ_POLARITY_ACTIVE_LOW,
	.irq_type	= SMSC911X_IRQ_TYPE_PUSH_PULL,
	.flags		= SMSC911X_USE_32BIT,
};

static struct platform_device db1300_eth_dev = {
	.name		= "smsc911x",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(db1300_eth_res),
	.resource	= db1300_eth_res,
	.dev = {
		.platform_data = &db1300_eth_config,
	},
};
/**********************************************************************/
/* PSC1: AC97 audio (register window, irq, TX/RX DBDMA descriptor ids) */
static struct resource au1300_psc1_res[] = {
	[0] = {
		.start	= AU1300_PSC1_PHYS_ADDR,
		.end	= AU1300_PSC1_PHYS_ADDR + 0x0fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1300_PSC1_INT,
		.end	= AU1300_PSC1_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1300_DSCR_CMD0_PSC1_TX,
		.end	= AU1300_DSCR_CMD0_PSC1_TX,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= AU1300_DSCR_CMD0_PSC1_RX,
		.end	= AU1300_DSCR_CMD0_PSC1_RX,
		.flags	= IORESOURCE_DMA,
	},
};

static struct platform_device db1300_ac97_dev = {
	.name		= "au1xpsc_ac97",
	.id		= 1,	/* PSC ID. match with AC97 codec ID! */
	.num_resources	= ARRAY_SIZE(au1300_psc1_res),
	.resource	= au1300_psc1_res,
};

/* PSC2: I2S audio */
static struct resource au1300_psc2_res[] = {
	[0] = {
		.start	= AU1300_PSC2_PHYS_ADDR,
		.end	= AU1300_PSC2_PHYS_ADDR + 0x0fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1300_PSC2_INT,
		.end	= AU1300_PSC2_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1300_DSCR_CMD0_PSC2_TX,
		.end	= AU1300_DSCR_CMD0_PSC2_TX,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= AU1300_DSCR_CMD0_PSC2_RX,
		.end	= AU1300_DSCR_CMD0_PSC2_RX,
		.flags	= IORESOURCE_DMA,
	},
};

static struct platform_device db1300_i2s_dev = {
	.name		= "au1xpsc_i2s",
	.id		= 2,	/* PSC ID */
	.num_resources	= ARRAY_SIZE(au1300_psc2_res),
	.resource	= au1300_psc2_res,
};

/* PSC3: SMBus (i2c bus 0) */
static struct resource au1300_psc3_res[] = {
	[0] = {
		.start	= AU1300_PSC3_PHYS_ADDR,
		.end	= AU1300_PSC3_PHYS_ADDR + 0x0fff,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1300_PSC3_INT,
		.end	= AU1300_PSC3_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1300_DSCR_CMD0_PSC3_TX,
		.end	= AU1300_DSCR_CMD0_PSC3_TX,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= AU1300_DSCR_CMD0_PSC3_RX,
		.end	= AU1300_DSCR_CMD0_PSC3_RX,
		.flags	= IORESOURCE_DMA,
	},
};

static struct platform_device db1300_i2c_dev = {
	.name		= "au1xpsc_smbus",
	.id		= 0,	/* bus number */
	.num_resources	= ARRAY_SIZE(au1300_psc3_res),
	.resource	= au1300_psc3_res,
};
/**********************************************************************/
/* proper key assignments when facing the LCD panel.  For key assignments
 * according to the schematics swap up with down and left with right.
 * I chose to use it to emulate the arrow keys of a keyboard.
 */
static struct gpio_keys_button db1300_5waysw_arrowkeys[] = {
	{
		.code			= KEY_DOWN,
		.gpio			= AU1300_PIN_LCDPWM0,
		.type			= EV_KEY,
		.debounce_interval	= 1,
		.active_low		= 1,
		.desc			= "5waysw-down",
	},
	{
		.code			= KEY_UP,
		.gpio			= AU1300_PIN_PSC2SYNC1,
		.type			= EV_KEY,
		.debounce_interval	= 1,
		.active_low		= 1,
		.desc			= "5waysw-up",
	},
	{
		.code			= KEY_RIGHT,
		.gpio			= AU1300_PIN_WAKE3,
		.type			= EV_KEY,
		.debounce_interval	= 1,
		.active_low		= 1,
		.desc			= "5waysw-right",
	},
	{
		.code			= KEY_LEFT,
		.gpio			= AU1300_PIN_WAKE2,
		.type			= EV_KEY,
		.debounce_interval	= 1,
		.active_low		= 1,
		.desc			= "5waysw-left",
	},
	{
		.code			= KEY_ENTER,
		.gpio			= AU1300_PIN_WAKE1,
		.type			= EV_KEY,
		.debounce_interval	= 1,
		.active_low		= 1,
		.desc			= "5waysw-push",
	},
};

static struct gpio_keys_platform_data db1300_5waysw_data = {
	.buttons	= db1300_5waysw_arrowkeys,
	.nbuttons	= ARRAY_SIZE(db1300_5waysw_arrowkeys),
	.rep		= 1,	/* enable input auto-repeat */
	.name		= "db1300-5wayswitch",
};

static struct platform_device db1300_5waysw_dev = {
	.name	= "gpio-keys",
	.dev	= {
		.platform_data	= &db1300_5waysw_data,
	},
};
/**********************************************************************/
/* PATA interface on the static bus; registers spaced 2^5 bytes apart */
static struct pata_platform_info db1300_ide_info = {
	.ioport_shift	= DB1300_IDE_REG_SHIFT,
};

/* ALT status/control block starts at register offset 14 */
#define IDE_ALT_START	(14 << DB1300_IDE_REG_SHIFT)
static struct resource db1300_ide_res[] = {
	[0] = {	/* command block registers */
		.start	= DB1300_IDE_PHYS_ADDR,
		.end	= DB1300_IDE_PHYS_ADDR + IDE_ALT_START - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {	/* control block registers */
		.start	= DB1300_IDE_PHYS_ADDR + IDE_ALT_START,
		.end	= DB1300_IDE_PHYS_ADDR + DB1300_IDE_PHYS_LEN - 1,
		.flags	= IORESOURCE_MEM,
	},
	[2] = {
		.start	= DB1300_IDE_INT,
		.end	= DB1300_IDE_INT,
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device db1300_ide_dev = {
	.dev	= {
		.platform_data	= &db1300_ide_info,
	},
	.name		= "pata_platform",
	.resource	= db1300_ide_res,
	.num_resources	= ARRAY_SIZE(db1300_ide_res),
};
/**********************************************************************/
/*
 * SD1 card-detect interrupt handler.  The insert and eject lines behave
 * like edges of the same signal: whichever one fired is masked and its
 * counterpart unmasked, then the MMC core is told to rescan the slot.
 *
 * Returns IRQ_HANDLED unconditionally.
 */
static irqreturn_t db1300_mmc_cd(int irq, void *ptr)
{
	void (*mmc_cd)(struct mmc_host *, unsigned long);

	/* disable the one currently screaming. No other way to shut it up */
	if (irq == DB1300_SD1_INSERT_INT) {
		disable_irq_nosync(DB1300_SD1_INSERT_INT);
		enable_irq(DB1300_SD1_EJECT_INT);
	} else {
		disable_irq_nosync(DB1300_SD1_EJECT_INT);
		enable_irq(DB1300_SD1_INSERT_INT);
	}

	/* link against CONFIG_MMC=m. We can only be called once MMC core has
	 * initialized the controller, but symbol_get() can still return NULL
	 * (e.g. if the mmc core module is unloading), so check the result
	 * instead of dereferencing a NULL function pointer in irq context.
	 */
	mmc_cd = symbol_get(mmc_detect_change);
	if (mmc_cd) {
		mmc_cd(ptr, msecs_to_jiffies(500));
		symbol_put(mmc_detect_change);
	}

	return IRQ_HANDLED;
}
/* Write-protect query for the SD1 slot.  Non-zero means read-only. */
static int db1300_mmc_card_readonly(void *mmc_host)
{
	/* it uses SD1 interface, but the DB1200's SD0 bit in the CPLD */
	unsigned short status = bcsr_read(BCSR_STATUS);

	return status & BCSR_STATUS_SD0WP;
}
/* Card-presence query for SD1; non-zero when a card is inserted. */
static int db1300_mmc_card_inserted(void *mmc_host)
{
	/* bit 12 of SIGSTAT mirrors the insertion irq signal */
	unsigned short sigstat = bcsr_read(BCSR_SIGSTAT);

	return sigstat & (1 << 12);
}
/*
 * Install or remove the SD1 insert/eject interrupt handlers.  Both irqs
 * are flagged IRQ_NOAUTOEN, so after requesting them only the one that
 * matches the current card state is unmasked.
 *
 * Returns 0 on success or the request_irq() error.
 */
static int db1300_mmc_cd_setup(void *mmc_host, int en)
{
	int ret;

	if (!en) {
		free_irq(DB1300_SD1_INSERT_INT, mmc_host);
		free_irq(DB1300_SD1_EJECT_INT, mmc_host);
		return 0;
	}

	ret = request_irq(DB1300_SD1_INSERT_INT, db1300_mmc_cd, 0,
			  "sd_insert", mmc_host);
	if (ret)
		return ret;

	ret = request_irq(DB1300_SD1_EJECT_INT, db1300_mmc_cd, 0,
			  "sd_eject", mmc_host);
	if (ret) {
		free_irq(DB1300_SD1_INSERT_INT, mmc_host);
		return ret;
	}

	/* arm only the irq that can actually fire next */
	if (db1300_mmc_card_inserted(mmc_host))
		enable_irq(DB1300_SD1_EJECT_INT);
	else
		enable_irq(DB1300_SD1_INSERT_INT);

	return 0;
}
/* SD1 activity LED: LED0 in the CPLD LED register. */
static void db1300_mmcled_set(struct led_classdev *led,
			      enum led_brightness brightness)
{
	if (brightness == LED_OFF)
		bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED0);
	else
		bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED0, 0);
}
/* LED classdev wrapping the CPLD LED0 bit for SD1 activity */
static struct led_classdev db1300_mmc_led = {
	.brightness_set	= db1300_mmcled_set,
};

/* SD1 host callbacks; not static because the MMC driver may reference it */
struct au1xmmc_platform_data db1300_sd1_platdata = {
	.cd_setup	= db1300_mmc_cd_setup,
	.card_inserted	= db1300_mmc_card_inserted,
	.card_readonly	= db1300_mmc_card_readonly,
	.led		= &db1300_mmc_led,
};
/* SD1 controller: register base, irq and TX/RX DBDMA descriptor ids */
static struct resource au1300_sd1_res[] = {
	[0] = {
		.start	= AU1300_SD1_PHYS_ADDR,
		.end	= AU1300_SD1_PHYS_ADDR,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1300_SD1_INT,
		.end	= AU1300_SD1_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1300_DSCR_CMD0_SDMS_TX1,
		.end	= AU1300_DSCR_CMD0_SDMS_TX1,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= AU1300_DSCR_CMD0_SDMS_RX1,
		.end	= AU1300_DSCR_CMD0_SDMS_RX1,
		.flags	= IORESOURCE_DMA,
	},
};

static struct platform_device db1300_sd1_dev = {
	.dev = {
		.platform_data	= &db1300_sd1_platdata,
	},
	.name		= "au1xxx-mmc",
	.id		= 1,
	.resource	= au1300_sd1_res,
	.num_resources	= ARRAY_SIZE(au1300_sd1_res),
};
/**********************************************************************/
/* Card-presence query for the soldered moviNAND on SD0.
 * Always reports "absent" until the interface is made to work.
 */
static int db1300_movinand_inserted(void *mmc_host)
{
	return 0;	/* disable for now, it doesn't work yet */
}
/* moviNAND is never write-protected. */
static int db1300_movinand_readonly(void *mmc_host)
{
	return 0;
}
/* moviNAND (SD0) activity LED: LED1 in the CPLD LED register. */
static void db1300_movinand_led_set(struct led_classdev *led,
				    enum led_brightness brightness)
{
	if (brightness == LED_OFF)
		bcsr_mod(BCSR_LEDS, 0, BCSR_LEDS_LED1);
	else
		bcsr_mod(BCSR_LEDS, BCSR_LEDS_LED1, 0);
}
/* LED classdev wrapping the CPLD LED1 bit for moviNAND activity */
static struct led_classdev db1300_movinand_led = {
	.brightness_set	= db1300_movinand_led_set,
};

/* SD0 (moviNAND) host callbacks; polling since card-detect is stubbed out */
struct au1xmmc_platform_data db1300_sd0_platdata = {
	.card_inserted	= db1300_movinand_inserted,
	.card_readonly	= db1300_movinand_readonly,
	.led		= &db1300_movinand_led,
	.mask_host_caps	= MMC_CAP_NEEDS_POLL,
};
/* SD0 controller: register base, irq and TX/RX DBDMA descriptor ids */
static struct resource au1300_sd0_res[] = {
	[0] = {
		.start	= AU1100_SD0_PHYS_ADDR,
		.end	= AU1100_SD0_PHYS_ADDR,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1300_SD0_INT,
		.end	= AU1300_SD0_INT,
		.flags	= IORESOURCE_IRQ,
	},
	[2] = {
		.start	= AU1300_DSCR_CMD0_SDMS_TX0,
		.end	= AU1300_DSCR_CMD0_SDMS_TX0,
		.flags	= IORESOURCE_DMA,
	},
	[3] = {
		.start	= AU1300_DSCR_CMD0_SDMS_RX0,
		.end	= AU1300_DSCR_CMD0_SDMS_RX0,
		.flags	= IORESOURCE_DMA,
	},
};

static struct platform_device db1300_sd0_dev = {
	.dev = {
		.platform_data	= &db1300_sd0_platdata,
	},
	.name		= "au1xxx-mmc",
	.id		= 0,
	.resource	= au1300_sd0_res,
	.num_resources	= ARRAY_SIZE(au1300_sd0_res),
};
/**********************************************************************/
/* AC97 codec device; id must match the PSC used for AC97 (PSC1) */
static struct platform_device db1300_wm9715_dev = {
	.name		= "wm9712-codec",
	.id		= 1,	/* ID of PSC for AC97 audio, see asoc glue! */
};

/* PCM DMA devices, one per audio PSC */
static struct platform_device db1300_ac97dma_dev = {
	.name		= "au1xpsc-pcm",
	.id		= 1,	/* PSC ID */
};

static struct platform_device db1300_i2sdma_dev = {
	.name		= "au1xpsc-pcm",
	.id		= 2,	/* PSC ID */
};

/* ASoC machine (card) devices for the AC97 and I2S paths */
static struct platform_device db1300_sndac97_dev = {
	.name		= "db1300-ac97",
};

static struct platform_device db1300_sndi2s_dev = {
	.name		= "db1300-i2s",
};
/**********************************************************************/
/* Index of this board's panel in the au1200fb panel table. */
static int db1300fb_panel_index(void)
{
	return 9;	/* DB1300_800x480 */
}
/* Power the panel up.  Always returns 0. */
static int db1300fb_panel_init(void)
{
	/* Apply power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */
	bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD,
			     BCSR_BOARD_LCDBL);
	return 0;
}
/* Power the panel down.  Always returns 0. */
static int db1300fb_panel_shutdown(void)
{
	/* Remove power (Vee/Vdd logic is inverted on Panel DB1300_800x480) */
	bcsr_mod(BCSR_BOARD, BCSR_BOARD_LCDBL,
			     BCSR_BOARD_LCDVEE | BCSR_BOARD_LCDVDD);
	return 0;
}
/* au1200fb panel callbacks for this board */
static struct au1200fb_platdata db1300fb_pd = {
	.panel_index	= db1300fb_panel_index,
	.panel_init	= db1300fb_panel_init,
	.panel_shutdown	= db1300fb_panel_shutdown,
};

static struct resource au1300_lcd_res[] = {
	[0] = {
		.start	= AU1200_LCD_PHYS_ADDR,
		.end	= AU1200_LCD_PHYS_ADDR + 0x800 - 1,
		.flags	= IORESOURCE_MEM,
	},
	[1] = {
		.start	= AU1300_LCD_INT,
		.end	= AU1300_LCD_INT,
		.flags	= IORESOURCE_IRQ,
	}
};

/* framebuffer DMA can address the full 32-bit space */
static u64 au1300_lcd_dmamask = DMA_BIT_MASK(32);

static struct platform_device db1300_lcd_dev = {
	.name		= "au1200-lcd",
	.id		= 0,
	.dev = {
		.dma_mask		= &au1300_lcd_dmamask,
		.coherent_dma_mask	= DMA_BIT_MASK(32),
		.platform_data		= &db1300fb_pd,
	},
	.num_resources	= ARRAY_SIZE(au1300_lcd_res),
	.resource	= au1300_lcd_res,
};
/**********************************************************************/
/* Mask/unmask the touchscreen pen-down interrupt for the wm97xx core. */
static void db1300_wm97xx_irqen(struct wm97xx *wm, int enable)
{
	if (!enable)
		disable_irq_nosync(DB1300_AC97_PEN_INT);
	else
		enable_irq(DB1300_AC97_PEN_INT);
}
/* Machine ops handed to the wm97xx touch core: irq control + pen-down GPIO */
static struct wm97xx_mach_ops db1300_wm97xx_ops = {
	.irq_enable	= db1300_wm97xx_irqen,
	.irq_gpio	= WM97XX_GPIO_3,
};
/* Bind-time setup for the wm97xx touchscreen: route the external pen-down
 * line to the codec's virtual pen GPIO and register the machine ops.
 * Returns the result of wm97xx_register_mach_ops().
 */
static int db1300_wm97xx_probe(struct platform_device *pdev)
{
	struct wm97xx *wm = platform_get_drvdata(pdev);

	/* external pendown indicator */
	wm97xx_config_gpio(wm, WM97XX_GPIO_13, WM97XX_GPIO_IN,
			   WM97XX_GPIO_POL_LOW, WM97XX_GPIO_STICKY,
			   WM97XX_GPIO_WAKE);

	/* internal "virtual" pendown gpio */
	wm97xx_config_gpio(wm, WM97XX_GPIO_3, WM97XX_GPIO_OUT,
			   WM97XX_GPIO_POL_LOW, WM97XX_GPIO_NOTSTICKY,
			   WM97XX_GPIO_NOWAKE);

	wm->pen_irq = DB1300_AC97_PEN_INT;

	return wm97xx_register_mach_ops(wm, &db1300_wm97xx_ops);
}
/* Driver for the "wm97xx-touch" device created by the wm97xx core */
static struct platform_driver db1300_wm97xx_driver = {
	.driver.name	= "wm97xx-touch",
	.driver.owner	= THIS_MODULE,
	.probe		= db1300_wm97xx_probe,
};
/**********************************************************************/
/* All board devices, registered in one go by db1300_dev_setup() */
static struct platform_device *db1300_dev[] __initdata = {
	&db1300_eth_dev,
	&db1300_i2c_dev,
	&db1300_5waysw_dev,
	&db1300_nand_dev,
	&db1300_ide_dev,
	&db1300_sd0_dev,
	&db1300_sd1_dev,
	&db1300_lcd_dev,
	&db1300_ac97_dev,
	&db1300_i2s_dev,
	&db1300_wm9715_dev,
	&db1300_ac97dma_dev,
	&db1300_i2sdma_dev,
	&db1300_sndac97_dev,
	&db1300_sndi2s_dev,
};
/* Device-level board setup: CPLD irq demux, PSC clocking, PCMCIA socket,
 * NOR flash and finally all platform devices.  Ordering matters: irqs must
 * exist before devices that request them are registered.
 * Returns the result of platform_add_devices().
 */
int __init db1300_dev_setup(void)
{
	int swapped, cpldirq;
	struct clk *c;

	/* setup CPLD IRQ muxer */
	cpldirq = au1300_gpio_to_irq(AU1300_PIN_EXTCLK1);
	irq_set_irq_type(cpldirq, IRQ_TYPE_LEVEL_HIGH);
	bcsr_init_irq(DB1300_FIRST_INT, DB1300_LAST_INT, cpldirq);

	/* insert/eject IRQs: one always triggers so don't enable them
	 * when doing request_irq() on them.  DB1200 has this bug too.
	 */
	irq_set_status_flags(DB1300_SD1_INSERT_INT, IRQ_NOAUTOEN);
	irq_set_status_flags(DB1300_SD1_EJECT_INT, IRQ_NOAUTOEN);
	irq_set_status_flags(DB1300_CF_INSERT_INT, IRQ_NOAUTOEN);
	irq_set_status_flags(DB1300_CF_EJECT_INT, IRQ_NOAUTOEN);

	/*
	 * setup board
	 */
	prom_get_ethernet_addr(&db1300_eth_config.mac[0]);

	i2c_register_board_info(0, db1300_i2c_devs,
				ARRAY_SIZE(db1300_i2c_devs));

	if (platform_driver_register(&db1300_wm97xx_driver))
		pr_warn("DB1300: failed to init touch pen irq support!\n");

	/* Audio PSC clock is supplied by codecs (PSC1, 2) */
	__raw_writel(PSC_SEL_CLK_SERCLK,
	    (void __iomem *)KSEG1ADDR(AU1300_PSC1_PHYS_ADDR) + PSC_SEL_OFFSET);
	wmb();
	__raw_writel(PSC_SEL_CLK_SERCLK,
	    (void __iomem *)KSEG1ADDR(AU1300_PSC2_PHYS_ADDR) + PSC_SEL_OFFSET);
	wmb();
	/* I2C driver wants 50MHz, get as close as possible */
	c = clk_get(NULL, "psc3_intclk");
	if (!IS_ERR(c)) {
		clk_set_rate(c, 50000000);
		clk_prepare_enable(c);
		clk_put(c);
	}
	__raw_writel(PSC_SEL_CLK_INTCLK,
	    (void __iomem *)KSEG1ADDR(AU1300_PSC3_PHYS_ADDR) + PSC_SEL_OFFSET);
	wmb();

	/* enable power to USB ports */
	bcsr_mod(BCSR_RESETS, 0, BCSR_RESETS_USBHPWR | BCSR_RESETS_OTGPWR);

	/* although it is socket #0, it uses the CPLD bits which previous boards
	 * have used for socket #1.
	 */
	db1x_register_pcmcia_socket(
		AU1000_PCMCIA_ATTR_PHYS_ADDR,
		AU1000_PCMCIA_ATTR_PHYS_ADDR + 0x00400000 - 1,
		AU1000_PCMCIA_MEM_PHYS_ADDR,
		AU1000_PCMCIA_MEM_PHYS_ADDR  + 0x00400000 - 1,
		AU1000_PCMCIA_IO_PHYS_ADDR,
		AU1000_PCMCIA_IO_PHYS_ADDR   + 0x00010000 - 1,
		DB1300_CF_INT, DB1300_CF_INSERT_INT, 0, DB1300_CF_EJECT_INT, 1);

	swapped = bcsr_read(BCSR_STATUS) & BCSR_STATUS_DB1200_SWAPBOOT;
	db1x_register_norflash(64 << 20, 2, swapped);

	return platform_add_devices(db1300_dev, ARRAY_SIZE(db1300_dev));
}
/*
 * Early board setup: map the CPLD (BCSR), verify this really is a DB1300,
 * configure the multifunction pins and enable the remaining UARTs.
 *
 * Returns 0 on success, -ENODEV if the CPLD reports a different board.
 */
int __init db1300_board_setup(void)
{
	unsigned short id;

	bcsr_init(DB1300_BCSR_PHYS_ADDR,
		  DB1300_BCSR_PHYS_ADDR + DB1300_BCSR_HEXLED_OFS);

	id = bcsr_read(BCSR_WHOAMI);
	if (BCSR_WHOAMI_BOARD(id) != BCSR_WHOAMI_DB1300)
		return -ENODEV;

	db1300_gpio_config();

	printk(KERN_INFO "NetLogic DBAu1300 Development Platform.\n\t"
		"BoardID %d   CPLD Rev %d   DaughtercardID %d\n",
		BCSR_WHOAMI_BOARD(id), BCSR_WHOAMI_CPLD(id),
		BCSR_WHOAMI_DCID(id));

	/* enable UARTs, YAMON only enables #2 */
	alchemy_uart_enable(AU1300_UART0_PHYS_ADDR);
	alchemy_uart_enable(AU1300_UART1_PHYS_ADDR);
	alchemy_uart_enable(AU1300_UART3_PHYS_ADDR);

	return 0;
}
| gpl-2.0 |
NX511J-dev/kernel_zte_nx511j | drivers/staging/prima/CORE/MAC/src/pe/lim/limProcessBeaconFrame.c | 297 | 11858 | /*
* Copyright (c) 2011-2015 The Linux Foundation. All rights reserved.
*
* Previously licensed under the ISC license by Qualcomm Atheros, Inc.
*
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all
* copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL
* WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE
* AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
* DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR
* PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
* TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/*
* This file was originally distributed by Qualcomm Atheros, Inc.
* under proprietary terms before Copyright ownership was assigned
* to the Linux Foundation.
*/
/*
*
* This file limProcessBeaconFrame.cc contains the code
* for processing Received Beacon Frame.
* Author: Chandra Modumudi
* Date: 03/01/02
* History:-
* Date Modified by Modification Information
* --------------------------------------------------------------------
*
*/
#include "wniCfgSta.h"
#include "aniGlobal.h"
#include "cfgApi.h"
#include "schApi.h"
#include "utilsApi.h"
#include "limTypes.h"
#include "limUtils.h"
#include "limAssocUtils.h"
#include "limPropExtsUtils.h"
#include "limSerDesUtils.h"
/**
* limProcessBeaconFrame
*
*FUNCTION:
* This function is called by limProcessMessageQueue() upon Beacon
* frame reception.
*
*LOGIC:
*
*ASSUMPTIONS:
*
*NOTE:
* 1. Beacons received in 'normal' state in IBSS are handled by
* Beacon Processing module.
*
* @param pMac - Pointer to Global MAC structure
* @param *pRxPacketInfo - A pointer to RX packet info structure
* @return None
*/
void
limProcessBeaconFrame(tpAniSirGlobal pMac, tANI_U8 *pRxPacketInfo,tpPESession psessionEntry)
{
    tpSirMacMgmtHdr      pHdr;
    tSchBeaconStruct    *pBeacon;

    /* global (non session-specific) received-beacon counter */
    pMac->lim.gLimNumBeaconsRcvd++;

    /* here is it required to increment session specific heartBeat beacon counter */
    pHdr = WDA_GET_RX_MAC_HEADER(pRxPacketInfo);
    PELOG2(limLog(pMac, LOG2, FL("Received Beacon frame with length=%d from "),
           WDA_GET_RX_MPDU_LEN(pRxPacketInfo));
    limPrintMacAddr(pMac, pHdr->sa, LOG2);)

    /* host-driven scan: stop the min-channel timer while handling the frame */
    if (!pMac->fScanOffload)
    {
        if (limDeactivateMinChannelTimerDuringScan(pMac) != eSIR_SUCCESS)
            return;
    }

    /**
     * Expect Beacon only when
     * 1. STA is in Scan mode waiting for Beacon/Probe response or
     * 2. STA is waiting for Beacon/Probe Respose Frame
     *    to announce join success.
     * 3. STA/AP is in Learn mode
     */
    if ((pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) ||
        (pMac->lim.gLimMlmState == eLIM_MLM_PASSIVE_SCAN_STATE) ||
        (pMac->lim.gLimMlmState == eLIM_MLM_LEARN_STATE) ||
        (psessionEntry->limMlmState == eLIM_MLM_WT_JOIN_BEACON_STATE)
        || pMac->fScanOffload
        )
    {
        /* heap-allocate: tSchBeaconStruct is too large for the stack */
        pBeacon = vos_mem_malloc(sizeof(tSchBeaconStruct));
        if ( NULL == pBeacon )
        {
            limLog(pMac, LOGE, FL("Unable to allocate memory in limProcessBeaconFrame") );
            return;
        }

        // Parse received Beacon
        if (sirConvertBeaconFrame2Struct(pMac, (tANI_U8 *) pRxPacketInfo,
                                         pBeacon) != eSIR_SUCCESS)
        {
            // Received wrongly formatted/invalid Beacon.
            // Ignore it and move on.
            limLog(pMac, LOGW,
                   FL("Received invalid Beacon in state %d"),
                   psessionEntry->limMlmState);
            limPrintMlmState(pMac, LOGW,  psessionEntry->limMlmState);
            /* even an unparseable beacon from our own BSS may carry a usable
             * TIM element the first time around */
            if ((!psessionEntry->currentBssBeaconCnt) &&
                (sirCompareMacAddr( psessionEntry->bssId, pHdr->sa)))
                limParseBeaconForTim(pMac, (tANI_U8 *) pRxPacketInfo, psessionEntry);
            vos_mem_free(pBeacon);
            return;
        }

        /*during scanning, when any session is active, and beacon/Pr belongs to
          one of the session, fill up the following, TBD - HB couter */
        if ((!psessionEntry->lastBeaconDtimPeriod) &&
            (sirCompareMacAddr( psessionEntry->bssId, pBeacon->bssid)))
        {
            vos_mem_copy(( tANI_U8* )&psessionEntry->lastBeaconTimeStamp,
                         ( tANI_U8* )pBeacon->timeStamp, sizeof(tANI_U64) );
            psessionEntry->lastBeaconDtimCount = pBeacon->tim.dtimCount;
            psessionEntry->lastBeaconDtimPeriod= pBeacon->tim.dtimPeriod;
            psessionEntry->currentBssBeaconCnt++;
        }

        MTRACE(macTrace(pMac, TRACE_CODE_RX_MGMT_TSF, 0, pBeacon->timeStamp[0]);)
        MTRACE(macTrace(pMac, TRACE_CODE_RX_MGMT_TSF, 0, pBeacon->timeStamp[1]);)

        /* firmware-offloaded scan: always record the BSS description */
        if (pMac->fScanOffload)
        {
            limCheckAndAddBssDescription(pMac, pBeacon, pRxPacketInfo,
                                         eANI_BOOLEAN_FALSE, eANI_BOOLEAN_TRUE);
        }

        if ((pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) ||
            (pMac->lim.gLimMlmState == eLIM_MLM_PASSIVE_SCAN_STATE))
        {
            limCheckAndAddBssDescription(pMac, pBeacon, pRxPacketInfo,
               ((pMac->lim.gLimHalScanState == eLIM_HAL_SCANNING_STATE) ?
                eANI_BOOLEAN_TRUE : eANI_BOOLEAN_FALSE),
               eANI_BOOLEAN_FALSE);
            /* Calling dfsChannelList which will convert DFS channel
             * to Active channel for x secs if this channel is DFS channel */
            limSetDFSChannelList(pMac, pBeacon->channelNumber,
                                 &pMac->lim.dfschannelList);
        }
        else if (pMac->lim.gLimMlmState == eLIM_MLM_LEARN_STATE)
        {
            /* learn mode: beacon intentionally ignored here */
        }
        else if (psessionEntry->limMlmState == eLIM_MLM_WT_JOIN_BEACON_STATE)
        {
            /* stash the raw beacon payload; handed to csr/hdd in join cnf */
            if( psessionEntry->beacon != NULL )
            {
                vos_mem_free(psessionEntry->beacon);
                psessionEntry->beacon = NULL;
            }
            psessionEntry->bcnLen = WDA_GET_RX_PAYLOAD_LEN(pRxPacketInfo);
            psessionEntry->beacon = vos_mem_malloc(psessionEntry->bcnLen);
            if ( NULL == psessionEntry->beacon )
            {
                PELOGE(limLog(pMac, LOGE, FL("Unable to allocate memory to store beacon"));)
            }
            else
            {
                //Store the Beacon/ProbeRsp. This is sent to csr/hdd in join cnf response.
                vos_mem_copy(psessionEntry->beacon, WDA_GET_RX_MPDU_DATA(pRxPacketInfo),
                             psessionEntry->bcnLen);
            }

            // STA in WT_JOIN_BEACON_STATE (IBSS)
            limCheckAndAnnounceJoinSuccess(pMac, pBeacon, pHdr,psessionEntry);
        } // if (pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE)
        vos_mem_free(pBeacon);
    } // if ((pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) || ...
    else
    {
        // Ignore Beacon frame in all other states
        if (psessionEntry->limMlmState == eLIM_MLM_JOINED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_BSS_STARTED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_AUTH_FRAME2_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_AUTH_FRAME3_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_AUTH_FRAME4_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_AUTH_RSP_TIMEOUT_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_AUTHENTICATED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_ASSOC_RSP_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_REASSOC_RSP_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_ASSOCIATED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_REASSOCIATED_STATE ||
            psessionEntry->limMlmState == eLIM_MLM_WT_ASSOC_CNF_STATE ||
            limIsReassocInProgress(pMac,psessionEntry)) {
            // nothing unexpected about beacon in these states
            pMac->lim.gLimNumBeaconsIgnored++;
        }
        else
        {
            PELOG1(limLog(pMac, LOG1, FL("Received Beacon in unexpected state %d"),
                   psessionEntry->limMlmState);
            limPrintMlmState(pMac, LOG1, psessionEntry->limMlmState);)
#ifdef WLAN_DEBUG
            pMac->lim.gLimUnexpBcnCnt++;
#endif
        }
    }

    return;
} /*** end limProcessBeaconFrame() ***/
/**---------------------------------------------------------------
\fn limProcessBeaconFrameNoSession
\brief This function is called by limProcessMessageQueue()
\ upon Beacon reception.
\
\param pMac
\param *pRxPacketInfo - A pointer to Rx packet info structure
\return None
------------------------------------------------------------------*/
void
limProcessBeaconFrameNoSession(tpAniSirGlobal pMac, tANI_U8 *pRxPacketInfo)
{
    tpSirMacMgmtHdr      pHdr;
    tSchBeaconStruct    *pBeacon;

    /* global received-beacon counter */
    pMac->lim.gLimNumBeaconsRcvd++;
    pHdr = WDA_GET_RX_MAC_HEADER(pRxPacketInfo);

    limLog(pMac, LOG2, FL("Received Beacon frame with length=%d from "),
           WDA_GET_RX_MPDU_LEN(pRxPacketInfo));
    limPrintMacAddr(pMac, pHdr->sa, LOG2);

    /* host-driven scan: stop the min-channel timer while handling the frame */
    if (!pMac->fScanOffload)
    {
        if (limDeactivateMinChannelTimerDuringScan(pMac) != eSIR_SUCCESS)
            return;
    }

    /**
     * No session has been established. Expect Beacon only when
     * 1. STA is in Scan mode waiting for Beacon/Probe response or
     * 2. STA/AP is in Learn mode
     */
    if ((pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) ||
        (pMac->lim.gLimMlmState == eLIM_MLM_PASSIVE_SCAN_STATE) ||
        (pMac->lim.gLimMlmState == eLIM_MLM_LEARN_STATE))
    {
        /* heap-allocate: tSchBeaconStruct is too large for the stack */
        pBeacon = vos_mem_malloc(sizeof(tSchBeaconStruct));
        if ( NULL == pBeacon )
        {
            limLog(pMac, LOGE, FL("Unable to allocate memory in limProcessBeaconFrameNoSession") );
            return;
        }

        if (sirConvertBeaconFrame2Struct(pMac, (tANI_U8 *) pRxPacketInfo, pBeacon) != eSIR_SUCCESS)
        {
            // Received wrongly formatted/invalid Beacon. Ignore and move on.
            limLog(pMac, LOGW, FL("Received invalid Beacon in global MLM state %d"), pMac->lim.gLimMlmState);
            limPrintMlmState(pMac, LOGW,  pMac->lim.gLimMlmState);
            vos_mem_free(pBeacon);
            return;
        }

        if ( (pMac->lim.gLimMlmState == eLIM_MLM_WT_PROBE_RESP_STATE) ||
             (pMac->lim.gLimMlmState == eLIM_MLM_PASSIVE_SCAN_STATE) )
        {
            limCheckAndAddBssDescription(pMac, pBeacon, pRxPacketInfo,
                                         eANI_BOOLEAN_TRUE, eANI_BOOLEAN_FALSE);
            /* Calling dfsChannelList which will convert DFS channel
             * to Active channel for x secs if this channel is DFS channel */
            limSetDFSChannelList(pMac, pBeacon->channelNumber,
                                 &pMac->lim.dfschannelList);
        }
        else if (pMac->lim.gLimMlmState == eLIM_MLM_LEARN_STATE)
        {
            /* learn mode: beacon intentionally ignored here */
        } // end of eLIM_MLM_LEARN_STATE)
        vos_mem_free(pBeacon);
    } // end of (eLIM_MLM_WT_PROBE_RESP_STATE) || (eLIM_MLM_PASSIVE_SCAN_STATE)
    else
    {
        limLog(pMac, LOG1, FL("Rcvd Beacon in unexpected MLM state %s (%d)"),
               limMlmStateStr(pMac->lim.gLimMlmState), pMac->lim.gLimMlmState);
        limPrintMlmState(pMac, LOG1, pMac->lim.gLimMlmState);
#ifdef WLAN_DEBUG
        pMac->lim.gLimUnexpBcnCnt++;
#endif
    }

    return;
} /*** end limProcessBeaconFrameNoSession() ***/
| gpl-2.0 |
bradfa/linux | drivers/media/i2c/mt9m032.c | 297 | 25305 | /*
* Driver for MT9M032 CMOS Image Sensor from Micron
*
* Copyright (C) 2010-2011 Lund Engineering
* Contact: Gil Lund <gwlund@lundeng.com>
* Author: Martin Hostettler <martin@neutronstar.dyndns.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
* 02110-1301 USA
*/
#include <linux/delay.h>
#include <linux/i2c.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/math64.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/v4l2-mediabus.h>
#include <media/media-entity.h>
#include <media/i2c/mt9m032.h>
#include <media/v4l2-ctrls.h>
#include <media/v4l2-device.h>
#include <media/v4l2-subdev.h>
#include "aptina-pll.h"
/*
* width and height include active boundary and black parts
*
* column 0- 15 active boundary
* column 16-1455 image
* column 1456-1471 active boundary
* column 1472-1599 black
*
* row 0- 51 black
* row 53- 59 active boundary
* row 60-1139 image
* row 1140-1147 active boundary
* row 1148-1151 black
*/
#define MT9M032_PIXEL_ARRAY_WIDTH 1600
#define MT9M032_PIXEL_ARRAY_HEIGHT 1152
#define MT9M032_CHIP_VERSION 0x00
#define MT9M032_CHIP_VERSION_VALUE 0x1402
#define MT9M032_ROW_START 0x01
#define MT9M032_ROW_START_MIN 0
#define MT9M032_ROW_START_MAX 1152
#define MT9M032_ROW_START_DEF 60
#define MT9M032_COLUMN_START 0x02
#define MT9M032_COLUMN_START_MIN 0
#define MT9M032_COLUMN_START_MAX 1600
#define MT9M032_COLUMN_START_DEF 16
#define MT9M032_ROW_SIZE 0x03
#define MT9M032_ROW_SIZE_MIN 32
#define MT9M032_ROW_SIZE_MAX 1152
#define MT9M032_ROW_SIZE_DEF 1080
#define MT9M032_COLUMN_SIZE 0x04
#define MT9M032_COLUMN_SIZE_MIN 32
#define MT9M032_COLUMN_SIZE_MAX 1600
#define MT9M032_COLUMN_SIZE_DEF 1440
#define MT9M032_HBLANK 0x05
#define MT9M032_VBLANK 0x06
#define MT9M032_VBLANK_MAX 0x7ff
#define MT9M032_SHUTTER_WIDTH_HIGH 0x08
#define MT9M032_SHUTTER_WIDTH_LOW 0x09
#define MT9M032_SHUTTER_WIDTH_MIN 1
#define MT9M032_SHUTTER_WIDTH_MAX 1048575
#define MT9M032_SHUTTER_WIDTH_DEF 1943
#define MT9M032_PIX_CLK_CTRL 0x0a
#define MT9M032_PIX_CLK_CTRL_INV_PIXCLK 0x8000
#define MT9M032_RESTART 0x0b
#define MT9M032_RESET 0x0d
#define MT9M032_PLL_CONFIG1 0x11
#define MT9M032_PLL_CONFIG1_PREDIV_MASK 0x3f
#define MT9M032_PLL_CONFIG1_MUL_SHIFT 8
#define MT9M032_READ_MODE1 0x1e
#define MT9M032_READ_MODE1_OUTPUT_BAD_FRAMES (1 << 13)
#define MT9M032_READ_MODE1_MAINTAIN_FRAME_RATE (1 << 12)
#define MT9M032_READ_MODE1_XOR_LINE_VALID (1 << 11)
#define MT9M032_READ_MODE1_CONT_LINE_VALID (1 << 10)
#define MT9M032_READ_MODE1_INVERT_TRIGGER (1 << 9)
#define MT9M032_READ_MODE1_SNAPSHOT (1 << 8)
#define MT9M032_READ_MODE1_GLOBAL_RESET (1 << 7)
#define MT9M032_READ_MODE1_BULB_EXPOSURE (1 << 6)
#define MT9M032_READ_MODE1_INVERT_STROBE (1 << 5)
#define MT9M032_READ_MODE1_STROBE_ENABLE (1 << 4)
#define MT9M032_READ_MODE1_STROBE_START_TRIG1 (0 << 2)
#define MT9M032_READ_MODE1_STROBE_START_EXP (1 << 2)
#define MT9M032_READ_MODE1_STROBE_START_SHUTTER (2 << 2)
#define MT9M032_READ_MODE1_STROBE_START_TRIG2 (3 << 2)
#define MT9M032_READ_MODE1_STROBE_END_TRIG1 (0 << 0)
#define MT9M032_READ_MODE1_STROBE_END_EXP (1 << 0)
#define MT9M032_READ_MODE1_STROBE_END_SHUTTER (2 << 0)
#define MT9M032_READ_MODE1_STROBE_END_TRIG2 (3 << 0)
#define MT9M032_READ_MODE2 0x20
#define MT9M032_READ_MODE2_VFLIP_SHIFT 15
#define MT9M032_READ_MODE2_HFLIP_SHIFT 14
#define MT9M032_READ_MODE2_ROW_BLC 0x40
#define MT9M032_GAIN_GREEN1 0x2b
#define MT9M032_GAIN_BLUE 0x2c
#define MT9M032_GAIN_RED 0x2d
#define MT9M032_GAIN_GREEN2 0x2e
/* write only */
#define MT9M032_GAIN_ALL 0x35
#define MT9M032_GAIN_DIGITAL_MASK 0x7f
#define MT9M032_GAIN_DIGITAL_SHIFT 8
#define MT9M032_GAIN_AMUL_SHIFT 6
#define MT9M032_GAIN_ANALOG_MASK 0x3f
#define MT9M032_FORMATTER1 0x9e
#define MT9M032_FORMATTER1_PLL_P1_6 (1 << 8)
#define MT9M032_FORMATTER1_PARALLEL (1 << 12)
#define MT9M032_FORMATTER2 0x9f
#define MT9M032_FORMATTER2_DOUT_EN 0x1000
#define MT9M032_FORMATTER2_PIXCLK_EN 0x2000
/*
* The available MT9M032 datasheet is missing documentation for register 0x10
* MT9P031 seems to be close enough, so use constants from that datasheet for
* now.
* But keep the name MT9P031 to remind us, that this isn't really confirmed
* for this sensor.
*/
#define MT9P031_PLL_CONTROL 0x10
#define MT9P031_PLL_CONTROL_PWROFF 0x0050
#define MT9P031_PLL_CONTROL_PWRON 0x0051
#define MT9P031_PLL_CONTROL_USEPLL 0x0052
/*
 * struct mt9m032 - per-sensor driver state
 * @subdev: embedded V4L2 subdevice
 * @pad: the single source media pad
 * @pdata: board-specific clock/configuration data
 * @pix_clock: pixel clock programmed by mt9m032_setup_pll()
 * @ctrls: V4L2 control handler
 * @hflip/@vflip: flip controls; clustered together in probe so they are
 *                applied in a single READ_MODE2 write
 * @lock: protects streaming, format, interval and crop
 * @streaming: true while the pixel clock output is enabled
 * @format: active media bus format
 * @crop: active crop rectangle
 * @frame_interval: active frame interval
 */
struct mt9m032 {
	struct v4l2_subdev subdev;
	struct media_pad pad;
	struct mt9m032_platform_data *pdata;
	unsigned int pix_clock;
	struct v4l2_ctrl_handler ctrls;
	struct {
		/* anonymous struct keeps the clustered controls adjacent */
		struct v4l2_ctrl *hflip;
		struct v4l2_ctrl *vflip;
	};
	struct mutex lock; /* Protects streaming, format, interval and crop */
	bool streaming;
	struct v4l2_mbus_framefmt format;
	struct v4l2_rect crop;
	struct v4l2_fract frame_interval;
};
#define to_mt9m032(sd) container_of(sd, struct mt9m032, subdev)
#define to_dev(sensor) \
(&((struct i2c_client *)v4l2_get_subdevdata(&(sensor)->subdev))->dev)
/* Read a 16-bit sensor register (big-endian on the wire) over SMBus. */
static int mt9m032_read(struct i2c_client *client, u8 reg)
{
	int value;

	value = i2c_smbus_read_word_swapped(client, reg);
	return value;
}
/* Write a 16-bit sensor register (big-endian on the wire) over SMBus. */
static int mt9m032_write(struct i2c_client *client, u8 reg, const u16 data)
{
	int ret;

	ret = i2c_smbus_write_word_swapped(client, reg, data);
	return ret;
}
/*
 * mt9m032_row_time - duration of one sensor row in nanoseconds
 * @sensor: sensor instance (supplies the pixel clock)
 * @width: active row width in pixels
 */
static u32 mt9m032_row_time(struct mt9m032 *sensor, unsigned int width)
{
	unsigned int total_width;
	u32 row_ns;

	/* per-row overhead of 716 pixel clocks: empirical value */
	total_width = width + 716;
	row_ns = div_u64(1000000000ULL * total_width, sensor->pix_clock);

	dev_dbg(to_dev(sensor), "MT9M032 line time: %u ns\n", row_ns);
	return row_ns;
}
/*
 * mt9m032_update_timing - program VBLANK to match a frame interval
 * @sensor: sensor instance
 * @interval: requested interval, or NULL to reuse the stored one.  May be
 *            rewritten if the request exceeds the 11-bit VBLANK range.
 *
 * Derives vertical blanking from the interval and the per-row time, clamps
 * it to [min 1.6 ms blanking, MT9M032_VBLANK_MAX] and writes the register.
 * Returns the result of the i2c write.
 */
static int mt9m032_update_timing(struct mt9m032 *sensor,
				 struct v4l2_fract *interval)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
	struct v4l2_rect *crop = &sensor->crop;
	unsigned int min_vblank;
	unsigned int vblank;
	u32 row_time;

	if (!interval)
		interval = &sensor->frame_interval;

	row_time = mt9m032_row_time(sensor, crop->width);

	/* total rows per frame minus the visible rows gives the blanking */
	vblank = div_u64(1000000000ULL * interval->numerator,
			 (u64)row_time * interval->denominator)
	       - crop->height;

	if (vblank > MT9M032_VBLANK_MAX) {
		/* hardware limits to 11 bit values */
		/* recompute the longest achievable interval, then its vblank */
		interval->denominator = 1000;
		interval->numerator =
			div_u64((crop->height + MT9M032_VBLANK_MAX) *
				(u64)row_time * interval->denominator,
				1000000000ULL);
		vblank = div_u64(1000000000ULL * interval->numerator,
				 (u64)row_time * interval->denominator)
		       - crop->height;
	}
	/* enforce minimal 1.6ms blanking time. */
	min_vblank = 1600000 / row_time;
	vblank = clamp_t(unsigned int, vblank, min_vblank, MT9M032_VBLANK_MAX);

	return mt9m032_write(client, MT9M032_VBLANK, vblank);
}
/*
 * mt9m032_update_geom_timing - push the current crop geometry to the sensor
 *
 * Writes window size (the size registers take size - 1) and origin, then
 * refreshes the blanking timing for the new geometry.  Stops at the first
 * failing i2c write and returns its error code; 0 on success.
 */
static int mt9m032_update_geom_timing(struct mt9m032 *sensor)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
	int ret;

	ret = mt9m032_write(client, MT9M032_COLUMN_SIZE,
			    sensor->crop.width - 1);
	if (!ret)
		ret = mt9m032_write(client, MT9M032_ROW_SIZE,
				    sensor->crop.height - 1);
	if (!ret)
		ret = mt9m032_write(client, MT9M032_COLUMN_START,
				    sensor->crop.left);
	if (!ret)
		ret = mt9m032_write(client, MT9M032_ROW_START,
				    sensor->crop.top);
	if (!ret)
		/* blanking depends on the crop width/height set above */
		ret = mt9m032_update_timing(sensor, NULL);
	return ret;
}
/* Gate the pixel-clock output on/off through the FORMATTER2 register. */
static int update_formatter2(struct mt9m032 *sensor, bool streaming)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
	u16 reg_val;

	/* 0x0070: reserved bits, possibly for changing to 14-bit mode */
	reg_val = MT9M032_FORMATTER2_DOUT_EN | 0x0070;
	if (streaming)
		reg_val |= MT9M032_FORMATTER2_PIXCLK_EN; /* pixclock enable */

	return mt9m032_write(client, MT9M032_FORMATTER2, reg_val);
}
/*
 * mt9m032_setup_pll - compute and program the sensor PLL
 *
 * Uses the aptina-pll helper to derive (n, m, p1) for the board's external
 * and target pixel clocks, then programs the PLL dividers, powers the PLL
 * on, configures READ_MODE1 and the output formatter.  Returns 0 on
 * success, or the first error from the helper or an i2c write.
 */
static int mt9m032_setup_pll(struct mt9m032 *sensor)
{
	/* hardware limits of the PLL, used by aptina_pll_calculate() */
	static const struct aptina_pll_limits limits = {
		.ext_clock_min = 8000000,
		.ext_clock_max = 16500000,
		.int_clock_min = 2000000,
		.int_clock_max = 24000000,
		.out_clock_min = 322000000,
		.out_clock_max = 693000000,
		.pix_clock_max = 99000000,
		.n_min = 1,
		.n_max = 64,
		.m_min = 16,
		.m_max = 255,
		.p1_min = 6,
		.p1_max = 7,
	};
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
	struct mt9m032_platform_data *pdata = sensor->pdata;
	struct aptina_pll pll;
	u16 reg_val;
	int ret;

	pll.ext_clock = pdata->ext_clock;
	pll.pix_clock = pdata->pix_clock;

	ret = aptina_pll_calculate(&client->dev, &limits, &pll);
	if (ret < 0)
		return ret;

	/* remember the pixel clock for row-time calculations */
	sensor->pix_clock = pdata->pix_clock;

	ret = mt9m032_write(client, MT9M032_PLL_CONFIG1,
			    (pll.m << MT9M032_PLL_CONFIG1_MUL_SHIFT) |
			    ((pll.n - 1) & MT9M032_PLL_CONFIG1_PREDIV_MASK));
	if (!ret)
		ret = mt9m032_write(client, MT9P031_PLL_CONTROL,
				    MT9P031_PLL_CONTROL_PWRON |
				    MT9P031_PLL_CONTROL_USEPLL);
	if (!ret) /* more reserved, Continuous, Master Mode */
		ret = mt9m032_write(client, MT9M032_READ_MODE1, 0x8000 |
				    MT9M032_READ_MODE1_STROBE_START_EXP |
				    MT9M032_READ_MODE1_STROBE_END_SHUTTER);
	if (!ret) {
		reg_val = (pll.p1 == 6 ? MT9M032_FORMATTER1_PLL_P1_6 : 0)
			| MT9M032_FORMATTER1_PARALLEL | 0x001e; /* 14-bit */
		ret = mt9m032_write(client, MT9M032_FORMATTER1, reg_val);
	}

	return ret;
}
/* -----------------------------------------------------------------------------
* Subdev pad operations
*/
/* Pad op: enumerate the single supported media bus format. */
static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev,
				  struct v4l2_subdev_pad_config *cfg,
				  struct v4l2_subdev_mbus_code_enum *code)
{
	if (code->index)
		return -EINVAL;

	code->code = MEDIA_BUS_FMT_Y8_1X8;
	return 0;
}
/* Pad op: enumerate the single (fixed) frame size. */
static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev,
				   struct v4l2_subdev_pad_config *cfg,
				   struct v4l2_subdev_frame_size_enum *fse)
{
	if (fse->index || fse->code != MEDIA_BUS_FMT_Y8_1X8)
		return -EINVAL;

	/* min == max: the size is not negotiable */
	fse->min_width = MT9M032_COLUMN_SIZE_DEF;
	fse->max_width = MT9M032_COLUMN_SIZE_DEF;
	fse->min_height = MT9M032_ROW_SIZE_DEF;
	fse->max_height = MT9M032_ROW_SIZE_DEF;

	return 0;
}
/**
 * __mt9m032_get_pad_crop() - get crop rect
 * @sensor: pointer to the sensor struct
 * @cfg: v4l2_subdev_pad_config for getting the try crop rect from
 * @which: select try or active crop rect
 *
 * Return: the active crop rectangle, the per-filehandle try rectangle, or
 * NULL for an unknown @which selector.
 */
static struct v4l2_rect *
__mt9m032_get_pad_crop(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cfg,
		       enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_crop(&sensor->subdev, cfg, 0);

	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
		return &sensor->crop;

	return NULL;
}
/**
 * __mt9m032_get_pad_format() - get format
 * @sensor: pointer to the sensor struct
 * @cfg: v4l2_subdev_pad_config for getting the try format from
 * @which: select try or active format
 *
 * Return: the active format, the per-filehandle try format, or NULL for an
 * unknown @which selector.
 */
static struct v4l2_mbus_framefmt *
__mt9m032_get_pad_format(struct mt9m032 *sensor, struct v4l2_subdev_pad_config *cfg,
			 enum v4l2_subdev_format_whence which)
{
	if (which == V4L2_SUBDEV_FORMAT_TRY)
		return v4l2_subdev_get_try_format(&sensor->subdev, cfg, 0);

	if (which == V4L2_SUBDEV_FORMAT_ACTIVE)
		return &sensor->format;

	return NULL;
}
static int mt9m032_get_pad_format(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_format *fmt)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
mutex_lock(&sensor->lock);
fmt->format = *__mt9m032_get_pad_format(sensor, cfg, fmt->which);
mutex_unlock(&sensor->lock);
return 0;
}
/*
 * Pad op: "set" the format.  Scaling is not supported, so the format is
 * fixed; the request is answered with the current format.  Changing the
 * active format while streaming is rejected with -EBUSY.
 */
static int mt9m032_set_pad_format(struct v4l2_subdev *subdev,
				  struct v4l2_subdev_pad_config *cfg,
				  struct v4l2_subdev_format *fmt)
{
	struct mt9m032 *sensor = to_mt9m032(subdev);
	int ret = 0;

	mutex_lock(&sensor->lock);

	if (sensor->streaming && fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
		ret = -EBUSY;
	} else {
		/* Scaling is not supported, the format is thus fixed. */
		fmt->format = *__mt9m032_get_pad_format(sensor, cfg, fmt->which);
	}

	mutex_unlock(&sensor->lock);
	return ret;
}
static int mt9m032_get_pad_selection(struct v4l2_subdev *subdev,
struct v4l2_subdev_pad_config *cfg,
struct v4l2_subdev_selection *sel)
{
struct mt9m032 *sensor = to_mt9m032(subdev);
if (sel->target != V4L2_SEL_TGT_CROP)
return -EINVAL;
mutex_lock(&sensor->lock);
sel->r = *__mt9m032_get_pad_crop(sensor, cfg, sel->which);
mutex_unlock(&sensor->lock);
return 0;
}
/*
 * mt9m032_set_pad_selection - set the crop rectangle (V4L2_SEL_TGT_CROP only)
 *
 * Clamps and 2-aligns the requested rectangle, keeps it inside the pixel
 * array, resets the output format size when the crop size changed, and
 * reprograms the sensor geometry for the active selection.  Returns -EBUSY
 * when the active crop is changed while streaming.
 */
static int mt9m032_set_pad_selection(struct v4l2_subdev *subdev,
				     struct v4l2_subdev_pad_config *cfg,
				     struct v4l2_subdev_selection *sel)
{
	struct mt9m032 *sensor = to_mt9m032(subdev);
	struct v4l2_mbus_framefmt *format;
	struct v4l2_rect *__crop;
	struct v4l2_rect rect;
	int ret = 0;

	if (sel->target != V4L2_SEL_TGT_CROP)
		return -EINVAL;

	mutex_lock(&sensor->lock);

	if (sensor->streaming && sel->which == V4L2_SUBDEV_FORMAT_ACTIVE) {
		ret = -EBUSY;
		goto done;
	}

	/* Clamp the crop rectangle boundaries and align them to a multiple of 2
	 * pixels to ensure a GRBG Bayer pattern.
	 */
	rect.left = clamp(ALIGN(sel->r.left, 2), MT9M032_COLUMN_START_MIN,
			  MT9M032_COLUMN_START_MAX);
	rect.top = clamp(ALIGN(sel->r.top, 2), MT9M032_ROW_START_MIN,
			 MT9M032_ROW_START_MAX);
	rect.width = clamp_t(unsigned int, ALIGN(sel->r.width, 2),
			     MT9M032_COLUMN_SIZE_MIN, MT9M032_COLUMN_SIZE_MAX);
	rect.height = clamp_t(unsigned int, ALIGN(sel->r.height, 2),
			      MT9M032_ROW_SIZE_MIN, MT9M032_ROW_SIZE_MAX);

	/* keep the rectangle fully inside the pixel array */
	rect.width = min_t(unsigned int, rect.width,
			   MT9M032_PIXEL_ARRAY_WIDTH - rect.left);
	rect.height = min_t(unsigned int, rect.height,
			    MT9M032_PIXEL_ARRAY_HEIGHT - rect.top);

	__crop = __mt9m032_get_pad_crop(sensor, cfg, sel->which);

	if (rect.width != __crop->width || rect.height != __crop->height) {
		/* Reset the output image size if the crop rectangle size has
		 * been modified.
		 */
		format = __mt9m032_get_pad_format(sensor, cfg, sel->which);
		format->width = rect.width;
		format->height = rect.height;
	}

	*__crop = rect;
	sel->r = rect;

	if (sel->which == V4L2_SUBDEV_FORMAT_ACTIVE)
		ret = mt9m032_update_geom_timing(sensor);

done:
	mutex_unlock(&sensor->lock);
	return ret;
}
/* Video op: report the current frame interval. */
static int mt9m032_get_frame_interval(struct v4l2_subdev *subdev,
				      struct v4l2_subdev_frame_interval *fi)
{
	struct mt9m032 *sensor = to_mt9m032(subdev);

	/* fi is caller-owned, clearing it needs no locking */
	memset(fi, 0, sizeof(*fi));

	mutex_lock(&sensor->lock);
	fi->interval = sensor->frame_interval;
	mutex_unlock(&sensor->lock);

	return 0;
}
/* Video op: set the frame interval; rejected with -EBUSY while streaming. */
static int mt9m032_set_frame_interval(struct v4l2_subdev *subdev,
				      struct v4l2_subdev_frame_interval *fi)
{
	struct mt9m032 *sensor = to_mt9m032(subdev);
	int ret = 0;

	mutex_lock(&sensor->lock);

	if (sensor->streaming) {
		ret = -EBUSY;
	} else {
		/* Avoid divisions by 0. */
		if (!fi->interval.denominator)
			fi->interval.denominator = 1;

		ret = mt9m032_update_timing(sensor, &fi->interval);
		if (!ret)
			sensor->frame_interval = fi->interval;
	}

	mutex_unlock(&sensor->lock);
	return ret;
}
/* Video op: start/stop streaming by gating the pixel clock output. */
static int mt9m032_s_stream(struct v4l2_subdev *subdev, int streaming)
{
	struct mt9m032 *sensor = to_mt9m032(subdev);
	int ret;

	mutex_lock(&sensor->lock);
	ret = update_formatter2(sensor, streaming);
	if (ret == 0)
		sensor->streaming = streaming;
	mutex_unlock(&sensor->lock);

	return ret;
}
/* -----------------------------------------------------------------------------
* V4L2 subdev core operations
*/
#ifdef CONFIG_VIDEO_ADV_DEBUG
/* Debug op (CONFIG_VIDEO_ADV_DEBUG): read an arbitrary sensor register. */
static int mt9m032_g_register(struct v4l2_subdev *sd,
			      struct v4l2_dbg_register *reg)
{
	struct mt9m032 *sensor = to_mt9m032(sd);
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
	int result;

	/* register addresses are 8-bit */
	if (reg->reg > 0xff)
		return -EINVAL;

	result = mt9m032_read(client, reg->reg);
	if (result < 0)
		return -EIO;

	reg->val = result;
	reg->size = 2;

	return 0;
}
/* Debug op (CONFIG_VIDEO_ADV_DEBUG): write an arbitrary sensor register. */
static int mt9m032_s_register(struct v4l2_subdev *sd,
			      const struct v4l2_dbg_register *reg)
{
	struct mt9m032 *sensor = to_mt9m032(sd);
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);

	/* register addresses are 8-bit */
	return reg->reg > 0xff ? -EINVAL
			       : mt9m032_write(client, reg->reg, reg->val);
}
#endif
/* -----------------------------------------------------------------------------
* V4L2 subdev control operations
*/
/*
 * update_read_mode2 - program vertical/horizontal flip into READ_MODE2
 *
 * Row-wise black level calibration stays enabled; the low 0x0007 bits are
 * kept set (their purpose is not documented in this driver).
 */
static int update_read_mode2(struct mt9m032 *sensor, bool vflip, bool hflip)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
	int reg_val = (vflip << MT9M032_READ_MODE2_VFLIP_SHIFT)
		    | (hflip << MT9M032_READ_MODE2_HFLIP_SHIFT)
		    | MT9M032_READ_MODE2_ROW_BLC
		    | 0x0007;

	return mt9m032_write(client, MT9M032_READ_MODE2, reg_val);
}
/*
 * mt9m032_set_gain - apply the V4L2_CID_GAIN control value (0..127)
 *
 * Values below 63 map directly to the analog gain field; larger values use
 * the analog 2x multiplier with half the value, combined with a fixed
 * digital gain.  Writes everything in one GAIN_ALL register access.
 */
static int mt9m032_set_gain(struct mt9m032 *sensor, s32 val)
{
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
	int digital_gain_val;	/* in 1/8th (0..127) */
	int analog_mul;		/* 0 or 1 */
	int analog_gain_val;	/* in 1/16th. (0..63) */
	u16 reg_val;

	digital_gain_val = 51; /* from setup example */

	if (val < 63) {
		analog_mul = 0;
		analog_gain_val = val;
	} else {
		analog_mul = 1;
		analog_gain_val = val / 2;
	}

	/* a_gain = (1 + analog_mul) + (analog_gain_val + 1) / 16 */
	/* overall_gain = a_gain * (1 + digital_gain_val / 8) */

	reg_val = ((digital_gain_val & MT9M032_GAIN_DIGITAL_MASK)
		   << MT9M032_GAIN_DIGITAL_SHIFT)
		| ((analog_mul & 1) << MT9M032_GAIN_AMUL_SHIFT)
		| (analog_gain_val & MT9M032_GAIN_ANALOG_MASK);

	return mt9m032_write(client, MT9M032_GAIN_ALL, reg_val);
}
static int mt9m032_try_ctrl(struct v4l2_ctrl *ctrl)
{
if (ctrl->id == V4L2_CID_GAIN && ctrl->val >= 63) {
/* round because of multiplier used for values >= 63 */
ctrl->val &= ~1;
}
return 0;
}
/*
 * mt9m032_set_ctrl - apply a control value to the hardware
 *
 * GAIN goes through mt9m032_set_gain(); HFLIP/VFLIP are clustered and
 * written together via update_read_mode2(); EXPOSURE is a 20-bit shutter
 * width split over two registers.  Unknown control IDs are ignored.
 */
static int mt9m032_set_ctrl(struct v4l2_ctrl *ctrl)
{
	struct mt9m032 *sensor =
		container_of(ctrl->handler, struct mt9m032, ctrls);
	struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev);
	int ret;

	switch (ctrl->id) {
	case V4L2_CID_GAIN:
		return mt9m032_set_gain(sensor, ctrl->val);

	case V4L2_CID_HFLIP:
	/* case V4L2_CID_VFLIP: -- In the same cluster */
		return update_read_mode2(sensor, sensor->vflip->val,
					 sensor->hflip->val);

	case V4L2_CID_EXPOSURE:
		/* high 4 bits first, then the low 16 bits */
		ret = mt9m032_write(client, MT9M032_SHUTTER_WIDTH_HIGH,
				    (ctrl->val >> 16) & 0xffff);
		if (ret < 0)
			return ret;

		return mt9m032_write(client, MT9M032_SHUTTER_WIDTH_LOW,
				     ctrl->val & 0xffff);
	}

	return 0;
}
/* Control operations: s_ctrl applies values, try_ctrl rounds the gain. */
static const struct v4l2_ctrl_ops mt9m032_ctrl_ops = {
	.s_ctrl = mt9m032_set_ctrl,
	.try_ctrl = mt9m032_try_ctrl,
};

/* -------------------------------------------------------------------------- */

/* Core ops: only the debug register accessors, when enabled. */
static const struct v4l2_subdev_core_ops mt9m032_core_ops = {
#ifdef CONFIG_VIDEO_ADV_DEBUG
	.g_register = mt9m032_g_register,
	.s_register = mt9m032_s_register,
#endif
};

/* Video ops: streaming control and frame-interval get/set. */
static const struct v4l2_subdev_video_ops mt9m032_video_ops = {
	.s_stream = mt9m032_s_stream,
	.g_frame_interval = mt9m032_get_frame_interval,
	.s_frame_interval = mt9m032_set_frame_interval,
};

/* Pad ops: format and selection handling on the single source pad. */
static const struct v4l2_subdev_pad_ops mt9m032_pad_ops = {
	.enum_mbus_code = mt9m032_enum_mbus_code,
	.enum_frame_size = mt9m032_enum_frame_size,
	.get_fmt = mt9m032_get_pad_format,
	.set_fmt = mt9m032_set_pad_format,
	.set_selection = mt9m032_set_pad_selection,
	.get_selection = mt9m032_get_pad_selection,
};

/* Aggregated subdev operation tables. */
static const struct v4l2_subdev_ops mt9m032_ops = {
	.core = &mt9m032_core_ops,
	.video = &mt9m032_video_ops,
	.pad = &mt9m032_pad_ops,
};
/* -----------------------------------------------------------------------------
* Driver initialization and probing
*/
/*
 * mt9m032_probe - probe and initialize one MT9M032 sensor
 *
 * Validates platform data and adapter capabilities, registers the V4L2
 * subdev, verifies the chip ID, sets default format/crop/interval, creates
 * the controls and the media entity, and runs the hardware bring-up
 * sequence (reset, PLL, controls, geometry, reserved registers, restart).
 *
 * Fix: removed the duplicate "if (!client->dev.platform_data) return
 * -ENODEV;" check — pdata is the same pointer and a NULL value already
 * returns -EINVAL at the top, so that branch was unreachable dead code.
 */
static int mt9m032_probe(struct i2c_client *client,
			 const struct i2c_device_id *devid)
{
	struct mt9m032_platform_data *pdata = client->dev.platform_data;
	struct i2c_adapter *adapter = client->adapter;
	struct mt9m032 *sensor;
	int chip_version;
	int ret;

	if (pdata == NULL) {
		dev_err(&client->dev, "No platform data\n");
		return -EINVAL;
	}

	if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) {
		dev_warn(&client->dev,
			 "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n");
		return -EIO;
	}

	sensor = devm_kzalloc(&client->dev, sizeof(*sensor), GFP_KERNEL);
	if (sensor == NULL)
		return -ENOMEM;

	mutex_init(&sensor->lock);

	sensor->pdata = pdata;

	v4l2_i2c_subdev_init(&sensor->subdev, client, &mt9m032_ops);
	sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;

	chip_version = mt9m032_read(client, MT9M032_CHIP_VERSION);
	if (chip_version != MT9M032_CHIP_VERSION_VALUE) {
		dev_err(&client->dev, "MT9M032 not detected, wrong version "
			"0x%04x\n", chip_version);
		ret = -ENODEV;
		goto error_sensor;
	}

	dev_info(&client->dev, "MT9M032 detected at address 0x%02x\n",
		 client->addr);

	/* Defaults: 30 fps, default window, Y8 media bus format. */
	sensor->frame_interval.numerator = 1;
	sensor->frame_interval.denominator = 30;

	sensor->crop.left = MT9M032_COLUMN_START_DEF;
	sensor->crop.top = MT9M032_ROW_START_DEF;
	sensor->crop.width = MT9M032_COLUMN_SIZE_DEF;
	sensor->crop.height = MT9M032_ROW_SIZE_DEF;

	sensor->format.width = sensor->crop.width;
	sensor->format.height = sensor->crop.height;
	sensor->format.code = MEDIA_BUS_FMT_Y8_1X8;
	sensor->format.field = V4L2_FIELD_NONE;
	sensor->format.colorspace = V4L2_COLORSPACE_SRGB;

	v4l2_ctrl_handler_init(&sensor->ctrls, 5);

	v4l2_ctrl_new_std(&sensor->ctrls, &mt9m032_ctrl_ops,
			  V4L2_CID_GAIN, 0, 127, 1, 64);

	sensor->hflip = v4l2_ctrl_new_std(&sensor->ctrls,
					  &mt9m032_ctrl_ops,
					  V4L2_CID_HFLIP, 0, 1, 1, 0);
	sensor->vflip = v4l2_ctrl_new_std(&sensor->ctrls,
					  &mt9m032_ctrl_ops,
					  V4L2_CID_VFLIP, 0, 1, 1, 0);

	v4l2_ctrl_new_std(&sensor->ctrls, &mt9m032_ctrl_ops,
			  V4L2_CID_EXPOSURE, MT9M032_SHUTTER_WIDTH_MIN,
			  MT9M032_SHUTTER_WIDTH_MAX, 1,
			  MT9M032_SHUTTER_WIDTH_DEF);
	v4l2_ctrl_new_std(&sensor->ctrls, &mt9m032_ctrl_ops,
			  V4L2_CID_PIXEL_RATE, pdata->pix_clock,
			  pdata->pix_clock, 1, pdata->pix_clock);

	if (sensor->ctrls.error) {
		ret = sensor->ctrls.error;
		dev_err(&client->dev, "control initialization error %d\n", ret);
		goto error_ctrl;
	}

	/* hflip and vflip are applied together; see mt9m032_set_ctrl(). */
	v4l2_ctrl_cluster(2, &sensor->hflip);

	sensor->subdev.ctrl_handler = &sensor->ctrls;
	sensor->pad.flags = MEDIA_PAD_FL_SOURCE;
	ret = media_entity_pads_init(&sensor->subdev.entity, 1, &sensor->pad);
	if (ret < 0)
		goto error_ctrl;

	ret = mt9m032_write(client, MT9M032_RESET, 1);	/* reset on */
	if (ret < 0)
		goto error_entity;

	ret = mt9m032_write(client, MT9M032_RESET, 0);	/* reset off */
	if (ret < 0)
		goto error_entity;

	ret = mt9m032_setup_pll(sensor);
	if (ret < 0)
		goto error_entity;
	usleep_range(10000, 11000);

	ret = v4l2_ctrl_handler_setup(&sensor->ctrls);
	if (ret < 0)
		goto error_entity;

	/* SIZE */
	ret = mt9m032_update_geom_timing(sensor);
	if (ret < 0)
		goto error_entity;

	ret = mt9m032_write(client, 0x41, 0x0000);	/* reserved !!! */
	if (ret < 0)
		goto error_entity;
	ret = mt9m032_write(client, 0x42, 0x0003);	/* reserved !!! */
	if (ret < 0)
		goto error_entity;
	ret = mt9m032_write(client, 0x43, 0x0003);	/* reserved !!! */
	if (ret < 0)
		goto error_entity;
	ret = mt9m032_write(client, 0x7f, 0x0000);	/* reserved !!! */
	if (ret < 0)
		goto error_entity;

	if (sensor->pdata->invert_pixclock) {
		ret = mt9m032_write(client, MT9M032_PIX_CLK_CTRL,
				    MT9M032_PIX_CLK_CTRL_INV_PIXCLK);
		if (ret < 0)
			goto error_entity;
	}

	ret = mt9m032_write(client, MT9M032_RESTART, 1); /* Restart on */
	if (ret < 0)
		goto error_entity;
	msleep(100);
	ret = mt9m032_write(client, MT9M032_RESTART, 0); /* Restart off */
	if (ret < 0)
		goto error_entity;
	msleep(100);
	ret = update_formatter2(sensor, false);
	if (ret < 0)
		goto error_entity;

	return ret;

error_entity:
	media_entity_cleanup(&sensor->subdev.entity);
error_ctrl:
	v4l2_ctrl_handler_free(&sensor->ctrls);
error_sensor:
	mutex_destroy(&sensor->lock);
	return ret;
}
/* i2c remove: tear down subdev, controls and media entity in reverse order
 * of probe; sensor memory itself is devm-managed. */
static int mt9m032_remove(struct i2c_client *client)
{
	struct v4l2_subdev *subdev = i2c_get_clientdata(client);
	struct mt9m032 *sensor = to_mt9m032(subdev);

	v4l2_device_unregister_subdev(subdev);
	v4l2_ctrl_handler_free(&sensor->ctrls);
	media_entity_cleanup(&subdev->entity);
	mutex_destroy(&sensor->lock);
	return 0;
}
/* Supported device names; MT9M032_NAME comes from the platform header. */
static const struct i2c_device_id mt9m032_id_table[] = {
	{ MT9M032_NAME, 0 },
	{ }
};

MODULE_DEVICE_TABLE(i2c, mt9m032_id_table);

/* Plain I2C driver registration (platform-data based, no OF matching). */
static struct i2c_driver mt9m032_i2c_driver = {
	.driver = {
		.name = MT9M032_NAME,
	},
	.probe = mt9m032_probe,
	.remove = mt9m032_remove,
	.id_table = mt9m032_id_table,
};

module_i2c_driver(mt9m032_i2c_driver);

MODULE_AUTHOR("Martin Hostettler <martin@neutronstar.dyndns.org>");
MODULE_DESCRIPTION("MT9M032 camera sensor driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
eskyuu/linux | drivers/gpu/drm/nouveau/nvkm/subdev/mc/g94.c | 553 | 1470 | /*
* Copyright 2012 Red Hat Inc.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
*
* Authors: Ben Skeggs
*/
#include "nv04.h"
/*
 * Master-control (MC) subdev class for the G94 chipset: reuses the NV04
 * constructor, NV50 init and interrupt table, and the NV40-style MSI
 * rearm handler.
 */
struct nvkm_oclass *
g94_mc_oclass = &(struct nvkm_mc_oclass) {
	.base.handle = NV_SUBDEV(MC, 0x94),
	.base.ofuncs = &(struct nvkm_ofuncs) {
		.ctor = nv04_mc_ctor,
		.dtor = _nvkm_mc_dtor,
		.init = nv50_mc_init,
		.fini = _nvkm_mc_fini,
	},
	.intr = nv50_mc_intr,
	.msi_rearm = nv40_mc_msi_rearm,
}.base;
| gpl-2.0 |
rukin5197/android_kernel_htc_msm7x30 | net/mac80211/main.c | 809 | 29654 | /*
* Copyright 2002-2005, Instant802 Networks, Inc.
* Copyright 2005-2006, Devicescape Software, Inc.
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <net/mac80211.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/bitmap.h>
#include <linux/pm_qos_params.h>
#include <linux/inetdevice.h>
#include <net/net_namespace.h>
#include <net/cfg80211.h>
#include "ieee80211_i.h"
#include "driver-ops.h"
#include "rate.h"
#include "mesh.h"
#include "wep.h"
#include "led.h"
#include "cfg.h"
#include "debugfs.h"
static struct lock_class_key ieee80211_rx_skb_queue_class;
/*
 * ieee80211_configure_filter - recompute and push the RX filter flags
 *
 * Derives the FIF_* filter flags from the current interface state
 * (promiscuous/allmulti counters, monitor interfaces, scanning and the
 * per-feature fif_* counters), prepares the multicast list under
 * filter_lock, and hands everything to the driver.
 *
 * Fix: the marker bit was built with (1<<31), which left-shifts 1 into the
 * sign bit of a signed int — undefined behavior in C.  Use 1U << 31; the
 * resulting bit pattern is identical.
 */
void ieee80211_configure_filter(struct ieee80211_local *local)
{
	u64 mc;
	unsigned int changed_flags;
	unsigned int new_flags = 0;

	if (atomic_read(&local->iff_promiscs))
		new_flags |= FIF_PROMISC_IN_BSS;

	if (atomic_read(&local->iff_allmultis))
		new_flags |= FIF_ALLMULTI;

	if (local->monitors || local->scanning)
		new_flags |= FIF_BCN_PRBRESP_PROMISC;

	if (local->fif_probe_req || local->probe_req_reg)
		new_flags |= FIF_PROBE_REQ;

	if (local->fif_fcsfail)
		new_flags |= FIF_FCSFAIL;

	if (local->fif_plcpfail)
		new_flags |= FIF_PLCPFAIL;

	if (local->fif_control)
		new_flags |= FIF_CONTROL;

	if (local->fif_other_bss)
		new_flags |= FIF_OTHER_BSS;

	if (local->fif_pspoll)
		new_flags |= FIF_PSPOLL;

	spin_lock_bh(&local->filter_lock);
	changed_flags = local->filter_flags ^ new_flags;

	mc = drv_prepare_multicast(local, &local->mc_list);
	spin_unlock_bh(&local->filter_lock);

	/* be a bit nasty: bit 31 is no valid filter flag, so it must not
	 * survive the driver callback (checked by the WARN_ON below) */
	new_flags |= (1U << 31);

	drv_configure_filter(local, changed_flags, &new_flags, mc);

	WARN_ON(new_flags & (1U << 31));

	local->filter_flags = new_flags & ~(1U << 31);
}
/* Work item: re-run the filter configuration from process context. */
static void ieee80211_reconfig_filter(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, reconfig_filter);

	ieee80211_configure_filter(local);
}
/*
 * Returns true if we are logically configured to be on
 * the operating channel AND the hardware-conf is currently
 * configured on the operating channel.  Compares channel-type
 * as well.
 *
 * Fix: the local "scan_chan" was assigned in the tmp_channel branch but
 * never read (set-but-unused warning); removed.
 */
bool ieee80211_cfg_on_oper_channel(struct ieee80211_local *local)
{
	struct ieee80211_channel *chan;
	enum nl80211_channel_type channel_type;

	/* This logic needs to match logic in ieee80211_hw_config */
	if (local->scan_channel) {
		chan = local->scan_channel;
		/* If scanning on oper channel, use whatever channel-type
		 * is currently in use.
		 */
		if (chan == local->oper_channel)
			channel_type = local->_oper_channel_type;
		else
			channel_type = NL80211_CHAN_NO_HT;
	} else if (local->tmp_channel) {
		chan = local->tmp_channel;
		channel_type = local->tmp_channel_type;
	} else {
		chan = local->oper_channel;
		channel_type = local->_oper_channel_type;
	}

	if (chan != local->oper_channel ||
	    channel_type != local->_oper_channel_type)
		return false;

	/* Check current hardware-config against oper_channel. */
	if ((local->oper_channel != local->hw.conf.channel) ||
	    (local->_oper_channel_type != local->hw.conf.channel_type))
		return false;

	return true;
}
/*
 * ieee80211_hw_config - apply the current hardware configuration
 * @local: hardware/local state
 * @changed: IEEE80211_CONF_CHANGE_* flags already known to be dirty
 *
 * Selects the channel to use (scan channel, then temporary channel, then
 * operating channel), updates the off-channel flag, SMPS mode and TX power
 * level, and calls drv_config() when anything changed and at least one
 * interface is up.  Returns the driver's result, 0 otherwise.
 */
int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
{
	struct ieee80211_channel *chan, *scan_chan;
	int ret = 0;
	int power;
	enum nl80211_channel_type channel_type;
	u32 offchannel_flag;

	might_sleep();

	scan_chan = local->scan_channel;

	/* If this off-channel logic ever changes,
	 * ieee80211_cfg_on_oper_channel may need to change as well.
	 */
	offchannel_flag = local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;

	if (scan_chan) {
		chan = scan_chan;
		/* If scanning on oper channel, use whatever channel-type
		 * is currently in use.
		 */
		if (chan == local->oper_channel)
			channel_type = local->_oper_channel_type;
		else
			channel_type = NL80211_CHAN_NO_HT;
	} else if (local->tmp_channel) {
		chan = scan_chan = local->tmp_channel;
		channel_type = local->tmp_channel_type;
	} else {
		chan = local->oper_channel;
		channel_type = local->_oper_channel_type;
	}

	/* off-channel whenever the selected channel/type differs from oper */
	if (chan != local->oper_channel ||
	    channel_type != local->_oper_channel_type)
		local->hw.conf.flags |= IEEE80211_CONF_OFFCHANNEL;
	else
		local->hw.conf.flags &= ~IEEE80211_CONF_OFFCHANNEL;

	/* XOR leaves the bit set if the off-channel state toggled */
	offchannel_flag ^= local->hw.conf.flags & IEEE80211_CONF_OFFCHANNEL;

	if (offchannel_flag || chan != local->hw.conf.channel ||
	    channel_type != local->hw.conf.channel_type) {
		local->hw.conf.channel = chan;
		local->hw.conf.channel_type = channel_type;
		changed |= IEEE80211_CONF_CHANGE_CHANNEL;
	}

	if (!conf_is_ht(&local->hw.conf)) {
		/*
		 * mac80211.h documents that this is only valid
		 * when the channel is set to an HT type, and
		 * that otherwise STATIC is used.
		 */
		local->hw.conf.smps_mode = IEEE80211_SMPS_STATIC;
	} else if (local->hw.conf.smps_mode != local->smps_mode) {
		local->hw.conf.smps_mode = local->smps_mode;
		changed |= IEEE80211_CONF_CHANGE_SMPS;
	}

	/* while scanning ignore user/regulatory power constraints */
	if ((local->scanning & SCAN_SW_SCANNING) ||
	    (local->scanning & SCAN_HW_SCANNING))
		power = chan->max_power;
	else
		power = local->power_constr_level ?
			(chan->max_power - local->power_constr_level) :
			chan->max_power;

	if (local->user_power_level >= 0)
		power = min(power, local->user_power_level);

	if (local->hw.conf.power_level != power) {
		changed |= IEEE80211_CONF_CHANGE_POWER;
		local->hw.conf.power_level = power;
	}

	if (changed && local->open_count) {
		ret = drv_config(local, changed);
		/*
		 * Goal:
		 * HW reconfiguration should never fail, the driver has told
		 * us what it can support so it should live up to that promise.
		 *
		 * Current status:
		 * rfkill is not integrated with mac80211 and a
		 * configuration command can thus fail if hardware rfkill
		 * is enabled
		 *
		 * FIXME: integrate rfkill with mac80211 and then add this
		 * WARN_ON() back
		 *
		 */
		/* WARN_ON(ret); */
	}

	return ret;
}
/*
 * ieee80211_bss_info_change_notify - forward BSS config changes to the driver
 * @sdata: interface whose BSS configuration changed
 * @changed: BSS_CHANGED_* bitmap of what changed
 *
 * Fixes up the advertised BSSID pointer per interface type, recomputes the
 * enable_beacon state for beacon-capable interface types, and then calls
 * drv_bss_info_changed().  No-op when @changed is zero.
 */
void ieee80211_bss_info_change_notify(struct ieee80211_sub_if_data *sdata,
				      u32 changed)
{
	struct ieee80211_local *local = sdata->local;
	static const u8 zero[ETH_ALEN] = { 0 };

	if (!changed)
		return;

	if (sdata->vif.type == NL80211_IFTYPE_STATION) {
		/*
		 * While not associated, claim a BSSID of all-zeroes
		 * so that drivers don't do any weird things with the
		 * BSSID at that time.
		 */
		if (sdata->vif.bss_conf.assoc)
			sdata->vif.bss_conf.bssid = sdata->u.mgd.bssid;
		else
			sdata->vif.bss_conf.bssid = zero;
	} else if (sdata->vif.type == NL80211_IFTYPE_ADHOC)
		sdata->vif.bss_conf.bssid = sdata->u.ibss.bssid;
	else if (sdata->vif.type == NL80211_IFTYPE_AP)
		sdata->vif.bss_conf.bssid = sdata->vif.addr;
	else if (sdata->vif.type == NL80211_IFTYPE_WDS)
		sdata->vif.bss_conf.bssid = NULL;
	else if (ieee80211_vif_is_mesh(&sdata->vif)) {
		sdata->vif.bss_conf.bssid = zero;
	} else {
		WARN_ON(1);
		return;
	}

	/* only beacon-capable interface types may change the beacon state */
	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
	case NL80211_IFTYPE_WDS:
	case NL80211_IFTYPE_MESH_POINT:
		break;
	default:
		/* do not warn to simplify caller in scan.c */
		changed &= ~BSS_CHANGED_BEACON_ENABLED;
		if (WARN_ON(changed & BSS_CHANGED_BEACON))
			return;
		break;
	}

	if (changed & BSS_CHANGED_BEACON_ENABLED) {
		if (local->quiescing || !ieee80211_sdata_running(sdata) ||
		    test_bit(SDATA_STATE_OFFCHANNEL, &sdata->state)) {
			sdata->vif.bss_conf.enable_beacon = false;
		} else {
			/*
			 * Beacon should be enabled, but AP mode must
			 * check whether there is a beacon configured.
			 */
			switch (sdata->vif.type) {
			case NL80211_IFTYPE_AP:
				sdata->vif.bss_conf.enable_beacon =
					!!sdata->u.ap.beacon;
				break;
			case NL80211_IFTYPE_ADHOC:
				sdata->vif.bss_conf.enable_beacon =
					!!sdata->u.ibss.presp;
				break;
#ifdef CONFIG_MAC80211_MESH
			case NL80211_IFTYPE_MESH_POINT:
				sdata->vif.bss_conf.enable_beacon =
					!!sdata->u.mesh.mesh_id_len;
				break;
#endif
			default:
				/* not reached */
				WARN_ON(1);
				break;
			}
		}
	}

	drv_bss_info_changed(local, sdata, &sdata->vif.bss_conf, changed);
}
u32 ieee80211_reset_erp_info(struct ieee80211_sub_if_data *sdata)
{
sdata->vif.bss_conf.use_cts_prot = false;
sdata->vif.bss_conf.use_short_preamble = false;
sdata->vif.bss_conf.use_short_slot = false;
return BSS_CHANGED_ERP_CTS_PROT |
BSS_CHANGED_ERP_PREAMBLE |
BSS_CHANGED_ERP_SLOT;
}
/*
 * Tasklet draining the skb queues that the IRQ path filled.
 * skb->pkt_type is (ab)used by the IRQ-context producers as a message
 * type tag and must be cleared before the skb is handed further up,
 * so the normal network stack is not confused by the bogus value.
 */
static void ieee80211_tasklet_handler(unsigned long data)
{
	struct ieee80211_local *local = (struct ieee80211_local *) data;
	struct sk_buff *skb;

	/* drain the reliable queue first, then the unreliable one */
	while ((skb = skb_dequeue(&local->skb_queue)) ||
	       (skb = skb_dequeue(&local->skb_queue_unreliable))) {
		switch (skb->pkt_type) {
		case IEEE80211_RX_MSG:
			/* Clear skb->pkt_type in order to not confuse kernel
			 * netstack. */
			skb->pkt_type = 0;
			ieee80211_rx(local_to_hw(local), skb);
			break;
		case IEEE80211_TX_STATUS_MSG:
			skb->pkt_type = 0;
			ieee80211_tx_status(local_to_hw(local), skb);
			break;
		default:
			WARN(1, "mac80211: Packet is of unknown type %d\n",
			     skb->pkt_type);
			/* unknown tag: drop the skb instead of leaking it */
			dev_kfree_skb(skb);
			break;
		}
	}
}
/*
 * Deferred handler for ieee80211_restart_hw(): cancels any pending
 * scan and runs the full hardware reconfiguration under RTNL.
 */
static void ieee80211_restart_work(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, restart_work);

	/* wait for scan work complete */
	flush_workqueue(local->workqueue);

	/* a restart with a hardware scan still active indicates a bug */
	mutex_lock(&local->mtx);
	WARN(test_bit(SCAN_HW_SCANNING, &local->scanning) ||
	     local->sched_scanning,
	     "%s called with hardware scan in progress\n", __func__);
	mutex_unlock(&local->mtx);

	rtnl_lock();
	ieee80211_scan_cancel(local);
	ieee80211_reconfig(local);
	rtnl_unlock();
}
/*
 * ieee80211_restart_hw - driver-requested full hardware restart
 * @hw: the hardware to restart
 *
 * Stops all queues and schedules the actual restart work; the heavy
 * lifting happens later in ieee80211_restart_work().
 */
void ieee80211_restart_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	trace_api_restart_hw(local);

	wiphy_info(hw->wiphy,
		   "Hardware restart was requested\n");

	/* use this reason, ieee80211_reconfig will unblock it */
	ieee80211_stop_queues_by_reason(hw,
		IEEE80211_QUEUE_STOP_REASON_SUSPEND);

	schedule_work(&local->restart_work);
}
EXPORT_SYMBOL(ieee80211_restart_hw);
/* Work item wrapper: recalculate SM power save under the iflist mutex. */
static void ieee80211_recalc_smps_work(struct work_struct *work)
{
	struct ieee80211_local *local =
		container_of(work, struct ieee80211_local, recalc_smps);

	mutex_lock(&local->iflist_mtx);
	ieee80211_recalc_smps(local);
	mutex_unlock(&local->iflist_mtx);
}
#ifdef CONFIG_INET
/*
 * inetaddr notifier: keep the BSS configuration's hardware ARP filter
 * address list in sync with the interface's IPv4 addresses.
 *
 * Returns NOTIFY_DONE in all cases; the event is simply ignored when it
 * is not for one of our managed-mode interfaces.
 */
static int ieee80211_ifa_changed(struct notifier_block *nb,
				 unsigned long data, void *arg)
{
	struct in_ifaddr *ifa = arg;
	struct ieee80211_local *local =
		container_of(nb, struct ieee80211_local,
			     ifa_notifier);
	struct net_device *ndev = ifa->ifa_dev->dev;
	struct wireless_dev *wdev = ndev->ieee80211_ptr;
	struct in_device *idev;
	struct ieee80211_sub_if_data *sdata;
	struct ieee80211_bss_conf *bss_conf;
	struct ieee80211_if_managed *ifmgd;
	int c = 0;

	/* Make sure it's our interface that got changed */
	if (!wdev)
		return NOTIFY_DONE;

	if (wdev->wiphy != local->hw.wiphy)
		return NOTIFY_DONE;

	sdata = IEEE80211_DEV_TO_SUB_IF(ndev);
	bss_conf = &sdata->vif.bss_conf;

	if (!ieee80211_sdata_running(sdata))
		return NOTIFY_DONE;

	/* ARP filtering is only supported in managed mode */
	if (sdata->vif.type != NL80211_IFTYPE_STATION)
		return NOTIFY_DONE;

	idev = __in_dev_get_rtnl(sdata->dev);
	if (!idev)
		return NOTIFY_DONE;

	ifmgd = &sdata->u.mgd;
	mutex_lock(&ifmgd->mtx);

	/* Copy the addresses to the bss_conf list */
	ifa = idev->ifa_list;
	while (c < IEEE80211_BSS_ARP_ADDR_LIST_LEN && ifa) {
		bss_conf->arp_addr_list[c] = ifa->ifa_address;
		ifa = ifa->ifa_next;
		c++;
	}

	/* If not all addresses fit the list, disable filtering */
	if (ifa) {
		sdata->arp_filter_state = false;
		c = 0;
	} else {
		sdata->arp_filter_state = true;
	}
	bss_conf->arp_addr_cnt = c;

	/* Configure driver only if associated */
	if (ifmgd->associated) {
		bss_conf->arp_filter_enabled = sdata->arp_filter_state;
		ieee80211_bss_info_change_notify(sdata,
						 BSS_CHANGED_ARP_FILTER);
	}

	mutex_unlock(&ifmgd->mtx);

	return NOTIFY_DONE;
}
#endif
/* NAPI poll entry point: simply delegate to the driver's napi_poll op. */
static int ieee80211_napi_poll(struct napi_struct *napi, int budget)
{
	struct ieee80211_local *local =
		container_of(napi, struct ieee80211_local, napi);

	return local->ops->napi_poll(&local->hw, budget);
}

/* Exported helper for drivers to kick off NAPI polling. */
void ieee80211_napi_schedule(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	napi_schedule(&local->napi);
}
EXPORT_SYMBOL(ieee80211_napi_schedule);

/* Exported helper for drivers to signal NAPI poll completion. */
void ieee80211_napi_complete(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	napi_complete(&local->napi);
}
EXPORT_SYMBOL(ieee80211_napi_complete);
/* There isn't a lot of sense in it, but you can transmit anything you like */
/*
 * Default management-frame subtype bitmaps per interface type, used
 * when the driver does not provide its own wiphy->mgmt_stypes.
 * .tx = 0xffff allows transmitting any subtype; .rx lists the subtypes
 * that may be reported to userspace for that interface type.
 */
/* There isn't a lot of sense in it, but you can transmit anything you like */
static const struct ieee80211_txrx_stypes
ieee80211_default_mgmt_stypes[NUM_NL80211_IFTYPES] = {
	[NL80211_IFTYPE_ADHOC] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4),
	},
	[NL80211_IFTYPE_STATION] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
			BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
	},
	[NL80211_IFTYPE_AP] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
			BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
			BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
			BIT(IEEE80211_STYPE_DISASSOC >> 4) |
			BIT(IEEE80211_STYPE_AUTH >> 4) |
			BIT(IEEE80211_STYPE_DEAUTH >> 4) |
			BIT(IEEE80211_STYPE_ACTION >> 4),
	},
	[NL80211_IFTYPE_AP_VLAN] = {
		/* copy AP */
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
			BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
			BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
			BIT(IEEE80211_STYPE_DISASSOC >> 4) |
			BIT(IEEE80211_STYPE_AUTH >> 4) |
			BIT(IEEE80211_STYPE_DEAUTH >> 4) |
			BIT(IEEE80211_STYPE_ACTION >> 4),
	},
	[NL80211_IFTYPE_P2P_CLIENT] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
			BIT(IEEE80211_STYPE_PROBE_REQ >> 4),
	},
	[NL80211_IFTYPE_P2P_GO] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ASSOC_REQ >> 4) |
			BIT(IEEE80211_STYPE_REASSOC_REQ >> 4) |
			BIT(IEEE80211_STYPE_PROBE_REQ >> 4) |
			BIT(IEEE80211_STYPE_DISASSOC >> 4) |
			BIT(IEEE80211_STYPE_AUTH >> 4) |
			BIT(IEEE80211_STYPE_DEAUTH >> 4) |
			BIT(IEEE80211_STYPE_ACTION >> 4),
	},
	[NL80211_IFTYPE_MESH_POINT] = {
		.tx = 0xffff,
		.rx = BIT(IEEE80211_STYPE_ACTION >> 4) |
			BIT(IEEE80211_STYPE_AUTH >> 4) |
			BIT(IEEE80211_STYPE_DEAUTH >> 4),
	},
};
/*
 * ieee80211_alloc_hw - allocate a new 802.11 hardware device
 * @priv_data_len: size of the driver's private data area
 * @ops: driver callbacks; tx/start/stop/config/add_interface/
 *       remove_interface/configure_filter are mandatory
 *
 * Allocates the wiphy together with the ieee80211_local structure and
 * the driver's private area, and initializes all mac80211-internal
 * state (locks, work items, tasklets, queues).  Returns the new
 * ieee80211_hw, or NULL on allocation failure.
 */
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size, i;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy	    |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
	priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);
	if (!wiphy)
		return NULL;

	wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;

	wiphy->privid = mac80211_wiphy_privid;

	wiphy->flags |= WIPHY_FLAG_NETNS_OK |
			WIPHY_FLAG_4ADDR_AP |
			WIPHY_FLAG_4ADDR_STATION;

	/* without a hardware set_key op, software crypto can do IBSS RSN */
	if (!ops->set_key)
		wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	wiphy->bss_priv_size = sizeof(struct ieee80211_bss);

	local = wiphy_priv(wiphy);

	local->hw.wiphy = wiphy;

	/* driver private area starts right after our aligned local struct */
	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);

	/* these driver callbacks are mandatory */
	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	/* set up some defaults */
	local->hw.queues = 1;
	local->hw.max_rates = 1;
	local->hw.max_report_rates = 0;
	local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
	local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
	local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
	local->user_power_level = -1;
	local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
	local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;

	INIT_LIST_HEAD(&local->interfaces);

	__hw_addr_init(&local->mc_list);

	mutex_init(&local->iflist_mtx);
	mutex_init(&local->mtx);

	mutex_init(&local->key_mtx);
	spin_lock_init(&local->filter_lock);
	spin_lock_init(&local->queue_stop_reason_lock);

	/*
	 * The rx_skb_queue is only accessed from tasklets,
	 * but other SKB queues are used from within IRQ
	 * context. Therefore, this one needs a different
	 * locking class so our direct, non-irq-safe use of
	 * the queue's lock doesn't throw lockdep warnings.
	 */
	skb_queue_head_init_class(&local->rx_skb_queue,
				  &ieee80211_rx_skb_queue_class);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);

	ieee80211_work_init(local);

	INIT_WORK(&local->restart_work, ieee80211_restart_work);

	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
	INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
	local->smps_mode = IEEE80211_SMPS_OFF;

	INIT_WORK(&local->dynamic_ps_enable_work,
		  ieee80211_dynamic_ps_enable_work);
	INIT_WORK(&local->dynamic_ps_disable_work,
		  ieee80211_dynamic_ps_disable_work);
	setup_timer(&local->dynamic_ps_timer,
		    ieee80211_dynamic_ps_timer, (unsigned long) local);

	INIT_WORK(&local->sched_scan_stopped_work,
		  ieee80211_sched_scan_stopped_work);

	sta_info_init(local);

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
		skb_queue_head_init(&local->pending[i]);
		atomic_set(&local->agg_queue_stop[i], 0);
	}
	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long) local);

	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	/* init dummy netdev for use w/ NAPI */
	init_dummy_netdev(&local->napi_dev);

	ieee80211_led_names(local);

	ieee80211_hw_roc_setup(local);

	return local_to_hw(local);
}
EXPORT_SYMBOL(ieee80211_alloc_hw);
/*
 * ieee80211_register_hw - finish registration of a hardware device
 * @hw: hardware previously allocated with ieee80211_alloc_hw()
 *
 * Validates driver-provided capabilities, fills in mac80211 defaults
 * (channels, cipher suites, scan limits), registers the wiphy, creates
 * the workqueue, initializes WEP and rate control, and adds a default
 * STA interface if possible.  Returns 0 or a negative error; on error
 * everything set up so far is unwound via the goto chain at the end.
 */
int ieee80211_register_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);
	int result, i;
	enum ieee80211_band band;
	int channels, max_bitrates;
	bool supp_ht;
	static const u32 cipher_suites[] = {
		/* keep WEP first, it may be removed below */
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* keep last -- depends on hw flags! */
		WLAN_CIPHER_SUITE_AES_CMAC
	};

	/* advertising WoWLAN without suspend/resume ops makes no sense */
	if ((hw->wiphy->wowlan.flags || hw->wiphy->wowlan.n_patterns)
#ifdef CONFIG_PM
	    && (!local->ops->suspend || !local->ops->resume)
#endif
	    )
		return -EINVAL;

	if (hw->max_report_rates == 0)
		hw->max_report_rates = hw->max_rates;

	/*
	 * generic code guarantees at least one band,
	 * set this very early because much code assumes
	 * that hw.conf.channel is assigned
	 */
	channels = 0;
	max_bitrates = 0;
	supp_ht = false;
	for (band = 0; band < IEEE80211_NUM_BANDS; band++) {
		struct ieee80211_supported_band *sband;

		sband = local->hw.wiphy->bands[band];
		if (!sband)
			continue;
		if (!local->oper_channel) {
			/* init channel we're on */
			local->hw.conf.channel =
			local->oper_channel = &sband->channels[0];
			local->hw.conf.channel_type = NL80211_CHAN_NO_HT;
		}
		channels += sband->n_channels;

		if (max_bitrates < sband->n_bitrates)
			max_bitrates = sband->n_bitrates;
		supp_ht = supp_ht || sband->ht_cap.ht_supported;
	}

	/* scan request with room for a channel pointer per channel */
	local->int_scan_req = kzalloc(sizeof(*local->int_scan_req) +
				      sizeof(void *) * channels, GFP_KERNEL);
	if (!local->int_scan_req)
		return -ENOMEM;

	/* if low-level driver supports AP, we also support VLAN */
	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_AP)) {
		hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
		hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
	}

	/* mac80211 always supports monitor */
	hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_MONITOR);
	hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_MONITOR);

	/*
	 * mac80211 doesn't support more than 1 channel, and also not more
	 * than one IBSS interface
	 */
	for (i = 0; i < hw->wiphy->n_iface_combinations; i++) {
		const struct ieee80211_iface_combination *c;
		int j;

		c = &hw->wiphy->iface_combinations[i];

		if (c->num_different_channels > 1)
			return -EINVAL;

		for (j = 0; j < c->n_limits; j++)
			if ((c->limits[j].types & BIT(NL80211_IFTYPE_ADHOC)) &&
			    c->limits[j].max > 1)
				return -EINVAL;
	}

#ifndef CONFIG_MAC80211_MESH
	/* mesh depends on Kconfig, but drivers should set it if they want */
	local->hw.wiphy->interface_modes &= ~BIT(NL80211_IFTYPE_MESH_POINT);
#endif

	/* if the underlying driver supports mesh, mac80211 will (at least)
	 * provide routing of mesh authentication frames to userspace */
	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_MESH_POINT))
		local->hw.wiphy->flags |= WIPHY_FLAG_MESH_AUTH;

	/* mac80211 supports control port protocol changing */
	local->hw.wiphy->flags |= WIPHY_FLAG_CONTROL_PORT_PROTOCOL;

	if (local->hw.flags & IEEE80211_HW_SIGNAL_DBM)
		local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
	else if (local->hw.flags & IEEE80211_HW_SIGNAL_UNSPEC)
		local->hw.wiphy->signal_type = CFG80211_SIGNAL_TYPE_UNSPEC;

	WARN((local->hw.flags & IEEE80211_HW_SUPPORTS_UAPSD)
	     && (local->hw.flags & IEEE80211_HW_PS_NULLFUNC_STACK),
	     "U-APSD not supported with HW_PS_NULLFUNC_STACK\n");

	/*
	 * Calculate scan IE length -- we need this to alloc
	 * memory and to subtract from the driver limit. It
	 * includes the DS Params, (extended) supported rates, and HT
	 * information -- SSID is the driver's responsibility.
	 */
	local->scan_ies_len = 4 + max_bitrates /* (ext) supp rates */ +
		3 /* DS Params */;
	if (supp_ht)
		local->scan_ies_len += 2 + sizeof(struct ieee80211_ht_cap);

	if (!local->ops->hw_scan) {
		/* For hw_scan, driver needs to set these up. */
		local->hw.wiphy->max_scan_ssids = 4;
		local->hw.wiphy->max_scan_ie_len = IEEE80211_MAX_DATA_LEN;
	}

	/*
	 * If the driver supports any scan IEs, then assume the
	 * limit includes the IEs mac80211 will add, otherwise
	 * leave it at zero and let the driver sort it out; we
	 * still pass our IEs to the driver but userspace will
	 * not be allowed to in that case.
	 */
	if (local->hw.wiphy->max_scan_ie_len)
		local->hw.wiphy->max_scan_ie_len -= local->scan_ies_len;

	/* Set up cipher suites unless driver already did */
	if (!local->hw.wiphy->cipher_suites) {
		local->hw.wiphy->cipher_suites = cipher_suites;
		local->hw.wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
		/* without MFP, drop the trailing AES_CMAC entry */
		if (!(local->hw.flags & IEEE80211_HW_MFP_CAPABLE))
			local->hw.wiphy->n_cipher_suites--;
	}
	if (IS_ERR(local->wep_tx_tfm) || IS_ERR(local->wep_rx_tfm)) {
		/* WEP crypto unavailable: remove WEP40/WEP104 suites */
		if (local->hw.wiphy->cipher_suites == cipher_suites) {
			/* our static table keeps WEP first: just skip it */
			local->hw.wiphy->cipher_suites += 2;
			local->hw.wiphy->n_cipher_suites -= 2;
		} else {
			u32 *suites;
			int r, w = 0;

			/* Filter out WEP */

			suites = kmemdup(
				local->hw.wiphy->cipher_suites,
				sizeof(u32) * local->hw.wiphy->n_cipher_suites,
				GFP_KERNEL);
			if (!suites)
				return -ENOMEM;
			for (r = 0; r < local->hw.wiphy->n_cipher_suites; r++) {
				u32 suite = local->hw.wiphy->cipher_suites[r];
				if (suite == WLAN_CIPHER_SUITE_WEP40 ||
				    suite == WLAN_CIPHER_SUITE_WEP104)
					continue;
				suites[w++] = suite;
			}
			local->hw.wiphy->cipher_suites = suites;
			local->hw.wiphy->n_cipher_suites = w;
			local->wiphy_ciphers_allocated = true;
		}
	}

	if (!local->ops->remain_on_channel)
		local->hw.wiphy->max_remain_on_channel_duration = 5000;

	if (local->ops->sched_scan_start)
		local->hw.wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;

	result = wiphy_register(local->hw.wiphy);
	if (result < 0)
		goto fail_wiphy_register;

	/*
	 * We use the number of queues for feature tests (QoS, HT) internally
	 * so restrict them appropriately.
	 */
	if (hw->queues > IEEE80211_MAX_QUEUES)
		hw->queues = IEEE80211_MAX_QUEUES;

	local->workqueue =
		alloc_ordered_workqueue(wiphy_name(local->hw.wiphy), 0);
	if (!local->workqueue) {
		result = -ENOMEM;
		goto fail_workqueue;
	}

	/*
	 * The hardware needs headroom for sending the frame,
	 * and we need some headroom for passing the frame to monitor
	 * interfaces, but never both at the same time.
	 */
#ifndef __CHECKER__
	BUILD_BUG_ON(IEEE80211_TX_STATUS_HEADROOM !=
		     sizeof(struct ieee80211_tx_status_rtap_hdr));
#endif
	local->tx_headroom = max_t(unsigned int, local->hw.extra_tx_headroom,
				   sizeof(struct ieee80211_tx_status_rtap_hdr));

	debugfs_hw_add(local);

	/*
	 * if the driver doesn't specify a max listen interval we
	 * use 5 which should be a safe default
	 */
	if (local->hw.max_listen_interval == 0)
		local->hw.max_listen_interval = 5;

	local->hw.conf.listen_interval = local->hw.max_listen_interval;

	local->dynamic_ps_forced_timeout = -1;

	/* WEP init failure is not fatal, just logged */
	result = ieee80211_wep_init(local);
	if (result < 0)
		wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n",
			    result);

	rtnl_lock();

	result = ieee80211_init_rate_ctrl_alg(local,
					      hw->rate_control_algorithm);
	if (result < 0) {
		wiphy_debug(local->hw.wiphy,
			    "Failed to initialize rate control algorithm\n");
		goto fail_rate;
	}

	/* add one default STA interface if supported */
	if (local->hw.wiphy->interface_modes & BIT(NL80211_IFTYPE_STATION)) {
		result = ieee80211_if_add(local, "wlan%d", NULL,
					  NL80211_IFTYPE_STATION, NULL);
		if (result)
			wiphy_warn(local->hw.wiphy,
				   "Failed to add default virtual iface\n");
	}

	rtnl_unlock();

	ieee80211_led_init(local);

	local->network_latency_notifier.notifier_call =
		ieee80211_max_network_latency;
	result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY,
				     &local->network_latency_notifier);
	if (result) {
		rtnl_lock();
		goto fail_pm_qos;
	}

#ifdef CONFIG_INET
	local->ifa_notifier.notifier_call = ieee80211_ifa_changed;
	result = register_inetaddr_notifier(&local->ifa_notifier);
	if (result)
		goto fail_ifa;
#endif

	netif_napi_add(&local->napi_dev, &local->napi, ieee80211_napi_poll,
		       local->hw.napi_weight);

	return 0;

	/* error unwind: each label undoes everything after the previous one */
#ifdef CONFIG_INET
 fail_ifa:
	pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
			       &local->network_latency_notifier);
	rtnl_lock();
#endif
 fail_pm_qos:
	ieee80211_led_exit(local);
	ieee80211_remove_interfaces(local);
 fail_rate:
	rtnl_unlock();
	ieee80211_wep_free(local);
	sta_info_stop(local);
	destroy_workqueue(local->workqueue);
 fail_workqueue:
	wiphy_unregister(local->hw.wiphy);
 fail_wiphy_register:
	if (local->wiphy_ciphers_allocated)
		kfree(local->hw.wiphy->cipher_suites);
	kfree(local->int_scan_req);
	return result;
}
EXPORT_SYMBOL(ieee80211_register_hw);
/*
 * ieee80211_unregister_hw - unregister a hardware device
 * @hw: the hardware to unregister
 *
 * Reverses ieee80211_register_hw(): kills tasklets and notifiers,
 * removes all interfaces, drains queues and frees associated state.
 * Call this before ieee80211_free_hw().
 */
void ieee80211_unregister_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	tasklet_kill(&local->tx_pending_tasklet);
	tasklet_kill(&local->tasklet);

	pm_qos_remove_notifier(PM_QOS_NETWORK_LATENCY,
			       &local->network_latency_notifier);
#ifdef CONFIG_INET
	unregister_inetaddr_notifier(&local->ifa_notifier);
#endif

	rtnl_lock();

	/*
	 * At this point, interface list manipulations are fine
	 * because the driver cannot be handing us frames any
	 * more and the tasklet is killed.
	 */
	ieee80211_remove_interfaces(local);

	rtnl_unlock();

	/*
	 * Now all work items will be gone, but the
	 * timer might still be armed, so delete it
	 */
	del_timer_sync(&local->work_timer);

	cancel_work_sync(&local->restart_work);
	cancel_work_sync(&local->reconfig_filter);

	ieee80211_clear_tx_pending(local);
	sta_info_stop(local);
	rate_control_deinitialize(local);

	/* the skb queues should be empty by now; warn if they are not */
	if (skb_queue_len(&local->skb_queue) ||
	    skb_queue_len(&local->skb_queue_unreliable))
		wiphy_warn(local->hw.wiphy, "skb_queue not empty\n");
	skb_queue_purge(&local->skb_queue);
	skb_queue_purge(&local->skb_queue_unreliable);
	skb_queue_purge(&local->rx_skb_queue);

	destroy_workqueue(local->workqueue);
	wiphy_unregister(local->hw.wiphy);
	ieee80211_wep_free(local);
	ieee80211_led_exit(local);
	kfree(local->int_scan_req);
}
EXPORT_SYMBOL(ieee80211_unregister_hw);
/*
 * ieee80211_free_hw - free the hardware structure
 * @hw: the hardware to free; must already be unregistered
 *
 * Destroys the remaining mutexes, frees the cipher-suite table if
 * mac80211 allocated it, and releases the wiphy (and with it the
 * embedded local struct and driver private data).
 */
void ieee80211_free_hw(struct ieee80211_hw *hw)
{
	struct ieee80211_local *local = hw_to_local(hw);

	mutex_destroy(&local->iflist_mtx);
	mutex_destroy(&local->mtx);

	if (local->wiphy_ciphers_allocated)
		kfree(local->hw.wiphy->cipher_suites);

	wiphy_free(local->hw.wiphy);
}
EXPORT_SYMBOL(ieee80211_free_hw);
/*
 * Module init: verify that ieee80211_tx_info fits in skb->cb, then
 * register the rate-control algorithms and the interface handling.
 * Failures unwind in reverse registration order.
 */
static int __init ieee80211_init(void)
{
	struct sk_buff *skb;
	int ret;

	/* tx info lives in skb->cb; make sure it can never overflow it */
	BUILD_BUG_ON(sizeof(struct ieee80211_tx_info) > sizeof(skb->cb));
	BUILD_BUG_ON(offsetof(struct ieee80211_tx_info, driver_data) +
		     IEEE80211_TX_INFO_DRIVER_DATA_SIZE > sizeof(skb->cb));

	ret = rc80211_minstrel_init();
	if (ret)
		return ret;

	ret = rc80211_minstrel_ht_init();
	if (ret)
		goto err_minstrel;

	ret = rc80211_pid_init();
	if (ret)
		goto err_pid;

	ret = ieee80211_iface_init();
	if (ret)
		goto err_netdev;
	return 0;
 err_netdev:
	rc80211_pid_exit();
 err_pid:
	rc80211_minstrel_ht_exit();
 err_minstrel:
	rc80211_minstrel_exit();
	return ret;
}

/* Module exit: tear everything down in reverse order of registration. */
static void __exit ieee80211_exit(void)
{
	rc80211_pid_exit();
	rc80211_minstrel_ht_exit();
	rc80211_minstrel_exit();

	if (mesh_allocated)
		ieee80211s_stop();

	ieee80211_iface_exit();

	/* wait for outstanding RCU callbacks before the module goes away */
	rcu_barrier();
}

subsys_initcall(ieee80211_init);
module_exit(ieee80211_exit);

MODULE_DESCRIPTION("IEEE 802.11 subsystem");
MODULE_LICENSE("GPL");
/*
* kernel/power/main.c - PM subsystem core functionality.
*
* Copyright (c) 2003 Patrick Mochel
* Copyright (c) 2003 Open Source Development Lab
*
* This file is released under the GPLv2
*
*/
#include <linux/export.h>
#include <linux/kobject.h>
#include <linux/string.h>
#include <linux/resume-trace.h>
#include <linux/workqueue.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include "power.h"
DEFINE_MUTEX(pm_mutex);
#ifdef CONFIG_PM_SLEEP
/* Routines for PM-transition notifications */
static BLOCKING_NOTIFIER_HEAD(pm_chain_head);
/* Register a callback on the PM-transition notifier chain. */
int register_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(register_pm_notifier);

/* Remove a callback from the PM-transition notifier chain. */
int unregister_pm_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&pm_chain_head, nb);
}
EXPORT_SYMBOL_GPL(unregister_pm_notifier);

/*
 * Invoke the PM notifier chain with event @val; converts the notifier
 * return code into a regular 0/-errno result.
 */
int pm_notifier_call_chain(unsigned long val)
{
	int ret = blocking_notifier_call_chain(&pm_chain_head, val, NULL);

	return notifier_to_errno(ret);
}
/* If set, devices may be suspended and resumed asynchronously. */
int pm_async_enabled = 1;
/*
 * /sys/power/pm_async: read or toggle asynchronous device
 * suspend/resume.  Accepts exactly "0" (disable) or "1" (enable).
 */
static ssize_t pm_async_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_async_enabled);
}

static ssize_t pm_async_store(struct kobject *kobj, struct kobj_attribute *attr,
			      const char *buf, size_t n)
{
	unsigned long val;

	/* reject anything that is not a plain 0 or 1 */
	if (kstrtoul(buf, 10, &val) || val > 1)
		return -EINVAL;

	pm_async_enabled = val;
	return n;
}

power_attr(pm_async);
#ifdef CONFIG_PM_DEBUG
int pm_test_level = TEST_NONE;
/* Names of the pm_test levels, indexed by the TEST_* enum. */
static const char * const pm_tests[__TEST_AFTER_LAST] = {
	[TEST_NONE] = "none",
	[TEST_CORE] = "core",
	[TEST_CPUS] = "processors",
	[TEST_PLATFORM] = "platform",
	[TEST_DEVICES] = "devices",
	[TEST_FREEZER] = "freezer",
};

/*
 * /sys/power/pm_test (show): list all test levels, marking the
 * currently selected one with brackets, e.g. "[none] core ...".
 */
static ssize_t pm_test_show(struct kobject *kobj, struct kobj_attribute *attr,
			    char *buf)
{
	char *s = buf;
	int level;

	for (level = TEST_FIRST; level <= TEST_MAX; level++)
		if (pm_tests[level]) {
			if (level == pm_test_level)
				s += sprintf(s, "[%s] ", pm_tests[level]);
			else
				s += sprintf(s, "%s ", pm_tests[level]);
		}

	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';

	return (s - buf);
}

/*
 * /sys/power/pm_test (store): select a test level by name; returns
 * -EINVAL when the name does not match any known level.
 */
static ssize_t pm_test_store(struct kobject *kobj, struct kobj_attribute *attr,
			     const char *buf, size_t n)
{
	const char * const *s;
	int level;
	char *p;
	int len;
	int error = -EINVAL;

	/* compare only up to a trailing newline, if any */
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	lock_system_sleep();

	level = TEST_FIRST;
	for (s = &pm_tests[level]; level <= TEST_MAX; s++, level++)
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len)) {
			pm_test_level = level;
			error = 0;
			break;
		}

	unlock_system_sleep();

	return error ? error : n;
}

power_attr(pm_test);
#endif /* CONFIG_PM_DEBUG */
#ifdef CONFIG_DEBUG_FS
/*
 * suspend_step_name - map a suspend_stat_step to a human-readable name
 * @step: the recorded suspend step
 *
 * Returns a pointer to a static string, or "" for unrecognized values.
 * The return type is const-qualified because all results are string
 * literals (modifying them would be undefined behavior); callers only
 * print the result via seq_printf(), so this stays source compatible.
 */
static const char *suspend_step_name(enum suspend_stat_step step)
{
	switch (step) {
	case SUSPEND_FREEZE:
		return "freeze";
	case SUSPEND_PREPARE:
		return "prepare";
	case SUSPEND_SUSPEND:
		return "suspend";
	case SUSPEND_SUSPEND_NOIRQ:
		return "suspend_noirq";
	case SUSPEND_RESUME_NOIRQ:
		return "resume_noirq";
	case SUSPEND_RESUME:
		return "resume";
	default:
		return "";
	}
}
/*
 * debugfs /sys/kernel/debug/suspend_stats: dump the global
 * suspend_stats counters plus the ring buffers of the last
 * REC_FAILED_NUM failed devices, errnos, and steps (newest first).
 */
static int suspend_stats_show(struct seq_file *s, void *unused)
{
	int i, index, last_dev, last_errno, last_step;

	/* each *_index points at the NEXT slot; step back to the newest */
	last_dev = suspend_stats.last_failed_dev + REC_FAILED_NUM - 1;
	last_dev %= REC_FAILED_NUM;
	last_errno = suspend_stats.last_failed_errno + REC_FAILED_NUM - 1;
	last_errno %= REC_FAILED_NUM;
	last_step = suspend_stats.last_failed_step + REC_FAILED_NUM - 1;
	last_step %= REC_FAILED_NUM;
	seq_printf(s, "%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n"
			"%s: %d\n%s: %d\n%s: %d\n%s: %d\n%s: %d\n",
			"success", suspend_stats.success,
			"fail", suspend_stats.fail,
			"failed_freeze", suspend_stats.failed_freeze,
			"failed_prepare", suspend_stats.failed_prepare,
			"failed_suspend", suspend_stats.failed_suspend,
			"failed_suspend_late",
				suspend_stats.failed_suspend_late,
			"failed_suspend_noirq",
				suspend_stats.failed_suspend_noirq,
			"failed_resume", suspend_stats.failed_resume,
			"failed_resume_early",
				suspend_stats.failed_resume_early,
			"failed_resume_noirq",
				suspend_stats.failed_resume_noirq);
	seq_printf(s,	"failures:\n  last_failed_dev:\t%-s\n",
			suspend_stats.failed_devs[last_dev]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_dev + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_stats.failed_devs[index]);
	}
	seq_printf(s,	"  last_failed_errno:\t%-d\n",
			suspend_stats.errno[last_errno]);
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_errno + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-d\n",
			suspend_stats.errno[index]);
	}
	seq_printf(s,	"  last_failed_step:\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[last_step]));
	for (i = 1; i < REC_FAILED_NUM; i++) {
		index = last_step + REC_FAILED_NUM - i;
		index %= REC_FAILED_NUM;
		seq_printf(s, "\t\t\t%-s\n",
			suspend_step_name(
				suspend_stats.failed_steps[index]));
	}

	return 0;
}

/* debugfs open: bind the single-record seq_file to suspend_stats_show. */
static int suspend_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, suspend_stats_show, NULL);
}

static const struct file_operations suspend_stats_operations = {
	.open           = suspend_stats_open,
	.read           = seq_read,
	.llseek         = seq_lseek,
	.release        = single_release,
};

/* Create the read-only suspend_stats debugfs file at late init. */
static int __init pm_debugfs_init(void)
{
	debugfs_create_file("suspend_stats", S_IFREG | S_IRUGO,
			NULL, NULL, &suspend_stats_operations);
	return 0;
}

late_initcall(pm_debugfs_init);
#endif /* CONFIG_DEBUG_FS */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_SLEEP_DEBUG
/*
* pm_print_times: print time taken by devices to suspend and resume.
*
* show() returns whether printing of suspend and resume times is enabled.
* store() accepts 0 or 1. 0 disables printing and 1 enables it.
*/
bool pm_print_times_enabled;
/*
 * /sys/power/pm_print_times: read or toggle printing of per-device
 * suspend/resume timings.  Accepts exactly "0" or "1".
 */
static ssize_t pm_print_times_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", pm_print_times_enabled);
}

static ssize_t pm_print_times_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t n)
{
	unsigned long val;

	/* reject anything that is not a plain 0 or 1 */
	if (kstrtoul(buf, 10, &val) || val > 1)
		return -EINVAL;

	pm_print_times_enabled = !!val;
	return n;
}

power_attr(pm_print_times);
/* Default pm_print_times to on when the kernel booted with initcall_debug. */
static inline void pm_print_times_init(void)
{
	pm_print_times_enabled = !!initcall_debug;
}
#else /* !CONFIG_PM_SLEEP_DEBUG */
static inline void pm_print_times_init(void) {}
#endif /* CONFIG_PM_SLEEP_DEBUG */
struct kobject *power_kobj;
/**
* state - control system power state.
*
* show() returns what states are supported, which is hard-coded to
* 'standby' (Power-On Suspend), 'mem' (Suspend-to-RAM), and
* 'disk' (Suspend-to-Disk).
*
* store() accepts one of those strings, translates it into the
* proper enumerated value, and initiates a suspend transition.
*/
/*
 * /sys/power/state (show): list the supported sleep states; suspend
 * states come from pm_states[], "disk" is appended when hibernation
 * is configured.
 */
static ssize_t state_show(struct kobject *kobj, struct kobj_attribute *attr,
			  char *buf)
{
	char *s = buf;
#ifdef CONFIG_SUSPEND
	int i;

	for (i = 0; i < PM_SUSPEND_MAX; i++) {
		if (pm_states[i] && valid_state(i))
			s += sprintf(s,"%s ", pm_states[i]);
	}
#endif
#ifdef CONFIG_HIBERNATION
	s += sprintf(s, "%s\n", "disk");
#else
	if (s != buf)
		/* convert the last space to a newline */
		*(s-1) = '\n';
#endif
	return (s - buf);
}

/*
 * Translate a user-supplied state string into a suspend_state_t:
 * "disk" maps to PM_SUSPEND_MAX (hibernation), a pm_states[] match
 * maps to that suspend state, anything else to PM_SUSPEND_ON
 * (which the callers treat as invalid input).
 */
static suspend_state_t decode_state(const char *buf, size_t n)
{
#ifdef CONFIG_SUSPEND
	suspend_state_t state = PM_SUSPEND_MIN;
	const char * const *s;
#endif
	char *p;
	int len;

	/* compare only up to a trailing newline, if any */
	p = memchr(buf, '\n', n);
	len = p ? p - buf : n;

	/* Check hibernation first. */
	if (len == 4 && !strncmp(buf, "disk", len))
		return PM_SUSPEND_MAX;

#ifdef CONFIG_SUSPEND
	for (s = &pm_states[state]; state < PM_SUSPEND_MAX; s++, state++)
		if (*s && len == strlen(*s) && !strncmp(buf, *s, len))
			return state;
#endif

	return PM_SUSPEND_ON;
}

/*
 * /sys/power/state (store): initiate a suspend or hibernation
 * transition; refused with -EBUSY while autosleep is active.
 */
static ssize_t state_store(struct kobject *kobj, struct kobj_attribute *attr,
			   const char *buf, size_t n)
{
	suspend_state_t state;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	state = decode_state(buf, n);
	if (state < PM_SUSPEND_MAX)
		error = pm_suspend(state);
	else if (state == PM_SUSPEND_MAX)
		error = hibernate();
	else
		error = -EINVAL;

 out:
	pm_autosleep_unlock();
	return error ? error : n;
}

power_attr(state);
#ifdef CONFIG_PM_SLEEP
/*
* The 'wakeup_count' attribute, along with the functions defined in
* drivers/base/power/wakeup.c, provides a means by which wakeup events can be
* handled in a non-racy way.
*
* If a wakeup event occurs when the system is in a sleep state, it simply is
* woken up. In turn, if an event that would wake the system up from a sleep
* state occurs when it is undergoing a transition to that sleep state, the
* transition should be aborted. Moreover, if such an event occurs when the
* system is in the working state, an attempt to start a transition to the
* given sleep state should fail during certain period after the detection of
* the event. Using the 'state' attribute alone is not sufficient to satisfy
* these requirements, because a wakeup event may occur exactly when 'state'
* is being written to and may be delivered to user space right before it is
* frozen, so the event will remain only partially processed until the system is
* woken up by another event. In particular, it won't cause the transition to
* a sleep state to be aborted.
*
* This difficulty may be overcome if user space uses 'wakeup_count' before
* writing to 'state'. It first should read from 'wakeup_count' and store
* the read value. Then, after carrying out its own preparations for the system
* transition to a sleep state, it should write the stored value to
* 'wakeup_count'. If that fails, at least one wakeup event has occurred since
* 'wakeup_count' was read and 'state' should not be written to. Otherwise, it
* is allowed to write to 'state', but the transition will be aborted if there
* are any wakeup events detected after 'wakeup_count' was written to.
*/
/*
 * /sys/power/wakeup_count (show): report the current wakeup event
 * count; returns -EINTR when a wakeup is in progress.
 */
static ssize_t wakeup_count_show(struct kobject *kobj,
				struct kobj_attribute *attr,
				char *buf)
{
	unsigned int val;

	return pm_get_wakeup_count(&val, true) ?
		sprintf(buf, "%u\n", val) : -EINTR;
}

/*
 * /sys/power/wakeup_count (store): userspace writes back the count it
 * read earlier; the write fails if wakeup events happened in between
 * (see the race-avoidance protocol described in the comment above).
 * Refused with -EBUSY while autosleep is active.
 */
static ssize_t wakeup_count_store(struct kobject *kobj,
				struct kobj_attribute *attr,
				const char *buf, size_t n)
{
	unsigned int val;
	int error;

	error = pm_autosleep_lock();
	if (error)
		return error;

	if (pm_autosleep_state() > PM_SUSPEND_ON) {
		error = -EBUSY;
		goto out;
	}

	error = -EINVAL;
	if (sscanf(buf, "%u", &val) == 1) {
		if (pm_save_wakeup_count(val))
			error = n;
	}

 out:
	pm_autosleep_unlock();
	return error;
}

power_attr(wakeup_count);
#ifdef CONFIG_PM_AUTOSLEEP
/*
 * Report the current autosleep target: "off" when disabled, otherwise
 * the sleep state name, "disk" when only hibernation applies, or
 * "error" when the state cannot be represented.
 */
static ssize_t autosleep_show(struct kobject *kobj,
			struct kobj_attribute *attr,
			char *buf)
{
	suspend_state_t state = pm_autosleep_state();

	if (state == PM_SUSPEND_ON)
		return sprintf(buf, "off\n");

#ifdef CONFIG_SUSPEND
	if (state < PM_SUSPEND_MAX)
		return sprintf(buf, "%s\n", valid_state(state) ?
						pm_states[state] : "error");
#endif
#ifdef CONFIG_HIBERNATION
	return sprintf(buf, "disk\n");
#else
	/* NOTE(review): lacks the trailing newline every other branch has. */
	return sprintf(buf, "error");
#endif
}
/*
 * Set the autosleep target state from user input ("off" disables it).
 */
static ssize_t autosleep_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	suspend_state_t state = decode_state(buf, n);
	int ret;

	/*
	 * decode_state() maps unknown input to PM_SUSPEND_ON, so accept
	 * PM_SUSPEND_ON only when the user literally wrote "off".
	 */
	if (state == PM_SUSPEND_ON &&
	    strcmp(buf, "off") != 0 && strcmp(buf, "off\n") != 0)
		return -EINVAL;

	ret = pm_autosleep_set_state(state);
	return ret ? ret : n;
}
power_attr(autosleep);
#endif /* CONFIG_PM_AUTOSLEEP */
#ifdef CONFIG_PM_WAKELOCKS
/* List user-space wakeup sources; 'true' selects the held (locked) set. */
static ssize_t wake_lock_show(struct kobject *kobj,
			struct kobj_attribute *attr,
			char *buf)
{
	return pm_show_wakelocks(buf, true);
}
/* Hand the wake-lock request string to pm_wake_lock(). */
static ssize_t wake_lock_store(struct kobject *kobj,
			       struct kobj_attribute *attr,
			       const char *buf, size_t n)
{
	int ret = pm_wake_lock(buf);

	if (ret)
		return ret;
	return n;
}
power_attr(wake_lock);
/* List user-space wakeup sources; 'false' selects the inactive set. */
static ssize_t wake_unlock_show(struct kobject *kobj,
			struct kobj_attribute *attr,
			char *buf)
{
	return pm_show_wakelocks(buf, false);
}
/* Hand the wake-unlock request string to pm_wake_unlock(). */
static ssize_t wake_unlock_store(struct kobject *kobj,
				 struct kobj_attribute *attr,
				 const char *buf, size_t n)
{
	int ret = pm_wake_unlock(buf);

	if (ret)
		return ret;
	return n;
}
power_attr(wake_unlock);
#endif /* CONFIG_PM_WAKELOCKS */
#endif /* CONFIG_PM_SLEEP */
#ifdef CONFIG_PM_TRACE
int pm_trace_enabled;
/* Report the pm_trace_enabled flag as 0/1. */
static ssize_t pm_trace_show(struct kobject *kobj, struct kobj_attribute *attr,
			     char *buf)
{
	return sprintf(buf, "%d\n", pm_trace_enabled);
}
/* Enable/disable PM tracing; any non-zero input is normalized to 1. */
static ssize_t
pm_trace_store(struct kobject *kobj, struct kobj_attribute *attr,
	       const char *buf, size_t n)
{
	int val;

	if (sscanf(buf, "%d", &val) != 1)
		return -EINVAL;

	pm_trace_enabled = !!val;
	return n;
}
power_attr(pm_trace);
/* Delegate to show_trace_dev_match(); output bounded by PAGE_SIZE. */
static ssize_t pm_trace_dev_match_show(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       char *buf)
{
	return show_trace_dev_match(buf, PAGE_SIZE);
}
/* Effectively read-only attribute: every write is rejected. */
static ssize_t
pm_trace_dev_match_store(struct kobject *kobj, struct kobj_attribute *attr,
			 const char *buf, size_t n)
{
	return -EINVAL;
}
power_attr(pm_trace_dev_match);
#endif /* CONFIG_PM_TRACE */
#ifdef CONFIG_FREEZER
/* Report the task-freeze timeout (freeze_timeout_msecs, milliseconds). */
static ssize_t pm_freeze_timeout_show(struct kobject *kobj,
				      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%u\n", freeze_timeout_msecs);
}
/* Set the task-freeze timeout (milliseconds) from a decimal string. */
static ssize_t pm_freeze_timeout_store(struct kobject *kobj,
				       struct kobj_attribute *attr,
				       const char *buf, size_t n)
{
	unsigned long msecs;

	if (kstrtoul(buf, 10, &msecs) != 0)
		return -EINVAL;

	freeze_timeout_msecs = msecs;
	return n;
}
power_attr(pm_freeze_timeout);
#endif /* CONFIG_FREEZER*/
/*
 * Attributes exported under /sys/power.  Entries are compiled in
 * conditionally to match the features built into this kernel.
 */
static struct attribute * g[] = {
	&state_attr.attr,
#ifdef CONFIG_PM_TRACE
	&pm_trace_attr.attr,
	&pm_trace_dev_match_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP
	&pm_async_attr.attr,
	&wakeup_count_attr.attr,
#ifdef CONFIG_PM_AUTOSLEEP
	&autosleep_attr.attr,
#endif
#ifdef CONFIG_PM_WAKELOCKS
	&wake_lock_attr.attr,
	&wake_unlock_attr.attr,
#endif
#ifdef CONFIG_PM_DEBUG
	&pm_test_attr.attr,
#endif
#ifdef CONFIG_PM_SLEEP_DEBUG
	&pm_print_times_attr.attr,
#endif
#endif
#ifdef CONFIG_FREEZER
	&pm_freeze_timeout_attr.attr,
#endif
	NULL,
};

static struct attribute_group attr_group = {
	.attrs = g,
};
#ifdef CONFIG_PM_RUNTIME
struct workqueue_struct *pm_wq;
EXPORT_SYMBOL_GPL(pm_wq);
/*
 * Create the PM workqueue.  It is freezable so queued PM work is
 * quiesced across a system suspend.
 */
static int __init pm_start_workqueue(void)
{
	pm_wq = alloc_workqueue("pm", WQ_FREEZABLE, 0);
	if (!pm_wq)
		return -ENOMEM;

	return 0;
}
#else
static inline int pm_start_workqueue(void) { return 0; }
#endif
/*
 * Core PM initialization: workqueue first, then hibernation sizing,
 * then the /sys/power kobject and its attribute group, and finally
 * the print-times and autosleep subsystems.
 */
static int __init pm_init(void)
{
	int error = pm_start_workqueue();
	if (error)
		return error;
	hibernate_image_size_init();
	hibernate_reserved_size_init();
	/* Parent kobject of the whole /sys/power hierarchy. */
	power_kobj = kobject_create_and_add("power", NULL);
	if (!power_kobj)
		return -ENOMEM;
	error = sysfs_create_group(power_kobj, &attr_group);
	if (error)
		return error;
	pm_print_times_init();
	return pm_autosleep_init();
}
core_initcall(pm_init);
| gpl-2.0 |
androidbftab1/bf-kernel-4.2 | kernel/hung_task.c | 809 | 5853 | /*
* Detect Hung Task
*
* kernel/hung_task.c - kernel thread for detecting tasks stuck in D state
*
*/
#include <linux/mm.h>
#include <linux/cpu.h>
#include <linux/nmi.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/lockdep.h>
#include <linux/export.h>
#include <linux/sysctl.h>
#include <linux/utsname.h>
#include <trace/events/sched.h>
/*
* The number of tasks checked:
*/
int __read_mostly sysctl_hung_task_check_count = PID_MAX_LIMIT;
/*
* Limit number of tasks checked in a batch.
*
* This value controls the preemptibility of khungtaskd since preemption
* is disabled during the critical section. It also controls the size of
* the RCU grace period. So it needs to be upper-bound.
*/
#define HUNG_TASK_BATCHING 1024
/*
* Zero means infinite timeout - no checking done:
*/
unsigned long __read_mostly sysctl_hung_task_timeout_secs = CONFIG_DEFAULT_HUNG_TASK_TIMEOUT;
int __read_mostly sysctl_hung_task_warnings = 10;
static int __read_mostly did_panic;
static struct task_struct *watchdog_task;
/*
* Should we panic (and reboot, if panic_timeout= is set) when a
* hung task is detected:
*/
unsigned int __read_mostly sysctl_hung_task_panic =
CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE;
/* Parse the "hung_task_panic=" boot parameter into the sysctl. */
static int __init hung_task_panic_setup(char *str)
{
	int err = kstrtouint(str, 0, &sysctl_hung_task_panic);

	if (err)
		return err;

	return 1;
}
__setup("hung_task_panic=", hung_task_panic_setup);
/* Panic notifier: remember that the system panicked so checks stop. */
static int
hung_task_panic(struct notifier_block *this, unsigned long event, void *ptr)
{
	did_panic = 1;

	return NOTIFY_DONE;
}
static struct notifier_block panic_block = {
.notifier_call = hung_task_panic,
};
/*
 * Decide whether the D-state task @t has gone @timeout seconds without
 * being scheduled and, if so, emit the hung-task report and optionally
 * panic.  Called with RCU read lock held.
 */
static void check_hung_task(struct task_struct *t, unsigned long timeout)
{
	unsigned long switch_count = t->nvcsw + t->nivcsw;

	/*
	 * Ensure the task is not frozen.
	 * Also, skip vfork and any other user process that freezer should skip.
	 */
	if (unlikely(t->flags & (PF_FROZEN | PF_FREEZER_SKIP)))
		return;

	/*
	 * When a freshly created task is scheduled once, changes its state to
	 * TASK_UNINTERRUPTIBLE without having ever been switched out once, it
	 * mustn't be checked.
	 */
	if (unlikely(!switch_count))
		return;

	if (switch_count != t->last_switch_count) {
		/* The task ran since our last pass - not hung. */
		t->last_switch_count = switch_count;
		return;
	}

	trace_sched_process_hang(t);

	/* 0 suppresses warnings entirely; positive values are a budget. */
	if (!sysctl_hung_task_warnings)
		return;

	if (sysctl_hung_task_warnings > 0)
		sysctl_hung_task_warnings--;

	/*
	 * Ok, the task did not get scheduled for more than 2 minutes,
	 * complain:
	 */
	pr_err("INFO: task %s:%d blocked for more than %ld seconds.\n",
		t->comm, t->pid, timeout);
	pr_err(" %s %s %.*s\n",
		print_tainted(), init_utsname()->release,
		(int)strcspn(init_utsname()->version, " "),
		init_utsname()->version);
	pr_err("\"echo 0 > /proc/sys/kernel/hung_task_timeout_secs\""
		" disables this message.\n");
	sched_show_task(t);
	debug_show_held_locks(t);

	touch_nmi_watchdog();

	if (sysctl_hung_task_panic) {
		trigger_all_cpu_backtrace();
		panic("hung_task: blocked tasks");
	}
}
/*
 * To avoid extending the RCU grace period for an unbounded amount of time,
 * periodically exit the critical section and enter a new one.
 *
 * For preemptible RCU it is sufficient to call rcu_read_unlock in order
 * to exit the grace period. For classic RCU, a reschedule is required.
 *
 * Returns true if the task-list walk may continue (both tasks still alive).
 */
static bool rcu_lock_break(struct task_struct *g, struct task_struct *t)
{
	bool can_cont;

	/* Pin both tasks so they cannot be freed while outside RCU. */
	get_task_struct(g);
	get_task_struct(t);
	rcu_read_unlock();
	cond_resched();
	rcu_read_lock();
	/* The walk is only valid if both anchors survived the break. */
	can_cont = pid_alive(g) && pid_alive(t);
	put_task_struct(t);
	put_task_struct(g);

	return can_cont;
}
/*
 * Check whether a TASK_UNINTERRUPTIBLE does not get woken up for
 * a really long time (120 seconds). If that happens, print out
 * a warning.
 */
static void check_hung_uninterruptible_tasks(unsigned long timeout)
{
	int max_count = sysctl_hung_task_check_count;
	int batch_count = HUNG_TASK_BATCHING;
	struct task_struct *g, *t;

	/*
	 * If the system crashed already then all bets are off,
	 * do not report extra hung tasks:
	 */
	if (test_taint(TAINT_DIE) || did_panic)
		return;

	rcu_read_lock();
	for_each_process_thread(g, t) {
		/* Bound the total number of tasks examined per pass. */
		if (!max_count--)
			goto unlock;
		/* Periodically leave RCU so the grace period stays bounded. */
		if (!--batch_count) {
			batch_count = HUNG_TASK_BATCHING;
			if (!rcu_lock_break(g, t))
				goto unlock;
		}
		/* use "==" to skip the TASK_KILLABLE tasks waiting on NFS */
		if (t->state == TASK_UNINTERRUPTIBLE)
			check_hung_task(t, timeout);
	}
 unlock:
	rcu_read_unlock();
}
/* Convert the timeout (seconds) to jiffies; 0 disables the watchdog. */
static unsigned long timeout_jiffies(unsigned long timeout)
{
	if (!timeout)
		return MAX_SCHEDULE_TIMEOUT;

	return timeout * HZ;
}
/*
 * Process updating of timeout sysctl.  On a successful write, kick
 * khungtaskd so the new interval takes effect immediately.
 */
int proc_dohung_task_timeout_secs(struct ctl_table *table, int write,
				  void __user *buffer,
				  size_t *lenp, loff_t *ppos)
{
	int ret = proc_doulongvec_minmax(table, write, buffer, lenp, ppos);

	if (!ret && write)
		wake_up_process(watchdog_task);

	return ret;
}
/* Set when a caller asks the watchdog to skip its next check cycle. */
static atomic_t reset_hung_task = ATOMIC_INIT(0);

/*
 * reset_hung_task_detector - request that the watchdog skip one round
 * of hung-task checking.  The flag is consumed via atomic_xchg() in
 * watchdog().
 */
void reset_hung_task_detector(void)
{
	atomic_set(&reset_hung_task, 1);
}
EXPORT_SYMBOL_GPL(reset_hung_task_detector);
/*
 * kthread which checks for tasks stuck in D state
 */
static int watchdog(void *dummy)
{
	set_user_nice(current, 0);

	for ( ; ; ) {
		unsigned long timeout = sysctl_hung_task_timeout_secs;

		/*
		 * Sleep for the configured period; if the sleep is cut
		 * short (e.g. by the sysctl handler waking us), re-read
		 * the timeout so a new value takes effect immediately.
		 */
		while (schedule_timeout_interruptible(timeout_jiffies(timeout)))
			timeout = sysctl_hung_task_timeout_secs;

		/* A pending reset request skips this round of checking. */
		if (atomic_xchg(&reset_hung_task, 0))
			continue;

		check_hung_uninterruptible_tasks(timeout);
	}

	return 0;
}
static int __init hung_task_init(void)
{
	/* Learn about panics so reporting stops once the system is dead. */
	atomic_notifier_chain_register(&panic_notifier_list, &panic_block);
	/* Start the khungtaskd checker thread. */
	watchdog_task = kthread_run(watchdog, NULL, "khungtaskd");

	return 0;
}
subsys_initcall(hung_task_init);
| gpl-2.0 |
tvall43/linux-stable | drivers/staging/cptm1217/clearpad_tm1217.c | 2089 | 17621 | /*
* clearpad_tm1217.c - Touch Screen driver for Synaptics Clearpad
* TM1217 controller
*
* Copyright (C) 2008 Intel Corp
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; ifnot, write to the Free Software Foundation, Inc.,
* 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
*
* Questions/Comments/Bug fixes to Ramesh Agarwal (ramesh.agarwal@intel.com)
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*/
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/i2c.h>
#include <linux/timer.h>
#include <linux/gpio.h>
#include <linux/hrtimer.h>
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include "cp_tm1217.h"
#define CPTM1217_DEVICE_NAME "cptm1217"
#define CPTM1217_DRIVER_NAME CPTM1217_DEVICE_NAME
#define MAX_TOUCH_SUPPORTED 2
#define TOUCH_SUPPORTED 1
#define SAMPLING_FREQ 80 /* Frequency in HZ */
#define DELAY_BTWIN_SAMPLE (1000 / SAMPLING_FREQ)
#define WAIT_FOR_RESPONSE 5 /* 5msec just works */
#define MAX_RETRIES 5 /* As above */
#define INCREMENTAL_DELAY 5 /* As above */
/* Regster Definitions */
#define TMA1217_DEV_STATUS 0x13 /* Device Status */
#define TMA1217_INT_STATUS 0x14 /* Interrupt Status */
/* Controller can detect up to 2 possible finger touches.
* Each finger touch provides 12 bit X Y co-ordinates, the values are split
* across 2 registers, and an 8 bit Z value */
#define TMA1217_FINGER_STATE 0x18 /* Finger State */
#define TMA1217_FINGER1_X_HIGHER8 0x19 /* Higher 8 bit of X coordinate */
#define TMA1217_FINGER1_Y_HIGHER8 0x1A /* Higher 8 bit of Y coordinate */
#define TMA1217_FINGER1_XY_LOWER4 0x1B /* Lower 4 bits of X and Y */
#define TMA1217_FINGER1_Z_VALUE 0x1D /* 8 bit Z value for finger 1 */
#define TMA1217_FINGER2_X_HIGHER8 0x1E /* Higher 8 bit of X coordinate */
#define TMA1217_FINGER2_Y_HIGHER8 0x1F /* Higher 8 bit of Y coordinate */
#define TMA1217_FINGER2_XY_LOWER4 0x20 /* Lower 4 bits of X and Y */
#define TMA1217_FINGER2_Z_VALUE 0x22 /* 8 bit Z value for finger 2 */
#define TMA1217_DEVICE_CTRL 0x23 /* Device Control */
#define TMA1217_INTERRUPT_ENABLE 0x24 /* Interrupt Enable */
#define TMA1217_REPORT_MODE 0x2B /* Reporting Mode */
#define TMA1217_MAX_X_LOWER8 0x31 /* Bit 0-7 for Max X */
#define TMA1217_MAX_X_HIGHER4 0x32 /* Bit 8-11 for Max X */
#define TMA1217_MAX_Y_LOWER8 0x33 /* Bit 0-7 for Max Y */
#define TMA1217_MAX_Y_HIGHER4 0x34 /* Bit 8-11 for Max Y */
#define TMA1217_DEVICE_CMD_RESET 0x67 /* Device CMD reg for reset */
#define TMA1217_DEVICE_CMD_REZERO 0x69 /* Device CMD reg for rezero */
#define TMA1217_MANUFACTURER_ID 0x73 /* Manufacturer Id */
#define TMA1217_PRODUCT_FAMILY 0x75 /* Product Family */
#define TMA1217_FIRMWARE_REVISION 0x76 /* Firmware Revision */
#define TMA1217_SERIAL_NO_HIGH 0x7C /* Bit 8-15 of device serial no. */
#define TMA1217_SERIAL_NO_LOW 0x7D /* Bit 0-7 of device serial no. */
#define TMA1217_PRODUCT_ID_START 0x7E /* Start address for 10 byte ID */
#define TMA1217_DEVICE_CAPABILITY 0x8B /* Reporting capability */
/*
* The touch position structure.
*/
struct touch_state {
int x;
int y;
bool button;
};
/* Device Specific info given by the controller */
struct cp_dev_info {
u16 maxX;
u16 maxY;
};
/* Vendor related info given by the controller */
struct cp_vendor_info {
u8 vendor_id;
u8 product_family;
u8 firmware_rev;
u16 serial_no;
};
/*
* Private structure to store the device details
*/
struct cp_tm1217_device {
struct i2c_client *client;
struct device *dev;
struct cp_vendor_info vinfo;
struct cp_dev_info dinfo;
struct input_dev_info {
char phys[32];
char name[128];
struct input_dev *input;
struct touch_state touch;
} cp_input_info[MAX_TOUCH_SUPPORTED];
int thread_running;
struct mutex thread_mutex;
int gpio;
};
/* The following functions are used to read/write registers on the device
 * as per the RMI protocol. Technically, a page select should be written
 * before doing read/write but since the register offsets are below 0xFF
 * we can use the default value of page which is 0x00
 */

/*
 * Read @size bytes from the register whose address is in req[0]; the
 * data lands in req[1..size].  Returns the number of bytes received
 * (== @size on success), or the i2c_master_send() result if writing
 * the register address failed.
 */
static int cp_tm1217_read(struct cp_tm1217_device *ts,
			  u8 *req, int size)
{
	int i, retval;

	/* Send the address */
	retval = i2c_master_send(ts->client, &req[0], 1);
	if (retval != 1) {
		dev_err(ts->dev, "cp_tm1217: I2C send failed\n");
		return retval;
	}
	msleep(WAIT_FOR_RESPONSE);
	/* Retry the receive up to MAX_RETRIES times, with a fixed delay
	   between attempts. */
	for (i = 0; i < MAX_RETRIES; i++) {
		retval = i2c_master_recv(ts->client, &req[1], size);
		if (retval == size) {
			break;
		} else {
			msleep(INCREMENTAL_DELAY);
			dev_dbg(ts->dev, "cp_tm1217: Retry count is %d\n", i);
		}
	}
	if (retval != size)
		dev_err(ts->dev, "cp_tm1217: Read from device failed\n");

	return retval;
}
/*
 * Write @size data bytes to the register whose address is in req[0]
 * (req holds address + data, so size+1 bytes are sent).  Returns @size
 * on success or the i2c_master_send() result on failure.
 */
static int cp_tm1217_write(struct cp_tm1217_device *ts,
			   u8 *req, int size)
{
	int retval;

	/* Send the address and the data to be written */
	retval = i2c_master_send(ts->client, &req[0], size + 1);
	if (retval != size + 1) {
		dev_err(ts->dev, "cp_tm1217: I2C write failed: %d\n", retval);
		return retval;
	}
	/* Wait for the write to complete. TBD why this is required */
	msleep(WAIT_FOR_RESPONSE);

	return size;
}
/* Disable all controller interrupts by clearing the enable register. */
static int cp_tm1217_mask_interrupt(struct cp_tm1217_device *ts)
{
	u8 req[2] = { TMA1217_INTERRUPT_ENABLE, 0x0 };

	if (cp_tm1217_write(ts, req, 1) != 1)
		return -EIO;

	return 0;
}
/* Re-enable the controller interrupts used by this driver (mask 0xa). */
static int cp_tm1217_unmask_interrupt(struct cp_tm1217_device *ts)
{
	u8 req[2] = { TMA1217_INTERRUPT_ENABLE, 0xa };

	if (cp_tm1217_write(ts, req, 1) != 1)
		return -EIO;

	return 0;
}
/*
 * Read finger @index's coordinate registers and report the position as
 * ABS_X/ABS_Y events on that finger's input device.
 */
static void process_touch(struct cp_tm1217_device *ts, int index)
{
	int retval;
	struct input_dev_info *input_info =
		(struct input_dev_info *)&ts->cp_input_info[index];
	u8 xy_data[6];

	/* Registers for finger 1 and finger 2 have the same layout. */
	if (index == 0)
		xy_data[0] = TMA1217_FINGER1_X_HIGHER8;
	else
		xy_data[0] = TMA1217_FINGER2_X_HIGHER8;

	retval = cp_tm1217_read(ts, xy_data, 5);
	if (retval < 5) {
		dev_err(ts->dev, "cp_tm1217: XY read from device failed\n");
		return;
	}

	/* Note: Currently not using the Z values but may be required in
	   the future. */
	/* 12-bit coordinates: 8 high bits per axis plus a shared register
	   carrying both low nibbles. */
	input_info->touch.x = (xy_data[1] << 4)
		| (xy_data[3] & 0x0F);
	input_info->touch.y = (xy_data[2] << 4)
		| ((xy_data[3] & 0xF0) >> 4);

	input_report_abs(input_info->input, ABS_X, input_info->touch.x);
	input_report_abs(input_info->input, ABS_Y, input_info->touch.y);
	input_sync(input_info->input);
}
/*
 * Poll the finger-state register, emitting touch/position/release
 * events, until no finger remains down.
 */
static void cp_tm1217_get_data(struct cp_tm1217_device *ts)
{
	u8 req[2];
	int retval, i, finger_touched = 0;

	do {
		req[0] = TMA1217_FINGER_STATE;
		retval = cp_tm1217_read(ts, req, 1);
		if (retval != 1) {
			dev_err(ts->dev,
				"cp_tm1217: Read from device failed\n");
			/*
			 * NOTE(review): while a finger was last seen down,
			 * this retries forever on a persistent I2C fault;
			 * consider a bounded retry count.
			 */
			continue;
		}
		finger_touched = 0;
		/* Start sampling until the pressure is below
		   threshold */
		for (i = 0; i < TOUCH_SUPPORTED; i++) {
			/* Two state bits per finger; non-zero means down. */
			if (req[1] & 0x3) {
				finger_touched++;
				if (ts->cp_input_info[i].touch.button == 0) {
					/* send the button touch event */
					input_report_key(
						ts->cp_input_info[i].input,
						BTN_TOUCH, 1);
					ts->cp_input_info[i].touch.button = 1;
				}
				process_touch(ts, i);
			} else {
				if (ts->cp_input_info[i].touch.button == 1) {
					/* send the button release event */
					input_report_key(
						ts->cp_input_info[i].input,
						BTN_TOUCH, 0);
					input_sync(ts->cp_input_info[i].input);
					ts->cp_input_info[i].touch.button = 0;
				}
			}
			/* Shift to the next finger's pair of state bits. */
			req[1] = req[1] >> 2;
		}
		msleep(DELAY_BTWIN_SAMPLE);
	} while (finger_touched > 0);
}
/*
 * Threaded IRQ handler: identify the interrupt cause and, when touch
 * data is pending, sample the panel until every finger lifts.
 */
static irqreturn_t cp_tm1217_sample_thread(int irq, void *handle)
{
	struct cp_tm1217_device *ts = (struct cp_tm1217_device *) handle;
	u8 req[2];
	int retval;

	/* Check if another thread is already running */
	mutex_lock(&ts->thread_mutex);
	if (ts->thread_running == 1) {
		mutex_unlock(&ts->thread_mutex);
		return IRQ_HANDLED;
	} else {
		ts->thread_running = 1;
		mutex_unlock(&ts->thread_mutex);
	}

	/* Mask the interrupts */
	retval = cp_tm1217_mask_interrupt(ts);

	/* Read the Interrupt Status register to find the cause of the
	   Interrupt */
	req[0] = TMA1217_INT_STATUS;
	retval = cp_tm1217_read(ts, req, 1);
	if (retval != 1)
		goto exit_thread;

	/* Bit 3 set presumably indicates touch data - only then sample. */
	if (!(req[1] & 0x8))
		goto exit_thread;

	cp_tm1217_get_data(ts);

exit_thread:
	/* Unmask the interrupts before going to sleep */
	retval = cp_tm1217_unmask_interrupt(ts);

	mutex_lock(&ts->thread_mutex);
	ts->thread_running = 0;
	mutex_unlock(&ts->thread_mutex);

	return IRQ_HANDLED;
}
/*
 * Read one 8-bit register; returns its value, or 0 if the transfer
 * failed.  Errors are tolerated because this data is informational
 * only (the original code likewise ignored the read return values,
 * but could store stack garbage on failure - this returns 0 instead).
 */
static u8 cp_tm1217_read_reg(struct cp_tm1217_device *ts, u8 reg)
{
	u8 req[2];

	req[0] = reg;
	req[1] = 0;
	cp_tm1217_read(ts, req, 1);
	return req[1];
}

/*
 * Cache vendor identification and the panel's maximum coordinates
 * from the controller.  Always returns 0: every field is non-critical.
 */
static int cp_tm1217_init_data(struct cp_tm1217_device *ts)
{
	u8 hi, lo;

	ts->vinfo.vendor_id =
		cp_tm1217_read_reg(ts, TMA1217_MANUFACTURER_ID);
	ts->vinfo.product_family =
		cp_tm1217_read_reg(ts, TMA1217_PRODUCT_FAMILY);
	ts->vinfo.firmware_rev =
		cp_tm1217_read_reg(ts, TMA1217_FIRMWARE_REVISION);

	hi = cp_tm1217_read_reg(ts, TMA1217_SERIAL_NO_HIGH);
	lo = cp_tm1217_read_reg(ts, TMA1217_SERIAL_NO_LOW);
	ts->vinfo.serial_no = (hi << 8) | lo;

	/* Maximum coordinates are 12-bit values split over two registers. */
	hi = cp_tm1217_read_reg(ts, TMA1217_MAX_X_HIGHER4);
	lo = cp_tm1217_read_reg(ts, TMA1217_MAX_X_LOWER8);
	ts->dinfo.maxX = ((hi & 0xF) << 8) | lo;

	hi = cp_tm1217_read_reg(ts, TMA1217_MAX_Y_HIGHER4);
	lo = cp_tm1217_read_reg(ts, TMA1217_MAX_Y_LOWER8);
	ts->dinfo.maxY = ((hi & 0xF) << 8) | lo;

	return 0;
}
/*
 * Set up a GPIO for use as the interrupt. We can't simply do this at
 * boot time because the GPIO drivers themselves may not be around at
 * boot/firmware set up time to do the work. Instead defer it to driver
 * detection.
 *
 * Returns the IRQ number on success or a negative errno.  On any
 * failure after gpio_request() succeeds, the GPIO is released.
 */
static int cp_tm1217_setup_gpio_irq(struct cp_tm1217_device *ts)
{
	int retval;

	/* Hook up the irq handler */
	retval = gpio_request(ts->gpio, "cp_tm1217_touch");
	if (retval < 0) {
		dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
			retval);
		return retval;
	}

	retval = gpio_direction_input(ts->gpio);
	if (retval < 0) {
		dev_err(ts->dev,
			"cp_tm1217: GPIO direction configuration failed, error %d\n",
			retval);
		gpio_free(ts->gpio);
		return retval;
	}

	retval = gpio_to_irq(ts->gpio);
	if (retval < 0) {
		dev_err(ts->dev,
			"cp_tm1217: GPIO to IRQ failed, error %d\n", retval);
		gpio_free(ts->gpio);
		/*
		 * Fix: the original fell through to the success dev_dbg
		 * below after freeing the GPIO; return the error here.
		 */
		return retval;
	}
	dev_dbg(ts->dev,
		"cp_tm1217: Got IRQ number is %d for GPIO %d\n",
		retval, ts->gpio);
	return retval;
}
/*
 * Probe: reset and configure the controller, register one input device
 * per supported touch, and hook up the (GPIO- or platform-provided)
 * interrupt.
 *
 * Fix versus the original: the failure path called input_free_device()
 * after input_unregister_device().  Unregistering drops the final
 * reference and frees the device, so the extra free was a double free;
 * input_free_device() is only valid for devices that were never
 * registered (see the input core API documentation).
 */
static int cp_tm1217_probe(struct i2c_client *client,
			   const struct i2c_device_id *id)
{
	struct cp_tm1217_device *ts;
	struct input_dev *input_dev;
	struct input_dev_info *input_info;
	struct cp_tm1217_platform_data *pdata;
	u8 req[2];
	int i, retval;

	/* No pdata is fine - we then use "normal" IRQ mode */
	pdata = client->dev.platform_data;

	ts = kzalloc(sizeof(struct cp_tm1217_device), GFP_KERNEL);
	if (!ts)
		return -ENOMEM;

	ts->client = client;
	ts->dev = &client->dev;
	i2c_set_clientdata(client, ts);

	ts->thread_running = 0;
	mutex_init(&ts->thread_mutex);

	/* Reset the Controller */
	req[0] = TMA1217_DEVICE_CMD_RESET;
	req[1] = 0x1;
	retval = cp_tm1217_write(ts, req, 1);
	if (retval != 1) {
		dev_err(ts->dev, "cp_tm1217: Controller reset failed\n");
		kfree(ts);
		return -EIO;
	}

	/* Clear up the interrupt status from reset. */
	req[0] = TMA1217_INT_STATUS;
	retval = cp_tm1217_read(ts, req, 1);

	/* Mask all the interrupts */
	retval = cp_tm1217_mask_interrupt(ts);

	/* Read the controller information */
	cp_tm1217_init_data(ts);

	/* The following code will register multiple event devices when
	   multi-pointer is enabled, the code has not been tested
	   with MPX */
	for (i = 0; i < TOUCH_SUPPORTED; i++) {
		input_dev = input_allocate_device();
		if (input_dev == NULL) {
			dev_err(ts->dev,
				"cp_tm1217:Input Device Struct alloc failed\n");
			retval = -ENOMEM;
			goto fail;
		}
		input_info = &ts->cp_input_info[i];
		snprintf(input_info->name, sizeof(input_info->name),
			 "cp_tm1217_touchscreen_%d", i);
		input_dev->name = input_info->name;
		snprintf(input_info->phys, sizeof(input_info->phys),
			 "%s/input%d", dev_name(&client->dev), i);

		input_dev->phys = input_info->phys;
		input_dev->id.bustype = BUS_I2C;

		input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS);
		input_dev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH);

		input_set_abs_params(input_dev, ABS_X, 0, ts->dinfo.maxX, 0, 0);
		input_set_abs_params(input_dev, ABS_Y, 0, ts->dinfo.maxY, 0, 0);

		retval = input_register_device(input_dev);
		if (retval) {
			dev_err(ts->dev,
				"Input dev registration failed for %s\n",
				input_dev->name);
			/* Never registered, so freeing is correct here. */
			input_free_device(input_dev);
			goto fail;
		}
		input_info->input = input_dev;
	}

	/* Setup the reporting mode to send an interrupt only when
	   finger arrives or departs. */
	req[0] = TMA1217_REPORT_MODE;
	req[1] = 0x02;
	retval = cp_tm1217_write(ts, req, 1);

	/* Setup the device to no sleep mode for now and make it configured */
	req[0] = TMA1217_DEVICE_CTRL;
	req[1] = 0x84;
	retval = cp_tm1217_write(ts, req, 1);

	/* Check for the status of the device */
	req[0] = TMA1217_DEV_STATUS;
	retval = cp_tm1217_read(ts, req, 1);
	if (req[1] != 0) {
		dev_err(ts->dev,
			"cp_tm1217: Device Status 0x%x != 0: config failed\n",
			req[1]);
		retval = -EIO;
		goto fail;
	}

	if (pdata && pdata->gpio) {
		ts->gpio = pdata->gpio;
		retval = cp_tm1217_setup_gpio_irq(ts);
	} else
		retval = client->irq;

	if (retval < 0) {
		dev_err(ts->dev, "cp_tm1217: GPIO request failed error %d\n",
			retval);
		goto fail;
	}

	client->irq = retval;

	retval = request_threaded_irq(client->irq,
		NULL, cp_tm1217_sample_thread,
		IRQF_TRIGGER_FALLING, "cp_tm1217_touch", ts);
	if (retval < 0) {
		dev_err(ts->dev, "cp_tm1217: Request IRQ error %d\n", retval);
		goto fail_gpio;
	}

	/* Unmask the interrupts */
	retval = cp_tm1217_unmask_interrupt(ts);
	if (retval == 0)
		return 0;

	free_irq(client->irq, ts);

fail_gpio:
	if (ts->gpio)
		gpio_free(ts->gpio);
fail:
	/*
	 * Clean up before returning failure.  Registered devices are
	 * released solely via input_unregister_device(); see above.
	 */
	for (i = 0; i < TOUCH_SUPPORTED; i++) {
		if (ts->cp_input_info[i].input)
			input_unregister_device(ts->cp_input_info[i].input);
	}
	kfree(ts);
	return retval;
}
#ifdef CONFIG_PM_SLEEP
/*
 * cp_tm1217 suspend: put the controller into sleep mode and, when the
 * device may wake the system, arm its IRQ as a wakeup source.
 */
static int cp_tm1217_suspend(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct cp_tm1217_device *ts = i2c_get_clientdata(client);
	u8 req[2];
	int retval;

	/* Put the controller to sleep: read-modify-write the low 3 bits
	   of the device control register, setting mode 0x1. */
	req[0] = TMA1217_DEVICE_CTRL;
	retval = cp_tm1217_read(ts, req, 1);
	req[1] = (req[1] & 0xF8) | 0x1;
	retval = cp_tm1217_write(ts, req, 1);

	if (device_may_wakeup(&client->dev))
		enable_irq_wake(client->irq);

	return 0;
}
/*
 * cp_tm1217_resume: wake the controller and re-apply the runtime
 * register configuration.
 */
static int cp_tm1217_resume(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct cp_tm1217_device *ts = i2c_get_clientdata(client);
	u8 req[2];
	int retval;

	/* Take the controller out of sleep */
	req[0] = TMA1217_DEVICE_CTRL;
	retval = cp_tm1217_read(ts, req, 1);
	req[1] = (req[1] & 0xF8) | 0x4;
	retval = cp_tm1217_write(ts, req, 1);

	/* Restore the register settings since the power to the controller
	   could have been cut off */

	/* Setup the reporting mode to send an interrupt only when
	   finger arrives or departs. */
	req[0] = TMA1217_REPORT_MODE;
	req[1] = 0x02;
	retval = cp_tm1217_write(ts, req, 1);

	/* Setup the device to no sleep mode for now and make it configured */
	req[0] = TMA1217_DEVICE_CTRL;
	req[1] = 0x84;
	retval = cp_tm1217_write(ts, req, 1);

	/* Setup the interrupt mask */
	retval = cp_tm1217_unmask_interrupt(ts);

	if (device_may_wakeup(&client->dev))
		disable_irq_wake(client->irq);

	return 0;
}
#endif
static SIMPLE_DEV_PM_OPS(cp_tm1217_pm_ops, cp_tm1217_suspend,
cp_tm1217_resume);
/*
 * cp_tm1217_remove: tear down in reverse probe order - IRQ, GPIO,
 * input devices, then the private structure.
 */
static int cp_tm1217_remove(struct i2c_client *client)
{
	struct cp_tm1217_device *ts = i2c_get_clientdata(client);
	int i;

	free_irq(client->irq, ts);
	if (ts->gpio)
		gpio_free(ts->gpio);
	/* Unregistering drops the last reference; no explicit free needed. */
	for (i = 0; i < TOUCH_SUPPORTED; i++)
		input_unregister_device(ts->cp_input_info[i].input);
	kfree(ts);

	return 0;
}
static struct i2c_device_id cp_tm1217_idtable[] = {
{ CPTM1217_DEVICE_NAME, 0 },
{ }
};
MODULE_DEVICE_TABLE(i2c, cp_tm1217_idtable);
static struct i2c_driver cp_tm1217_driver = {
.driver = {
.owner = THIS_MODULE,
.name = CPTM1217_DRIVER_NAME,
.pm = &cp_tm1217_pm_ops,
},
.id_table = cp_tm1217_idtable,
.probe = cp_tm1217_probe,
.remove = cp_tm1217_remove,
};
module_i2c_driver(cp_tm1217_driver);
MODULE_AUTHOR("Ramesh Agarwal <ramesh.agarwal@intel.com>");
MODULE_DESCRIPTION("Synaptics TM1217 TouchScreen Driver");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
embeddedarm/linux-3.10-ts4800 | drivers/staging/comedi/drivers/dt2814.c | 2089 | 7911 | /*
comedi/drivers/dt2814.c
Hardware driver for Data Translation DT2814
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 1998 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: dt2814
Description: Data Translation DT2814
Author: ds
Status: complete
Devices: [Data Translation] DT2814 (dt2814)
Configuration options:
[0] - I/O port base address
[1] - IRQ
This card has 16 analog inputs multiplexed onto a 12 bit ADC. There
is a minimally useful onboard clock. The base frequency for the
clock is selected by jumpers, and the clock divider can be selected
via programmed I/O. Unfortunately, the clock divider can only be
a power of 10, from 1 to 10^7, of which only 3 or 4 are useful. In
addition, the clock does not seem to be very accurate.
*/
#include <linux/interrupt.h>
#include "../comedidev.h"
#include <linux/ioport.h>
#include <linux/delay.h>
#include "comedi_fc.h"
#define DT2814_SIZE 2
#define DT2814_CSR 0
#define DT2814_DATA 1
/*
* flags
*/
#define DT2814_FINISH 0x80
#define DT2814_ERR 0x40
#define DT2814_BUSY 0x20
#define DT2814_ENB 0x10
#define DT2814_CHANMASK 0x0f
struct dt2814_private {
int ntrig;
int curadchan;
};
#define DT2814_TIMEOUT 10
#define DT2814_MAX_SPEED 100000 /* Arbitrary 10 khz limit */
/*
 * Single-shot analog input: start one conversion per requested sample,
 * poll for completion, and assemble the 12-bit result.
 *
 * Fix versus the original: the per-iteration KERN_INFO printk inside
 * the polling loop logged the status register on *every* poll pass,
 * flooding the log even on successful conversions; it was clearly
 * debug leftover and has been removed.  The timeout path still logs.
 */
static int dt2814_ai_insn_read(struct comedi_device *dev,
			       struct comedi_subdevice *s,
			       struct comedi_insn *insn, unsigned int *data)
{
	int n, i, hi, lo;
	int chan;
	int status = 0;

	for (n = 0; n < insn->n; n++) {
		chan = CR_CHAN(insn->chanspec);

		/* Writing the channel number starts a conversion. */
		outb(chan, dev->iobase + DT2814_CSR);

		for (i = 0; i < DT2814_TIMEOUT; i++) {
			status = inb(dev->iobase + DT2814_CSR);
			udelay(10);
			if (status & DT2814_FINISH)
				break;
		}
		if (i >= DT2814_TIMEOUT) {
			printk(KERN_INFO "dt2814: status: %02x\n", status);
			return -ETIMEDOUT;
		}

		/* 12-bit sample: 8 high bits, then 4 low bits in the top
		   nibble of the second data byte. */
		hi = inb(dev->iobase + DT2814_DATA);
		lo = inb(dev->iobase + DT2814_DATA);

		data[n] = (hi << 4) | (lo >> 4);
	}

	return n;
}
/*
 * Map a requested scan period (*ns, nanoseconds) onto the board's
 * power-of-10 clock divider.  Rounds *ns to the nearest supported
 * period (cut-over at the midpoint, 5.5 * f) and returns the divider
 * index (0..8).
 *
 * Fix versus the original: the comparison and the running period were
 * computed in 32-bit unsigned arithmetic, so `2 * (*ns)`, `f * 11` and
 * `f *= 10` all wrapped for large inputs (f reaches 10^11), yielding a
 * bogus index/period.  64-bit intermediates avoid the wrap while
 * preserving the result for all in-range inputs.
 */
static int dt2814_ns_to_timer(unsigned int *ns, unsigned int flags)
{
	int i;
	unsigned long long f;

	/* XXX ignores flags */

	f = 10000;		/* base period: 10 us, in ns */
	for (i = 0; i < 8; i++) {
		if (2 * (unsigned long long)*ns < f * 11)
			break;
		f *= 10;
	}
	*ns = f;
	return i;
}
/*
 * Validate (and fix up) an asynchronous acquisition command, following
 * the standard multi-step comedi cmdtest protocol.  Returns 0 when the
 * command is acceptable, or the step number that failed.
 */
static int dt2814_ai_cmdtest(struct comedi_device *dev,
			     struct comedi_subdevice *s, struct comedi_cmd *cmd)
{
	int err = 0;
	int tmp;

	/* Step 1 : check if triggers are trivially valid */

	err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW);
	err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER);
	err |= cfc_check_trigger_src(&cmd->convert_src, TRIG_NOW);
	err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
	err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);

	if (err)
		return 1;

	/* Step 2a : make sure trigger sources are unique */

	err |= cfc_check_trigger_is_unique(cmd->stop_src);

	/* Step 2b : and mutually compatible */

	if (err)
		return 2;

	/* Step 3: check if arguments are trivially valid */

	err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);

	/* Scan period: at most 1 s, at least DT2814_MAX_SPEED ns (10 kHz). */
	err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg, 1000000000);
	err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
					 DT2814_MAX_SPEED);

	err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->chanlist_len);

	if (cmd->stop_src == TRIG_COUNT)
		err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 2);
	else	/* TRIG_NONE */
		err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);

	if (err)
		return 3;

	/* step 4: fix up any arguments */

	/* Snap the scan period to the nearest supported clock divider. */
	tmp = cmd->scan_begin_arg;
	dt2814_ns_to_timer(&cmd->scan_begin_arg, cmd->flags & TRIG_ROUND_MASK);
	if (tmp != cmd->scan_begin_arg)
		err++;

	if (err)
		return 4;

	return 0;
}
/*
 * Arm a timed acquisition: program the channel, the clock-divider
 * index (bits 5-7 of the CSR) and the enable bit.
 */
static int dt2814_ai_cmd(struct comedi_device *dev, struct comedi_subdevice *s)
{
	struct dt2814_private *devpriv = dev->private;
	struct comedi_cmd *cmd = &s->async->cmd;
	int chan;
	int trigvar;

	trigvar =
	    dt2814_ns_to_timer(&cmd->scan_begin_arg,
			       cmd->flags & TRIG_ROUND_MASK);

	chan = CR_CHAN(cmd->chanlist[0]);

	/* Remaining scan count; decremented in dt2814_interrupt(). */
	devpriv->ntrig = cmd->stop_arg;
	outb(chan | DT2814_ENB | (trigvar << 5), dev->iobase + DT2814_CSR);

	return 0;
}
/*
 * IRQ handler for timed acquisition: read one sample per interrupt and
 * shut the board down once the requested number of scans is reached.
 */
static irqreturn_t dt2814_interrupt(int irq, void *d)
{
	int lo, hi;
	struct comedi_device *dev = d;
	struct dt2814_private *devpriv = dev->private;
	struct comedi_subdevice *s;
	int data;

	if (!dev->attached) {
		comedi_error(dev, "spurious interrupt");
		return IRQ_HANDLED;
	}

	s = &dev->subdevices[0];

	hi = inb(dev->iobase + DT2814_DATA);
	lo = inb(dev->iobase + DT2814_DATA);

	data = (hi << 4) | (lo >> 4);
	/* NOTE(review): 'data' is assembled but never written to the async
	   buffer here - presumably incomplete streaming support; confirm. */

	if (!(--devpriv->ntrig)) {
		int i;

		/* Disable timed mode. */
		outb(0, dev->iobase + DT2814_CSR);
		/* note: turning off timed mode triggers another
		   sample. */

		for (i = 0; i < DT2814_TIMEOUT; i++) {
			if (inb(dev->iobase + DT2814_CSR) & DT2814_FINISH)
				break;
		}
		/* Drain the extra conversion triggered by the mode change. */
		inb(dev->iobase + DT2814_DATA);
		inb(dev->iobase + DT2814_DATA);

		s->async->events |= COMEDI_CB_EOA;
	}
	comedi_event(dev, s);
	return IRQ_HANDLED;
}
/*
 * Attach routine: reserve the I/O region, reset the board, allocate the
 * driver private data, optionally hook the IRQ, and describe the single
 * analog-input subdevice.
 *
 * Fix: the original allocated dev->private only AFTER request_irq(), so
 * a spurious interrupt arriving in that window made dt2814_interrupt()
 * dereference a NULL dev->private.  The allocation is now done before
 * the IRQ is registered.  The long-dead "#if 0" irq-probe code has been
 * removed.
 */
static int dt2814_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
	struct dt2814_private *devpriv;
	int i, irq;
	int ret;
	struct comedi_subdevice *s;

	ret = comedi_request_region(dev, it->options[0], DT2814_SIZE);
	if (ret)
		return ret;

	/* Reset the board and make sure it came back clean. */
	outb(0, dev->iobase + DT2814_CSR);
	udelay(100);
	if (inb(dev->iobase + DT2814_CSR) & DT2814_ERR) {
		printk(KERN_ERR "reset error (fatal)\n");
		return -EIO;
	}
	/* Drain any stale conversion left in the data register. */
	i = inb(dev->iobase + DT2814_DATA);
	i = inb(dev->iobase + DT2814_DATA);

	/*
	 * Allocate the private data BEFORE registering the interrupt
	 * handler: dt2814_interrupt() dereferences dev->private.
	 */
	devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
	if (!devpriv)
		return -ENOMEM;
	dev->private = devpriv;

	irq = it->options[1];
	dev->irq = 0;
	if (irq > 0) {
		if (request_irq(irq, dt2814_interrupt, 0, "dt2814", dev)) {
			printk(KERN_WARNING "(irq %d unavailable)\n", irq);
		} else {
			printk(KERN_INFO "( irq = %d )\n", irq);
			dev->irq = irq;
		}
	} else if (irq == 0) {
		printk(KERN_WARNING "(no irq)\n");
	} else {
		printk(KERN_WARNING "(irq probe not implemented)\n");
	}

	ret = comedi_alloc_subdevices(dev, 1);
	if (ret)
		return ret;

	/* Single AI subdevice supporting insn reads and async commands. */
	s = &dev->subdevices[0];
	dev->read_subdev = s;
	s->type = COMEDI_SUBD_AI;
	s->subdev_flags = SDF_READABLE | SDF_GROUND | SDF_CMD_READ;
	s->n_chan = 16;		/* XXX */
	s->len_chanlist = 1;
	s->insn_read = dt2814_ai_insn_read;
	s->do_cmd = dt2814_ai_cmd;
	s->do_cmdtest = dt2814_ai_cmdtest;
	s->maxdata = 0xfff;
	s->range_table = &range_unknown;	/* XXX */

	return 0;
}
/* Legacy (manually-configured ISA) comedi driver registration. */
static struct comedi_driver dt2814_driver = {
	.driver_name = "dt2814",
	.module = THIS_MODULE,
	.attach = dt2814_attach,
	.detach = comedi_legacy_detach,	/* releases region/irq for us */
};
module_comedi_driver(dt2814_driver);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Altaf-Mahdi/android_kernel_oneplus_msm8994 | drivers/of/of_i2c.c | 2089 | 2934 | /*
* OF helpers for the I2C API
*
* Copyright (c) 2008 Jochen Friedrich <jochen@scram.de>
*
* Based on a previous patch from Jon Smirl <jonsmirl@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*/
#include <linux/i2c.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <linux/of_i2c.h>
#include <linux/of_irq.h>
#include <linux/module.h>
/*
 * Walk the device-tree children of @adap's node and register an
 * i2c_client for each one.  Nodes with a bad "compatible"/modalias, a
 * missing or short "reg" property, or an address above 10 bits are
 * skipped with an error message; a failed registration releases the
 * node reference and irq mapping taken for it.
 */
void of_i2c_register_devices(struct i2c_adapter *adap)
{
	void *result;
	struct device_node *node;

	/* Only register child devices if the adapter has a node pointer set */
	if (!adap->dev.of_node)
		return;

	dev_dbg(&adap->dev, "of_i2c: walking child nodes\n");

	for_each_available_child_of_node(adap->dev.of_node, node) {
		struct i2c_board_info info = {};
		struct dev_archdata dev_ad = {};
		const __be32 *addr;
		int len;

		dev_dbg(&adap->dev, "of_i2c: register %s\n", node->full_name);

		/* Derive the i2c device type name from "compatible". */
		if (of_modalias_node(node, info.type, sizeof(info.type)) < 0) {
			dev_err(&adap->dev, "of_i2c: modalias failure on %s\n",
				node->full_name);
			continue;
		}

		addr = of_get_property(node, "reg", &len);
		if (!addr || (len < sizeof(int))) {
			dev_err(&adap->dev, "of_i2c: invalid reg on %s\n",
				node->full_name);
			continue;
		}

		info.addr = be32_to_cpup(addr);
		/* I2C addresses (even 10-bit ones) must fit in 10 bits. */
		if (info.addr > (1 << 10) - 1) {
			dev_err(&adap->dev, "of_i2c: invalid addr=%x on %s\n",
				info.addr, node->full_name);
			continue;
		}

		info.irq = irq_of_parse_and_map(node, 0);
		info.of_node = of_node_get(node);
		info.archdata = &dev_ad;

		if (of_get_property(node, "wakeup-source", NULL))
			info.flags |= I2C_CLIENT_WAKE;

		request_module("%s%s", I2C_MODULE_PREFIX, info.type);

		result = i2c_new_device(adap, &info);
		if (result == NULL) {
			dev_err(&adap->dev, "of_i2c: Failure registering %s\n",
				node->full_name);
			/* Undo the reference and irq mapping taken above. */
			of_node_put(node);
			irq_dispose_mapping(info.irq);
			continue;
		}
	}
}
EXPORT_SYMBOL(of_i2c_register_devices);
/* bus_find_device() predicate: true when @dev's OF node is @data. */
static int of_dev_node_match(struct device *dev, void *data)
{
	struct device_node *wanted = data;

	return dev->of_node == wanted;
}
/* must call put_device() when done with returned i2c_client device */
struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
{
	struct device *found;

	found = bus_find_device(&i2c_bus_type, NULL, node,
				of_dev_node_match);
	return found ? i2c_verify_client(found) : NULL;
}
EXPORT_SYMBOL(of_find_i2c_device_by_node);
/* must call put_device() when done with returned i2c_adapter device */
struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
{
	struct device *found;

	found = bus_find_device(&i2c_bus_type, NULL, node,
				of_dev_node_match);
	return found ? i2c_verify_adapter(found) : NULL;
}
EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
MODULE_LICENSE("GPL");
| gpl-2.0 |
Tesla-Redux-Devices/android_kernel_samsung_trlte | arch/tile/kernel/smp.c | 2345 | 5550 | /*
* Copyright 2010 Tilera Corporation. All Rights Reserved.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, version 2.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
* NON INFRINGEMENT. See the GNU General Public License for
* more details.
*
* TILE SMP support routines.
*/
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <asm/cacheflush.h>
HV_Topology smp_topology __write_once;
EXPORT_SYMBOL(smp_topology);
#if CHIP_HAS_IPI()
static unsigned long __iomem *ipi_mappings[NR_CPUS];
#endif
/*
* Top-level send_IPI*() functions to send messages to other cpus.
*/
/* Set by smp_send_stop() to avoid recursive panics. */
static int stopping_cpus;
/*
 * Deliver @tag as a hypervisor message to the recipient list, retrying
 * until hv_send_message() has accounted for every recipient (a single
 * call may deliver only some of them; presumably already-delivered
 * entries are tracked via the recipient state -- see HV docs).
 */
static void __send_IPI_many(HV_Recipient *recip, int nrecip, int tag)
{
	int sent = 0;
	while (sent < nrecip) {
		int rc = hv_send_message(recip, nrecip,
					 (HV_VirtAddr)&tag, sizeof(tag));
		if (rc < 0) {
			/* During shutdown a failure is tolerated so we
			 * don't panic inside a panic. */
			if (!stopping_cpus)	/* avoid recursive panic */
				panic("hv_send_message returned %d", rc);
			break;
		}
		WARN_ONCE(rc == 0, "hv_send_message() returned zero\n");
		sent += rc;
	}
}
/* Send message @tag to a single cpu, identified by linear cpu number. */
void send_IPI_single(int cpu, int tag)
{
	/* Convert the linear cpu number into grid coordinates. */
	HV_Recipient recip = {
		.x = cpu % smp_width,
		.y = cpu / smp_width,
		.state = HV_TO_BE_SENT
	};

	__send_IPI_many(&recip, 1, tag);
}
/*
 * Send message @tag to every cpu in @mask.  The caller must not be in
 * the mask (self-delivery is not supported by this path).
 * NOTE(review): the recipient array lives on the stack and scales with
 * NR_CPUS -- large configurations may want a different allocation.
 */
void send_IPI_many(const struct cpumask *mask, int tag)
{
	HV_Recipient recip[NR_CPUS];
	int cpu;
	int nrecip = 0;
	int my_cpu = smp_processor_id();
	for_each_cpu(cpu, mask) {
		HV_Recipient *r;
		BUG_ON(cpu == my_cpu);	/* self-IPI not supported here */
		r = &recip[nrecip++];
		r->y = cpu / smp_width;
		r->x = cpu % smp_width;
		r->state = HV_TO_BE_SENT;
	}
	__send_IPI_many(recip, nrecip, tag);
}
void send_IPI_allbutself(int tag)
{
struct cpumask mask;
cpumask_copy(&mask, cpu_online_mask);
cpumask_clear_cpu(smp_processor_id(), &mask);
send_IPI_many(&mask, tag);
}
/*
* Functions related to starting/stopping cpus.
*/
/* Handler to start the current cpu. */
static void smp_start_cpu_interrupt(void)
{
	/* Redirect the interrupted context to the secondary-start code. */
	get_irq_regs()->pc = start_cpu_function_addr;
}
/* Handler to stop the current cpu. */
static void smp_stop_cpu_interrupt(void)
{
	/* Mark ourselves offline, mask all interrupts, and nap forever. */
	set_cpu_online(smp_processor_id(), 0);
	arch_local_irq_disable_all();
	for (;;)
		asm("nap; nop");
}
/* This function calls the 'stop' function on all other CPUs in the system. */
void smp_send_stop(void)
{
	/* Set the flag first so __send_IPI_many() won't panic recursively. */
	stopping_cpus = 1;
	send_IPI_allbutself(MSG_TAG_STOP_CPU);
}
/* On panic, just wait; we may get an smp_send_stop() later on. */
void panic_smp_self_stop(void)
{
	/* Idle in low power; never returns. */
	for (;;)
		asm("nap; nop");
}
/*
 * Dispatch code called from hv_message_intr() for HV_MSG_TILE hv messages.
 */
void evaluate_message(int tag)
{
	if (tag == MSG_TAG_START_CPU) {
		/* Start up a cpu */
		smp_start_cpu_interrupt();
	} else if (tag == MSG_TAG_STOP_CPU) {
		/* Sent to shut down slave CPU's */
		smp_stop_cpu_interrupt();
	} else if (tag == MSG_TAG_CALL_FUNCTION_MANY) {
		/* Call function on cpumask */
		generic_smp_call_function_interrupt();
	} else if (tag == MSG_TAG_CALL_FUNCTION_SINGLE) {
		/* Call function on one other CPU */
		generic_smp_call_function_single_interrupt();
	} else {
		panic("Unknown IPI message tag %d", tag);
	}
}
/*
 * flush_icache_range() code uses smp_call_function().
 */

/* Argument block broadcast to every cpu for a cross-cpu icache flush. */
struct ipi_flush {
	unsigned long start;	/* start of the virtual range */
	unsigned long end;	/* end of the virtual range */
};
static void ipi_flush_icache_range(void *info)
{
struct ipi_flush *flush = (struct ipi_flush *) info;
__flush_icache_range(flush->start, flush->end);
}
/* Flush the given virtual range from the icache on every online cpu. */
void flush_icache_range(unsigned long start, unsigned long end)
{
	struct ipi_flush flush = { start, end };

	/* Stay on this cpu while the cross-call (and local flush) runs;
	 * 'flush' lives on our stack. */
	preempt_disable();
	on_each_cpu(ipi_flush_icache_range, &flush, 1);
	preempt_enable();
}
/* Called when smp_send_reschedule() triggers IRQ_RESCHEDULE. */
static irqreturn_t handle_reschedule_ipi(int irq, void *token)
{
	/* Bump the per-cpu resched-IPI counter, then let the scheduler
	 * react to the kick. */
	__get_cpu_var(irq_stat).irq_resched_count++;
	scheduler_ipi();

	return IRQ_HANDLED;
}
/* irqaction bound to IRQ_RESCHEDULE at boot by ipi_init(). */
static struct irqaction resched_action = {
	.handler = handle_reschedule_ipi,
	.name = "resched",
	.dev_id = handle_reschedule_ipi /* unique token */,
};
/* Boot-time IPI setup: map per-cpu IPI trigger pages (when the chip has
 * direct IPI support) and install the reschedule-IPI handler. */
void __init ipi_init(void)
{
#if CHIP_HAS_IPI()
	int cpu;
	/* Map IPI trigger MMIO addresses. */
	for_each_possible_cpu(cpu) {
		HV_Coord tile;
		HV_PTE pte;
		unsigned long offset;
		tile.x = cpu_x(cpu);
		tile.y = cpu_y(cpu);
		if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
			panic("Failed to initialize IPI for cpu %d\n", cpu);
		/* Map each trigger page with the protections supplied by
		 * the hypervisor PTE. */
		offset = PFN_PHYS(pte_pfn(pte));
		ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
	}
#endif
	/* Bind handle_reschedule_ipi() to IRQ_RESCHEDULE. */
	tile_irq_activate(IRQ_RESCHEDULE, TILE_IRQ_PERCPU);
	BUG_ON(setup_irq(IRQ_RESCHEDULE, &resched_action));
}
#if CHIP_HAS_IPI()

/* Kick @cpu's scheduler by storing to its memory-mapped IPI trigger. */
void smp_send_reschedule(int cpu)
{
	WARN_ON(cpu_is_offline(cpu));

	/*
	 * We just want to do an MMIO store.  The traditional writeq()
	 * functions aren't really correct here, since they're always
	 * directed at the PCI shim.  For now, just do a raw store,
	 * casting away the __iomem attribute.
	 */
	((unsigned long __force *)ipi_mappings[cpu])[IRQ_RESCHEDULE] = 0;
}

#else

/* Kick @cpu's scheduler via a hypervisor-triggered IPI. */
void smp_send_reschedule(int cpu)
{
	HV_Coord coord;

	WARN_ON(cpu_is_offline(cpu));

	coord.y = cpu_y(cpu);
	coord.x = cpu_x(cpu);
	hv_trigger_ipi(coord, IRQ_RESCHEDULE);
}

#endif /* CHIP_HAS_IPI() */
| gpl-2.0 |
knightkill3r/FalcoKernel | drivers/net/wireless/rtlwifi/rtl8192cu/mac.c | 2601 | 33469 | /******************************************************************************
*
* Copyright(c) 2009-2010 Realtek Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
*
* Contact Information:
* wlanfae <wlanfae@realtek.com>
* Realtek Corporation, No. 2, Innovation Road II, Hsinchu Science Park,
* Hsinchu 300, Taiwan.
*
* Larry Finger <Larry.Finger@lwfinger.net>
*
****************************************************************************/
#include <linux/module.h>
#include "../wifi.h"
#include "../pci.h"
#include "../usb.h"
#include "../ps.h"
#include "../cam.h"
#include "reg.h"
#include "def.h"
#include "phy.h"
#include "rf.h"
#include "dm.h"
#include "mac.h"
#include "trx.h"
/* macro to shorten lines */
#define LINK_Q ui_link_quality
#define RX_EVM rx_evm_percentage
#define RX_SIGQ rx_mimo_signalquality
/*
 * Decode REG_SYS_CFG (plus REG_HPON_FSM / REG_GPIO_OUTSTS for some
 * parts) into the chip-version enum, log the result, and derive the RF
 * type (1T1R / 1T2R / 2T2R) and the default RX-path enables.
 */
void rtl92c_read_chip_version(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);
	enum version_8192c chip_version = VERSION_UNKNOWN;
	u32 value32;

	value32 = rtl_read_dword(rtlpriv, REG_SYS_CFG);
	if (value32 & TRP_VAUX_EN) {
		/* Pre-production (test) silicon. */
		chip_version = (value32 & TYPE_ID) ? VERSION_TEST_CHIP_92C :
			   VERSION_TEST_CHIP_88C;
	} else {
		/* Normal mass production chip. */
		chip_version = NORMAL_CHIP;
		/* OR in feature bits: 92C vs 88C, vendor, BT variant... */
		chip_version |= ((value32 & TYPE_ID) ? CHIP_92C : 0);
		chip_version |= ((value32 & VENDOR_ID) ? CHIP_VENDOR_UMC : 0);
		/* RTL8723 with BT function. */
		chip_version |= ((value32 & BT_FUNC) ? CHIP_8723 : 0);
		if (IS_VENDOR_UMC(chip_version))
			chip_version |= ((value32 & CHIP_VER_RTL_MASK) ?
					 CHIP_VENDOR_UMC_B_CUT : 0);
		if (IS_92C_SERIAL(chip_version)) {
			/* Bonding option distinguishes 1T2R from 2T2R. */
			value32 = rtl_read_dword(rtlpriv, REG_HPON_FSM);
			chip_version |= ((CHIP_BONDING_IDENTIFIER(value32) ==
				 CHIP_BONDING_92C_1T2R) ? CHIP_92C_1T2R : 0);
		} else if (IS_8723_SERIES(chip_version)) {
			value32 = rtl_read_dword(rtlpriv, REG_GPIO_OUTSTS);
			chip_version |= ((value32 & RF_RL_ID) ?
					 CHIP_8723_DRV_REV : 0);
		}
	}
	rtlhal->version  = (enum version_8192c)chip_version;
	/* Log a human-readable name for the decoded version. */
	switch (rtlhal->version) {
	case VERSION_NORMAL_TSMC_CHIP_92C_1T2R:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_B_CHIP_92C.\n"));
		break;
	case VERSION_NORMAL_TSMC_CHIP_92C:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_92C.\n"));
		break;
	case VERSION_NORMAL_TSMC_CHIP_88C:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMAL_TSMC_CHIP_88C.\n"));
		break;
	case VERSION_NORMAL_UMC_CHIP_92C_1T2R_A_CUT:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_i"
			  "92C_1T2R_A_CUT.\n"));
		break;
	case VERSION_NORMAL_UMC_CHIP_92C_A_CUT:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP_"
			  "92C_A_CUT.\n"));
		break;
	case VERSION_NORMAL_UMC_CHIP_88C_A_CUT:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
			  "_88C_A_CUT.\n"));
		break;
	case VERSION_NORMAL_UMC_CHIP_92C_1T2R_B_CUT:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
			  "_92C_1T2R_B_CUT.\n"));
		break;
	case VERSION_NORMAL_UMC_CHIP_92C_B_CUT:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
			  "_92C_B_CUT.\n"));
		break;
	case VERSION_NORMAL_UMC_CHIP_88C_B_CUT:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMAL_UMC_CHIP"
			  "_88C_B_CUT.\n"));
		break;
	case VERSION_NORMA_UMC_CHIP_8723_1T1R_A_CUT:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
			  "_8723_1T1R_A_CUT.\n"));
		break;
	case VERSION_NORMA_UMC_CHIP_8723_1T1R_B_CUT:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_NORMA_UMC_CHIP"
			  "_8723_1T1R_B_CUT.\n"));
		break;
	case VERSION_TEST_CHIP_92C:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_TEST_CHIP_92C.\n"));
		break;
	case VERSION_TEST_CHIP_88C:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: VERSION_TEST_CHIP_88C.\n"));
		break;
	default:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_TRACE,
			 ("Chip Version ID: ???????????????.\n"));
		break;
	}
	/* Derive the RF configuration from the decoded version. */
	if (IS_92C_SERIAL(rtlhal->version))
		rtlphy->rf_type =
			 (IS_92C_1T2R(rtlhal->version)) ? RF_1T2R : RF_2T2R;
	else
		rtlphy->rf_type = RF_1T1R;
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD,
		 ("Chip RF Type: %s\n", (rtlphy->rf_type == RF_2T2R) ?
		  "RF_2T2R" : "RF_1T1R"));
	/* Enable both RX paths only on multi-path parts. */
	if (get_rf_type(rtlphy) == RF_1T1R)
		rtlpriv->dm.rfpath_rxenable[0] = true;
	else
		rtlpriv->dm.rfpath_rxenable[0] =
		    rtlpriv->dm.rfpath_rxenable[1] = true;
	RT_TRACE(rtlpriv, COMP_INIT, DBG_LOUD, ("VersionID = 0x%4x\n",
						rtlhal->version));
}
/**
 * rtl92c_llt_write - write one entry of the hardware LLT table
 * @hw: mac80211 hardware handle
 * @address: LLT logical address
 * @data: LLT data content
 *
 * Issues a write request via REG_LLT_INIT and polls until the hardware
 * reports the operation idle.  Returns true on success, false if the
 * poll count exceeds POLLING_LLT_THRESHOLD.
 */
bool rtl92c_llt_write(struct ieee80211_hw *hw, u32 address, u32 data)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	bool status = true;
	long count = 0;
	/* Compose address + data + "write" opcode into one register value. */
	u32 value = _LLT_INIT_ADDR(address) |
	    _LLT_INIT_DATA(data) | _LLT_OP(_LLT_WRITE_ACCESS);

	rtl_write_dword(rtlpriv, REG_LLT_INIT, value);
	do {
		/* Poll until the op field reads back as inactive. */
		value = rtl_read_dword(rtlpriv, REG_LLT_INIT);
		if (_LLT_NO_ACTIVE == _LLT_OP_VALUE(value))
			break;
		if (count > POLLING_LLT_THRESHOLD) {
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 ("Failed to polling write LLT done at"
				  " address %d! _LLT_OP_VALUE(%x)\n",
				  address, _LLT_OP_VALUE(value)));
			status = false;
			break;
		}
	} while (++count);
	return status;
}
/**
 * rtl92c_init_llt_table - initialize the hardware LLT table
 * @hw: mac80211 hardware handle
 * @boundary: first page of the ring-buffer region
 *
 * Pages [0, boundary) are chained into a terminated list; the remaining
 * pages form a ring buffer.  Returns true on success.
 */
bool rtl92c_init_llt_table(struct ieee80211_hw *hw, u32 boundary)
{
	u32 i;

	/* Chain pages 0 .. boundary-2, each pointing at its successor. */
	for (i = 0; i < boundary - 1; i++) {
		if (!rtl92c_llt_write(hw, i, i + 1)) {
			printk(KERN_ERR "===> %s #1 fail\n", __func__);
			return false;
		}
	}
	/* end of list */
	if (!rtl92c_llt_write(hw, boundary - 1, 0xFF)) {
		printk(KERN_ERR "===> %s #2 fail\n", __func__);
		return false;
	}
	/* Make the other pages as ring buffer
	 * This ring buffer is used as beacon buffer if we config this MAC
	 * as two MAC transfer.
	 * Otherwise used as local loopback buffer.
	 */
	for (i = boundary; i < LLT_LAST_ENTRY_OF_TX_PKT_BUFFER; i++) {
		if (!rtl92c_llt_write(hw, i, i + 1)) {
			printk(KERN_ERR "===> %s #3 fail\n", __func__);
			return false;
		}
	}
	/* Let last entry point to the start entry of ring buffer */
	if (!rtl92c_llt_write(hw, LLT_LAST_ENTRY_OF_TX_PKT_BUFFER, boundary)) {
		printk(KERN_ERR "===> %s #4 fail\n", __func__);
		return false;
	}
	return true;
}
/*
 * Program the hardware CAM (key cache).  With @clear_all the first few
 * entries and the cached software keys are wiped; otherwise the generic
 * encryption algorithm is translated to a CAM algorithm code and the
 * key is installed (or deleted, when its cached length is zero) as a
 * default/WEP, group, or pairwise entry.
 */
void rtl92c_set_key(struct ieee80211_hw *hw, u32 key_index,
		     u8 *p_macaddr, bool is_group, u8 enc_algo,
		     bool is_wepkey, bool clear_all)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	u8 *macaddr = p_macaddr;
	u32 entry_id = 0;
	bool is_pairwise = false;
	/* Placeholder addresses used for default-key CAM entries. */
	static u8 cam_const_addr[4][6] = {
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
		{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}
	};
	/* Broadcast address used for group-key CAM entries. */
	static u8 cam_const_broad[] = {
		0xff, 0xff, 0xff, 0xff, 0xff, 0xff
	};

	if (clear_all) {
		u8 idx = 0;
		u8 cam_offset = 0;
		u8 clear_number = 5;
		RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG, ("clear_all\n"));
		for (idx = 0; idx < clear_number; idx++) {
			rtl_cam_mark_invalid(hw, cam_offset + idx);
			rtl_cam_empty_entry(hw, cam_offset + idx);
			/* Also forget the cached software copy. */
			if (idx < 5) {
				memset(rtlpriv->sec.key_buf[idx], 0,
				       MAX_KEY_LEN);
				rtlpriv->sec.key_len[idx] = 0;
			}
		}
	} else {
		/* Map the generic algorithm enum to the CAM encoding. */
		switch (enc_algo) {
		case WEP40_ENCRYPTION:
			enc_algo = CAM_WEP40;
			break;
		case WEP104_ENCRYPTION:
			enc_algo = CAM_WEP104;
			break;
		case TKIP_ENCRYPTION:
			enc_algo = CAM_TKIP;
			break;
		case AESCCMP_ENCRYPTION:
			enc_algo = CAM_AES;
			break;
		default:
			RT_TRACE(rtlpriv, COMP_ERR, DBG_EMERG,
				 ("iillegal switch case\n"));
			enc_algo = CAM_TKIP;
			break;
		}
		/* Choose CAM entry index and MAC address for the entry. */
		if (is_wepkey || rtlpriv->sec.use_defaultkey) {
			macaddr = cam_const_addr[key_index];
			entry_id = key_index;
		} else {
			if (is_group) {
				macaddr = cam_const_broad;
				entry_id = key_index;
			} else {
				key_index = PAIRWISE_KEYIDX;
				entry_id = CAM_PAIRWISE_KEY_POSITION;
				is_pairwise = true;
			}
		}
		/* Zero cached length means "remove this key". */
		if (rtlpriv->sec.key_len[key_index] == 0) {
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 ("delete one entry\n"));
			rtl_cam_delete_one_entry(hw, p_macaddr, entry_id);
		} else {
			RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
				 ("The insert KEY length is %d\n",
				  rtlpriv->sec.key_len[PAIRWISE_KEYIDX]));
			RT_TRACE(rtlpriv, COMP_SEC, DBG_LOUD,
				 ("The insert KEY is %x %x\n",
				  rtlpriv->sec.key_buf[0][0],
				  rtlpriv->sec.key_buf[0][1]));
			RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
				 ("add one entry\n"));
			if (is_pairwise) {
				RT_PRINT_DATA(rtlpriv, COMP_SEC, DBG_LOUD,
					      "Pairwiase Key content :",
					      rtlpriv->sec.pairwise_key,
					      rtlpriv->sec.
					      key_len[PAIRWISE_KEYIDX]);
				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
					 ("set Pairwiase key\n"));
				rtl_cam_add_one_entry(hw, macaddr, key_index,
						      entry_id, enc_algo,
						      CAM_CONFIG_NO_USEDK,
						      rtlpriv->sec.
						      key_buf[key_index]);
			} else {
				RT_TRACE(rtlpriv, COMP_SEC, DBG_DMESG,
					 ("set group key\n"));
				/* In IBSS also install the group key as
				 * our own pairwise entry. */
				if (mac->opmode == NL80211_IFTYPE_ADHOC) {
					rtl_cam_add_one_entry(hw,
						rtlefuse->dev_addr,
						PAIRWISE_KEYIDX,
						CAM_PAIRWISE_KEY_POSITION,
						enc_algo,
						CAM_CONFIG_NO_USEDK,
						rtlpriv->sec.key_buf
						[entry_id]);
				}
				rtl_cam_add_one_entry(hw, macaddr, key_index,
						entry_id, enc_algo,
						CAM_CONFIG_NO_USEDK,
						rtlpriv->sec.key_buf[entry_id]);
			}
		}
	}
}
/* Read back the TX DMA status register. */
u32 rtl92c_get_txdma_status(struct ieee80211_hw *hw)
{
	return rtl_read_dword(rtl_priv(hw), REG_TXDMA_STATUS);
}
/*
 * Unmask HIMR/HIMRE from the cached interrupt masks and record that
 * interrupts are enabled.  PCI and USB variants keep their masks and
 * enable flags in different private structures.
 */
void rtl92c_enable_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	if (IS_HARDWARE_TYPE_8192CE(rtlhal)) {
		rtl_write_dword(rtlpriv, REG_HIMR, rtlpci->irq_mask[0] &
				0xFFFFFFFF);
		rtl_write_dword(rtlpriv, REG_HIMRE, rtlpci->irq_mask[1] &
				0xFFFFFFFF);
		rtlpci->irq_enabled = true;
	} else {
		rtl_write_dword(rtlpriv, REG_HIMR, rtlusb->irq_mask[0] &
				0xFFFFFFFF);
		rtl_write_dword(rtlpriv, REG_HIMRE, rtlusb->irq_mask[1] &
				0xFFFFFFFF);
		rtlusb->irq_enabled = true;
	}
}
/* Initial interrupt setup is just an enable. */
void rtl92c_init_interrupt(struct ieee80211_hw *hw)
{
	rtl92c_enable_interrupt(hw);
}
/* Mask all interrupts and record the disabled state for the bus type. */
void rtl92c_disable_interrupt(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtl_priv(hw));
	struct rtl_pci *rtlpci = rtl_pcidev(rtl_pcipriv(hw));
	struct rtl_usb *rtlusb = rtl_usbdev(rtl_usbpriv(hw));

	rtl_write_dword(rtlpriv, REG_HIMR, IMR8190_DISABLED);
	rtl_write_dword(rtlpriv, REG_HIMRE, IMR8190_DISABLED);
	if (IS_HARDWARE_TYPE_8192CE(rtlhal))
		rtlpci->irq_enabled = false;
	else if (IS_HARDWARE_TYPE_8192CU(rtlhal))
		rtlusb->irq_enabled = false;
}
/*
 * Pack the mac80211 EDCA parameters (AIFS, ECWmin/max, TXOP) for access
 * category @aci into one dword and write it to the matching EDCA
 * register.
 */
void rtl92c_set_qos(struct ieee80211_hw *hw, int aci)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	u32 u4b_ac_param;

	rtl92c_dm_init_edca_turbo(hw);
	/* Bits 0-7: AIFS; 8-11: ECWmin; 12-15: ECWmax; 16+: TXOP. */
	u4b_ac_param = (u32) mac->ac[aci].aifs;
	u4b_ac_param |=
	    ((u32) le16_to_cpu(mac->ac[aci].cw_min) & 0xF) <<
	    AC_PARAM_ECW_MIN_OFFSET;
	u4b_ac_param |=
	    ((u32) le16_to_cpu(mac->ac[aci].cw_max) & 0xF) <<
	    AC_PARAM_ECW_MAX_OFFSET;
	u4b_ac_param |= (u32) le16_to_cpu(mac->ac[aci].tx_op) <<
	    AC_PARAM_TXOP_OFFSET;
	RT_TRACE(rtlpriv, COMP_QOS, DBG_LOUD,
		 ("queue:%x, ac_param:%x\n", aci, u4b_ac_param));
	switch (aci) {
	case AC1_BK:
		rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, u4b_ac_param);
		break;
	case AC0_BE:
		rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, u4b_ac_param);
		break;
	case AC2_VI:
		rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, u4b_ac_param);
		break;
	case AC3_VO:
		rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, u4b_ac_param);
		break;
	default:
		RT_ASSERT(false, ("invalid aci: %d !\n", aci));
		break;
	}
}
/*-------------------------------------------------------------------------
 * HW MAC Address
 *-------------------------------------------------------------------------*/

/* Write @addr byte-by-byte into REG_MACID, then read it back for the log. */
void rtl92c_set_mac_addr(struct ieee80211_hw *hw, const u8 *addr)
{
	u32 i;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	for (i = 0 ; i < ETH_ALEN ; i++)
		rtl_write_byte(rtlpriv, (REG_MACID + i), *(addr+i));

	RT_TRACE(rtlpriv, COMP_CMD, DBG_DMESG, ("MAC Address: %02X-%02X-%02X-"
						"%02X-%02X-%02X\n",
						rtl_read_byte(rtlpriv,
							      REG_MACID),
						rtl_read_byte(rtlpriv,
							      REG_MACID+1),
						rtl_read_byte(rtlpriv,
							      REG_MACID+2),
						rtl_read_byte(rtlpriv,
							      REG_MACID+3),
						rtl_read_byte(rtlpriv,
							      REG_MACID+4),
						rtl_read_byte(rtlpriv,
							      REG_MACID+5)));
}
/* Tell the hardware the size of the per-packet RX driver-info block. */
void rtl92c_init_driver_info_size(struct ieee80211_hw *hw, u8 size)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	rtl_write_byte(rtlpriv, REG_RX_DRVINFO_SZ, size);
}
/*
 * Translate the nl80211 interface type to the chip's network-type code
 * and write it to REG_CR+2.  Returns 0 on success or -EOPNOTSUPP for
 * unsupported interface types.
 */
int rtl92c_set_network_type(struct ieee80211_hw *hw, enum nl80211_iftype type)
{
	u8 value;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	switch (type) {
	case NL80211_IFTYPE_UNSPECIFIED:
		value = NT_NO_LINK;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Set Network type to NO LINK!\n"));
		break;
	case NL80211_IFTYPE_ADHOC:
		value = NT_LINK_AD_HOC;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Set Network type to Ad Hoc!\n"));
		break;
	case NL80211_IFTYPE_STATION:
		value = NT_LINK_AP;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Set Network type to STA!\n"));
		break;
	case NL80211_IFTYPE_AP:
		value = NT_AS_AP;
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Set Network type to AP!\n"));
		break;
	default:
		RT_TRACE(rtlpriv, COMP_INIT, DBG_DMESG,
			 ("Network type %d not support!\n", type));
		return -EOPNOTSUPP;
	}
	rtl_write_byte(rtlpriv, (REG_CR + 2), value);
	return 0;
}
/* Boot default: no link until mac80211 tells us otherwise. */
void rtl92c_init_network_type(struct ieee80211_hw *hw)
{
	rtl92c_set_network_type(hw, NL80211_IFTYPE_UNSPECIFIED);
}
/* Program the response-rate set, SIFS, and TX retry limits. */
void rtl92c_init_adaptive_ctrl(struct ieee80211_hw *hw)
{
	u16 value16;
	u32 value32;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* Response Rate Set: CCK 1M only as the baseline. */
	value32 = rtl_read_dword(rtlpriv, REG_RRSR);
	value32 &= ~RATE_BITMAP_ALL;
	value32 |= RATE_RRSR_CCK_ONLY_1M;
	rtl_write_dword(rtlpriv, REG_RRSR, value32);

	/* SIFS (used in NAV) */
	value16 = _SPEC_SIFS_CCK(0x10) | _SPEC_SIFS_OFDM(0x10);
	rtl_write_word(rtlpriv, REG_SPEC_SIFS, value16);

	/* Retry Limit: long and short retry both 0x30.
	 * NOTE(review): a 16-bit value is written with a dword write,
	 * zeroing the upper half of REG_RL -- confirm this is intended. */
	value16 = _LRL(0x30) | _SRL(0x30);
	rtl_write_dword(rtlpriv, REG_RL, value16);
}
/* Program the data auto-rate-fallback retry-count tables. */
void rtl92c_init_rate_fallback(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* Set Data Auto Rate Fallback Retry Count register. */
	rtl_write_dword(rtlpriv, REG_DARFRC, 0x00000000);
	rtl_write_dword(rtlpriv, REG_DARFRC+4, 0x10080404);
	rtl_write_dword(rtlpriv, REG_RARFRC, 0x04030201);
	rtl_write_dword(rtlpriv, REG_RARFRC+4, 0x08070605);
}
/* Program the two SIFS bytes of the CCK SIFS register. */
static void rtl92c_set_cck_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
				u8 ctx_sifs)
{
	struct rtl_priv *priv = rtl_priv(hw);

	rtl_write_byte(priv, REG_SIFS_CCK, trx_sifs);
	rtl_write_byte(priv, REG_SIFS_CCK + 1, ctx_sifs);
}
/* Program the two SIFS bytes of the OFDM SIFS register. */
static void rtl92c_set_ofdm_sifs(struct ieee80211_hw *hw, u8 trx_sifs,
				 u8 ctx_sifs)
{
	struct rtl_priv *priv = rtl_priv(hw);

	rtl_write_byte(priv, REG_SIFS_OFDM, trx_sifs);
	rtl_write_byte(priv, REG_SIFS_OFDM + 1, ctx_sifs);
}
/* Pack one access category's EDCA parameters and write its register. */
void rtl92c_init_edca_param(struct ieee80211_hw *hw,
			    u16 queue, u16 txop, u8 cw_min, u8 cw_max, u8 aifs)
{
	/* sequence: VO, VI, BE, BK ==> the same as 92C hardware design.
	 * referenc : enum nl80211_txq_q or ieee80211_set_wmm_default function.
	 */
	u32 value;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* Bits 0-7: AIFS; 8-11: CWmin; 12-15: CWmax; 16+: TXOP. */
	value = (u32)aifs;
	value |= ((u32)cw_min & 0xF) << 8;
	value |= ((u32)cw_max & 0xF) << 12;
	value |= (u32)txop << 16;
	/* 92C hardware register sequence is the same as queue number. */
	rtl_write_dword(rtlpriv, (REG_EDCA_VO_PARAM + (queue * 4)), value);
}
/* One-time EDCA/SIFS/contention defaults at hardware init. */
void rtl92c_init_edca(struct ieee80211_hw *hw)
{
	u16 value16;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	/* disable EDCCA count down, to reduce collison and retry */
	value16 = rtl_read_word(rtlpriv, REG_RD_CTRL);
	value16 |= DIS_EDCA_CNT_DWN;
	rtl_write_word(rtlpriv, REG_RD_CTRL, value16);

	/* Update SIFS timing. ??????????
	 * pHalData->SifsTime = 0x0e0e0a0a; */
	rtl92c_set_cck_sifs(hw, 0xa, 0xa);
	rtl92c_set_ofdm_sifs(hw, 0xe, 0xe);

	/* Set CCK/OFDM SIFS to be 10us. */
	rtl_write_word(rtlpriv, REG_SIFS_CCK, 0x0a0a);
	rtl_write_word(rtlpriv, REG_SIFS_OFDM, 0x1010);

	rtl_write_word(rtlpriv, REG_PROT_MODE_CTRL, 0x0204);
	rtl_write_dword(rtlpriv, REG_BAR_MODE_CTRL, 0x014004);

	/* TXOP: per-AC EDCA defaults. */
	rtl_write_dword(rtlpriv, REG_EDCA_BE_PARAM, 0x005EA42B);
	rtl_write_dword(rtlpriv, REG_EDCA_BK_PARAM, 0x0000A44F);
	rtl_write_dword(rtlpriv, REG_EDCA_VI_PARAM, 0x005EA324);
	rtl_write_dword(rtlpriv, REG_EDCA_VO_PARAM, 0x002FA226);

	/* PIFS */
	rtl_write_byte(rtlpriv, REG_PIFS, 0x1C);

	/* AGGR BREAK TIME Register */
	rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);

	rtl_write_word(rtlpriv, REG_NAV_PROT_LEN, 0x0040);

	rtl_write_byte(rtlpriv, REG_BCNDMATIM, 0x02);
	rtl_write_byte(rtlpriv, REG_ATIMWND, 0x02);
}
/* AMPDU aggregation limits and break-time defaults. */
void rtl92c_init_ampdu_aggregation(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_dword(rtlpriv, REG_AGGLEN_LMT, 0x99997631);
	rtl_write_byte(rtlpriv, REG_AGGR_BREAK_TIME, 0x16);

	/* init AMPDU aggregation number, tuning for Tx's TP, */
	rtl_write_word(rtlpriv, 0x4CA, 0x0708);
}
/* Set the beacon max-error threshold.  @infra_mode is currently unused. */
void rtl92c_init_beacon_max_error(struct ieee80211_hw *hw, bool infra_mode)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	rtl_write_byte(rtlpriv, REG_BCN_MAX_ERR, 0xFF);
}
/* RDG (reverse direction grant) register defaults. */
void rtl92c_init_rdg_setting(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	rtl_write_byte(rtlpriv, REG_RD_CTRL, 0xFF);
	rtl_write_word(rtlpriv, REG_RD_NAV_NXT, 0x200);
	rtl_write_byte(rtlpriv, REG_RD_RESP_PKT_TH, 0x05);
}
/* Enable the new AMPDU retry scheme and set the ACK timeout. */
void rtl92c_init_retry_function(struct ieee80211_hw *hw)
{
	u8 value8;
	struct rtl_priv *rtlpriv = rtl_priv(hw);

	value8 = rtl_read_byte(rtlpriv, REG_FWHW_TXQ_CTRL);
	value8 |= EN_AMPDU_RTY_NEW;
	rtl_write_byte(rtlpriv, REG_FWHW_TXQ_CTRL, value8);

	/* Set ACK timeout */
	rtl_write_byte(rtlpriv, REG_ACKTO, 0x40);
}
/*
 * Beacon timing defaults.  NOTE(review): the @version parameter is not
 * used -- the function consults rtlhal->version instead; confirm the
 * parameter can be dropped at the next API cleanup.
 */
void rtl92c_init_beacon_parameters(struct ieee80211_hw *hw,
				   enum version_8192c version)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_hal *rtlhal = rtl_hal(rtlpriv);

	rtl_write_word(rtlpriv, REG_TBTT_PROHIBIT, 0x6404);/* ms */
	rtl_write_byte(rtlpriv, REG_DRVERLYINT, DRIVER_EARLY_INT_TIME);/*ms*/
	rtl_write_byte(rtlpriv, REG_BCNDMATIM, BCN_DMA_ATIME_INT_TIME);
	if (IS_NORMAL_CHIP(rtlhal->version))
		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x660F);
	else
		rtl_write_word(rtlpriv, REG_BCNTCFG, 0x66FF);
}
/* Turn off the fast-EDCA feature entirely. */
void rtl92c_disable_fast_edca(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	rtl_write_word(rtlpriv, REG_FAST_EDCA_CTRL, 0);
}
/* Select the AMPDU minimum-spacing density for 1T vs 2T parts. */
void rtl92c_set_min_space(struct ieee80211_hw *hw, bool is2T)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u8 density;

	if (is2T)
		density = MAX_MSS_DENSITY_2T;
	else
		density = MAX_MSS_DENSITY_1T;
	rtl_write_byte(rtlpriv, REG_AMPDU_MIN_SPACE, density);
}
/* Read the management-frame RX filter map. */
u16 rtl92c_get_mgt_filter(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	return rtl_read_word(rtlpriv, REG_RXFLTMAP0);
}
/* Write the management-frame RX filter map. */
void rtl92c_set_mgt_filter(struct ieee80211_hw *hw, u16 filter)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	rtl_write_word(rtlpriv, REG_RXFLTMAP0, filter);
}
/* Read the control-frame RX filter map. */
u16 rtl92c_get_ctrl_filter(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	return rtl_read_word(rtlpriv, REG_RXFLTMAP1);
}
/* Write the control-frame RX filter map. */
void rtl92c_set_ctrl_filter(struct ieee80211_hw *hw, u16 filter)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	rtl_write_word(rtlpriv, REG_RXFLTMAP1, filter);
}
/* Read the data-frame RX filter map. */
u16 rtl92c_get_data_filter(struct ieee80211_hw *hw)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	return rtl_read_word(rtlpriv, REG_RXFLTMAP2);
}
/* Write the data-frame RX filter map. */
void rtl92c_set_data_filter(struct ieee80211_hw *hw, u16 filter)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	rtl_write_word(rtlpriv, REG_RXFLTMAP2, filter);
}
/*==============================================================*/
/* Map a raw antenna power reading to a 0-100 percentage. */
static u8 _rtl92c_query_rxpwrpercentage(char antpower)
{
	/* Values at or beyond either extreme report 0%. */
	if (antpower <= -100 || antpower >= 20)
		return 0;
	/* Non-negative readings saturate; otherwise scale linearly. */
	return (antpower >= 0) ? 100 : (u8)(100 + antpower);
}
/* Convert an EVM value (negative dB) to a link-quality percentage. */
static u8 _rtl92c_evm_db_to_percentage(char value)
{
	char db = value;

	/* Clamp to the meaningful range [-33, 0] dB. */
	if (db >= 0)
		db = 0;
	if (db <= -33)
		db = -33;
	/* Three percent per dB below zero; 99 is promoted to a full 100. */
	db = (char)(-db * 3);
	if (db == 99)
		db = 100;
	return db;
}
/* Convert a 0-based signal-strength index into a dBm figure. */
static long _rtl92c_translate_todbm(struct ieee80211_hw *hw,
				    u8 signal_strength_index)
{
	/* Halve (rounding up) and shift down by the 95 dBm noise floor. */
	return (long)((signal_strength_index + 1) >> 1) - 95;
}
/* Piecewise-linear rescaling of a raw signal value for the UI. */
static long _rtl92c_signal_scale_mapping(struct ieee80211_hw *hw,
					 long currsig)
{
	if (currsig >= 61 && currsig <= 100)
		return 90 + ((currsig - 60) / 4);
	if (currsig >= 41 && currsig <= 60)
		return 78 + ((currsig - 40) / 2);
	if (currsig >= 31 && currsig <= 40)
		return 66 + (currsig - 30);
	if (currsig >= 21 && currsig <= 30)
		return 54 + (currsig - 20);
	if (currsig >= 5 && currsig <= 20)
		return 42 + (((currsig - 5) * 2) / 3);

	/* Small discrete values get fixed anchors; anything else
	 * (including out-of-range inputs) passes through unchanged. */
	switch (currsig) {
	case 4:
		return 36;
	case 3:
		return 27;
	case 2:
		return 18;
	case 1:
		return 9;
	default:
		return currsig;
	}
}
/*
 * Parse the per-frame PHY status from the RX descriptor/driver-info area
 * and fill 'pstats' with power, RSSI and signal-quality figures.  CCK
 * and OFDM rates report power in different encodings, hence the two
 * large branches below.
 */
static void _rtl92c_query_rxphystatus(struct ieee80211_hw *hw,
				      struct rtl_stats *pstats,
				      struct rx_desc_92c *pdesc,
				      struct rx_fwinfo_92c *p_drvinfo,
				      bool packet_match_bssid,
				      bool packet_toself,
				      bool packet_beacon)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	struct phy_sts_cck_8192s_t *cck_buf;
	s8 rx_pwr_all = 0, rx_pwr[4];
	u8 rf_rx_num = 0, evm, pwdb_all;
	u8 i, max_spatial_stream;
	u32 rssi, total_rssi = 0;
	/* NOTE(review): never set to true in this function, so the CCK
	 * branch always uses rtlphy->cck_high_power below. */
	bool in_powersavemode = false;
	bool is_cck_rate;
	is_cck_rate = RX_HAL_IS_CCK_RATE(pdesc);
	pstats->packet_matchbssid = packet_match_bssid;
	pstats->packet_toself = packet_toself;
	pstats->is_cck = is_cck_rate;
	pstats->packet_beacon = packet_beacon;
	/* NOTE(review): duplicate of the is_cck assignment two lines up. */
	pstats->is_cck = is_cck_rate;
	pstats->RX_SIGQ[0] = -1;	/* -1 = "no quality report" */
	pstats->RX_SIGQ[1] = -1;
	if (is_cck_rate) {
		u8 report, cck_highpwr;
		/* CCK status lives in a different layout overlaid on the
		 * driver-info area. */
		cck_buf = (struct phy_sts_cck_8192s_t *)p_drvinfo;
		if (!in_powersavemode)
			cck_highpwr = rtlphy->cck_high_power;
		else
			cck_highpwr = false;
		if (!cck_highpwr) {
			u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
			/* Bits 7:6 select which gain range the AGC report
			 * is relative to. */
			report = cck_buf->cck_agc_rpt & 0xc0;
			report = report >> 6;
			switch (report) {
			case 0x3:
				rx_pwr_all = -46 - (cck_agc_rpt & 0x3e);
				break;
			case 0x2:
				rx_pwr_all = -26 - (cck_agc_rpt & 0x3e);
				break;
			case 0x1:
				rx_pwr_all = -12 - (cck_agc_rpt & 0x3e);
				break;
			case 0x0:
				rx_pwr_all = 16 - (cck_agc_rpt & 0x3e);
				break;
			}
		} else {
			u8 cck_agc_rpt = cck_buf->cck_agc_rpt;
			/* High-power mode: range selector comes from
			 * cfosho[0] and the report value is doubled. */
			report = p_drvinfo->cfosho[0] & 0x60;
			report = report >> 5;
			switch (report) {
			case 0x3:
				rx_pwr_all = -46 - ((cck_agc_rpt & 0x1f) << 1);
				break;
			case 0x2:
				rx_pwr_all = -26 - ((cck_agc_rpt & 0x1f) << 1);
				break;
			case 0x1:
				rx_pwr_all = -12 - ((cck_agc_rpt & 0x1f) << 1);
				break;
			case 0x0:
				rx_pwr_all = 16 - ((cck_agc_rpt & 0x1f) << 1);
				break;
			}
		}
		pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
		pstats->rx_pwdb_all = pwdb_all;
		pstats->recvsignalpower = rx_pwr_all;
		if (packet_match_bssid) {
			u8 sq;
			/* Derive a 0..100 quality figure from the CCK
			 * SQ report; strong signals are pinned to 100. */
			if (pstats->rx_pwdb_all > 40)
				sq = 100;
			else {
				sq = cck_buf->sq_rpt;
				if (sq > 64)
					sq = 0;
				else if (sq < 20)
					sq = 100;
				else
					sq = ((64 - sq) * 100) / 44;
			}
			pstats->signalquality = sq;
			pstats->RX_SIGQ[0] = sq;
			pstats->RX_SIGQ[1] = -1;	/* CCK is single-stream */
		}
	} else {
		/* OFDM/HT: accumulate per-path RSSI. */
		rtlpriv->dm.rfpath_rxenable[0] =
		    rtlpriv->dm.rfpath_rxenable[1] = true;
		for (i = RF90_PATH_A; i < RF90_PATH_MAX; i++) {
			if (rtlpriv->dm.rfpath_rxenable[i])
				rf_rx_num++;
			/* gain_trsw is in half-dB steps offset by -110 dBm. */
			rx_pwr[i] =
			    ((p_drvinfo->gain_trsw[i] & 0x3f) * 2) - 110;
			rssi = _rtl92c_query_rxpwrpercentage(rx_pwr[i]);
			total_rssi += rssi;
			rtlpriv->stats.rx_snr_db[i] =
			    (long)(p_drvinfo->rxsnr[i] / 2);
			if (packet_match_bssid)
				pstats->rx_mimo_signalstrength[i] = (u8) rssi;
		}
		rx_pwr_all = ((p_drvinfo->pwdb_all >> 1) & 0x7f) - 110;
		pwdb_all = _rtl92c_query_rxpwrpercentage(rx_pwr_all);
		pstats->rx_pwdb_all = pwdb_all;
		pstats->rxpower = rx_pwr_all;
		pstats->recvsignalpower = rx_pwr_all;
		/* MCS8..15 implies two spatial streams, else one. */
		if (GET_RX_DESC_RX_MCS(pdesc) &&
		    GET_RX_DESC_RX_MCS(pdesc) >= DESC92C_RATEMCS8 &&
		    GET_RX_DESC_RX_MCS(pdesc) <= DESC92C_RATEMCS15)
			max_spatial_stream = 2;
		else
			max_spatial_stream = 1;
		for (i = 0; i < max_spatial_stream; i++) {
			evm = _rtl92c_evm_db_to_percentage(p_drvinfo->rxevm[i]);
			if (packet_match_bssid) {
				/* Stream 0's EVM doubles as the overall
				 * signal quality. */
				if (i == 0)
					pstats->signalquality =
					    (u8) (evm & 0xff);
				pstats->RX_SIGQ[i] =
				    (u8) (evm & 0xff);
			}
		}
	}
	/* Final UI-facing strength: CCK uses the single pwdb figure,
	 * OFDM averages over the active RF paths. */
	if (is_cck_rate)
		pstats->signalstrength =
		    (u8) (_rtl92c_signal_scale_mapping(hw, pwdb_all));
	else if (rf_rx_num != 0)
		pstats->signalstrength =
		    (u8) (_rtl92c_signal_scale_mapping
			  (hw, total_rssi /= rf_rx_num));
}
/*
 * Maintain the sliding-window RSSI average shown to userspace and the
 * per-path exponentially smoothed RSSI percentages.  Only frames sent
 * to us or beacons contribute.
 */
static void _rtl92c_process_ui_rssi(struct ieee80211_hw *hw,
				    struct rtl_stats *pstats)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_phy *rtlphy = &(rtlpriv->phy);
	u8 rfpath;
	u32 last_rssi, tmpval;
	if (pstats->packet_toself || pstats->packet_beacon) {
		rtlpriv->stats.rssi_calculate_cnt++;
		/* Once the window is full, evict the oldest sample
		 * before adding the new one. */
		if (rtlpriv->stats.ui_rssi.total_num++ >=
		    PHY_RSSI_SLID_WIN_MAX) {
			rtlpriv->stats.ui_rssi.total_num =
			    PHY_RSSI_SLID_WIN_MAX;
			last_rssi =
			    rtlpriv->stats.ui_rssi.elements[rtlpriv->
							    stats.ui_rssi.index];
			rtlpriv->stats.ui_rssi.total_val -= last_rssi;
		}
		rtlpriv->stats.ui_rssi.total_val += pstats->signalstrength;
		rtlpriv->stats.ui_rssi.elements[rtlpriv->stats.ui_rssi.
						index++] = pstats->signalstrength;
		/* Circular index over the window. */
		if (rtlpriv->stats.ui_rssi.index >= PHY_RSSI_SLID_WIN_MAX)
			rtlpriv->stats.ui_rssi.index = 0;
		tmpval = rtlpriv->stats.ui_rssi.total_val /
		    rtlpriv->stats.ui_rssi.total_num;
		rtlpriv->stats.signal_strength =
		    _rtl92c_translate_todbm(hw, (u8) tmpval);
		pstats->rssi = rtlpriv->stats.signal_strength;
	}
	/* Per-path smoothing only applies to OFDM frames addressed to us
	 * (CCK has no per-path report). */
	if (!pstats->is_cck && pstats->packet_toself) {
		for (rfpath = RF90_PATH_A; rfpath < rtlphy->num_total_rfpath;
		     rfpath++) {
			if (!rtl8192_phy_check_is_legal_rfpath(hw, rfpath))
				continue;
			/* Seed the average from the first sample. */
			if (rtlpriv->stats.rx_rssi_percentage[rfpath] == 0) {
				rtlpriv->stats.rx_rssi_percentage[rfpath] =
				    pstats->rx_mimo_signalstrength[rfpath];
			}
			/* Exponential moving average; when rising, round
			 * up by one so the average can actually reach the
			 * new level. */
			if (pstats->rx_mimo_signalstrength[rfpath] >
			    rtlpriv->stats.rx_rssi_percentage[rfpath]) {
				rtlpriv->stats.rx_rssi_percentage[rfpath] =
				    ((rtlpriv->stats.
				      rx_rssi_percentage[rfpath] *
				      (RX_SMOOTH_FACTOR - 1)) +
				     (pstats->rx_mimo_signalstrength[rfpath])) /
				    (RX_SMOOTH_FACTOR);
				rtlpriv->stats.rx_rssi_percentage[rfpath] =
				    rtlpriv->stats.rx_rssi_percentage[rfpath] +
				    1;
			} else {
				rtlpriv->stats.rx_rssi_percentage[rfpath] =
				    ((rtlpriv->stats.
				      rx_rssi_percentage[rfpath] *
				      (RX_SMOOTH_FACTOR - 1)) +
				     (pstats->rx_mimo_signalstrength[rfpath])) /
				    (RX_SMOOTH_FACTOR);
			}
		}
	}
}
/*
 * Fold this frame's received signal power into the 5/6-weighted running
 * average, nudged by +/-5 toward the direction of change so the average
 * tracks trends a little faster.
 */
static void _rtl92c_update_rxsignalstatistics(struct ieee80211_hw *hw,
					      struct rtl_stats *pstats)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	int weighting;

	/* Seed the running average from the first sample. */
	if (rtlpriv->stats.recv_signal_power == 0)
		rtlpriv->stats.recv_signal_power = pstats->recvsignalpower;

	if (pstats->recvsignalpower > rtlpriv->stats.recv_signal_power)
		weighting = 5;
	else if (pstats->recvsignalpower < rtlpriv->stats.recv_signal_power)
		weighting = -5;
	else
		weighting = 0;

	rtlpriv->stats.recv_signal_power =
	    (rtlpriv->stats.recv_signal_power * 5 +
	     pstats->recvsignalpower + weighting) / 6;
}
/*
 * Update the exponentially smoothed PWDB (power in dB) figure used by
 * the dynamic mechanisms, then refresh the RX signal statistics.
 * Ad-hoc mode is skipped entirely here.
 */
static void _rtl92c_process_pwdb(struct ieee80211_hw *hw,
				 struct rtl_stats *pstats)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	long undecorated_smoothed_pwdb = 0;
	if (mac->opmode == NL80211_IFTYPE_ADHOC) {
		/* NOTE(review): ad-hoc is deliberately not tracked by
		 * this driver version. */
		return;
	} else {
		undecorated_smoothed_pwdb =
		    rtlpriv->dm.undecorated_smoothed_pwdb;
	}
	if (pstats->packet_toself || pstats->packet_beacon) {
		/* Negative means "not yet initialized": seed from this
		 * frame. */
		if (undecorated_smoothed_pwdb < 0)
			undecorated_smoothed_pwdb = pstats->rx_pwdb_all;
		/* Exponential moving average; round up by one when the
		 * new sample is higher so the average can converge. */
		if (pstats->rx_pwdb_all > (u32) undecorated_smoothed_pwdb) {
			undecorated_smoothed_pwdb =
			    (((undecorated_smoothed_pwdb) *
			      (RX_SMOOTH_FACTOR - 1)) +
			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
			undecorated_smoothed_pwdb = undecorated_smoothed_pwdb
			    + 1;
		} else {
			undecorated_smoothed_pwdb =
			    (((undecorated_smoothed_pwdb) *
			      (RX_SMOOTH_FACTOR - 1)) +
			     (pstats->rx_pwdb_all)) / (RX_SMOOTH_FACTOR);
		}
		rtlpriv->dm.undecorated_smoothed_pwdb =
		    undecorated_smoothed_pwdb;
		_rtl92c_update_rxsignalstatistics(hw, pstats);
	}
}
/*
 * Maintain the sliding-window link-quality average and the per-stream
 * smoothed EVM statistics.
 *
 * Cleanup: the original wrapped the whole body in two nested ifs and
 * ended with a dead 'else { ; }'; rewritten with guard clauses, same
 * behavior.
 */
static void _rtl92c_process_LINK_Q(struct ieee80211_hw *hw,
				   struct rtl_stats *pstats)
{
	struct rtl_priv *rtlpriv = rtl_priv(hw);
	u32 last_evm = 0, n_stream, tmpval;

	/* Ignore frames without a quality report, and frames that are
	 * neither addressed to us nor beacons. */
	if (pstats->signalquality == 0)
		return;
	if (!pstats->packet_toself && !pstats->packet_beacon)
		return;

	/* Once the window is full, evict the oldest sample first. */
	if (rtlpriv->stats.LINK_Q.total_num++ >=
	    PHY_LINKQUALITY_SLID_WIN_MAX) {
		rtlpriv->stats.LINK_Q.total_num =
		    PHY_LINKQUALITY_SLID_WIN_MAX;
		last_evm =
		    rtlpriv->stats.LINK_Q.elements
		    [rtlpriv->stats.LINK_Q.index];
		rtlpriv->stats.LINK_Q.total_val -= last_evm;
	}
	rtlpriv->stats.LINK_Q.total_val += pstats->signalquality;
	rtlpriv->stats.LINK_Q.elements
	    [rtlpriv->stats.LINK_Q.index++] = pstats->signalquality;
	/* Circular index over the window. */
	if (rtlpriv->stats.LINK_Q.index >= PHY_LINKQUALITY_SLID_WIN_MAX)
		rtlpriv->stats.LINK_Q.index = 0;

	tmpval = rtlpriv->stats.LINK_Q.total_val /
	    rtlpriv->stats.LINK_Q.total_num;
	rtlpriv->stats.signal_quality = tmpval;
	rtlpriv->stats.last_sigstrength_inpercent = tmpval;

	/* Per-stream EVM: seed from the first sample, then apply the
	 * usual exponential moving average. -1 means "no report". */
	for (n_stream = 0; n_stream < 2; n_stream++) {
		if (pstats->RX_SIGQ[n_stream] == -1)
			continue;
		if (!rtlpriv->stats.RX_EVM[n_stream]) {
			rtlpriv->stats.RX_EVM[n_stream]
			    = pstats->RX_SIGQ[n_stream];
		}
		rtlpriv->stats.RX_EVM[n_stream] =
		    ((rtlpriv->stats.RX_EVM
		      [n_stream] *
		      (RX_SMOOTH_FACTOR - 1)) +
		     (pstats->RX_SIGQ
		      [n_stream] * 1)) /
		    (RX_SMOOTH_FACTOR);
	}
}
/*
 * Dispatch one frame's statistics to the RSSI, PWDB and link-quality
 * trackers.  Frames that neither match our BSSID nor are beacons carry
 * no useful signal information and are ignored.
 */
static void _rtl92c_process_phyinfo(struct ieee80211_hw *hw,
				    u8 *buffer,
				    struct rtl_stats *pcurrent_stats)
{
	if (pcurrent_stats->packet_matchbssid ||
	    pcurrent_stats->packet_beacon) {
		_rtl92c_process_ui_rssi(hw, pcurrent_stats);
		_rtl92c_process_pwdb(hw, pcurrent_stats);
		_rtl92c_process_LINK_Q(hw, pcurrent_stats);
	}
}
/*
 * Classify one received frame (matches our BSSID? addressed to us?
 * beacon?) and feed its PHY status into the statistics machinery.
 *
 * Fix: 'packet_beacon' was only assigned when the frame actually was a
 * beacon, so for every other frame an uninitialized stack value was
 * passed down to _rtl92c_query_rxphystatus() (undefined behavior, and
 * random frames could be treated as beacons).  Initialize it to false.
 */
void rtl92c_translate_rx_signal_stuff(struct ieee80211_hw *hw,
				      struct sk_buff *skb,
				      struct rtl_stats *pstats,
				      struct rx_desc_92c *pdesc,
				      struct rx_fwinfo_92c *p_drvinfo)
{
	struct rtl_mac *mac = rtl_mac(rtl_priv(hw));
	struct rtl_efuse *rtlefuse = rtl_efuse(rtl_priv(hw));
	struct ieee80211_hdr *hdr;
	u8 *tmp_buf;
	u8 *praddr;
	u8 *psaddr;
	__le16 fc;
	u16 type, cpu_fc;
	bool packet_matchbssid, packet_toself;
	bool packet_beacon = 0;	/* false; was read uninitialized before */
	/* The 802.11 header follows the descriptor's driver-info area. */
	tmp_buf = skb->data + pstats->rx_drvinfo_size + pstats->rx_bufshift;
	hdr = (struct ieee80211_hdr *)tmp_buf;
	fc = hdr->frame_control;
	cpu_fc = le16_to_cpu(fc);
	type = WLAN_FC_GET_TYPE(fc);
	praddr = hdr->addr1;
	psaddr = hdr->addr2;
	/* Match against the BSSID field appropriate for the frame's
	 * ToDS/FromDS direction; control frames and errored frames never
	 * match. */
	packet_matchbssid =
	    ((IEEE80211_FTYPE_CTL != type) &&
	     (!compare_ether_addr(mac->bssid,
				  (cpu_fc & IEEE80211_FCTL_TODS) ?
				  hdr->addr1 : (cpu_fc & IEEE80211_FCTL_FROMDS) ?
				  hdr->addr2 : hdr->addr3)) &&
	     (!pstats->hwerror) && (!pstats->crc) && (!pstats->icv));
	packet_toself = packet_matchbssid &&
	    (!compare_ether_addr(praddr, rtlefuse->dev_addr));
	if (ieee80211_is_beacon(fc))
		packet_beacon = true;
	_rtl92c_query_rxphystatus(hw, pstats, pdesc, p_drvinfo,
				  packet_matchbssid, packet_toself,
				  packet_beacon);
	_rtl92c_process_phyinfo(hw, tmp_buf, pstats);
}
| gpl-2.0 |
iBluemind/android_kernel_lge_su760 | drivers/media/common/tuners/tda18218.c | 3113 | 8191 | /*
* NXP TDA18218HN silicon tuner driver
*
* Copyright (C) 2010 Antti Palosaari <crope@iki.fi>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
#include "tda18218.h"
#include "tda18218_priv.h"
static int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "Turn on/off debugging (default:off).");
/* write multiple registers */
/* write multiple registers */
/*
 * Write 'len' bytes starting at 'reg', splitting into chunks of at most
 * (i2c_wr_max - 1) payload bytes per I2C message (the extra byte is the
 * register address).
 *
 * Fix: the original loop condition '(i <= quotient && remainder)' never
 * executed when 'len' was an exact multiple of the chunk size
 * (remainder == 0), so nothing was written and -EREMOTEIO was returned.
 * The last iteration now breaks out instead when there is no partial
 * tail chunk.
 */
static int tda18218_wr_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
{
	int ret = 0;
	u8 buf[1+len], quotient, remainder, i, msg_len, msg_len_max;
	struct i2c_msg msg[1] = {
		{
			.addr = priv->cfg->i2c_address,
			.flags = 0,
			.buf = buf,
		}
	};

	msg_len_max = priv->cfg->i2c_wr_max - 1;
	quotient = len / msg_len_max;
	remainder = len % msg_len_max;
	msg_len = msg_len_max;
	for (i = 0; i <= quotient; i++) {
		if (i == quotient) { /* last, possibly partial, chunk */
			if (!remainder)
				break; /* len was an exact multiple */
			msg_len = remainder;
		}
		msg[0].len = msg_len + 1;
		buf[0] = reg + i * msg_len_max;
		memcpy(&buf[1], &val[i * msg_len_max], msg_len);

		ret = i2c_transfer(priv->i2c, msg, 1);
		if (ret != 1)
			break;
	}

	if (ret == 1) {
		ret = 0;
	} else {
		warn("i2c wr failed ret:%d reg:%02x len:%d", ret, reg, len);
		ret = -EREMOTEIO;
	}

	return ret;
}
/* read multiple registers */
/* read multiple registers */
/*
 * Read 'len' registers starting at 'reg'.  The chip only supports
 * reads starting from register 0x00, so the whole prefix is fetched
 * and the requested slice copied out.
 */
static int tda18218_rd_regs(struct tda18218_priv *priv, u8 reg, u8 *val, u8 len)
{
	u8 buf[reg+len]; /* we must start read always from reg 0x00 */
	struct i2c_msg msg[2] = {
		{
			.addr = priv->cfg->i2c_address,
			.flags = 0,
			.len = 1,
			.buf = "\x00",
		}, {
			.addr = priv->cfg->i2c_address,
			.flags = I2C_M_RD,
			.len = sizeof(buf),
			.buf = buf,
		}
	};
	int ret = i2c_transfer(priv->i2c, msg, 2);

	if (ret != 2) {
		warn("i2c rd failed ret:%d reg:%02x len:%d", ret, reg, len);
		return -EREMOTEIO;
	}

	memcpy(val, &buf[reg], len);
	return 0;
}
/* write single register */
/* write single register */
static int tda18218_wr_reg(struct tda18218_priv *priv, u8 reg, u8 val)
{
	u8 tmp = val;

	return tda18218_wr_regs(priv, reg, &tmp, 1);
}
/* read single register */
/* read single register */
static int tda18218_rd_reg(struct tda18218_priv *priv, u8 reg, u8 *val)
{
	/* A single-register read is just a length-1 multi-read. */
	return tda18218_rd_regs(priv, reg, val, 1);
}
/*
 * Tune to the requested frequency/bandwidth: pick band-pass and
 * low-pass filter settings, program the fractional LO divider, pulse
 * the Freq_prog_Start bit, and run the (placeholder) AGC sequence.
 * Returns 0 on success or a negative error from the register writes.
 */
static int tda18218_set_params(struct dvb_frontend *fe,
	struct dvb_frontend_parameters *params)
{
	struct tda18218_priv *priv = fe->tuner_priv;
	int ret;
	u8 buf[3], i, BP_Filter, LP_Fc;
	u32 LO_Frac;
	/* TODO: find out correct AGC algorithm */
	u8 agc[][2] = {
		{ R20_AGC11, 0x60 },
		{ R23_AGC21, 0x02 },
		{ R20_AGC11, 0xa0 },
		{ R23_AGC21, 0x09 },
		{ R20_AGC11, 0xe0 },
		{ R23_AGC21, 0x0c },
		{ R20_AGC11, 0x40 },
		{ R23_AGC21, 0x01 },
		{ R20_AGC11, 0x80 },
		{ R23_AGC21, 0x08 },
		{ R20_AGC11, 0xc0 },
		{ R23_AGC21, 0x0b },
		{ R24_AGC22, 0x1c },
		{ R24_AGC22, 0x0c },
	};

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */

	/* low-pass filter cut-off frequency */
	/* The LO is placed at RF + IF (IF = bandwidth/2 rounded to the
	 * chip's supported offsets). */
	switch (params->u.ofdm.bandwidth) {
	case BANDWIDTH_6_MHZ:
		LP_Fc = 0;
		LO_Frac = params->frequency + 4000000;
		break;
	case BANDWIDTH_7_MHZ:
		LP_Fc = 1;
		LO_Frac = params->frequency + 3500000;
		break;
	case BANDWIDTH_8_MHZ:
	default:
		LP_Fc = 2;
		LO_Frac = params->frequency + 4000000;
		break;
	}

	/* band-pass filter */
	if (LO_Frac < 188000000)
		BP_Filter = 3;
	else if (LO_Frac < 253000000)
		BP_Filter = 4;
	else if (LO_Frac < 343000000)
		BP_Filter = 5;
	else
		BP_Filter = 6;

	/* Merge the filter selections into the cached IF registers. */
	buf[0] = (priv->regs[R1A_IF1] & ~7) | BP_Filter; /* BP_Filter */
	buf[1] = (priv->regs[R1B_IF2] & ~3) | LP_Fc; /* LP_Fc */
	buf[2] = priv->regs[R1C_AGC2B];
	ret = tda18218_wr_regs(priv, R1A_IF1, buf, 3);
	if (ret)
		goto error;

	/* LO divider is programmed in kHz, split over 20 bits across
	 * three registers. */
	buf[0] = (LO_Frac / 1000) >> 12; /* LO_Frac_0 */
	buf[1] = (LO_Frac / 1000) >> 4; /* LO_Frac_1 */
	buf[2] = (LO_Frac / 1000) << 4 |
		(priv->regs[R0C_MD5] & 0x0f); /* LO_Frac_2 */
	ret = tda18218_wr_regs(priv, R0A_MD3, buf, 3);
	if (ret)
		goto error;

	/* Pulse Freq_prog_Start (set then clear) to latch the new LO. */
	buf[0] = priv->regs[R0F_MD8] | (1 << 6); /* Freq_prog_Start */
	ret = tda18218_wr_regs(priv, R0F_MD8, buf, 1);
	if (ret)
		goto error;

	buf[0] = priv->regs[R0F_MD8] & ~(1 << 6); /* Freq_prog_Start */
	ret = tda18218_wr_regs(priv, R0F_MD8, buf, 1);
	if (ret)
		goto error;

	/* trigger AGC */
	for (i = 0; i < ARRAY_SIZE(agc); i++) {
		ret = tda18218_wr_reg(priv, agc[i][0], agc[i][1]);
		if (ret)
			goto error;
	}

error:
	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */

	if (ret)
		dbg("%s: failed ret:%d", __func__, ret);

	return ret;
}
/*
 * Put the tuner into standby by setting PD1 bit 0, bracketed by the
 * demodulator's I2C gate.  Returns the result of the register write.
 */
static int tda18218_sleep(struct dvb_frontend *fe)
{
	struct tda18218_priv *priv = fe->tuner_priv;
	int ret;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */

	/* standby */
	ret = tda18218_wr_reg(priv, R17_PD1, priv->regs[R17_PD1] | (1 << 0));

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */

	if (ret)
		dbg("%s: failed ret:%d", __func__, ret);

	return ret;
}
/*
 * Initialize the tuner by writing back the full cached register image
 * (which also clears the standby bit set at attach/sleep time).
 */
static int tda18218_init(struct dvb_frontend *fe)
{
	struct tda18218_priv *priv = fe->tuner_priv;
	int ret;

	/* TODO: calibrations */

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */

	ret = tda18218_wr_regs(priv, R00_ID, priv->regs, TDA18218_NUM_REGS);

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */

	if (ret)
		dbg("%s: failed ret:%d", __func__, ret);

	return ret;
}
/* Free the tuner's private state and clear the stale pointer. */
static int tda18218_release(struct dvb_frontend *fe)
{
	struct tda18218_priv *priv = fe->tuner_priv;

	fe->tuner_priv = NULL;
	kfree(priv);
	return 0;
}
/* Tuner callbacks exposed to the DVB core (copied into fe->ops at
 * attach time).  Frequency limits/step are in Hz. */
static const struct dvb_tuner_ops tda18218_tuner_ops = {
	.info = {
		.name = "NXP TDA18218",

		.frequency_min = 174000000,
		.frequency_max = 864000000,
		.frequency_step = 1000,
	},

	.release = tda18218_release,
	.init = tda18218_init,
	.sleep = tda18218_sleep,

	.set_params = tda18218_set_params,
};
/*
 * Probe for a TDA18218 behind the given I2C adapter, install the tuner
 * ops on 'fe' and put the chip into standby.  Returns 'fe' on success
 * or NULL when the chip is absent or allocation fails.
 *
 * Fixes on the probe-failure path: the demodulator's I2C gate was left
 * open and fe->tuner_priv was left pointing at freed memory; the gate
 * is now closed and the pointer cleared before returning NULL.
 */
struct dvb_frontend *tda18218_attach(struct dvb_frontend *fe,
	struct i2c_adapter *i2c, struct tda18218_config *cfg)
{
	struct tda18218_priv *priv = NULL;
	u8 val;
	int ret;
	/* chip default registers values */
	static u8 def_regs[] = {
		0xc0, 0x88, 0x00, 0x8e, 0x03, 0x00, 0x00, 0xd0, 0x00, 0x40,
		0x00, 0x00, 0x07, 0xff, 0x84, 0x09, 0x00, 0x13, 0x00, 0x00,
		0x01, 0x84, 0x09, 0xf0, 0x19, 0x0a, 0x8e, 0x69, 0x98, 0x01,
		0x00, 0x58, 0x10, 0x40, 0x8c, 0x00, 0x0c, 0x48, 0x85, 0xc9,
		0xa7, 0x00, 0x00, 0x00, 0x30, 0x81, 0x80, 0x00, 0x39, 0x00,
		0x8a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xf6, 0xf6
	};

	priv = kzalloc(sizeof(struct tda18218_priv), GFP_KERNEL);
	if (priv == NULL)
		return NULL;

	priv->cfg = cfg;
	priv->i2c = i2c;
	fe->tuner_priv = priv;

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 1); /* open I2C-gate */

	/* check if the tuner is there */
	ret = tda18218_rd_reg(priv, R00_ID, &val);
	dbg("%s: ret:%d chip ID:%02x", __func__, ret, val);
	if (ret || val != def_regs[R00_ID]) {
		/* Close the gate and undo the partial attach before
		 * bailing out. */
		if (fe->ops.i2c_gate_ctrl)
			fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */
		fe->tuner_priv = NULL;
		kfree(priv);
		return NULL;
	}

	info("NXP TDA18218HN successfully identified.");

	memcpy(&fe->ops.tuner_ops, &tda18218_tuner_ops,
		sizeof(struct dvb_tuner_ops));
	memcpy(priv->regs, def_regs, sizeof(def_regs));

	/* loop-through enabled chip default register values */
	if (priv->cfg->loop_through) {
		priv->regs[R17_PD1] = 0xb0;
		priv->regs[R18_PD2] = 0x59;
	}

	/* standby */
	ret = tda18218_wr_reg(priv, R17_PD1, priv->regs[R17_PD1] | (1 << 0));
	if (ret)
		dbg("%s: failed ret:%d", __func__, ret);

	if (fe->ops.i2c_gate_ctrl)
		fe->ops.i2c_gate_ctrl(fe, 0); /* close I2C-gate */

	return fe;
}
EXPORT_SYMBOL(tda18218_attach);
MODULE_DESCRIPTION("NXP TDA18218HN silicon tuner driver");
MODULE_AUTHOR("Antti Palosaari <crope@iki.fi>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
AngSanley/angsanleykernel-nokia-normandy | drivers/target/iscsi/iscsi_target_erl0.c | 3625 | 28851 | /******************************************************************************
* This file contains error recovery level zero functions used by
* the iSCSI Target driver.
*
* \u00a9 Copyright 2007-2011 RisingTide Systems LLC.
*
* Licensed to the Linux Foundation under the General Public License (GPL) version 2.
*
* Author: Nicholas A. Bellinger <nab@linux-iscsi.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
******************************************************************************/
#include <scsi/iscsi_proto.h>
#include <target/target_core_base.h>
#include <target/target_core_fabric.h>
#include "iscsi_target_core.h"
#include "iscsi_target_seq_pdu_list.h"
#include "iscsi_target_tq.h"
#include "iscsi_target_erl0.h"
#include "iscsi_target_erl1.h"
#include "iscsi_target_erl2.h"
#include "iscsi_target_util.h"
#include "iscsi_target.h"
/*
* Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
* checks against to determine a PDU's Offset+Length is within the current
* DataOUT Sequence. Used for DataSequenceInOrder=Yes only.
*/
/*
 * Used to set values in struct iscsi_cmd that iscsit_dataout_check_sequence()
 * checks against to determine a PDU's Offset+Length is within the current
 * DataOUT Sequence.  Used for DataSequenceInOrder=Yes only.
 *
 * Fix: in the unsolicited-data branch the ternary lacked parentheses, so
 * 'write_data_done + (len > FirstBurstLength) ? a : b' parsed as
 * '(write_data_done + cmp) ? a : b' — write_data_done was folded into the
 * condition instead of being added to the chosen burst length, producing a
 * wrong seq_end_offset whenever write_data_done was non-zero.
 */
void iscsit_set_dataout_sequence_values(
	struct iscsi_cmd *cmd)
{
	struct iscsi_conn *conn = cmd->conn;
	/*
	 * Still set seq_start_offset and seq_end_offset for Unsolicited
	 * DataOUT, even if DataSequenceInOrder=No.
	 */
	if (cmd->unsolicited_data) {
		cmd->seq_start_offset = cmd->write_data_done;
		/* End of the unsolicited window: at most FirstBurstLength
		 * bytes, but never beyond the total transfer length. */
		cmd->seq_end_offset = cmd->write_data_done +
			((cmd->data_length >
			  conn->sess->sess_ops->FirstBurstLength) ?
			 conn->sess->sess_ops->FirstBurstLength :
			 cmd->data_length);
		return;
	}

	if (!conn->sess->sess_ops->DataSequenceInOrder)
		return;

	if (!cmd->seq_start_offset && !cmd->seq_end_offset) {
		/* First sequence for this command. */
		cmd->seq_start_offset = cmd->write_data_done;
		cmd->seq_end_offset = (cmd->data_length >
			conn->sess->sess_ops->MaxBurstLength) ?
			(cmd->write_data_done +
			conn->sess->sess_ops->MaxBurstLength) : cmd->data_length;
	} else {
		/* Advance the window by one MaxBurstLength, clamped to the
		 * total transfer length. */
		cmd->seq_start_offset = cmd->seq_end_offset;
		cmd->seq_end_offset = ((cmd->seq_end_offset +
			conn->sess->sess_ops->MaxBurstLength) >=
			cmd->data_length) ? cmd->data_length :
			(cmd->seq_end_offset +
			 conn->sess->sess_ops->MaxBurstLength);
	}
}
/*
 * First check run on an incoming DataOUT PDU: while the command is in
 * within-command recovery, dump any payload that is not the one the
 * sequence is waiting for.  Returns DATAOUT_NORMAL, the result of the
 * dump, or DATAOUT_CANNOT_RECOVER.
 *
 * NOTE(review): hdr->offset/datasn are used without explicit byte-order
 * conversion here, matching the rest of this file — confirm wire-order
 * handling against the surrounding driver version.
 */
static int iscsit_dataout_within_command_recovery_check(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);
	/*
	 * We do the within-command recovery checks here as it is
	 * the first function called in iscsi_check_pre_dataout().
	 * Basically, if we are in within-command recovery and
	 * the PDU does not contain the offset the sequence needs,
	 * dump the payload.
	 *
	 * This only applies to DataPDUInOrder=Yes, for
	 * DataPDUInOrder=No we only re-request the failed PDU
	 * and check that all PDUs in a sequence are received
	 * upon end of sequence.
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder) {
		if ((cmd->cmd_flags & ICF_WITHIN_COMMAND_RECOVERY) &&
		    (cmd->write_data_done != hdr->offset))
			goto dump;

		cmd->cmd_flags &= ~ICF_WITHIN_COMMAND_RECOVERY;
	} else {
		struct iscsi_seq *seq;

		seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
		if (!seq)
			return DATAOUT_CANNOT_RECOVER;
		/*
		 * Set the struct iscsi_seq pointer to reuse later.
		 */
		cmd->seq_ptr = seq;

		if (conn->sess->sess_ops->DataPDUInOrder) {
			if ((seq->status ==
			     DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
			    ((seq->offset != hdr->offset) ||
			     (seq->data_sn != hdr->datasn)))
				goto dump;
		} else {
			if ((seq->status ==
			     DATAOUT_SEQUENCE_WITHIN_COMMAND_RECOVERY) &&
			    (seq->data_sn != hdr->datasn))
				goto dump;
		}

		if (seq->status == DATAOUT_SEQUENCE_COMPLETE)
			goto dump;

		if (seq->status != DATAOUT_SEQUENCE_COMPLETE)
			seq->status = 0;
	}

	return DATAOUT_NORMAL;

dump:
	pr_err("Dumping DataOUT PDU Offset: %u Length: %d DataSN:"
		" 0x%08x\n", hdr->offset, payload_length, hdr->datasn);
	return iscsit_dump_data_payload(conn, payload_length, 1);
}
/*
 * Validate an unsolicited DataOUT PDU against the unsolicited window
 * (seq_start_offset..seq_end_offset) and FirstBurstLength, including
 * the F-bit sanity rules.  Returns DATAOUT_NORMAL or
 * DATAOUT_CANNOT_RECOVER (the latter fails the connection).
 */
static int iscsit_dataout_check_unsolicited_sequence(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	u32 first_burst_len;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	/* The PDU must fall entirely inside the unsolicited window. */
	if ((hdr->offset < cmd->seq_start_offset) ||
	   ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
		pr_err("Command ITT: 0x%08x with Offset: %u,"
			" Length: %u outside of Unsolicited Sequence %u:%u while"
			" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
			hdr->offset, payload_length, cmd->seq_start_offset,
			cmd->seq_end_offset);
		return DATAOUT_CANNOT_RECOVER;
	}

	first_burst_len = (cmd->first_burst_len + payload_length);

	if (first_burst_len > conn->sess->sess_ops->FirstBurstLength) {
		pr_err("Total %u bytes exceeds FirstBurstLength: %u"
			" for this Unsolicited DataOut Burst.\n",
			first_burst_len, conn->sess->sess_ops->FirstBurstLength);
		transport_send_check_condition_and_sense(&cmd->se_cmd,
				TCM_INCORRECT_AMOUNT_OF_DATA, 0);
		return DATAOUT_CANNOT_RECOVER;
	}

	/*
	 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
	 * checks for the current Unsolicited DataOUT Sequence.
	 */
	if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
		/*
		 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
		 * sequence checks are handled in
		 * iscsit_dataout_datapduinorder_no_fbit().
		 */
		if (!conn->sess->sess_ops->DataPDUInOrder)
			goto out;

		/* The final PDU must land exactly on the transfer length
		 * or the FirstBurstLength boundary. */
		if ((first_burst_len != cmd->data_length) &&
		    (first_burst_len != conn->sess->sess_ops->FirstBurstLength)) {
			pr_err("Unsolicited non-immediate data"
			" received %u does not equal FirstBurstLength: %u, and"
			" does not equal ExpXferLen %u.\n", first_burst_len,
				conn->sess->sess_ops->FirstBurstLength,
				cmd->data_length);
			transport_send_check_condition_and_sense(&cmd->se_cmd,
					TCM_INCORRECT_AMOUNT_OF_DATA, 0);
			return DATAOUT_CANNOT_RECOVER;
		}
	} else {
		/* Conversely, reaching either boundary without the F-bit
		 * set is a protocol error. */
		if (first_burst_len == conn->sess->sess_ops->FirstBurstLength) {
			pr_err("Command ITT: 0x%08x reached"
			" FirstBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
				" error.\n", cmd->init_task_tag,
				conn->sess->sess_ops->FirstBurstLength);
			return DATAOUT_CANNOT_RECOVER;
		}
		if (first_burst_len == cmd->data_length) {
			pr_err("Command ITT: 0x%08x reached"
			" ExpXferLen: %u, but ISCSI_FLAG_CMD_FINAL is not set. protocol"
			" error.\n", cmd->init_task_tag, cmd->data_length);
			return DATAOUT_CANNOT_RECOVER;
		}
	}

out:
	return DATAOUT_NORMAL;
}
/*
 * Validate a solicited DataOUT PDU against the active sequence window
 * and MaxBurstLength, including F-bit sanity rules for both
 * DataSequenceInOrder modes.  Returns DATAOUT_NORMAL,
 * DATAOUT_WITHIN_COMMAND_RECOVERY (payload dumped), or
 * DATAOUT_CANNOT_RECOVER.
 */
static int iscsit_dataout_check_sequence(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	u32 next_burst_len;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_seq *seq = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	/*
	 * For DataSequenceInOrder=Yes: Check that the offset and offset+length
	 * is within range as defined by iscsi_set_dataout_sequence_values().
	 *
	 * For DataSequenceInOrder=No: Check that an struct iscsi_seq exists for
	 * offset+length tuple.
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder) {
		/*
		 * Due to possibility of recovery DataOUT sent by the initiator
		 * fullfilling an Recovery R2T, it's best to just dump the
		 * payload here, instead of erroring out.
		 */
		if ((hdr->offset < cmd->seq_start_offset) ||
		   ((hdr->offset + payload_length) > cmd->seq_end_offset)) {
			pr_err("Command ITT: 0x%08x with Offset: %u,"
			" Length: %u outside of Sequence %u:%u while"
			" DataSequenceInOrder=Yes.\n", cmd->init_task_tag,
			hdr->offset, payload_length, cmd->seq_start_offset,
				cmd->seq_end_offset);

			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
				return DATAOUT_CANNOT_RECOVER;
			return DATAOUT_WITHIN_COMMAND_RECOVERY;
		}

		next_burst_len = (cmd->next_burst_len + payload_length);
	} else {
		seq = iscsit_get_seq_holder(cmd, hdr->offset, payload_length);
		if (!seq)
			return DATAOUT_CANNOT_RECOVER;
		/*
		 * Set the struct iscsi_seq pointer to reuse later.
		 */
		cmd->seq_ptr = seq;

		if (seq->status == DATAOUT_SEQUENCE_COMPLETE) {
			/* Duplicate data for a finished sequence: dump it. */
			if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
				return DATAOUT_CANNOT_RECOVER;
			return DATAOUT_WITHIN_COMMAND_RECOVERY;
		}

		next_burst_len = (seq->next_burst_len + payload_length);
	}

	if (next_burst_len > conn->sess->sess_ops->MaxBurstLength) {
		pr_err("Command ITT: 0x%08x, NextBurstLength: %u and"
			" Length: %u exceeds MaxBurstLength: %u. protocol"
			" error.\n", cmd->init_task_tag,
			(next_burst_len - payload_length),
			payload_length, conn->sess->sess_ops->MaxBurstLength);
		return DATAOUT_CANNOT_RECOVER;
	}

	/*
	 * Perform various MaxBurstLength and ISCSI_FLAG_CMD_FINAL sanity
	 * checks for the current DataOUT Sequence.
	 */
	if (hdr->flags & ISCSI_FLAG_CMD_FINAL) {
		/*
		 * Ignore ISCSI_FLAG_CMD_FINAL checks while DataPDUInOrder=No, end of
		 * sequence checks are handled in
		 * iscsit_dataout_datapduinorder_no_fbit().
		 */
		if (!conn->sess->sess_ops->DataPDUInOrder)
			goto out;

		/* An F-bit before the burst/sequence boundary is a
		 * protocol error. */
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			if ((next_burst_len <
			     conn->sess->sess_ops->MaxBurstLength) &&
			   ((cmd->write_data_done + payload_length) <
			     cmd->data_length)) {
				pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
					" before end of DataOUT sequence, protocol"
					" error.\n", cmd->init_task_tag);
				return DATAOUT_CANNOT_RECOVER;
			}
		} else {
			if (next_burst_len < seq->xfer_len) {
				pr_err("Command ITT: 0x%08x set ISCSI_FLAG_CMD_FINAL"
					" before end of DataOUT sequence, protocol"
					" error.\n", cmd->init_task_tag);
				return DATAOUT_CANNOT_RECOVER;
			}
		}
	} else {
		/* Conversely, reaching a boundary without the F-bit set
		 * is also a protocol error. */
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			if (next_burst_len ==
					conn->sess->sess_ops->MaxBurstLength) {
				pr_err("Command ITT: 0x%08x reached"
				" MaxBurstLength: %u, but ISCSI_FLAG_CMD_FINAL is"
				" not set, protocol error.", cmd->init_task_tag,
					conn->sess->sess_ops->MaxBurstLength);
				return DATAOUT_CANNOT_RECOVER;
			}
			if ((cmd->write_data_done + payload_length) ==
					cmd->data_length) {
				pr_err("Command ITT: 0x%08x reached"
				" last DataOUT PDU in sequence but ISCSI_FLAG_"
				"CMD_FINAL is not set, protocol error.\n",
					cmd->init_task_tag);
				return DATAOUT_CANNOT_RECOVER;
			}
		} else {
			if (next_burst_len == seq->xfer_len) {
				pr_err("Command ITT: 0x%08x reached"
				" last DataOUT PDU in sequence but ISCSI_FLAG_"
				"CMD_FINAL is not set, protocol error.\n",
					cmd->init_task_tag);
				return DATAOUT_CANNOT_RECOVER;
			}
		}
	}

out:
	return DATAOUT_NORMAL;
}
/*
 * Check the PDU's DataSN against the expected value for the command
 * (DataSequenceInOrder=Yes) or the current sequence (=No).  A higher
 * DataSN triggers within-command recovery (requires ERL>0); a lower one
 * is a duplicate/stale PDU whose payload is dumped.
 */
static int iscsit_dataout_check_datasn(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	int dump = 0, recovery = 0;
	u32 data_sn = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	/*
	 * Considering the target has no method of re-requesting DataOUT
	 * by DataSN, if we receieve a greater DataSN than expected we
	 * assume the functions for DataPDUInOrder=[Yes,No] below will
	 * handle it.
	 *
	 * If the DataSN is less than expected, dump the payload.
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder)
		data_sn = cmd->data_sn;
	else {
		struct iscsi_seq *seq = cmd->seq_ptr;
		data_sn = seq->data_sn;
	}

	if (hdr->datasn > data_sn) {
		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
			" higher than expected 0x%08x.\n", cmd->init_task_tag,
				hdr->datasn, data_sn);
		recovery = 1;
		goto recover;
	} else if (hdr->datasn < data_sn) {
		pr_err("Command ITT: 0x%08x, received DataSN: 0x%08x"
			" lower than expected 0x%08x, discarding payload.\n",
			cmd->init_task_tag, hdr->datasn, data_sn);
		dump = 1;
		goto dump;
	}

	return DATAOUT_NORMAL;

recover:
	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
		pr_err("Unable to perform within-command recovery"
				" while ERL=0.\n");
		return DATAOUT_CANNOT_RECOVER;
	}
dump:
	if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
		return DATAOUT_CANNOT_RECOVER;

	return (recovery || dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY :
				DATAOUT_NORMAL;
}
/*
 * DataPDUInOrder=Yes pre-check: the PDU's offset must match the running
 * offset counter (global write_data_done, or the per-sequence offset
 * when DataSequenceInOrder=No).  A higher offset triggers sequence
 * recovery (ERL>0 required); a lower offset means a duplicate whose
 * payload is dumped.
 */
static int iscsit_dataout_pre_datapduinorder_yes(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	int dump = 0, recovery = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	/*
	 * For DataSequenceInOrder=Yes: If the offset is greater than the global
	 * DataPDUInOrder=Yes offset counter in struct iscsi_cmd a protcol error has
	 * occured and fail the connection.
	 *
	 * For DataSequenceInOrder=No: If the offset is greater than the per
	 * sequence DataPDUInOrder=Yes offset counter in struct iscsi_seq a protocol
	 * error has occured and fail the connection.
	 */
	if (conn->sess->sess_ops->DataSequenceInOrder) {
		if (hdr->offset != cmd->write_data_done) {
			pr_err("Command ITT: 0x%08x, received offset"
			" %u different than expected %u.\n", cmd->init_task_tag,
				hdr->offset, cmd->write_data_done);
			recovery = 1;
			goto recover;
		}
	} else {
		struct iscsi_seq *seq = cmd->seq_ptr;

		if (hdr->offset > seq->offset) {
			pr_err("Command ITT: 0x%08x, received offset"
			" %u greater than expected %u.\n", cmd->init_task_tag,
				hdr->offset, seq->offset);
			recovery = 1;
			goto recover;
		} else if (hdr->offset < seq->offset) {
			pr_err("Command ITT: 0x%08x, received offset"
			" %u less than expected %u, discarding payload.\n",
				cmd->init_task_tag, hdr->offset, seq->offset);
			dump = 1;
			goto dump;
		}
	}

	return DATAOUT_NORMAL;

recover:
	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
		pr_err("Unable to perform within-command recovery"
				" while ERL=0.\n");
		return DATAOUT_CANNOT_RECOVER;
	}
dump:
	if (iscsit_dump_data_payload(conn, payload_length, 1) < 0)
		return DATAOUT_CANNOT_RECOVER;

	return (recovery) ? iscsit_recover_dataout_sequence(cmd,
		hdr->offset, payload_length) :
	       (dump) ? DATAOUT_WITHIN_COMMAND_RECOVERY : DATAOUT_NORMAL;
}
/*
 * DataPDUInOrder=No pre-check: locate the PDU descriptor for this
 * offset/length and reject duplicates.  A PDU already marked received
 * has its payload dumped; unknown states fail the connection.
 */
static int iscsit_dataout_pre_datapduinorder_no(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);
	struct iscsi_pdu *pdu = iscsit_get_pdu_holder(cmd, hdr->offset,
						      payload_length);

	if (!pdu)
		return DATAOUT_CANNOT_RECOVER;

	/* Remember the descriptor for the post-CRC bookkeeping. */
	cmd->pdu_ptr = pdu;

	switch (pdu->status) {
	case ISCSI_PDU_NOT_RECEIVED:
	case ISCSI_PDU_CRC_FAILED:
	case ISCSI_PDU_TIMED_OUT:
		return DATAOUT_NORMAL;
	case ISCSI_PDU_RECEIVED_OK:
		pr_err("Command ITT: 0x%08x received already gotten"
			" Offset: %u, Length: %u\n", cmd->init_task_tag,
				hdr->offset, payload_length);
		return iscsit_dump_data_payload(cmd->conn, payload_length, 1);
	default:
		return DATAOUT_CANNOT_RECOVER;
	}
}
/*
 * Mark the R2T covering this end-of-sequence offset/length as complete
 * and decrement the outstanding-R2T count.  Unsolicited data has no
 * R2T, so it is a no-op.  Returns 0 on success, -1 if no matching R2T
 * is found.
 */
static int iscsit_dataout_update_r2t(struct iscsi_cmd *cmd, u32 offset, u32 length)
{
	struct iscsi_r2t *r2t;

	if (cmd->unsolicited_data)
		return 0;

	r2t = iscsit_get_r2t_for_eos(cmd, offset, length);
	if (!r2t)
		return -1;

	/* seq_complete and outstanding_r2ts are protected by r2t_lock. */
	spin_lock_bh(&cmd->r2t_lock);
	r2t->seq_complete = 1;
	cmd->outstanding_r2ts--;
	spin_unlock_bh(&cmd->r2t_lock);

	return 0;
}
/*
 * Record a successfully received out-of-order DataOUT PDU: stamp its
 * DataSN, flip the descriptor to RECEIVED_OK, and when the F-bit is set
 * run the end-of-sequence handling.
 */
static int iscsit_dataout_update_datapduinorder_no(
	struct iscsi_cmd *cmd,
	u32 data_sn,
	int f_bit)
{
	struct iscsi_pdu *pdu = cmd->pdu_ptr;

	pdu->data_sn = data_sn;

	switch (pdu->status) {
	case ISCSI_PDU_NOT_RECEIVED:
	case ISCSI_PDU_CRC_FAILED:
	case ISCSI_PDU_TIMED_OUT:
		pdu->status = ISCSI_PDU_RECEIVED_OK;
		break;
	default:
		return DATAOUT_CANNOT_RECOVER;
	}

	if (f_bit) {
		int ret = iscsit_dataout_datapduinorder_no_fbit(cmd, pdu);

		if (ret == DATAOUT_CANNOT_RECOVER)
			return ret;
	}

	return DATAOUT_NORMAL;
}
/*
 * Post-receive bookkeeping for a DataOUT PDU whose payload CRC passed.
 * Updates first/next burst accounting, DataSN counters and R2T state,
 * then decides whether to hand the command to the transport
 * (DATAOUT_SEND_TO_TRANSPORT), issue another R2T (DATAOUT_SEND_R2T),
 * or keep receiving (DATAOUT_NORMAL).
 */
static int iscsit_dataout_post_crc_passed(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	int ret, send_r2t = 0;
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_seq *seq = NULL;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (cmd->unsolicited_data) {
		/* Completing FirstBurstLength ends the unsolicited phase. */
		if ((cmd->first_burst_len + payload_length) ==
		     conn->sess->sess_ops->FirstBurstLength) {
			if (iscsit_dataout_update_r2t(cmd, hdr->offset,
					payload_length) < 0)
				return DATAOUT_CANNOT_RECOVER;
			send_r2t = 1;
		}
		if (!conn->sess->sess_ops->DataPDUInOrder) {
			ret = iscsit_dataout_update_datapduinorder_no(cmd,
				hdr->datasn, (hdr->flags & ISCSI_FLAG_CMD_FINAL));
			if (ret == DATAOUT_CANNOT_RECOVER)
				return ret;
		}
		cmd->first_burst_len += payload_length;
		/* DataSN is per-command when sequences are in order,
		 * per-sequence otherwise. */
		if (conn->sess->sess_ops->DataSequenceInOrder)
			cmd->data_sn++;
		else {
			seq = cmd->seq_ptr;
			seq->data_sn++;
			seq->offset += payload_length;
		}
		if (send_r2t) {
			/* Unsolicited phase done; reset for solicited data. */
			if (seq)
				seq->status = DATAOUT_SEQUENCE_COMPLETE;
			cmd->first_burst_len = 0;
			cmd->unsolicited_data = 0;
		}
	} else {
		if (conn->sess->sess_ops->DataSequenceInOrder) {
			/* Solicited, in-order: bursts are bounded by
			 * MaxBurstLength. */
			if ((cmd->next_burst_len + payload_length) ==
			     conn->sess->sess_ops->MaxBurstLength) {
				if (iscsit_dataout_update_r2t(cmd, hdr->offset,
						payload_length) < 0)
					return DATAOUT_CANNOT_RECOVER;
				send_r2t = 1;
			}
			if (!conn->sess->sess_ops->DataPDUInOrder) {
				ret = iscsit_dataout_update_datapduinorder_no(
					cmd, hdr->datasn,
					(hdr->flags & ISCSI_FLAG_CMD_FINAL));
				if (ret == DATAOUT_CANNOT_RECOVER)
					return ret;
			}
			cmd->next_burst_len += payload_length;
			cmd->data_sn++;
			if (send_r2t)
				cmd->next_burst_len = 0;
		} else {
			/* Solicited, out-of-order sequences: each sequence
			 * tracks its own burst/transfer length. */
			seq = cmd->seq_ptr;
			if ((seq->next_burst_len + payload_length) ==
			     seq->xfer_len) {
				if (iscsit_dataout_update_r2t(cmd, hdr->offset,
						payload_length) < 0)
					return DATAOUT_CANNOT_RECOVER;
				send_r2t = 1;
			}
			if (!conn->sess->sess_ops->DataPDUInOrder) {
				ret = iscsit_dataout_update_datapduinorder_no(
					cmd, hdr->datasn,
					(hdr->flags & ISCSI_FLAG_CMD_FINAL));
				if (ret == DATAOUT_CANNOT_RECOVER)
					return ret;
			}
			seq->data_sn++;
			seq->offset += payload_length;
			seq->next_burst_len += payload_length;
			if (send_r2t) {
				seq->next_burst_len = 0;
				seq->status = DATAOUT_SEQUENCE_COMPLETE;
			}
		}
	}
	/* A new R2T restarts the per-command DataSN numbering. */
	if (send_r2t && conn->sess->sess_ops->DataSequenceInOrder)
		cmd->data_sn = 0;

	cmd->write_data_done += payload_length;

	return (cmd->write_data_done == cmd->data_length) ?
		DATAOUT_SEND_TO_TRANSPORT : (send_r2t) ?
		DATAOUT_SEND_R2T : DATAOUT_NORMAL;
}
/*
 * Post-receive handling for a DataOUT PDU whose payload CRC failed:
 * (for DataPDUInOrder=No) mark the per-PDU descriptor so the payload is
 * re-requested, then kick off within-command sequence recovery.
 */
static int iscsit_dataout_post_crc_failed(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	struct iscsi_pdu *pdu;
	struct iscsi_data *hdr = (struct iscsi_data *) buf;
	u32 payload_length = ntoh24(hdr->dlength);

	if (conn->sess->sess_ops->DataPDUInOrder)
		goto recover;

	/* Only reached when DataPDUInOrder=No. */
	pdu = cmd->pdu_ptr;

	switch (pdu->status) {
	case ISCSI_PDU_NOT_RECEIVED:
	case ISCSI_PDU_TIMED_OUT:
		pdu->status = ISCSI_PDU_CRC_FAILED;
		break;
	case ISCSI_PDU_CRC_FAILED:
		/* Already flagged; nothing more to record. */
		break;
	default:
		return DATAOUT_CANNOT_RECOVER;
	}

recover:
	return iscsit_recover_dataout_sequence(cmd, hdr->offset, payload_length);
}
/*
 * Returns nonzero when a check result must be propagated to the caller
 * instead of continuing with further pre-receive validation.
 */
static int iscsit_dataout_ret_is_terminal(int ret)
{
	return (ret == DATAOUT_WITHIN_COMMAND_RECOVERY) ||
	       (ret == DATAOUT_CANNOT_RECOVER);
}

/*
 * Called from iscsit_handle_data_out() before the DataOUT payload is
 * received and its CRC computed. Runs the within-command recovery,
 * DataSN and sequence checks in order, then the DataPDUInOrder-specific
 * pre-receive check.
 */
extern int iscsit_check_pre_dataout(
	struct iscsi_cmd *cmd,
	unsigned char *buf)
{
	struct iscsi_conn *conn = cmd->conn;
	int ret;

	ret = iscsit_dataout_within_command_recovery_check(cmd, buf);
	if (iscsit_dataout_ret_is_terminal(ret))
		return ret;

	ret = iscsit_dataout_check_datasn(cmd, buf);
	if (iscsit_dataout_ret_is_terminal(ret))
		return ret;

	ret = cmd->unsolicited_data ?
		iscsit_dataout_check_unsolicited_sequence(cmd, buf) :
		iscsit_dataout_check_sequence(cmd, buf);
	if (iscsit_dataout_ret_is_terminal(ret))
		return ret;

	return (conn->sess->sess_ops->DataPDUInOrder) ?
		iscsit_dataout_pre_datapduinorder_yes(cmd, buf) :
		iscsit_dataout_pre_datapduinorder_no(cmd, buf);
}
/*
 * Called from iscsit_handle_data_out() after the DataOUT payload has been
 * received and its CRC computed. Dispatches to the pass/fail bookkeeping
 * path; a digest failure at ERL=0 rejects the PDU and closes the session.
 */
int iscsit_check_post_dataout(
	struct iscsi_cmd *cmd,
	unsigned char *buf,
	u8 data_crc_failed)
{
	struct iscsi_conn *conn = cmd->conn;

	cmd->dataout_timeout_retries = 0;

	if (!data_crc_failed)
		return iscsit_dataout_post_crc_passed(cmd, buf);

	if (!conn->sess->sess_ops->ErrorRecoveryLevel) {
		/* ERL=0 cannot retransmit - reject and drop the session. */
		pr_err("Unable to recover from DataOUT CRC"
			" failure while ERL=0, closing session.\n");
		iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
				1, 0, buf, cmd);
		return DATAOUT_CANNOT_RECOVER;
	}

	iscsit_add_reject_from_cmd(ISCSI_REASON_DATA_DIGEST_ERROR,
			0, 0, buf, cmd);
	return iscsit_dataout_post_crc_failed(cmd, buf);
}
/*
 * Timer callback: the failed session's Time2Retain window expired without
 * the initiator reinstating it, so account the error and release the
 * session's reference, triggering teardown.
 */
static void iscsit_handle_time2retain_timeout(unsigned long data)
{
	struct iscsi_session *sess = (struct iscsi_session *) data;
	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	spin_lock_bh(&se_tpg->session_lock);
	/* A racing iscsit_stop_time2retain_timer() wins: bail out. */
	if (sess->time2retain_timer_flags & ISCSI_TF_STOP) {
		spin_unlock_bh(&se_tpg->session_lock);
		return;
	}
	/* Session reinstatement owns the cleanup in this case. */
	if (atomic_read(&sess->session_reinstatement)) {
		pr_err("Exiting Time2Retain handler because"
			" session_reinstatement=1\n");
		spin_unlock_bh(&se_tpg->session_lock);
		return;
	}
	sess->time2retain_timer_flags |= ISCSI_TF_EXPIRED;
	pr_err("Time2Retain timer expired for SID: %u, cleaning up"
		" iSCSI session.\n", sess->sid);
	/* Record the failure in the owning TIQN's session error stats. */
	{
	struct iscsi_tiqn *tiqn = tpg->tpg_tiqn;

	if (tiqn) {
		spin_lock(&tiqn->sess_err_stats.lock);
		strcpy(tiqn->sess_err_stats.last_sess_fail_rem_name,
			(void *)sess->sess_ops->InitiatorName);
		tiqn->sess_err_stats.last_sess_failure_type =
				ISCSI_SESS_ERR_CXN_TIMEOUT;
		tiqn->sess_err_stats.cxn_timeout_errors++;
		sess->conn_timeout_errors++;
		spin_unlock(&tiqn->sess_err_stats.lock);
	}
	}
	spin_unlock_bh(&se_tpg->session_lock);

	/* Drop the session reference held for the timer; may free sess. */
	target_put_session(sess->se_sess);
}
extern void iscsit_start_time2retain_handler(struct iscsi_session *sess)
{
int tpg_active;
/*
* Only start Time2Retain timer when the assoicated TPG is still in
* an ACTIVE (eg: not disabled or shutdown) state.
*/
spin_lock(&ISCSI_TPG_S(sess)->tpg_state_lock);
tpg_active = (ISCSI_TPG_S(sess)->tpg_state == TPG_STATE_ACTIVE);
spin_unlock(&ISCSI_TPG_S(sess)->tpg_state_lock);
if (!tpg_active)
return;
if (sess->time2retain_timer_flags & ISCSI_TF_RUNNING)
return;
pr_debug("Starting Time2Retain timer for %u seconds on"
" SID: %u\n", sess->sess_ops->DefaultTime2Retain, sess->sid);
init_timer(&sess->time2retain_timer);
sess->time2retain_timer.expires =
(get_jiffies_64() + sess->sess_ops->DefaultTime2Retain * HZ);
sess->time2retain_timer.data = (unsigned long)sess;
sess->time2retain_timer.function = iscsit_handle_time2retain_timeout;
sess->time2retain_timer_flags &= ~ISCSI_TF_STOP;
sess->time2retain_timer_flags |= ISCSI_TF_RUNNING;
add_timer(&sess->time2retain_timer);
}
/*
 * Called with spin_lock_bh(&struct se_portal_group->session_lock) held
 *
 * Stops a running Time2Retain timer. Returns -1 if the timer already
 * expired (teardown is underway), 0 otherwise. The session_lock is
 * dropped around del_timer_sync() because the timer handler takes the
 * same lock; ISCSI_TF_STOP tells a concurrently-firing handler to bail.
 */
extern int iscsit_stop_time2retain_timer(struct iscsi_session *sess)
{
	struct iscsi_portal_group *tpg = ISCSI_TPG_S(sess);
	struct se_portal_group *se_tpg = &tpg->tpg_se_tpg;

	if (sess->time2retain_timer_flags & ISCSI_TF_EXPIRED)
		return -1;

	if (!(sess->time2retain_timer_flags & ISCSI_TF_RUNNING))
		return 0;

	sess->time2retain_timer_flags |= ISCSI_TF_STOP;
	spin_unlock_bh(&se_tpg->session_lock);

	del_timer_sync(&sess->time2retain_timer);

	spin_lock_bh(&se_tpg->session_lock);
	sess->time2retain_timer_flags &= ~ISCSI_TF_RUNNING;
	pr_debug("Stopped Time2Retain Timer for SID: %u\n",
			sess->sid);
	return 0;
}
/*
 * Force reinstatement of a connection (unless it is already exiting or
 * its transport has failed), then wait for the reinstatement to finish
 * and signal the post-wait completion.
 */
void iscsit_connection_reinstatement_rcfr(struct iscsi_conn *conn)
{
	int already_going_down;

	spin_lock_bh(&conn->state_lock);
	already_going_down = atomic_read(&conn->connection_exit) ||
			     atomic_read(&conn->transport_failed);
	spin_unlock_bh(&conn->state_lock);

	if (!already_going_down)
		iscsi_thread_set_force_reinstatement(conn);

	wait_for_completion(&conn->conn_wait_rcfr_comp);
	complete(&conn->conn_post_wait_comp);
}
/*
 * Initiate connection reinstatement unless the connection is already
 * exiting, failed, or being reinstated. When @sleep is nonzero the caller
 * blocks until the reinstatement completes. The guard checks are ordered
 * and performed under state_lock so only one path claims the work.
 */
void iscsit_cause_connection_reinstatement(struct iscsi_conn *conn, int sleep)
{
	spin_lock_bh(&conn->state_lock);
	if (atomic_read(&conn->connection_exit)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}
	if (atomic_read(&conn->transport_failed)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}
	if (atomic_read(&conn->connection_reinstatement)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}
	if (iscsi_thread_set_force_reinstatement(conn) < 0) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	/* We won the race: mark reinstatement in progress. */
	atomic_set(&conn->connection_reinstatement, 1);
	if (!sleep) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	/* Blocking caller: record intent, then wait outside the lock. */
	atomic_set(&conn->sleep_on_conn_wait_comp, 1);
	spin_unlock_bh(&conn->state_lock);

	wait_for_completion(&conn->conn_wait_comp);
	complete(&conn->conn_post_wait_comp);
}
/*
 * Flag the session so subsequent cleanup paths skip ERL>0 connection
 * recovery and tear the connection down directly.
 */
void iscsit_fall_back_to_erl0(struct iscsi_session *sess)
{
	pr_debug("Falling back to ErrorRecoveryLevel=0 for SID: %u\n",
		 sess->sid);

	atomic_set(&sess->session_fall_back_to_erl0, 1);
}
/*
 * Decide how to dispose of a failed connection: at ERL=2 (and when the
 * session is neither being reinstated nor falling back to ERL=0) attempt
 * connection recovery; otherwise close the connection outright.
 */
static void iscsit_handle_connection_cleanup(struct iscsi_conn *conn)
{
	struct iscsi_session *sess = conn->sess;
	int use_recovery = (sess->sess_ops->ErrorRecoveryLevel == 2) &&
			   !atomic_read(&sess->session_reinstatement) &&
			   !atomic_read(&sess->session_fall_back_to_erl0);

	if (use_recovery) {
		iscsit_connection_recovery_transport_reset(conn);
		return;
	}

	pr_debug("Performing cleanup for failed iSCSI"
		" Connection ID: %hu from %s\n", conn->cid,
		sess->sess_ops->InitiatorName);
	iscsit_close_connection(conn);
}
/*
 * Single entry point for a connection leaving service: marks the
 * connection as exiting exactly once, closes it immediately when a
 * logout is in progress, and otherwise moves it to CLEANUP_WAIT and
 * runs the cleanup/recovery decision.
 */
extern void iscsit_take_action_for_connection_exit(struct iscsi_conn *conn)
{
	spin_lock_bh(&conn->state_lock);
	/* Only one caller gets to perform the exit handling. */
	if (atomic_read(&conn->connection_exit)) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}
	atomic_set(&conn->connection_exit, 1);

	/* A logout in progress means a plain close, no recovery. */
	if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT) {
		spin_unlock_bh(&conn->state_lock);
		iscsit_close_connection(conn);
		return;
	}

	/* Already parked in CLEANUP_WAIT: someone else owns cleanup. */
	if (conn->conn_state == TARG_CONN_STATE_CLEANUP_WAIT) {
		spin_unlock_bh(&conn->state_lock);
		return;
	}

	pr_debug("Moving to TARG_CONN_STATE_CLEANUP_WAIT.\n");
	conn->conn_state = TARG_CONN_STATE_CLEANUP_WAIT;
	spin_unlock_bh(&conn->state_lock);

	iscsit_handle_connection_cleanup(conn);
}
/*
 * This is the simple function that makes the magic of
 * sync and steering happen in the follow paradoxical order:
 *
 * 0) Receive conn->of_marker (bytes left until next OFMarker)
 *    bytes into an offload buffer.  When we pass the exact number
 *    of bytes in conn->of_marker, iscsit_dump_data_payload() and hence
 *    rx_data() will automatically receive the identical u32 marker
 *    values and store it in conn->of_marker_offset;
 * 1) Now conn->of_marker_offset will contain the offset to the start
 *    of the next iSCSI PDU.  Dump these remaining bytes into another
 *    offload buffer.
 * 2) We are done!
 *    Next byte in the TCP stream will contain the next iSCSI PDU!
 *    Cool Huh?!
 */
int iscsit_recover_from_unknown_opcode(struct iscsi_conn *conn)
{
	/*
	 * Make sure the remaining bytes to next marker is a sane value.
	 */
	if (conn->of_marker > (conn->conn_ops->OFMarkInt * 4)) {
		pr_err("Remaining bytes to OFMarker: %u exceeds"
			" OFMarkInt bytes: %u.\n", conn->of_marker,
				conn->conn_ops->OFMarkInt * 4);
		return -1;
	}

	pr_debug("Advancing %u bytes in TCP stream to get to the"
			" next OFMarker.\n", conn->of_marker);

	/* Step 0: skip ahead to the marker; rx side records the offset. */
	if (iscsit_dump_data_payload(conn, conn->of_marker, 0) < 0)
		return -1;

	/*
	 * Make sure the offset marker we retrieved is a valid value.
	 */
	if (conn->of_marker_offset > (ISCSI_HDR_LEN + (ISCSI_CRC_LEN * 2) +
	    conn->conn_ops->MaxRecvDataSegmentLength)) {
		pr_err("OfMarker offset value: %u exceeds limit.\n",
			conn->of_marker_offset);
		return -1;
	}

	pr_debug("Discarding %u bytes of TCP stream to get to the"
			" next iSCSI Opcode.\n", conn->of_marker_offset);

	/* Step 1: discard the remainder; stream is now PDU-aligned. */
	if (iscsit_dump_data_payload(conn, conn->of_marker_offset, 0) < 0)
		return -1;

	return 0;
}
| gpl-2.0 |
lozohcum/kernel | arch/arm/plat-samsung/clock.c | 4649 | 9802 | /* linux/arch/arm/plat-s3c24xx/clock.c
*
* Copyright 2004-2005 Simtec Electronics
* Ben Dooks <ben@simtec.co.uk>
*
* S3C24XX Core clock control support
*
* Based on, and code from linux/arch/arm/mach-versatile/clock.c
**
** Copyright (C) 2004 ARM Limited.
** Written by Deep Blue Solutions Limited.
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/clk.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#if defined(CONFIG_DEBUG_FS)
#include <linux/debugfs.h>
#endif
#include <mach/hardware.h>
#include <asm/irq.h>
#include <plat/cpu-freq.h>
#include <plat/clock.h>
#include <plat/cpu.h>
#include <linux/serial_core.h>
#include <plat/regs-serial.h> /* for s3c24xx_uart_devs */
/* clock information */
static LIST_HEAD(clocks);
/* We originally used an mutex here, but some contexts (see resume)
* are calling functions such as clk_set_parent() with IRQs disabled
* causing an BUG to be triggered.
*/
DEFINE_SPINLOCK(clocks_lock);
/* Global watchdog clock used by arch_wtd_reset() callback */
struct clk *s3c2410_wdtclk;
/*
 * Look up the watchdog clock once at boot for arch_wtd_reset(); a missing
 * clock is only warned about, since not every SoC provides one.
 */
static int __init s3c_wdt_reset_init(void)
{
	struct clk *wdtclk = clk_get(NULL, "watchdog");

	if (IS_ERR(wdtclk))
		printk(KERN_WARNING "%s: warning: cannot get watchdog clock\n", __func__);

	s3c2410_wdtclk = wdtclk;
	return 0;
}
/* enable and disable calls for use with the clk struct */
/* No-op enable callback installed for clocks without a hardware gate. */
static int clk_null_enable(struct clk *clk, int enable)
{
	return 0;
}
/*
 * clk_enable - enable a clock, enabling its parent chain first.
 *
 * Usage counting happens under clocks_lock; the hardware enable callback
 * only fires on the 0 -> 1 transition. The parent recursion terminates at
 * a NULL parent, which the initial guard rejects.
 */
int clk_enable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR(clk) || clk == NULL)
		return -EINVAL;

	clk_enable(clk->parent);

	spin_lock_irqsave(&clocks_lock, flags);

	if ((clk->usage++) == 0)
		(clk->enable)(clk, 1);

	spin_unlock_irqrestore(&clocks_lock, flags);
	return 0;
}
/*
 * clk_disable - drop a usage reference, gating the clock on the 1 -> 0
 * transition, then disable the parent chain (mirror order of clk_enable).
 */
void clk_disable(struct clk *clk)
{
	unsigned long flags;

	if (IS_ERR(clk) || clk == NULL)
		return;

	spin_lock_irqsave(&clocks_lock, flags);

	if ((--clk->usage) == 0)
		(clk->enable)(clk, 0);

	spin_unlock_irqrestore(&clocks_lock, flags);
	clk_disable(clk->parent);
}
/*
 * clk_get_rate - report the clock's rate in Hz.
 *
 * Resolution order: a fixed non-zero rate wins, then the ops' get_rate
 * callback, then the parent's rate; an error pointer or an unresolvable
 * clock yields 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
	if (IS_ERR(clk))
		return 0;

	if (clk->rate)
		return clk->rate;

	if (clk->ops && clk->ops->get_rate)
		return clk->ops->get_rate(clk);

	if (clk->parent)
		return clk_get_rate(clk->parent);

	return clk->rate;
}
long clk_round_rate(struct clk *clk, unsigned long rate)
{
if (!IS_ERR(clk) && clk->ops && clk->ops->round_rate)
return (clk->ops->round_rate)(clk, rate);
return rate;
}
/*
 * clk_set_rate - change a clock's rate through its ops.
 *
 * Returns -EINVAL for error pointers and for clocks without a set_rate
 * op; the WARN_ONs flag clocks whose rate was deliberately fixed.
 * The op itself runs under clocks_lock.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
	int ret;

	if (IS_ERR(clk))
		return -EINVAL;

	/* We do not default just do a clk->rate = rate as
	 * the clock may have been made this way by choice.
	 */

	WARN_ON(clk->ops == NULL);
	WARN_ON(clk->ops && clk->ops->set_rate == NULL);

	if (clk->ops == NULL || clk->ops->set_rate == NULL)
		return -EINVAL;

	spin_lock(&clocks_lock);
	ret = (clk->ops->set_rate)(clk, rate);
	spin_unlock(&clocks_lock);

	return ret;
}
struct clk *clk_get_parent(struct clk *clk)
{
return clk->parent;
}
/*
 * clk_set_parent - re-parent a clock via its ops' set_parent callback,
 * under clocks_lock. Clocks without the callback silently succeed.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
	int ret = 0;

	if (IS_ERR(clk))
		return -EINVAL;

	spin_lock(&clocks_lock);

	if (clk->ops && clk->ops->set_parent)
		ret = (clk->ops->set_parent)(clk, parent);

	spin_unlock(&clocks_lock);

	return ret;
}
EXPORT_SYMBOL(clk_enable);
EXPORT_SYMBOL(clk_disable);
EXPORT_SYMBOL(clk_get_rate);
EXPORT_SYMBOL(clk_round_rate);
EXPORT_SYMBOL(clk_set_rate);
EXPORT_SYMBOL(clk_get_parent);
EXPORT_SYMBOL(clk_set_parent);
/* base clocks */
/* Default set_rate op: simply record the requested rate. */
int clk_default_setrate(struct clk *clk, unsigned long rate)
{
	clk->rate = rate;
	return 0;
}
/* ops for clocks whose set_rate just stores the value */
struct clk_ops clk_ops_def_setrate = {
	.set_rate	= clk_default_setrate,
};

/* root crystal; rate filled in by s3c24xx_register_baseclocks() */
struct clk clk_xtal = {
	.name		= "xtal",
	.rate		= 0,
	.parent		= NULL,
	.ctrlbit	= 0,
};

/* external clock input */
struct clk clk_ext = {
	.name		= "ext",
};

/* EPLL output */
struct clk clk_epll = {
	.name		= "epll",
};

/* MPLL output; rate is set by cpu-specific init code */
struct clk clk_mpll = {
	.name		= "mpll",
	.ops		= &clk_ops_def_setrate,
};

/* UPLL output (USB) */
struct clk clk_upll = {
	.name		= "upll",
	.parent		= NULL,
	.ctrlbit	= 0,
};

/* ARM core clock, derived from MPLL */
struct clk clk_f = {
	.name		= "fclk",
	.rate		= 0,
	.parent		= &clk_mpll,
	.ctrlbit	= 0,
};

/* AHB bus clock */
struct clk clk_h = {
	.name		= "hclk",
	.rate		= 0,
	.parent		= NULL,
	.ctrlbit	= 0,
	.ops		= &clk_ops_def_setrate,
};

/* APB peripheral clock */
struct clk clk_p = {
	.name		= "pclk",
	.rate		= 0,
	.parent		= NULL,
	.ctrlbit	= 0,
	.ops		= &clk_ops_def_setrate,
};

/* USB bus clock, fed from UPLL */
struct clk clk_usb_bus = {
	.name		= "usb-bus",
	.rate		= 0,
	.parent		= &clk_upll,
};

/* UART clock */
struct clk s3c24xx_uclk = {
	.name		= "uclk",
};
/* initialise the clock system */
/**
* s3c24xx_register_clock() - register a clock
* @clk: The clock to register
*
* Add the specified clock to the list of clocks known by the system.
*/
int s3c24xx_register_clock(struct clk *clk)
{
	/* Clocks without a hardware gate get the no-op enable callback. */
	if (clk->enable == NULL)
		clk->enable = clk_null_enable;

	/* fill up the clk_lookup structure and register it*/
	clk->lookup.dev_id = clk->devname;
	clk->lookup.con_id = clk->name;
	clk->lookup.clk = clk;
	clkdev_add(&clk->lookup);

	return 0;
}
/**
* s3c24xx_register_clocks() - register an array of clock pointers
* @clks: Pointer to an array of struct clk pointers
* @nr_clks: The number of clocks in the @clks array.
*
* Call s3c24xx_register_clock() for all the clock pointers contained
* in the @clks list. Returns the number of failures.
*/
/*
 * Register every clock pointed to by @clks, logging each failure and
 * returning the number of clocks that could not be registered.
 */
int s3c24xx_register_clocks(struct clk **clks, int nr_clks)
{
	int fails = 0;
	int i;

	for (i = 0; i < nr_clks; i++) {
		struct clk *clk = clks[i];

		if (s3c24xx_register_clock(clk) < 0) {
			printk(KERN_ERR "%s: failed to register %p: %s\n",
			       __func__, clk, clk->name);
			fails++;
		}
	}

	return fails;
}
/**
* s3c_register_clocks() - register an array of clocks
* @clkp: Pointer to the first clock in the array.
* @nr_clks: Number of clocks to register.
*
* Call s3c24xx_register_clock() on the @clkp array given, printing an
* error if it fails to register the clock (unlikely).
*/
/*
 * Register an array of clocks (by value rather than pointer array),
 * printing an error for any clock that fails to register.
 */
void __init s3c_register_clocks(struct clk *clkp, int nr_clks)
{
	int i;

	for (i = 0; i < nr_clks; i++) {
		int ret = s3c24xx_register_clock(&clkp[i]);

		if (ret < 0)
			printk(KERN_ERR "Failed to register clock %s (%d)\n",
			       clkp[i].name, ret);
	}
}
/**
* s3c_disable_clocks() - disable an array of clocks
* @clkp: Pointer to the first clock in the array.
* @nr_clks: Number of clocks to register.
*
* for internal use only at initialisation time. disable the clocks in the
* @clkp array.
*/
/*
 * Gate every clock in @clkp at init time by invoking its enable callback
 * with 0, bypassing the usage counter (boot-time housekeeping only).
 */
void __init s3c_disable_clocks(struct clk *clkp, int nr_clks)
{
	int i;

	for (i = 0; i < nr_clks; i++)
		(clkp[i].enable)(&clkp[i], 0);
}
/* initialise all the clocks */
/*
 * Register the fixed set of base clocks, seeding the crystal rate from
 * @xtal. Registration order and error messages match the historical
 * one-call-per-clock sequence.
 */
int __init s3c24xx_register_baseclocks(unsigned long xtal)
{
	static struct clk *base_clks[] = {
		&clk_xtal, &clk_mpll, &clk_upll, &clk_f, &clk_h, &clk_p,
	};
	static const char *base_names[] = {
		"master xtal", "mpll clock", "upll clock",
		"cpu fclk", "cpu hclk", "cpu pclk",
	};
	int i;

	printk(KERN_INFO "S3C24XX Clocks, Copyright 2004 Simtec Electronics\n");

	clk_xtal.rate = xtal;

	/* register our clocks */
	for (i = 0; i < ARRAY_SIZE(base_clks); i++) {
		if (s3c24xx_register_clock(base_clks[i]) < 0)
			printk(KERN_ERR "failed to register %s\n",
			       base_names[i]);
	}

	return 0;
}
#if defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS)
/* debugfs support to trace clock tree hierarchy and attributes */
static struct dentry *clk_debugfs_root;
/*
 * Create the debugfs directory for one clock (named after its devname,
 * nested under its parent's directory) with usecount/rate attributes.
 * Returns 0 or -ENOMEM, tearing down the partial directory on failure.
 */
static int clk_debugfs_register_one(struct clk *c)
{
	int err;
	struct dentry *d;
	struct clk *pa = c->parent;
	char s[255];

	/*
	 * Bounded copy of the device name: the previous unchecked sprintf()
	 * could overflow s[] on a long devname. snprintf() truncates and
	 * always NUL-terminates.
	 */
	snprintf(s, sizeof(s), "%s", c->devname);

	d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
	if (!d)
		return -ENOMEM;
	c->dent = d;

	d = debugfs_create_u8("usecount", S_IRUGO, c->dent, (u8 *)&c->usage);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
	if (!d) {
		err = -ENOMEM;
		goto err_out;
	}
	return 0;

err_out:
	debugfs_remove_recursive(c->dent);
	return err;
}
/*
 * Recursively register a clock's debugfs entry, creating the parent's
 * entry first so the directory hierarchy mirrors the clock tree. An
 * already-present dent is left alone, making the function idempotent.
 */
static int clk_debugfs_register(struct clk *c)
{
	int err;
	struct clk *pa = c->parent;

	if (pa && !pa->dent) {
		err = clk_debugfs_register(pa);
		if (err)
			return err;
	}

	if (!c->dent) {
		err = clk_debugfs_register_one(c);
		if (err)
			return err;
	}
	return 0;
}
/*
 * Create the debugfs "clock" tree and register every clock on the global
 * list. NOTE(review): nothing visible in this file adds entries to the
 * `clocks` list (registration goes through clkdev_add) — confirm that
 * list population happens elsewhere, otherwise this loop walks an
 * empty list.
 */
static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, list) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;

err_out:
	/* Remove the whole tree, including any entries already created. */
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);
#endif /* defined(CONFIG_PM_DEBUG) && defined(CONFIG_DEBUG_FS) */
| gpl-2.0 |
maxfu/android_kernel_armada_pxa1088 | arch/mips/kernel/cevt-sb1250.c | 4649 | 4400 | /*
* Copyright (C) 2000, 2001 Broadcom Corporation
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <asm/addrspace.h>
#include <asm/io.h>
#include <asm/time.h>
#include <asm/sibyte/sb1250.h>
#include <asm/sibyte/sb1250_regs.h>
#include <asm/sibyte/sb1250_int.h>
#include <asm/sibyte/sb1250_scd.h>
#define IMR_IP2_VAL K_INT_MAP_I0
#define IMR_IP3_VAL K_INT_MAP_I1
#define IMR_IP4_VAL K_INT_MAP_I2
/*
* The general purpose timer ticks at 1MHz independent if
* the rest of the system
*/
/*
 * Program this CPU's SCD general-purpose timer for the requested
 * clockevent mode. Periodic mode reloads at 1MHz/HZ; one-shot and
 * shutdown leave the timer stopped (the shot is armed later by
 * sibyte_next_event()).
 */
static void sibyte_set_mode(enum clock_event_mode mode,
                           struct clock_event_device *evt)
{
	unsigned int cpu = smp_processor_id();
	void __iomem *cfg, *init;

	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));

	switch (mode) {
	case CLOCK_EVT_MODE_PERIODIC:
		/* Stop, load the HZ period, then start in continuous mode. */
		__raw_writeq(0, cfg);
		__raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
		__raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
			     cfg);
		break;

	case CLOCK_EVT_MODE_ONESHOT:
		/* Stop the timer until we actually program a shot */
	case CLOCK_EVT_MODE_SHUTDOWN:
		__raw_writeq(0, cfg);
		break;

	case CLOCK_EVT_MODE_UNUSED:	/* shuddup gcc */
	case CLOCK_EVT_MODE_RESUME:
		;
	}
}
/*
 * Arm a one-shot expiry @delta timer ticks from now on this CPU's SCD
 * timer: stop, load the count, and enable (non-continuous, so it fires
 * once). Always succeeds.
 */
static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
{
	unsigned int cpu = smp_processor_id();
	void __iomem *cfg, *init;

	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
	init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));

	__raw_writeq(0, cfg);
	__raw_writeq(delta - 1, init);
	__raw_writeq(M_SCD_TIMER_ENABLE, cfg);

	return 0;
}
/*
 * Per-CPU timer interrupt: acknowledge the SCD timer by rewriting its
 * config register (re-enabling continuous mode when periodic, leaving it
 * stopped otherwise), then invoke the clockevent handler.
 */
static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
{
	unsigned int cpu = smp_processor_id();
	struct clock_event_device *cd = dev_id;
	void __iomem *cfg;
	unsigned long tmode;

	if (cd->mode == CLOCK_EVT_MODE_PERIODIC)
		tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
	else
		tmode = 0;

	/* ACK interrupt */
	cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
	____raw_writeq(tmode, cfg);

	cd->event_handler(cd);

	return IRQ_HANDLED;
}
static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
static DEFINE_PER_CPU(struct irqaction, sibyte_hpt_irqaction);
static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
/*
 * Per-CPU clockevent bring-up: register this CPU's SCD general-purpose
 * timer as a periodic/one-shot clockevent, route its interrupt to IP[4]
 * of the local CPU, and install the per-CPU handler.
 */
void __cpuinit sb1250_clockevent_init(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int irq = K_INT_TIMER_0 + cpu;
	struct irqaction *action = &per_cpu(sibyte_hpt_irqaction, cpu);
	struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
	unsigned char *name = per_cpu(sibyte_hpt_name, cpu);

	/* Only have 4 general purpose timers, and we use last one as hpt */
	BUG_ON(cpu > 2);

	sprintf(name, "sb1250-counter-%d", cpu);
	cd->name		= name;
	cd->features		= CLOCK_EVT_FEAT_PERIODIC |
				  CLOCK_EVT_FEAT_ONESHOT;
	/* Timer runs at a fixed 1MHz regardless of the system clock. */
	clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
	cd->max_delta_ns	= clockevent_delta2ns(0x7fffff, cd);
	cd->min_delta_ns	= clockevent_delta2ns(2, cd);
	cd->rating		= 200;
	cd->irq			= irq;
	cd->cpumask		= cpumask_of(cpu);
	cd->set_next_event	= sibyte_next_event;
	cd->set_mode		= sibyte_set_mode;
	clockevents_register_device(cd);

	/* Keep the IRQ masked while we reroute it. */
	sb1250_mask_irq(cpu, irq);

	/*
	 * Map the timer interrupt to IP[4] of this cpu
	 */
	__raw_writeq(IMR_IP4_VAL,
		     IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
			    (irq << 3)));

	sb1250_unmask_irq(cpu, irq);

	action->handler	= sibyte_counter_handler;
	action->flags	= IRQF_PERCPU | IRQF_TIMER;
	action->name	= name;
	action->dev_id	= cd;

	irq_set_affinity(irq, cpumask_of(cpu));
	setup_irq(irq, action);
}
| gpl-2.0 |
shugaoye/goldfish | arch/arm/mach-orion5x/tsx09-common.c | 4905 | 3243 | /*
* QNAP TS-x09 Boards common functions
*
* Maintainers: Lennert Buytenhek <buytenh@marvell.com>
* Byron Bradley <byron.bbradley@gmail.com>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/mv643xx_eth.h>
#include <linux/timex.h>
#include <linux/serial_reg.h>
#include "tsx09-common.h"
#include "common.h"
/*****************************************************************************
* QNAP TS-x09 specific power off method via UART1-attached PIC
****************************************************************************/
#define UART1_REG(x) (UART1_VIRT_BASE + ((UART_##x) << 2))
/*
 * Power off a QNAP TS-x09: reprogram UART1 (which is wired to the power
 * management PIC) to 19200 8N1 and send the PIC's 'A' power-off command.
 */
void qnap_tsx09_power_off(void)
{
	/* 19200 baud divisor */
	const unsigned divisor = ((orion5x_tclk + (8 * 19200)) / (16 * 19200));

	pr_info("%s: triggering power-off...\n", __func__);

	/* hijack uart1 and reset into sane state (19200,8n1) */
	/* DLAB=1 to expose the divisor latches, write them, then DLAB=0. */
	writel(0x83, UART1_REG(LCR));
	writel(divisor & 0xff, UART1_REG(DLL));
	writel((divisor >> 8) & 0xff, UART1_REG(DLM));
	writel(0x03, UART1_REG(LCR));
	writel(0x00, UART1_REG(IER));
	writel(0x00, UART1_REG(FCR));
	writel(0x00, UART1_REG(MCR));

	/* send the power-off command 'A' to PIC */
	writel('A', UART1_REG(TX));
}
/*****************************************************************************
* Ethernet
****************************************************************************/
struct mv643xx_eth_platform_data qnap_tsx09_eth_data = {
.phy_addr = MV643XX_ETH_PHY_ADDR(8),
};
/*
 * Convert one ASCII hex digit (either case) to its value 0-15,
 * or -1 for any non-hex character.
 */
static int __init qnap_tsx09_parse_hex_nibble(char n)
{
	if (n >= '0' && n <= '9')
		return n - '0';

	if (n >= 'a' && n <= 'f')
		return 10 + (n - 'a');

	if (n >= 'A' && n <= 'F')
		return 10 + (n - 'A');

	return -1;
}
/*
 * Parse two hex digits at @b into a byte value 0-255, or -1 when either
 * character is not a valid hex digit.
 */
static int __init qnap_tsx09_parse_hex_byte(const char *b)
{
	int hi = qnap_tsx09_parse_hex_nibble(b[0]);
	int lo = qnap_tsx09_parse_hex_nibble(b[1]);

	if (hi < 0 || lo < 0)
		return -1;

	return (hi << 4) | lo;
}
/*
 * Try to parse a MAC address in strict "xx:xx:xx:xx:xx:xx\n" form at
 * @addr_str. On success, logs it, copies it into the ethernet platform
 * data and returns 0; returns -1 if the text does not match.
 */
static int __init qnap_tsx09_check_mac_addr(const char *addr_str)
{
	u_int8_t addr[6];
	int i;

	for (i = 0; i < 6; i++) {
		int byte;

		/*
		 * Enforce "xx:xx:xx:xx:xx:xx\n" format.
		 */
		if (addr_str[(i * 3) + 2] != ((i < 5) ? ':' : '\n'))
			return -1;

		byte = qnap_tsx09_parse_hex_byte(addr_str + (i * 3));
		if (byte < 0)
			return -1;
		addr[i] = byte;
	}

	printk(KERN_INFO "tsx09: found ethernet mac address ");
	for (i = 0; i < 6; i++)
		printk("%.2x%s", addr[i], (i < 5) ? ":" : ".\n");

	/* Hand the address to the mv643xx_eth platform data. */
	memcpy(qnap_tsx09_eth_data.mac_addr, addr, 6);

	return 0;
}
/*
 * The 'NAS Config' flash partition has an ext2 filesystem which
 * contains a file that has the ethernet MAC address in plain text
 * (format "xx:xx:xx:xx:xx:xx\n").
 *
 * Scan the partition in 1KiB steps until a valid MAC address is found.
 */
void __init qnap_tsx09_find_mac_addr(u32 mem_base, u32 size)
{
	unsigned long addr;

	for (addr = mem_base; addr < (mem_base + size); addr += 1024) {
		char *nor_page;
		/*
		 * Default to "not found": the old code initialized ret to 0,
		 * so an ioremap() failure looked like success and aborted
		 * the scan before any address had actually been parsed.
		 */
		int ret = -1;

		nor_page = ioremap(addr, 1024);
		if (nor_page != NULL) {
			ret = qnap_tsx09_check_mac_addr(nor_page);
			iounmap(nor_page);
		}

		if (ret == 0)
			break;
	}
}
| gpl-2.0 |
KylinUI/android_kernel_motorola_msm8960dt-common | drivers/media/radio/dsbr100.c | 4905 | 16552 | /* A driver for the D-Link DSB-R100 USB radio and Gemtek USB Radio 21.
The device plugs into both the USB and an analog audio input, so this thing
only deals with initialisation and frequency setting, the
audio data has to be handled by a sound driver.
Major issue: I can't find out where the device reports the signal
strength, and indeed the windows software appearantly just looks
at the stereo indicator as well. So, scanning will only find
stereo stations. Sad, but I can't help it.
Also, the windows program sends oodles of messages over to the
device, and I couldn't figure out their meaning. My suspicion
is that they don't have any:-)
You might find some interesting stuff about this module at
http://unimut.fsk.uni-heidelberg.de/unimut/demi/dsbr
Copyright (c) 2000 Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
History:
Version 0.46:
Removed usb_dsbr100_open/close calls and radio->users counter. Also,
radio->muted changed to radio->status and suspend/resume calls updated.
Version 0.45:
Converted to v4l2_device.
Version 0.44:
Add suspend/resume functions, fix unplug of device,
a lot of cleanups and fixes by Alexey Klimov <klimov.linux@gmail.com>
Version 0.43:
Oliver Neukum: avoided DMA coherency issue
Version 0.42:
Converted dsbr100 to use video_ioctl2
by Douglas Landgraf <dougsland@gmail.com>
Version 0.41-ac1:
Alan Cox: Some cleanups and fixes
Version 0.41:
Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
Version 0.40:
Markus: Updates for 2.6.x kernels, code layout changes, name sanitizing
Version 0.30:
Markus: Updates for 2.5.x kernel and more ISO compliant source
Version 0.25:
PSL and Markus: Cleanup, radio now doesn't stop on device close
Version 0.24:
Markus: Hope I got these silly VIDEO_TUNER_LOW issues finally
right. Some minor cleanup, improved standalone compilation
Version 0.23:
Markus: Sign extension bug fixed by declaring transfer_buffer unsigned
Version 0.22:
Markus: Some (brown bag) cleanup in what VIDIOCSTUNER returns,
thanks to Mike Cox for pointing the problem out.
Version 0.21:
Markus: Minor cleanup, warnings if something goes wrong, lame attempt
to adhere to Documentation/CodingStyle
Version 0.2:
Brad Hards <bradh@dynamite.com.au>: Fixes to make it work as non-module
Markus: Copyright clarification
Version 0.01: Markus: initial release
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/usb.h>
/*
* Version Information
*/
#define DRIVER_VERSION "0.4.7"
#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver"
#define DSB100_VENDOR 0x04b4
#define DSB100_PRODUCT 0x1002
/* Commands the device appears to understand */
#define DSB100_TUNE 1
#define DSB100_ONOFF 2
#define TB_LEN 16
/* Frequency limits in MHz -- these are European values. For Japanese
devices, that would be 76 and 91. */
#define FREQ_MIN 87.5
#define FREQ_MAX 108.0
#define FREQ_MUL 16000
/* defines for radio->status */
#define STARTED 0
#define STOPPED 1
#define v4l2_dev_to_radio(d) container_of(d, struct dsbr100_device, v4l2_dev)
static int usb_dsbr100_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void usb_dsbr100_disconnect(struct usb_interface *intf);
static int usb_dsbr100_suspend(struct usb_interface *intf,
pm_message_t message);
static int usb_dsbr100_resume(struct usb_interface *intf);
static int radio_nr = -1;
module_param(radio_nr, int, 0);
/* Data for one (physical) device */
struct dsbr100_device {
	struct usb_device *usbdev;	/* underlying USB device */
	struct video_device videodev;	/* registered V4L2 radio node */
	struct v4l2_device v4l2_dev;	/* parent v4l2 device (refcounted) */
	u8 *transfer_buffer;		/* TB_LEN-byte buffer for control transfers */
	struct mutex v4l2_lock;		/* serializes ioctls, disconnect and PM */
	int curfreq;			/* frequency in V4L2 TUNER_LOW units (1/16 kHz) */
	int stereo;			/* 1 = stereo, 0 = mono, -1 = last status read failed */
	int status;			/* STARTED or STOPPED */
};

/* Only one device is supported: the D-Link DSB-R100. */
static struct usb_device_id usb_dsbr100_device_table [] = {
	{ USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) },
	{ }			/* Terminating entry */
};

MODULE_DEVICE_TABLE (usb, usb_dsbr100_device_table);

/* USB subsystem interface */
static struct usb_driver usb_dsbr100_driver = {
	.name			= "dsbr100",
	.probe			= usb_dsbr100_probe,
	.disconnect		= usb_dsbr100_disconnect,
	.id_table		= usb_dsbr100_device_table,
	.suspend		= usb_dsbr100_suspend,
	.resume			= usb_dsbr100_resume,
	.reset_resume		= usb_dsbr100_resume,	/* plain resume also handles reset */
	.supports_autosuspend	= 0,
};
/* Low-level device interface begins here */
/* Switch the radio on.
 *
 * Issues two vendor control requests: an undocumented GET_STATUS with
 * magic value/index 0x00/0xC7 (presumably replayed from the Windows
 * driver -- see the notes in the file header; TODO confirm against a
 * capture), then DSB100_ONOFF with value 1.
 *
 * Returns the first status byte from the device on success, or the
 * negative usb_control_msg() error on failure.  Runs with
 * radio->v4l2_lock held (ioctl core or PM paths), so transfer_buffer
 * is not accessed concurrently.
 */
static int dsbr100_start(struct dsbr100_device *radio)
{
	int retval;
	int request;	/* id of the failing request, for the error message */

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00, 0xC7, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = USB_REQ_GET_STATUS;
		goto usb_control_msg_failed;
	}

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		DSB100_ONOFF,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x01, 0x00, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = DSB100_ONOFF;
		goto usb_control_msg_failed;
	}

	radio->status = STARTED;
	return (radio->transfer_buffer)[0];

usb_control_msg_failed:
	dev_err(&radio->usbdev->dev,
		"%s - usb_control_msg returned %i, request %i\n",
		__func__, retval, request);
	return retval;
}
/* Switch the radio off.
 *
 * Mirror image of dsbr100_start(): an undocumented GET_STATUS with
 * magic value/index 0x16/0x1C, then DSB100_ONOFF with value 0.
 * Same return convention: first status byte on success, negative USB
 * error on failure.  Runs with radio->v4l2_lock held.
 */
static int dsbr100_stop(struct dsbr100_device *radio)
{
	int retval;
	int request;	/* id of the failing request, for the error message */

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x16, 0x1C, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = USB_REQ_GET_STATUS;
		goto usb_control_msg_failed;
	}

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		DSB100_ONOFF,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00, 0x00, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = DSB100_ONOFF;
		goto usb_control_msg_failed;
	}

	radio->status = STOPPED;
	return (radio->transfer_buffer)[0];

usb_control_msg_failed:
	dev_err(&radio->usbdev->dev,
		"%s - usb_control_msg returned %i, request %i\n",
		__func__, retval, request);
	return retval;
}
/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
/*
 * Converts radio->curfreq (1/16 kHz units) into the device tuning word
 * (curfreq/16 gives kHz; *80/1000 + 856 matches the chip's step/offset
 * -- constants taken over from the original driver, exact derivation
 * not documented here) and sends it with DSB100_TUNE, followed by two
 * undocumented GET_STATUS requests (0x96/0xB7 and 0x00/0x24).
 *
 * Side effect: radio->stereo is updated from bit 0 of the last status
 * byte (0 means stereo), or set to -1 on any USB failure.
 * Returns the last status byte on success, negative USB error otherwise.
 */
static int dsbr100_setfreq(struct dsbr100_device *radio)
{
	int retval;
	int request;	/* id of the failing request, for the error message */
	int freq = (radio->curfreq / 16 * 80) / 1000 + 856;

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		DSB100_TUNE,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		(freq >> 8) & 0x00ff, freq & 0xff,
		radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = DSB100_TUNE;
		goto usb_control_msg_failed;
	}

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x96, 0xB7, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = USB_REQ_GET_STATUS;
		goto usb_control_msg_failed;
	}

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00, 0x24, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = USB_REQ_GET_STATUS;
		goto usb_control_msg_failed;
	}

	/* bit 0 clear in the status byte indicates a stereo signal */
	radio->stereo = !((radio->transfer_buffer)[0] & 0x01);
	return (radio->transfer_buffer)[0];

usb_control_msg_failed:
	radio->stereo = -1;
	dev_err(&radio->usbdev->dev,
		"%s - usb_control_msg returned %i, request %i\n",
		__func__, retval, request);
	return retval;
}
/* return the device status. This is, in effect, just whether it
   sees a stereo signal or not. Pity. */
/*
 * Refreshes radio->stereo from the device: GET_STATUS with index 0x24,
 * bit 0 of the first returned byte clear means stereo.  On USB failure
 * radio->stereo is set to -1 and the error is logged; no value is
 * returned to the caller.
 */
static void dsbr100_getstat(struct dsbr100_device *radio)
{
	int retval;

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00 , 0x24, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		radio->stereo = -1;
		dev_err(&radio->usbdev->dev,
			"%s - usb_control_msg returned %i, request %i\n",
			__func__, retval, USB_REQ_GET_STATUS);
	} else {
		radio->stereo = !(radio->transfer_buffer[0] & 0x01);
	}
}
/* VIDIOC_QUERYCAP: identify the driver, the card and the USB path. */
static int vidioc_querycap(struct file *file, void *priv,
					struct v4l2_capability *v)
{
	struct dsbr100_device *radio;

	strlcpy(v->driver, "dsbr100", sizeof(v->driver));
	strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));

	radio = video_drvdata(file);
	usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
	v->capabilities = V4L2_CAP_TUNER;
	return 0;
}
/* VIDIOC_G_TUNER: describe the (single) FM tuner and its current state. */
static int vidioc_g_tuner(struct file *file, void *priv,
				struct v4l2_tuner *v)
{
	struct dsbr100_device *radio = video_drvdata(file);

	if (v->index > 0)
		return -EINVAL;

	dsbr100_getstat(radio);		/* refresh radio->stereo */

	strcpy(v->name, "FM");
	v->type = V4L2_TUNER_RADIO;
	v->rangelow = FREQ_MIN * FREQ_MUL;
	v->rangehigh = FREQ_MAX * FREQ_MUL;
	v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
	v->capability = V4L2_TUNER_CAP_LOW;
	v->audmode = radio->stereo ? V4L2_TUNER_MODE_STEREO
				   : V4L2_TUNER_MODE_MONO;
	v->signal = 0xffff;	/* We can't get the signal strength */
	return 0;
}
/* VIDIOC_S_TUNER: nothing is configurable; only tuner 0 exists. */
static int vidioc_s_tuner(struct file *file, void *priv,
				struct v4l2_tuner *v)
{
	if (v->index != 0)
		return -EINVAL;
	return 0;
}
/* VIDIOC_S_FREQUENCY: tune to f->frequency (V4L2 TUNER_LOW, 1/16 kHz).
 *
 * Fix: the USB error from dsbr100_setfreq() is now propagated instead
 * of being swallowed -- the original logged a warning but returned 0,
 * so userspace believed a failed tune had succeeded.
 */
static int vidioc_s_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct dsbr100_device *radio = video_drvdata(file);
	int retval;

	radio->curfreq = f->frequency;

	retval = dsbr100_setfreq(radio);
	if (retval < 0) {
		dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
		return retval;
	}
	return 0;
}
/* VIDIOC_G_FREQUENCY: report the last frequency set (cached, not read back). */
static int vidioc_g_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct dsbr100_device *radio = video_drvdata(file);

	f->frequency = radio->curfreq;
	f->type = V4L2_TUNER_RADIO;
	return 0;
}
/* VIDIOC_QUERYCTRL: the only control is AUDIO_MUTE (boolean, default on). */
static int vidioc_queryctrl(struct file *file, void *priv,
				struct v4l2_queryctrl *qc)
{
	if (qc->id == V4L2_CID_AUDIO_MUTE)
		return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);

	return -EINVAL;
}
/* VIDIOC_G_CTRL: mute state mirrors radio->status (STOPPED == 1 == muted). */
static int vidioc_g_ctrl(struct file *file, void *priv,
				struct v4l2_control *ctrl)
{
	struct dsbr100_device *radio = video_drvdata(file);

	if (ctrl->id != V4L2_CID_AUDIO_MUTE)
		return -EINVAL;

	ctrl->value = radio->status;
	return 0;
}
/* VIDIOC_S_CTRL: mute (value != 0) stops the radio, unmute starts it.
 * A USB failure in either direction is reported as -EBUSY.
 */
static int vidioc_s_ctrl(struct file *file, void *priv,
				struct v4l2_control *ctrl)
{
	struct dsbr100_device *radio = video_drvdata(file);
	int retval;

	if (ctrl->id != V4L2_CID_AUDIO_MUTE)
		return -EINVAL;

	retval = ctrl->value ? dsbr100_stop(radio) : dsbr100_start(radio);
	if (retval < 0) {
		dev_warn(&radio->usbdev->dev,
			 "Radio did not respond properly\n");
		return -EBUSY;
	}
	return 0;
}
/* VIDIOC_G_AUDIO: report the single audio input.
 *
 * Fix: only input 0 exists, but the original accepted index 1 as well
 * (`a->index > 1`), inconsistent with vidioc_s_audio() which rejects
 * everything but 0.  Also use a bounded copy for the name.
 */
static int vidioc_g_audio(struct file *file, void *priv,
				struct v4l2_audio *a)
{
	if (a->index != 0)
		return -EINVAL;

	strlcpy(a->name, "Radio", sizeof(a->name));
	a->capability = V4L2_AUDCAP_STEREO;
	return 0;
}
/* VIDIOC_G_INPUT: there is exactly one input; always report input 0. */
static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}
/* VIDIOC_S_INPUT: accept only the single existing input 0. */
static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
{
	if (i != 0)
		return -EINVAL;
	return 0;
}
/* VIDIOC_S_AUDIO: accept only the single existing audio input 0. */
static int vidioc_s_audio(struct file *file, void *priv,
				struct v4l2_audio *a)
{
	if (a->index != 0)
		return -EINVAL;
	return 0;
}
/* USB subsystem interface begins here */
/*
 * Handle unplugging of the device.
 * We call video_unregister_device in any case.
 * The last function called in this procedure is
 * usb_dsbr100_video_device_release
 */
static void usb_dsbr100_disconnect(struct usb_interface *intf)
{
	struct dsbr100_device *radio = usb_get_intfdata(intf);

	/* Hold an extra v4l2_device reference so the final put below is
	 * what may trigger the release callback, not the unregister. */
	v4l2_device_get(&radio->v4l2_dev);

	/* Take the lock so we do not race in-flight ioctls while tearing
	 * the device down. */
	mutex_lock(&radio->v4l2_lock);
	usb_set_intfdata(intf, NULL);
	video_unregister_device(&radio->videodev);
	v4l2_device_disconnect(&radio->v4l2_dev);
	mutex_unlock(&radio->v4l2_lock);

	/* Drop our reference; frees the device if no file handles remain. */
	v4l2_device_put(&radio->v4l2_dev);
}
/* Suspend device - stop device. */
/*
 * Stops the radio if it was running, but records it as STARTED so that
 * usb_dsbr100_resume() knows to restart it.  Always returns 0: a failed
 * stop must not block system suspend.
 */
static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct dsbr100_device *radio = usb_get_intfdata(intf);
	int retval;

	mutex_lock(&radio->v4l2_lock);
	if (radio->status == STARTED) {
		retval = dsbr100_stop(radio);
		if (retval < 0)
			dev_warn(&intf->dev, "dsbr100_stop failed\n");

		/* After dsbr100_stop() status set to STOPPED.
		 * If we want driver to start radio on resume
		 * we set status equal to STARTED.
		 * On resume we will check status and run radio if needed.
		 */
		radio->status = STARTED;
	}
	mutex_unlock(&radio->v4l2_lock);

	dev_info(&intf->dev, "going into suspend..\n");
	return 0;
}
/* Resume device - start device. */
/*
 * Restarts the radio if it was playing before suspend (status left at
 * STARTED by usb_dsbr100_suspend()).  Always returns 0 so resume of the
 * USB stack is never blocked by a radio that fails to come back.
 */
static int usb_dsbr100_resume(struct usb_interface *intf)
{
	struct dsbr100_device *radio = usb_get_intfdata(intf);
	int retval;

	mutex_lock(&radio->v4l2_lock);
	if (radio->status == STARTED) {
		retval = dsbr100_start(radio);
		if (retval < 0)
			dev_warn(&intf->dev, "dsbr100_start failed\n");
	}
	mutex_unlock(&radio->v4l2_lock);

	dev_info(&intf->dev, "coming out of suspend..\n");
	return 0;
}
/* free data structures */
/*
 * v4l2_device release callback: runs when the last reference taken in
 * probe/disconnect/open paths is dropped.  Unregisters the v4l2 device
 * and frees everything allocated in usb_dsbr100_probe().
 */
static void usb_dsbr100_release(struct v4l2_device *v4l2_dev)
{
	struct dsbr100_device *radio = v4l2_dev_to_radio(v4l2_dev);

	v4l2_device_unregister(&radio->v4l2_dev);
	kfree(radio->transfer_buffer);
	kfree(radio);
}
/* File system interface */
/* All file operations go through the V4L2 ioctl dispatcher; serialization
 * is provided by videodev.lock (radio->v4l2_lock), hence unlocked_ioctl. */
static const struct v4l2_file_operations usb_dsbr100_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
};

/* ioctl handlers wired into the V4L2 core */
static const struct v4l2_ioctl_ops usb_dsbr100_ioctl_ops = {
	.vidioc_querycap    = vidioc_querycap,
	.vidioc_g_tuner     = vidioc_g_tuner,
	.vidioc_s_tuner     = vidioc_s_tuner,
	.vidioc_g_frequency = vidioc_g_frequency,
	.vidioc_s_frequency = vidioc_s_frequency,
	.vidioc_queryctrl   = vidioc_queryctrl,
	.vidioc_g_ctrl      = vidioc_g_ctrl,
	.vidioc_s_ctrl      = vidioc_s_ctrl,
	.vidioc_g_audio     = vidioc_g_audio,
	.vidioc_s_audio     = vidioc_s_audio,
	.vidioc_g_input     = vidioc_g_input,
	.vidioc_s_input     = vidioc_s_input,
};
/* check if the device is present and register with v4l and usb if it is */
/*
 * Probe: allocate per-device state, register the v4l2 device and the
 * radio video node.
 *
 * Fixes vs. the original:
 *  - the real error from video_register_device() is propagated instead
 *    of being flattened to -EIO;
 *  - the duplicated error-path cleanup is collapsed into one goto chain.
 */
static int usb_dsbr100_probe(struct usb_interface *intf,
				const struct usb_device_id *id)
{
	struct dsbr100_device *radio;
	struct v4l2_device *v4l2_dev;
	int retval;

	radio = kzalloc(sizeof(struct dsbr100_device), GFP_KERNEL);
	if (!radio)
		return -ENOMEM;

	radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
	if (!radio->transfer_buffer) {
		retval = -ENOMEM;
		goto err_free_radio;
	}

	v4l2_dev = &radio->v4l2_dev;
	v4l2_dev->release = usb_dsbr100_release;

	retval = v4l2_device_register(&intf->dev, v4l2_dev);
	if (retval < 0) {
		v4l2_err(v4l2_dev, "couldn't register v4l2_device\n");
		goto err_free_buffer;
	}

	mutex_init(&radio->v4l2_lock);
	strlcpy(radio->videodev.name, v4l2_dev->name,
		sizeof(radio->videodev.name));
	radio->videodev.v4l2_dev = v4l2_dev;
	radio->videodev.fops = &usb_dsbr100_fops;
	radio->videodev.ioctl_ops = &usb_dsbr100_ioctl_ops;
	radio->videodev.release = video_device_release_empty;
	radio->videodev.lock = &radio->v4l2_lock;

	radio->usbdev = interface_to_usbdev(intf);
	radio->curfreq = FREQ_MIN * FREQ_MUL;	/* start at the band bottom */
	radio->status = STOPPED;

	video_set_drvdata(&radio->videodev, radio);

	retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO,
				       radio_nr);
	if (retval < 0) {
		v4l2_err(v4l2_dev, "couldn't register video device\n");
		v4l2_device_unregister(v4l2_dev);
		goto err_free_buffer;
	}

	usb_set_intfdata(intf, radio);
	return 0;

err_free_buffer:
	kfree(radio->transfer_buffer);
err_free_radio:
	kfree(radio);
	return retval;
}
module_usb_driver(usb_dsbr100_driver);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
| gpl-2.0 |
Split-Screen/android_kernel_htc_msm8974 | drivers/media/radio/dsbr100.c | 4905 | 16552 | /* A driver for the D-Link DSB-R100 USB radio and Gemtek USB Radio 21.
The device plugs into both the USB and an analog audio input, so this thing
only deals with initialisation and frequency setting, the
audio data has to be handled by a sound driver.
Major issue: I can't find out where the device reports the signal
strength, and indeed the windows software appearantly just looks
at the stereo indicator as well. So, scanning will only find
stereo stations. Sad, but I can't help it.
Also, the windows program sends oodles of messages over to the
device, and I couldn't figure out their meaning. My suspicion
is that they don't have any:-)
You might find some interesting stuff about this module at
http://unimut.fsk.uni-heidelberg.de/unimut/demi/dsbr
Copyright (c) 2000 Markus Demleitner <msdemlei@cl.uni-heidelberg.de>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
History:
Version 0.46:
Removed usb_dsbr100_open/close calls and radio->users counter. Also,
radio->muted changed to radio->status and suspend/resume calls updated.
Version 0.45:
Converted to v4l2_device.
Version 0.44:
Add suspend/resume functions, fix unplug of device,
a lot of cleanups and fixes by Alexey Klimov <klimov.linux@gmail.com>
Version 0.43:
Oliver Neukum: avoided DMA coherency issue
Version 0.42:
Converted dsbr100 to use video_ioctl2
by Douglas Landgraf <dougsland@gmail.com>
Version 0.41-ac1:
Alan Cox: Some cleanups and fixes
Version 0.41:
Converted to V4L2 API by Mauro Carvalho Chehab <mchehab@infradead.org>
Version 0.40:
Markus: Updates for 2.6.x kernels, code layout changes, name sanitizing
Version 0.30:
Markus: Updates for 2.5.x kernel and more ISO compliant source
Version 0.25:
PSL and Markus: Cleanup, radio now doesn't stop on device close
Version 0.24:
Markus: Hope I got these silly VIDEO_TUNER_LOW issues finally
right. Some minor cleanup, improved standalone compilation
Version 0.23:
Markus: Sign extension bug fixed by declaring transfer_buffer unsigned
Version 0.22:
Markus: Some (brown bag) cleanup in what VIDIOCSTUNER returns,
thanks to Mike Cox for pointing the problem out.
Version 0.21:
Markus: Minor cleanup, warnings if something goes wrong, lame attempt
to adhere to Documentation/CodingStyle
Version 0.2:
Brad Hards <bradh@dynamite.com.au>: Fixes to make it work as non-module
Markus: Copyright clarification
Version 0.01: Markus: initial release
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/input.h>
#include <linux/videodev2.h>
#include <media/v4l2-device.h>
#include <media/v4l2-ioctl.h>
#include <linux/usb.h>
/*
* Version Information
*/
#define DRIVER_VERSION "0.4.7"
#define DRIVER_AUTHOR "Markus Demleitner <msdemlei@tucana.harvard.edu>"
#define DRIVER_DESC "D-Link DSB-R100 USB FM radio driver"
#define DSB100_VENDOR 0x04b4
#define DSB100_PRODUCT 0x1002
/* Commands the device appears to understand */
#define DSB100_TUNE 1
#define DSB100_ONOFF 2
#define TB_LEN 16
/* Frequency limits in MHz -- these are European values. For Japanese
devices, that would be 76 and 91. */
#define FREQ_MIN 87.5
#define FREQ_MAX 108.0
#define FREQ_MUL 16000
/* defines for radio->status */
#define STARTED 0
#define STOPPED 1
#define v4l2_dev_to_radio(d) container_of(d, struct dsbr100_device, v4l2_dev)
static int usb_dsbr100_probe(struct usb_interface *intf,
const struct usb_device_id *id);
static void usb_dsbr100_disconnect(struct usb_interface *intf);
static int usb_dsbr100_suspend(struct usb_interface *intf,
pm_message_t message);
static int usb_dsbr100_resume(struct usb_interface *intf);
static int radio_nr = -1;
module_param(radio_nr, int, 0);
/* Data for one (physical) device */
struct dsbr100_device {
	struct usb_device *usbdev;	/* underlying USB device */
	struct video_device videodev;	/* registered V4L2 radio node */
	struct v4l2_device v4l2_dev;	/* parent v4l2 device (refcounted) */
	u8 *transfer_buffer;		/* TB_LEN-byte buffer for control transfers */
	struct mutex v4l2_lock;		/* serializes ioctls, disconnect and PM */
	int curfreq;			/* frequency in V4L2 TUNER_LOW units (1/16 kHz) */
	int stereo;			/* 1 = stereo, 0 = mono, -1 = last status read failed */
	int status;			/* STARTED or STOPPED */
};

/* Only one device is supported: the D-Link DSB-R100. */
static struct usb_device_id usb_dsbr100_device_table [] = {
	{ USB_DEVICE(DSB100_VENDOR, DSB100_PRODUCT) },
	{ }			/* Terminating entry */
};

MODULE_DEVICE_TABLE (usb, usb_dsbr100_device_table);

/* USB subsystem interface */
static struct usb_driver usb_dsbr100_driver = {
	.name			= "dsbr100",
	.probe			= usb_dsbr100_probe,
	.disconnect		= usb_dsbr100_disconnect,
	.id_table		= usb_dsbr100_device_table,
	.suspend		= usb_dsbr100_suspend,
	.resume			= usb_dsbr100_resume,
	.reset_resume		= usb_dsbr100_resume,	/* plain resume also handles reset */
	.supports_autosuspend	= 0,
};
/* Low-level device interface begins here */
/* switch on radio */
/* Switch the radio on.
 *
 * Two vendor control requests: an undocumented GET_STATUS with magic
 * value/index 0x00/0xC7 (presumably replayed from the Windows driver;
 * the file header notes its messages have no discernible meaning --
 * TODO confirm), then DSB100_ONOFF with value 1.  Returns the first
 * status byte on success or the negative usb_control_msg() error.
 * Runs with radio->v4l2_lock held.
 */
static int dsbr100_start(struct dsbr100_device *radio)
{
	int retval;
	int request;	/* id of the failing request, for the error message */

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00, 0xC7, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = USB_REQ_GET_STATUS;
		goto usb_control_msg_failed;
	}

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		DSB100_ONOFF,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x01, 0x00, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = DSB100_ONOFF;
		goto usb_control_msg_failed;
	}

	radio->status = STARTED;
	return (radio->transfer_buffer)[0];

usb_control_msg_failed:
	dev_err(&radio->usbdev->dev,
		"%s - usb_control_msg returned %i, request %i\n",
		__func__, retval, request);
	return retval;
}

/* switch off radio */
/* Mirror of dsbr100_start(): magic GET_STATUS 0x16/0x1C, then
 * DSB100_ONOFF with value 0.  Same return convention. */
static int dsbr100_stop(struct dsbr100_device *radio)
{
	int retval;
	int request;	/* id of the failing request, for the error message */

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x16, 0x1C, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = USB_REQ_GET_STATUS;
		goto usb_control_msg_failed;
	}

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		DSB100_ONOFF,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00, 0x00, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = DSB100_ONOFF;
		goto usb_control_msg_failed;
	}

	radio->status = STOPPED;
	return (radio->transfer_buffer)[0];

usb_control_msg_failed:
	dev_err(&radio->usbdev->dev,
		"%s - usb_control_msg returned %i, request %i\n",
		__func__, retval, request);
	return retval;
}
/* set a frequency, freq is defined by v4l's TUNER_LOW, i.e. 1/16th kHz */
/* Tune the device to radio->curfreq (V4L2 TUNER_LOW, 1/16 kHz units).
 *
 * Converts to the device tuning word (curfreq/16 gives kHz; *80/1000 +
 * 856 matches the chip's step/offset -- constants inherited from the
 * original driver, derivation not documented), sends DSB100_TUNE, then
 * two undocumented GET_STATUS requests.  Updates radio->stereo from
 * bit 0 of the final status byte (0 = stereo), or sets it to -1 on
 * failure.  Returns that status byte or the negative USB error.
 */
static int dsbr100_setfreq(struct dsbr100_device *radio)
{
	int retval;
	int request;	/* id of the failing request, for the error message */
	int freq = (radio->curfreq / 16 * 80) / 1000 + 856;

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		DSB100_TUNE,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		(freq >> 8) & 0x00ff, freq & 0xff,
		radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = DSB100_TUNE;
		goto usb_control_msg_failed;
	}

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x96, 0xB7, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = USB_REQ_GET_STATUS;
		goto usb_control_msg_failed;
	}

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00, 0x24, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		request = USB_REQ_GET_STATUS;
		goto usb_control_msg_failed;
	}

	/* bit 0 clear in the status byte indicates a stereo signal */
	radio->stereo = !((radio->transfer_buffer)[0] & 0x01);
	return (radio->transfer_buffer)[0];

usb_control_msg_failed:
	radio->stereo = -1;
	dev_err(&radio->usbdev->dev,
		"%s - usb_control_msg returned %i, request %i\n",
		__func__, retval, request);
	return retval;
}

/* return the device status. This is, in effect, just whether it
   sees a stereo signal or not. Pity. */
/* Refreshes radio->stereo (1/0), or sets it to -1 and logs on failure. */
static void dsbr100_getstat(struct dsbr100_device *radio)
{
	int retval;

	retval = usb_control_msg(radio->usbdev,
		usb_rcvctrlpipe(radio->usbdev, 0),
		USB_REQ_GET_STATUS,
		USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
		0x00 , 0x24, radio->transfer_buffer, 8, 300);
	if (retval < 0) {
		radio->stereo = -1;
		dev_err(&radio->usbdev->dev,
			"%s - usb_control_msg returned %i, request %i\n",
			__func__, retval, USB_REQ_GET_STATUS);
	} else {
		radio->stereo = !(radio->transfer_buffer[0] & 0x01);
	}
}
/* VIDIOC_QUERYCAP: identify the driver, the card and the USB path. */
static int vidioc_querycap(struct file *file, void *priv,
					struct v4l2_capability *v)
{
	struct dsbr100_device *radio = video_drvdata(file);

	strlcpy(v->driver, "dsbr100", sizeof(v->driver));
	strlcpy(v->card, "D-Link R-100 USB FM Radio", sizeof(v->card));
	usb_make_path(radio->usbdev, v->bus_info, sizeof(v->bus_info));
	v->capabilities = V4L2_CAP_TUNER;
	return 0;
}

/* VIDIOC_G_TUNER: describe the single FM tuner and its current state. */
static int vidioc_g_tuner(struct file *file, void *priv,
				struct v4l2_tuner *v)
{
	struct dsbr100_device *radio = video_drvdata(file);

	if (v->index > 0)
		return -EINVAL;

	dsbr100_getstat(radio);		/* refresh radio->stereo */
	strcpy(v->name, "FM");
	v->type = V4L2_TUNER_RADIO;
	v->rangelow = FREQ_MIN * FREQ_MUL;
	v->rangehigh = FREQ_MAX * FREQ_MUL;
	v->rxsubchans = V4L2_TUNER_SUB_MONO | V4L2_TUNER_SUB_STEREO;
	v->capability = V4L2_TUNER_CAP_LOW;
	/* NOTE(review): stereo == -1 (status read failed) is truthy and
	 * is reported as stereo here. */
	if(radio->stereo)
		v->audmode = V4L2_TUNER_MODE_STEREO;
	else
		v->audmode = V4L2_TUNER_MODE_MONO;
	v->signal = 0xffff;	/* We can't get the signal strength */
	return 0;
}

/* VIDIOC_S_TUNER: nothing is configurable; only tuner 0 exists. */
static int vidioc_s_tuner(struct file *file, void *priv,
				struct v4l2_tuner *v)
{
	return v->index ? -EINVAL : 0;
}

/* VIDIOC_S_FREQUENCY: tune; a USB failure is only warned about and 0
 * is still returned to userspace. */
static int vidioc_s_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct dsbr100_device *radio = video_drvdata(file);
	int retval;

	radio->curfreq = f->frequency;

	retval = dsbr100_setfreq(radio);
	if (retval < 0)
		dev_warn(&radio->usbdev->dev, "Set frequency failed\n");
	return 0;
}

/* VIDIOC_G_FREQUENCY: report the cached frequency (not read back). */
static int vidioc_g_frequency(struct file *file, void *priv,
				struct v4l2_frequency *f)
{
	struct dsbr100_device *radio = video_drvdata(file);

	f->type = V4L2_TUNER_RADIO;
	f->frequency = radio->curfreq;
	return 0;
}

/* VIDIOC_QUERYCTRL: the only control is AUDIO_MUTE (boolean). */
static int vidioc_queryctrl(struct file *file, void *priv,
				struct v4l2_queryctrl *qc)
{
	switch (qc->id) {
	case V4L2_CID_AUDIO_MUTE:
		return v4l2_ctrl_query_fill(qc, 0, 1, 1, 1);
	}
	return -EINVAL;
}

/* VIDIOC_G_CTRL: mute mirrors radio->status (STOPPED == 1 == muted). */
static int vidioc_g_ctrl(struct file *file, void *priv,
				struct v4l2_control *ctrl)
{
	struct dsbr100_device *radio = video_drvdata(file);

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		ctrl->value = radio->status;
		return 0;
	}
	return -EINVAL;
}

/* VIDIOC_S_CTRL: mute stops the radio, unmute starts it; a USB failure
 * in either direction is reported as -EBUSY. */
static int vidioc_s_ctrl(struct file *file, void *priv,
				struct v4l2_control *ctrl)
{
	struct dsbr100_device *radio = video_drvdata(file);
	int retval;

	switch (ctrl->id) {
	case V4L2_CID_AUDIO_MUTE:
		if (ctrl->value) {
			retval = dsbr100_stop(radio);
			if (retval < 0) {
				dev_warn(&radio->usbdev->dev,
					 "Radio did not respond properly\n");
				return -EBUSY;
			}
		} else {
			retval = dsbr100_start(radio);
			if (retval < 0) {
				dev_warn(&radio->usbdev->dev,
					 "Radio did not respond properly\n");
				return -EBUSY;
			}
		}
		return 0;
	}
	return -EINVAL;
}

/* VIDIOC_G_AUDIO: single audio input.  NOTE(review): accepts index 1
 * although only input 0 exists (vidioc_s_audio rejects index != 0). */
static int vidioc_g_audio(struct file *file, void *priv,
				struct v4l2_audio *a)
{
	if (a->index > 1)
		return -EINVAL;

	strcpy(a->name, "Radio");
	a->capability = V4L2_AUDCAP_STEREO;
	return 0;
}

/* VIDIOC_G_INPUT: there is exactly one input; always report input 0. */
static int vidioc_g_input(struct file *filp, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

/* VIDIOC_S_INPUT: accept only the single existing input 0. */
static int vidioc_s_input(struct file *filp, void *priv, unsigned int i)
{
	return i ? -EINVAL : 0;
}

/* VIDIOC_S_AUDIO: accept only the single existing audio input 0. */
static int vidioc_s_audio(struct file *file, void *priv,
				struct v4l2_audio *a)
{
	return a->index ? -EINVAL : 0;
}
/* USB subsystem interface begins here */
/*
* Handle unplugging of the device.
* We call video_unregister_device in any case.
* The last function called in this procedure is
* usb_dsbr100_video_device_release
*/
/* Handle unplugging: take an extra v4l2_device reference so the final
 * v4l2_device_put() below is what may trigger usb_dsbr100_release(),
 * unregister the node under the lock to avoid racing in-flight ioctls. */
static void usb_dsbr100_disconnect(struct usb_interface *intf)
{
	struct dsbr100_device *radio = usb_get_intfdata(intf);

	v4l2_device_get(&radio->v4l2_dev);
	mutex_lock(&radio->v4l2_lock);
	usb_set_intfdata(intf, NULL);
	video_unregister_device(&radio->videodev);
	v4l2_device_disconnect(&radio->v4l2_dev);
	mutex_unlock(&radio->v4l2_lock);
	v4l2_device_put(&radio->v4l2_dev);
}

/* Suspend device - stop device. */
/* Stops the radio but records it as STARTED so resume restarts it.
 * Always returns 0: a failed stop must not block system suspend. */
static int usb_dsbr100_suspend(struct usb_interface *intf, pm_message_t message)
{
	struct dsbr100_device *radio = usb_get_intfdata(intf);
	int retval;

	mutex_lock(&radio->v4l2_lock);
	if (radio->status == STARTED) {
		retval = dsbr100_stop(radio);
		if (retval < 0)
			dev_warn(&intf->dev, "dsbr100_stop failed\n");

		/* After dsbr100_stop() status set to STOPPED.
		 * If we want driver to start radio on resume
		 * we set status equal to STARTED.
		 * On resume we will check status and run radio if needed.
		 */
		radio->status = STARTED;
	}
	mutex_unlock(&radio->v4l2_lock);

	dev_info(&intf->dev, "going into suspend..\n");
	return 0;
}

/* Resume device - start device. */
/* Restarts the radio if it was playing before suspend; always 0. */
static int usb_dsbr100_resume(struct usb_interface *intf)
{
	struct dsbr100_device *radio = usb_get_intfdata(intf);
	int retval;

	mutex_lock(&radio->v4l2_lock);
	if (radio->status == STARTED) {
		retval = dsbr100_start(radio);
		if (retval < 0)
			dev_warn(&intf->dev, "dsbr100_start failed\n");
	}
	mutex_unlock(&radio->v4l2_lock);

	dev_info(&intf->dev, "coming out of suspend..\n");
	return 0;
}

/* free data structures */
/* v4l2_device release callback: runs on the last reference drop and
 * frees everything allocated in usb_dsbr100_probe(). */
static void usb_dsbr100_release(struct v4l2_device *v4l2_dev)
{
	struct dsbr100_device *radio = v4l2_dev_to_radio(v4l2_dev);

	v4l2_device_unregister(&radio->v4l2_dev);
	kfree(radio->transfer_buffer);
	kfree(radio);
}
/* File system interface */
/* Serialization is provided by videodev.lock, hence unlocked_ioctl. */
static const struct v4l2_file_operations usb_dsbr100_fops = {
	.owner		= THIS_MODULE,
	.unlocked_ioctl	= video_ioctl2,
};

/* ioctl handlers wired into the V4L2 core */
static const struct v4l2_ioctl_ops usb_dsbr100_ioctl_ops = {
	.vidioc_querycap    = vidioc_querycap,
	.vidioc_g_tuner     = vidioc_g_tuner,
	.vidioc_s_tuner     = vidioc_s_tuner,
	.vidioc_g_frequency = vidioc_g_frequency,
	.vidioc_s_frequency = vidioc_s_frequency,
	.vidioc_queryctrl   = vidioc_queryctrl,
	.vidioc_g_ctrl      = vidioc_g_ctrl,
	.vidioc_s_ctrl      = vidioc_s_ctrl,
	.vidioc_g_audio     = vidioc_g_audio,
	.vidioc_s_audio     = vidioc_s_audio,
	.vidioc_g_input     = vidioc_g_input,
	.vidioc_s_input     = vidioc_s_input,
};

/* check if the device is present and register with v4l and usb if it is */
/* Probe: allocate per-device state, register the v4l2 device and the
 * radio video node; every failure path frees what was allocated so far.
 * NOTE(review): a video_register_device() failure is reported as -EIO
 * rather than the actual error code. */
static int usb_dsbr100_probe(struct usb_interface *intf,
				const struct usb_device_id *id)
{
	struct dsbr100_device *radio;
	struct v4l2_device *v4l2_dev;
	int retval;

	radio = kzalloc(sizeof(struct dsbr100_device), GFP_KERNEL);
	if (!radio)
		return -ENOMEM;

	radio->transfer_buffer = kmalloc(TB_LEN, GFP_KERNEL);
	if (!(radio->transfer_buffer)) {
		kfree(radio);
		return -ENOMEM;
	}

	v4l2_dev = &radio->v4l2_dev;
	v4l2_dev->release = usb_dsbr100_release;

	retval = v4l2_device_register(&intf->dev, v4l2_dev);
	if (retval < 0) {
		v4l2_err(v4l2_dev, "couldn't register v4l2_device\n");
		kfree(radio->transfer_buffer);
		kfree(radio);
		return retval;
	}

	mutex_init(&radio->v4l2_lock);
	strlcpy(radio->videodev.name, v4l2_dev->name, sizeof(radio->videodev.name));
	radio->videodev.v4l2_dev = v4l2_dev;
	radio->videodev.fops = &usb_dsbr100_fops;
	radio->videodev.ioctl_ops = &usb_dsbr100_ioctl_ops;
	radio->videodev.release = video_device_release_empty;
	radio->videodev.lock = &radio->v4l2_lock;

	radio->usbdev = interface_to_usbdev(intf);
	radio->curfreq = FREQ_MIN * FREQ_MUL;	/* start at the band bottom */
	radio->status = STOPPED;

	video_set_drvdata(&radio->videodev, radio);

	retval = video_register_device(&radio->videodev, VFL_TYPE_RADIO, radio_nr);
	if (retval < 0) {
		v4l2_err(v4l2_dev, "couldn't register video device\n");
		v4l2_device_unregister(v4l2_dev);
		kfree(radio->transfer_buffer);
		kfree(radio);
		return -EIO;
	}
	usb_set_intfdata(intf, radio);
	return 0;
}
module_usb_driver(usb_dsbr100_driver);
MODULE_AUTHOR( DRIVER_AUTHOR );
MODULE_DESCRIPTION( DRIVER_DESC );
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
| gpl-2.0 |
shengdie/simon_kernel_l01f_kk | arch/arm/mach-kirkwood/db88f6281-bp-setup.c | 4905 | 2449 | /*
* arch/arm/mach-kirkwood/db88f6281-bp-setup.c
*
* Marvell DB-88F6281-BP Development Board Setup
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mtd/partitions.h>
#include <linux/ata_platform.h>
#include <linux/mv643xx_eth.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <mach/kirkwood.h>
#include <plat/mvsdio.h>
#include "common.h"
#include "mpp.h"
/* NAND flash layout: 1M u-boot, 4M kernel image, rest for the rootfs. */
static struct mtd_partition db88f6281_nand_parts[] = {
	{
		.name = "u-boot",
		.offset = 0,
		.size = SZ_1M
	}, {
		.name = "uImage",
		.offset = MTDPART_OFS_NXTBLK,
		.size = SZ_4M
	}, {
		.name = "root",
		.offset = MTDPART_OFS_NXTBLK,
		.size = MTDPART_SIZ_FULL
	},
};

/* Gigabit Ethernet port 0: PHY at MDIO address 8. */
static struct mv643xx_eth_platform_data db88f6281_ge00_data = {
	.phy_addr	= MV643XX_ETH_PHY_ADDR(8),
};

/* Both SATA ports of the SoC are wired up on this board. */
static struct mv_sata_platform_data db88f6281_sata_data = {
	.n_ports	= 2,
};

/* SDIO slot: write-protect on GPIO 37, card-detect on GPIO 38. */
static struct mvsdio_platform_data db88f6281_mvsdio_data = {
	.gpio_write_protect	= 37,
	.gpio_card_detect	= 38,
};

/* Multi-purpose pin mux: eight NAND data lines plus the two SDIO GPIOs
 * above; the list is zero-terminated. */
static unsigned int db88f6281_mpp_config[] __initdata = {
	MPP0_NF_IO2,
	MPP1_NF_IO3,
	MPP2_NF_IO4,
	MPP3_NF_IO5,
	MPP4_NF_IO6,
	MPP5_NF_IO7,
	MPP18_NF_IO0,
	MPP19_NF_IO1,
	MPP37_GPIO,
	MPP38_GPIO,
	0
};
/* Board init: bring up the SoC, program the pin mux, then register the
 * on-board peripherals (NAND, EHCI, GigE, SATA, UART0, SDIO).  Order
 * matters: kirkwood_init()/mpp_conf must run before any device setup. */
static void __init db88f6281_init(void)
{
	/*
	 * Basic setup. Needs to be called early.
	 */
	kirkwood_init();
	kirkwood_mpp_conf(db88f6281_mpp_config);

	kirkwood_nand_init(ARRAY_AND_SIZE(db88f6281_nand_parts), 25);
	kirkwood_ehci_init();
	kirkwood_ge00_init(&db88f6281_ge00_data);
	kirkwood_sata_init(&db88f6281_sata_data);
	kirkwood_uart0_init();
	kirkwood_sdio_init(&db88f6281_mvsdio_data);
}
/* Register the PCIe bus(es) once the bus subsystem is up; the 88F6282
 * variant has a second PCIe port, other variants only port 0. */
static int __init db88f6281_pci_init(void)
{
	u32 dev, rev;

	if (!machine_is_db88f6281_bp())
		return 0;

	kirkwood_pcie_id(&dev, &rev);
	kirkwood_pcie_init(dev == MV88F6282_DEV_ID ?
			   (KW_PCIE1 | KW_PCIE0) : KW_PCIE0);
	return 0;
}
subsys_initcall(db88f6281_pci_init);
MACHINE_START(DB88F6281_BP, "Marvell DB-88F6281-BP Development Board")
/* Maintainer: Saeed Bishara <saeed@marvell.com> */
.atag_offset = 0x100,
.init_machine = db88f6281_init,
.map_io = kirkwood_map_io,
.init_early = kirkwood_init_early,
.init_irq = kirkwood_init_irq,
.timer = &kirkwood_timer,
.restart = kirkwood_restart,
MACHINE_END
| gpl-2.0 |
Luavis/SOS | arch/m68k/coldfire/dma_timer.c | 9001 | 2187 | /*
* dma_timer.c -- Freescale ColdFire DMA Timer.
*
* Copyright (C) 2007, Benedikt Spranger <b.spranger@linutronix.de>
* Copyright (C) 2008. Sebastian Siewior, Linutronix
*
*/
#include <linux/clocksource.h>
#include <linux/io.h>
#include <asm/machdep.h>
#include <asm/coldfire.h>
#include <asm/mcfpit.h>
#include <asm/mcfsim.h>
/* Register-block offsets of the four ColdFire DMA timers; only timer 0
 * is used by this clocksource. */
#define DMA_TIMER_0	(0x00)
#define DMA_TIMER_1	(0x40)
#define DMA_TIMER_2	(0x80)
#define DMA_TIMER_3	(0xc0)

/* DMA timer 0 registers, offset from the internal peripheral base.
 * Names follow the ColdFire reference manual (DTCN0 is the free-running
 * counter read by the clocksource below). */
#define DTMR0	(MCF_IPSBAR + DMA_TIMER_0 + 0x400)
#define DTXMR0	(MCF_IPSBAR + DMA_TIMER_0 + 0x402)
#define DTER0	(MCF_IPSBAR + DMA_TIMER_0 + 0x403)
#define DTRR0	(MCF_IPSBAR + DMA_TIMER_0 + 0x404)
#define DTCR0	(MCF_IPSBAR + DMA_TIMER_0 + 0x408)
#define DTCN0	(MCF_IPSBAR + DMA_TIMER_0 + 0x40c)

/* Effective tick rate: system clock / 2, then the /16 prescaler
 * selected by DMA_DTMR_CLK_DIV_16 in init below. */
#define DMA_FREQ	((MCF_CLK / 2) / 16)

/* DTMR */
#define DMA_DTMR_RESTART	(1 << 3)
#define DMA_DTMR_CLK_DIV_1	(1 << 1)
#define DMA_DTMR_CLK_DIV_16	(2 << 1)
#define DMA_DTMR_ENABLE		(1 << 0)
/* Clocksource read callback: return the current 32-bit free-running
 * count of DMA timer 0. */
static cycle_t cf_dt_get_cycles(struct clocksource *cs)
{
	return __raw_readl(DTCN0);
}
/* Clocksource backed by the free-running DMA timer 0 counter. */
static struct clocksource clocksource_cf_dt = {
	.name		= "coldfire_dma_timer",
	.rating		= 200,
	.read		= cf_dt_get_cycles,
	.mask		= CLOCKSOURCE_MASK(32),
	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
};
/* One-time hardware setup and clocksource registration (arch_initcall). */
static int __init init_cf_dt_clocksource(void)
{
	/*
	 * We setup DMA timer 0 in free run mode. This incrementing counter is
	 * used as a highly precise clock source. With MCF_CLOCK = 150 MHz we
	 * get a ~213 ns resolution and the 32bit register will overflow almost
	 * every 15 minutes.
	 */
	__raw_writeb(0x00, DTXMR0);		/* extended mode off */
	__raw_writeb(0x00, DTER0);		/* clear pending events */
	__raw_writel(0x00000000, DTRR0);	/* reference 0 -> free run */
	__raw_writew(DMA_DTMR_CLK_DIV_16 | DMA_DTMR_ENABLE, DTMR0);
	return clocksource_register_hz(&clocksource_cf_dt, DMA_FREQ);
}
arch_initcall(init_cf_dt_clocksource);
#define CYC2NS_SCALE_FACTOR 10 /* 2^10, carefully chosen */
#define CYC2NS_SCALE	((1000000 << CYC2NS_SCALE_FACTOR) / (DMA_FREQ / 1000))

/*
 * Convert a raw timer tick count into nanoseconds using the
 * precomputed fixed-point scale above.
 */
static unsigned long long cycles2ns(unsigned long cycl)
{
	unsigned long long scaled = (unsigned long long)cycl * CYC2NS_SCALE;

	return scaled >> CYC2NS_SCALE_FACTOR;
}
/*
 * Scheduler clock: current DMA timer 0 count, converted to
 * nanoseconds.
 */
unsigned long long sched_clock(void)
{
	return cycles2ns(__raw_readl(DTCN0));
}
| gpl-2.0 |
ciwrl/android_kernel_huawei_msm8939 | arch/avr32/kernel/avr32_ksyms.c | 11561 | 1827 | /*
* Export AVR32-specific functions for loadable modules.
*
* Copyright (C) 2004-2006 Atmel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/module.h>
#include <asm/checksum.h>
#include <asm/uaccess.h>
/*
 * GCC helper functions: 64-bit shift routines the compiler emits calls
 * to for long long arithmetic; implemented in assembly elsewhere.
 */
extern unsigned long long __avr32_lsl64(unsigned long long u, unsigned long b);
extern unsigned long long __avr32_lsr64(unsigned long long u, unsigned long b);
extern unsigned long long __avr32_asr64(unsigned long long u, unsigned long b);
EXPORT_SYMBOL(__avr32_lsl64);
EXPORT_SYMBOL(__avr32_lsr64);
EXPORT_SYMBOL(__avr32_asr64);

/*
 * String functions
 */
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memcpy);

/* Page copy/clear primitives */
EXPORT_SYMBOL(clear_page);
EXPORT_SYMBOL(copy_page);

/*
 * Userspace access stuff.
 */
EXPORT_SYMBOL(copy_from_user);
EXPORT_SYMBOL(copy_to_user);
EXPORT_SYMBOL(__copy_user);
EXPORT_SYMBOL(strncpy_from_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(clear_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(strnlen_user);

/* Checksumming (asm/checksum.h) */
EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);

/* Delay loops (lib/delay.S) */
EXPORT_SYMBOL(__ndelay);
EXPORT_SYMBOL(__udelay);
EXPORT_SYMBOL(__const_udelay);

/* Bit operations (lib/findbit.S) */
EXPORT_SYMBOL(find_first_zero_bit);
EXPORT_SYMBOL(find_next_zero_bit);
EXPORT_SYMBOL(find_first_bit);
EXPORT_SYMBOL(find_next_bit);
EXPORT_SYMBOL(find_next_bit_le);
EXPORT_SYMBOL(find_next_zero_bit_le);

/* I/O primitives (lib/io-*.S) */
EXPORT_SYMBOL(__raw_readsb);
EXPORT_SYMBOL(__raw_readsw);
EXPORT_SYMBOL(__raw_readsl);
EXPORT_SYMBOL(__raw_writesb);
EXPORT_SYMBOL(__raw_writesw);
EXPORT_SYMBOL(__raw_writesl);
| gpl-2.0 |
BlazeDevs/android_kernel_samsung_msm8660-common | drivers/macintosh/ans-lcd.c | 12073 | 4048 | /*
* /dev/lcd driver for Apple Network Servers.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/io.h>
#include "ans-lcd.h"
/* Physical base of the LCD registers and the two register indexes. */
#define ANSLCD_ADDR		0xf301c000
#define ANSLCD_CTRL_IX 0x00
#define ANSLCD_DATA_IX 0x10

/* Per-byte settle delays in microseconds (passed to udelay());
 * tunable at runtime via the SETSHORTDELAY/SETLONGDELAY ioctls. */
static unsigned long anslcd_short_delay = 80;
static unsigned long anslcd_long_delay = 3280;

/* MMIO mapping of the LCD registers, set up in anslcd_init(). */
static volatile unsigned char __iomem *anslcd_ptr;

/* Serializes all access to the LCD hardware. */
static DEFINE_MUTEX(anslcd_mutex);

#undef DEBUG
/*
 * Write one byte to the LCD control register, then wait long enough
 * for the controller to finish.  Command codes 1-3 are slow
 * operations and get the long delay; everything else gets the short
 * one.
 */
static void
anslcd_write_byte_ctrl ( unsigned char c )
{
#ifdef DEBUG
	printk(KERN_DEBUG "LCD: CTRL byte: %02x\n",c);
#endif
	out_8(anslcd_ptr + ANSLCD_CTRL_IX, c);

	if (c >= 1 && c <= 3)
		udelay(anslcd_long_delay);
	else
		udelay(anslcd_short_delay);
}
/* Write one byte to the LCD data register and wait the short delay. */
static void
anslcd_write_byte_data ( unsigned char c )
{
	out_8(anslcd_ptr + ANSLCD_DATA_IX, c);
	udelay(anslcd_short_delay);
}
/*
 * write(2) handler: push each user byte straight to the LCD data
 * register.  Returns the number of bytes consumed.
 * NOTE(review): the __get_user() results are ignored; access_ok()
 * validated the range up front, so faults are not expected here, but
 * the per-byte copies are unchecked — confirm this is intentional.
 */
static ssize_t
anslcd_write( struct file * file, const char __user * buf,
	      size_t count, loff_t *ppos )
{
	const char __user *p = buf;
	int i;

#ifdef DEBUG
	printk(KERN_DEBUG "LCD: write\n");
#endif

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	mutex_lock(&anslcd_mutex);
	for ( i = *ppos; count > 0; ++i, ++p, --count )
	{
		char c;
		__get_user(c, p);
		anslcd_write_byte_data( c );
	}
	mutex_unlock(&anslcd_mutex);
	*ppos = i;
	return p - buf;
}
/*
 * ioctl handler: clear/reinit the display, stream raw control bytes
 * from userspace, or (admin only) tune the write delays.  All paths
 * are serialized by anslcd_mutex.
 *
 * Fix: the DEBUG printk used "%d" for both arguments, but arg is an
 * unsigned long — a format/argument size mismatch (undefined behavior
 * on 64-bit).  Use "%u"/"%lu" to match the actual types.
 */
static long
anslcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	char ch, __user *temp;
	long ret = 0;

#ifdef DEBUG
	printk(KERN_DEBUG "LCD: ioctl(%u,%lu)\n", cmd, arg);
#endif

	mutex_lock(&anslcd_mutex);

	switch ( cmd )
	{
	case ANSLCD_CLEAR:
		/* Same reinit + clear + home sequence as anslcd_init(). */
		anslcd_write_byte_ctrl ( 0x38 );
		anslcd_write_byte_ctrl ( 0x0f );
		anslcd_write_byte_ctrl ( 0x06 );
		anslcd_write_byte_ctrl ( 0x01 );
		anslcd_write_byte_ctrl ( 0x02 );
		break;
	case ANSLCD_SENDCTRL:
		temp = (char __user *) arg;
		__get_user(ch, temp);
		for (; ch; temp++) { /* FIXME: This is ugly, but should work, as a \0 byte is not a valid command code */
			anslcd_write_byte_ctrl ( ch );
			__get_user(ch, temp);
		}
		break;
	case ANSLCD_SETSHORTDELAY:
		if (!capable(CAP_SYS_ADMIN))
			ret = -EACCES;
		else
			anslcd_short_delay = arg;
		break;
	case ANSLCD_SETLONGDELAY:
		if (!capable(CAP_SYS_ADMIN))
			ret = -EACCES;
		else
			anslcd_long_delay = arg;
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&anslcd_mutex);
	return ret;
}
/* open(2) is a no-op: the device keeps no per-file state. */
static int
anslcd_open( struct inode * inode, struct file * file )
{
	return 0;
}
/* File operations for the LCD character device. */
const struct file_operations anslcd_fops = {
	.write		= anslcd_write,
	.unlocked_ioctl	= anslcd_ioctl,
	.open		= anslcd_open,
	.llseek		= default_llseek,
};

/* Misc device: fixed minor ANSLCD_MINOR, node name "anslcd". */
static struct miscdevice anslcd_dev = {
	ANSLCD_MINOR,
	"anslcd",
	&anslcd_fops
};
/*
 * Boot logo written to display RAM by anslcd_init() (80 bytes).
 * The strings are in display-memory order, which interleaves the
 * physical rows (1,3,2,4) — hence the out-of-order line comments.
 * NOTE(review): each string should be exactly 20 characters to fill
 * a 4x20 panel; the padding spaces appear to have been lost here —
 * verify the literal lengths against the 80-byte write in
 * anslcd_init().
 */
const char anslcd_logo[] =	"********************"  /* Line #1 */
				"* LINUX! *"  /* Line #3 */
				"* Welcome to *"  /* Line #2 */
				"********************"; /* Line #4 */
/*
 * Probe for the device-tree "lcd" node (must be a child of "gc"), map
 * the LCD registers, register the misc device, initialize the
 * controller and write the boot logo.
 *
 * Returns 0 on success, -ENODEV if the node or the MMIO mapping is
 * missing, or the misc_register() error code.
 *
 * Fix: the ioremap() return value was never checked; a failed mapping
 * led to NULL-relative MMIO accesses in the write helpers below.
 */
static int __init
anslcd_init(void)
{
	int a;
	int retval;
	struct device_node* node;

	node = of_find_node_by_name(NULL, "lcd");
	if (!node || !node->parent || strcmp(node->parent->name, "gc")) {
		of_node_put(node);
		return -ENODEV;
	}
	of_node_put(node);

	anslcd_ptr = ioremap(ANSLCD_ADDR, 0x20);
	if (!anslcd_ptr)
		return -ENODEV;

	retval = misc_register(&anslcd_dev);
	if(retval < 0){
		printk(KERN_INFO "LCD: misc_register failed\n");
		iounmap(anslcd_ptr);
		return retval;
	}

#ifdef DEBUG
	printk(KERN_DEBUG "LCD: init\n");
#endif

	mutex_lock(&anslcd_mutex);
	/* Controller setup, then clear (0x01) and home (0x02); the
	 * command values are presumably HD44780-style — confirm against
	 * the panel's controller datasheet. */
	anslcd_write_byte_ctrl ( 0x38 );
	anslcd_write_byte_ctrl ( 0x0c );
	anslcd_write_byte_ctrl ( 0x06 );
	anslcd_write_byte_ctrl ( 0x01 );
	anslcd_write_byte_ctrl ( 0x02 );
	/* 80 bytes = 4 lines x 20 columns of logo text. */
	for(a=0;a<80;a++) {
		anslcd_write_byte_data(anslcd_logo[a]);
	}
	mutex_unlock(&anslcd_mutex);
	return 0;
}
/* Module teardown: unregister the misc device and drop the mapping. */
static void __exit
anslcd_exit(void)
{
	misc_deregister(&anslcd_dev);
	iounmap(anslcd_ptr);
}

module_init(anslcd_init);
module_exit(anslcd_exit);
| gpl-2.0 |
letama/android_kernel_nozomi | drivers/macintosh/ans-lcd.c | 12073 | 4048 | /*
* /dev/lcd driver for Apple Network Servers.
*/
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/miscdevice.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <asm/uaccess.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/io.h>
#include "ans-lcd.h"
/* Physical base of the LCD registers and the two register indexes. */
#define ANSLCD_ADDR		0xf301c000
#define ANSLCD_CTRL_IX 0x00
#define ANSLCD_DATA_IX 0x10

/* Per-byte settle delays in microseconds (passed to udelay());
 * tunable at runtime via the SETSHORTDELAY/SETLONGDELAY ioctls. */
static unsigned long anslcd_short_delay = 80;
static unsigned long anslcd_long_delay = 3280;

/* MMIO mapping of the LCD registers, set up in anslcd_init(). */
static volatile unsigned char __iomem *anslcd_ptr;

/* Serializes all access to the LCD hardware. */
static DEFINE_MUTEX(anslcd_mutex);

#undef DEBUG
/*
 * Write one byte to the LCD control register, then wait long enough
 * for the controller to finish.  Command codes 1-3 are slow
 * operations and get the long delay; everything else gets the short
 * one.
 */
static void
anslcd_write_byte_ctrl ( unsigned char c )
{
#ifdef DEBUG
	printk(KERN_DEBUG "LCD: CTRL byte: %02x\n",c);
#endif
	out_8(anslcd_ptr + ANSLCD_CTRL_IX, c);

	if (c >= 1 && c <= 3)
		udelay(anslcd_long_delay);
	else
		udelay(anslcd_short_delay);
}
/* Write one byte to the LCD data register and wait the short delay. */
static void
anslcd_write_byte_data ( unsigned char c )
{
	out_8(anslcd_ptr + ANSLCD_DATA_IX, c);
	udelay(anslcd_short_delay);
}
/*
 * write(2) handler: push each user byte straight to the LCD data
 * register.  Returns the number of bytes consumed.
 * NOTE(review): the __get_user() results are ignored; access_ok()
 * validated the range up front, so faults are not expected here, but
 * the per-byte copies are unchecked — confirm this is intentional.
 */
static ssize_t
anslcd_write( struct file * file, const char __user * buf,
	      size_t count, loff_t *ppos )
{
	const char __user *p = buf;
	int i;

#ifdef DEBUG
	printk(KERN_DEBUG "LCD: write\n");
#endif

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	mutex_lock(&anslcd_mutex);
	for ( i = *ppos; count > 0; ++i, ++p, --count )
	{
		char c;
		__get_user(c, p);
		anslcd_write_byte_data( c );
	}
	mutex_unlock(&anslcd_mutex);
	*ppos = i;
	return p - buf;
}
/*
 * ioctl handler: clear/reinit the display, stream raw control bytes
 * from userspace, or (admin only) tune the write delays.  All paths
 * are serialized by anslcd_mutex.
 *
 * Fix: the DEBUG printk used "%d" for both arguments, but arg is an
 * unsigned long — a format/argument size mismatch (undefined behavior
 * on 64-bit).  Use "%u"/"%lu" to match the actual types.
 */
static long
anslcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	char ch, __user *temp;
	long ret = 0;

#ifdef DEBUG
	printk(KERN_DEBUG "LCD: ioctl(%u,%lu)\n", cmd, arg);
#endif

	mutex_lock(&anslcd_mutex);

	switch ( cmd )
	{
	case ANSLCD_CLEAR:
		/* Same reinit + clear + home sequence as anslcd_init(). */
		anslcd_write_byte_ctrl ( 0x38 );
		anslcd_write_byte_ctrl ( 0x0f );
		anslcd_write_byte_ctrl ( 0x06 );
		anslcd_write_byte_ctrl ( 0x01 );
		anslcd_write_byte_ctrl ( 0x02 );
		break;
	case ANSLCD_SENDCTRL:
		temp = (char __user *) arg;
		__get_user(ch, temp);
		for (; ch; temp++) { /* FIXME: This is ugly, but should work, as a \0 byte is not a valid command code */
			anslcd_write_byte_ctrl ( ch );
			__get_user(ch, temp);
		}
		break;
	case ANSLCD_SETSHORTDELAY:
		if (!capable(CAP_SYS_ADMIN))
			ret = -EACCES;
		else
			anslcd_short_delay = arg;
		break;
	case ANSLCD_SETLONGDELAY:
		if (!capable(CAP_SYS_ADMIN))
			ret = -EACCES;
		else
			anslcd_long_delay = arg;
		break;
	default:
		ret = -EINVAL;
	}

	mutex_unlock(&anslcd_mutex);
	return ret;
}
/* open(2) is a no-op: the device keeps no per-file state. */
static int
anslcd_open( struct inode * inode, struct file * file )
{
	return 0;
}
/* File operations for the LCD character device. */
const struct file_operations anslcd_fops = {
	.write		= anslcd_write,
	.unlocked_ioctl	= anslcd_ioctl,
	.open		= anslcd_open,
	.llseek		= default_llseek,
};

/* Misc device: fixed minor ANSLCD_MINOR, node name "anslcd". */
static struct miscdevice anslcd_dev = {
	ANSLCD_MINOR,
	"anslcd",
	&anslcd_fops
};
/*
 * Boot logo written to display RAM by anslcd_init() (80 bytes).
 * The strings are in display-memory order, which interleaves the
 * physical rows (1,3,2,4) — hence the out-of-order line comments.
 * NOTE(review): each string should be exactly 20 characters to fill
 * a 4x20 panel; the padding spaces appear to have been lost here —
 * verify the literal lengths against the 80-byte write in
 * anslcd_init().
 */
const char anslcd_logo[] =	"********************"  /* Line #1 */
				"* LINUX! *"  /* Line #3 */
				"* Welcome to *"  /* Line #2 */
				"********************"; /* Line #4 */
/*
 * Probe for the device-tree "lcd" node (must be a child of "gc"), map
 * the LCD registers, register the misc device, initialize the
 * controller and write the boot logo.
 *
 * Returns 0 on success, -ENODEV if the node or the MMIO mapping is
 * missing, or the misc_register() error code.
 *
 * Fix: the ioremap() return value was never checked; a failed mapping
 * led to NULL-relative MMIO accesses in the write helpers below.
 */
static int __init
anslcd_init(void)
{
	int a;
	int retval;
	struct device_node* node;

	node = of_find_node_by_name(NULL, "lcd");
	if (!node || !node->parent || strcmp(node->parent->name, "gc")) {
		of_node_put(node);
		return -ENODEV;
	}
	of_node_put(node);

	anslcd_ptr = ioremap(ANSLCD_ADDR, 0x20);
	if (!anslcd_ptr)
		return -ENODEV;

	retval = misc_register(&anslcd_dev);
	if(retval < 0){
		printk(KERN_INFO "LCD: misc_register failed\n");
		iounmap(anslcd_ptr);
		return retval;
	}

#ifdef DEBUG
	printk(KERN_DEBUG "LCD: init\n");
#endif

	mutex_lock(&anslcd_mutex);
	/* Controller setup, then clear (0x01) and home (0x02); the
	 * command values are presumably HD44780-style — confirm against
	 * the panel's controller datasheet. */
	anslcd_write_byte_ctrl ( 0x38 );
	anslcd_write_byte_ctrl ( 0x0c );
	anslcd_write_byte_ctrl ( 0x06 );
	anslcd_write_byte_ctrl ( 0x01 );
	anslcd_write_byte_ctrl ( 0x02 );
	/* 80 bytes = 4 lines x 20 columns of logo text. */
	for(a=0;a<80;a++) {
		anslcd_write_byte_data(anslcd_logo[a]);
	}
	mutex_unlock(&anslcd_mutex);
	return 0;
}
/* Module teardown: unregister the misc device and drop the mapping. */
static void __exit
anslcd_exit(void)
{
	misc_deregister(&anslcd_dev);
	iounmap(anslcd_ptr);
}

module_init(anslcd_init);
module_exit(anslcd_exit);
| gpl-2.0 |
tmshlvck/omnia-linux | drivers/char/ipmi/ipmi_si_intf.c | 42 | 59991 | // SPDX-License-Identifier: GPL-2.0+
/*
* ipmi_si.c
*
* The interface to the IPMI driver for the system interfaces (KCS, SMIC,
* BT).
*
* Author: MontaVista Software, Inc.
* Corey Minyard <minyard@mvista.com>
* source@mvista.com
*
* Copyright 2002 MontaVista Software Inc.
* Copyright 2006 IBM Corp., Christian Krafft <krafft@de.ibm.com>
*/
/*
* This file holds the "policy" for the interface to the SMI state
* machine. It does the configuration, handles timers and interrupts,
* and drives the real SMI state machine.
*/
#define pr_fmt(fmt) "ipmi_si: " fmt
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/list.h>
#include <linux/notifier.h>
#include <linux/mutex.h>
#include <linux/kthread.h>
#include <asm/irq.h>
#include <linux/interrupt.h>
#include <linux/rcupdate.h>
#include <linux/ipmi.h>
#include <linux/ipmi_smi.h>
#include "ipmi_si.h"
#include "ipmi_si_sm.h"
#include <linux/string.h>
#include <linux/ctype.h>
/* Measure times between events in the driver. */
#undef DEBUG_TIMING
/* Call every 10 ms. */
#define SI_TIMEOUT_TIME_USEC 10000
#define SI_USEC_PER_JIFFY (1000000/HZ)
#define SI_TIMEOUT_JIFFIES (SI_TIMEOUT_TIME_USEC/SI_USEC_PER_JIFFY)
#define SI_SHORT_TIMEOUT_USEC 250 /* .25ms when the SM request a
short timeout */
/* Driver-level state machine tracked in smi_info->si_state. */
enum si_intf_state {
	SI_NORMAL,		/* idle, or running a user transaction */
	SI_GETTING_FLAGS,	/* sent GET_MSG_FLAGS, awaiting reply */
	SI_GETTING_EVENTS,	/* reading the event message buffer */
	SI_CLEARING_FLAGS,	/* clearing the watchdog pretimeout flag */
	SI_GETTING_MESSAGES,	/* fetching a queued received message */
	SI_CHECKING_ENABLES,	/* reading the BMC global enables */
	SI_SETTING_ENABLES	/* writing corrected global enables */
	/* FIXME - add watchdog stuff. */
};
/* Some BT-specific defines we need here. */
#define IPMI_BT_INTMASK_REG		2
#define IPMI_BT_INTMASK_CLEAR_IRQ_BIT	2
#define IPMI_BT_INTMASK_ENABLE_IRQ_BIT	1

/* Printable interface type names; indexed by the si_type value. */
static const char * const si_to_str[] = { "invalid", "kcs", "smic", "bt" };

/* Set once module init has completed.  NOTE(review): set and read
 * outside this chunk — confirm how it is synchronized. */
static bool initialized;
/*
 * Indexes into stats[] in smi_info below.  The SI_STAT_ prefix is
 * significant: smi_inc_stat()/smi_get_stat() build these names by
 * token pasting.
 */
enum si_stat_indexes {
	/*
	 * Number of times the driver requested a timer while an operation
	 * was in progress.
	 */
	SI_STAT_short_timeouts = 0,

	/*
	 * Number of times the driver requested a timer while nothing was in
	 * progress.
	 */
	SI_STAT_long_timeouts,

	/* Number of times the interface was idle while being polled. */
	SI_STAT_idles,

	/* Number of interrupts the driver handled. */
	SI_STAT_interrupts,

	/* Number of times the driver got an ATTN from the hardware. */
	SI_STAT_attentions,

	/* Number of times the driver requested flags from the hardware. */
	SI_STAT_flag_fetches,

	/* Number of times the hardware didn't follow the state machine. */
	SI_STAT_hosed_count,

	/* Number of completed messages. */
	SI_STAT_complete_transactions,

	/* Number of IPMI events received from the hardware. */
	SI_STAT_events,

	/* Number of watchdog pretimeouts. */
	SI_STAT_watchdog_pretimeouts,

	/* Number of asynchronous messages received. */
	SI_STAT_incoming_messages,

	/* This *must* remain last, add new values above this. */
	SI_NUM_STATS
};
/*
 * Per-interface state: links to the upper IPMI layer and the
 * low-level (KCS/SMIC/BT) state machine, the one-deep outgoing
 * message queue, timer/thread bookkeeping, quirk flags and counters.
 */
struct smi_info {
	int si_num;				/* interface number */
	struct ipmi_smi *intf;			/* upper-layer interface */
	struct si_sm_data *si_sm;		/* low-level SM private data */
	const struct si_sm_handlers *handlers;	/* low-level SM operations */
	spinlock_t si_lock;			/* protects the state below */
	struct ipmi_smi_msg *waiting_msg;	/* at most one queued msg (see sender()) */
	struct ipmi_smi_msg *curr_msg;		/* message currently in flight */
	enum si_intf_state si_state;		/* driver-level state machine */

	/*
	 * Used to handle the various types of I/O that can occur with
	 * IPMI
	 */
	struct si_sm_io io;

	/*
	 * Per-OEM handler, called from handle_flags(). Returns 1
	 * when handle_flags() needs to be re-run or 0 indicating it
	 * set si_state itself.
	 */
	int (*oem_data_avail_handler)(struct smi_info *smi_info);

	/*
	 * Flags from the last GET_MSG_FLAGS command, used when an ATTN
	 * is set to hold the flags until we are done handling everything
	 * from the flags.
	 */
#define RECEIVE_MSG_AVAIL	0x01
#define EVENT_MSG_BUFFER_FULL	0x02
#define WDT_PRE_TIMEOUT_INT	0x08
#define OEM0_DATA_AVAIL		0x20
#define OEM1_DATA_AVAIL		0x40
#define OEM2_DATA_AVAIL		0x80
#define OEM_DATA_AVAIL	(OEM0_DATA_AVAIL | \
			 OEM1_DATA_AVAIL | \
			 OEM2_DATA_AVAIL)
	unsigned char msg_flags;

	/* Does the BMC have an event buffer? */
	bool has_event_buffer;

	/*
	 * If set to true, this will request events the next time the
	 * state machine is idle.
	 */
	atomic_t req_events;

	/*
	 * If true, run the state machine to completion on every send
	 * call. Generally used after a panic to make sure stuff goes
	 * out.
	 */
	bool run_to_completion;

	/* The timer for this si. */
	struct timer_list si_timer;

	/* This flag is set, if the timer can be set */
	bool timer_can_start;

	/* This flag is set, if the timer is running (timer_pending() isn't enough) */
	bool timer_running;

	/* The time (in jiffies) the last timeout occurred at. */
	unsigned long last_timeout_jiffies;

	/* Are we waiting for the events, pretimeouts, received msgs? */
	atomic_t need_watch;

	/*
	 * The driver will disable interrupts when it gets into a
	 * situation where it cannot handle messages due to lack of
	 * memory. Once that situation clears up, it will re-enable
	 * interrupts.
	 */
	bool interrupt_disabled;

	/*
	 * Does the BMC support events?
	 */
	bool supports_event_msg_buff;

	/*
	 * Can we disable interrupts the global enables receive irq
	 * bit? There are currently two forms of brokenness, some
	 * systems cannot disable the bit (which is technically within
	 * the spec but a bad idea) and some systems have the bit
	 * forced to zero even though interrupts work (which is
	 * clearly outside the spec). The next bool tells which form
	 * of brokenness is present.
	 */
	bool cannot_disable_irq;

	/*
	 * Some systems are broken and cannot set the irq enable
	 * bit, even if they support interrupts.
	 */
	bool irq_enable_broken;

	/* Is the driver in maintenance mode? */
	bool in_maintenance_mode;

	/*
	 * Did we get an attention that we did not handle?
	 */
	bool got_attn;

	/* From the get device id response... */
	struct ipmi_device_id device_id;

	/* Have we added the device group to the device? */
	bool dev_group_added;

	/* Counters and things for the proc filesystem. */
	atomic_t stats[SI_NUM_STATS];

	struct task_struct *thread;		/* kipmid polling thread */

	struct list_head link;			/* list of all interfaces */
};
/* Bump / read a per-interface statistic by its short name. */
#define smi_inc_stat(smi, stat) \
	atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
#define smi_get_stat(smi, stat) \
	((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))

#define IPMI_MAX_INTFS 4
/* Per-interface kipmid controls; presumably bound to module_param
 * calls elsewhere in the file. */
static int force_kipmid[IPMI_MAX_INTFS];
static int num_force_kipmid;
static unsigned int kipmid_max_busy_us[IPMI_MAX_INTFS];
static int num_max_busy_us;

static bool unload_when_empty = true;

static int try_smi_init(struct smi_info *smi);
static void cleanup_one_si(struct smi_info *smi_info);
static void cleanup_ipmi_si(void);
#ifdef DEBUG_TIMING
/* Emit a timestamped trace marker; compiled out unless DEBUG_TIMING. */
void debug_timestamp(char *msg)
{
	struct timespec64 t;

	ktime_get_ts64(&t);
	pr_debug("**%s: %lld.%9.9ld\n", msg, t.tv_sec, t.tv_nsec);
}
#else
#define debug_timestamp(x)
#endif
/* Notifier chain called at the start of each transaction
 * (see start_next_msg()). */
static ATOMIC_NOTIFIER_HEAD(xaction_notifier_list);
static int register_xaction_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&xaction_notifier_list, nb);
}

static void deliver_recv_msg(struct smi_info *smi_info,
			     struct ipmi_smi_msg *msg)
{
	/* Deliver the message to the upper layer. */
	ipmi_smi_msg_received(smi_info->intf, msg);
}
/*
 * Complete curr_msg with an error response built in place: the
 * request netfn gets the response bit (|4) and cCode becomes the
 * completion code, clamped to IPMI_ERR_UNSPECIFIED if out of range.
 */
static void return_hosed_msg(struct smi_info *smi_info, int cCode)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;

	if (cCode < 0 || cCode > IPMI_ERR_UNSPECIFIED)
		cCode = IPMI_ERR_UNSPECIFIED;
	/* else use it as is */

	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4;
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = cCode;
	msg->rsp_size = 3;

	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}
/*
 * Promote the (single) queued message, if any, to current and start
 * its transaction.  Returns SI_SM_IDLE when nothing is queued,
 * SI_SM_CALL_WITHOUT_DELAY otherwise.  A notifier in the chain can
 * veto the start via NOTIFY_STOP_MASK; a failed start_transaction()
 * completes the message with an error response.
 */
static enum si_sm_result start_next_msg(struct smi_info *smi_info)
{
	int rv;

	if (!smi_info->waiting_msg) {
		smi_info->curr_msg = NULL;
		rv = SI_SM_IDLE;
	} else {
		int err;

		smi_info->curr_msg = smi_info->waiting_msg;
		smi_info->waiting_msg = NULL;
		debug_timestamp("Start2");
		err = atomic_notifier_call_chain(&xaction_notifier_list,
				0, smi_info);
		if (err & NOTIFY_STOP_MASK) {
			rv = SI_SM_CALL_WITHOUT_DELAY;
			goto out;
		}
		err = smi_info->handlers->start_transaction(
			smi_info->si_sm,
			smi_info->curr_msg->data,
			smi_info->curr_msg->data_size);
		if (err)
			return_hosed_msg(smi_info, err);
		rv = SI_SM_CALL_WITHOUT_DELAY;
	}
out:
	return rv;
}
/*
 * (Re)arm the per-interface timer unless timer starts are currently
 * forbidden (timer_can_start is presumably cleared on teardown paths
 * outside this chunk — confirm).  Also records when the timer was
 * armed for the timeout bookkeeping.
 */
static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
{
	if (!smi_info->timer_can_start)
		return;
	smi_info->last_timeout_jiffies = jiffies;
	mod_timer(&smi_info->si_timer, new_val);
	smi_info->timer_running = true;
}
/*
 * Start a new message and (re)start the timer and thread.
 */
static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
			  unsigned int size)
{
	/* Arm the 10ms tick for this transaction and wake kipmid. */
	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);

	if (smi_info->thread)
		wake_up_process(smi_info->thread);

	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
}
/*
 * Kick off a GET_BMC_GLOBAL_ENABLES transaction; the reply is
 * processed in the SI_CHECKING_ENABLES case of
 * handle_transaction_done().
 */
static void start_check_enables(struct smi_info *smi_info)
{
	unsigned char msg[2] = {
		IPMI_NETFN_APP_REQUEST << 2,
		IPMI_GET_BMC_GLOBAL_ENABLES_CMD,
	};

	start_new_msg(smi_info, msg, 2);
	smi_info->si_state = SI_CHECKING_ENABLES;
}
/*
 * Kick off a CLEAR_MSG_FLAGS transaction that clears the watchdog
 * pre-timeout flag, so it is not set at startup.
 */
static void start_clear_flags(struct smi_info *smi_info)
{
	unsigned char msg[3] = {
		IPMI_NETFN_APP_REQUEST << 2,
		IPMI_CLEAR_MSG_FLAGS_CMD,
		WDT_PRE_TIMEOUT_INT,
	};

	start_new_msg(smi_info, msg, 3);
	smi_info->si_state = SI_CLEARING_FLAGS;
}
/* Issue a GET_MSG to fetch one queued received message from the BMC;
 * the reply lands in the SI_GETTING_MESSAGES case of
 * handle_transaction_done(). */
static void start_getting_msg_queue(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
	smi_info->curr_msg->data_size = 2;
	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_MESSAGES;
}
/* Issue a READ_EVENT_MSG_BUFFER to fetch one event from the BMC;
 * the reply lands in the SI_GETTING_EVENTS case of
 * handle_transaction_done(). */
static void start_getting_events(struct smi_info *smi_info)
{
	smi_info->curr_msg->data[0] = (IPMI_NETFN_APP_REQUEST << 2);
	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
	smi_info->curr_msg->data_size = 2;
	start_new_msg(smi_info, smi_info->curr_msg->data,
		      smi_info->curr_msg->data_size);
	smi_info->si_state = SI_GETTING_EVENTS;
}
/*
 * When we have a situation where we run out of memory and cannot
* allocate messages, we just leave them in the BMC and run the system
* polled until we can allocate some memory. Once we have some
* memory, we will re-enable the interrupt.
*
* Note that we cannot just use disable_irq(), since the interrupt may
* be shared.
*/
/*
 * Begin turning off the receive irq while we cannot allocate
 * messages; the actual disable happens via the global-enables
 * transaction started by start_check_enables().  Returns true if a
 * transaction was started.
 */
static inline bool disable_si_irq(struct smi_info *smi_info)
{
	if (!smi_info->io.irq || smi_info->interrupt_disabled)
		return false;

	smi_info->interrupt_disabled = true;
	start_check_enables(smi_info);
	return true;
}
/*
 * Begin re-enabling the receive irq once memory pressure has cleared;
 * mirrors disable_si_irq().  Returns true if a transaction was
 * started.
 */
static inline bool enable_si_irq(struct smi_info *smi_info)
{
	if (!smi_info->io.irq || !smi_info->interrupt_disabled)
		return false;

	smi_info->interrupt_disabled = false;
	start_check_enables(smi_info);
	return true;
}
/*
 * Allocate a message. If unable to allocate, start the interrupt
 * disable process and return NULL. If able to allocate but
 * interrupts are disabled, free the message and return NULL after
 * starting the interrupt enable process.
 */
static struct ipmi_smi_msg *alloc_msg_handle_irq(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	msg = ipmi_alloc_smi_msg();
	if (!msg) {
		/* No irq to disable either: just drop back to idle. */
		if (!disable_si_irq(smi_info))
			smi_info->si_state = SI_NORMAL;
	} else if (enable_si_irq(smi_info)) {
		/* The enable transaction now owns the state machine;
		 * the caller must retry later. */
		ipmi_free_smi_msg(msg);
		msg = NULL;
	}
	return msg;
}
/*
 * Act on the flags fetched from the BMC, one condition at a time and
 * in priority order: watchdog pretimeout, received messages, events,
 * then OEM data.  May return with curr_msg == NULL when message
 * allocation failed (alloc_msg_handle_irq() then manages the state).
 */
static void handle_flags(struct smi_info *smi_info)
{
retry:
	if (smi_info->msg_flags & WDT_PRE_TIMEOUT_INT) {
		/* Watchdog pre-timeout */
		smi_inc_stat(smi_info, watchdog_pretimeouts);

		start_clear_flags(smi_info);
		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
		ipmi_smi_watchdog_pretimeout(smi_info->intf);
	} else if (smi_info->msg_flags & RECEIVE_MSG_AVAIL) {
		/* Messages available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_msg_queue(smi_info);
	} else if (smi_info->msg_flags & EVENT_MSG_BUFFER_FULL) {
		/* Events available. */
		smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
		if (!smi_info->curr_msg)
			return;

		start_getting_events(smi_info);
	} else if (smi_info->msg_flags & OEM_DATA_AVAIL &&
		   smi_info->oem_data_avail_handler) {
		if (smi_info->oem_data_avail_handler(smi_info))
			goto retry;
	} else
		smi_info->si_state = SI_NORMAL;
}
/*
 * Global enables we care about.
 */
#define GLOBAL_ENABLES_MASK (IPMI_BMC_EVT_MSG_BUFF | IPMI_BMC_RCV_MSG_INTR | \
			     IPMI_BMC_EVT_MSG_INTR)

/*
 * Compute the global-enables bits this interface wants, based on
 * event-buffer support and the current irq state/quirks.  *irq_on is
 * set when either interrupt bit ends up requested.
 * NOTE(review): the 'base' parameter is ignored here.
 */
static u8 current_global_enables(struct smi_info *smi_info, u8 base,
				 bool *irq_on)
{
	u8 enables = 0;

	if (smi_info->supports_event_msg_buff)
		enables |= IPMI_BMC_EVT_MSG_BUFF;

	/* Quirk: some systems cannot clear the receive-irq bit, so keep
	 * requesting it; others cannot set it at all (irq_enable_broken). */
	if (((smi_info->io.irq && !smi_info->interrupt_disabled) ||
	     smi_info->cannot_disable_irq) &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_RCV_MSG_INTR;

	if (smi_info->supports_event_msg_buff &&
	    smi_info->io.irq && !smi_info->interrupt_disabled &&
	    !smi_info->irq_enable_broken)
		enables |= IPMI_BMC_EVT_MSG_INTR;

	*irq_on = enables & (IPMI_BMC_EVT_MSG_INTR | IPMI_BMC_RCV_MSG_INTR);

	return enables;
}
/*
 * BT interfaces have their own interrupt-enable bit in the BT
 * interface mask register; bring it in line with the requested irq
 * state, touching the hardware only when it actually differs.
 */
static void check_bt_irq(struct smi_info *smi_info, bool irq_on)
{
	u8 irqstate = smi_info->io.inputb(&smi_info->io, IPMI_BT_INTMASK_REG);
	bool enabled = irqstate & IPMI_BT_INTMASK_ENABLE_IRQ_BIT;

	if (enabled == irq_on)
		return;

	smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
			     irq_on ? IPMI_BT_INTMASK_ENABLE_IRQ_BIT : 0);
}
/*
 * A transaction has completed in the low-level state machine.
 * Dispatch on si_state to consume the result and advance the
 * flag/event/message processing sequence.  Called from
 * smi_event_handler(); note the deliver_recv_msg() calls may release
 * the lock (see the in-line comments).
 */
static void handle_transaction_done(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg;

	debug_timestamp("Done");
	switch (smi_info->si_state) {
	case SI_NORMAL:
		if (!smi_info->curr_msg)
			break;

		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		deliver_recv_msg(smi_info, msg);
		break;

	case SI_GETTING_FLAGS:
	{
		unsigned char msg[4];
		unsigned int  len;

		/* We got the flags from the SMI, now handle them. */
		len = smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			/* Error fetching flags, just give up for now. */
			smi_info->si_state = SI_NORMAL;
		} else if (len < 4) {
			/*
			 * Hmm, no flags.  That's technically illegal, but
			 * don't use uninitialized data.
			 */
			smi_info->si_state = SI_NORMAL;
		} else {
			smi_info->msg_flags = msg[3];
			handle_flags(smi_info);
		}
		break;
	}

	case SI_CLEARING_FLAGS:
	{
		unsigned char msg[3];

		/* We cleared the flags. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 3);
		if (msg[2] != 0) {
			/* Error clearing flags */
			dev_warn(smi_info->io.dev,
				 "Error clearing flags: %2.2x\n", msg[2]);
		}
		smi_info->si_state = SI_NORMAL;
		break;
	}

	case SI_GETTING_EVENTS:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the event flag. */
			smi_info->msg_flags &= ~EVENT_MSG_BUFFER_FULL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, events);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_GETTING_MESSAGES:
	{
		smi_info->curr_msg->rsp_size
			= smi_info->handlers->get_result(
				smi_info->si_sm,
				smi_info->curr_msg->rsp,
				IPMI_MAX_MSG_LENGTH);

		/*
		 * Do this here because deliver_recv_msg() releases the
		 * lock, and a new message can be put in during the
		 * time the lock is released.
		 */
		msg = smi_info->curr_msg;
		smi_info->curr_msg = NULL;
		if (msg->rsp[2] != 0) {
			/* Error getting event, probably done. */
			msg->done(msg);

			/* Take off the msg flag. */
			smi_info->msg_flags &= ~RECEIVE_MSG_AVAIL;
			handle_flags(smi_info);
		} else {
			smi_inc_stat(smi_info, incoming_messages);

			/*
			 * Do this before we deliver the message
			 * because delivering the message releases the
			 * lock and something else can mess with the
			 * state.
			 */
			handle_flags(smi_info);

			deliver_recv_msg(smi_info, msg);
		}
		break;
	}

	case SI_CHECKING_ENABLES:
	{
		unsigned char msg[4];
		u8 enables;
		bool irq_on;

		/* We got the flags from the SMI, now handle them. */
		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0) {
			dev_warn(smi_info->io.dev,
				 "Couldn't get irq info: %x.\n", msg[2]);
			dev_warn(smi_info->io.dev,
				 "Maybe ok, but ipmi might run very slowly.\n");
			smi_info->si_state = SI_NORMAL;
			break;
		}
		enables = current_global_enables(smi_info, 0, &irq_on);
		if (smi_info->io.si_type == SI_BT)
			/* BT has its own interrupt enable bit. */
			check_bt_irq(smi_info, irq_on);
		if (enables != (msg[3] & GLOBAL_ENABLES_MASK)) {
			/* Enables are not correct, fix them. */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
			msg[2] = enables | (msg[3] & ~GLOBAL_ENABLES_MASK);
			smi_info->handlers->start_transaction(
				smi_info->si_sm, msg, 3);
			smi_info->si_state = SI_SETTING_ENABLES;
		} else if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}

	case SI_SETTING_ENABLES:
	{
		unsigned char msg[4];

		smi_info->handlers->get_result(smi_info->si_sm, msg, 4);
		if (msg[2] != 0)
			dev_warn(smi_info->io.dev,
				 "Could not set the global enables: 0x%x.\n",
				 msg[2]);

		if (smi_info->supports_event_msg_buff) {
			smi_info->curr_msg = ipmi_alloc_smi_msg();
			if (!smi_info->curr_msg) {
				smi_info->si_state = SI_NORMAL;
				break;
			}
			start_getting_events(smi_info);
		} else {
			smi_info->si_state = SI_NORMAL;
		}
		break;
	}
	}
}
/*
 * Called on timeouts and events.  Timeouts should pass the elapsed
 * time, interrupts should pass in zero.  Must be called with
 * si_lock held and interrupts disabled.  Returns the state machine's
 * result so callers can decide how soon to call again.
 */
static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
					   int time)
{
	enum si_sm_result si_sm_result;

restart:
	/*
	 * There used to be a loop here that waited a little while
	 * (around 25us) before giving up.  That turned out to be
	 * pointless, the minimum delays I was seeing were in the 300us
	 * range, which is far too long to wait in an interrupt.  So
	 * we just run until the state machine tells us something
	 * happened or it needs a delay.
	 */
	si_sm_result = smi_info->handlers->event(smi_info->si_sm, time);
	time = 0;
	while (si_sm_result == SI_SM_CALL_WITHOUT_DELAY)
		si_sm_result = smi_info->handlers->event(smi_info->si_sm, 0);

	if (si_sm_result == SI_SM_TRANSACTION_COMPLETE) {
		smi_inc_stat(smi_info, complete_transactions);

		handle_transaction_done(smi_info);
		goto restart;
	} else if (si_sm_result == SI_SM_HOSED) {
		smi_inc_stat(smi_info, hosed_count);

		/*
		 * Do this before return_hosed_msg, because that
		 * releases the lock.
		 */
		smi_info->si_state = SI_NORMAL;
		if (smi_info->curr_msg != NULL) {
			/*
			 * If we were handling a user message, format
			 * a response to send to the upper layer to
			 * tell it about the error.
			 */
			return_hosed_msg(smi_info, IPMI_ERR_UNSPECIFIED);
		}
		goto restart;
	}

	/*
	 * We prefer handling attn over new messages.  But don't do
	 * this if there is not yet an upper layer to handle anything.
	 */
	if (si_sm_result == SI_SM_ATTN || smi_info->got_attn) {
		unsigned char msg[2];

		if (smi_info->si_state != SI_NORMAL) {
			/*
			 * We got an ATTN, but we are doing something else.
			 * Handle the ATTN later.
			 */
			smi_info->got_attn = true;
		} else {
			smi_info->got_attn = false;
			smi_inc_stat(smi_info, attentions);

			/*
			 * Got a attn, send down a get message flags to see
			 * what's causing it.  It would be better to handle
			 * this in the upper layer, but due to the way
			 * interrupts work with the SMI, that's not really
			 * possible.
			 */
			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
			msg[1] = IPMI_GET_MSG_FLAGS_CMD;

			start_new_msg(smi_info, msg, 2);
			smi_info->si_state = SI_GETTING_FLAGS;
			goto restart;
		}
	}

	/* If we are currently idle, try to start the next message. */
	if (si_sm_result == SI_SM_IDLE) {
		smi_inc_stat(smi_info, idles);

		si_sm_result = start_next_msg(smi_info);
		if (si_sm_result != SI_SM_IDLE)
			goto restart;
	}

	if ((si_sm_result == SI_SM_IDLE)
	    && (atomic_read(&smi_info->req_events))) {
		/*
		 * We are idle and the upper layer requested that I fetch
		 * events, so do so.
		 */
		atomic_set(&smi_info->req_events, 0);

		/*
		 * Take this opportunity to check the interrupt and
		 * message enable state for the BMC.  The BMC can be
		 * asynchronously reset, and may thus get interrupts
		 * disable and messages disabled.
		 */
		if (smi_info->supports_event_msg_buff || smi_info->io.irq) {
			start_check_enables(smi_info);
		} else {
			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
			if (!smi_info->curr_msg)
				goto out;

			start_getting_events(smi_info);
		}
		goto restart;
	}

	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
		/* OK if it fails, the timer will just go off. */
		if (del_timer(&smi_info->si_timer))
			smi_info->timer_running = false;
	}

out:
	return si_sm_result;
}
/*
 * If the interface is idle (state SI_NORMAL, no message in flight),
 * re-arm the driving timer, wake the kipmid thread (if any), and try
 * to launch the next queued message.  Callers hold si_lock.
 */
static void check_start_timer_thread(struct smi_info *smi_info)
{
	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
		if (smi_info->thread)
			wake_up_process(smi_info->thread);
		start_next_msg(smi_info);
		/* Run the state machine once so the new message starts now. */
		smi_event_handler(smi_info, 0);
	}
}
static void flush_messages(void *send_info)
{
struct smi_info *smi_info = send_info;
enum si_sm_result result;
/*
* Currently, this function is called only in run-to-completion
* mode. This means we are single-threaded, no need for locks.
*/
result = smi_event_handler(smi_info, 0);
while (result != SI_SM_IDLE) {
udelay(SI_SHORT_TIMEOUT_USEC);
result = smi_event_handler(smi_info, SI_SHORT_TIMEOUT_USEC);
}
}
/*
 * Queue a message for transmission.  In run-to-completion mode the
 * message is simply parked in waiting_msg for flush_messages() to
 * drain; otherwise it is queued under si_lock and the timer/thread
 * machinery is kicked so processing starts promptly.  The message
 * layer guarantees only one message is outstanding at a time, hence
 * the BUG_ON below.
 */
static void sender(void *send_info,
		   struct ipmi_smi_msg *msg)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;
	debug_timestamp("Enqueue");
	if (smi_info->run_to_completion) {
		/*
		 * If we are running to completion, start it. Upper
		 * layer will call flush_messages to clear it out.
		 */
		smi_info->waiting_msg = msg;
		return;
	}
	spin_lock_irqsave(&smi_info->si_lock, flags);
	/*
	 * The following two lines don't need to be under the lock for
	 * the lock's sake, but they do need SMP memory barriers to
	 * avoid getting things out of order.  We are already claiming
	 * the lock, anyway, so just do it under the lock to avoid the
	 * ordering problem.
	 */
	BUG_ON(smi_info->waiting_msg);
	smi_info->waiting_msg = msg;
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
/*
 * Switch the interface in or out of run-to-completion (polled,
 * single-threaded) mode.  Entering the mode drains any pending work.
 */
static void set_run_to_completion(void *send_info, bool i_run_to_completion)
{
	struct smi_info *smi = send_info;

	smi->run_to_completion = i_run_to_completion;
	if (i_run_to_completion)
		flush_messages(smi);
}
/*
 * Use -1 as a special constant to tell that we are spinning in kipmid
 * looking for something and not delaying between checks
 */
#define IPMI_TIME_NOT_BUSY ns_to_ktime(-1ull)
/*
 * Decide whether kipmid should keep busy-waiting on the current
 * operation.  *busy_until tracks the end of the per-interface
 * busy-wait window (kipmid_max_busy_us module parameter); it is reset
 * to IPMI_TIME_NOT_BUSY whenever spinning is unlimited or not wanted.
 * Returns false only once the window's deadline has expired.
 */
static inline bool ipmi_thread_busy_wait(enum si_sm_result smi_result,
					 const struct smi_info *smi_info,
					 ktime_t *busy_until)
{
	unsigned int max_busy_us = 0;
	/* Pick up this interface's limit from the module parameter array. */
	if (smi_info->si_num < num_max_busy_us)
		max_busy_us = kipmid_max_busy_us[smi_info->si_num];
	if (max_busy_us == 0 || smi_result != SI_SM_CALL_WITH_DELAY)
		*busy_until = IPMI_TIME_NOT_BUSY;
	else if (*busy_until == IPMI_TIME_NOT_BUSY) {
		/* Start a new busy window ending max_busy_us from now. */
		*busy_until = ktime_get() + max_busy_us * NSEC_PER_USEC;
	} else {
		if (unlikely(ktime_get() > *busy_until)) {
			/* Window expired; tell the caller to stop spinning. */
			*busy_until = IPMI_TIME_NOT_BUSY;
			return false;
		}
	}
	return true;
}
/*
 * A busy-waiting loop for speeding up IPMI operation.
 *
 * Lousy hardware makes this hard. This is only enabled for systems
 * that are not BT and do not have interrupts. It starts spinning
 * when an operation is complete or until max_busy tells it to stop
 * (if that is enabled). See the paragraph on kimid_max_busy_us in
 * Documentation/IPMI.txt for details.
 */
static int ipmi_thread(void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	enum si_sm_result smi_result;
	ktime_t busy_until = IPMI_TIME_NOT_BUSY;
	set_user_nice(current, MAX_NICE);
	while (!kthread_should_stop()) {
		int busy_wait;
		spin_lock_irqsave(&(smi_info->si_lock), flags);
		smi_result = smi_event_handler(smi_info, 0);
		/*
		 * If the driver is doing something, there is a possible
		 * race with the timer.  If the timer handler see idle,
		 * and the thread here sees something else, the timer
		 * handler won't restart the timer even though it is
		 * required.  So start it here if necessary.
		 */
		if (smi_result != SI_SM_IDLE && !smi_info->timer_running)
			smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
		spin_unlock_irqrestore(&(smi_info->si_lock), flags);
		busy_wait = ipmi_thread_busy_wait(smi_result, smi_info,
						  &busy_until);
		if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			; /* do nothing */
		} else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait) {
			/*
			 * In maintenance mode we run as fast as
			 * possible to allow firmware updates to
			 * complete as fast as possible, but normally
			 * don't bang on the scheduler.
			 */
			if (smi_info->in_maintenance_mode)
				schedule();
			else
				usleep_range(100, 200);
		} else if (smi_result == SI_SM_IDLE) {
			if (atomic_read(&smi_info->need_watch)) {
				/* Still watched: nap briefly, then re-poll. */
				schedule_timeout_interruptible(100);
			} else {
				/* Wait to be woken up when we are needed. */
				__set_current_state(TASK_INTERRUPTIBLE);
				schedule();
			}
		} else {
			/* State machine wants a tick's delay. */
			schedule_timeout_interruptible(1);
		}
	}
	return 0;
}
/*
 * Drive the state machine once, for use when interrupts are not
 * available (e.g. panic-time watchdog pokes).  Locking is skipped in
 * run-to-completion mode, where we are single-threaded.
 */
static void poll(void *send_info)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags = 0;
	bool rtc = smi_info->run_to_completion;

	/*
	 * Make sure there is some delay in the poll loop so we can
	 * drive time forward and timeout things.
	 */
	udelay(10);

	if (!rtc)
		spin_lock_irqsave(&smi_info->si_lock, flags);
	smi_event_handler(smi_info, 10);
	if (!rtc)
		spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
/*
 * Ask the driver to fetch events from the BMC; the flag is consumed
 * by smi_event_handler() when the interface next goes idle.  A no-op
 * if the BMC has no usable event buffer.
 */
static void request_events(void *send_info)
{
	struct smi_info *smi_info = send_info;

	if (smi_info->has_event_buffer)
		atomic_set(&smi_info->req_events, 1);
}
/*
 * Record whether the message layer needs this interface watched (any
 * non-zero mask counts), then kick the timer/thread so watching
 * begins immediately if the interface is idle.
 */
static void set_need_watch(void *send_info, unsigned int watch_mask)
{
	struct smi_info *smi_info = send_info;
	unsigned long flags;

	atomic_set(&smi_info->need_watch, watch_mask ? 1 : 0);

	spin_lock_irqsave(&smi_info->si_lock, flags);
	check_start_timer_thread(smi_info);
	spin_unlock_irqrestore(&smi_info->si_lock, flags);
}
/*
 * Timer that periodically drives the state machine when interrupts
 * are absent or disabled.  Re-arms itself with a short timeout when
 * the state machine asks for one, a long timeout otherwise, and stops
 * entirely once the machine is idle (check_start_timer_thread()
 * restarts it when new work arrives).
 */
static void smi_timeout(struct timer_list *t)
{
	struct smi_info *smi_info = from_timer(smi_info, t, si_timer);
	enum si_sm_result smi_result;
	unsigned long flags;
	unsigned long jiffies_now;
	long time_diff;
	long timeout;
	spin_lock_irqsave(&(smi_info->si_lock), flags);
	debug_timestamp("Timer");
	jiffies_now = jiffies;
	/* Tell the state machine how much time has really elapsed. */
	time_diff = (((long)jiffies_now - (long)smi_info->last_timeout_jiffies)
		     * SI_USEC_PER_JIFFY);
	smi_result = smi_event_handler(smi_info, time_diff);
	if ((smi_info->io.irq) && (!smi_info->interrupt_disabled)) {
		/* Running with interrupts, only do long timeouts. */
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
		smi_inc_stat(smi_info, long_timeouts);
		goto do_mod_timer;
	}
	/*
	 * If the state machine asks for a short delay, then shorten
	 * the timer timeout.
	 */
	if (smi_result == SI_SM_CALL_WITH_DELAY) {
		smi_inc_stat(smi_info, short_timeouts);
		timeout = jiffies + 1;
	} else {
		smi_inc_stat(smi_info, long_timeouts);
		timeout = jiffies + SI_TIMEOUT_JIFFIES;
	}
do_mod_timer:
	if (smi_result != SI_SM_IDLE)
		smi_mod_timer(smi_info, timeout);
	else
		/* Idle: let the timer lapse until new work re-arms it. */
		smi_info->timer_running = false;
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
}
/*
 * Interrupt handler shared by all interface types; acknowledges the
 * BT interrupt (the only type with an explicit mask register) and
 * runs the state machine under si_lock.
 */
irqreturn_t ipmi_si_irq_handler(int irq, void *data)
{
	struct smi_info *smi_info = data;
	unsigned long flags;
	if (smi_info->io.si_type == SI_BT)
		/* We need to clear the IRQ flag for the BT interface. */
		smi_info->io.outputb(&smi_info->io, IPMI_BT_INTMASK_REG,
				     IPMI_BT_INTMASK_CLEAR_IRQ_BIT
				     | IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
	spin_lock_irqsave(&(smi_info->si_lock), flags);
	smi_inc_stat(smi_info, interrupts);
	debug_timestamp("Interrupt");
	smi_event_handler(smi_info, 0);
	spin_unlock_irqrestore(&(smi_info->si_lock), flags);
	return IRQ_HANDLED;
}
/*
 * Called by the message layer once the interface is registered.
 * Arms the timer that drives the interface, claims any interrupt,
 * and starts the kipmid polling thread when configuration or the
 * interface type calls for one.  Always returns 0.
 */
static int smi_start_processing(void *send_info,
				struct ipmi_smi *intf)
{
	struct smi_info *new_smi = send_info;
	int enable = 0;

	new_smi->intf = intf;

	/* Set up the timer that drives the interface. */
	timer_setup(&new_smi->si_timer, smi_timeout, 0);
	new_smi->timer_can_start = true;
	smi_mod_timer(new_smi, jiffies + SI_TIMEOUT_JIFFIES);

	/* Try to claim any interrupts. */
	if (new_smi->io.irq_setup) {
		new_smi->io.irq_handler_data = new_smi;
		new_smi->io.irq_setup(&new_smi->io);
	}

	/*
	 * Check if the user forcefully enabled the daemon.
	 */
	if (new_smi->si_num < num_force_kipmid)
		enable = force_kipmid[new_smi->si_num];
	/*
	 * The BT interface is efficient enough to not need a thread,
	 * and there is no need for a thread if we have interrupts.
	 */
	else if ((new_smi->io.si_type != SI_BT) && (!new_smi->io.irq))
		enable = 1;

	if (enable) {
		new_smi->thread = kthread_run(ipmi_thread, new_smi,
					      "kipmi%d", new_smi->si_num);
		if (IS_ERR(new_smi->thread)) {
			/*
			 * Keep the user-visible string on one line so it
			 * stays greppable (kernel coding style).
			 */
			dev_notice(new_smi->io.dev,
				   "Could not start kernel thread due to error %ld, only using timers to drive the interface\n",
				   PTR_ERR(new_smi->thread));
			new_smi->thread = NULL;
		}
	}

	return 0;
}
/*
 * Fill in addressing information about this interface for the
 * message layer.  Takes a device reference that the caller owns.
 */
static int get_smi_info(void *send_info, struct ipmi_smi_info *data)
{
	struct smi_info *smi = send_info;

	data->addr_src = smi->io.addr_source;
	data->addr_info = smi->io.addr_info;
	data->dev = smi->io.dev;
	get_device(smi->io.dev);
	return 0;
}
/*
 * Track maintenance mode (firmware update in progress); leaving it
 * cancels any pending event-fetch request.
 */
static void set_maintenance_mode(void *send_info, bool enable)
{
	struct smi_info *smi = send_info;

	if (!enable)
		atomic_set(&smi->req_events, 0);
	smi->in_maintenance_mode = enable;
}
static void shutdown_smi(void *send_info);

/*
 * Callbacks handed to the IPMI message layer via ipmi_register_smi();
 * the message layer drives this low-level interface through them.
 */
static const struct ipmi_smi_handlers handlers = {
	.owner                  = THIS_MODULE,
	.start_processing       = smi_start_processing,
	.shutdown               = shutdown_smi,
	.get_smi_info		= get_smi_info,
	.sender			= sender,
	.request_events		= request_events,
	.set_need_watch		= set_need_watch,
	.set_maintenance_mode   = set_maintenance_mode,
	.set_run_to_completion  = set_run_to_completion,
	.flush_messages		= flush_messages,
	.poll			= poll,
};
/* All discovered interfaces, protected by smi_infos_lock. */
static LIST_HEAD(smi_infos);
static DEFINE_MUTEX(smi_infos_lock);
static int smi_num; /* Used to sequence the SMIs */
/* Indexed by io.addr_space; see params_show(). */
static const char * const addr_space_to_str[] = { "i/o", "mem" };
module_param_array(force_kipmid, int, &num_force_kipmid, 0);
MODULE_PARM_DESC(force_kipmid, "Force the kipmi daemon to be enabled (1) or"
		 " disabled(0). Normally the IPMI driver auto-detects"
		 " this, but the value may be overridden by this parm.");
module_param(unload_when_empty, bool, 0);
MODULE_PARM_DESC(unload_when_empty, "Unload the module if no interfaces are"
		 " specified or found, default is 1. Setting to 0"
		 " is useful for hot add of devices using hotmod.");
module_param_array(kipmid_max_busy_us, uint, &num_max_busy_us, 0644);
MODULE_PARM_DESC(kipmid_max_busy_us,
		 "Max time (in microseconds) to busy-wait for IPMI data before"
		 " sleeping. 0 (default) means to wait forever. Set to 100-500"
		 " if kipmid is using up a lot of CPU time.");
/*
 * Final per-type interrupt setup.  Only the BT interface has an
 * interrupt mask register to program; other types need nothing here.
 */
void ipmi_irq_finish_setup(struct si_sm_io *io)
{
	if (io->si_type != SI_BT)
		return;
	/* Enable the interrupt in the BT interface. */
	io->outputb(io, IPMI_BT_INTMASK_REG, IPMI_BT_INTMASK_ENABLE_IRQ_BIT);
}
/*
 * Begin interrupt teardown.  Only the BT interface has an interrupt
 * mask register; writing zero masks its interrupt.
 */
void ipmi_irq_start_cleanup(struct si_sm_io *io)
{
	if (io->si_type != SI_BT)
		return;
	io->outputb(io, IPMI_BT_INTMASK_REG, 0);
}
/*
 * Undo ipmi_std_irq_setup(): mask the BT interrupt (if applicable)
 * before releasing the irq, so no interrupt fires during teardown.
 */
static void std_irq_cleanup(struct si_sm_io *io)
{
	ipmi_irq_start_cleanup(io);
	free_irq(io->irq, io->irq_handler_data);
}
/*
 * Claim io->irq as a shared interrupt.  On failure the irq is zeroed
 * so the driver falls back to polling; on success the BT interrupt
 * mask is programmed and std_irq_cleanup() is installed for teardown.
 * Returns 0 on success or when no irq is configured, else the
 * request_irq() error.
 */
int ipmi_std_irq_setup(struct si_sm_io *io)
{
	int rv;

	if (!io->irq)
		return 0;

	rv = request_irq(io->irq,
			 ipmi_si_irq_handler,
			 IRQF_SHARED,
			 SI_DEVICE_NAME,
			 io->irq_handler_data);
	if (rv) {
		/* Keep the message on one line so it stays greppable. */
		dev_warn(io->dev, "%s unable to claim interrupt %d, running polled\n",
			 SI_DEVICE_NAME, io->irq);
		io->irq = 0;
	} else {
		io->irq_cleanup = std_irq_cleanup;
		ipmi_irq_finish_setup(io);
		dev_info(io->dev, "Using irq %d\n", io->irq);
	}

	return rv;
}
/*
 * Synchronously run the state machine until the in-flight transaction
 * completes (used only at init time, before the timer/thread exist).
 * Sleeps a tick between polls when the machine asks for a delay.
 * Returns 0 on completion, -ENODEV if the state machine is hosed.
 */
static int wait_for_msg_done(struct smi_info *smi_info)
{
	enum si_sm_result smi_result;
	smi_result = smi_info->handlers->event(smi_info->si_sm, 0);
	for (;;) {
		if (smi_result == SI_SM_CALL_WITH_DELAY ||
		    smi_result == SI_SM_CALL_WITH_TICK_DELAY) {
			schedule_timeout_uninterruptible(1);
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, jiffies_to_usecs(1));
		} else if (smi_result == SI_SM_CALL_WITHOUT_DELAY) {
			smi_result = smi_info->handlers->event(
				smi_info->si_sm, 0);
		} else
			break;
	}
	if (smi_result == SI_SM_HOSED)
		/*
		 * We couldn't get the state machine to run, so whatever's at
		 * the port is probably not an IPMI SMI interface.
		 */
		return -ENODEV;
	return 0;
}
/*
 * Issue a Get Device ID command synchronously and parse the response
 * into smi_info->device_id.  Used during probe both as a BMC presence
 * check and to capture identity for the OEM quirk handlers.
 */
static int try_get_dev_id(struct smi_info *smi_info)
{
	unsigned char msg[2];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;
	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;
	/*
	 * Do a Get Device ID command, since it comes back with some
	 * useful info.
	 */
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_DEVICE_ID_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
	rv = wait_for_msg_done(smi_info);
	if (rv)
		goto out;
	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);
	/* Check and record info from the get device id, in case we need it. */
	/*
	 * NOTE(review): resp_len - 2 assumes get_result returned at
	 * least 2 bytes; presumably ipmi_demangle_device_id rejects
	 * short buffers -- confirm against its implementation.
	 */
	rv = ipmi_demangle_device_id(resp[0] >> 2, resp[1],
				     resp + 2, resp_len - 2, &smi_info->device_id);
out:
	kfree(resp);
	return rv;
}
/*
 * Synchronously read the BMC global enables byte into *enables.
 * Returns 0 on success, -ENOMEM on allocation failure, -EINVAL on a
 * malformed response, or the wait_for_msg_done() error.
 */
static int get_global_enables(struct smi_info *smi_info, u8 *enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;
	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from get global enables command: %d\n",
			 rv);
		goto out;
	}
	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);
	/* Expect: response netfn/cmd echo, zero completion code, data byte. */
	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from get global enables command: %ld %x %x %x\n",
			 resp_len, resp[0], resp[1], resp[2]);
		rv = -EINVAL;
		goto out;
	} else {
		*enables = resp[3];
	}
out:
	kfree(resp);
	return rv;
}
/*
 * Synchronously write the BMC global enables byte.
 * Returns 1 if it gets an error from the command (non-zero completion
 * code), 0 on success, or a negative errno on transport/parse failure.
 */
static int set_global_enables(struct smi_info *smi_info, u8 enables)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv;
	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;
	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = enables;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
	rv = wait_for_msg_done(smi_info);
	if (rv) {
		dev_warn(smi_info->io.dev,
			 "Error getting response from set global enables command: %d\n",
			 rv);
		goto out;
	}
	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);
	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		dev_warn(smi_info->io.dev,
			 "Invalid return from set global enables command: %ld %x %x\n",
			 resp_len, resp[0], resp[1]);
		rv = -EINVAL;
		goto out;
	}
	/* Non-zero completion code: the BMC rejected the write. */
	if (resp[2] != 0)
		rv = 1;
out:
	kfree(resp);
	return rv;
}
/*
 * Some BMCs do not support clearing the receive irq bit in the global
 * enables (even if they don't support interrupts on the BMC).  Check
 * for this and handle it properly.
 */
static void check_clr_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;
	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		if ((enables & IPMI_BMC_RCV_MSG_INTR) == 0)
			/* Already clear, should work ok. */
			return;
		enables &= ~IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}
	/* rv < 0: transport error; rv == 1: BMC rejected the write. */
	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check clearing the rcv irq: %d\n", rv);
		return;
	}
	if (rv) {
		/*
		 * An error when setting the event buffer bit means
		 * clearing the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support clearing the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
	}
}
/*
 * Some BMCs do not support setting the interrupt bits in the global
 * enables even if they support interrupts.  Clearly bad, but we can
 * compensate.
 */
static void check_set_rcv_irq(struct smi_info *smi_info)
{
	u8 enables = 0;
	int rv;
	/* Only relevant when an interrupt is actually configured. */
	if (!smi_info->io.irq)
		return;
	rv = get_global_enables(smi_info, &enables);
	if (!rv) {
		enables |= IPMI_BMC_RCV_MSG_INTR;
		rv = set_global_enables(smi_info, enables);
	}
	/* rv < 0: transport error; rv == 1: BMC rejected the write. */
	if (rv < 0) {
		dev_err(smi_info->io.dev,
			"Cannot check setting the rcv irq: %d\n", rv);
		return;
	}
	if (rv) {
		/*
		 * An error when setting the event buffer bit means
		 * setting the bit is not supported.
		 */
		dev_warn(smi_info->io.dev,
			 "The BMC does not support setting the recv irq bit, compensating, but the BMC needs to be fixed.\n");
		smi_info->cannot_disable_irq = true;
		smi_info->irq_enable_broken = true;
	}
}
/*
 * Read the BMC global enables and, if the event message buffer is not
 * already on, try to turn it on.  Sets supports_event_msg_buff on
 * success.  Returns 0 on success, -ENOENT if the BMC rejects the
 * event buffer bit (unsupported), or a negative errno on
 * communication/parse errors.
 */
static int try_enable_event_buffer(struct smi_info *smi_info)
{
	unsigned char msg[3];
	unsigned char *resp;
	unsigned long resp_len;
	int rv = 0;

	resp = kmalloc(IPMI_MAX_MSG_LENGTH, GFP_KERNEL);
	if (!resp)
		return -ENOMEM;

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		pr_warn("Error getting response from get global enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 4 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_GET_BMC_GLOBAL_ENABLES_CMD ||
	    resp[2] != 0) {
		pr_warn("Invalid return from get global enables command, cannot enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[3] & IPMI_BMC_EVT_MSG_BUFF) {
		/* buffer is already enabled, nothing to do. */
		smi_info->supports_event_msg_buff = true;
		goto out;
	}

	msg[0] = IPMI_NETFN_APP_REQUEST << 2;
	msg[1] = IPMI_SET_BMC_GLOBAL_ENABLES_CMD;
	msg[2] = resp[3] | IPMI_BMC_EVT_MSG_BUFF;
	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);

	rv = wait_for_msg_done(smi_info);
	if (rv) {
		/* Message fixed: was "set global, enables" (stray comma). */
		pr_warn("Error getting response from set global enables command, the event buffer is not enabled\n");
		goto out;
	}

	resp_len = smi_info->handlers->get_result(smi_info->si_sm,
						  resp, IPMI_MAX_MSG_LENGTH);

	if (resp_len < 3 ||
	    resp[0] != (IPMI_NETFN_APP_REQUEST | 1) << 2 ||
	    resp[1] != IPMI_SET_BMC_GLOBAL_ENABLES_CMD) {
		/*
		 * Message fixed: this is the *set* global enables
		 * response, not "get", and the old text was garbled.
		 */
		pr_warn("Invalid return from set global enables command, cannot enable the event buffer\n");
		rv = -EINVAL;
		goto out;
	}

	if (resp[2] != 0)
		/*
		 * An error when setting the event buffer bit means
		 * that the event buffer is not supported.
		 */
		rv = -ENOENT;
	else
		smi_info->supports_event_msg_buff = true;

out:
	kfree(resp);
	return rv;
}
/*
 * Generate a read-only sysfs attribute that reports one statistics
 * counter (smi_get_stat) of the interface bound to the device.
 */
#define IPMI_SI_ATTR(name) \
static ssize_t name##_show(struct device *dev,			\
			   struct device_attribute *attr,	\
			   char *buf)				\
{								\
	struct smi_info *smi_info = dev_get_drvdata(dev);	\
								\
	return snprintf(buf, 10, "%u\n", smi_get_stat(smi_info, name)); \
}								\
static DEVICE_ATTR(name, 0444, name##_show, NULL)
/* Sysfs "type": the interface's state machine name (kcs/smic/bt). */
static ssize_t type_show(struct device *dev,
			 struct device_attribute *attr,
			 char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);
	return snprintf(buf, 10, "%s\n", si_to_str[smi_info->io.si_type]);
}
static DEVICE_ATTR(type, 0444, type_show, NULL);
/*
 * Sysfs "interrupts_enabled": 1 only when an irq is configured and
 * has not been disabled by the driver.
 */
static ssize_t interrupts_enabled_show(struct device *dev,
				       struct device_attribute *attr,
				       char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);
	int enabled = 0;

	if (smi_info->io.irq && !smi_info->interrupt_disabled)
		enabled = 1;
	return snprintf(buf, 10, "%d\n", enabled);
}
static DEVICE_ATTR(interrupts_enabled, 0444,
		   interrupts_enabled_show, NULL);
/* One read-only sysfs counter attribute per statistic. */
IPMI_SI_ATTR(short_timeouts);
IPMI_SI_ATTR(long_timeouts);
IPMI_SI_ATTR(idles);
IPMI_SI_ATTR(interrupts);
IPMI_SI_ATTR(attentions);
IPMI_SI_ATTR(flag_fetches);
IPMI_SI_ATTR(hosed_count);
IPMI_SI_ATTR(complete_transactions);
IPMI_SI_ATTR(events);
IPMI_SI_ATTR(watchdog_pretimeouts);
IPMI_SI_ATTR(incoming_messages);
/*
 * Sysfs "params": the interface's settings in the same comma-separated
 * format accepted by the hotmod/hardcode module parameters.
 */
static ssize_t params_show(struct device *dev,
			   struct device_attribute *attr,
			   char *buf)
{
	struct smi_info *smi_info = dev_get_drvdata(dev);
	return snprintf(buf, 200,
			"%s,%s,0x%lx,rsp=%d,rsi=%d,rsh=%d,irq=%d,ipmb=%d\n",
			si_to_str[smi_info->io.si_type],
			addr_space_to_str[smi_info->io.addr_space],
			smi_info->io.addr_data,
			smi_info->io.regspacing,
			smi_info->io.regsize,
			smi_info->io.regshift,
			smi_info->io.irq,
			smi_info->io.slave_addr);
}
static DEVICE_ATTR(params, 0444, params_show, NULL);
/* Attribute group registered on the device by try_smi_init(). */
static struct attribute *ipmi_si_dev_attrs[] = {
	&dev_attr_type.attr,
	&dev_attr_interrupts_enabled.attr,
	&dev_attr_short_timeouts.attr,
	&dev_attr_long_timeouts.attr,
	&dev_attr_idles.attr,
	&dev_attr_interrupts.attr,
	&dev_attr_attentions.attr,
	&dev_attr_flag_fetches.attr,
	&dev_attr_hosed_count.attr,
	&dev_attr_complete_transactions.attr,
	&dev_attr_events.attr,
	&dev_attr_watchdog_pretimeouts.attr,
	&dev_attr_incoming_messages.attr,
	&dev_attr_params.attr,
	NULL
};
static const struct attribute_group ipmi_si_dev_attr_group = {
	.attrs		= ipmi_si_dev_attrs,
};
/*
 * oem_data_avail_to_receive_msg_avail
 * @info - smi_info structure with msg_flags set
 *
 * Converts flags from OEM_DATA_AVAIL to RECEIVE_MSG_AVAIL
 * Returns 1 indicating need to re-run handle_flags().
 */
static int oem_data_avail_to_receive_msg_avail(struct smi_info *smi_info)
{
	smi_info->msg_flags = ((smi_info->msg_flags & ~OEM_DATA_AVAIL) |
			       RECEIVE_MSG_AVAIL);
	return 1;
}
/*
 * setup_dell_poweredge_oem_data_handler
 * @info - smi_info.device_id must be populated
 *
 * Systems that match, but have firmware version < 1.40 may assert
 * OEM0_DATA_AVAIL on their own, without being told via Set Flags that
 * it's safe to do so.  Such systems will de-assert OEM1_DATA_AVAIL
 * upon receipt of IPMI_GET_MSG_CMD, so we should treat these flags
 * as RECEIVE_MSG_AVAIL instead.
 *
 * As Dell has no plans to release IPMI 1.5 firmware that *ever*
 * assert the OEM[012] bits, and if it did, the driver would have to
 * change to handle that properly, we don't actually check for the
 * firmware version.
 * Device ID = 0x20                BMC on PowerEdge 8G servers
 * Device Revision = 0x80
 * Firmware Revision1 = 0x01       BMC version 1.40
 * Firmware Revision2 = 0x40       BCD encoded
 * IPMI Version = 0x51             IPMI 1.5
 * Manufacturer ID = A2 02 00      Dell IANA
 *
 * Additionally, PowerEdge systems with IPMI < 1.5 may also assert
 * OEM0_DATA_AVAIL and needs to be treated as RECEIVE_MSG_AVAIL.
 *
 */
#define DELL_POWEREDGE_8G_BMC_DEVICE_ID  0x20
#define DELL_POWEREDGE_8G_BMC_DEVICE_REV 0x80
#define DELL_POWEREDGE_8G_BMC_IPMI_VERSION 0x51
#define DELL_IANA_MFR_ID 0x0002a2
static void setup_dell_poweredge_oem_data_handler(struct smi_info *smi_info)
{
	struct ipmi_device_id *id = &smi_info->device_id;
	if (id->manufacturer_id == DELL_IANA_MFR_ID) {
		/* 8G BMC: always apply the quirk, regardless of firmware. */
		if (id->device_id       == DELL_POWEREDGE_8G_BMC_DEVICE_ID &&
		    id->device_revision == DELL_POWEREDGE_8G_BMC_DEVICE_REV &&
		    id->ipmi_version    == DELL_POWEREDGE_8G_BMC_IPMI_VERSION) {
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		} else if (ipmi_version_major(id) < 1 ||
			   (ipmi_version_major(id) == 1 &&
			    ipmi_version_minor(id) < 5)) {
			/* Any Dell BMC older than IPMI 1.5. */
			smi_info->oem_data_avail_handler =
				oem_data_avail_to_receive_msg_avail;
		}
	}
}
#define CANNOT_RETURN_REQUESTED_LENGTH 0xCA
/*
 * Synthesize an error response (completion code 0xCA, "cannot return
 * requested length") for the current message and deliver it upward
 * without touching the hardware.  Used by the Dell BT quirk below.
 */
static void return_hosed_msg_badsize(struct smi_info *smi_info)
{
	struct ipmi_smi_msg *msg = smi_info->curr_msg;
	/* Make it a response */
	msg->rsp[0] = msg->data[0] | 4; /* set the response bit in the netfn */
	msg->rsp[1] = msg->data[1];
	msg->rsp[2] = CANNOT_RETURN_REQUESTED_LENGTH;
	msg->rsp_size = 3;
	smi_info->curr_msg = NULL;
	deliver_recv_msg(smi_info, msg);
}
/*
 * dell_poweredge_bt_xaction_handler
 * @info - smi_info.device_id must be populated
 *
 * Dell PowerEdge servers with the BT interface (x6xx and 1750) will
 * not respond to a Get SDR command if the length of the data
 * requested is exactly 0x3A, which leads to command timeouts and no
 * data returned.  This intercepts such commands, and causes userspace
 * callers to try again with a different-sized buffer, which succeeds.
 */
#define STORAGE_NETFN 0x0A
#define STORAGE_CMD_GET_SDR 0x23
static int dell_poweredge_bt_xaction_handler(struct notifier_block *self,
					     unsigned long unused,
					     void *in)
{
	struct smi_info *smi_info = in;
	unsigned char *data = smi_info->curr_msg->data;
	unsigned int size   = smi_info->curr_msg->data_size;
	/* Get SDR with the problematic requested length 0x3A? */
	if (size >= 8 &&
	    (data[0]>>2) == STORAGE_NETFN &&
	    data[1] == STORAGE_CMD_GET_SDR &&
	    data[7] == 0x3A) {
		/* Short-circuit with an error so userspace retries. */
		return_hosed_msg_badsize(smi_info);
		return NOTIFY_STOP;
	}
	return NOTIFY_DONE;
}
static struct notifier_block dell_poweredge_bt_xaction_notifier = {
	.notifier_call	= dell_poweredge_bt_xaction_handler,
};
/*
 * Install the Dell PowerEdge BT transaction quirk notifier when this
 * interface is a BT interface on a Dell (IANA 0x0002a2) BMC.
 * smi_info.device_id must already be populated.
 */
static void
setup_dell_poweredge_bt_xaction_handler(struct smi_info *smi_info)
{
	if (smi_info->io.si_type != SI_BT)
		return;
	if (smi_info->device_id.manufacturer_id == DELL_IANA_MFR_ID)
		register_xaction_notifier(&dell_poweredge_bt_xaction_notifier);
}
/*
 * setup_oem_data_handler
 * @info - smi_info.device_id must be filled in already
 *
 * Fills in smi_info.device_id.oem_data_available_handler
 * when we know what function to use there.
 */
static void setup_oem_data_handler(struct smi_info *smi_info)
{
	/* Currently only the Dell PowerEdge quirk exists. */
	setup_dell_poweredge_oem_data_handler(smi_info);
}
/* Install any per-vendor transaction pre-hooks for this interface. */
static void setup_xaction_handlers(struct smi_info *smi_info)
{
	setup_dell_poweredge_bt_xaction_handler(smi_info);
}
/* Probe for BMCs with broken receive-irq enable/disable handling. */
static void check_for_broken_irqs(struct smi_info *smi_info)
{
	check_clr_rcv_irq(smi_info);
	check_set_rcv_irq(smi_info);
}
/*
 * Stop the kipmid thread (if running), then forbid the timer from
 * being re-armed and wait for any in-flight timer to finish.
 */
static inline void stop_timer_and_thread(struct smi_info *smi_info)
{
	if (smi_info->thread) {
		kthread_stop(smi_info->thread);
		smi_info->thread = NULL;
	}

	smi_info->timer_can_start = false;
	del_timer_sync(&smi_info->si_timer);
}
/*
 * Look for an already-registered interface at the same address space
 * and address as @info.  Must be called with smi_infos_lock held.
 * Returns the duplicate, or NULL if none exists.
 */
static struct smi_info *find_dup_si(struct smi_info *info)
{
	struct smi_info *e;

	list_for_each_entry(e, &smi_infos, link) {
		if (e->io.addr_space != info->io.addr_space ||
		    e->io.addr_data != info->io.addr_data)
			continue;

		/*
		 * This is a cheap hack, ACPI doesn't have a defined
		 * slave address but SMBIOS does.  Pick it up from
		 * any source that has it available.
		 */
		if (info->io.slave_addr && !e->io.slave_addr)
			e->io.slave_addr = info->io.slave_addr;
		return e;
	}

	return NULL;
}
/*
 * Register a newly discovered interface described by @io.  Handles
 * duplicates (preferring hard-coded entries and ACPI over SMBIOS) and
 * initializes the interface immediately if the driver is already up.
 * Returns 0 on success or a negative errno.
 */
int ipmi_si_add_smi(struct si_sm_io *io)
{
	int rv = 0;
	struct smi_info *new_smi, *dup;

	/*
	 * If the user gave us a hard-coded device at the same
	 * address, they presumably want us to use it and not what is
	 * in the firmware.
	 */
	if (io->addr_source != SI_HARDCODED && io->addr_source != SI_HOTMOD &&
	    ipmi_si_hardcode_match(io->addr_space, io->addr_data)) {
		/* Fixed: message was missing its newline terminator. */
		dev_info(io->dev,
			 "Hard-coded device at this address already exists\n");
		return -ENODEV;
	}

	if (!io->io_setup) {
		if (io->addr_space == IPMI_IO_ADDR_SPACE) {
			io->io_setup = ipmi_si_port_setup;
		} else if (io->addr_space == IPMI_MEM_ADDR_SPACE) {
			io->io_setup = ipmi_si_mem_setup;
		} else {
			return -EINVAL;
		}
	}

	new_smi = kzalloc(sizeof(*new_smi), GFP_KERNEL);
	if (!new_smi)
		return -ENOMEM;
	spin_lock_init(&new_smi->si_lock);

	new_smi->io = *io;

	mutex_lock(&smi_infos_lock);
	dup = find_dup_si(new_smi);
	if (dup) {
		if (new_smi->io.addr_source == SI_ACPI &&
		    dup->io.addr_source == SI_SMBIOS) {
			/* We prefer ACPI over SMBIOS. */
			dev_info(dup->io.dev,
				 "Removing SMBIOS-specified %s state machine in favor of ACPI\n",
				 si_to_str[new_smi->io.si_type]);
			cleanup_one_si(dup);
		} else {
			dev_info(new_smi->io.dev,
				 "%s-specified %s state machine: duplicate\n",
				 ipmi_addr_src_to_str(new_smi->io.addr_source),
				 si_to_str[new_smi->io.si_type]);
			rv = -EBUSY;
			kfree(new_smi);
			goto out_err;
		}
	}

	pr_info("Adding %s-specified %s state machine\n",
		ipmi_addr_src_to_str(new_smi->io.addr_source),
		si_to_str[new_smi->io.si_type]);

	list_add_tail(&new_smi->link, &smi_infos);

	if (initialized)
		rv = try_smi_init(new_smi);
out_err:
	mutex_unlock(&smi_infos_lock);
	return rv;
}
/*
* Try to start up an interface. Must be called with smi_infos_lock
* held, primarily to keep smi_num consistent, we only one to do these
* one at a time.
*/
static int try_smi_init(struct smi_info *new_smi)
{
int rv = 0;
int i;
pr_info("Trying %s-specified %s state machine at %s address 0x%lx, slave address 0x%x, irq %d\n",
ipmi_addr_src_to_str(new_smi->io.addr_source),
si_to_str[new_smi->io.si_type],
addr_space_to_str[new_smi->io.addr_space],
new_smi->io.addr_data,
new_smi->io.slave_addr, new_smi->io.irq);
switch (new_smi->io.si_type) {
case SI_KCS:
new_smi->handlers = &kcs_smi_handlers;
break;
case SI_SMIC:
new_smi->handlers = &smic_smi_handlers;
break;
case SI_BT:
new_smi->handlers = &bt_smi_handlers;
break;
default:
/* No support for anything else yet. */
rv = -EIO;
goto out_err;
}
new_smi->si_num = smi_num;
/* Do this early so it's available for logs. */
if (!new_smi->io.dev) {
pr_err("IPMI interface added with no device\n");
rv = EIO;
goto out_err;
}
/* Allocate the state machine's data and initialize it. */
new_smi->si_sm = kmalloc(new_smi->handlers->size(), GFP_KERNEL);
if (!new_smi->si_sm) {
rv = -ENOMEM;
goto out_err;
}
new_smi->io.io_size = new_smi->handlers->init_data(new_smi->si_sm,
&new_smi->io);
/* Now that we know the I/O size, we can set up the I/O. */
rv = new_smi->io.io_setup(&new_smi->io);
if (rv) {
dev_err(new_smi->io.dev, "Could not set up I/O space\n");
goto out_err;
}
/* Do low-level detection first. */
if (new_smi->handlers->detect(new_smi->si_sm)) {
if (new_smi->io.addr_source)
dev_err(new_smi->io.dev,
"Interface detection failed\n");
rv = -ENODEV;
goto out_err;
}
/*
* Attempt a get device id command. If it fails, we probably
* don't have a BMC here.
*/
rv = try_get_dev_id(new_smi);
if (rv) {
if (new_smi->io.addr_source)
dev_err(new_smi->io.dev,
"There appears to be no BMC at this location\n");
goto out_err;
}
setup_oem_data_handler(new_smi);
setup_xaction_handlers(new_smi);
check_for_broken_irqs(new_smi);
new_smi->waiting_msg = NULL;
new_smi->curr_msg = NULL;
atomic_set(&new_smi->req_events, 0);
new_smi->run_to_completion = false;
for (i = 0; i < SI_NUM_STATS; i++)
atomic_set(&new_smi->stats[i], 0);
new_smi->interrupt_disabled = true;
atomic_set(&new_smi->need_watch, 0);
rv = try_enable_event_buffer(new_smi);
if (rv == 0)
new_smi->has_event_buffer = true;
/*
* Start clearing the flags before we enable interrupts or the
* timer to avoid racing with the timer.
*/
start_clear_flags(new_smi);
/*
* IRQ is defined to be set when non-zero. req_events will
* cause a global flags check that will enable interrupts.
*/
if (new_smi->io.irq) {
new_smi->interrupt_disabled = false;
atomic_set(&new_smi->req_events, 1);
}
dev_set_drvdata(new_smi->io.dev, new_smi);
rv = device_add_group(new_smi->io.dev, &ipmi_si_dev_attr_group);
if (rv) {
dev_err(new_smi->io.dev,
"Unable to add device attributes: error %d\n",
rv);
goto out_err;
}
new_smi->dev_group_added = true;
rv = ipmi_register_smi(&handlers,
new_smi,
new_smi->io.dev,
new_smi->io.slave_addr);
if (rv) {
dev_err(new_smi->io.dev,
"Unable to register device: error %d\n",
rv);
goto out_err;
}
/* Don't increment till we know we have succeeded. */
smi_num++;
dev_info(new_smi->io.dev, "IPMI %s interface initialized\n",
si_to_str[new_smi->io.si_type]);
WARN_ON(new_smi->io.dev->init_name != NULL);
out_err:
if (rv && new_smi->io.io_cleanup) {
new_smi->io.io_cleanup(&new_smi->io);
new_smi->io.io_cleanup = NULL;
}
return rv;
}
/*
 * Module init: discover interfaces from every source (hardcoded,
 * platform, PCI, PARISC), then initialize them preferring those with
 * interrupts; once one type succeeds, only register more of that same
 * type.  Fails with -ENODEV only when unload_when_empty is set and
 * nothing at all was found.
 */
static int __init init_ipmi_si(void)
{
	struct smi_info *e;
	enum ipmi_addr_src type = SI_INVALID;
	if (initialized)
		return 0;
	ipmi_hardcode_init();
	pr_info("IPMI System Interface driver\n");
	ipmi_si_platform_init();
	ipmi_si_pci_init();
	ipmi_si_parisc_init();
	/* We prefer devices with interrupts, but in the case of a machine
	   with multiple BMCs we assume that there will be several instances
	   of a given type so if we succeed in registering a type then also
	   try to register everything else of the same type */
	mutex_lock(&smi_infos_lock);
	list_for_each_entry(e, &smi_infos, link) {
		/* Try to register a device if it has an IRQ and we either
		   haven't successfully registered a device yet or this
		   device has the same type as one we successfully registered */
		if (e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e)) {
				type = e->io.addr_source;
			}
		}
	}
	/* type will only have been set if we successfully registered an si */
	if (type)
		goto skip_fallback_noirq;
	/* Fall back to the preferred device */
	list_for_each_entry(e, &smi_infos, link) {
		if (!e->io.irq && (!type || e->io.addr_source == type)) {
			if (!try_smi_init(e)) {
				type = e->io.addr_source;
			}
		}
	}
skip_fallback_noirq:
	initialized = true;
	mutex_unlock(&smi_infos_lock);
	if (type)
		return 0;
	mutex_lock(&smi_infos_lock);
	if (unload_when_empty && list_empty(&smi_infos)) {
		mutex_unlock(&smi_infos_lock);
		cleanup_ipmi_si();
		pr_warn("Unable to find any System Interface(s)\n");
		return -ENODEV;
	} else {
		mutex_unlock(&smi_infos_lock);
		return 0;
	}
}
module_init(init_ipmi_si);
/*
 * Tear down one interface: remove its sysfs group, quiesce
 * interrupts/timer/thread, drain the state machine, disable the irq
 * at the BMC, then free the state machine.  Ordering here is
 * deliberate -- do not reorder the steps.
 */
static void shutdown_smi(void *send_info)
{
	struct smi_info *smi_info = send_info;
	if (smi_info->dev_group_added) {
		device_remove_group(smi_info->io.dev, &ipmi_si_dev_attr_group);
		smi_info->dev_group_added = false;
	}
	if (smi_info->io.dev)
		dev_set_drvdata(smi_info->io.dev, NULL);
	/*
	 * Make sure that interrupts, the timer and the thread are
	 * stopped and will not run again.
	 */
	smi_info->interrupt_disabled = true;
	if (smi_info->io.irq_cleanup) {
		smi_info->io.irq_cleanup(&smi_info->io);
		smi_info->io.irq_cleanup = NULL;
	}
	stop_timer_and_thread(smi_info);
	/*
	 * Wait until we know that any interrupt handlers that might
	 * have been running are out, now that we freed the interrupt.
	 */
	synchronize_rcu();
	/*
	 * Timeouts are stopped, now make sure the interrupts are off
	 * in the BMC.  Note that timers and CPU interrupts are off,
	 * so no need for locks.
	 */
	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		poll(smi_info);
		schedule_timeout_uninterruptible(1);
	}
	if (smi_info->handlers)
		disable_si_irq(smi_info);
	/* Drain again: disable_si_irq may itself have queued work. */
	while (smi_info->curr_msg || (smi_info->si_state != SI_NORMAL)) {
		poll(smi_info);
		schedule_timeout_uninterruptible(1);
	}
	if (smi_info->handlers)
		smi_info->handlers->cleanup(smi_info->si_sm);
	if (smi_info->io.addr_source_cleanup) {
		smi_info->io.addr_source_cleanup(&smi_info->io);
		smi_info->io.addr_source_cleanup = NULL;
	}
	if (smi_info->io.io_cleanup) {
		smi_info->io.io_cleanup(&smi_info->io);
		smi_info->io.io_cleanup = NULL;
	}
	kfree(smi_info->si_sm);
	smi_info->si_sm = NULL;
	smi_info->intf = NULL;
}
/*
 * Must be called with smi_infos_lock held, to serialize the
 * smi_info->intf check.
 */
static void cleanup_one_si(struct smi_info *smi_info)
{
	if (!smi_info)
		return;

	/* Unlink first so no one else can find this entry. */
	list_del(&smi_info->link);

	/* Detach from the IPMI core if this interface was registered. */
	if (smi_info->intf)
		ipmi_unregister_smi(smi_info->intf);

	kfree(smi_info);
}
/*
 * Remove the SMI interface bound to @dev, if one exists.
 *
 * Returns 0 when a matching interface was found and cleaned up,
 * -ENOENT otherwise.
 */
int ipmi_si_remove_by_dev(struct device *dev)
{
	struct smi_info *info;
	int rc = -ENOENT;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry(info, &smi_infos, link) {
		if (info->io.dev != dev)
			continue;
		cleanup_one_si(info);
		rc = 0;
		break;
	}
	mutex_unlock(&smi_infos_lock);

	return rc;
}
/*
 * Remove every registered interface matching the given address space,
 * SI type and address.  Returns a referenced struct device for the
 * (last) removed interface, or NULL when nothing matched; the caller
 * owns the reference.
 */
struct device *ipmi_si_remove_by_data(int addr_space, enum si_type si_type,
				      unsigned long addr)
{
	/* remove */
	struct smi_info *info, *next;
	struct device *dev = NULL;

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(info, next, &smi_infos, link) {
		if (info->io.addr_space == addr_space &&
		    info->io.si_type == si_type &&
		    info->io.addr_data == addr) {
			dev = get_device(info->io.dev);
			cleanup_one_si(info);
		}
	}
	mutex_unlock(&smi_infos_lock);

	return dev;
}
/*
 * Module exit: shut down every discovery path, then tear down all
 * remaining interfaces.  A no-op if init never completed.
 */
static void cleanup_ipmi_si(void)
{
	struct smi_info *info, *next;

	if (!initialized)
		return;

	ipmi_si_pci_shutdown();

	ipmi_si_parisc_shutdown();

	ipmi_si_platform_shutdown();

	mutex_lock(&smi_infos_lock);
	list_for_each_entry_safe(info, next, &smi_infos, link)
		cleanup_one_si(info);
	mutex_unlock(&smi_infos_lock);

	ipmi_si_hardcode_exit();
	ipmi_si_hotmod_exit();
}
module_exit(cleanup_ipmi_si);
MODULE_ALIAS("platform:dmi-ipmi-si");
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Corey Minyard <minyard@mvista.com>");
MODULE_DESCRIPTION("Interface to the IPMI driver for the KCS, SMIC, and BT"
		   " system interfaces.");
| gpl-2.0 |
boa19861105/android_442_KitKat_kernel_htc_dlxpul | arch/arm/mach-msm/lpass-8960.c | 42 | 7858 | /* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/kernel.h>
#include <linux/interrupt.h>
#include <linux/reboot.h>
#include <linux/workqueue.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/err.h>
#include <mach/irqs.h>
#include <mach/scm.h>
#include <mach/peripheral-loader.h>
#include <mach/subsystem_restart.h>
#include <mach/subsystem_notif.h>
#include "smd_private.h"
#include "ramdump.h"
#include "sysmon.h"
#define SCM_Q6_NMI_CMD 0x1
#define MODULE_NAME "lpass_8960"
#define MAX_BUF_SIZE 0x51
/* Subsystem restart: QDSP6 data, functions */
static void lpass_fatal_fn(struct work_struct *);
static DECLARE_WORK(lpass_fatal_work, lpass_fatal_fn);
struct lpass_ssr {
void *lpass_ramdump_dev;
} lpass_ssr;
static struct lpass_ssr lpass_ssr_8960;
static int q6_crash_shutdown;
#if defined (CONFIG_MSM_LPASS_SSR_ENABLE)
static int enable_lpass_ssr = 1;
#else
static int enable_lpass_ssr = 0;
#endif
/*
 * Riva (WCNSS) subsystem notifier: forward the BEFORE_SHUTDOWN event
 * to the LPASS through sysmon so it can prepare for the restart.
 */
static int riva_notifier_cb(struct notifier_block *this, unsigned long code,
			    void *ss_handle)
{
	int rc;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		pr_debug("%s: R-Notify: Shutdown started\n", __func__);
		rc = sysmon_send_event(SYSMON_SS_LPASS, "wcnss",
				       SUBSYS_BEFORE_SHUTDOWN);
		if (rc < 0)
			pr_err("%s: sysmon_send_event error %d", __func__,
			       rc);
	}

	return NOTIFY_DONE;
}
static void *ssr_notif_hdle;
static struct notifier_block rnb = {
.notifier_call = riva_notifier_cb,
};
/*
 * Modem subsystem notifier: forward the BEFORE_SHUTDOWN event to the
 * LPASS through sysmon so it can prepare for the modem restart.
 */
static int modem_notifier_cb(struct notifier_block *this, unsigned long code,
			     void *ss_handle)
{
	int rc;

	if (code == SUBSYS_BEFORE_SHUTDOWN) {
		pr_debug("%s: M-Notify: Shutdown started\n", __func__);
		rc = sysmon_send_event(SYSMON_SS_LPASS, "modem",
				       SUBSYS_BEFORE_SHUTDOWN);
		if (rc < 0)
			pr_err("%s: sysmon_send_event error %d", __func__,
			       rc);
	}

	return NOTIFY_DONE;
}
static void *ssr_modem_notif_hdle;
static struct notifier_block mnb = {
.notifier_call = modem_notifier_cb,
};
/*
 * Fetch the Q6 crash-reason string from SMEM, log it, then scrub the
 * shared entry so a stale reason is not reported after a later crash.
 */
static void lpass_log_failure_reason(void)
{
	char buffer[MAX_BUF_SIZE];
	unsigned size;
	char *reason = smem_get_entry(SMEM_SSR_REASON_LPASS0, &size);

	if (!reason) {
		pr_err("%s: subsystem failure reason: (unknown, smem_get_entry failed).",
			MODULE_NAME);
		return;
	}

	if (reason[0] == '\0') {
		pr_err("%s: subsystem failure reason: (unknown, init value found)",
			MODULE_NAME);
		return;
	}

	/* Clamp to the local buffer, leaving room for the terminator. */
	if (size >= MAX_BUF_SIZE)
		size = MAX_BUF_SIZE - 1;
	memcpy(buffer, reason, size);
	buffer[size] = '\0';
	pr_err("%s: subsystem failure reason: %s", MODULE_NAME, buffer);

	/* Wipe the SMEM entry and make the write visible. */
	memset((void *)reason, 0x0, size);
	wmb();
}
/*
 * Deferred (workqueue) handler for a Q6 watchdog bite: log the crash
 * reason from SMEM, record the restart reason, then panic the SoC.
 */
static void lpass_fatal_fn(struct work_struct *work)
{
	pr_err("%s %s: Watchdog bite received from Q6!\n",
	       MODULE_NAME, __func__);
	lpass_log_failure_reason();
	ssr_set_restart_reason("lpass fatal: "
			       "Watchdog bite received from Q6!");
	panic(MODULE_NAME ": Resetting the SoC");
}
/*
 * SMSM state-change callback for the Q6.  A transition into
 * SMSM_RESET that we did not initiate ourselves is treated as fatal.
 */
static void lpass_smsm_state_cb(void *data, uint32_t old_state,
				uint32_t new_state)
{
	/* Ignore if we're the one that set SMSM_RESET */
	if (q6_crash_shutdown)
		return;

	if (!(new_state & SMSM_RESET))
		return;

	pr_err("%s: LPASS SMSM state changed to SMSM_RESET,"
		" new_state = 0x%x, old_state = 0x%x\n", __func__,
		new_state, old_state);
	ssr_set_restart_reason(
		"lpass fatal: SMSM state changed to SMSM_RESET!");
	lpass_log_failure_reason();
	panic(MODULE_NAME ": Resetting the SoC");
}
/*
 * Ask the secure world to deliver an NMI to the QDSP6 and give it
 * time to dump its internal state before the SoC is reset.
 */
static void send_q6_nmi(void)
{
	/* Send NMI to QDSP6 via an SCM call. */
	uint32_t nmi_arg = 0x1;

	scm_call(SCM_SVC_UTIL, SCM_Q6_NMI_CMD,
		 &nmi_arg, sizeof(nmi_arg), NULL, 0);

	/* Q6 requires worstcase 100ms to dump caches etc.*/
	mdelay(100);
	pr_debug("%s: Q6 NMI was sent.\n", __func__);
}
/*
 * SSR shutdown hook: NMI the Q6 (so it can dump state), power it down
 * via PIL, and mask the watchdog IRQ until powerup re-enables it.
 */
static int lpass_shutdown(const struct subsys_data *subsys)
{
	send_q6_nmi();
	pil_force_shutdown("q6");
	disable_irq_nosync(LPASS_Q6SS_WDOG_EXPIRED);

	return 0;
}
/* SSR powerup hook: boot the Q6 via PIL and re-arm the watchdog IRQ. */
static int lpass_powerup(const struct subsys_data *subsys)
{
	int rc;

	rc = pil_force_boot("q6");
	enable_irq(LPASS_Q6SS_WDOG_EXPIRED);
	return rc;
}
/* RAM segments - address and size for 8960 */
static struct ramdump_segment q6_segments[] = { {0x8da00000, 0x8f200000 -
0x8da00000}, {0x28400000, 0x20000} };
/*
 * SSR ramdump hook: when @enable is set, dump the Q6 memory segments
 * through the "lpass" ramdump device; otherwise do nothing.
 */
static int lpass_ramdump(int enable, const struct subsys_data *subsys)
{
	pr_debug("%s: enable[%d]\n", __func__, enable);

	if (!enable)
		return 0;

	return do_ramdump(lpass_ssr_8960.lpass_ramdump_dev, q6_segments,
			  ARRAY_SIZE(q6_segments));
}
/*
 * SSR crash-shutdown hook: flag that we initiated the reset (so the
 * SMSM callback ignores the resulting SMSM_RESET) and NMI the Q6.
 */
static void lpass_crash_shutdown(const struct subsys_data *subsys)
{
	q6_crash_shutdown = 1;
	send_q6_nmi();
}
/*
 * Q6 watchdog-bite interrupt handler.  Masks the IRQ (it would keep
 * firing) and defers the fatal handling to process context.
 *
 * Fix: the return value of schedule_work() was stored in a local that
 * was never read (set-but-unused variable warning); drop it.
 */
static irqreturn_t lpass_wdog_bite_irq(int irq, void *dev_id)
{
	pr_debug("%s: rxed irq[0x%x]", __func__, irq);
	disable_irq_nosync(LPASS_Q6SS_WDOG_EXPIRED);
	schedule_work(&lpass_fatal_work);

	return IRQ_HANDLED;
}
static struct subsys_data lpass_8960 = {
.name = "lpass",
.shutdown = lpass_shutdown,
.powerup = lpass_powerup,
.ramdump = lpass_ramdump,
.crash_shutdown = lpass_crash_shutdown,
.enable_ssr = 0
};
/*
 * Writable module-parameter hook: parse the new value and propagate it
 * into the subsystem-restart descriptor for the LPASS.
 */
static int enable_lpass_ssr_set(const char *val, struct kernel_param *kp)
{
	int rc = param_set_int(val, kp);

	if (rc)
		return rc;

	if (enable_lpass_ssr)
		pr_info(MODULE_NAME ": Subsystem restart activated for Lpass.\n");

	lpass_8960.enable_ssr = enable_lpass_ssr;

	return 0;
}
module_param_call(enable_lpass_ssr, enable_lpass_ssr_set, param_get_int,
&enable_lpass_ssr, S_IRUGO | S_IWUSR);
/* Register the LPASS with the subsystem-restart framework. */
static int __init lpass_restart_init(void)
{
	lpass_8960.enable_ssr = enable_lpass_ssr;

	return ssr_register_subsystem(&lpass_8960);
}
/*
 * Module init: hook up all LPASS fatal-error paths — SMSM reset
 * callback, watchdog-bite IRQ, subsystem-restart registration, the
 * ramdump device, and the riva/modem shutdown notifiers.
 *
 * Fix: subsys_notif_register_notifier() returns an ERR_PTR on failure,
 * but the checks were written as "IS_ERR(handle) < 0".  IS_ERR()
 * returns 0 or 1, so "< 0" was always false and registration failures
 * were silently ignored.  The correct test is simply IS_ERR(handle).
 */
static int __init lpass_fatal_init(void)
{
	int ret;

	ret = smsm_state_cb_register(SMSM_Q6_STATE, SMSM_RESET,
		lpass_smsm_state_cb, 0);

	/* SMSM registration failure is logged but not fatal. */
	if (ret < 0)
		pr_err("%s: Unable to register SMSM callback! (%d)\n",
				__func__, ret);

	ret = request_irq(LPASS_Q6SS_WDOG_EXPIRED, lpass_wdog_bite_irq,
			IRQF_TRIGGER_RISING, "q6_wdog", NULL);

	if (ret < 0) {
		pr_err("%s: Unable to request LPASS_Q6SS_WDOG_EXPIRED irq.",
			__func__);
		goto out;
	}
	ret = lpass_restart_init();
	if (ret < 0) {
		pr_err("%s: Unable to reg with lpass ssr. (%d)\n",
			__func__, ret);
		goto out;
	}

	lpass_ssr_8960.lpass_ramdump_dev = create_ramdump_device("lpass");

	if (!lpass_ssr_8960.lpass_ramdump_dev) {
		pr_err("%s: Unable to create ramdump device.\n",
			__func__);
		ret = -ENOMEM;
		goto out;
	}
	ssr_notif_hdle = subsys_notif_register_notifier("riva",
							&rnb);
	if (IS_ERR(ssr_notif_hdle)) {
		ret = PTR_ERR(ssr_notif_hdle);
		pr_err("%s: subsys_register_notifier for Riva: err = %d\n",
			__func__, ret);
		free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
		goto out;
	}

	ssr_modem_notif_hdle = subsys_notif_register_notifier("modem",
							&mnb);
	if (IS_ERR(ssr_modem_notif_hdle)) {
		ret = PTR_ERR(ssr_modem_notif_hdle);
		pr_err("%s: subsys_register_notifier for Modem: err = %d\n",
			__func__, ret);
		subsys_notif_unregister_notifier(ssr_notif_hdle, &rnb);
		free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
		goto out;
	}

	pr_info("%s: lpass SSR driver init'ed.\n", __func__);
out:
	return ret;
}
/* Module exit: unhook the subsystem notifiers and release the IRQ. */
static void __exit lpass_fatal_exit(void)
{
	subsys_notif_unregister_notifier(ssr_notif_hdle, &rnb);
	subsys_notif_unregister_notifier(ssr_modem_notif_hdle, &mnb);
	free_irq(LPASS_Q6SS_WDOG_EXPIRED, NULL);
}
module_init(lpass_fatal_init);
module_exit(lpass_fatal_exit);
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
vwmofo/vigor_mofokernel | drivers/gpu-legacy/ion/ion_cp_heap.c | 42 | 27118 | /*
* drivers/gpu/ion/ion_cp_heap.c
*
* Copyright (C) 2011 Google, Inc.
* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/genalloc.h>
#include <linux/io.h>
#include <linux/ion.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <linux/seq_file.h>
#include <linux/fmem.h>
#include <linux/iommu.h>
#include <mach/msm_memtypes.h>
#include <mach/scm.h>
#include <mach/iommu_domains.h>
#include "ion_priv.h"
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
/**
 * struct ion_cp_heap - container for the heap and shared heap data

 * @heap:	the heap information structure
 * @pool:	memory pool to allocate from.
 * @base:	the base address of the memory pool.
 * @permission_type:	Identifier for the memory used by SCM for protecting
 *			and unprotecting memory.
 * @secure_base:	Base address used when securing a heap that is shared.
 * @secure_size:	Size used when securing a heap that is shared.
 * @lock:	mutex to protect shared access.
 * @heap_protected:	Indicates whether heap has been protected or not.
 * @allocated_bytes:	the total number of allocated bytes from the pool.
 * @total_size:	the total size of the memory pool.
 * @request_region:	function pointer to call when first mapping of memory
 *			occurs.
 * @release_region:	function pointer to call when last mapping of memory
 *			unmapped.
 * @bus_id: token used with request/release region.
 * @kmap_cached_count:	the total number of times this heap has been mapped in
 *			kernel space (cached).
 * @kmap_uncached_count:the total number of times this heap has been mapped in
 *			kernel space (un-cached).
 * @umap_count:	the total number of times this heap has been mapped in
 *		user space.
 * @iommu_iova: saved iova when mapping full heap at once.
 * @iommu_partition: partition used to map full heap.
 * @reusable: indicates if the memory should be reused via fmem.
 * @reserved_vrange: reserved virtual address range for use with fmem
 * @iommu_map_all: Indicates whether we should map whole heap into IOMMU.
 * @iommu_2x_map_domain: Indicates the domain to use for overmapping.
 * @has_outer_cache:    set to 1 if outer cache is used, 0 otherwise.
 * @protect_cnt: refcount of outstanding protect requests; the heap is
 *		 actually protected on the 0->1 transition and unprotected
 *		 on the 1->0 transition (see ion_cp_protect/unprotect).
*/
struct ion_cp_heap {
	struct ion_heap heap;
	struct gen_pool *pool;
	ion_phys_addr_t base;
	unsigned int permission_type;
	ion_phys_addr_t secure_base;
	size_t secure_size;
	struct mutex lock;
	unsigned int heap_protected;
	unsigned long allocated_bytes;
	unsigned long total_size;
	int (*request_region)(void *);
	int (*release_region)(void *);
	void *bus_id;
	unsigned long kmap_cached_count;
	unsigned long kmap_uncached_count;
	unsigned long umap_count;
	unsigned long iommu_iova[MAX_DOMAINS];
	unsigned long iommu_partition[MAX_DOMAINS];
	int reusable;
	void *reserved_vrange;
	int iommu_map_all;
	int iommu_2x_map_domain;
	unsigned int has_outer_cache;
	atomic_t protect_cnt;
};

/* Values stored in ion_cp_heap.heap_protected. */
enum {
	HEAP_NOT_PROTECTED = 0,
	HEAP_PROTECTED = 1,
};
static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
unsigned int permission_type);
static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
unsigned int permission_type);
/**
 * Get the total number of kernel mappings.
 * Must be called with heap->lock locked.
 *
 * Returns the sum of cached and uncached kernel-mapping counts.
 */
static unsigned long ion_cp_get_total_kmap_count(
	const struct ion_cp_heap *cp_heap)
{
	return cp_heap->kmap_cached_count + cp_heap->kmap_uncached_count;
}
/**
 * Protects memory if heap is unsecured heap. Also ensures that we are in
 * the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 *
 * protect_cnt is a refcount: the SCM protect call is only issued on the
 * 0->1 transition; later callers just bump the count.  On failure the
 * count is rolled back and, for reusable heaps, fmem is returned to
 * T-state.
 */
static int ion_cp_protect(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	int ret_value = 0;

	if (atomic_inc_return(&cp_heap->protect_cnt) == 1) {
		/* Make sure we are in C state when the heap is protected. */
		if (cp_heap->reusable && !cp_heap->allocated_bytes) {
			ret_value = fmem_set_state(FMEM_C_STATE);
			if (ret_value)
				goto out;
		}

		ret_value = ion_cp_protect_mem(cp_heap->secure_base,
				cp_heap->secure_size, cp_heap->permission_type);
		if (ret_value) {
			pr_err("Failed to protect memory for heap %s - "
				"error code: %d\n", heap->name, ret_value);

			/* Undo the fmem transition done above. */
			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
				if (fmem_set_state(FMEM_T_STATE) != 0)
					pr_err("%s: unable to transition heap to T-state\n",
						__func__);
			}
			/* Roll back the refcount taken at the top. */
			atomic_dec(&cp_heap->protect_cnt);
		} else {
			cp_heap->heap_protected = HEAP_PROTECTED;
			pr_debug("Protected heap %s @ 0x%lx\n",
				heap->name, cp_heap->base);
		}
	}
out:
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
	return ret_value;
}
/**
 * Unprotects memory if heap is secure heap. Also ensures that we are in
 * the correct FMEM state if this heap is a reusable heap.
 * Must be called with heap->lock locked.
 *
 * Counterpart of ion_cp_protect(): the SCM unprotect call is only
 * issued when protect_cnt drops to zero.
 */
static void ion_cp_unprotect(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (atomic_dec_and_test(&cp_heap->protect_cnt)) {
		int error_code = ion_cp_unprotect_mem(
			cp_heap->secure_base, cp_heap->secure_size,
			cp_heap->permission_type);
		if (error_code) {
			pr_err("Failed to un-protect memory for heap %s - "
				"error code: %d\n", heap->name, error_code);
		} else  {
			cp_heap->heap_protected = HEAP_NOT_PROTECTED;
			pr_debug("Un-protected heap %s @ 0x%x\n", heap->name,
				(unsigned int) cp_heap->base);

			/* Return a now-empty reusable heap to T-state. */
			if (cp_heap->reusable && !cp_heap->allocated_bytes) {
				if (fmem_set_state(FMEM_T_STATE) != 0)
					pr_err("%s: unable to transition heap to T-state",
						__func__);
			}
		}
	}
	pr_debug("%s: protect count is %d\n", __func__,
		atomic_read(&cp_heap->protect_cnt));
	BUG_ON(atomic_read(&cp_heap->protect_cnt) < 0);
}
/*
 * Allocate @size bytes from the content-protect heap.
 *
 * Secure allocations (ION_SECURE flag) are refused while user-space or
 * cached kernel mappings are outstanding; unsecure allocations are
 * refused while the heap is protected.  Returns the physical address,
 * or ION_CP_ALLOCATE_FAIL / ION_RESERVED_ALLOCATE_FAIL on error.
 *
 * NOTE(review): the lock is dropped around gen_pool_alloc_aligned() after
 * allocated_bytes has already been bumped; the count is rolled back on
 * failure below.
 */
ion_phys_addr_t ion_cp_allocate(struct ion_heap *heap,
				      unsigned long size,
				      unsigned long align,
				      unsigned long flags)
{
	unsigned long offset;
	unsigned long secure_allocation = flags & ION_SECURE;

	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (!secure_allocation && cp_heap->heap_protected == HEAP_PROTECTED) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate un-secure memory from protected"
			" heap %s\n", heap->name);
		return ION_CP_ALLOCATE_FAIL;
	}

	if (secure_allocation &&
	    (cp_heap->umap_count > 0 || cp_heap->kmap_cached_count > 0)) {
		mutex_unlock(&cp_heap->lock);
		pr_err("ION cannot allocate secure memory from heap with "
			"outstanding mappings: User space: %lu, kernel space "
			"(cached): %lu\n", cp_heap->umap_count,
					   cp_heap->kmap_cached_count);
		return ION_CP_ALLOCATE_FAIL;
	}

	/*
	 * if this is the first reusable allocation, transition
	 * the heap
	 */
	if (cp_heap->reusable && !cp_heap->allocated_bytes) {
		if (fmem_set_state(FMEM_C_STATE) != 0) {
			mutex_unlock(&cp_heap->lock);
			return ION_RESERVED_ALLOCATE_FAIL;
		}
	}

	cp_heap->allocated_bytes += size;
	mutex_unlock(&cp_heap->lock);

	offset = gen_pool_alloc_aligned(cp_heap->pool,
					size, ilog2(align));

	if (!offset) {
		/* Allocation failed: undo the accounting done above. */
		mutex_lock(&cp_heap->lock);

		cp_heap->allocated_bytes -= size;
		if ((cp_heap->total_size -
		     cp_heap->allocated_bytes) >= size)
			pr_debug("%s: heap %s has enough memory (%lx) but"
				" the allocation of size %lx still failed."
				" Memory is probably fragmented.\n",
				__func__, heap->name,
				cp_heap->total_size -
				cp_heap->allocated_bytes, size);

		/* If we were the only allocation, drop back to T-state. */
		if (cp_heap->reusable && !cp_heap->allocated_bytes &&
		    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
			if (fmem_set_state(FMEM_T_STATE) != 0)
				pr_err("%s: unable to transition heap to T-state\n",
					__func__);
		}
		mutex_unlock(&cp_heap->lock);

		return ION_CP_ALLOCATE_FAIL;
	}

	return offset;
}
/*
 * Tear down the full-heap IOMMU mapping for @domain_num that was
 * created by iommu_map_all(), including the extra overmapped range
 * used by the 2x-map (video) domain.
 */
static void iommu_unmap_all(unsigned long domain_num,
			    struct ion_cp_heap *cp_heap)
{
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);
	unsigned long chunk_order = get_order(SZ_64K);
	unsigned long off;

	if (!domain) {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		return;
	}

	/* The heap was mapped in 64K chunks; unmap it the same way. */
	for (off = 0; off < cp_heap->total_size; off += SZ_64K)
		iommu_unmap(domain, cp_heap->iommu_iova[domain_num] + off,
			    chunk_order);

	/* The 2x-mapped domain has an extra heap-sized range after it. */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		msm_iommu_unmap_extra(domain,
				      cp_heap->iommu_iova[domain_num] +
				      cp_heap->total_size,
				      cp_heap->total_size, SZ_64K);
}
/*
 * Return @size bytes at @addr to the heap pool and update accounting.
 * When the last allocation goes away, any whole-heap IOMMU mappings
 * are torn down and a reusable (fmem) heap is returned to T-state.
 */
void ion_cp_free(struct ion_heap *heap, ion_phys_addr_t addr,
		       unsigned long size)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	if (addr == ION_CP_ALLOCATE_FAIL)
		return;
	gen_pool_free(cp_heap->pool, addr, size);

	mutex_lock(&cp_heap->lock);
	cp_heap->allocated_bytes -= size;

	if (cp_heap->reusable && !cp_heap->allocated_bytes &&
	    cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		if (fmem_set_state(FMEM_T_STATE) != 0)
			pr_err("%s: unable to transition heap to T-state\n",
				__func__);
	}

	/* Unmap everything if we previously mapped the whole heap at once. */
	if (!cp_heap->allocated_bytes) {
		unsigned int i;
		for (i = 0; i < MAX_DOMAINS; ++i) {
			if (cp_heap->iommu_iova[i]) {
				unsigned long vaddr_len = cp_heap->total_size;

				/* 2x-mapped domain reserved twice the iova. */
				if (i == cp_heap->iommu_2x_map_domain)
					vaddr_len <<= 1;
				iommu_unmap_all(i, cp_heap);

				msm_free_iova_address(cp_heap->iommu_iova[i], i,
						cp_heap->iommu_partition[i],
						vaddr_len);
			}
			cp_heap->iommu_iova[i] = 0;
			cp_heap->iommu_partition[i] = 0;
		}
	}
	mutex_unlock(&cp_heap->lock);
}
/* Report the physical address and size backing @buffer. */
static int ion_cp_heap_phys(struct ion_heap *heap,
				  struct ion_buffer *buffer,
				  ion_phys_addr_t *addr, size_t *len)
{
	*len = buffer->size;
	*addr = buffer->priv_phys;

	return 0;
}
static int ion_cp_heap_allocate(struct ion_heap *heap,
struct ion_buffer *buffer,
unsigned long size, unsigned long align,
unsigned long flags)
{
buffer->priv_phys = ion_cp_allocate(heap, size, align, flags);
return buffer->priv_phys == ION_CP_ALLOCATE_FAIL ? -ENOMEM : 0;
}
/* ion_heap_ops.free: release the backing range and poison the handle. */
static void ion_cp_heap_free(struct ion_buffer *buffer)
{
	ion_cp_free(buffer->heap, buffer->priv_phys, buffer->size);
	buffer->priv_phys = ION_CP_ALLOCATE_FAIL;
}
/*
 * Build a single-entry scatterlist describing @buffer's physically
 * contiguous backing.  Caller frees it with vfree().
 */
struct scatterlist *ion_cp_heap_create_sglist(struct ion_buffer *buffer)
{
	struct scatterlist *sg = vmalloc(sizeof(*sg));

	if (!sg)
		return ERR_PTR(-ENOMEM);

	sg_init_table(sg, 1);
	sg->dma_address = buffer->priv_phys;
	sg->length = buffer->size;
	sg->offset = 0;

	return sg;
}
/*
 * ion_heap_ops.map_dma: hand back a one-entry sglist for the buffer's
 * contiguous physical range.
 */
struct scatterlist *ion_cp_heap_map_dma(struct ion_heap *heap,
					      struct ion_buffer *buffer)
{
	return ion_cp_heap_create_sglist(buffer);
}
void ion_cp_heap_unmap_dma(struct ion_heap *heap,
struct ion_buffer *buffer)
{
if (buffer->sglist)
vfree(buffer->sglist);
}
/**
 * Call request region for SMI memory of this is the first mapping.
 */
static int ion_cp_request_region(struct ion_cp_heap *cp_heap)
{
	int rc = 0;
	unsigned long total_maps = cp_heap->umap_count +
				   ion_cp_get_total_kmap_count(cp_heap);

	/* Only the very first mapping triggers the bus request. */
	if (total_maps == 0 && cp_heap->request_region)
		rc = cp_heap->request_region(cp_heap->bus_id);

	return rc;
}
/**
 * Call release region for SMI memory of this is the last un-mapping.
 */
static int ion_cp_release_region(struct ion_cp_heap *cp_heap)
{
	int rc = 0;
	unsigned long total_maps = cp_heap->umap_count +
				   ion_cp_get_total_kmap_count(cp_heap);

	/* Only when the final mapping is gone do we release the bus. */
	if (total_maps == 0 && cp_heap->release_region)
		rc = cp_heap->release_region(cp_heap->bus_id);

	return rc;
}
/*
 * Map @buffer into the reserved fmem virtual range.
 *
 * @phys_base: physical base of the fmem region
 * @virt_base: reserved virtual base corresponding to @phys_base
 * @flags:     ION flags; selects cached vs. device memory type
 *
 * Returns the kernel virtual address, or NULL on failure.
 *
 * Fix: the buffer offset was computed with an unsigned subtraction
 * before validating phys_base <= priv_phys; a buffer below the region
 * base would wrap around.  Validate first, then compute.
 */
void *ion_map_fmem_buffer(struct ion_buffer *buffer, unsigned long phys_base,
	void *virt_base, unsigned long flags)
{
	int ret;
	unsigned int offset;
	unsigned long start;
	const struct mem_type *type = ION_IS_CACHED(flags) ?
				get_mem_type(MT_DEVICE_CACHED) :
				get_mem_type(MT_DEVICE);

	/* The buffer must lie inside the fmem region. */
	if (phys_base > buffer->priv_phys)
		return NULL;

	offset = buffer->priv_phys - phys_base;
	start = ((unsigned long)virt_base) + offset;

	ret = ioremap_page_range(start, start + buffer->size,
			buffer->priv_phys, __pgprot(type->prot_pte));

	if (!ret)
		return (void *)start;
	else
		return NULL;
}
/*
 * Map @buffer into kernel space.
 *
 * A protected heap only allows uncached mappings; an unprotected heap
 * allows both.  Reusable (fmem) heaps map through the reserved virtual
 * range, others through ioremap.  Returns the kernel virtual address
 * or NULL on failure / refusal.
 */
void *ion_cp_heap_map_kernel(struct ion_heap *heap,
				   struct ion_buffer *buffer,
				   unsigned long flags)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);
	void *ret_value = NULL;

	mutex_lock(&cp_heap->lock);
	/* Refuse cached mappings while the heap is protected. */
	if ((cp_heap->heap_protected == HEAP_NOT_PROTECTED) ||
	    ((cp_heap->heap_protected == HEAP_PROTECTED) &&
	      !ION_IS_CACHED(flags))) {

		/* First mapping must claim the bus region. */
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return NULL;
		}

		if (cp_heap->reusable) {
			ret_value = ion_map_fmem_buffer(buffer, cp_heap->base,
					cp_heap->reserved_vrange, flags);

		} else {
			if (ION_IS_CACHED(flags))
				ret_value = ioremap_cached(buffer->priv_phys,
							   buffer->size);
			else
				ret_value = ioremap(buffer->priv_phys,
						    buffer->size);
		}

		if (!ret_value) {
			/* Mapping failed: drop the region claim again. */
			ion_cp_release_region(cp_heap);
		} else {
			if (ION_IS_CACHED(buffer->flags))
				++cp_heap->kmap_cached_count;
			else
				++cp_heap->kmap_uncached_count;
		}
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}
/*
 * Undo ion_cp_heap_map_kernel(): tear down the virtual mapping,
 * update the mapping counters and release the bus region if this was
 * the last mapping.
 */
void ion_cp_heap_unmap_kernel(struct ion_heap *heap,
				    struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	/* fmem buffers live in the reserved range, others in ioremap space. */
	if (cp_heap->reusable)
		unmap_kernel_range((unsigned long)buffer->vaddr, buffer->size);
	else
		__arch_iounmap(buffer->vaddr);

	buffer->vaddr = NULL;

	mutex_lock(&cp_heap->lock);
	if (ION_IS_CACHED(buffer->flags))
		cp_heap->kmap_cached_count--;
	else
		cp_heap->kmap_uncached_count--;
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}
/*
 * Map @buffer into a user VMA.  Refused (returns -EAGAIN) while the
 * heap is protected.  Uncached requests get a write-combined page
 * protection.  The user-mapping counter drives request/release of the
 * bus region.
 */
int ion_cp_heap_map_user(struct ion_heap *heap, struct ion_buffer *buffer,
			struct vm_area_struct *vma, unsigned long flags)
{
	int ret_value = -EAGAIN;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (cp_heap->heap_protected == HEAP_NOT_PROTECTED) {
		/* First mapping must claim the bus region. */
		if (ion_cp_request_region(cp_heap)) {
			mutex_unlock(&cp_heap->lock);
			return -EINVAL;
		}

		if (!ION_IS_CACHED(flags))
			vma->vm_page_prot = pgprot_writecombine(
					vma->vm_page_prot);

		ret_value =  remap_pfn_range(vma, vma->vm_start,
			__phys_to_pfn(buffer->priv_phys) + vma->vm_pgoff,
			vma->vm_end - vma->vm_start,
			vma->vm_page_prot);

		if (ret_value)
			/* remap failed: drop the region claim again. */
			ion_cp_release_region(cp_heap);
		else
			++cp_heap->umap_count;
	}
	mutex_unlock(&cp_heap->lock);
	return ret_value;
}
/*
 * Undo ion_cp_heap_map_user(): drop the user-mapping count and release
 * the bus region when no mappings remain.
 */
void ion_cp_heap_unmap_user(struct ion_heap *heap,
			struct ion_buffer *buffer)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	cp_heap->umap_count--;
	ion_cp_release_region(cp_heap);
	mutex_unlock(&cp_heap->lock);
}
/*
 * Perform the requested cache maintenance (clean/invalidate/flush) on
 * the inner cache for [vaddr, vaddr+length), and on the outer cache
 * for the matching physical range when one is present.
 */
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_op)(phys_addr_t, phys_addr_t);
	struct ion_cp_heap *cp_heap =
	     container_of(heap, struct  ion_cp_heap, heap);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (cp_heap->has_outer_cache) {
		unsigned long pstart = buffer->priv_phys + offset;

		outer_op(pstart, pstart + length);
	}

	return 0;
}
/* Dump heap accounting to debugfs; snapshot is taken under the lock. */
static int ion_cp_print_debug(struct ion_heap *heap, struct seq_file *s)
{
	unsigned long total_alloc;
	unsigned long total_size;
	unsigned long umap_count;
	unsigned long kmap_count;
	unsigned long heap_protected;
	int reusable;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	total_alloc = cp_heap->allocated_bytes;
	total_size = cp_heap->total_size;
	umap_count = cp_heap->umap_count;
	kmap_count = ion_cp_get_total_kmap_count(cp_heap);
	heap_protected = cp_heap->heap_protected == HEAP_PROTECTED;
	reusable = cp_heap->reusable;
	mutex_unlock(&cp_heap->lock);

	seq_printf(s, "total bytes currently allocated: %lx\n", total_alloc);
	seq_printf(s, "total heap size: %lx\n", total_size);
	seq_printf(s, "umapping count: %lx\n", umap_count);
	seq_printf(s, "kmapping count: %lx\n", kmap_count);
	seq_printf(s, "heap protected: %s\n", heap_protected ? "Yes" : "No");
	seq_printf(s, "reusable: %s\n", reusable ? "Yes" : "No");

	return 0;
}
/*
 * Secure the whole heap via the TZ protect call.  Refused while any
 * user-space or cached kernel mappings are outstanding.
 */
int ion_cp_secure_heap(struct ion_heap *heap)
{
	int rc;
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	if (cp_heap->umap_count || cp_heap->kmap_cached_count) {
		pr_err("ION cannot secure heap with outstanding mappings: "
		       "User space: %lu, kernel space (cached): %lu\n",
		       cp_heap->umap_count, cp_heap->kmap_cached_count);
		rc = -EINVAL;
	} else {
		rc = ion_cp_protect(heap);
	}
	mutex_unlock(&cp_heap->lock);

	return rc;
}
/* Drop one protect reference on the heap (see ion_cp_unprotect()). */
int ion_cp_unsecure_heap(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
		container_of(heap, struct ion_cp_heap, heap);

	mutex_lock(&cp_heap->lock);
	ion_cp_unprotect(heap);
	mutex_unlock(&cp_heap->lock);

	return 0;
}
/*
 * Map the entire heap into IOMMU @domain_num in 64K chunks, recording
 * the base iova in cp_heap->iommu_iova[].  Both the heap base and size
 * must be 64K-aligned.  For the 2x-map (video) domain, twice the heap
 * size of iova space is reserved and the extra half is backed by
 * msm_iommu_map_extra().  Returns 0 on success or a negative errno;
 * on failure the iova allocation is released again.
 */
static int iommu_map_all(unsigned long domain_num, struct ion_cp_heap *cp_heap,
			int partition, unsigned long prot)
{
	unsigned long left_to_map = cp_heap->total_size;
	unsigned long order = get_order(SZ_64K);
	unsigned long page_size = SZ_64K;
	int ret_value = 0;
	unsigned long virt_addr_len = cp_heap->total_size;
	struct iommu_domain *domain = msm_get_iommu_domain(domain_num);

	/* If we are mapping into the video domain we need to map twice the
	 * size of the heap to account for prefetch issue in video core.
	 */
	if (domain_num == cp_heap->iommu_2x_map_domain)
		virt_addr_len <<= 1;

	if (cp_heap->total_size & (SZ_64K-1)) {
		pr_err("Heap size is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (cp_heap->base & (SZ_64K-1)) {
		pr_err("Heap physical address is not aligned to 64K, cannot map into IOMMU\n");
		ret_value = -EINVAL;
	}
	if (!ret_value && domain) {
		unsigned long temp_phys = cp_heap->base;
		unsigned long temp_iova =
				msm_allocate_iova_address(domain_num, partition,
						virt_addr_len, SZ_64K);
		if (!temp_iova) {
			pr_err("%s: could not allocate iova from domain %lu, partition %d\n",
				__func__, domain_num, partition);
			ret_value = -ENOMEM;
			goto out;
		}
		/* Record the base iova so later buffers can reuse it. */
		cp_heap->iommu_iova[domain_num] = temp_iova;

		while (left_to_map) {
			int ret = iommu_map(domain, temp_iova, temp_phys,
					order, prot);
			if (ret) {
				pr_err("%s: could not map %lx in domain %p, error: %d\n",
					__func__, temp_iova, domain, ret);
				ret_value = -EAGAIN;
				goto free_iova;
			}
			temp_iova += page_size;
			temp_phys += page_size;
			left_to_map -= page_size;
		}
		/* Back the extra 2x range (temp_iova is now base + size). */
		if (domain_num == cp_heap->iommu_2x_map_domain)
			ret_value = msm_iommu_map_extra(domain, temp_iova,
							cp_heap->total_size,
							SZ_64K, prot);
		if (ret_value)
			goto free_iova;
	} else {
		pr_err("Unable to get IOMMU domain %lu\n", domain_num);
		ret_value = -ENOMEM;
	}
	goto out;

free_iova:
	/* Failure: release the whole iova reservation made above. */
	msm_free_iova_address(cp_heap->iommu_iova[domain_num], domain_num,
			      partition, virt_addr_len);
out:
	return ret_value;
}
/*
 * Map @buffer into an IOMMU domain.
 *
 * Three paths:
 *  1. The whole heap is already mapped for this domain: just compute
 *     the buffer's offset into the existing mapping.
 *  2. The heap is configured for map-all: map the entire heap now and
 *     return the buffer's offset into it.
 *  3. Otherwise map just this buffer (plus any requested extra iova
 *     padding) through a single-entry sglist.
 *
 * Returns 0 on success; on failure all partially acquired resources
 * (mapping, sglist, iova) are released via the goto chain.
 */
static int ion_cp_heap_map_iommu(struct ion_buffer *buffer,
				struct ion_iommu_map *data,
				unsigned int domain_num,
				unsigned int partition_num,
				unsigned long align,
				unsigned long iova_length,
				unsigned long flags)
{
	struct iommu_domain *domain;
	int ret = 0;
	unsigned long extra;
	struct scatterlist *sglist = 0;
	struct ion_cp_heap *cp_heap =
		container_of(buffer->heap, struct ion_cp_heap, heap);
	int prot = IOMMU_WRITE | IOMMU_READ;
	prot |= ION_IS_CACHED(flags) ? IOMMU_CACHE : 0;

	data->mapped_size = iova_length;

	/* No IOMMU: the "iova" is just the physical address. */
	if (!msm_use_iommu()) {
		data->iova_addr = buffer->priv_phys;
		return 0;
	}

	if (cp_heap->iommu_iova[domain_num]) {
		/* Already mapped. */
		unsigned long offset = buffer->priv_phys - cp_heap->base;
		data->iova_addr = cp_heap->iommu_iova[domain_num] + offset;
		return 0;
	} else if (cp_heap->iommu_map_all) {
		ret = iommu_map_all(domain_num, cp_heap, partition_num, prot);
		if (!ret) {
			unsigned long offset =
					buffer->priv_phys - cp_heap->base;
			data->iova_addr =
				cp_heap->iommu_iova[domain_num] + offset;
			cp_heap->iommu_partition[domain_num] = partition_num;
			/*
			clear delayed map flag so that we don't interfere
			with this feature (we are already delaying).
			*/
			data->flags &= ~ION_IOMMU_UNMAP_DELAYED;
			return 0;
		} else {
			/* Map-all failed: forget the stale bookkeeping. */
			cp_heap->iommu_iova[domain_num] = 0;
			cp_heap->iommu_partition[domain_num] = 0;
			return ret;
		}
	}

	/* Per-buffer mapping path. */
	extra = iova_length - buffer->size;

	data->iova_addr = msm_allocate_iova_address(domain_num, partition_num,
						data->mapped_size, align);

	if (!data->iova_addr) {
		ret = -ENOMEM;
		goto out;
	}

	domain = msm_get_iommu_domain(domain_num);

	if (!domain) {
		ret = -ENOMEM;
		goto out1;
	}

	sglist = ion_cp_heap_create_sglist(buffer);
	if (IS_ERR_OR_NULL(sglist)) {
		ret = -ENOMEM;
		goto out1;
	}
	ret = iommu_map_range(domain, data->iova_addr, sglist,
			      buffer->size, prot);
	if (ret) {
		pr_err("%s: could not map %lx in domain %p\n",
			__func__, data->iova_addr, domain);
		goto out1;
	}

	/* Back any extra (guard) iova space with dummy pages. */
	if (extra) {
		unsigned long extra_iova_addr = data->iova_addr + buffer->size;
		ret = msm_iommu_map_extra(domain, extra_iova_addr, extra,
					  SZ_4K, prot);
		if (ret)
			goto out2;
	}
	vfree(sglist);
	return ret;

out2:
	iommu_unmap_range(domain, data->iova_addr, buffer->size);
out1:
	if (!IS_ERR_OR_NULL(sglist))
		vfree(sglist);
	msm_free_iova_address(data->iova_addr, domain_num, partition_num,
				data->mapped_size);
out:
	return ret;
}
static void ion_cp_heap_unmap_iommu(struct ion_iommu_map *data)
{
unsigned int domain_num;
unsigned int partition_num;
struct iommu_domain *domain;
struct ion_cp_heap *cp_heap =
container_of(data->buffer->heap, struct ion_cp_heap, heap);
if (!msm_use_iommu())
return;
domain_num = iommu_map_domain(data);
/* If we are mapping everything we'll wait to unmap until everything
is freed. */
if (cp_heap->iommu_iova[domain_num])
return;
partition_num = iommu_map_partition(data);
domain = msm_get_iommu_domain(domain_num);
if (!domain) {
WARN(1, "Could not get domain %d. Corruption?\n", domain_num);
return;
}
iommu_unmap_range(domain, data->iova_addr, data->mapped_size);
msm_free_iova_address(data->iova_addr, domain_num, partition_num,
data->mapped_size);
return;
}
/*
 * ion_heap_ops vtable that wires the content-protection (CP) heap
 * implementation into the ion core.  The secure_heap/unsecure_heap
 * hooks are specific to this heap type.
 */
static struct ion_heap_ops cp_heap_ops = {
	.allocate = ion_cp_heap_allocate,
	.free = ion_cp_heap_free,
	.phys = ion_cp_heap_phys,
	.map_user = ion_cp_heap_map_user,
	.unmap_user = ion_cp_heap_unmap_user,
	.map_kernel = ion_cp_heap_map_kernel,
	.unmap_kernel = ion_cp_heap_unmap_kernel,
	.map_dma = ion_cp_heap_map_dma,
	.unmap_dma = ion_cp_heap_unmap_dma,
	.cache_op = ion_cp_cache_ops,
	.print_debug = ion_cp_print_debug,
	.secure_heap = ion_cp_secure_heap,
	.unsecure_heap = ion_cp_unsecure_heap,
	.map_iommu = ion_cp_heap_map_iommu,
	.unmap_iommu = ion_cp_heap_unmap_iommu,
};
struct ion_heap *ion_cp_heap_create(struct ion_platform_heap *heap_data)
{
struct ion_cp_heap *cp_heap;
int ret;
cp_heap = kzalloc(sizeof(*cp_heap), GFP_KERNEL);
if (!cp_heap)
return ERR_PTR(-ENOMEM);
mutex_init(&cp_heap->lock);
cp_heap->pool = gen_pool_create(12, -1);
if (!cp_heap->pool)
goto free_heap;
cp_heap->base = heap_data->base;
ret = gen_pool_add(cp_heap->pool, cp_heap->base, heap_data->size, -1);
if (ret < 0)
goto destroy_pool;
cp_heap->allocated_bytes = 0;
cp_heap->umap_count = 0;
cp_heap->kmap_cached_count = 0;
cp_heap->kmap_uncached_count = 0;
cp_heap->total_size = heap_data->size;
cp_heap->heap.ops = &cp_heap_ops;
cp_heap->heap.type = ION_HEAP_TYPE_CP;
cp_heap->heap_protected = HEAP_NOT_PROTECTED;
cp_heap->secure_base = cp_heap->base;
cp_heap->secure_size = heap_data->size;
cp_heap->has_outer_cache = heap_data->has_outer_cache;
atomic_set(&cp_heap->protect_cnt, 0);
if (heap_data->extra_data) {
struct ion_cp_heap_pdata *extra_data =
heap_data->extra_data;
cp_heap->reusable = extra_data->reusable;
cp_heap->reserved_vrange = extra_data->virt_addr;
cp_heap->permission_type = extra_data->permission_type;
if (extra_data->secure_size) {
cp_heap->secure_base = extra_data->secure_base;
cp_heap->secure_size = extra_data->secure_size;
}
if (extra_data->setup_region)
cp_heap->bus_id = extra_data->setup_region();
if (extra_data->request_region)
cp_heap->request_region = extra_data->request_region;
if (extra_data->release_region)
cp_heap->release_region = extra_data->release_region;
cp_heap->iommu_map_all =
extra_data->iommu_map_all;
cp_heap->iommu_2x_map_domain =
extra_data->iommu_2x_map_domain;
}
return &cp_heap->heap;
destroy_pool:
gen_pool_destroy(cp_heap->pool);
free_heap:
kfree(cp_heap);
return ERR_PTR(-ENOMEM);
}
/*
 * ion_cp_heap_destroy() - tear down a heap made by ion_cp_heap_create()
 * @heap: embedded ion_heap previously returned by ion_cp_heap_create()
 *
 * Releases the backing gen_pool and frees the container structure.
 * (The original code also assigned NULL to the local pointer after
 * kfree(); that was a dead store with no effect and has been removed.)
 */
void ion_cp_heap_destroy(struct ion_heap *heap)
{
	struct ion_cp_heap *cp_heap =
	     container_of(heap, struct ion_cp_heap, heap);

	gen_pool_destroy(cp_heap->pool);
	kfree(cp_heap);
}
/* SCM related code for locking down memory for content protection */

/* TZ service command id shared by protect and unprotect requests */
#define SCM_CP_LOCK_CMD_ID	0x1
/* values for cp_lock_msg.lock */
#define SCM_CP_PROTECT		0x1
#define SCM_CP_UNPROTECT	0x0

/*
 * Wire format of the lock/unlock request passed to scm_call(); packed
 * because the secure world expects exactly this byte layout.
 */
struct cp_lock_msg {
	unsigned int start;		/* start physical address */
	unsigned int end;		/* end physical address (start + size) */
	unsigned int permission_type;
	unsigned char lock;		/* SCM_CP_PROTECT or SCM_CP_UNPROTECT */
} __attribute__ ((__packed__));
static int ion_cp_protect_mem(unsigned int phy_base, unsigned int size,
unsigned int permission_type)
{
struct cp_lock_msg cmd;
cmd.start = phy_base;
cmd.end = phy_base + size;
cmd.permission_type = permission_type;
cmd.lock = SCM_CP_PROTECT;
return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
&cmd, sizeof(cmd), NULL, 0);
}
static int ion_cp_unprotect_mem(unsigned int phy_base, unsigned int size,
unsigned int permission_type)
{
struct cp_lock_msg cmd;
cmd.start = phy_base;
cmd.end = phy_base + size;
cmd.permission_type = permission_type;
cmd.lock = SCM_CP_UNPROTECT;
return scm_call(SCM_SVC_CP, SCM_CP_LOCK_CMD_ID,
&cmd, sizeof(cmd), NULL, 0);
}
| gpl-2.0 |
nychitman1/android_kernel_moto_shamu | drivers/mmc/core/slot-gpio.c | 42 | 7253 | /*
* Generic GPIO card-detect helper
*
* Copyright (C) 2011, Guennadi Liakhovetski <g.liakhovetski@gmx.de>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/err.h>
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/jiffies.h>
#include <linux/mmc/host.h>
#include <linux/mmc/slot-gpio.h>
#include <linux/module.h>
#include <linux/slab.h>
/*
 * Per-slot GPIO bookkeeping for card-detect and write-protect lines.
 *
 * Fix: 'status' was originally declared AFTER the zero-length array
 * cd_label[0].  mmc_gpio_alloc() sizes the allocation as
 * sizeof(*ctx) + 2 * len and writes the labels into cd_label, so with
 * the old layout the label bytes overwrote 'status'.  A zero-length /
 * flexible array member must be the last member of the struct.
 */
struct mmc_gpio {
	int ro_gpio;		/* write-protect GPIO, -EINVAL when unset */
	int cd_gpio;		/* card-detect GPIO, -EINVAL when unset */
	char *ro_label;		/* points into the cd_label allocation */
	bool status;		/* last observed card-detect state */
	char cd_label[0];	/* trailing storage for both labels */
};
/*
 * Read the current card-detect state, normalised for polarity.
 * Returns 1 (card present) / 0 (absent), or -ENOSYS when no CD GPIO
 * has been configured.
 */
static int mmc_gpio_get_status(struct mmc_host *host)
{
	struct mmc_gpio *ctx = host->slot.handler_priv;

	if (!ctx || !gpio_is_valid(ctx->cd_gpio))
		return -ENOSYS;

	return !gpio_get_value_cansleep(ctx->cd_gpio) ^
		!!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
}
/*
 * Threaded card-detect IRQ handler: on a real state change, record the
 * new state and schedule card detection after a debounce delay.
 */
static irqreturn_t mmc_gpio_cd_irqt(int irq, void *dev_id)
{
	struct mmc_host *host = dev_id;
	struct mmc_gpio *ctx = host->slot.handler_priv;
	int new_status;

	/*
	 * host->ops may not be initialized yet; in that case the card is
	 * detected later, when the host driver calls mmc_add_host().
	 */
	if (!host->ops)
		return IRQ_HANDLED;

	if (host->ops->card_event)
		host->ops->card_event(host);

	new_status = mmc_gpio_get_status(host);
	if (unlikely(new_status < 0))
		return IRQ_HANDLED;

	if (new_status ^ ctx->status) {
		pr_info("%s: slot status change detected (%d -> %d), GPIO_ACTIVE_%s\n",
			mmc_hostname(host), ctx->status, new_status,
			(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH) ?
			"HIGH" : "LOW");
		ctx->status = new_status;

		/* Schedule a card detection after a debounce timeout */
		mmc_detect_change(host, msecs_to_jiffies(200));
	}

	return IRQ_HANDLED;
}
static int mmc_gpio_alloc(struct mmc_host *host)
{
size_t len = strlen(dev_name(host->parent)) + 4;
struct mmc_gpio *ctx;
mutex_lock(&host->slot.lock);
ctx = host->slot.handler_priv;
if (!ctx) {
/*
* devm_kzalloc() can be called after device_initialize(), even
* before device_add(), i.e., between mmc_alloc_host() and
* mmc_add_host()
*/
ctx = devm_kzalloc(&host->class_dev, sizeof(*ctx) + 2 * len,
GFP_KERNEL);
if (ctx) {
ctx->ro_label = ctx->cd_label + len;
snprintf(ctx->cd_label, len, "%s cd", dev_name(host->parent));
snprintf(ctx->ro_label, len, "%s ro", dev_name(host->parent));
ctx->cd_gpio = -EINVAL;
ctx->ro_gpio = -EINVAL;
host->slot.handler_priv = ctx;
}
}
mutex_unlock(&host->slot.lock);
return ctx ? 0 : -ENOMEM;
}
/*
 * Report the write-protect state via the RO GPIO, polarity-corrected.
 * Returns 1 (read-only) / 0 (writable), or -ENOSYS when no RO GPIO is
 * configured.
 */
int mmc_gpio_get_ro(struct mmc_host *host)
{
	struct mmc_gpio *ctx = host->slot.handler_priv;
	int raw, active_high;

	if (!ctx || !gpio_is_valid(ctx->ro_gpio))
		return -ENOSYS;

	raw = !gpio_get_value_cansleep(ctx->ro_gpio);
	active_high = !!(host->caps2 & MMC_CAP2_RO_ACTIVE_HIGH);
	return raw ^ active_high;
}
EXPORT_SYMBOL(mmc_gpio_get_ro);
/*
 * Report the card-detect state via the CD GPIO, polarity-corrected.
 * Returns 1 (card present) / 0 (absent), or -ENOSYS when no CD GPIO is
 * configured.
 */
int mmc_gpio_get_cd(struct mmc_host *host)
{
	struct mmc_gpio *ctx = host->slot.handler_priv;
	int raw, active_high;

	if (!ctx || !gpio_is_valid(ctx->cd_gpio))
		return -ENOSYS;

	raw = !gpio_get_value_cansleep(ctx->cd_gpio);
	active_high = !!(host->caps2 & MMC_CAP2_CD_ACTIVE_HIGH);
	return raw ^ active_high;
}
EXPORT_SYMBOL(mmc_gpio_get_cd);
/**
 * mmc_gpio_request_ro - request a gpio for write-protection
 * @host: mmc host
 * @gpio: gpio number requested
 *
 * Because devm_* managed functions are used here, clients normally do
 * not need to call mmc_gpio_free_ro() themselves: the GPIO is released
 * automatically at unbind.  Only drivers that switch write-protection
 * handling at runtime need to pair this with mmc_gpio_free_ro().
 *
 * Returns zero on success, else an error.
 */
int mmc_gpio_request_ro(struct mmc_host *host, unsigned int gpio)
{
	struct mmc_gpio *ctx;
	int ret;

	if (!gpio_is_valid(gpio))
		return -EINVAL;

	ret = mmc_gpio_alloc(host);
	if (ret < 0)
		return ret;

	ctx = host->slot.handler_priv;

	ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
				    ctx->ro_label);
	if (ret < 0)
		return ret;

	ctx->ro_gpio = gpio;

	return 0;
}
EXPORT_SYMBOL(mmc_gpio_request_ro);
/**
 * mmc_gpio_request_cd - request a gpio for card-detection
 * @host: mmc host
 * @gpio: gpio number requested
 *
 * As devm_* managed functions are used in mmc_gpio_request_cd(), client
 * drivers do not need to explicitly call mmc_gpio_free_cd() for freeing up,
 * if the requesting and freeing are only needed at probing and unbinding time
 * for once. However, if client drivers do something special like runtime
 * switching for card-detection, they are responsible for calling
 * mmc_gpio_request_cd() and mmc_gpio_free_cd() as a pair on their own.
 *
 * Returns zero on success, else an error.
 */
int mmc_gpio_request_cd(struct mmc_host *host, unsigned int gpio)
{
	struct mmc_gpio *ctx;
	int irq = gpio_to_irq(gpio);
	int ret;

	ret = mmc_gpio_alloc(host);
	if (ret < 0)
		return ret;

	ctx = host->slot.handler_priv;

	ret = devm_gpio_request_one(&host->class_dev, gpio, GPIOF_DIR_IN,
				    ctx->cd_label);
	if (ret < 0)
		/*
		 * don't bother freeing memory. It might still get used by other
		 * slot functions, in any case it will be freed, when the device
		 * is destroyed.
		 */
		return ret;

	/*
	 * Even if gpio_to_irq() returns a valid IRQ number, the platform might
	 * still prefer to poll, e.g., because that IRQ number is already used
	 * by another unit and cannot be shared.
	 */
	if (irq >= 0 && host->caps & MMC_CAP_NEEDS_POLL)
		irq = -EINVAL;

	/* the GPIO must be recorded before the status can be read */
	ctx->cd_gpio = gpio;

	ret = mmc_gpio_get_status(host);
	if (ret < 0)
		return ret;
	ctx->status = ret;

	if (irq >= 0) {
		ret = devm_request_threaded_irq(&host->class_dev, irq,
			NULL, mmc_gpio_cd_irqt,
			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
			ctx->cd_label, host);
		if (ret < 0)
			irq = ret;
	}

	/*
	 * Fix: record the IRQ only after it has actually been requested.
	 * The original code set host->slot.cd_irq before requesting the
	 * IRQ (and before the status read), so any failure on those steps
	 * left a stale positive cd_irq behind and mmc_gpio_free_cd()
	 * would then devm_free_irq() an IRQ that was never requested.
	 */
	host->slot.cd_irq = irq;

	if (irq < 0)
		host->caps |= MMC_CAP_NEEDS_POLL;

	return 0;
}
EXPORT_SYMBOL(mmc_gpio_request_cd);
/**
 * mmc_gpio_free_ro - free the write-protection gpio
 * @host: mmc host
 *
 * Only needed when a client driver must release the write-protect GPIO
 * obtained via mmc_gpio_request_ro() before unbind; otherwise the devm
 * machinery frees it automatically.
 */
void mmc_gpio_free_ro(struct mmc_host *host)
{
	struct mmc_gpio *ctx = host->slot.handler_priv;
	int gpio;

	if (!ctx)
		return;
	if (!gpio_is_valid(ctx->ro_gpio))
		return;

	gpio = ctx->ro_gpio;
	ctx->ro_gpio = -EINVAL;		/* invalidate before releasing */

	devm_gpio_free(&host->class_dev, gpio);
}
EXPORT_SYMBOL(mmc_gpio_free_ro);
/**
 * mmc_gpio_free_cd - free the card-detection gpio
 * @host: mmc host
 *
 * Only needed when a client driver must release the card-detect GPIO
 * (and its IRQ, if one was requested) obtained via
 * mmc_gpio_request_cd() before unbind; otherwise the devm machinery
 * frees them automatically.
 */
void mmc_gpio_free_cd(struct mmc_host *host)
{
	struct mmc_gpio *ctx = host->slot.handler_priv;
	int gpio;

	if (!ctx)
		return;
	if (!gpio_is_valid(ctx->cd_gpio))
		return;

	/* release the IRQ first, then the GPIO it was attached to */
	if (host->slot.cd_irq >= 0) {
		devm_free_irq(&host->class_dev, host->slot.cd_irq, host);
		host->slot.cd_irq = -EINVAL;
	}

	gpio = ctx->cd_gpio;
	ctx->cd_gpio = -EINVAL;		/* invalidate before releasing */

	devm_gpio_free(&host->class_dev, gpio);
}
EXPORT_SYMBOL(mmc_gpio_free_cd);
| gpl-2.0 |
X-ROM/android_kernel_motorola_msm8226 | drivers/gpu/msm/adreno.c | 42 | 92093 | /* Copyright (c) 2002,2007-2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/ioctl.h>
#include <linux/sched.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/delay.h>
#include <linux/of_coresight.h>
#include <linux/input.h>
#include <mach/socinfo.h>
#include <mach/msm_bus_board.h>
#include <mach/msm_bus.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_cffdump.h"
#include "kgsl_sharedmem.h"
#include "kgsl_iommu.h"
#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_trace.h"
#include "a2xx_reg.h"
#include "a3xx_reg.h"
/* Driver API version reported to userspace */
#define DRIVER_VERSION_MAJOR   3
#define DRIVER_VERSION_MINOR   1

/* Number of times to try hard reset */
#define NUM_TIMES_RESET_RETRY 5

/* Adreno MH arbiter config*/
#define ADRENO_CFG_MHARB \
	(0x10 \
		| (0 << MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT) \
		| (0 << MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT) \
		| (0 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) \
		| (0x8 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT) \
		| (1 << MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT))

/* Per-client MMU behavior bits for the memory hub.  MMU_CONFIG itself
 * is supplied by the driver headers. */
#define ADRENO_MMU_CONFIG \
	(0x01 \
	 | (MMU_CONFIG << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \
	 | (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))

/* Default verbosity for each KGSL log channel below */
#define KGSL_LOG_LEVEL_DEFAULT 3
/* Forward declarations for the deferred-work handlers defined below */
static void adreno_start_work(struct work_struct *work);
static void adreno_input_work(struct work_struct *work);

/*
 * The default values for the simpleondemand governor are 90 and 5,
 * we use different values here.
 * They have to be tuned and compare with the tz governor anyway.
 */
static struct devfreq_simple_ondemand_data adreno_ondemand_data = {
	.upthreshold = 80,
	.downdifferential = 20,
};

/* Tunables handed to the msm-adreno-tz devfreq governor */
static struct devfreq_msm_adreno_tz_data adreno_tz_data = {
	.bus = {
		.max = 450,
	},
	.device_id = KGSL_DEVICE_3D0,
};

/* devfreq governors selectable for the GPU, with their private data */
static const struct devfreq_governor_data adreno_governors[] = {
	{ .name = "simple_ondemand", .data = &adreno_ondemand_data },
	{ .name = "msm-adreno-tz", .data = &adreno_tz_data },
};
static const struct kgsl_functable adreno_functable;

/*
 * Static definition of the primary (3D0) GPU device: power scaling,
 * memory-hub/MMU configuration and logging defaults.  Fields not
 * listed are zero-initialized.
 */
static struct adreno_device device_3d0 = {
	.dev = {
		KGSL_DEVICE_COMMON_INIT(device_3d0.dev),
		.pwrscale = KGSL_PWRSCALE_INIT(adreno_governors,
			ARRAY_SIZE(adreno_governors)),
		.name = DEVICE_3D0_NAME,
		.id = KGSL_DEVICE_3D0,
		.mh = {
			.mharb = ADRENO_CFG_MHARB,
			/* Remove 1k boundary check in z470 to avoid a GPU
			 * hang.  Notice that this solution won't work if
			 * both EBI and SMI are used
			 */
			.mh_intf_cfg1 = 0x00032f07,
			/* turn off memory protection unit by setting
			   acceptable physical address range to include
			   all pages. */
			.mpu_base = 0x00000000,
			.mpu_range = 0xFFFFF000,
		},
		.mmu = {
			.config = ADRENO_MMU_CONFIG,
		},
		.pwrctrl = {
			.irq_name = KGSL_3D0_IRQ,
		},
		.iomemname = KGSL_3D0_REG_MEMORY,
		.shadermemname = KGSL_3D0_SHADER_MEMORY,
		.ftbl = &adreno_functable,
		/* per-subsystem log verbosity */
		.cmd_log = KGSL_LOG_LEVEL_DEFAULT,
		.ctxt_log = KGSL_LOG_LEVEL_DEFAULT,
		.drv_log = KGSL_LOG_LEVEL_DEFAULT,
		.mem_log = KGSL_LOG_LEVEL_DEFAULT,
		.pwr_log = KGSL_LOG_LEVEL_DEFAULT,
		.pm_dump_enable = 0,
	},
	.gmem_base = 0,
	.gmem_size = SZ_256K,
	/* firmware images are loaded on demand */
	.pfp_fw = NULL,
	.pm4_fw = NULL,
	.wait_timeout = 0, /* in milliseconds, 0 means disabled */
	.ib_check_level = 0,
	/* fault-tolerance policy defaults */
	.ft_policy = KGSL_FT_DEFAULT_POLICY,
	.ft_pf_policy = KGSL_FT_PAGEFAULT_DEFAULT_POLICY,
	.fast_hang_detect = 1,
	.long_ib_detect = 1,
	.start_work = __WORK_INITIALIZER(device_3d0.start_work,
		adreno_start_work),
	.input_work = __WORK_INITIALIZER(device_3d0.input_work,
		adreno_input_work),
};
/* Register values sampled for fault-tolerance hang detection */
unsigned int ft_detect_regs[FT_DETECT_REGS_COUNT];

/* Workqueue used by the adreno driver for deferred work */
static struct workqueue_struct *adreno_wq;

/*
 * This is the master list of all GPU cores that are supported by this
 * driver.
 */

/* ANY_ID is a wildcard for the core/major/minor/patchid match fields */
#define ANY_ID (~0)
/* NO_VER marks a microcode feature-version field as "not required" */
#define NO_VER (~0)

static const struct {
	enum adreno_gpurev gpurev;
	unsigned int core, major, minor, patchid;
	const char *pm4fw;
	const char *pfpfw;
	struct adreno_gpudev *gpudev;
	unsigned int istore_size;
	unsigned int pix_shader_start;
	/* Size of an instruction in dwords */
	unsigned int instruction_size;
	/* size of gmem for gpu*/
	unsigned int gmem_size;
	/* version of pm4 microcode that supports sync_lock
	   between CPU and GPU for IOMMU-v0 programming */
	unsigned int sync_lock_pm4_ver;
	/* version of pfp microcode that supports sync_lock
	   between CPU and GPU for IOMMU-v0 programming */
	unsigned int sync_lock_pfp_ver;
	/* PM4 jump table index */
	unsigned int pm4_jt_idx;
	/* PM4 jump table load addr */
	unsigned int pm4_jt_addr;
	/* PFP jump table index */
	unsigned int pfp_jt_idx;
	/* PFP jump table load addr */
	unsigned int pfp_jt_addr;
	/* PM4 bootstrap loader size */
	unsigned int pm4_bstrp_size;
	/* PFP bootstrap loader size */
	unsigned int pfp_bstrp_size;
	/* PFP bootstrap loader supported version */
	unsigned int pfp_bstrp_ver;
} adreno_gpulist[] = {
	{ ADRENO_REV_A200, 0, 2, ANY_ID, ANY_ID,
		"yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
		512, 384, 3, SZ_256K, NO_VER, NO_VER },
	{ ADRENO_REV_A203, 0, 1, 1, ANY_ID,
		"yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
		512, 384, 3, SZ_256K, NO_VER, NO_VER },
	{ ADRENO_REV_A205, 0, 1, 0, ANY_ID,
		"yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev,
		512, 384, 3, SZ_256K, NO_VER, NO_VER },
	{ ADRENO_REV_A220, 2, 1, ANY_ID, ANY_ID,
		"leia_pm4_470.fw", "leia_pfp_470.fw", &adreno_a2xx_gpudev,
		512, 384, 3, SZ_512K, NO_VER, NO_VER },
	/*
	 * patchlevel 5 (8960v2) needs special pm4 firmware to work around
	 * a hardware problem.
	 */
	{ ADRENO_REV_A225, 2, 2, 0, 5,
		"a225p5_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
		1536, 768, 3, SZ_512K, NO_VER, NO_VER },
	{ ADRENO_REV_A225, 2, 2, 0, 6,
		"a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
		1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
	{ ADRENO_REV_A225, 2, 2, ANY_ID, ANY_ID,
		"a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev,
		1536, 768, 3, SZ_512K, 0x225011, 0x225002 },
	/* A3XX doesn't use the pix_shader_start */
	{ ADRENO_REV_A305, 3, 0, 5, 0,
		"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
		512, 0, 2, SZ_256K, 0x3FF037, 0x3FF016 },
	/* A3XX doesn't use the pix_shader_start */
	{ ADRENO_REV_A320, 3, 2, ANY_ID, ANY_ID,
		"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
		512, 0, 2, SZ_512K, 0x3FF037, 0x3FF016 },
	{ ADRENO_REV_A330, 3, 3, 0, ANY_ID,
		"a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
		512, 0, 2, SZ_1M, NO_VER, NO_VER, 0x8AD, 0x2E4, 0x201, 0x200,
		0x6, 0x20, 0x330020 },
	{ ADRENO_REV_A305B, 3, 0, 5, 0x10,
		"a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
		512, 0, 2, SZ_128K, NO_VER, NO_VER, 0x8AD, 0x2E4,
		0x201, 0x200 },
	/* 8226v2 */
	{ ADRENO_REV_A305B, 3, 0, 5, 0x12,
		"a330_pm4.fw", "a330_pfp.fw", &adreno_a3xx_gpudev,
		512, 0, 2, SZ_128K, NO_VER, NO_VER, 0x8AD, 0x2E4,
		0x201, 0x200 },
	{ ADRENO_REV_A305C, 3, 0, 5, 0x20,
		"a300_pm4.fw", "a300_pfp.fw", &adreno_a3xx_gpudev,
		512, 0, 2, SZ_128K, 0x3FF037, 0x3FF016 },
};
/* Nice level for the higher priority GPU start thread */
/*
 * NOTE(review): -7 is stored in an unsigned int here.  On a 32-bit
 * build converting it back to int/long recovers -7, but passing the
 * raw unsigned value to a 64-bit signed parameter (e.g. set_user_nice)
 * would yield a huge positive number instead -- confirm the consumer
 * casts it back to a signed type before use.
 */
static unsigned int _wake_nice = -7;

/* Number of milliseconds to stay active after a wake on touch */
static unsigned int _wake_timeout = 100;
/*
 * Workqueue callback that actually powers the GPU up after a touch
 * event.  kgsl_pwrctrl_wake() is used without active_count protection
 * so no extra state needs tracking: either a draw command arrives soon
 * or the idle timer drops the GPU back into slumber.
 */
static void adreno_input_work(struct work_struct *work)
{
	struct adreno_device *adreno_dev = container_of(work,
			struct adreno_device, input_work);
	struct kgsl_device *dev = &adreno_dev->dev;

	mutex_lock(&dev->mutex);

	dev->flags |= KGSL_FLAG_WAKE_ON_TOUCH;

	/*
	 * We already run on a workqueue, which is sufficient priority, so
	 * there is no need to hand adreno_start to a high-priority queue.
	 */
	kgsl_pwrctrl_wake(dev, 0);

	/*
	 * Keep the GPU awake long enough for the user to actually submit
	 * a draw command: the default idle timeout is shorter than we
	 * want for this wake-on-touch case, so push the timer out.
	 */
	mod_timer(&dev->idle_timer,
		jiffies + msecs_to_jiffies(_wake_timeout));

	mutex_unlock(&dev->mutex);
}
/*
 * Input-event hook.  Only EV_ABS (touchscreen) events are of interest;
 * schedule the wakeup work when one arrives while the GPU sleeps.
 */
static void adreno_input_event(struct input_handle *handle, unsigned int type,
		unsigned int code, int value)
{
	struct kgsl_device *device = handle->handler->private;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/*
	 * Queue the work only when all of these hold: the event is EV_ABS,
	 * an IB has been processed since the last wake-on-touch (i.e. the
	 * KGSL_FLAG_WAKE_ON_TOUCH flag is clear), and we are in slumber.
	 */
	if (type != EV_ABS)
		return;
	if (device->flags & KGSL_FLAG_WAKE_ON_TOUCH)
		return;
	if (device->state == KGSL_STATE_SLUMBER)
		schedule_work(&adreno_dev->input_work);
}
/*
 * Attach our input handler to a matching input device: allocate a
 * handle, register it and open the device, unwinding on failure.
 */
static int adreno_input_connect(struct input_handler *handler,
		struct input_dev *dev, const struct input_device_id *id)
{
	struct input_handle *handle;
	int ret;

	handle = kzalloc(sizeof(*handle), GFP_KERNEL);
	if (handle == NULL)
		return -ENOMEM;

	handle->dev = dev;
	handle->handler = handler;
	handle->name = handler->name;

	ret = input_register_handle(handle);
	if (ret)
		goto err_free;

	ret = input_open_device(handle);
	if (ret)
		goto err_unregister;

	return 0;

err_unregister:
	input_unregister_handle(handle);
err_free:
	kfree(handle);
	return ret;
}
/* Undo everything adreno_input_connect() did for this handle. */
static void adreno_input_disconnect(struct input_handle *handle)
{
	input_close_device(handle);
	input_unregister_handle(handle);
	kfree(handle);
}
/*
 * We are only interested in EV_ABS events so only register handlers for those
 * input devices that have EV_ABS events
 */
static const struct input_device_id adreno_input_ids[] = {
	{
		.flags = INPUT_DEVICE_ID_MATCH_EVBIT,
		.evbit = { BIT_MASK(EV_ABS) },
	},
	{ },	/* terminator */
};

/* Input handler used to wake the GPU on touchscreen activity */
static struct input_handler adreno_input_handler = {
	.event = adreno_input_event,
	.connect = adreno_input_connect,
	.disconnect = adreno_input_disconnect,
	.name = "kgsl",
	.id_table = adreno_input_ids,
};
/**
 * adreno_perfcounter_init: Reserve kernel performance counters
 * @device: device to configure
 *
 * The kernel needs/wants a certain group of performance counters for
 * its own activities. Reserve these performance counters at init time
 * to ensure that they are always reserved for the kernel. The performance
 * counters used by the kernel can be obtained by the user, but these
 * performance counters will remain active as long as the device is alive.
 *
 * Returns 0 on success or when the GPU core has no init hook, else the
 * hook's error code.
 */
static int adreno_perfcounter_init(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (adreno_dev->gpudev->perfcounter_init)
		return adreno_dev->gpudev->perfcounter_init(adreno_dev);

	return 0;
}
/* (a stray ';' after the closing brace was removed: an empty top-level
 * declaration is invalid ISO C and triggers -pedantic warnings) */
/**
 * adreno_perfcounter_close: Release counters initialized by
 * adreno_perfcounter_init
 * @device: device to release counters for
 *
 * Fix: the original used "return <void call>;" inside this void
 * function, which violates the C99 6.8.6.4 constraint (a return with
 * an expression shall not appear in a function returning void); the
 * call is now a plain statement.
 */
static void adreno_perfcounter_close(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	if (adreno_dev->gpudev->perfcounter_close)
		adreno_dev->gpudev->perfcounter_close(adreno_dev);
}
/**
 * adreno_perfcounter_start: Enable performance counters
 * @adreno_dev: Adreno device to configure
 *
 * Re-enable every allocated performance counter.  Since the device was
 * most likely stopped, the hardware state of the counters cannot be
 * trusted, so program each assigned countable again.
 * Returns 0 on success else error code
 */
static int adreno_perfcounter_start(struct adreno_device *adreno_dev)
{
	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
	struct adreno_perfcount_group *group;
	unsigned int grp, reg;
	int ret = 0;

	if (counters == NULL)
		return 0;

	for (grp = 0; grp < counters->group_count; grp++) {
		group = &counters->groups[grp];

		for (reg = 0; reg < group->reg_count; reg++) {
			unsigned int countable = group->regs[reg].countable;

			/* skip slots with nothing assigned */
			if (countable == KGSL_PERFCOUNTER_NOT_USED ||
					countable == KGSL_PERFCOUNTER_BROKEN)
				continue;

			if (adreno_dev->gpudev->perfcounter_enable) {
				ret = adreno_dev->gpudev->perfcounter_enable(
					adreno_dev, grp, reg, countable);
				if (ret)
					return ret;
			}
		}
	}

	return ret;
}
/**
 * adreno_perfcounter_read_group() - Read a batch of performance counters
 * @adreno_dev: Adreno device to configure
 * @reads: Userspace list of kgsl_perfcounter_read_groups
 * @count: Length of list (1..100)
 *
 * For each groupid/countable pair, read the 64 bit counter value and
 * copy the results back to userspace.  Returns 0 on success, or
 * -EINVAL / -ENOMEM / -EFAULT on bad parameters, allocation failure or
 * copy failure respectively.
 */
int adreno_perfcounter_read_group(struct adreno_device *adreno_dev,
	struct kgsl_perfcounter_read_group __user *reads, unsigned int count)
{
	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
	struct adreno_perfcount_group *group;
	struct kgsl_perfcounter_read_group *list = NULL;
	unsigned int i, j;
	int ret = 0;

	if (counters == NULL)
		return -EINVAL;

	/* reading requires a backend hook */
	if (!adreno_dev->gpudev->perfcounter_read)
		return -EINVAL;

	/* reject absurd request sizes up front */
	if (reads == NULL || count == 0 || count > 100)
		return -EINVAL;

	list = kmalloc(sizeof(struct kgsl_perfcounter_read_group) * count,
			GFP_KERNEL);
	if (list == NULL)
		return -ENOMEM;

	if (copy_from_user(list, reads,
			sizeof(struct kgsl_perfcounter_read_group) * count)) {
		ret = -EFAULT;
		goto done;
	}

	for (j = 0; j < count; j++) {
		list[j].value = 0;

		/* Verify that the group ID is within range */
		if (list[j].groupid >= counters->group_count) {
			ret = -EINVAL;
			goto done;
		}
		group = &counters->groups[list[j].groupid];

		/* find the counter holding the requested countable */
		for (i = 0; i < group->reg_count; i++) {
			if (group->regs[i].countable == list[j].countable) {
				list[j].value =
					adreno_dev->gpudev->perfcounter_read(
					adreno_dev, list[j].groupid, i);
				break;
			}
		}
	}

	/* hand the results back */
	if (copy_to_user(reads, list,
			sizeof(struct kgsl_perfcounter_read_group) *
			count) != 0)
		ret = -EFAULT;

done:
	kfree(list);
	return ret;
}
/**
 * adreno_perfcounter_get_groupid() - Look up a perfcounter group by name
 * @adreno_dev: Adreno device
 * @name: Performance counter group name string
 *
 * Returns the group's index, or -EINVAL if the name is NULL, no
 * counters exist, or no group matches.
 */
int adreno_perfcounter_get_groupid(struct adreno_device *adreno_dev,
	const char *name)
{
	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
	int i;

	if (name == NULL || counters == NULL)
		return -EINVAL;

	for (i = 0; i < counters->group_count; i++)
		if (strcmp(counters->groups[i].name, name) == 0)
			return i;

	return -EINVAL;
}
/**
 * adreno_perfcounter_get_name() - Look up a perfcounter group's name
 * @adreno_dev: Adreno device
 * @groupid: Desired performance counter groupid
 *
 * Returns the group's name string, or NULL if no counters exist or the
 * id is out of range.
 */
const char *adreno_perfcounter_get_name(struct adreno_device *adreno_dev,
		unsigned int groupid)
{
	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;

	if (counters != NULL && groupid < counters->group_count)
		return counters->groups[groupid].name;

	return NULL;
}
/**
 * adreno_perfcounter_query_group: Determine which countables are in counters
 * @adreno_dev: Adreno device to configure
 * @groupid: Desired performance counter group
 * @countables: Userspace array receiving the assigned countables
 * @count: Max length of the array
 * @max_counters: out: number of counters in the group
 *
 * Reports the group's counter count in *max_counters and, when
 * @countables is non-NULL and @count non-zero, copies up to @count
 * currently-assigned countable values to userspace.
 * Returns 0 on success, -EINVAL on a bad group, -EFAULT on copy failure.
 */
int adreno_perfcounter_query_group(struct adreno_device *adreno_dev,
	unsigned int groupid, unsigned int *countables, unsigned int count,
	unsigned int *max_counters)
{
	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
	struct adreno_perfcount_group *group;
	unsigned int i, limit;

	*max_counters = 0;

	if (counters == NULL || groupid >= counters->group_count)
		return -EINVAL;

	group = &counters->groups[groupid];
	*max_counters = group->reg_count;

	/* NULL list or zero count means "just report the size" */
	if (countables == NULL || count == 0)
		return 0;

	/* copy out at most min(count, reg_count) countable values */
	limit = (group->reg_count < count) ? group->reg_count : count;
	for (i = 0; i < limit; i++) {
		if (copy_to_user(&countables[i], &group->regs[i].countable,
				sizeof(unsigned int)) != 0)
			return -EFAULT;
	}

	return 0;
}
/*
 * refcount_group() - bump the kernel or user refcount on an already
 * assigned counter register, and optionally report its offset.
 * @group: perfcounter group containing the register
 * @reg: index of the register within the group
 * @flags: PERFCOUNTER_FLAG_KERNEL selects the kernel refcount
 * @lo: if non-NULL, receives the register's offset
 */
static inline void refcount_group(struct adreno_perfcount_group *group,
	unsigned int reg, unsigned int flags, unsigned int *lo)
{
	if (flags & PERFCOUNTER_FLAG_KERNEL)
		group->regs[reg].kernelcount++;
	else
		group->regs[reg].usercount++;

	if (lo)
		*lo = group->regs[reg].offset;
}
/**
 * adreno_perfcounter_get: Try to put a countable in an available counter
 * @adreno_dev: Adreno device to configure
 * @groupid: Desired performance counter group
 * @countable: Countable desired to be in a counter
 * @offset: Return offset of the countable
 * @flags: Used to setup kernel perf counters
 *
 * Try to place a countable in an available counter. If the countable is
 * already in a counter, reference count the counter/countable pair resource
 * and return success.  Returns 0 on success, -EINVAL on bad parameters,
 * -EBUSY when no counter is free, or the enable hook's error code.
 */
int adreno_perfcounter_get(struct adreno_device *adreno_dev,
	unsigned int groupid, unsigned int countable, unsigned int *offset,
	unsigned int flags)
{
	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
	struct adreno_perfcount_group *group;
	/* index of a free slot; -1 (as unsigned) means none found yet */
	unsigned int empty = -1;
	int ret = 0;

	/* always clear return variables */
	if (offset)
		*offset = 0;

	if (NULL == counters)
		return -EINVAL;

	if (groupid >= counters->group_count)
		return -EINVAL;

	group = &(counters->groups[groupid]);

	if (group->flags & ADRENO_PERFCOUNTER_GROUP_FIXED) {
		/*
		 * In fixed groups the countable equals the fixed register the
		 * user wants. First make sure it is in range
		 */

		if (countable >= group->reg_count)
			return -EINVAL;

		/* If it is already reserved, just increase the refcounts */
		if ((group->regs[countable].kernelcount != 0) ||
			(group->regs[countable].usercount != 0)) {
				refcount_group(group, countable, flags, offset);
				return 0;
		}

		empty = countable;
	} else {
		unsigned int i;

		/*
		 * Check if the countable is already associated with a counter.
		 * Refcount and return the offset, otherwise, try and find an
		 * empty counter and assign the countable to it.
		 */
		for (i = 0; i < group->reg_count; i++) {
			if (group->regs[i].countable == countable) {
				refcount_group(group, i, flags, offset);
				return 0;
			} else if (group->regs[i].countable ==
			KGSL_PERFCOUNTER_NOT_USED) {
				/* keep track of unused counter */
				empty = i;
			}
		}
	}

	/* no available counters, so do nothing else */
	if (empty == -1)
		return -EBUSY;

	/* enable the new counter */
	ret = adreno_dev->gpudev->perfcounter_enable(adreno_dev, groupid, empty,
		countable);
	if (ret)
		return ret;
	/* initialize the new counter */
	group->regs[empty].countable = countable;

	/* set initial kernel and user count */
	if (flags & PERFCOUNTER_FLAG_KERNEL) {
		group->regs[empty].kernelcount = 1;
		group->regs[empty].usercount = 0;
	} else {
		group->regs[empty].kernelcount = 0;
		group->regs[empty].usercount = 1;
	}

	if (offset)
		*offset = group->regs[empty].offset;

	return ret;
}
/**
 * adreno_perfcounter_put: Release a countable from counter resource
 * @adreno_dev: Adreno device to configure
 * @groupid: Desired performance counter group
 * @countable: Countable desired to be freed from a counter
 * @flags: Flag to determine if kernel or user space request
 *
 * Drop one reference on a previously obtained counter/countable pair;
 * once nobody references the pair any more, the counter slot is marked
 * free for reuse.  Returns 0 on success, -EINVAL if the pair is not
 * currently assigned.
 */
int adreno_perfcounter_put(struct adreno_device *adreno_dev,
	unsigned int groupid, unsigned int countable, unsigned int flags)
{
	struct adreno_perfcounters *counters = adreno_dev->gpudev->perfcounters;
	struct adreno_perfcount_group *group;
	unsigned int reg;

	if (counters == NULL || groupid >= counters->group_count)
		return -EINVAL;

	group = &counters->groups[groupid];

	/* scan the group's registers for the requested countable */
	for (reg = 0; reg < group->reg_count; reg++) {
		if (group->regs[reg].countable != countable)
			continue;

		/* drop the refcount matching the request type */
		if ((flags & PERFCOUNTER_FLAG_KERNEL) &&
				group->regs[reg].kernelcount > 0)
			group->regs[reg].kernelcount--;
		else if (group->regs[reg].usercount > 0)
			group->regs[reg].usercount--;
		else
			break;

		/* mark the slot available once nobody references it */
		if (group->regs[reg].kernelcount == 0 &&
				group->regs[reg].usercount == 0)
			group->regs[reg].countable =
				KGSL_PERFCOUNTER_NOT_USED;

		return 0;
	}

	return -EINVAL;
}
/**
* adreno_perfcounter_restore() - Restore performance counters
* @adreno_dev: adreno device to configure
*
* Load the physical performance counters with 64 bit value which are
* saved on GPU power collapse.
*/
static inline void adreno_perfcounter_restore(struct adreno_device *adreno_dev)
{
if (adreno_dev->gpudev->perfcounter_restore)
adreno_dev->gpudev->perfcounter_restore(adreno_dev);
}
/**
* adreno_perfcounter_save() - Save performance counters
* @adreno_dev: adreno device to configure
*
* Save the performance counter values before GPU power collapse.
* The saved values are restored on restart.
* This ensures physical counters are coherent across power-collapse.
*/
static inline void adreno_perfcounter_save(struct adreno_device *adreno_dev)
{
if (adreno_dev->gpudev->perfcounter_save)
adreno_dev->gpudev->perfcounter_save(adreno_dev);
}
/* Top level IRQ entry point - hand off to the core specific handler */
static irqreturn_t adreno_irq_handler(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	return adreno_dev->gpudev->irq_handler(adreno_dev);
}
/* Tear down every global mapping created by adreno_setup_pt() */
static void adreno_cleanup_pt(struct kgsl_device *device,
		struct kgsl_pagetable *pagetable)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	kgsl_mmu_unmap(pagetable, &adreno_dev->ringbuffer.buffer_desc);
	kgsl_mmu_unmap(pagetable, &device->memstore);
	kgsl_mmu_unmap(pagetable, &adreno_dev->pwron_fixup);
	kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
	kgsl_mmu_unmap(pagetable, &adreno_dev->profile.shared_buffer);
}
/*
 * Map the global buffers the GPU always needs into @pagetable.  On any
 * failure every mapping is undone and the error is returned.
 */
static int adreno_setup_pt(struct kgsl_device *device,
		struct kgsl_pagetable *pagetable)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	/*
	 * ALERT: Order of these mapping is important to
	 * Keep the most used entries like memstore
	 * and mmu setstate memory by TLB prefetcher.
	 */
	struct kgsl_memdesc *mappings[] = {
		&adreno_dev->ringbuffer.buffer_desc,
		&device->memstore,
		&adreno_dev->pwron_fixup,
		&device->mmu.setstate_memory,
		&adreno_dev->profile.shared_buffer,
	};
	unsigned int i;
	int result = 0;

	for (i = 0; i < ARRAY_SIZE(mappings); i++) {
		result = kgsl_mmu_map_global(pagetable, mappings[i]);
		if (result) {
			/* On error clean up what we have wrought */
			adreno_cleanup_pt(device, pagetable);
			return result;
		}
	}

	/*
	 * Set the mpu end to the last "normal" global memory we use.
	 * For the IOMMU, this will be used to restrict access to the
	 * mapped registers.
	 */
	device->mh.mpu_range = adreno_dev->profile.shared_buffer.gpuaddr +
				adreno_dev->profile.shared_buffer.size;

	return 0;
}
/*
 * _adreno_iommu_setstate_v0() - Build IOMMUv0 pagetable switch commands
 * @device: KGSL device pointer
 * @cmds_orig: Start of the command buffer to fill
 * @pt_val: Physical base address of the new pagetable
 * @num_iommu_units: Number of IOMMU units to program
 * @flags: KGSL_MMUFLAGS_PTUPDATE and/or KGSL_MMUFLAGS_TLBFLUSH
 *
 * Emit CP packets that make the GPU itself rewrite TTBR0 and/or flush
 * the TLB on every IOMMU unit, bracketed by the GPU-CPU sync lock.
 * Returns the number of dwords written starting at @cmds_orig.
 */
static unsigned int _adreno_iommu_setstate_v0(struct kgsl_device *device,
unsigned int *cmds_orig,
phys_addr_t pt_val,
int num_iommu_units, uint32_t flags)
{
phys_addr_t reg_pt_val;
unsigned int *cmds = cmds_orig;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i;
/* Per-SoC preamble before touching the MMU registers */
if (cpu_is_msm8960())
cmds += adreno_add_change_mh_phys_limit_cmds(cmds, 0xFFFFF000,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
else
cmds += adreno_add_bank_change_cmds(cmds,
KGSL_IOMMU_CONTEXT_USER,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
cmds += adreno_add_idle_cmds(adreno_dev, cmds);
/* Acquire GPU-CPU sync Lock here */
cmds += kgsl_mmu_sync_lock(&device->mmu, cmds);
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
/*
 * We need to perform the following operations for all
 * IOMMU units
 */
for (i = 0; i < num_iommu_units; i++) {
/* Splice the new pagetable address into the default TTBR0 value */
reg_pt_val = kgsl_mmu_get_default_ttbr0(&device->mmu,
i, KGSL_IOMMU_CONTEXT_USER);
reg_pt_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
reg_pt_val |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
/*
 * Set address of the new pagetable by writing to IOMMU
 * TTBR0 register
 */
*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
*cmds++ = kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
KGSL_IOMMU_CONTEXT_USER, KGSL_IOMMU_CTX_TTBR0);
*cmds++ = reg_pt_val;
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0x00000000;
/*
 * Read back the ttbr0 register as a barrier to ensure
 * above writes have completed
 */
cmds += adreno_add_read_cmds(device, cmds,
kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
KGSL_IOMMU_CONTEXT_USER, KGSL_IOMMU_CTX_TTBR0),
reg_pt_val,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
}
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/*
 * tlb flush
 */
for (i = 0; i < num_iommu_units; i++) {
reg_pt_val = (pt_val + kgsl_mmu_get_default_ttbr0(
&device->mmu,
i, KGSL_IOMMU_CONTEXT_USER));
reg_pt_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
reg_pt_val |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
/* Writing 1 to TLBIALL invalidates this unit's TLB */
*cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
*cmds++ = kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
KGSL_IOMMU_CONTEXT_USER,
KGSL_IOMMU_CTX_TLBIALL);
*cmds++ = 1;
cmds += __adreno_add_idle_indirect_cmds(cmds,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
/* Read back TTBR0 as a barrier after the flush */
cmds += adreno_add_read_cmds(device, cmds,
kgsl_mmu_get_reg_gpuaddr(&device->mmu, i,
KGSL_IOMMU_CONTEXT_USER,
KGSL_IOMMU_CTX_TTBR0),
reg_pt_val,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
}
}
/* Release GPU-CPU sync Lock here */
cmds += kgsl_mmu_sync_unlock(&device->mmu, cmds);
/* Undo the per-SoC preamble from the top of the function */
if (cpu_is_msm8960())
cmds += adreno_add_change_mh_phys_limit_cmds(cmds,
kgsl_mmu_get_reg_gpuaddr(&device->mmu, 0,
0, KGSL_IOMMU_GLOBAL_BASE),
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
else
cmds += adreno_add_bank_change_cmds(cmds,
KGSL_IOMMU_CONTEXT_PRIV,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
cmds += adreno_add_idle_cmds(adreno_dev, cmds);
/* Number of dwords emitted */
return cmds - cmds_orig;
}
/*
 * _adreno_iommu_setstate_v1() - Build IOMMUv1 pagetable switch commands
 * @device: KGSL device pointer
 * @cmds_orig: Start of the command buffer to fill
 * @pt_val: Physical base address of the new pagetable
 * @num_iommu_units: Number of IOMMU units to program
 * @flags: KGSL_MMUFLAGS_PTUPDATE and/or KGSL_MMUFLAGS_TLBFLUSH
 *
 * Unlike the v0 path, this programs the IOMMU through AHB register
 * writes (type0 packets), optionally halting the SMMU around the TTBR0
 * update when hardware halt is supported.
 * Returns the number of dwords written starting at @cmds_orig.
 */
static unsigned int _adreno_iommu_setstate_v1(struct kgsl_device *device,
unsigned int *cmds_orig,
phys_addr_t pt_val,
int num_iommu_units, uint32_t flags)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
phys_addr_t ttbr0_val;
unsigned int reg_pt_val;
unsigned int *cmds = cmds_orig;
int i;
unsigned int ttbr0, tlbiall, tlbstatus, tlbsync, mmu_ctrl;
cmds += adreno_add_idle_cmds(adreno_dev, cmds);
for (i = 0; i < num_iommu_units; i++) {
/* Splice the new pagetable address into the default TTBR0 value */
ttbr0_val = kgsl_mmu_get_default_ttbr0(&device->mmu,
i, KGSL_IOMMU_CONTEXT_USER);
ttbr0_val &= ~KGSL_IOMMU_CTX_TTBR0_ADDR_MASK;
ttbr0_val |= (pt_val & KGSL_IOMMU_CTX_TTBR0_ADDR_MASK);
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
/* AHB addresses are byte offsets; >> 2 yields the dword register */
mmu_ctrl = kgsl_mmu_get_reg_ahbaddr(
&device->mmu, i,
KGSL_IOMMU_CONTEXT_USER,
KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL) >> 2;
ttbr0 = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
KGSL_IOMMU_CONTEXT_USER,
KGSL_IOMMU_CTX_TTBR0) >> 2;
if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0;
/*
 * glue commands together until next
 * WAIT_FOR_ME
 */
cmds += adreno_wait_reg_eq(cmds,
adreno_getreg(adreno_dev,
ADRENO_REG_CP_WFI_PEND_CTR),
1, 0xFFFFFFFF, 0xF);
/* set the iommu lock bit */
*cmds++ = cp_type3_packet(CP_REG_RMW, 3);
*cmds++ = mmu_ctrl;
/* AND to unmask the lock bit */
*cmds++ =
~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
/* OR to set the IOMMU lock bit */
*cmds++ =
KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT;
/* wait for smmu to lock */
cmds += adreno_wait_reg_eq(cmds, mmu_ctrl,
KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE,
KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_IDLE, 0xF);
}
/* set ttbr0 */
/* A wide phys_addr_t needs two 32-bit register writes */
if (sizeof(phys_addr_t) > sizeof(unsigned long)) {
reg_pt_val = ttbr0_val & 0xFFFFFFFF;
*cmds++ = cp_type0_packet(ttbr0, 1);
*cmds++ = reg_pt_val;
reg_pt_val = (unsigned int)
((ttbr0_val & 0xFFFFFFFF00000000ULL) >> 32);
*cmds++ = cp_type0_packet(ttbr0 + 1, 1);
*cmds++ = reg_pt_val;
} else {
reg_pt_val = ttbr0_val;
*cmds++ = cp_type0_packet(ttbr0, 1);
*cmds++ = reg_pt_val;
}
if (kgsl_mmu_hw_halt_supported(&device->mmu, i)) {
/* unlock the IOMMU lock */
*cmds++ = cp_type3_packet(CP_REG_RMW, 3);
*cmds++ = mmu_ctrl;
/* AND to unmask the lock bit */
*cmds++ =
~(KGSL_IOMMU_IMPLDEF_MICRO_MMU_CTRL_HALT);
/* OR with 0 so lock bit is unset */
*cmds++ = 0;
/* release all commands with wait_for_me */
*cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
*cmds++ = 0;
}
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/* Invalidate the TLB, then poll TLBSTATUS until SACTIVE clears */
tlbiall = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
KGSL_IOMMU_CONTEXT_USER,
KGSL_IOMMU_CTX_TLBIALL) >> 2;
*cmds++ = cp_type0_packet(tlbiall, 1);
*cmds++ = 1;
tlbsync = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
KGSL_IOMMU_CONTEXT_USER,
KGSL_IOMMU_CTX_TLBSYNC) >> 2;
*cmds++ = cp_type0_packet(tlbsync, 1);
*cmds++ = 0;
tlbstatus = kgsl_mmu_get_reg_ahbaddr(&device->mmu, i,
KGSL_IOMMU_CONTEXT_USER,
KGSL_IOMMU_CTX_TLBSTATUS) >> 2;
cmds += adreno_wait_reg_eq(cmds, tlbstatus, 0,
KGSL_IOMMU_CTX_TLBSTATUS_SACTIVE, 0xF);
/* release all commands with wait_for_me */
*cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
*cmds++ = 0;
}
}
cmds += adreno_add_idle_cmds(adreno_dev, cmds);
/* Number of dwords emitted */
return cmds - cmds_orig;
}
/**
* adreno_use_default_setstate() - Use CPU instead of the GPU to manage the mmu?
* @adreno_dev: the device
*
* In many cases it is preferable to poke the iommu or gpummu directly rather
* than using the GPU command stream. If we are idle or trying to go to a low
* power state, using the command stream will be slower and asynchronous, which
* needlessly complicates the power state transitions. Additionally,
* the hardware simulators do not support command stream MMU operations so
* the command stream can never be used if we are capturing CFF data.
*
*/
static bool adreno_use_default_setstate(struct adreno_device *adreno_dev)
{
return (adreno_isidle(&adreno_dev->dev) ||
KGSL_STATE_ACTIVE != adreno_dev->dev.state ||
atomic_read(&adreno_dev->dev.active_cnt) == 0 ||
adreno_dev->dev.cff_dump_enable);
}
/*
 * adreno_iommu_setstate() - Change IOMMU state via the GPU or the CPU
 * @device: KGSL device pointer
 * @context_id: Context used to submit the setstate commands
 * @flags: KGSL_MMUFLAGS_PTUPDATE and/or KGSL_MMUFLAGS_TLBFLUSH
 *
 * Build the version-specific IOMMU command stream and submit it through
 * the ringbuffer; falls back to direct CPU register writes when the GPU
 * is idle/inactive or CFF capture is enabled.
 */
static int adreno_iommu_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
phys_addr_t pt_val;
unsigned int link[230];
unsigned int *cmds = &link[0];
int sizedwords = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int num_iommu_units;
struct kgsl_context *context;
struct adreno_context *adreno_ctx = NULL;
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
/* NOTE(review): result is unsigned but receives negative error codes
 * from kgsl_mmu_enable_clk()/issuecmds(); the value round-trips through
 * the int return type, but a plain int would be clearer */
unsigned int result;
/* Idle/inactive/CFF capture: poke the MMU directly from the CPU */
if (adreno_use_default_setstate(adreno_dev)) {
kgsl_mmu_device_setstate(&device->mmu, flags);
return 0;
}
num_iommu_units = kgsl_mmu_get_num_iommu_units(&device->mmu);
context = kgsl_context_get(device, context_id);
if (!context) {
kgsl_mmu_device_setstate(&device->mmu, flags);
return 0;
}
adreno_ctx = ADRENO_CONTEXT(context);
/* Keep the IOMMU clocks on while the GPU reprograms it */
result = kgsl_mmu_enable_clk(&device->mmu, KGSL_IOMMU_CONTEXT_USER);
if (result)
goto done;
pt_val = kgsl_mmu_get_pt_base_addr(&device->mmu,
device->mmu.hwpagetable);
cmds += __adreno_add_idle_indirect_cmds(cmds,
device->mmu.setstate_memory.gpuaddr +
KGSL_IOMMU_SETSTATE_NOP_OFFSET);
/* Build the commands that match the IOMMU version on this SoC */
if (msm_soc_version_supports_iommu_v0())
cmds += _adreno_iommu_setstate_v0(device, cmds, pt_val,
num_iommu_units, flags);
else
cmds += _adreno_iommu_setstate_v1(device, cmds, pt_val,
num_iommu_units, flags);
sizedwords += (cmds - &link[0]);
if (sizedwords == 0) {
KGSL_DRV_ERR(device, "no commands generated\n");
BUG();
}
/* invalidate all base pointers */
*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x7fff;
sizedwords += 2;
/* NOTE(review): this check runs after link[] has already been written,
 * so it can only detect an overrun, not prevent it */
if (sizedwords > (ARRAY_SIZE(link))) {
KGSL_DRV_ERR(device, "Temp command buffer overflow\n");
BUG();
}
/*
 * This returns the per context timestamp but we need to
 * use the global timestamp for iommu clock disablement
 */
result = adreno_ringbuffer_issuecmds(device, adreno_ctx,
KGSL_CMD_FLAGS_PMODE, &link[0], sizedwords);
/*
 * On error disable the IOMMU clock right away otherwise turn it off
 * after the command has been retired
 */
if (result)
kgsl_mmu_disable_clk(&device->mmu,
KGSL_IOMMU_CONTEXT_USER);
else
kgsl_mmu_disable_clk_on_ts(&device->mmu, rb->global_ts,
KGSL_IOMMU_CONTEXT_USER);
done:
kgsl_context_put(context);
return result;
}
/*
 * adreno_gpummu_setstate() - Change GPUMMU state via the GPU or the CPU
 * @device: KGSL device pointer
 * @context_id: Context used to submit the setstate commands
 * @flags: KGSL_MMUFLAGS_PTUPDATE and/or KGSL_MMUFLAGS_TLBFLUSH
 *
 * Stream MH register writes through the ringbuffer, or apply them
 * directly from the CPU when the GPU is idle/inactive or CFF capture is
 * enabled.
 */
static int adreno_gpummu_setstate(struct kgsl_device *device,
unsigned int context_id,
uint32_t flags)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned int link[32];
unsigned int *cmds = &link[0];
int sizedwords = 0;
unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
struct kgsl_context *context;
struct adreno_context *adreno_ctx = NULL;
int ret = 0;
/*
 * Fix target freeze issue by adding TLB flush for each submit
 * on A20X based targets.
 */
if (adreno_is_a20x(adreno_dev))
flags |= KGSL_MMUFLAGS_TLBFLUSH;
/*
 * If possible, then set the state via the command stream to avoid
 * a CPU idle. Otherwise, use the default setstate which uses register
 * writes For CFF dump we must idle and use the registers so that it is
 * easier to filter out the mmu accesses from the dump
 */
if (!adreno_use_default_setstate(adreno_dev)) {
context = kgsl_context_get(device, context_id);
if (context == NULL)
return -EINVAL;
adreno_ctx = ADRENO_CONTEXT(context);
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
/* wait for graphics pipe to be idle */
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0x00000000;
/* set page table base */
*cmds++ = cp_type0_packet(MH_MMU_PT_BASE, 1);
*cmds++ = kgsl_mmu_get_pt_base_addr(&device->mmu,
device->mmu.hwpagetable);
sizedwords += 4;
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/* An idle wait is still required if PTUPDATE did not emit one */
if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) {
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE,
1);
*cmds++ = 0x00000000;
sizedwords += 2;
}
*cmds++ = cp_type0_packet(MH_MMU_INVALIDATE, 1);
*cmds++ = mh_mmu_invalidate;
sizedwords += 2;
}
if (flags & KGSL_MMUFLAGS_PTUPDATE &&
adreno_is_a20x(adreno_dev)) {
/* HW workaround: to resolve MMU page fault interrupts
 * caused by the VGT.It prevents the CP PFP from filling
 * the VGT DMA request fifo too early,thereby ensuring
 * that the VGT will not fetch vertex/bin data until
 * after the page table base register has been updated.
 *
 * Two null DRAW_INDX_BIN packets are inserted right
 * after the page table base update, followed by a
 * wait for idle. The null packets will fill up the
 * VGT DMA request fifo and prevent any further
 * vertex/bin updates from occurring until the wait
 * has finished. */
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = (0x4 << 16) |
(REG_PA_SU_SC_MODE_CNTL - 0x2000);
*cmds++ = 0; /* disable faceness generation */
*cmds++ = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
*cmds++ = device->mmu.setstate_memory.gpuaddr;
*cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
*cmds++ = 0; /* viz query info */
*cmds++ = 0x0003C004; /* draw indicator */
*cmds++ = 0; /* bin base */
*cmds++ = 3; /* bin size */
*cmds++ =
device->mmu.setstate_memory.gpuaddr; /* dma base */
*cmds++ = 6; /* dma size */
*cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
*cmds++ = 0; /* viz query info */
*cmds++ = 0x0003C004; /* draw indicator */
*cmds++ = 0; /* bin base */
*cmds++ = 3; /* bin size */
/* dma base */
*cmds++ = device->mmu.setstate_memory.gpuaddr;
*cmds++ = 6; /* dma size */
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0x00000000;
sizedwords += 21;
}
if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) {
*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x7fff; /* invalidate all base pointers */
sizedwords += 2;
}
ret = adreno_ringbuffer_issuecmds(device, adreno_ctx,
KGSL_CMD_FLAGS_PMODE,
&link[0], sizedwords);
kgsl_context_put(context);
} else {
kgsl_mmu_device_setstate(&device->mmu, flags);
}
return ret;
}
/* Dispatch a setstate request to the handler for the active MMU type */
static int adreno_setstate(struct kgsl_device *device,
		unsigned int context_id,
		uint32_t flags)
{
	switch (kgsl_mmu_get_mmutype()) {
	case KGSL_MMU_TYPE_GPU:
		return adreno_gpummu_setstate(device, context_id, flags);
	case KGSL_MMU_TYPE_IOMMU:
		return adreno_iommu_setstate(device, context_id, flags);
	default:
		return 0;
	}
}
/*
 * All current A3XX chipids are detected at the SOC level. Leave this
 * function here to support any future GPUs that have working
 * chip ID registers
 */
static unsigned int
a3xx_getchipid(struct kgsl_device *device)
{
	struct kgsl_device_platform_data *pdata =
		kgsl_device_get_drvdata(device);

	return pdata->chipid;
}
/*
 * Compose the A2XX chip id (core:major:minor:patch packed into 8-bit
 * fields) from the RBBM ID registers, with per-SoC corrections.
 */
static unsigned int
a2xx_getchipid(struct kgsl_device *device)
{
	struct kgsl_device_platform_data *pdata =
		kgsl_device_get_drvdata(device);
	unsigned int coreid, majorid, revid;
	unsigned int core, major, minor, patch;

	/* If the chip id is set at the platform level, then just use that */
	if (pdata->chipid != 0)
		return pdata->chipid;

	kgsl_regread(device, REG_RBBM_PERIPHID1, &coreid);
	kgsl_regread(device, REG_RBBM_PERIPHID2, &majorid);
	kgsl_regread(device, REG_RBBM_PATCH_RELEASE, &revid);

	/*
	 * adreno 22x gpus are indicated by coreid 2,
	 * but REG_RBBM_PERIPHID1 always contains 0 for this field
	 */
	core = cpu_is_msm8x60() ? 2 : (coreid & 0xF);
	major = (majorid >> 4) & 0xF;
	minor = revid & 0xFF;
	patch = (revid >> 16) & 0xFF;

	/* 8x50 returns 0 for patch release, but it should be 1 */
	if (cpu_is_qsd8x50())
		patch = 1;
	/* 8x25 returns 0 for minor id, but it should be 1 */
	else if ((cpu_is_msm8625() || cpu_is_msm8625q()) && minor == 0)
		minor = 1;

	return (core << 24) | (major << 16) | (minor << 8) | patch;
}
/*
 * Pick the chip id detection routine for this generation.
 * All A3XX chipsets will have pdata set, so assume !pdata->chipid is
 * an A2XX processor.
 */
static unsigned int
adreno_getchipid(struct kgsl_device *device)
{
	struct kgsl_device_platform_data *pdata =
		kgsl_device_get_drvdata(device);

	if (pdata->chipid != 0 && ADRENO_CHIPID_MAJOR(pdata->chipid) != 2)
		return a3xx_getchipid(device);

	return a2xx_getchipid(device);
}
static inline bool _rev_match(unsigned int id, unsigned int entry)
{
return (entry == ANY_ID || entry == id);
}
/*
 * adreno_identify_gpu() - Identify the GPU core from its chip ID
 * @adreno_dev: Adreno device handle
 *
 * Read the chip ID, match it against adreno_gpulist[] and copy the
 * per-core parameters (firmware names, GMEM size, jump table indices,
 * etc) into the adreno device. Sets gpurev to ADRENO_REV_UNKNOWN and
 * bails out when no entry matches. Finally marks every register offset
 * the core-specific table left unset as ADRENO_REG_UNUSED.
 */
static void
adreno_identify_gpu(struct adreno_device *adreno_dev)
{
unsigned int i, core, major, minor, patchid;
adreno_dev->chip_id = adreno_getchipid(&adreno_dev->dev);
core = ADRENO_CHIPID_CORE(adreno_dev->chip_id);
major = ADRENO_CHIPID_MAJOR(adreno_dev->chip_id);
minor = ADRENO_CHIPID_MINOR(adreno_dev->chip_id);
patchid = ADRENO_CHIPID_PATCH(adreno_dev->chip_id);
/* ANY_ID list entries act as wildcards in _rev_match() */
for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
if (core == adreno_gpulist[i].core &&
_rev_match(major, adreno_gpulist[i].major) &&
_rev_match(minor, adreno_gpulist[i].minor) &&
_rev_match(patchid, adreno_gpulist[i].patchid))
break;
}
if (i == ARRAY_SIZE(adreno_gpulist)) {
adreno_dev->gpurev = ADRENO_REV_UNKNOWN;
return;
}
/* Copy the matched entry's parameters into the device */
adreno_dev->gpurev = adreno_gpulist[i].gpurev;
adreno_dev->gpudev = adreno_gpulist[i].gpudev;
adreno_dev->pfp_fwfile = adreno_gpulist[i].pfpfw;
adreno_dev->pm4_fwfile = adreno_gpulist[i].pm4fw;
adreno_dev->istore_size = adreno_gpulist[i].istore_size;
adreno_dev->pix_shader_start = adreno_gpulist[i].pix_shader_start;
adreno_dev->instruction_size = adreno_gpulist[i].instruction_size;
adreno_dev->gmem_size = adreno_gpulist[i].gmem_size;
adreno_dev->pm4_jt_idx = adreno_gpulist[i].pm4_jt_idx;
adreno_dev->pm4_jt_addr = adreno_gpulist[i].pm4_jt_addr;
adreno_dev->pm4_bstrp_size = adreno_gpulist[i].pm4_bstrp_size;
adreno_dev->pfp_jt_idx = adreno_gpulist[i].pfp_jt_idx;
adreno_dev->pfp_jt_addr = adreno_gpulist[i].pfp_jt_addr;
adreno_dev->pfp_bstrp_size = adreno_gpulist[i].pfp_bstrp_size;
adreno_dev->pfp_bstrp_ver = adreno_gpulist[i].pfp_bstrp_ver;
adreno_dev->gpulist_index = i;
/*
 * Initialize uninitialized gpu registers, only needs to be done once
 * Make all offsets that are not initialized to ADRENO_REG_UNUSED
 */
for (i = 0; i < ADRENO_REG_REGISTER_MAX; i++) {
if (adreno_dev->gpudev->reg_offsets->offset_0 != i &&
!adreno_dev->gpudev->reg_offsets->offsets[i]) {
adreno_dev->gpudev->reg_offsets->offsets[i] =
ADRENO_REG_UNUSED;
}
}
}
/* Legacy (non-DT) platform id table; driver_data carries the 3D device */
static struct platform_device_id adreno_id_table[] = {
{ DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
{},
};
MODULE_DEVICE_TABLE(platform, adreno_id_table);
/* Device tree match table for the 3D GPU node */
static struct of_device_id adreno_match_table[] = {
{ .compatible = "qcom,kgsl-3d0", },
{}
};
/* Wrapper for of_property_read_u32() that logs a read failure */
static inline int adreno_of_read_property(struct device_node *node,
	const char *prop, unsigned int *ptr)
{
	int ret = of_property_read_u32(node, prop, ptr);

	if (ret != 0)
		KGSL_CORE_ERR("Unable to read '%s'\n", prop);

	return ret;
}
static struct device_node *adreno_of_find_subnode(struct device_node *parent,
const char *name)
{
struct device_node *child;
for_each_child_of_node(parent, child) {
if (of_device_is_compatible(child, name))
return child;
}
return NULL;
}
/*
 * adreno_of_get_pwrlevels() - Parse GPU power levels from the device tree
 * @parent: GPU device tree node
 * @pdata: Platform data to fill in with the parsed power levels
 *
 * Walk the "qcom,gpu-pwrlevels" subnode and populate pdata->pwrlevel[],
 * pdata->num_levels and pdata->init_level.
 *
 * Return: 0 on success or -EINVAL if a required property is missing.
 */
static int adreno_of_get_pwrlevels(struct device_node *parent,
	struct kgsl_device_platform_data *pdata)
{
	struct device_node *node, *child;
	int ret = -EINVAL;

	node = adreno_of_find_subnode(parent, "qcom,gpu-pwrlevels");

	if (node == NULL) {
		KGSL_CORE_ERR("Unable to find 'qcom,gpu-pwrlevels'\n");
		return -EINVAL;
	}

	pdata->num_levels = 0;

	for_each_child_of_node(node, child) {
		unsigned int index;
		struct kgsl_pwrlevel *level;

		if (adreno_of_read_property(child, "reg", &index))
			goto done;

		if (index >= KGSL_MAX_PWRLEVELS) {
			KGSL_CORE_ERR("Pwrlevel index %d is out of range\n",
				index);
			continue;
		}

		/* num_levels tracks the highest level index seen, plus one */
		if (index >= pdata->num_levels)
			pdata->num_levels = index + 1;

		level = &pdata->pwrlevel[index];

		if (adreno_of_read_property(child, "qcom,gpu-freq",
			&level->gpu_freq))
			goto done;

		if (adreno_of_read_property(child, "qcom,bus-freq",
			&level->bus_freq))
			goto done;

		/* io-fraction is optional - default to 0 when absent */
		if (adreno_of_read_property(child, "qcom,io-fraction",
			&level->io_fraction))
			level->io_fraction = 0;
	}

	/* The initial level is optional - default to 1 when absent */
	if (adreno_of_read_property(parent, "qcom,initial-pwrlevel",
		&pdata->init_level))
		pdata->init_level = 1;

	/*
	 * Valid level indices are 0 .. num_levels - 1, so reject
	 * init_level == num_levels as well (it is one past the last entry)
	 */
	if (pdata->init_level < 0 || pdata->init_level >= pdata->num_levels) {
		KGSL_CORE_ERR("Initial power level out of range\n");
		pdata->init_level = 1;
	}

	ret = 0;

done:
	return ret;
}
/*
 * adreno_of_get_iommu() - Parse the GPU IOMMU node from the device tree
 * @parent: GPU device tree node (holds the "iommu" phandle)
 * @pdata: Platform data to receive the IOMMU description
 *
 * Allocate a kgsl_device_iommu_data plus one kgsl_iommu_ctx per child
 * of the IOMMU node and attach them to @pdata.
 *
 * Return: 0 on success or -EINVAL on any failure (all allocations are
 * freed on the error path).
 */
static int adreno_of_get_iommu(struct device_node *parent,
	struct kgsl_device_platform_data *pdata)
{
	struct device_node *node, *child;
	struct kgsl_device_iommu_data *data = NULL;
	struct kgsl_iommu_ctx *ctxs = NULL;
	u32 reg_val[2];
	int ctx_index = 0;

	node = of_parse_phandle(parent, "iommu", 0);
	if (node == NULL)
		return -EINVAL;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL) {
		/* sizeof() yields size_t, so %zu is the correct specifier */
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*data));
		goto err;
	}

	if (of_property_read_u32_array(node, "reg", reg_val, 2))
		goto err;

	data->physstart = reg_val[0];
	data->physend = data->physstart + reg_val[1] - 1;
	data->iommu_halt_enable = of_property_read_bool(node,
					"qcom,iommu-enable-halt");

	data->iommu_ctx_count = 0;

	for_each_child_of_node(node, child)
		data->iommu_ctx_count++;

	ctxs = kzalloc(data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx),
		GFP_KERNEL);

	if (ctxs == NULL) {
		/* sizeof() yields size_t, so %zu is the correct specifier */
		KGSL_CORE_ERR("kzalloc(%zu) failed\n",
			data->iommu_ctx_count * sizeof(struct kgsl_iommu_ctx));
		goto err;
	}

	for_each_child_of_node(node, child) {
		int ret = of_property_read_string(child, "label",
				&ctxs[ctx_index].iommu_ctx_name);

		if (ret) {
			KGSL_CORE_ERR("Unable to read KGSL IOMMU 'label'\n");
			goto err;
		}

		ret = of_property_read_u32_array(child, "reg", reg_val, 2);
		if (ret) {
			KGSL_CORE_ERR("Unable to read KGSL IOMMU 'reg'\n");
			goto err;
		}

		/*
		 * The context bank id comes from the register offset; the
		 * non-v0 case subtracts 8 - presumably the bank numbering
		 * starts 8 contexts into the block on IOMMUv1 parts
		 */
		if (msm_soc_version_supports_iommu_v0())
			ctxs[ctx_index].ctx_id = (reg_val[0] -
				data->physstart) >> KGSL_IOMMU_CTX_SHIFT;
		else
			ctxs[ctx_index].ctx_id = ((reg_val[0] -
				data->physstart) >> KGSL_IOMMU_CTX_SHIFT) - 8;

		ctx_index++;
	}

	data->iommu_ctxs = ctxs;
	pdata->iommu_data = data;
	pdata->iommu_count = 1;

	return 0;

err:
	kfree(ctxs);
	kfree(data);

	return -EINVAL;
}
/*
 * adreno_of_get_pdata() - Build platform data from the device tree
 * @pdev: Platform device pointer
 *
 * Allocate and populate kgsl_device_platform_data from the GPU device
 * tree node (chip id, power levels, clock map, bus scaling and IOMMU
 * data). On success the result is stored in pdev->dev.platform_data.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int adreno_of_get_pdata(struct platform_device *pdev)
{
	struct kgsl_device_platform_data *pdata = NULL;
	struct kgsl_device *device;
	int ret = -EINVAL;

	pdev->id_entry = adreno_id_table;

	/* Platform data already supplied means there is nothing to parse */
	pdata = pdev->dev.platform_data;
	if (pdata)
		return 0;

	if (of_property_read_string(pdev->dev.of_node, "label", &pdev->name)) {
		KGSL_CORE_ERR("Unable to read 'label'\n");
		goto err;
	}

	if (adreno_of_read_property(pdev->dev.of_node, "qcom,id", &pdev->id))
		goto err;

	pdata = kzalloc(sizeof(*pdata), GFP_KERNEL);
	if (pdata == NULL) {
		/* sizeof() yields size_t, so %zu is the correct specifier */
		KGSL_CORE_ERR("kzalloc(%zu) failed\n", sizeof(*pdata));
		ret = -ENOMEM;
		goto err;
	}

	if (adreno_of_read_property(pdev->dev.of_node, "qcom,chipid",
		&pdata->chipid))
		goto err;

	/* pwrlevel Data */
	ret = adreno_of_get_pwrlevels(pdev->dev.of_node, pdata);
	if (ret)
		goto err;

	/* idle-timeout is optional */
	if (adreno_of_read_property(pdev->dev.of_node, "qcom,idle-timeout",
		&pdata->idle_timeout))
		pdata->idle_timeout = HZ/12;

	pdata->strtstp_sleepwake = of_property_read_bool(pdev->dev.of_node,
						"qcom,strtstp-sleepwake");

	pdata->bus_control = of_property_read_bool(pdev->dev.of_node,
						"qcom,bus-control");

	if (adreno_of_read_property(pdev->dev.of_node, "qcom,clk-map",
		&pdata->clk_map)) {
		/*
		 * ret was cleared by the pwrlevel parse above; set it
		 * again so this failure is not reported as success
		 */
		ret = -EINVAL;
		goto err;
	}

	device = (struct kgsl_device *)pdev->id_entry->driver_data;

	if (device->id != KGSL_DEVICE_3D0) {
		ret = -EINVAL;
		goto err;
	}

	/* Bus Scale Data */
	pdata->bus_scale_table = msm_bus_cl_get_pdata(pdev);
	if (IS_ERR_OR_NULL(pdata->bus_scale_table)) {
		ret = PTR_ERR(pdata->bus_scale_table);
		if (!ret)
			ret = -EINVAL;
		goto err;
	}

	ret = adreno_of_get_iommu(pdev->dev.of_node, pdata);
	if (ret)
		goto err;

	pdata->coresight_pdata = of_get_coresight_platform_data(&pdev->dev,
			pdev->dev.of_node);

	pdev->dev.platform_data = pdata;
	return 0;

err:
	if (pdata) {
		if (pdata->iommu_data)
			kfree(pdata->iommu_data->iommu_ctxs);
		kfree(pdata->iommu_data);
	}
	kfree(pdata);

	return ret;
}
#ifdef CONFIG_MSM_OCMEM
/*
 * adreno_ocmem_gmem_malloc() - Back GMEM with an OCMEM allocation
 * @adreno_dev: Adreno device handle
 *
 * Only A330 and A305B cores take this path. On success gmem_size and
 * ocmem_base are updated from the actual allocation.
 *
 * Return: 0 on success (or when OCMEM is not needed), -ENOMEM otherwise.
 */
static int
adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
{
if (!(adreno_is_a330(adreno_dev) ||
adreno_is_a305b(adreno_dev)))
return 0;
/* OCMEM is only needed once, do not support consecutive allocation */
if (adreno_dev->ocmem_hdl != NULL)
return 0;
adreno_dev->ocmem_hdl =
ocmem_allocate(OCMEM_GRAPHICS, adreno_dev->gmem_size);
if (adreno_dev->ocmem_hdl == NULL)
return -ENOMEM;
/* The allocation may differ from the request - use the real values */
adreno_dev->gmem_size = adreno_dev->ocmem_hdl->len;
adreno_dev->ocmem_base = adreno_dev->ocmem_hdl->addr;
return 0;
}
/*
 * adreno_ocmem_gmem_free() - Release the OCMEM region backing GMEM
 * @adreno_dev: Adreno device handle
 */
static void
adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
{
if (!(adreno_is_a330(adreno_dev) ||
adreno_is_a305b(adreno_dev)))
return;
if (adreno_dev->ocmem_hdl == NULL)
return;
ocmem_free(OCMEM_GRAPHICS, adreno_dev->ocmem_hdl);
adreno_dev->ocmem_hdl = NULL;
}
#else
/* No-op stubs for builds without OCMEM support */
static int
adreno_ocmem_gmem_malloc(struct adreno_device *adreno_dev)
{
return 0;
}
static void
adreno_ocmem_gmem_free(struct adreno_device *adreno_dev)
{
}
#endif
/*
 * adreno_probe() - Platform probe for the adreno GPU device
 * @pdev: Platform device pointer
 *
 * Parse platform data (from the device tree when present), initialize
 * the ringbuffer, the KGSL core device, the dispatcher and the various
 * debugfs/sysfs/profiling hooks, then register the input handler.
 *
 * Return: 0 on success or a negative error code on failure.
 */
static int __devinit
adreno_probe(struct platform_device *pdev)
{
	struct kgsl_device *device;
	struct adreno_device *adreno_dev;
	int status = -EINVAL;
	bool is_dt;

	is_dt = of_match_device(adreno_match_table, &pdev->dev);

	if (is_dt && pdev->dev.of_node) {
		status = adreno_of_get_pdata(pdev);
		if (status)
			goto error_return;
	}

	device = (struct kgsl_device *)pdev->id_entry->driver_data;
	adreno_dev = ADRENO_DEVICE(device);
	device->parentdev = &pdev->dev;

	status = adreno_ringbuffer_init(device);
	if (status != 0)
		goto error;

	status = kgsl_device_platform_probe(device);
	if (status)
		goto error_close_rb;

	status = adreno_dispatcher_init(adreno_dev);
	if (status)
		goto error_close_device;

	adreno_debugfs_init(device);
	adreno_profile_init(device);

	adreno_ft_init_sysfs(device);

	kgsl_pwrscale_init(&pdev->dev, CONFIG_MSM_ADRENO_DEFAULT_GOVERNOR);

	device->flags &= ~KGSL_FLAGS_SOFT_RESET;

	/*
	 * Note: the original code also fetched the platform data into an
	 * unused local here; that dead assignment has been removed
	 */
	adreno_coresight_init(pdev);

	adreno_input_handler.private = device;

	/*
	 * It isn't fatal if we cannot register the input handler. Sad,
	 * perhaps, but not fatal
	 */
	if (input_register_handler(&adreno_input_handler))
		KGSL_DRV_ERR(device, "Unable to register the input handler\n");

	return 0;

error_close_device:
	kgsl_device_platform_remove(device);
error_close_rb:
	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
error:
	device->parentdev = NULL;
error_return:
	return status;
}
/* Platform remove - release everything acquired in adreno_probe() */
static int __devexit adreno_remove(struct platform_device *pdev)
{
	struct kgsl_device *device =
		(struct kgsl_device *)pdev->id_entry->driver_data;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	input_unregister_handler(&adreno_input_handler);
	adreno_coresight_remove(pdev);
	adreno_profile_close(device);
	kgsl_pwrscale_close(device);
	adreno_dispatcher_close(adreno_dev);
	adreno_ringbuffer_close(&adreno_dev->ringbuffer);
	adreno_perfcounter_close(device);
	kgsl_device_platform_remove(device);

	clear_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);

	return 0;
}
/*
 * adreno_init() - One-time initialization of the adreno device
 * @device: KGSL device pointer
 *
 * Identify the GPU, load the PM4/PFP microcode, program the fault
 * tolerance detection register list and initialize the performance
 * counters. After the first successful run ADRENO_DEVICE_INITIALIZED
 * is set and subsequent calls return early.
 */
static int adreno_init(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int i;
int ret;
/* Make a high priority workqueue for starting the GPU */
/* NOTE(review): this runs before the INITIALIZED check below, so a
 * second call would allocate a fresh workqueue and drop the old one -
 * confirm adreno_init() is only invoked once per device */
adreno_wq = alloc_workqueue("adreno", WQ_HIGHPRI | WQ_UNBOUND, 1);
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
/*
 * initialization only needs to be done once initially until
 * device is shutdown
 */
if (test_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv))
return 0;
/* Power up the device */
kgsl_pwrctrl_enable(device);
/* Identify the specific GPU */
adreno_identify_gpu(adreno_dev);
/* Microcode and a known chip ID are mandatory - failure is fatal */
if (adreno_ringbuffer_read_pm4_ucode(device)) {
KGSL_DRV_ERR(device, "Reading pm4 microcode failed %s\n",
adreno_dev->pm4_fwfile);
BUG_ON(1);
}
if (adreno_ringbuffer_read_pfp_ucode(device)) {
KGSL_DRV_ERR(device, "Reading pfp microcode failed %s\n",
adreno_dev->pfp_fwfile);
BUG_ON(1);
}
if (adreno_dev->gpurev == ADRENO_REV_UNKNOWN) {
KGSL_DRV_ERR(device, "Unknown chip ID %x\n",
adreno_dev->chip_id);
BUG_ON(1);
}
kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);
/*
 * Check if firmware supports the sync lock PM4 packets needed
 * for IOMMUv1
 */
if ((adreno_dev->pm4_fw_version >=
adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pm4_ver) &&
(adreno_dev->pfp_fw_version >=
adreno_gpulist[adreno_dev->gpulist_index].sync_lock_pfp_ver))
device->mmu.flags |= KGSL_MMU_FLAGS_IOMMU_SYNC;
/* Initialize ft detection register offsets */
ft_detect_regs[0] = adreno_getreg(adreno_dev,
ADRENO_REG_RBBM_STATUS);
ft_detect_regs[1] = adreno_getreg(adreno_dev,
ADRENO_REG_CP_RB_RPTR);
ft_detect_regs[2] = adreno_getreg(adreno_dev,
ADRENO_REG_CP_IB1_BASE);
ft_detect_regs[3] = adreno_getreg(adreno_dev,
ADRENO_REG_CP_IB1_BUFSZ);
ft_detect_regs[4] = adreno_getreg(adreno_dev,
ADRENO_REG_CP_IB2_BASE);
ft_detect_regs[5] = adreno_getreg(adreno_dev,
ADRENO_REG_CP_IB2_BUFSZ);
/* Zero out the unused slots of the detection list */
for (i = 6; i < FT_DETECT_REGS_COUNT; i++)
ft_detect_regs[i] = 0;
ret = adreno_perfcounter_init(device);
if (ret)
goto done;
/* Power down the device */
kgsl_pwrctrl_disable(device);
/* Enable the power on shader corruption fix for all A3XX targets */
if (adreno_is_a3xx(adreno_dev))
adreno_a3xx_pwron_fixup_init(adreno_dev);
set_bit(ADRENO_DEVICE_INITIALIZED, &adreno_dev->priv);
done:
return ret;
}
/**
 * _adreno_start - Power up the GPU and prepare to accept commands
 * @adreno_dev: Pointer to an adreno_device structure
 *
 * The core function that powers up and initializes the GPU. This function is
 * called at init and after coming out of SLUMBER
 */
static int _adreno_start(struct adreno_device *adreno_dev)
{
	struct kgsl_device *device = &adreno_dev->dev;
	int status = -EINVAL;
	/* Saved so the pwrctrl state can be restored on any failure path */
	unsigned int state = device->state;
	unsigned int regulator_left_on = 0;

	kgsl_cffdump_open(device);

	kgsl_pwrctrl_set_state(device, KGSL_STATE_INIT);

	/*
	 * Note whether a rail was still on from a previous session; if so
	 * the core may be in an undefined state and gets a soft reset
	 * below (on targets that support one).
	 */
	regulator_left_on = (regulator_is_enabled(device->pwrctrl.gpu_reg) ||
		(device->pwrctrl.gpu_cx &&
		regulator_is_enabled(device->pwrctrl.gpu_cx)));

	/* Clear any GPU faults that might have been left over */
	adreno_clear_gpu_fault(adreno_dev);

	/* Power up the device */
	kgsl_pwrctrl_enable(device);

	/* Set the bit to indicate that we've just powered on */
	set_bit(ADRENO_DEVICE_PWRON, &adreno_dev->priv);

	/* Set up a2xx special case */
	if (adreno_is_a2xx(adreno_dev)) {
		/*
		 * the MH_CLNT_INTF_CTRL_CONFIG registers aren't present
		 * on older gpus
		 */
		if (adreno_is_a20x(adreno_dev)) {
			device->mh.mh_intf_cfg1 = 0;
			device->mh.mh_intf_cfg2 = 0;
		}

		kgsl_mh_start(device);
	}

	status = kgsl_mmu_start(device);
	if (status)
		goto error_clk_off;

	status = adreno_ocmem_gmem_malloc(adreno_dev);
	if (status) {
		KGSL_DRV_ERR(device, "OCMEM malloc failed\n");
		goto error_mmu_off;
	}

	if (regulator_left_on && adreno_dev->gpudev->soft_reset) {
		/*
		 * Reset the GPU for A3xx. A2xx does a soft reset in
		 * the start function.
		 */
		adreno_dev->gpudev->soft_reset(adreno_dev);
	}

	/* Restore performance counter registers with saved values */
	adreno_perfcounter_restore(adreno_dev);

	/* Start the GPU */
	adreno_dev->gpudev->start(adreno_dev);

	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
	device->ftbl->irqctrl(device, 1);

	status = adreno_ringbuffer_cold_start(&adreno_dev->ringbuffer);
	if (status)
		goto error_irq_off;

	status = adreno_perfcounter_start(adreno_dev);
	if (status)
		goto error_rb_stop;

	/* Start the dispatcher */
	adreno_dispatcher_start(device);

	device->reset_counter++;

	set_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);

	return 0;

/* Unwind acquired resources in reverse order (goto-based cleanup) */
error_rb_stop:
	adreno_ringbuffer_stop(&adreno_dev->ringbuffer);

error_irq_off:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);

error_mmu_off:
	kgsl_mmu_stop(&device->mmu);

error_clk_off:
	kgsl_pwrctrl_disable(device);
	/* set the state back to original state */
	kgsl_pwrctrl_set_state(device, state);

	return status;
}
/*
 * Result of the most recent workqueue-driven _adreno_start() call;
 * read back by adreno_start() after flush_work().
 */
static int _status;

/**
 * adreno_start_work() - Work handler for the low latency adreno_start
 * @work: Pointer to the work_struct for the start operation
 *
 * The work callback for the low latency GPU start - this executes the core
 * _adreno_start function in the workqueue.
 */
static void adreno_start_work(struct work_struct *work)
{
	struct adreno_device *adreno_dev = container_of(work,
		struct adreno_device, start_work);
	struct kgsl_device *device = &adreno_dev->dev;

	/* Nice ourselves to be higher priority but not too high priority */
	set_user_nice(current, _wake_nice);

	mutex_lock(&device->mutex);
	/*
	 * If adreno start is already called, no need to call it again
	 * it can lead to unpredictable behavior if we try to start
	 * the device that is already started.
	 * Below is the sequence of events that can go bad without the check
	 * 1) thread 1 calls adreno_start to be scheduled on high priority wq
	 * 2) thread 2 calls adreno_start with normal priority
	 * 3) thread 1 after checking the device to be in slumber state gives
	 *    up mutex to be scheduled on high priority wq
	 * 4) thread 2 after checking the device to be in slumber state gets
	 *    the mutex and finishes adreno_start before thread 1 is scheduled
	 *    on high priority wq.
	 * 5) thread 1 gets scheduled on high priority wq and executes
	 *    adreno_start again. This leads to unpredictable behavior.
	 */
	if (!test_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv))
		_status = _adreno_start(adreno_dev);
	else
		_status = 0;
	mutex_unlock(&device->mutex);
}
/**
 * adreno_start() - Power up and initialize the GPU
 * @device: Pointer to the KGSL device to power up
 * @priority: Boolean flag to specify of the start should be scheduled in a low
 * latency work queue
 *
 * Power up the GPU and initialize it. If priority is specified then queue the
 * start function in a high priority queue for lower latency.
 */
static int adreno_start(struct kgsl_device *device, int priority)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* No priority (normal latency) call the core start function directly */
	if (!priority)
		return _adreno_start(adreno_dev);

	/*
	 * If priority is specified (low latency) then queue the work in a
	 * higher priority work queue and wait for it to finish
	 */
	queue_work(adreno_wq, &adreno_dev->start_work);

	/*
	 * Drop the device mutex while waiting - the worker takes the same
	 * mutex, so holding it here would deadlock - then reacquire it.
	 */
	mutex_unlock(&device->mutex);
	flush_work(&adreno_dev->start_work);
	mutex_lock(&device->mutex);

	/* _status holds the result of the worker's _adreno_start() call */
	return _status;
}
/* Power down the GPU, tearing down in roughly reverse order of start */
static int adreno_stop(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Release the reference held on the active draw context, if any */
	if (adreno_dev->drawctxt_active)
		kgsl_context_put(&adreno_dev->drawctxt_active->base);

	adreno_dev->drawctxt_active = NULL;

	adreno_dispatcher_stop(adreno_dev);
	adreno_ringbuffer_stop(&adreno_dev->ringbuffer);

	kgsl_mmu_stop(&device->mmu);

	/* Quiesce interrupts and the idle timer before pulling power */
	device->ftbl->irqctrl(device, 0);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	del_timer_sync(&device->idle_timer);

	adreno_ocmem_gmem_free(adreno_dev);

	/* Save physical performance counter values before GPU power down*/
	adreno_perfcounter_save(adreno_dev);

	/* Power down the device */
	kgsl_pwrctrl_disable(device);

	kgsl_cffdump_close(device);

	clear_bit(ADRENO_DEVICE_STARTED, &adreno_dev->priv);

	return 0;
}
/**
 * adreno_reset() - Helper function to reset the GPU
 * @device: Pointer to the KGSL device structure for the GPU
 *
 * Try to reset the GPU to recover from a fault. First, try to do a low latency
 * soft reset. If the soft reset fails for some reason, then bring out the big
 * guns and toggle the footswitch.
 */
int adreno_reset(struct kgsl_device *device)
{
	int ret = -EINVAL;
	struct kgsl_mmu *mmu = &device->mmu;
	int i = 0;

	/* Try soft reset first, for non mmu fault case only */
	if (!atomic_read(&mmu->fault)) {
		ret = adreno_soft_reset(device);
		if (ret)
			KGSL_DEV_ERR_ONCE(device, "Device soft reset failed\n");
	}
	if (ret) {
		/* If soft reset failed/skipped, then pull the power */

		/* Keep trying to start the device until it works */
		adreno_stop(device);

		for (i = 0; i < NUM_TIMES_RESET_RETRY; i++) {
			ret = adreno_start(device, 0);
			if (!ret)
				break;

			/* Back off briefly before retrying the hard start */
			msleep(20);
		}
	}
	if (ret)
		return ret;

	/* i is only non-zero if a hard-reset retry was needed */
	if (0 != i)
		KGSL_DRV_WARN(device, "Device hard reset tried %d tries\n", i);

	/*
	 * If active_cnt is non-zero then the system was active before
	 * going into a reset - put it back in that state
	 */
	if (atomic_read(&device->active_cnt))
		kgsl_pwrctrl_set_state(device, KGSL_STATE_ACTIVE);

	/* Set the page table back to the default page table */
	kgsl_mmu_setstate(&device->mmu, device->mmu.defaultpagetable,
			KGSL_MEMSTORE_GLOBAL);

	return ret;
}
/**
 * _ft_sysfs_store() - Common routine to write to FT sysfs files
 * @buf: value to write
 * @count: size of the value to write
 * @ptr: destination for the parsed value
 *
 * Parse an unsigned integer from a sysfs write buffer and store it in
 * @ptr.  Returns @count on success or a negative error code if the
 * buffer does not parse as a number.
 */
static int _ft_sysfs_store(const char *buf, size_t count, unsigned int *ptr)
{
	char scratch[20];
	unsigned long parsed;
	int err;

	/* Bounded copy into a NUL-terminated scratch buffer */
	snprintf(scratch, sizeof(scratch), "%.*s",
			(int)min(count, sizeof(scratch) - 1), buf);

	err = kstrtoul(scratch, 0, &parsed);
	if (err)
		return err;

	*ptr = parsed;

	return count;
}
/**
 * _get_adreno_dev() - Routine to get a pointer to adreno dev
 * @dev: device ptr
 *
 * Resolve a generic struct device back to its adreno_device, or NULL
 * if the device is not a KGSL device.
 */
struct adreno_device *_get_adreno_dev(struct device *dev)
{
	struct kgsl_device *device = kgsl_device_from_dev(dev);

	if (device == NULL)
		return NULL;

	return ADRENO_DEVICE(device);
}
/**
 * _ft_policy_store() - Routine to configure FT policy
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value to write
 * @count: size of the value to write
 *
 * FT policy can be set to any of the options below.
 * KGSL_FT_DISABLE -> BIT(0) Set to disable FT
 * KGSL_FT_REPLAY  -> BIT(1) Set to enable replay
 * KGSL_FT_SKIPIB  -> BIT(2) Set to skip IB
 * KGSL_FT_SKIPFRAME -> BIT(3) Set to skip frame
 * by default set FT policy to KGSL_FT_DEFAULT_POLICY
 */
static int _ft_policy_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct adreno_device *adreno_dev = _get_adreno_dev(dev);
	int ret;

	if (adreno_dev == NULL)
		return 0;

	/* Serialize against concurrent readers/writers of ft_policy */
	mutex_lock(&adreno_dev->dev.mutex);
	ret = _ft_sysfs_store(buf, count, &adreno_dev->ft_policy);
	mutex_unlock(&adreno_dev->dev.mutex);

	return ret;
}
/**
 * _ft_policy_show() - Routine to read FT policy
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value read
 *
 * This is a routine to read current FT policy
 */
static int _ft_policy_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct adreno_device *adreno_dev = _get_adreno_dev(dev);

	if (adreno_dev == NULL)
		return 0;

	/* Report the policy bitmask in hex */
	return snprintf(buf, PAGE_SIZE, "0x%X\n", adreno_dev->ft_policy);
}
/**
 * _ft_pagefault_policy_store() - Routine to configure FT
 * pagefault policy
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value to write
 * @count: size of the value to write
 *
 * FT pagefault policy can be set to any of the options below.
 * KGSL_FT_PAGEFAULT_INT_ENABLE -> BIT(0) set to enable pagefault INT
 * KGSL_FT_PAGEFAULT_GPUHALT_ENABLE -> BIT(1) Set to enable GPU HALT on
 * pagefaults. This stalls the GPU on a pagefault on IOMMU v1 HW.
 * KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE -> BIT(2) Set to log only one
 * pagefault per page.
 * KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT -> BIT(3) Set to log only one
 * pagefault per INT.
 */
static int _ft_pagefault_policy_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct adreno_device *adreno_dev = _get_adreno_dev(dev);
	int ret = 0;
	unsigned int policy = 0;

	if (adreno_dev == NULL)
		return 0;

	mutex_lock(&adreno_dev->dev.mutex);

	/* MMU option changed call function to reset MMU options */
	if (count != _ft_sysfs_store(buf, count, &policy))
		ret = -EINVAL;

	if (!ret) {
		/* Mask off everything but the documented policy bits */
		policy &= (KGSL_FT_PAGEFAULT_INT_ENABLE |
				KGSL_FT_PAGEFAULT_GPUHALT_ENABLE |
				KGSL_FT_PAGEFAULT_LOG_ONE_PER_PAGE |
				KGSL_FT_PAGEFAULT_LOG_ONE_PER_INT);
		/*
		 * BUGFIX: program the MMU with the policy that was just
		 * parsed, not the stale adreno_dev->ft_pf_policy value.
		 * Previously a newly written policy only took effect on
		 * the *next* sysfs write.
		 */
		ret = kgsl_mmu_set_pagefault_policy(&(adreno_dev->dev.mmu),
			policy);
		/* Only commit the cached copy if the MMU accepted it */
		if (!ret)
			adreno_dev->ft_pf_policy = policy;
	}

	mutex_unlock(&adreno_dev->dev.mutex);

	if (!ret)
		return count;
	else
		return 0;
}
/**
 * _ft_pagefault_policy_show() - Routine to read FT pagefault
 * policy
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value read
 *
 * This is a routine to read current FT pagefault policy
 */
static int _ft_pagefault_policy_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct adreno_device *adreno_dev = _get_adreno_dev(dev);

	if (adreno_dev == NULL)
		return 0;

	/* Report the pagefault policy bitmask in hex */
	return snprintf(buf, PAGE_SIZE, "0x%X\n", adreno_dev->ft_pf_policy);
}
/**
 * _ft_fast_hang_detect_store() - Routine to configure FT fast
 * hang detect policy
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value to write
 * @count: size of the value to write
 *
 * 0x1 - Enable fast hang detection
 * 0x0 - Disable fast hang detection
 */
static int _ft_fast_hang_detect_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct adreno_device *adreno_dev = _get_adreno_dev(dev);
	int ret, tmp;

	if (adreno_dev == NULL)
		return 0;

	mutex_lock(&adreno_dev->dev.mutex);

	/* Remember the old value so we only toggle detection on a change */
	tmp = adreno_dev->fast_hang_detect;

	ret = _ft_sysfs_store(buf, count, &adreno_dev->fast_hang_detect);

	if (tmp != adreno_dev->fast_hang_detect) {
		if (adreno_dev->fast_hang_detect) {
			/* Transition 0 -> 1: start the fault detector */
			if (adreno_dev->gpudev->fault_detect_start)
				adreno_dev->gpudev->fault_detect_start(
					adreno_dev);
		} else {
			/* Transition 1 -> 0: stop the fault detector */
			if (adreno_dev->gpudev->fault_detect_stop)
				adreno_dev->gpudev->fault_detect_stop(
					adreno_dev);
		}
	}

	mutex_unlock(&adreno_dev->dev.mutex);

	return ret;
}
/**
 * _ft_fast_hang_detect_show() - Routine to read FT fast
 * hang detect policy
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value read
 */
static int _ft_fast_hang_detect_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct adreno_device *adreno_dev = _get_adreno_dev(dev);

	if (adreno_dev == NULL)
		return 0;

	/* Normalize to 0/1 regardless of the stored value */
	return snprintf(buf, PAGE_SIZE, "%d\n",
				(adreno_dev->fast_hang_detect ? 1 : 0));
}
/**
 * _ft_long_ib_detect_store() - Routine to configure FT long IB
 * detect policy
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value to write
 * @count: size of the value to write
 *
 * 0x0 - Enable long IB detection
 * 0x1 - Disable long IB detection
 */
static int _ft_long_ib_detect_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	struct adreno_device *adreno_dev = _get_adreno_dev(dev);
	int ret;

	if (adreno_dev == NULL)
		return 0;

	/* Serialize against concurrent readers/writers of long_ib_detect */
	mutex_lock(&adreno_dev->dev.mutex);
	ret = _ft_sysfs_store(buf, count, &adreno_dev->long_ib_detect);
	mutex_unlock(&adreno_dev->dev.mutex);

	return ret;
}
/**
 * _ft_long_ib_detect_show() - Routine to read FT long IB
 * detect policy
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value read
 */
static int _ft_long_ib_detect_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct adreno_device *adreno_dev = _get_adreno_dev(dev);

	if (adreno_dev == NULL)
		return 0;

	/* Normalize to 0/1 regardless of the stored value */
	return snprintf(buf, PAGE_SIZE, "%d\n",
				(adreno_dev->long_ib_detect ? 1 : 0));
}
/**
 * _wake_timeout_store() - Store the amount of time to extend idle check after
 * wake on touch
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value to write
 * @count: size of the value to write
 *
 */
static ssize_t _wake_timeout_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	/* _wake_timeout is a module-wide setting; no per-device lock taken */
	return _ft_sysfs_store(buf, count, &_wake_timeout);
}
/**
 * _wake_timeout_show() - Show the amount of time idle check gets extended
 * after wake on touch
 * @dev: device ptr
 * @attr: Device attribute
 * @buf: value read
 */
static ssize_t _wake_timeout_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	return snprintf(buf, PAGE_SIZE, "%d\n", _wake_timeout);
}
/*
 * Declare a 0644 sysfs attribute named <name>, wired to the
 * _<name>_show / _<name>_store handlers defined above.
 */
#define FT_DEVICE_ATTR(name) \
	DEVICE_ATTR(name, 0644, _ ## name ## _show, _ ## name ## _store);

FT_DEVICE_ATTR(ft_policy);
FT_DEVICE_ATTR(ft_pagefault_policy);
FT_DEVICE_ATTR(ft_fast_hang_detect);
FT_DEVICE_ATTR(ft_long_ib_detect);

/* wake_nice uses the generic integer attribute helper */
static DEVICE_INT_ATTR(wake_nice, 0644, _wake_nice);
static FT_DEVICE_ATTR(wake_timeout);

/* NULL-terminated list consumed by adreno_ft_init/uninit_sysfs() */
const struct device_attribute *ft_attr_list[] = {
	&dev_attr_ft_policy,
	&dev_attr_ft_pagefault_policy,
	&dev_attr_ft_fast_hang_detect,
	&dev_attr_ft_long_ib_detect,
	&dev_attr_wake_nice.attr,
	&dev_attr_wake_timeout,
	NULL,
};
/* Create the fault-tolerance sysfs files for @device */
int adreno_ft_init_sysfs(struct kgsl_device *device)
{
	return kgsl_create_device_sysfs_files(device->dev, ft_attr_list);
}
/* Remove the fault-tolerance sysfs files created by adreno_ft_init_sysfs() */
void adreno_ft_uninit_sysfs(struct kgsl_device *device)
{
	kgsl_remove_device_sysfs_files(device->dev, ft_attr_list);
}
/*
 * adreno_getproperty() - KGSL getproperty backend for adreno devices.
 * Copies the requested property into the user buffer @value; returns 0
 * on success, -EINVAL for a bad type/size, -EFAULT on copy failure.
 */
static int adreno_getproperty(struct kgsl_device *device,
				enum kgsl_property_type type,
				void *value,
				unsigned int sizebytes)
{
	int status = -EINVAL;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	switch (type) {
	case KGSL_PROP_DEVICE_INFO:
		{
			struct kgsl_devinfo devinfo;

			/* Reject mismatched struct sizes from userspace */
			if (sizebytes != sizeof(devinfo)) {
				status = -EINVAL;
				break;
			}

			memset(&devinfo, 0, sizeof(devinfo));
			devinfo.device_id = device->id+1;
			devinfo.chip_id = adreno_dev->chip_id;
			devinfo.mmu_enabled = kgsl_mmu_enabled();
			devinfo.gpu_id = adreno_dev->gpurev;
			devinfo.gmem_gpubaseaddr = adreno_dev->gmem_base;
			devinfo.gmem_sizebytes = adreno_dev->gmem_size;

			if (copy_to_user(value, &devinfo, sizeof(devinfo)) !=
					0) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_DEVICE_SHADOW:
		{
			struct kgsl_shadowprop shadowprop;

			if (sizebytes != sizeof(shadowprop)) {
				status = -EINVAL;
				break;
			}
			memset(&shadowprop, 0, sizeof(shadowprop));
			if (device->memstore.hostptr) {
				/*NOTE: with mmu enabled, gpuaddr doesn't mean
				 * anything to mmap().
				 */
				shadowprop.gpuaddr = device->memstore.gpuaddr;
				shadowprop.size = device->memstore.size;
				/* GSL needs this to be set, even if it
				   appears to be meaningless */
				shadowprop.flags = KGSL_FLAGS_INITIALIZED |
					KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS;
			}
			if (copy_to_user(value, &shadowprop,
				sizeof(shadowprop))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_MMU_ENABLE:
		{
			int mmu_prop = kgsl_mmu_enabled();

			if (sizebytes != sizeof(int)) {
				status = -EINVAL;
				break;
			}
			if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	case KGSL_PROP_INTERRUPT_WAITS:
		{
			/* Always 1: this driver supports interrupt waits */
			int int_waits = 1;
			if (sizebytes != sizeof(int)) {
				status = -EINVAL;
				break;
			}
			if (copy_to_user(value, &int_waits, sizeof(int))) {
				status = -EFAULT;
				break;
			}
			status = 0;
		}
		break;
	default:
		status = -EINVAL;
	}

	return status;
}
/*
 * adreno_set_constraint() - apply a power constraint to @context.
 * @constraint->data is a user pointer validated and copied here.
 * Returns 0 on success, -EINVAL/-EFAULT otherwise.
 */
static int adreno_set_constraint(struct kgsl_device *device,
				struct kgsl_context *context,
				struct kgsl_device_constraint *constraint)
{
	int status = 0;

	switch (constraint->type) {
	case KGSL_CONSTRAINT_PWRLEVEL: {
		struct kgsl_device_constraint_pwrlevel pwr;

		if (constraint->size != sizeof(pwr)) {
			status = -EINVAL;
			break;
		}

		if (copy_from_user(&pwr,
				(void __user *)constraint->data,
				sizeof(pwr))) {
			status = -EFAULT;
			break;
		}
		/* Validate the requested level before recording it */
		if (pwr.level >= KGSL_CONSTRAINT_PWR_MAXLEVELS) {
			status = -EINVAL;
			break;
		}

		context->pwr_constraint.type =
				KGSL_CONSTRAINT_PWRLEVEL;
		context->pwr_constraint.sub_type = pwr.level;
		}
		break;
	case KGSL_CONSTRAINT_NONE:
		/* Clear any previously set constraint */
		context->pwr_constraint.type = KGSL_CONSTRAINT_NONE;
		break;

	default:
		status = -EINVAL;
		break;
	}

	return status;
}
/*
 * adreno_setproperty() - KGSL setproperty backend for adreno devices.
 * KGSL_PROP_PWRCTRL toggles power management and fault detection
 * together; KGSL_PROP_PWR_CONSTRAINT applies a per-context constraint.
 */
static int adreno_setproperty(struct kgsl_device_private *dev_priv,
				enum kgsl_property_type type,
				void *value,
				unsigned int sizebytes)
{
	int status = -EINVAL;
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	switch (type) {
	case KGSL_PROP_PWRCTRL: {
			unsigned int enable;

			if (sizebytes != sizeof(enable))
				break;

			if (copy_from_user(&enable, (void __user *) value,
				sizeof(enable))) {
				status = -EFAULT;
				break;
			}

			if (enable) {
				/* Re-enable pm, fault detect and pwrscale */
				device->pwrctrl.ctrl_flags = 0;
				adreno_dev->fast_hang_detect = 1;

				if (adreno_dev->gpudev->fault_detect_start)
					adreno_dev->gpudev->fault_detect_start(
						adreno_dev);

				kgsl_pwrscale_enable(device);
			} else {
				/* Force power on and freeze pm decisions */
				kgsl_pwrctrl_wake(device, 0);
				device->pwrctrl.ctrl_flags = KGSL_PWR_ON;
				adreno_dev->fast_hang_detect = 0;

				if (adreno_dev->gpudev->fault_detect_stop)
					adreno_dev->gpudev->fault_detect_stop(
						adreno_dev);

				kgsl_pwrscale_disable(device);
			}

			status = 0;
		}
		break;
	case KGSL_PROP_PWR_CONSTRAINT: {
			struct kgsl_device_constraint constraint;
			struct kgsl_context *context;

			if (sizebytes != sizeof(constraint))
				break;

			if (copy_from_user(&constraint, value,
				sizeof(constraint))) {
				status = -EFAULT;
				break;
			}

			/* Make sure the caller owns the target context */
			context = kgsl_context_get_owner(dev_priv,
							constraint.context_id);
			if (context == NULL)
				break;
			status = adreno_set_constraint(device, context,
								&constraint);
			kgsl_context_put(context);
		}
		break;
	default:
		break;
	}

	return status;
}
/**
 * adreno_hw_isidle() - Check if the GPU core is idle
 * @device: Pointer to the KGSL device structure for the GPU
 *
 * Return true if the RBBM status register for the GPU type indicates that the
 * hardware is idle
 */
static bool adreno_hw_isidle(struct kgsl_device *device)
{
	unsigned int reg_rbbm_status;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* Don't consider ourselves idle if there is an IRQ pending */
	if (adreno_dev->gpudev->irq_pending(adreno_dev))
		return false;

	/*
	 * BUGFIX: restore "&reg_rbbm_status" - the address-of expression
	 * had been corrupted into the mojibake "(R)_rbbm_status" by an
	 * HTML-entity mangling of "&reg", which does not compile.
	 */
	adreno_readreg(adreno_dev, ADRENO_REG_RBBM_STATUS,
		&reg_rbbm_status);

	if (adreno_is_a2xx(adreno_dev)) {
		/* A2xx reports idle with this exact status value */
		if (reg_rbbm_status == 0x110)
			return true;
	} else if (adreno_is_a3xx(adreno_dev)) {
		/* A3xx: busy bit is the top bit of RBBM_STATUS */
		if (!(reg_rbbm_status & 0x80000000))
			return true;
	}

	return false;
}
/**
 * adreno_soft_reset() - Do a soft reset of the GPU hardware
 * @device: KGSL device to soft reset
 *
 * "soft reset" the GPU hardware - this is a fast path GPU reset
 * The GPU hardware is reset but we never pull power so we can skip
 * a lot of the standard adreno_stop/adreno_start sequence
 */
int adreno_soft_reset(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int ret;

	/* Not all targets implement a soft reset hook */
	if (!adreno_dev->gpudev->soft_reset) {
		dev_WARN_ONCE(device->dev, 1, "Soft reset not supported");
		return -EINVAL;
	}

	/* Drop the active draw context reference before resetting */
	if (adreno_dev->drawctxt_active)
		kgsl_context_put(&adreno_dev->drawctxt_active->base);

	adreno_dev->drawctxt_active = NULL;

	/* Stop the ringbuffer */
	adreno_ringbuffer_stop(&adreno_dev->ringbuffer);

	if (kgsl_pwrctrl_isenabled(device))
		device->ftbl->irqctrl(device, 0);

	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);

	adreno_clear_gpu_fault(adreno_dev);

	/* Delete the idle timer */
	del_timer_sync(&device->idle_timer);

	/* Make sure we are totally awake */
	kgsl_pwrctrl_enable(device);

	/* save physical performance counter values before GPU soft reset */
	adreno_perfcounter_save(adreno_dev);

	/* Reset the GPU */
	adreno_dev->gpudev->soft_reset(adreno_dev);

	/* Restore physical performance counter values after soft reset */
	adreno_perfcounter_restore(adreno_dev);

	/* Reinitialize the GPU */
	adreno_dev->gpudev->start(adreno_dev);

	/* Enable IRQ */
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
	device->ftbl->irqctrl(device, 1);

	/*
	 * If we have offsets for the jump tables we can try to do a warm start,
	 * otherwise do a full ringbuffer restart
	 */

	if (adreno_dev->pm4_jt_idx)
		ret = adreno_ringbuffer_warm_start(&adreno_dev->ringbuffer);
	else
		ret = adreno_ringbuffer_cold_start(&adreno_dev->ringbuffer);

	if (ret)
		return ret;

	device->reset_counter++;

	return 0;
}
/*
 * adreno_isidle() - return true if the GPU hardware is idle
 * @device: Pointer to the KGSL device structure for the GPU
 *
 * Return true if the GPU hardware is idle and there are no commands pending in
 * the ringbuffer
 */
bool adreno_isidle(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned int rptr;

	/* A powered-down GPU is idle by definition */
	if (!kgsl_pwrctrl_isenabled(device))
		return true;

	rptr = adreno_get_rptr(&adreno_dev->ringbuffer);

	/* Only consult the hardware once the ringbuffer has drained */
	if (rptr == adreno_dev->ringbuffer.wptr)
		return adreno_hw_isidle(device);

	return false;
}
/**
 * adreno_idle() - wait for the GPU hardware to go idle
 * @device: Pointer to the KGSL device structure for the GPU
 *
 * Wait up to ADRENO_IDLE_TIMEOUT milliseconds for the GPU hardware to go quiet.
 * Returns 0 when idle, -EDEADLK on a GPU fault, -ETIMEDOUT otherwise.
 */
int adreno_idle(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	unsigned long wait = jiffies + msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);

	/*
	 * Make sure the device mutex is held so the dispatcher can't send any
	 * more commands to the hardware
	 */

	BUG_ON(!mutex_is_locked(&device->mutex));

	/* Record the idle poll in the CFF dump with the per-target mask */
	if (adreno_is_a3xx(adreno_dev))
		kgsl_cffdump_regpoll(device,
			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
			0x00000000, 0x80000000);
	else
		kgsl_cffdump_regpoll(device,
			adreno_getreg(adreno_dev, ADRENO_REG_RBBM_STATUS) << 2,
			0x110, 0x110);

	/* Busy-poll until idle, fault or timeout */
	while (time_before(jiffies, wait)) {
		/*
		 * If we fault, stop waiting and return an error. The dispatcher
		 * will clean up the fault from the work queue, but we need to
		 * make sure we don't block it by waiting for an idle that
		 * will never come.
		 */

		if (adreno_gpu_fault(adreno_dev) != 0)
			return -EDEADLK;

		if (adreno_isidle(device))
			return 0;
	}

	return -ETIMEDOUT;
}
/**
 * adreno_drain() - Drain the dispatch queue
 * @device: Pointer to the KGSL device structure for the GPU
 *
 * Drain the dispatcher of existing command batches. This halts
 * additional commands from being issued until the gate is completed.
 */
static int adreno_drain(struct kgsl_device *device)
{
	/* Re-arm the gate; issuers will block until it is completed again */
	INIT_COMPLETION(device->cmdbatch_gate);

	return 0;
}
/* Caller must hold the device mutex. */
static int adreno_suspend_context(struct kgsl_device *device)
{
	int status = 0;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	/* process any profiling results that are available */
	adreno_profile_process_results(device);

	/* switch to NULL ctxt */
	if (adreno_dev->drawctxt_active != NULL) {
		adreno_drawctxt_switch(adreno_dev, NULL, 0);
		/* Wait for the context switch to land before suspending */
		status = adreno_idle(device);
	}

	return status;
}
/* Find a memory structure attached to an adreno context */
struct kgsl_memdesc *adreno_find_ctxtmem(struct kgsl_device *device,
	phys_addr_t pt_base, unsigned int gpuaddr, unsigned int size)
{
	struct kgsl_context *context;
	int next = 0;
	struct kgsl_memdesc *desc = NULL;

	/* Iterate every context in the idr under the read lock */
	read_lock(&device->context_lock);
	while (1) {
		context = idr_get_next(&device->context_idr, &next);
		if (context == NULL)
			break;

		/* Only contexts sharing the caller's pagetable qualify */
		if (kgsl_mmu_pt_equal(&device->mmu,
					context->proc_priv->pagetable,
					pt_base)) {
			struct adreno_context *adreno_context;

			adreno_context = ADRENO_CONTEXT(context);
			/* Check the gpustate buffer, then the GMEM shadow */
			desc = &adreno_context->gpustate;
			if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
				break;

			desc = &adreno_context->context_gmem_shadow.gmemshadow;
			if (kgsl_gpuaddr_in_memdesc(desc, gpuaddr, size))
				break;
		}
		next = next + 1;
		desc = NULL;
	}
	read_unlock(&device->context_lock);
	return desc;
}
/*
 * adreno_find_region() - find the memdesc containing [gpuaddr, gpuaddr+size)
 * Checks well-known global buffers first, then per-process allocations,
 * then per-context buffers.  Returns NULL if no region matches.
 */
struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
						phys_addr_t pt_base,
						unsigned int gpuaddr,
						unsigned int size)
{
	struct kgsl_mem_entry *entry;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct adreno_ringbuffer *ringbuffer = &adreno_dev->ringbuffer;

	if (kgsl_gpuaddr_in_memdesc(&ringbuffer->buffer_desc, gpuaddr, size))
		return &ringbuffer->buffer_desc;

	if (kgsl_gpuaddr_in_memdesc(&device->memstore, gpuaddr, size))
		return &device->memstore;

	if (kgsl_gpuaddr_in_memdesc(&adreno_dev->pwron_fixup, gpuaddr, size))
		return &adreno_dev->pwron_fixup;

	if (kgsl_gpuaddr_in_memdesc(&device->mmu.setstate_memory, gpuaddr,
					size))
		return &device->mmu.setstate_memory;

	/* Fall back to per-process entries, then per-context buffers */
	entry = kgsl_get_mem_entry(device, pt_base, gpuaddr, size);

	if (entry)
		return &entry->memdesc;

	return adreno_find_ctxtmem(device, pt_base, gpuaddr, size);
}
/*
 * adreno_convertaddr() - translate a GPU address into a kernel virtual
 * address.  Returns NULL when the address does not fall inside any
 * known memory region.
 */
uint8_t *adreno_convertaddr(struct kgsl_device *device, phys_addr_t pt_base,
			    unsigned int gpuaddr, unsigned int size)
{
	struct kgsl_memdesc *memdesc =
		adreno_find_region(device, pt_base, gpuaddr, size);

	if (memdesc == NULL)
		return NULL;

	return kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr);
}
/**
 * adreno_read - General read function to read adreno device memory
 * @device - Pointer to the GPU device struct (for adreno device)
 * @base - Base address (kernel virtual) where the device memory is mapped
 * @offsetwords - Offset in words from the base address, of the memory that
 * is to be read
 * @value - Value read from the device memory
 * @mem_len - Length of the device memory mapped to the kernel
 */
static void adreno_read(struct kgsl_device *device, void *base,
		unsigned int offsetwords, unsigned int *value,
		unsigned int mem_len)
{

	unsigned int *reg;

	/* Trap out-of-range offsets before touching hardware */
	BUG_ON(offsetwords*sizeof(uint32_t) >= mem_len);

	reg = (unsigned int *)(base + (offsetwords << 2));

	/* Clocks must be on before register access (not safe in IRQ path) */
	if (!in_interrupt())
		kgsl_pre_hwaccess(device);

	/*ensure this read finishes before the next one.
	 * i.e. act like normal readl() */
	*value = __raw_readl(reg);
	rmb();
}
/**
 * adreno_regread - Used to read adreno device registers
 * @device - the KGSL device whose register space is read
 * @offsetwords - Word (4 Bytes) offset to the register to be read
 * @value - Value read from device register
 */
static void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
	unsigned int *value)
{
	/* Delegate to the generic reader bounded by the register aperture */
	adreno_read(device, device->reg_virt, offsetwords, value,
		device->reg_len);
}
/**
 * adreno_shadermem_regread - Used to read GPU (adreno) shader memory
 * @device - GPU device whose shader memory is to be read
 * @offsetwords - Offset in words, of the shader memory address to be read
 * @value - Pointer to where the read shader mem value is to be stored
 */
void adreno_shadermem_regread(struct kgsl_device *device,
	unsigned int offsetwords, unsigned int *value)
{
	/* Delegate to the generic reader bounded by the shader aperture */
	adreno_read(device, device->shader_mem_virt, offsetwords, value,
		device->shader_mem_len);
}
/*
 * adreno_regwrite() - write @value to the register at word @offsetwords.
 * Ensures clocks are on (outside IRQ context) and records the write in
 * the trace and CFF dump streams before touching the hardware.
 */
static void adreno_regwrite(struct kgsl_device *device,
				unsigned int offsetwords,
				unsigned int value)
{
	unsigned int *reg;

	/* Trap out-of-range offsets before touching hardware */
	BUG_ON(offsetwords*sizeof(uint32_t) >= device->reg_len);

	if (!in_interrupt())
		kgsl_pre_hwaccess(device);

	kgsl_trace_regwrite(device, offsetwords, value);

	kgsl_cffdump_regwrite(device, offsetwords << 2, value);
	reg = (unsigned int *)(device->reg_virt + (offsetwords << 2));

	/*ensure previous writes post before this one,
	 * i.e. act like normal writel() */
	wmb();
	__raw_writel(value, reg);
}
/**
 * adreno_waittimestamp - sleep while waiting for the specified timestamp
 * @device - pointer to a KGSL device structure
 * @context - pointer to the active kgsl context
 * @timestamp - GPU timestamp to wait for
 * @msecs - amount of time to wait (in milliseconds)
 *
 * Wait up to 'msecs' milliseconds for the specified timestamp to expire.
 * Returns 0 on success, -ENOTTY for the deprecated NULL-context form,
 * -EINVAL for a detached context, -EDEADLK if the context was
 * invalidated, or -EPROTO if the context faulted since the last check.
 */
static int adreno_waittimestamp(struct kgsl_device *device,
		struct kgsl_context *context,
		unsigned int timestamp,
		unsigned int msecs)
{
	int ret;
	struct adreno_context *drawctxt;

	if (context == NULL) {
		/* If they are doing then complain once */
		dev_WARN_ONCE(device->dev, 1,
			"IOCTL_KGSL_DEVICE_WAITTIMESTAMP is deprecated\n");
		return -ENOTTY;
	}

	/* Return -EINVAL if the context has been detached */
	if (kgsl_context_detached(context))
		return -EINVAL;

	ret = adreno_drawctxt_wait(ADRENO_DEVICE(device), context,
		timestamp, msecs);

	/* If the context got invalidated then return a specific error */
	drawctxt = ADRENO_CONTEXT(context);

	if (drawctxt->state == ADRENO_CONTEXT_STATE_INVALID)
		ret = -EDEADLK;

	/*
	 * Return -EPROTO if the device has faulted since the last time we
	 * checked. Userspace uses this as a marker for performing post
	 * fault activities
	 */

	if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &drawctxt->priv))
		ret = -EPROTO;

	return ret;
}
/*
 * adreno_readtimestamp() - read the queued, consumed or retired
 * timestamp for @context (or the global slot when @context is NULL).
 */
static unsigned int adreno_readtimestamp(struct kgsl_device *device,
		struct kgsl_context *context, enum kgsl_timestamp_type type)
{
	unsigned int timestamp = 0;
	unsigned int id = context ? context->id : KGSL_MEMSTORE_GLOBAL;

	/*
	 * BUGFIX: the "&timestamp" arguments below had been corrupted into
	 * the mojibake "(x)tamp" by an HTML-entity mangling of "&times",
	 * which does not compile.  Restored the address-of expressions.
	 */
	switch (type) {
	case KGSL_TIMESTAMP_QUEUED: {
		struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

		timestamp = adreno_context_timestamp(context,
				&adreno_dev->ringbuffer);
		break;
	}
	case KGSL_TIMESTAMP_CONSUMED:
		kgsl_sharedmem_readl(&device->memstore, &timestamp,
			KGSL_MEMSTORE_OFFSET(id, soptimestamp));
		break;
	case KGSL_TIMESTAMP_RETIRED:
		kgsl_sharedmem_readl(&device->memstore, &timestamp,
			KGSL_MEMSTORE_OFFSET(id, eoptimestamp));
		break;
	}

	/* Pair with the device write ordering before returning the value */
	rmb();

	return timestamp;
}
/*
 * adreno_ioctl() - adreno-specific ioctl dispatcher, invoked by the KGSL
 * core for commands it does not handle itself.  Returns 0 or a negative
 * error; -ENOIOCTLCMD for unrecognized commands.
 */
static long adreno_ioctl(struct kgsl_device_private *dev_priv,
			      unsigned int cmd, void *data)
{
	struct kgsl_device *device = dev_priv->device;
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	int result = 0;

	switch (cmd) {
	case IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET: {
		/*
		 * CLEANUP: binbase is initialized here; the original code
		 * redundantly re-assigned "binbase = data" a second time.
		 */
		struct kgsl_drawctxt_set_bin_base_offset *binbase = data;
		struct kgsl_context *context;

		context = kgsl_context_get_owner(dev_priv,
			binbase->drawctxt_id);
		if (context) {
			adreno_drawctxt_set_bin_base_offset(
				device, context, binbase->offset);
		} else {
			result = -EINVAL;
			KGSL_DRV_ERR(device,
				"invalid drawctxt drawctxt_id %d "
				"device_id=%d\n",
				binbase->drawctxt_id, device->id);
		}

		/* kgsl_context_put() is a no-op for NULL */
		kgsl_context_put(context);
		break;
	}
	case IOCTL_KGSL_PERFCOUNTER_GET: {
		struct kgsl_perfcounter_get *get = data;
		/*
		 * adreno_perfcounter_get() is called by kernel clients
		 * during start(), so it is not safe to take an
		 * active count inside this function.
		 */
		result = kgsl_active_count_get(device);
		if (result)
			break;
		result = adreno_perfcounter_get(adreno_dev, get->groupid,
			get->countable, &get->offset, PERFCOUNTER_FLAG_NONE);
		kgsl_active_count_put(device);
		break;
	}
	case IOCTL_KGSL_PERFCOUNTER_PUT: {
		struct kgsl_perfcounter_put *put = data;
		result = adreno_perfcounter_put(adreno_dev, put->groupid,
			put->countable, PERFCOUNTER_FLAG_NONE);
		break;
	}
	case IOCTL_KGSL_PERFCOUNTER_QUERY: {
		struct kgsl_perfcounter_query *query = data;
		result = adreno_perfcounter_query_group(adreno_dev,
			query->groupid, query->countables,
			query->count, &query->max_counters);
		break;
	}
	case IOCTL_KGSL_PERFCOUNTER_READ: {
		struct kgsl_perfcounter_read *read = data;
		/* Hold an active count while reading live counters */
		result = kgsl_active_count_get(device);
		if (result)
			break;
		result = adreno_perfcounter_read_group(adreno_dev,
			read->reads, read->count);
		kgsl_active_count_put(device);
		break;
	}
	default:
		KGSL_DRV_INFO(dev_priv->device,
			"invalid ioctl code %08x\n", cmd);
		result = -ENOIOCTLCMD;
		break;
	}
	return result;

}
/*
 * adreno_ticks_to_us() - convert GPU busy ticks at clock @freq (Hz)
 * into microseconds.
 */
static inline s64 adreno_ticks_to_us(u32 ticks, u32 freq)
{
	/* Convert Hz to MHz; one tick then corresponds to 1/freq us */
	freq /= 1000000;
	/*
	 * BUGFIX: guard the division - a reported frequency below 1 MHz
	 * truncates to zero and the original code divided by zero.
	 */
	if (freq == 0)
		return 0;
	return ticks / freq;
}
/*
 * adreno_power_stats() - report busy time and VBIF RAM statistics to the
 * power framework.  All fields are zero when the GPU is not active.
 */
static void adreno_power_stats(struct kgsl_device *device,
				struct kgsl_power_stats *stats)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;
	struct adreno_busy_data busy_data;

	memset(stats, 0, sizeof(*stats));

	/*
	 * BUGFIX: zero busy_data so the fields read below are defined when
	 * the device is not active and busy_cycles() is skipped; previously
	 * they were read uninitialized (undefined behavior).
	 */
	memset(&busy_data, 0, sizeof(busy_data));

	/*
	 * Get the busy cycles counted since the counter was last reset.
	 * If we're not currently active, there shouldn't have been
	 * any cycles since the last time this function was called.
	 */
	if (device->state == KGSL_STATE_ACTIVE)
		adreno_dev->gpudev->busy_cycles(adreno_dev, &busy_data);

	stats->busy_time = adreno_ticks_to_us(busy_data.gpu_busy,
					      kgsl_pwrctrl_active_freq(pwr));
	stats->ram_time = busy_data.vbif_ram_cycles;
	stats->ram_wait = busy_data.vbif_starved_ram;
}
/* Forward an interrupt enable/disable request to the core-specific hook. */
void adreno_irqctrl(struct kgsl_device *device, int state)
{
	struct adreno_device *adreno = ADRENO_DEVICE(device);

	adreno->gpudev->irq_control(adreno, state);
}
/*
 * adreno_gpuid() - report the generic KGSL GPU identifier
 * @device: KGSL device
 * @chipid: optional out-parameter for the raw chip ID
 *
 * Standard KGSL gpuid format: the top half-word is 0x0003 (3D core),
 * the bottom half-word is the core-specific revision.
 */
static unsigned int adreno_gpuid(struct kgsl_device *device,
	unsigned int *chipid)
{
	struct adreno_device *adreno = ADRENO_DEVICE(device);

	/* Some callers also want the raw chip ID. */
	if (chipid)
		*chipid = adreno->chip_id;

	return (0x0003 << 16) | ((int) adreno->gpurev);
}
/* Dispatch table binding the generic KGSL core to the adreno back end. */
static const struct kgsl_functable adreno_functable = {
	/* Mandatory functions */
	.regread = adreno_regread,
	.regwrite = adreno_regwrite,
	.idle = adreno_idle,
	.isidle = adreno_isidle,
	.suspend_context = adreno_suspend_context,
	.init = adreno_init,
	.start = adreno_start,
	.stop = adreno_stop,
	.getproperty = adreno_getproperty,
	.waittimestamp = adreno_waittimestamp,
	.readtimestamp = adreno_readtimestamp,
	.issueibcmds = adreno_ringbuffer_issueibcmds,
	.ioctl = adreno_ioctl,
	.setup_pt = adreno_setup_pt,
	.cleanup_pt = adreno_cleanup_pt,
	.power_stats = adreno_power_stats,
	.irqctrl = adreno_irqctrl,
	.gpuid = adreno_gpuid,
	.snapshot = adreno_snapshot,
	.irq_handler = adreno_irq_handler,
	.drain = adreno_drain,
	/* Optional functions */
	.setstate = adreno_setstate,
	.drawctxt_create = adreno_drawctxt_create,
	.drawctxt_detach = adreno_drawctxt_detach,
	.drawctxt_destroy = adreno_drawctxt_destroy,
	.setproperty = adreno_setproperty,
	.postmortem_dump = adreno_dump,
	.drawctxt_sched = adreno_drawctxt_sched,
	.resume = adreno_dispatcher_start,
};
/* Platform driver glue for the 3D GPU device (probed via DT match table). */
static struct platform_driver adreno_platform_driver = {
	.probe = adreno_probe,
	.remove = __devexit_p(adreno_remove),
	.suspend = kgsl_suspend_driver,
	.resume = kgsl_resume_driver,
	.id_table = adreno_id_table,
	.driver = {
		.owner = THIS_MODULE,
		.name = DEVICE_3D_NAME,
		.pm = &kgsl_pm_ops,
		.of_match_table = adreno_match_table,
	}
};
/* Module entry point: register the 3D GPU platform driver. */
static int __init kgsl_3d_init(void)
{
	return platform_driver_register(&adreno_platform_driver);
}

/* Module exit point: unregister the 3D GPU platform driver. */
static void __exit kgsl_3d_exit(void)
{
	platform_driver_unregister(&adreno_platform_driver);
}
module_init(kgsl_3d_init);
module_exit(kgsl_3d_exit);
MODULE_DESCRIPTION("3D Graphics driver");
MODULE_VERSION("1.2");
MODULE_LICENSE("GPL v2");
MODULE_ALIAS("platform:kgsl_3d");
| gpl-2.0 |
harise100/Harise100Kernel | arch/arm/mach-msm/cpufreq.c | 42 | 10099 | /* arch/arm/mach-msm/cpufreq.c
*
* MSM architecture cpufreq driver
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2007-2012, The Linux Foundation. All rights reserved.
* Author: Mike A. Chan <mikechan@google.com>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/earlysuspend.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/cpufreq.h>
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <mach/socinfo.h>
#include <mach/cpufreq.h>
#include "acpuclock.h"
/*
 * Per-CPU suspend bookkeeping: device_suspended blocks frequency
 * changes while the CPU is suspended or going offline; suspend_mutex
 * serialises writers of that flag against in-flight target() calls.
 */
struct cpufreq_suspend_t {
	struct mutex suspend_mutex;
	int device_suspended;
};
static DEFINE_PER_CPU(struct cpufreq_suspend_t, cpufreq_suspend);

/* Per-CPU hardware frequency range plus externally imposed limits (kHz). */
struct cpu_freq {
	uint32_t max;
	uint32_t min;
	uint32_t allowed_max;
	uint32_t allowed_min;
	uint32_t limits_init;	/* non-zero once the fields above are valid */
};
static DEFINE_PER_CPU(struct cpu_freq, cpu_freq_info);
#ifdef CONFIG_SEC_DVFS
static unsigned int upper_limit_freq = 1566000;
static unsigned int lower_limit_freq;
static unsigned int cpuinfo_max_freq;
static unsigned int cpuinfo_min_freq;
unsigned int get_min_lock(void)
{
return lower_limit_freq;
}
unsigned int get_max_lock(void)
{
return upper_limit_freq;
}
void set_min_lock(int freq)
{
if (freq <= MIN_FREQ_LIMIT)
lower_limit_freq = 0;
else if (freq > MAX_FREQ_LIMIT)
lower_limit_freq = 0;
else
lower_limit_freq = freq;
}
void set_max_lock(int freq)
{
if (freq < MIN_FREQ_LIMIT)
upper_limit_freq = 0;
else if (freq >= MAX_FREQ_LIMIT)
upper_limit_freq = 0;
else
upper_limit_freq = freq;
}
int get_max_freq(void)
{
return cpuinfo_max_freq;
}
int get_min_freq(void)
{
return cpuinfo_min_freq;
}
#endif
/*
 * set_cpu_freq() - clamp and apply a new CPU frequency
 * @policy: cpufreq policy of the target CPU
 * @new_freq: requested frequency in kHz
 *
 * Clamps the request against the per-CPU allowed_min/allowed_max
 * limits (when initialised) and, under CONFIG_SEC_DVFS, against the
 * global lock range, then runs the PRE/POSTCHANGE notifier handshake
 * around acpuclk_set_rate().
 *
 * Returns 0 on success or the acpuclk_set_rate() error code.
 */
static int set_cpu_freq(struct cpufreq_policy *policy, unsigned int new_freq)
{
	int ret = 0;
	struct cpufreq_freqs freqs;
	struct cpu_freq *limit = &per_cpu(cpu_freq_info, policy->cpu);

	if (limit->limits_init) {
		if (new_freq > limit->allowed_max) {
			new_freq = limit->allowed_max;
			pr_debug("max: limiting freq to %d\n", new_freq);
		}
		if (new_freq < limit->allowed_min) {
			new_freq = limit->allowed_min;
			pr_debug("min: limiting freq to %d\n", new_freq);
		}
	}
#ifdef CONFIG_SEC_DVFS
	if (lower_limit_freq || upper_limit_freq) {
		unsigned int t_freq = new_freq;

		if (lower_limit_freq && new_freq < lower_limit_freq)
			t_freq = lower_limit_freq;
		if (upper_limit_freq && new_freq > upper_limit_freq)
			t_freq = upper_limit_freq;
		new_freq = t_freq;
		/* Keep the clamped value inside the policy bounds. */
		if (new_freq < policy->min)
			new_freq = policy->min;
		if (new_freq > policy->max)
			new_freq = policy->max;
		/* Nothing to do if we ended up at the current rate. */
		if (new_freq == policy->cur)
			return 0;
	}
#endif
	freqs.old = policy->cur;
	freqs.new = new_freq;
	freqs.cpu = policy->cpu;
	cpufreq_notify_transition(&freqs, CPUFREQ_PRECHANGE);
	ret = acpuclk_set_rate(policy->cpu, new_freq, SETRATE_CPUFREQ);
	/* Only announce POSTCHANGE if the rate switch succeeded. */
	if (!ret)
		cpufreq_notify_transition(&freqs, CPUFREQ_POSTCHANGE);
	return ret;
}
/*
 * msm_cpufreq_target() - cpufreq driver .target hook
 *
 * Refuses requests for inactive or suspended CPUs, resolves
 * @target_freq to a frequency-table entry per @relation, then hands
 * the result to set_cpu_freq(). Serialised against suspend/hotplug
 * via the per-CPU suspend_mutex.
 */
static int msm_cpufreq_target(struct cpufreq_policy *policy,
				unsigned int target_freq,
				unsigned int relation)
{
	int ret = -EFAULT;
	int index;
	struct cpufreq_frequency_table *table;

	if (!cpu_active(policy->cpu)) {
		pr_info("cpufreq: cpu %d is not active.\n", policy->cpu);
		return -ENODEV;
	}
	mutex_lock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
	if (per_cpu(cpufreq_suspend, policy->cpu).device_suspended) {
		pr_debug("cpufreq: cpu%d scheduling frequency change "
				"in suspend.\n", policy->cpu);
		ret = -EFAULT;
		goto done;
	}
	table = cpufreq_frequency_get_table(policy->cpu);
	if (cpufreq_frequency_table_target(policy, table, target_freq, relation,
			&index)) {
		pr_err("cpufreq: invalid target_freq: %d\n", target_freq);
		ret = -EINVAL;
		goto done;
	}
	pr_debug("CPU[%d] target %d relation %d (%d-%d) selected %d\n",
		policy->cpu, target_freq, relation,
		policy->min, policy->max, table[index].frequency);
	ret = set_cpu_freq(policy, table[index].frequency);
done:
	mutex_unlock(&per_cpu(cpufreq_suspend, policy->cpu).suspend_mutex);
	return ret;
}
/* cpufreq .verify hook: clamp the policy to the hardware cpuinfo range. */
static int msm_cpufreq_verify(struct cpufreq_policy *policy)
{
	unsigned int lo = policy->cpuinfo.min_freq;
	unsigned int hi = policy->cpuinfo.max_freq;

	cpufreq_verify_within_limits(policy, lo, hi);
	return 0;
}
/* cpufreq .get hook: report the current clock rate straight from acpuclk. */
static unsigned int msm_cpufreq_get_freq(unsigned int cpu)
{
	return acpuclk_get_rate(cpu);
}
/*
 * msm_cpufreq_limits_init() - seed the per-CPU frequency-limit records
 *
 * Walks each CPU's frequency table to find its lowest and highest
 * valid frequencies and initialises its cpu_freq_info record.
 *
 * Fixes over the previous version:
 *  - min/max are reset for every CPU instead of accumulating across
 *    the loop, so one CPU's range cannot leak into another's;
 *  - CPUFREQ_ENTRY_INVALID rows are skipped, since the invalid
 *    marker (~0) would otherwise be taken as the maximum frequency.
 */
static inline int msm_cpufreq_limits_init(void)
{
	int cpu = 0;
	int i = 0;
	struct cpufreq_frequency_table *table = NULL;
	uint32_t min;
	uint32_t max;
	struct cpu_freq *limit = NULL;

	for_each_possible_cpu(cpu) {
		limit = &per_cpu(cpu_freq_info, cpu);
		table = cpufreq_frequency_get_table(cpu);
		if (table == NULL) {
			pr_err("%s: error reading cpufreq table for cpu %d\n",
					__func__, cpu);
			continue;
		}
		min = (uint32_t) -1;
		max = 0;
		for (i = 0; (table[i].frequency != CPUFREQ_TABLE_END); i++) {
			if (table[i].frequency == CPUFREQ_ENTRY_INVALID)
				continue;
			if (table[i].frequency > max)
				max = table[i].frequency;
			if (table[i].frequency < min)
				min = table[i].frequency;
		}
		limit->allowed_min = min;
		limit->allowed_max = max;
		limit->min = min;
		limit->max = max;
		limit->limits_init = 1;
	}
	return 0;
}
/*
 * msm_cpufreq_set_freq_limits() - apply external min/max limits for a CPU
 * @cpu: target CPU number
 * @min: requested minimum in kHz, or MSM_CPUFREQ_NO_LIMIT
 * @max: requested maximum in kHz, or MSM_CPUFREQ_NO_LIMIT
 *
 * Requests outside the CPU's hardware range (or NO_LIMIT) reset the
 * corresponding bound to the hardware extreme. Always returns 0.
 */
int msm_cpufreq_set_freq_limits(uint32_t cpu, uint32_t min, uint32_t max)
{
	struct cpu_freq *limit = &per_cpu(cpu_freq_info, cpu);

	/* Lazily populate the hardware range on first use. */
	if (!limit->limits_init)
		msm_cpufreq_limits_init();
	if ((min != MSM_CPUFREQ_NO_LIMIT) &&
		min >= limit->min && min <= limit->max)
		limit->allowed_min = min;
	else
		limit->allowed_min = limit->min;
	if ((max != MSM_CPUFREQ_NO_LIMIT) &&
		max <= limit->max && max >= limit->min)
		limit->allowed_max = max;
	else
		limit->allowed_max = limit->max;
	pr_debug("%s: Limiting cpu %d min = %d, max = %d\n",
			__func__, cpu,
			limit->allowed_min, limit->allowed_max);
	return 0;
}
EXPORT_SYMBOL(msm_cpufreq_set_freq_limits);
/*
 * msm_cpufreq_init() - cpufreq driver .init hook for one policy
 *
 * Publishes the frequency table, optionally pins the range from
 * Kconfig, snaps the current clock rate onto a valid table entry
 * (switching the clock if needed), then fills in policy->cur and
 * the transition latency.
 */
static int __cpuinit msm_cpufreq_init(struct cpufreq_policy *policy)
{
	int cur_freq;
	int index;
	struct cpufreq_frequency_table *table;

	table = cpufreq_frequency_get_table(policy->cpu);
	if (table == NULL)
		return -ENODEV;
	/*
	 * In 8625 both cpu core's frequency can not
	 * be changed independently. Each cpu is bound to
	 * same frequency. Hence set the cpumask to all cpu.
	 */
	if (cpu_is_msm8625())
		cpumask_setall(policy->cpus);
	if (cpufreq_frequency_table_cpuinfo(policy, table)) {
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
		/* Fall back to the Kconfig range if the table is bad. */
		policy->cpuinfo.min_freq = CONFIG_MSM_CPU_FREQ_MIN;
		policy->cpuinfo.max_freq = CONFIG_MSM_CPU_FREQ_MAX;
#endif
	}
#ifdef CONFIG_MSM_CPU_FREQ_SET_MIN_MAX
	policy->min = CONFIG_MSM_CPU_FREQ_MIN;
	policy->max = CONFIG_MSM_CPU_FREQ_MAX;
#endif
#ifdef CONFIG_SEC_DVFS
	/* Remember the hardware range for the DVFS lock accessors. */
	cpuinfo_max_freq = policy->cpuinfo.max_freq;
	cpuinfo_min_freq = policy->cpuinfo.min_freq;
#endif
	cur_freq = acpuclk_get_rate(policy->cpu);
	/* Accept the nearest table entry in either direction. */
	if (cpufreq_frequency_table_target(policy, table, cur_freq,
			CPUFREQ_RELATION_H, &index) &&
	    cpufreq_frequency_table_target(policy, table, cur_freq,
			CPUFREQ_RELATION_L, &index)) {
		pr_info("%s: cpu%d at invalid freq: %d\n", __func__,
				policy->cpu, cur_freq);
		return -EINVAL;
	}
	if (cur_freq != table[index].frequency) {
		int ret = 0;

		/* Snap the clock onto the chosen table frequency. */
		ret = acpuclk_set_rate(policy->cpu, table[index].frequency,
				SETRATE_CPUFREQ);
		if (ret)
			return ret;
		pr_info("cpufreq: cpu%d init at %d switching to %d\n",
				policy->cpu, cur_freq, table[index].frequency);
		cur_freq = table[index].frequency;
	}
	policy->cur = cur_freq;
	policy->cpuinfo.transition_latency =
		acpuclk_get_switch_time() * NSEC_PER_USEC;
	return 0;
}
/*
 * msm_cpufreq_cpu_callback() - CPU hotplug notifier
 *
 * Marks a CPU as "suspended" while it is being taken down so that
 * msm_cpufreq_target() refuses frequency changes, and clears the
 * flag once the CPU is online again or the takedown failed.
 */
static int __cpuinit msm_cpufreq_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
		break;
	case CPU_DOWN_PREPARE:
	case CPU_DOWN_PREPARE_FROZEN:
		/* Take the mutex so any in-flight target() call finishes. */
		mutex_lock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
		per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
		mutex_unlock(&per_cpu(cpufreq_suspend, cpu).suspend_mutex);
		break;
	case CPU_DOWN_FAILED:
	case CPU_DOWN_FAILED_FROZEN:
		per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __refdata msm_cpufreq_cpu_notifier = {
	.notifier_call = msm_cpufreq_cpu_callback,
};
/*
 * Define suspend/resume for cpufreq_driver. Kernel will call
 * these during suspend/resume with interrupts disabled. This
 * helps the suspend/resume variable get's updated before cpufreq
 * governor tries to change the frequency after coming out of suspend.
 */
/* Flag every possible CPU as suspended so target() refuses changes. */
static int msm_cpufreq_suspend(struct cpufreq_policy *policy)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_suspend, cpu).device_suspended = 1;
	}
	return 0;
}

/* Clear the per-CPU suspend flags, re-enabling frequency changes. */
static int msm_cpufreq_resume(struct cpufreq_policy *policy)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
	}
	return 0;
}
/* sysfs attributes exported per policy (available frequency list). */
static struct freq_attr *msm_freq_attr[] = {
	&cpufreq_freq_attr_scaling_available_freqs,
	NULL,
};

/* cpufreq driver registration record for the MSM acpuclk back end. */
static struct cpufreq_driver msm_cpufreq_driver = {
	/* lps calculations are handled here. */
	.flags = CPUFREQ_STICKY | CPUFREQ_CONST_LOOPS,
	.init = msm_cpufreq_init,
	.verify = msm_cpufreq_verify,
	.target = msm_cpufreq_target,
	.get = msm_cpufreq_get_freq,
	.suspend = msm_cpufreq_suspend,
	.resume = msm_cpufreq_resume,
	.name = "msm",
	.attr = msm_freq_attr,
};
/*
 * msm_cpufreq_register() - late-init entry point
 *
 * Initialises the per-CPU suspend bookkeeping, hooks CPU hotplug
 * notifications, then registers the cpufreq driver.
 */
static int __init msm_cpufreq_register(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		mutex_init(&(per_cpu(cpufreq_suspend, cpu).suspend_mutex));
		per_cpu(cpufreq_suspend, cpu).device_suspended = 0;
	}
	register_hotcpu_notifier(&msm_cpufreq_cpu_notifier);
	return cpufreq_register_driver(&msm_cpufreq_driver);
}
late_initcall(msm_cpufreq_register);
| gpl-2.0 |
Grarak/grakernel-msm8930 | sound/soc/codecs/wcd9xxx-common.c | 42 | 17058 | /* Copyright (c) 2013, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/module.h>
#include <sound/soc.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/mfd/wcd9xxx/wcd9xxx_registers.h>
#include "wcd9xxx-common.h"
#define CLSH_COMPUTE_EAR 0x01
#define CLSH_COMPUTE_HPH_L 0x02
#define CLSH_COMPUTE_HPH_R 0x03
#define BUCK_VREF_2V 0xFF
#define BUCK_VREF_1P8V 0xE6
#define NCP_FCLK_LEVEL_8 0x08
#define NCP_FCLK_LEVEL_5 0x05
#define BUCK_SETTLE_TIME_US 50
#define NCP_SETTLE_TIME_US 50
/* Toggle the class-H controller logic block. */
static inline void wcd9xxx_enable_clsh_block(
	struct snd_soc_codec *codec,
	bool on)
{
	snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL,
		0x01, on ? 0x01 : 0x00);
}

/* Toggle the ANC delay inside the class-H controller. */
static inline void wcd9xxx_enable_anc_delay(
	struct snd_soc_codec *codec,
	bool on)
{
	snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL,
		0x02, on ? 0x02 : 0x00);
}

/* Toggle the negative charge pump (NCP). */
static inline void wcd9xxx_enable_ncp(
	struct snd_soc_codec *codec,
	bool on)
{
	snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN,
		0x01, on ? 0x01 : 0x00);
}

/* Toggle the buck converter. */
static inline void wcd9xxx_enable_buck(
	struct snd_soc_codec *codec,
	bool on)
{
	snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1,
		0x80, on ? 0x80 : 0x00);
}

/* Reference count of active line-out users (see wcd9xxx_clsh_state_lo). */
static int cdc_lo_count;

/* State-machine dispatch table, indexed by the class-H state bitmask. */
static void (*clsh_state_fp[NUM_CLSH_STATES])
	(struct snd_soc_codec *,
	struct wcd9xxx_clsh_cdc_data *,
	u8 req_state, bool req_type);
/* Map a class-H state bitmask to a printable name for logging. */
static const char *state_to_str(u8 state)
{
	switch (state) {
	case WCD9XXX_CLSH_STATE_IDLE:
		return "STATE_IDLE";
	case WCD9XXX_CLSH_STATE_EAR:
		return "STATE_EAR";
	case WCD9XXX_CLSH_STATE_HPHL:
		return "STATE_HPH_L";
	case WCD9XXX_CLSH_STATE_HPHR:
		return "STATE_HPH_R";
	case WCD9XXX_CLSH_STATE_HPHL | WCD9XXX_CLSH_STATE_HPHR:
		return "STATE_HPH_L_R";
	case WCD9XXX_CLSH_STATE_LO:
		return "STATE_LO";
	default:
		return "UNKNOWN_STATE";
	}
}
/* Program the buck current-control (CCL) registers for class-H operation. */
static void wcd9xxx_cfg_clsh_buck(
		struct snd_soc_codec *codec)
{
	int i;
	const struct wcd9xxx_reg_mask_val reg_set[] = {
		{WCD9XXX_A_BUCK_CTRL_CCL_4, 0x0B, 0x00},
		{WCD9XXX_A_BUCK_CTRL_CCL_1, 0xF0, 0x50},
		{WCD9XXX_A_BUCK_CTRL_CCL_3, 0x03, 0x00},
		{WCD9XXX_A_BUCK_CTRL_CCL_3, 0x0B, 0x00},
	};

	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask,
					reg_set[i].val);

	dev_dbg(codec->dev, "%s: Programmed buck parameters", __func__);
}
/* Program the class-H controller parameters shared by all output paths. */
static void wcd9xxx_cfg_clsh_param_common(
		struct snd_soc_codec *codec)
{
	int i;
	const struct wcd9xxx_reg_mask_val reg_set[] = {
		{WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 0, 0},
		{WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, 0x3 << 2, 1 << 2},
		{WCD9XXX_A_CDC_CLSH_BUCK_NCP_VARS, (0x1 << 4), 0},
		{WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 0), 0x01},
		{WCD9XXX_A_CDC_CLSH_B2_CTL, (0x3 << 2), (0x01 << 2)},
		{WCD9XXX_A_CDC_CLSH_B2_CTL, (0xf << 4), (0x03 << 4)},
		{WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 4), (0x03 << 4)},
		{WCD9XXX_A_CDC_CLSH_B3_CTL, (0xf << 0), (0x0B)},
		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 5), (0x01 << 5)},
		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 1), (0x01 << 1)},
	};

	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask,
					reg_set[i].val);

	dev_dbg(codec->dev, "%s: Programmed class H controller common parameters",
			 __func__);
}
/*
 * Reference-counted enable/disable of the charge pump clock.
 *
 * The pump is switched on with the first enable request and switched
 * off only when every enable has been balanced by a disable. An
 * unbalanced disable is logged, clamped to zero and WARNed about.
 */
static void wcd9xxx_chargepump_request(
	struct snd_soc_codec *codec, bool on)
{
	static int cp_count;	/* number of outstanding enable requests */

	if (on && (++cp_count == 1)) {
		snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
					0x01, 0x01);
		dev_info(codec->dev, "%s: Charge Pump enabled, count = %d\n",
				__func__, cp_count);
	}

	else if (!on) {
		if (--cp_count < 0) {
			dev_err(codec->dev, "%s: Unbalanced disable for charge pump\n",
					__func__);
			if (snd_soc_read(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL)
					& 0x01) {
				dev_info(codec->dev, "%s: Actual chargepump is ON\n",
						__func__);
			}
			cp_count = 0;
			WARN_ON(1);
		}

		if (cp_count == 0) {
			snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLK_OTHR_CTL,
						0x01, 0x00);
			dev_dbg(codec->dev, "%s: Charge pump disabled, count = %d\n",
					__func__, cp_count);
		}
	}
}
/*
 * Enable/disable the class-H computation path for one PA
 * (EAR = bit 4, HPH-L = bit 3, HPH-R = bit 2 of CLSH_B1_CTL).
 */
static inline void wcd9xxx_clsh_computation_request(
	struct snd_soc_codec *codec, int compute_pa, bool on)
{
	u8 reg_val, reg_mask;

	switch (compute_pa) {
	case CLSH_COMPUTE_EAR:
		reg_mask = 0x10;
		reg_val = (on ? 0x10 : 0x00);
		break;
	case CLSH_COMPUTE_HPH_L:
		reg_mask = 0x08;
		reg_val = (on ? 0x08 : 0x00);
		break;
	case CLSH_COMPUTE_HPH_R:
		reg_mask = 0x04;
		reg_val = (on ? 0x04 : 0x00);
		break;
	default:
		dev_err(codec->dev, "%s: class h computation PA request incorrect\n",
			   __func__);
		return;
	}

	snd_soc_update_bits(codec, WCD9XXX_A_CDC_CLSH_B1_CTL,
						reg_mask, reg_val);
}
/*
 * Program the buck for class-H operation with the given reference
 * voltage and enable it, then wait for it to settle.
 * NOTE(review): the first entry writes value 0x03 against mask 0x02
 * (only bit 1 survives the mask) — confirm whether 0x03/0x03 was
 * intended.
 */
static void wcd9xxx_enable_buck_mode(struct snd_soc_codec *codec,
		u8 buck_vref)
{
	int i;
	const struct wcd9xxx_reg_mask_val reg_set[] = {
		{WCD9XXX_A_BUCK_MODE_5, 0x02, 0x03},
		{WCD9XXX_A_BUCK_MODE_4, 0xFF, buck_vref},
		{WCD9XXX_A_BUCK_MODE_1, 0x04, 0x04},
		{WCD9XXX_A_BUCK_MODE_1, 0x08, 0x00},
		{WCD9XXX_A_BUCK_MODE_3, 0x04, 0x00},
		{WCD9XXX_A_BUCK_MODE_3, 0x08, 0x00},
		{WCD9XXX_A_BUCK_MODE_1, 0x80, 0x80},
	};

	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		snd_soc_update_bits(codec, reg_set[i].reg,
					reg_set[i].mask, reg_set[i].val);
	dev_dbg(codec->dev, "%s: Done\n", __func__);
	/* Allow the buck output to stabilise before it is used. */
	usleep_range(BUCK_SETTLE_TIME_US, BUCK_SETTLE_TIME_US);
}
/* Final class-H register settings applied after the PA has been enabled. */
static void wcd9xxx_clsh_enable_post_pa(struct snd_soc_codec *codec)
{
	int i;
	const struct wcd9xxx_reg_mask_val reg_set[] = {
		{WCD9XXX_A_BUCK_MODE_5, 0x02, 0x00},
		{WCD9XXX_A_NCP_STATIC, 0x20, 0x00},
		{WCD9XXX_A_BUCK_MODE_3, 0x04, 0x04},
		{WCD9XXX_A_BUCK_MODE_3, 0x08, 0x08},
	};

	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		snd_soc_update_bits(codec, reg_set[i].reg,
					reg_set[i].mask, reg_set[i].val);

	dev_dbg(codec->dev, "%s: completed clsh mode settings after PA enable\n",
		   __func__);
}
/*
 * Set the NCP clock (fclk) level and enable the charge pump, then
 * wait for it to settle. The 0x010 mask (== 0x10, bit 4) is cleared
 * before the 4-bit level field is written.
 */
static void wcd9xxx_set_fclk_enable_ncp(struct snd_soc_codec *codec,
		u8 fclk_level)
{
	int i;
	const struct wcd9xxx_reg_mask_val reg_set[] = {
		{WCD9XXX_A_NCP_STATIC, 0x20, 0x20},
		{WCD9XXX_A_NCP_EN, 0x01, 0x01},
	};
	snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
				0x010, 0x00);
	snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
				0x0F, fclk_level);
	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		snd_soc_update_bits(codec, reg_set[i].reg,
					reg_set[i].mask, reg_set[i].val);

	usleep_range(NCP_SETTLE_TIME_US, NCP_SETTLE_TIME_US);

	dev_dbg(codec->dev, "%s: set ncp done\n", __func__);
}
/*
 * Program the class-H controller with the earpiece-specific tuning:
 * PA thresholds, current factors and the K coefficient table.
 */
static void wcd9xxx_cfg_clsh_param_ear(struct snd_soc_codec *codec)
{
	int i;
	const struct wcd9xxx_reg_mask_val reg_set[] = {
		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 7), 0},
		{WCD9XXX_A_CDC_CLSH_V_PA_HD_EAR, (0x3f << 0), 0x0D},
		{WCD9XXX_A_CDC_CLSH_V_PA_MIN_EAR, (0x3f << 0), 0x3A},

		/* Under assumption that EAR load is 10.7ohm */
		{WCD9XXX_A_CDC_CLSH_IDLE_EAR_THSD, (0x3f << 0), 0x26},
		{WCD9XXX_A_CDC_CLSH_FCLKONLY_EAR_THSD, (0x3f << 0), 0x2C},
		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_L, 0xff, 0xA9},
		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_EAR_U, 0xff, 0x07},
		{WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0},
		{WCD9XXX_A_CDC_CLSH_K_ADDR, (0xf << 0), 0x08},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1b},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x2d},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x36},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x37},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
	};

	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		snd_soc_update_bits(codec, reg_set[i].reg,
					reg_set[i].mask, reg_set[i].val);

	dev_dbg(codec->dev, "%s: Programmed Class H controller EAR specific params\n",
			 __func__);
}
/*
 * Program the class-H controller with the headphone-specific tuning:
 * PA thresholds, current factors and the K coefficient table.
 */
static void wcd9xxx_cfg_clsh_param_hph(struct snd_soc_codec *codec)
{
	int i;
	const struct wcd9xxx_reg_mask_val reg_set[] = {
		{WCD9XXX_A_CDC_CLSH_B1_CTL, (0x1 << 6), 0},
		{WCD9XXX_A_CDC_CLSH_V_PA_HD_HPH, 0x3f, 0x0D},
		{WCD9XXX_A_CDC_CLSH_V_PA_MIN_HPH, 0x3f, 0x1D},

		/* Under assumption that HPH load is 16ohm per channel */
		{WCD9XXX_A_CDC_CLSH_IDLE_HPH_THSD, 0x3f, 0x13},
		{WCD9XXX_A_CDC_CLSH_FCLKONLY_HPH_THSD, 0x1f, 0x19},
		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_L, 0xff, 0x97},
		{WCD9XXX_A_CDC_CLSH_I_PA_FACT_HPH_U, 0xff, 0x05},
		{WCD9XXX_A_CDC_CLSH_K_ADDR, (0x1 << 7), 0},
		{WCD9XXX_A_CDC_CLSH_K_ADDR, 0x0f, 0},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0xAE},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x01},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x1C},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x24},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x25},
		{WCD9XXX_A_CDC_CLSH_K_DATA, 0xff, 0x00},
	};

	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		snd_soc_update_bits(codec, reg_set[i].reg, reg_set[i].mask,
							reg_set[i].val);

	dev_dbg(codec->dev, "%s: Programmed Class H controller HPH specific params\n",
			 __func__);
}
/*
 * Tear down the class-H resources after a PA is turned off:
 * release the charge pump, disable NCP and buck, clear the EAR
 * computation bit and finally disable the class-H block.
 */
static void wcd9xxx_clsh_turnoff_postpa
	(struct snd_soc_codec *codec)
{
	int i;
	const struct wcd9xxx_reg_mask_val reg_set[] = {
		{WCD9XXX_A_NCP_EN, 0x01, 0x00},
		{WCD9XXX_A_BUCK_MODE_1, 0x80, 0x00},
		{WCD9XXX_A_CDC_CLSH_B1_CTL, 0x10, 0x00},
	};

	wcd9xxx_chargepump_request(codec, false);

	for (i = 0; i < ARRAY_SIZE(reg_set); i++)
		snd_soc_update_bits(codec, reg_set[i].reg,
					reg_set[i].mask, reg_set[i].val);

	wcd9xxx_enable_clsh_block(codec, false);

	dev_dbg(codec->dev, "%s: Done\n", __func__);
}
/*
 * State handler for IDLE: only reachable by disabling the last active
 * path; tears down whatever resources that path was using.
 */
static void wcd9xxx_clsh_state_idle(struct snd_soc_codec *codec,
		struct wcd9xxx_clsh_cdc_data *clsh_d,
		u8 req_state, bool is_enable)
{
	if (is_enable) {
		dev_err(codec->dev, "%s: wrong transition, cannot enable IDLE state\n",
			   __func__);
	} else {
		if (req_state == WCD9XXX_CLSH_STATE_EAR) {
			wcd9xxx_clsh_turnoff_postpa(codec);
		} else if (req_state == WCD9XXX_CLSH_STATE_HPHL) {
			wcd9xxx_clsh_computation_request(codec,
						CLSH_COMPUTE_HPH_L, false);
			wcd9xxx_clsh_turnoff_postpa(codec);
		} else if (req_state == WCD9XXX_CLSH_STATE_HPHR) {
			wcd9xxx_clsh_computation_request(codec,
						CLSH_COMPUTE_HPH_R, false);
			wcd9xxx_clsh_turnoff_postpa(codec);
		} else if (req_state == WCD9XXX_CLSH_STATE_LO) {
			/* Line-out only used buck + NCP; no class-H teardown. */
			wcd9xxx_enable_ncp(codec, false);
			wcd9xxx_enable_buck(codec, false);
		}
	}
}
/* State handler for EAR: bring up buck, NCP and class-H for the earpiece. */
static void wcd9xxx_clsh_state_ear(struct snd_soc_codec *codec,
		struct wcd9xxx_clsh_cdc_data *clsh_d,
		u8 req_state, bool is_enable)
{
	if (is_enable) {
		wcd9xxx_cfg_clsh_buck(codec);
		wcd9xxx_cfg_clsh_param_common(codec);
		wcd9xxx_cfg_clsh_param_ear(codec);
		wcd9xxx_enable_clsh_block(codec, true);
		wcd9xxx_chargepump_request(codec, true);
		wcd9xxx_enable_anc_delay(codec, true);
		wcd9xxx_clsh_computation_request(codec,
				CLSH_COMPUTE_EAR, true);
		wcd9xxx_enable_buck_mode(codec, BUCK_VREF_2V);
		wcd9xxx_set_fclk_enable_ncp(codec, NCP_FCLK_LEVEL_8);

		dev_dbg(codec->dev, "%s: Enabled ear mode class h\n", __func__);
	} else {
		/* Disable path not implemented; handled via IDLE handler. */
		dev_dbg(codec->dev, "%s: stub fallback to ear\n", __func__);
	}
}
/*
 * State handler for HPH-L: bring up buck, NCP and class-H for the left
 * headphone; on disable, only drops the right channel's computation
 * when leaving the stereo state.
 */
static void wcd9xxx_clsh_state_hph_l(struct snd_soc_codec *codec,
		struct wcd9xxx_clsh_cdc_data *clsh_d,
		u8 req_state, bool is_enable)
{
	if (is_enable) {
		wcd9xxx_cfg_clsh_buck(codec);
		wcd9xxx_cfg_clsh_param_common(codec);
		wcd9xxx_cfg_clsh_param_hph(codec);
		wcd9xxx_enable_clsh_block(codec, true);
		wcd9xxx_chargepump_request(codec, true);
		wcd9xxx_enable_anc_delay(codec, true);
		wcd9xxx_clsh_computation_request(codec,
				CLSH_COMPUTE_HPH_L, true);
		wcd9xxx_enable_buck_mode(codec, BUCK_VREF_2V);
		wcd9xxx_set_fclk_enable_ncp(codec, NCP_FCLK_LEVEL_8);

		dev_dbg(codec->dev, "%s: Done\n", __func__);
	} else {
		if (req_state == WCD9XXX_CLSH_STATE_HPHR) {
			wcd9xxx_clsh_computation_request(codec,
					CLSH_COMPUTE_HPH_R, false);
		} else {
			dev_dbg(codec->dev, "%s: stub fallback to hph_l\n",
					__func__);
		}
	}
}
/* State handler for HPH-R: mirror image of wcd9xxx_clsh_state_hph_l(). */
static void wcd9xxx_clsh_state_hph_r(struct snd_soc_codec *codec,
		struct wcd9xxx_clsh_cdc_data *clsh_d,
		u8 req_state, bool is_enable)
{
	if (is_enable) {
		wcd9xxx_cfg_clsh_buck(codec);
		wcd9xxx_cfg_clsh_param_common(codec);
		wcd9xxx_cfg_clsh_param_hph(codec);
		wcd9xxx_enable_clsh_block(codec, true);
		wcd9xxx_chargepump_request(codec, true);
		wcd9xxx_enable_anc_delay(codec, true);
		wcd9xxx_clsh_computation_request(codec,
				CLSH_COMPUTE_HPH_R, true);
		wcd9xxx_enable_buck_mode(codec, BUCK_VREF_2V);
		wcd9xxx_set_fclk_enable_ncp(codec, NCP_FCLK_LEVEL_8);

		dev_dbg(codec->dev, "%s: Done\n", __func__);
	} else {
		if (req_state == WCD9XXX_CLSH_STATE_HPHL) {
			wcd9xxx_clsh_computation_request(codec,
					CLSH_COMPUTE_HPH_L, false);
		} else {
			dev_dbg(codec->dev, "%s: stub fallback to hph_r\n",
					__func__);
		}
	}
}
/*
 * State handler for stereo headphones: the mono handler already set up
 * buck/NCP/class-H, so only both computation paths need enabling here.
 */
static void wcd9xxx_clsh_state_hph_st(struct snd_soc_codec *codec,
		struct wcd9xxx_clsh_cdc_data *clsh_d,
		u8 req_state, bool is_enable)
{
	if (is_enable) {
		wcd9xxx_clsh_computation_request(codec,
					CLSH_COMPUTE_HPH_L, true);
		wcd9xxx_clsh_computation_request(codec,
					CLSH_COMPUTE_HPH_R, true);
	} else {
		dev_dbg(codec->dev, "%s: stub fallback to hph_st\n", __func__);
	}
}
/*
 * State handler for line-out: reference-counted via cdc_lo_count so
 * only the first user programs the buck and NCP; the configuration
 * depends on the buck supply voltage (1.8 V vs 2.15 V).
 */
static void wcd9xxx_clsh_state_lo(struct snd_soc_codec *codec,
		struct wcd9xxx_clsh_cdc_data *clsh_d,
		u8 req_state, bool is_enable)
{
	/* TODO. Read from device tree */
	clsh_d->buck_mv = WCD9XXX_CDC_BUCK_MV_2P15;
	if (is_enable) {
		/* Only the first line-out user touches the hardware. */
		if (++cdc_lo_count > 1)
			return;

		wcd9xxx_enable_buck_mode(codec, BUCK_VREF_1P8V);
		wcd9xxx_set_fclk_enable_ncp(codec, NCP_FCLK_LEVEL_5);
		if (clsh_d->buck_mv == WCD9XXX_CDC_BUCK_MV_1P8) {
			wcd9xxx_enable_buck(codec, false);
			/*
			 * NOTE(review): value 0x01 does not intersect mask
			 * 0x20, so this write clears the bit — confirm
			 * whether 0x20/0x20 was intended.
			 */
			snd_soc_update_bits(codec, WCD9XXX_A_NCP_STATIC,
						0x20, 0x01);
			wcd9xxx_enable_ncp(codec, true);
			/*
			 * NCP_SETTLE_TIME_US is a microsecond value; the
			 * previous msleep(NCP_SETTLE_TIME_US) slept 50 ms
			 * instead of 50 us. Use usleep_range() like the
			 * other NCP settle points in this file.
			 */
			usleep_range(NCP_SETTLE_TIME_US,
					NCP_SETTLE_TIME_US + 50);
		} else {
			snd_soc_update_bits(codec, WCD9XXX_A_NCP_EN,
						0x40, 0x00);
			wcd9xxx_enable_ncp(codec, true);
			usleep_range(NCP_SETTLE_TIME_US,
					NCP_SETTLE_TIME_US + 50);
			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
						0x01, 0x01);
			snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_5,
						0xFB, (0x02 << 2));
		}
		snd_soc_update_bits(codec, WCD9XXX_A_BUCK_MODE_1,
					0x04, 0x00);
	} else {
		dev_dbg(codec->dev, "%s: stub fallback to lineout\n", __func__);
	}
}
/*
 * Default handler for state-table slots with no legal transition:
 * log the bogus request and WARN so the caller's bug is visible.
 */
static void wcd9xxx_clsh_state_err(struct snd_soc_codec *codec,
		struct wcd9xxx_clsh_cdc_data *clsh_d,
		u8 req_state, bool is_enable)
{
	dev_err(codec->dev, "%s Wrong request for class H state machine requested to %s %s"
			, __func__, is_enable ? "enable" : "disable",
			state_to_str(req_state));
	WARN_ON(1);
}
/*
 * wcd9xxx_clsh_fsm() - drive the class-H state machine
 * @codec: codec instance
 * @cdc_clsh_d: per-codec class-H state data
 * @req_state: state bit(s) being enabled or disabled
 * @req_type: WCD9XXX_CLSH_REQ_ENABLE or _DISABLE
 * @clsh_event: PRE_DAC (enable path) or POST_PA (disable/post-enable path)
 *
 * Computes the new combined state bitmask and dispatches to the
 * matching handler in clsh_state_fp[].
 */
void wcd9xxx_clsh_fsm(struct snd_soc_codec *codec,
		struct wcd9xxx_clsh_cdc_data *cdc_clsh_d,
		u8 req_state, bool req_type, u8 clsh_event)
{
	u8 old_state, new_state;

	switch (clsh_event) {
	case WCD9XXX_CLSH_EVENT_PRE_DAC:
		/* PRE_DAC event should be used only for Enable */
		BUG_ON(req_type != WCD9XXX_CLSH_REQ_ENABLE);

		old_state = cdc_clsh_d->state;
		new_state = old_state | req_state;
		/*
		 * Bounds-check the dispatch index exactly like the
		 * POST_PA path below does; an out-of-range combined
		 * state would otherwise index past clsh_state_fp[].
		 */
		if (new_state >= NUM_CLSH_STATES) {
			dev_err(codec->dev, "%s: wrong new state = %x\n",
					__func__, new_state);
			break;
		}

		(*clsh_state_fp[new_state]) (codec, cdc_clsh_d,
							req_state, req_type);
		cdc_clsh_d->state = new_state;
		dev_info(codec->dev, "%s: ClassH state transition from %s to %s\n",
			__func__, state_to_str(old_state),
			state_to_str(cdc_clsh_d->state));

		break;
	case WCD9XXX_CLSH_EVENT_POST_PA:

		if (req_type == WCD9XXX_CLSH_REQ_DISABLE) {
			/* Line-out is refcounted; only the last user acts. */
			if (req_state == WCD9XXX_CLSH_STATE_LO
					&& --cdc_lo_count > 0)
				break;

			old_state = cdc_clsh_d->state;
			new_state = old_state & (~req_state);

			if (new_state < NUM_CLSH_STATES) {
				(*clsh_state_fp[new_state]) (codec, cdc_clsh_d,
							req_state, req_type);
				cdc_clsh_d->state = new_state;
				dev_info(codec->dev, "%s: ClassH state transition from %s to %s\n",
					__func__, state_to_str(old_state),
					state_to_str(cdc_clsh_d->state));

			} else {
				dev_err(codec->dev, "%s: wrong new state = %x\n",
						__func__, new_state);
			}
		} else if (req_state != WCD9XXX_CLSH_STATE_LO) {
			wcd9xxx_clsh_enable_post_pa(codec);
		}

		break;
	}
}
EXPORT_SYMBOL_GPL(wcd9xxx_clsh_fsm);
/*
 * wcd9xxx_clsh_init() - initialise class-H state data and dispatch table
 *
 * Every slot defaults to the error handler; only the legal state
 * combinations are wired to real handlers.
 */
void wcd9xxx_clsh_init(struct wcd9xxx_clsh_cdc_data *clsh)
{
	int i;
	clsh->state = WCD9XXX_CLSH_STATE_IDLE;

	for (i = 0; i < NUM_CLSH_STATES; i++)
		clsh_state_fp[i] = wcd9xxx_clsh_state_err;

	clsh_state_fp[WCD9XXX_CLSH_STATE_IDLE] = wcd9xxx_clsh_state_idle;
	clsh_state_fp[WCD9XXX_CLSH_STATE_EAR] = wcd9xxx_clsh_state_ear;
	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHL] =
						wcd9xxx_clsh_state_hph_l;
	clsh_state_fp[WCD9XXX_CLSH_STATE_HPHR] =
						wcd9xxx_clsh_state_hph_r;
	clsh_state_fp[WCD9XXX_CLSH_STATE_HPH_ST] =
						wcd9xxx_clsh_state_hph_st;
	clsh_state_fp[WCD9XXX_CLSH_STATE_LO] = wcd9xxx_clsh_state_lo;
}
EXPORT_SYMBOL_GPL(wcd9xxx_clsh_init);
MODULE_DESCRIPTION("WCD9XXX Common");
MODULE_LICENSE("GPL v2");
| gpl-2.0 |
voron19982/limbo-android | jni/glib/gio/tests/volumemonitor.c | 42 | 3667 | #include <gio/gio.h>
static GVolumeMonitor *monitor;
/*
 * Exercise the GMount accessors: the mount must have a name, its
 * volume/drive back-pointers must match the objects it was reached
 * through, and if it exposes a UUID, looking that UUID up on the
 * monitor must return the very same mount instance.
 */
static void
do_mount_tests (GDrive *drive, GVolume *volume, GMount *mount)
{
  GDrive *d;
  GVolume *v;
  gchar *name;
  gchar *uuid;

  name = g_mount_get_name (mount);
  g_assert (name != NULL);
  g_free (name);

  v = g_mount_get_volume (mount);
  g_assert (v == volume);
  if (v != NULL)
    g_object_unref (v);

  d = g_mount_get_drive (mount);
  g_assert (d == drive);
  if (d != NULL)
    g_object_unref (d);

  uuid = g_mount_get_uuid (mount);
  if (uuid)
    {
      GMount *m;
      m = g_volume_monitor_get_mount_for_uuid (monitor, uuid);
      g_assert (m == mount);
      g_object_unref (m);
      g_free (uuid);
    }
}
/*
 * Exercise the GVolume accessors: the volume must have a name and
 * point back at @drive; if it has a mount, that mount is checked
 * recursively; a UUID, if present, must round-trip through the
 * monitor back to the same volume instance.
 */
static void
do_volume_tests (GDrive *drive, GVolume *volume)
{
  GDrive *d;
  gchar *name;
  GMount *mount;
  gchar *uuid;

  name = g_volume_get_name (volume);
  g_assert (name != NULL);
  g_free (name);

  d = g_volume_get_drive (volume);
  g_assert (d == drive);
  g_object_unref (d);

  mount = g_volume_get_mount (volume);
  if (mount != NULL)
    {
      do_mount_tests (drive, volume, mount);
      g_object_unref (mount);
    }

  uuid = g_volume_get_uuid (volume);
  if (uuid)
    {
      GVolume *v;
      v = g_volume_monitor_get_volume_for_uuid (monitor, uuid);
      g_assert (v == volume);
      g_object_unref (v);
      g_free (uuid);
    }
}
/*
 * Exercise the GDrive accessors: the drive must have a name,
 * g_drive_has_volumes() must agree with the volume list being
 * non-empty, and every volume is checked recursively.
 */
static void
do_drive_tests (GDrive *drive)
{
  GList *volumes, *l;
  gchar *name;
  gboolean has_volumes;

  g_assert (G_IS_DRIVE (drive));
  name = g_drive_get_name (drive);
  g_assert (name != NULL);
  g_free (name);

  has_volumes = g_drive_has_volumes (drive);
  volumes = g_drive_get_volumes (drive);
  g_assert (has_volumes == (volumes != NULL));
  for (l = volumes; l; l = l->next)
    {
      GVolume *volume = l->data;
      do_volume_tests (drive, volume);
    }

  g_list_foreach (volumes, (GFunc)g_object_unref, NULL);
  g_list_free (volumes);
}
static void
test_connected_drives (void)
{
GList *drives;
GList *l;
drives = g_volume_monitor_get_connected_drives (monitor);
for (l = drives; l; l = l->next)
{
GDrive *drive = l->data;
do_drive_tests (drive);
}
g_list_foreach (drives, (GFunc)g_object_unref, NULL);
g_list_free (drives);
}
static void
test_volumes (void)
{
GList *volumes, *l;
volumes = g_volume_monitor_get_volumes (monitor);
for (l = volumes; l; l = l->next)
{
GVolume *volume = l->data;
GDrive *drive;
drive = g_volume_get_drive (volume);
do_volume_tests (drive, volume);
g_object_unref (drive);
}
g_list_foreach (volumes, (GFunc)g_object_unref, NULL);
g_list_free (volumes);
}
/* Run the mount assertions on every mount the monitor reports. */
static void
test_mounts (void)
{
  GList *mounts;
  GList *iter;

  mounts = g_volume_monitor_get_mounts (monitor);
  for (iter = mounts; iter != NULL; iter = iter->next)
    {
      GMount *mnt = iter->data;
      GDrive *drv = g_mount_get_drive (mnt);
      GVolume *vol = g_mount_get_volume (mnt);

      do_mount_tests (drv, vol, mnt);

      if (drv != NULL)
        g_object_unref (drv);
      if (vol != NULL)
        g_object_unref (vol);
    }

  g_list_foreach (mounts, (GFunc) g_object_unref, NULL);
  g_list_free (mounts);
}
/*
 * Test entry point: force the local VFS backend so results do not
 * depend on remote/gvfs mounts, register the three test cases and
 * run them against a fresh GVolumeMonitor.
 */
int
main (int argc, char *argv[])
{
  gboolean ret;

  g_setenv ("GIO_USE_VFS", "local", FALSE);

  g_type_init ();
  g_test_init (&argc, &argv, NULL);

  monitor = g_volume_monitor_get ();

  g_test_add_func ("/volumemonitor/connected_drives", test_connected_drives);
  g_test_add_func ("/volumemonitor/volumes", test_volumes);
  g_test_add_func ("/volumemonitor/mounts", test_mounts);

  ret = g_test_run ();

  g_object_unref (monitor);

  return ret;
}
| gpl-2.0 |
soderstrom-rikard/linux | lib/list_debug.c | 42 | 1759 | /*
* Copyright 2006, Red Hat, Inc., Dave Jones
* Released under the General Public License (GPL).
*
* This file contains the linked list validation for DEBUG_LIST.
*/
#include <linux/export.h>
#include <linux/list.h>
#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/rculist.h>
/*
* Check that the data structures for the list manipulations are reasonably
* valid. Failures here indicate memory corruption (and possibly an exploit
* attempt).
*/
/*
 * Validate the neighbourhood of a pending list_add(): 'new' is about to
 * be linked between 'prev' and 'next'.  A mismatch between the forward
 * and backward links, or inserting a node adjacent to itself, indicates
 * memory corruption (or an exploit attempt) and is reported through
 * CHECK_DATA_CORRUPTION (BUG or WARN depending on
 * CONFIG_BUG_ON_DATA_CORRUPTION — NOTE(review): the macro is expected
 * to make this function return false on failure; confirm against the
 * bug.h definition in this tree).  Reaching the end means all links are
 * consistent.
 */
bool __list_add_valid(struct list_head *new, struct list_head *prev,
                      struct list_head *next)
{
    CHECK_DATA_CORRUPTION(next->prev != prev,
        "list_add corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
        prev, next->prev, next);
    CHECK_DATA_CORRUPTION(prev->next != next,
        "list_add corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
        next, prev->next, prev);
    CHECK_DATA_CORRUPTION(new == prev || new == next,
        "list_add double add: new=%p, prev=%p, next=%p.\n",
        new, prev, next);
    return true;
}
EXPORT_SYMBOL(__list_add_valid);
/*
 * Validate 'entry' before it is unlinked.  LIST_POISON1/2 in its link
 * fields mean the node was already deleted (double list_del); otherwise
 * both neighbours must still point back at 'entry'.  Failures are
 * reported via CHECK_DATA_CORRUPTION as in __list_add_valid().
 */
bool __list_del_entry_valid(struct list_head *entry)
{
    struct list_head *prev, *next;

    prev = entry->prev;
    next = entry->next;

    CHECK_DATA_CORRUPTION(next == LIST_POISON1,
        "list_del corruption, %p->next is LIST_POISON1 (%p)\n",
        entry, LIST_POISON1);
    CHECK_DATA_CORRUPTION(prev == LIST_POISON2,
        "list_del corruption, %p->prev is LIST_POISON2 (%p)\n",
        entry, LIST_POISON2);
    CHECK_DATA_CORRUPTION(prev->next != entry,
        "list_del corruption. prev->next should be %p, but was %p\n",
        entry, prev->next);
    CHECK_DATA_CORRUPTION(next->prev != entry,
        "list_del corruption. next->prev should be %p, but was %p\n",
        entry, next->prev);
    return true;
}
EXPORT_SYMBOL(__list_del_entry_valid);
| gpl-2.0 |
tank0412/android_kernel_xiaomi_latte | tools/perf/util/sort.c | 298 | 29115 | #include "sort.h"
#include "hist.h"
#include "comm.h"
#include "symbol.h"
regex_t parent_regex;
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
const char default_sort_order[] = "comm,dso,symbol";
const char *sort_order = default_sort_order;
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
int sort__need_collapse = 0;
int sort__has_parent = 0;
int sort__has_sym = 0;
int sort__has_dso = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
enum sort_type sort__first_dimension;
LIST_HEAD(hist_entry__sort_list);
/*
 * vsnprintf() into bf, then, when a column field separator is
 * configured (perf report -t/--field-separator), replace every
 * occurrence of that separator character inside the formatted text
 * with '.' so the output still splits into the correct number of
 * columns.  Returns the number of characters stored, clamped to
 * size - 1 when vsnprintf() reports truncation.
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
{
    int n;
    va_list ap;

    va_start(ap, fmt);
    n = vsnprintf(bf, size, fmt, ap);
    if (symbol_conf.field_sep && n > 0) {
        char *sep = bf;

        while (1) {
            sep = strchr(sep, *symbol_conf.field_sep);
            if (sep == NULL)
                break;
            /*
             * Advance past the replaced character; restarting the
             * search at the same position would loop forever when the
             * configured separator is '.' itself.
             */
            *sep++ = '.';
        }
    }
    va_end(ap);
    if (n >= (int)size)
        return size - 1;
    return n;
}
/*
 * Order two possibly-NULL pointers: 0 when both are NULL, -1 when only
 * the left is NULL, 1 otherwise.  Callers invoke this only when at
 * least one side is NULL, so non-NULL contents are never compared.
 */
static int64_t cmp_null(const void *l, const void *r)
{
    if (l == NULL)
        return r == NULL ? 0 : -1;
    return 1;
}
/* --sort pid */

/* Order entries by thread id, descending (newer/larger tids first). */
static int64_t
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return right->thread->tid - left->thread->tid;
}

/* Print "comm:tid"; six columns of the width budget go to ":%5d". */
static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
                                       size_t size, unsigned int width)
{
    const char *comm = thread__comm_str(he->thread);
    return repsep_snprintf(bf, size, "%*s:%5d", width - 6,
                           comm ?: "", he->thread->tid);
}

struct sort_entry sort_thread = {
    .se_header = "Command: Pid",
    .se_cmp = sort__thread_cmp,
    .se_snprintf = hist_entry__thread_snprintf,
    .se_width_idx = HISTC_THREAD,
};

/* --sort comm */

static int64_t
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
{
    /* Compare the addr that should be unique among comm */
    return comm__str(right->comm) - comm__str(left->comm);
}

/* Collapse pass uses the same interned-string-pointer comparison. */
static int64_t
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
{
    /* Compare the addr that should be unique among comm */
    return comm__str(right->comm) - comm__str(left->comm);
}

static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
                                     size_t size, unsigned int width)
{
    return repsep_snprintf(bf, size, "%*s", width, comm__str(he->comm));
}

struct sort_entry sort_comm = {
    .se_header = "Command",
    .se_cmp = sort__comm_cmp,
    .se_collapse = sort__comm_collapse,
    .se_snprintf = hist_entry__comm_snprintf,
    .se_width_idx = HISTC_COMM,
};
/* --sort dso */

/*
 * Compare two maps by DSO name (long name in verbose mode, short name
 * otherwise); a missing map/dso on either side falls back to cmp_null().
 */
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
{
    struct dso *dso_l = map_l ? map_l->dso : NULL;
    struct dso *dso_r = map_r ? map_r->dso : NULL;
    const char *dso_name_l, *dso_name_r;

    if (!dso_l || !dso_r)
        return cmp_null(dso_l, dso_r);

    if (verbose) {
        dso_name_l = dso_l->long_name;
        dso_name_r = dso_r->long_name;
    } else {
        dso_name_l = dso_l->short_name;
        dso_name_r = dso_r->short_name;
    }

    return strcmp(dso_name_l, dso_name_r);
}

static int64_t
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return _sort__dso_cmp(left->ms.map, right->ms.map);
}

/* Print the DSO name, or "[unknown]" when the entry has no map/dso. */
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
                                     size_t size, unsigned int width)
{
    if (map && map->dso) {
        const char *dso_name = !verbose ? map->dso->short_name :
            map->dso->long_name;
        return repsep_snprintf(bf, size, "%-*s", width, dso_name);
    }

    return repsep_snprintf(bf, size, "%-*s", width, "[unknown]");
}

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width)
{
    return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);
}

struct sort_entry sort_dso = {
    .se_header = "Shared Object",
    .se_cmp = sort__dso_cmp,
    .se_snprintf = hist_entry__dso_snprintf,
    .se_width_idx = HISTC_DSO,
};
/* --sort symbol */

/* Fall-back ordering when no symbols are available: raw IP, descending. */
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
{
    return (int64_t)(right_ip - left_ip);
}

/* Order symbols by start address; NULL symbols sort via cmp_null(). */
static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
{
    u64 ip_l, ip_r;

    if (!sym_l || !sym_r)
        return cmp_null(sym_l, sym_r);

    if (sym_l == sym_r)
        return 0;

    ip_l = sym_l->start;
    ip_r = sym_r->start;

    return (int64_t)(ip_r - ip_l);
}

static int64_t
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
{
    int64_t ret;

    if (!left->ms.sym && !right->ms.sym)
        return _sort__addr_cmp(left->ip, right->ip);

    /*
     * comparing symbol address alone is not enough since it's a
     * relative address within a dso.
     */
    if (!sort__has_dso) {
        ret = sort__dso_cmp(left, right);
        if (ret != 0)
            return ret;
    }

    return _sort__sym_cmp(left->ms.sym, right->ms.sym);
}

/*
 * Format "[level] symbol" (plus raw address and symtab origin character
 * in verbose mode).  Variable-map symbols additionally show the offset
 * from the symbol start; unresolved entries print the raw address.
 */
static int _hist_entry__sym_snprintf(struct map *map, struct symbol *sym,
                                     u64 ip, char level, char *bf, size_t size,
                                     unsigned int width)
{
    size_t ret = 0;

    if (verbose) {
        char o = map ? dso__symtab_origin(map->dso) : '!';
        ret += repsep_snprintf(bf, size, "%-#*llx %c ",
                               BITS_PER_LONG / 4 + 2, ip, o);
    }

    ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);
    if (sym && map) {
        if (map->type == MAP__VARIABLE) {
            ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
            ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
                                   ip - map->unmap_ip(map, sym->start));
            ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
                                   width - ret, "");
        } else {
            ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
                                   width - ret,
                                   sym->name);
        }
    } else {
        size_t len = BITS_PER_LONG / 4;
        ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",
                               len, ip);
        ret += repsep_snprintf(bf + ret, size - ret, "%-*s",
                               width - ret, "");
    }

    return ret;
}

static int hist_entry__sym_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width)
{
    return _hist_entry__sym_snprintf(he->ms.map, he->ms.sym, he->ip,
                                     he->level, bf, size, width);
}

struct sort_entry sort_sym = {
    .se_header = "Symbol",
    .se_cmp = sort__sym_cmp,
    .se_snprintf = hist_entry__sym_snprintf,
    .se_width_idx = HISTC_SYMBOL,
};
/* --sort srcline */

/*
 * Compare by "file:line" strings, resolving and caching them on first
 * use.  NOTE(review): entries without a map cache the shared
 * SRCLINE_UNKNOWN sentinel while get_srcline() presumably returns an
 * allocated string — confirm the free path distinguishes the two.
 */
static int64_t
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
{
    if (!left->srcline) {
        if (!left->ms.map)
            left->srcline = SRCLINE_UNKNOWN;
        else {
            struct map *map = left->ms.map;
            left->srcline = get_srcline(map->dso,
                                        map__rip_2objdump(map, left->ip));
        }
    }
    if (!right->srcline) {
        if (!right->ms.map)
            right->srcline = SRCLINE_UNKNOWN;
        else {
            struct map *map = right->ms.map;
            right->srcline = get_srcline(map->dso,
                                         map__rip_2objdump(map, right->ip));
        }
    }
    return strcmp(left->srcline, right->srcline);
}

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
                                        size_t size,
                                        unsigned int width __maybe_unused)
{
    return repsep_snprintf(bf, size, "%s", he->srcline);
}

struct sort_entry sort_srcline = {
    .se_header = "Source:Line",
    .se_cmp = sort__srcline_cmp,
    .se_snprintf = hist_entry__srcline_snprintf,
    .se_width_idx = HISTC_SRCLINE,
};
/* --sort parent */

/* Order by the name of the matched parent symbol (see parent_pattern). */
static int64_t
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
{
    struct symbol *sym_l = left->parent;
    struct symbol *sym_r = right->parent;

    if (!sym_l || !sym_r)
        return cmp_null(sym_l, sym_r);

    return strcmp(sym_l->name, sym_r->name);
}

static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
                                       size_t size, unsigned int width)
{
    return repsep_snprintf(bf, size, "%-*s", width,
                           he->parent ? he->parent->name : "[other]");
}

struct sort_entry sort_parent = {
    .se_header = "Parent symbol",
    .se_cmp = sort__parent_cmp,
    .se_snprintf = hist_entry__parent_snprintf,
    .se_width_idx = HISTC_PARENT,
};

/* --sort cpu */

/* Order by CPU number, descending. */
static int64_t
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return right->cpu - left->cpu;
}

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width)
{
    return repsep_snprintf(bf, size, "%*d", width, he->cpu);
}

struct sort_entry sort_cpu = {
    .se_header = "CPU",
    .se_cmp = sort__cpu_cmp,
    .se_snprintf = hist_entry__cpu_snprintf,
    .se_width_idx = HISTC_CPU,
};
/* sort keys for branch stacks */

/* Branch source DSO: reuses the generic map/dso comparison. */
static int64_t
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return _sort__dso_cmp(left->branch_info->from.map,
                          right->branch_info->from.map);
}

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
                                         size_t size, unsigned int width)
{
    return _hist_entry__dso_snprintf(he->branch_info->from.map,
                                     bf, size, width);
}

/* Branch target DSO. */
static int64_t
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return _sort__dso_cmp(left->branch_info->to.map,
                          right->branch_info->to.map);
}

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
                                       size_t size, unsigned int width)
{
    return _hist_entry__dso_snprintf(he->branch_info->to.map,
                                     bf, size, width);
}

/* Branch source symbol; falls back to raw address when unresolved. */
static int64_t
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
{
    struct addr_map_symbol *from_l = &left->branch_info->from;
    struct addr_map_symbol *from_r = &right->branch_info->from;

    if (!from_l->sym && !from_r->sym)
        return _sort__addr_cmp(from_l->addr, from_r->addr);

    return _sort__sym_cmp(from_l->sym, from_r->sym);
}

/* Branch target symbol; falls back to raw address when unresolved. */
static int64_t
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
{
    struct addr_map_symbol *to_l = &left->branch_info->to;
    struct addr_map_symbol *to_r = &right->branch_info->to;

    if (!to_l->sym && !to_r->sym)
        return _sort__addr_cmp(to_l->addr, to_r->addr);

    return _sort__sym_cmp(to_l->sym, to_r->sym);
}

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
                                         size_t size, unsigned int width)
{
    struct addr_map_symbol *from = &he->branch_info->from;
    return _hist_entry__sym_snprintf(from->map, from->sym, from->addr,
                                     he->level, bf, size, width);
}

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
                                       size_t size, unsigned int width)
{
    struct addr_map_symbol *to = &he->branch_info->to;
    return _hist_entry__sym_snprintf(to->map, to->sym, to->addr,
                                     he->level, bf, size, width);
}

struct sort_entry sort_dso_from = {
    .se_header = "Source Shared Object",
    .se_cmp = sort__dso_from_cmp,
    .se_snprintf = hist_entry__dso_from_snprintf,
    .se_width_idx = HISTC_DSO_FROM,
};

struct sort_entry sort_dso_to = {
    .se_header = "Target Shared Object",
    .se_cmp = sort__dso_to_cmp,
    .se_snprintf = hist_entry__dso_to_snprintf,
    .se_width_idx = HISTC_DSO_TO,
};

struct sort_entry sort_sym_from = {
    .se_header = "Source Symbol",
    .se_cmp = sort__sym_from_cmp,
    .se_snprintf = hist_entry__sym_from_snprintf,
    .se_width_idx = HISTC_SYMBOL_FROM,
};

struct sort_entry sort_sym_to = {
    .se_header = "Target Symbol",
    .se_cmp = sort__sym_to_cmp,
    .se_snprintf = hist_entry__sym_to_snprintf,
    .se_width_idx = HISTC_SYMBOL_TO,
};
/*
 * Equality-only comparison: 0 when both entries carry identical
 * mispredict/predict flags, 1 otherwise.  The result never goes
 * negative, so this key only groups equal entries; it does not impose
 * a total order on its own.
 */
static int64_t
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
{
    const unsigned char mp = left->branch_info->flags.mispred !=
        right->branch_info->flags.mispred;
    const unsigned char p = left->branch_info->flags.predicted !=
        right->branch_info->flags.predicted;

    return mp || p;
}
/*
 * Print "N" for a predicted branch, "Y" for a mispredicted one, and
 * "N/A" when neither flag is set.  'out' must not be static: a static
 * lifetime made it retain the "N"/"Y" assigned for a previous entry, so
 * later flag-less entries printed a stale value instead of "N/A".
 */
static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
                                           size_t size, unsigned int width)
{
    const char *out = "N/A";

    if (he->branch_info->flags.predicted)
        out = "N";
    else if (he->branch_info->flags.mispred)
        out = "Y";

    return repsep_snprintf(bf, size, "%-*s", width, out);
}
/* --sort daddr_sym */

/* Order by data access address, descending; no mem_info counts as 0. */
static int64_t
sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
    uint64_t l = 0, r = 0;

    if (left->mem_info)
        l = left->mem_info->daddr.addr;
    if (right->mem_info)
        r = right->mem_info->daddr.addr;

    return (int64_t)(r - l);
}

/* Print the data symbol via the shared symbol formatter. */
static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
                                      size_t size, unsigned int width)
{
    uint64_t addr = 0;
    struct map *map = NULL;
    struct symbol *sym = NULL;

    if (he->mem_info) {
        addr = he->mem_info->daddr.addr;
        map = he->mem_info->daddr.map;
        sym = he->mem_info->daddr.sym;
    }
    return _hist_entry__sym_snprintf(map, sym, addr, he->level, bf, size,
                                     width);
}

/* Order by the DSO containing the data address. */
static int64_t
sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
{
    struct map *map_l = NULL;
    struct map *map_r = NULL;

    if (left->mem_info)
        map_l = left->mem_info->daddr.map;
    if (right->mem_info)
        map_r = right->mem_info->daddr.map;

    return _sort__dso_cmp(map_l, map_r);
}

static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
                                          size_t size, unsigned int width)
{
    struct map *map = NULL;

    if (he->mem_info)
        map = he->mem_info->daddr.map;

    return _hist_entry__dso_snprintf(map, bf, size, width);
}
/* Order by the mem_lock bits of the data source; missing info is N/A. */
static int64_t
sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
{
    union perf_mem_data_src data_src_l;
    union perf_mem_data_src data_src_r;

    if (left->mem_info)
        data_src_l = left->mem_info->data_src;
    else
        data_src_l.mem_lock = PERF_MEM_LOCK_NA;

    if (right->mem_info)
        data_src_r = right->mem_info->data_src;
    else
        data_src_r.mem_lock = PERF_MEM_LOCK_NA;

    return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
}

/* Print "Yes"/"No"/"N/A" for the locked-access flag. */
static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
                                       size_t size, unsigned int width)
{
    const char *out;
    u64 mask = PERF_MEM_LOCK_NA;

    if (he->mem_info)
        mask = he->mem_info->data_src.mem_lock;

    if (mask & PERF_MEM_LOCK_NA)
        out = "N/A";
    else if (mask & PERF_MEM_LOCK_LOCKED)
        out = "Yes";
    else
        out = "No";

    return repsep_snprintf(bf, size, "%-*s", width, out);
}
/* Order by the mem_dtlb bits of the data source; missing info is N/A. */
static int64_t
sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
{
    union perf_mem_data_src data_src_l;
    union perf_mem_data_src data_src_r;

    if (left->mem_info)
        data_src_l = left->mem_info->data_src;
    else
        data_src_l.mem_dtlb = PERF_MEM_TLB_NA;

    if (right->mem_info)
        data_src_r = right->mem_info->data_src;
    else
        data_src_r.mem_dtlb = PERF_MEM_TLB_NA;

    return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
}

/* Names indexed by PERF_MEM_TLB_* bit position (after shifting out NA). */
static const char * const tlb_access[] = {
    "N/A",
    "HIT",
    "MISS",
    "L1",
    "L2",
    "Walker",
    "Fault",
};
#define NUM_TLB_ACCESS (sizeof(tlb_access)/sizeof(const char *))

/*
 * Render the set TLB bits as "A or B ... [hit] [miss]".  HIT/MISS are
 * pulled out first and appended at the end.  NOTE(review): 'l' is not
 * updated for the final " hit"/" miss" appends and strcat(" or ") is
 * unbounded — safe only because the table entries cannot fill out[64];
 * confirm if the table grows.
 */
static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width)
{
    char out[64];
    size_t sz = sizeof(out) - 1; /* -1 for null termination */
    size_t l = 0, i;
    u64 m = PERF_MEM_TLB_NA;
    u64 hit, miss;

    out[0] = '\0';

    if (he->mem_info)
        m = he->mem_info->data_src.mem_dtlb;

    hit = m & PERF_MEM_TLB_HIT;
    miss = m & PERF_MEM_TLB_MISS;

    /* already taken care of */
    m &= ~(PERF_MEM_TLB_HIT|PERF_MEM_TLB_MISS);

    for (i = 0; m && i < NUM_TLB_ACCESS; i++, m >>= 1) {
        if (!(m & 0x1))
            continue;
        if (l) {
            strcat(out, " or ");
            l += 4;
        }
        strncat(out, tlb_access[i], sz - l);
        l += strlen(tlb_access[i]);
    }
    if (*out == '\0')
        strcpy(out, "N/A");
    if (hit)
        strncat(out, " hit", sz - l);
    if (miss)
        strncat(out, " miss", sz - l);

    return repsep_snprintf(bf, size, "%-*s", width, out);
}
/* Order by the mem_lvl bits of the data source; missing info is N/A. */
static int64_t
sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
{
    union perf_mem_data_src data_src_l;
    union perf_mem_data_src data_src_r;

    if (left->mem_info)
        data_src_l = left->mem_info->data_src;
    else
        data_src_l.mem_lvl = PERF_MEM_LVL_NA;

    if (right->mem_info)
        data_src_r = right->mem_info->data_src;
    else
        data_src_r.mem_lvl = PERF_MEM_LVL_NA;

    return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
}

/* Names indexed by PERF_MEM_LVL_* bit position. */
static const char * const mem_lvl[] = {
    "N/A",
    "HIT",
    "MISS",
    "L1",
    "LFB",
    "L2",
    "L3",
    "Local RAM",
    "Remote RAM (1 hop)",
    "Remote RAM (2 hops)",
    "Remote Cache (1 hop)",
    "Remote Cache (2 hops)",
    "I/O",
    "Uncached",
};
#define NUM_MEM_LVL (sizeof(mem_lvl)/sizeof(const char *))

/*
 * Render the set memory-level bits as "A or B ... [hit] [miss]",
 * with the same buffer-accounting caveats as hist_entry__tlb_snprintf().
 */
static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
                                    size_t size, unsigned int width)
{
    char out[64];
    size_t sz = sizeof(out) - 1; /* -1 for null termination */
    size_t i, l = 0;
    u64 m = PERF_MEM_LVL_NA;
    u64 hit, miss;

    if (he->mem_info)
        m = he->mem_info->data_src.mem_lvl;

    out[0] = '\0';

    hit = m & PERF_MEM_LVL_HIT;
    miss = m & PERF_MEM_LVL_MISS;

    /* already taken care of */
    m &= ~(PERF_MEM_LVL_HIT|PERF_MEM_LVL_MISS);

    for (i = 0; m && i < NUM_MEM_LVL; i++, m >>= 1) {
        if (!(m & 0x1))
            continue;
        if (l) {
            strcat(out, " or ");
            l += 4;
        }
        strncat(out, mem_lvl[i], sz - l);
        l += strlen(mem_lvl[i]);
    }
    if (*out == '\0')
        strcpy(out, "N/A");
    if (hit)
        strncat(out, " hit", sz - l);
    if (miss)
        strncat(out, " miss", sz - l);

    return repsep_snprintf(bf, size, "%-*s", width, out);
}
/* Order by the mem_snoop bits of the data source; missing info is N/A. */
static int64_t
sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
{
    union perf_mem_data_src data_src_l;
    union perf_mem_data_src data_src_r;

    if (left->mem_info)
        data_src_l = left->mem_info->data_src;
    else
        data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;

    if (right->mem_info)
        data_src_r = right->mem_info->data_src;
    else
        data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;

    return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
}

/* Names indexed by PERF_MEM_SNOOP_* bit position. */
static const char * const snoop_access[] = {
    "N/A",
    "None",
    "Miss",
    "Hit",
    "HitM",
};
#define NUM_SNOOP_ACCESS (sizeof(snoop_access)/sizeof(const char *))

/* Render the set snoop bits as "A or B ...", or "N/A" when none. */
static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
                                      size_t size, unsigned int width)
{
    char out[64];
    size_t sz = sizeof(out) - 1; /* -1 for null termination */
    size_t i, l = 0;
    u64 m = PERF_MEM_SNOOP_NA;

    out[0] = '\0';

    if (he->mem_info)
        m = he->mem_info->data_src.mem_snoop;

    for (i = 0; m && i < NUM_SNOOP_ACCESS; i++, m >>= 1) {
        if (!(m & 0x1))
            continue;
        if (l) {
            strcat(out, " or ");
            l += 4;
        }
        strncat(out, snoop_access[i], sz - l);
        l += strlen(snoop_access[i]);
    }

    if (*out == '\0')
        strcpy(out, "N/A");

    return repsep_snprintf(bf, size, "%-*s", width, out);
}
struct sort_entry sort_mispredict = {
    .se_header = "Branch Mispredicted",
    .se_cmp = sort__mispredict_cmp,
    .se_snprintf = hist_entry__mispredict_snprintf,
    .se_width_idx = HISTC_MISPREDICT,
};

/* Average sample weight; guards against division by zero events. */
static u64 he_weight(struct hist_entry *he)
{
    return he->stat.nr_events ? he->stat.weight / he->stat.nr_events : 0;
}

static int64_t
sort__local_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return he_weight(left) - he_weight(right);
}

static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
                                             size_t size, unsigned int width)
{
    return repsep_snprintf(bf, size, "%-*llu", width, he_weight(he));
}

struct sort_entry sort_local_weight = {
    .se_header = "Local Weight",
    .se_cmp = sort__local_weight_cmp,
    .se_snprintf = hist_entry__local_weight_snprintf,
    .se_width_idx = HISTC_LOCAL_WEIGHT,
};

/* Global weight: total accumulated weight, not averaged per event. */
static int64_t
sort__global_weight_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return left->stat.weight - right->stat.weight;
}

static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
                                              size_t size, unsigned int width)
{
    return repsep_snprintf(bf, size, "%-*llu", width, he->stat.weight);
}

struct sort_entry sort_global_weight = {
    .se_header = "Weight",
    .se_cmp = sort__global_weight_cmp,
    .se_snprintf = hist_entry__global_weight_snprintf,
    .se_width_idx = HISTC_GLOBAL_WEIGHT,
};
struct sort_entry sort_mem_daddr_sym = {
    .se_header = "Data Symbol",
    .se_cmp = sort__daddr_cmp,
    .se_snprintf = hist_entry__daddr_snprintf,
    .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_daddr_dso = {
    .se_header = "Data Object",
    .se_cmp = sort__dso_daddr_cmp,
    .se_snprintf = hist_entry__dso_daddr_snprintf,
    /* NOTE(review): reuses the DADDR_SYMBOL width slot rather than a
     * dedicated DSO one — confirm against the HISTC_* enum whether a
     * separate index exists in this tree. */
    .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
};

struct sort_entry sort_mem_locked = {
    .se_header = "Locked",
    .se_cmp = sort__locked_cmp,
    .se_snprintf = hist_entry__locked_snprintf,
    .se_width_idx = HISTC_MEM_LOCKED,
};

struct sort_entry sort_mem_tlb = {
    .se_header = "TLB access",
    .se_cmp = sort__tlb_cmp,
    .se_snprintf = hist_entry__tlb_snprintf,
    .se_width_idx = HISTC_MEM_TLB,
};

struct sort_entry sort_mem_lvl = {
    .se_header = "Memory access",
    .se_cmp = sort__lvl_cmp,
    .se_snprintf = hist_entry__lvl_snprintf,
    .se_width_idx = HISTC_MEM_LVL,
};

struct sort_entry sort_mem_snoop = {
    .se_header = "Snoop",
    .se_cmp = sort__snoop_cmp,
    .se_snprintf = hist_entry__snoop_snprintf,
    .se_width_idx = HISTC_MEM_SNOOP,
};
/* Equality-only compare on the transaction-abort flag (0 or 1). */
static int64_t
sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return left->branch_info->flags.abort !=
        right->branch_info->flags.abort;
}

/*
 * Print "A" for an aborted transaction branch, "." otherwise.  'out'
 * must not be static: a static lifetime kept the "A" assigned for a
 * previous entry and mislabelled later entries whose abort flag is
 * clear.
 */
static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
                                      size_t size, unsigned int width)
{
    const char *out = ".";

    if (he->branch_info->flags.abort)
        out = "A";

    return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_abort = {
    .se_header = "Transaction abort",
    .se_cmp = sort__abort_cmp,
    .se_snprintf = hist_entry__abort_snprintf,
    .se_width_idx = HISTC_ABORT,
};
/* Equality-only compare on the in-transaction flag (0 or 1). */
static int64_t
sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return left->branch_info->flags.in_tx !=
        right->branch_info->flags.in_tx;
}

/*
 * Print "T" for a branch inside a transaction, "." otherwise.  'out'
 * must not be static: a static lifetime kept the "T" assigned for a
 * previous entry and mislabelled later entries whose in_tx flag is
 * clear.
 */
static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
                                      size_t size, unsigned int width)
{
    const char *out = ".";

    if (he->branch_info->flags.in_tx)
        out = "T";

    return repsep_snprintf(bf, size, "%-*s", width, out);
}

struct sort_entry sort_in_tx = {
    .se_header = "Branch in transaction",
    .se_cmp = sort__in_tx_cmp,
    .se_snprintf = hist_entry__in_tx_snprintf,
    .se_width_idx = HISTC_IN_TX,
};
/* Order by the raw transaction qualifier word. */
static int64_t
sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
{
    return left->transaction - right->transaction;
}
/* Append 'str' (with its NUL) at 'p' and return the position just past
 * the copied text, ready for the next append. */
static inline char *add_str(char *p, const char *str)
{
    size_t n = strlen(str);

    memcpy(p, str, n + 1);  /* n + 1 copies the terminating NUL too */
    return p + n;
}
/*
 * Transaction qualifier bits and their display names.  skip_for_len
 * marks names excluded from the column-width computation (bits that
 * cannot appear simultaneously with the others share the budget).
 */
static struct txbit {
    unsigned flag;
    const char *name;
    int skip_for_len;
} txbits[] = {
    { PERF_TXN_ELISION, "EL ", 0 },
    { PERF_TXN_TRANSACTION, "TX ", 1 },
    { PERF_TXN_SYNC, "SYNC ", 1 },
    { PERF_TXN_ASYNC, "ASYNC ", 0 },
    { PERF_TXN_RETRY, "RETRY ", 0 },
    { PERF_TXN_CONFLICT, "CON ", 0 },
    { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
    { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
    { 0, NULL, 0 }
};
int hist_entry__transaction_len(void)
{
int i;
int len = 0;
for (i = 0; txbits[i].name; i++) {
if (!txbits[i].skip_for_len)
len += strlen(txbits[i].name);
}
len += 4; /* :XX<space> */
return len;
}
/*
 * Render the transaction word as a space-separated list of flag names,
 * "NEITHER " when a non-zero word carries neither SYNC nor ASYNC, and a
 * trailing ":<hex>" abort code when present.  buf[128] comfortably
 * holds every name in txbits plus the suffix.
 */
static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
                                            size_t size, unsigned int width)
{
    u64 t = he->transaction;
    char buf[128];
    char *p = buf;
    int i;

    buf[0] = 0;
    for (i = 0; txbits[i].name; i++)
        if (txbits[i].flag & t)
            p = add_str(p, txbits[i].name);
    if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
        p = add_str(p, "NEITHER ");
    if (t & PERF_TXN_ABORT_MASK) {
        sprintf(p, ":%" PRIx64,
                (t & PERF_TXN_ABORT_MASK) >>
                PERF_TXN_ABORT_SHIFT);
        p += strlen(p);
    }

    return repsep_snprintf(bf, size, "%-*s", width, buf);
}
struct sort_dimension {
const char *name;
struct sort_entry *entry;
int taken;
};
#define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
static struct sort_dimension common_sort_dimensions[] = {
DIM(SORT_PID, "pid", sort_thread),
DIM(SORT_COMM, "comm", sort_comm),
DIM(SORT_DSO, "dso", sort_dso),
DIM(SORT_SYM, "symbol", sort_sym),
DIM(SORT_PARENT, "parent", sort_parent),
DIM(SORT_CPU, "cpu", sort_cpu),
DIM(SORT_SRCLINE, "srcline", sort_srcline),
DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
DIM(SORT_TRANSACTION, "transaction", sort_transaction),
};
#undef DIM
#define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
static struct sort_dimension bstack_sort_dimensions[] = {
DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
DIM(SORT_IN_TX, "in_tx", sort_in_tx),
DIM(SORT_ABORT, "abort", sort_abort),
};
#undef DIM
#define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
static struct sort_dimension memory_sort_dimensions[] = {
DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
};
#undef DIM
/*
 * Append a sort key to the global sort list (once only).  The first key
 * added becomes sort__first_dimension, and any key with a collapse
 * callback forces the collapse pass.
 */
static void __sort_dimension__add(struct sort_dimension *sd, enum sort_type idx)
{
    if (sd->taken)
        return;

    if (sd->entry->se_collapse)
        sort__need_collapse = 1;

    if (list_empty(&hist_entry__sort_list))
        sort__first_dimension = idx;

    list_add_tail(&sd->entry->list, &hist_entry__sort_list);
    sd->taken = 1;
}
/*
 * Resolve a --sort token against the three dimension tables and add it.
 * strncasecmp() with strlen(tok) deliberately allows unambiguous-looking
 * prefixes; the first table entry whose name starts with the token wins.
 * Branch-stack and memory keys are rejected unless the corresponding
 * sort mode is active.  Returns 0, -EINVAL (bad mode / bad parent
 * regex), or -ESRCH (unknown token).
 */
int sort_dimension__add(const char *tok)
{
    unsigned int i;

    for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
        struct sort_dimension *sd = &common_sort_dimensions[i];

        if (strncasecmp(tok, sd->name, strlen(tok)))
            continue;

        if (sd->entry == &sort_parent) {
            /* The parent key needs the caller-supplied pattern compiled. */
            int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
            if (ret) {
                char err[BUFSIZ];

                regerror(ret, &parent_regex, err, sizeof(err));
                pr_err("Invalid regex: %s\n%s", parent_pattern, err);
                return -EINVAL;
            }
            sort__has_parent = 1;
        } else if (sd->entry == &sort_sym) {
            sort__has_sym = 1;
        } else if (sd->entry == &sort_dso) {
            sort__has_dso = 1;
        }

        __sort_dimension__add(sd, i);
        return 0;
    }

    for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
        struct sort_dimension *sd = &bstack_sort_dimensions[i];

        if (strncasecmp(tok, sd->name, strlen(tok)))
            continue;

        if (sort__mode != SORT_MODE__BRANCH)
            return -EINVAL;

        if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
            sort__has_sym = 1;

        __sort_dimension__add(sd, i + __SORT_BRANCH_STACK);
        return 0;
    }

    for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
        struct sort_dimension *sd = &memory_sort_dimensions[i];

        if (strncasecmp(tok, sd->name, strlen(tok)))
            continue;

        if (sort__mode != SORT_MODE__MEMORY)
            return -EINVAL;

        if (sd->entry == &sort_mem_daddr_sym)
            sort__has_sym = 1;

        __sort_dimension__add(sd, i + __SORT_MEMORY_MODE);
        return 0;
    }

    return -ESRCH;
}
/*
 * Parse the global sort_order string (comma/space separated tokens) and
 * register each key.  Stops at the first invalid or unknown token and
 * returns its error code; 0 on success.
 */
int setup_sorting(void)
{
    char *tmp, *tok, *str = strdup(sort_order);
    int ret = 0;

    if (str == NULL) {
        error("Not enough memory to setup sort keys");
        return -ENOMEM;
    }

    for (tok = strtok_r(str, ", ", &tmp);
         tok; tok = strtok_r(NULL, ", ", &tmp)) {
        ret = sort_dimension__add(tok);
        if (ret == -EINVAL) {
            error("Invalid --sort key: `%s'", tok);
            break;
        } else if (ret == -ESRCH) {
            error("Unknown --sort key: `%s'", tok);
            break;
        }
    }

    free(str);
    return ret;
}
/*
 * When the user filtered down to exactly one value for this key, the
 * column is redundant: print the single value once as a header comment
 * (if fp is given) and mark the column elided.
 */
static void sort_entry__setup_elide(struct sort_entry *se,
                                    struct strlist *list,
                                    const char *list_name, FILE *fp)
{
    if (list && strlist__nr_entries(list) == 1) {
        if (fp != NULL)
            fprintf(fp, "# %s: %s\n", list_name,
                    strlist__entry(list, 0)->s);
        se->elide = true;
    }
}
/*
 * Decide which sort columns can be hidden because the user's filters
 * pin them to a single value, then undo the elision entirely if it
 * would hide every active column.
 *
 * NOTE(review): the SORT_MODE__MEMORY branch passes &sort_dso for every
 * call while using memory-specific list names ("symbol_daddr", "tlb",
 * ...) — this looks copy-pasted from the dso case; confirm whether the
 * corresponding sort_mem_* entries and lists were intended.
 */
void sort__setup_elide(FILE *output)
{
    struct sort_entry *se;

    sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
                            "dso", output);
    sort_entry__setup_elide(&sort_comm, symbol_conf.comm_list,
                            "comm", output);
    sort_entry__setup_elide(&sort_sym, symbol_conf.sym_list,
                            "symbol", output);

    if (sort__mode == SORT_MODE__BRANCH) {
        sort_entry__setup_elide(&sort_dso_from,
                                symbol_conf.dso_from_list,
                                "dso_from", output);
        sort_entry__setup_elide(&sort_dso_to,
                                symbol_conf.dso_to_list,
                                "dso_to", output);
        sort_entry__setup_elide(&sort_sym_from,
                                symbol_conf.sym_from_list,
                                "sym_from", output);
        sort_entry__setup_elide(&sort_sym_to,
                                symbol_conf.sym_to_list,
                                "sym_to", output);
    } else if (sort__mode == SORT_MODE__MEMORY) {
        sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
                                "symbol_daddr", output);
        sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
                                "dso_daddr", output);
        sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
                                "mem", output);
        sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
                                "local_weight", output);
        sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
                                "tlb", output);
        sort_entry__setup_elide(&sort_dso, symbol_conf.dso_list,
                                "snoop", output);
    }

    /*
     * It makes no sense to elide all of sort entries.
     * Just revert them to show up again.
     */
    list_for_each_entry(se, &hist_entry__sort_list, list) {
        if (!se->elide)
            return;
    }

    list_for_each_entry(se, &hist_entry__sort_list, list)
        se->elide = false;
}
| gpl-2.0 |
maxfu/legacy_android_kernel_exynos4210 | drivers/usb/gadget/f_eem.c | 554 | 16534 | /*
* f_eem.c -- USB CDC Ethernet (EEM) link function driver
*
* Copyright (C) 2003-2005,2008 David Brownell
* Copyright (C) 2008 Nokia Corporation
* Copyright (C) 2009 EF Johnson Technologies
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/etherdevice.h>
#include <linux/crc32.h>
#include <linux/slab.h>
#include "u_ether.h"
#define EEM_HLEN 2
/*
* This function is a "CDC Ethernet Emulation Model" (CDC EEM)
* Ethernet link.
*/
/* Per-speed pair of bulk endpoint descriptors chosen at set_alt time. */
struct eem_ep_descs {
    struct usb_endpoint_descriptor *in;
    struct usb_endpoint_descriptor *out;
};

/* CDC EEM function instance: the shared Ethernet-over-USB port plus
 * the interface id and per-speed endpoint descriptor sets. */
struct f_eem {
    struct gether port;
    u8 ctrl_id;

    struct eem_ep_descs fs;
    struct eem_ep_descs hs;
    struct eem_ep_descs ss;
};

/* Map the composite-framework function back to its f_eem container. */
static inline struct f_eem *func_to_eem(struct usb_function *f)
{
    return container_of(f, struct f_eem, port.func);
}
/*-------------------------------------------------------------------------*/
/* interface descriptor: */
/* Single data interface with two bulk endpoints; class/subclass/proto
 * identify it as CDC EEM. */
static struct usb_interface_descriptor eem_intf __initdata = {
    .bLength = sizeof eem_intf,
    .bDescriptorType = USB_DT_INTERFACE,

    /* .bInterfaceNumber = DYNAMIC */

    .bNumEndpoints = 2,
    .bInterfaceClass = USB_CLASS_COMM,
    .bInterfaceSubClass = USB_CDC_SUBCLASS_EEM,
    .bInterfaceProtocol = USB_CDC_PROTO_EEM,
    /* .iInterface = DYNAMIC */
};

/* full speed support: wMaxPacketSize left for the controller driver. */

static struct usb_endpoint_descriptor eem_fs_in_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,

    .bEndpointAddress = USB_DIR_IN,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_endpoint_descriptor eem_fs_out_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,

    .bEndpointAddress = USB_DIR_OUT,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
};

static struct usb_descriptor_header *eem_fs_function[] __initdata = {
    /* CDC EEM control descriptors */
    (struct usb_descriptor_header *) &eem_intf,
    (struct usb_descriptor_header *) &eem_fs_in_desc,
    (struct usb_descriptor_header *) &eem_fs_out_desc,
    NULL,
};

/* high speed support: 512-byte bulk packets per the USB 2.0 spec. */

static struct usb_endpoint_descriptor eem_hs_in_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,

    .bEndpointAddress = USB_DIR_IN,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_endpoint_descriptor eem_hs_out_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,

    .bEndpointAddress = USB_DIR_OUT,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = cpu_to_le16(512),
};

static struct usb_descriptor_header *eem_hs_function[] __initdata = {
    /* CDC EEM control descriptors */
    (struct usb_descriptor_header *) &eem_intf,
    (struct usb_descriptor_header *) &eem_hs_in_desc,
    (struct usb_descriptor_header *) &eem_hs_out_desc,
    NULL,
};

/* super speed support: 1024-byte bulk packets plus companion descriptors. */

static struct usb_endpoint_descriptor eem_ss_in_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,

    .bEndpointAddress = USB_DIR_IN,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = cpu_to_le16(1024),
};

static struct usb_endpoint_descriptor eem_ss_out_desc __initdata = {
    .bLength = USB_DT_ENDPOINT_SIZE,
    .bDescriptorType = USB_DT_ENDPOINT,

    .bEndpointAddress = USB_DIR_OUT,
    .bmAttributes = USB_ENDPOINT_XFER_BULK,
    .wMaxPacketSize = cpu_to_le16(1024),
};

/* Shared by both SS bulk endpoints. */
static struct usb_ss_ep_comp_descriptor eem_ss_bulk_comp_desc __initdata = {
    .bLength = sizeof eem_ss_bulk_comp_desc,
    .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,

    /* the following 2 values can be tweaked if necessary */
    /* .bMaxBurst = 0, */
    /* .bmAttributes = 0, */
};

static struct usb_descriptor_header *eem_ss_function[] __initdata = {
    /* CDC EEM control descriptors */
    (struct usb_descriptor_header *) &eem_intf,
    (struct usb_descriptor_header *) &eem_ss_in_desc,
    (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
    (struct usb_descriptor_header *) &eem_ss_out_desc,
    (struct usb_descriptor_header *) &eem_ss_bulk_comp_desc,
    NULL,
};
/* string descriptors: */
static struct usb_string eem_string_defs[] = {
[0].s = "CDC Ethernet Emulation Model (EEM)",
{ } /* end of list */
};
static struct usb_gadget_strings eem_string_table = {
.language = 0x0409, /* en-us */
.strings = eem_string_defs,
};
static struct usb_gadget_strings *eem_strings[] = {
&eem_string_table,
NULL,
};
/*-------------------------------------------------------------------------*/
/*
 * EEM uses no class-specific control requests: log whatever arrives for
 * debugging and reject it.  A negative return makes the composite core
 * stall ep0.
 */
static int eem_setup(struct usb_function *f, const struct usb_ctrlrequest *ctrl)
{
	struct usb_composite_dev *cdev = f->config->cdev;

	DBG(cdev, "invalid control req%02x.%02x v%04x i%04x l%d\n",
	    ctrl->bRequestType, ctrl->bRequest,
	    le16_to_cpu(ctrl->wValue), le16_to_cpu(ctrl->wIndex),
	    le16_to_cpu(ctrl->wLength));

	/* device either stalls (value < 0) or reports success */
	return -EOPNOTSUPP;
}
/*
 * Handle SET_INTERFACE for the single EEM data interface.  Only alt
 * setting 0 exists, so a request for our interface is either the first
 * activation or a reset of an already-active link.
 */
static int eem_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
{
struct f_eem *eem = func_to_eem(f);
struct usb_composite_dev *cdev = f->config->cdev;
struct net_device *net;
/* we know alt == 0, so this is an activation or a reset */
if (alt != 0)
goto fail;
if (intf == eem->ctrl_id) {
/* in_ep->driver_data is set while the link is connected
 * (same test as in eem_disable()): tear down before re-init */
if (eem->port.in_ep->driver_data) {
DBG(cdev, "reset eem\n");
gether_disconnect(&eem->port);
}
/* first activation: latch the descriptor set that matches the
 * speed the link actually negotiated */
if (!eem->port.in) {
DBG(cdev, "init eem\n");
if (gadget_is_superspeed(cdev->gadget) &&
cdev->gadget->speed == USB_SPEED_SUPER) {
eem->port.in = eem->ss.in;
eem->port.out = eem->ss.out;
} else {
eem->port.in = ep_choose(cdev->gadget,
eem->hs.in, eem->fs.in);
eem->port.out = ep_choose(cdev->gadget,
eem->hs.out, eem->fs.out);
}
}
/* zlps should not occur because zero-length EEM packets
* will be inserted in those cases where they would occur
*/
eem->port.is_zlp_ok = 1;
eem->port.cdc_filter = DEFAULT_FILTER;
DBG(cdev, "activate eem\n");
net = gether_connect(&eem->port);
if (IS_ERR(net))
return PTR_ERR(net);
} else
goto fail;
return 0;
fail:
return -EINVAL;
}
/* Deactivate the function: drop the network link if it is currently up. */
static void eem_disable(struct usb_function *f)
{
	struct f_eem *eem = func_to_eem(f);

	DBG(f->config->cdev, "eem deactivated\n");

	if (eem->port.in_ep->driver_data)
		gether_disconnect(&eem->port);
}
/*-------------------------------------------------------------------------*/
/* EEM function driver setup/binding */
/*
 * eem_bind - allocate an interface id and a pair of bulk endpoints, then
 * build and track the per-speed descriptor copies.  On any failure every
 * partially-acquired resource is released and a negative errno returned.
 */
static int __init
eem_bind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_composite_dev *cdev = c->cdev;
	struct f_eem *eem = func_to_eem(f);
	int status;
	struct usb_ep *ep;

	/* allocate instance-specific interface IDs */
	status = usb_interface_id(c, f);
	if (status < 0)
		goto fail;
	eem->ctrl_id = status;
	eem_intf.bInterfaceNumber = status;

	status = -ENODEV;

	/* allocate instance-specific endpoints */
	ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_in_desc);
	if (!ep)
		goto fail;
	eem->port.in_ep = ep;
	ep->driver_data = cdev; /* claim */

	ep = usb_ep_autoconfig(cdev->gadget, &eem_fs_out_desc);
	if (!ep)
		goto fail;
	eem->port.out_ep = ep;
	ep->driver_data = cdev; /* claim */

	status = -ENOMEM;

	/* copy descriptors, and track endpoint copies */
	f->descriptors = usb_copy_descriptors(eem_fs_function);
	if (!f->descriptors)
		goto fail;
	eem->fs.in = usb_find_endpoint(eem_fs_function,
			f->descriptors, &eem_fs_in_desc);
	eem->fs.out = usb_find_endpoint(eem_fs_function,
			f->descriptors, &eem_fs_out_desc);

	/* support all relevant hardware speeds... we expect that when
	 * hardware is dual speed, all bulk-capable endpoints work at
	 * both speeds
	 */
	if (gadget_is_dualspeed(c->cdev->gadget)) {
		eem_hs_in_desc.bEndpointAddress =
				eem_fs_in_desc.bEndpointAddress;
		eem_hs_out_desc.bEndpointAddress =
				eem_fs_out_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->hs_descriptors = usb_copy_descriptors(eem_hs_function);
		if (!f->hs_descriptors)
			goto fail;
		eem->hs.in = usb_find_endpoint(eem_hs_function,
				f->hs_descriptors, &eem_hs_in_desc);
		eem->hs.out = usb_find_endpoint(eem_hs_function,
				f->hs_descriptors, &eem_hs_out_desc);
	}

	if (gadget_is_superspeed(c->cdev->gadget)) {
		eem_ss_in_desc.bEndpointAddress =
				eem_fs_in_desc.bEndpointAddress;
		eem_ss_out_desc.bEndpointAddress =
				eem_fs_out_desc.bEndpointAddress;

		/* copy descriptors, and track endpoint copies */
		f->ss_descriptors = usb_copy_descriptors(eem_ss_function);
		if (!f->ss_descriptors)
			goto fail;
		eem->ss.in = usb_find_endpoint(eem_ss_function,
				f->ss_descriptors, &eem_ss_in_desc);
		eem->ss.out = usb_find_endpoint(eem_ss_function,
				f->ss_descriptors, &eem_ss_out_desc);
	}

	DBG(cdev, "CDC Ethernet (EEM): %s speed IN/%s OUT/%s\n",
			gadget_is_superspeed(c->cdev->gadget) ? "super" :
			gadget_is_dualspeed(c->cdev->gadget) ? "dual" : "full",
			eem->port.in_ep->name, eem->port.out_ep->name);
	return 0;

fail:
	if (f->descriptors)
		usb_free_descriptors(f->descriptors);
	if (f->hs_descriptors)
		usb_free_descriptors(f->hs_descriptors);

	/* we might as well release our claims on endpoints.
	 * BUGFIX: test the endpoint pointers assigned above, not
	 * eem->port.in/out -- those descriptor pointers are only set in
	 * eem_set_alt(), so the old checks were always false here and a
	 * failed bind left both endpoints claimed forever.
	 */
	if (eem->port.out_ep)
		eem->port.out_ep->driver_data = NULL;
	if (eem->port.in_ep)
		eem->port.in_ep->driver_data = NULL;

	ERROR(cdev, "%s: can't bind, err %d\n", f->name, status);

	return status;
}
/* Undo eem_bind(): free the per-speed descriptor copies and the instance. */
static void
eem_unbind(struct usb_configuration *c, struct usb_function *f)
{
	struct usb_gadget *g = c->cdev->gadget;

	DBG(c->cdev, "eem unbind\n");

	if (gadget_is_superspeed(g))
		usb_free_descriptors(f->ss_descriptors);
	if (gadget_is_dualspeed(g))
		usb_free_descriptors(f->hs_descriptors);
	usb_free_descriptors(f->descriptors);
	kfree(func_to_eem(f));
}
/* TX completion for an EEM echo response: drop the skb stashed in context. */
static void eem_cmd_complete(struct usb_ep *ep, struct usb_request *req)
{
	dev_kfree_skb_any((struct sk_buff *)req->context);
}
/*
 * Add the EEM header and ethernet checksum.
 * We currently do not attempt to put multiple ethernet frames
 * into a single USB transfer
 *
 * Consumes @skb; returns the (possibly reallocated) framed skb, or NULL
 * on allocation failure.  Layout produced: 2-byte EEM header, frame data,
 * 4-byte sentinel "CRC", optional 2-byte zero-length EEM packet.
 */
static struct sk_buff *eem_wrap(struct gether *port, struct sk_buff *skb)
{
struct sk_buff *skb2 = NULL;
struct usb_ep *in = port->in_ep;
int padlen = 0;
u16 len = skb->len;
/* a cloned skb shares data, so we must copy before writing header/FCS */
if (!skb_cloned(skb)) {
int headroom = skb_headroom(skb);
int tailroom = skb_tailroom(skb);
/* When (len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) is 0,
* stick two bytes of zero-length EEM packet on the end.
*/
if (((len + EEM_HLEN + ETH_FCS_LEN) % in->maxpacket) == 0)
padlen += 2;
/* reuse the skb in place only if the framing fits the existing
 * head/tail room; otherwise fall through and reallocate */
if ((tailroom >= (ETH_FCS_LEN + padlen)) &&
(headroom >= EEM_HLEN))
goto done;
}
skb2 = skb_copy_expand(skb, EEM_HLEN, ETH_FCS_LEN + padlen, GFP_ATOMIC);
dev_kfree_skb_any(skb);
skb = skb2;
if (!skb)
return skb;
done:
/* use the "no CRC" option */
put_unaligned_be32(0xdeadbeef, skb_put(skb, 4));
/* EEM packet header format:
* b0..13: length of ethernet frame
* b14: bmCRC (0 == sentinel CRC)
* b15: bmType (0 == data)
*/
/* re-read: skb_put() above grew the length by the 4 sentinel bytes */
len = skb->len;
put_unaligned_le16(len & 0x3FFF, skb_push(skb, 2));
/* add a zero-length EEM packet, if needed */
if (padlen)
put_unaligned_le16(0, skb_put(skb, 2));
return skb;
}
/*
 * Remove the EEM header. Note that there can be many EEM packets in a single
 * USB transfer, so we need to break them out and handle them independently.
 *
 * Unframed ethernet skbs are appended to @list; the input @skb is always
 * consumed.  Returns 0 on success or a negative errno on a malformed
 * transfer (parsing stops at the first bad header/length).
 */
static int eem_unwrap(struct gether *port,
			struct sk_buff *skb,
			struct sk_buff_head *list)
{
	struct usb_composite_dev *cdev = port->func.config->cdev;
	int status = 0;

	do {
		struct sk_buff	*skb2;
		u16		header;
		u16		len = 0;

		if (skb->len < EEM_HLEN) {
			status = -EINVAL;
			DBG(cdev, "invalid EEM header\n");
			goto error;
		}

		/* remove the EEM header */
		header = get_unaligned_le16(skb->data);
		skb_pull(skb, EEM_HLEN);

		/* EEM packet header format:
		 * b0..14:	EEM type dependent (data or command)
		 * b15:		bmType (0 == data, 1 == command)
		 */
		if (header & BIT(15)) {
			struct usb_request	*req = cdev->req;
			u16			bmEEMCmd;

			/* EEM command packet format:
			 * b0..10:	bmEEMCmdParam
			 * b11..13:	bmEEMCmd
			 * b14:		reserved (must be zero)
			 * b15:		bmType (1 == command)
			 */
			if (header & BIT(14))
				continue;	/* reserved bit set: ignore */

			bmEEMCmd = (header >> 11) & 0x7;
			switch (bmEEMCmd) {
			case 0: /* echo */
				len = header & 0x7FF;
				if (skb->len < len) {
					status = -EOVERFLOW;
					goto error;
				}

				skb2 = skb_clone(skb, GFP_ATOMIC);
				if (unlikely(!skb2)) {
					DBG(cdev, "EEM echo response error\n");
					goto next;
				}
				skb_trim(skb2, len);
				/* NOTE(review): skb_push() on a clone writes
				 * into data shared with the original skb; an
				 * unshared copy would be safer -- confirm.
				 */
				put_unaligned_le16(BIT(15) | BIT(11) | len,
						skb_push(skb2, 2));
				skb_copy_bits(skb2, 0, req->buf, skb2->len);
				req->length = skb2->len;
				req->complete = eem_cmd_complete;
				req->zero = 1;
				req->context = skb2;
				if (usb_ep_queue(port->in_ep, req, GFP_ATOMIC))
					DBG(cdev, "echo response queue fail\n");
				break;

			case 1:  /* echo response */
			case 2:  /* suspend hint */
			case 3:  /* response hint */
			case 4:  /* response complete hint */
			case 5:  /* tickle */
			default: /* reserved */
				continue;	/* len is 0: nothing to pull */
			}
		} else {
			u32		crc, crc2;
			struct sk_buff	*skb3;

			/* check for zero-length EEM packet */
			if (header == 0)
				continue;

			/* EEM data packet format:
			 * b0..13:	length of ethernet frame
			 * b14:		bmCRC (0 == sentinel, 1 == calculated)
			 * b15:		bmType (0 == data)
			 */
			len = header & 0x3FFF;
			if ((skb->len < len)
					|| (len < (ETH_HLEN + ETH_FCS_LEN))) {
				status = -EINVAL;
				goto error;
			}

			/* validate CRC */
			if (header & BIT(14)) {
				crc = get_unaligned_le32(skb->data + len
						- ETH_FCS_LEN);
				crc2 = ~crc32_le(~0,
						skb->data, len - ETH_FCS_LEN);
			} else {
				crc = get_unaligned_be32(skb->data + len
						- ETH_FCS_LEN);
				crc2 = 0xdeadbeef;
			}
			if (crc != crc2) {
				DBG(cdev, "invalid EEM CRC\n");
				goto next;
			}

			skb2 = skb_clone(skb, GFP_ATOMIC);
			if (unlikely(!skb2)) {
				DBG(cdev, "unable to unframe EEM packet\n");
				/* BUGFIX: was "continue", which skipped the
				 * skb_pull(skb, len) below and left skb->data
				 * pointing into this frame's body, misparsing
				 * it as the next EEM header.
				 */
				goto next;
			}
			skb_trim(skb2, len - ETH_FCS_LEN);

			/* realign so the IP header is word-aligned */
			skb3 = skb_copy_expand(skb2,
						NET_IP_ALIGN,
						0,
						GFP_ATOMIC);
			if (unlikely(!skb3)) {
				DBG(cdev, "unable to realign EEM packet\n");
				dev_kfree_skb_any(skb2);
				/* BUGFIX: same as above -- must still consume
				 * this frame before parsing the next one.
				 */
				goto next;
			}
			dev_kfree_skb_any(skb2);
			skb_queue_tail(list, skb3);
		}
next:
		skb_pull(skb, len);
	} while (skb->len);

error:
	dev_kfree_skb_any(skb);
	return status;
}
/**
 * eem_bind_config - add CDC Ethernet (EEM) network link to a configuration
 * @c: the configuration to support the network link
 * Context: single threaded during gadget setup
 *
 * Returns zero on success, else negative errno.
 *
 * Caller must have called @gether_setup(). Caller is also responsible
 * for calling @gether_cleanup() before module unload.
 */
int __init eem_bind_config(struct usb_configuration *c)
{
	struct f_eem *eem;
	int status;

	/* allocate the device-global string ID once, on first use */
	if (eem_string_defs[0].id == 0) {
		/* control interface label */
		status = usb_string_id(c->cdev);
		if (status < 0)
			return status;
		eem_string_defs[0].id = status;
		eem_intf.iInterface = status;
	}

	/* allocate and initialize one new instance */
	eem = kzalloc(sizeof(*eem), GFP_KERNEL);
	if (!eem)
		return -ENOMEM;

	/* link-level configuration */
	eem->port.cdc_filter = DEFAULT_FILTER;
	eem->port.wrap = eem_wrap;
	eem->port.unwrap = eem_unwrap;
	eem->port.header_len = EEM_HLEN;

	/* function callbacks; descriptors are per-instance copies */
	eem->port.func.name = "cdc_eem";
	eem->port.func.strings = eem_strings;
	eem->port.func.bind = eem_bind;
	eem->port.func.unbind = eem_unbind;
	eem->port.func.set_alt = eem_set_alt;
	eem->port.func.setup = eem_setup;
	eem->port.func.disable = eem_disable;

	status = usb_add_function(c, &eem->port.func);
	if (status)
		kfree(eem);
	return status;
}
| gpl-2.0 |
mialwe/midnight-i9000-kernel | drivers/staging/comedi/drivers/cb_pcidio.c | 810 | 9430 | /*
comedi/drivers/cb_pcidio.c
A Comedi driver for PCI-DIO24H & PCI-DIO48H of ComputerBoards (currently MeasurementComputing)
COMEDI - Linux Control and Measurement Device Interface
Copyright (C) 2000 David A. Schleef <ds@schleef.org>
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*/
/*
Driver: cb_pcidio
Description: ComputerBoards' DIO boards with PCI interface
Devices: [Measurement Computing] PCI-DIO24 (cb_pcidio), PCI-DIO24H, PCI-DIO48H
Author: Yoshiya Matsuzaka
Updated: Mon, 29 Oct 2007 15:40:47 +0000
Status: experimental
This driver has been modified from skel.c of comedi-0.7.70.
Configuration Options:
[0] - PCI bus of device (optional)
[1] - PCI slot of device (optional)
If bus/slot is not specified, the first available PCI device will
be used.
Passing a zero for an option is the same as leaving it unspecified.
*/
/*------------------------------ HEADER FILES ---------------------------------*/
#include "../comedidev.h"
#include "comedi_pci.h"
#include "8255.h"
/*-------------------------- MACROS and DATATYPES -----------------------------*/
#define PCI_VENDOR_ID_CB 0x1307
/*
* Board descriptions for two imaginary boards. Describing the
* boards in this way is optional, and completely driver-dependent.
* Some drivers use arrays such as this, other do not.
*/
struct pcidio_board {
const char *name; /* name of the board */
int dev_id; /* PCI device id; matches an entry in pcidio_pci_table */
int n_8255; /* number of 8255 chips on board */
/* indices of base address regions */
int pcicontroler_badrindex;
int dioregs_badrindex;
};
static const struct pcidio_board pcidio_boards[] = {
{
.name = "pci-dio24",
.dev_id = 0x0028,
.n_8255 = 1,
.pcicontroler_badrindex = 1,
.dioregs_badrindex = 2,
},
{
.name = "pci-dio24h",
.dev_id = 0x0014,
.n_8255 = 1,
.pcicontroler_badrindex = 1,
.dioregs_badrindex = 2,
},
{
.name = "pci-dio48h",
.dev_id = 0x000b,
.n_8255 = 2,
.pcicontroler_badrindex = 0,
.dioregs_badrindex = 1,
},
};
/* This is used by modprobe to translate PCI IDs to drivers. Should
* only be used for PCI and ISA-PnP devices */
/* Please add your PCI vendor ID to comedidev.h, and it will be forwarded
* upstream. */
static DEFINE_PCI_DEVICE_TABLE(pcidio_pci_table) = {
{
PCI_VENDOR_ID_CB, 0x0028, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_CB, 0x0014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
PCI_VENDOR_ID_CB, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, {
0}
};
MODULE_DEVICE_TABLE(pci, pcidio_pci_table);
/*
* Useful for shorthand access to the particular board structure
*/
#define thisboard ((const struct pcidio_board *)dev->board_ptr)
/* this structure is for data unique to this hardware driver. If
several hardware drivers keep similar information in this structure,
feel free to suggest moving the variable to the struct comedi_device struct. */
struct pcidio_private {
int data; /* currently unused */
/* would be useful for a PCI device */
struct pci_dev *pci_dev;
/* used for DO readback, currently unused */
unsigned int do_readback[4]; /* up to 4 unsigned int suffice to hold 96 bits for PCI-DIO96 */
unsigned long dio_reg_base; /* address of port A of the first 8255 chip on board */
};
/*
* most drivers define the following macro to make it easy to
* access the private structure.
*/
#define devpriv ((struct pcidio_private *)dev->private)
/*
* The struct comedi_driver structure tells the Comedi core module
* which functions to call to configure/deconfigure (attach/detach)
* the board, and also about the kernel module that contains
* the device code.
*/
/* forward declarations for the comedi_driver callbacks below */
static int pcidio_attach(struct comedi_device *dev,
struct comedi_devconfig *it);
static int pcidio_detach(struct comedi_device *dev);
static struct comedi_driver driver_cb_pcidio = {
.driver_name = "cb_pcidio",
.module = THIS_MODULE,
.attach = pcidio_attach,
.detach = pcidio_detach,
/* It is not necessary to implement the following members if you are
* writing a driver for a ISA PnP or PCI card */
/* Most drivers will support multiple types of boards by
* having an array of board structures. These were defined
* in pcidio_boards[] above. Note that the element 'name'
* was first in the structure -- Comedi uses this fact to
* extract the name of the board without knowing any details
* about the structure except for its length.
* When a device is attached (by comedi_config), the name
* of the device is given to Comedi, and Comedi tries to
* match it by going through the list of board names. If
* there is a match, the address of the pointer is put
* into dev->board_ptr and driver->attach() is called.
*
* Note that these are not necessary if you can determine
* the type of board in software. ISA PnP, PCI, and PCMCIA
* devices are such boards.
*/
/* The following fields should NOT be initialized if you are dealing
* with PCI devices
*
* .board_name = pcidio_boards,
* .offset = sizeof(struct pcidio_board),
* .num_names = sizeof(pcidio_boards) / sizeof(structpcidio_board),
*/
};
/*------------------------------- FUNCTIONS -----------------------------------*/
/*
* Attach is called by the Comedi core to configure the driver
* for a particular board. If you specified a board_name array
* in the driver structure, dev->board_ptr contains that
* address.
*/
static int pcidio_attach(struct comedi_device *dev, struct comedi_devconfig *it)
{
struct pci_dev *pcidev = NULL;
int index;
int i;
printk("comedi%d: cb_pcidio: \n", dev->minor);
/*
* Allocate the private structure area. alloc_private() is a
* convenient macro defined in comedidev.h.
*/
if (alloc_private(dev, sizeof(struct pcidio_private)) < 0)
return -ENOMEM;
/*
* If you can probe the device to determine what device in a series
* it is, this is the place to do it. Otherwise, dev->board_ptr
* should already be initialized.
*/
/*
* Probe the device to determine what device in the series it is.
*
* pci_get_device() drops the reference on the previous device and takes
* one on the next, so exiting via "goto found" leaves exactly one
* reference held; it is released by pci_dev_put() in pcidio_detach().
*/
for (pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, NULL);
pcidev != NULL;
pcidev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, pcidev)) {
/* is it not a computer boards card? */
if (pcidev->vendor != PCI_VENDOR_ID_CB)
continue;
/* loop through cards supported by this driver */
for (index = 0; index < ARRAY_SIZE(pcidio_boards); index++) {
if (pcidio_boards[index].dev_id != pcidev->device)
continue;
/* was a particular bus/slot requested? */
if (it->options[0] || it->options[1]) {
/* are we on the wrong bus/slot? */
if (pcidev->bus->number != it->options[0] ||
PCI_SLOT(pcidev->devfn) != it->options[1]) {
continue;
}
}
dev->board_ptr = pcidio_boards + index;
goto found;
}
}
printk("No supported ComputerBoards/MeasurementComputing card found on "
"requested position\n");
return -EIO;
found:
/*
* Initialize dev->board_name. Note that we can use the "thisboard"
* macro now, since we just initialized it in the last line.
* "index" is also only valid here, reached via the goto above.
*/
dev->board_name = thisboard->name;
devpriv->pci_dev = pcidev;
printk("Found %s on bus %i, slot %i\n", thisboard->name,
devpriv->pci_dev->bus->number,
PCI_SLOT(devpriv->pci_dev->devfn));
/* on failure we return with devpriv->pci_dev set; the core calls
* pcidio_detach() even after a failed attach, which puts the ref */
if (comedi_pci_enable(pcidev, thisboard->name)) {
printk
("cb_pcidio: failed to enable PCI device and request regions\n");
return -EIO;
}
devpriv->dio_reg_base
=
pci_resource_start(devpriv->pci_dev,
pcidio_boards[index].dioregs_badrindex);
/*
* Allocate the subdevice structures. alloc_subdevice() is a
* convenient macro defined in comedidev.h.
*/
if (alloc_subdevices(dev, thisboard->n_8255) < 0)
return -ENOMEM;
/* one comedi subdevice per 8255 chip; each chip occupies 4 ports */
for (i = 0; i < thisboard->n_8255; i++) {
subdev_8255_init(dev, dev->subdevices + i,
NULL, devpriv->dio_reg_base + i * 4);
printk(" subdev %d: base = 0x%lx\n", i,
devpriv->dio_reg_base + i * 4);
}
printk("attached\n");
/* positive return indicates success to the comedi core */
return 1;
}
/*
 * _detach deconfigures the device and releases its resources.  It is also
 * invoked when _attach() fails, so every release is guarded: only undo
 * what _attach() actually got around to allocating.  dev->private and
 * dev->subdevices are freed automatically by the comedi core.
 */
static int pcidio_detach(struct comedi_device *dev)
{
	printk("comedi%d: cb_pcidio: remove\n", dev->minor);

	if (devpriv && devpriv->pci_dev) {
		/* regions were requested only once dio_reg_base was read */
		if (devpriv->dio_reg_base)
			comedi_pci_disable(devpriv->pci_dev);
		pci_dev_put(devpriv->pci_dev);
	}

	if (dev->subdevices) {
		int i;

		for (i = 0; i < thisboard->n_8255; i++)
			subdev_8255_cleanup(dev, dev->subdevices + i);
	}
	return 0;
}
/*
* A convenient macro that defines init_module() and cleanup_module(),
* as necessary.
*/
/* presumably registers driver_cb_pcidio with both comedi and the PCI core */
COMEDI_PCI_INITCLEANUP(driver_cb_pcidio, pcidio_pci_table);
| gpl-2.0 |
Framework43/touchpad-kernel | drivers/staging/rtl8192e/r8192E_wx.c | 810 | 36891 | /*
This file contains wireless extension handlers.
This is part of rtl8180 OpenSource driver.
Copyright (C) Andrea Merello 2004-2005 <andreamrl@tiscali.it>
Released under the terms of GPL (General Public Licence)
Parts of this driver are based on the GPL part
of the official realtek driver.
Parts of this driver are based on the rtl8180 driver skeleton
from Patric Schenke & Andres Salomon.
Parts of this driver are based on the Intel Pro Wireless 2100 GPL driver.
We want to tanks the Authors of those projects and the Ndiswrapper
project Authors.
*/
#include <linux/string.h>
#include "r8192E.h"
#include "r8192E_hw.h"
#include "r8192E_wx.h"
#ifdef ENABLE_DOT11D
#include "ieee80211/dot11d.h"
#endif
#define RATE_COUNT 12
/* Bit rates (in bits/second) advertised via wireless extensions; consumed
 * by rtl8180_wx_get_range() below. */
static u32 rtl8180_rates[] = {1000000,2000000,5500000,11000000,
6000000,9000000,12000000,18000000,24000000,36000000,48000000,54000000};
#ifndef ENETDOWN
#define ENETDOWN 1
#endif
/* WX handler: report the current channel/frequency via the 80211 layer. */
static int r8192_wx_get_freq(struct net_device *dev,
			     struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *dev_priv = ieee80211_priv(dev);

	return ieee80211_wx_get_freq(dev_priv->ieee80211, a, wrqu, b);
}
/* WX handler: report the current operating mode via the 80211 layer. */
static int r8192_wx_get_mode(struct net_device *dev, struct iw_request_info *a,
			     union iwreq_data *wrqu, char *b)
{
	struct r8192_priv *dev_priv = ieee80211_priv(dev);

	return ieee80211_wx_get_mode(dev_priv->ieee80211, a, wrqu, b);
}
/* WX handler: report the current bit rate via the 80211 layer. */
static int r8192_wx_get_rate(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *dev_priv = ieee80211_priv(dev);

	return ieee80211_wx_get_rate(dev_priv->ieee80211, info, wrqu, extra);
}
/* WX handler: set the bit rate; serialized by wx_sem, no-op under RF kill. */
static int r8192_wx_set_rate(struct net_device *dev,
			     struct iw_request_info *info,
			     union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int err;

	if (priv->bHwRadioOff)
		return 0;

	down(&priv->wx_sem);
	err = ieee80211_wx_set_rate(priv->ieee80211, info, wrqu, extra);
	up(&priv->wx_sem);

	return err;
}
/* WX handler: set the RTS threshold; serialized, no-op under RF kill. */
static int r8192_wx_set_rts(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int err;

	if (priv->bHwRadioOff)
		return 0;

	down(&priv->wx_sem);
	err = ieee80211_wx_set_rts(priv->ieee80211, info, wrqu, extra);
	up(&priv->wx_sem);

	return err;
}
/* WX handler: report the RTS threshold via the 80211 layer. */
static int r8192_wx_get_rts(struct net_device *dev,
			    struct iw_request_info *info,
			    union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *dev_priv = ieee80211_priv(dev);

	return ieee80211_wx_get_rts(dev_priv->ieee80211, info, wrqu, extra);
}
/* WX handler: set power management; serialized, no-op under RF kill. */
static int r8192_wx_set_power(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int err;

	if (priv->bHwRadioOff)
		return 0;

	down(&priv->wx_sem);
	err = ieee80211_wx_set_power(priv->ieee80211, info, wrqu, extra);
	up(&priv->wx_sem);

	return err;
}
/* WX handler: report power-management settings via the 80211 layer. */
static int r8192_wx_get_power(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *dev_priv = ieee80211_priv(dev);

	return ieee80211_wx_get_power(dev_priv->ieee80211, info, wrqu, extra);
}
/* Debug-only ioctl handlers for poking RF/baseband/NIC registers directly.
 * Compiled in only when JOHN_IOCTL is defined. */
#ifdef JOHN_IOCTL
u16 read_rtl8225(struct net_device *dev, u8 addr);
void write_rtl8225(struct net_device *dev, u8 adr, u16 data);
u32 john_read_rtl8225(struct net_device *dev, u8 adr);
void _write_rtl8225(struct net_device *dev, u8 adr, u16 data);
/* Read one RTL8225 RF register; address taken from userspace, result
 * smuggled back through wrqu->data.length. */
static int r8192_wx_read_regs(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 addr;
u16 data1;
down(&priv->wx_sem);
/* NOTE(review): get_user() return value ignored here and below --
 * a faulting user pointer leaves "addr" uninitialized; confirm. */
get_user(addr,(u8*)wrqu->data.pointer);
data1 = read_rtl8225(dev, addr);
wrqu->data.length = data1;
up(&priv->wx_sem);
return 0;
}
/* Write one RTL8225 RF register; data passed in wrqu->data.length. */
static int r8192_wx_write_regs(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 addr;
down(&priv->wx_sem);
get_user(addr, (u8*)wrqu->data.pointer);
write_rtl8225(dev, addr, wrqu->data.length);
up(&priv->wx_sem);
return 0;
}
void rtl8187_write_phy(struct net_device *dev, u8 adr, u32 data);
u8 rtl8187_read_phy(struct net_device *dev,u8 adr, u32 data);
/* Read one baseband (PHY) register; address passed in wrqu->data.length. */
static int r8192_wx_read_bb(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 databb;
#if 0
int i;
for(i=0;i<12;i++) printk("%8x\n", read_cam(dev, i) );
#endif
down(&priv->wx_sem);
databb = rtl8187_read_phy(dev, (u8)wrqu->data.length, 0x00000000);
wrqu->data.length = databb;
up(&priv->wx_sem);
return 0;
}
void rtl8187_write_phy(struct net_device *dev, u8 adr, u32 data);
/* Write one baseband (PHY) register: address in data.length, value from
 * the user pointer. */
static int r8192_wx_write_bb(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u8 databb;
down(&priv->wx_sem);
get_user(databb, (u8*)wrqu->data.pointer);
rtl8187_write_phy(dev, wrqu->data.length, databb);
up(&priv->wx_sem);
return 0;
}
/* Write one NIC byte register: address from user pointer, value in
 * data.length. */
static int r8192_wx_write_nicb(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 addr;
down(&priv->wx_sem);
get_user(addr, (u32*)wrqu->data.pointer);
write_nic_byte(dev, addr, wrqu->data.length);
up(&priv->wx_sem);
return 0;
}
/* Read one NIC byte register; address from user pointer, result in
 * data.length. */
static int r8192_wx_read_nicb(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
u32 addr;
u16 data1;
down(&priv->wx_sem);
get_user(addr,(u32*)wrqu->data.pointer);
data1 = read_nic_byte(dev, addr);
wrqu->data.length = data1;
up(&priv->wx_sem);
return 0;
}
/* Look up a scanned network by SSID and flag whether it advertises
 * WPA/RSN (data.flags = 1) or not (0). */
static int r8192_wx_get_ap_status(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
struct ieee80211_network *target;
int name_len;
down(&priv->wx_sem);
//count the length of input ssid
/* NOTE(review): wrqu->data.pointer is dereferenced directly here and in
 * strncmp() below without copy_from_user(); unsafe for a user pointer --
 * confirm whether the caller copies it into kernel space first. */
for(name_len=0 ; ((char*)wrqu->data.pointer)[name_len]!='\0' ; name_len++);
//search for the correspoding info which is received
list_for_each_entry(target, &ieee->network_list, list) {
if ( (target->ssid_len == name_len) &&
(strncmp(target->ssid, (char*)wrqu->data.pointer, name_len)==0)){
if(target->wpa_ie_len>0 || target->rsn_ie_len>0 )
//set flags=1 to indicate this ap is WPA
wrqu->data.flags = 1;
else wrqu->data.flags = 0;
break;
}
}
up(&priv->wx_sem);
return 0;
}
#endif
/* WX handler: toggle raw-TX mode; serialized, no-op under RF kill. */
static int r8192_wx_set_rawtx(struct net_device *dev,
			      struct iw_request_info *info,
			      union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int err;

	if (priv->bHwRadioOff)
		return 0;

	down(&priv->wx_sem);
	err = ieee80211_wx_set_rawtx(priv->ieee80211, info, wrqu, extra);
	up(&priv->wx_sem);

	return err;
}
/* Debug WX handler: latch a force-reset request into priv->force_reset.
 * *extra is the user-supplied flag value. */
static int r8192_wx_force_reset(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
down(&priv->wx_sem);
printk("%s(): force reset ! extra is %d\n",__FUNCTION__, *extra);
priv->force_reset = *extra;
up(&priv->wx_sem);
return 0;
}
/*
 * WX handler: accept (parms[0] > 0) or reject frames with a bad CRC while
 * in monitor mode.
 *
 * CLEANUP: the original read the old value into "prev" and compared it
 * against the freshly-written flag guarding an entirely commented-out
 * body -- dead code with an effectively unused variable; removed.
 */
static int r8192_wx_set_crcmon(struct net_device *dev,
			       struct iw_request_info *info,
			       union iwreq_data *wrqu, char *extra)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	int *parms = (int *)extra;
	int enable = (parms[0] > 0);

	if (priv->bHwRadioOff == true)
		return 0;

	down(&priv->wx_sem);

	priv->crcmon = enable ? 1 : 0;

	DMESG("bad CRC in monitor mode are %s",
	      priv->crcmon ? "accepted" : "rejected");

	up(&priv->wx_sem);
	return 0;
}
/*
 * WX handler: change the operating mode.  Entering ad-hoc mode may require
 * waking the radio from inactive power save (IPS) first; if the RF is off
 * for any stronger reason than IPS the request is refused.
 */
static int r8192_wx_set_mode(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct r8192_priv *priv = ieee80211_priv(dev);
RT_RF_POWER_STATE rtState;
int ret;
if(priv->bHwRadioOff == true)
return 0;
/* snapshot RF state before taking wx_sem */
rtState = priv->ieee80211->eRFPowerState;
down(&priv->wx_sem);
#ifdef ENABLE_IPS
if(wrqu->mode == IW_MODE_ADHOC){
if(priv->ieee80211->PowerSaveControl.bInactivePs){
if(rtState == eRfOff){
/* RF was turned off by something other than IPS
 * (e.g. rfkill): cannot silently re-enable it here */
if(priv->ieee80211->RfOffReason > RF_CHANGE_BY_IPS)
{
RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",__FUNCTION__);
up(&priv->wx_sem);
return -1;
}
else{
/* only IPS turned it off: wake the radio, holding
 * ips_sem nested inside wx_sem */
RT_TRACE(COMP_ERR, "%s(): IPSLeave\n",__FUNCTION__);
down(&priv->ieee80211->ips_sem);
IPSLeave(dev);
up(&priv->ieee80211->ips_sem);
}
}
}
}
#endif
ret = ieee80211_wx_set_mode(priv->ieee80211,a,wrqu,b);
//rtl8187_set_rxconf(dev);
up(&priv->wx_sem);
return ret;
}
/*
 * Prefix-compatible overlay of struct iw_range: mirrors its leading fields
 * so that casting an iw_range pointer (as done in rtl8180_wx_get_range())
 * gives access to the scan_capa member at the matching offset.
 */
struct iw_range_with_scan_capa
{
/* Informative stuff (to choose between different interface) */
__u32 throughput; /* To give an idea... */
/* In theory this value should be the maximum benchmarked
* TCP/IP throughput, because with most of these devices the
* bit rate is meaningless (overhead an co) to estimate how
* fast the connection will go and pick the fastest one.
* I suggest people to play with Netperf or any benchmark...
*/
/* NWID (or domain id) */
__u32 min_nwid; /* Minimal NWID we are able to set */
__u32 max_nwid; /* Maximal NWID we are able to set */
/* Old Frequency (backward compat - moved lower ) */
__u16 old_num_channels;
__u8 old_num_frequency;
/* Scan capabilities */
__u8 scan_capa;
};
/*
 * SIOCGIWRANGE handler: report the driver's capabilities (bit rates,
 * quality scale, power-management ranges, supported channels, WPA/WPA2
 * cipher capabilities) to userspace tools such as iwlist/iwconfig.
 * Returns 0; output is written into @extra (a struct iw_range buffer).
 */
static int rtl8180_wx_get_range(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct iw_range *range = (struct iw_range *)extra;
/* Alias the same buffer as the extended layout so scan_capa can be set. */
struct iw_range_with_scan_capa* tmp = (struct iw_range_with_scan_capa*)range;
struct r8192_priv *priv = ieee80211_priv(dev);
u16 val;
int i;
wrqu->data.length = sizeof(*range);
memset(range, 0, sizeof(*range));
/* Let's try to keep this struct in the same order as in
* linux/include/wireless.h
*/
/* TODO: See what values we can set, and remove the ones we can't
* set, or fill them with some default data.
*/
/* ~5 Mb/s real (802.11b) */
/* NOTE(review): 130 Mb/s contradicts the 802.11b comment above -- confirm */
range->throughput = 130 * 1000 * 1000;
// TODO: Not used in 802.11b?
// range->min_nwid; /* Minimal NWID we are able to set */
// TODO: Not used in 802.11b?
// range->max_nwid; /* Maximal NWID we are able to set */
/* Old Frequency (backward compat - moved lower ) */
// range->old_num_channels;
// range->old_num_frequency;
// range->old_freq[6]; /* Filler to keep "version" at the same offset */
/* Sensitivity is only reported when the RF backend supports setting it. */
if(priv->rf_set_sens != NULL)
range->sensitivity = priv->max_sens; /* signal level threshold range */
range->max_qual.qual = 100;
/* TODO: Find real max RSSI and stick here */
range->max_qual.level = 0;
range->max_qual.noise = -98;
range->max_qual.updated = 7; /* Updated all three */
range->avg_qual.qual = 92; /* > 8% missed beacons is 'bad' */
/* TODO: Find real 'good' to 'bad' threshold value for RSSI */
range->avg_qual.level = 20 + -98;
range->avg_qual.noise = 0;
range->avg_qual.updated = 7; /* Updated all three */
range->num_bitrates = RATE_COUNT;
/* Advertise the rate table, bounded by the WE bitrate array size. */
for (i = 0; i < RATE_COUNT && i < IW_MAX_BITRATES; i++) {
range->bitrate[i] = rtl8180_rates[i];
}
range->min_frag = MIN_FRAG_THRESHOLD;
range->max_frag = MAX_FRAG_THRESHOLD;
range->min_pmp=0;
range->max_pmp = 5000000;
range->min_pmt = 0;
range->max_pmt = 65535*1000;
range->pmp_flags = IW_POWER_PERIOD;
range->pmt_flags = IW_POWER_TIMEOUT;
range->pm_capa = IW_POWER_PERIOD | IW_POWER_TIMEOUT | IW_POWER_ALL_R;
range->we_version_compiled = WIRELESS_EXT;
range->we_version_source = 18;
// range->retry_capa; /* What retry options are supported */
// range->retry_flags; /* How to decode max/min retry limit */
// range->r_time_flags; /* How to decode max/min retry life */
// range->min_retry; /* Minimal number of retries */
// range->max_retry; /* Maximal number of retries */
// range->min_r_time; /* Minimal retry lifetime */
// range->max_r_time; /* Maximal retry lifetime */
/* Enumerate the 2.4 GHz channels allowed by the current channel map. */
for (i = 0, val = 0; i < 14; i++) {
// Include only legal frequencies for some countries
#ifdef ENABLE_DOT11D
if ((GET_DOT11D_INFO(priv->ieee80211)->channel_map)[i+1]) {
#else
if ((priv->ieee80211->channel_map)[i+1]) {
#endif
range->freq[val].i = i + 1;
range->freq[val].m = ieee80211_wlan_frequencies[i] * 100000;
range->freq[val].e = 1;
val++;
} else {
// FIXME: do we need to set anything for channels
// we don't use ?
}
if (val == IW_MAX_FREQUENCIES)
break;
}
range->num_frequency = val;
range->num_channels = val;
#if WIRELESS_EXT > 17
range->enc_capa = IW_ENC_CAPA_WPA|IW_ENC_CAPA_WPA2|
IW_ENC_CAPA_CIPHER_TKIP|IW_ENC_CAPA_CIPHER_CCMP;
#endif
/* Advertise IW_SCAN_CAPA_ESSID via the extended-layout alias. */
tmp->scan_capa = 0x01;
return 0;
}
static int r8192_wx_set_scan(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
RT_RF_POWER_STATE rtState;
int ret;
if(priv->bHwRadioOff == true)
return 0;
rtState = priv->ieee80211->eRFPowerState;
if(!priv->up) return -ENETDOWN;
if (priv->ieee80211->LinkDetectInfo.bBusyTraffic == true)
return -EAGAIN;
if (wrqu->data.flags & IW_SCAN_THIS_ESSID)
{
struct iw_scan_req* req = (struct iw_scan_req*)b;
if (req->essid_len)
{
//printk("==**&*&*&**===>scan set ssid:%s\n", req->essid);
ieee->current_network.ssid_len = req->essid_len;
memcpy(ieee->current_network.ssid, req->essid, req->essid_len);
//printk("=====>network ssid:%s\n", ieee->current_network.ssid);
}
}
down(&priv->wx_sem);
#ifdef ENABLE_IPS
priv->ieee80211->actscanning = true;
if(priv->ieee80211->state != IEEE80211_LINKED){
if(priv->ieee80211->PowerSaveControl.bInactivePs){
if(rtState == eRfOff){
if(priv->ieee80211->RfOffReason > RF_CHANGE_BY_IPS)
{
RT_TRACE(COMP_ERR, "%s(): RF is OFF.\n",__FUNCTION__);
up(&priv->wx_sem);
return -1;
}
else{
//RT_TRACE(COMP_PS, "%s(): IPSLeave\n",__FUNCTION__);
down(&priv->ieee80211->ips_sem);
IPSLeave(dev);
up(&priv->ieee80211->ips_sem);
}
}
}
priv->ieee80211->scanning = 0;
ieee80211_softmac_scan_syncro(priv->ieee80211);
ret = 0;
}
else
#else
if(priv->ieee80211->state != IEEE80211_LINKED){
priv->ieee80211->scanning = 0;
ieee80211_softmac_scan_syncro(priv->ieee80211);
ret = 0;
}
else
#endif
ret = ieee80211_wx_set_scan(priv->ieee80211,a,wrqu,b);
up(&priv->wx_sem);
return ret;
}
/*
 * SIOCGIWSCAN handler: return the scan results collected by the softmac
 * layer.  Succeeds trivially with the hardware radio switch off; fails
 * with -ENETDOWN when the interface is not up.
 */
static int r8192_wx_get_scan(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int rc;
if (priv->bHwRadioOff == true)
return 0;
if (!priv->up)
return -ENETDOWN;
down(&priv->wx_sem);
rc = ieee80211_wx_get_scan(priv->ieee80211, a, wrqu, b);
up(&priv->wx_sem);
return rc;
}
/*
 * SIOCSIWESSID handler: set the desired ESSID.
 *
 * No-op (success) when the hardware radio switch is off.  Under
 * ENABLE_IPS the radio is first woken from inactive power save, then the
 * request is forwarded to the softmac layer under wx_sem.
 *
 * CLEANUP: the original declared and assigned a local rtState
 * (eRFPowerState snapshot) that was never read; the dead local is removed.
 */
static int r8192_wx_set_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
if (priv->bHwRadioOff == true)
return 0;
down(&priv->wx_sem);
#ifdef ENABLE_IPS
/* Leave inactive power save before touching association state. */
down(&priv->ieee80211->ips_sem);
IPSLeave(dev);
up(&priv->ieee80211->ips_sem);
#endif
ret = ieee80211_wx_set_essid(priv->ieee80211, a, wrqu, b);
up(&priv->wx_sem);
return ret;
}
/*
 * SIOCGIWESSID handler: report the current ESSID, serialized against
 * other wireless-extension calls by wx_sem.
 */
static int r8192_wx_get_essid(struct net_device *dev,
struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int rc;
down(&priv->wx_sem);
rc = ieee80211_wx_get_essid(priv->ieee80211, a, wrqu, b);
up(&priv->wx_sem);
return rc;
}
/*
 * SIOCSIWFREQ handler: change channel/frequency.  Silently succeeds when
 * the hardware radio switch is off; otherwise delegates to the softmac
 * layer under wx_sem.
 */
static int r8192_wx_set_freq(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int rc;
if (priv->bHwRadioOff == true)
return 0;
down(&priv->wx_sem);
rc = ieee80211_wx_set_freq(priv->ieee80211, a, wrqu, b);
up(&priv->wx_sem);
return rc;
}
/*
 * SIOCGIWNAME handler: return the protocol name string.  Pure
 * pass-through to the softmac layer; no locking needed.
 */
static int r8192_wx_get_name(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
return ieee80211_wx_get_name(priv->ieee80211, info, wrqu, extra);
}
/*
 * SIOCSIWFRAG handler: set the fragmentation threshold.
 * Disabling restores the default; explicit values are range-checked and
 * forced even (bit 0 cleared).  No-op when the hardware radio is off.
 */
static int r8192_wx_set_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if (priv->bHwRadioOff == true)
return 0;
if (wrqu->frag.disabled) {
priv->ieee80211->fts = DEFAULT_FRAG_THRESHOLD;
return 0;
}
if (wrqu->frag.value < MIN_FRAG_THRESHOLD ||
    wrqu->frag.value > MAX_FRAG_THRESHOLD)
return -EINVAL;
/* Threshold is kept even: clear the low bit. */
priv->ieee80211->fts = wrqu->frag.value & ~0x1;
return 0;
}
/*
 * SIOCGIWFRAG handler: report the current fragmentation threshold.
 * "disabled" is derived from the value matching the driver default.
 */
static int r8192_wx_get_frag(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int cur = priv->ieee80211->fts;
wrqu->frag.value = cur;
wrqu->frag.fixed = 0; /* no auto select */
wrqu->frag.disabled = (cur == DEFAULT_FRAG_THRESHOLD);
return 0;
}
/*
 * SIOCSIWAP handler: set the desired access-point BSSID.
 * Silently succeeds with the hardware radio switch off; under ENABLE_IPS
 * the radio is woken from inactive power save before the request is
 * forwarded to the softmac layer, all under wx_sem.
 */
static int r8192_wx_set_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *awrq,
char *extra)
{
int ret;
struct r8192_priv *priv = ieee80211_priv(dev);
// struct sockaddr *temp = (struct sockaddr *)awrq;
if(priv->bHwRadioOff == true)
return 0;
down(&priv->wx_sem);
#ifdef ENABLE_IPS
/* Leave inactive power save so (re)association can proceed. */
down(&priv->ieee80211->ips_sem);
IPSLeave(dev);
up(&priv->ieee80211->ips_sem);
#endif
ret = ieee80211_wx_set_wap(priv->ieee80211,info,awrq,extra);
up(&priv->wx_sem);
return ret;
}
/*
 * SIOCGIWAP handler: report the current access-point BSSID, tracked by
 * the softmac layer.
 */
static int r8192_wx_get_wap(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int rc;
rc = ieee80211_wx_get_wap(priv->ieee80211, info, wrqu, extra);
return rc;
}
/*
 * SIOCGIWENCODE handler: report the current WEP/encryption settings.
 * Pure pass-through to the softmac layer.
 */
static int r8192_wx_get_enc(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *key)
{
struct r8192_priv *priv = ieee80211_priv(dev);
return ieee80211_wx_get_encode(priv->ieee80211, info, wrqu, key);
}
/*
 * SIOCSIWENCODE handler: program a WEP key.
 *
 * First installs the software key via the softmac layer, then (for
 * non-empty keys) packs the key bytes into four little-endian 32-bit
 * words and writes them into the hardware CAM with setKey().  Only
 * WEP-40 (5-byte) and WEP-104 (13-byte) keys get a hardware entry.
 * wx_set_enc brackets the operation so other paths can tell a key
 * update is in flight.
 */
static int r8192_wx_set_enc(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *key)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int ret;
struct ieee80211_device *ieee = priv->ieee80211;
//u32 TargetContent;
u32 hwkey[4]={0,0,0,0};
u8 mask=0xff;
u32 key_idx=0;
/* Per-index pseudo MAC addresses used as CAM entry addresses for WEP. */
u8 zero_addr[4][6] ={{0x00,0x00,0x00,0x00,0x00,0x00},
{0x00,0x00,0x00,0x00,0x00,0x01},
{0x00,0x00,0x00,0x00,0x00,0x02},
{0x00,0x00,0x00,0x00,0x00,0x03} };
int i;
if(priv->bHwRadioOff == true)
return 0;
if(!priv->up) return -ENETDOWN;
priv->ieee80211->wx_set_enc = 1;
#ifdef ENABLE_IPS
/* Wake the radio from inactive power save before touching keys. */
down(&priv->ieee80211->ips_sem);
IPSLeave(dev);
up(&priv->ieee80211->ips_sem);
#endif
down(&priv->wx_sem);
RT_TRACE(COMP_SEC, "Setting SW wep key");
ret = ieee80211_wx_set_encode(priv->ieee80211,info,wrqu,key);
up(&priv->wx_sem);
//sometimes, the length is zero while we do not type key value
if(wrqu->encoding.length!=0){
/* Pack key bytes into 32-bit words; once the key length is passed,
 * mask drops to 0 so the remaining bytes are zero-filled. */
for(i=0 ; i<4 ; i++){
hwkey[i] |= key[4*i+0]&mask;
if(i==1&&(4*i+1)==wrqu->encoding.length) mask=0x00;
if(i==3&&(4*i+1)==wrqu->encoding.length) mask=0x00;
hwkey[i] |= (key[4*i+1]&mask)<<8;
hwkey[i] |= (key[4*i+2]&mask)<<16;
hwkey[i] |= (key[4*i+3]&mask)<<24;
}
#define CONF_WEP40 0x4
#define CONF_WEP104 0x14
/* IW_ENCODE_INDEX is 1-based; 0 means "current TX key". */
switch(wrqu->encoding.flags & IW_ENCODE_INDEX){
case 0: key_idx = ieee->tx_keyidx; break;
case 1: key_idx = 0; break;
case 2: key_idx = 1; break;
case 3: key_idx = 2; break;
case 4: key_idx = 3; break;
default: break;
}
//printk("-------====>length:%d, key_idx:%d, flag:%x\n", wrqu->encoding.length, key_idx, wrqu->encoding.flags);
/* 5 bytes -> WEP-40 hardware key. */
if(wrqu->encoding.length==0x5){
ieee->pairwise_key_type = KEY_TYPE_WEP40;
EnableHWSecurityConfig8192(dev);
setKey( dev,
key_idx, //EntryNo
key_idx, //KeyIndex
KEY_TYPE_WEP40, //KeyType
zero_addr[key_idx],
0, //DefaultKey
hwkey); //KeyContent
#if 0
if(key_idx == 0){
//write_nic_byte(dev, SECR, 7);
setKey( dev,
4, //EntryNo
key_idx, //KeyIndex
KEY_TYPE_WEP40, //KeyType
broadcast_addr, //addr
0, //DefaultKey
hwkey); //KeyContent
}
#endif
}
/* 13 bytes -> WEP-104 hardware key. */
else if(wrqu->encoding.length==0xd){
ieee->pairwise_key_type = KEY_TYPE_WEP104;
EnableHWSecurityConfig8192(dev);
setKey( dev,
key_idx, //EntryNo
key_idx, //KeyIndex
KEY_TYPE_WEP104, //KeyType
zero_addr[key_idx],
0, //DefaultKey
hwkey); //KeyContent
#if 0
if(key_idx == 0){
//write_nic_byte(dev, SECR, 7);
setKey( dev,
4, //EntryNo
key_idx, //KeyIndex
KEY_TYPE_WEP104, //KeyType
broadcast_addr, //addr
0, //DefaultKey
hwkey); //KeyContent
}
#endif
}
else printk("wrong type in WEP, not WEP40 and WEP104\n");
}
#if 0
//consider the setting different key index situation
//wrqu->encoding.flags = 801 means that we set key with index "1"
if(wrqu->encoding.length==0 && (wrqu->encoding.flags >>8) == 0x8 ){
printk("===>1\n");
//write_nic_byte(dev, SECR, 7);
EnableHWSecurityConfig8192(dev);
//copy wpa config from default key(key0~key3) to broadcast key(key5)
//
key_idx = (wrqu->encoding.flags & 0xf)-1 ;
write_cam(dev, (4*6), 0xffff0000|read_cam(dev, key_idx*6) );
write_cam(dev, (4*6)+1, 0xffffffff);
write_cam(dev, (4*6)+2, read_cam(dev, (key_idx*6)+2) );
write_cam(dev, (4*6)+3, read_cam(dev, (key_idx*6)+3) );
write_cam(dev, (4*6)+4, read_cam(dev, (key_idx*6)+4) );
write_cam(dev, (4*6)+5, read_cam(dev, (key_idx*6)+5) );
}
#endif
priv->ieee80211->wx_set_enc = 0;
return ret;
}
/*
 * Private ioctl: select active vs. passive scanning.  The first int of
 * the payload is stored directly into ieee80211->active_scan.
 * NOTE(review): returns 1 (not 0) on success, matching the original.
 */
static int r8192_wx_set_scan_type(struct net_device *dev,
struct iw_request_info *aa,
union iwreq_data *wrqu, char *p)
{
struct r8192_priv *priv = ieee80211_priv(dev);
const int *args = (const int *)p;
priv->ieee80211->active_scan = args[0];
return 1;
}
/*
 * SIOCSIWRETRY handler: set the retry limit for RTS/CTS-protected
 * (IW_RETRY_MAX) or plain (otherwise) data frames, then recommit the
 * hardware configuration.  Lifetime-based retry and "disabled" are not
 * supported (-EINVAL), nor are values above R8180_MAX_RETRY.
 */
static int r8192_wx_set_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int rc = 0;
if (priv->bHwRadioOff == true)
return 0;
down(&priv->wx_sem);
if (wrqu->retry.disabled || (wrqu->retry.flags & IW_RETRY_LIFETIME)) {
rc = -EINVAL;
goto out;
}
if (!(wrqu->retry.flags & IW_RETRY_LIMIT)) {
rc = -EINVAL;
goto out;
}
if (wrqu->retry.value > R8180_MAX_RETRY) {
rc = -EINVAL;
goto out;
}
if (wrqu->retry.flags & IW_RETRY_MAX) {
priv->retry_rts = wrqu->retry.value;
DMESG("Setting retry for RTS/CTS data to %d", wrqu->retry.value);
} else {
priv->retry_data = wrqu->retry.value;
DMESG("Setting retry for non RTS/CTS data to %d", wrqu->retry.value);
}
/* FIXME !
 * We might try to write directly the TX config register
 * or to restart just the (R)TX process.
 * I'm unsure if whole reset is really needed
 */
rtl8192_commit(dev);
out:
up(&priv->wx_sem);
return rc;
}
/*
 * SIOCGIWRETRY handler: report the retry limit for RTS/CTS-protected
 * (IW_RETRY_MAX requested) or plain data frames.  Lifetime queries are
 * not supported (-EINVAL).
 *
 * BUG FIX: the reply flags were built with '&' (IW_RETRY_LIMIT &
 * IW_RETRY_MAX and IW_RETRY_LIMIT & IW_RETRY_MIN), which evaluates to 0
 * because the flag bits do not overlap, so userspace saw no flags at
 * all.  The intended combination uses '|'.
 */
static int r8192_wx_get_retry(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
wrqu->retry.disabled = 0; /* can't be disabled */
if ((wrqu->retry.flags & IW_RETRY_TYPE) == IW_RETRY_LIFETIME)
return -EINVAL;
if (wrqu->retry.flags & IW_RETRY_MAX) {
wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MAX;
wrqu->retry.value = priv->retry_rts;
} else {
wrqu->retry.flags = IW_RETRY_LIMIT | IW_RETRY_MIN;
wrqu->retry.value = priv->retry_data;
}
return 0;
}
/*
 * SIOCGIWSENS handler: report the current sensitivity.  Fails (-1) when
 * the RF backend provides no sensitivity control.
 */
static int r8192_wx_get_sens(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
if (!priv->rf_set_sens)
return -1; /* no sensitivity support on this radio */
wrqu->sens.value = priv->sens;
return 0;
}
/*
 * SIOCSIWSENS handler: set receiver sensitivity via the RF backend's
 * rf_set_sens hook.  Returns 0 on success, -1 when the radio has no
 * sensitivity support, -EINVAL when the backend rejects the value.
 * No-op (success) with the hardware radio switch off.
 */
static int r8192_wx_set_sens(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
short rc = 0;
if (priv->bHwRadioOff == true)
return 0;
down(&priv->wx_sem);
if (!priv->rf_set_sens) {
rc = -1; /* no sensitivity support on this radio */
} else if (priv->rf_set_sens(dev, wrqu->sens.value) == 0) {
priv->sens = wrqu->sens.value;
} else {
rc = -EINVAL;
}
up(&priv->wx_sem);
return rc;
}
#if (WIRELESS_EXT >= 18)
static int r8192_wx_set_enc_ext(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
int ret=0;
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device* ieee = priv->ieee80211;
if(priv->bHwRadioOff == true)
return 0;
down(&priv->wx_sem);
priv->ieee80211->wx_set_enc = 1;
#ifdef ENABLE_IPS
down(&priv->ieee80211->ips_sem);
IPSLeave(dev);
up(&priv->ieee80211->ips_sem);
#endif
ret = ieee80211_wx_set_encode_ext(ieee, info, wrqu, extra);
{
u8 broadcast_addr[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
u8 zero[6] = {0};
u32 key[4] = {0};
struct iw_encode_ext *ext = (struct iw_encode_ext *)extra;
struct iw_point *encoding = &wrqu->encoding;
#if 0
static u8 CAM_CONST_ADDR[4][6] = {
{0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x01},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x02},
{0x00, 0x00, 0x00, 0x00, 0x00, 0x03}};
#endif
u8 idx = 0, alg = 0, group = 0;
if ((encoding->flags & IW_ENCODE_DISABLED) ||
ext->alg == IW_ENCODE_ALG_NONE) //none is not allowed to use hwsec WB 2008.07.01
{
ieee->pairwise_key_type = ieee->group_key_type = KEY_TYPE_NA;
CamResetAllEntry(dev);
goto end_hw_sec;
}
alg = (ext->alg == IW_ENCODE_ALG_CCMP)?KEY_TYPE_CCMP:ext->alg; // as IW_ENCODE_ALG_CCMP is defined to be 3 and KEY_TYPE_CCMP is defined to 4;
idx = encoding->flags & IW_ENCODE_INDEX;
if (idx)
idx --;
group = ext->ext_flags & IW_ENCODE_EXT_GROUP_KEY;
if ((!group) || (IW_MODE_ADHOC == ieee->iw_mode) || (alg == KEY_TYPE_WEP40))
{
if ((ext->key_len == 13) && (alg == KEY_TYPE_WEP40) )
alg = KEY_TYPE_WEP104;
ieee->pairwise_key_type = alg;
EnableHWSecurityConfig8192(dev);
}
memcpy((u8*)key, ext->key, 16); //we only get 16 bytes key.why? WB 2008.7.1
if ((alg & KEY_TYPE_WEP40) && (ieee->auth_mode !=2) )
{
if (ext->key_len == 13)
ieee->pairwise_key_type = alg = KEY_TYPE_WEP104;
setKey( dev,
idx,//EntryNo
idx, //KeyIndex
alg, //KeyType
zero, //MacAddr
0, //DefaultKey
key); //KeyContent
}
else if (group)
{
ieee->group_key_type = alg;
setKey( dev,
idx,//EntryNo
idx, //KeyIndex
alg, //KeyType
broadcast_addr, //MacAddr
0, //DefaultKey
key); //KeyContent
}
else //pairwise key
{
if ((ieee->pairwise_key_type == KEY_TYPE_CCMP) && ieee->pHTInfo->bCurrentHTSupport){
write_nic_byte(dev, 0x173, 1); //fix aes bug
}
setKey( dev,
4,//EntryNo
idx, //KeyIndex
alg, //KeyType
(u8*)ieee->ap_mac_addr, //MacAddr
0, //DefaultKey
key); //KeyContent
}
}
end_hw_sec:
priv->ieee80211->wx_set_enc = 0;
up(&priv->wx_sem);
return ret;
}
/*
 * SIOCSIWAUTH handler (WE >= 18): forward authentication parameter
 * changes to the softmac layer.  No-op when the hardware radio is off.
 */
static int r8192_wx_set_auth(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *data, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int rc;
if (priv->bHwRadioOff == true)
return 0;
down(&priv->wx_sem);
rc = ieee80211_wx_set_auth(priv->ieee80211, info, &data->param, extra);
up(&priv->wx_sem);
return rc;
}
/*
 * SIOCSIWMLME handler (WE >= 18): forward MLME requests
 * (deauth/disassoc) to the softmac layer.  No-op when the hardware
 * radio is off.
 */
static int r8192_wx_set_mlme(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int rc;
if (priv->bHwRadioOff == true)
return 0;
down(&priv->wx_sem);
rc = ieee80211_wx_set_mlme(priv->ieee80211, info, wrqu, extra);
up(&priv->wx_sem);
return rc;
}
#endif
/*
 * SIOCSIWGENIE handler: hand a generic information element (e.g. the
 * WPA/RSN IE from wpa_supplicant) to the softmac layer.  No-op when the
 * hardware radio is off.
 */
static int r8192_wx_set_gen_ie(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *data, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
int rc;
if (priv->bHwRadioOff == true)
return 0;
down(&priv->wx_sem);
rc = ieee80211_wx_set_gen_ie(priv->ieee80211, extra, data->data.length);
up(&priv->wx_sem);
return rc;
}
/*
 * Placeholder wireless-extension handler for ioctls this driver does not
 * implement; it unconditionally fails.
 */
static int dummy(struct net_device *dev, struct iw_request_info *a,
union iwreq_data *wrqu, char *b)
{
return -1;
}
// check ac/dc status with the help of user space application */
static int r8192_wx_adapter_power_status(struct net_device *dev,
struct iw_request_info *info,
union iwreq_data *wrqu, char *extra)
{
struct r8192_priv *priv = ieee80211_priv(dev);
#ifdef ENABLE_LPS
PRT_POWER_SAVE_CONTROL pPSC = (PRT_POWER_SAVE_CONTROL)(&(priv->ieee80211->PowerSaveControl));
struct ieee80211_device* ieee = priv->ieee80211;
#endif
down(&priv->wx_sem);
#ifdef ENABLE_LPS
RT_TRACE(COMP_POWER, "%s(): %s\n",__FUNCTION__, (*extra == 6)?"DC power":"AC power");
// ieee->ps shall not be set under DC mode, otherwise it conflict
// with Leisure power save mode setting.
//
if(*extra || priv->force_lps) {
priv->ps_force = false;
pPSC->bLeisurePs = true;
} else {
//LZM for PS-Poll AID issue. 090429
if(priv->ieee80211->state == IEEE80211_LINKED)
LeisurePSLeave(dev);
priv->ps_force = true;
pPSC->bLeisurePs = false;
ieee->ps = *extra;
}
#endif
up(&priv->wx_sem);
return 0;
}
/*
 * Standard wireless-extension handler table, indexed by ioctl number
 * relative to SIOCIWFIRST.  NULL slots fall back to the WE core;
 * "dummy" slots deliberately fail for unimplemented ioctls.
 */
static iw_handler r8192_wx_handlers[] =
{
NULL, /* SIOCSIWCOMMIT */
r8192_wx_get_name, /* SIOCGIWNAME */
dummy, /* SIOCSIWNWID */
dummy, /* SIOCGIWNWID */
r8192_wx_set_freq, /* SIOCSIWFREQ */
r8192_wx_get_freq, /* SIOCGIWFREQ */
r8192_wx_set_mode, /* SIOCSIWMODE */
r8192_wx_get_mode, /* SIOCGIWMODE */
r8192_wx_set_sens, /* SIOCSIWSENS */
r8192_wx_get_sens, /* SIOCGIWSENS */
NULL, /* SIOCSIWRANGE */
rtl8180_wx_get_range, /* SIOCGIWRANGE */
NULL, /* SIOCSIWPRIV */
NULL, /* SIOCGIWPRIV */
NULL, /* SIOCSIWSTATS */
NULL, /* SIOCGIWSTATS */
dummy, /* SIOCSIWSPY */
dummy, /* SIOCGIWSPY */
NULL, /* SIOCGIWTHRSPY */
NULL, /* SIOCWIWTHRSPY */
r8192_wx_set_wap, /* SIOCSIWAP */
r8192_wx_get_wap, /* SIOCGIWAP */
#if (WIRELESS_EXT >= 18)
r8192_wx_set_mlme, /* MLME-- */
#else
NULL,
#endif
dummy, /* SIOCGIWAPLIST -- depricated */
r8192_wx_set_scan, /* SIOCSIWSCAN */
r8192_wx_get_scan, /* SIOCGIWSCAN */
r8192_wx_set_essid, /* SIOCSIWESSID */
r8192_wx_get_essid, /* SIOCGIWESSID */
dummy, /* SIOCSIWNICKN */
dummy, /* SIOCGIWNICKN */
NULL, /* -- hole -- */
NULL, /* -- hole -- */
r8192_wx_set_rate, /* SIOCSIWRATE */
r8192_wx_get_rate, /* SIOCGIWRATE */
r8192_wx_set_rts, /* SIOCSIWRTS */
r8192_wx_get_rts, /* SIOCGIWRTS */
r8192_wx_set_frag, /* SIOCSIWFRAG */
r8192_wx_get_frag, /* SIOCGIWFRAG */
dummy, /* SIOCSIWTXPOW */
dummy, /* SIOCGIWTXPOW */
r8192_wx_set_retry, /* SIOCSIWRETRY */
r8192_wx_get_retry, /* SIOCGIWRETRY */
r8192_wx_set_enc, /* SIOCSIWENCODE */
r8192_wx_get_enc, /* SIOCGIWENCODE */
r8192_wx_set_power, /* SIOCSIWPOWER */
r8192_wx_get_power, /* SIOCGIWPOWER */
NULL, /*---hole---*/
NULL, /*---hole---*/
r8192_wx_set_gen_ie,//NULL, /* SIOCSIWGENIE */
NULL, /* SIOCSIWGENIE */
#if (WIRELESS_EXT >= 18)
r8192_wx_set_auth,//NULL, /* SIOCSIWAUTH */
NULL,//r8192_wx_get_auth,//NULL, /* SIOCSIWAUTH */
r8192_wx_set_enc_ext, /* SIOCSIWENCODEEXT */
#else
NULL,
NULL,
NULL,
#endif
NULL,//r8192_wx_get_enc_ext,//NULL, /* SIOCSIWENCODEEXT */
NULL, /* SIOCSIWPMKSA */
NULL, /*---hole---*/
};
/*
 * Descriptions of the driver-private ioctls (SIOCIWFIRSTPRIV + n),
 * matched positionally with r8192_private_handler[] below.  Each takes a
 * single fixed int argument.
 */
static const struct iw_priv_args r8192_private_args[] = {
{
SIOCIWFIRSTPRIV + 0x0,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "badcrc"
},
{
SIOCIWFIRSTPRIV + 0x1,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "activescan"
},
{
SIOCIWFIRSTPRIV + 0x2,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "rawtx"
}
,
{
SIOCIWFIRSTPRIV + 0x3,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED | 1, 0, "forcereset"
}
,
{
SIOCIWFIRSTPRIV + 0x4,
IW_PRIV_TYPE_INT | IW_PRIV_SIZE_FIXED|1, IW_PRIV_TYPE_NONE,
"set_power"
}
};
/* Private ioctl handlers; order must match r8192_private_args[] above. */
static iw_handler r8192_private_handler[] = {
r8192_wx_set_crcmon, /*SIOCIWSECONDPRIV*/
r8192_wx_set_scan_type,
r8192_wx_set_rawtx,
r8192_wx_force_reset,
r8192_wx_adapter_power_status,
};
//#if WIRELESS_EXT >= 17
/*
 * Fill and return the cached iw_statistics for this interface.
 * While not associated, an all-zero quality sample is reported;
 * otherwise the most recent rssi/signal/noise figures tracked for the
 * current network are copied out.
 */
struct iw_statistics *r8192_get_wireless_stats(struct net_device *dev)
{
struct r8192_priv *priv = ieee80211_priv(dev);
struct ieee80211_device *ieee = priv->ieee80211;
struct iw_statistics *wstats = &priv->wstats;
if (ieee->state < IEEE80211_LINKED) {
wstats->qual.qual = 0;
wstats->qual.level = 0;
wstats->qual.noise = 0;
wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
return wstats;
}
wstats->qual.level = ieee->current_network.stats.rssi;
wstats->qual.qual = ieee->current_network.stats.signal;
wstats->qual.noise = ieee->current_network.stats.noise;
wstats->qual.updated = IW_QUAL_ALL_UPDATED | IW_QUAL_DBM;
return wstats;
}
//#endif
/*
 * Top-level wireless-extensions registration object, hooked into the
 * net_device so the WE core can dispatch standard and private ioctls.
 */
struct iw_handler_def r8192_wx_handlers_def={
.standard = r8192_wx_handlers,
.num_standard = sizeof(r8192_wx_handlers) / sizeof(iw_handler),
.private = r8192_private_handler,
.num_private = sizeof(r8192_private_handler) / sizeof(iw_handler),
.num_private_args = sizeof(r8192_private_args) / sizeof(struct iw_priv_args),
#if WIRELESS_EXT >= 17
.get_wireless_stats = r8192_get_wireless_stats,
#endif
.private_args = (struct iw_priv_args *)r8192_private_args,
};
| gpl-2.0 |
oppo-source/R7f-4.4-kernel-source | drivers/staging/comedi/drivers/ni_labpc_cs.c | 2090 | 4454 | /*
comedi/drivers/ni_labpc_cs.c
Driver for National Instruments daqcard-1200 boards
Copyright (C) 2001, 2002, 2003 Frank Mori Hess <fmhess@users.sourceforge.net>
PCMCIA crap is adapted from dummy_cs.c 1.31 2001/08/24 12:13:13
from the pcmcia package.
The initial developer of the pcmcia dummy_cs.c code is David A. Hinds
<dahinds@users.sourceforge.net>. Portions created by David A. Hinds
are Copyright (C) 1999 David A. Hinds.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
************************************************************************
*/
/*
Driver: ni_labpc_cs
Description: National Instruments Lab-PC (& compatibles)
Author: Frank Mori Hess <fmhess@users.sourceforge.net>
Devices: [National Instruments] DAQCard-1200 (daqcard-1200)
Status: works
Thanks go to Fredrik Lingvall for much testing and perseverance in
helping to debug daqcard-1200 support.
The 1200 series boards have onboard calibration dacs for correcting
analog input/output offsets and gains. The proper settings for these
caldacs are stored on the board's eeprom. To read the caldac values
from the eeprom and store them into a file that can be then be used by
comedilib, use the comedi_calibrate program.
Configuration options:
none
The daqcard-1200 has quirky chanlist requirements
when scanning multiple channels. Multiple channel scan
sequence must start at highest channel, then decrement down to
channel 0. Chanlists consisting of all one channel
are also legal, and allow you to pace conversions in bursts.
*/
/*
NI manuals:
340988a (daqcard-1200)
*/
#include "../comedidev.h"
#include <linux/delay.h>
#include <linux/slab.h>
#include "8253.h"
#include "8255.h"
#include "comedi_fc.h"
#include "ni_labpc.h"
#include <pcmcia/cistpl.h>
#include <pcmcia/cisreg.h>
#include <pcmcia/ds.h>
/* Board table: only the DAQCard-1200 is handled by this PCMCIA driver. */
static const struct labpc_boardinfo labpc_cs_boards[] = {
{
.name = "daqcard-1200",
.device_id = 0x103,
.ai_speed = 10000,
.register_layout = labpc_1200_layout,
.has_ao = 1,
.ai_range_table = &range_labpc_1200_ai,
.ai_range_code = labpc_1200_ai_gain_bits,
},
};
/*
 * Comedi auto_attach hook: enable the PCMCIA function (I/O window plus
 * pulsed IRQ), record the I/O base, allocate the driver-private state,
 * and hand off to the shared ni_labpc attach code.
 * Returns 0 on success or a negative errno.
 */
static int labpc_auto_attach(struct comedi_device *dev,
unsigned long context)
{
struct pcmcia_device *link = comedi_to_pcmcia_dev(dev);
struct labpc_private *devpriv;
int ret;
/* The ni_labpc driver needs the board_ptr */
dev->board_ptr = &labpc_cs_boards[0];
link->config_flags |= CONF_AUTO_SET_IO |
CONF_ENABLE_IRQ | CONF_ENABLE_PULSE_IRQ;
ret = comedi_pcmcia_enable(dev, NULL);
if (ret)
return ret;
dev->iobase = link->resource[0]->start;
/* The shared attach code requires a working interrupt line. */
if (!link->irq)
return -EINVAL;
devpriv = kzalloc(sizeof(*devpriv), GFP_KERNEL);
if (!devpriv)
return -ENOMEM;
dev->private = devpriv;
return labpc_common_attach(dev, link->irq, IRQF_SHARED);
}
/* Comedi detach hook: tear down the common driver state, then disable
 * the PCMCIA function. */
static void labpc_detach(struct comedi_device *dev)
{
labpc_common_detach(dev);
comedi_pcmcia_disable(dev);
}
/* Comedi driver object for the PCMCIA-attached Lab-PC variant. */
static struct comedi_driver driver_labpc_cs = {
.driver_name = "ni_labpc_cs",
.module = THIS_MODULE,
.auto_attach = labpc_auto_attach,
.detach = labpc_detach,
};
/* PCMCIA probe hook: bind the card to the comedi driver above. */
static int labpc_cs_attach(struct pcmcia_device *link)
{
return comedi_pcmcia_auto_config(link, &driver_labpc_cs);
}
/* PCMCIA ID table: matches the NI DAQCard-1200 (manf 0x010b, card 0x0103). */
static const struct pcmcia_device_id labpc_cs_ids[] = {
PCMCIA_DEVICE_MANF_CARD(0x010b, 0x0103), /* daqcard-1200 */
PCMCIA_DEVICE_NULL
};
MODULE_DEVICE_TABLE(pcmcia, labpc_cs_ids);
/* PCMCIA driver object; registered together with the comedi driver by
 * the module_comedi_pcmcia_driver() helper below. */
static struct pcmcia_driver labpc_cs_driver = {
.name = "daqcard-1200",
.owner = THIS_MODULE,
.id_table = labpc_cs_ids,
.probe = labpc_cs_attach,
.remove = comedi_pcmcia_auto_unconfig,
};
module_comedi_pcmcia_driver(driver_labpc_cs, labpc_cs_driver);
MODULE_DESCRIPTION("Comedi driver for National Instruments Lab-PC");
MODULE_AUTHOR("Frank Mori Hess <fmhess@users.sourceforge.net>");
MODULE_LICENSE("GPL");
| gpl-2.0 |
Asderdd/android_kernel_google_msm8952 | drivers/net/ethernet/stmicro/stmmac/dwmac100_dma.c | 2090 | 4633 | /*******************************************************************************
This is the driver for the MAC 10/100 on-chip Ethernet controller
currently tested on all the ST boards based on STb7109 and stx7200 SoCs.
DWC Ether MAC 10/100 Universal version 4.0 has been used for developing
this code.
This contains the functions to handle the dma.
Copyright (C) 2007-2009 STMicroelectronics Ltd
This program is free software; you can redistribute it and/or modify it
under the terms and conditions of the GNU General Public License,
version 2, as published by the Free Software Foundation.
This program is distributed in the hope it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
more details.
You should have received a copy of the GNU General Public License along with
this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
The full GNU General Public License is included in this distribution in
the file called "COPYING".
Author: Giuseppe Cavallaro <peppe.cavallaro@st.com>
*******************************************************************************/
#include <asm/io.h>
#include "dwmac100.h"
#include "dwmac_dma.h"
/*
 * Initialise the MAC 10/100 DMA engine: software-reset the controller
 * (polling up to ~100 ms), program the bus mode with the requested
 * programmable burst length, mask DMA interrupts, and install the TX/RX
 * descriptor ring base addresses.
 * Returns 0 on success, -EBUSY if the reset bit never clears.
 * NOTE(review): fb, mb, burst_len and atds are unused here; the dwmac100
 * core has no corresponding features -- kept for the shared init API.
 */
static int dwmac100_dma_init(void __iomem *ioaddr, int pbl, int fb, int mb,
int burst_len, u32 dma_tx, u32 dma_rx, int atds)
{
u32 value = readl(ioaddr + DMA_BUS_MODE);
int limit;
/* DMA SW reset */
value |= DMA_BUS_MODE_SFT_RESET;
writel(value, ioaddr + DMA_BUS_MODE);
limit = 10;
/* Poll until the controller clears the self-reset bit. */
while (limit--) {
if (!(readl(ioaddr + DMA_BUS_MODE) & DMA_BUS_MODE_SFT_RESET))
break;
mdelay(10);
}
if (limit < 0)
return -EBUSY;
/* Enable Application Access by writing to DMA CSR0 */
writel(DMA_BUS_MODE_DEFAULT | (pbl << DMA_BUS_MODE_PBL_SHIFT),
ioaddr + DMA_BUS_MODE);
/* Mask interrupts by writing to CSR7 */
writel(DMA_INTR_DEFAULT_MASK, ioaddr + DMA_INTR_ENA);
/* RX/TX descriptor base addr lists must be written into
* DMA CSR3 and CSR4, respectively
*/
writel(dma_tx, ioaddr + DMA_TX_BASE_ADDR);
writel(dma_rx, ioaddr + DMA_RCV_BASE_ADDR);
return 0;
}
/* Store and Forward capability is not used at all.
*
* The transmit threshold can be programmed by setting the TTC bits in the DMA
* control register.
*/
/*
 * Program the DMA operation mode (CSR6).  Only the transmit threshold
 * is configurable on this core: the requested byte count is mapped to
 * the nearest supported TTC setting (32/64/128).  rxmode is accepted
 * for API compatibility but has no hardware counterpart here.
 */
static void dwmac100_dma_operation_mode(void __iomem *ioaddr, int txmode,
int rxmode)
{
u32 ttc;
if (txmode <= 32)
ttc = DMA_CONTROL_TTC_32;
else if (txmode <= 64)
ttc = DMA_CONTROL_TTC_64;
else
ttc = DMA_CONTROL_TTC_128;
writel(readl(ioaddr + DMA_CONTROL) | ttc, ioaddr + DMA_CONTROL);
}
/*
 * Debug helper: dump DMA CSR0-CSR8 plus the current TX/RX buffer
 * address registers (CSR20/CSR21) to the kernel log.
 */
static void dwmac100_dump_dma_regs(void __iomem *ioaddr)
{
int i;
CHIP_DBG(KERN_DEBUG "DWMAC 100 DMA CSR\n");
for (i = 0; i < 9; i++)
pr_debug("\t CSR%d (offset 0x%x): 0x%08x\n", i,
(DMA_BUS_MODE + i * 4),
readl(ioaddr + DMA_BUS_MODE + i * 4));
CHIP_DBG(KERN_DEBUG "\t CSR20 (offset 0x%x): 0x%08x\n",
DMA_CUR_TX_BUF_ADDR, readl(ioaddr + DMA_CUR_TX_BUF_ADDR));
CHIP_DBG(KERN_DEBUG "\t CSR21 (offset 0x%x): 0x%08x\n",
DMA_CUR_RX_BUF_ADDR, readl(ioaddr + DMA_CUR_RX_BUF_ADDR));
}
/* DMA controller has two counters to track the number of the missed frames. */
/*
 * Read CSR8 (missed-frame counter register) and accumulate the overflow
 * and missed-frame counts into both the netdev stats and the stmmac
 * extra stats.  When a hardware counter's own overflow flag is set, the
 * counter has saturated and its maximum value is credited instead.
 */
static void dwmac100_dma_diagnostic_fr(void *data, struct stmmac_extra_stats *x,
void __iomem *ioaddr)
{
struct net_device_stats *stats = data;
u32 csr8 = readl(ioaddr + DMA_MISSED_FRAME_CTR);
if (likely(!csr8))
return;
if (csr8 & DMA_MISSED_FRAME_OVE) {
/* FIFO-overflow counter saturated. */
stats->rx_over_errors += 0x800;
x->rx_overflow_cntr += 0x800;
} else {
unsigned int ove = (csr8 & DMA_MISSED_FRAME_OVE_CNTR) >> 17;
stats->rx_over_errors += ove;
x->rx_overflow_cntr += ove;
}
if (csr8 & DMA_MISSED_FRAME_OVE_M) {
/* Missed-frame counter saturated. */
stats->rx_missed_errors += 0xffff;
x->rx_missed_cntr += 0xffff;
} else {
unsigned int missed = csr8 & DMA_MISSED_FRAME_M_CNTR;
stats->rx_missed_errors += missed;
x->rx_missed_cntr += missed;
}
}
/* DMA operations exported to the stmmac core for the MAC 10/100;
 * generic dwmac_dma helpers cover start/stop and interrupt handling. */
const struct stmmac_dma_ops dwmac100_dma_ops = {
.init = dwmac100_dma_init,
.dump_regs = dwmac100_dump_dma_regs,
.dma_mode = dwmac100_dma_operation_mode,
.dma_diagnostic_fr = dwmac100_dma_diagnostic_fr,
.enable_dma_transmission = dwmac_enable_dma_transmission,
.enable_dma_irq = dwmac_enable_dma_irq,
.disable_dma_irq = dwmac_disable_dma_irq,
.start_tx = dwmac_dma_start_tx,
.stop_tx = dwmac_dma_stop_tx,
.start_rx = dwmac_dma_start_rx,
.stop_rx = dwmac_dma_stop_rx,
.dma_interrupt = dwmac_dma_interrupt,
};
| gpl-2.0 |
faux123/flounder | drivers/staging/comedi/drivers/addi_apci_3xxx.c | 2090 | 23747 | #include <linux/pci.h>
#include "../comedidev.h"
#include "comedi_fc.h"
#include "amcc_s5933.h"
#include "addi-data/addi_common.h"
#include "addi-data/addi_eeprom.c"
#include "addi-data/hwdrv_apci3xxx.c"
#include "addi-data/addi_common.c"
/* Indices into apci3xxx_boardtypes[]; used as the PCI driver_data to
 * select the board description at probe time. */
enum apci3xxx_boardid {
BOARD_APCI3000_16,
BOARD_APCI3000_8,
BOARD_APCI3000_4,
BOARD_APCI3006_16,
BOARD_APCI3006_8,
BOARD_APCI3006_4,
BOARD_APCI3010_16,
BOARD_APCI3010_8,
BOARD_APCI3010_4,
BOARD_APCI3016_16,
BOARD_APCI3016_8,
BOARD_APCI3016_4,
BOARD_APCI3100_16_4,
BOARD_APCI3100_8_4,
BOARD_APCI3106_16_4,
BOARD_APCI3106_8_4,
BOARD_APCI3110_16_4,
BOARD_APCI3110_8_4,
BOARD_APCI3116_16_4,
BOARD_APCI3116_8_4,
BOARD_APCI3003,
BOARD_APCI3002_16,
BOARD_APCI3002_8,
BOARD_APCI3002_4,
BOARD_APCI3500,
};
/*
 * Board description table, indexed by enum apci3xxx_boardid.
 *
 * Common to every entry: 256-byte I/O region (BAR1), no serial EEPROM,
 * ADDIDATA_9054 bridge chip.  Entries differ in:
 *  - analog input channel count (single-ended / differential / list size)
 *  - analog input resolution (maxdata 4095 = 12-bit, 65535 = 16-bit)
 *  - optional 4-channel analog output (12-bit on all AO-capable boards)
 *  - optional 4-in/4-out digital I/O and 24 TTL channels
 *  - minimum acquisition time in nanoseconds
 */
static const struct addi_board apci3xxx_boardtypes[] = {
	[BOARD_APCI3000_16] = {
		.pc_DriverName = "apci3000-16",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 16,
		.i_NbrAiChannelDiff = 8,
		.i_AiChannelList = 16,
		.i_AiMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3000_8] = {
		.pc_DriverName = "apci3000-8",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 8,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 8,
		.i_AiMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3000_4] = {
		.pc_DriverName = "apci3000-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 4,
		.i_NbrAiChannelDiff = 2,
		.i_AiChannelList = 4,
		.i_AiMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3006_16] = {
		.pc_DriverName = "apci3006-16",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 16,
		.i_NbrAiChannelDiff = 8,
		.i_AiChannelList = 16,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3006_8] = {
		.pc_DriverName = "apci3006-8",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 8,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 8,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3006_4] = {
		.pc_DriverName = "apci3006-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 4,
		.i_NbrAiChannelDiff = 2,
		.i_AiChannelList = 4,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	/* apci3010/3016 boards add 4-bit digital input/output subdevices */
	[BOARD_APCI3010_16] = {
		.pc_DriverName = "apci3010-16",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 16,
		.i_NbrAiChannelDiff = 8,
		.i_AiChannelList = 16,
		.i_AiMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3010_8] = {
		.pc_DriverName = "apci3010-8",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 8,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 8,
		.i_AiMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3010_4] = {
		.pc_DriverName = "apci3010-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 4,
		.i_NbrAiChannelDiff = 2,
		.i_AiChannelList = 4,
		.i_AiMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3016_16] = {
		.pc_DriverName = "apci3016-16",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 16,
		.i_NbrAiChannelDiff = 8,
		.i_AiChannelList = 16,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3016_8] = {
		.pc_DriverName = "apci3016-8",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 8,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 8,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3016_4] = {
		.pc_DriverName = "apci3016-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 4,
		.i_NbrAiChannelDiff = 2,
		.i_AiChannelList = 4,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	/* apci31xx "-x-4" boards add a 4-channel 12-bit analog output */
	[BOARD_APCI3100_16_4] = {
		.pc_DriverName = "apci3100-16-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 16,
		.i_NbrAiChannelDiff = 8,
		.i_AiChannelList = 16,
		.i_NbrAoChannel = 4,
		.i_AiMaxdata = 4095,
		.i_AoMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.pr_AoRangelist = &range_apci3XXX_ao,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3100_8_4] = {
		.pc_DriverName = "apci3100-8-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 8,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 8,
		.i_NbrAoChannel = 4,
		.i_AiMaxdata = 4095,
		.i_AoMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.pr_AoRangelist = &range_apci3XXX_ao,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3106_16_4] = {
		.pc_DriverName = "apci3106-16-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 16,
		.i_NbrAiChannelDiff = 8,
		.i_AiChannelList = 16,
		.i_NbrAoChannel = 4,
		.i_AiMaxdata = 65535,
		.i_AoMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.pr_AoRangelist = &range_apci3XXX_ao,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3106_8_4] = {
		.pc_DriverName = "apci3106-8-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 8,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 8,
		.i_NbrAoChannel = 4,
		.i_AiMaxdata = 65535,
		.i_AoMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.pr_AoRangelist = &range_apci3XXX_ao,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 10000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3110_16_4] = {
		.pc_DriverName = "apci3110-16-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 16,
		.i_NbrAiChannelDiff = 8,
		.i_AiChannelList = 16,
		.i_NbrAoChannel = 4,
		.i_AiMaxdata = 4095,
		.i_AoMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.pr_AoRangelist = &range_apci3XXX_ao,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3110_8_4] = {
		.pc_DriverName = "apci3110-8-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 8,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 8,
		.i_NbrAoChannel = 4,
		.i_AiMaxdata = 4095,
		.i_AoMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.pr_AoRangelist = &range_apci3XXX_ao,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3116_16_4] = {
		.pc_DriverName = "apci3116-16-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 16,
		.i_NbrAiChannelDiff = 8,
		.i_AiChannelList = 16,
		.i_NbrAoChannel = 4,
		.i_AiMaxdata = 65535,
		.i_AoMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.pr_AoRangelist = &range_apci3XXX_ao,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	[BOARD_APCI3116_8_4] = {
		.pc_DriverName = "apci3116-8-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannel = 8,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 8,
		.i_NbrAoChannel = 4,
		.i_AiMaxdata = 65535,
		.i_AoMaxdata = 4095,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.pr_AoRangelist = &range_apci3XXX_ao,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.i_NbrTTLChannel = 24,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
	/* apci300x boards: differential inputs only, no TTL channels */
	[BOARD_APCI3003] = {
		.pc_DriverName = "apci3003",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 4,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.b_AvailableConvertUnit = 7,
		.ui_MinAcquisitiontimeNs = 2500,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
	},
	[BOARD_APCI3002_16] = {
		.pc_DriverName = "apci3002-16",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannelDiff = 16,
		.i_AiChannelList = 16,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
	},
	[BOARD_APCI3002_8] = {
		.pc_DriverName = "apci3002-8",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannelDiff = 8,
		.i_AiChannelList = 8,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
	},
	[BOARD_APCI3002_4] = {
		.pc_DriverName = "apci3002-4",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAiChannelDiff = 4,
		.i_AiChannelList = 4,
		.i_AiMaxdata = 65535,
		.pr_AiRangelist = &range_apci3XXX_ai,
		.i_NbrDiChannel = 4,
		.i_NbrDoChannel = 4,
		.i_DoMaxdata = 1,
		.b_AvailableConvertUnit = 6,
		.ui_MinAcquisitiontimeNs = 5000,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ai_config = i_APCI3XXX_InsnConfigAnalogInput,
		.ai_read = i_APCI3XXX_InsnReadAnalogInput,
		.di_bits = apci3xxx_di_insn_bits,
		.do_bits = apci3xxx_do_insn_bits,
	},
	/* apci3500: analog output and TTL only, no analog input */
	[BOARD_APCI3500] = {
		.pc_DriverName = "apci3500",
		.i_IorangeBase1 = 256,
		.i_PCIEeprom = ADDIDATA_NO_EEPROM,
		.pc_EepromChip = ADDIDATA_9054,
		.i_NbrAoChannel = 4,
		.i_AoMaxdata = 4095,
		.pr_AoRangelist = &range_apci3XXX_ao,
		.i_NbrTTLChannel = 24,
		.interrupt = v_APCI3XXX_Interrupt,
		.reset = i_APCI3XXX_Reset,
		.ao_write = i_APCI3XXX_InsnWriteAnalogOutput,
		.ttl_config = i_APCI3XXX_InsnConfigInitTTLIO,
		.ttl_bits = i_APCI3XXX_InsnBitsTTLIO,
		.ttl_read = i_APCI3XXX_InsnReadTTLIO,
		.ttl_write = i_APCI3XXX_InsnWriteTTLIO,
	},
};
/*
 * Attach handler: the context value is the apci3xxx_boardid index that the
 * PCI probe passed through; validate it, point the device at its board
 * description and hand off to the common addi-data attach code.
 */
static int apci3xxx_auto_attach(struct comedi_device *dev,
				unsigned long context)
{
	if (context >= ARRAY_SIZE(apci3xxx_boardtypes))
		return -ENODEV;

	dev->board_ptr = &apci3xxx_boardtypes[context];
	return addi_auto_attach(dev, context);
}
/* comedi driver glue; detach is provided by the shared addi-data code */
static struct comedi_driver apci3xxx_driver = {
	.driver_name = "addi_apci_3xxx",
	.module = THIS_MODULE,
	.auto_attach = apci3xxx_auto_attach,
	.detach = i_ADDI_Detach,
};
/*
 * PCI probe: hand the device to the comedi core; id->driver_data carries
 * the apci3xxx_boardid index consumed later by apci3xxx_auto_attach().
 */
static int apci3xxx_pci_probe(struct pci_dev *dev,
			      const struct pci_device_id *id)
{
	return comedi_pci_auto_config(dev, &apci3xxx_driver, id->driver_data);
}
static DEFINE_PCI_DEVICE_TABLE(apci3xxx_pci_table) = {
{ PCI_VDEVICE(ADDIDATA, 0x3010), BOARD_APCI3000_16 },
{ PCI_VDEVICE(ADDIDATA, 0x300f), BOARD_APCI3000_8 },
{ PCI_VDEVICE(ADDIDATA, 0x300e), BOARD_APCI3000_4 },
{ PCI_VDEVICE(ADDIDATA, 0x3013), BOARD_APCI3006_16 },
{ PCI_VDEVICE(ADDIDATA, 0x3014), BOARD_APCI3006_8 },
{ PCI_VDEVICE(ADDIDATA, 0x3015), BOARD_APCI3006_4 },
{ PCI_VDEVICE(ADDIDATA, 0x3016), BOARD_APCI3010_16 },
{ PCI_VDEVICE(ADDIDATA, 0x3017), BOARD_APCI3010_8 },
{ PCI_VDEVICE(ADDIDATA, 0x3018), BOARD_APCI3010_4 },
{ PCI_VDEVICE(ADDIDATA, 0x3019), BOARD_APCI3016_16 },
{ PCI_VDEVICE(ADDIDATA, 0x301a), BOARD_APCI3016_8 },
{ PCI_VDEVICE(ADDIDATA, 0x301b), BOARD_APCI3016_4 },
{ PCI_VDEVICE(ADDIDATA, 0x301c), BOARD_APCI3100_16_4 },
{ PCI_VDEVICE(ADDIDATA, 0x301d), BOARD_APCI3100_8_4 },
{ PCI_VDEVICE(ADDIDATA, 0x301e), BOARD_APCI3106_16_4 },
{ PCI_VDEVICE(ADDIDATA, 0x301f), BOARD_APCI3106_8_4 },
{ PCI_VDEVICE(ADDIDATA, 0x3020), BOARD_APCI3110_16_4 },
{ PCI_VDEVICE(ADDIDATA, 0x3021), BOARD_APCI3110_8_4 },
{ PCI_VDEVICE(ADDIDATA, 0x3022), BOARD_APCI3116_16_4 },
{ PCI_VDEVICE(ADDIDATA, 0x3023), BOARD_APCI3116_8_4 },
{ PCI_VDEVICE(ADDIDATA, 0x300B), BOARD_APCI3003 },
{ PCI_VDEVICE(ADDIDATA, 0x3002), BOARD_APCI3002_16 },
{ PCI_VDEVICE(ADDIDATA, 0x3003), BOARD_APCI3002_8 },
{ PCI_VDEVICE(ADDIDATA, 0x3004), BOARD_APCI3002_4 },
{ PCI_VDEVICE(ADDIDATA, 0x3024), BOARD_APCI3500 },
{ 0 }
};
MODULE_DEVICE_TABLE(pci, apci3xxx_pci_table);
/* PCI driver registration; module init/exit come from the helper macro */
static struct pci_driver apci3xxx_pci_driver = {
	.name = "addi_apci_3xxx",
	.id_table = apci3xxx_pci_table,
	.probe = apci3xxx_pci_probe,
	.remove = comedi_pci_auto_unconfig,
};
module_comedi_pci_driver(apci3xxx_driver, apci3xxx_pci_driver);

MODULE_AUTHOR("Comedi http://www.comedi.org");
MODULE_DESCRIPTION("Comedi low-level driver");
MODULE_LICENSE("GPL");
| gpl-2.0 |
phalf/android_kernel_samsung_mint-vlx-all | block/blk-tag.c | 3882 | 10125 | /*
* Functions related to tagged command queuing
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include "blk.h"
/**
 * blk_queue_find_tag - find a request by its tag and queue
 * @q:	 The request queue for the device
 * @tag: The tag of the request
 *
 * Notes:
 *    Should be used when a device returns a tag and you want to match
 *    it with a request.
 *
 *    no locks need be held.
 **/
struct request *blk_queue_find_tag(struct request_queue *q, int tag)
{
	/* delegate to the map-level lookup on this queue's tag map */
	return blk_map_queue_find_tag(q->queue_tags, tag);
}
EXPORT_SYMBOL(blk_queue_find_tag);
/**
 * __blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * Drops one reference on @bqt.  Returns true (non-zero) if that was the
 * last reference and the map was actually freed, false if other users
 * still hold it.
 */
static int __blk_free_tags(struct blk_queue_tag *bqt)
{
	if (!atomic_dec_and_test(&bqt->refcnt))
		return 0;

	/* no tag may still be marked busy when the map is torn down */
	BUG_ON(find_first_bit(bqt->tag_map, bqt->max_depth) <
	       bqt->max_depth);

	kfree(bqt->tag_index);
	bqt->tag_index = NULL;
	kfree(bqt->tag_map);
	bqt->tag_map = NULL;
	kfree(bqt);

	return 1;
}
/**
 * __blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *    blk_cleanup_queue() will take care of calling this function, if tagging
 *    has been used. So there's no need to call this directly.
 **/
void __blk_queue_free_tags(struct request_queue *q)
{
	struct blk_queue_tag *bqt = q->queue_tags;

	if (!bqt)
		return;

	/* drop the queue's reference; the map is freed if it was the last */
	__blk_free_tags(bqt);

	q->queue_tags = NULL;
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
/**
 * blk_free_tags - release a given set of tag maintenance info
 * @bqt:	the tag map to free
 *
 * For externally managed @bqt frees the map.  Callers of this
 * function must guarantee to have released all the queues that
 * might have been using this tag map.
 */
void blk_free_tags(struct blk_queue_tag *bqt)
{
	/* this must be the last reference, or the caller broke the contract */
	if (unlikely(!__blk_free_tags(bqt)))
		BUG();
}
EXPORT_SYMBOL(blk_free_tags);
/**
 * blk_queue_free_tags - release tag maintenance info
 * @q:  the request queue for the device
 *
 *  Notes:
 *	This is used to disable tagged queuing to a device, yet leave
 *	queue in function.
 **/
void blk_queue_free_tags(struct request_queue *q)
{
	/* only clears the QUEUED flag; the tag map itself stays allocated
	 * until blk_cleanup_queue() runs __blk_queue_free_tags() */
	queue_flag_clear_unlocked(QUEUE_FLAG_QUEUED, q);
}
EXPORT_SYMBOL(blk_queue_free_tags);
/*
 * Allocate and install the request-index array and busy bitmap for @tags,
 * clamping @depth to twice the queue's request pool size when a queue is
 * given (deeper tags could never be allocated anyway).
 *
 * Returns 0 on success or -ENOMEM; on failure nothing is installed and
 * any partial allocation is released.
 */
static int
init_tag_map(struct request_queue *q, struct blk_queue_tag *tags, int depth)
{
	struct request **tag_index;
	unsigned long *tag_map;
	int nr_ulongs;

	if (q && depth > q->nr_requests * 2) {
		depth = q->nr_requests * 2;
		printk(KERN_ERR "%s: adjusted depth to %d\n",
		       __func__, depth);
	}

	/* kcalloc() zeroes and guards the count * size multiply overflow */
	tag_index = kcalloc(depth, sizeof(struct request *), GFP_ATOMIC);
	if (!tag_index)
		goto fail;

	nr_ulongs = ALIGN(depth, BITS_PER_LONG) / BITS_PER_LONG;
	tag_map = kcalloc(nr_ulongs, sizeof(unsigned long), GFP_ATOMIC);
	if (!tag_map)
		goto fail;

	tags->real_max_depth = depth;
	tags->max_depth = depth;
	tags->tag_index = tag_index;
	tags->tag_map = tag_map;

	return 0;
fail:
	kfree(tag_index);
	return -ENOMEM;
}
/*
 * Allocate a tag map descriptor with its index array and bitmap for the
 * given depth.  The new map starts with a single reference.  Returns NULL
 * on allocation failure.
 */
static struct blk_queue_tag *__blk_queue_init_tags(struct request_queue *q,
						   int depth)
{
	struct blk_queue_tag *tags = kmalloc(sizeof(*tags), GFP_ATOMIC);

	if (!tags)
		return NULL;

	if (init_tag_map(q, tags, depth)) {
		kfree(tags);
		return NULL;
	}

	atomic_set(&tags->refcnt, 1);
	return tags;
}
/**
 * blk_init_tags - initialize the tag info for an external tag map
 * @depth:	the maximum queue depth supported
 **/
struct blk_queue_tag *blk_init_tags(int depth)
{
	/* no queue to clamp against, so depth is used as-is */
	return __blk_queue_init_tags(NULL, depth);
}
EXPORT_SYMBOL(blk_init_tags);
/**
 * blk_queue_init_tags - initialize the queue tag info
 * @q:  the request queue for the device
 * @depth:  the maximum queue depth supported
 * @tags: the tag to use
 *
 * Queue lock must be held here if the function is called to resize an
 * existing map.
 **/
int blk_queue_init_tags(struct request_queue *q, int depth,
			struct blk_queue_tag *tags)
{
	int rc;

	/* an external map may only be supplied while the queue has none */
	BUG_ON(tags && q->queue_tags && tags != q->queue_tags);

	if (!tags && !q->queue_tags) {
		/* no map anywhere yet: build a fresh one for this queue */
		tags = __blk_queue_init_tags(q, depth);
		if (!tags)
			goto fail;
	} else if (q->queue_tags) {
		/* queue is already tagged: just resize the existing map */
		rc = blk_queue_resize_tags(q, depth);
		if (rc)
			return rc;
		queue_flag_set(QUEUE_FLAG_QUEUED, q);
		return 0;
	} else
		/* adopting an externally managed map: take a reference */
		atomic_inc(&tags->refcnt);
	/*
	 * assign it, all done
	 */
	q->queue_tags = tags;
	queue_flag_set_unlocked(QUEUE_FLAG_QUEUED, q);
	INIT_LIST_HEAD(&q->tag_busy_list);
	return 0;
fail:
	kfree(tags);
	return -ENOMEM;
}
EXPORT_SYMBOL(blk_queue_init_tags);
/**
 * blk_queue_resize_tags - change the queueing depth
 * @q:  the request queue for the device
 * @new_depth: the new max command queueing depth
 *
 *  Notes:
 *    Must be called with the queue lock held.
 **/
int blk_queue_resize_tags(struct request_queue *q, int new_depth)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	struct request **tag_index;
	unsigned long *tag_map;
	int max_depth, nr_ulongs;

	if (!bqt)
		return -ENXIO;

	/*
	 * if we already have large enough real_max_depth.  just
	 * adjust max_depth.  *NOTE* as requests with tag value
	 * between new_depth and real_max_depth can be in-flight, tag
	 * map can not be shrunk blindly here.
	 */
	if (new_depth <= bqt->real_max_depth) {
		bqt->max_depth = new_depth;
		return 0;
	}

	/*
	 * Currently cannot replace a shared tag map with a new
	 * one, so error out if this is the case
	 */
	if (atomic_read(&bqt->refcnt) != 1)
		return -EBUSY;

	/*
	 * save the old state info, so we can copy it back
	 */
	tag_index = bqt->tag_index;
	tag_map = bqt->tag_map;
	max_depth = bqt->real_max_depth;

	if (init_tag_map(q, bqt, new_depth))
		return -ENOMEM;

	/* carry the in-flight requests and busy bits over to the new map */
	memcpy(bqt->tag_index, tag_index, max_depth * sizeof(struct request *));
	nr_ulongs = ALIGN(max_depth, BITS_PER_LONG) / BITS_PER_LONG;
	memcpy(bqt->tag_map, tag_map, nr_ulongs * sizeof(unsigned long));

	kfree(tag_index);
	kfree(tag_map);
	return 0;
}
EXPORT_SYMBOL(blk_queue_resize_tags);
/**
 * blk_queue_end_tag - end tag operations for a request
 * @q:  the request queue for the device
 * @rq: the request that has completed
 *
 *  Description:
 *    Typically called when end_that_request_first() returns %0, meaning
 *    all transfers have been done for a request. It's important to call
 *    this function before end_that_request_last(), as that will put the
 *    request back on the free list thus corrupting the internal tag list.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_end_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	int tag = rq->tag;

	BUG_ON(tag == -1);

	if (unlikely(tag >= bqt->real_max_depth))
		/*
		 * This can happen after tag depth has been reduced.
		 * FIXME: how about a warning or info message here?
		 */
		return;

	/* detach the request from the busy list and clear its tag state */
	list_del_init(&rq->queuelist);
	rq->cmd_flags &= ~REQ_QUEUED;
	rq->tag = -1;

	if (unlikely(bqt->tag_index[tag] == NULL))
		printk(KERN_ERR "%s: tag %d is missing\n",
		       __func__, tag);

	bqt->tag_index[tag] = NULL;

	if (unlikely(!test_bit(tag, bqt->tag_map))) {
		printk(KERN_ERR "%s: attempt to clear non-busy tag (%d)\n",
		       __func__, tag);
		return;
	}
	/*
	 * The tag_map bit acts as a lock for tag_index[bit], so we need
	 * unlock memory barrier semantics.
	 */
	clear_bit_unlock(tag, bqt->tag_map);
}
EXPORT_SYMBOL(blk_queue_end_tag);
/**
 * blk_queue_start_tag - find a free tag and assign it
 * @q:  the request queue for the device
 * @rq:  the block request that needs tagging
 *
 *  Description:
 *    This can either be used as a stand-alone helper, or possibly be
 *    assigned as the queue &prep_rq_fn (in which case &struct request
 *    automagically gets a tag assigned). Note that this function
 *    assumes that any type of request can be queued! if this is not
 *    true for your device, you must check the request type before
 *    calling this function.  The request will also be removed from
 *    the request queue, so it's the drivers responsibility to readd
 *    it if it should need to be restarted for some reason.
 *
 *  Notes:
 *   queue lock must be held.
 **/
int blk_queue_start_tag(struct request_queue *q, struct request *rq)
{
	struct blk_queue_tag *bqt = q->queue_tags;
	unsigned max_depth;
	int tag;

	/* a request must never be tagged twice */
	if (unlikely((rq->cmd_flags & REQ_QUEUED))) {
		printk(KERN_ERR
		       "%s: request %p for device [%s] already tagged %d",
		       __func__, rq,
		       rq->rq_disk ? rq->rq_disk->disk_name : "?", rq->tag);
		BUG();
	}

	/*
	 * Protect against shared tag maps, as we may not have exclusive
	 * access to the tag map.
	 *
	 * We reserve a few tags just for sync IO, since we don't want
	 * to starve sync IO on behalf of flooding async IO.
	 */
	max_depth = bqt->max_depth;
	if (!rq_is_sync(rq) && max_depth > 1) {
		max_depth -= 2;
		if (!max_depth)
			max_depth = 1;
		if (q->in_flight[BLK_RW_ASYNC] > max_depth)
			return 1;
	}

	/* retry the scan if another CPU grabbed the bit before we locked it */
	do {
		tag = find_first_zero_bit(bqt->tag_map, max_depth);
		if (tag >= max_depth)
			return 1;

	} while (test_and_set_bit_lock(tag, bqt->tag_map));
	/*
	 * We need lock ordering semantics given by test_and_set_bit_lock.
	 * See blk_queue_end_tag for details.
	 */

	rq->cmd_flags |= REQ_QUEUED;
	rq->tag = tag;
	bqt->tag_index[tag] = rq;
	blk_start_request(rq);
	list_add(&rq->queuelist, &q->tag_busy_list);
	return 0;
}
EXPORT_SYMBOL(blk_queue_start_tag);
/**
 * blk_queue_invalidate_tags - invalidate all pending tags
 * @q:  the request queue for the device
 *
 *  Description:
 *   Hardware conditions may dictate a need to stop all pending requests.
 *   In this case, we will safely clear the block side of the tag queue and
 *   readd all requests to the request queue in the right order.
 *
 *  Notes:
 *   queue lock must be held.
 **/
void blk_queue_invalidate_tags(struct request_queue *q)
{
	struct list_head *pos, *next;

	/* safe iteration: requeueing removes each entry from the busy list */
	list_for_each_safe(pos, next, &q->tag_busy_list)
		blk_requeue_request(q, list_entry_rq(pos));
}
EXPORT_SYMBOL(blk_queue_invalidate_tags);
| gpl-2.0 |
hvaibhav/beagle-dev | arch/m32r/kernel/ptrace.c | 4394 | 15613 | /*
* linux/arch/m32r/kernel/ptrace.c
*
* Copyright (C) 2002 Hirokazu Takata, Takeo Takahashi
* Copyright (C) 2004 Hirokazu Takata, Kei Sakamoto
*
* Original x86 implementation:
* By Ross Biro 1/23/92
* edited by Linus Torvalds
*
* Some code taken from sh version:
* Copyright (C) 1999, 2000 Kaz Kojima & Niibe Yutaka
* Some code taken from arm version:
* Copyright (C) 2000 Russell King
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/err.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/signal.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/mmu_context.h>
/*
 * Fetch one word from the saved pt_regs on the task's kernel stack,
 * addressed as a word offset from the start of the register frame.
 */
static inline unsigned long int
get_stack_long(struct task_struct *task, int offset)
{
	return ((unsigned long *)task_pt_regs(task))[offset];
}
/*
* This routine will put a word on the process kernel stack.
*/
/*
 * Store one word into the pt_regs image saved on the task's kernel
 * stack; @offset is a word index into that area.  Always returns 0.
 */
static inline int
put_stack_long(struct task_struct *task, int offset, unsigned long data)
{
	unsigned long *regs = (unsigned long *)task_pt_regs(task);

	regs[offset] = data;
	return 0;
}
/* Map debugger register numbers 0..15 onto pt_regs word offsets. */
static int reg_offset[] = {
	PT_R0, PT_R1, PT_R2, PT_R3, PT_R4, PT_R5, PT_R6, PT_R7,
	PT_R8, PT_R9, PT_R10, PT_R11, PT_R12, PT_FP, PT_LR, PT_SPU,
};
/*
* Read the word at offset "off" into the "struct user". We
* actually access the pt_regs stored on the kernel stack.
*/
/*
 * PTRACE_PEEKUSR: read the word at byte offset @off of the
 * "struct user" image of @tsk.  Most slots come straight from the
 * pt_regs saved on the kernel stack; a few (evb, cbr, psw, pc) are
 * synthesized.  Returns 0 on success, -EIO for a misaligned or
 * out-of-range offset, or the put_user() result.
 */
static int ptrace_read_user(struct task_struct *tsk, unsigned long off,
			    unsigned long __user *data)
{
	unsigned long tmp;
#ifndef NO_FPU
	/* Never dereferenced: only used to compute struct user field offsets. */
	struct user * dummy = NULL;
#endif

	/* Reject unaligned or out-of-range offsets. */
	if ((off & 3) || off > sizeof(struct user) - 3)
		return -EIO;

	off >>= 2;	/* byte offset -> word index */
	switch (off) {
	case PT_EVB:
		/* Exception vector base lives in control register cr5. */
		__asm__ __volatile__ (
			"mvfc %0, cr5 \n\t"
			: "=r" (tmp)
		);
		break;
	case PT_CBR: {
			/* Condition bit: bit 8 of the saved psw. */
			unsigned long psw;
			psw = get_stack_long(tsk, PT_PSW);
			tmp = ((psw >> 8) & 1);
		}
		break;
	case PT_PSW: {
			/* Export psw and bbpsw packed into one word. */
			unsigned long psw, bbpsw;
			psw = get_stack_long(tsk, PT_PSW);
			bbpsw = get_stack_long(tsk, PT_BBPSW);
			tmp = ((psw >> 8) & 0xff) | ((bbpsw & 0xff) << 8);
		}
		break;
	case PT_PC:
		/* The user-visible PC is the saved backup PC. */
		tmp = get_stack_long(tsk, PT_BPC);
		break;
	case PT_BPC:
		off = PT_BBPC;
		/* fall through */
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			tmp = get_stack_long(tsk, off);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			/* FPU slot: report reset values if math was never used. */
			if (!tsk_used_math(tsk)) {
				if (off == (long)(&dummy->fpu.fpscr >> 2))
					tmp = FPSCR_INIT;
				else
					tmp = 0;
			} else
				tmp = ((long *)(&tsk->thread.fpu >> 2))
					[off - (long)&dummy->fpu];
		} else if (off == (long)(&dummy->u_fpvalid >> 2))
			tmp = !!tsk_used_math(tsk);
#endif /* not NO_FPU */
		else
			tmp = 0;
	}

	return put_user(tmp, data);
}
/*
 * PTRACE_POKEUSR: write @data at byte offset @off of the
 * "struct user" image of @tsk.  evb/bpc/spi are silently ignored,
 * and only the condition bit may be changed inside psw.
 * Returns 0 on success, -EIO otherwise.
 */
static int ptrace_write_user(struct task_struct *tsk, unsigned long off,
			     unsigned long data)
{
	int ret = -EIO;
#ifndef NO_FPU
	/* Never dereferenced: only used to compute struct user field offsets. */
	struct user * dummy = NULL;
#endif

	/* Reject unaligned or out-of-range offsets. */
	if ((off & 3) || off > sizeof(struct user) - 3)
		return -EIO;

	off >>= 2;	/* byte offset -> word index */
	switch (off) {
	case PT_EVB:
	case PT_BPC:
	case PT_SPI:
		/* We don't allow to modify evb. */
		ret = 0;
		break;
	case PT_PSW:
	case PT_CBR: {
			/* We allow to modify only cbr in psw */
			unsigned long psw;
			psw = get_stack_long(tsk, PT_PSW);
			psw = (psw & ~0x100) | ((data & 1) << 8);
			ret = put_stack_long(tsk, PT_PSW, psw);
		}
		break;
	case PT_PC:
		/* PC maps to the backup PC; bit 0 is cleared (presumably to
		 * keep the address halfword aligned — TODO confirm). */
		off = PT_BPC;
		data &= ~1;
		/* fall through */
	default:
		if (off < (sizeof(struct pt_regs) >> 2))
			ret = put_stack_long(tsk, off, data);
#ifndef NO_FPU
		else if (off >= (long)(&dummy->fpu >> 2) &&
			 off < (long)(&dummy->u_fpvalid >> 2)) {
			/* Writing an FPU slot marks the child as math-using. */
			set_stopped_child_used_math(tsk);
			((long *)&tsk->thread.fpu)
				[off - (long)&dummy->fpu] = data;
			ret = 0;
		} else if (off == (long)(&dummy->u_fpvalid >> 2)) {
			conditional_stopped_child_used_math(data, tsk);
			ret = 0;
		}
#endif /* not NO_FPU */
		break;
	}

	return ret;
}
/*
* Get all user integer registers.
*/
/*
 * Copy the child's complete saved register set out to user space.
 * Returns 0 on success, -EFAULT on a failed copy.
 */
static int ptrace_getregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs *regs = task_pt_regs(tsk);

	if (copy_to_user(uregs, regs, sizeof(struct pt_regs)))
		return -EFAULT;
	return 0;
}
/*
* Set all user integer registers.
*/
/*
 * Replace the child's complete saved register set from user space.
 * Returns 0 on success, -EFAULT on a failed copy.
 */
static int ptrace_setregs(struct task_struct *tsk, void __user *uregs)
{
	struct pt_regs newregs;

	if (copy_from_user(&newregs, uregs, sizeof(struct pt_regs)) != 0)
		return -EFAULT;

	*task_pt_regs(tsk) = newregs;
	return 0;
}
/* Return the child's condition bit (bit 8 of the saved psw) as 0/1. */
static inline int
check_condition_bit(struct task_struct *child)
{
	unsigned long psw = get_stack_long(child, PT_PSW);

	return (int)((psw >> 8) & 1);
}
/*
 * Evaluate a compare-and-branch condition (@op, the branch opcode)
 * against the child's saved registers; returns non-zero if the
 * branch would be taken.  Only BEQ/BNE use both register operands.
 */
static int
check_condition_src(unsigned long op, unsigned long regno1,
		    unsigned long regno2, struct task_struct *child)
{
	unsigned long src2 = get_stack_long(child, reg_offset[regno2]);

	switch (op) {
	case 0x0:	/* BEQ */
		return get_stack_long(child, reg_offset[regno1]) == src2;
	case 0x1:	/* BNE */
		return get_stack_long(child, reg_offset[regno1]) != src2;
	case 0x8:	/* BEQZ */
		return src2 == 0;
	case 0x9:	/* BNEZ */
		return src2 != 0;
	case 0xa:	/* BLTZ */
		return (int)src2 < 0;
	case 0xb:	/* BGEZ */
		return (int)src2 >= 0;
	case 0xc:	/* BLEZ */
		return (int)src2 <= 0;
	case 0xd:	/* BGTZ */
		return (int)src2 > 0;
	default:
		/* never reached: callers pass only the opcodes above */
		return 0;
	}
}
/*
 * Decode a 16-bit instruction and predict the pc the child will
 * execute next (used when planting the single-step trap).  Bit 1 of
 * @pc selects which halfword slot of the fetched word holds the
 * instruction.
 */
static void
compute_next_pc_for_16bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op, op2, op3;
	unsigned long disp;
	unsigned long regno;
	int parallel = 0;

	/* Bit 15 of the fetched word flags a parallel (4-byte) word. */
	if (insn & 0x00008000)
		parallel = 1;
	if (pc & 3)
		insn &= 0x7fff;	/* right slot */
	else
		insn >>= 16;	/* left slot */

	op = (insn >> 12) & 0xf;
	op2 = (insn >> 8) & 0xf;
	op3 = (insn >> 4) & 0xf;

	if (op == 0x7) {
		/* 8-bit pc-relative branches. */
		switch (op2) {
		case 0xd:	/* BNC */
		case 0x9:	/* BNCL */
			if (!check_condition_bit(child)) {
				/* sign-extend the 8-bit disp, scaled by 4 */
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8:	/* BCL */
		case 0xc:	/* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 24) >> 22;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe:	/* BL */
		case 0xf:	/* BRA */
			/* unconditional: always taken */
			disp = (long)(insn << 24) >> 22;
			*next_pc = (pc & ~0x3) + disp;
			return;
			break;
		}
	} else if (op == 0x1) {
		/* Register-indirect jumps, traps and returns. */
		switch (op2) {
		case 0x0:
			if (op3 == 0xf) {	/* TRAP */
#if 1
				/* pass through */
#else
				/* kernel space is not allowed as next_pc */
				unsigned long evb;
				unsigned long trapno;
				trapno = insn & 0xf;
				__asm__ __volatile__ (
					"mvfc %0, cr5\n"
					:"=r"(evb)
					:
				);
				*next_pc = evb + (trapno << 2);
				return;
#endif
			} else if (op3 == 0xd) {	/* RTE */
				*next_pc = get_stack_long(child, PT_BPC);
				return;
			}
			break;
		case 0xc:	/* JC */
			if (op3 == 0xc && check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xd:	/* JNC */
			if (op3 == 0xc && !check_condition_bit(child)) {
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		case 0xe:	/* JL */
		case 0xf:	/* JMP */
			if (op3 == 0xc) {	/* JMP */
				regno = insn & 0xf;
				*next_pc = get_stack_long(child,
							  reg_offset[regno]);
				return;
			}
			break;
		}
	}

	/* Not a taken branch: fall through sequentially. */
	if (parallel)
		*next_pc = pc + 4;
	else
		*next_pc = pc + 2;
}
/*
 * Decode a 32-bit instruction and predict the pc the child will
 * execute next.  Only the two branch formats can divert control;
 * everything else falls through to pc + 4.
 */
static void
compute_next_pc_for_32bit_insn(unsigned long insn, unsigned long pc,
			       unsigned long *next_pc,
			       struct task_struct *child)
{
	unsigned long op;
	unsigned long op2;
	unsigned long disp;
	unsigned long regno1, regno2;

	op = (insn >> 28) & 0xf;
	if (op == 0xf) { 	/* branch 24-bit relative */
		op2 = (insn >> 24) & 0xf;
		switch (op2) {
		case 0xd:	/* BNC */
		case 0x9:	/* BNCL */
			if (!check_condition_bit(child)) {
				/* sign-extend the 24-bit disp, scaled by 4 */
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0x8:	/* BCL */
		case 0xc:	/* BC */
			if (check_condition_bit(child)) {
				disp = (long)(insn << 8) >> 6;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		case 0xe:	/* BL */
		case 0xf:	/* BRA */
			/* unconditional: always taken */
			disp = (long)(insn << 8) >> 6;
			*next_pc = (pc & ~0x3) + disp;
			return;
		}
	} else if (op == 0xb) { /* branch 16-bit relative */
		op2 = (insn >> 20) & 0xf;
		switch (op2) {
		case 0x0:	/* BEQ */
		case 0x1:	/* BNE */
		case 0x8:	/* BEQZ */
		case 0x9:	/* BNEZ */
		case 0xa:	/* BLTZ */
		case 0xb:	/* BGEZ */
		case 0xc:	/* BLEZ */
		case 0xd:	/* BGTZ */
			/* compare-and-branch: condition read from registers */
			regno1 = ((insn >> 24) & 0xf);
			regno2 = ((insn >> 16) & 0xf);
			if (check_condition_src(op2, regno1, regno2, child)) {
				/* sign-extend the 16-bit disp, scaled by 4 */
				disp = (long)(insn << 16) >> 14;
				*next_pc = (pc & ~0x3) + disp;
				return;
			}
			break;
		}
	}
	*next_pc = pc + 4;
}
static inline void
compute_next_pc(unsigned long insn, unsigned long pc,
unsigned long *next_pc, struct task_struct *child)
{
if (insn & 0x80000000)
compute_next_pc_for_32bit_insn(insn, pc, next_pc, child);
else
compute_next_pc_for_16bit_insn(insn, pc, next_pc, child);
}
/*
 * Record the original instruction word at @next_pc in the per-task
 * trap table and build (in *@code) the word that embeds a TRAP1
 * opcode in the correct halfword slot.  Returns 0 on success, -1 if
 * the table is full.
 */
static int
register_debug_trap(struct task_struct *child, unsigned long next_pc,
		    unsigned long next_insn, unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	unsigned long addr = next_pc & ~3;

	if (p->nr_trap == MAX_TRAPS) {
		printk("kernel BUG at %s %d: p->nr_trap = %d\n",
		       __FILE__, __LINE__, p->nr_trap);
		return -1;
	}
	p->addr[p->nr_trap] = addr;
	p->insn[p->nr_trap] = next_insn;
	p->nr_trap++;
	if (next_pc & 3) {
		/* breakpoint lands in the right halfword slot */
		*code = (next_insn & 0xffff0000) | 0x10f1;
		/* xxx --> TRAP1 */
	} else {
		if ((next_insn & 0x80000000) || (next_insn & 0x8000)) {
			/* 32-bit or parallel word: replace it entirely */
			*code = 0x10f17000;
			/* TRAP1 --> NOP */
		} else {
			/* keep the right halfword, trap in the left slot */
			*code = (next_insn & 0xffff) | 0x10f10000;
			/* TRAP1 --> xxx */
		}
	}
	return 0;
}
/*
 * Remove the trap registered at @addr, handing the saved original
 * instruction back through *@code.  Returns 1 if an entry was
 * removed, 0 if @addr was not ours (e.g. a debugger breakpoint).
 */
static int
unregister_debug_trap(struct task_struct *child, unsigned long addr,
		      unsigned long *code)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;

	/* Find the entry registered for this address, if any. */
	for (i = 0; i < p->nr_trap; i++)
		if (p->addr[i] == addr)
			break;
	if (i >= p->nr_trap) {
		/* The trap may be requested from debugger.
		 * ptrace should do nothing in this case.
		 */
		return 0;
	}

	/* Recover original instruction code. */
	*code = p->insn[i];

	/* Close the gap left by the removed entry. */
	p->nr_trap--;
	memmove(&p->insn[i], &p->insn[i + 1],
		(p->nr_trap - i) * sizeof(p->insn[0]));
	memmove(&p->addr[i], &p->addr[i + 1],
		(p->nr_trap - i) * sizeof(p->addr[0]));
	return 1;
}
/*
 * Restore every patched instruction word in the child's text
 * (access_process_vm with write=1) and empty the trap table.
 */
static void
unregister_all_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int i;

	for (i = 0; i < p->nr_trap; i++)
		access_process_vm(child, p->addr[i], &p->insn[i], sizeof(p->insn[i]), 1);
	p->nr_trap = 0;
}
/*
 * Flush/invalidate caches after the child's text has been patched,
 * so stale instructions are not executed.
 */
static inline void
invalidate_cache(void)
{
#if defined(CONFIG_CHIP_M32700) || defined(CONFIG_CHIP_OPSP)
	_flush_cache_copyback_all();
#else	/* ! CONFIG_CHIP_M32700 */
	/* Invalidate cache */
	__asm__ __volatile__ (
		"ldi r0, #-1 \n\t"
		"ldi r1, #0 \n\t"
		"stb r1, @r0 ; cache off \n\t"
		"; \n\t"
		"ldi r0, #-2 \n\t"
		"ldi r1, #1 \n\t"
		"stb r1, @r0 ; cache invalidate \n\t"
		".fillinsn \n"
		"0: \n\t"
		"ldb r1, @r0 ; invalidate check \n\t"
		"bnez r1, 0b \n\t"
		"; \n\t"
		"ldi r0, #-1 \n\t"
		"ldi r1, #1 \n\t"
		"stb r1, @r0 ; cache on \n\t"
		: : : "r0", "r1", "memory"
	);
	/* FIXME: copying-back d-cache and invalidating i-cache are needed.
	 */
#endif	/* CONFIG_CHIP_M32700 */
}
/*
 * Embed a debug trap (TRAP1) at @next_pc in the child's text:
 * read the word currently there, record it, and write back the
 * TRAP1-patched replacement.  Returns 0 on success, -1 on error.
 */
static int
embed_debug_trap(struct task_struct *child, unsigned long next_pc)
{
	unsigned long addr = next_pc & ~3;
	unsigned long insn, trap_code;

	/* Fetch the instruction word currently at next_pc. */
	if (access_process_vm(child, addr, &insn, sizeof(insn), 0)
	    != sizeof(insn))
		return -1;

	/* Remember it and build the TRAP1-patched word. */
	if (register_debug_trap(child, next_pc, insn, &trap_code))
		return -1;

	/* Write the patched word back into the child's text. */
	if (access_process_vm(child, addr, &trap_code, sizeof(trap_code), 1)
	    != sizeof(trap_code))
		return -1;

	return 0;
}
/*
 * Called after a single-step TRAP1 fires: step bpc back over the
 * 2-byte trap and, if the trap was one we planted, restore the
 * original instruction word and resync the caches.
 */
void
withdraw_debug_trap(struct pt_regs *regs)
{
	unsigned long addr;
	unsigned long code;

	addr = (regs->bpc - 2) & ~3;
	regs->bpc -= 2;
	if (unregister_debug_trap(current, addr, &code)) {
	    access_process_vm(current, addr, &code, sizeof(code), 1);
	    invalidate_cache();
	}
}
/* Reset the per-task debug-trap bookkeeping to an empty state. */
void
init_debug_traps(struct task_struct *child)
{
	struct debug_trap *p = &child->thread.debug_trap;
	int slot;

	p->nr_trap = 0;
	for (slot = 0; slot < MAX_TRAPS; slot++) {
		p->addr[slot] = 0;
		p->insn[slot] = 0;
	}
}
/*
 * Software single-step: predict the next pc the child will reach
 * and plant a TRAP1 there.  Silently gives up if the child's text
 * cannot be read/patched or the next pc is in the kernel range.
 */
void user_enable_single_step(struct task_struct *child)
{
	unsigned long next_pc;
	unsigned long pc, insn;

	clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);

	/* Compute next pc.  */
	pc = get_stack_long(child, PT_BPC);

	if (access_process_vm(child, pc&~3, &insn, sizeof(insn), 0)
	    != sizeof(insn))
		return;

	compute_next_pc(insn, pc, &next_pc, child);
	/* Top bit set: refuse to plant a trap there (kernel space). */
	if (next_pc & 0x80000000)
		return;

	if (embed_debug_trap(child, next_pc))
		return;

	invalidate_cache();
}
/* Remove every planted single-step trap and resync the caches. */
void user_disable_single_step(struct task_struct *child)
{
	unregister_all_debug_traps(child);
	invalidate_cache();
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure single step bits etc are not set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* nothing to do: single-step state is torn down separately
	 * via user_disable_single_step() */
}
/*
 * Arch-specific ptrace dispatcher: peek/poke of child memory and
 * the struct user area, plus GET/SETREGS; everything else falls
 * through to the generic ptrace_request().
 */
long
arch_ptrace(struct task_struct *child, long request,
	    unsigned long addr, unsigned long data)
{
	int ret;
	unsigned long __user *datap = (unsigned long __user *) data;

	switch (request) {
	/*
	 * read word at location "addr" in the child process.
	 */
	case PTRACE_PEEKTEXT:
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/*
	 * read the word at location addr in the USER area.
	 */
	case PTRACE_PEEKUSR:
		ret = ptrace_read_user(child, addr, datap);
		break;

	/*
	 * write the word at location addr.
	 */
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		/* patched text must be made visible to instruction fetch */
		if (ret == 0 && request == PTRACE_POKETEXT)
			invalidate_cache();
		break;

	/*
	 * write the word at location addr in the USER area.
	 */
	case PTRACE_POKEUSR:
		ret = ptrace_write_user(child, addr, data);
		break;

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datap);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datap);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}

	return ret;
}
/* notification of system call entry/exit
 * - triggered by current->work.syscall_trace
 */
void do_syscall_trace(void)
{
	if (!test_thread_flag(TIF_SYSCALL_TRACE))
		return;
	if (!(current->ptrace & PT_PTRACED))
		return;
	/* the 0x80 provides a way for the tracing parent to distinguish
	   between a syscall stop and SIGTRAP delivery */
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				 ? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	/* deliver any signal the debugger queued while we were stopped */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
| gpl-2.0 |
nian0114/android_kernel_zte_n918st | arch/arm/mach-ixp4xx/gtwx5715-setup.c | 4650 | 4744 | /*
* arch/arm/mach-ixp4xx/gtwx5715-setup.c
*
* Gemtek GTWX5715 (Linksys WRV54G) board setup
*
* Copyright (C) 2004 George T. Joseph
* Derived from Coyote
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
*/
#include <linux/init.h>
#include <linux/device.h>
#include <linux/serial.h>
#include <linux/tty.h>
#include <linux/serial_8250.h>
#include <asm/types.h>
#include <asm/setup.h>
#include <asm/memory.h>
#include <mach/hardware.h>
#include <asm/irq.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
/* GPIO 5,6,7 and 12 are hard wired to the Kendin KS8995M Switch
and operate as an SPI type interface. The details of the interface
are available on Kendin/Micrel's web site. */
#define GTWX5715_KSSPI_SELECT 5
#define GTWX5715_KSSPI_TXD 6
#define GTWX5715_KSSPI_CLOCK 7
#define GTWX5715_KSSPI_RXD 12
/* The "reset" button is wired to GPIO 3.
The GPIO is brought "low" when the button is pushed. */
#define GTWX5715_BUTTON_GPIO 3
/* Board Label Front Label
LED1 Power
LED2 Wireless-G
LED3 not populated but could be
LED4 Internet
LED5 - LED8 Controlled by KS8995M Switch
LED9 DMZ */
#define GTWX5715_LED1_GPIO 2
#define GTWX5715_LED2_GPIO 9
#define GTWX5715_LED3_GPIO 8
#define GTWX5715_LED4_GPIO 1
#define GTWX5715_LED9_GPIO 4
/*
* Xscale UART registers are 32 bits wide with only the least
* significant 8 bits having any meaning. From a configuration
* perspective, this means 2 things...
*
* Setting .regshift = 2 so that the standard 16550 registers
* line up on every 4th byte.
*
* Shifting the register start virtual address +3 bytes when
* compiled big-endian. Since register writes are done on a
* single byte basis, if the shift isn't done the driver will
* write the value into the most significant byte of the register,
* which is ignored, instead of the least significant.
*/
#ifdef __ARMEB__
#define REG_OFFSET 3
#else
#define REG_OFFSET 0
#endif
/*
* Only the second or "console" uart is connected on the gtwx5715.
*/
/* MMIO window and IRQ for UART2 — the only UART wired up on this board. */
static struct resource gtwx5715_uart_resources[] = {
	{
		.start	= IXP4XX_UART2_BASE_PHYS,
		.end	= IXP4XX_UART2_BASE_PHYS + 0x0fff,
		.flags	= IORESOURCE_MEM,
	},
	{
		.start	= IRQ_IXP4XX_UART2,
		.end	= IRQ_IXP4XX_UART2,
		.flags	= IORESOURCE_IRQ,
	},
	{ },
};

/* 8250 port description; regshift=2 and REG_OFFSET compensate for the
 * 32-bit-wide Xscale UART registers (see the comment above REG_OFFSET). */
static struct plat_serial8250_port gtwx5715_uart_platform_data[] = {
	{
		.mapbase	= IXP4XX_UART2_BASE_PHYS,
		.membase	= (char *)IXP4XX_UART2_BASE_VIRT + REG_OFFSET,
		.irq		= IRQ_IXP4XX_UART2,
		.flags		= UPF_BOOT_AUTOCONF | UPF_SKIP_TEST,
		.iotype		= UPIO_MEM,
		.regshift	= 2,
		.uartclk	= IXP4XX_UART_XTAL,
	},
	{ },
};

/* Platform device binding the resources above to the serial8250 driver. */
static struct platform_device gtwx5715_uart_device = {
	.name		= "serial8250",
	.id		= PLAT8250_DEV_PLATFORM,
	.dev		= {
		.platform_data	= gtwx5715_uart_platform_data,
	},
	.num_resources	= 2,
	.resource	= gtwx5715_uart_resources,
};
/* CFI NOR flash on the expansion bus; the resource addresses are
 * filled in by gtwx5715_init().  Note: the address-of expressions
 * below were corrupted by an HTML-entity mangle ("&gt;" for "&g");
 * restored to valid C. */
static struct flash_platform_data gtwx5715_flash_data = {
	.map_name	= "cfi_probe",
	.width		= 2,
};

static struct resource gtwx5715_flash_resource = {
	.flags		= IORESOURCE_MEM,
};

static struct platform_device gtwx5715_flash = {
	.name		= "IXP4XX-Flash",
	.id		= 0,
	.dev		= {
		.platform_data = &gtwx5715_flash_data,
	},
	.num_resources	= 1,
	.resource	= &gtwx5715_flash_resource,
};
/* Devices registered by gtwx5715_init().  The "&" operators were
 * corrupted by an HTML-entity mangle ("&gt;"); restored to valid C. */
static struct platform_device *gtwx5715_devices[] __initdata = {
	&gtwx5715_uart_device,
	&gtwx5715_flash,
};
/* Board init: set up the SoC, locate the flash window, register devices. */
static void __init gtwx5715_init(void)
{
	ixp4xx_sys_init();

	/* Flash occupies the first 8MB of expansion-bus chip select 0. */
	gtwx5715_flash_resource.start = IXP4XX_EXP_BUS_BASE(0);
	gtwx5715_flash_resource.end = IXP4XX_EXP_BUS_BASE(0) + SZ_8M - 1;

	platform_add_devices(gtwx5715_devices, ARRAY_SIZE(gtwx5715_devices));
}
/* Machine descriptor: wires the board into the generic IXP4xx callbacks. */
MACHINE_START(GTWX5715, "Gemtek GTWX5715 (Linksys WRV54G)")
	/* Maintainer: George Joseph */
	.map_io		= ixp4xx_map_io,
	.init_early	= ixp4xx_init_early,
	.init_irq	= ixp4xx_init_irq,
	.init_time	= ixp4xx_timer_init,
	.atag_offset	= 0x100,
	.init_machine	= gtwx5715_init,
#if defined(CONFIG_PCI)
	.dma_zone_size	= SZ_64M,
#endif
	.restart	= ixp4xx_restart,
MACHINE_END
| gpl-2.0 |
issi5862/ishida_jbd2_linux-2.0 | net/ax25/ax25_dev.c | 4650 | 4784 | /*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2 of the License, or
* (at your option) any later version.
*
* Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk)
*/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/string.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/spinlock.h>
#include <net/ax25.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <asm/uaccess.h>
#include <linux/fcntl.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/init.h>
/* Singly-linked list of all AX.25 devices, guarded by ax25_dev_lock. */
ax25_dev *ax25_dev_list;
DEFINE_SPINLOCK(ax25_dev_lock);
/*
 * Look up the ax25_dev whose interface hardware address matches
 * @addr; returns NULL if none.  Fix: stop at the first match —
 * the original kept walking the whole list under the lock after a
 * hit, needlessly (and returned the *last* duplicate, if any).
 */
ax25_dev *ax25_addr_ax25dev(ax25_address *addr)
{
	ax25_dev *ax25_dev, *res = NULL;

	spin_lock_bh(&ax25_dev_lock);
	for (ax25_dev = ax25_dev_list; ax25_dev != NULL; ax25_dev = ax25_dev->next)
		if (ax25cmp(addr, (ax25_address *)ax25_dev->dev->dev_addr) == 0) {
			res = ax25_dev;
			break;
		}
	spin_unlock_bh(&ax25_dev_lock);

	return res;
}
/*
 *	This is called when an interface is brought up. These are
 *	reasonable defaults.
 */
void ax25_dev_device_up(struct net_device *dev)
{
	ax25_dev *ax25_dev;

	if ((ax25_dev = kzalloc(sizeof(*ax25_dev), GFP_ATOMIC)) == NULL) {
		printk(KERN_ERR "AX.25: ax25_dev_device_up - out of memory\n");
		return;
	}

	/* Link the control block to the device and hold a device ref
	 * for the control block's lifetime (dropped in device_down/free). */
	dev->ax25_ptr     = ax25_dev;
	ax25_dev->dev     = dev;
	dev_hold(dev);
	ax25_dev->forward = NULL;

	/* Protocol parameter defaults (AX25_DEF_* from net/ax25.h). */
	ax25_dev->values[AX25_VALUES_IPDEFMODE] = AX25_DEF_IPDEFMODE;
	ax25_dev->values[AX25_VALUES_AXDEFMODE] = AX25_DEF_AXDEFMODE;
	ax25_dev->values[AX25_VALUES_BACKOFF]   = AX25_DEF_BACKOFF;
	ax25_dev->values[AX25_VALUES_CONMODE]   = AX25_DEF_CONMODE;
	ax25_dev->values[AX25_VALUES_WINDOW]    = AX25_DEF_WINDOW;
	ax25_dev->values[AX25_VALUES_EWINDOW]   = AX25_DEF_EWINDOW;
	ax25_dev->values[AX25_VALUES_T1]        = AX25_DEF_T1;
	ax25_dev->values[AX25_VALUES_T2]        = AX25_DEF_T2;
	ax25_dev->values[AX25_VALUES_T3]        = AX25_DEF_T3;
	ax25_dev->values[AX25_VALUES_IDLE]	= AX25_DEF_IDLE;
	ax25_dev->values[AX25_VALUES_N2]        = AX25_DEF_N2;
	ax25_dev->values[AX25_VALUES_PACLEN]	= AX25_DEF_PACLEN;
	ax25_dev->values[AX25_VALUES_PROTOCOL]  = AX25_DEF_PROTOCOL;
	ax25_dev->values[AX25_VALUES_DS_TIMEOUT]= AX25_DEF_DS_TIMEOUT;

#if defined(CONFIG_AX25_DAMA_SLAVE) || defined(CONFIG_AX25_DAMA_MASTER)
	ax25_ds_setup_timer(ax25_dev);
#endif

	/* Push onto the head of the global device list. */
	spin_lock_bh(&ax25_dev_lock);
	ax25_dev->next = ax25_dev_list;
	ax25_dev_list  = ax25_dev;
	spin_unlock_bh(&ax25_dev_lock);

	ax25_register_dev_sysctl(ax25_dev);
}
/*
 * Tear down the AX.25 control block when an interface goes down:
 * clear forwarding pointers at it, unlink it from ax25_dev_list,
 * detach dev->ax25_ptr and release the device reference.
 *
 * Fix: the original cleared dev->ax25_ptr only on the not-found
 * path; whenever the entry WAS unlinked and kfree()d, dev->ax25_ptr
 * was left dangling at freed memory (use-after-free window for any
 * later reader).  It is now cleared, under the list lock, on every
 * path before the structure is freed.
 */
void ax25_dev_device_down(struct net_device *dev)
{
	ax25_dev *s, *ax25_dev;

	if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL)
		return;

	ax25_unregister_dev_sysctl(ax25_dev);

	spin_lock_bh(&ax25_dev_lock);

#ifdef CONFIG_AX25_DAMA_SLAVE
	ax25_ds_del_timer(ax25_dev);
#endif

	/*
	 *	Remove any packet forwarding that points to this device.
	 */
	for (s = ax25_dev_list; s != NULL; s = s->next)
		if (s->forward == dev)
			s->forward = NULL;

	/* Unlink ax25_dev from the global list, if present. */
	if (ax25_dev_list == ax25_dev) {
		ax25_dev_list = ax25_dev->next;
		goto unlinked;
	}
	for (s = ax25_dev_list; s != NULL && s->next != NULL; s = s->next) {
		if (s->next == ax25_dev) {
			s->next = ax25_dev->next;
			goto unlinked;
		}
	}

	/* Not on the list: nothing to free, just detach the pointer. */
	dev->ax25_ptr = NULL;
	spin_unlock_bh(&ax25_dev_lock);
	return;

unlinked:
	dev->ax25_ptr = NULL;	/* clear before freeing: no dangling pointer */
	spin_unlock_bh(&ax25_dev_lock);
	dev_put(dev);
	kfree(ax25_dev);
}
/*
 * SIOCAX25ADDFWD / SIOCAX25DELFWD: set or clear the one-hop packet
 * forwarding pointer of the port named in @fwd->port_from.
 * Returns 0 on success, -EINVAL on unknown port, unknown command,
 * or an add/delete that conflicts with the current state.
 */
int ax25_fwd_ioctl(unsigned int cmd, struct ax25_fwd_struct *fwd)
{
	ax25_dev *ax25_dev, *fwd_dev;

	if ((ax25_dev = ax25_addr_ax25dev(&fwd->port_from)) == NULL)
		return -EINVAL;

	switch (cmd) {
	case SIOCAX25ADDFWD:
		if ((fwd_dev = ax25_addr_ax25dev(&fwd->port_to)) == NULL)
			return -EINVAL;
		/* refuse to overwrite an existing forwarding target */
		if (ax25_dev->forward != NULL)
			return -EINVAL;
		ax25_dev->forward = fwd_dev->dev;
		break;

	case SIOCAX25DELFWD:
		if (ax25_dev->forward == NULL)
			return -EINVAL;
		ax25_dev->forward = NULL;
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
/*
 * Resolve the forwarding target for @dev: the configured forward
 * device if one is set, otherwise @dev itself.
 */
struct net_device *ax25_fwd_dev(struct net_device *dev)
{
	ax25_dev *ax25_dev = ax25_dev_ax25dev(dev);

	if (ax25_dev != NULL && ax25_dev->forward != NULL)
		return ax25_dev->forward;
	return dev;
}
/*
 *	Free all memory associated with device structures.
 */
void __exit ax25_dev_free(void)
{
	ax25_dev *cur, *next;

	spin_lock_bh(&ax25_dev_lock);
	for (cur = ax25_dev_list; cur != NULL; cur = next) {
		next = cur->next;
		dev_put(cur->dev);	/* drop the ref taken at device_up */
		kfree(cur);
	}
	ax25_dev_list = NULL;
	spin_unlock_bh(&ax25_dev_lock);
}
| gpl-2.0 |
mi3-dev/android_kernel_xiaomi_msm8x74pro | drivers/pnp/pnpbios/bioscalls.c | 7466 | 13188 | /*
* bioscalls.c - the lowlevel layer of the PnPBIOS driver
*/
#include <linux/types.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/pnp.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kmod.h>
#include <linux/completion.h>
#include <linux/spinlock.h>
#include <asm/page.h>
#include <asm/desc.h>
#include <asm/byteorder.h>
#include "pnpbios.h"
/* Far pointer to the 16-bit PnP BIOS entry point; filled in by
 * pnpbios_calls_init() and targeted by the lcallw in the trampoline. */
static struct {
	u16 offset;
	u16 segment;
} pnp_bios_callpoint;
/*
* These are some opcodes for a "static asmlinkage"
* As this code is *not* executed inside the linux kernel segment, but in a
* alias at offset 0, we need a far return that can not be compiled by
* default (please, prove me wrong! this is *really* ugly!)
* This is the only way to get the bios to return into the kernel code,
* because the bios code runs in 16 bit protected mode and therefore can only
* return to the caller if the call is within the first 64kB, and the linux
* kernel begins at offset 3GB...
*/
/* Hand-rolled far-call trampoline into the 16-bit BIOS entry point
 * (see the explanation in the comment above). */
asmlinkage void pnp_bios_callfunc(void);

__asm__(".text \n"
	__ALIGN_STR "\n"
	"pnp_bios_callfunc:\n"
	" pushl %edx \n"
	" pushl %ecx \n"
	" pushl %ebx \n"
	" pushl %eax \n"
	" lcallw *pnp_bios_callpoint\n"
	" addl $16, %esp \n"
	" lret \n"
	".previous \n");
/* Point GDT selector @selname on @cpu at @address/@size so the 16-bit
 * BIOS code can reach a kernel buffer through that segment. */
#define Q2_SET_SEL(cpu, selname, address, size) \
do { \
	struct desc_struct *gdt = get_cpu_gdt_table((cpu)); \
	set_desc_base(&gdt[(selname) >> 3], (u32)(address)); \
	set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
} while(0)

/* Writable data segment covering 0x400..PAGE_SIZE-1; swapped into GDT
 * entry 0x40 around every BIOS call (see call_pnp_bios). */
static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
			(unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
/*
 * At some point we want to use this stack frame pointer to unwind
 * after PnP BIOS oopses.
 */
u32 pnp_bios_fault_esp;		/* %esp captured just before the far call */
u32 pnp_bios_fault_eip;		/* recovery EIP (label 1: in call_pnp_bios) */
u32 pnp_bios_is_utter_crap = 0;	/* checked before every call; set elsewhere
				 * once the BIOS has faulted on us */

/* Serializes all BIOS calls; PnP BIOSes are not re-entrant. */
static spinlock_t pnp_bios_lock;
/*
* Support Functions
*/
/*
 * Marshal up to seven 16-bit arguments plus two buffer segments and
 * far-call the 16-bit PnP BIOS entry point.  Calls are serialized by
 * pnp_bios_lock with IRQs disabled, and GDT entry 0x40 is temporarily
 * replaced for the duration.  Returns the BIOS status word.
 */
static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
				u16 arg4, u16 arg5, u16 arg6, u16 arg7,
				void *ts1_base, u32 ts1_size,
				void *ts2_base, u32 ts2_size)
{
	unsigned long flags;
	u16 status;
	struct desc_struct save_desc_40;
	int cpu;

	/*
	 * PnP BIOSes are generally not terribly re-entrant.
	 * Also, don't rely on them to save everything correctly.
	 */
	if (pnp_bios_is_utter_crap)
		return PNP_FUNCTION_NOT_SUPPORTED;

	/* Swap bad_bios_desc into GDT entry 0x40 for this CPU. */
	cpu = get_cpu();
	save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
	get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;

	/* On some boxes IRQ's during PnP BIOS calls are deadly. */
	spin_lock_irqsave(&pnp_bios_lock, flags);

	/* The lock prevents us bouncing CPU here */
	if (ts1_size)
		Q2_SET_SEL(smp_processor_id(), PNP_TS1, ts1_base, ts1_size);
	if (ts2_size)
		Q2_SET_SEL(smp_processor_id(), PNP_TS2, ts2_base, ts2_size);

	/* Save fault-recovery state, then far-call the BIOS trampoline. */
	__asm__ __volatile__("pushl %%ebp\n\t"
			     "pushl %%edi\n\t"
			     "pushl %%esi\n\t"
			     "pushl %%ds\n\t"
			     "pushl %%es\n\t"
			     "pushl %%fs\n\t"
			     "pushl %%gs\n\t"
			     "pushfl\n\t"
			     "movl %%esp, pnp_bios_fault_esp\n\t"
			     "movl $1f, pnp_bios_fault_eip\n\t"
			     "lcall %5,%6\n\t"
			     "1:popfl\n\t"
			     "popl %%gs\n\t"
			     "popl %%fs\n\t"
			     "popl %%es\n\t"
			     "popl %%ds\n\t"
			     "popl %%esi\n\t"
			     "popl %%edi\n\t"
			     "popl %%ebp\n\t":"=a"(status)
			     :"0"((func) | (((u32) arg1) << 16)),
			     "b"((arg2) | (((u32) arg3) << 16)),
			     "c"((arg4) | (((u32) arg5) << 16)),
			     "d"((arg6) | (((u32) arg7) << 16)),
			     "i"(PNP_CS32), "i"(0)
			     :"memory");
	spin_unlock_irqrestore(&pnp_bios_lock, flags);

	/* Restore the original GDT entry 0x40. */
	get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
	put_cpu();

	/* If we get here and this is set then the PnP BIOS faulted on us. */
	if (pnp_bios_is_utter_crap) {
		printk(KERN_ERR
		       "PnPBIOS: Warning! Your PnP BIOS caused a fatal error. Attempting to continue\n");
		printk(KERN_ERR
		       "PnPBIOS: You may need to reboot with the \"pnpbios=off\" option to operate stably\n");
		printk(KERN_ERR
		       "PnPBIOS: Check with your vendor for an updated BIOS\n");
	}

	return status;
}
/*
 * Decode a PnP BIOS status word into a human-readable kernel log
 * message.  Note: every message — including PNP_SUCCESS — is
 * emitted at KERN_ERR severity.
 */
void pnpbios_print_status(const char *module, u16 status)
{
	switch (status) {
	case PNP_SUCCESS:
		printk(KERN_ERR "PnPBIOS: %s: function successful\n", module);
		break;
	case PNP_NOT_SET_STATICALLY:
		printk(KERN_ERR "PnPBIOS: %s: unable to set static resources\n",
		       module);
		break;
	case PNP_UNKNOWN_FUNCTION:
		printk(KERN_ERR "PnPBIOS: %s: invalid function number passed\n",
		       module);
		break;
	case PNP_FUNCTION_NOT_SUPPORTED:
		printk(KERN_ERR
		       "PnPBIOS: %s: function not supported on this system\n",
		       module);
		break;
	case PNP_INVALID_HANDLE:
		printk(KERN_ERR "PnPBIOS: %s: invalid handle\n", module);
		break;
	case PNP_BAD_PARAMETER:
		printk(KERN_ERR "PnPBIOS: %s: invalid parameters were passed\n",
		       module);
		break;
	case PNP_SET_FAILED:
		printk(KERN_ERR "PnPBIOS: %s: unable to set resources\n",
		       module);
		break;
	case PNP_EVENTS_NOT_PENDING:
		printk(KERN_ERR "PnPBIOS: %s: no events are pending\n", module);
		break;
	case PNP_SYSTEM_NOT_DOCKED:
		printk(KERN_ERR "PnPBIOS: %s: the system is not docked\n",
		       module);
		break;
	case PNP_NO_ISA_PNP_CARDS:
		printk(KERN_ERR
		       "PnPBIOS: %s: no isapnp cards are installed on this system\n",
		       module);
		break;
	case PNP_UNABLE_TO_DETERMINE_DOCK_CAPABILITIES:
		printk(KERN_ERR
		       "PnPBIOS: %s: cannot determine the capabilities of the docking station\n",
		       module);
		break;
	case PNP_CONFIG_CHANGE_FAILED_NO_BATTERY:
		printk(KERN_ERR
		       "PnPBIOS: %s: unable to undock, the system does not have a battery\n",
		       module);
		break;
	case PNP_CONFIG_CHANGE_FAILED_RESOURCE_CONFLICT:
		printk(KERN_ERR
		       "PnPBIOS: %s: could not dock due to resource conflicts\n",
		       module);
		break;
	case PNP_BUFFER_TOO_SMALL:
		printk(KERN_ERR "PnPBIOS: %s: the buffer passed is too small\n",
		       module);
		break;
	case PNP_USE_ESCD_SUPPORT:
		printk(KERN_ERR "PnPBIOS: %s: use ESCD instead\n", module);
		break;
	case PNP_MESSAGE_NOT_SUPPORTED:
		printk(KERN_ERR "PnPBIOS: %s: the message is unsupported\n",
		       module);
		break;
	case PNP_HARDWARE_ERROR:
		printk(KERN_ERR "PnPBIOS: %s: a hardware failure has occurred\n",
		       module);
		break;
	default:
		printk(KERN_ERR "PnPBIOS: %s: unexpected status 0x%x\n", module,
		       status);
		break;
	}
}
/*
* PnP BIOS Low Level Calls
*/
/* PnP BIOS 16-bit entry-point function numbers (first argument to
 * call_pnp_bios). */
#define PNP_GET_NUM_SYS_DEV_NODES		0x00
#define PNP_GET_SYS_DEV_NODE			0x01
#define PNP_SET_SYS_DEV_NODE			0x02
#define PNP_GET_EVENT				0x03
#define PNP_SEND_MESSAGE			0x04
#define PNP_GET_DOCKING_STATION_INFORMATION	0x05
#define PNP_SET_STATIC_ALLOCED_RES_INFO		0x09
#define PNP_GET_STATIC_ALLOCED_RES_INFO		0x0a
#define PNP_GET_APM_ID_TABLE			0x0b
#define PNP_GET_PNP_ISA_CONFIG_STRUC		0x40
#define PNP_GET_ESCD_INFO			0x41
#define PNP_READ_ESCD				0x42
#define PNP_WRITE_ESCD				0x43
/*
* Call PnP BIOS with function 0x00, "get number of system device nodes"
*/
/* Raw BIOS call: query the number/size of system device nodes. */
static int __pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
{
	u16 status;

	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_GET_NUM_SYS_DEV_NODES, 0, PNP_TS1, 2,
			       PNP_TS1, PNP_DS, 0, 0, data,
			       sizeof(struct pnp_dev_node_info), NULL, 0);
	/* only the low byte of the node count is kept (presumably the
	 * high byte is not part of the count) */
	data->no_nodes &= 0xff;
	return status;
}
/* Logging wrapper: report any failure from the raw BIOS call. */
int pnp_bios_dev_node_info(struct pnp_dev_node_info *data)
{
	int status;

	status = __pnp_bios_dev_node_info(data);
	if (status)
		pnpbios_print_status("dev_node_info", status);
	return status;
}
/*
* Note that some PnP BIOSes (e.g., on Sony Vaio laptops) die a horrible
* death if they are asked to access the "current" configuration.
* Therefore, if it's a matter of indifference, it's better to call
* get_dev_node() and set_dev_node() with boot=1 rather than with boot=0.
*/
/*
* Call PnP BIOS with function 0x01, "get system device node"
* Input: *nodenum = desired node,
* boot = whether to get nonvolatile boot (!=0)
* or volatile current (0) config
* Output: *nodenum=next node or 0xff if no more nodes
*/
/* Raw BIOS call: fetch one system device node (see the usage comment
 * above for the *nodenum in/out convention). */
static int __pnp_bios_get_dev_node(u8 *nodenum, char boot,
				   struct pnp_bios_node *data)
{
	u16 status;
	u16 tmp_nodenum;

	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	if (!boot && pnpbios_dont_use_current_config)
		return PNP_FUNCTION_NOT_SUPPORTED;
	/* the BIOS writes a 16-bit node number, so stage it in a u16 */
	tmp_nodenum = *nodenum;
	status = call_pnp_bios(PNP_GET_SYS_DEV_NODE, 0, PNP_TS1, 0, PNP_TS2,
			       boot ? 2 : 1, PNP_DS, 0, &tmp_nodenum,
			       sizeof(tmp_nodenum), data, 65536);
	*nodenum = tmp_nodenum;
	return status;
}
/* Logging wrapper: report any failure from the raw BIOS call. */
int pnp_bios_get_dev_node(u8 *nodenum, char boot, struct pnp_bios_node *data)
{
	int status = __pnp_bios_get_dev_node(nodenum, boot, data);

	if (status)
		pnpbios_print_status("get_dev_node", status);
	return status;
}
/*
* Call PnP BIOS with function 0x02, "set system device node"
* Input: *nodenum = desired node,
* boot = whether to set nonvolatile boot (!=0)
* or volatile current (0) config
*/
/* Raw BIOS call: write one system device node (boot != 0 selects the
 * nonvolatile configuration, 0 the volatile current one). */
static int __pnp_bios_set_dev_node(u8 nodenum, char boot,
				   struct pnp_bios_node *data)
{
	u16 status;

	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	if (!boot && pnpbios_dont_use_current_config)
		return PNP_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_SET_SYS_DEV_NODE, nodenum, 0, PNP_TS1,
			       boot ? 2 : 1, PNP_DS, 0, 0, data, 65536, NULL,
			       0);
	return status;
}
/* Logging wrapper around __pnp_bios_set_dev_node(); after a volatile
 * (current-config) write it re-reads the node so the caller's buffer
 * reflects what the BIOS actually stored. */
int pnp_bios_set_dev_node(u8 nodenum, char boot, struct pnp_bios_node *data)
{
	int status;

	status = __pnp_bios_set_dev_node(nodenum, boot, data);
	if (status) {
		pnpbios_print_status("set_dev_node", status);
		return status;
	}
	if (!boot) {		/* Update devlist */
		status = pnp_bios_get_dev_node(&nodenum, boot, data);
		if (status)
			return status;
	}
	return status;
}
/*
* Call PnP BIOS with function 0x05, "get docking station information"
*/
/* Query docking station information; returns the raw BIOS status
 * (this one is not routed through pnpbios_print_status). */
int pnp_bios_dock_station_info(struct pnp_docking_station_info *data)
{
	u16 status;

	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_GET_DOCKING_STATION_INFORMATION, 0, PNP_TS1,
			       PNP_DS, 0, 0, 0, 0, data,
			       sizeof(struct pnp_docking_station_info), NULL,
			       0);
	return status;
}
/*
* Call PnP BIOS with function 0x0a, "get statically allocated resource
* information"
*/
/* Raw BIOS call: read the statically allocated resource information
 * into @info (buffer assumed large enough for the 64KB segment). */
static int __pnp_bios_get_stat_res(char *info)
{
	u16 status;

	if (!pnp_bios_present())
		return PNP_FUNCTION_NOT_SUPPORTED;
	status = call_pnp_bios(PNP_GET_STATIC_ALLOCED_RES_INFO, 0, PNP_TS1,
			       PNP_DS, 0, 0, 0, 0, info, 65536, NULL, 0);
	return status;
}
/*
 * Public wrapper for "get statically allocated resource information":
 * logs any non-zero PnP BIOS status code before returning it.
 */
int pnp_bios_get_stat_res(char *info)
{
	int status = __pnp_bios_get_stat_res(info);

	if (status)
		pnpbios_print_status("get_stat_res", status);
	return status;
}
/*
* Call PnP BIOS with function 0x40, "get isa pnp configuration structure"
*/
/*
 * Low-level "get ISA PnP configuration structure"
 * (PnP BIOS function 0x40).  Fills *data on success.
 */
static int __pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
{
u16 status;
if (!pnp_bios_present())
return PNP_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_PNP_ISA_CONFIG_STRUC, 0, PNP_TS1, PNP_DS,
0, 0, 0, 0, data,
sizeof(struct pnp_isa_config_struc), NULL, 0);
return status;
}
/*
 * Public wrapper for "get ISA PnP configuration structure":
 * logs any non-zero PnP BIOS status code before returning it.
 */
int pnp_bios_isapnp_config(struct pnp_isa_config_struc *data)
{
	int status = __pnp_bios_isapnp_config(data);

	if (status)
		pnpbios_print_status("isapnp_config", status);
	return status;
}
/*
* Call PnP BIOS with function 0x41, "get ESCD info"
*/
/*
 * Low-level "get ESCD info" (PnP BIOS function 0x41).
 * Fills *data (including the NVRAM base used by read_escd) on success.
 */
static int __pnp_bios_escd_info(struct escd_info_struc *data)
{
u16 status;
if (!pnp_bios_present())
return ESCD_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_GET_ESCD_INFO, 0, PNP_TS1, 2, PNP_TS1, 4,
PNP_TS1, PNP_DS, data,
sizeof(struct escd_info_struc), NULL, 0);
return status;
}
/*
 * Public wrapper for "get ESCD info": logs any non-zero PnP BIOS
 * status code before returning it.
 */
int pnp_bios_escd_info(struct escd_info_struc *data)
{
	int status = __pnp_bios_escd_info(data);

	if (status)
		pnpbios_print_status("escd_info", status);
	return status;
}
/*
* Call PnP BIOS function 0x42, "read ESCD"
* nvram_base is determined by calling escd_info
*/
/*
 * Low-level "read ESCD" (PnP BIOS function 0x42).
 * 'nvram_base' comes from a prior escd_info call and is mapped with
 * __va() so the 16-bit BIOS can copy up to 64KiB into 'data'.
 */
static int __pnp_bios_read_escd(char *data, u32 nvram_base)
{
u16 status;
if (!pnp_bios_present())
return ESCD_FUNCTION_NOT_SUPPORTED;
status = call_pnp_bios(PNP_READ_ESCD, 0, PNP_TS1, PNP_TS2, PNP_DS, 0, 0,
0, data, 65536, __va(nvram_base), 65536);
return status;
}
/*
 * Public wrapper for "read ESCD": logs any non-zero PnP BIOS status
 * code before returning it.
 */
int pnp_bios_read_escd(char *data, u32 nvram_base)
{
	int status = __pnp_bios_read_escd(data, nvram_base);

	if (status)
		pnpbios_print_status("read_escd", status);
	return status;
}
/*
 * One-time setup of the 16-bit protected-mode PnP BIOS entry point.
 * Records the entry offset/segment from the installation structure and
 * patches the PnP BIOS code/data descriptors into every possible CPU's
 * GDT so the far call works on whichever CPU makes it.
 */
void pnpbios_calls_init(union pnp_bios_install_struct *header)
{
int i;
spin_lock_init(&pnp_bios_lock);
pnp_bios_callpoint.offset = header->fields.pm16offset;
pnp_bios_callpoint.segment = PNP_CS16;
for_each_possible_cpu(i) {
struct desc_struct *gdt = get_cpu_gdt_table(i);
if (!gdt)
continue;
/* 32-bit stub that performs the far call into the BIOS */
set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS32],
(unsigned long)&pnp_bios_callfunc);
/* 16-bit BIOS code and data segments, mapped into kernel VA */
set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS16],
(unsigned long)__va(header->fields.pm16cseg));
set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
(unsigned long)__va(header->fields.pm16dseg));
}
}
| gpl-2.0 |
jthatch12/STi2_M7 | drivers/isdn/hisax/isdnl3.c | 8234 | 13046 | /* $Id: isdnl3.c,v 2.22.2.3 2004/01/13 14:31:25 keil Exp $
*
* Author Karsten Keil
* based on the teles driver from Jan den Ouden
* Copyright by Karsten Keil <keil@isdn4linux.de>
*
* This software may be used and distributed according to the terms
* of the GNU General Public License, incorporated herein by reference.
*
* For changes and modifications please read
* Documentation/isdn/HiSax.cert
*
* Thanks to Jan den Ouden
* Fritz Elfert
*
*/
#include <linux/init.h>
#include <linux/slab.h>
#include "hisax.h"
#include "isdnl3.h"
const char *l3_revision = "$Revision: 2.22.2.3 $";
static struct Fsm l3fsm;
enum {
ST_L3_LC_REL,
ST_L3_LC_ESTAB_WAIT,
ST_L3_LC_REL_DELAY,
ST_L3_LC_REL_WAIT,
ST_L3_LC_ESTAB,
};
#define L3_STATE_COUNT (ST_L3_LC_ESTAB + 1)
static char *strL3State[] =
{
"ST_L3_LC_REL",
"ST_L3_LC_ESTAB_WAIT",
"ST_L3_LC_REL_DELAY",
"ST_L3_LC_REL_WAIT",
"ST_L3_LC_ESTAB",
};
enum {
EV_ESTABLISH_REQ,
EV_ESTABLISH_IND,
EV_ESTABLISH_CNF,
EV_RELEASE_REQ,
EV_RELEASE_CNF,
EV_RELEASE_IND,
EV_TIMEOUT,
};
#define L3_EVENT_COUNT (EV_TIMEOUT + 1)
static char *strL3Event[] =
{
"EV_ESTABLISH_REQ",
"EV_ESTABLISH_IND",
"EV_ESTABLISH_CNF",
"EV_RELEASE_REQ",
"EV_RELEASE_CNF",
"EV_RELEASE_IND",
"EV_TIMEOUT",
};
/*
 * printf-style debug callback for the layer-3 link-control FSM:
 * forwards the formatted message to the HiSax status log, tagged with
 * this stack's debug id ("L3DC "/"L3BC ").
 */
static __printf(2, 3) void
l3m_debug(struct FsmInst *fi, char *fmt, ...)
{
va_list args;
struct PStack *st = fi->userdata;
va_start(args, fmt);
VHiSax_putstatus(st->l1.hardware, st->l3.debug_id, fmt, args);
va_end(args);
}
/*
 * Scan a raw Q.931 message of 'size' bytes for information element
 * 'ie' in codeset 'wanted_set'.  Returns a pointer to the IE's type
 * octet, or NULL if it is absent or its length field would run past
 * the end of the buffer.
 * Relies on the Q.931 coding rules: single-octet IEs have bit 7 set,
 * 0x9x octets are codeset shifts (bit 3 clear = locking shift), and
 * variable-length IEs appear in ascending order within a codeset
 * (hence the early bailout once *p > ie).
 */
u_char *
findie(u_char *p, int size, u_char ie, int wanted_set)
{
int l, codeset, maincodeset;
u_char *pend = p + size;
/* skip protocol discriminator, callref and message type */
p++;
l = (*p++) & 0xf;
p += l;
p++;
codeset = 0;
maincodeset = 0;
/* while there are bytes left... */
while (p < pend) {
if ((*p & 0xf0) == 0x90) {
/* shift IE: select a new codeset; a locking shift
 * (bit 3 clear) also becomes the new main codeset */
codeset = *p & 0x07;
if (!(*p & 0x08))
maincodeset = codeset;
}
if (*p & 0x80)
p++;
else {
if (codeset == wanted_set) {
if (*p == ie)
{ /* improved length check (Werner Cornelius) */
if ((pend - p) < 2)
return (NULL);
if (*(p + 1) > (pend - (p + 2)))
return (NULL);
return (p);
}
if (*p > ie)
return (NULL);
}
/* skip this variable-length IE: type, length, payload */
p++;
l = *p++;
p += l;
/* a non-locking shift only covers one IE */
codeset = maincodeset;
}
}
return (NULL);
}
/*
 * Extract the call reference from a raw Q.931 message buffer.
 * Layout: protocol discriminator, call reference length octet (low
 * nibble), then the call reference value.  Returns the 1-octet call
 * reference, -1 for the dummy call reference (length 0), or -2 when
 * the length octet is anything other than 0 or 1 (BRI permits
 * single-octet call references only).
 */
int
getcallref(u_char *p)
{
	int len;

	p++;			/* skip the protocol discriminator */
	if (*p & 0xfe)		/* only a 1-octet callref is valid on BRI */
		return -2;
	len = *p++ & 0xf;	/* call reference length */
	if (len == 0)		/* dummy call reference */
		return -1;
	return *p;		/* the single call reference octet */
}
/* Counter handing out originating call references in the range 1..127. */
static int OrigCallRef = 0;

/*
 * Return the next originating call reference, wrapping from 127 back
 * to 1 so that 0 (the dummy call reference) is never produced.
 */
int
newcallref(void)
{
	OrigCallRef = (OrigCallRef == 127) ? 1 : OrigCallRef + 1;
	return OrigCallRef;
}
/*
 * Move a layer-3 process into 'state', logging the transition when
 * L3_DEB_STATE debugging is enabled.
 */
void
newl3state(struct l3_process *pc, int state)
{
if (pc->debug & L3_DEB_STATE)
l3_debug(pc->st, "newstate cr %d %d --> %d",
pc->callref & 0x7F,
pc->state, state);
pc->state = state;
}
/*
 * Timer expiry callback: deliver the timer's stored event for its
 * l3_process upward through the stack's l4l3 entry point.
 */
static void
L3ExpireTimer(struct L3Timer *t)
{
t->pc->st->lli.l4l3(t->pc->st, t->event, t->pc);
}
/*
 * Bind a layer-3 timer to its owning process and prepare the kernel
 * timer (callback and data are set before init_timer).
 */
void
L3InitTimer(struct l3_process *pc, struct L3Timer *t)
{
t->pc = pc;
t->tl.function = (void *) L3ExpireTimer;
t->tl.data = (long) t;
init_timer(&t->tl);
}
/* Cancel a layer-3 timer; harmless if it is not running. */
void
L3DelTimer(struct L3Timer *t)
{
del_timer(&t->tl);
}
/*
 * Arm a layer-3 timer to raise 'event' after 'millisec' ms.
 * Refuses (returns -1) if the timer is already pending; 0 on success.
 */
int
L3AddTimer(struct L3Timer *t,
int millisec, int event)
{
if (timer_pending(&t->tl)) {
printk(KERN_WARNING "L3AddTimer: timer already active!\n");
return -1;
}
init_timer(&t->tl);
t->event = event;
t->tl.expires = jiffies + (millisec * HZ) / 1000;
add_timer(&t->tl);
return 0;
}
/* Stop every timer owned by the process (currently only pc->timer). */
void
StopAllL3Timer(struct l3_process *pc)
{
L3DelTimer(&pc->timer);
}
/*
 * Allocate an sk_buff for a D-channel message of 'len' payload bytes,
 * reserving MAX_HEADER_LEN of headroom for lower-layer headers.
 * Returns NULL (after a warning) if allocation fails.
 */
struct sk_buff *
l3_alloc_skb(int len)
{
struct sk_buff *skb;
if (!(skb = alloc_skb(len + MAX_HEADER_LEN, GFP_ATOMIC))) {
printk(KERN_WARNING "HiSax: No skb for D-channel\n");
return (NULL);
}
skb_reserve(skb, MAX_HEADER_LEN);
return (skb);
}
/*
 * Fallback layer-3 handler used when no D-channel protocol is
 * configured: logs the situation and drops any passed sk_buff.
 */
static void
no_l3_proto(struct PStack *st, int pr, void *arg)
{
struct sk_buff *skb = arg;
HiSax_putstatus(st->l1.hardware, "L3", "no D protocol");
if (skb) {
dev_kfree_skb(skb);
}
}
/*
 * Fallback for protocol-specific ioctl-style requests: warn and fail.
 */
static int
no_l3_proto_spec(struct PStack *st, isdn_ctrl *ic)
{
printk(KERN_WARNING "HiSax: no specific protocol handler for proto %lu\n", ic->arg & 0xFF);
return (-1);
}
/*
 * Look up the layer-3 process with call reference 'cr' on this stack.
 * Returns NULL when no matching process exists.
 */
struct l3_process
*getl3proc(struct PStack *st, int cr)
{
	struct l3_process *proc;

	for (proc = st->l3.proc; proc; proc = proc->next)
		if (proc->callref == cr)
			return proc;
	return NULL;
}
/*
 * Create a new layer-3 process for call reference 'cr' and append it
 * to the tail of the stack's singly linked process list.
 * Returns NULL (after logging) if allocation fails.
 * NOTE(review): the struct is kmalloc'ed, not zeroed — fields other
 * than those assigned below (e.g. the protocol-private part) start
 * uninitialized and are presumably filled in by the protocol module;
 * confirm before relying on them.
 */
struct l3_process
*new_l3_process(struct PStack *st, int cr)
{
struct l3_process *p, *np;
if (!(p = kmalloc(sizeof(struct l3_process), GFP_ATOMIC))) {
printk(KERN_ERR "HiSax can't get memory for cr %d\n", cr);
return (NULL);
}
/* append to the end of the process list */
if (!st->l3.proc)
st->l3.proc = p;
else {
np = st->l3.proc;
while (np->next)
np = np->next;
np->next = p;
}
p->next = NULL;
p->debug = st->l3.debug;
p->callref = cr;
p->state = 0;
p->chan = NULL;
p->st = st;
p->N303 = st->l3.N303;
L3InitTimer(p, &p->timer);
return (p);
};
/*
 * Unlink 'p' from its stack's process list, stop its timers and free
 * it.  When the last process disappears on a point-to-multipoint link
 * (FLG_PTP clear) and nothing is queued for transmission, the data
 * link is torn down as well.  Logs an internal error if 'p' is not on
 * the list.
 */
void
release_l3_process(struct l3_process *p)
{
struct l3_process *np, *pp = NULL;
if (!p)
return;
np = p->st->l3.proc;
while (np) {
if (np == p) {
StopAllL3Timer(p);
/* unlink: either fix the predecessor or move the list head */
if (pp)
pp->next = np->next;
else if (!(p->st->l3.proc = np->next) &&
!test_bit(FLG_PTP, &p->st->l2.flag)) {
if (p->debug)
l3_debug(p->st, "release_l3_process: last process");
if (skb_queue_empty(&p->st->l3.squeue)) {
if (p->debug)
l3_debug(p->st, "release_l3_process: release link");
/* NI-1 keeps layer 2 established; others request release */
if (p->st->protocol != ISDN_PTYPE_NI1)
FsmEvent(&p->st->l3.l3m, EV_RELEASE_REQ, NULL);
else
FsmEvent(&p->st->l3.l3m, EV_RELEASE_IND, NULL);
} else {
if (p->debug)
l3_debug(p->st, "release_l3_process: not release link");
}
}
kfree(p);
return;
}
pp = np;
np = np->next;
}
printk(KERN_ERR "HiSax internal L3 error CR(%d) not in list\n", p->callref);
l3_debug(p->st, "HiSax internal L3 error CR(%d) not in list", p->callref);
};
/*
 * Broadcast primitive 'pr' to every layer-3 process on the stack via
 * the protocol's l3ml3 callback.
 */
static void
l3ml3p(struct PStack *st, int pr)
{
struct l3_process *p = st->l3.proc;
struct l3_process *np;
while (p) {
/* p might be kfreed under us, so we need to save where we want to go on */
np = p->next;
st->l3.l3ml3(st, pr, p);
p = np;
}
}
/*
 * Initialize the layer-3 part of a D-channel stack: reset process
 * list and transmit queue, set up the link-control FSM, and hook in
 * the protocol module (DSS1/NI-1/1TR6) selected by st->protocol.
 * Unsupported protocols fall back to no_l3_proto and mark the stack
 * invalid (st->protocol = -1).
 */
void
setstack_l3dc(struct PStack *st, struct Channel *chanp)
{
char tmp[64];
st->l3.proc = NULL;
st->l3.global = NULL;
skb_queue_head_init(&st->l3.squeue);
st->l3.l3m.fsm = &l3fsm;
st->l3.l3m.state = ST_L3_LC_REL;
st->l3.l3m.debug = 1;
st->l3.l3m.userdata = st;
st->l3.l3m.userint = 0;
st->l3.l3m.printdebug = l3m_debug;
FsmInitTimer(&st->l3.l3m, &st->l3.l3m_timer);
strcpy(st->l3.debug_id, "L3DC ");
st->lli.l4l3_proto = no_l3_proto_spec;
/* dispatch to whichever D-channel protocol modules are compiled in */
#ifdef CONFIG_HISAX_EURO
if (st->protocol == ISDN_PTYPE_EURO) {
setstack_dss1(st);
} else
#endif
#ifdef CONFIG_HISAX_NI1
if (st->protocol == ISDN_PTYPE_NI1) {
setstack_ni1(st);
} else
#endif
#ifdef CONFIG_HISAX_1TR6
if (st->protocol == ISDN_PTYPE_1TR6) {
setstack_1tr6(st);
} else
#endif
if (st->protocol == ISDN_PTYPE_LEASED) {
st->lli.l4l3 = no_l3_proto;
st->l2.l2l3 = no_l3_proto;
st->l3.l3ml3 = no_l3_proto;
printk(KERN_INFO "HiSax: Leased line mode\n");
} else {
st->lli.l4l3 = no_l3_proto;
st->l2.l2l3 = no_l3_proto;
st->l3.l3ml3 = no_l3_proto;
sprintf(tmp, "protocol %s not supported",
(st->protocol == ISDN_PTYPE_1TR6) ? "1tr6" :
(st->protocol == ISDN_PTYPE_EURO) ? "euro" :
(st->protocol == ISDN_PTYPE_NI1) ? "ni1" :
"unknown");
printk(KERN_WARNING "HiSax: %s\n", tmp);
st->protocol = -1;
}
}
/* Transparent layer 3: pass layer-4 primitives straight down to layer 2. */
static void
isdnl3_trans(struct PStack *st, int pr, void *arg)
{
	st->l3.l3l2(st, pr, arg);
}
/*
 * Tear down the layer-3 part of a stack: release every process, free
 * the global process (if any), stop the link-control timer and purge
 * the transmit queue.
 */
void
releasestack_isdnl3(struct PStack *st)
{
while (st->l3.proc)
release_l3_process(st->l3.proc);
if (st->l3.global) {
StopAllL3Timer(st->l3.global);
kfree(st->l3.global);
st->l3.global = NULL;
}
FsmDelTimer(&st->l3.l3m_timer, 54);
skb_queue_purge(&st->l3.squeue);
}
/*
 * Initialize layer 3 of a B-channel stack: same FSM setup as the
 * D-channel variant, but layer 4 talks through the transparent
 * isdnl3_trans handler instead of a signalling protocol module.
 */
void
setstack_l3bc(struct PStack *st, struct Channel *chanp)
{
st->l3.proc = NULL;
st->l3.global = NULL;
skb_queue_head_init(&st->l3.squeue);
st->l3.l3m.fsm = &l3fsm;
st->l3.l3m.state = ST_L3_LC_REL;
st->l3.l3m.debug = 1;
st->l3.l3m.userdata = st;
st->l3.l3m.userint = 0;
st->l3.l3m.printdebug = l3m_debug;
strcpy(st->l3.debug_id, "L3BC ");
st->lli.l4l3 = isdnl3_trans;
}
/* Delay (ms) before an idle data link is actually released. */
#define DREL_TIMER_VALUE 40000
/* Establish request while released: ask layer 2 to bring the link up. */
static void
lc_activate(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
FsmChangeState(fi, ST_L3_LC_ESTAB_WAIT);
st->l3.l3l2(st, DL_ESTABLISH | REQUEST, NULL);
}
/*
 * Link came up (indication/confirm from released state): flush the
 * queued messages; if nothing else needs the link, release it again,
 * otherwise tell every process the link is established.
 */
static void
lc_connect(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
struct sk_buff *skb = arg;
int dequeued = 0;
FsmChangeState(fi, ST_L3_LC_ESTAB);
while ((skb = skb_dequeue(&st->l3.squeue))) {
st->l3.l3l2(st, DL_DATA | REQUEST, skb);
dequeued++;
}
if ((!st->l3.proc) && dequeued) {
if (st->l3.debug)
l3_debug(st, "lc_connect: release link");
FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
} else
l3ml3p(st, DL_ESTABLISH | INDICATION);
}
/*
 * Establish confirm while waiting: like lc_connect, but also cancels
 * the pending release-delay timer and reports CONFIRM upward.
 */
static void
lc_connected(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
struct sk_buff *skb = arg;
int dequeued = 0;
FsmDelTimer(&st->l3.l3m_timer, 51);
FsmChangeState(fi, ST_L3_LC_ESTAB);
while ((skb = skb_dequeue(&st->l3.squeue))) {
st->l3.l3l2(st, DL_DATA | REQUEST, skb);
dequeued++;
}
if ((!st->l3.proc) && dequeued) {
if (st->l3.debug)
l3_debug(st, "lc_connected: release link");
FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
} else
l3ml3p(st, DL_ESTABLISH | CONFIRM);
}
/* Release request: start the delayed-release timer instead of dropping
 * the link immediately. */
static void
lc_start_delay(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
FsmChangeState(fi, ST_L3_LC_REL_DELAY);
FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 50);
}
static void
lc_start_delay_check(struct FsmInst *fi, int event, void *arg)
/* 20/09/00 - GE timer not user for NI-1 as layer 2 should stay up */
{
struct PStack *st = fi->userdata;
FsmChangeState(fi, ST_L3_LC_REL_DELAY);
/* 19/09/00 - GE timer not user for NI-1 */
if (st->protocol != ISDN_PTYPE_NI1)
FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 50);
}
/*
 * Release-delay timer fired: actually ask layer 2 to release, unless
 * layer 2 is blocked, in which case the timer is re-armed.
 */
static void
lc_release_req(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
if (test_bit(FLG_L2BLOCK, &st->l2.flag)) {
if (st->l3.debug)
l3_debug(st, "lc_release_req: l2 blocked");
/* restart release timer */
FsmAddTimer(&st->l3.l3m_timer, DREL_TIMER_VALUE, EV_TIMEOUT, NULL, 51);
} else {
FsmChangeState(fi, ST_L3_LC_REL_WAIT);
st->l3.l3l2(st, DL_RELEASE | REQUEST, NULL);
}
}
/* Link dropped by the peer/layer 2: purge the queue and notify all
 * processes with a release indication. */
static void
lc_release_ind(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
FsmDelTimer(&st->l3.l3m_timer, 52);
FsmChangeState(fi, ST_L3_LC_REL);
skb_queue_purge(&st->l3.squeue);
l3ml3p(st, DL_RELEASE | INDICATION);
}
/* Our release request was confirmed: purge the queue and notify all
 * processes with a release confirm. */
static void
lc_release_cnf(struct FsmInst *fi, int event, void *arg)
{
struct PStack *st = fi->userdata;
FsmChangeState(fi, ST_L3_LC_REL);
skb_queue_purge(&st->l3.squeue);
l3ml3p(st, DL_RELEASE | CONFIRM);
}
/* *INDENT-OFF* */
/* (state, event) -> handler transition table for the link-control FSM. */
static struct FsmNode L3FnList[] __initdata =
{
{ST_L3_LC_REL, EV_ESTABLISH_REQ, lc_activate},
{ST_L3_LC_REL, EV_ESTABLISH_IND, lc_connect},
{ST_L3_LC_REL, EV_ESTABLISH_CNF, lc_connect},
{ST_L3_LC_ESTAB_WAIT, EV_ESTABLISH_CNF, lc_connected},
{ST_L3_LC_ESTAB_WAIT, EV_RELEASE_REQ, lc_start_delay},
{ST_L3_LC_ESTAB_WAIT, EV_RELEASE_IND, lc_release_ind},
{ST_L3_LC_ESTAB, EV_RELEASE_IND, lc_release_ind},
{ST_L3_LC_ESTAB, EV_RELEASE_REQ, lc_start_delay_check},
{ST_L3_LC_REL_DELAY, EV_RELEASE_IND, lc_release_ind},
{ST_L3_LC_REL_DELAY, EV_ESTABLISH_REQ, lc_connected},
{ST_L3_LC_REL_DELAY, EV_TIMEOUT, lc_release_req},
{ST_L3_LC_REL_WAIT, EV_RELEASE_CNF, lc_release_cnf},
{ST_L3_LC_REL_WAIT, EV_ESTABLISH_REQ, lc_activate},
};
/* *INDENT-ON* */
/* *INDENT-ON* */
/*
 * Entry point for primitives aimed at the data link: data requests are
 * sent directly when the link is up, otherwise queued while an
 * establish is triggered; all other primitives are translated into
 * link-control FSM events.
 */
void
l3_msg(struct PStack *st, int pr, void *arg)
{
switch (pr) {
case (DL_DATA | REQUEST):
if (st->l3.l3m.state == ST_L3_LC_ESTAB) {
st->l3.l3l2(st, pr, arg);
} else {
struct sk_buff *skb = arg;
/* queue until lc_connect/lc_connected flushes it */
skb_queue_tail(&st->l3.squeue, skb);
FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
}
break;
case (DL_ESTABLISH | REQUEST):
FsmEvent(&st->l3.l3m, EV_ESTABLISH_REQ, NULL);
break;
case (DL_ESTABLISH | CONFIRM):
FsmEvent(&st->l3.l3m, EV_ESTABLISH_CNF, NULL);
break;
case (DL_ESTABLISH | INDICATION):
FsmEvent(&st->l3.l3m, EV_ESTABLISH_IND, NULL);
break;
case (DL_RELEASE | INDICATION):
FsmEvent(&st->l3.l3m, EV_RELEASE_IND, NULL);
break;
case (DL_RELEASE | CONFIRM):
FsmEvent(&st->l3.l3m, EV_RELEASE_CNF, NULL);
break;
case (DL_RELEASE | REQUEST):
FsmEvent(&st->l3.l3m, EV_RELEASE_REQ, NULL);
break;
}
}
/*
 * Module init: build the shared link-control FSM jump table from
 * L3FnList.  Returns the FsmNew() result (0 on success).
 */
int __init
Isdnl3New(void)
{
l3fsm.state_count = L3_STATE_COUNT;
l3fsm.event_count = L3_EVENT_COUNT;
l3fsm.strEvent = strL3Event;
l3fsm.strState = strL3State;
return FsmNew(&l3fsm, L3FnList, ARRAY_SIZE(L3FnList));
}
/* Module exit: free the FSM jump table allocated by Isdnl3New(). */
void
Isdnl3Free(void)
{
FsmFree(&l3fsm);
}
| gpl-2.0 |
boa19861105/BOA | Documentation/filesystems/configfs/configfs_example_explicit.c | 12074 | 12614 | /*
* vim: noexpandtab ts=8 sts=0 sw=8:
*
* configfs_example_explicit.c - This file is a demonstration module
* containing a number of configfs subsystems. It explicitly defines
* each structure without using the helper macros defined in
* configfs.h.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public
* License as published by the Free Software Foundation; either
* version 2 of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public
* License along with this program; if not, write to the
* Free Software Foundation, Inc., 59 Temple Place - Suite 330,
* Boston, MA 021110-1307, USA.
*
* Based on sysfs:
* sysfs is Copyright (C) 2001, 2002, 2003 Patrick Mochel
*
* configfs Copyright (C) 2005 Oracle. All rights reserved.
*/
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/configfs.h>
/*
* 01-childless
*
* This first example is a childless subsystem. It cannot create
* any config_items. It just has attributes.
*
* Note that we are enclosing the configfs_subsystem inside a container.
* This is not necessary if a subsystem has no attributes directly
* on the subsystem. See the next example, 02-simple-children, for
* such a subsystem.
*/
/* Example 01: a subsystem with attributes only, wrapped in a container
 * so the attribute handlers can reach its private state. */
struct childless {
struct configfs_subsystem subsys;
int showme;	/* counter exposed read-only; bumps on every read */
int storeme;	/* plain read/write integer attribute */
};
/* Per-attribute wrapper binding a configfs_attribute to typed
 * show/store callbacks. */
struct childless_attribute {
struct configfs_attribute attr;
ssize_t (*show)(struct childless *, char *);
ssize_t (*store)(struct childless *, const char *, size_t);
};
/* Map a config_item back to its containing struct childless. */
static inline struct childless *to_childless(struct config_item *item)
{
return item ? container_of(to_configfs_subsystem(to_config_group(item)), struct childless, subsys) : NULL;
}
/* "showme" read: print the counter, then increment it (each read sees
 * a new value — deliberate, to demonstrate live state). */
static ssize_t childless_showme_read(struct childless *childless,
char *page)
{
ssize_t pos;
pos = sprintf(page, "%d\n", childless->showme);
childless->showme++;
return pos;
}
/* "storeme" read: print the stored integer. */
static ssize_t childless_storeme_read(struct childless *childless,
char *page)
{
return sprintf(page, "%d\n", childless->storeme);
}
/* "storeme" write: parse a decimal integer (optional trailing
 * newline); -EINVAL on junk, -ERANGE above INT_MAX. */
static ssize_t childless_storeme_write(struct childless *childless,
const char *page,
size_t count)
{
unsigned long tmp;
char *p = (char *) page;
tmp = simple_strtoul(p, &p, 10);
if ((*p != '\0') && (*p != '\n'))
return -EINVAL;
if (tmp > INT_MAX)
return -ERANGE;
childless->storeme = tmp;
return count;
}
/* "description" read: static help text for the subsystem. */
static ssize_t childless_description_read(struct childless *childless,
char *page)
{
return sprintf(page,
"[01-childless]\n"
"\n"
"The childless subsystem is the simplest possible subsystem in\n"
"configfs. It does not support the creation of child config_items.\n"
"It only has a few attributes. In fact, it isn't much different\n"
"than a directory in /proc.\n");
}
/* Attribute instances: name, mode and callbacks for each file. */
static struct childless_attribute childless_attr_showme = {
.attr = { .ca_owner = THIS_MODULE, .ca_name = "showme", .ca_mode = S_IRUGO },
.show = childless_showme_read,
};
static struct childless_attribute childless_attr_storeme = {
.attr = { .ca_owner = THIS_MODULE, .ca_name = "storeme", .ca_mode = S_IRUGO | S_IWUSR },
.show = childless_storeme_read,
.store = childless_storeme_write,
};
static struct childless_attribute childless_attr_description = {
.attr = { .ca_owner = THIS_MODULE, .ca_name = "description", .ca_mode = S_IRUGO },
.show = childless_description_read,
};
/* NULL-terminated attribute list handed to configfs. */
static struct configfs_attribute *childless_attrs[] = {
&childless_attr_showme.attr,
&childless_attr_storeme.attr,
&childless_attr_description.attr,
NULL,
};
/* Generic show dispatcher: recover the typed attribute and forward. */
static ssize_t childless_attr_show(struct config_item *item,
struct configfs_attribute *attr,
char *page)
{
struct childless *childless = to_childless(item);
struct childless_attribute *childless_attr =
container_of(attr, struct childless_attribute, attr);
ssize_t ret = 0;
if (childless_attr->show)
ret = childless_attr->show(childless, page);
return ret;
}
/* Generic store dispatcher; -EINVAL for attributes without a store. */
static ssize_t childless_attr_store(struct config_item *item,
struct configfs_attribute *attr,
const char *page, size_t count)
{
struct childless *childless = to_childless(item);
struct childless_attribute *childless_attr =
container_of(attr, struct childless_attribute, attr);
ssize_t ret = -EINVAL;
if (childless_attr->store)
ret = childless_attr->store(childless, page, count);
return ret;
}
static struct configfs_item_operations childless_item_ops = {
.show_attribute = childless_attr_show,
.store_attribute = childless_attr_store,
};
static struct config_item_type childless_type = {
.ct_item_ops = &childless_item_ops,
.ct_attrs = childless_attrs,
.ct_owner = THIS_MODULE,
};
/* The statically allocated subsystem instance ("01-childless"). */
static struct childless childless_subsys = {
.subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "01-childless",
.ci_type = &childless_type,
},
},
},
};
/* ----------------------------------------------------------------- */
/*
* 02-simple-children
*
* This example merely has a simple one-attribute child. Note that
* there is no extra attribute structure, as the child's attribute is
* known from the get-go. Also, there is no container for the
* subsystem, as it has no attributes of its own.
*/
/* Example 02: a dynamically created child item with one attribute. */
struct simple_child {
struct config_item item;
int storeme;	/* the single read/write integer attribute */
};
/* Map a config_item back to its containing struct simple_child. */
static inline struct simple_child *to_simple_child(struct config_item *item)
{
return item ? container_of(item, struct simple_child, item) : NULL;
}
/* The "storeme" attribute; no per-attribute callbacks are needed since
 * the item type has only this one attribute. */
static struct configfs_attribute simple_child_attr_storeme = {
.ca_owner = THIS_MODULE,
.ca_name = "storeme",
.ca_mode = S_IRUGO | S_IWUSR,
};
static struct configfs_attribute *simple_child_attrs[] = {
&simple_child_attr_storeme,
NULL,
};
/* Show the stored integer (the only attribute, so no dispatch). */
static ssize_t simple_child_attr_show(struct config_item *item,
struct configfs_attribute *attr,
char *page)
{
ssize_t count;
struct simple_child *simple_child = to_simple_child(item);
count = sprintf(page, "%d\n", simple_child->storeme);
return count;
}
/*
 * Store a new value for a simple_child's "storeme" attribute.
 * Accepts a decimal integer optionally followed by a newline;
 * returns -EINVAL for anything else and -ERANGE above INT_MAX.
 */
static ssize_t simple_child_attr_store(struct config_item *item,
				       struct configfs_attribute *attr,
				       const char *page, size_t count)
{
	struct simple_child *simple_child = to_simple_child(item);
	unsigned long tmp;
	char *p = (char *) page;

	tmp = simple_strtoul(p, &p, 10);
	/* simple_strtoul always advances p within the input string, so a
	 * NULL check on p is dead code; validate the terminator only
	 * (matches childless_storeme_write above). */
	if (*p && (*p != '\n'))
		return -EINVAL;
	if (tmp > INT_MAX)
		return -ERANGE;
	simple_child->storeme = tmp;
	return count;
}
/* Free the child when its last reference is dropped. */
static void simple_child_release(struct config_item *item)
{
kfree(to_simple_child(item));
}
static struct configfs_item_operations simple_child_item_ops = {
.release = simple_child_release,
.show_attribute = simple_child_attr_show,
.store_attribute = simple_child_attr_store,
};
static struct config_item_type simple_child_type = {
.ct_item_ops = &simple_child_item_ops,
.ct_attrs = simple_child_attrs,
.ct_owner = THIS_MODULE,
};
/* A group whose mkdir() creates simple_child items. */
struct simple_children {
struct config_group group;
};
/* Map a config_item back to its containing struct simple_children. */
static inline struct simple_children *to_simple_children(struct config_item *item)
{
return item ? container_of(to_config_group(item), struct simple_children, group) : NULL;
}
/* mkdir handler: allocate and initialize a new simple_child. */
static struct config_item *simple_children_make_item(struct config_group *group, const char *name)
{
struct simple_child *simple_child;
simple_child = kzalloc(sizeof(struct simple_child), GFP_KERNEL);
if (!simple_child)
return ERR_PTR(-ENOMEM);
config_item_init_type_name(&simple_child->item, name,
&simple_child_type);
simple_child->storeme = 0;
return &simple_child->item;
}
/* Read-only "description" attribute of the group itself. */
static struct configfs_attribute simple_children_attr_description = {
.ca_owner = THIS_MODULE,
.ca_name = "description",
.ca_mode = S_IRUGO,
};
static struct configfs_attribute *simple_children_attrs[] = {
&simple_children_attr_description,
NULL,
};
static ssize_t simple_children_attr_show(struct config_item *item,
struct configfs_attribute *attr,
char *page)
{
return sprintf(page,
"[02-simple-children]\n"
"\n"
"This subsystem allows the creation of child config_items. These\n"
"items have only one attribute that is readable and writeable.\n");
}
static void simple_children_release(struct config_item *item)
{
kfree(to_simple_children(item));
}
static struct configfs_item_operations simple_children_item_ops = {
.release = simple_children_release,
.show_attribute = simple_children_attr_show,
};
/*
 * Note that, since no extra work is required on ->drop_item(),
 * no ->drop_item() is provided.
 */
static struct configfs_group_operations simple_children_group_ops = {
.make_item = simple_children_make_item,
};
static struct config_item_type simple_children_type = {
.ct_item_ops = &simple_children_item_ops,
.ct_group_ops = &simple_children_group_ops,
.ct_attrs = simple_children_attrs,
.ct_owner = THIS_MODULE,
};
/* The statically allocated subsystem instance ("02-simple-children"). */
static struct configfs_subsystem simple_children_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "02-simple-children",
.ci_type = &simple_children_type,
},
},
};
/* ----------------------------------------------------------------- */
/*
* 03-group-children
*
* This example reuses the simple_children group from above. However,
* the simple_children group is not the subsystem itself, it is a
* child of the subsystem. Creation of a group in the subsystem creates
* a new simple_children group. That group can then have simple_child
* children of its own.
*/
/* Example 03: mkdir() in this subsystem creates a simple_children
 * group, which in turn can create simple_child items of its own. */
static struct config_group *group_children_make_group(struct config_group *group, const char *name)
{
struct simple_children *simple_children;
simple_children = kzalloc(sizeof(struct simple_children),
GFP_KERNEL);
if (!simple_children)
return ERR_PTR(-ENOMEM);
config_group_init_type_name(&simple_children->group, name,
&simple_children_type);
return &simple_children->group;
}
/* Read-only "description" attribute of the subsystem root. */
static struct configfs_attribute group_children_attr_description = {
.ca_owner = THIS_MODULE,
.ca_name = "description",
.ca_mode = S_IRUGO,
};
static struct configfs_attribute *group_children_attrs[] = {
&group_children_attr_description,
NULL,
};
static ssize_t group_children_attr_show(struct config_item *item,
struct configfs_attribute *attr,
char *page)
{
return sprintf(page,
"[03-group-children]\n"
"\n"
"This subsystem allows the creation of child config_groups. These\n"
"groups are like the subsystem simple-children.\n");
}
static struct configfs_item_operations group_children_item_ops = {
.show_attribute = group_children_attr_show,
};
/*
 * Note that, since no extra work is required on ->drop_item(),
 * no ->drop_item() is provided.
 */
static struct configfs_group_operations group_children_group_ops = {
.make_group = group_children_make_group,
};
static struct config_item_type group_children_type = {
.ct_item_ops = &group_children_item_ops,
.ct_group_ops = &group_children_group_ops,
.ct_attrs = group_children_attrs,
.ct_owner = THIS_MODULE,
};
/* The statically allocated subsystem instance ("03-group-children"). */
static struct configfs_subsystem group_children_subsys = {
.su_group = {
.cg_item = {
.ci_namebuf = "03-group-children",
.ci_type = &group_children_type,
},
},
};
/* ----------------------------------------------------------------- */
/*
* We're now done with our subsystem definitions.
* For convenience in this module, here's a list of them all. It
* allows the init function to easily register them. Most modules
* will only have one subsystem, and will only call register_subsystem
* on it directly.
*/
/* NULL-terminated list of all example subsystems, registered in order. */
static struct configfs_subsystem *example_subsys[] = {
&childless_subsys.subsys,
&simple_children_subsys,
&group_children_subsys,
NULL,
};
/*
 * Module init: initialize and register every subsystem in the list.
 * On failure, unregister the ones registered so far (in reverse order)
 * and propagate the error.
 */
static int __init configfs_example_init(void)
{
int ret;
int i;
struct configfs_subsystem *subsys;
for (i = 0; example_subsys[i]; i++) {
subsys = example_subsys[i];
config_group_init(&subsys->su_group);
mutex_init(&subsys->su_mutex);
ret = configfs_register_subsystem(subsys);
if (ret) {
printk(KERN_ERR "Error %d while registering subsystem %s\n",
ret,
subsys->su_group.cg_item.ci_namebuf);
goto out_unregister;
}
}
return 0;
out_unregister:
/* roll back only the subsystems that registered successfully */
for (i--; i >= 0; i--)
configfs_unregister_subsystem(example_subsys[i]);
return ret;
}
/* Module exit: unregister all subsystems. */
static void __exit configfs_example_exit(void)
{
int i;
for (i = 0; example_subsys[i]; i++)
configfs_unregister_subsystem(example_subsys[i]);
}
module_init(configfs_example_init);
module_exit(configfs_example_exit);
MODULE_LICENSE("GPL");
| gpl-2.0 |
somcom3x/kernel_samsung_msm8660-common | arch/arm/mach-msm/clock-debug.c | 43 | 6728 | /*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2007-2011, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#include <linux/clk.h>
#include <linux/list.h>
#include <linux/clkdev.h>
#include "clock.h"
/*
 * debugfs "rate" write handler: set the clock's rate (and, for
 * max-rate-capable clocks, its max rate) to 'val'.
 */
static int clock_debug_rate_set(void *data, u64 val)
{
struct clk *clock = data;
int ret;
/* Only increases to max rate will succeed, but that's actually good
* for debugging purposes so we don't check for error. */
if (clock->flags & CLKFLAG_MAX)
clk_set_max_rate(clock, val);
ret = clk_set_rate(clock, val);
if (ret)
pr_err("clk_set_rate failed (%d)\n", ret);
return ret;
}
/* debugfs "rate" read handler: report the clock's current rate. */
static int clock_debug_rate_get(void *data, u64 *val)
{
struct clk *clock = data;
*val = clk_get_rate(clock);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(clock_rate_fops, clock_debug_rate_get,
clock_debug_rate_set, "%llu\n");
/* The dedicated "measure" clock used to sample other clocks' rates. */
static struct clk *measure;
/*
 * debugfs "measure" read handler: route 'clock' to the measurement
 * clock and read back the measured rate, temporarily disabling
 * hardware clock gating so the sample is accurate.
 */
static int clock_debug_measure_get(void *data, u64 *val)
{
struct clk *clock = data;
int ret, is_hw_gated;
/* Check to see if the clock is in hardware gating mode */
if (clock->flags & CLKFLAG_HWCG)
is_hw_gated = clock->ops->in_hwcg_mode(clock);
else
is_hw_gated = 0;
ret = clk_set_parent(measure, clock);
if (!ret) {
/*
* Disable hw gating to get accurate rate measurements. Only do
* this if the clock is explicitly enabled by software. This
* allows us to detect errors where clocks are on even though
* software is not requesting them to be on due to broken
* hardware gating signals.
*/
if (is_hw_gated && clock->count)
clock->ops->disable_hwcg(clock);
*val = clk_get_rate(measure);
/* Reenable hwgating if it was disabled */
if (is_hw_gated && clock->count)
clock->ops->enable_hwcg(clock);
}
return ret;
}
DEFINE_SIMPLE_ATTRIBUTE(clock_measure_fops, clock_debug_measure_get,
NULL, "%lld\n");
/* debugfs "enable" write handler: non-zero enables, zero disables. */
static int clock_debug_enable_set(void *data, u64 val)
{
struct clk *clock = data;
int rc = 0;
if (val)
rc = clk_enable(clock);
else
clk_disable(clock);
return rc;
}
/*
 * debugfs "enable" read handler: prefer the driver's is_enabled()
 * hook; fall back to the software enable count otherwise.
 */
static int clock_debug_enable_get(void *data, u64 *val)
{
struct clk *clock = data;
int enabled;
if (clock->ops->is_enabled)
enabled = clock->ops->is_enabled(clock);
else
enabled = !!(clock->count);
*val = enabled;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(clock_enable_fops, clock_debug_enable_get,
clock_debug_enable_set, "%lld\n");
/* debugfs "is_local" read handler: ask the driver's is_local() hook. */
static int clock_debug_local_get(void *data, u64 *val)
{
struct clk *clock = data;
*val = clock->ops->is_local(clock);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(clock_local_fops, clock_debug_local_get,
NULL, "%llu\n");
/* debugfs "has_hw_gating" read handler: report the CLKFLAG_HWCG flag. */
static int clock_debug_hwcg_get(void *data, u64 *val)
{
struct clk *clock = data;
*val = !!(clock->flags & CLKFLAG_HWCG);
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(clock_hwcg_fops, clock_debug_hwcg_get,
NULL, "%llu\n");
/* Root of the "clk" debugfs tree and the registered clock table. */
static struct dentry *debugfs_base;
/* Non-zero enables printing of enabled clocks on suspend (debugfs knob). */
static u32 debug_suspend = 1;
static struct clk_lookup *msm_clocks;
static size_t num_msm_clocks;
/*
 * Create the /sys/kernel/debug/clk directory and its debug_suspend
 * knob, record the board clock table, and look up the optional
 * "measure" clock.  A missing measure clock is reported via the
 * return value but leaves the rest of the debug interface usable.
 */
int __init clock_debug_init(struct clock_init_data *data)
{
int ret = 0;
debugfs_base = debugfs_create_dir("clk", NULL);
if (!debugfs_base)
return -ENOMEM;
if (!debugfs_create_u32("debug_suspend", S_IRUGO | S_IWUSR,
debugfs_base, &debug_suspend)) {
debugfs_remove_recursive(debugfs_base);
return -ENOMEM;
}
msm_clocks = data->table;
num_msm_clocks = data->size;
measure = clk_get_sys("debug", "measure");
if (IS_ERR(measure)) {
ret = PTR_ERR(measure);
measure = NULL;
}
return ret;
}
/*
 * Log one enabled clock together with its parent chain
 * ("name -> parent -> ...").  Returns 1 if the clock was printed
 * (i.e. it exists and has a non-zero enable count), else 0.
 */
static int clock_debug_print_clock(struct clk *c)
{
size_t ln = 0;
char s[128];
if (!c || !c->count)
return 0;
ln += snprintf(s, sizeof(s), "\t%s", c->dbg_name);
while (ln < sizeof(s) && (c = clk_get_parent(c)))
ln += snprintf(s + ln, sizeof(s) - ln, " -> %s", c->dbg_name);
pr_info("%s\n", s);
return 1;
}
/*
 * Print every enabled clock in the table (used on suspend paths);
 * gated by the debug_suspend debugfs knob.
 */
void clock_debug_print_enabled(void)
{
unsigned i;
int cnt = 0;
if (likely(!debug_suspend))
return;
pr_info("Enabled clocks:\n");
for (i = 0; i < num_msm_clocks; i++)
cnt += clock_debug_print_clock(msm_clocks[i].clk);
if (cnt)
pr_info("Enabled clock count: %d\n", cnt);
else
pr_info("No clocks enabled.\n");
}
/*
 * seq_file show handler for the "list_rates" debugfs file: print each
 * supported frequency that does not exceed the highest rate allowed
 * by the clock's voltage (fmax) constraints.
 */
static int list_rates_show(struct seq_file *m, void *unused)
{
struct clk *clock = m->private;
int rate, level, fmax = 0, i = 0;
/* Find max frequency supported within voltage constraints. */
if (!clock->vdd_class) {
fmax = INT_MAX;
} else {
for (level = 0; level < ARRAY_SIZE(clock->fmax); level++)
if (clock->fmax[level])
fmax = clock->fmax[level];
}
/*
* List supported frequencies <= fmax. Higher frequencies may appear in
* the frequency table, but are not valid and should not be listed.
*/
while ((rate = clock->ops->list_rate(clock, i++)) >= 0) {
if (rate <= fmax)
seq_printf(m, "%u\n", rate);
}
return 0;
}
/* open() hook wiring the seq_file to the clock stored in i_private. */
static int list_rates_open(struct inode *inode, struct file *file)
{
return single_open(file, list_rates_show, inode->i_private);
}
static const struct file_operations list_rates_fops = {
.open = list_rates_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
/*
 * Create the per-clock debugfs directory (lower-cased dbg_name) with
 * its rate/enable/is_local/has_hw_gating files, plus "measure" when a
 * measurement clock accepts this clock as parent and "list_rates"
 * when the driver implements list_rate.  Removes the directory again
 * on any failure and returns -ENOMEM.
 */
int __init clock_debug_add(struct clk *clock)
{
char temp[50], *ptr;
struct dentry *clk_dir;
if (!debugfs_base)
return -ENOMEM;
strlcpy(temp, clock->dbg_name, ARRAY_SIZE(temp));
for (ptr = temp; *ptr; ptr++)
*ptr = tolower(*ptr);
clk_dir = debugfs_create_dir(temp, debugfs_base);
if (!clk_dir)
return -ENOMEM;
if (!debugfs_create_file("rate", S_IRUGO | S_IWUSR, clk_dir,
clock, &clock_rate_fops))
goto error;
if (!debugfs_create_file("enable", S_IRUGO | S_IWUSR, clk_dir,
clock, &clock_enable_fops))
goto error;
if (!debugfs_create_file("is_local", S_IRUGO, clk_dir, clock,
&clock_local_fops))
goto error;
if (!debugfs_create_file("has_hw_gating", S_IRUGO, clk_dir, clock,
&clock_hwcg_fops))
goto error;
if (measure &&
!clk_set_parent(measure, clock) &&
!debugfs_create_file("measure", S_IRUGO, clk_dir, clock,
&clock_measure_fops))
goto error;
if (clock->ops->list_rate)
if (!debugfs_create_file("list_rates",
S_IRUGO, clk_dir, clock, &list_rates_fops))
goto error;
return 0;
error:
debugfs_remove_recursive(clk_dir);
return -ENOMEM;
}
| gpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.